| text (string, length 12–1.05M) | repo_name (string, length 5–86) | path (string, length 4–191) | language (string, 1 class) | license (string, 15 classes) | size (int32, 12–1.05M) | keyword (list, length 1–23) | text_hash (string, length 64) |
|---|---|---|---|---|---|---|---|
from ase import *
from ase.dft import monkhorst_pack
from ase.structure import bulk
from gpaw import *
from gpaw.test import equal
import numpy as np
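# Bulk Si in the diamond structure: two atoms in the fcc primitive cell,
# lattice constant a0 in Angstrom.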
a0 = 5.43
cell = bulk('Si', 'fcc', a=a0).get_cell()
Si = Atoms('Si2', cell=cell, pbc=True,
scaled_positions=((0,0,0), (0.25,0.25,0.25)))
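# 2x2x2 Monkhorst-Pack mesh, shifted by (1/4, 1/4, 1/4) so it avoids the Gamma point.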
kpts = monkhorst_pack((2,2,2))
kpts += np.array([1/4., 1/4., 1/4.])
calc = GPAW(h=0.18,
kpts=kpts,
# parallel={'domain':1}, idiotproof=False,
occupations=FermiDirac(0.001))
Si.set_calculator(calc)
E = Si.get_potential_energy()
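# Evaluate exact exchange (EXX) non-self-consistently on top of the converged
# density, three ways: the ACDF expression from hybridq.py, the ACDF
# expression from hybridk.py, and the standard Hartree-Fock expression
# (acdf=False). The three results are compared at the end.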
from gpaw.xc.hybridq import HybridXC
exx = HybridXC('EXX')
E_q = E + calc.get_xc_difference(exx)
from gpaw.xc.hybridk import HybridXC
exx = HybridXC('EXX', acdf=True, etotflag=True)
E_k1 = E + calc.get_xc_difference(exx)
from gpaw.xc.hybridk import HybridXC
exx = HybridXC('EXX', acdf=False, etotflag=True)
E_k2 = E + calc.get_xc_difference(exx)
print 'Hartree-Fock ACDF method (hybridq.py) :', E_q
print 'Hartree-Fock ACDF method (hybridk.py) :', E_k1
print 'Hartree-Fock Standard method :', E_k2
equal(E_k2, E_k1, 0.001)
equal(E_q, E_k1, 0.001)
equal(E_q, -27.71, 0.01)
| ajylee/gpaw-rtxs | gpaw/test/exx_q.py | Python | gpl-3.0 | 1,155 | ["ASE", "GPAW"] | 26625acab1367fff93a66c99935dd404907e19c38cc30158e6a0a222a344daa7 |
import unittest, random, sys, time
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_hosts, h2o_glm, h2o_import as h2i, h2o_exec as h2e
def define_params():
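# Candidate values for each GLM parameter; h2o_glm.pickRandGlmParams() below
# draws a random combination per trial (None presumably leaves the server default).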
paramDict = {
'standardize': [None, 0,1],
'beta_epsilon': [None, 0.0001],
'family': [None, 'gaussian', 'binomial', 'poisson'],
'lambda': [0,1e-8,1e-4,1e-3],
'alpha': [0,0.8,0.75],
'ignored_cols': [1,'C1','1,2','C1,C2'],
'max_iter': [None, 10],
'higher_accuracy': [None, 0, 1],
'use_all_factor_levels': [None, 0, 1],
'lambda_search': [None, 0], # FIX! what if lambda is set when lambda_search=1
'tweedie_variance_power': [None, 0, 1],
}
return paramDict
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED, localhost
SEED = h2o.setup_random_seed()
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(node_count=1)
else:
h2o_hosts.build_cloud_with_hosts(node_count=1)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_GLM2_params_rand2(self):
h2o.beta_features = True
csvPathname = 'covtype/covtype.20k.data'
parseResult = h2i.import_parse(bucket='smalldata', path=csvPathname, schema='put', hex_key="covtype.20k")
CLASS = 1
# make a binomial version
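# B.hex is a copy of the parsed frame with the response column (1-based
# column 55) replaced by a 0/1 indicator for membership in CLASS.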
execExpr="B.hex=%s; B.hex[,%s]=(B.hex[,%s]==%s)" % ('covtype.20k', 54+1, 54+1, CLASS)
h2e.exec_expr(execExpr=execExpr, timeoutSecs=30)
paramDict = define_params()
for trial in range(20):
# params is mutated by pickRandGlmParams; these are the defaults.
params = {
'response': 54,
'alpha': 0.1,
# 'lambda': 1e-4,
'lambda': 0,
'n_folds': 1,
}
colX = h2o_glm.pickRandGlmParams(paramDict, params)
kwargs = params.copy()
if 'family' not in kwargs or kwargs['family']=='binomial':
bHack = {'destination_key': 'B.hex'}
else:
bHack = parseResult
start = time.time()
glm = h2o_cmd.runGLM(timeoutSecs=300, parseResult=bHack, **kwargs)
# pass the kwargs with all the params, so we know what we asked for!
h2o_glm.simpleCheckGLM(self, glm, None, **kwargs)
h2o.check_sandbox_for_errors()
print "glm end on ", csvPathname, 'took', time.time() - start, 'seconds'
print "Trial #", trial, "completed\n"
if __name__ == '__main__':
h2o.unit_main()
| woobe/h2o | py/testdir_single_jvm/test_GLM2_params_rand2.py | Python | apache-2.0 | 2,725 | ["Gaussian"] | 27d47451f7f10e344dc183af816744c5e0e150d6fa813118cb4a26de2c7b53ae |
# shamelessly copied from pliExpertInfo (Vali, Mirakels, Littlesat)
from os import path
from enigma import iServiceInformation, iPlayableService
from Components.Converter.Converter import Converter
from Components.Element import cached
from Components.config import config
from Tools.Transponder import ConvertToHumanReadable, getChannelNumber
from Tools.GetEcmInfo import GetEcmInfo
from Components.Converter.Poll import Poll
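# Fields: (lowest CAID, highest CAID, system name, short label,
# show in the crypto bar even when the CA system is not available).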
caid_data = (
("0x100", "0x1ff", "Seca", "S", True),
("0x500", "0x5ff", "Via", "V", True),
("0x600", "0x6ff", "Irdeto", "I", True),
("0x900", "0x9ff", "NDS", "Nd", True),
("0xb00", "0xbff", "Conax", "Co", True),
("0xd00", "0xdff", "CryptoW", "Cw", True),
("0xe00", "0xeff", "PowerVU", "P", False),
("0x1000", "0x10FF", "Tandberg", "TB", False),
("0x1700", "0x17ff", "Beta", "B", True),
("0x1800", "0x18ff", "Nagra", "N", True),
("0x2600", "0x2600", "Biss", "Bi", False),
("0x4ae0", "0x4ae1", "Dre", "D", False),
("0x4aee", "0x4aee", "BulCrypt", "B1", False),
("0x5581", "0x5581", "BulCrypt", "B2", False)
)
# stream type to codec map
codec_data = {
-1: "N/A",
0: "MPEG2",
1: "AVC",
2: "H263",
3: "VC1",
4: "MPEG4-VC",
5: "VC1-SM",
6: "MPEG1",
7: "HEVC",
8: "VP8",
9: "VP9",
10: "XVID",
11: "N/A 11",
12: "N/A 12",
13: "DIVX 3.11",
14: "DIVX 4",
15: "DIVX 5",
16: "AVS",
17: "N/A 17",
18: "VP6",
19: "N/A 19",
20: "N/A 20",
21: "SPARK",
}
def addspace(text):
if text:
text += " "
return text
class PliExtraInfo(Poll, Converter):
def __init__(self, type):
Converter.__init__(self, type)
Poll.__init__(self)
self.type = type
self.poll_interval = 1000
self.poll_enabled = True
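# Fields: (converter type string, short label matching caid_data,
# selected flag: True checks the CAID in use, False checks availability).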
self.ca_table = (
("CryptoCaidSecaAvailable", "S", False),
("CryptoCaidViaAvailable", "V", False),
("CryptoCaidIrdetoAvailable", "I", False),
("CryptoCaidNDSAvailable", "Nd", False),
("CryptoCaidConaxAvailable", "Co", False),
("CryptoCaidCryptoWAvailable", "Cw", False),
("CryptoCaidPowerVUAvailable", "P", False),
("CryptoCaidBetaAvailable", "B", False),
("CryptoCaidNagraAvailable", "N", False),
("CryptoCaidBissAvailable", "Bi", False),
("CryptoCaidDreAvailable", "D", False),
("CryptoCaidBulCrypt1Available", "B1", False),
("CryptoCaidBulCrypt2Available", "B2", False),
("CryptoCaidTandbergAvailable", "T", False),
("CryptoCaidSecaSelected", "S", True),
("CryptoCaidViaSelected", "V", True),
("CryptoCaidIrdetoSelected", "I", True),
("CryptoCaidNDSSelected", "Nd", True),
("CryptoCaidConaxSelected", "Co", True),
("CryptoCaidCryptoWSelected", "Cw", True),
("CryptoCaidPowerVUSelected", "P", True),
("CryptoCaidBetaSelected", "B", True),
("CryptoCaidNagraSelected", "N", True),
("CryptoCaidBissSelected", "Bi", True),
("CryptoCaidDreSelected", "D", True),
("CryptoCaidBulCrypt1Selected", "B1", True),
("CryptoCaidBulCrypt2Selected", "B2", True),
("CryptoCaidTandbergSelected", "T", True)
)
self.ecmdata = GetEcmInfo()
self.feraw = self.fedata = self.updateFEdata = None
def getCryptoInfo(self, info):
if info.getInfo(iServiceInformation.sIsCrypted) == 1:
data = self.ecmdata.getEcmData()
self.current_source = data[0]
self.current_caid = data[1]
self.current_provid = data[2]
self.current_ecmpid = data[3]
else:
self.current_source = ""
self.current_caid = "0"
self.current_provid = "0"
self.current_ecmpid = "0"
def createCryptoBar(self, info):
res = ""
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
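# Colour code: green = CA system currently decoding, yellow = advertised by
# the service, grey = absent; grey entries are shown only when flagged
# always-visible in caid_data.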
for caid_entry in caid_data:
if int(caid_entry[0], 16) <= int(self.current_caid, 16) <= int(caid_entry[1], 16):
color = "\c0000ff00"
else:
color = "\c007f7f7f"
try:
for caid in available_caids:
if int(caid_entry[0], 16) <= caid <= int(caid_entry[1], 16):
color = "\c00ffff00"
except:
pass
if color != "\c007f7f7f" or caid_entry[4]:
if res:
res += " "
res += color + caid_entry[3]
res += "\c00ffffff"
return res
def createCryptoSeca(self, info):
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
if int('0x100', 16) <= int(self.current_caid, 16) <= int('0x1ff', 16):
color = "\c004c7d3f"
else:
color = "\c009f9f9f"
try:
for caid in available_caids:
if int('0x100', 16) <= caid <= int('0x1ff', 16):
color = "\c00eeee00"
except:
pass
res = color + 'S'
res += "\c00ffffff"
return res
def createCryptoVia(self, info):
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
if int('0x500', 16) <= int(self.current_caid, 16) <= int('0x5ff', 16):
color = "\c004c7d3f"
else:
color = "\c009f9f9f"
try:
for caid in available_caids:
if int('0x500', 16) <= caid <= int('0x5ff', 16):
color = "\c00eeee00"
except:
pass
res = color + 'V'
res += "\c00ffffff"
return res
def createCryptoIrdeto(self, info):
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
if int('0x600', 16) <= int(self.current_caid, 16) <= int('0x6ff', 16):
color = "\c004c7d3f"
else:
color = "\c009f9f9f"
try:
for caid in available_caids:
if int('0x600', 16) <= caid <= int('0x6ff', 16):
color = "\c00eeee00"
except:
pass
res = color + 'I'
res += "\c00ffffff"
return res
def createCryptoNDS(self, info):
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
if int('0x900', 16) <= int(self.current_caid, 16) <= int('0x9ff', 16):
color = "\c004c7d3f"
else:
color = "\c009f9f9f"
try:
for caid in available_caids:
if int('0x900', 16) <= caid <= int('0x9ff', 16):
color = "\c00eeee00"
except:
pass
res = color + 'NDS'
res += "\c00ffffff"
return res
def createCryptoConax(self, info):
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
if int('0xb00', 16) <= int(self.current_caid, 16) <= int('0xbff', 16):
color = "\c004c7d3f"
else:
color = "\c009f9f9f"
try:
for caid in available_caids:
if int('0xb00', 16) <= caid <= int('0xbff', 16):
color = "\c00eeee00"
except:
pass
res = color + 'CO'
res += "\c00ffffff"
return res
def createCryptoCryptoW(self, info):
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
if int('0xd00', 16) <= int(self.current_caid, 16) <= int('0xdff', 16):
color = "\c004c7d3f"
else:
color = "\c009f9f9f"
try:
for caid in available_caids:
if int('0xd00', 16) <= caid <= int('0xdff', 16):
color = "\c00eeee00"
except:
pass
res = color + 'CW'
res += "\c00ffffff"
return res
def createCryptoPowerVU(self, info):
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
if int('0xe00', 16) <= int(self.current_caid, 16) <= int('0xeff', 16):
color = "\c004c7d3f"
else:
color = "\c009f9f9f"
try:
for caid in available_caids:
if int('0xe00', 16) <= caid <= int('0xeff', 16):
color = "\c00eeee00"
except:
pass
res = color + 'P'
res += "\c00ffffff"
return res
def createCryptoTandberg(self, info):
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
if int('0x1010', 16) <= int(self.current_caid, 16) <= int('0x1010', 16):
color = "\c004c7d3f"
else:
color = "\c009f9f9f"
try:
for caid in available_caids:
if int('0x1010', 16) <= caid <= int('0x1010', 16):
color = "\c00eeee00"
except:
pass
res = color + 'T'
res += "\c00ffffff"
return res
def createCryptoBeta(self, info):
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
if int('0x1700', 16) <= int(self.current_caid, 16) <= int('0x17ff', 16):
color = "\c004c7d3f"
else:
color = "\c009f9f9f"
try:
for caid in available_caids:
if int('0x1700', 16) <= caid <= int('0x17ff', 16):
color = "\c00eeee00"
except:
pass
res = color + 'B'
res += "\c00ffffff"
return res
def createCryptoNagra(self, info):
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
if int('0x1800', 16) <= int(self.current_caid, 16) <= int('0x18ff', 16):
color = "\c004c7d3f"
else:
color = "\c009f9f9f"
try:
for caid in available_caids:
if int('0x1800', 16) <= caid <= int('0x18ff', 16):
color = "\c00eeee00"
except:
pass
res = color + 'N'
res += "\c00ffffff"
return res
def createCryptoBiss(self, info):
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
if int('0x2600', 16) <= int(self.current_caid, 16) <= int('0x26ff', 16):
color = "\c004c7d3f"
else:
color = "\c009f9f9f"
try:
for caid in available_caids:
if int('0x2600', 16) <= caid <= int('0x26ff', 16):
color = "\c00eeee00"
except:
pass
res = color + 'BI'
res += "\c00ffffff"
return res
def createCryptoDre(self, info):
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
if int('0x4ae0', 16) <= int(self.current_caid, 16) <= int('0x4ae1', 16):
color = "\c004c7d3f"
else:
color = "\c009f9f9f"
try:
for caid in available_caids:
if int('0x4ae0', 16) <= caid <= int('0x4ae1', 16):
color = "\c00eeee00"
except:
pass
res = color + 'DC'
res += "\c00ffffff"
return res
def createCryptoSpecial(self, info):
caid_name = "FTA"
try:
for caid_entry in caid_data:
if int(caid_entry[0], 16) <= int(self.current_caid, 16) <= int(caid_entry[1], 16):
caid_name = caid_entry[2]
break
return caid_name + ":%04x:%04x:%04x" % (int(self.current_caid, 16), int(self.current_provid, 16), info.getInfo(iServiceInformation.sSID))
except:
pass
return ""
def createCryptoNameCaid(self, info):
caid_name = "FTA"
if int(self.current_caid, 16) == 0:
return caid_name
try:
for caid_entry in caid_data:
if int(caid_entry[0], 16) <= int(self.current_caid, 16) <= int(caid_entry[1], 16):
caid_name = caid_entry[2]
break
return caid_name + ":%04x" % (int(self.current_caid, 16))
except:
pass
return ""
def createResolution(self, info):
video_height = 0
video_width = 0
video_pol = " "
video_rate = 0
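# The decoder exposes these values under /proc/stb/vmpeg: xres, yres and
# progressive are hex, framerate is in units of 1/1000 fps (hence the
# rounding below).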
if path.exists("/proc/stb/vmpeg/0/yres"):
f = open("/proc/stb/vmpeg/0/yres", "r")
try:
video_height = int(f.read(), 16)
except:
pass
f.close()
if path.exists("/proc/stb/vmpeg/0/xres"):
f = open("/proc/stb/vmpeg/0/xres", "r")
try:
video_width = int(f.read(), 16)
except:
pass
f.close()
if path.exists("/proc/stb/vmpeg/0/progressive"):
f = open("/proc/stb/vmpeg/0/progressive", "r")
try:
video_pol = "p" if int(f.read(), 16) else "i"
except:
pass
f.close()
if path.exists("/proc/stb/vmpeg/0/framerate"):
f = open("/proc/stb/vmpeg/0/framerate", "r")
try:
video_rate = int(f.read())
except:
pass
f.close()
fps = str((video_rate + 500) / 1000)
gamma = ("SDR", "HDR", "HDR10", "HLG", "")[info.getInfo(iServiceInformation.sGamma)]
return str(video_width) + "x" + str(video_height) + video_pol + fps + addspace(gamma)
def createVideoCodec(self, info):
return codec_data.get(info.getInfo(iServiceInformation.sVideoType), "N/A")
def createServiceRef(self, info):
return info.getInfoString(iServiceInformation.sServiceref)
def createPIDInfo(self, info):
vpid = info.getInfo(iServiceInformation.sVideoPID)
apid = info.getInfo(iServiceInformation.sAudioPID)
pcrpid = info.getInfo(iServiceInformation.sPCRPID)
sidpid = info.getInfo(iServiceInformation.sSID)
tsid = info.getInfo(iServiceInformation.sTSID)
onid = info.getInfo(iServiceInformation.sONID)
if vpid < 0:
vpid = 0
if apid < 0:
apid = 0
if pcrpid < 0:
pcrpid = 0
if sidpid < 0:
sidpid = 0
if tsid < 0:
tsid = 0
if onid < 0:
onid = 0
return "%d-%d:%05d:%04d:%04d:%04d" % (onid, tsid, sidpid, vpid, apid, pcrpid)
def createTransponderInfo(self, fedata, feraw, info):
if not feraw:
refstr = info.getInfoString(iServiceInformation.sServiceref)
if "%3a//" in refstr.lower():
return refstr.split(":")[10].replace("%3a", ":").replace("%3A", ":")
return ""
elif "DVB-T" in feraw.get("tuner_type"):
tmp = addspace(self.createChannelNumber(fedata, feraw)) + addspace(self.createFrequency(fedata)) + addspace(self.createPolarization(fedata))
else:
tmp = addspace(self.createFrequency(fedata)) + addspace(self.createPolarization(fedata))
return addspace(self.createTunerSystem(fedata)) + tmp + addspace(self.createSymbolRate(fedata, feraw)) + addspace(self.createFEC(fedata, feraw)) \
+ addspace(self.createModulation(fedata)) + addspace(self.createOrbPos(feraw)) + addspace(self.createMisPls(fedata))
def createFrequency(self, fedata):
frequency = fedata.get("frequency")
if frequency:
return str(frequency)
return ""
def createChannelNumber(self, fedata, feraw):
return "DVB-T" in feraw.get("tuner_type") and fedata.get("channel") or ""
def createSymbolRate(self, fedata, feraw):
if "DVB-T" in feraw.get("tuner_type"):
bandwidth = fedata.get("bandwidth")
if bandwidth:
return bandwidth
else:
symbolrate = fedata.get("symbol_rate")
if symbolrate:
return str(symbolrate)
return ""
def createPolarization(self, fedata):
return fedata.get("polarization_abbreviation") or ""
def createFEC(self, fedata, feraw):
if "DVB-T" in feraw.get("tuner_type"):
code_rate_lp = fedata.get("code_rate_lp")
code_rate_hp = fedata.get("code_rate_hp")
guard_interval = fedata.get('guard_interval')
if code_rate_lp and code_rate_hp and guard_interval:
return code_rate_lp + "-" + code_rate_hp + "-" + guard_interval
else:
fec = fedata.get("fec_inner")
if fec:
return fec
return ""
def createModulation(self, fedata):
if fedata.get("tuner_type") == _("Terrestrial"):
constellation = fedata.get("constellation")
if constellation:
return constellation
else:
modulation = fedata.get("modulation")
if modulation:
return modulation
return ""
def createTunerType(self, feraw):
return feraw.get("tuner_type") or ""
def createTunerSystem(self, fedata):
return fedata.get("system") or ""
def createOrbPos(self, feraw):
orbpos = feraw.get("orbital_position")
if orbpos:
if orbpos > 1800:
return str((float(3600 - orbpos)) / 10.0) + u"\u00B0" + "W"
elif orbpos > 0:
return str((float(orbpos)) / 10.0) + u"\u00B0" + "E"
return ""
def createOrbPosOrTunerSystem(self, fedata, feraw):
orbpos = self.createOrbPos(feraw)
if orbpos != "":
return orbpos
return self.createTunerSystem(fedata)
def createTransponderName(self, feraw):
orbpos = feraw.get("orbital_position")
if orbpos is None: # Not satellite
return ""
freq = feraw.get("frequency")
if freq and freq < 10700000: # C-band
if orbpos > 1800:
orbpos += 1
else:
orbpos -= 1
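# Orbital position in tenths of a degree (eastward; keys > 1800 are west of
# Greenwich) -> satellite name. C-band entries are looked up at a key offset
# by 1 (0.1 degree), matching the shift applied above.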
sat_names = {
30: 'Rascom/Eutelsat 3E',
48: 'SES 5',
70: 'Eutelsat 7E',
90: 'Eutelsat 9E',
100: 'Eutelsat 10E',
130: 'Hot Bird',
160: 'Eutelsat 16E',
192: 'Astra 1KR/1L/1M/1N',
200: 'Arabsat 20E',
216: 'Eutelsat 21.5E',
235: 'Astra 3',
255: 'Eutelsat 25.5E',
260: 'Badr 4/5/6',
282: 'Astra 2E/2F/2G',
305: 'Arabsat 30.5E',
315: 'Astra 5',
330: 'Eutelsat 33E',
360: 'Eutelsat 36E',
380: 'Paksat',
390: 'Hellas Sat',
400: 'Express 40E',
420: 'Turksat',
450: 'Intelsat 45E',
480: 'Afghansat',
490: 'Yamal 49E',
530: 'Express 53E',
570: 'NSS 57E',
600: 'Intelsat 60E',
620: 'Intelsat 62E',
685: 'Intelsat 68.5E',
705: 'Eutelsat 70.5E',
720: 'Intelsat 72E',
750: 'ABS',
765: 'Apstar',
785: 'ThaiCom',
800: 'Express 80E',
830: 'Insat',
851: 'Intelsat/Horizons',
880: 'ST2',
900: 'Yamal 90E',
915: 'Mesat',
950: 'NSS/SES 95E',
1005: 'AsiaSat 100E',
1030: 'Express 103E',
1055: 'Asiasat 105E',
1082: 'NSS/SES 108E',
1100: 'BSat/NSAT',
1105: 'ChinaSat',
1130: 'KoreaSat',
1222: 'AsiaSat 122E',
1380: 'Telstar 18',
1440: 'SuperBird',
2310: 'Ciel',
2390: 'Echostar/Galaxy 121W',
2410: 'Echostar/DirectTV 119W',
2500: 'Echostar/DirectTV 110W',
2630: 'Galaxy 97W',
2690: 'NIMIQ 91W',
2780: 'NIMIQ 82W',
2830: 'Echostar/QuetzSat',
2880: 'AMC 72W',
2900: 'Star One',
2985: 'Echostar 61.5W',
2990: 'Amazonas',
3020: 'Intelsat 58W',
3045: 'Intelsat 55.5W',
3070: 'Intelsat 53W',
3100: 'Intelsat 50W',
3150: 'Intelsat 45W',
3169: 'Intelsat 43.1W',
3195: 'SES 40.5W',
3225: 'NSS/Telstar 37W',
3255: 'Intelsat 34.5W',
3285: 'Intelsat 31.5W',
3300: 'Hispasat',
3325: 'Intelsat 27.5W',
3355: 'Intelsat 24.5W',
3380: 'SES 22W',
3400: 'NSS 20W',
3420: 'Intelsat 18W',
3450: 'Telstar 15W',
3460: 'Express 14W',
3475: 'Eutelsat 12.5W',
3490: 'Express 11W',
3520: 'Eutelsat 8W',
3530: 'Nilesat/Eutelsat 7W',
3550: 'Eutelsat 5W',
3560: 'Amos',
3592: 'Thor/Intelsat'
}
if orbpos in sat_names:
return sat_names[orbpos]
elif orbpos > 1800:
return str((float(3600 - orbpos)) / 10.0) + "W"
else:
return str((float(orbpos)) / 10.0) + "E"
def createProviderName(self, info):
return info.getInfoString(iServiceInformation.sProvider)
def createMisPls(self, fedata):
tmp = ""
if fedata.get("is_id"):
if fedata.get("is_id") > -1:
tmp = "MIS %d" % fedata.get("is_id")
if fedata.get("pls_code"):
if fedata.get("pls_code") > 0:
tmp = addspace(tmp) + "%s %d" % (fedata.get("pls_mode"), fedata.get("pls_code"))
if fedata.get("t2mi_plp_id"):
if fedata.get("t2mi_plp_id") > -1:
tmp = addspace(tmp) + "T2MI %d PID %d" % (fedata.get("t2mi_plp_id"), fedata.get("t2mi_pid"))
return tmp
@cached
def getText(self):
service = self.source.service
if service is None:
return ""
info = service and service.info()
if not info:
return ""
if self.type == "CryptoInfo":
self.getCryptoInfo(info)
if int(config.usage.show_cryptoinfo.value) > 0:
return addspace(self.createCryptoBar(info)) + self.createCryptoSpecial(info)
else:
return addspace(self.createCryptoBar(info)) + addspace(self.current_source) + self.createCryptoSpecial(info)
if self.type == "CryptoBar":
if int(config.usage.show_cryptoinfo.value) > 0:
self.getCryptoInfo(info)
return self.createCryptoBar(info)
else:
return ""
if self.type == "CryptoSeca":
if int(config.usage.show_cryptoinfo.value) > 0:
self.getCryptoInfo(info)
return self.createCryptoSeca(info)
else:
return ""
if self.type == "CryptoVia":
if int(config.usage.show_cryptoinfo.value) > 0:
self.getCryptoInfo(info)
return self.createCryptoVia(info)
else:
return ""
if self.type == "CryptoIrdeto":
if int(config.usage.show_cryptoinfo.value) > 0:
self.getCryptoInfo(info)
return self.createCryptoIrdeto(info)
else:
return ""
if self.type == "CryptoNDS":
if int(config.usage.show_cryptoinfo.value) > 0:
self.getCryptoInfo(info)
return self.createCryptoNDS(info)
else:
return ""
if self.type == "CryptoConax":
if int(config.usage.show_cryptoinfo.value) > 0:
self.getCryptoInfo(info)
return self.createCryptoConax(info)
else:
return ""
if self.type == "CryptoCryptoW":
if int(config.usage.show_cryptoinfo.value) > 0:
self.getCryptoInfo(info)
return self.createCryptoCryptoW(info)
else:
return ""
if self.type == "CryptoBeta":
if int(config.usage.show_cryptoinfo.value) > 0:
self.getCryptoInfo(info)
return self.createCryptoBeta(info)
else:
return ""
if self.type == "CryptoNagra":
if int(config.usage.show_cryptoinfo.value) > 0:
self.getCryptoInfo(info)
return self.createCryptoNagra(info)
else:
return ""
if self.type == "CryptoBiss":
if int(config.usage.show_cryptoinfo.value) > 0:
self.getCryptoInfo(info)
return self.createCryptoBiss(info)
else:
return ""
if self.type == "CryptoDre":
if int(config.usage.show_cryptoinfo.value) > 0:
self.getCryptoInfo(info)
return self.createCryptoDre(info)
else:
return ""
if self.type == "CryptoTandberg":
if int(config.usage.show_cryptoinfo.value) > 0:
self.getCryptoInfo(info)
return self.createCryptoTandberg(info)
else:
return ""
if self.type == "CryptoSpecial":
if int(config.usage.show_cryptoinfo.value) > 0:
self.getCryptoInfo(info)
return self.createCryptoSpecial(info)
else:
return ""
if self.type == "CryptoNameCaid":
if int(config.usage.show_cryptoinfo.value) > 0:
self.getCryptoInfo(info)
return self.createCryptoNameCaid(info)
else:
return ""
if self.type == "ResolutionString":
return self.createResolution(info)
if self.type == "VideoCodec":
return self.createVideoCodec(info)
if self.updateFEdata:
feinfo = service.frontendInfo()
if feinfo:
self.feraw = feinfo.getAll(config.usage.infobar_frontend_source.value == "settings")
if self.feraw:
self.fedata = ConvertToHumanReadable(self.feraw)
feraw = self.feraw
if not feraw:
feraw = info.getInfoObject(iServiceInformation.sTransponderData)
if not feraw:
return ""
fedata = ConvertToHumanReadable(feraw)
else:
fedata = self.fedata
if self.type == "All":
self.getCryptoInfo(info)
if int(config.usage.show_cryptoinfo.value) > 0:
return addspace(self.createProviderName(info)) + self.createTransponderInfo(fedata, feraw, info) + addspace(self.createTransponderName(feraw)) + "\n"\
+ addspace(self.createCryptoBar(info)) + addspace(self.createCryptoSpecial(info)) + "\n"\
+ addspace(self.createPIDInfo(info)) + addspace(self.createVideoCodec(info)) + self.createResolution(info)
else:
return addspace(self.createProviderName(info)) + self.createTransponderInfo(fedata, feraw, info) + addspace(self.createTransponderName(feraw)) + "\n" \
+ addspace(self.createCryptoBar(info)) + self.current_source + "\n" \
+ addspace(self.createCryptoSpecial(info)) + addspace(self.createVideoCodec(info)) + self.createResolution(info)
if self.type == "ServiceInfo":
return addspace(self.createProviderName(info)) + addspace(self.createTunerSystem(fedata)) + addspace(self.createFrequency(feraw)) + addspace(self.createPolarization(fedata)) \
+ addspace(self.createSymbolRate(fedata, feraw)) + addspace(self.createFEC(fedata, feraw)) + addspace(self.createModulation(fedata)) + addspace(self.createOrbPos(feraw)) + addspace(self.createTransponderName(feraw))\
+ addspace(self.createVideoCodec(info)) + self.createResolution(info)
if self.type == "TransponderInfo2line":
return addspace(self.createProviderName(info)) + addspace(self.createTunerSystem(fedata)) + addspace(self.createTransponderName(feraw)) + '\n'\
+ addspace(self.createFrequency(fedata)) + addspace(self.createPolarization(fedata))\
+ addspace(self.createSymbolRate(fedata, feraw)) + self.createModulation(fedata) + '-' + addspace(self.createFEC(fedata, feraw))
if self.type == "PIDInfo":
return self.createPIDInfo(info)
if self.type == "ServiceRef":
return self.createServiceRef(info)
if not feraw:
return ""
if self.type == "TransponderInfo":
return self.createTransponderInfo(fedata, feraw, info)
if self.type == "TransponderFrequency":
return self.createFrequency(feraw)
if self.type == "TransponderSymbolRate":
return self.createSymbolRate(fedata, feraw)
if self.type == "TransponderPolarization":
return self.createPolarization(fedata)
if self.type == "TransponderFEC":
return self.createFEC(fedata, feraw)
if self.type == "TransponderModulation":
return self.createModulation(fedata)
if self.type == "OrbitalPosition":
return self.createOrbPos(feraw)
if self.type == "TunerType":
return self.createTunerType(feraw)
if self.type == "TunerSystem":
return self.createTunerSystem(fedata)
if self.type == "OrbitalPositionOrTunerSystem":
return self.createOrbPosOrTunerSystem(fedata, feraw)
if self.type == "TerrestrialChannelNumber":
return self.createChannelNumber(fedata, feraw)
return _("invalid type")
text = property(getText)
@cached
def getBool(self):
service = self.source.service
info = service and service.info()
if not info:
return False
request_caid = None
for x in self.ca_table:
if x[0] == self.type:
request_caid = x[1]
request_selected = x[2]
break
if request_caid is None:
return False
if info.getInfo(iServiceInformation.sIsCrypted) != 1:
return False
data = self.ecmdata.getEcmData()
if data is None:
return False
current_caid = data[1]
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
for caid_entry in caid_data:
if caid_entry[3] == request_caid:
if request_selected:
if int(caid_entry[0], 16) <= int(current_caid, 16) <= int(caid_entry[1], 16):
return True
else: # request available
try:
for caid in available_caids:
if int(caid_entry[0], 16) <= caid <= int(caid_entry[1], 16):
return True
except:
pass
return False
boolean = property(getBool)
def changed(self, what):
if what[0] == self.CHANGED_SPECIFIC:
self.updateFEdata = False
if what[1] == iPlayableService.evNewProgramInfo:
self.updateFEdata = True
if what[1] == iPlayableService.evEnd:
self.feraw = self.fedata = None
Converter.changed(self, what)
elif what[0] == self.CHANGED_POLL and self.updateFEdata is not None:
self.updateFEdata = False
Converter.changed(self, what)
| openatv/enigma2 | lib/python/Components/Converter/PliExtraInfo.py | Python | gpl-2.0 | 25,454 | ["Galaxy"] | 1f884a7134a99447383e583cf733e4a0df8d4131b008a86e9e22f85b92d25322 |
# -*- coding: utf-8 -*-
"""
Created on 25 Jun 2014
@author: Éric Piel
Copyright © 2014-2015 Éric Piel, Delmic
This file is part of Odemis.
Odemis is free software: you can redistribute it and/or modify it under the terms of the GNU
General Public License version 2 as published by the Free Software Foundation.
Odemis is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
Public License for more details.
You should have received a copy of the GNU General Public License along with Odemis. If not,
see http://www.gnu.org/licenses/.
"""
# Contains all the static streams, which only provide projections of the data
# they were initialised with.
from __future__ import division
import collections
import logging
import math
import numpy
from odemis import model
from odemis.acq import calibration
from odemis.model import MD_POS, MD_PIXEL_SIZE, VigilantAttribute
from odemis.util import img, conversion, polar, spectrum
from scipy import ndimage
from ._base import Stream
class StaticStream(Stream):
"""
Stream containing one static image.
For testing and static images.
"""
def __init__(self, name, raw):
"""
Note: parameters are different from the base class.
raw (DataArray, DataArrayShadow or list of DataArray): The data to display.
"""
super(StaticStream, self).__init__(name, None, None, None, raw=raw)
class RGBStream(StaticStream):
"""
A static stream which gets as input the actual RGB image
"""
def __init__(self, name, raw):
"""
Note: parameters are different from the base class.
raw (DataArray, DataArrayShadow or list of DataArray): The data to display.
"""
# Check it's RGB
if isinstance(raw, (model.DataArray, model.DataArrayShadow)):
raw = [raw]
for d in raw:
dims = d.metadata.get(model.MD_DIMS, "CTZYX"[-d.ndim::])
ci = dims.find("C") # -1 if not found
if not (dims in ("CYX", "YXC") and d.shape[ci] in (3, 4)):
raise ValueError("Data must be RGB(A)")
super(RGBStream, self).__init__(name, raw)
def _init_projection_vas(self):
''' On RGBStream, the projection is done on RGBSpatialProjection
'''
pass
def _init_thread(self):
''' The thread for updating the image on RGBStream resides on DataProjection
TODO remove this function when all the streams become projectionless
'''
pass
class Static2DStream(StaticStream):
"""
Stream containing one static image.
For testing and static images.
"""
def __init__(self, name, raw):
"""
Note: parameters are different from the base class.
raw (DataArray or DataArrayShadow): The data to display.
"""
super(Static2DStream, self).__init__(name, [raw])
def _init_projection_vas(self):
''' On Static2DStream, the projection is done on RGBSpatialProjection
'''
pass
def _init_thread(self):
''' The thread for updating the image on Static2DStream resides on DataProjection
TODO remove this function when all the streams become projectionless
'''
pass
class StaticSEMStream(Static2DStream):
"""
Same as a StaticStream, but considered a SEM stream
"""
pass
class StaticCLStream(Static2DStream):
"""
Same as a StaticStream, but has an emission wavelength
"""
def __init__(self, name, raw):
"""
Note: parameters are different from the base class.
raw (DataArray of shape (111)YX): raw data. The metadata should
contain at least MD_POS and MD_PIXEL_SIZE. It should also contain
MD_OUT_WL.
"""
try:
em_range = raw.metadata[model.MD_OUT_WL]
if isinstance(em_range, basestring):
unit = None
else:
unit = "m"
self.emission = VigilantAttribute(em_range, unit=unit,
readonly=True)
except KeyError:
logging.warning("No emission wavelength for CL stream")
# Do it at the end, as it forces the update of the image
Static2DStream.__init__(self, name, raw)
class StaticBrightfieldStream(Static2DStream):
"""
Same as a StaticStream, but considered a Brightfield stream
"""
pass
class StaticFluoStream(Static2DStream):
"""Static Stream containing images obtained via epifluorescence.
It basically knows how to show the excitation/emission wavelengths,
and how to taint the image.
"""
def __init__(self, name, raw):
"""
Note: parameters are different from the base class.
raw (DataArray of shape (111)YX): raw data. The metadata should
contain at least MD_POS and MD_PIXEL_SIZE. It should also contain
MD_IN_WL and MD_OUT_WL.
"""
# Note: it will update the image, and changing the tint will do it again
super(StaticFluoStream, self).__init__(name, raw)
# Wavelengths
try:
exc_range = raw.metadata[model.MD_IN_WL]
self.excitation = VigilantAttribute(exc_range, unit="m",
readonly=True)
except KeyError:
logging.warning("No excitation wavelength for fluorescence stream")
default_tint = (0, 255, 0) # green is most typical
try:
em_range = raw.metadata[model.MD_OUT_WL]
if isinstance(em_range, basestring):
unit = None
else:
unit = "m"
default_tint = conversion.wave2rgb(numpy.mean(em_range))
self.emission = VigilantAttribute(em_range, unit=unit,
readonly=True)
except KeyError:
logging.warning("No emission wavelength for fluorescence stream")
# colouration of the image
tint = raw.metadata.get(model.MD_USER_TINT, default_tint)
self.tint.value = tint
class StaticARStream(StaticStream):
"""
An angular resolved stream for one set of data.
There is no directly nice (=obvious) format to store AR data.
The difficulty is that data is somehow 4 dimensions: SEM-X, SEM-Y, CCD-X,
CCD-Y. CCD-dimensions do not correspond directly to quantities, until
converted into angle/angle (knowing the position of the pole).
As it's possible that positions on the SEM are relatively random, and it
is convenient to have a simple format when only one SEM pixel is scanned,
we've picked the following convention:
* each CCD image is a separate DataArray
* each CCD image contains metadata about the SEM position (MD_POS, in m)
pole (MD_AR_POLE, in px), and acquisition time (MD_ACQ_DATE)
* multiple CCD images are grouped together in a list
"""
def __init__(self, name, data):
"""
name (string)
data (model.DataArray(Shadow) of shape (YX) or list of such DataArray(Shadow)).
The metadata MD_POS and MD_AR_POLE should be provided
"""
if not isinstance(data, collections.Iterable):
data = [data] # from now it's just a list of DataArray
# TODO: support DAS, as a "delayed loading" by only calling .getData()
# when the projection for the particular data needs to be computed (or
# .raw needs to be accessed?)
# Ensure all the data is a DataArray, as we don't handle (yet) DAS
data = [d.getData() if isinstance(d, model.DataArrayShadow) else d for d in data]
# find positions of each acquisition
# tuple of 2 floats -> DataArray: position on SEM -> data
self._sempos = {}
for d in data:
try:
self._sempos[d.metadata[MD_POS]] = img.ensure2DImage(d)
except KeyError:
logging.info("Skipping DataArray without known position")
# Cached conversion of the CCD image to polar representation
# TODO: automatically fill it in a background thread
self._polar = {} # dict tuple 2 floats -> DataArray
# SEM position displayed, (None, None) == no point selected
self.point = model.VAEnumerated((None, None),
choices=frozenset([(None, None)] + list(self._sempos.keys())))
# The background data (typically, an acquisition without ebeam).
# It is subtracted from the acquisition data.
# If set to None, a simple baseline background value is subtracted.
self.background = model.VigilantAttribute(None,
setter=self._setBackground)
self.background.subscribe(self._onBackground)
if self._sempos:
# Pick one point, e.g., top-left
bbtl = (min(x for x, y in self._sempos.keys() if x is not None),
min(y for x, y in self._sempos.keys() if y is not None))
# top-left point is the closest from the bounding-box top-left
def dis_bbtl(v):
try:
return math.hypot(bbtl[0] - v[0], bbtl[1] - v[1])
except TypeError:
return float("inf") # for None, None
self.point.value = min(self._sempos.keys(), key=dis_bbtl)
# no need for init=True, as Stream.__init__ will update the image
self.point.subscribe(self._onPoint)
super(StaticARStream, self).__init__(name, list(self._sempos.values()))
def _project2Polar(self, pos):
"""
Return the polar projection of the image at the given position.
pos (tuple of 2 floats): position (must be part of ._sempos)
returns DataArray: the polar projection
"""
if pos in self._polar:
polard = self._polar[pos]
else:
# Compute the polar representation
data = self._sempos[pos]
try:
if numpy.prod(data.shape) > (1280 * 1080):
# AR conversion fails with very large images due to too much
# memory consumed (> 2Gb). So, rescale + use a "degraded" type that
# uses less memory. As the display size is small (compared
# to the size of the input image), it shouldn't actually
# affect the output much.
logging.info("AR image is very large %s, will convert to "
"azimuthal projection in reduced precision.",
data.shape)
y, x = data.shape
if y > x:
small_shape = 1024, int(round(1024 * x / y))
else:
small_shape = int(round(1024 * y / x)), 1024
# resize
data = img.rescale_hq(data, small_shape)
dtype = numpy.float16
else:
dtype = None # just let the function use the best one
# 2 x size of original image (on smallest axis) and at most
# the size of a full-screen canvas
size = min(min(data.shape) * 2, 1134)
# TODO: First compute quickly a low resolution and then
# compute a high resolution version.
# TODO: could use the size of the canvas that will display
# the image to save some computation time.
bg_data = self.background.value
if bg_data is None:
# Simple version: remove the background value
data0 = polar.ARBackgroundSubtract(data)
else:
data0 = img.Subtract(data, bg_data) # metadata from data
# Warning: allocates a lot of memory, which will not be freed until
# the current thread is terminated.
polard = polar.AngleResolved2Polar(data0, size, hole=False, dtype=dtype)
# TODO: don't hold too many of them in cache (eg, max 3 * 1134**2)
self._polar[pos] = polard
except Exception:
logging.exception("Failed to convert to azimuthal projection")
return data # display it raw as fallback
return polard
def _find_metadata(self, md):
# For polar view, no PIXEL_SIZE nor POS
return {}
def _updateImage(self):
""" Recomputes the image with all the raw data available for the current
selected point.
"""
if not self.raw:
return
pos = self.point.value
try:
if pos == (None, None):
self.image.value = None
else:
polard = self._project2Polar(pos)
# update the histogram
# TODO: cache the histogram per image
# FIXME: histogram should not include the black pixels outside
# of the circle. => use a masked array?
# reset the drange to ensure that it doesn't depend on older data
self._drange = None
self._updateHistogram(polard)
self.image.value = self._projectXY2RGB(polard)
except Exception:
logging.exception("Updating %s image", self.__class__.__name__)
def _onPoint(self, pos):
self._shouldUpdateImage()
def _setBackground(self, data):
"""Called when the background is about to be changed"""
if data is None:
return
# check it's compatible with the data
data = img.ensure2DImage(data)
arpole = data.metadata[model.MD_AR_POLE] # we expect the data has AR_POLE
# TODO: allow data which is the same shape but lower binning by
# estimating the binned image
# Check the background data and all the raw data have the same resolution
# TODO: how to handle if the .raw has different resolutions?
for r in self.raw:
if data.shape != r.shape:
raise ValueError("Incompatible resolution of background data "
"%s with the angular resolved resolution %s." %
(data.shape, r.shape))
if data.dtype != r.dtype:
raise ValueError("Incompatible encoding of background data "
"%s with the angular resolved encoding %s." %
(data.dtype, r.dtype))
try:
if data.metadata[model.MD_BPP] != r.metadata[model.MD_BPP]:
raise ValueError(
"Incompatible format of background data "
"(%d bits) with the angular resolved format "
"(%d bits)." %
(data.metadata[model.MD_BPP], r.metadata[model.MD_BPP]))
except KeyError:
pass # no metadata, let's hope it's the same BPP
# check the AR pole is at the same position
for r in self.raw:
if r.metadata[model.MD_AR_POLE] != arpole:
logging.warning("Pole position of background data %s is "
"different from the data %s.",
arpole, r.metadata[model.MD_AR_POLE])
return data
def _onBackground(self, data):
"""Called when the background is changed"""
# uncache all the polar images, and update the current image
self._polar = {}
self._shouldUpdateImage()
class StaticSpectrumStream(StaticStream):
"""
A Spectrum stream which displays only one static image/data.
The main difference from the normal streams is that the data is 3D (a cube)
The metadata should have a MD_WL_POLYNOMIAL or MD_WL_LIST
Note that the data received should be of the (numpy) shape CYX or C11YX.
When saving, the data will be converted to CTZYX (where TZ is 11)
The histogram corresponds to the data after calibration, and selected via
the spectrumBandwidth VA.
"""
def __init__(self, name, image):
"""
name (string)
image (model.DataArray(Shadow) of shape (CYX) or (C11YX)). The metadata
MD_WL_POLYNOMIAL or MD_WL_LIST should be included in order to associate the C to a
wavelength.
"""
# Spectrum stream has in addition to normal stream:
# * information about the current bandwidth displayed (avg. spectrum)
# * coordinates of 1st point (1-point, line)
# * coordinates of 2nd point (line)
# TODO: need to handle DAS properly, in case it's tiled (in XY), to avoid
# loading too much data in memory.
# Ensure the data is a DataArray, as we don't handle (yet) DAS
if isinstance(image, model.DataArrayShadow):
image = image.getData()
if len(image.shape) == 3:
# force 5D
image = image[:, numpy.newaxis, numpy.newaxis, :, :]
elif len(image.shape) != 5 or image.shape[1:3] != (1, 1):
logging.error("Cannot handle data of shape %s", image.shape)
raise NotImplementedError("SpectrumStream needs a cube data")
# This is for "average spectrum" projection
try:
# cached list of wavelength for each pixel pos
self._wl_px_values = spectrum.get_wavelength_per_pixel(image)
except (ValueError, KeyError):
# useless polynomial => just show pixels values (ex: -50 -> +50 px)
# TODO: try to make them always int?
max_bw = image.shape[0] // 2
min_bw = (max_bw - image.shape[0]) + 1
self._wl_px_values = range(min_bw, max_bw + 1)
assert(len(self._wl_px_values) == image.shape[0])
unit_bw = "px"
cwl = (max_bw + min_bw) // 2
width = image.shape[0] // 12
else:
min_bw, max_bw = self._wl_px_values[0], self._wl_px_values[-1]
unit_bw = "m"
cwl = (max_bw + min_bw) / 2
width = (max_bw - min_bw) / 12
# TODO: allow to pass the calibration data as argument to avoid
# recomputing the data just after init?
# Spectrum efficiency compensation data: None or a DataArray (cf acq.calibration)
self.efficiencyCompensation = model.VigilantAttribute(None, setter=self._setEffComp)
# The background data (typically, an acquisition without e-beam).
# It is subtracted from the acquisition data.
# If set to None, a simple baseline background value is subtracted.
self.background = model.VigilantAttribute(None, setter=self._setBackground)
# low/high values of the spectrum displayed
self.spectrumBandwidth = model.TupleContinuous(
(cwl - width, cwl + width),
range=((min_bw, min_bw), (max_bw, max_bw)),
unit=unit_bw,
cls=(int, long, float))
# Whether the (per bandwidth) display should be split into 3 sub-bands
# which are applied to RGB
self.fitToRGB = model.BooleanVA(False)
# This attribute is used to keep track of any selected pixel within the
# data for the display of a spectrum
self.selected_pixel = model.TupleVA((None, None)) # int, int
# first point, second point in pixels. It must be 2 elements long.
self.selected_line = model.ListVA([(None, None), (None, None)], setter=self._setLine)
# Peak method index, None if spectrum peak fitting curve is not displayed
self.peak_method = model.VAEnumerated("gaussian", {"gaussian", "lorentzian", None})
# The thickness of a point or a line (shared).
# A point of width W leads to the average value between all the pixels
# which are within W/2 from the center of the point.
# A line of width W leads to a 1D spectrum taking into account all the
# pixels which fit on an orthogonal line to the selected line at a
# distance <= W/2.
self.selectionWidth = model.IntContinuous(1, [1, 50], unit="px")
self.fitToRGB.subscribe(self.onFitToRGB)
self.spectrumBandwidth.subscribe(self.onSpectrumBandwidth)
self.efficiencyCompensation.subscribe(self._onCalib)
self.background.subscribe(self._onCalib)
self.selectionWidth.subscribe(self._onSelectionWidth)
self._calibrated = image # the raw data after calibration
super(StaticSpectrumStream, self).__init__(name, [image])
# Automatically select point/line if data is small (can only be done
# after .raw is set)
if image.shape[-2:] == (1, 1): # Only one point => select it immediately
self.selected_pixel.value = (0, 0)
elif image.shape[-2] == 1: # Horizontal line => select line immediately
self.selected_line.value = [(0, 0), (image.shape[-1] - 1, 0)]
elif image.shape[-1] == 1: # Vertical line => select line immediately
self.selected_line.value = [(0, 0), (0, image.shape[-2] - 1)]
# The tricky part is we need to keep the raw data as .raw for things
# like saving the stream or updating the calibration, but all the
# display-related methods must work on the calibrated data.
def _updateDRange(self, data=None):
if data is None:
data = self._calibrated
super(StaticSpectrumStream, self)._updateDRange(data)
def _updateHistogram(self, data=None):
if data is None:
spec_range = self._get_bandwidth_in_pixel()
data = self._calibrated[spec_range[0]:spec_range[1] + 1]
super(StaticSpectrumStream, self)._updateHistogram(data)
def _setLine(self, line):
"""
Checks that the value set could be correct
"""
if len(line) != 2:
raise ValueError("selected_line must be of length 2")
shape = self.raw[0].shape[-1:-3:-1]
for p in line:
if p == (None, None):
continue
if len(p) != 2:
raise ValueError("selected_line must contain only tuples of 2 ints")
if not 0 <= p[0] < shape[0] or not 0 <= p[1] < shape[1]:
raise ValueError("selected_line must only contain coordinates "
"within %s" % (shape,))
if not isinstance(p[0], int) or not isinstance(p[1], int):
raise ValueError("selected_line must only contain ints but is %s"
% (line,))
return line
def _get_bandwidth_in_pixel(self):
"""
Return the current bandwidth in pixels index
returns (2-tuple of int): low and high pixel coordinates (included)
"""
low, high = self.spectrumBandwidth.value
# Find the closest pixel position for the requested wavelength
low_px = numpy.searchsorted(self._wl_px_values, low, side="left")
low_px = min(low_px, len(self._wl_px_values) - 1) # make sure it fits
# TODO: might need better handling to show just one pixel (in case it's
# useful) as in almost all cases, it will end up displaying 2 pixels at
# least
if high == low:
high_px = low_px
else:
high_px = numpy.searchsorted(self._wl_px_values, high, side="right")
high_px = min(high_px, len(self._wl_px_values) - 1)
logging.debug("Showing between %g -> %g nm = %d -> %d px",
low * 1e9, high * 1e9, low_px, high_px)
assert low_px <= high_px
return low_px, high_px
def get_spatial_spectrum(self, data=None, raw=False):
"""
Project a spectrum cube (CYX) to XY space in RGB, by averaging the
intensity over all the wavelengths (selected by the user)
data (DataArray or None): if provided, will use the cube, otherwise,
will use the whole data from the stream.
raw (bool): if True, will return the "raw" values (ie, same data type as
the original data). Otherwise, it will return a RGB image.
return (DataArray YXC of uint8 or YX of same data type as data): average
intensity over the selected wavelengths
"""
if data is None:
data = self._calibrated
md = self._find_metadata(data.metadata)
# pick only the data inside the bandwidth
spec_range = self._get_bandwidth_in_pixel()
logging.debug("Spectrum range picked: %s px", spec_range)
if raw:
av_data = numpy.mean(data[spec_range[0]:spec_range[1] + 1], axis=0)
av_data = img.ensure2DImage(av_data).astype(data.dtype)
return model.DataArray(av_data, md)
else:
irange = self._getDisplayIRange() # will update histogram if not yet present
if not self.fitToRGB.value:
# TODO: use better intermediary type if possible?, cf semcomedi
av_data = numpy.mean(data[spec_range[0]:spec_range[1] + 1], axis=0)
av_data = img.ensure2DImage(av_data)
rgbim = img.DataArray2RGB(av_data, irange)
else:
# Note: For now this method uses three independent bands. To give
# a better sense of continuum, and be closer to reality when using
# the visible light's band, we should take a weighted average of the
# whole spectrum for each band. But in practice, that would be less
# useful.
# divide the range into 3 sub-ranges (BGR) of almost the same length
len_rng = spec_range[1] - spec_range[0] + 1
brange = [spec_range[0], int(round(spec_range[0] + len_rng / 3)) - 1]
grange = [brange[1] + 1, int(round(spec_range[0] + 2 * len_rng / 3)) - 1]
rrange = [grange[1] + 1, spec_range[1]]
# ensure each range contains at least one pixel
brange[1] = max(brange)
grange[1] = max(grange)
rrange[1] = max(rrange)
# FIXME: unoptimized, as each channel is duplicated 3 times, and discarded
av_data = numpy.mean(data[rrange[0]:rrange[1] + 1], axis=0)
av_data = img.ensure2DImage(av_data)
rgbim = img.DataArray2RGB(av_data, irange)
av_data = numpy.mean(data[grange[0]:grange[1] + 1], axis=0)
av_data = img.ensure2DImage(av_data)
gim = img.DataArray2RGB(av_data, irange)
rgbim[:, :, 1] = gim[:, :, 0]
av_data = numpy.mean(data[brange[0]:brange[1] + 1], axis=0)
av_data = img.ensure2DImage(av_data)
bim = img.DataArray2RGB(av_data, irange)
rgbim[:, :, 2] = bim[:, :, 0]
rgbim.flags.writeable = False
md[model.MD_DIMS] = "YXC" # RGB format
return model.DataArray(rgbim, md)
def get_spectrum_range(self):
""" Return the wavelength for each pixel of a (complete) spectrum
returns (list of numbers or None): one wavelength per spectrum pixel.
Values are in meters, unless the spectrum cannot be determined, in
which case integers representing pixels index is returned.
If no data is available, None is returned.
(str): unit of spectrum range
"""
data = self._calibrated
try:
return spectrum.get_wavelength_per_pixel(data), "m"
except (ValueError, KeyError):
# useless polynomial => just show pixels values (ex: -50 -> +50 px)
max_bw = data.shape[0] // 2
min_bw = (max_bw - data.shape[0]) + 1
return range(min_bw, max_bw + 1), "px"
def get_pixel_spectrum(self):
"""
Return the (0D) spectrum belonging to the selected pixel.
See get_spectrum_range() to know the wavelength values for each index of
the spectrum dimension
return (None or DataArray with 1 dimension): the spectrum of the given
pixel or None if no spectrum is selected.
"""
if self.selected_pixel.value == (None, None):
return None
x, y = self.selected_pixel.value
spec2d = self._calibrated[:, 0, 0, :, :] # same data but remove useless dims
# We treat width as the diameter of the circle which contains the center
# of the pixels to be taken into account
width = self.selectionWidth.value
if width == 1: # short-cut for simple case
return spec2d[:, y, x]
# There are various ways to do it with numpy. As typically the spectrum
# dimension is big, and the number of pixels to sum is small, it seems
# the easiest way is to just do some kind of "clever" mean. Using a
# masked array would also work, but that'd imply having a huge mask.
radius = width / 2
n = 0
# TODO: use same cleverness as mean() for dtype?
datasum = numpy.zeros(spec2d.shape[0], dtype=numpy.float64)
# Scan the square around the point, and only pick the points in the circle
for px in range(max(0, int(x - radius)),
min(int(x + radius) + 1, spec2d.shape[-1])):
for py in range(max(0, int(y - radius)),
min(int(y + radius) + 1, spec2d.shape[-2])):
if math.hypot(x - px, y - py) <= radius:
n += 1
datasum += spec2d[:, py, px]
mean = datasum / n
return model.DataArray(mean.astype(spec2d.dtype))
def get_line_spectrum(self, raw=False):
""" Return the 1D spectrum representing the (average) spectrum
Call get_spectrum_range() to know the wavelength values for each index
of the spectrum dimension.
raw (bool): if True, will return the "raw" values (ie, same data type as
the original data). Otherwise, it will return a RGB image.
return (None or DataArray with 3 dimensions): first axis (Y) is spatial
(along the line), second axis (X) is spectrum. If not raw, third axis
is colour (RGB, but actually always greyscale). Note: when not raw,
the beginning of the line (Y) is at the "bottom".
MD_PIXEL_SIZE[1] contains the spatial distance between each spectrum
If the selected_line is not valid, it will return None
"""
if (None, None) in self.selected_line.value:
return None
spec2d = self._calibrated[:, 0, 0, :, :] # same data but remove useless dims
width = self.selectionWidth.value
# Number of points to return: the length of the line
start, end = self.selected_line.value
v = (end[0] - start[0], end[1] - start[1])
l = math.hypot(*v)
n = 1 + int(l)
if l < 1: # a line of just one pixel is considered not valid
return None
# FIXME: if the data has a width of 1 (ie, just a line), and the
# requested width is an even number, the output is empty (because all
# the interpolated points are outside of the data).
# Coordinates of each point: ndim of data (5-2), pos on line (Y), spectrum (X)
# The line is scanned from the end till the start so that the spectra
# closest to the origin of the line are at the bottom.
coord = numpy.empty((3, width, n, spec2d.shape[0]))
coord[0] = numpy.arange(spec2d.shape[0]) # spectra = all
coord_spc = coord.swapaxes(2, 3) # just a view to have (line) space as last dim
coord_spc[-1] = numpy.linspace(end[0], start[0], n) # X axis
coord_spc[-2] = numpy.linspace(end[1], start[1], n) # Y axis
# Spread over the width
# perpendicular unit vector
pv = (-v[1] / l, v[0] / l)
width_coord = numpy.empty((2, width))
spread = (width - 1) / 2
width_coord[-1] = numpy.linspace(pv[0] * -spread, pv[0] * spread, width) # X axis
width_coord[-2] = numpy.linspace(pv[1] * -spread, pv[1] * spread, width) # Y axis
coord_cw = coord[1:].swapaxes(0, 2).swapaxes(1, 3) # view with coordinates and width as last dims
coord_cw += width_coord
# Interpolate the values based on the data
if width == 1:
# simple version for the most usual case
spec1d = ndimage.map_coordinates(spec2d, coord[:, 0, :, :], order=1)
else:
# FIXME: the mean should be dependent on how many pixels inside the
# original data were picked on each line. Currently if some pixels fall
# out of the original data, the outside pixels count as 0.
# force the intermediate values to float, as mean() still needs to run
spec1d_w = ndimage.map_coordinates(spec2d, coord, output=numpy.float, order=1)
spec1d = spec1d_w.mean(axis=0).astype(spec2d.dtype)
assert spec1d.shape == (n, spec2d.shape[0])
# Use metadata to indicate spatial distance between pixel
pxs_data = self._calibrated.metadata[MD_PIXEL_SIZE]
pxs = math.hypot(v[0] * pxs_data[0], v[1] * pxs_data[1]) / (n - 1)
md = {MD_PIXEL_SIZE: (None, pxs)} # for the spectrum, use get_spectrum_range()
if raw:
return model.DataArray(spec1d[::-1, :], md)
else:
# Scale and convert to RGB image
if self.auto_bc.value:
hist, edges = img.histogram(spec1d)
irange = img.findOptimalRange(hist, edges,
self.auto_bc_outliers.value / 100)
else:
# use the values requested by the user
irange = sorted(self.intensityRange.value)
rgb8 = img.DataArray2RGB(spec1d, irange)
return model.DataArray(rgb8, md)
# TODO: have an "area=None" argument which allows to specify the 2D region
# within which the spectrum should be computed
# TODO: should it also return the wavelength values? Or maybe another method
# can do it?
def getMeanSpectrum(self):
"""
Compute the global spectrum of the data as an average over all the pixels
returns (numpy.ndarray of float): average intensity for each wavelength
You need to use the metadata of the raw data to find out what is the
wavelength for each pixel, but the range of spectrumBandwidth is
the same as the range of this spectrum.
"""
data = self._calibrated
# flatten all but the C dimension, for the average
data = data.reshape((data.shape[0], numpy.prod(data.shape[1:])))
av_data = numpy.mean(data, axis=1)
return av_data
def _updateImage(self):
""" Recomputes the image with all the raw data available
Note: for spectrum-based data, it mostly computes a projection of the
3D data to a 2D array.
"""
try:
data = self._calibrated
if data is None: # can happen during __init__
return
self.image.value = self.get_spatial_spectrum(data)
except Exception:
logging.exception("Updating %s image", self.__class__.__name__)
# We don't have problems of rerunning this when the data is updated,
# as the data is static.
def _updateCalibratedData(self, bckg=None, coef=None):
"""
Try to update the data with new calibration. The two parameters are
the same as compensate_spectrum_efficiency(). The input data comes from
.raw and the calibrated data is saved in ._calibrated
bckg (DataArray or None)
coef (DataArray or None)
raise ValueError: if the data and calibration data are not valid or
compatible. In that case the current calibrated data is unchanged.
"""
data = self.raw[0]
if data is None:
self._calibrated = None
return
if bckg is None and coef is None:
# make sure to not display any other error
self._calibrated = data
return
if not (set(data.metadata.keys()) &
{model.MD_WL_LIST, model.MD_WL_POLYNOMIAL}):
raise ValueError("Spectrum data contains no wavelength information")
# will raise an exception if incompatible
calibrated = calibration.compensate_spectrum_efficiency(data, bckg, coef)
self._calibrated = calibrated
def _setBackground(self, bckg):
"""
Setter of the spectrum background
raises ValueError if it's impossible to apply it (eg, no wavelength info)
"""
# If the coef data is wrong, this function will fail with an exception,
# and the value will never be set.
self._updateCalibratedData(bckg=bckg, coef=self.efficiencyCompensation.value)
return bckg
def _setEffComp(self, coef):
"""
Setter of the spectrum efficiency compensation
raises ValueError if it's impossible to apply it (eg, no wavelength info)
"""
# If the coef data is wrong, this function will fail with an exception,
# and the value will never be set.
self._updateCalibratedData(bckg=self.background.value, coef=coef)
return coef
def _force_selected_spectrum_update(self):
# There is no explicit way to do it, so instead, pretend the pixel and
# line have changed (to the same value).
# TODO: It could be solved by using dataflows (in which case a new data
# would come whenever settings change).
if self.selected_pixel.value != (None, None):
self.selected_pixel.notify(self.selected_pixel.value)
if (None, None) not in self.selected_line.value:
self.selected_line.notify(self.selected_line.value)
def _onCalib(self, unused):
"""
called when the background or efficiency compensation is changed
"""
# histogram will change as the pixel intensity is different
self._updateHistogram()
self._shouldUpdateImage()
self._force_selected_spectrum_update()
def _onSelectionWidth(self, width):
"""
Called when the selection width is updated
"""
# 0D and/or 1D spectrum will need updates
self._force_selected_spectrum_update()
def _onAutoBC(self, enabled):
super(StaticSpectrumStream, self)._onAutoBC(enabled)
# if changing to auto, need to recompute line spectrum
if enabled:
self._force_selected_spectrum_update()
def _onOutliers(self, outliers):
super(StaticSpectrumStream, self)._onOutliers(outliers)
# if changing outliers while in auto, need to recompute line spectrum
if self.auto_bc.value:
self._force_selected_spectrum_update()
def _onIntensityRange(self, irange):
super(StaticSpectrumStream, self)._onIntensityRange(irange)
# If auto_bc is active, it will not affect the line spectrum directly
if not self.auto_bc.value:
self._force_selected_spectrum_update()
def onFitToRGB(self, value):
"""
called when fitToRGB is changed
"""
self._shouldUpdateImage()
def onSpectrumBandwidth(self, value):
"""
called when spectrumBandwidth is changed
"""
self._updateHistogram()
self._shouldUpdateImage()
|
gstiebler/odemis
|
src/odemis/acq/stream/_static.py
|
Python
|
gpl-2.0
| 39,904
|
[
"Gaussian"
] |
2e8530add6ef30e8b41e0ffdf60a259e3e43e053a1e0d2c10593e9c362ba3581
|
# Copyright 2017 The 'Scalable Private Learning with PATE' Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Illustrates how the noisy thresholding check changes the distribution of queries.
A script in support of the paper "Scalable Private Learning with PATE" by
Nicolas Papernot, Shuang Song, Ilya Mironov, Ananth Raghunathan, Kunal Talwar,
Ulfar Erlingsson (https://arxiv.org/abs/1802.08908).
The input is a file containing a numpy array of votes, one query per row, one
class per column. Ex:
43, 1821, ..., 3
31, 16, ..., 0
...
0, 86, ..., 438
The output is one of two graphs depending on the setting of the plot variable.
The output is written to a pdf file.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import sys
sys.path.append('..') # Main modules reside in the parent directory.
from absl import app
from absl import flags
import core as pate
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
import numpy as np
from six.moves import xrange
plt.style.use('ggplot')
FLAGS = flags.FLAGS
flags.DEFINE_enum('plot', 'small', ['small', 'large'], 'Selects which of '
'the two plots is produced.')
flags.DEFINE_string('counts_file', None, 'Counts file.')
flags.DEFINE_string('plot_file', '', 'Plot file to write.')
flags.mark_flag_as_required('counts_file')
def compute_count_per_bin(bin_num, votes):
"""Tabulates number of examples in each bin.
Args:
bin_num: Number of bins.
votes: A matrix of votes, where each row contains votes in one instance.
Returns:
Array of counts of length bin_num.
"""
sums = np.sum(votes, axis=1)
# Check that all rows contain the same number of votes.
assert max(sums) == min(sums)
s = max(sums)
counts = np.zeros(bin_num)
n = votes.shape[0]
for i in xrange(n):
v = votes[i,]
bin_idx = int(math.floor(max(v) * bin_num / s))
assert 0 <= bin_idx < bin_num
counts[bin_idx] += 1
return counts
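# Worked example (hypothetical numbers): with bin_num = 10 and a row whose
# votes sum to s = 2000 with plurality max(v) = 1821, the row falls in bin
# floor(1821 * 10 / 2000) = floor(9.105) = 9, i.e. the highest-consensus bin.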
def compute_privacy_cost_per_bins(bin_num, votes, sigma2, order):
"""Outputs average privacy cost per bin.
Args:
bin_num: Number of bins.
votes: A matrix of votes, where each row contains votes in one instance.
sigma2: The scale (std) of the Gaussian noise. (Same as sigma_2 in
Algorithms 1 and 2.)
order: The Renyi order for which privacy cost is computed.
Returns:
Expected eps of RDP (ignoring delta) per example in each bin.
"""
n = votes.shape[0]
bin_counts = np.zeros(bin_num)
bin_rdp = np.zeros(bin_num) # RDP at order=order
for i in xrange(n):
v = votes[i,]
logq = pate.compute_logq_gaussian(v, sigma2)
rdp_at_order = pate.rdp_gaussian(logq, sigma2, order)
bin_idx = int(math.floor(max(v) * bin_num / sum(v)))
assert 0 <= bin_idx < bin_num
bin_counts[bin_idx] += 1
bin_rdp[bin_idx] += rdp_at_order
if (i + 1) % 1000 == 0:
print('example {}'.format(i + 1))
sys.stdout.flush()
return bin_rdp / bin_counts
def compute_expected_answered_per_bin(bin_num, votes, threshold, sigma1):
"""Computes expected number of answers per bin.
Args:
bin_num: Number of bins.
votes: A matrix of votes, where each row contains votes in one instance.
threshold: The threshold against which check is performed.
sigma1: The std of the Gaussian noise with which check is performed. (Same
as sigma_1 in Algorithms 1 and 2.)
Returns:
Expected number of queries answered per bin.
"""
n = votes.shape[0]
bin_answered = np.zeros(bin_num)
for i in xrange(n):
v = votes[i,]
p = math.exp(pate.compute_logpr_answered(threshold, sigma1, v))
bin_idx = int(math.floor(max(v) * bin_num / sum(v)))
assert 0 <= bin_idx < bin_num
bin_answered[bin_idx] += p
if (i + 1) % 1000 == 0:
print('example {}'.format(i + 1))
sys.stdout.flush()
return bin_answered
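# Intuition (a sketch): per Algorithm 1 of the paper, a query is answered when
# max(v) + N(0, sigma1**2) >= threshold, and compute_logpr_answered returns
# (roughly) the log of that probability. E.g. a query whose top vote count
# equals the threshold is answered with probability about 0.5 for any sigma1.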
def main(argv):
del argv # Unused.
fin_name = os.path.expanduser(FLAGS.counts_file)
print('Reading raw votes from ' + fin_name)
sys.stdout.flush()
votes = np.load(fin_name)
votes = votes[:4000,] # truncate to 4000 samples
if FLAGS.plot == 'small':
bin_num = 5
m_check = compute_expected_answered_per_bin(bin_num, votes, 3500, 1500)
elif FLAGS.plot == 'large':
bin_num = 10
m_check = compute_expected_answered_per_bin(bin_num, votes, 3500, 1500)
a_check = compute_expected_answered_per_bin(bin_num, votes, 5000, 1500)
eps = compute_privacy_cost_per_bins(bin_num, votes, 100, 50)
else:
raise ValueError('--plot flag must be one of ["small", "large"]')
counts = compute_count_per_bin(bin_num, votes)
bins = np.linspace(0, 100, num=bin_num, endpoint=False)
plt.close('all')
fig, ax = plt.subplots()
if FLAGS.plot == 'small':
fig.set_figheight(5)
fig.set_figwidth(5)
ax.bar(
bins,
counts,
20,
color='orangered',
linestyle='dotted',
linewidth=5,
edgecolor='red',
fill=False,
alpha=.5,
align='edge',
label='LNMax answers')
ax.bar(
bins,
m_check,
20,
color='g',
alpha=.5,
linewidth=0,
edgecolor='g',
align='edge',
label='Confident-GNMax\nanswers')
elif FLAGS.plot == 'large':
fig.set_figheight(4.7)
fig.set_figwidth(7)
ax.bar(
bins,
counts,
10,
linestyle='dashed',
linewidth=5,
edgecolor='red',
fill=False,
alpha=.5,
align='edge',
label='LNMax answers')
ax.bar(
bins,
m_check,
10,
color='g',
alpha=.5,
linewidth=0,
edgecolor='g',
align='edge',
label='Confident-GNMax\nanswers (moderate)')
ax.bar(
bins,
a_check,
10,
color='b',
alpha=.5,
align='edge',
label='Confident-GNMax\nanswers (aggressive)')
ax2 = ax.twinx()
bin_centers = [x + 5 for x in bins]
ax2.plot(bin_centers, eps, 'ko', alpha=.8)
ax2.set_ylim([1e-200, 1.])
ax2.set_yscale('log')
ax2.grid(False)
ax2.set_yticks([1e-3, 1e-50, 1e-100, 1e-150, 1e-200])
plt.tick_params(which='minor', right='off')
ax2.set_ylabel(r'Per query privacy cost $\varepsilon$', fontsize=16)
plt.xlim([0, 100])
ax.set_ylim([0, 2500])
# ax.set_yscale('log')
ax.set_xlabel('Percentage of teachers that agree', fontsize=16)
ax.set_ylabel('Number of queries answered', fontsize=16)
vals = ax.get_xticks()
ax.set_xticklabels([str(int(x)) + '%' for x in vals])
ax.tick_params(labelsize=14, bottom=True, top=True, left=True, right=True)
ax.legend(loc=2, prop={'size': 16})
# simple: 'figures/noisy_thresholding_check_perf.pdf')
# detailed: 'figures/noisy_thresholding_check_perf_details.pdf'
print('Saving the graph to ' + FLAGS.plot_file)
plt.savefig(os.path.expanduser(FLAGS.plot_file), bbox_inches='tight')
plt.show()
if __name__ == '__main__':
app.run(main)
|
tensorflow/privacy
|
research/pate_2018/ICLR2018/rdp_bucketized.py
|
Python
|
apache-2.0
| 7,703
|
[
"Gaussian"
] |
8d979d07a1e6f9a6666d0661abe78537364e7b09cdfee2b830c5fa5b616d2938
|
#!/usr/bin/env python
import os, pygtk, gtk, webbrowser
import playlist
EDIT_WIDTH, EDIT_HEIGHT = 300, 200
#
# Provides a standard menu for open, save, help, etc.
# Main widget: menubar
#
class Menu:
pymp, popup, path = None, None, None
#
# Creates the menu and adds it to the provided treeview.
#
def __init__(self, pymp):
self.pymp = pymp
self.path = pymp.prefs.get("path")
menuDef = """
<ui>
<popup>
<menu action="File">
<menuitem action="Open File"/>
<menuitem action="Open Location"/>
<menuitem action="Save List"/>
<menuitem action="Clear List"/>
<separator/>
<menuitem action="Quit"/>
</menu>
<menu action="Options">
<menuitem action="Continuous Play"/>
<menuitem action="Random Play"/>
<menuitem action="Repeat List"/>
<menuitem action="Edit Config"/>
</menu>
<menu action="Help">
<menuitem action="About"/>
</menu>
</popup>
</ui>
"""
uiManager = gtk.UIManager()
uiManager.add_ui_from_string(menuDef)
actionGroup = gtk.ActionGroup("Actions")
actions = (
("File", None, "_File"),
("Open File", gtk.STOCK_OPEN, "_Open File", None , None, self.openFile),
("Open Location", gtk.STOCK_CONVERT, "Open _Location", "<Ctrl>L", None, self.openLocation),
("Save List", gtk.STOCK_SAVE, "_Save List", None , None, self.saveList),
("Clear List", gtk.STOCK_CANCEL, "_Clear List", None, None, self.clearList),
("Options", None, "_Options"),
("Edit Config", gtk.STOCK_PROPERTIES, "_Edit Config", None, None, self.editConfig),
("Quit", gtk.STOCK_QUIT, "_Quit", None, None, pymp.quit),
("Help", None, "_Help"),
("About", gtk.STOCK_HELP, "About", None, None, self.openAbout),
)
actionGroup.add_actions(actions)
cont = gtk.ToggleAction("Continuous Play", "_Continuous Play", None, None)
cont.connect("toggled", self.toggleContinuous)
cont.set_active(self.pymp.playlist.continuous)
actionGroup.add_action(cont)
rand = gtk.ToggleAction("Random Play", "_Random Play", None, None)
rand.connect("toggled", self.toggleRandom)
rand.set_active(self.pymp.playlist.random)
actionGroup.add_action(rand)
rep = gtk.ToggleAction("Repeat List", "Repeat _List", None, None)
rep.connect("toggled", self.toggleRepeat)
rep.set_active(self.pymp.playlist.repeat)
actionGroup.add_action(rep)
uiManager.insert_action_group(actionGroup, 0)
popup = uiManager.get_widget("/popup")
self.pymp.window.add_accel_group(uiManager.get_accel_group())
self.popup = popup
#
# Displays a dialog to open a location.
#
def openLocation(self, widget, data=None):
flags = gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT
buttons = ( #define okay and cancel buttons
gtk.STOCK_OK, gtk.RESPONSE_ACCEPT,
gtk.STOCK_CANCEL, gtk.RESPONSE_REJECT
)
entry = gtk.Entry(255) #create text entry field
entry.set_activates_default(True)
entry.set_width_chars(40)
entry.show()
dialog = gtk.Dialog("Open Location...", self.pymp.window, flags, buttons)
dialog.set_default_response(gtk.RESPONSE_ACCEPT)
dialog.vbox.pack_start(entry, False, True, 0)
if dialog.run() == gtk.RESPONSE_ACCEPT: #process location
self.pymp.playlist.add(entry.get_text())
dialog.destroy()
return True
#
# Displays a file chooser dialog to add files to playlist.
#
def openFile(self, widget, data=None):
SELECT_ALL = 1234
buttons = ( #define open and cancel buttons
"Select All", SELECT_ALL,
gtk.STOCK_OPEN, gtk.RESPONSE_ACCEPT,
gtk.STOCK_CANCEL, gtk.RESPONSE_CLOSE
)
fileChooser = gtk.FileChooserDialog(self.pymp.versionString,
self.pymp.window, 0, buttons) #create chooser
fileChooser.set_default_response(gtk.RESPONSE_ACCEPT)
fileChooser.set_current_folder(self.path)
fileChooser.set_select_multiple(True)
while True:
response = fileChooser.run()
if response == SELECT_ALL: #select all and continue
fileChooser.select_all()
continue
if response == gtk.RESPONSE_ACCEPT: #process selected files
for f in fileChooser.get_filenames(): #load files or lists
self.pymp.playlist.add(f)
self.path = fileChooser.get_current_folder()
break #break from loop
fileChooser.destroy() #dispose of chooser
return True
#
# Saves the current playlist to the specified file.
#
def saveList(self, widget, data=None):
buttons = ( #define save and cancel buttons
gtk.STOCK_SAVE, gtk.RESPONSE_ACCEPT,
gtk.STOCK_CANCEL, gtk.RESPONSE_CLOSE
)
fileChooser = gtk.FileChooserDialog(self.pymp.versionString, self.pymp.window,
gtk.FILE_CHOOSER_ACTION_SAVE, buttons) #create chooser
fileChooser.set_default_response(gtk.RESPONSE_ACCEPT)
fileChooser.set_current_folder(self.path)
fileChooser.set_current_name(".m3u")
if fileChooser.run() == gtk.RESPONSE_ACCEPT: #save list
self.pymp.playlist.save(fileChooser.get_filename())
self.path = fileChooser.get_current_folder()
fileChooser.destroy() #dispose of chooser
return True
#
# Clears the current playlist.
#
def clearList(self, widget, data=None):
self.pymp.playlist.clear()
return True
#
# Sets the continuous playback option from a toggle item.
#
def toggleContinuous(self, widget):
self.pymp.playlist.continuous = widget.get_active()
return True
#
# Sets the random playback option from a toggle item.
#
def toggleRandom(self, widget):
self.pymp.playlist.random = widget.get_active()
return True
#
# Sets the repeat option from a toggle item.
#
def toggleRepeat(self, widget):
self.pymp.playlist.repeat = widget.get_active()
return True
#
# Opens ~/.mplayer/config and allows editing.
#
def editConfig(self, widget):
flags = gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT
buttons = ( #define okay and cancel buttons
gtk.STOCK_OK, gtk.RESPONSE_ACCEPT,
gtk.STOCK_CANCEL, gtk.RESPONSE_REJECT
)
config = open(os.path.expanduser("~/.mplayer/config"))
buff = gtk.TextBuffer() #create text entry field
buff.set_text(config.read())
config.close()
view = gtk.TextView(buff)
view.set_size_request(EDIT_WIDTH, EDIT_HEIGHT)
view.show()
dialog = gtk.Dialog("Edit Config...", self.pymp.window, flags, buttons)
dialog.set_default_response(gtk.RESPONSE_ACCEPT)
dialog.vbox.pack_start(view, True, True, 0)
if dialog.run() == gtk.RESPONSE_ACCEPT: #overwrite config
config = open(os.path.expanduser("~/.mplayer/config"), "w")
start, end = buff.get_start_iter(), buff.get_end_iter()
config.write(buff.get_text(start, end))
config.close()
dialog.destroy()
return True
#
# Attempts to visit the Pymp homepage in a new browser tab.
#
def openHomepage(self, dialog, link, data=None):
webbrowser.open(link, 2, 1)
#
# Displays an AboutDialog for the application.
#
def openAbout(self, widget, data=None):
gtk.about_dialog_set_url_hook(self.openHomepage)
about = gtk.AboutDialog()
about.set_name("")
about.set_version(self.pymp.versionString)
about.set_authors(["Jay Dolan <jdolan@jdolan.dyndns.org>",
"Lucas Hazel <lucas@die.net.au>"])
about.set_artists(["Jay Dolan <jdolan@jdolan.dyndns.org>"])
about.set_website("http://jdolan.dyndns.org/pymp")
about.set_logo(self.pymp.getIcon())
about.show()
about.run()
about.hide()
about.destroy()
return True
#End of file
|
jantman/TuxTruck-wxPython
|
pymp/menu.py
|
Python
|
gpl-3.0
| 7,477
|
[
"VisIt"
] |
2fd5b896abd5d809c9fdc1594737c907742c8fa77c6fd728d8e18efb2235ee34
|
# Copyright 2016 Brian Innes
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = ['vPiP', 'constrainDrawingRectangle']
|
brianinnes/vPiP
|
python/vPiP/__init__.py
|
Python
|
apache-2.0
| 623
|
[
"Brian"
] |
7abfea5d3f8215d75d08927a2916f3ce1fa826854cecda8ff9e1a56d89d8a8ac
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012, Hsiaoming Yang <http://lepture.com>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the author nor the names of its contributors
# may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Python LiveReload
=================
`LiveReload <http://livereload.com/>`_ Server in Python Version.
Web developers need to refresh the browser every time they save a file (CSS,
JavaScript, HTML), which is really boring. LiveReload will take care of that for
you. When you save a file, your browser will refresh itself. And what's more,
it can run tasks such as compiling Less to CSS before the browser refreshes.
Installation
------------
Python LiveReload is designed for web developers who know Python.
Install python-livereload
~~~~~~~~~~~~~~~~~~~~~~~~~
Install Python LiveReload with pip::
$ pip install livereload
If you don't have pip installed, try easy_install::
$ easy_install livereload
Install Browser Extensions
~~~~~~~~~~~~~~~~~~~~~~~~~~
Get Browser Extensions From LiveReload.com
+ Chrome Extension
+ Safari Extension
+ Firefox Extension
Visit: http://help.livereload.com/kb/general-use/browser-extensions
Get Notification
~~~~~~~~~~~~~~~~~
If you are on Mac, and you are a Growl user::
$ pip install gntp
If you are on Ubuntu, you don't need to do anything. Notification just works.
Working with the file protocol
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Enable the file protocol on Chrome:
.. image:: http://i.imgur.com/qGpJI.png
Quickstart
------------
LiveReload is designed for more complex tasks, not just for refreshing a
browser. But you can still do the simple task.
Assume you have livereload and its extension installed, and now you are in your
working directory. With command::
$ livereload
your browser will reload, if any file in the working directory changed.
Guardfile
----------
More complex tasks can be done by Guardfile. Write a Guardfile in your working
directory, the basic syntax::
#!/usr/bin/env python
from livereload.task import Task
Task.add('static/style.css')
Task.add('*.html')
Now livereload will only guard static/style.css and the HTML files in your
working directory.
But python-livereload is more than that, you can specify a task before
refreshing the browser::
#!/usr/bin/env python
from livereload.task import Task
from livereload.compiler import lessc
Task.add('style.less', lessc('style.less', 'style.css'))
And it will compile less css before refreshing the browser now.
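You can also run any Python callable as the task action, for instance (a
minimal sketch; the notify function here is only an illustration)::
    #!/usr/bin/env python
    from livereload.task import Task
    def notify():
        print('css changed')
    Task.add('static/*.css', notify)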
Others
--------
If you are on a Mac, you can buy `LiveReload2 <http://livereload.com/>`_.
If you are a Rubyist, you can get guard-livereload.
"""
__version__ = '0.14'
__author__ = 'Hsiaoming Yang <lepture@me.com>'
__homepage__ = 'http://lab.lepture.com/livereload/'
|
hiphopsmurf/bitcoin-secured
|
online/build/livereload/livereload/__init__.py
|
Python
|
mit
| 4,165
|
[
"VisIt"
] |
a1525c7366fa78924aba48cdb32a34f81416c88932e36b147e51ccc3a1a98ef1
|
import sys
sys.path.append("..")
from identify_locus import *
import pytest
import pysam
def test_randomletters_len():
length = 5
letters = randomletters(length)
assert len(letters) == length
def test_randomletters_diff():
length = 10
letters1 = randomletters(length)
letters2 = randomletters(length)
assert letters1 != letters2
def test_detect_readlen():
bamfile = 'test_data/11_L001_R1.STRdecoy.bam'
maxlen, count_noCIGAR = detect_readlen(bamfile, sample = 20)
assert maxlen == 150
assert count_noCIGAR == 0
@pytest.mark.parametrize("test_region, target_region, expected", [
# Easy ones
((1,10), (4,6), True),
((1,2), (4,6), False),
# Borderline cases
((1,10), (1,10), True),
((1,9), (1,10), False),
((2,10), (1,10), False),
# Single base
((1,10), (5,5), True),
((1,10), (10,10), True),
((1,10), (1,1), True),
# Nonsense input - should probably generate errors XXX
((2,-10), (1,10), False),
])
def test_spans_region(test_region, target_region, expected):
assert spans_region(test_region, target_region) == expected
@pytest.mark.parametrize("test_region, target_region", [
# Nonsense input - should generate errors
((2,-10), (1)),
((2,-10), (1,3,6)),
])
def test_spans_region_errors(test_region, target_region):
with pytest.raises(TypeError):
spans_region(test_region, target_region)
@pytest.mark.parametrize("position, region, expected", [
# Easy ones
(5, (1,10), True),
(20, (1,10), False),
# Borderline cases
(1, (1,10), True),
(10, (1,10), True),
(0, (1,10), False),
(11, (1,10), False),
# Nonsense input - should probably generate errors XXX
#((1,2), (1,10), False),
])
def test_in_region(position, region, expected):
assert in_region(position, region) == expected
@pytest.mark.parametrize("position, region", [
# Nonsense input - should generate errors
(2, (1,10,20)),
])
def test_in_region_errors(position, region):
with pytest.raises(TypeError):
in_region(position, region)
def test_indel_size_nonspanning():
"""Raise ValueError if read doesn't span region"""
bamfile = 'test_data/11_L001_R1.STRdecoy.bam'
test_read_name = '1-3871'
region = (70713514, 70713561)
bam = pysam.Samfile(bamfile, 'rb')
with pytest.raises(ValueError):
for read in bam.fetch():
if read.query_name == test_read_name:
indel_size(read, region)
break
def test_indel_size_wrongchr():
"""Raise ValueError if read doesn't span region because the chromosome doesn't match"""
bamfile = 'test_data/49_tests.STRdecoy.sam'
test_read_name = '1-293:0'
region = (70713514, 70713561)
chrom = 'chr5'
bam = pysam.Samfile(bamfile, 'rb')
with pytest.raises(ValueError) as e:
for read in bam.fetch():
if read.query_name == test_read_name:
indel_size(read, region, chrom)
break
print(e)
@pytest.mark.parametrize("test_read_name, expected", [
('1-293:0', 0), # same as ref
('1-293:10I', 10), # easy insertion
('1-293:5D', -5), # easy deletion
('1-293:0compound', 0), # insertion and deletion same size cancel out
('1-293:2Dcompound', -2), # insertion and deletion of different sizes makes deletion
('1-293:20Icompound', 20), # two insertions added together
('1-293:outside', 0), # indel outside STR
])
def test_indel_size(test_read_name, expected):
"""Test that indels are correctly identified"""
bamfile = 'test_data/49_tests.STRdecoy.sam'
region = (70713514, 70713561)
chrom = 'chr13'
bam = pysam.Samfile(bamfile, 'rb')
for read in bam.fetch():
if read.query_name == test_read_name:
try:
assert indel_size(read, region, chrom) == expected
except ValueError:
continue
@pytest.mark.parametrize("all_alleles, n, expected", [
([1,1,1,1,0], 2, [(1,4),(0,1)] ),
([1,1,1,1,0], 1, [(1,4)] ),
([1,1,1,1], 2, [(1,4)] ),
([1,1,1,1,0,2,2], None, [(1,4), (2,2), (0,1)] ),
])
def test_allele_freq(all_alleles, n, expected):
assert allele_freq(all_alleles, n) == expected
# Genotypes
# ==> 11 <==
# chr13 70713514 70713561 CTG_16/201
# ==> 49 <==
# chr13 70713514 70713561 CTG_16/1
@pytest.mark.parametrize("bamfile, n, expected", [
('test_data/49_L001_R1.STRdecoy.bam', 2, [(0,11), (3,10)] ),
('test_data/11_L001_R1.STRdecoy.bam', 2, [(0,17)] ),
])
def test_indel_size_allele_freq(bamfile, n, expected):
"""Test that allele frequencies from indel sizes are correctly computed"""
region = (70713514, 70713561)
chrom = 'chr13'
bam = pysam.Samfile(bamfile, 'rb')
all_indels = {}
for read in bam.fetch():
try:
all_indels[read.query_name] = indel_size(read, region, chrom)
#print(read.is_secondary)
except ValueError:
continue
print(all_indels)
all_indels_list = [all_indels[x] for x in all_indels]
alleles_by_frequency = allele_freq(all_indels_list, n)
assert alleles_by_frequency == expected
def test_locus_counts_bamlist(outfile = None, max_distance = 500):
bamfiles = 'test_data/49_L001_R1.STRdecoy.bam' # This should be a list, so a TypeError is expected
bedfile = '../../reference-data/hg19.simpleRepeat_period1-6_dedup.sorted.bed'
with pytest.raises(TypeError):
locus_counts(bamfiles, bedfile, outfile, max_distance)
def test_locus_counts(outfile = 'test.txt', max_distance = 500):
bamfiles = ['test_data/49_L001_R1.STRdecoy.bam']
bedfile = '../../reference-data/hg19.simpleRepeat_period1-6_dedup.sorted.bed'
locus_counts(bamfiles, bedfile, outfile, max_distance)
def test_parse_args_none():
"""Exit and display usage when no args/options are provided"""
with pytest.raises(SystemExit):
parser = parse_args([])
def test_parse_args_defaults():
"""Check correct defaults are set when not given"""
args = parse_args(['--bam', 'test.bam', '--bed', 'test.bed'])
assert args.bam == ['test.bam']
assert args.bed == 'test.bed'
assert args.output == None
assert args.dist == 500
|
Oshlack/STRetch
|
scripts/tests/test_identify_locus.py
|
Python
|
mit
| 6,140
|
[
"pysam"
] |
20a229c26f73d5d551ccc21b2abe0245f802514f1c38c7fd88ac7fad00dc383f
|
#
# Copyright (C) 2008, Brian Tanner
#
#http://rl-glue-ext.googlecode.com/
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# $Revision: 617 $
# $Date: 2009-02-05 02:24:12 -0700 (Thu, 05 Feb 2009) $
# $Author: gabalz $
# $HeadURL: http://rl-glue-ext.googlecode.com/svn/trunk/projects/codecs/Python/src/tests/test_rl_episode_experiment.py $
import sys
import rlglue.RLGlue as RLGlue
from glue_test import glue_test
tester = glue_test("test_rl_episode")
task_spec=RLGlue.RL_init()
isTerminal = RLGlue.RL_episode(0)
tester.check_fail(isTerminal!=1)
tester.check_fail(RLGlue.RL_num_steps()!=5)
isTerminal = RLGlue.RL_episode(1)
tester.check_fail(isTerminal!=0)
tester.check_fail(RLGlue.RL_num_steps()!=1)
isTerminal = RLGlue.RL_episode(2)
tester.check_fail(isTerminal!=0)
tester.check_fail(RLGlue.RL_num_steps()!=2)
isTerminal = RLGlue.RL_episode(4)
tester.check_fail(isTerminal!=0)
tester.check_fail(RLGlue.RL_num_steps()!=4)
isTerminal = RLGlue.RL_episode(5)
tester.check_fail(isTerminal!=0)
tester.check_fail(RLGlue.RL_num_steps()!=5)
isTerminal = RLGlue.RL_episode(6)
tester.check_fail(isTerminal!=1)
tester.check_fail(RLGlue.RL_num_steps()!=5)
isTerminal = RLGlue.RL_episode(7)
tester.check_fail(isTerminal!=1)
tester.check_fail(RLGlue.RL_num_steps()!=5)
print(tester.get_summary())
sys.exit(tester.getFailCount())
|
okkhoy/rlglue-python3-codec
|
src/tests/test_rl_episode_experiment.py
|
Python
|
apache-2.0
| 1,851
|
[
"Brian"
] |
533ee1bb831883282e69cfc7ec246d709e369536193ef027dbdb1c30658e0bc6
|
import sys
import unittest
sys.path.append('./code')
from transforms import Transform, UnivariateGaussianization
from models import MoGaussian
from numpy import abs, all
Transform.VERBOSITY = 0
class Test(unittest.TestCase):
def test_inverse(self):
"""
Make sure inverse Gaussianization is inverse to Gaussianization.
"""
mog = MoGaussian(10)
mog.initialize('laplace')
# generate test data
samples = mog.sample(100)
ug = UnivariateGaussianization(mog)
# reconstructed samples
samples_ = ug.inverse(ug(samples))
# distance between norm and reconstructed sample
dist = abs(samples_ - samples)
self.assertTrue(all(dist < 1E-6))
###
mog = MoGaussian(2)
mog.scales[0] = 1.
mog.scales[1] = 2.
mog.means[0] = -4.
mog.means[1] = 3.
# generate test data
samples = mog.sample(100)
ug = UnivariateGaussianization(mog)
# reconstructed samples
samples_ = ug.inverse(ug(samples))
# distance between norm and reconstructed sample
dist = abs(samples_ - samples)
self.assertTrue(all(dist < 1E-6))
def test_logjacobian(self):
"""
Test log-Jacobian.
"""
# test one-dimensional Gaussian
mog = MoGaussian(10)
mog.initialize('laplace')
# standard normal distribution
gauss = MoGaussian(1)
gauss.means[0] = 0.
gauss.scales[0] = 1.
# generate test data
samples = mog.sample(100)
ug = UnivariateGaussianization(mog)
# after Gaussianization, samples should be Gaussian distributed
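# Change-of-variables identity being tested (sketch): if z = g(x) is the
# Gaussianization map, then p_X(x) = p_Z(g(x)) * |g'(x)|, so
# log p_X(x) = log p_Z(g(x)) + log|g'(x)|, which is why loglik_mog should
# match loglik_gauss (computed with ug.logjacobian) below.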
loglik_mog = mog.loglikelihood(samples)
loglik_gauss = gauss.loglikelihood(ug(samples)) + ug.logjacobian(samples)
dist = abs(loglik_mog - loglik_gauss)
self.assertTrue(all(dist < 1E-6))
###
# test one-dimensional Gaussian
mog = MoGaussian(2)
mog.scales[0] = 1.
mog.scales[1] = 2.
mog.means[0] = -4.
mog.means[1] = 3.
# standard normal distribution
gauss = MoGaussian(1)
gauss.means[0] = 0.
gauss.scales[0] = 1.
# generate test data
samples = mog.sample(100)
ug = UnivariateGaussianization(mog)
# after Gaussianization, samples should be Gaussian distributed
loglik_mog = mog.loglikelihood(samples)
loglik_gauss = gauss.loglikelihood(ug(samples)) + ug.logjacobian(samples)
dist = abs(loglik_mog - loglik_gauss)
self.assertTrue(all(dist < 1E-6))
if __name__ == '__main__':
unittest.main()
|
lucastheis/isa
|
code/transforms/tests/univariategaussianization_test.py
|
Python
|
mit
| 2,311
|
[
"Gaussian"
] |
07980b6b1220d85e1c7ba3f083095cd1997646ff58a03aa9f5d52edf0af3a3cc
|
# -*- coding: utf-8 -*-
import numpy as np
import scipy.integrate as sci
import scipy.optimize as opt
import time
import tarfile
import os
import csv
from montagn_utils_Lu import *
from montagn_classes import *
from montagn_polar import *
import matplotlib.pyplot as plt
from gui_random import *
######################################################
### Parameter defining file for montAGN simulation ###
# Change the parameters below to change those used #
#def makemodel(ask=1,usemodel=0.,display=1,checktau=0,nsimu=1,rgmin=0.005e-6,rgmax=0.25e-6,alphagrain=-3.5):
def makemodel(info=[],usemodel=0.,grain_to_use=[1,0,0,0],rgmin=[0.005e-6,0.005e-6,0.005e-6],rgmax=[0.25e-6,0.25e-6,0.25e-6],alphagrain=[-3.5,-3.5,-3.5]):
"""Creates a model for a MontAGN simulation.
Returns a "Model" object, cf. montagn_classes"""
if(info==[]):
info=Info(1,0,[],0,1,0,1)
### map asked properties ###
if(info.ask==0):
if(int(usemodel)==0): ## tunable model ##
if(info.nsimu==1):
print "Tunable model (0)"
# Should contain all the information! One line per parameter combination
# For multiple configuration use further lines
# put "[]" for not using one of these density models
denspower=[] # [[Radial power index, Radial typical profile size (in m), Vertical decay size (in m), [Density of grains (in particles / m3) at radial typical profile size (one for each grain type)]]]
spherepower=[] # [[Radial power index, Radial typical profile size (in m), [Density of grains (in particles / m3) at radial typical profile size (one for each grain type)]]]
densturb=[] # not usable
cloud=[] # too many imputs --> not usable
torus=[] #[[Disc outer radius (in pc),[density coefficent of grains (in kg / m3 ?)],ratio of the disk height at the disk boundary to the disk outer radius,Envelope mass infall (in Msol/yr),Mass of the star (in Msol)]]
torus_const=[] #[[Disc outer radius (in m),[density coefficent of grains in the torus (in kg / m3 )],[density coefficent of grains in the envelope (in kg / m3 )],[density coefficent of grains in the cone (in kg / m3 )]]]
cylinder=[] #[[cylinder radius (in m),cylinder height (in m),[density of grains (in particles / m3)]]]
### grid parameters ###
res_map=0.1*pc # m grid resolution
rmax_map=10*pc # m grid size
### model parameters ###
l_agn=1e36 #W luminosity of the AGN (or central object - star) AGN: 1e36; star: 3.846*1e26
af=20 #° opening angle of the ionization cone 20
enpaq=3.846*1e26 #J Energy in each "photon" object #1.0 #1e10
centrobject='AGN' #(star or AGN)
fichierspectre="s5700_spectrum.dat" #("spectre_picH.dat") #spectre_agn.dat
### Rsub for dust ###
rsubsilicate=0.5*pc # m approximate sublimation radius for convergence
elif(int(usemodel)==1): ## star cloud model ##
if(info.nsimu==1):
print "star cloud model (1)"
denspower=[]
spherepower=[[0,1.0*pc,[17000.0,0,0,0]]] #old [[0,[0.6]]]
densturb=[]
cloud=[]
torus=[]
torus_const=[]
cylinder=[]
### grid parameters ###
res_map=0.00002*pc # m grid resolution
rmax_map=0.001*pc # m grid size
### model parameters ### unused for model #2 ###
l_agn=3.846*1e26 #W luminosity of the AGN (or central object - star) AGN: 1e36; star: 3.846*1e26
af=0 #° opening angle of the ionization cone
enpaq=3.846*1e26 #J Energy in each "photon" object
centrobject='star' #(star or AGN)
fichierspectre="s5700_spectrum.dat"
### Rsub for dust ###
rsubsilicate=0.000005*pc # m approximate sublimation radius for convergence
elif(int(usemodel)==2): ## Murakawa star model ##
if(info.nsimu==1):
print "Murakawa star model (2)"
denspower=[]
spherepower=[]
densturb=[]
cloud=[]
torus=[[100.*AU,[1.5*1e6,0,0,0],0.3,1.0e-6,0.5]]
#[3.5*1e8]-->tau~60000
#[4.*1e4]-->Mdust~1e26kg 1.5*1e6
torus_const=[]
cylinder=[]
### grid parameters ###
res_map=4*AU # m grid resolution 10 25
rmax_map=200*AU # m grid size 500 1500
### model parameters ### unused for model #2 ###
l_agn=3.846*1e26 #W luminosity of the AGN (or central object - star) AGN: 1e36; star: 3.846*1e26
af=0 #° opening angle of the ionization cone 20
enpaq=3.846*1e26 #1.0 #1e10 #J Energy in each "photon" object
centrobject='star' #(star or AGN)
fichierspectre="spectre_NIRpeak.dat" #"s5700_spectrum.dat" #"spectre_picH.dat"
### Rsub for dust ###
rsubsilicate=0.05*AU # m approximate sublimation radius for convergence
elif(int(usemodel)==3): ## Murakawa-like star model ##
if(info.nsimu==1):
print "Murakawa low res star model (3)"
denspower=[]
spherepower=[]
densturb=[]
cloud=[]
#fact=3.*1e-3
#torus=[[100.*AU,[3.5e7*fact],0.3,1.0e-6*fact,0.5]]
torus=[[100.*AU,[1.5*1e6,0,0,0],0.3,1.0e-6,0.5]]
torus_const=[]
cylinder=[]
### grid parameters ###
res_map=20*AU # m grid resolution 25
rmax_map=1000*AU # m grid size 1500
### model parameters ### unused for model #2 ###
l_agn=3.846*1e26 #W luminosity of the AGN (or central object - star) AGN: 1e36; star: 3.846*1e26
af=0 #° opening angle of the ionization cone 20
enpaq=3.846*1e26 #1.0 #1e10 #J Energy in each "photon" object
centrobject='star' #(star or AGN)
fichierspectre="spectre_NIRpeak.dat" #"s5700_spectrum.dat" #"spectre_picH.dat"
### Rsub for dust ###
rsubsilicate=0.05*AU # m approximate sublimation radius for convergence
elif(int(usemodel)==11): ## Mie-like cylindrical AGN model ##
if(info.nsimu==1):
print "Mie-like cylindrical AGN model (11)"
denspower=[]
spherepower=[]
densturb=[]
cloud=[]
#fact=3.*1e-3
#torus=[[100.*AU,[3.5e7*fact],0.3,1.0e-6*fact,0.5]]
torus=[]
torus_const=[]
cylinder=[[6.0*pc,2.0*pc,[3.5e0,0,0,0]]]
### grid parameters ###
res_map= 0.2*pc # m grid resolution 0.1
rmax_map= 10*pc # m grid size
### model parameters ### unused for model #2 ###
l_agn=1e36 #W luminosity of the AGN (or central object - star) AGN: 1e36; star: 3.846*1e26
af=0 #° opening angle of the ionization cone 20
enpaq=1e26 #J Energy in each "photon" object
centrobject='AGN' #(star or AGN)
fichierspectre="spectre_picH.dat" #spectre_agn.dat
### Rsub for dust ###
rsubsilicate=0.0*pc # m approximate sublimation radius for convergence
elif(int(usemodel)==12): ## AGN simple model ##
if(info.nsimu==1):
print "AGN simple model (12)"
#denspower=[[0,3.3*pc,[5.0*1e-5]]]
denspower=[[0.2,1.0*pc,3.3*pc,[7.0*1e0,0,0,0]]] #[3.0]
spherepower=[[-1.0,1.0*pc,[0.2e0,0,0,0]]]
densturb=[]
cloud=[]
torus=[]
torus_const=[]
cylinder=[]
### grid parameters ###
res_map= 0.2*pc # m grid resolution 0.1
rmax_map= 10*pc # m grid size
### model parameters ### unused for model #2 ###
l_agn=1e36 #W luminosity of the AGN (or central object - star) AGN: 1e36; star: 3.846*1e26
af=0 #° opening angle of the ionization cone 20
enpaq=1e26 #J Energy in each "photon" object
centrobject='AGN' #(star or AGN)
fichierspectre="spectre_picH.dat" #spectre_agn.dat
### Rsub for dust ###
rsubsilicate=0.5*pc # m approximate sublimation radius for convergence
elif(int(usemodel)==13): ## Murakawa-like AGN model ##
if(info.nsimu==1):
print "Murakawa AGN model (13)"
denspower=[]
spherepower=[]
densturb=[]
cloud=[]
#fact=3.*1e-3
#torus=[[100.*AU,[3.5e7*fact],0.3,1.0e-6*fact,0.5]]
torus=[[10.*pc,[4.5*1e0,0,0,0],0.3,5.0e-6,0.5]]
torus_const=[]
cylinder=[]
### grid parameters ###
res_map=0.5*pc # m grid resolution 25
rmax_map=25*pc # m grid size 1500
### model parameters ### unused for model #2 ###
l_agn=1e36 #W luminosity of the AGN (or central object - star) AGN: 1e36; star: 3.846*1e26
af=0 #° opening angle of the ionization cone 20
enpaq=1e36 #1.0 #1e10 #J Energy in each "photon" object
centrobject='AGN' #(star or AGN)
fichierspectre="spectre_NIRpeak.dat" #"s5700_spectrum.dat" #"spectre_picH.dat"
### Rsub for dust ###
rsubsilicate=0.05*pc # m approximate sublimation radius for convergence
elif(int(usemodel)==14): ## Strasbourg AGN model ##
if(info.nsimu==1):
print "Strasbourg AGN model (14)"
denspower=[]
spherepower=[]
densturb=[]
cloud=[]
#fact=3.*1e-3
#torus=[[100.*AU,[3.5e7*fact],0.3,1.0e-6*fact,0.5]]
torus=[]
if(usemodel<14.05):
torus_const=[[10.*pc,[6.6,0,0,0],[0.04,0,0,0],[0.04*3/25.,0,0,0]]] #6.6 et 0.04
#torus_const=[[10.*pc,[6.6*1.5e-4],[0.04*1.5e-4],[0.04*3/25.*1.5e-4]]]
if(info.nsimu==1):
print " full model (14.0)"
elif(usemodel<14.15):
torus_const=[[10.*pc,[6.6,0,0,0],[0.,0,0,0],[0.04*3/25.,0,0,0]]]
if(info.nsimu==1):
print " no cocoon model (14.1)"
else:
torus_const=[[10.*pc,[6.6,0,0,0],[0.,0,0,0],[0.,0,0,0]]]
if(info.nsimu==1):
print " only torus model (14.2)"
cylinder=[]
### grid parameters ###
res_map=0.5*pc # m grid resolution 25
rmax_map=25*pc # m grid size 1500
### model parameters ### unused for model #2 ###
l_agn=1e36 #W luminosity of the AGN (or central object - star) AGN: 1e36; star: 3.846*1e26
af=0 #° opening angle of the ionization cone 20
enpaq=1e36 #1.0 #1e10 #J Energy in each "photon" object
centrobject='AGN' #(star or AGN)
fichierspectre="spectre_NIRpeak.dat" #"s5700_spectrum.dat" #"spectre_picH.dat"
### Rsub for dust ###
rsubsilicate=0.05*pc # m approximate sublimation radius for convergence
else:
print "ERROR : No ask and no model parameters to use."
print "Please enter a valid number for usemodel keyword or choose ask=1"
if (info.ask==1):
### grid parameters ###
res_map=input('Grid resolution (m) ?')
rmax_map=input('Grid size (m) ?')
### model parameters ### unused for model #2 ###
l_agn=input('Central object luminosity (W) ?')
af=input('Funnel aperture (°) ?')
enpaq=input('Energy in each "photon" object (J) ?')
centrobject=input('What central object is it (star or AGN) ?')
fichierspectre=input('What spectrum file to load ?')
### Rsub for dust ###
rsubsilicate=input('Sublimation radius for silicates (m) ?')
######## other parameters ########
### dust properties ###
#silicate :
typesil=0
tsubsilicate=1400 #K sublimation temperature 1997-Thatte
#rmin_sil=0.005*1.0e-6 #m min grain radius 0.005 0.2
#rmax_sil=0.25*1.0e-6 #m max grain radius
#alpha_sil=-3.5 #exponent of the grain size power law (<=0)
rmin_sil=rgmin[0] #m min grain radius 0.005 0.2
rmax_sil=rgmax[0] #m max grain radius
alpha_sil=alphagrain[0] #exponent of the grain size power law (<=0)
#electrons :
typeel=2
tsubelectron=1e10 #K sublimation temperature, unused
rmin_e=4.600*1.0e-15 #m min electron radius, from sqrt(sigma_T/pi) with Sigma_T=6.65e-29 m2
rmax_e=4.602*1.0e-15 #m max electron radius
alpha_e=0 #exponent of the grain size power law (<=0)
#graphite :
typegra=1
tsubgraphite=1700 #K sublimation temperature 1997-Thatte
#ortho
rmin_grap_ortho=rgmin[1] #0.005*1.0e-6 #m min grain radius 0.005 0.2
rmax_grap_ortho=rgmax[1] #0.25*1.0e-6 #m max grain radius
alpha_grap_ortho=alphagrain[1] #-3.5 #exponent of the grain size power law (<=0)
#para :
rmin_grap_para=rgmin[2] #0.005*1.0e-6 #m min grain radius 0.005 0.2
rmax_grap_para=rgmax[2] #0.25*1.0e-6 #m max grain radius
alpha_grap_para=alphagrain[2] #-3.5 #exponent of the grain size power law (<=0)
# useless now #
Qabs_sil=1.0e-05,4.33e-05,4.56e-03,4.09e-01,2.67e-01,9.56e-01,9.67e-01,3.92e-01
Qsca_sil=1.0e-11,2.53e-11,2.55e-07,1.80e-03,2.55e+00,1.32e+00,1.02e+00,1.05e+00
cabs_sil=Qabs_sil #Qext_sil-Qsca_sil
cs_sil=Qsca_sil #Qext_sil
#k_sil=10000 # ?
#global
rsub=[rsubsilicate] #concatenates the Rsub of all grain types
#rsub=[rsubsilicate,rsubgraphite,rsubgraphite,rsubelectron]
rsub=[rsubsilicate,rsubsilicate,rsubsilicate,rsubsilicate]
### model formalism ###
#res_map_pc=0.00002 #pc grid resolution 0.1
#rmax_map_pc=0.001 #pc grid size 10
#res_map=res_map_pc*pc #m
#rmax_map=rmax_map_pc*pc #m
### diffusion ###
#g_HG= #coef g for HG phase function depending on lambda
#scat_HG=scat_HG_factory(g_HG)
### translation for input ###
#############################
size,avsec=grain_size_surf(rmin_sil,rmax_sil,alpha_sil)
grain_sil=Grain('silicate',tsubsilicate,grain_size_surf(rmin_sil,rmax_sil,alpha_sil)[0],avsec,rmin_sil,rmax_sil,alpha_sil,typesil)#,FCsca,FCabs,Kp_expl(FCabs),k_sil)#,phase=scat_rayleigh)
size,avsec=grain_size_surf(rmin_grap_ortho,rmax_grap_ortho,alpha_grap_ortho)
grain_gra1=Grain('graphite ortho',tsubgraphite,grain_size_surf(rmin_grap_ortho,rmax_grap_ortho,alpha_grap_ortho)[0],avsec,rmin_grap_ortho,rmax_grap_ortho,alpha_grap_ortho,typegra) #,FCsca,FCabs,Kp_expl(FCabs)
size,avsec=grain_size_surf(rmin_grap_para,rmax_grap_para,alpha_grap_para)
grain_gra2=Grain('graphite para',tsubgraphite,grain_size_surf(rmin_grap_para,rmax_grap_para,alpha_grap_para)[0],avsec,rmin_grap_para,rmax_grap_para,alpha_grap_para,typegra)#,FCsca,FCabs,Kp_expl(FCabs)
electrons=Grain('electron',tsubelectron,sigt,sigt,rmin_e,rmax_e,alpha_e,typeel) #a faire ,FCsca,FCabs,Kp_expl(FCabs)
#if(grain_to_use==[1,0,0,0]):
# dust=[grain_sil] #concatenation of all grain types
#elif(grain_to_use==[1,1,1,1]):
dust=[grain_sil,grain_gra1,grain_gra2,electrons]
spectre_agn=load_spectrum(fichierspectre)
source1=Source(l_agn,spectre_agn,spdist_centre,t=centrobject)
map1=Map(rmax_map,res_map,dust)#electrons
#while checktau
if(info.ask==1):
fill_map(map1)
elif(info.ask==0):
fill_map2(map1,denspower,spherepower,densturb,cloud,torus,torus_const,cylinder,rgmin=rgmin[0],rgmax=rgmax[0],alphagrain=alphagrain[0],display=info.display,checktau=info.checktau) #to change
else:
print "Warning: invalid ask value, proceeding without prompts"
fill_map2(map1,denspower,spherepower,densturb,cloud,torus,torus_const,cylinder,rgmin=rgmin[0],rgmax=rgmax[0],alphagrain=alphagrain[0],display=info.display,checktau=info.checktau) #to change
sources=Source_total([source1]) #concatenation of all sources
model1=Model(sources,map1,af,rsub,enpaq,usemodel)
return model1
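# Illustrative usage (a sketch; assumes the montagn_* modules and the spectrum
# files listed above are available, and is not executed here):
#   model = makemodel(usemodel=2.) # Murakawa star model
#   model = makemodel(usemodel=14.1) # Strasbourg AGN model, no cocoon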
### Utility functions for map creation ###
def fill_map2(mp,denspower,spherepower,densturb,cloud,torus,torus_const,cylinder,rgmin =0.005e-6, rgmax = 0.25e-6, alphagrain = -3.5,display=1,checktau=0):
"""adds any of the available structures to a Map object."""
if(denspower!=[]):
for i in range(len(denspower)):
add_density_powerlaw2(mp,denspower[i],display=display,checktau=checktau,rgmin=rgmin,rgmax=rgmax,alphagrain=alphagrain)
if(spherepower!=[]):
for i in range(len(spherepower)):
add_spherical_powerlaw2(mp,spherepower[i],display=display,checktau=checktau,rgmin=rgmin,rgmax=rgmax,alphagrain=alphagrain)
if(densturb!=[]):
for i in range(len(densturb)):
add_density_turbulent2(mp,densturb[i],display=display,checktau=checktau,rgmin=rgmin,rgmax=rgmax,alphagrain=alphagrain)
if(cloud!=[]):
for i in range(len(cloud)):
add_cloud2(mp,cloud[i],display=display,checktau=checktau,rgmin=rgmin,rgmax=rgmax,alphagrain=alphagrain)
if(torus!=[]):
for i in range(len(torus)):
add_torus2(mp,torus[i],display=display,checktau=checktau,rgmin=rgmin,rgmax=rgmax,alphagrain=alphagrain)
if(torus_const!=[]):
for i in range(len(torus_const)):
add_torus_const2(mp,torus_const[i],display=display,checktau=checktau,rgmin=rgmin,rgmax=rgmax,alphagrain=alphagrain)
if(cylinder!=[]):
for i in range(len(cylinder)):
add_camembert2(mp,cylinder[i],display=display,checktau=checktau,rgmin=rgmin,rgmax=rgmax,alphagrain=alphagrain)
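# Illustrative call (a sketch; `mp` is an existing Map object and the sphere
# values mirror the "star cloud" model above, all other structures left empty):
#   fill_map2(mp, [], [[0, 1.0*pc, [17000.0, 0, 0, 0]]], [], [], [], [], [])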
def add_density_powerlaw2(mp,param,display=1,checktau=0,rgmin=0.005e-6,rgmax=0.25e-6,alphagrain=-3.5):
"""adds a powerlaw density of given grains to a Map object"""
lr = param[0]
lrd= param[1]
lz = param[2]
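# Density law implemented below (a sketch of what the loop computes): for each
# grain type, rho(r, z) = rho_ref * (r / lrd)**lr * exp(-|z| / lz), with r the
# cylindrical radius of the cell centre; the contribution is added on top of
# whatever densities the map already holds.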
r = []
for i in range(len(param[3])):
ri = param[3][i]
r.append(ri)
r = np.array(r)
if(checktau==1):
res = check_tau(param,1,display=display,Rmod=mp.Rmax,rgmin=rgmin,rgmax=rgmax,alphagrain=alphagrain)
if(res[0]==1):
r=res[1]
print "Filling map"
for i in range(mp.N*2):
for j in range(mp.N*2):
for k in range(mp.N*2):
r0 = np.array(mp.grid[i][j][k].rho) #initial densities
r1 = r*(mp.res/lrd)**lr*((i-mp.N+.5)**2+(j-mp.N+.5)**2)**(lr*.5)*np.exp(-abs(k-mp.N+.5)*mp.res/lz) #added densities
mp.grid[i][j][k].rho = list(r0+r1)
def add_spherical_powerlaw2(mp,param,display=1,checktau=0,rgmin=0.005e-6,rgmax=0.25e-6,alphagrain=-3.5):
"""adds a spherically symmetric powerlaw density of given grains to a Map object"""
lr = param[0]
lrd= param[1]
r = []
for i in range(len(param[2])):
ri = param[2][i]
r.append(ri)
r = np.array(r)
if(checktau==1):
res = check_tau(param,2,display=display,Rmod=mp.Rmax,rgmin=rgmin,rgmax=rgmax,alphagrain=alphagrain)
if(res[0]==1):
r=res[1]
print "Filling map"
for i in range(mp.N*2):
for j in range(mp.N*2):
for k in range(mp.N*2):
r0 = np.array(mp.grid[i][j][k].rho) #initial densities
r1 = r*(mp.res/lrd)**lr*((i-mp.N+.5)**2+(j-mp.N+.5)**2+(k-mp.N+.5)**2)**(lr*.5) #added densities
mp.grid[i][j][k].rho = list(r0+r1)
def add_density_turbulent2(mp,param,display=1,checktau=0,rgmin=0.005e-6,rgmax=0.25e-6,alphagrain=-3.5):
"""adds a turbulent density of given grains to a Map object"""
pass
def add_cloud2(mp,param,display=1,checktau=0,rgmin=0.005e-6,rgmax=0.25e-6,alphagrain=-3.5):
"""adds a cloud (bump function) to a Map object"""
x0 = input('x0 ? ')
y0 = input('y0 ? ')
z0 = input('z0 ? ')
a = input('largest semi-axis ? ')
b = input('second semi-axis ? ')
c = input('third semi-axis ? ')
theta = input('first axis polar angle ? ')
phi = input('first axis azimuthal angle ? ')
if np.sqrt(x0**2+y0**2+z0**2)+max(a,b,c) > mp.Rmax:
print 'Warning ! The cloud might reach beyond Rmax'
else:
rho = []
def cloud(x,y,z):
X = (np.cos(theta)*np.cos(phi)*(x-x0)+ np.sin(theta)*np.cos(phi)*(z-z0) - np.sin(phi)*(y-y0))/a
Y = (np.cos(theta)*np.sin(phi)*(x-x0) + np.sin(theta)*np.sin(phi)*(z-z0) + np.cos(phi)*(y-y0))/b
Z = (-np.sin(theta)*(x-x0) + np.cos(theta)*(z-z0))/c
#dilatation, then polar rotation (around y axis) then azimuthal rotation (around z axis) then translation
#bump function : reprojected gaussian function
if X**2+Y**2+Z**2 > 1 - 1e-6: #1e-6 for precision safety
return 0
else:
return np.exp(1-1/(1-(X**2+Y**2+Z**2))) #maximum at (0,0,0) normalized to 1.
for i in range(len(mp.dust)):
rho = input('Central density of %s grains ? '%mp.dust[i].name)
for x in np.arange(x0-1.05*a,x0+1.05*a+mp.res,mp.res):
for y in np.arange(y0-1.05*a,y0+1.05*a+mp.res,mp.res):
for z in np.arange(z0-1.05*a,z0+1.05*a+mp.res,mp.res):
xm = np.floor(x/mp.res)*mp.res
xp = np.ceil(x/mp.res)*mp.res
ym = np.floor(y/mp.res)*mp.res
yp = np.ceil(y/mp.res)*mp.res
zm = np.floor(z/mp.res)*mp.res
zp = np.ceil(z/mp.res)*mp.res
m = (cloud(xm,ym,zm)+cloud(xm,ym,zp)+cloud(xm,yp,zm)+cloud(xm,yp,zp)+\
cloud(xp,ym,zm)+cloud(xp,ym,zp)+cloud(xp,yp,zm)+cloud(xp,yp,zp))*.125
#average of the 8 corners of the cell (faster and simpler than an integration)
r0 = np.array(mp.get(x,y,z).rho) #initial densities
r1 = np.array(rho)*m #added densities
T = mp.get(x,y,z).T
mp.set(x,y,z,mc.Cell(T,list(r0+r1)))
def add_torus2(mp,param,display=1,checktau=0,rgmin=0.005e-6,rgmax=0.25e-6,alphagrain=-3.5):
"""adds a torus powerlaw density of given grains to a Map object
as described in Murakawa - 2010 - page 2"""
Rdisk=param[0] #in m
rhod=[]
H=param[2]
Mpenv=param[3] #in Msol/yr
Ms=param[4] #in Msol
Gs=G/(pc*pc*pc)*Msol*yr*yr
mu0=np.cos(80./2.*np.pi/180.)
Rc=Rdisk/pc
Rdisk=Rdisk/pc
#Computing the mass per particle
rmax=rgmax #0.25*1e-6
rmin=rgmin #0.005*1e-6
density=3.3*1e3 #kg/m3 # or 0.29*1e-3 ? ##cf Vincent Guillet 2008
particlemass=density*20./3.*np.pi*(np.sqrt(rmax)-np.sqrt(rmin))/(1./rmin**2.5-1./rmax**2.5) #kg/particle
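# Where this expression comes from (a sketch): for an MRN size distribution
# n(a) ~ a**-3.5 between rmin and rmax, the mean grain volume is
#   <V> = (4*pi/3) * Int(a**3 * a**-3.5 da) / Int(a**-3.5 da)
#       = (4*pi/3) * 2*(sqrt(rmax)-sqrt(rmin)) / ((2/5)*(1/rmin**2.5-1/rmax**2.5))
#       = (20*pi/3) * (sqrt(rmax)-sqrt(rmin)) / (1/rmin**2.5 - 1/rmax**2.5)
# so particlemass = bulk density * <V>, matching the line above.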
for i in range(len(param[1])):
ri = param[1][i]
rhod.append(ri)
rhod = np.array(rhod)
if(checktau==1):
res = check_tau(param,5,display=display,rgmin=rgmin,rgmax=rgmax,alphagrain=alphagrain)
if(res[0]==1):
rhod=res[1]
print "Filling map"
for i in range(mp.N*2):
for j in range(mp.N*2):
for k in range(mp.N*2):
x=(i-mp.N+.5)*(mp.res/pc)
y=(j-mp.N+.5)*(mp.res/pc)
z=(k-mp.N+.5)*(mp.res/pc)
r=np.sqrt(x*x+y*y)
R=np.sqrt(x*x+y*y+z*z)
mu=np.abs(z/R)
r0 = np.array(mp.grid[i][j][k].rho) #initial densities
if(R<Rdisk):
#if(j==mp.N and k==mp.N):
#r3[i] = rhod*((r/Rdisk)**(-15./8.))*np.exp(-np.pi/4.*(z/(Rdisk*H*(r/Rdisk)**(9./8.)))**2)
if(mu<=mu0):
r1 = rhod*((r/Rdisk)**(-15./8.))*np.exp(-np.pi/4.*(z/(Rdisk*H*(r/Rdisk)**(9./8.)))**2) #added disk densities
else:
r1 = rhod*((r/Rdisk)**(-15./8.))*np.exp(-np.pi/4.*(z/(Rdisk*H*(r/Rdisk)**(9./8.)))**2)*0.01
r2 = [0]
else:
r1 = [0]
#mu0 =
#r2 = [Mpenv*Msol/(3.0e-15*pc*pc*pc*4.*np.pi*np.sqrt(Gs*Ms*R*R*R)*np.sqrt(1+mu/mu0)*(mu/mu0+2.*mu0*mu0*Rc/R))] #added envelope densities
if(mu<=mu0):
r2 = [Mpenv*Msol/(4.*np.pi*np.sqrt(Gs*Ms*R*R*R)*np.sqrt(1+mu/mu0)*(mu/mu0+2.*mu0*mu0*Rc/R))/(particlemass*pc*pc*pc)] #added envelope densities
else:
r2 = [Mpenv*Msol/(4.*np.pi*np.sqrt(Gs*Ms*R*R*R)*np.sqrt(1+mu/mu0)*(mu/mu0+2.*mu0*mu0*Rc/R))/(particlemass*pc*pc*pc)*0.01]
#r2 = [Mpenv*Msol/(3.0e-15)/(pc*pc*pc)/(4.*np.pi*np.sqrt(Gs*Ms*R*R*R))/(mu/mu0+2.*mu0*mu0*Rc/R)] #added envelope densities
#if(i==mp.N and j==mp.N and k==mp.N):
#print "z, R :",z,y,x,R
#print "rho :",r1,r2,list(r1+r2)
mp.grid[i][j][k].rho = list(r0+r1+r2)
#print "top"
#n=10000
#r3 = np.zeros([mp.N*2*n])
#for i in range(n*2*mp.N):
# x=(i-mp.N*n+.5)*(mp.res/pc/n)
# y=0.0
# z=0.0
# r=np.sqrt(x*x+y*y)
# R=np.sqrt(x*x+y*y+z*z)
# mu=np.abs(z/R)
# if(r>0.05*AU/pc):
# r3[i] = rhod*((r/Rdisk)**(-15./8.))*np.exp(-np.pi/4.*(z/(Rdisk*H*(r/Rdisk)**(9./8.)))**2)
#plt.plot(r3)
#print sum(r3)*particlemass
#print "bot"
def add_torus_const2(mp,param,display=1,checktau=0,rgmin=0.005e-6,rgmax=0.25e-6,alphagrain=-3.5):
"""adds a constant-density torus, envelope and cone of given grains to a Map
object (geometry adapted from Murakawa - 2010 - page 2)"""
Rdisk=param[0] #in m
rhod=[]
rhoe=[]
rhoc=[]
#H=param[2]
#Mpenv=param[3] #in Msol/yr
#Ms=param[4] #in Msol
Gs=G/(pc*pc*pc)*Msol*yr*yr
#mu0=np.cos(80./2.*np.pi/180.)
mu0=np.cos(25*np.pi/180.)
Rc=Rdisk/pc
Rdisk=Rdisk/pc
theta0=30*np.pi/180.
mutheta=np.cos(np.pi/2.-theta0)
Rout=25 #pc
#Computing the mass per particle
rmax=rgmax #0.25*1e-6
rmin=rgmin #0.005*1e-6
density=3.3*1e3 #kg/m3 # or 0.29*1e-3 ? ##cf Vincent Guillet 2008
particlemass=density*20./3.*np.pi*(np.sqrt(rmax)-np.sqrt(rmin))/(1./rmin**2.5-1./rmax**2.5) #kg/particle
for i in range(len(param[1])):
ri = param[1][i]
rhod.append(ri)
rhod = np.array(rhod)
for i in range(len(param[2])):
rie = param[2][i]
rhoe.append(rie)
rhoe = np.array(rhoe)
for i in range(len(param[3])):
ric = param[3][i]
rhoc.append(ric)
rhoc = np.array(rhoc)
if(checktau==1):
res = check_tau(param,6,display=display,rgmin=rgmin,rgmax=rgmax,alphagrain=alphagrain)
if(res[0]==1):
fact=res[1]/rhod
rhod=res[1]
rhoe*=fact
rhoc*=fact
print "Filling map"
for i in range(mp.N*2):
for j in range(mp.N*2):
for k in range(mp.N*2):
x=(i-mp.N+.5)*(mp.res/pc)
y=(j-mp.N+.5)*(mp.res/pc)
z=(k-mp.N+.5)*(mp.res/pc)
r=np.sqrt(x*x+y*y)
R=np.sqrt(x*x+y*y+z*z)
mu=np.abs(z/R)
r0 = np.array(mp.grid[i][j][k].rho) #initial densities
if(R<Rdisk):
if(mu<=mu0):
if(mu<=mutheta):
r1 = rhod #added disk densities
else:
r1= [0]
else:
r1= rhoc
r2 = [0]
elif(R<Rout):
r1 = [0]
if(mu<=mu0):
r2 = rhoe #added envelope densities
else:
r2 = rhoc
else:
r1=[0]
r2=[0]
mp.grid[i][j][k].rho = list(r0+r1+r2)
#print "top"
#n=10000
#r3 = np.zeros([mp.N*2*n])
#for i in range(n*2*mp.N):
# x=(i-mp.N*n+.5)*(mp.res/pc/n)
# y=0.0
# z=0.0
# r=np.sqrt(x*x+y*y)
# R=np.sqrt(x*x+y*y+z*z)
# mu=np.abs(z/R)
# if(r>0.05*AU/pc):
# r3[i] = rhod*((r/Rdisk)**(-15./8.))*np.exp(-np.pi/4.*(z/(Rdisk*H*(r/Rdisk)**(9./8.)))**2)
#plt.plot(r3)
#print sum(r3)*particlemass
#print "bot"
def add_camembert2(mp,param,display=1,checktau=0,rgmin=0.005e-6,rgmax=0.25e-6,alphagrain=-3.5):
"""adds a constant density cylindrical box of given grains to a Map object"""
radius = param[0]
h = param[1]
r = []
for i in range(len(param[2])):
ri = param[2][i]
r.append(ri)
r = np.array(r)
if(checktau==1):
res = check_tau(param,7,display=display,rgmin=rgmin,rgmax=rgmax,alphagrain=alphagrain)
if(res[0]==1):
r=res[1]
print "Filling map"
for i in range(mp.N*2):
for j in range(mp.N*2):
for k in range(mp.N*2):
R=np.sqrt((i-mp.N+0.5)*(i-mp.N+0.5)+(j-mp.N+0.5)*(j-mp.N+0.5))*mp.res
z=(k-mp.N+0.5)*mp.res
r0 = np.array(mp.grid[i][j][k].rho) #initial densities
if(np.abs(z)<h and R<radius):
r1 = r #r*(mp.res/pc)**lr*((i-mp.N+.5)**2+(j-mp.N+.5)**2+(k-mp.N+.5)**2)**(lr*.5) #added densities
else:
r1=[0]
mp.grid[i][j][k].rho = list(r0+r1)
def check_tau(param,densmod,display=1,Rmod=100*AU,rgmin=0.005e-6,rgmax=0.25e-6,alphagrain=-3.5):
"""Compute the optical depth of a given grain density model
(the torus geometry follows Murakawa - 2010 - page 2)
updated on 18/04/2016
densmod:
1:density powerlaw
2:spherical powerlaw
3:density turbulence
4:cloud
5:torus
6:torus const
7:cylindrical box
"""
if(densmod==1):
print "Computing tau for density powerlaw :"
lr = param[0]
lrd= param[1]
lz = param[2]
rhod = []
L=Rmod
for i in range(len(param[3])):
ri = param[3][i]
rhod.append(ri)
rhod = np.array(rhod)
elif(densmod==2):
print "Computing tau for spherical powerlaw :"
lr = param[0]
lrd= param[1]
rhod = []
L=Rmod
for i in range(len(param[2])):
ri = param[2][i]
rhod.append(ri)
rhod = np.array(rhod)
elif(densmod==3):
print "Tau computing not available for density turbulence"
L=1
rhod=[0.]
elif(densmod==4):
print "Tau computing not available for cloud"
L=1
rhod=[0.]
elif(densmod==5):
print "Computing tau for torus geometry :"
Rdisk=param[0] #in m
rhod=[]
H=param[2]
Mpenv=param[3] #in Msol/yr
Ms=param[4] #in Msol
Gs=G/(pc*pc*pc)*Msol*yr*yr
mu0=np.cos(80./2.*np.pi/180.)
Rc=Rdisk #/pc
#Rdisk=Rdisk/pc
L=Rdisk #*pc
for i in range(len(param[1])):
ri = param[1][i]
rhod.append(ri)
rhod = np.array(rhod)
elif(densmod==6):
print "Computing tau for torus geometry :"
Rdisk=param[0] #in m
rhod=[]
rhoe=[]
rhoc=[]
Gs=G/(pc*pc*pc)*Msol*yr*yr
mu0=np.cos(25*np.pi/180.)
Rc=Rdisk/pc
Rdisk=Rdisk/pc
theta0=30*np.pi/180.
mutheta=np.cos(np.pi/2.-theta0)
Rout=25 #pc
L=Rdisk*pc
for i in range(len(param[1])):
ri = param[1][i]
rhod.append(ri)
rhod = np.array(rhod)
#for i in range(len(param[2])):
# rie = param[2][i]
# rhoe.append(rie)
#rhoe = np.array(rhoe)
#for i in range(len(param[3])):
# ric = param[3][i]
# rhoc.append(ric)
#rhoc = np.array(rhoc)
elif(densmod==7):
print "Computing tau for cylindrical geometry :"
radius = param[0]
h = param[1]
rhod = []
for i in range(len(param[2])):
ri = param[2][i]
rhod.append(ri)
rhod = np.array(rhod)
L=radius
#Computing the mass per particle
rmax=rgmax #0.25*1e-6
rmin=rgmin #0.005*1e-6
    density=3.3*1e3 #kg/m3 # or 0.29*1e-3 ? ## cf. Vincent Guillet 2008
    particlemass=density*20./3.*np.pi*(np.sqrt(rmax)-np.sqrt(rmin))/(1./rmin**2.5-1./rmax**2.5) #kg/particle for alphagrain=-3.5
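    # Derivation note (added): for a size distribution n(a) ~ a**alphagrain
    # with alphagrain = -3.5, the mean grain mass is
    #   <m> = (4/3)*pi*density * integral(a**(-0.5) da) / integral(a**(-3.5) da)
    #       = (4/3)*pi*density * 2*(sqrt(rmax)-sqrt(rmin)) / ((2/5)*(1/rmin**2.5 - 1/rmax**2.5))
    #       = (20/3)*pi*density * (sqrt(rmax)-sqrt(rmin)) / (1/rmin**2.5 - 1/rmax**2.5),
    # which is exactly the expression above (valid for alphagrain = -3.5 only).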
##Getting the Qabs information
rmin=rgmin #0.005*1e-6
rmax=rgmax #0.25*1e-6
x_M,S11,S12,S33,S34,phase,albedo,Qexttab,Qabstab=fill_mueller(10,1000,gen_generator(),rgmin,rgmax,alphagrain)
Q=Qexttab
l=-1
while(l<0):
        ##Computing the grain density in the disk plane
n=10001
#ng = np.zeros([mp.N*2*n])
ngx=0.
ng=0.
for i in range(n):
x=(i+0.5)*(L/n)
y=0.5*(L/n)
z=0.5*(L/n)
xi=(i+0.5)
yi=0.5
zi=0.5
r=np.sqrt(x*x+y*y)
R=np.sqrt(x*x+y*y+z*z)
mu=np.abs(z/R)
#computing ngx in averaged part/m3
if(densmod==1):
ngx+=rhod*((1.0/lrd)**lr*((x)**2+(y)**2)**(lr*.5)*np.exp(-abs(z)/lz))/n
elif(densmod==2):
ngx+=rhod*((1.0/lrd)**lr*((x)**2+(y)**2+(z)**2)**(lr*.5))/n
elif(densmod==3):
ngx=0.
elif(densmod==4):
ngx=0.
elif(densmod==5):
if(r>0.05*AU/pc):
#ng[i] = rhod*((r/Rdisk)**(-15./8.))*np.exp(-np.pi/4.*(z/(Rdisk*H*(r/Rdisk)**(9./8.)))**2)
ngx+=rhod*((r/Rdisk)**(-15./8.))*np.exp(-np.pi/4.*(z/(Rdisk*H*(r/Rdisk)**(9./8.)))**2)/n
elif(densmod==6):
                ngx+=rhod/n # to be verified!
elif(densmod==7):
ngx+=rhod/n
##Computing the grain density in the disk
n=101
for i in range(n*2):
for j in range(n*2):
for k in range(n*2):
x=(i-n+.5)*(L/n)
y=(j-n+.5)*(L/n)
z=(k-n+.5)*(L/n)
r=np.sqrt(x*x+y*y)
R=np.sqrt(x*x+y*y+z*z)
mu=np.abs(z/R)
#computing ng in sum(part/m3)
if(densmod==1):
ng+=rhod*((1.0/lrd)**lr*((x)**2+(y)**2)**(lr*.5)*np.exp(-abs(z)/lz))
elif(densmod==2):
ng+=rhod*(1.0/lrd)**lr*((x)**2+(y)**2+(z)**2)**(lr*.5)
elif(densmod==3):
ng=0.
elif(densmod==4):
ng=0.
elif(densmod==5):
if(r>0.05*AU/pc):
#ng[i] = rhod*((r/Rdisk)**(-15./8.))*np.exp(-np.pi/4.*(z/(Rdisk*H*(r/Rdisk)**(9./8.)))**2)
if(mu<=mu0):
ng+=rhod*((r/Rdisk)**(-15./8.))*np.exp(-np.pi/4.*(z/(Rdisk*H*(r/Rdisk)**(9./8.)))**2)
else:
ng+=rhod*((r/Rdisk)**(-15./8.))*np.exp(-np.pi/4.*(z/(Rdisk*H*(r/Rdisk)**(9./8.)))**2)*0.01
elif(densmod==6):
                        ng+=rhod ## still to be written!!
elif(densmod==7):
ng+=rhod
#plt.plot(r3)
##Computing the mass
Ncell=n*n*n*8
ng*=particlemass*L*L*L/Ncell
#ngx*=particlemass
print "A value of rho_d of :",rhod[0],"give :"
print ng[0],"kg of dust for this geometry"
##Computing tau in V
wvl=0.5e-6
a=x_M*wvl/(2.*np.pi)
Qaa=0
for i in range(len(Q)):
if(i<len(Q)-1):
da=a[i+1]-a[i]
else:
da=a[i]-a[i-1]
if(a[i]<rmax and a[i]>rmin):
Qaa+=Q[i]*da*2.5/(a[i]**1.5*(1./(rmin**2.5)-1./(rmax**2.5)))
tau_V=ngx*Qaa*L*np.pi
wvl=2.2e-6
a=x_M*wvl/(2.*np.pi)
##Computing tau in K
Qaa=0
for i in range(len(Q)):
if(i<len(Q)-1):
da=a[i+1]-a[i]
else:
da=a[i]-a[i-1]
if(a[i]<rmax and a[i]>rmin):
Qaa+=Q[i]*da*2.5/(a[i]**1.5*(1./(rmin**2.5)-1./(rmax**2.5)))
tau_K=ngx*Qaa*L*np.pi
#print ngx,Qaa,L
print "tauV of ",tau_V[0],"and tauK of ",tau_K[0],"in the disk plan (x,y)"
if(display==1):
            ans=input("Is this value of rho_d what you want? (1 for yes / 0 for no)")
#if(ans=="y" or ans=="Y" or ans=="yes" or ans=="Yes"):
if(ans==1):
l=np.abs(l)
else:
rhod=np.array(input("Please enter new value of rho_d (with []):"))
l=l*2
else:
l=1
if(l>2):
l=2
return l-1,rhod
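# Hedged sketch (added, not part of the original module): the column optical
# depth computed above is tau = n_g * <Q * pi * a**2> * L, where the size
# average uses n(a) ~ a**-3.5 normalised on [rmin, rmax]. A minimal
# stand-alone version; _tau_sketch is a hypothetical helper and assumes a
# grain-size grid `a` with matching efficiencies `Q` is already available.
def _tau_sketch(ngx, a, Q, rmin, rmax, L):
    """Optical depth along a path of length L for an a**-3.5 distribution."""
    import numpy as np
    norm = 1. / rmin ** 2.5 - 1. / rmax ** 2.5
    da = np.gradient(a)                       # bin widths on the size grid
    inside = (a > rmin) & (a < rmax)
    # size-averaged Q*a**2 under n(a) = 2.5 * a**-3.5 / norm
    Qaa = np.sum(Q[inside] * da[inside] * 2.5 / (a[inside] ** 1.5 * norm))
    return ngx * Qaa * L * np.pi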
|
lukaltair/MontAGN_V2
|
montagn/montagn_launch.py
|
Python
|
gpl-3.0
| 37,476
|
[
"Gaussian"
] |
8b631ad1e539221407977fbd5f20434b215da2ddfa4f20c7e13db486a8499799
|
# Copyright 2012 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
import time
from traits.api import Float
# ============= standard library imports ========================
from numpy import array, histogram, argmax, zeros, asarray, ones_like, \
nonzero, max, arange, argsort, invert, median, mean, zeros_like
from operator import attrgetter
from skimage.morphology import watershed
from skimage.draw import polygon, circle, circle_perimeter, circle_perimeter_aa
from scipy import ndimage
from skimage.exposure import rescale_intensity
from skimage.filters import gaussian
from skimage import feature
# ============= local library imports ==========================
from pychron.loggable import Loggable
from pychron.mv.segment.region import RegionSegmenter
from pychron.image.cv_wrapper import grayspace, draw_contour_list, contour, \
colorspace, get_polygons, get_size, new_point, draw_rectangle, \
draw_lines, \
draw_polygons, crop
from pychron.mv.target import Target
from pychron.core.geometry.geometry import approximate_polygon_center, \
calc_length
def _coords_inside_image(rr, cc, shape):
mask = (rr >= 0) & (rr < shape[0]) & (cc >= 0) & (cc < shape[1])
return rr[mask], cc[mask]
def draw_circle(frame, center_x, center_y, radius, color, **kw):
cy, cx = circle(int(center_y), int(center_x), int(radius), shape=frame.shape)
frame[cy, cx] = color
def draw_circle_perimeter(frame, center_x, center_y, radius, color):
cy, cx = circle_perimeter(int(center_y), int(center_x), int(radius))
cy, cx = _coords_inside_image(cy, cx, frame.shape)
frame[cy, cx] = color
class Locator(Loggable):
pxpermm = Float
use_histogram = False
use_arc_approximation = True
use_square_approximation = True
step_signal = None
pixel_depth = 255
def wait(self):
if self.step_signal:
self.step_signal.wait()
self.step_signal.clear()
def crop(self, src, cw, ch, ox=0, oy=0, verbose=True):
cw_px = int(cw * self.pxpermm)
ch_px = int(ch * self.pxpermm)
w, h = get_size(src)
x = int((w - cw_px) / 2. + ox)
y = int((h - ch_px) / 2. - oy)
# r = 4 - cw_px % 4
# cw_px = ch_px = cw_px + r
if verbose:
self.debug('Crop: x={},y={}, cw={}, ch={}, '
'width={}, height={}, ox={}, oy={}'.format(x, y, cw_px, ch_px, w, h, ox, oy))
return asarray(crop(src, x, y, cw_px, ch_px))
def find(self, image, frame, dim, shape='circle', **kw):
"""
image is a stand alone image
dim = float. radius or half length of a square in pixels
find the hole in the image
return the offset from the center of the image
0. image is alredy cropped
1. find polygons
"""
dx, dy = None, None
targets = self._find_targets(image, frame, dim, shape=shape, **kw)
if targets:
self.info('found {} potential targets'.format(len(targets)))
# draw center indicator
src = image.source_frame
self._draw_center_indicator(src, size=2, shape='rect', radius=round(dim))
# draw targets
self._draw_targets(src, targets)
if shape == 'circle':
if self.use_arc_approximation:
# calculate circle_minimization position
dx, dy = self._arc_approximation(src, targets[0], dim)
else:
dx, dy = self._calculate_error(targets)
else:
dx, dy = self._calculate_error(targets)
# if self.use_square_approximation:
# dx, dy = self._square_approximation(src, targets[0], dim)
# image.set_frame(src[:])
self.info('dx={}, dy={}'.format(dx, dy))
return dx, dy
def _find_targets(self, image, frame, dim, shape='circle',
search=None, preprocess=True,
filter_targets=True,
convexity_filter=False,
mask=False,
set_image=True, inverted=False):
"""
use a segmentor to segment the image
"""
if search is None:
search = {}
if preprocess:
if not isinstance(preprocess, dict):
preprocess = {}
src = self._preprocess(frame, **preprocess)
else:
src = grayspace(frame)
if src is None:
print('Locator: src is None')
return
if mask:
self._mask(src, mask)
if inverted:
src = invert(src)
start = search.get('start')
if start is None:
w = search.get('width', 10)
start = int(mean(src[src > 0])) - search.get('start_offset_scalar', 3) * w
step = search.get('step', 2)
n = search.get('n', 20)
blocksize_step = search.get('blocksize_step', 5)
seg = RegionSegmenter(use_adaptive_threshold=search.get('use_adaptive_threshold', False),
blocksize=search.get('blocksize', 20))
fa = self._get_filter_target_area(shape, dim)
phigh, plow = None, None
for j in range(n):
ww = w * (j + 1)
self.debug('start intensity={}, width={}'.format(start, ww))
for i in range(n):
low = max((0, start + i * step - ww))
high = max((1, min((255, start + i * step + ww))))
if inverted:
low = 255-low
high = 255-high
seg.threshold_low = low
seg.threshold_high = high
if seg.threshold_low == plow and seg.threshold_high == phigh:
break
plow = seg.threshold_low
phigh = seg.threshold_high
nsrc = seg.segment(src)
seg.blocksize += blocksize_step
nf = colorspace(nsrc)
# draw contours
targets = self._find_polygon_targets(nsrc, frame=nf)
if set_image and image is not None:
image.set_frame(nf)
if targets:
# filter targets
if filter_targets:
targets = self._filter_targets(image, frame, dim, targets, fa)
elif convexity_filter:
# for t in targets:
# print t.convexity, t.area, t.min_enclose_area, t.perimeter_convexity
targets = [t for t in targets if t.perimeter_convexity > convexity_filter]
if targets:
return sorted(targets, key=attrgetter('area'), reverse=True)
# time.sleep(0.5)
def _mask(self, src, radius=None):
radius *= self.pxpermm
h, w = src.shape[:2]
c = circle(h / 2., w / 2., radius, shape=(h, w))
mask = ones_like(src, dtype=bool)
mask[c] = False
src[mask] = 0
return invert(mask)
# ===============================================================================
# filter
# ===============================================================================
def _filter_targets(self, image, frame, dim, targets, fa, threshold=0.85):
"""
filter targets using the _filter_test function
return list of Targets that pass _filter_test
"""
ts = [self._filter_test(image, frame, ti, dim, threshold, fa[0], fa[1])
for ti in targets]
return [ta[0] for ta in ts if ta[1]]
def _filter_test(self, image, frame, target, dim, cthreshold, mi, ma):
"""
if the convexity of the target is <threshold try to do a watershed segmentation
make black image with white polygon
do watershed segmentation
find polygon center
"""
ctest, centtest, atest = self._test_target(frame, target,
cthreshold, mi, ma)
        # print('ctest', ctest, cthreshold, 'centtest', centtest, 'atest', atest, mi, ma)
result = ctest and atest and centtest
if not ctest and (atest and centtest):
target = self._segment_polygon(image, frame,
target,
dim,
cthreshold, mi, ma)
result = True if target else False
return target, result
def _test_target(self, frame, ti, cthreshold, mi, ma):
        # print('convexity', ti.convexity, 'area', ti.area)
ctest = ti.convexity > cthreshold
centtest = self._near_center(ti.centroid, frame)
atest = ma > ti.area > mi
return ctest, centtest, atest
    def _find_polygon_targets(self, src, frame=None):
        src, contours, hierarchy = contour(src)
        # contours, hierarchy = find_contours(src)
        # convert to color for display
        if frame is not None:
            draw_contour_list(frame, contours, hierarchy)
        # do polygon approximation
        origin = self._get_frame_center(src)
        pargs = get_polygons(src, contours, hierarchy)
        return self._make_targets(pargs, origin)
def _segment_polygon(self, image, frame, target, dim, cthreshold, mi, ma):
src = frame[:]
wh = get_size(src)
# make image with polygon
im = zeros(wh)
points = asarray(target.poly_points)
rr, cc = polygon(*points.T)
im[cc, rr] = 255
# do watershedding
distance = ndimage.distance_transform_edt(im)
local_maxi = feature.peak_local_max(distance, labels=im, indices=False)
markers, ns = ndimage.label(local_maxi)
wsrc = watershed(-distance, markers, mask=im)
wsrc = wsrc.astype('uint8')
# self.test_image.setup_images(3, wh)
# self.test_image.set_image(distance, idx=0)
# self.test_image.set_image(wsrc, idx=1)
# self.wait()
targets = self._find_polygon_targets(wsrc)
ct = cthreshold * 0.75
target = self._test_targets(wsrc, targets, ct, mi, ma)
if not target:
values, bins = histogram(wsrc, bins=max((10, ns)))
# assume 0 is the most abundant pixel. ie the image is mostly background
values, bins = values[1:], bins[1:]
idxs = nonzero(values)[0]
'''
polygon is now segmented into multiple regions
            consecutively remove a region and find targets
'''
nimage = ones_like(wsrc, dtype='uint8') * 255
nimage[wsrc == 0] = 0
for idx in idxs:
bl = bins[idx]
bu = bins[idx + 1]
nimage[((wsrc >= bl) & (wsrc <= bu))] = 0
targets = self._find_polygon_targets(nimage)
target = self._test_targets(nimage, targets, ct, mi, ma)
if target:
break
return target
def _test_targets(self, src, targets, ct, mi, ma):
if targets:
for ti in targets:
if all(self._test_target(src,
ti, ct, mi, ma)):
return ti
# ===============================================================================
# preprocessing
# ===============================================================================
def _preprocess(self, frame, stretch_intensity=True, blur=1, denoise=0):
"""
1. convert frame to grayscale
2. remove noise from frame. increase denoise value for more noise filtering
3. stretch contrast
"""
if len(frame.shape) != 2:
frm = grayspace(frame) * 255
else:
frm = frame / self.pixel_depth * 255
frm = frm.astype('uint8')
# self.preprocessed_frame = frame
# if denoise:
# frm = self._denoise(frm, weight=denoise)
# print 'gray', frm.shape
if blur:
frm = gaussian(frm, blur) * 255
frm = frm.astype('uint8')
# frm1 = gaussian(self.preprocessed_frame, blur,
# multichannel=True) * 255
# self.preprocessed_frame = frm1.astype('uint8')
if stretch_intensity:
frm = rescale_intensity(frm)
# frm = self._contrast_equalization(frm)
# self.preprocessed_frame = self._contrast_equalization(self.preprocessed_frame)
return frm
def _denoise(self, img, weight):
"""
use TV-denoise to remove noise
http://scipy-lectures.github.com/advanced/image_processing/
http://en.wikipedia.org/wiki/Total_variation_denoising
"""
from skimage.filters import denoise_tv_chambolle
img = denoise_tv_chambolle(img, weight=weight) * 255
return img.astype('uint8')
# def _contrast_equalization(self, img):
# """
# rescale intensities to maximize contrast
# """
# # from numpy import percentile
# # Contrast stretching
# # p2 = percentile(img, 2)
# # p98 = percentile(img, 98)
#
# return rescale_intensity(asarray(img))
# ===============================================================================
# deviation calc
# ===============================================================================
# def _square_approximation(self, src, target, dim):
# tx, ty = self._get_frame_center(src)
# pts = target.poly_points
#
#
# cx, cy = dx + tx, dy + ty
# dy = -dy
# self._draw_indicator(src, (cx, cy), color=(255, 0, 128), shape='crosshairs')
#
# return dx, dy
def _arc_approximation(self, src, target, dim):
"""
        find cx,cy of a circle of radius r using the arc center method
        only performed if the target has high convexity
convexity is simply defined as ratio of area to convex hull area
"""
tol = 0.8
if target.convexity > tol:
self.info('doing arc approximation radius={}'.format(dim))
tx, ty = self._get_frame_center(src)
pts = target.poly_points
pts[:, 1] = pts[:, 1] - ty
pts[:, 0] = pts[:, 0] - tx
dx, dy = approximate_polygon_center(pts, dim)
cx, cy = dx + tx, dy + ty
dy = -dy
self._draw_indicator(src, (cx, cy), color=(255, 0, 128), shape='crosshairs')
draw_circle_perimeter(src, cx, cy, round(dim), color=(255, 0, 128))
else:
dx, dy = self._calculate_error([target])
return dx, dy
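    # Hedged sketch (added for illustration): "convexity" above is the ratio
    # of the polygon's area to its convex-hull area. A minimal stand-alone
    # computation; _polygon_convexity is a hypothetical helper, not part of
    # the original class.
    @staticmethod
    def _polygon_convexity(pts):
        from numpy import roll, abs as nabs
        from scipy.spatial import ConvexHull
        x, y = pts[:, 0], pts[:, 1]
        # shoelace formula for the (possibly concave) polygon area
        area = 0.5 * nabs((x * roll(y, -1) - y * roll(x, -1)).sum())
        hull_area = ConvexHull(pts).volume  # for 2-D points, .volume is area
        return area / hull_area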
def _calculate_error(self, targets):
"""
calculate the dx,dy
deviation of the targets centroid from the center of the image
"""
def hist(d):
f, v = histogram(array(d))
i = len(f) if argmax(f) == len(f) - 1 else argmax(f)
return v[i]
devxs, devys = list(zip(*[r.dev_centroid for r in targets]))
if len(targets) > 2 and self.use_histogram:
dx = hist(devxs)
dy = hist(devys)
else:
def avg(s):
return sum(s) / len(s)
dx = avg(devxs)
dy = avg(devys)
return -dx, dy
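    # Illustrative note (added): with more than two candidate targets the
    # histogram branch above picks the modal deviation instead of the mean,
    # which is more robust to a single outlier centroid, e.g.
    #   devxs = [2.0, 2.1, 1.9, 40.0]  ->  mean ~ 11.5, histogram mode ~ 2.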
# ===============================================================================
# helpers
# ===============================================================================
def _make_targets(self, pargs, origin):
"""
convenience function for assembling target list
"""
targets = []
for pi, ai, co, ci, pa, pch, mask in pargs:
if len(pi) < 5:
continue
tr = Target()
tr.origin = origin
tr.poly_points = pi
# tr.bounding_rect = br
tr.area = ai
tr.min_enclose_area = co
tr.centroid = ci
tr.pactual = pa
tr.pconvex_hull = pch
tr.mask = mask
targets.append(tr)
return targets
def _filter(self, targets, func, *args, **kw):
return [ti for ti in targets if func(ti, *args, **kw)]
def _target_near_center(self, target, *args, **kw):
return self._near_center(target.centroid, *args, **kw)
def _near_center(self, xy, frame, tol=0.75):
"""
is the point xy within tol distance of the center
"""
cxy = self._get_frame_center(frame)
d = calc_length(xy, cxy)
tol *= self.pxpermm
return d < tol
def _get_filter_target_area(self, shape, dim):
"""
calculate min and max bounds of valid polygon areas
"""
if shape == 'circle':
miholedim = 0.5 * dim
maholedim = 1.25 * dim
mi = miholedim ** 2 * 3.1415
ma = maholedim ** 2 * 3.1415
else:
d = (2*dim)**2
mi = 0.5 * d
ma = 1.25 * d
return mi, ma
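    # Worked example (added): for a circular hole of radius dim = 10 px the
    # accepted polygon area is (0.5*10)**2*pi ~ 78.5 px**2 up to
    # (1.25*10)**2*pi ~ 490.9 px**2; for a square of half-length 10 px it is
    # 0.5*(2*10)**2 = 200 px**2 up to 1.25*(2*10)**2 = 500 px**2.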
def _get_frame_center(self, src):
"""
convenience function for geting center of image in c,r from
"""
w, h = get_size(src)
x = w / 2
y = h / 2
return x, y
# ===============================================================================
# draw
# ===============================================================================
def _draw_targets(self, src, targets):
"""
draw a crosshairs indicator
"""
if targets:
for ta in targets:
pt = new_point(*ta.centroid)
self._draw_indicator(src, pt,
color=(0, 255, 0),
size=10,
shape='crosshairs')
# draw_circle(src, pt,
# color=(0,255,0),
# radius=int(dim))
draw_polygons(src, [ta.poly_points], color=(255, 255, 255))
def _draw_center_indicator(self, src, color=(0, 0, 255), shape='crosshairs',
size=10, radius=1):
"""
draw indicator at center of frame
"""
cpt = self._get_frame_center(src)
self._draw_indicator(src, new_point(*cpt),
# shape='crosshairs',
shape=shape,
color=color,
size=size)
# draw_circle_perimeter(src, cpt[0], cpt[1], radius, color=color)
def _draw_indicator(self, src, center, color=(255, 0, 0), shape='circle',
size=4, thickness=-1):
"""
convenience function for drawing indicators
"""
if isinstance(center, tuple):
center = new_point(*center)
r = size
if shape == 'rect':
draw_rectangle(src, center.x - r / 2., center.y - r / 2., r, r,
color=color,
thickness=thickness)
elif shape == 'crosshairs':
draw_lines(src,
[[(center.x - size, center.y),
(center.x + size, center.y)],
[(center.x, center.y - size),
(center.x, center.y + size)]],
color=color,
thickness=1)
else:
draw_circle(src, center[0], center[1], r, color=color)
# ============= EOF =============================================
# def _segment_polygon2(self, image, frame, target,
# dim,
# cthreshold, mi, ma):
#
# pychron = image.source_frame[:]
#
# # find the label with the max area ie max of histogram
# def get_limits(values, bins, width=1):
# ind = argmax(values)
# if ind == 0:
# bil = bins[ind]
# biu = bins[ind + width]
# elif ind == len(bins) - width:
# bil = bins[ind - width]
# biu = bins[ind]
# else:
# bil = bins[ind - width]
# biu = bins[ind + width]
#
# return bil, biu, ind
#
# wh = get_size(pychron)
# # make image with polygon
# im = zeros(wh)
# points = asarray(target.poly_points)
# rr, cc = polygon(*points.T)
#
# # points = asarray([(pi.x, pi.y) for pi in points])
# # rr, cc = polygon(points[:, 0], points[:, 1])
#
# im[cc, rr] = 255
#
# # do watershedding
# distance = ndimage.distance_transform_edt(im)
# local_maxi = feature.peak_local_max(distance, labels=im,
# indices=False,
# footprint=ones((3, 3))
# )
# markers, ns = ndimage.label(local_maxi)
# #
# wsrc = watershed(-distance, markers,
# mask=im
# )
#
# # print wsrc[50]
# # print colorspace(distance)
# # debug_show(im, ws, seg1)
# # debug_show(im, distance, wsrc, nimage)
# # bins = 3 * number of labels. this allows you to precisely pick the value of the max area
# values, bins = histogram(wsrc, bins=ns * 3)
# bil, biu, ind = get_limits(values, bins)
# # ma = max()
# # print ma
# # nimage = ndimage.label(wsrc > biu)[0]
#
# # nimage = nimage.astype('uint8') * 255
# if not bil:
# values = delete(values, ind)
# bins = delete(bins, (ind, ind + 1))
# bil, biu, ind = get_limits(values, bins)
# #
# nimage = ones_like(wsrc, dtype='uint8') * 255
# nimage[wsrc < bil] = 0
# nimage[wsrc > biu] = 0
#
# # image.source_frame = colorspace(nimage)
#
# # image.refresh = True
# # time.sleep(1)
#
# # debug_show(im, distance, wsrc, nimage)
# nimage = invert(nimage)
# # img = nimage
# # # img = asMat(nimage)
# # # locate new polygon from the segmented image
# tars = self._find_targets(image, nimage, dim,
# start=10, w=4, n=2, set_image=False)
# # # tars = None
# # # do_later(lambda: self.debug_show(im, distance, wsrc, nimage))
#
# # tars = None
# if tars:
# target = tars[0]
# return self._test_target(frame, target, cthreshold, mi, ma)
# else:
# return False, False, False
# from numpy import linspace, pi, cos, sin, radians
# from math import atan2
# from scipy.optimize import fmin
# # dx, dy = None, None
# # for ta in targets:
# pts = array([(p.x, p.y) for p in target.poly_points], dtype=float)
# pts = sort_clockwise(pts, pts)
# pts = convex_hull(pts)
# cx, cy = target.centroid
# px, py = pts.T
#
# tx, ty = self._get_frame_center(pychron)
# px -= cx
# py -= cy
#
# r = dim * 0.5
# ts = array([atan2(p[1] - cx, p[0] - cy) for p in pts])
# # ts += 180
# n = len(ts)
# hidx = n / 2
# h1 = ts[:hidx]
#
# offset = 0 if n % 2 == 0 else 1
#
# # h1 = array([ti for ti in ts if ti < 180])
# # h1 = radians(h1)
# # hidx = len(h1)
# # print len(ts), hidx
# # offset = 0
# def cost(p0):
# '''
# cost function
#
# A-D: chord of the polygon
# B-C: radius of fit circle
#
# A B C D
#
# try to minimize difference fit circle and polygon approx
# cost=dist(A,B)+dist(C,D)
# '''
# # r = p0[2]
# # northern hemicircle
# cix1, ciy1 = p0[0] - cx + r * cos(h1), p0[1] - cy + r * sin(h1)
#
# # southern hemicircle
# cix2, ciy2 = p0[0] - cx + r * cos(h1 + pi), p0[1] - cy + r * sin(h1 + pi)
#
# dx, dy = px[:hidx] - cix1, py[:hidx] - ciy1
# p1 = (dx ** 2 + dy ** 2) ** 0.5
#
# # dx, dy = cix2 - px[hidx + offset:], ciy2 - py[hidx + offset:]
# dx, dy = px[hidx + offset:] - cix2, py[hidx + offset:] - ciy2
# p2 = (dx ** 2 + dy ** 2) ** 0.5
# # print 'p1', p1
# # print 'p2', p2
# return ((p2 - p1) ** 2).sum()
# # return (p1 + p2).mean()
# # return p2.sum() + p1.sum()
#
# # minimize the cost function
# dx, dy = fmin(cost, x0=[0, 0], disp=False) # - ta.centroid
# print dx, dy, ty, cy
# # dy -= cy
# # dx -= cx
#
# # print ty + cy, dy
# self._draw_indicator(pychron, (dx, dy), shape='rect')
# draw_circle(pychron, (dx, dy), int(r))
#
# return dx - target.origin[0], dy - target.origin[1]
# def debug_show(image, distance, wsrc, nimage):
#
# import matplotlib.pyplot as plt
# fig, axes = plt.subplots(ncols=4, figsize=(8, 2.7))
# ax0, ax1, ax2, ax3 = axes
#
# ax0.imshow(image, cmap=plt.cm.gray, interpolation='nearest')
# ax1.imshow(-distance, cmap=plt.cm.jet, interpolation='nearest')
# ax2.imshow(wsrc, cmap=plt.cm.jet, interpolation='nearest')
# ax3.imshow(nimage, cmap=plt.cm.jet, interpolation='nearest')
#
# for ax in axes:
# ax.axis('off')
#
# plt.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0,
# right=1)
# plt.show()
# def find_circle(self, image, frame, dim, **kw):
# dx, dy = None, None
#
# pframe = self._preprocess(frame, blur=0)
# edges = canny(pframe, sigma=3)
# hough_radii = arange(dim * 0.9, dim * 1.1, 2)
#
# hough_res = hough_circle(edges, hough_radii)
#
# centers = []
# accums = []
# radii = []
# for radius, h in zip(hough_radii, hough_res):
# # For each radius, extract two circles
# num_peaks = 2
# peaks = peak_local_max(h, num_peaks=num_peaks)
# centers.extend(peaks)
# accums.extend(h[peaks[:, 0], peaks[:, 1]])
# radii.extend([radius] * num_peaks)
#
# # for idx in argsort(accums)[::-1][:1]:
# try:
# idx = argsort(accums)[::-1][0]
# except IndexError:
# return dx, dy
#
# center_y, center_x = centers[idx]
# radius = radii[idx]
#
# draw_circle_perimeter(frame, center_x, center_y, radius, (220, 20, 20))
# # cx, cy = circle_perimeter(int(center_x), int(center_y), int(radius))
#
# # draw perimeter
# # try:
# # frame[cy, cx] = (220, 20, 20)
# # except IndexError:
# # pass
#
# # draw center
# # cx, cy = circle(int(center_x), int(center_y), int(2))
# # frame[cy, cx] = (220, 20, 20)
# draw_circle(frame, center_x, center_y, 2, (220, 20, 20))
#
# h, w = frame.shape[:2]
#
# ox, oy = w / 2, h / 2
# dx = center_x - ox
# dy = center_y - oy
#
# cx, cy = circle(int(ox), int(oy), int(2))
# frame[cy, cx] = (20, 220, 20)
#
# image.set_frame(frame)
# return float(dx), -float(dy)
|
UManPychron/pychron
|
pychron/mv/locator.py
|
Python
|
apache-2.0
| 28,476
|
[
"Gaussian"
] |
0a6971c5c7bce2097925d7cf133b57610f80e0aa7c0fe51658e632f66dd4f3c9
|
"""
Maximum likelihood covariance estimator.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
# avoid division truncation
from __future__ import division
import warnings
import numpy as np
from scipy import linalg
from ..base import BaseEstimator
from ..utils import array2d
from ..utils.extmath import fast_logdet, pinvh
def log_likelihood(emp_cov, precision):
"""Computes the log_likelihood of the data
Parameters
----------
emp_cov : 2D ndarray (n_features, n_features)
Maximum Likelihood Estimator of covariance
precision : 2D ndarray (n_features, n_features)
The precision matrix of the covariance model to be tested
"""
return -np.sum(emp_cov * precision) + fast_logdet(precision)
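# Hedged usage sketch (added, not part of scikit-learn): evaluating the same
# quantity with plain numpy. _log_likelihood_demo is a hypothetical helper
# and assumes the covariance estimate is invertible (positive definite).
def _log_likelihood_demo():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(100, 3)                     # toy data, assumed centered
    emp_cov = np.dot(X.T, X) / X.shape[0]     # maximum likelihood covariance
    precision = np.linalg.inv(emp_cov)
    # equals log_likelihood(emp_cov, precision) for positive definite input
    return -np.sum(emp_cov * precision) + np.log(np.linalg.det(precision))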
def empirical_covariance(X, assume_centered=False):
"""Computes the Maximum likelihood covariance estimator
Parameters
----------
X : 2D ndarray, shape (n_samples, n_features)
Data from which to compute the covariance estimate
assume_centered : Boolean
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False, data are centered before computation.
Returns
-------
covariance : 2D ndarray, shape (n_features, n_features)
Empirical covariance (Maximum Likelihood Estimator).
"""
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (1, -1))
warnings.warn("Only one sample available. "
"You may want to reshape your data array")
if assume_centered:
covariance = np.dot(X.T, X) / X.shape[0]
else:
covariance = np.cov(X.T, bias=1)
return covariance
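# Hedged sketch (added, not part of scikit-learn): with assume_centered=False
# the estimator above is the biased (1/n) sample covariance, i.e. exactly
# np.cov(X.T, bias=1). _empirical_covariance_demo is a hypothetical helper.
def _empirical_covariance_demo():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(50, 4)
    cov = empirical_covariance(X)                  # centers X internally
    assert np.allclose(cov, np.cov(X.T, bias=1))   # same estimator
    return cov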
class EmpiricalCovariance(BaseEstimator):
"""Maximum likelihood covariance estimator
Parameters
----------
store_precision : bool
Specifies if the estimated precision is stored.
assume_centered : bool
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False (default), data are centered before computation.
Attributes
----------
`covariance_` : 2D ndarray, shape (n_features, n_features)
Estimated covariance matrix
`precision_` : 2D ndarray, shape (n_features, n_features)
Estimated pseudo-inverse matrix.
(stored only if store_precision is True)
"""
def __init__(self, store_precision=True, assume_centered=False):
self.store_precision = store_precision
self.assume_centered = assume_centered
def _set_covariance(self, covariance):
"""Saves the covariance and precision estimates
Storage is done accordingly to `self.store_precision`.
Precision stored only if invertible.
Parameters
----------
covariance : 2D ndarray, shape (n_features, n_features)
Estimated covariance matrix to be stored, and from which precision
is computed.
"""
covariance = array2d(covariance)
# set covariance
self.covariance_ = covariance
# set precision
if self.store_precision:
self.precision_ = pinvh(covariance)
else:
self.precision_ = None
def get_precision(self):
"""Getter for the precision matrix.
Returns
-------
`precision_` : array-like,
The precision matrix associated to the current covariance object.
"""
if self.store_precision:
precision = self.precision_
else:
precision = pinvh(self.covariance_)
return precision
def fit(self, X, y=None):
"""Fits the Maximum Likelihood Estimator covariance model
according to the given training data and parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples and
n_features is the number of features.
y : not used, present for API consistence purpose.
Returns
-------
self : object
Returns self.
"""
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
covariance = empirical_covariance(
X, assume_centered=self.assume_centered)
self._set_covariance(covariance)
return self
def score(self, X_test, y=None):
"""Computes the log-likelihood of a Gaussian data set with
`self.covariance_` as an estimator of its covariance matrix.
Parameters
----------
X_test : array-like, shape = [n_samples, n_features]
Test data of which we compute the likelihood, where n_samples is
the number of samples and n_features is the number of features.
            X_test is assumed to be drawn from the same distribution as
            the data used in fit (including centering).
y : not used, present for API consistence purpose.
Returns
-------
res : float
The likelihood of the data set with `self.covariance_` as an
estimator of its covariance matrix.
"""
# compute empirical covariance of the test set
test_cov = empirical_covariance(
X_test - self.location_, assume_centered=True)
# compute log likelihood
res = log_likelihood(test_cov, self.get_precision())
return res
def error_norm(self, comp_cov, norm='frobenius', scaling=True,
squared=True):
"""Computes the Mean Squared Error between two covariance estimators.
(In the sense of the Frobenius norm).
Parameters
----------
comp_cov : array-like, shape = [n_features, n_features]
The covariance to compare with.
norm : str
The type of norm used to compute the error. Available error types:
- 'frobenius' (default): sqrt(tr(A^t.A))
- 'spectral': sqrt(max(eigenvalues(A^t.A))
where A is the error ``(comp_cov - self.covariance_)``.
scaling : bool
If True (default), the squared error norm is divided by n_features.
If False, the squared error norm is not rescaled.
squared : bool
Whether to compute the squared error norm or the error norm.
If True (default), the squared error norm is returned.
If False, the error norm is returned.
Returns
-------
The Mean Squared Error (in the sense of the Frobenius norm) between
`self` and `comp_cov` covariance estimators.
"""
# compute the error
error = comp_cov - self.covariance_
# compute the error norm
if norm == "frobenius":
squared_norm = np.sum(error ** 2)
elif norm == "spectral":
squared_norm = np.amax(linalg.svdvals(np.dot(error.T, error)))
else:
raise NotImplementedError(
"Only spectral and frobenius norms are implemented")
        # optionally scale the error norm
if scaling:
squared_norm = squared_norm / error.shape[0]
# finally get either the squared norm or the norm
if squared:
result = squared_norm
else:
result = np.sqrt(squared_norm)
return result
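    # Illustrative note (added): with error A = comp_cov - covariance_,
    # 'frobenius' uses sum(A**2) = tr(A.T A) while 'spectral' uses the
    # largest eigenvalue of A.T A (the squared 2-norm of A); scaling divides
    # by n_features and squared=False takes the square root of either.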
def mahalanobis(self, observations):
"""Computes the Mahalanobis distances of given observations.
        The observations are centered internally using the fitted
        location estimate (``self.location_``) before the distances
        are computed.
Parameters
----------
observations : array-like, shape = [n_observations, n_features]
            The observations, the Mahalanobis distances of which we
            compute. Observations are assumed to be drawn from the same
            distribution as the data used in fit (including centering).
Returns
-------
mahalanobis_distance : array, shape = [n_observations,]
Mahalanobis distances of the observations.
"""
precision = self.get_precision()
# compute mahalanobis distances
centered_obs = observations - self.location_
mahalanobis_dist = np.sum(
np.dot(centered_obs, precision) * centered_obs, 1)
return mahalanobis_dist
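# Hedged usage sketch (added, not part of scikit-learn): fitting the
# estimator and computing squared Mahalanobis distances of the training
# points. _mahalanobis_demo is a hypothetical helper.
def _mahalanobis_demo():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(200, 3)
    est = EmpiricalCovariance().fit(X)
    d2 = est.mahalanobis(X)   # location_ is subtracted internally here
    return d2                 # shape (200,), squared distances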
|
depet/scikit-learn
|
sklearn/covariance/empirical_covariance_.py
|
Python
|
bsd-3-clause
| 8,679
|
[
"Gaussian"
] |
72102aa34412079d9f8992260bb262c61bd353f47705f9d47255857815db505b
|
from dateutil import parser
import tempfile
from django.conf import settings
from django.contrib.sessions.middleware import SessionMiddleware
from django.contrib.messages.storage.fallback import FallbackStorage
from django.test import TestCase, RequestFactory
from hs_core.models import ResourceFile
from hs_core.hydroshare import add_resource_files
from hs_core.views.utils import create_folder, move_or_rename_file_or_folder, zip_folder, \
unzip_file, remove_folder
from hs_core.views.utils import run_ssh_command
from theme.models import UserProfile
from django_irods.icommands import SessionException
from django_irods.storage import IrodsStorage
class MockIRODSTestCaseMixin(object):
def setUp(self):
super(MockIRODSTestCaseMixin, self).setUp()
# only mock up testing iRODS operations when local iRODS container is not used
if settings.IRODS_HOST != 'data.local.org':
from mock import patch
self.irods_patchers = (
patch("hs_core.hydroshare.hs_bagit.delete_files_and_bag"),
patch("hs_core.hydroshare.hs_bagit.create_bag"),
patch("hs_core.hydroshare.hs_bagit.create_bag_files"),
patch("hs_core.tasks.create_bag_by_irods"),
patch("hs_core.hydroshare.utils.copy_resource_files_and_AVUs"),
)
for patcher in self.irods_patchers:
patcher.start()
def tearDown(self):
if settings.IRODS_HOST != 'data.local.org':
for patcher in self.irods_patchers:
patcher.stop()
super(MockIRODSTestCaseMixin, self).tearDown()
class TestCaseCommonUtilities(object):
def is_federated_irods_available(self):
if not settings.REMOTE_USE_IRODS or settings.HS_USER_ZONE_HOST != 'users.local.org' \
or settings.IRODS_HOST != 'data.local.org':
return False
else:
return True
def create_irods_user_in_user_zone(self):
# create corresponding irods account in user zone
try:
exec_cmd = "{0} {1} {2}".format(settings.HS_USER_ZONE_PROXY_USER_CREATE_USER_CMD,
self.user.username, self.user.username)
output = run_ssh_command(host=settings.HS_USER_ZONE_HOST,
uname=settings.HS_USER_ZONE_PROXY_USER,
pwd=settings.HS_USER_ZONE_PROXY_USER_PWD,
exec_cmd=exec_cmd)
if output:
if 'ERROR:' in output.upper():
# irods account failed to create
self.assertRaises(SessionException(-1, output, output))
user_profile = UserProfile.objects.filter(user=self.user).first()
user_profile.create_irods_user_account = True
user_profile.save()
except Exception as ex:
self.assertRaises(SessionException(-1, ex.message, ex.message))
def delete_irods_user_in_user_zone(self):
# delete irods test user in user zone
try:
exec_cmd = "{0} {1}".format(settings.HS_USER_ZONE_PROXY_USER_DELETE_USER_CMD,
self.user.username)
output = run_ssh_command(host=settings.HS_USER_ZONE_HOST,
uname=settings.HS_USER_ZONE_PROXY_USER,
pwd=settings.HS_USER_ZONE_PROXY_USER_PWD,
exec_cmd=exec_cmd)
if output:
if 'ERROR:' in output.upper():
# there is an error from icommand run, report the error
self.assertRaises(SessionException(-1, output, output))
user_profile = UserProfile.objects.filter(user=self.user).first()
user_profile.create_irods_user_account = False
user_profile.save()
except Exception as ex:
# there is an error from icommand run, report the error
self.assertRaises(SessionException(-1, ex.message, ex.message))
def save_files_to_user_zone(self, file_name_to_target_name_dict):
"""
Save a list of files to iRODS user zone using the same IrodsStorage() object
        :param file_name_to_target_name_dict: a dictionary in the form of {ori_file: target_file}
        where ori_file is the local file to be saved, and target_file is the full path file name
        in the iRODS user zone to save ori_file to
:return:
"""
self.irods_storage = IrodsStorage('federated')
for file_name, target_name in file_name_to_target_name_dict.iteritems():
self.irods_storage.saveFile(file_name, target_name)
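    # Hedged usage note (added): the mapping is {local_path: irods_path},
    # e.g. (hypothetical names)
    #   self.save_files_to_user_zone(
    #       {'test.txt': '/hydroshareuserZone/home/testuser/test.txt'})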
def resource_file_oprs(self):
"""
This is a common test utility function to be called by both regular folder operation
testing and federated zone folder operation testing.
Make sure the calling TestCase object has the following attributes defined before calling
this method:
self.res: resource that has been created that contains files listed in file_name_list
self.user: owner of the resource
self.file_name_list: a list of three file names that have been added to the res object
self.test_file_1 needs to be present for the calling object for doing regular folder
operations without involving federated zone so that the same opened file can be re-added
to the resource for testing the case where zipping cannot overwrite existing file
"""
user = self.user
res = self.res
file_name_list = self.file_name_list
# create a folder, if folder is created successfully, no exception is raised, otherwise,
# an iRODS exception will be raised which will be caught by the test runner and mark as
# a test failure
create_folder(res.short_id, 'data/contents/sub_test_dir')
istorage = res.get_irods_storage()
res_path = res.file_path
store = istorage.listdir(res_path)
self.assertIn('sub_test_dir', store[0], msg='resource does not contain created sub-folder')
# rename the third file in file_name_list
move_or_rename_file_or_folder(user, res.short_id,
'data/contents/' + file_name_list[2],
'data/contents/new_' + file_name_list[2])
# move the first two files in file_name_list to the new folder
move_or_rename_file_or_folder(user, res.short_id,
'data/contents/' + file_name_list[0],
'data/contents/sub_test_dir/' + file_name_list[0])
move_or_rename_file_or_folder(user, res.short_id,
'data/contents/' + file_name_list[1],
'data/contents/sub_test_dir/' + file_name_list[1])
updated_res_file_names = []
for rf in ResourceFile.objects.filter(object_id=res.id):
updated_res_file_names.append(rf.short_path)
self.assertIn('new_' + file_name_list[2], updated_res_file_names,
msg="resource does not contain the updated file new_" + file_name_list[2])
self.assertNotIn(file_name_list[2], updated_res_file_names,
msg='resource still contains the old file ' + file_name_list[2] +
' after renaming')
self.assertIn('sub_test_dir/' + file_name_list[0], updated_res_file_names,
msg='resource does not contain ' + file_name_list[0] + ' moved to a folder')
self.assertNotIn(file_name_list[0], updated_res_file_names,
msg='resource still contains the old ' + file_name_list[0] +
'after moving to a folder')
self.assertIn('sub_test_dir/' + file_name_list[1], updated_res_file_names,
msg='resource does not contain ' + file_name_list[1] +
'moved to a new folder')
self.assertNotIn(file_name_list[1], updated_res_file_names,
msg='resource still contains the old ' + file_name_list[1] +
' after moving to a folder')
# zip the folder
output_zip_fname, size = \
zip_folder(user, res.short_id, 'data/contents/sub_test_dir',
'sub_test_dir.zip', True)
self.assertGreater(size, 0, msg='zipped file has a size of 0')
# Now resource should contain only two files: new_file3.txt and sub_test_dir.zip
# since the folder is zipped into sub_test_dir.zip with the folder deleted
self.assertEqual(res.files.all().count(), 2,
msg="resource file count didn't match-")
# test unzip does not allow override of existing files
# add an existing file in the zip to the resource
if res.resource_federation_path:
fed_test_file1_full_path = '/{zone}/home/{uname}/{fname}'.format(
zone=settings.HS_USER_IRODS_ZONE, uname=user.username, fname=file_name_list[0])
# TODO: why isn't this a method of resource?
# TODO: Why do we repeat the resource_federation_path?
add_resource_files(res.short_id, source_names=[fed_test_file1_full_path],
move=False)
else:
# TODO: Why isn't this a method of resource?
add_resource_files(res.short_id, self.test_file_1)
# TODO: use ResourceFile.create_folder, which doesn't require data/contents prefix
create_folder(res.short_id, 'data/contents/sub_test_dir')
# TODO: use ResourceFile.rename, which doesn't require data/contents prefix
move_or_rename_file_or_folder(user, res.short_id,
'data/contents/' + file_name_list[0],
'data/contents/sub_test_dir/' + file_name_list[0])
        # Now resource should contain three files: new_file3.txt, sub_test_dir.zip, and file1.txt
self.assertEqual(res.files.all().count(), 3, msg="resource file count didn't match")
with self.assertRaises(SessionException):
unzip_file(user, res.short_id, 'data/contents/sub_test_dir.zip', False)
        # Resource should still contain three files: new_file3.txt, sub_test_dir.zip, and file1.txt
file_cnt = res.files.all().count()
self.assertEqual(file_cnt, 3, msg="resource file count didn't match - " +
str(file_cnt) + " != 3")
# test unzipping the file succeeds now after deleting the existing folder
# TODO: this causes a multiple delete because the paths are valid now.
istorage = res.get_irods_storage()
remove_folder(user, res.short_id, 'data/contents/sub_test_dir')
        # Now resource should contain two files: new_file3.txt and sub_test_dir.zip
file_cnt = res.files.all().count()
self.assertEqual(file_cnt, 2, msg="resource file count didn't match - " +
str(file_cnt) + " != 2")
unzip_file(user, res.short_id, 'data/contents/sub_test_dir.zip', True)
        # Now resource should contain three files: file1.txt, file2.txt, and new_file3.txt
self.assertEqual(res.files.all().count(), 3, msg="resource file count didn't match")
updated_res_file_names = []
for rf in ResourceFile.objects.filter(object_id=res.id):
updated_res_file_names.append(rf.short_path)
self.assertNotIn('sub_test_dir.zip', updated_res_file_names,
msg="resource still contains the zip file after unzipping")
self.assertIn('sub_test_dir/' + file_name_list[0], updated_res_file_names,
msg='resource does not contain unzipped file ' + file_name_list[0])
self.assertIn('sub_test_dir/' + file_name_list[1], updated_res_file_names,
msg='resource does not contain unzipped file ' + file_name_list[1])
self.assertIn('new_' + file_name_list[2], updated_res_file_names,
msg='resource does not contain unzipped file new_' + file_name_list[2])
# rename a folder
move_or_rename_file_or_folder(user, res.short_id,
'data/contents/sub_test_dir', 'data/contents/sub_dir')
updated_res_file_names = []
for rf in ResourceFile.objects.filter(object_id=res.id):
updated_res_file_names.append(rf.short_path)
self.assertNotIn('sub_test_dir/' + file_name_list[0], updated_res_file_names,
msg='resource still contains ' + file_name_list[0] +
' in the old folder after renaming')
self.assertIn('sub_dir/' + file_name_list[0], updated_res_file_names,
msg='resource does not contain ' + file_name_list[0] +
' in the new folder after renaming')
self.assertNotIn('sub_test_dir/' + file_name_list[1], updated_res_file_names,
msg='resource still contains ' + file_name_list[1] +
' in the old folder after renaming')
self.assertIn('sub_dir/' + file_name_list[1], updated_res_file_names,
msg='resource does not contain ' + file_name_list[1] +
' in the new folder after renaming')
# remove a folder
# TODO: utilize ResourceFile.remove_folder instead. Takes a short path.
remove_folder(user, res.short_id, 'data/contents/sub_dir')
# Now resource only contains one file
self.assertEqual(res.files.all().count(), 1, msg="resource file count didn't match")
updated_res_file_names = []
for rf in ResourceFile.objects.filter(object_id=res.id):
updated_res_file_names.append(rf.short_path)
self.assertEqual(len(updated_res_file_names), 1)
self.assertEqual(updated_res_file_names[0], 'new_' + file_name_list[2])
def raster_metadata_extraction(self):
"""
This is a common test utility function to be called by both regular raster metadata
extraction testing and federated zone raster metadata extraction testing.
Make sure the calling TestCase object has self.resRaster attribute defined before calling
this method which is the raster resource that has been created containing valid raster
files.
"""
# there should be 2 content files
self.assertEqual(self.resRaster.files.all().count(), 2)
# test core metadata after metadata extraction
extracted_title = "My Test Raster Resource"
self.assertEqual(self.resRaster.metadata.title.value, extracted_title)
# there should be 1 creator
self.assertEqual(self.resRaster.metadata.creators.all().count(), 1)
# there should be 1 coverage element - box type
self.assertEqual(self.resRaster.metadata.coverages.all().count(), 1)
self.assertEqual(self.resRaster.metadata.coverages.all().filter(type='box').count(), 1)
box_coverage = self.resRaster.metadata.coverages.all().filter(type='box').first()
self.assertEqual(box_coverage.value['projection'], 'WGS 84 EPSG:4326')
self.assertEqual(box_coverage.value['units'], 'Decimal degrees')
self.assertEqual(box_coverage.value['northlimit'], 42.11270614966863)
self.assertEqual(box_coverage.value['eastlimit'], -111.45699925047542)
self.assertEqual(box_coverage.value['southlimit'], 41.66222054591102)
self.assertEqual(box_coverage.value['westlimit'], -111.81761887121905)
# there should be 2 format elements
self.assertEqual(self.resRaster.metadata.formats.all().count(), 2)
self.assertEqual(self.resRaster.metadata.formats.all().filter(
value='application/vrt').count(), 1)
self.assertEqual(self.resRaster.metadata.formats.all().filter(
value='image/tiff').count(), 1)
# testing extended metadata element: original coverage
ori_coverage = self.resRaster.metadata.originalCoverage
self.assertNotEquals(ori_coverage, None)
self.assertEqual(ori_coverage.value['northlimit'], 4662392.446916306)
self.assertEqual(ori_coverage.value['eastlimit'], 461954.01909127034)
self.assertEqual(ori_coverage.value['southlimit'], 4612592.446916306)
self.assertEqual(ori_coverage.value['westlimit'], 432404.01909127034)
self.assertEqual(ori_coverage.value['units'], 'meter')
self.assertEqual(ori_coverage.value['projection'], "NAD83 / UTM zone 12N")
self.assertEqual(ori_coverage.value['datum'], "North_American_Datum_1983")
projection_string = u'PROJCS["NAD83 / UTM zone 12N",GEOGCS["NAD83",' \
u'DATUM["North_American_Datum_1983",' \
u'SPHEROID["GRS 1980",6378137,298.257222101,' \
u'AUTHORITY["EPSG","7019"]],' \
u'TOWGS84[0,0,0,0,0,0,0],AUTHORITY["EPSG","6269"]],' \
u'PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],' \
u'UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],' \
u'AUTHORITY["EPSG","4269"]],PROJECTION["Transverse_Mercator"],' \
u'PARAMETER["latitude_of_origin",0],' \
u'PARAMETER["central_meridian",-111],' \
u'PARAMETER["scale_factor",0.9996],PARAMETER["false_easting",500000],' \
u'PARAMETER["false_northing",0],' \
u'UNIT["metre",1,AUTHORITY["EPSG","9001"]],' \
u'AXIS["Easting",EAST],AXIS["Northing",' \
u'NORTH],AUTHORITY["EPSG","26912"]]'
self.assertEqual(ori_coverage.value['projection_string'], projection_string)
# testing extended metadata element: cell information
cell_info = self.resRaster.metadata.cellInformation
self.assertEqual(cell_info.rows, 1660)
self.assertEqual(cell_info.columns, 985)
self.assertEqual(cell_info.cellSizeXValue, 30.0)
self.assertEqual(cell_info.cellSizeYValue, 30.0)
self.assertEqual(cell_info.cellDataType, 'Float32')
# testing extended metadata element: band information
self.assertEqual(self.resRaster.metadata.bandInformations.count(), 1)
band_info = self.resRaster.metadata.bandInformations.first()
self.assertEqual(band_info.noDataValue, '-3.40282346639e+38')
self.assertEqual(band_info.maximumValue, '3031.44311523')
self.assertEqual(band_info.minimumValue, '1358.33459473')
def netcdf_metadata_extraction(self, expected_creators_count=1):
"""
This is a common test utility function to be called by both regular netcdf metadata
extraction testing and federated zone netCDF metadata extraction testing.
Make sure the calling TestCase object has self.resNetcdf attribute defined before calling
this method which is the netCDF resource that has been created containing valid netCDF
files.
"""
        # there should be 2 content files
self.assertEqual(self.resNetcdf.files.all().count(), 2)
# test core metadata after metadata extraction
extracted_title = "Snow water equivalent estimation at TWDEF site from " \
"Oct 2009 to June 2010"
self.assertEqual(self.resNetcdf.metadata.title.value, extracted_title)
# there should be an abstract element
self.assertNotEqual(self.resNetcdf.metadata.description, None)
extracted_abstract = "This netCDF data is the simulation output from Utah Energy " \
"Balance (UEB) model.It includes the simulation result " \
"of snow water equivalent during the period " \
"Oct. 2009 to June 2010 for TWDEF site in Utah."
self.assertEqual(self.resNetcdf.metadata.description.abstract, extracted_abstract)
# there should be one source element
self.assertEqual(self.resNetcdf.metadata.sources.all().count(), 1)
# there should be one license element:
self.assertNotEquals(self.resNetcdf.metadata.rights.statement, 1)
# there should be one relation element
self.assertEqual(self.resNetcdf.metadata.relations.all().filter(type='cites').count(), 1)
# there should be creators equal to expected_creators_count
self.assertEqual(self.resNetcdf.metadata.creators.all().count(), expected_creators_count)
# there should be one contributor
self.assertEqual(self.resNetcdf.metadata.contributors.all().count(), 1)
# there should be 2 coverage element - box type and period type
self.assertEqual(self.resNetcdf.metadata.coverages.all().count(), 2)
self.assertEqual(self.resNetcdf.metadata.coverages.all().filter(type='box').count(), 1)
self.assertEqual(self.resNetcdf.metadata.coverages.all().filter(type='period').count(), 1)
box_coverage = self.resNetcdf.metadata.coverages.all().filter(type='box').first()
self.assertEqual(box_coverage.value['projection'], 'WGS 84 EPSG:4326')
self.assertEqual(box_coverage.value['units'], 'Decimal degrees')
self.assertEqual(box_coverage.value['northlimit'], 41.867126409)
self.assertEqual(box_coverage.value['eastlimit'], -111.505940368)
self.assertEqual(box_coverage.value['southlimit'], 41.8639080745)
self.assertEqual(box_coverage.value['westlimit'], -111.51138808)
temporal_coverage = self.resNetcdf.metadata.coverages.all().filter(type='period').first()
self.assertEqual(parser.parse(temporal_coverage.value['start']).date(),
parser.parse('10/01/2009').date())
self.assertEqual(parser.parse(temporal_coverage.value['end']).date(),
parser.parse('05/30/2010').date())
# there should be 2 format elements
self.assertEqual(self.resNetcdf.metadata.formats.all().count(), 2)
self.assertEqual(self.resNetcdf.metadata.formats.all().
filter(value='text/plain').count(), 1)
self.assertEqual(self.resNetcdf.metadata.formats.all().
filter(value='application/x-netcdf').count(), 1)
# there should be one subject element
self.assertEqual(self.resNetcdf.metadata.subjects.all().count(), 1)
subj_element = self.resNetcdf.metadata.subjects.all().first()
self.assertEqual(subj_element.value, 'Snow water equivalent')
# testing extended metadata element: original coverage
ori_coverage = self.resNetcdf.metadata.ori_coverage.all().first()
self.assertNotEquals(ori_coverage, None)
self.assertEqual(ori_coverage.projection_string_type, 'Proj4 String')
proj_text = u'+proj=tmerc +y_0=0.0 +k_0=0.9996 +x_0=500000.0 +lat_0=0.0 +lon_0=-111.0'
self.assertEqual(ori_coverage.projection_string_text, proj_text)
self.assertEqual(ori_coverage.value['northlimit'], '4.63515e+06')
self.assertEqual(ori_coverage.value['eastlimit'], '458010.0')
self.assertEqual(ori_coverage.value['southlimit'], '4.63479e+06')
self.assertEqual(ori_coverage.value['westlimit'], '457560.0')
self.assertEqual(ori_coverage.value['units'], 'Meter')
self.assertEqual(ori_coverage.value['projection'], 'transverse_mercator')
# testing extended metadata element: variables
self.assertEqual(self.resNetcdf.metadata.variables.all().count(), 5)
# test time variable
var_time = self.resNetcdf.metadata.variables.all().filter(name='time').first()
self.assertNotEquals(var_time, None)
self.assertEqual(var_time.unit, 'hours since 2009-10-1 0:0:00 UTC')
self.assertEqual(var_time.type, 'Float')
self.assertEqual(var_time.shape, 'time')
self.assertEqual(var_time.descriptive_name, 'time')
# test x variable
var_x = self.resNetcdf.metadata.variables.all().filter(name='x').first()
self.assertNotEquals(var_x, None)
self.assertEqual(var_x.unit, 'Meter')
self.assertEqual(var_x.type, 'Float')
self.assertEqual(var_x.shape, 'x')
self.assertEqual(var_x.descriptive_name, 'x coordinate of projection')
# test y variable
var_y = self.resNetcdf.metadata.variables.all().filter(name='y').first()
self.assertNotEquals(var_y, None)
self.assertEqual(var_y.unit, 'Meter')
self.assertEqual(var_y.type, 'Float')
self.assertEqual(var_y.shape, 'y')
self.assertEqual(var_y.descriptive_name, 'y coordinate of projection')
# test SWE variable
var_swe = self.resNetcdf.metadata.variables.all().filter(name='SWE').first()
self.assertNotEquals(var_swe, None)
self.assertEqual(var_swe.unit, 'm')
self.assertEqual(var_swe.type, 'Float')
self.assertEqual(var_swe.shape, 'y,x,time')
self.assertEqual(var_swe.descriptive_name, 'Snow water equivalent')
self.assertEqual(var_swe.method, 'model simulation of UEB model')
self.assertEqual(var_swe.missing_value, '-9999')
# test grid mapping variable
var_grid = self.resNetcdf.metadata.variables.all().\
filter(name='transverse_mercator').first()
self.assertNotEquals(var_grid, None)
self.assertEqual(var_grid.unit, 'Unknown')
self.assertEqual(var_grid.type, 'Unknown')
self.assertEqual(var_grid.shape, 'Not defined')
def timeseries_metadata_extraction(self):
"""
This is a common test utility function to be called by both regular timeseries metadata
extraction testing and federated zone timeseries metadata extraction testing.
Make sure the calling TestCase object has self.resTimeSeries attribute defined before
calling this method which is the timeseries resource that has been created containing
valid timeseries file.
"""
        # there should be one content file
self.assertEqual(self.resTimeSeries.files.all().count(), 1)
# there should be one contributor element
self.assertEqual(self.resTimeSeries.metadata.contributors.all().count(), 1)
# test core metadata after metadata extraction
extracted_title = "Water temperature data from the Little Bear River, UT"
self.assertEqual(self.resTimeSeries.metadata.title.value, extracted_title)
# there should be an abstract element
self.assertNotEqual(self.resTimeSeries.metadata.description, None)
extracted_abstract = "This dataset contains time series of observations of water " \
"temperature in the Little Bear River, UT. Data were recorded every " \
"30 minutes. The values were recorded using a HydroLab MS5 " \
"multi-parameter water quality sonde connected to a Campbell " \
"Scientific datalogger."
self.assertEqual(self.resTimeSeries.metadata.description.abstract.strip(),
extracted_abstract)
# there should be 2 coverage element - box type and period type
self.assertEqual(self.resTimeSeries.metadata.coverages.all().count(), 2)
self.assertEqual(self.resTimeSeries.metadata.coverages.all().filter(type='box').count(), 1)
self.assertEqual(self.resTimeSeries.metadata.coverages.all().filter(
type='period').count(), 1)
box_coverage = self.resTimeSeries.metadata.coverages.all().filter(type='box').first()
self.assertEqual(box_coverage.value['projection'], 'Unknown')
self.assertEqual(box_coverage.value['units'], 'Decimal degrees')
self.assertEqual(box_coverage.value['northlimit'], 41.718473)
self.assertEqual(box_coverage.value['eastlimit'], -111.799324)
self.assertEqual(box_coverage.value['southlimit'], 41.495409)
self.assertEqual(box_coverage.value['westlimit'], -111.946402)
temporal_coverage = self.resTimeSeries.metadata.coverages.all().filter(
type='period').first()
self.assertEqual(parser.parse(temporal_coverage.value['start']).date(),
parser.parse('01/01/2008').date())
self.assertEqual(parser.parse(temporal_coverage.value['end']).date(),
parser.parse('01/31/2008').date())
# there should be one format element
self.assertEqual(self.resTimeSeries.metadata.formats.all().count(), 1)
format_element = self.resTimeSeries.metadata.formats.all().first()
self.assertEqual(format_element.value, 'application/sqlite')
# there should be one subject element
self.assertEqual(self.resTimeSeries.metadata.subjects.all().count(), 1)
subj_element = self.resTimeSeries.metadata.subjects.all().first()
self.assertEqual(subj_element.value, 'Temperature')
# there should be a total of 7 timeseries
self.assertEqual(self.resTimeSeries.metadata.time_series_results.all().count(), 7)
# testing extended metadata elements
# test 'site' - there should be 7 sites
self.assertEqual(self.resTimeSeries.metadata.sites.all().count(), 7)
        # each site should be associated with one series id
for site in self.resTimeSeries.metadata.sites.all():
self.assertEqual(len(site.series_ids), 1)
# test the data for a specific site
site = self.resTimeSeries.metadata.sites.filter(site_code='USU-LBR-Paradise').first()
self.assertNotEqual(site, None)
site_name = 'Little Bear River at McMurdy Hollow near Paradise, Utah'
self.assertEqual(site.site_name, site_name)
self.assertEqual(site.elevation_m, 1445)
self.assertEqual(site.elevation_datum, 'NGVD29')
self.assertEqual(site.site_type, 'Stream')
# test 'variable' - there should be 1 variable element
self.assertEqual(self.resTimeSeries.metadata.variables.all().count(), 1)
variable = self.resTimeSeries.metadata.variables.all().first()
# there should be 7 series ids associated with this one variable
self.assertEqual(len(variable.series_ids), 7)
# test the data for a variable
self.assertEqual(variable.variable_code, 'USU36')
self.assertEqual(variable.variable_name, 'Temperature')
self.assertEqual(variable.variable_type, 'Water Quality')
self.assertEqual(variable.no_data_value, -9999)
self.assertEqual(variable.variable_definition, None)
self.assertEqual(variable.speciation, 'Not Applicable')
# test 'method' - there should be 1 method element
self.assertEqual(self.resTimeSeries.metadata.methods.all().count(), 1)
method = self.resTimeSeries.metadata.methods.all().first()
# there should be 7 series ids associated with this one method element
self.assertEqual(len(method.series_ids), 7)
self.assertEqual(method.method_code, '28')
method_name = 'Quality Control Level 1 Data Series created from raw QC Level 0 data ' \
'using ODM Tools.'
self.assertEqual(method.method_name, method_name)
self.assertEqual(method.method_type, 'Instrument deployment')
method_des = 'Quality Control Level 1 Data Series created from raw QC Level 0 data ' \
'using ODM Tools.'
self.assertEqual(method.method_description, method_des)
self.assertEqual(method.method_link, None)
# test 'processing_level' - there should be 1 processing_level element
self.assertEqual(self.resTimeSeries.metadata.processing_levels.all().count(), 1)
proc_level = self.resTimeSeries.metadata.processing_levels.all().first()
# there should be 7 series ids associated with this one element
self.assertEqual(len(proc_level.series_ids), 7)
self.assertEqual(proc_level.processing_level_code, 1)
self.assertEqual(proc_level.definition, 'Quality controlled data')
explanation = 'Quality controlled data that have passed quality assurance procedures ' \
'such as routine estimation of timing and sensor calibration or visual ' \
'inspection and removal of obvious errors. An example is USGS published ' \
'streamflow records following parsing through USGS quality control ' \
'procedures.'
self.assertEqual(proc_level.explanation, explanation)
# test 'timeseries_result' - there should be 7 timeseries_result elements
self.assertEqual(self.resTimeSeries.metadata.time_series_results.all().count(), 7)
ts_result = self.resTimeSeries.metadata.time_series_results.filter(
series_ids__contains=['182d8fa3-1ebc-11e6-ad49-f45c8999816f']).first()
self.assertNotEqual(ts_result, None)
# there should be only 1 series id associated with this element
self.assertEqual(len(ts_result.series_ids), 1)
self.assertEqual(ts_result.units_type, 'Temperature')
self.assertEqual(ts_result.units_name, 'degree celsius')
self.assertEqual(ts_result.units_abbreviation, 'degC')
self.assertEqual(ts_result.status, 'Unknown')
self.assertEqual(ts_result.sample_medium, 'Surface Water')
self.assertEqual(ts_result.value_count, 1441)
self.assertEqual(ts_result.aggregation_statistics, 'Average')
# test for CV lookup tables
# there should be 23 CV_VariableType records
self.assertEqual(self.resTimeSeries.metadata.cv_variable_types.all().count(), 23)
# there should be 805 CV_VariableName records
self.assertEqual(self.resTimeSeries.metadata.cv_variable_names.all().count(), 805)
# there should be 145 CV_Speciation records
self.assertEqual(self.resTimeSeries.metadata.cv_speciations.all().count(), 145)
# there should be 51 CV_SiteType records
self.assertEqual(self.resTimeSeries.metadata.cv_site_types.all().count(), 51)
# there should be 5 CV_ElevationDatum records
self.assertEqual(self.resTimeSeries.metadata.cv_elevation_datums.all().count(), 5)
# there should be 25 CV_MethodType records
self.assertEqual(self.resTimeSeries.metadata.cv_method_types.all().count(), 25)
# there should be 179 CV_UnitsType records
self.assertEqual(self.resTimeSeries.metadata.cv_units_types.all().count(), 179)
# there should be 4 CV_Status records
self.assertEqual(self.resTimeSeries.metadata.cv_statuses.all().count(), 4)
# there should be 18 CV_Medium records
self.assertEqual(self.resTimeSeries.metadata.cv_mediums.all().count(), 18)
# there should be 17 CV_aggregationStatistics records
self.assertEqual(self.resTimeSeries.metadata.cv_aggregation_statistics.all().count(), 17)
# there should not be any UTCOffset element
self.assertEqual(self.resTimeSeries.metadata.utc_offset, None)
class ViewTestCase(TestCase):
def setUp(self):
self.factory = RequestFactory()
self.temp_dir = tempfile.mkdtemp()
super(ViewTestCase, self).setUp()
@staticmethod
def set_request_message_attributes(request):
# the following 3 lines prevent errors in unit tests when the view being
# tested uses messaging middleware
setattr(request, 'session', 'session')
messages = FallbackStorage(request)
setattr(request, '_messages', messages)
@staticmethod
def add_session_to_request(request):
"""Annotate a request object with a session"""
middleware = SessionMiddleware()
middleware.process_request(request)
request.session.save()
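# Illustrative sketch (not part of the original module): a minimal subclass
# showing how the request helpers above are meant to be combined in a view
# test. The URL is hypothetical; any path accepted by RequestFactory works.
class ExampleViewTestCase(ViewTestCase):
    def test_request_has_session_and_messages(self):
        request = self.factory.get('/example/')
        self.set_request_message_attributes(request)
        self.add_session_to_request(request)
        self.assertTrue(hasattr(request, 'session'))
        self.assertTrue(hasattr(request, '_messages'))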
|
RENCI/xDCIShare
|
hs_core/testing.py
|
Python
|
bsd-3-clause
| 36,160
|
[
"NetCDF"
] |
1dd6e2455c8fb261d3bba9f3d01adf3ab2eae9e49feef8a29443d7b05dc24adc
|
# ------------------------------------------------------------------------------
# Copyright (c) 2010-2013, EVEthing team
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
# ------------------------------------------------------------------------------
from decimal import Decimal
from django.db import IntegrityError
from .apitask import APITask
from thing.models import Character, Corporation, Item, Station, Transaction, APIKey
# ---------------------------------------------------------------------------
# number of rows to request per WalletTransactions call, max is 2560
TRANSACTION_ROWS = 2560
class WalletTransactions(APITask):
name = 'thing.wallet_transactions'
def run(self, url, taskstate_id, apikey_id, character_id):
if self.init(taskstate_id, apikey_id) is False:
return
# Make sure the character exists
try:
character = Character.objects.select_related('details').get(pk=character_id)
except Character.DoesNotExist:
self.log_warn('Character %s does not exist!', character_id)
return
# Corporation key, visit each related CorpWallet
if self.apikey.key_type == APIKey.CORPORATION_TYPE:
for corpwallet in self.apikey.corporation.corpwallet_set.all():
result = self._work(url, character, corpwallet)
if result is False:
return
# Account/character key
else:
result = self._work(url, character)
if result is False:
return
return True
# Do the actual work for wallet transactions
def _work(self, url, character, corp_wallet=None):
# Initialise stuff
params = {
'characterID': character.id,
'rowCount': TRANSACTION_ROWS,
}
# Corporation key
if self.apikey.key_type == APIKey.CORPORATION_TYPE:
params['accountKey'] = corp_wallet.account_key
t_filter = Transaction.objects.filter(corp_wallet=corp_wallet)
# Account/Character key
else:
t_filter = Transaction.objects.filter(corp_wallet=None, character=character)
# Stuff to collect
bulk_data = {}
char_ids = set()
item_ids = set()
station_ids = set()
# Loop until we run out of transactions
while True:
if self.fetch_api(url, params) is False or self.root is None:
return False
rows = self.root.findall('result/rowset/row')
# empty result set = no transactions ever on this wallet
if not rows:
break
# Gather bulk data
for row in rows:
transaction_id = int(row.attrib['transactionID'])
bulk_data[transaction_id] = row
char_ids.add(int(row.attrib['clientID']))
item_ids.add(int(row.attrib['typeID']))
station_ids.add(int(row.attrib['stationID']))
if self.apikey.key_type == APIKey.CORPORATION_TYPE:
char_ids.add(int(row.attrib['characterID']))
# If we got the maximum number of rows, there may be more to retrieve
if len(rows) == TRANSACTION_ROWS:
params['beforeTransID'] = transaction_id
else:
break
# Retrieve any existing transactions
t_ids = set(t_filter.filter(transaction_id__in=bulk_data.keys()).values_list('transaction_id', flat=True))
# Fetch bulk data
char_map = Character.objects.in_bulk(char_ids)
corp_map = Corporation.objects.in_bulk(char_ids.difference(char_map))
item_map = Item.objects.in_bulk(item_ids)
station_map = Station.objects.in_bulk(station_ids)
# Iterate over scary data
new = []
for transaction_id, row in bulk_data.items():
transaction_time = self.parse_api_date(row.attrib['transactionDateTime'])
# Skip corporate transactions if this is a personal call, we have no idea
# what CorpWallet this transaction is related to otherwise :ccp:
if (row.attrib['transactionFor'].lower() == 'corporation'
        and self.apikey.key_type != APIKey.CORPORATION_TYPE):
continue
# Handle possible new clients
client_id = int(row.attrib['clientID'])
client = char_map.get(client_id, corp_map.get(client_id, None))
if client is None:
try:
client = Character.objects.create(
id=client_id,
name=row.attrib['clientName'],
)
except IntegrityError:
client = Character.objects.get(id=client_id)
char_map[client_id] = client
# Check to see if this transaction already exists
if transaction_id not in t_ids:
# Make sure the item is valid
item = item_map.get(int(row.attrib['typeID']))
if item is None:
self.log_warn('Invalid item_id %s', row.attrib['typeID'])
continue
# Make sure the station is valid
station = station_map.get(int(row.attrib['stationID']))
if station is None:
self.log_warn('Invalid station_id %s', row.attrib['stationID'])
continue
# For a corporation key, make sure the character exists
if self.apikey.key_type == APIKey.CORPORATION_TYPE:
char_id = int(row.attrib['characterID'])
char = char_map.get(char_id, None)
# Doesn't exist, create it
if char is None:
char = Character.objects.create(
id=char_id,
name=row.attrib['characterName'],
corporation=self.apikey.corporation,
)
char_map[char_id] = char
# Any other key = just use the supplied character
else:
char = character
# Create a new transaction object and save it
quantity = int(row.attrib['quantity'])
price = Decimal(row.attrib['price'])
buy_transaction = (row.attrib['transactionType'] == 'buy')
t = Transaction(
station=station,
item=item,
character=char,
transaction_id=transaction_id,
date=transaction_time,
buy_transaction=buy_transaction,
quantity=quantity,
price=price,
total_price=quantity * price,
)
# Set the corp_wallet for corporation API requests
if self.apikey.key_type == APIKey.CORPORATION_TYPE:
t.corp_wallet = corp_wallet
# Set whichever client type is relevant
if isinstance(client, Character):
t.other_char_id = client.id
else:
t.other_corp_id = client.id
new.append(t)
# Create any new transaction objects
if new:
Transaction.objects.bulk_create(new)
return True
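# Illustrative sketch (not part of the original task): the cursor-style
# pagination pattern used by _work() above, isolated. fetch_page is a
# hypothetical stand-in for the API call; it must return rows sorted
# newest-first, each carrying a 'transactionID' key.
def paginate_transactions(fetch_page, page_size=TRANSACTION_ROWS):
    params = {'rowCount': page_size}
    while True:
        rows = fetch_page(params)
        if not rows:
            break
        for row in rows:
            yield row
        if len(rows) < page_size:
            # A short page means we have reached the oldest transaction.
            break
        # Ask the next call for rows strictly older than the last one seen.
        params['beforeTransID'] = int(rows[-1]['transactionID'])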
|
madcowfred/evething
|
thing/tasks/wallettransactions.py
|
Python
|
bsd-2-clause
| 8,753
|
[
"VisIt"
] |
aa243703b7f38378b3ce3c9f3a5eef3c50fc42882c75d02204530da37917c61e
|
import StringIO, csv, yaml, uuid, time
from datetime import datetime
from bioblend.galaxy import GalaxyInstance
from items import SequencerOutputItem, SeqDataSampleItem
# dataset states corresponding to a 'pending' condition
_PENDING_DS_STATES = set(
["new", "upload", "queued", "running", "setting_metadata"]
)
POLLING_INTERVAL = 10
class GalaxyWrapper(object):
# In order to work config has to be a dictionary loaded from a YAML
# configuration file containing the following sections:
#
# ...
# galaxy:
# api_key: your_api_key
# url: galaxy_url
# sequencer_output_importer_workflow:
# label: workflow_label
# history_dataset_label: label_into_the_workflow
# dsamples_dataset_label: label_into_the_workflow
# dobjects_dataset_label: label_into_the_workflow
# seq_data_sample_intermediate_importer_workflow:
# label: workflow_label
# history_dataset_label: label_into_the_workflow
# dsamples_dataset_label: label_into_the_workflow
# seq_data_sample_importer_workflow:
# label: workflow_label
# history_dataset_label: label_into_the_workflow
# dsamples_dataset_label: label_into_the_workflow
# dobjects_dataset_label: label_into_the_workflow
# flowcell_from_samplesheet_importer_workflow:
# label: workflow_label
# samplesheet_dataset_label: label_into_the_workflow
# config_parameters_file_label: label_into_the_workflow
def __init__(self, config, logger):
self.logger = logger
if GalaxyWrapper.validate_configuration(config):
galaxy_conf_values = config['galaxy']
self.gi = GalaxyInstance(galaxy_conf_values['url'],
galaxy_conf_values['api_key'])
self.seq_out_workflow_conf = galaxy_conf_values['sequencer_output_importer_workflow']
self.seq_ds_workflow_conf = galaxy_conf_values['seq_data_sample_importer_workflow']
self.seq_ds_intermediate_workflow_conf = galaxy_conf_values['seq_data_sample_intermediate_importer_workflow']
self.smpsh_to_fc_workflow_conf = galaxy_conf_values['flowcell_from_samplesheet_importer_workflow']
else:
msg = 'Invalid configuration'
self.logger.error(msg)
raise ValueError(msg)
@staticmethod
def validate_configuration(config):
if config.has_key('galaxy'):
keys = ['api_key',
'url',
'sequencer_output_importer_workflow',
'seq_data_sample_importer_workflow',
'flowcell_from_samplesheet_importer_workflow']
for k in keys:
if not config['galaxy'].has_key(k):
return False
dsamples_keys = ['label',
'history_dataset_label',
'dsamples_dataset_label',
'dobjects_dataset_label']
for k in ['sequencer_output_importer_workflow',
'seq_data_sample_importer_workflow']:
for dsk in dsamples_keys:
if not config['galaxy'][k].has_key(dsk):
return False
flowcell_keys = ['label', 'samplesheet_dataset_label',
'config_parameters_file_label']
for k in ['flowcell_from_samplesheet_importer_workflow']:
for fk in flowcell_keys:
if not config['galaxy'][k].has_key(fk):
return False
else:
return False
return True
def __get_or_create_library(self, name):
self.logger.debug('Loading library with name %s', name)
lib_details = self.gi.libraries.get_libraries(name=name)
if len(lib_details) == 0:
self.logger.debug('Unable to load library, creating a new one')
lib_details = [self.gi.libraries.create_library(name)]
self.logger.debug('Library ID %s', lib_details[0]['id'])
return lib_details[0]['id']
def __create_folder(self, folder_name_prefix, library_id):
folder_name = '%s-%s' % (folder_name_prefix, uuid.uuid4().hex)
self.logger.debug('Creating folder %s in library %s', folder_name,
library_id)
folder_details = self.gi.libraries.create_folder(library_id,
folder_name)
self.logger.debug('Folder created with ID %s', folder_details[0]['id'])
return folder_details[0]['id']
def __drop_library(self, library_id):
raise NotImplementedError()
def __upload_to_library(self, data_stream, library_id, folder_id=None):
self.logger.debug('Uploading data to library %s', library_id)
if type(data_stream) == str:
data = data_stream
elif hasattr(data_stream, 'getvalue'):
data = data_stream.getvalue()
else:
msg = 'Unable to upload data_stream of type %r to library' % type(data_stream)
self.logger.error(msg)
raise RuntimeError(msg)
dset_details = self.gi.libraries.upload_file_contents(library_id, data,
folder_id=folder_id)
self.logger.debug('Data uploaded, dataset ID is %s', dset_details[0]['id'])
return dset_details[0]['id']
def __get_workflow_id(self, workflow_label):
self.logger.debug('Retrieving workflow %s', workflow_label)
workflow_mappings = {}
for wf in self.gi.workflows.get_workflows():
workflow_mappings.setdefault(wf['name'], []).append(wf['id'])
if workflow_mappings.has_key(workflow_label):
if len(workflow_mappings[workflow_label]) == 1:
self.logger.debug('Workflow details: %r', workflow_mappings[workflow_label][0])
return workflow_mappings[workflow_label][0]
else:
msg = 'Multiple workflow with label "%s", unable to resolve ID' % workflow_label
self.logger.error(msg)
raise RuntimeError(msg)
else:
msg = 'Unable to retrieve workflow with label "%s"' % workflow_label
self.logger.error(msg)
raise ValueError(msg)
def __run_workflow(self, workflow_id, dataset_map, history_name_prefix):
self.logger.debug('Running workflow %s', workflow_id)
now = datetime.now()
w_in_mappings = {}
for k, v in self.gi.workflows.show_workflow(workflow_id)['inputs'].iteritems():
w_in_mappings[v['label']] = k
new_dataset_map = {}
for k, v in dataset_map.iteritems():
new_dataset_map[w_in_mappings[k]] = v
history_name = '%s_%s' % (history_name_prefix, now.strftime('%Y-%m-%d_%H:%M:%S'))
history_details = self.gi.workflows.run_workflow(workflow_id, new_dataset_map,
history_name=history_name,
import_inputs_to_history=False)
self.logger.debug('Workflow running on history: %r', history_details)
return history_details
def __dump_history_details(self, history):
tmp = StringIO.StringIO()
tmp.write(history.json_data)
tmp.flush()
return tmp
def __serialize_options(self, opts_dict):
if len(opts_dict) == 0:
return 'None'
else:
opts = []
for k,v in opts_dict.iteritems():
opts.append('%s=%s' % (k,v))
return ','.join(opts)
def __dump_ds_do_datasets(self, items, study):
ds_csv_header = ['study', 'label', 'source', 'source_type',
'seq_dsample_type', 'status', 'device',
'options']
if hasattr(items[0], 'sample_label'):
ds_csv_header.insert(-1, 'sample')
do_csv_header = ['study', 'path', 'data_sample', 'mimetype',
'size', 'sha1']
ds_tmp = StringIO.StringIO()
do_tmp = StringIO.StringIO()
ds_writer = csv.DictWriter(ds_tmp, ds_csv_header, delimiter='\t')
do_writer = csv.DictWriter(do_tmp, do_csv_header, delimiter='\t')
try:
ds_writer.writeheader()
do_writer.writeheader()
except AttributeError:
# python 2.6 compatibility
ds_tmp.write('\t'.join(ds_csv_header) + '\n')
do_tmp.write('\t'.join(do_csv_header) + '\n')
for i in items:
opts = {}
if i.tags:
opts = i.tags
if i.hist_src_dataset_id:
opts['hist_src_dataset_id'] = i.hist_src_dataset_id
if i.hist_res_dataset_id:
opts['hist_res_dataset_id'] = i.hist_res_dataset_id
ds_record = {'study' : study, 'label' : i.label,
'source' : i.source_label,
'source_type' : i.source_type,
'seq_dsample_type' : i.dataset_type,
'status' : i.dataset_status,
'device' : i.device_label,
'options' : self.__serialize_options(opts)}
if hasattr(i, 'sample_label'):
if i.sample_label:
ds_record['sample'] = i.sample_label
else:
ds_record['sample'] = 'None'
ds_writer.writerow(ds_record)
for d in i.data_objects:
do_writer.writerow({'study' : study,
'path' : d.path,
'data_sample' : i.label,
'mimetype' : d.mimetype,
'size' : d.size,
'sha1' : d.sha1})
return ds_tmp, do_tmp
def __wait(self, history_id, sleep_interval=POLLING_INTERVAL):
self.logger.debug('Waiting for history %s', history_id)
while True:
state_details = self.gi.histories.get_status(history_id)['state_details']
non_zero = set(state for state, count in state_details.iteritems() if count > 0)
# With newer versions of Galaxy the history state remains
# 'queued' even if individual datasets are in an error state
if 'error' in non_zero:
self.logger.error("History %s failed to execute. state_details: %s",
history_id, state_details)
return 'error'
if len(_PENDING_DS_STATES & non_zero) == 0:
# no pending datasets
self.logger.debug('Workflow done with statuses %s', non_zero)
return 'ok'
self.logger.debug('Workflow not completed (statuses: %s). Wait %d seconds.',
non_zero, sleep_interval)
time.sleep(sleep_interval)
def __dump_config_params(self, study_label, namespace=None):
conf_dict = {'config_parameters': {'study_label' : study_label}}
if namespace:
conf_dict['config_parameters']['namespace'] = namespace
return self.__dump_to_yaml(conf_dict)
def __dump_to_yaml(self, config_dict):
return yaml.dump(config_dict, default_flow_style=False)
def __get_library_name(self, lname_prefix):
return '%s-%s' % (lname_prefix, datetime.now().strftime('%Y-%m-%d_%H:%M:%S'))
# Import DataSamples and DataObjects within OMERO.biobank,
# automatically selecting the proper workflow by checking the object
# type of the 'items' elements
def run_datasets_import(self, history, items, action_context,
no_dataobjects=False, async=False):
self.logger.info('Running datasets import')
history_dataset = self.__dump_history_details(history)
dsamples_dataset, dobjects_dataset = self.__dump_ds_do_datasets(items,
action_context)
lib_id = self.__get_or_create_library(self.__get_library_name('import_datasets'))
folder_id = self.__create_folder('dataset_import', lib_id)
hdset_id = self.__upload_to_library(history_dataset, lib_id, folder_id)
dsset_id = self.__upload_to_library(dsamples_dataset, lib_id, folder_id)
if not no_dataobjects:
doset_id = self.__upload_to_library(dobjects_dataset, lib_id, folder_id)
else:
doset_id = None
if type(items[0]) == SequencerOutputItem:
wf_conf = self.seq_out_workflow_conf
elif type(items[0]) == SeqDataSampleItem:
if not no_dataobjects:
wf_conf = self.seq_ds_workflow_conf
else:
wf_conf = self.seq_ds_intermediate_workflow_conf
else:
raise RuntimeError('Unable to run workflow for type %r' % type(items[0]))
# Preparing dataset map
ds_map = {
wf_conf['history_dataset_label']: {
'id': hdset_id,
'src': 'ld'
},
wf_conf['dsamples_dataset_label']: {
'id': dsset_id,
'src': 'ld'
}
}
if not no_dataobjects:
ds_map[wf_conf['dobjects_dataset_label']] = {
'id': doset_id,
'src': 'ld'
}
hist_details = self.__run_workflow(self.__get_workflow_id(wf_conf['label']),
ds_map, 'seq_datasets_import')
self.logger.info('Workflow running')
if async:
self.logger.info('Enabled async run, returning')
return hist_details
else:
self.logger.info('Waiting for run exit status')
status = self.__wait(hist_details['history'])
if status == 'ok':
self.logger.info('Run completed')
return hist_details, lib_id
else:
msg = 'Error occurred while processing data'
self.logger.error(msg)
raise RuntimeError(msg)
# Import a flowcell samplesheet produced by a Galaxy NGLIMS within OMERO.biobank
def run_flowcell_from_samplesheet_import(self, samplesheet_data, action_context, namespace=None,
async=False):
self.logger.info('Running flowcell samplesheet import')
conf_params = self.__dump_config_params(action_context, namespace)
lib_id = self.__get_or_create_library(self.__get_library_name('import_flowcell'))
folder_id = self.__create_folder('flowcell_from_samplesheet', lib_id)
samplesheet_id = self.__upload_to_library(samplesheet_data, lib_id, folder_id)
conf_file_id = self.__upload_to_library(conf_params, lib_id, folder_id)
wf_conf = self.smpsh_to_fc_workflow_conf
ds_map = {wf_conf['samplesheet_dataset_label']: {'id':
samplesheet_id, 'src': 'ld'},
wf_conf['config_parameters_file_label']: {'id':
conf_file_id, 'src': 'ld'}
}
hist_details = self.__run_workflow(self.__get_workflow_id(wf_conf['label']),
ds_map, 'flowcell_samplesheet_import')
self.logger.info('Workflow running')
if async:
self.logger.info('Enabled async run, returning')
return hist_details
else:
self.logger.info('Waiting for run exit status')
status = self.__wait(hist_details['history'])
if status == 'ok':
self.logger.info('Run completed')
return hist_details, lib_id
else:
msg = 'Error occurred while processing data'
self.logger.error(msg)
raise RuntimeError(msg)
def delete_history(self, history_id, purge_history=False):
self.logger.info('Deleting history with ID %s', history_id)
self.gi.histories.delete_history(history_id, purge_history)
self.logger.info('History deleted')
def delete_library(self, library_id):
self.logger.info('Deleting library with ID %s', library_id)
self.gi.libraries.delete_library(library_id)
self.logger.info('Library deleted')
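# Minimal usage sketch (not part of the original module), assuming a YAML
# configuration file laid out as documented in the class comment above. The
# file names, logger name and study label are illustrative only.
if __name__ == '__main__':
    import logging
    logging.basicConfig(level=logging.DEBUG)
    with open('galaxy_wrapper_config.yaml') as f:
        config = yaml.load(f)
    wrapper = GalaxyWrapper(config, logging.getLogger('galaxy_wrapper'))
    with open('samplesheet.csv') as f:
        hist_details, lib_id = wrapper.run_flowcell_from_samplesheet_import(
            f.read(), 'STUDY-001')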
|
crs4/omero.biobank
|
bl/vl/app/workflow_wrapper/galaxy_wrapper.py
|
Python
|
gpl-2.0
| 16,473
|
[
"Galaxy"
] |
938a5158d4a9e6f8fc415c157aae6c82182db5ccc9a29cf197bb6992a74e7c66
|
tutorial_tests = """
Let's try a simple generator:
>>> def f():
... yield 1
... yield 2
>>> for i in f():
... print(i)
1
2
>>> g = f()
>>> next(g)
1
>>> next(g)
2
"Falling off the end" stops the generator:
>>> next(g)
Traceback (most recent call last):
File "<stdin>", line 1, in ?
File "<stdin>", line 2, in g
StopIteration
"return" also stops the generator:
>>> def f():
... yield 1
... return
... yield 2 # never reached
...
>>> g = f()
>>> next(g)
1
>>> next(g)
Traceback (most recent call last):
File "<stdin>", line 1, in ?
File "<stdin>", line 3, in f
StopIteration
>>> next(g) # once stopped, can't be resumed
Traceback (most recent call last):
File "<stdin>", line 1, in ?
StopIteration
"raise StopIteration" stops the generator too:
>>> def f():
... yield 1
... raise StopIteration
... yield 2 # never reached
...
>>> g = f()
>>> next(g)
1
>>> next(g)
Traceback (most recent call last):
File "<stdin>", line 1, in ?
StopIteration
>>> next(g)
Traceback (most recent call last):
File "<stdin>", line 1, in ?
StopIteration
However, they are not exactly equivalent:
>>> def g1():
... try:
... return
... except:
... yield 1
...
>>> list(g1())
[]
>>> def g2():
... try:
... raise StopIteration
... except:
... yield 42
>>> print(list(g2()))
[42]
This may be surprising at first:
>>> def g3():
... try:
... return
... finally:
... yield 1
...
>>> list(g3())
[1]
Let's create an alternate range() function implemented as a generator:
>>> def yrange(n):
... for i in range(n):
... yield i
...
>>> list(yrange(5))
[0, 1, 2, 3, 4]
Generators always return to the most recent caller:
>>> def creator():
... r = yrange(5)
... print("creator", next(r))
... return r
...
>>> def caller():
... r = creator()
... for i in r:
... print("caller", i)
...
>>> caller()
creator 0
caller 1
caller 2
caller 3
caller 4
Generators can call other generators:
>>> def zrange(n):
... for i in yrange(n):
... yield i
...
>>> list(zrange(5))
[0, 1, 2, 3, 4]
"""
# The examples from PEP 255.
pep_tests = """
Specification: Yield
Restriction: A generator cannot be resumed while it is actively
running:
>>> def g():
... i = next(me)
... yield i
>>> me = g()
>>> next(me)
Traceback (most recent call last):
...
File "<string>", line 2, in g
ValueError: generator already executing
Specification: Return
Note that return isn't always equivalent to raising StopIteration: the
difference lies in how enclosing try/except constructs are treated.
For example,
>>> def f1():
... try:
... return
... except:
... yield 1
>>> print(list(f1()))
[]
because, as in any function, return simply exits, but
>>> def f2():
... try:
... raise StopIteration
... except:
... yield 42
>>> print(list(f2()))
[42]
because StopIteration is captured by a bare "except", as is any
exception.
Specification: Generators and Exception Propagation
>>> def f():
... return 1//0
>>> def g():
... yield f() # the zero division exception propagates
... yield 42 # and we'll never get here
>>> k = g()
>>> next(k)
Traceback (most recent call last):
File "<stdin>", line 1, in ?
File "<stdin>", line 2, in g
File "<stdin>", line 2, in f
ZeroDivisionError: integer division or modulo by zero
>>> next(k) # and the generator cannot be resumed
Traceback (most recent call last):
File "<stdin>", line 1, in ?
StopIteration
>>>
Specification: Try/Except/Finally
>>> def f():
... try:
... yield 1
... try:
... yield 2
... 1//0
... yield 3 # never get here
... except ZeroDivisionError:
... yield 4
... yield 5
... raise
... except:
... yield 6
... yield 7 # the "raise" above stops this
... except:
... yield 8
... yield 9
... try:
... x = 12
... finally:
... yield 10
... yield 11
>>> print(list(f()))
[1, 2, 4, 5, 8, 9, 10, 11]
>>>
Guido's binary tree example.
>>> # A binary tree class.
>>> class Tree:
...
... def __init__(self, label, left=None, right=None):
... self.label = label
... self.left = left
... self.right = right
...
... def __repr__(self, level=0, indent=" "):
... s = level*indent + repr(self.label)
... if self.left:
... s = s + "\\n" + self.left.__repr__(level+1, indent)
... if self.right:
... s = s + "\\n" + self.right.__repr__(level+1, indent)
... return s
...
... def __iter__(self):
... return inorder(self)
>>> # Create a Tree from a list.
>>> def tree(list):
... n = len(list)
... if n == 0:
... return []
... i = n // 2
... return Tree(list[i], tree(list[:i]), tree(list[i+1:]))
>>> # Show it off: create a tree.
>>> t = tree("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
>>> # A recursive generator that generates Tree labels in in-order.
>>> def inorder(t):
... if t:
... for x in inorder(t.left):
... yield x
... yield t.label
... for x in inorder(t.right):
... yield x
>>> # Show it off: create a tree.
>>> t = tree("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
>>> # Print the nodes of the tree in in-order.
>>> for x in t:
... print(' '+x, end='')
A B C D E F G H I J K L M N O P Q R S T U V W X Y Z
>>> # A non-recursive generator.
>>> def inorder(node):
... stack = []
... while node:
... while node.left:
... stack.append(node)
... node = node.left
... yield node.label
... while not node.right:
... try:
... node = stack.pop()
... except IndexError:
... return
... yield node.label
... node = node.right
>>> # Exercise the non-recursive generator.
>>> for x in t:
... print(' '+x, end='')
A B C D E F G H I J K L M N O P Q R S T U V W X Y Z
"""
# Examples from Iterator-List and Python-Dev and c.l.py.
email_tests = """
The difference between yielding None and returning it.
>>> def g():
... for i in range(3):
... yield None
... yield None
... return
>>> list(g())
[None, None, None, None]
Ensure that explicitly raising StopIteration acts like any other exception
in try/except, not like a return.
>>> def g():
... yield 1
... try:
... raise StopIteration
... except:
... yield 2
... yield 3
>>> list(g())
[1, 2, 3]
Next one was posted to c.l.py.
>>> def gcomb(x, k):
... "Generate all combinations of k elements from list x."
...
... if k > len(x):
... return
... if k == 0:
... yield []
... else:
... first, rest = x[0], x[1:]
... # A combination does or doesn't contain first.
... # If it does, the remainder is a k-1 comb of rest.
... for c in gcomb(rest, k-1):
... c.insert(0, first)
... yield c
... # If it doesn't contain first, it's a k comb of rest.
... for c in gcomb(rest, k):
... yield c
>>> seq = list(range(1, 5))
>>> for k in range(len(seq) + 2):
... print("%d-combs of %s:" % (k, seq))
... for c in gcomb(seq, k):
... print(" ", c)
0-combs of [1, 2, 3, 4]:
[]
1-combs of [1, 2, 3, 4]:
[1]
[2]
[3]
[4]
2-combs of [1, 2, 3, 4]:
[1, 2]
[1, 3]
[1, 4]
[2, 3]
[2, 4]
[3, 4]
3-combs of [1, 2, 3, 4]:
[1, 2, 3]
[1, 2, 4]
[1, 3, 4]
[2, 3, 4]
4-combs of [1, 2, 3, 4]:
[1, 2, 3, 4]
5-combs of [1, 2, 3, 4]:
From the Iterators list, about the types of these things.
>>> def g():
... yield 1
...
>>> type(g)
<class 'function'>
>>> i = g()
>>> type(i)
<class 'generator'>
>>> [s for s in dir(i) if not s.startswith('_')]
['close', 'gi_code', 'gi_frame', 'gi_running', 'send', 'throw']
>>> print(i.__next__.__doc__)
x.__next__() <==> next(x)
>>> iter(i) is i
True
>>> import types
>>> isinstance(i, types.GeneratorType)
True
And more, added later.
>>> i.gi_running
0
>>> type(i.gi_frame)
<class 'frame'>
>>> i.gi_running = 42
Traceback (most recent call last):
...
AttributeError: readonly attribute
>>> def g():
... yield me.gi_running
>>> me = g()
>>> me.gi_running
0
>>> next(me)
1
>>> me.gi_running
0
A clever union-find implementation from c.l.py, due to David Eppstein.
Sent: Friday, June 29, 2001 12:16 PM
To: python-list@python.org
Subject: Re: PEP 255: Simple Generators
>>> class disjointSet:
... def __init__(self, name):
... self.name = name
... self.parent = None
... self.generator = self.generate()
...
... def generate(self):
... while not self.parent:
... yield self
... for x in self.parent.generator:
... yield x
...
... def find(self):
... return next(self.generator)
...
... def union(self, parent):
... if self.parent:
... raise ValueError("Sorry, I'm not a root!")
... self.parent = parent
...
... def __str__(self):
... return self.name
>>> names = "ABCDEFGHIJKLM"
>>> sets = [disjointSet(name) for name in names]
>>> roots = sets[:]
>>> import random
>>> gen = random.Random(42)
>>> while 1:
... for s in sets:
... print(" %s->%s" % (s, s.find()), end='')
... print()
... if len(roots) > 1:
... s1 = gen.choice(roots)
... roots.remove(s1)
... s2 = gen.choice(roots)
... s1.union(s2)
... print("merged", s1, "into", s2)
... else:
... break
A->A B->B C->C D->D E->E F->F G->G H->H I->I J->J K->K L->L M->M
merged K into B
A->A B->B C->C D->D E->E F->F G->G H->H I->I J->J K->B L->L M->M
merged A into F
A->F B->B C->C D->D E->E F->F G->G H->H I->I J->J K->B L->L M->M
merged E into F
A->F B->B C->C D->D E->F F->F G->G H->H I->I J->J K->B L->L M->M
merged D into C
A->F B->B C->C D->C E->F F->F G->G H->H I->I J->J K->B L->L M->M
merged M into C
A->F B->B C->C D->C E->F F->F G->G H->H I->I J->J K->B L->L M->C
merged J into B
A->F B->B C->C D->C E->F F->F G->G H->H I->I J->B K->B L->L M->C
merged B into C
A->F B->C C->C D->C E->F F->F G->G H->H I->I J->C K->C L->L M->C
merged F into G
A->G B->C C->C D->C E->G F->G G->G H->H I->I J->C K->C L->L M->C
merged L into C
A->G B->C C->C D->C E->G F->G G->G H->H I->I J->C K->C L->C M->C
merged G into I
A->I B->C C->C D->C E->I F->I G->I H->H I->I J->C K->C L->C M->C
merged I into H
A->H B->C C->C D->C E->H F->H G->H H->H I->H J->C K->C L->C M->C
merged C into H
A->H B->H C->H D->H E->H F->H G->H H->H I->H J->H K->H L->H M->H
"""
# Emacs turd '
# Fun tests (for sufficiently warped notions of "fun").
fun_tests = """
Build up to a recursive Sieve of Eratosthenes generator.
>>> def firstn(g, n):
... return [next(g) for i in range(n)]
>>> def intsfrom(i):
... while 1:
... yield i
... i += 1
>>> firstn(intsfrom(5), 7)
[5, 6, 7, 8, 9, 10, 11]
>>> def exclude_multiples(n, ints):
... for i in ints:
... if i % n:
... yield i
>>> firstn(exclude_multiples(3, intsfrom(1)), 6)
[1, 2, 4, 5, 7, 8]
>>> def sieve(ints):
... prime = next(ints)
... yield prime
... not_divisible_by_prime = exclude_multiples(prime, ints)
... for p in sieve(not_divisible_by_prime):
... yield p
>>> primes = sieve(intsfrom(2))
>>> firstn(primes, 20)
[2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71]
Another famous problem: generate all integers of the form
2**i * 3**j * 5**k
in increasing order, where i,j,k >= 0. Trickier than it may look at first!
Try writing it without generators, and correctly, and without generating
3 internal results for each result output.
>>> def times(n, g):
... for i in g:
... yield n * i
>>> firstn(times(10, intsfrom(1)), 10)
[10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
>>> def merge(g, h):
... ng = next(g)
... nh = next(h)
... while 1:
... if ng < nh:
... yield ng
... ng = next(g)
... elif ng > nh:
... yield nh
... nh = next(h)
... else:
... yield ng
... ng = next(g)
... nh = next(h)
The following works, but is doing a whale of a lot of redundant work --
it's not clear how to get the internal uses of m235 to share a single
generator. Note that me_times2 (etc) each need to see every element in the
result sequence. So this is an example where lazy lists are more natural
(you can look at the head of a lazy list any number of times).
>>> def m235():
... yield 1
... me_times2 = times(2, m235())
... me_times3 = times(3, m235())
... me_times5 = times(5, m235())
... for i in merge(merge(me_times2,
... me_times3),
... me_times5):
... yield i
Don't print "too many" of these -- the implementation above is extremely
inefficient: each call of m235() leads to 3 recursive calls, and in
turn each of those 3 more, and so on, and so on, until we've descended
enough levels to satisfy the print stmts. Very odd: when I printed 5
lines of results below, this managed to screw up Win98's malloc in "the
usual" way, i.e. the heap grew over 4Mb so Win98 started fragmenting
address space, and it *looked* like a very slow leak.
>>> result = m235()
>>> for i in range(3):
... print(firstn(result, 15))
[1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24]
[25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80]
[81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192]
Heh. Here's one way to get a shared list, complete with an excruciating
namespace renaming trick. The *pretty* part is that the times() and merge()
functions can be reused as-is, because they only assume their stream
arguments are iterable -- a LazyList is the same as a generator to times().
>>> class LazyList:
... def __init__(self, g):
... self.sofar = []
... self.fetch = g.__next__
...
... def __getitem__(self, i):
... sofar, fetch = self.sofar, self.fetch
... while i >= len(sofar):
... sofar.append(fetch())
... return sofar[i]
>>> def m235():
... yield 1
... # Gack: m235 below actually refers to a LazyList.
... me_times2 = times(2, m235)
... me_times3 = times(3, m235)
... me_times5 = times(5, m235)
... for i in merge(merge(me_times2,
... me_times3),
... me_times5):
... yield i
Print as many of these as you like -- *this* implementation is memory-
efficient.
>>> m235 = LazyList(m235())
>>> for i in range(5):
... print([m235[j] for j in range(15*i, 15*(i+1))])
[1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24]
[25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80]
[81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192]
[200, 216, 225, 240, 243, 250, 256, 270, 288, 300, 320, 324, 360, 375, 384]
[400, 405, 432, 450, 480, 486, 500, 512, 540, 576, 600, 625, 640, 648, 675]
Ye olde Fibonacci generator, LazyList style.
>>> def fibgen(a, b):
...
... def sum(g, h):
... while 1:
... yield next(g) + next(h)
...
... def tail(g):
... next(g) # throw first away
... for x in g:
... yield x
...
... yield a
... yield b
... for s in sum(iter(fib),
... tail(iter(fib))):
... yield s
>>> fib = LazyList(fibgen(1, 2))
>>> firstn(iter(fib), 17)
[1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597, 2584]
Running after your tail with itertools.tee (new in version 2.4)
The algorithms "m235" (Hamming) and Fibonacci presented above are both
examples of a whole family of FP (functional programming) algorithms
where a function produces and returns a list while the production algorithm
assumes the list has already been produced, by recursively calling itself.
For these algorithms to work, they must:
- produce at least a first element without presupposing the existence of
the rest of the list
- produce their elements in a lazy manner
To work efficiently, the beginning of the list must not be recomputed over
and over again. This is ensured in most FP languages as a built-in feature.
In python, we have to explicitly maintain a list of already computed results
and abandon genuine recursivity.
This is what had been attempted above with the LazyList class. One problem
with that class is that it keeps a list of all of the generated results and
therefore continually grows. This partially defeats the goal of the generator
concept, viz. produce the results only as needed instead of producing them
all and thereby wasting memory.
Thanks to itertools.tee, it is now clear "how to get the internal uses of
m235 to share a single generator".
>>> from itertools import tee
>>> def m235():
... def _m235():
... yield 1
... for n in merge(times(2, m2),
... merge(times(3, m3),
... times(5, m5))):
... yield n
... m1 = _m235()
... m2, m3, m5, mRes = tee(m1, 4)
... return mRes
>>> it = m235()
>>> for i in range(5):
... print(firstn(it, 15))
[1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24]
[25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80]
[81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192]
[200, 216, 225, 240, 243, 250, 256, 270, 288, 300, 320, 324, 360, 375, 384]
[400, 405, 432, 450, 480, 486, 500, 512, 540, 576, 600, 625, 640, 648, 675]
The "tee" function does just what we want. It internally keeps a generated
result for as long as it has not been "consumed" from all of the duplicated
iterators, whereupon it is deleted. You can therefore print the hamming
sequence for hours with little or no increase in memory usage.
The beauty of it is that recursive running-after-their-tail FP algorithms
are quite straightforwardly expressed with this Python idiom.
Ye olde Fibonacci generator, tee style.
>>> def fib():
...
... def _isum(g, h):
... while 1:
... yield next(g) + next(h)
...
... def _fib():
... yield 1
... yield 2
... next(fibTail) # throw first away
... for res in _isum(fibHead, fibTail):
... yield res
...
... realfib = _fib()
... fibHead, fibTail, fibRes = tee(realfib, 3)
... return fibRes
>>> firstn(fib(), 17)
[1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597, 2584]
"""
# syntax_tests mostly provokes SyntaxErrors. Also fiddling with #if 0
# hackery.
syntax_tests = """
>>> def f():
... return 22
... yield 1
Traceback (most recent call last):
..
SyntaxError: 'return' with argument inside generator
>>> def f():
... yield 1
... return 22
Traceback (most recent call last):
..
SyntaxError: 'return' with argument inside generator
"return None" is not the same as "return" in a generator:
>>> def f():
... yield 1
... return None
Traceback (most recent call last):
..
SyntaxError: 'return' with argument inside generator
These are fine:
>>> def f():
... yield 1
... return
>>> def f():
... try:
... yield 1
... finally:
... pass
>>> def f():
... try:
... try:
... 1//0
... except ZeroDivisionError:
... yield 666
... except:
... pass
... finally:
... pass
>>> def f():
... try:
... try:
... yield 12
... 1//0
... except ZeroDivisionError:
... yield 666
... except:
... try:
... x = 12
... finally:
... yield 12
... except:
... return
>>> list(f())
[12, 666]
>>> def f():
... yield
>>> type(f())
<class 'generator'>
>>> def f():
... if 0:
... yield
>>> type(f())
<class 'generator'>
>>> def f():
... if 0:
... yield 1
>>> type(f())
<class 'generator'>
>>> def f():
... if "":
... yield None
>>> type(f())
<class 'generator'>
>>> def f():
... return
... try:
... if x==4:
... pass
... elif 0:
... try:
... 1//0
... except SyntaxError:
... pass
... else:
... if 0:
... while 12:
... x += 1
... yield 2 # don't blink
... f(a, b, c, d, e)
... else:
... pass
... except:
... x = 1
... return
>>> type(f())
<class 'generator'>
>>> def f():
... if 0:
... def g():
... yield 1
...
>>> type(f())
<class 'NoneType'>
>>> def f():
... if 0:
... class C:
... def __init__(self):
... yield 1
... def f(self):
... yield 2
>>> type(f())
<class 'NoneType'>
>>> def f():
... if 0:
... return
... if 0:
... yield 2
>>> type(f())
<class 'generator'>
>>> def f():
... if 0:
... lambda x: x # shouldn't trigger here
... return # or here
... def f(i):
... return 2*i # or here
... if 0:
... return 3 # but *this* sucks (line 8)
... if 0:
... yield 2 # because it's a generator (line 10)
Traceback (most recent call last):
SyntaxError: 'return' with argument inside generator
This one caused a crash (see SF bug 567538):
>>> def f():
... for i in range(3):
... try:
... continue
... finally:
... yield i
...
>>> g = f()
>>> print(next(g))
0
>>> print(next(g))
1
>>> print(next(g))
2
>>> print(next(g))
Traceback (most recent call last):
StopIteration
Test the gi_code attribute
>>> def f():
... yield 5
...
>>> g = f()
>>> g.gi_code is f.__code__
True
>>> next(g)
5
>>> next(g)
Traceback (most recent call last):
StopIteration
>>> g.gi_code is f.__code__
True
Test the __name__ attribute and the repr()
>>> def f():
... yield 5
...
>>> g = f()
>>> g.__name__
'f'
>>> repr(g) # doctest: +ELLIPSIS
'<generator object f at ...>'
Lambdas shouldn't have their usual return behavior.
>>> x = lambda: (yield 1)
>>> list(x())
[1]
>>> x = lambda: ((yield 1), (yield 2))
>>> list(x())
[1, 2]
"""
# conjoin is a simple backtracking generator, named in honor of Icon's
# "conjunction" control structure. Pass a list of no-argument functions
# that return iterable objects. Easiest to explain by example: assume the
# function list [x, y, z] is passed. Then conjoin acts like:
#
# def g():
# values = [None] * 3
# for values[0] in x():
# for values[1] in y():
# for values[2] in z():
# yield values
#
# So some 3-lists of values *may* be generated, each time we successfully
# get into the innermost loop. If an iterator fails (is exhausted) before
# then, it "backtracks" to get the next value from the nearest enclosing
# iterator (the one "to the left"), and starts all over again at the next
# slot (pumps a fresh iterator). Of course this is most useful when the
# iterators have side-effects, so that which values *can* be generated at
# each slot depend on the values iterated at previous slots.
def simple_conjoin(gs):
values = [None] * len(gs)
def gen(i):
if i >= len(gs):
yield values
else:
for values[i] in gs[i]():
for x in gen(i+1):
yield x
for x in gen(0):
yield x
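# Quick illustration (not part of the original tests) of the backtracking
# cross-product described in the comment above. Note the v[:] copies:
# conjoin-style generators re-yield the same list object every time.
def _demo_simple_conjoin():
    return [v[:] for v in simple_conjoin([lambda: iter('ab'),
                                          lambda: iter((0, 1))])]
# _demo_simple_conjoin() == [['a', 0], ['a', 1], ['b', 0], ['b', 1]]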
# That works fine, but recursing a level and checking i against len(gs) for
# each item produced is inefficient. By doing manual loop unrolling across
# generator boundaries, it's possible to eliminate most of that overhead.
# This isn't worth the bother *in general* for generators, but conjoin() is
# a core building block for some CPU-intensive generator applications.
def conjoin(gs):
n = len(gs)
values = [None] * n
# Do one loop nest at a time recursively, until the # of loop nests
# remaining is divisible by 3.
def gen(i):
if i >= n:
yield values
elif (n-i) % 3:
ip1 = i+1
for values[i] in gs[i]():
for x in gen(ip1):
yield x
else:
for x in _gen3(i):
yield x
# Do three loop nests at a time, recursing only if at least three more
# remain. Don't call directly: this is an internal optimization for
# gen's use.
def _gen3(i):
assert i < n and (n-i) % 3 == 0
ip1, ip2, ip3 = i+1, i+2, i+3
g, g1, g2 = gs[i : ip3]
if ip3 >= n:
# These are the last three, so we can yield values directly.
for values[i] in g():
for values[ip1] in g1():
for values[ip2] in g2():
yield values
else:
# At least 6 loop nests remain; peel off 3 and recurse for the
# rest.
for values[i] in g():
for values[ip1] in g1():
for values[ip2] in g2():
for x in _gen3(ip3):
yield x
for x in gen(0):
yield x
# And one more approach: For backtracking apps like the Knight's Tour
# solver below, the number of backtracking levels can be enormous (one
# level per square, for the Knight's Tour, so that e.g. a 100x100 board
# needs 10,000 levels). In such cases Python is likely to run out of
# stack space due to recursion. So here's a recursion-free version of
# conjoin too.
# NOTE WELL: This allows large problems to be solved with only trivial
# demands on stack space. Without explicitly resumable generators, this is
# much harder to achieve. OTOH, this is much slower (up to a factor of 2)
# than the fancy unrolled recursive conjoin.
def flat_conjoin(gs): # rename to conjoin to run tests with this instead
n = len(gs)
values = [None] * n
iters = [None] * n
_StopIteration = StopIteration # make local because caught a *lot*
i = 0
while 1:
# Descend.
try:
while i < n:
it = iters[i] = gs[i]().__next__
values[i] = it()
i += 1
except _StopIteration:
pass
else:
assert i == n
yield values
# Backtrack until an older iterator can be resumed.
i -= 1
while i >= 0:
try:
values[i] = iters[i]()
# Success! Start fresh at next level.
i += 1
break
except _StopIteration:
# Continue backtracking.
i -= 1
else:
assert i < 0
break
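# Sanity sketch (not part of the original tests): for side-effect-free
# generator factories, flat_conjoin should produce exactly the same sequence
# as the recursive conjoin above; compare them on four binary slots.
def _check_flat_conjoin():
    gens = [lambda: iter((0, 1))] * 4
    assert ([v[:] for v in conjoin(gens)] ==
            [v[:] for v in flat_conjoin(gens)])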
# A conjoin-based N-Queens solver.
class Queens:
def __init__(self, n):
self.n = n
rangen = range(n)
# Assign a unique int to each column and diagonal.
# columns: n of those, range(n).
# NW-SE diagonals: 2n-1 of these, i-j unique and invariant along
# each, smallest i-j is 0-(n-1) = 1-n, so add n-1 to shift to 0-
# based.
# NE-SW diagonals: 2n-1 of these, i+j unique and invariant along
# each, smallest i+j is 0, largest is 2n-2.
# For each square, compute a bit vector of the columns and
# diagonals it covers, and for each row compute a function that
# generates the possibilities for the columns in that row.
self.rowgenerators = []
for i in rangen:
rowuses = [(1 << j) | # column ordinal
(1 << (n + i-j + n-1)) | # NW-SE ordinal
(1 << (n + 2*n-1 + i+j)) # NE-SW ordinal
for j in rangen]
def rowgen(rowuses=rowuses):
for j in rangen:
uses = rowuses[j]
if uses & self.used == 0:
self.used |= uses
yield j
self.used &= ~uses
self.rowgenerators.append(rowgen)
# Generate solutions.
def solve(self):
self.used = 0
for row2col in conjoin(self.rowgenerators):
yield row2col
def printsolution(self, row2col):
n = self.n
assert n == len(row2col)
sep = "+" + "-+" * n
print(sep)
for i in range(n):
squares = [" " for j in range(n)]
squares[row2col[i]] = "Q"
print("|" + "|".join(squares) + "|")
print(sep)
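# Worked illustration (not part of the original tests) of the bit-vector
# encoding built in Queens.__init__: square (i, j) occupies column bit j,
# NW-SE diagonal bit n + (i-j) + (n-1), and NE-SW diagonal bit
# n + (2n-1) + (i+j), so a single integer AND tests all three constraints.
def _queens_square_mask(n, i, j):
    return ((1 << j) |
            (1 << (n + i - j + n - 1)) |
            (1 << (n + 2*n - 1 + i + j)))
# e.g. _queens_square_mask(4, 0, 0) == (1 << 0) | (1 << 7) | (1 << 11)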
# A conjoin-based Knight's Tour solver. This is pretty sophisticated
# (e.g., when used with flat_conjoin above, and passing hard=1 to the
# constructor, a 200x200 Knight's Tour was found quickly -- note that we're
# creating 10s of thousands of generators then!), and is lengthy.
class Knights:
def __init__(self, m, n, hard=0):
self.m, self.n = m, n
# solve() will set up succs[i] to be a list of square #i's
# successors.
succs = self.succs = []
# Remove i0 from each of its successor's successor lists, i.e.
# successors can't go back to i0 again. Return 0 if we can
# detect this makes a solution impossible, else return 1.
def remove_from_successors(i0, len=len):
# If we remove all exits from a free square, we're dead:
# even if we move to it next, we can't leave it again.
# If we create a square with one exit, we must visit it next;
# else somebody else will have to visit it, and since there's
# only one adjacent, there won't be a way to leave it again.
# Finally, if we create more than one free square with a
# single exit, we can only move to one of them next, leaving
# the other one a dead end.
ne0 = ne1 = 0
for i in succs[i0]:
s = succs[i]
s.remove(i0)
e = len(s)
if e == 0:
ne0 += 1
elif e == 1:
ne1 += 1
return ne0 == 0 and ne1 < 2
# Put i0 back in each of its successor's successor lists.
def add_to_successors(i0):
for i in succs[i0]:
succs[i].append(i0)
# Generate the first move.
def first():
if m < 1 or n < 1:
return
# Since we're looking for a cycle, it doesn't matter where we
# start. Starting in a corner makes the 2nd move easy.
corner = self.coords2index(0, 0)
remove_from_successors(corner)
self.lastij = corner
yield corner
add_to_successors(corner)
# Generate the second moves.
def second():
corner = self.coords2index(0, 0)
assert self.lastij == corner # i.e., we started in the corner
if m < 3 or n < 3:
return
assert len(succs[corner]) == 2
assert self.coords2index(1, 2) in succs[corner]
assert self.coords2index(2, 1) in succs[corner]
# Only two choices. Whichever we pick, the other must be the
# square picked on move m*n, as it's the only way to get back
# to (0, 0). Save its index in self.final so that moves before
# the last know it must be kept free.
for i, j in (1, 2), (2, 1):
this = self.coords2index(i, j)
final = self.coords2index(3-i, 3-j)
self.final = final
remove_from_successors(this)
succs[final].append(corner)
self.lastij = this
yield this
succs[final].remove(corner)
add_to_successors(this)
# Generate moves 3 thru m*n-1.
def advance(len=len):
# If some successor has only one exit, must take it.
# Else favor successors with fewer exits.
candidates = []
for i in succs[self.lastij]:
e = len(succs[i])
assert e > 0, "else remove_from_successors() pruning flawed"
if e == 1:
candidates = [(e, i)]
break
candidates.append((e, i))
else:
candidates.sort()
for e, i in candidates:
if i != self.final:
if remove_from_successors(i):
self.lastij = i
yield i
add_to_successors(i)
# Generate moves 3 thru m*n-1. Alternative version using a
# stronger (but more expensive) heuristic to order successors.
# Since the # of backtracking levels is m*n, a poor move early on
# can take eons to undo. Smallest square board for which this
# matters a lot is 52x52.
def advance_hard(vmid=(m-1)/2.0, hmid=(n-1)/2.0, len=len):
# If some successor has only one exit, must take it.
# Else favor successors with fewer exits.
# Break ties via max distance from board centerpoint (favor
# corners and edges whenever possible).
candidates = []
for i in succs[self.lastij]:
e = len(succs[i])
assert e > 0, "else remove_from_successors() pruning flawed"
if e == 1:
candidates = [(e, 0, i)]
break
i1, j1 = self.index2coords(i)
d = (i1 - vmid)**2 + (j1 - hmid)**2
candidates.append((e, -d, i))
else:
candidates.sort()
for e, d, i in candidates:
if i != self.final:
if remove_from_successors(i):
self.lastij = i
yield i
add_to_successors(i)
# Generate the last move.
def last():
assert self.final in succs[self.lastij]
yield self.final
if m*n < 4:
self.squaregenerators = [first]
else:
self.squaregenerators = [first, second] + \
[hard and advance_hard or advance] * (m*n - 3) + \
[last]
def coords2index(self, i, j):
assert 0 <= i < self.m
assert 0 <= j < self.n
return i * self.n + j
def index2coords(self, index):
assert 0 <= index < self.m * self.n
return divmod(index, self.n)
def _init_board(self):
succs = self.succs
del succs[:]
m, n = self.m, self.n
c2i = self.coords2index
offsets = [( 1, 2), ( 2, 1), ( 2, -1), ( 1, -2),
(-1, -2), (-2, -1), (-2, 1), (-1, 2)]
rangen = range(n)
for i in range(m):
for j in rangen:
s = [c2i(i+io, j+jo) for io, jo in offsets
if 0 <= i+io < m and
0 <= j+jo < n]
succs.append(s)
# Generate solutions.
def solve(self):
self._init_board()
for x in conjoin(self.squaregenerators):
yield x
def printsolution(self, x):
m, n = self.m, self.n
assert len(x) == m*n
w = len(str(m*n))
format = "%" + str(w) + "d"
squares = [[None] * n for i in range(m)]
k = 1
for i in x:
i1, j1 = self.index2coords(i)
squares[i1][j1] = format % k
k += 1
sep = "+" + ("-" * w + "+") * n
print(sep)
for i in range(m):
row = squares[i]
print("|" + "|".join(row) + "|")
print(sep)
conjoin_tests = """
Generate the 3-bit binary numbers in order. This illustrates dumbest-
possible use of conjoin, just to generate the full cross-product.
>>> for c in conjoin([lambda: iter((0, 1))] * 3):
... print(c)
[0, 0, 0]
[0, 0, 1]
[0, 1, 0]
[0, 1, 1]
[1, 0, 0]
[1, 0, 1]
[1, 1, 0]
[1, 1, 1]
For efficiency in typical backtracking apps, conjoin() yields the same list
object each time. So if you want to save away a full account of its
generated sequence, you need to copy its results.
>>> def gencopy(iterator):
... for x in iterator:
... yield x[:]
>>> for n in range(10):
... all = list(gencopy(conjoin([lambda: iter((0, 1))] * n)))
... print(n, len(all), all[0] == [0] * n, all[-1] == [1] * n)
0 1 True True
1 2 True True
2 4 True True
3 8 True True
4 16 True True
5 32 True True
6 64 True True
7 128 True True
8 256 True True
9 512 True True
And run an 8-queens solver.
>>> q = Queens(8)
>>> LIMIT = 2
>>> count = 0
>>> for row2col in q.solve():
... count += 1
... if count <= LIMIT:
... print("Solution", count)
... q.printsolution(row2col)
Solution 1
+-+-+-+-+-+-+-+-+
|Q| | | | | | | |
+-+-+-+-+-+-+-+-+
| | | | |Q| | | |
+-+-+-+-+-+-+-+-+
| | | | | | | |Q|
+-+-+-+-+-+-+-+-+
| | | | | |Q| | |
+-+-+-+-+-+-+-+-+
| | |Q| | | | | |
+-+-+-+-+-+-+-+-+
| | | | | | |Q| |
+-+-+-+-+-+-+-+-+
| |Q| | | | | | |
+-+-+-+-+-+-+-+-+
| | | |Q| | | | |
+-+-+-+-+-+-+-+-+
Solution 2
+-+-+-+-+-+-+-+-+
|Q| | | | | | | |
+-+-+-+-+-+-+-+-+
| | | | | |Q| | |
+-+-+-+-+-+-+-+-+
| | | | | | | |Q|
+-+-+-+-+-+-+-+-+
| | |Q| | | | | |
+-+-+-+-+-+-+-+-+
| | | | | | |Q| |
+-+-+-+-+-+-+-+-+
| | | |Q| | | | |
+-+-+-+-+-+-+-+-+
| |Q| | | | | | |
+-+-+-+-+-+-+-+-+
| | | | |Q| | | |
+-+-+-+-+-+-+-+-+
>>> print(count, "solutions in all.")
92 solutions in all.
And run a Knight's Tour on a 10x10 board. Note that there are about
20,000 solutions even on a 6x6 board, so don't dare run this to exhaustion.
>>> k = Knights(10, 10)
>>> LIMIT = 2
>>> count = 0
>>> for x in k.solve():
... count += 1
... if count <= LIMIT:
... print("Solution", count)
... k.printsolution(x)
... else:
... break
Solution 1
+---+---+---+---+---+---+---+---+---+---+
| 1| 58| 27| 34| 3| 40| 29| 10| 5| 8|
+---+---+---+---+---+---+---+---+---+---+
| 26| 35| 2| 57| 28| 33| 4| 7| 30| 11|
+---+---+---+---+---+---+---+---+---+---+
| 59|100| 73| 36| 41| 56| 39| 32| 9| 6|
+---+---+---+---+---+---+---+---+---+---+
| 74| 25| 60| 55| 72| 37| 42| 49| 12| 31|
+---+---+---+---+---+---+---+---+---+---+
| 61| 86| 99| 76| 63| 52| 47| 38| 43| 50|
+---+---+---+---+---+---+---+---+---+---+
| 24| 75| 62| 85| 54| 71| 64| 51| 48| 13|
+---+---+---+---+---+---+---+---+---+---+
| 87| 98| 91| 80| 77| 84| 53| 46| 65| 44|
+---+---+---+---+---+---+---+---+---+---+
| 90| 23| 88| 95| 70| 79| 68| 83| 14| 17|
+---+---+---+---+---+---+---+---+---+---+
| 97| 92| 21| 78| 81| 94| 19| 16| 45| 66|
+---+---+---+---+---+---+---+---+---+---+
| 22| 89| 96| 93| 20| 69| 82| 67| 18| 15|
+---+---+---+---+---+---+---+---+---+---+
Solution 2
+---+---+---+---+---+---+---+---+---+---+
| 1| 58| 27| 34| 3| 40| 29| 10| 5| 8|
+---+---+---+---+---+---+---+---+---+---+
| 26| 35| 2| 57| 28| 33| 4| 7| 30| 11|
+---+---+---+---+---+---+---+---+---+---+
| 59|100| 73| 36| 41| 56| 39| 32| 9| 6|
+---+---+---+---+---+---+---+---+---+---+
| 74| 25| 60| 55| 72| 37| 42| 49| 12| 31|
+---+---+---+---+---+---+---+---+---+---+
| 61| 86| 99| 76| 63| 52| 47| 38| 43| 50|
+---+---+---+---+---+---+---+---+---+---+
| 24| 75| 62| 85| 54| 71| 64| 51| 48| 13|
+---+---+---+---+---+---+---+---+---+---+
| 87| 98| 89| 80| 77| 84| 53| 46| 65| 44|
+---+---+---+---+---+---+---+---+---+---+
| 90| 23| 92| 95| 70| 79| 68| 83| 14| 17|
+---+---+---+---+---+---+---+---+---+---+
| 97| 88| 21| 78| 81| 94| 19| 16| 45| 66|
+---+---+---+---+---+---+---+---+---+---+
| 22| 91| 96| 93| 20| 69| 82| 67| 18| 15|
+---+---+---+---+---+---+---+---+---+---+
"""
weakref_tests = """\
Generators are weakly referencable:
>>> import weakref
>>> def gen():
... yield 'foo!'
...
>>> wr = weakref.ref(gen)
>>> wr() is gen
True
>>> p = weakref.proxy(gen)
Generator-iterators are weakly referencable as well:
>>> gi = gen()
>>> wr = weakref.ref(gi)
>>> wr() is gi
True
>>> p = weakref.proxy(gi)
>>> list(p)
['foo!']
"""
coroutine_tests = """\
Sending a value into a started generator:
>>> def f():
... print((yield 1))
... yield 2
>>> g = f()
>>> next(g)
1
>>> g.send(42)
42
2
Sending a value into a new generator produces a TypeError:
>>> f().send("foo")
Traceback (most recent call last):
...
TypeError: can't send non-None value to a just-started generator
Yield by itself yields None:
>>> def f(): yield
>>> list(f())
[None]
An obscene abuse of a yield expression within a generator expression:
>>> list((yield 21) for i in range(4))
[21, None, 21, None, 21, None, 21, None]
And a more sane, but still weird usage:
>>> def f(): list(i for i in [(yield 26)])
>>> type(f())
<class 'generator'>
A yield expression with augmented assignment.
>>> def coroutine(seq):
... count = 0
... while count < 200:
... count += yield
... seq.append(count)
>>> seq = []
>>> c = coroutine(seq)
>>> next(c)
>>> print(seq)
[]
>>> c.send(10)
>>> print(seq)
[10]
>>> c.send(10)
>>> print(seq)
[10, 20]
>>> c.send(10)
>>> print(seq)
[10, 20, 30]
Check some syntax errors for yield expressions:
>>> f=lambda: (yield 1),(yield 2)
Traceback (most recent call last):
...
SyntaxError: 'yield' outside function
>>> def f(): return lambda x=(yield): 1
Traceback (most recent call last):
...
SyntaxError: 'return' with argument inside generator
>>> def f(): x = yield = y
Traceback (most recent call last):
...
SyntaxError: assignment to yield expression not possible
>>> def f(): (yield bar) = y
Traceback (most recent call last):
...
SyntaxError: can't assign to yield expression
>>> def f(): (yield bar) += y
Traceback (most recent call last):
...
SyntaxError: can't assign to yield expression
Now check some throw() conditions:
>>> def f():
... while True:
... try:
... print((yield))
... except ValueError as v:
... print("caught ValueError (%s)" % (v))
>>> import sys
>>> g = f()
>>> next(g)
>>> g.throw(ValueError) # type only
caught ValueError ()
>>> g.throw(ValueError("xyz")) # value only
caught ValueError (xyz)
>>> g.throw(ValueError, ValueError(1)) # value+matching type
caught ValueError (1)
>>> g.throw(ValueError, TypeError(1)) # mismatched type, rewrapped
caught ValueError (1)
>>> g.throw(ValueError, ValueError(1), None) # explicit None traceback
caught ValueError (1)
>>> g.throw(ValueError(1), "foo") # bad args
Traceback (most recent call last):
...
TypeError: instance exception may not have a separate value
>>> g.throw(ValueError, "foo", 23) # bad args
Traceback (most recent call last):
...
TypeError: throw() third argument must be a traceback object
>>> g.throw("abc")
Traceback (most recent call last):
...
TypeError: exceptions must be classes or instances deriving from BaseException, not str
>>> g.throw(0)
Traceback (most recent call last):
...
TypeError: exceptions must be classes or instances deriving from BaseException, not int
>>> g.throw(list)
Traceback (most recent call last):
...
TypeError: exceptions must be classes or instances deriving from BaseException, not type
>>> def throw(g,exc):
... try:
... raise exc
... except:
... g.throw(*sys.exc_info())
>>> throw(g,ValueError) # do it with traceback included
caught ValueError ()
>>> g.send(1)
1
>>> throw(g,TypeError) # terminate the generator
Traceback (most recent call last):
...
TypeError
>>> print(g.gi_frame)
None
>>> g.send(2)
Traceback (most recent call last):
...
StopIteration
>>> g.throw(ValueError,6) # throw on closed generator
Traceback (most recent call last):
...
ValueError: 6
>>> f().throw(ValueError,7) # throw on just-opened generator
Traceback (most recent call last):
...
ValueError: 7
Plain "raise" inside a generator should preserve the traceback (#13188).
The traceback should have 3 levels:
- g.throw()
- f()
- 1/0
>>> def f():
... try:
... yield
... except:
... raise
>>> g = f()
>>> try:
... 1/0
... except ZeroDivisionError as v:
... try:
... g.throw(v)
... except Exception as w:
... tb = w.__traceback__
>>> levels = 0
>>> while tb:
... levels += 1
... tb = tb.tb_next
>>> levels
3
Now let's try closing a generator:
>>> def f():
... try: yield
... except GeneratorExit:
... print("exiting")
>>> g = f()
>>> next(g)
>>> g.close()
exiting
>>> g.close() # should be no-op now
>>> f().close() # close on just-opened generator should be fine
>>> def f(): yield # an even simpler generator
>>> f().close() # close before opening
>>> g = f()
>>> next(g)
>>> g.close() # close normally
And finalization:
>>> def f():
... try: yield
... finally:
... print("exiting")
>>> g = f()
>>> next(g)
>>> del g
exiting
GeneratorExit is not caught by except Exception:
>>> def f():
... try: yield
... except Exception:
... print('except')
... finally:
... print('finally')
>>> g = f()
>>> next(g)
>>> del g
finally
Now let's try some ill-behaved generators:
>>> def f():
... try: yield
... except GeneratorExit:
... yield "foo!"
>>> g = f()
>>> next(g)
>>> g.close()
Traceback (most recent call last):
...
RuntimeError: generator ignored GeneratorExit
>>> g.close()
Our ill-behaved code should be invoked during GC:
>>> import sys, io
>>> old, sys.stderr = sys.stderr, io.StringIO()
>>> g = f()
>>> next(g)
>>> del g
>>> sys.stderr.getvalue().startswith(
... "Exception RuntimeError: 'generator ignored GeneratorExit' in "
... )
True
>>> sys.stderr = old
And errors thrown during closing should propagate:
>>> def f():
... try: yield
... except GeneratorExit:
... raise TypeError("fie!")
>>> g = f()
>>> next(g)
>>> g.close()
Traceback (most recent call last):
...
TypeError: fie!
Ensure that various yield expression constructs make their
enclosing function a generator:
>>> def f(): x += yield
>>> type(f())
<class 'generator'>
>>> def f(): x = yield
>>> type(f())
<class 'generator'>
>>> def f(): lambda x=(yield): 1
>>> type(f())
<class 'generator'>
>>> def f(): x=(i for i in (yield) if (yield))
>>> type(f())
<class 'generator'>
>>> def f(d): d[(yield "a")] = d[(yield "b")] = 27
>>> data = [1,2]
>>> g = f(data)
>>> type(g)
<class 'generator'>
>>> g.send(None)
'a'
>>> data
[1, 2]
>>> g.send(0)
'b'
>>> data
[27, 2]
>>> try: g.send(1)
... except StopIteration: pass
>>> data
[27, 27]
"""
refleaks_tests = """
Prior to adding cycle-GC support to itertools.tee, this code would leak
references. We add it to the standard suite so the routine refleak-tests
would trigger if it starts being uncleanable again.
>>> import itertools
>>> def leak():
... class gen:
... def __iter__(self):
... return self
... def __next__(self):
... return self.item
... g = gen()
... head, tail = itertools.tee(g)
... g.item = head
... return head
>>> it = leak()
Make sure to also test the involvement of the tee-internal teedataobject,
which stores returned items.
>>> item = next(it)
This test leaked at one point due to generator finalization/destruction.
It was copied from Lib/test/leakers/test_generator_cycle.py before the file
was removed.
>>> def leak():
... def gen():
... while True:
... yield g
... g = gen()
>>> leak()
This test isn't really generator related, but rather exception-in-cleanup
related. The coroutine tests (above) just happen to cause an exception in
the generator's __del__ (tp_del) method. We can also test for this
explicitly, without generators. We do have to redirect stderr to avoid
printing warnings and to doublecheck that we actually tested what we wanted
to test.
>>> import sys, io
>>> old = sys.stderr
>>> try:
... sys.stderr = io.StringIO()
... class Leaker:
... def __del__(self):
... raise RuntimeError
...
... l = Leaker()
... del l
... err = sys.stderr.getvalue().strip()
... err.startswith(
... "Exception RuntimeError: RuntimeError() in <"
... )
... err.endswith("> ignored")
... len(err.splitlines())
... finally:
... sys.stderr = old
True
True
1
These refleak tests should perhaps be in a testfile of their own,
test_generators just happened to be the test that drew these out.
"""
__test__ = {"tut": tutorial_tests,
"pep": pep_tests,
"email": email_tests,
"fun": fun_tests,
"syntax": syntax_tests,
"conjoin": conjoin_tests,
"weakref": weakref_tests,
"coroutine": coroutine_tests,
"refleaks": refleaks_tests,
}
# Magic test name that regrtest.py invokes *after* importing this module.
# This worms around a bootstrap problem.
# Note that doctest and regrtest both look in sys.argv for a "-v" argument,
# so this works as expected in both ways of running regrtest.
def test_main(verbose=None):
from test import support, test_generators
support.run_doctest(test_generators, verbose)
# This part isn't needed for regrtest, but for running the test directly.
if __name__ == "__main__":
test_main(1)
|
mancoast/CPythonPyc_test
|
fail/323_test_generators.py
|
Python
|
gpl-3.0
| 50,625
|
[
"VisIt"
] |
1c5317374d257c6aa15db1a8f8c5f60869ff3898fb1003fb46e49b4d86619111
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
import numpy as np
import tensorflow as tf
from zhusuan.diagnostics import *
class TestEffectiveSampleSize(tf.test.TestCase):
def test_effective_sample_size(self):
rng = np.random.RandomState(1)
n = 10000
stride = 1
dims = 2
# Gaussian samples
idepg = rng.normal(size=(n, dims))
self.assertTrue(effective_sample_size(idepg, burn_in=100) >= 2000)
        # Gaussian samples drawn by a random-walk Metropolis sampler;
        # `rate` counts the number of accepted proposals.
        mcmc = []
        current = np.array([0, 0])
        rate = 0
        for i in range(n):
            proposal = current + rng.normal(size=(dims)) * stride
            acceptance_rate = np.exp(
                np.minimum(0, -0.5 * np.sum(proposal ** 2 - current ** 2)))
            if rng.uniform() < acceptance_rate:
                current = proposal
                rate += 1
            mcmc.append(list(current))
mcmc = np.array(mcmc)
self.assertTrue(effective_sample_size(mcmc, burn_in=100) <= 1000)
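if __name__ == "__main__":
    # Conventional runner guard (an addition, not in the original file) so
    # this test module can also be executed directly rather than only via
    # a test runner.
    tf.test.main()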
|
thu-ml/zhusuan
|
tests/test_diagnostics.py
|
Python
|
mit
| 1,091
|
[
"Gaussian"
] |
32e599b04a9d3860b3f1dec3d7ee0f2b1ef9231a48b62236c7323d4ee4312e08
|
#!/usr/bin/env python
"""
Artificial Intelligence for Humans
Volume 2: Nature-Inspired Algorithms
Python Version
http://www.aifh.org
http://www.jeffheaton.com
Code repository:
https://github.com/jeffheaton/aifh
Copyright 2014 by Jeff Heaton
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
For more information on Heaton Research copyrights, licenses
and trademarks visit:
http://www.heatonresearch.com/copyright
"""
from alife_milestone2 import *
app = PlantBoxMilestone2()
|
PeterLauris/aifh
|
vol2/vol2-python-examples/examples/capstone_alife/run_milestone2.py
|
Python
|
apache-2.0
| 1,032
|
[
"VisIt"
] |
10e1c79c42df5dfd9a1ed1be4726e8d837fc3a5047cfbe8862589cce523d0e02
|
import Cython.Build
from setuptools import setup
from setuptools.extension import Extension
import numpy as np
# read the contents of your README file
from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
version = {}
with open("version.py") as fp:
exec(fp.read(), version)
setup(
name='omniCLIP',
version=version['__version__'],
description='omniCLIP is a CLIP-seq Bayesian peak caller.',
long_description=long_description,
long_description_content_type='text/markdown',
author='Philipp Boss',
author_email='philipp.drewe@googlemail.com',
url='https://github.com/philippdre/omniCLIP',
cmdclass={'build_ext': Cython.Build.build_ext},
package_dir={'omniCLIP': 'omniCLIP'},
packages=['omniCLIP', 'omniCLIP.data_parsing', 'omniCLIP.omni_stat'],
ext_modules=[Extension(
'omniCLIP.viterbi',
sources=['omniCLIP/omni_stat/viterbi.pyx'],
include_dirs=[np.get_include()],
)],
zip_safe=False,
entry_points={
'console_scripts': [
'omniCLIP = omniCLIP.omniCLIP:main'
]
},
classifiers=[
"Programming Language :: Python :: 3",
"Operating System :: OS Independent",
"License :: OSI Approved :: GNU General Public License (GPL)",
],
setup_requires=['numpy>=1.18', 'cython>=0.24.1'],
install_requires=[
'numpy>=1.18', 'nose>=0.11', 'cython>=0.24.1', 'h5py>=2.10.0',
'statsmodels>=0.11.0', 'scipy>=1.4.1', 'scikit-learn>=0.22.1',
'pysam>=0.15.3', 'pandas>=1.0.2', 'intervaltree>=3.0.2',
'gffutils>=0.10.1', 'biopython>=1.76'],
)
|
philippdre/omniCLIP
|
setup.py
|
Python
|
gpl-3.0
| 1,733
|
[
"Biopython",
"pysam"
] |
c835813c7bbe89eaa6d930e1611d8cf60cdaea3ff9da814672bc2b55ccf7d519
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RRandomfields(RPackage):
"""Simulation and Analysis of Random Fields
Methods for the inference on and the simulation of Gaussian fields are
provided, as well as methods for the simulation of extreme value random
fields. Main geostatistical parts are based on the books by Christian
Lantuejoul <doi:10.1007/978-3-662-04808-5>, Jean-Paul Chiles and Pierre
Delfiner <doi:10.1002/9781118136188> and Noel A. Cressie
<doi:10.1002/9781119115151>. For the extreme value random fields see
Oesting, Schlather, Schillings (2019) <doi.org/10.1002/sta4.228> and
Schlather (2002) <doi.org/10.1023/A:1020977924878>."""
homepage = "https://cloud.r-project.org/package=RandomFields"
url = "https://cloud.r-project.org/src/contrib/RandomFields_3.1.50.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/RandomFields"
version('3.3.8', sha256='8a08e2fdae428e354a29fb6818ae781cc56235a6849a0d29574dc756f73199d0')
version('3.3.6', sha256='51b7bfb4e5bd7fd0ce1207c77f428508a6cd3dfc9de01545a8724dfd9c050213')
version('3.3.4', sha256='a340d4f3ba7950d62acdfa19b9724c82e439d7b1a9f73340124038b7c90c73d4')
version('3.1.50', sha256='2d6a07c3a716ce20f9c685deb59e8fcc64fd52c8a50b0f04baf451b6b928e848')
depends_on('r@3.0:', type=('build', 'run'))
depends_on('r@3.5.0:', when='@3.3.8:', type=('build', 'run'))
depends_on('r-sp', type=('build', 'run'))
depends_on('r-randomfieldsutils@0.5.1:', type=('build', 'run'))
|
LLNL/spack
|
var/spack/repos/builtin/packages/r-randomfields/package.py
|
Python
|
lgpl-2.1
| 1,712
|
[
"Gaussian"
] |
cc59be1cf5b099a35b6234ce8a948a6960d5ea728ccaeb7abc234944a7976b7a
|
#!/usr/bin/env python
#
# $File: simuOpt.py $
# $LastChangedDate$
# $Rev$
#
# This file is part of simuPOP, a forward-time population genetics
# simulation environment. Please visit http://simupop.sourceforge.net
# for details.
#
# Copyright (C) 2004 - 2010 Bo Peng (bpeng@mdanderson.org)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
'''
Module ``simuOpt`` provides a function ``simuOpt.setOptions`` to control which
simuPOP module to load, and how it is loaded, and a class ``simuOpt.Params``
that helps users manage simulation parameters.
When simuPOP is loaded, it checks a few environmental variables
(``SIMUOPTIMIZED``, ``SIMUALLELETYPE``, and ``SIMUDEBUG``) to determine which
simuPOP module to load, and how to load it. More options can be set using the
``simuOpt.setOptions`` function. For example, you can suppress the banner
message when simuPOP is loaded and require a minimal version of simuPOP for
your script. simuPOP recognize the following commandline arguments
``--optimized``
Load the optimized version of a simuPOP module.
``--gui=None|batch|interactive|True|wxPython|Tkinter``
Whether or not use a graphical toolkit and which one to use.
``--gui=batch`` is usually used to run a script in batch mode (do not start
a parameter input dialog and use all default values unless a parameter is
specified from the command line or a configuration file). If
``--gui=interactive``, an interactive shell will be used to solicit input
from users. Otherwise, simuPOP will try to use a graphical parameter input
dialog, and falls back to an interactive mode when no graphical toolkit is
available. Please refer to parameter ``gui`` for ``simuOpt.setOptions``
for details.
class ``simuOpt.Params`` provides a powerful way to handle commandline
arguments. Briefly speaking, a ``Params`` object can be created from a list
of parameter specification dictionaries. The parameters then become
attributes of this object. A number of functions are provided to determine
values of these parameters using commandline arguments, a configuration
file, or a parameter input dialog (using ``Tkinter`` or ``wxPython``).
Values of these parameters can be accessed as attributes, or extracted
as a list or a dictionary. Note that the ``Params.getParam`` function
automatically handles the following commandline arguments.
``-h`` or ``--help``
Print usage message.
``--config=configFile``
Read parameters from a configuration file *configFile*.
'''
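# Illustrative usage (a sketch, not part of the original module): setOptions
# must be called before the first simuPOP import, e.g.
#
#   import simuOpt
#   simuOpt.setOptions(alleleType='long', optimized=True, numThreads=2)
#   import simuPOP as sim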
__all__ = [
'simuOptions',
'setOptions'
]
import os, sys, re, time, textwrap
#
# simuOptions that will be checked when simuPOP is loaded. This structure
# can be changed by function setOptions
#
simuOptions = {
'Optimized': False,
'AlleleType': 'short',
'Debug': [],
'Quiet': False,
'Version': None,
'Revision': None,
'GUI': True,
'Plotter': None,
'NumThreads': 1,
}
# Optimized: command line option --optimized or environmental variable SIMUOPTIMIZED
if '--optimized' in sys.argv or os.getenv('SIMUOPTIMIZED') is not None:
simuOptions['Optimized'] = True
# AlleleType: from environmental variable SIMUALLELETYPE
if os.getenv('SIMUALLELETYPE') in ['short', 'long', 'binary', 'mutant', 'lineage']:
simuOptions['AlleleType'] = os.getenv('SIMUALLELETYPE')
elif os.getenv('SIMUALLELETYPE') is not None:
print('Environmental variable SIMUALLELETYPE can only be short, long, binary, mutant, or lineage.')
# Debug: from environmental variable SIMUDEBUG
if os.getenv('SIMUDEBUG') is not None:
simuOptions['Debug'].extend(os.getenv('SIMUDEBUG').split(','))
# openMP number of threads
if os.getenv('OMP_NUM_THREADS') is not None:
try:
simuOptions['NumThreads'] = int(os.getenv('OMP_NUM_THREADS'))
except:
print('Ignoring invalid value for environmental variable OMP_NUM_THREADS')
# GUI: from environmental variable SIMUGUI
if os.getenv('SIMUGUI') is not None:
_gui = os.getenv('SIMUGUI')
elif '--gui' in sys.argv:
if sys.argv[-1] == '--gui':
        raise ValueError('A value is expected for command line option --gui')
_gui = sys.argv[sys.argv.index('--gui') + 1]
elif True in [x.startswith('--gui=') for x in sys.argv]:
_gui = sys.argv[[x.startswith('--gui=') for x in sys.argv].index(True)][len('--gui='):]
else:
_gui = None
if _gui in ['True', 'true', '1']:
simuOptions['GUI'] = True
elif _gui in ['False', 'false', '0']:
simuOptions['GUI'] = False
elif _gui in ['wxPython', 'Tkinter', 'batch', 'interactive']:
simuOptions['GUI'] = _gui
elif _gui is not None:
print("Invalid value '%s' for environmental variable SIMUGUI or commandline option --gui." % _gui)
def setOptions(alleleType=None, optimized=None, gui=None, quiet=None,
debug=None, version=None, revision=None, numThreads=None, plotter=None):
'''Set options before simuPOP is loaded to control which simuPOP module to
load, and how the module should be loaded.
alleleType
    Use the standard (short), binary, long, mutant, or lineage version of the simuPOP
module if ``alleleType`` is set to 'short', 'binary', 'long', 'mutant',
or 'lineage' respectively. If this parameter is not set, this function
will try to get its value from environmental variable ``SIMUALLELETYPE``.
The standard (short) module will be used if the environmental variable
is not defined.
optimized
Load the optimized version of a module if this parameter is set to
``True`` and the standard version if it is set to ``False``. If this
parameter is not set (``None``), the optimized version will be used
if environmental variable ``SIMUOPTIMIZED`` is defined. The standard
version will be used otherwise.
gui
Whether or not use graphical user interfaces, which graphical toolkit
to use and how to process parameters in non-GUI mode. If this parameter
is ``None`` (default), this function will check environmental variable
``SIMUGUI`` or commandline option ``--gui`` for a value, and assume
``True`` if such an option is unavailable. If ``gui=True``, simuPOP
will use ``wxPython``-based dialogs if ``wxPython`` is available, and
use ``Tkinter``-based dialogs if ``Tkinter`` is available and use an
interactive shell if no graphical toolkit is available.
``gui='Tkinter'`` or ``'wxPython'`` can be used to specify the
graphical toolkit to use. If ``gui='interactive'``, a simuPOP script
    prompts users to input values of parameters. If ``gui='batch'``,
default values of unspecified parameters will be used. In any case,
commandline arguments and a configuration file specified by parameter
--config will be processed. This option is usually left to ``None`` so
that the same script can be run in both GUI and batch mode using
commandline option ``--gui``.
plotter
(Deprecated)
quiet
If set to ``True``, suppress the banner message when a simuPOP module
is loaded.
debug
A list of debug code (as string) that will be turned on when simuPOP
is loaded. If this parameter is not set, a list of comma separated
debug code specified in environmental variable ``SIMUDEBUG``, if
available, will be used. Note that setting ``debug=[]`` will remove
    any debug code that might have been set by the environmental variable ``SIMUDEBUG``.
version
A version string (e.g. 1.0.0) indicating the required version number
for the simuPOP module to be loaded. simuPOP will fail to load if the
installed version is older than the required version.
revision
Obsolete with the introduction of parameter version.
numThreads
    Number of threads that will be used to execute a simuPOP script. The
    value can be a positive number (number of threads) or 0 (all available
cores of the computer, or whatever number set by environmental variable
``OMP_NUM_THREADS``). If this parameter is not set, the number of
threads will be set to 1, or a value set by environmental variable
``OMP_NUM_THREADS``.
'''
# if the module has already been imported, check which module
# was imported
try:
_imported = sys.modules['simuPOP'].moduleInfo()
except Exception as e:
_imported = {}
# Allele type
if alleleType in ['long', 'binary', 'short', 'mutant', 'lineage']:
# if simuPOP has been imported and re-imported with a different module name
# the existing module will be used so moduleInfo() will return a different
# module type from what is specified in simuOptions.
if _imported and _imported['alleleType'] != alleleType:
raise ImportError(('simuPOP has already been imported with allele type %s (%s) and cannot be '
're-imported with allele type %s. Please make sure you import module simuOpt before '
'any simuPOP module is imported.') % (
_imported['alleleType'], ('optimized' if _imported['optimized'] else 'standard'),
alleleType))
simuOptions['AlleleType'] = alleleType
elif alleleType is not None:
raise TypeError('Parameter alleleType can be either short, long, binary, mutant or lineage.')
# Optimized
if optimized in [True, False]:
# if simuPOP has been imported and re-imported with a different module name
# the existing module will be used so moduleInfo() will return a different
# module type from what is specified in simuOptions.
if _imported and _imported['optimized'] != optimized:
raise ImportError(('simuPOP has already been imported with allele type %s (%s) and cannot be '
're-imported in %s mode. Please make sure you import module simuOpt before '
'any simuPOP module is imported.') % (
_imported['alleleType'], ('optimized' if _imported['optimized'] else 'standard'),
'optimized' if optimized else 'standard'))
simuOptions['Optimized'] = optimized
elif optimized is not None:
raise TypeError('Parameter optimized can be either True or False.')
# Graphical toolkit
    if gui in [True, False, 'wxPython', 'Tkinter', 'batch', 'interactive']:
        simuOptions['GUI'] = gui
    elif gui is not None:
        raise TypeError('Parameter gui can be True/False, wxPython, Tkinter, batch, or interactive.')
# Quiet
if quiet in [True, False]:
simuOptions['Quiet'] = quiet
elif quiet is not None:
raise TypeError('Parameter quiet can be either True or False.')
# Debug
if debug is not None:
        if isinstance(debug, str):
simuOptions['Debug'] = [debug]
else:
simuOptions['Debug'] = debug
# Version
    if isinstance(version, str):
        try:
            major, minor, release = [int(x) for x in re.sub(r'\D', ' ', version).split()]
        except:
            print('Invalid version string %s' % version)
simuOptions['Version'] = version
elif version is not None:
raise TypeError('A version string is expected for parameter version.')
# Revision
    if isinstance(revision, int):
simuOptions['Revision'] = revision
elif revision is not None:
raise TypeError('A revision number is expected for parameter revision.')
# NumThreads
    if isinstance(numThreads, int):
simuOptions['NumThreads'] = numThreads
elif numThreads is not None:
raise TypeError('An integer number is expected for parameter numThreads.')
if plotter is not None:
sys.stderr.write('WARNING: plotter option is deprecated because of the removal of rpy/rpy2 support\n')
|
BoPeng/simuPOP
|
simuOpt.py
|
Python
|
gpl-2.0
| 12,392
|
[
"VisIt"
] |
97bec8ae1d4bc74d2b4a62c6a6f757443607ab73e071a8d270195d5919bfd14f
|
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
There are two options to call wannier90 in PySCF. One is the pyWannier90.py
interface as implemented in this file.
(1)
pyWannier90: Wannier90 for PySCF
Hung Q. Pham
email: pqh3.14@gmail.com
(2)
Another wannier90 python interface is available on the repo:
https://github.com/zhcui/wannier90
Contact its author "Zhihao Cui" <zcui@caltech.edu> for more details of
installation and implementations.
'''
# This is the only place needed to be modified
# The path for the libwannier90 library
W90LIB = 'libwannier90-path'
import numpy as np
import scipy
import cmath, os
import pyscf.data.nist as param
from pyscf import lib
from pyscf.pbc import df
from pyscf.pbc.dft import gen_grid, numint
import sys
sys.path.append(W90LIB)
import importlib
found = importlib.find_loader('libwannier90') is not None
if found:
    import libwannier90
else:
    print('WARNING: Check the installation of libwannier90 and its path in pyscf/pbc/tools/pywannier90.py')
    print('libwannier90 path: ' + W90LIB)
    print('libwannier90 can be found at: https://github.com/hungpham2017/pyWannier90')
    raise ImportError('libwannier90 not found')
def save_kmf(kmf, chkfile):
''' Save a wavefunction'''
from pyscf.lib.chkfile import save
kpts = kmf.kpts
mo_energy_kpts = kmf.mo_energy_kpts
mo_coeff_kpts = kmf.mo_coeff_kpts
scf_dic = { 'kpts' : kpts,
'mo_energy_kpts': mo_energy_kpts,
'mo_coeff_kpts' : mo_coeff_kpts}
save(chkfile, 'scf', scf_dic)
def load_kmf(chkfile):
''' Load a wavefunction'''
from pyscf.lib.chkfile import load
kmf = load(chkfile, 'scf')
class fake_kmf:
def __init__(self, kmf):
self.kpts = kmf['kpts']
self.mo_energy_kpts = kmf['mo_energy_kpts']
self.mo_coeff_kpts = kmf['mo_coeff_kpts']
kmf = fake_kmf(kmf)
return kmf
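# Illustrative round trip for the two helpers above (a sketch; the file name
# 'mf.chk' is arbitrary):
#   save_kmf(kmf, 'mf.chk')    # persist kpts, mo_energy_kpts, mo_coeff_kpts
#   kmf2 = load_kmf('mf.chk')  # lightweight stand-in accepted by class W90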
def angle(v1, v2):
    '''
    Return the angle (in radians) between v1 and v2
    '''
    v1 = np.asarray(v1)
    v2 = np.asarray(v2)
    cosa = v1.dot(v2) / np.linalg.norm(v1) / np.linalg.norm(v2)
    # Clip to [-1, 1] to guard against round-off before taking arccos.
    return np.arccos(np.clip(cosa, -1.0, 1.0))
def transform(x_vec, z_vec):
'''
Construct a transformation matrix to transform r_vec to the new coordinate system defined by x_vec and z_vec
'''
x_vec = x_vec/np.linalg.norm(np.asarray(x_vec))
z_vec = z_vec/np.linalg.norm(np.asarray(z_vec))
    assert abs(x_vec.dot(z_vec)) < 1e-8  # axes must be orthogonal (allow round-off)
y_vec = np.cross(x_vec,z_vec)
new = np.asarray([x_vec, y_vec, z_vec])
original = np.asarray([[1,0,0],[0,1,0],[0,0,1]])
tran_matrix = np.empty([3,3])
for row in range(3):
for col in range(3):
tran_matrix[row,col] = np.cos(angle(original[row],new[col]))
return tran_matrix.T
def cartesian_prod(arrays, out=None, order = 'C'):
'''
This function is similar to lib.cartesian_prod of PySCF, except the output can be in Fortran or in C order
'''
arrays = [np.asarray(x) for x in arrays]
dtype = np.result_type(*arrays)
nd = len(arrays)
dims = [nd] + [len(x) for x in arrays]
if out is None:
out = np.empty(dims, dtype)
else:
out = np.ndarray(dims, dtype, buffer=out)
tout = out.reshape(dims)
shape = [-1] + [1] * nd
for i, arr in enumerate(arrays):
tout[i] = arr.reshape(shape[:nd-i])
return tout.reshape((nd,-1),order=order).T
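# Illustrative behaviour (not part of the original module): with order='C'
# the last index varies fastest, e.g.
#   cartesian_prod([np.arange(2), np.arange(3)])
# gives [[0,0],[0,1],[0,2],[1,0],[1,1],[1,2]], while order='F' makes the
# first index vary fastest instead.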
def periodic_grid(cell, grid = [50,50,50], supercell = [1,1,1], order = 'C'):
'''
Generate a periodic grid for the unit/computational cell in F/C order
'''
ngrid = np.asarray(grid)
qv = cartesian_prod([np.arange(-ngrid[i]*(supercell[i]//2),ngrid[i]*((supercell[i]+1)//2)) for i in range(3)], order=order)
a_frac = np.einsum('i,ij->ij', 1./ngrid, cell.lattice_vectors())
coords = np.dot(qv, a_frac)
# Compute weight
ngrids = np.prod(grid)
ncells = np.prod(supercell)
weights = np.empty(ngrids*ncells)
weights[:] = cell.vol / ngrids / ncells
return coords, weights
def R_r(r_norm, r = 1, zona = 1):
r'''
Radial functions used to compute \Theta_{l,m_r}(\theta,\phi)
'''
if r == 1:
R_r = 2 * zona**(3/2) * np.exp(-zona*r_norm)
elif r == 2:
R_r = 1 / 2 / np.sqrt(2) * zona**(3/2) * (2 - zona*r_norm) * np.exp(-zona*r_norm/2)
else:
R_r = np.sqrt(4/27) * zona**(3/2) * (1 - 2*zona*r_norm/3 + 2*(zona**2)*(r_norm**2)/27) * np.exp(-zona*r_norm/3)
return R_r
def theta(func, cost, phi):
r'''
Basic angular functions (s,p,d,f) used to compute \Theta_{l,m_r}(\theta,\phi)
'''
if func == 's': # s
theta = 1 / np.sqrt(4 * np.pi) * np.ones([cost.shape[0]])
elif func == 'pz':
theta = np.sqrt(3 / 4 / np.pi) * cost
elif func == 'px':
sint = np.sqrt(1 - cost**2)
theta = np.sqrt(3 / 4 / np.pi) * sint * np.cos(phi)
elif func == 'py':
sint = np.sqrt(1 - cost**2)
theta = np.sqrt(3 / 4 / np.pi) * sint * np.sin(phi)
elif func == 'dz2':
theta = np.sqrt(5 / 16 / np.pi) * (3*cost**2 - 1)
elif func == 'dxz':
sint = np.sqrt(1 - cost**2)
theta = np.sqrt(15 / 4 / np.pi) * sint * cost * np.cos(phi)
elif func == 'dyz':
sint = np.sqrt(1 - cost**2)
theta = np.sqrt(15 / 4 / np.pi) * sint * cost * np.sin(phi)
elif func == 'dx2-y2':
sint = np.sqrt(1 - cost**2)
theta = np.sqrt(15 / 16 / np.pi) * (sint**2) * np.cos(2*phi)
    elif func == 'pxy': # dxy angular part, despite the internal name
sint = np.sqrt(1 - cost**2)
theta = np.sqrt(15 / 16 / np.pi) * (sint**2) * np.sin(2*phi)
elif func == 'fz3':
theta = np.sqrt(7) / 4 / np.sqrt(np.pi) * (5*cost**3 - 3*cost)
elif func == 'fxz2':
sint = np.sqrt(1 - cost**2)
theta = np.sqrt(21) / 4 / np.sqrt(2*np.pi) * (5*cost**2 - 1) * sint * np.cos(phi)
elif func == 'fyz2':
sint = np.sqrt(1 - cost**2)
theta = np.sqrt(21) / 4 / np.sqrt(2*np.pi) * (5*cost**2 - 1) * sint * np.sin(phi)
elif func == 'fz(x2-y2)':
sint = np.sqrt(1 - cost**2)
theta = np.sqrt(105) / 4 / np.sqrt(np.pi) * sint**2 * cost * np.cos(2*phi)
elif func == 'fxyz':
sint = np.sqrt(1 - cost**2)
theta = np.sqrt(105) / 4 / np.sqrt(np.pi) * sint**2 * cost * np.sin(2*phi)
elif func == 'fx(x2-3y2)':
sint = np.sqrt(1 - cost**2)
theta = np.sqrt(35) / 4 / np.sqrt(2*np.pi) * sint**3 * (np.cos(phi)**2 - 3*np.sin(phi)**2) * np.cos(phi)
elif func == 'fy(3x2-y2)':
sint = np.sqrt(1 - cost**2)
theta = np.sqrt(35) / 4 / np.sqrt(2*np.pi) * sint**3 * (3*np.cos(phi)**2 - np.sin(phi)**2) * np.sin(phi)
return theta
def theta_lmr(l, mr, cost, phi):
r'''
Compute the value of \Theta_{l,m_r}(\theta,\phi)
ref: Table 3.1 and 3.2 of Chapter 3, wannier90 User Guide
'''
assert l in [0,1,2,3,-1,-2,-3,-4,-5]
assert mr in [1,2,3,4,5,6,7]
if l == 0: # s
theta_lmr = theta('s', cost, phi)
elif (l == 1) and (mr == 1): # pz
theta_lmr = theta('pz', cost, phi)
elif (l == 1) and (mr == 2): # px
theta_lmr = theta('px', cost, phi)
elif (l == 1) and (mr == 3): # py
theta_lmr = theta('py', cost, phi)
elif (l == 2) and (mr == 1): # dz2
theta_lmr = theta('dz2', cost, phi)
elif (l == 2) and (mr == 2): # dxz
theta_lmr = theta('dxz', cost, phi)
elif (l == 2) and (mr == 3): # dyz
theta_lmr = theta('dyz', cost, phi)
elif (l == 2) and (mr == 4): # dx2-y2
theta_lmr = theta('dx2-y2', cost, phi)
    elif (l == 2) and (mr == 5): # dxy (computed by the 'pxy' branch of theta)
theta_lmr = theta('pxy', cost, phi)
elif (l == 3) and (mr == 1): # fz3
theta_lmr = theta('fz3', cost, phi)
elif (l == 3) and (mr == 2): # fxz2
theta_lmr = theta('fxz2', cost, phi)
elif (l == 3) and (mr == 3): # fyz2
theta_lmr = theta('fyz2', cost, phi)
elif (l == 3) and (mr == 4): # fz(x2-y2)
theta_lmr = theta('fz(x2-y2)', cost, phi)
elif (l == 3) and (mr == 5): # fxyz
theta_lmr = theta('fxyz', cost, phi)
elif (l == 3) and (mr == 6): # fx(x2-3y2)
theta_lmr = theta('fx(x2-3y2)', cost, phi)
elif (l == 3) and (mr == 7): # fy(3x2-y2)
theta_lmr = theta('fy(3x2-y2)', cost, phi)
elif (l == -1) and (mr == 1): # sp-1
theta_lmr = 1/np.sqrt(2) * (theta('s', cost, phi) + theta('px', cost, phi))
elif (l == -1) and (mr == 2): # sp-2
theta_lmr = 1/np.sqrt(2) * (theta('s', cost, phi) - theta('px', cost, phi))
elif (l == -2) and (mr == 1): # sp2-1
theta_lmr = 1/np.sqrt(3) * theta('s', cost, phi) - 1/np.sqrt(6) *theta('px', cost, phi) + 1/np.sqrt(2) * theta('py', cost, phi)
elif (l == -2) and (mr == 2): # sp2-2
theta_lmr = 1/np.sqrt(3) * theta('s', cost, phi) - 1/np.sqrt(6) *theta('px', cost, phi) - 1/np.sqrt(2) * theta('py', cost, phi)
elif (l == -2) and (mr == 3): # sp2-3
theta_lmr = 1/np.sqrt(3) * theta('s', cost, phi) + 2/np.sqrt(6) *theta('px', cost, phi)
elif (l == -3) and (mr == 1): # sp3-1
theta_lmr = 1/2 * (theta('s', cost, phi) + theta('px', cost, phi) + theta('py', cost, phi) + theta('pz', cost, phi))
elif (l == -3) and (mr == 2): # sp3-2
theta_lmr = 1/2 * (theta('s', cost, phi) + theta('px', cost, phi) - theta('py', cost, phi) - theta('pz', cost, phi))
elif (l == -3) and (mr == 3): # sp3-3
theta_lmr = 1/2 * (theta('s', cost, phi) - theta('px', cost, phi) + theta('py', cost, phi) - theta('pz', cost, phi))
elif (l == -3) and (mr == 4): # sp3-4
theta_lmr = 1/2 * (theta('s', cost, phi) - theta('px', cost, phi) - theta('py', cost, phi) + theta('pz', cost, phi))
elif (l == -4) and (mr == 1): # sp3d-1
theta_lmr = 1/np.sqrt(3) * theta('s', cost, phi) - 1/np.sqrt(6) *theta('px', cost, phi) + 1/np.sqrt(2) * theta('py', cost, phi)
elif (l == -4) and (mr == 2): # sp3d-2
theta_lmr = 1/np.sqrt(3) * theta('s', cost, phi) - 1/np.sqrt(6) *theta('px', cost, phi) - 1/np.sqrt(2) * theta('py', cost, phi)
elif (l == -4) and (mr == 3): # sp3d-3
theta_lmr = 1/np.sqrt(3) * theta('s', cost, phi) + 2/np.sqrt(6) * theta('px', cost, phi)
elif (l == -4) and (mr == 4): # sp3d-4
        theta_lmr = 1/np.sqrt(2) * (theta('pz', cost, phi) + theta('dz2', cost, phi))
    elif (l == -4) and (mr == 5): # sp3d-5
        theta_lmr = 1/np.sqrt(2) * (-theta('pz', cost, phi) + theta('dz2', cost, phi))
elif (l == -5) and (mr == 1): # sp3d2-1
theta_lmr = 1/np.sqrt(6) * theta('s', cost, phi) - 1/np.sqrt(2) *theta('px', cost, phi) - 1/np.sqrt(12) *theta('dz2', cost, phi) \
+ 1/2 *theta('dx2-y2', cost, phi)
elif (l == -5) and (mr == 2): # sp3d2-2
theta_lmr = 1/np.sqrt(6) * theta('s', cost, phi) + 1/np.sqrt(2) *theta('px', cost, phi) - 1/np.sqrt(12) *theta('dz2', cost, phi) \
+ 1/2 *theta('dx2-y2', cost, phi)
elif (l == -5) and (mr == 3): # sp3d2-3
theta_lmr = 1/np.sqrt(6) * theta('s', cost, phi) - 1/np.sqrt(2) *theta('py', cost, phi) - 1/np.sqrt(12) *theta('dz2', cost, phi) \
- 1/2 *theta('dx2-y2', cost, phi)
elif (l == -5) and (mr == 4): # sp3d2-4
theta_lmr = 1/np.sqrt(6) * theta('s', cost, phi) + 1/np.sqrt(2) *theta('py', cost, phi) - 1/np.sqrt(12) *theta('dz2', cost, phi) \
- 1/2 *theta('dx2-y2', cost, phi)
elif (l == -5) and (mr == 5): # sp3d2-5
theta_lmr = 1/np.sqrt(6) * theta('s', cost, phi) - 1/np.sqrt(2) *theta('pz', cost, phi) + 1/np.sqrt(3) *theta('dz2', cost, phi)
elif (l == -5) and (mr == 6): # sp3d2-6
theta_lmr = 1/np.sqrt(6) * theta('s', cost, phi) + 1/np.sqrt(2) *theta('pz', cost, phi) + 1/np.sqrt(3) *theta('dz2', cost, phi)
return theta_lmr
def g_r(grids_coor, site, l, mr, r, zona, x_axis = [1,0,0], z_axis = [0,0,1], unit = 'B'):
r'''
Evaluate the projection function g(r) or \Theta_{l,m_r}(\theta,\phi) on a grid
ref: Chapter 3, wannier90 User Guide
Attributes:
grids_coor : a grids for the cell of interest
        site : absolute coordinate (in Bohr/Angstrom) of the g(r) in the cell
l, mr : l and mr value in the Table 3.1 and 3.2 of the ref
Return:
theta_lmr : an array (ngrid, value) of g(r)
'''
unit_conv = 1
if unit == 'A': unit_conv = param.BOHR
r_vec = (grids_coor - site)
r_vec = np.einsum('iv,uv ->iu', r_vec, transform(x_axis, z_axis))
r_norm = np.linalg.norm(r_vec,axis=1)
    if (r_norm < 1e-8).any():
r_vec = (grids_coor - site - 1e-5)
r_vec = np.einsum('iv,uv ->iu', r_vec, transform(x_axis, z_axis))
r_norm = np.linalg.norm(r_vec,axis=1)
cost = r_vec[:,2]/r_norm
phi = np.empty_like(r_norm)
for point in range(phi.shape[0]):
if r_vec[point,0] > 1e-8:
phi[point] = np.arctan(r_vec[point,1]/r_vec[point,0])
elif r_vec[point,0] < -1e-8:
phi[point] = np.arctan(r_vec[point,1]/r_vec[point,0]) + np.pi
else:
phi[point] = np.sign(r_vec[point,1]) * 0.5 * np.pi
return theta_lmr(l, mr, cost, phi) * R_r(r_norm * unit_conv, r = r, zona = zona)
class W90:
def __init__(self, kmf, cell, mp_grid, num_wann, gamma = False, spinors = False, spin_up = None, other_keywords = None):
        if isinstance(kmf, str):
self.kmf = load_kmf(kmf)
else:
self.kmf = kmf
self.cell = cell
self.num_wann = num_wann
self.keywords = other_keywords
# Collect the pyscf calculation info
self.num_bands_tot = self.cell.nao_nr()
self.num_kpts_loc = self.kmf.kpts.shape[0]
self.mp_grid_loc = mp_grid
assert self.num_kpts_loc == np.asarray(self.mp_grid_loc).prod()
self.real_lattice_loc = self.cell.lattice_vectors() * param.BOHR
self.recip_lattice_loc = self.cell.reciprocal_vectors() / param.BOHR
self.kpt_latt_loc = self.cell.get_scaled_kpts(self.kmf.kpts)
self.num_atoms_loc = self.cell.natm
self.atom_symbols_loc = [atom[0] for atom in self.cell._atom]
self.atom_atomic_loc = [int(self.cell._atm[atom][0] + self.cell.atom_nelec_core(atom)) for atom in range(self.num_atoms_loc)]
self.atoms_cart_loc = np.asarray([(np.asarray(atom[1])* param.BOHR).tolist() for atom in self.cell._atom])
        self.gamma_only, self.spinors = (0, 0)
        if gamma: self.gamma_only = 1
        if spinors: self.spinors = 1
# Wannier90_setup outputs
self.num_bands_loc = None
self.num_wann_loc = None
self.nntot_loc = None
self.nn_list = None
self.proj_site = None
self.proj_l = None
        self.proj_m = None
self.proj_radial = None
self.proj_z = None
self.proj_x = None
self.proj_zona = None
self.exclude_bands = None
self.proj_s = None
self.proj_s_qaxis = None
# Input for Wannier90_run
self.band_included_list = None
self.A_matrix_loc = None
self.M_matrix_loc = None
self.eigenvalues_loc = None
# Wannier90_run outputs
self.U_matrix = None
self.U_matrix_opt = None
self.lwindow = None
self.wann_centres = None
self.wann_spreads = None
self.spread = None
# Others
self.use_bloch_phases = False
self.check_complex = False
self.spin_up = spin_up
if np.mod(self.cell.nelectron,2) !=0:
if spin_up == True:
self.mo_energy_kpts = self.kmf.mo_energy_kpts[0]
self.mo_coeff_kpts = self.kmf.mo_coeff_kpts[0]
else:
self.mo_energy_kpts = self.kmf.mo_energy_kpts[1]
self.mo_coeff_kpts = self.kmf.mo_coeff_kpts[1]
else:
self.mo_energy_kpts = self.kmf.mo_energy_kpts
self.mo_coeff_kpts = self.kmf.mo_coeff_kpts
def kernel(self):
'''
Main kernel for pyWannier90
'''
self.make_win()
self.setup()
self.M_matrix_loc = self.get_M_mat()
self.A_matrix_loc = self.get_A_mat()
self.eigenvalues_loc = self.get_epsilon_mat()
self.run()
def make_win(self):
'''
Make a basic *.win file for wannier90
'''
win_file = open('wannier90.win', "w")
win_file.write('! Basic input\n')
win_file.write('\n')
win_file.write('num_bands = %d\n' % (self.num_bands_tot))
win_file.write('num_wann = %d\n' % (self.num_wann))
win_file.write('\n')
win_file.write('Begin Unit_Cell_Cart\n')
for row in range(3):
win_file.write('%10.7f %10.7f %10.7f\n' % (self.real_lattice_loc[0, row], self.real_lattice_loc[1, row], \
self.real_lattice_loc[2, row]))
win_file.write('End Unit_Cell_Cart\n')
win_file.write('\n')
win_file.write('Begin atoms_cart\n')
for atom in range(len(self.atom_symbols_loc)):
win_file.write('%s %7.7f %7.7f %7.7f\n' % (self.atom_symbols_loc[atom], self.atoms_cart_loc[atom,0], \
self.atoms_cart_loc[atom,1], self.atoms_cart_loc[atom,2]))
win_file.write('End atoms_cart\n')
win_file.write('\n')
        if self.use_bloch_phases: win_file.write('use_bloch_phases = T\n\n')
        if self.keywords is not None:
win_file.write('!Additional keywords\n')
win_file.write(self.keywords)
win_file.write('\n\n\n')
win_file.write('mp_grid = %d %d %d\n' % (self.mp_grid_loc[0], self.mp_grid_loc[1], self.mp_grid_loc[2]))
if self.gamma_only == 1: win_file.write('gamma_only : true\n')
win_file.write('begin kpoints\n')
for kpt in range(self.num_kpts_loc):
win_file.write('%7.7f %7.7f %7.7f\n' % (self.kpt_latt_loc[kpt][0], self.kpt_latt_loc[kpt][1], self.kpt_latt_loc[kpt][2]))
win_file.write('End Kpoints\n')
win_file.close()
def get_M_mat(self):
        r'''
        Construct the overlap matrix: M_{m,n}^{(\mathbf{k,b})}
        Equation (25) in MV, Phys. Rev. B 56, 12847
        '''
M_matrix_loc = np.empty([self.num_kpts_loc, self.nntot_loc, self.num_bands_loc, self.num_bands_loc], dtype = np.complex128)
for k_id in range(self.num_kpts_loc):
for nn in range(self.nntot_loc):
k1 = self.cell.get_abs_kpts(self.kpt_latt_loc[k_id])
k_id2 = self.nn_list[nn, k_id, 0] - 1
k2_ = self.kpt_latt_loc[k_id2]
k2_scaled = k2_ + self.nn_list[nn, k_id, 1:4]
k2 = self.cell.get_abs_kpts(k2_scaled)
s_AO = df.ft_ao.ft_aopair(self.cell, -k2+k1, kpti_kptj=[k2,k1], q = np.zeros(3))[0]
Cm = self.mo_coeff_kpts[k_id][:,self.band_included_list]
Cn = self.mo_coeff_kpts[k_id2][:,self.band_included_list]
M_matrix_loc[k_id, nn,:,:] = np.einsum('nu,vm,uv->nm', Cn.T.conj(), Cm, s_AO, optimize = True).conj()
return M_matrix_loc
def get_A_mat(self):
        r'''
Construct the projection matrix: A_{m,n}^{\mathbf{k}}
Equation (62) in MV, Phys. Rev. B 56, 12847 or equation (22) in SMV, Phys. Rev. B 65, 035109
'''
A_matrix_loc = np.empty([self.num_kpts_loc, self.num_wann_loc, self.num_bands_loc], dtype = np.complex128)
        if self.use_bloch_phases:
Amn = np.zeros([self.num_wann_loc, self.num_bands_loc])
np.fill_diagonal(Amn, 1)
A_matrix_loc[:,:,:] = Amn
else:
from pyscf.dft import numint,gen_grid
grids = gen_grid.Grids(self.cell).build()
coords = grids.coords
weights = grids.weights
for ith_wann in range(self.num_wann_loc):
frac_site = self.proj_site[ith_wann]
abs_site = frac_site.dot(self.real_lattice_loc) / param.BOHR
l = self.proj_l[ith_wann]
mr = self.proj_m[ith_wann]
r = self.proj_radial[ith_wann]
zona = self.proj_zona[ith_wann]
x_axis = self.proj_x[ith_wann]
z_axis = self.proj_z[ith_wann]
gr = g_r(coords, abs_site, l, mr, r, zona, x_axis, z_axis, unit = 'B')
ao_L0 = numint.eval_ao(self.cell, coords)
s_aoL0_g = np.einsum('i,i,iv->v', weights, gr, ao_L0, optimize = True)
for k_id in range(self.num_kpts_loc):
kpt = self.cell.get_abs_kpts(self.kpt_latt_loc[k_id])
mo_included = self.mo_coeff_kpts[k_id][:,self.band_included_list]
s_kpt = self.cell.pbc_intor('int1e_ovlp', hermi=1, kpts=kpt, pbcopt=lib.c_null_ptr())
s_ao = np.einsum('uv,v->u', s_kpt, s_aoL0_g, optimize = True)
A_matrix_loc[k_id,ith_wann,:] = np.einsum('v,vu,um->m', s_aoL0_g, s_kpt, mo_included, optimize = True).conj()
return A_matrix_loc
def get_epsilon_mat(self):
r'''
Construct the eigenvalues matrix: \epsilon_{n}^(\mathbf{k})
'''
return np.asarray(self.mo_energy_kpts)[:,self.band_included_list] * param.HARTREE2EV
def setup(self):
'''
Execute the Wannier90_setup
'''
seed__name = "wannier90"
real_lattice_loc = self.real_lattice_loc.T.flatten()
recip_lattice_loc = self.recip_lattice_loc.T.flatten()
kpt_latt_loc = self.kpt_latt_loc.flatten()
atoms_cart_loc = self.atoms_cart_loc.flatten()
bands_wann_nntot, nn_list, proj_site, proj_l, proj_m, proj_radial, \
proj_z, proj_x, proj_zona, exclude_bands, proj_s, proj_s_qaxis = \
libwannier90.setup(seed__name, self.mp_grid_loc, self.num_kpts_loc, real_lattice_loc, \
recip_lattice_loc, kpt_latt_loc, self.num_bands_tot, self.num_atoms_loc, \
self.atom_atomic_loc, atoms_cart_loc, self.gamma_only, self.spinors)
# Convert outputs to the correct data type
self.num_bands_loc, self.num_wann_loc, self.nntot_loc = np.int32(bands_wann_nntot)
self.nn_list = np.int32(nn_list)
self.proj_site = proj_site
self.proj_l = np.int32(proj_l)
self.proj_m = np.int32(proj_m)
self.proj_radial = np.int32(proj_radial)
self.proj_z = proj_z
self.proj_x = proj_x
self.proj_zona = proj_zona
self.exclude_bands = np.int32(exclude_bands)
self.band_included_list = [i for i in range(self.num_bands_tot) if (i + 1) not in self.exclude_bands]
self.proj_s = np.int32(proj_s)
self.proj_s_qaxis = proj_s_qaxis
def run(self):
'''
Execute the Wannier90_run
'''
        assert self.num_wann_loc is not None
        assert isinstance(self.M_matrix_loc, np.ndarray)
        assert isinstance(self.A_matrix_loc, np.ndarray)
        assert isinstance(self.eigenvalues_loc, np.ndarray)
seed__name = "wannier90"
real_lattice_loc = self.real_lattice_loc.T.flatten()
recip_lattice_loc = self.recip_lattice_loc.T.flatten()
kpt_latt_loc = self.kpt_latt_loc.flatten()
atoms_cart_loc = self.atoms_cart_loc.flatten()
M_matrix_loc = self.M_matrix_loc.flatten()
A_matrix_loc = self.A_matrix_loc.flatten()
eigenvalues_loc = self.eigenvalues_loc.flatten()
U_matrix, U_matrix_opt, lwindow, wann_centres, wann_spreads, spread = \
libwannier90.run(seed__name, self.mp_grid_loc, self.num_kpts_loc, real_lattice_loc, \
recip_lattice_loc, kpt_latt_loc, self.num_bands_tot, self.num_bands_loc, self.num_wann_loc, self.nntot_loc, self.num_atoms_loc, \
self.atom_atomic_loc, atoms_cart_loc, self.gamma_only, \
M_matrix_loc, A_matrix_loc, eigenvalues_loc)
        # Convert outputs to the correct data type
self.U_matrix = U_matrix
self.U_matrix_opt = U_matrix_opt
lwindow = np.int32(lwindow.real)
self.lwindow = (lwindow == 1)
self.wann_centres = wann_centres.real
self.wann_spreads = wann_spreads.real
self.spread = spread.real
def export_unk(self, grid = [50,50,50]):
'''
Export the periodic part of BF in a real space grid for plotting with wannier90
'''
from scipy.io import FortranFile
grids_coor, weights = periodic_grid(self.cell, grid, order = 'F')
for k_id in range(self.num_kpts_loc):
spin = '.1'
            if self.spin_up is False: spin = '.2'
kpt = self.cell.get_abs_kpts(self.kpt_latt_loc[k_id])
ao = numint.eval_ao(self.cell, grids_coor, kpt = kpt)
u_ao = np.einsum('x,xi->xi', np.exp(-1j*np.dot(grids_coor, kpt)), ao, optimize = True)
unk_file = FortranFile('UNK' + "%05d" % (k_id + 1) + spin, 'w')
unk_file.write_record(np.asarray([grid[0], grid[1], grid[2], k_id + 1, self.num_bands_loc], dtype = np.int32))
mo_included = self.mo_coeff_kpts[k_id][:,self.band_included_list]
u_mo = np.einsum('xi,in->xn', u_ao, mo_included, optimize = True)
for band in range(len(self.band_included_list)):
unk_file.write_record(np.asarray(u_mo[:,band], dtype = np.complex128))
unk_file.close()
def export_AME(self, grid = [50,50,50]):
        r'''
        Export A_{m,n}^{\mathbf{k}} and M_{m,n}^{(\mathbf{k,b})} and \epsilon_{n}^(\mathbf{k})
        '''
        if self.A_matrix_loc is None:
self.make_win()
self.setup()
self.M_matrix_loc = self.get_M_mat()
self.A_matrix_loc = self.get_A_mat()
self.eigenvalues_loc = self.get_epsilon_mat()
            self.export_unk(grid = grid)
with open('wannier90.mmn', 'w') as f:
f.write('Generated by the pyWannier90\n')
f.write(' %d %d %d\n' % (self.num_bands_loc, self.num_kpts_loc, self.nntot_loc))
for k_id in range(self.num_kpts_loc):
for nn in range(self.nntot_loc):
k_id1 = k_id + 1
k_id2 = self.nn_list[nn, k_id, 0]
nnn, nnm, nnl = self.nn_list[nn, k_id, 1:4]
f.write(' %d %d %d %d %d\n' % (k_id1, k_id2, nnn, nnm, nnl))
for m in range(self.num_bands_loc):
for n in range(self.num_bands_loc):
f.write(' %22.18f %22.18f\n' % (self.M_matrix_loc[k_id, nn,m,n].real, self.M_matrix_loc[k_id, nn,m,n].imag))
with open('wannier90.amn', 'w') as f:
f.write(' %d\n' % (self.num_bands_loc*self.num_kpts_loc*self.num_wann_loc))
f.write(' %d %d %d\n' % (self.num_bands_loc, self.num_kpts_loc, self.num_wann_loc))
for k_id in range(self.num_kpts_loc):
for ith_wann in range(self.num_wann_loc):
for band in range(self.num_bands_loc):
f.write(' %d %d %d %22.18f %22.18f\n' % (band+1, ith_wann+1, k_id+1, self.A_matrix_loc[k_id,ith_wann,band].real, self.A_matrix_loc[k_id,ith_wann,band].imag))
with open('wannier90.eig', 'w') as f:
for k_id in range(self.num_kpts_loc):
for band in range(self.num_bands_loc):
f.write(' %d %d %22.18f\n' % (band+1, k_id+1, self.eigenvalues_loc[k_id,band]))
def get_wannier(self, supercell = [1,1,1], grid = [50,50,50]):
'''
Evaluate the MLWF using a periodic grid
'''
grids_coor, weights = periodic_grid(self.cell, grid, supercell = [1,1,1], order = 'C')
kpts = self.cell.get_abs_kpts(self.kpt_latt_loc)
ao_kpts = np.asarray([numint.eval_ao(self.cell, grids_coor, kpt = kpt) for kpt in kpts])
u_mo = []
for k_id in range(self.num_kpts_loc):
mo_included = self.mo_coeff_kpts[k_id][:,self.band_included_list]
mo_in_window = self.lwindow[k_id]
C_opt = mo_included[:,mo_in_window].dot(self.U_matrix_opt[k_id].T)
C_tildle = C_opt.dot(self.U_matrix[k_id].T)
kpt = kpts[k_id]
ao = numint.eval_ao(self.cell, grids_coor, kpt = kpt)
u_ao = np.einsum('x,xi->xi', np.exp(-1j*np.dot(grids_coor, kpt)), ao, optimize = True)
u_mo.append(np.einsum('xi,in->xn', u_ao, C_tildle, optimize = True))
u_mo = np.asarray(u_mo)
WF0 = libwannier90.get_WF0s(self.kpt_latt_loc.shape[0],self.kpt_latt_loc, supercell, grid, u_mo)
# Fix the global phase following the pw2wannier90 procedure
max_index = (WF0*WF0.conj()).real.argmax(axis=0)
norm_wfs = np.diag(WF0[max_index,:])
norm_wfs = norm_wfs/np.absolute(norm_wfs)
WF0 = WF0/norm_wfs/self.num_kpts_loc
# Check the 'reality' following the pw2wannier90 procedure
for WF_id in range(self.num_wann_loc):
ratio_max = np.abs(WF0[np.abs(WF0[:,WF_id].real) >= 0.01,WF_id].imag/WF0[np.abs(WF0[:,WF_id].real) >= 0.01,WF_id].real).max(axis=0)
print('The maximum imag/real for wannier function ', WF_id,' : ', ratio_max)
return WF0
def plot_wf(self, outfile = 'MLWF', wf_list = None, supercell = [1,1,1], grid = [50,50,50]):
'''
Export Wannier function at cell R
xsf format: http://web.mit.edu/xcrysden_v1.5.60/www/XCRYSDEN/doc/XSF.html
Attributes:
wf_list : a list of MLWFs to plot
supercell : a supercell used for plotting
'''
        if wf_list is None: wf_list = list(range(self.num_wann_loc))
grid = np.asarray(grid)
origin = np.asarray([-(grid[i]*(supercell[i]//2) + 1)/grid[i] for i in range(3)]).dot(self.cell.lattice_vectors().T)* param.BOHR
real_lattice_loc = (grid*supercell-1)/grid * self.cell.lattice_vectors() * param.BOHR
nx, ny, nz = grid*supercell
WF0 = self.get_wannier(supercell = supercell, grid = grid)
for wf_id in wf_list:
assert wf_id in list(range(self.num_wann_loc))
WF = WF0[:,wf_id].reshape(nx,ny,nz).real
with open(outfile + '-' + str(wf_id) + '.xsf', 'w') as f:
f.write('Generated by the pyWannier90\n\n')
f.write('CRYSTAL\n')
f.write('PRIMVEC\n')
for row in range(3):
f.write('%10.7f %10.7f %10.7f\n' % (self.real_lattice_loc[row,0], self.real_lattice_loc[row,1], \
self.real_lattice_loc[row,2]))
f.write('CONVVEC\n')
for row in range(3):
f.write('%10.7f %10.7f %10.7f\n' % (self.real_lattice_loc[row,0], self.real_lattice_loc[row,1], \
self.real_lattice_loc[row,2]))
f.write('PRIMCOORD\n')
f.write('%3d %3d\n' % (self.num_atoms_loc, 1))
for atom in range(len(self.atom_symbols_loc)):
f.write('%s %7.7f %7.7f %7.7f\n' % (self.atom_symbols_loc[atom], self.atoms_cart_loc[atom][0], \
self.atoms_cart_loc[atom][1], self.atoms_cart_loc[atom][2]))
f.write('\n\n')
f.write('BEGIN_BLOCK_DATAGRID_3D\n3D_field\nBEGIN_DATAGRID_3D_UNKNOWN\n')
f.write(' %5d %5d %5d\n' % (nx, ny, nz))
f.write(' %10.7f %10.7f %10.7f\n' % (origin[0],origin[1],origin[2]))
for row in range(3):
f.write(' %10.7f %10.7f %10.7f\n' % (real_lattice_loc[row,0], real_lattice_loc[row,1], \
real_lattice_loc[row,2]))
fmt = ' %13.5e' * nx + '\n'
for iz in range(nz):
for iy in range(ny):
f.write(fmt % tuple(WF[:,iy,iz].tolist()))
f.write('END_DATAGRID_3D\nEND_BLOCK_DATAGRID_3D')
if __name__ == '__main__':
import numpy as np
    from pyscf import scf, gto
    from pyscf.pbc import gto as pgto
    from pyscf.pbc import scf as pscf
    from pyscf.pbc import dft
    import pywannier90
cell = pgto.Cell()
cell.atom = '''
C 3.17500000 3.17500000 3.17500000
H 2.54626556 2.54626556 2.54626556
H 3.80373444 3.80373444 2.54626556
H 2.54626556 3.80373444 3.80373444
H 3.80373444 2.54626556 3.80373444
'''
cell.basis = 'sto-3g'
cell.a = np.eye(3) * 6.35
cell.gs = [15] * 3
cell.verbose = 5
cell.build()
nk = [1, 1, 1]
abs_kpts = cell.make_kpts(nk)
kmf = dft.KRKS(cell, abs_kpts).mix_density_fit()
kmf.xc = 'pbe'
ekpt = kmf.run()
pywannier90.save_kmf(kmf, 'chk_mf') # Save the wave function
# Run pyWannier90 and plot WFs using pyWannier90
num_wann = 4
keywords = \
'''
exclude_bands : 1,6-9
begin projections
C:sp3
end projections
'''
# To use the saved wave function, replace kmf with 'chk_mf'
w90 = pywannier90.W90(kmf, cell, nk, num_wann, other_keywords = keywords)
w90.kernel()
w90.plot_wf(grid=[25,25,25], supercell = [1,1,1])
# Run pyWannier90, export unk files, and plot WFs using Wannier90
w90.export_unk()
keywords = \
'''
begin projections
C:sp3
end projections
wannier_plot = True
wannier_plot_supercell = 1
'''
w90 = pywannier90.W90(kmf, cell, nk, num_wann, other_keywords = keywords)
w90.make_win()
w90.setup()
    w90.export_unk(grid = [50, 50, 50])
w90.kernel()
|
gkc1000/pyscf
|
pyscf/pbc/tools/pywannier90.py
|
Python
|
apache-2.0
| 35,627
|
[
"CRYSTAL",
"PySCF",
"Wannier90"
] |
5cc0305fb381121147395e1e5eeb12d4abd8897cd400c346e8c7781b6031ad21
|
"""
Least squares anomaly detection demo on static data.
In this example, we generate some points from a 2-D Gaussian mixture,
and then plot the response of the anomaly detection model across the data
space given some different parameter settings.
The plots show training data as black crosses, contours in blue indicate
the response of the model across the space after training, and the contour
line in red indicates the decision boundary given by thresholding the model
output at 0.5.
This example was created by modifying the scikit-learn demo at
http://scikit-learn.org/stable/auto_examples/covariance/plot_outlier_detection.html
In general the LSAnomaly class can be used as a plug-in replacement in
any of the outlier detection demos on the sklearn site (e.g. as a
replacement for svm.OneClassSVM).
"""
import numpy as np
import pylab as plt
import lsanomaly
plt.rc("text", usetex=True)
plt.rc("font", family="serif")
def data_prep(n_samples=20, offset=2.5):
    """Return training points from a two-component Gaussian mixture and a
    50x50 evaluation grid spanning [-7, 7]^2."""
    xx, yy = np.meshgrid(np.linspace(-7, 7, 50), np.linspace(-7, 7, 50))
# Generate training data from a 2-D mixture model with two Gaussian
# components
X1 = np.random.randn(int(0.5 * n_samples), 2) - offset
X2 = np.random.randn(int(0.5 * n_samples), 2) + offset
X = np.r_[X1, X2]
return X, xx, yy
def plot_results(
X, xx, yy, threshold=0.5, sigma_candidates=None, rho_candidates=None
):
_ = plt.figure(figsize=(16, 10))
for row, sigma in enumerate(sigma_candidates):
for col, rho in enumerate(rho_candidates):
# Train the anomaly model
clf = lsanomaly.LSAnomaly(sigma=sigma, rho=rho)
clf.fit(X)
# Get anomaly scores across the grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# Plot the training data, anomaly model response and decision
# boundary at threshold 0.5.
subplot = plt.subplot(
                len(sigma_candidates), len(rho_candidates), row * len(rho_candidates) + col + 1
)
plt.contourf(
xx,
yy,
Z,
levels=np.linspace(0, 1, 11),
cmap=plt.cm.get_cmap("GnBu"),
)
subplot.contour(
xx, yy, Z, levels=[threshold], linewidths=2, colors="red"
)
cb = plt.colorbar()
for t in cb.ax.get_yticklabels():
t.set_fontsize(10)
plt.scatter(
X[:, 0], X[:, 1], c="black", marker="+", s=50, linewidth=2
)
subplot.set_title(
"$\sigma = $ %.3g, $\\rho$ = %.3g" % (sigma, rho),
fontsize=14,
usetex=True,
)
subplot.axes.get_xaxis().set_ticks([])
subplot.axes.get_yaxis().set_ticks([])
plt.xlim((-7, 7))
plt.ylim((-7, 7))
plt.show()
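if __name__ == "__main__":
    # Minimal driver (not part of the original demo). The candidate grids
    # below are illustrative choices, not values from the source; any small
    # sets of kernel widths (sigma) and regularization strengths (rho) work.
    np.random.seed(0)
    X, xx, yy = data_prep(n_samples=40)
    plot_results(
        X,
        xx,
        yy,
        sigma_candidates=[1.0, 3.0, 5.0],
        rho_candidates=[0.1, 1.0, 10.0],
    )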
|
lsanomaly/lsanomaly
|
lsanomaly/notebooks/static_mix.py
|
Python
|
mit
| 2,937
|
[
"Gaussian"
] |
01ecbb30ee1684554a38ce32efe4b8f947132ddb1c27b6dd63fda316f30cc287
|
#
# Copyright (C) 2019, 2020, 2021 Smithsonian Astrophysical Observatory
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
These tests exercise an issue with Sherpa summarized in
https://github.com/sherpa/sherpa/issues/43
A very simple test case which would capture this is the following.
Assume you have a 2D source with simple, circular Gaussian emission, and a PSF defined at a
higher resolution than the images to fit. Assume that this PSF is also a simple, circular
Gaussian with parameters different from the source's.
We can create a simulated image IMG by convolving the model M, evaluated at the PSF's resolution,
with the PSF, and then rebinning it at the desired image resolution to obtain a new image IMG_lo. As the
convolution is between two Gaussians, the resulting image is also the image of a Gaussian
whose parameters we can predict; in particular the variance of IMG is the sum of the variances of
M and the PSF, i.e. their widths add in quadrature. For simplicity, we might need to assume an
exact ratio between the pixel sizes.
When the current Sherpa is run, it will apply the PSF to the image as if they were defined at the same
resolution. Sherpa will overestimate the size of the PSF and thus underestimate the size of the source.
We can analytically derive the (incorrect) fit results Sherpa would calculate now, while
the correct results expected after the improvements are known to us, since we set up the image.
The tests in this module implement the scenario described above.
We will keep updating the file with more tests for error handling, edge and corner cases, etc.
We might have a different file for integration tests with more realistic data, as we will need
to exercise actual images and PSFs with WCS headers.
"""
from math import sqrt
import attr
import numpy as np
from pytest import approx, fixture, mark
from sherpa.astro.ui.utils import Session
from sherpa.astro.data import DataIMG
from sherpa.astro.instrument import PSFModel
from sherpa.instrument import PSFSpace2D
from sherpa.models import SigmaGauss2D
from sherpa.models.regrid import EvaluationSpace2D
DATA_PIXEL_SIZE = 2
@attr.s
class FixtureConfiguration():
image_size = attr.ib()
psf_size = attr.ib()
source_amplitude = attr.ib()
source_sigma = attr.ib()
psf_sigma = attr.ib()
resolution_ratio = attr.ib()
psf_amplitude = attr.ib(default=1)
image_resolution = attr.ib(default=1)
def __attrs_post_init__(self):
self.source_position = self.image_size / 2
self.psf_position = self.psf_size / 2
@attr.s
class FixtureData():
image = attr.ib()
psf = attr.ib()
psf_model = attr.ib()
configuration = attr.ib()
def __attrs_post_init__(self):
self.data_space = EvaluationSpace2D(*self.image.get_indep(filter=False))
def generate_psf_space_configurations():
return (FixtureConfiguration(image_size=500, psf_size=100, source_amplitude=100,
source_sigma=50, psf_sigma=5, resolution_ratio=1.5),
FixtureConfiguration(image_size=300, psf_size=150, source_amplitude=100,
source_sigma=1, psf_sigma=15, resolution_ratio=1.5),
FixtureConfiguration(image_size=500, psf_size=100, source_amplitude=100,
source_sigma=50, psf_sigma=5, resolution_ratio=2.5),
FixtureConfiguration(image_size=300, psf_size=150, source_amplitude=100,
source_sigma=1, psf_sigma=15, resolution_ratio=2.5),
FixtureConfiguration(image_size=500, psf_size=100, source_amplitude=100,
source_sigma=50, psf_sigma=5, resolution_ratio=0.5),
FixtureConfiguration(image_size=300, psf_size=150, source_amplitude=100,
source_sigma=1, psf_sigma=15, resolution_ratio=0.5),
FixtureConfiguration(image_size=500, psf_size=100, source_amplitude=100,
source_sigma=50, psf_sigma=5, resolution_ratio=0.7),
FixtureConfiguration(image_size=300, psf_size=150, source_amplitude=100,
source_sigma=1, psf_sigma=15, resolution_ratio=0.7),
FixtureConfiguration(image_size=500, psf_size=100, source_amplitude=100,
source_sigma=50, psf_sigma=5, resolution_ratio=1),
FixtureConfiguration(image_size=300, psf_size=150, source_amplitude=100,
source_sigma=1, psf_sigma=15, resolution_ratio=2),
FixtureConfiguration(image_size=500, psf_size=100, source_amplitude=100,
source_sigma=50, psf_sigma=5, resolution_ratio=3),
FixtureConfiguration(image_size=300, psf_size=150, source_amplitude=100,
source_sigma=1, psf_sigma=15, resolution_ratio=4),
FixtureConfiguration(image_size=500, psf_size=100, source_amplitude=100,
source_sigma=50, psf_sigma=5, resolution_ratio=5),
FixtureConfiguration(image_size=300, psf_size=150, source_amplitude=100,
source_sigma=1, psf_sigma=15, resolution_ratio=6),
FixtureConfiguration(image_size=500, psf_size=100, source_amplitude=100,
source_sigma=50, psf_sigma=5, resolution_ratio=7),
FixtureConfiguration(image_size=300, psf_size=150, source_amplitude=100,
source_sigma=1, psf_sigma=15, resolution_ratio=8),
)
def generate_rebinning_configurations():
return (FixtureConfiguration(image_size=128, psf_size=64, source_amplitude=100,
source_sigma=10, psf_sigma=5, resolution_ratio=2),
FixtureConfiguration(image_size=64, psf_size=128, source_amplitude=100,
source_sigma=10, psf_sigma=5, resolution_ratio=2),
FixtureConfiguration(image_size=256, psf_size=128, source_amplitude=100,
source_sigma=5, psf_sigma=5, resolution_ratio=3),
FixtureConfiguration(image_size=128, psf_size=64, source_amplitude=100,
source_sigma=10, psf_sigma=5, resolution_ratio=3),
FixtureConfiguration(image_size=64, psf_size=512, source_amplitude=100,
source_sigma=10, psf_sigma=20, resolution_ratio=4),
FixtureConfiguration(image_size=128, psf_size=512, source_amplitude=100,
source_sigma=5, psf_sigma=20, resolution_ratio=4),
FixtureConfiguration(image_size=128, psf_size=64, source_amplitude=100,
source_sigma=10, psf_sigma=5, resolution_ratio=1),
FixtureConfiguration(image_size=64, psf_size=128, source_amplitude=100,
source_sigma=10, psf_sigma=5, resolution_ratio=1),
FixtureConfiguration(image_size=128, psf_size=64, source_amplitude=100,
source_sigma=5, psf_sigma=5, resolution_ratio=1),
FixtureConfiguration(image_size=128, psf_size=64, source_amplitude=100,
source_sigma=10, psf_sigma=5, resolution_ratio=1),
FixtureConfiguration(image_size=64, psf_size=512, source_amplitude=100,
source_sigma=10, psf_sigma=5, resolution_ratio=1),
FixtureConfiguration(image_size=128, psf_size=512, source_amplitude=100,
source_sigma=5, psf_sigma=5, resolution_ratio=1),
)
@mark.parametrize("psf_fixture", generate_rebinning_configurations(), indirect=True)
def test_psf_resolution_bug(psf_fixture):
session, source, expected_sigma = psf_fixture
session.fit()
assert source.sigma_a.val == expected_sigma
assert source.sigma_b.val == expected_sigma
@mark.parametrize("configuration", generate_psf_space_configurations())
def test_psf_space(configuration):
fixture_data = make_images(configuration)
psf_space = PSFSpace2D(fixture_data.data_space, fixture_data.psf_model, data_pixel_size=fixture_data.image.sky.cdelt)
    # Test that the PSF space has the same boundaries as the data space, but a number of
    # bins equal to what the data image would have if it had the same pixel size as the PSF.
    # We also want this space to be at least as large as the data space, to avoid introducing
    # more boundary effects in the convolution.
n_bins = configuration.image_size
assert psf_space.start == (0, 0)
assert psf_space.x_axis.size == psf_space.y_axis.size == n_bins * configuration.resolution_ratio
assert psf_space.end == (approx(n_bins - 1 / configuration.resolution_ratio),
approx(n_bins - 1 / configuration.resolution_ratio))
def test_rebin_int_no_int():
"""
Test that the correct rebin_* function is called depending on whether the pixel size is an integer or close to
an integer with a tolerance that can be changed by the user.
"""
from sherpa.models.regrid import rebin_2d
from unittest import mock
rebin_int = mock.MagicMock()
rebin_no_int = mock.MagicMock()
to_space = mock.MagicMock()
y = mock.MagicMock()
with mock.patch('sherpa.models.regrid.rebin_int', rebin_int):
# The pixel ratio is 2, perfect integer, rebin_int should be called
from_space = mock.MagicMock(data_2_psf_pixel_size_ratio=(0.5, 0.5))
rebin_2d(y, from_space, to_space)
assert rebin_int.called
rebin_int.reset_mock()
with mock.patch('sherpa.models.regrid.rebin_int', rebin_int):
# Not a perfect integer, but close to an integer within the default tolerance
from_space = mock.MagicMock(data_2_psf_pixel_size_ratio=(0.333, 0.333))
rebin_2d(y, from_space, to_space)
assert rebin_int.called
rebin_int.reset_mock()
    with mock.patch('sherpa.models.regrid.rebin_no_int', rebin_no_int):
        # Same case as above, but with a tighter tolerance so the ratio is no
        # longer close enough to an integer. Patch the threshold rather than
        # assigning it, so the module-level value is restored after this test.
        from_space = mock.MagicMock(data_2_psf_pixel_size_ratio=(0.333, 0.333))
        from sherpa.models import regrid
        with mock.patch.object(regrid, 'PIXEL_RATIO_THRESHOLD', 0.000001):
            rebin_2d(y, from_space, to_space)
        assert rebin_no_int.called
def symmetric_gaussian_image(amplitude, sigma, position, n_bins):
model = SigmaGauss2D()
model.ampl = amplitude
model.sigma_a = sigma
model.sigma_b = sigma
model.xpos = position
model.ypos = position
arrays = np.arange(0, n_bins), np.arange(0, n_bins)
x_array, y_array = np.meshgrid(*arrays)
x_array, y_array = x_array.flatten(), y_array.flatten()
return model(x_array, y_array).flatten(), x_array, y_array
def make_images(configuration):
psf, psf_model = make_psf(configuration)
data_image = make_image(configuration)
return FixtureData(data_image, psf, psf_model, configuration)
def make_image(configuration):
source_position = configuration.source_position
    # The convolution of two Gaussians is a Gaussian whose variance is the sum of
    # the variances of those convolved (the stddevs add in quadrature), so we
    # model the image as a Gaussian itself.
image_sigma = sqrt(configuration.source_sigma ** 2 + configuration.psf_sigma ** 2)
image_amplitude = configuration.source_amplitude * configuration.psf_amplitude
image, image_x, image_y = symmetric_gaussian_image(amplitude=image_amplitude, sigma=image_sigma,
position=source_position, n_bins=configuration.image_size)
data_image = DataIMG("image", image_x, image_y, image,
shape=(configuration.image_size, configuration.image_size),
sky=WcsStub([DATA_PIXEL_SIZE, DATA_PIXEL_SIZE])
)
return data_image
def make_psf(configuration):
# The psf parameters in terms of the input parameters. To simulate the different pixel size
# we set the sigma of the psf to be a multiple (according to the ratio input)
# of the actual sigma in data pixel units.
    # This should correspond, when ratio > 1, to the case where the PSF's resolution is higher
    # than the image's. If the ratio is 1 then they have the same pixel size.
psf_sigma = configuration.psf_sigma * configuration.resolution_ratio
psf_amplitude = configuration.psf_amplitude
psf_position = configuration.psf_position
    # We model the PSF as a Gaussian as well. Note we are using the psf_sigma variable,
    # i.e. the one which is scaled through the ratio. In other words, we are working in
    # units of data pixels to simulate the conditions of the bug when the ratio != 1.
psf, psf_x, psf_y = symmetric_gaussian_image(amplitude=psf_amplitude, sigma=psf_sigma,
position=psf_position, n_bins=configuration.psf_size)
# Normalize PSF
norm_psf = psf / psf.sum()
cdelt = DATA_PIXEL_SIZE / configuration.resolution_ratio
psf_wcs = WcsStub([cdelt, cdelt])
# Create a Sherpa PSF model object using the psf arrays
sherpa_kernel = DataIMG('kernel_data',
psf_x, psf_y, norm_psf,
shape=(configuration.psf_size, configuration.psf_size),
sky=psf_wcs
)
psf_model = PSFModel('psf_model', kernel=sherpa_kernel)
psf_model.norm = 1
psf_model.origin = (psf_position + 1, psf_position + 1)
return psf, psf_model
@fixture
def psf_fixture(request):
configuration = request.param
fixture_data = make_images(configuration)
ui = Session()
ui.set_data(1, fixture_data.image)
exact_expected_sigma = configuration.source_sigma
approx_expected_sigma = approx(exact_expected_sigma, rel=3e-2)
# Set the source model as a 2D Gaussian, and set the PSF in Sherpa
source_position = configuration.source_position
sherpa_source = SigmaGauss2D('source')
sherpa_source.ampl = configuration.source_amplitude
sherpa_source.sigma_a = exact_expected_sigma
sherpa_source.sigma_b = exact_expected_sigma
sherpa_source.xpos = source_position
sherpa_source.ypos = source_position
ui.set_source(sherpa_source)
ui.set_psf(fixture_data.psf_model)
return ui, sherpa_source, approx_expected_sigma
@attr.s
class WcsStub():
cdelt = attr.ib()
|
anetasie/sherpa
|
sherpa/utils/tests/test_psf_rebinning_unit.py
|
Python
|
gpl-3.0
| 15,233
|
[
"Gaussian"
] |
cd84e809bb46e12267b4cca2d533250d48cc83360827ec36f762975c75e882b9
|
from functools import wraps
import numpy as np
import networkx as nx
import vigra
import collections
from contextlib import contextmanager
from .box import Box
class BlockflowArray(np.ndarray):
def __new__(cls, shape, dtype=float, buffer=None, offset=0, strides=None, order=None, box=None):
obj = np.ndarray.__new__(cls, shape, dtype, buffer, offset, strides, order)
obj.box = box
return obj
    def __array_finalize__(self, obj):
        if obj is None:
            return
        orig_box = getattr(obj, 'box', None)
        # We're creating a new array using an existing array as a template, but if the array was generated
        # via a broadcasting ufunc, then the box might not be copied from the correct array.
        # If the box doesn't match our shape, drop it rather than keep a wrong one.
        #
        # FIXME: We might be able to handle cases like this automatically
        #        via __array_wrap__() or __array_prepare__()
        self.box = None
        if orig_box is not None and tuple(orig_box[1] - orig_box[0]) == self.shape:
            self.box = orig_box
class DryArray(BlockflowArray):
def __new__(cls, shape=(), dtype=float, buffer=None, offset=0, strides=None, order=None, box=None):
assert shape == () or np.prod(shape) == 0, "DryArray must have empty shape"
obj = BlockflowArray.__new__(cls, shape, dtype, buffer, offset, strides, order, box=box)
return obj
@contextmanager
def readonly_array(a):
    a = np.asanyarray(a)
    writeable = a.flags['WRITEABLE']
    a.flags['WRITEABLE'] = False
    try:
        yield a
    finally:
        # Restore the original flag even if the with-body raises.
        a.flags['WRITEABLE'] = writeable
class Operator(object):
def __init__(self, name=None):
self.name = name or self.__class__.__name__
def __call__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
assert 'req_box' not in kwargs, \
"The req_box should not be passed to operators explicitly. Use foo.pull(box)"
return self
def dry_pull(self, box):
with readonly_array(box) as box:
assert box.ndim == 2 and box.shape[0] == 2 and box.shape[1] <= 5
kwargs = {'req_box': box}
kwargs.update(self.kwargs)
if global_graph.mode == 'registration_dry_run':
global_graph.dag.add_node(self)
if global_graph.op_callstack:
caller = global_graph.op_callstack[-1]
global_graph.dag.add_edge(self, caller)
global_graph.op_callstack.append(self)
try:
return self.dry_execute(*self.args, **kwargs)
finally:
global_graph.op_callstack.pop()
def pull(self, box):
with readonly_array(box) as box:
assert box.ndim == 2 and box.shape[0] == 2 and box.shape[1] <= 5
kwargs = {'req_box': box}
kwargs.update(self.kwargs)
result_data = self.execute(*self.args, **kwargs)
assert isinstance(result_data, BlockflowArray)
assert result_data.box is not None
return result_data
def dry_execute(self, *args, **kwargs):
raise NotImplementedError()
def execute(self, *args, **kwargs):
raise NotImplementedError()
def __str__(self):
return self.name
class ReadArray(Operator):
def dry_execute(self, arr, req_box):
return DryArray(box=self._clip_box(arr, req_box))
def execute(self, arr, req_box=None):
clipped_box = self._clip_box(arr, req_box)
result = arr[clipped_box.slicing()].view(BlockflowArray)
result.box = clipped_box
return result
def _clip_box(self, arr, req_box):
full_array_box = Box.from_shape(arr.shape)
valid_box = full_array_box.intersection(req_box)
return valid_box
def wrap_filter_5d(filter_func):
"""
Decorator.
Given a 5D array (tzyxc), and corresponding output box,
compute the given filter over the spatial dimensions.
(It doesn't suffice to simply drop the 't' axis and run the filter,
because singleton spatial dimensions would cause trouble.)
"""
@wraps(filter_func)
def wrapper(input_data, scale, box_5d):
input_data = vigra.taggedView(input_data, 'tzyxc')
assert box_5d.shape == (2,5)
assert box_5d[1,0] - box_5d[0,0] == 1, \
"FIXME: Can't handle multiple time slices yet. (Add a loop to this function.)"
# Find the non-singleton axes, so we can keep only them
# but also keep channel, no matter what
input_shape_nochannel = np.array(input_data.shape[:-1])
nonsingleton_axes = (input_shape_nochannel != 1).nonzero()[0]
nonsingleton_axes = tuple(nonsingleton_axes) + (4,) # Keep channel
box = box_5d[:, nonsingleton_axes] # Might be a 2D OR 3D box
# Squeeze, but keep channel
squeezed_input = input_data.squeeze()
if 'c' not in squeezed_input.axistags.keys():
squeezed_input = squeezed_input.insertChannelAxis(-1)
result = filter_func(squeezed_input, scale, box=box)
result = result.withAxes(*'tzyxc')
return result
return wrapper
class ConvolutionalFilter(Operator):
WINDOW_SIZE = 2.0 # Subclasses may override this
def __init__(self, name=None):
super(ConvolutionalFilter, self).__init__(name)
self.filter_func_5d = wrap_filter_5d(self.filter_func)
def filter_func(self, input_data, scale, box):
"""
input_data: array data whose axes are one of the following: zyxc, yxc, zxc, zyc
scale: filter scale (sigma)
box: Not 5D. Either 4D or 3D, depending on the dimensionality of input_data
"""
raise NotImplementedError("Convolutional Filter '{}' must override filter_func()"
.format(self.__class__.__name__))
def num_channels_for_input_box(self, box):
# Default implementation: One output channel per input channel,
# regardless of dimensions
return box[1,'c'] - box[0,'c']
def num_channels_for_input_box_vector_valued(self, box):
"""
For vector-valued filters whose output channels is N*C
"""
shape_zyx = box[1,'zyx'] - box[0,'zyx']
ndim = (shape_zyx > 1).sum()
channels = box.to_shape()[-1]
return ndim*channels
def dry_execute(self, input_op, scale, req_box):
upstream_req_box = self._get_upstream_box(scale, req_box)
empty_data = input_op.dry_pull(upstream_req_box)
n_channels = self.num_channels_for_input_box(empty_data.box)
box = empty_data.box.copy()
box[:,-1] = (0, n_channels)
box = box.intersection(req_box)
return DryArray(box=box)
def execute(self, input_op, scale, req_box=None):
# Ask for the fully padded input
upstream_req_box = self._get_upstream_box(scale, req_box)
input_data = input_op.pull(upstream_req_box)
# The result is tagged with a box.
# If we asked for too much (wider than the actual image),
# then this box won't match what we requested.
upstream_actual_box = input_data.box
result_box, req_box_within_upstream = upstream_actual_box.intersection(req_box, True)
filtered = self.filter_func_5d(input_data, scale, req_box_within_upstream)
filtered = filtered.view(BlockflowArray)
filtered.box = result_box
expected_channels = self.num_channels_for_input_box(upstream_actual_box)
assert filtered.shape[-1] == expected_channels, \
"Filter '{}' returned an unexpected number of channels: got {}, expected {}"\
.format(self.name, filtered.shape[-1], expected_channels)
return filtered
def _get_upstream_box(self, sigma, req_box):
padding = np.ceil(np.array(sigma)*self.WINDOW_SIZE).astype(np.int64)
upstream_req_box = req_box.copy()
upstream_req_box[0, 'zyx'] -= padding
upstream_req_box[1, 'zyx'] += padding
return upstream_req_box
class GaussianSmoothing(ConvolutionalFilter):
def filter_func(self, input_data, scale, box):
return vigra.filters.gaussianSmoothing(input_data, sigma=scale, window_size=self.WINDOW_SIZE, roi=box[:,:-1].tolist())
class LaplacianOfGaussian(ConvolutionalFilter):
def filter_func(self, input_data, scale, box):
return vigra.filters.laplacianOfGaussian(input_data, scale=scale, window_size=self.WINDOW_SIZE, roi=box[:,:-1].tolist())
class GaussianGradientMagnitude(ConvolutionalFilter):
def filter_func(self, input_data, scale, box):
return vigra.filters.gaussianGradientMagnitude(input_data, sigma=scale, window_size=self.WINDOW_SIZE, roi=box[:,:-1].tolist())
class HessianOfGaussianEigenvalues(ConvolutionalFilter):
num_channels_for_input_box = ConvolutionalFilter.num_channels_for_input_box_vector_valued
def filter_func(self, input_data, scale, box):
return vigra.filters.hessianOfGaussianEigenvalues(input_data, scale=scale, window_size=self.WINDOW_SIZE, roi=box[:,:-1].tolist())
class StructureTensorEigenvalues(ConvolutionalFilter):
num_channels_for_input_box = ConvolutionalFilter.num_channels_for_input_box_vector_valued
def filter_func(self, input_data, scale, box):
inner_scale = scale
outer_scale = scale / 2.0
return vigra.filters.structureTensorEigenvalues(input_data,
innerScale=inner_scale,
outerScale=outer_scale,
window_size=self.WINDOW_SIZE,
roi=box[:,:-1].tolist())
class DifferenceOfGaussians(ConvolutionalFilter):
def filter_func(self, input_data, scale, box):
sigma_1 = scale
sigma_2 = 0.66*scale
smoothed_1 = vigra.filters.gaussianSmoothing(input_data, sigma=sigma_1, window_size=self.WINDOW_SIZE, roi=box[:,:-1].tolist())
smoothed_2 = vigra.filters.gaussianSmoothing(input_data, sigma=sigma_2, window_size=self.WINDOW_SIZE, roi=box[:,:-1].tolist())
# In-place subtraction
np.subtract( smoothed_1, smoothed_2, out=smoothed_1 )
return smoothed_1
class DifferenceOfGaussiansComposite(Operator):
"""
Alternative implementation of DifferenceOfGaussians,
but using internal operators for the two smoothing operations.
"""
def __init__(self, name=None):
super(DifferenceOfGaussiansComposite, self).__init__(name)
self.gaussian_1 = GaussianSmoothing('Gaussian-1')
self.gaussian_2 = GaussianSmoothing('Gaussian-2')
def dry_execute(self, input_op, scale, req_box):
empty_1 = self.gaussian_1(input_op, scale).dry_pull(req_box)
empty_2 = self.gaussian_2(input_op, scale*0.66).dry_pull(req_box)
assert (empty_1.box == empty_2.box).all()
return empty_1
def execute(self, input_op, scale, req_box=None):
a = self.gaussian_1(input_op, scale).pull(req_box)
b = self.gaussian_2(input_op, scale*0.66).pull(req_box)
# For pointwise numpy ufuncs, the result is already cast as
# a BlockflowArray, with the box already initialized.
# Nothing extra needed here.
return a - b
FilterSpec = collections.namedtuple( 'FilterSpec', 'name scale' )
FilterNames = { 'GaussianSmoothing': GaussianSmoothing,
'LaplacianOfGaussian': LaplacianOfGaussian,
'GaussianGradientMagnitude': GaussianGradientMagnitude,
'DifferenceOfGaussians': DifferenceOfGaussians,
#'DifferenceOfGaussians': DifferenceOfGaussiansComposite,
'HessianOfGaussianEigenvalues': HessianOfGaussianEigenvalues,
'StructureTensorEigenvalues': StructureTensorEigenvalues }
class PixelFeatures(Operator):
def __init__(self, name=None):
Operator.__init__(self, name)
self.feature_ops = {} # (name, scale) : op
def dry_execute(self, input_op, filter_specs, req_box):
n_channels = 0
for spec in filter_specs:
feature_op = self._get_filter_op(spec)
empty = feature_op(input_op, spec.scale).dry_pull(req_box)
n_channels += empty.box[1, 'c']
box = empty.box.copy()
box[:,-1] = (0, n_channels)
# Restrict to requested channels
box = box.intersection(req_box)
return DryArray(box=box)
def execute(self, input_op, filter_specs, req_box=None):
# FIXME: This requests all channels, no matter what.
results = []
for spec in filter_specs:
feature_op = self._get_filter_op(spec)
feature_data = feature_op(input_op, spec.scale).pull(req_box)
results.append(feature_data)
stacked_data = np.concatenate(results, axis=-1)
# Select only the requested channels
stacked_data = stacked_data[..., slice(*req_box[:,-1])]
stacked_data = stacked_data.view(BlockflowArray)
stacked_data.box = feature_data.box
stacked_data.box[:,-1] = req_box[:,-1]
return stacked_data
def _get_filter_op(self, spec):
try:
feature_op = self.feature_ops[spec]
except KeyError:
feature_op = self.feature_ops[spec] = FilterNames[spec.name]()
return feature_op
class PredictPixels(Operator):
def dry_execute(self, features_op, classifier, req_box):
upstream_box = req_box.copy()
upstream_box[:,-1] = (Box.MIN,Box.MAX) # Request all features
empty_feats = features_op.dry_pull(upstream_box)
out_box = empty_feats.box.copy()
out_box[:,-1] = (0, len(classifier.known_classes))
out_box = out_box.intersection(req_box)
return DryArray(dtype=np.float32, box=out_box)
def execute(self, features_op, classifier, req_box):
upstream_box = req_box.copy()
upstream_box[:,-1] = (Box.MIN,Box.MAX) # Request all features
feature_vol = features_op.pull(upstream_box)
prod = np.prod(feature_vol.shape[:-1])
feature_matrix = feature_vol.reshape((prod, feature_vol.shape[-1]))
probabilities_matrix = classifier.predict_probabilities( feature_matrix )
# TODO: Somehow check for correct number of channels, in case the classifier returned fewer classes than we expected
# (See lazyflow for example)
probabilities_vol = probabilities_matrix.reshape(feature_vol.shape[:-1] + (-1,))
# Extract only the channel range that was originally requested
ch_start, ch_stop = req_box[:,-1]
probabilities_vol = probabilities_vol[..., ch_start:ch_stop]
probabilities_vol = probabilities_vol.view(BlockflowArray)
probabilities_vol.box = np.append(feature_vol.box[:,:-1], req_box[:,-1:], axis=1)
return probabilities_vol
class Graph(object):
MODES = ['uninitialized', 'registration_dry_run', 'block_flow_dry_run', 'executable']
def __init__(self):
self.op_callstack = []
self.dag = nx.DiGraph()
self.mode = 'uninitialized'
@contextmanager
def register_calls(self):
assert len(self.op_callstack) == 0
self.mode = 'registration_dry_run'
yield
assert len(self.op_callstack) == 0
self.mode = 'executable'
global_graph = Graph()
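# Illustrative wiring sketch (not in the original module; the Box construction
# below is a guess based on how boxes are indexed above as 2xN [start; stop]
# arrays over the 'tzyxc' axes, so it stays commented out):
#
#   vol = np.random.rand(1, 1, 64, 64, 1)                # tzyxc volume
#   smoothed = GaussianSmoothing()(ReadArray()(vol), 1.0)
#   req_box = Box([[0, 0, 0, 0, 0], [1, 1, 64, 64, 1]])
#   result = smoothed.pull(req_box)                       # BlockflowArray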
|
stuarteberg/blockflow
|
blockflow/blockflow.py
|
Python
|
mit
| 15,659
|
[
"Gaussian"
] |
92802fc4df3a874a0e71d98fefbb2ee61b61cbc89ca94e3ef40f461e5d41a208
|
"""
A set of ML-related functions that are used in a variety of models.
==============
Copyright Info
==============
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Copyright Brian Dolhansky 2014
bdolmail@gmail.com
"""
import numpy as np
"""
A common function used to compute the entropy. This is a "safe" entropy
function in that invalid values (NaN or inf) are clamped to 0. This is used
in the decision tree module where dividing by 0 could happen.
"""
def safe_plogp(x):
e = x * np.log2(x)
if hasattr(e, "__len__"):
e[np.isinf(e)] = 0
e[np.isnan(e)] = 0
else:
if np.isnan(e) or np.isinf(e):
e = 0.0
return e
"""
The standard entropy function, using the "safe" plogp function which clamps
invalid inputs to 0.
"""
def safe_entropy(x):
return -np.sum(safe_plogp(x))
def marginal_entropy(x):
return -np.sum(x * np.log2(x))
def safe_binary_entropy(x):
l_px = np.log2(x)
l_pnotx = np.log2(1-x)
if hasattr(x, "__len__"):
l_px[np.isinf(l_px)] = 0
l_pnotx[np.isinf(l_pnotx)] = 0
else:
if np.isnan(l_px) or np.isinf(l_px):
l_px = 0
if np.isnan(l_pnotx) or np.isinf(l_pnotx):
l_pnotx = 0
return -(np.multiply(x, l_px)
+ np.multiply((1-x), l_pnotx))
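if __name__ == '__main__':
    # Quick sanity checks (not part of the original module): a uniform
    # distribution over 4 outcomes has entropy log2(4) = 2 bits, and the
    # 0*log2(0) terms of a degenerate distribution are clamped to 0.
    assert np.isclose(safe_entropy(np.array([0.25, 0.25, 0.25, 0.25])), 2.0)
    assert np.isclose(safe_entropy(np.array([1.0, 0.0, 0.0])), 0.0)
    # safe_binary_entropy peaks at 1 bit for p = 0.5.
    assert np.isclose(safe_binary_entropy(0.5), 1.0)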
|
bdol/bdol-ml
|
utils/ml_functions.py
|
Python
|
lgpl-3.0
| 1,867
|
[
"Brian"
] |
491b546f3e2964fbd91ab8fccbb40c8b62f49ce27c5b52eb854a57d19cef1d9b
|
""" Utility Class for threaded agents (e.g. TransformationAgent)
Mostly for logging
"""
import time
from DIRAC import gLogger
__RCSID__ = "$Id$"
AGENT_NAME = ''
class TransformationAgentsUtilities(object):
""" logging utilities for threaded TS agents
"""
def __init__(self):
""" c'tor
"""
self.transInThread = {}
self.debug = False
def __prefixForLogging(self, transID, method, reftime):
""" get the thread number """
if reftime is not None:
method += " (%.1f seconds)" % (time.time() - reftime)
try:
return self.transInThread.get(transID, ' [None] [%s] ' % transID) + AGENT_NAME + '.' + method
except NameError:
return ''
def _logVerbose(self, message, param='', method="execute", transID='None', reftime=None):
""" verbose """
if self.debug:
gLogger.getSubLogger('(V) ' + self.__prefixForLogging(transID, method, reftime)).info(message, param)
else:
gLogger.getSubLogger(self.__prefixForLogging(transID, method, reftime)).verbose(message, param)
def _logDebug(self, message, param='', method="execute", transID='None', reftime=None):
""" debug """
gLogger.getSubLogger(self.__prefixForLogging(transID, method, reftime)).debug(message, param)
def _logInfo(self, message, param='', method="execute", transID='None', reftime=None):
""" info """
gLogger.getSubLogger(self.__prefixForLogging(transID, method, reftime)).info(message, param)
def _logWarn(self, message, param='', method="execute", transID='None', reftime=None):
""" warn """
gLogger.getSubLogger(self.__prefixForLogging(transID, method, reftime)).warn(message, param)
def _logError(self, message, param='', method="execute", transID='None', reftime=None):
""" error """
gLogger.getSubLogger(self.__prefixForLogging(transID, method, reftime)).error(message, param)
def _logException(self, message, param='', lException=False, method="execute", transID='None', reftime=None):
""" exception """
gLogger.getSubLogger(self.__prefixForLogging(transID, method, reftime)).exception(message, param, lException)
  def _logFatal(self, message, param='', method="execute", transID='None', reftime=None):
    """ fatal """
    gLogger.getSubLogger(self.__prefixForLogging(transID, method, reftime)).fatal(message, param)
def _transTaskName(self, transID, taskID): # pylint: disable=no-self-use
""" Construct the task name from the transformation and task ID """
return str(transID).zfill(8) + '_' + str(taskID).zfill(8)
  def _parseTaskName(self, taskName):  # pylint: disable=no-self-use
    """ Split a task name into transformation and taskID """
    try:
      # Force evaluation inside the try block: a bare generator would defer
      # the int() conversion (and any ValueError) until it is consumed.
      return tuple(int(x) for x in taskName.split('_'))
    except ValueError:
      return (0, 0)
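if __name__ == '__main__':
  # Illustrative round trip (not part of the original module):
  # _transTaskName zero-pads both IDs to 8 digits and _parseTaskName
  # recovers them as integers.
  utils = TransformationAgentsUtilities()
  name = utils._transTaskName(123, 45)  # '00000123_00000045'
  transID, taskID = utils._parseTaskName(name)
  assert (transID, taskID) == (123, 45)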
|
andresailer/DIRAC
|
TransformationSystem/Agent/TransformationAgentsUtilities.py
|
Python
|
gpl-3.0
| 2,767
|
[
"DIRAC"
] |
39ab030bd63b574f1bc4e3206847f4ba6904b60734221aa5ce73fd7800c42fc2
|
# -*- coding: utf-8 -*-
# Copyright 2008-2013 Jaap Karssenberg <jaap.karssenberg@gmail.com>
'''Package with source formats for pages.
Each module in zim.formats should contains exactly one subclass of
DumperClass and exactly one subclass of ParserClass
(optional for export formats). These can be loaded by L{get_parser()}
and L{get_dumper()} respectively. The requirement to have exactly one
subclass per module means you can not import other classes that derive
from these base classes directly into the module.
For format modules it is safe to import '*' from this module.
Parse tree structure
====================
Parse trees are built using the (c)ElementTree module (included in
python 2.5 as xml.etree.ElementTree). It is basically an xml structure
supporting a subset of "html like" tags.
Supported tags:
- page root element for grouping paragraphs
- p for paragraphs
- h for heading, level attribute can be 1..6
- pre for verbatim paragraphs (no further parsing in these blocks)
- em for emphasis, rendered italic by default
- strong for strong emphasis, rendered bold by default
	- mark for highlighted text, rendered with background color or underlined
	- strike for text that is removed, usually rendered as strike through
- code for inline verbatim text
- ul for bullet and checkbox lists
- ol for numbered lists
- li for list items
- link for links, attribute href gives the target
	- img for images, attributes src, width, height and optionally href and alt
- type can be used to control plugin functionality, e.g. type=equation
Unlike html we respect line breaks and other whitespace as is.
When rendering as html use the "white-space: pre" CSS definition to
get the same effect.
Since elements are based on the functional markup instead of visual
markup it is not allowed to nest elements in arbitrary ways.
TODO: allow links to be nested in other elements
TODO: allow strike to have sub elements
TODO: add HR element
If a page starts with a h1 this heading is considered the page title,
else we can fall back to the page name as title.
NOTE: To avoid confusion: "headers" refers to meta data, usually in
the form of rfc822 headers at the top of a page. But "heading" refers
to a title or subtitle in the document.
'''
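# Illustrative example (not part of the original module) of the XML form of a
# small parse tree, using the tags listed in the docstring above:
#
#   <?xml version='1.0' encoding='utf-8'?>
#   <zim-tree>
#   <h level="1">Page Title</h>
#   <p>Some <strong>bold</strong> text and a
#   <link href="OtherPage">link</link>.
#   </p></zim-tree>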
import re
import string
import logging
import types
from zim.fs import Dir, File
from zim.parsing import link_type, is_url_re, \
url_encode, url_decode, URL_ENCODE_READABLE, URL_ENCODE_DATA
from zim.parser import Builder
from zim.config import data_file
from zim.objectmanager import ObjectManager
import zim.plugins
import zim.notebook # no 'from' to prevent cyclic import errors
logger = logging.getLogger('zim.formats')
# Needed to determine RTL, but may not be available
# if gtk bindings are not installed
try:
import pango
except:
pango = None
logger.warn('Could not load pango - RTL scripts may look bad')
try:
import xml.etree.cElementTree as ElementTreeModule
except: #pragma: no cover
logger.warn('Could not load cElementTree, defaulting to ElementTree')
import xml.etree.ElementTree as ElementTreeModule
EXPORT_FORMAT = 1
IMPORT_FORMAT = 2
NATIVE_FORMAT = 4
TEXT_FORMAT = 8 # Used for "Copy As" menu - these all provide "text/plain" mimetype
UNCHECKED_BOX = 'unchecked-box'
CHECKED_BOX = 'checked-box'
XCHECKED_BOX = 'xchecked-box'
BULLET = '*' # FIXME make this 'bullet'
FORMATTEDTEXT = 'zim-tree'
FRAGMENT = 'zim-tree'
HEADING = 'h'
PARAGRAPH = 'p'
VERBATIM_BLOCK = 'pre' # should be same as verbatim
BLOCK = 'div'
IMAGE = 'img'
OBJECT = 'object'
BULLETLIST = 'ul'
NUMBEREDLIST = 'ol'
LISTITEM = 'li'
EMPHASIS = 'emphasis' # TODO change to "em" to be in line with html
STRONG = 'strong'
MARK = 'mark'
VERBATIM = 'code'
STRIKE = 'strike'
SUBSCRIPT = 'sub'
SUPERSCRIPT = 'sup'
LINK = 'link'
TAG = 'tag'
ANCHOR = 'anchor'
BLOCK_LEVEL = (PARAGRAPH, HEADING, VERBATIM_BLOCK, BLOCK, OBJECT, IMAGE, LISTITEM)
def increase_list_iter(listiter):
'''Get the next item in a list for a numbered list
E.g if C{listiter} is C{"1"} this function returns C{"2"}, if it
is C{"a"} it returns C{"b"}.
@param listiter: the current item, either an integer number or
single letter
@returns: the next item, or C{None}
'''
try:
i = int(listiter)
return str(i + 1)
except ValueError:
try:
i = string.letters.index(listiter)
return string.letters[i+1]
except ValueError: # listiter is not a letter
return None
except IndexError: # wrap to start of list
return string.letters[0]
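# For example (illustrative): increase_list_iter('1') -> '2',
# increase_list_iter('a') -> 'b', and a non-numeric, non-letter item
# such as '#' yields None.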
def encode_xml(text):
'''Encode text such that it can be used in xml
@param text: label text as string
@returns: encoded text
'''
	return text.replace('&', '&amp;').replace('>', '&gt;').replace('<', '&lt;').replace('"', '&quot;').replace("'", '&apos;')
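# For example (illustrative): encode_xml('a < b & "c"') returns
# 'a &lt; b &amp; &quot;c&quot;'. Note '&' is replaced first so the
# other entities are not double-escaped.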
def list_formats(type):
if type == EXPORT_FORMAT:
return ['HTML','LaTeX', 'Markdown (pandoc)', 'RST (sphinx)']
elif type == TEXT_FORMAT:
return ['Text', 'Wiki', 'Markdown (pandoc)', 'RST (sphinx)']
else:
assert False, 'TODO'
def canonical_name(name):
# "HTML" -> html
# "Markdown (pandoc)" -> "markdown"
# "Text" -> "plain"
name = name.lower()
if ' ' in name:
name, _ = name.split(' ', 1)
if name == 'text': return 'plain'
else: return name
def get_format(name):
'''Returns the module object for a specific format.'''
# If this method is removes, class names in formats/*.py can be made more explicit
#~ print 'DEPRECATED: get_format() is deprecated in favor if get_parser() and get_dumper()'
return get_format_module(name)
def get_format_module(name):
'''Returns the module object for a specific format
@param name: the format name
@returns: a module object
'''
return zim.plugins.get_module('zim.formats.' + canonical_name(name))
def get_parser(name, *arg, **kwarg):
'''Returns a parser object instance for a specific format
@param name: format name
@param arg: arguments to pass to the parser object
@param kwarg: keyword arguments to pass to the parser object
@returns: parser object instance (subclass of L{ParserClass})
'''
module = get_format_module(name)
klass = zim.plugins.lookup_subclass(module, ParserClass)
return klass(*arg, **kwarg)
def get_dumper(name, *arg, **kwarg):
'''Returns a dumper object instance for a specific format
@param name: format name
@param arg: arguments to pass to the dumper object
@param kwarg: keyword arguments to pass to the dumper object
@returns: dumper object instance (subclass of L{DumperClass})
'''
module = get_format_module(name)
klass = zim.plugins.lookup_subclass(module, DumperClass)
return klass(*arg, **kwarg)
class ParseTree(object):
'''Wrapper for zim parse trees.'''
# No longer derives from ElementTree, internals are not private
# TODO, also remove etree args from init
# TODO, rename to FormattedText
def __init__(self, *arg, **kwarg):
self._etree = ElementTreeModule.ElementTree(*arg, **kwarg)
self._object_cache = {}
@property
def hascontent(self):
'''Returns True if the tree contains any content at all.'''
root = self._etree.getroot()
return bool(root.getchildren()) or (root.text and not root.text.isspace())
@property
def ispartial(self):
'''Returns True when this tree is a segment of a page
(like a copy-paste buffer).
'''
return self._etree.getroot().attrib.get('partial', False)
@property
def israw(self):
'''Returns True when this is a raw tree (which is representation
of TextBuffer, but not really valid).
'''
return self._etree.getroot().attrib.get('raw', False)
def extend(self, tree):
# Do we need a deepcopy here ?
myroot = self._etree.getroot()
otherroot = tree._etree.getroot()
if otherroot.text:
children = myroot.getchildren()
if children:
last = children[-1]
last.tail = (last.tail or '') + otherroot.text
else:
myroot.text = (myroot.text or '') + otherroot.text
for element in otherroot.getchildren():
myroot.append(element)
return self
__add__ = extend
def fromstring(self, string):
'''Set the contents of this tree from XML representation.'''
parser = ElementTreeModule.XMLTreeBuilder()
parser.feed(string)
root = parser.close()
self._etree._setroot(root)
return self # allow ParseTree().fromstring(..)
def tostring(self):
'''Serialize the tree to a XML representation'''
from cStringIO import StringIO
		# The XML serializer dies when we have attributes that are not a string
for element in self._etree.getiterator('*'):
for key in element.attrib.keys():
element.attrib[key] = str(element.attrib[key])
xml = StringIO()
xml.write("<?xml version='1.0' encoding='utf-8'?>\n")
ElementTreeModule.ElementTree.write(self._etree, xml, 'utf-8')
return xml.getvalue()
def copy(self):
# By using serialization we are absolutely sure all refs are new
xml = self.tostring()
return ParseTree().fromstring(xml)
def _get_heading_element(self, level=1):
root = self._etree.getroot()
children = root.getchildren()
if root.text and not root.text.isspace():
return None
if children:
first = children[0]
			if first.tag == 'h' and int(first.attrib['level']) >= level:
return first
return None
def get_heading(self, level=1):
heading_elem = self._get_heading_element(level)
if heading_elem is not None:
return heading_elem.text
else:
return ""
def set_heading(self, text, level=1):
'''Set the first heading of the parse tree to 'text'. If the tree
already has a heading of the specified level or higher it will be
replaced. Otherwise the new heading will be prepended.
'''
heading = self._get_heading_element(level)
if heading is not None:
heading.text = text
else:
root = self._etree.getroot()
heading = ElementTreeModule.Element('h', {'level': level})
heading.text = text
heading.tail = root.text
root.text = None
root.insert(0, heading)
def pop_heading(self, level=-1):
'''If the tree starts with a heading, remove it and any trailing
whitespace.
Will modify the tree.
@returns: a 2-tuple of text and heading level or C{(None, None)}
'''
root = self._etree.getroot()
children = root.getchildren()
if root.text and not root.text.isspace():
return None, None
if children:
first = children[0]
if first.tag == 'h':
mylevel = int(first.attrib['level'])
if level == -1 or mylevel <= level:
root.remove(first)
if first.tail and not first.tail.isspace():
root.text = first.tail # Keep trailing text
return first.text, mylevel
else:
return None, None
else:
return None, None
def cleanup_headings(self, offset=0, max=6):
		'''Change the heading levels throughout the tree. This makes sure that
		all headings are nested directly under their parent (no gaps in the
levels of the headings). Also you can set an offset for the top level
and a max depth.
'''
path = []
for heading in self._etree.getiterator('h'):
level = int(heading.attrib['level'])
# find parent header in path using old level
while path and path[-1][0] >= level:
path.pop()
if not path:
newlevel = offset+1
else:
newlevel = path[-1][1] + 1
if newlevel > max:
newlevel = max
heading.attrib['level'] = newlevel
path.append((level, newlevel))
def resolve_images(self, notebook=None, path=None):
'''Resolves the source files for all images relative to a page path and
adds a '_src_file' attribute to the elements with the full file path.
'''
if notebook is None:
for element in self._etree.getiterator('img'):
filepath = element.attrib['src']
element.attrib['_src_file'] = File(filepath)
else:
for element in self._etree.getiterator('img'):
filepath = element.attrib['src']
element.attrib['_src_file'] = notebook.resolve_file(element.attrib['src'], path)
def unresolve_images(self):
'''Undo effect of L{resolve_images()}, mainly intended for
testing.
'''
for element in self._etree.getiterator('img'):
if '_src_file' in element.attrib:
element.attrib.pop('_src_file')
def encode_urls(self, mode=URL_ENCODE_READABLE):
'''Calls encode_url() on all links that contain urls.
See zim.parsing for details. Modifies the parse tree.
'''
for link in self._etree.getiterator('link'):
href = link.attrib['href']
if is_url_re.match(href):
link.attrib['href'] = url_encode(href, mode=mode)
if link.text == href:
link.text = link.attrib['href']
def decode_urls(self, mode=URL_ENCODE_READABLE):
'''Calls decode_url() on all links that contain urls.
See zim.parsing for details. Modifies the parse tree.
'''
for link in self._etree.getiterator('link'):
href = link.attrib['href']
if is_url_re.match(href):
link.attrib['href'] = url_decode(href, mode=mode)
if link.text == href:
link.text = link.attrib['href']
def count(self, text):
		'''Returns the number of occurrences of 'text' in this tree.'''
count = 0
for element in self._etree.getiterator():
if element.text:
count += element.text.count(text)
if element.tail:
count += element.tail.count(text)
return count
def countre(self, regex):
'''Returns the number of matches for a regular expression
in this tree.
'''
count = 0
for element in self._etree.getiterator():
if element.text:
newstring, n = regex.subn('', element.text)
count += n
if element.tail:
newstring, n = regex.subn('', element.tail)
count += n
return count
def get_ends_with_newline(self):
'''Checks whether this tree ends in a newline or not'''
return self._get_element_ends_with_newline(self._etree.getroot())
def _get_element_ends_with_newline(self, element):
if element.tail:
return element.tail.endswith('\n')
elif element.tag in ('li', 'h'):
return True # implicit newline
else:
children = element.getchildren()
if children:
return self._get_element_ends_with_newline(children[-1]) # recurs
elif element.text:
return element.text.endswith('\n')
else:
return False # empty element like image
def visit(self, visitor):
'''Visit all nodes of this tree
@note: If the visitor modifies the attrib dict on nodes, this
will modify the tree.
@param visitor: a L{Visitor} or L{Builder} object
'''
try:
self._visit(visitor, self._etree.getroot())
except VisitorStop:
pass
def _visit(self, visitor, node):
try:
if len(node): # Has children
visitor.start(node.tag, node.attrib)
if node.text:
visitor.text(node.text)
for child in node:
self._visit(visitor, child) # recurs
if child.tail:
visitor.text(child.tail)
visitor.end(node.tag)
else:
visitor.append(node.tag, node.attrib, node.text)
except VisitorSkip:
pass
def find(self, tag):
		'''Find first occurrence of C{tag} in the tree
@returns: a L{Node} object or C{None}
'''
for elt in self.findall(tag):
return elt # return first
else:
return None
def findall(self, tag):
		'''Find all occurrences of C{tag} in the tree
@param tag: tag name
@returns: yields L{Node} objects
'''
for elt in self._etree.iter(tag):
yield Element.new_from_etree(elt)
def replace(self, tag, func):
		'''Modify the tree by replacing all occurrences of C{tag}
by the return value of C{func}.
@param tag: tag name
@param func: function to generate replacement values.
Function will be called as::
func(node)
Where C{node} is a L{Node} object representing the subtree.
If the function returns another L{Node} object or modifies
C{node} and returns it, the subtree will be replaced by this
new node.
If the function raises L{VisitorSkip} the replace is skipped.
If the function raises L{VisitorStop} the replacement of all
nodes will stop.
'''
try:
self._replace(self._etree.getroot(), tag, func)
except VisitorStop:
pass
def _replace(self, elt, tag, func):
# Two-step replace in order to do items in order
# of appearance.
replacements = []
for i, child in enumerate(elt):
if child.tag == tag:
try:
replacement = func(Element.new_from_etree(child))
except VisitorSkip:
pass
else:
replacements.append((i, child, replacement))
elif len(child):
self._replace(child, tag, func) # recurs
else:
pass
if replacements:
self._do_replace(elt, replacements)
def _do_replace(self, elt, replacements):
offset = 0 # offset due to replacements
for i, child, node in replacements:
i += offset
if node is None or len(node) == 0:
# Remove element
tail = child.tail
elt.remove(child)
if tail:
self._insert_text(elt, i, tail)
offset -= 1
elif isinstance(node, Element):
# Just replace elements
newchild = self._node_to_etree(node)
newchild.tail = child.tail
elt[i] = newchild
elif isinstance(node, DocumentFragment):
# Insert list of elements and text
tail = child.tail
elt.remove(child)
offset -= 1
for item in node:
if isinstance(item, basestring):
self._insert_text(elt, i, item)
else:
assert isinstance(item, Element)
elt.insert(i, self._node_to_etree(item))
i += 1
offset += 1
if tail:
self._insert_text(elt, i, tail)
else:
raise TypeError, 'BUG: invalid replacement result'
@staticmethod
def _node_to_etree(node):
builder = ParseTreeBuilder()
node.visit(builder)
return builder._b.close()
def _insert_text(self, elt, i, text):
if i == 0:
if elt.text:
elt.text += text
else:
elt.text = text
else:
prev = elt[i-1]
if prev.tail:
prev.tail += text
else:
prev.tail = text
def get_objects(self, type=None):
'''Generator that yields all custom objects in the tree,
or all objects of a certain type.
@param type: object type to return or C{None} to get all
@returns: yields objects (as provided by L{ObjectManager})
'''
for elt in self._etree.getiterator(OBJECT):
if type and elt.attrib.get('type') != type:
pass
else:
obj = self._get_object(elt)
if obj is not None:
yield obj
def _get_object(self, elt):
## TODO optimize using self._object_cache or new API for
## passing on objects in the tree
type = elt.attrib.get('type')
if elt.tag == OBJECT and type:
return ObjectManager.get_object(type, elt.attrib, elt.text)
else:
return None
class VisitorStop(Exception):
'''Exception to be raised to cancel a visitor action'''
pass
class VisitorSkip(Exception):
'''Exception to be raised when the visitor should skip a leaf node
and not decent into it.
'''
pass
class Visitor(object):
'''Conceptual opposite of a builder, but with same API.
Used to walk nodes in a parsetree and call callbacks for each node.
See e.g. L{ParseTree.visit()}.
'''
def start(self, tag, attrib=None):
'''Start formatted region
Visitor objects can raise two exceptions in this method
to influence the tree traversal:
1. L{VisitorStop} will cancel the current parsing, but without
raising an error. So code implementing a visit method should
catch this.
2. L{VisitorSkip} can be raised when the visitor wants to skip
a node, and should prevent the implementation from further
		   descending into this node
@note: If the visitor modifies the attrib dict on nodes, this
will modify the tree. If this is not intended, the implementation
needs to take care to copy the attrib to break the reference.
@param tag: the tag name
@param attrib: optional dict with attributes
@implementation: optional for subclasses
'''
pass
def text(self, text):
'''Append text
@param text: text to be appended as string
@implementation: optional for subclasses
'''
pass
def end(self, tag):
'''End formatted region
@param tag: the tag name
@raises AssertionError: when tag does not match current state
@implementation: optional for subclasses
'''
pass
def append(self, tag, attrib=None, text=None):
'''Convenience function to open a tag, append text and close
		it immediately.
Can raise L{VisitorStop} or L{VisitorSkip}, see C{start()}
for the conditions.
@param tag: the tag name
@param attrib: optional dict with attributes
@param text: formatted text
@implementation: optional for subclasses, default implementation
calls L{start()}, L{text()}, and L{end()}
'''
self.start(tag, attrib)
if text is not None:
self.text(text)
self.end(tag)
class ParseTreeBuilder(Builder):
'''Builder object that builds a L{ParseTree}'''
def __init__(self, partial=False):
self.partial = partial
self._b = ElementTreeModule.TreeBuilder()
self.stack = [] #: keeps track of current open elements
self._last_char = None
def get_parsetree(self):
'''Returns the constructed L{ParseTree} object.
Can only be called once, after calling this method the object
can not be re-used.
'''
root = self._b.close()
if self.partial:
root.attrib['partial'] = True
return zim.formats.ParseTree(root)
def start(self, tag, attrib=None):
self._b.start(tag, attrib)
self.stack.append(tag)
if tag in BLOCK_LEVEL:
self._last_char = None
def text(self, text):
self._last_char = text[-1]
# FIXME hack for backward compat
if self.stack and self.stack[-1] in (HEADING, LISTITEM):
text = text.strip('\n')
self._b.data(text)
def end(self, tag):
if tag != self.stack[-1]:
raise AssertionError, 'Unmatched tag closed: %s' % tag
if tag in BLOCK_LEVEL:
if self._last_char is not None and not self.partial:
#~ assert self._last_char == '\n', 'Block level text needs to end with newline'
if self._last_char != '\n' and tag not in (HEADING, LISTITEM):
self._b.data('\n')
				# FIXME check for HEADING LISTITEM for backward compat
# TODO if partial only allow missing \n at end of tree,
# delay message and trigger if not followed by get_parsetree ?
self._b.end(tag)
self.stack.pop()
# FIXME hack for backward compat
if tag == HEADING:
self._b.data('\n')
self._last_char = None
def append(self, tag, attrib=None, text=None):
if tag in BLOCK_LEVEL:
if text and not text.endswith('\n'):
text += '\n'
# FIXME hack for backward compat
if text and tag in (HEADING, LISTITEM):
text = text.strip('\n')
self._b.start(tag, attrib)
if text:
self._b.data(text)
self._b.end(tag)
# FIXME hack for backward compat
if tag == HEADING:
self._b.data('\n')
self._last_char = None
count_eol_re = re.compile(r'\n+\Z')
split_para_re = re.compile(r'((?:^[ \t]*\n){2,})', re.M)
class OldParseTreeBuilder(object):
'''This class supplies an alternative for xml.etree.ElementTree.TreeBuilder
which cleans up the tree on the fly while building it. The main use
is to normalize the tree that is produced by the editor widget, but it can
also be used on other "dirty" interfaces.
This builder takes care of the following issues:
- Inline tags ('emphasis', 'strong', 'h', etc.) can not span multiple lines
- Tags can not contain only whitespace
- Tags can not be empty (with the exception of the 'img' tag)
- There should be an empty line before each 'h', 'p' or 'pre'
(with the exception of the first tag in the tree)
- The 'p' and 'pre' elements should always end with a newline ('\\n')
- Each 'p', 'pre' and 'h' should be postfixed with a newline ('\\n')
(as a results 'p' and 'pre' are followed by an empty line, the
'h' does not end in a newline itself, so it is different)
	  - Newlines ('\\n') after a <li> element are removed (optional)
- The element '_ignore_' is silently ignored
'''
## TODO TODO this also needs to be based on Builder ##
def __init__(self, remove_newlines_after_li=True):
assert remove_newlines_after_li, 'TODO'
self._stack = [] # stack of elements for open tags
self._last = None # last element opened or closed
self._data = [] # buffer with data
self._tail = False # True if we are after an end tag
self._seen_eol = 2 # track line ends on flushed data
# starts with "2" so check is ok for first top level element
def start(self, tag, attrib=None):
if tag == '_ignore_':
return self._last
elif tag == 'h':
self._flush(need_eol=2)
elif tag in ('p', 'pre'):
self._flush(need_eol=1)
else:
self._flush()
#~ print 'START', tag
if tag == 'h':
if not (attrib and 'level' in attrib):
logger.warn('Missing "level" attribute for heading')
attrib = attrib or {}
attrib['level'] = 1
elif tag == 'link':
if not (attrib and 'href' in attrib):
logger.warn('Missing "href" attribute for link')
attrib = attrib or {}
attrib['href'] = "404"
# TODO check other mandatory properties !
if attrib:
self._last = ElementTreeModule.Element(tag, attrib)
else:
self._last = ElementTreeModule.Element(tag)
if self._stack:
self._stack[-1].append(self._last)
else:
assert tag == 'zim-tree', 'root element needs to be "zim-tree"'
self._stack.append(self._last)
self._tail = False
return self._last
def end(self, tag):
if tag == '_ignore_':
return None
elif tag in ('p', 'pre'):
self._flush(need_eol=1)
else:
self._flush()
#~ print 'END', tag
self._last = self._stack[-1]
assert self._last.tag == tag, \
"end tag mismatch (expected %s, got %s)" % (self._last.tag, tag)
self._tail = True
if len(self._stack) > 1 and not (tag == 'img' or tag == 'object'
or (self._last.text and not self._last.text.isspace())
or self._last.getchildren() ):
# purge empty tags
if self._last.text and self._last.text.isspace():
self._append_to_previous(self._last.text)
empty = self._stack.pop()
self._stack[-1].remove(empty)
children = self._stack[-1].getchildren()
if children:
self._last = children[-1]
if not self._last.tail is None:
self._data = [self._last.tail]
self._last.tail = None
else:
self._last = self._stack[-1]
self._tail = False
if not self._last.text is None:
self._data = [self._last.text]
self._last.text = None
return empty
else:
return self._stack.pop()
def data(self, text):
assert isinstance(text, basestring)
self._data.append(text)
def _flush(self, need_eol=0):
# need_eol makes sure previous data ends with \n
#~ print 'DATA:', self._data
text = ''.join(self._data)
# Fix trailing newlines
if text:
m = count_eol_re.search(text)
if m: self._seen_eol = len(m.group(0))
else: self._seen_eol = 0
if need_eol > self._seen_eol:
text += '\n' * (need_eol - self._seen_eol)
self._seen_eol = need_eol
# Fix prefix newlines
if self._tail and self._last.tag in ('h', 'p') \
and not text.startswith('\n'):
if text:
text = '\n' + text
else:
text = '\n'
self._seen_eol = 1
elif self._tail and self._last.tag == 'li' \
and text.startswith('\n'):
text = text[1:]
if not text.strip('\n'):
self._seen_eol -=1
if text:
assert not self._last is None, 'data seen before root element'
self._data = []
# Tags that are not allowed to have newlines
if not self._tail and self._last.tag in (
'h', 'emphasis', 'strong', 'mark', 'strike', 'code'):
# assume no nested tags in these types ...
if self._seen_eol:
text = text.rstrip('\n')
self._data.append('\n' * self._seen_eol)
self._seen_eol = 0
lines = text.split('\n')
for line in lines[:-1]:
assert self._last.text is None, "internal error (text)"
assert self._last.tail is None, "internal error (tail)"
if line and not line.isspace():
self._last.text = line
self._last.tail = '\n'
attrib = self._last.attrib.copy()
self._last = ElementTreeModule.Element(self._last.tag, attrib)
self._stack[-2].append(self._last)
self._stack[-1] = self._last
else:
self._append_to_previous(line + '\n')
assert self._last.text is None, "internal error (text)"
self._last.text = lines[-1]
else:
# TODO split paragraphs
if self._tail:
assert self._last.tail is None, "internal error (tail)"
self._last.tail = text
else:
assert self._last.text is None, "internal error (text)"
self._last.text = text
else:
self._data = []
def close(self):
assert len(self._stack) == 0, 'missing end tags'
assert not self._last is None and self._last.tag == 'zim-tree', 'missing root element'
return self._last
def _append_to_previous(self, text):
'''Add text before current element'''
parent = self._stack[-2]
children = parent.getchildren()[:-1]
if children:
if children[-1].tail:
children[-1].tail = children[-1].tail + text
else:
children[-1].tail = text
else:
if parent.text:
parent.text = parent.text + text
else:
parent.text = text
class ParserClass(object):
'''Base class for parsers
Each format that can be used natively should define a class
'Parser' which inherits from this base class.
'''
def parse(self, input):
'''ABSTRACT METHOD: needs to be overloaded by sub-classes.
This method takes a text or an iterable with lines and returns
a ParseTree object.
'''
raise NotImplementedError
@classmethod
def parse_image_url(self, url):
'''Parse urls style options for images like "foo.png?width=500" and
returns a dict with the options. The base url will be in the dict
as 'src'.
'''
i = url.find('?')
if i > 0:
attrib = {'src': url[:i]}
for option in url[i+1:].split('&'):
if option.find('=') == -1:
logger.warn('Malformed options in "%s"', url)
break
k, v = option.split('=', 1)
if k in ('width', 'height', 'type', 'href'):
if len(v) > 0:
value = url_decode(v, mode=URL_ENCODE_DATA)
attrib[str(k)] = value # str to avoid unicode key
else:
logger.warn('Unknown attribute "%s" in "%s"', k, url)
return attrib
else:
return {'src': url}
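# Example (behavior as implemented above; values pass through url_decode()):
#   ParserClass.parse_image_url('foo.png?width=500&height=300')
#     --> {'src': 'foo.png', 'width': '500', 'height': '300'}
#   ParserClass.parse_image_url('foo.png')  --> {'src': 'foo.png'}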
import collections
DumperContextElement = collections.namedtuple('DumperContextElement', ('tag', 'attrib', 'text'))
# FIXME unify this class with a generic Element class (?)
class DumperClass(Visitor):
'''Base class for dumper classes. Dumper classes serialize the content
of a parse tree back to a text representation of the page content.
Therefore this class implements the visitor API, so it can be
used with any parse tree implementation or parser object that supports
this API.
To implement a dumper class, you need to define handlers for all
tags that can appear in a page. Tags that are represented by a simple
prefix and postfix string can be defined in the dictionary C{TAGS}.
For example to define the italic tag in html output the dictionary
should contain a definition like: C{EMPHASIS: ('<i>', '</i>')}.
For tags that require more complex logic you can define a method to
format the tag. Typical usage is to format link attributes in such
a method. The method name should be C{dump_} + the name of the tag,
e.g. C{dump_link()} for links (see the constants with tag names for
the other tags). Such a dump method will get 3 arguments: the tag
name itself, a dictionary with the tag attributes and a list of
strings that form the tag content. The method should return a list
of strings that represents the formatted text.
This base class takes care of a stack of nested formatting tags and
when a tag is closed either picks the appropriate prefix and postfix
from C{TAGS} or calls the corresponding C{dump_} method. As a result
tags are serialized depth-first.
@ivar linker: the (optional) L{Linker} object, used to resolve links
@ivar template_options: a dict with options that may be set in a
template (so inherently not safe !) to control the output style
@ivar context: the stack of open tags maintained by this class. Can
be used in C{dump_} methods to inspect the parent scope of the
format. Elements on this stack have "tag", "attrib" and "text"
attributes. Keep in mind that the parent scope is not yet complete
when a tag is serialized.
'''
TAGS = {} #: dict mapping formatting tags to 2-tuples of a prefix and a postfix string
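# A minimal sketch of a concrete dumper following the conventions documented
# above (EMPHASIS and STRONG are assumed to be tag-name constants defined
# elsewhere in this module):
#
#   class WikiDumper(DumperClass):
#       TAGS = {EMPHASIS: ('//', '//'), STRONG: ('**', '**')}
#
#       def dump_link(self, tag, attrib, strings):
#           # attrib is already a copy (see start()), safe to inspect
#           return ['[[', attrib['href'], ']]']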
def __init__(self, linker=None, template_options=None):
self.linker = linker
self.template_options = template_options or {}
self.context = []
self._text = []
def dump(self, tree):
'''Convenience method to dump a given tree.
@param tree: a parse tree object that supports a C{visit()} method
'''
# FIXME - issue here is that we need to reset state - should be in __init__
self._text = []
self.context = [DumperContextElement(None, None, self._text)]
tree.visit(self)
if len(self.context) != 1:
raise AssertionError, 'Unclosed tags on tree: %s' % self.context[-1].tag
#~ import pprint; pprint.pprint(self._text)
return self.get_lines() # FIXME - maybe just return text ?
def get_lines(self):
'''Return the dumped content as a list of lines
Should only be called after closing the top level element
'''
return u''.join(self._text).splitlines(1)
def start(self, tag, attrib=None):
if attrib:
attrib = attrib.copy() # Ensure dumping does not change tree
self.context.append(DumperContextElement(tag, attrib, []))
def text(self, text):
assert not text is None
text = self.encode_text(text)
self.context[-1].text.append(text)
def end(self, tag):
if not tag or tag != self.context[-1].tag:
raise AssertionError, 'Unexpected tag closed: %s' % tag
_, attrib, strings = self.context.pop()
if tag in self.TAGS:
assert strings, 'Can not append empty %s element' % tag
start, end = self.TAGS[tag]
strings.insert(0, start)
strings.append(end)
elif tag in FORMATTEDTEXT:
pass
else:
try:
method = getattr(self, 'dump_'+tag)
except AttributeError:
raise AssertionError, 'BUG: Unknown tag: %s' % tag
strings = method(tag, attrib, strings)
#~ try:
#~ u''.join(strings)
#~ except:
#~ print "BUG: %s returned %s" % ('dump_'+tag, strings)
if strings is not None:
self.context[-1].text.extend(strings)
def append(self, tag, attrib=None, text=None):
strings = None
if tag in self.TAGS:
assert text is not None, 'Can not append empty %s element' % tag
start, end = self.TAGS[tag]
text = self.encode_text(text)
strings = [start, text, end]
elif tag == FORMATTEDTEXT:
if text is not None:
strings = [self.encode_text(text)]
else:
if attrib:
attrib = attrib.copy() # Ensure dumping does not change tree
try:
method = getattr(self, 'dump_'+tag)
except AttributeError:
raise AssertionError, 'BUG: Unknown tag: %s' % tag
if text is None:
strings = method(tag, attrib, None)
else:
strings = method(tag, attrib, [self.encode_text(text)])
if strings is not None:
self.context[-1].text.extend(strings)
def encode_text(self, text):
'''Optional method to encode text elements in the output
@note: Do not apply text encoding in the C{dump_} methods, the
list of strings given there may contain prefix and postfix
formatting of nested tags.
@param text: text to be encoded
@returns: encoded text
@implementation: optional, default just returns unmodified input
'''
return text
def prefix_lines(self, prefix, strings):
'''Convenience method to wrap a number of lines with e.g. an
indenting sequence.
@param prefix: a string to prefix each line
@param strings: a list of pieces of text
@returns: a new list of lines, each starting with prefix
'''
lines = u''.join(strings).splitlines(1)
return [prefix + l for l in lines]
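# Example: prefix_lines(u'\t', [u'foo\nbar\n']) --> [u'\tfoo\n', u'\tbar\n']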
def dump_object(self, tag, attrib, strings=None):
'''Dumps object using proper ObjectManager'''
format = str(self.__class__.__module__).split('.')[-1]
if 'type' in attrib:
obj = ObjectManager.get_object(attrib['type'], attrib, u''.join(strings))
output = obj.dump(format, self, self.linker)
if output is not None:
return [output]
return self.dump_object_fallback(tag, attrib, strings)
# TODO put content in attrib, use text for caption (with full recursion)
# See img
def dump_object_fallback(self, tag, attrib, strings=None):
'''Method to serialize objects that do not have their own
handler for this format.
@implementation: must be implemented in sub-classes
'''
raise NotImplementedError
def isrtl(self, text):
'''Check for Right To Left script
@param text: the text to check
@returns: C{True} if C{text} starts with characters in a
RTL script, or C{None} if direction is not determined.
'''
if pango is None:
return None
# It seems the find_base_dir() function is not documented in the
# python language bindings. The Gtk C code shows the signature:
#
# pango.find_base_dir(text, length)
#
# It either returns a direction, or NEUTRAL if e.g. text only
# contains punctuation but no real characters.
dir = pango.find_base_dir(text, len(text))
if dir == pango.DIRECTION_NEUTRAL:
return None
else:
return dir == pango.DIRECTION_RTL
class BaseLinker(object):
'''Base class for linker objects
Linker object translate links in zim pages to (relative) URLs.
This is used when exporting data to resolve links.
Relative URLs start with "./" or "../" and should be interpreted
in the same way as in HTML. Both URLs and relative URLs are
already URL encoded.
'''
def __init__(self):
self._icons = {}
self._links = {}
self.path = None
self.usebase = False
self.base = None
def set_path(self, path):
'''Set the page path for resolving links'''
self.path = path
self._links = {}
def set_base(self, dir):
'''Set a path to use a base for linking files'''
assert isinstance(dir, Dir)
self.base = dir
def set_usebase(self, usebase):
'''Set whether the format supports relative files links or not'''
self.usebase = usebase
def resolve_file(self, link):
'''Find the source file for an attachment
Used e.g. by the latex format to find files for equations to
be inlined. Do not use this method to resolve links, the file
given here might be temporary and is not guaranteed to be
available after the export. Use L{link()} or C{link_file()}
to resolve links to files.
@returns: a L{File} object or C{None} if no file was found
@implementation: must be implemented by child classes
'''
raise NotImplementedError
def link(self, link):
'''Returns an url for a link in a zim page
This method is used to translate links of any type. It determines
the link type and dispatches to L{link_page()}, L{link_file()},
or other C{link_*} methods.
Results of this method are cached, so the dispatch method is only
called once for repeated occurrences. Setting a new path with L{set_path()}
will clear the cache.
@param link: link to be translated
@type link: string
@returns: url, uri or whatever link notation is relevant in the
context of this linker
@rtype: string
'''
assert not self.path is None
if not link in self._links:
type = link_type(link)
if type == 'page': href = self.link_page(link)
elif type == 'file': href = self.link_file(link)
elif type == 'mailto':
if link.startswith('mailto:'):
href = self.link_mailto(link)
else:
href = self.link_mailto('mailto:' + link)
elif type == 'interwiki':
href = zim.notebook.interwiki_link(link)
if href and href != link:
href = self.link(href) # recurs
else:
logger.warn('No URL found for interwiki link "%s"', link)
link = href
elif type == 'notebook':
href = self.link_notebook(link)
else: # I dunno, some url ?
method = 'link_' + type
if hasattr(self, method):
href = getattr(self, method)(link)
else:
href = link
self._links[link] = href
return self._links[link]
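# Example dispatch (a sketch; assumes link_type() classifies plain urls by
# scheme): link('Foo:Bar') goes through link_page(), while an 'http://...'
# url looks for an optional link_http() method and is otherwise returned
# unchanged.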
def img(self, src):
'''Returns an url for image file 'src' '''
return self.link_file(src)
def icon(self, name):
'''Returns an url for an icon'''
if not name in self._icons:
self._icons[name] = data_file('pixmaps/%s.png' % name).uri
return self._icons[name]
def resource(self, path):
'''To be overloaded, return an url for template resources'''
raise NotImplementedError
def link_page(self, link):
'''To be overloaded, return an url for a page link
@implementation: must be implemented by child classes
'''
raise NotImplementedError
def link_file(self, path):
'''To be overloaded, return an url for a file link
@implementation: must be implemented by child classes
'''
raise NotImplementedError
def link_mailto(self, uri):
'''Optional method, default just returns uri'''
return uri
def link_notebook(self, url):
'''Optional method, default just returns url'''
return url
class StubLinker(BaseLinker):
'''Linker used for testing - just gives back the link as it was
parsed. DO NOT USE outside of testing.
'''
def __init__(self):
BaseLinker.__init__(self)
self.path = '<PATH>'
self.base = Dir('<NOBASE>')
def resolve_file(self, link):
return self.base.file(link)
# Very simple stub, allows finding files by rel path for testing
def icon(self, name):
return 'icon:' + name
def resource(self, path):
return path
def link_page(self, link):
return link
def link_file(self, path):
return path
class Node(list):
'''Base class for DOM-like access to the document structure.
@note: This class is not optimized for keeping large structures
in memory.
@ivar tag: tag name
@ivar attrib: dict with attributes
'''
__slots__ = ('tag', 'attrib')
def __init__(self, tag, attrib=None, *content):
self.tag = tag
self.attrib = attrib
if content:
self.extend(content)
@classmethod
def new_from_etree(klass, elt):
obj = klass(elt.tag, dict(elt.attrib))
if elt.text:
obj.append(elt.text)
for child in elt:
subnode = klass.new_from_etree(child) # recurs
obj.append(subnode)
if child.tail:
obj.append(child.tail)
return obj
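# Example: for an etree element parsed from '<p>foo<b>bar</b>tail</p>' this
# yields Node('p', {}, 'foo', Node('b', {}, 'bar'), 'tail') -- text, child
# nodes and tails become list items in document order.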
def get(self, key, default=None):
if self.attrib:
return self.attrib.get(key, default)
else:
return default
def set(self, key, value):
if not self.attrib:
self.attrib = {}
self.attrib[key] = value
def append(self, item):
if isinstance(item, DocumentFragment):
list.extend(self, item)
else:
list.append(self, item)
def gettext(self):
'''Get text as string
Ignores any markup and attributes and simply returns textual
content.
@note: do _not_ use as replacement for exporting to plain text
@returns: string
'''
strings = self._gettext()
return u''.join(strings)
def _gettext(self):
strings = []
for item in self:
if isinstance(item, basestring):
strings.append(item)
else:
strings.extend(item._gettext())
return strings
def toxml(self):
strings = self._toxml()
return u''.join(strings)
def _toxml(self):
strings = []
if self.attrib:
strings.append('<%s' % self.tag)
for key in sorted(self.attrib):
strings.append(' %s="%s"' % (key, encode_xml(self.attrib[key])))
strings.append('>')
else:
strings.append("<%s>" % self.tag)
for item in self:
if isinstance(item, basestring):
strings.append(encode_xml(item))
else:
strings.extend(item._toxml())
strings.append("</%s>" % self.tag)
return strings
__repr__ = toxml
def visit(self, visitor):
if len(self) == 1 and isinstance(self[0], basestring):
visitor.append(self.tag, self.attrib, self[0])
else:
visitor.start(self.tag, self.attrib)
for item in self:
if isinstance(item, basestring):
visitor.text(item)
else:
item.visit(visitor)
visitor.end(self.tag)
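# Example: Node('strong', None, 'bold').visit(v) short-circuits to
# v.append('strong', None, 'bold'); mixed content instead produces
# v.start(...), v.text() and child visits, then v.end(), in document order.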
class Element(Node):
'''Element class for DOM-like access'''
pass
class DocumentFragment(Node):
'''Document fragment class for DOM-like access'''
def __init__(self, *content):
self.tag = FRAGMENT
self.attrib = None
if content:
self.extend(content)
|
gdw2/zim
|
zim/formats/__init__.py
|
Python
|
gpl-2.0
| 43,556
|
[
"VisIt"
] |
b848642300753fc12dfd49e899602daf2a95237ce41d44759a85b318d99921dd
|
"""Soundex algorithm
This program is part of "Dive Into Python", a free Python book for
experienced programmers. Visit http://diveintopython.org/ for the
latest version.
"""
__author__ = "Mark Pilgrim (mark@diveintopython.org)"
__version__ = "$Revision: 1.2 $"
__date__ = "$Date: 2004/05/06 21:36:36 $"
__copyright__ = "Copyright (c) 2004 Mark Pilgrim"
__license__ = "Python"
import string, re
charToSoundex = {"A": "9",
"B": "1",
"C": "2",
"D": "3",
"E": "9",
"F": "1",
"G": "2",
"H": "9",
"I": "9",
"J": "2",
"K": "2",
"L": "4",
"M": "5",
"N": "5",
"O": "9",
"P": "1",
"Q": "2",
"R": "6",
"S": "2",
"T": "3",
"U": "9",
"V": "1",
"W": "9",
"X": "2",
"Y": "9",
"Z": "2"}
isOnlyChars = re.compile('^[A-Za-z]+$').search
def soundex(source):
"convert string to Soundex equivalent"
# Soundex requirements:
# source string must be at least 1 character
# and must consist entirely of letters
if not isOnlyChars(source):
return "0000"
# Soundex algorithm:
# 1. make first character uppercase
source = source[0].upper() + source[1:]
# 2. translate all other characters to Soundex digits
digits = source[0]
for s in source[1:]:
s = s.upper()
digits += charToSoundex[s]
# 3. remove consecutive duplicates
digits2 = digits[0]
for d in digits[1:]:
if digits2[-1] != d:
digits2 += d
# 4. remove all "9"s
digits3 = re.sub('9', '', digits2)
# 5. pad end with "0"s to 4 characters
while len(digits3) < 4:
digits3 += "0"
# 6. return first 4 characters
return digits3[:4]
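# Worked example: soundex('Pilgrim')
#   digits  = 'P' + 9(I) 4(L) 2(G) 6(R) 9(I) 5(M) -> 'P942695'
#   digits2 = 'P942695'  (no consecutive duplicates to drop)
#   digits3 = 'P4265'    (all "9"s removed)
#   result  = 'P426'     (truncated to 4 characters)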
if __name__ == '__main__':
from timeit import Timer
names = ('Woo', 'Pilgrim', 'Flingjingwaller')
for name in names:
statement = "soundex('%s')" % name
t = Timer(statement, "from __main__ import soundex")
print name.ljust(15), soundex(name), min(t.repeat())
|
tapomayukh/projects_in_python
|
sandbox_tapo/src/refs/diveintopython-pdf-5.4/diveintopython-5.4/py/soundex/stage1/soundex1c.py
|
Python
|
mit
| 2,335
|
[
"VisIt"
] |
6bcdf60c6c164544b16acf95ff01008eaf86d95450780fbb4d0073dc2d849fda
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import unittest
import os
import pickle
import numpy as np
import warnings
from pymatgen import SETTINGS
import scipy.constants as const
from pymatgen.util.testing import PymatgenTest
from pymatgen.io.vasp.inputs import Incar, Poscar, Kpoints, Potcar, \
PotcarSingle, VaspInput
from pymatgen import Composition, Structure
from pymatgen.electronic_structure.core import Magmom
from monty.io import zopen
"""
Created on Jul 16, 2012
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyue@mit.edu"
__date__ = "Jul 16, 2012"
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
'test_files')
class PoscarTest(PymatgenTest):
def test_init(self):
filepath = os.path.join(test_dir, 'POSCAR')
poscar = Poscar.from_file(filepath)
comp = poscar.structure.composition
self.assertEqual(comp, Composition("Fe4P4O16"))
# Vasp 4 type with symbols at the end.
poscar_string = """Test1
1.0
3.840198 0.000000 0.000000
1.920099 3.325710 0.000000
0.000000 -2.217138 3.135509
1 1
direct
0.000000 0.000000 0.000000 Si
0.750000 0.500000 0.750000 F
"""
poscar = Poscar.from_string(poscar_string)
self.assertEqual(poscar.structure.composition, Composition("SiF"))
poscar_string = ""
self.assertRaises(ValueError, Poscar.from_string, poscar_string)
# Vasp 4 style file with default names, i.e. no element symbol found.
poscar_string = """Test2
1.0
3.840198 0.000000 0.000000
1.920099 3.325710 0.000000
0.000000 -2.217138 3.135509
1 1
direct
0.000000 0.000000 0.000000
0.750000 0.500000 0.750000
"""
with warnings.catch_warnings():
warnings.simplefilter("ignore")
poscar = Poscar.from_string(poscar_string)
self.assertEqual(poscar.structure.composition, Composition("HHe"))
# Vasp 4 style file with selective dynamics and element symbols at the end.
poscar_string = """Test3
1.0
3.840198 0.000000 0.000000
1.920099 3.325710 0.000000
0.000000 -2.217138 3.135509
1 1
Selective dynamics
direct
0.000000 0.000000 0.000000 T T T Si
0.750000 0.500000 0.750000 F F F O
"""
poscar = Poscar.from_string(poscar_string)
self.assertEqual(poscar.selective_dynamics, [[True, True, True],
[False, False, False]])
self.selective_poscar = poscar
def test_from_file(self):
filepath = os.path.join(test_dir, 'POSCAR.symbols_natoms_multilines')
poscar = Poscar.from_file(filepath, check_for_POTCAR=False,
read_velocities=False)
ordered_expected_elements = ['Fe', 'Cr', 'Fe', 'Fe', 'Cr', 'Cr', 'Cr',
'Cr',
'Fe', 'Fe', 'Cr', 'Fe', 'Cr', 'Fe', 'Fe',
'Cr',
'Fe', 'Cr', 'Fe', 'Fe', 'Fe', 'Fe', 'Cr',
'Fe',
'Ni', 'Fe', 'Fe', 'Fe', 'Fe', 'Fe', 'Cr',
'Cr',
'Cr', 'Fe', 'Fe', 'Fe', 'Fe', 'Fe', 'Fe',
'Cr',
'Fe', 'Fe', 'Ni', 'Fe', 'Fe', 'Fe', 'Cr',
'Cr',
'Fe', 'Fe', 'Fe', 'Fe', 'Fe']
self.assertEqual([site.specie.symbol for site in poscar.structure],
ordered_expected_elements)
def test_to_from_dict(self):
poscar_string = """Test3
1.0
3.840198 0.000000 0.000000
1.920099 3.325710 0.000000
0.000000 -2.217138 3.135509
1 1
Selective dynamics
direct
0.000000 0.000000 0.000000 T T T Si
0.750000 0.500000 0.750000 F F F O
"""
poscar = Poscar.from_string(poscar_string)
d = poscar.as_dict()
poscar2 = Poscar.from_dict(d)
self.assertEqual(poscar2.comment, "Test3")
self.assertTrue(all(poscar2.selective_dynamics[0]))
self.assertFalse(all(poscar2.selective_dynamics[1]))
def test_cart_scale(self):
poscar_string = """Test1
1.1
3.840198 0.000000 0.000000
1.920099 3.325710 0.000000
0.000000 -2.217138 3.135509
Si F
1 1
cart
0.000000 0.00000000 0.00000000
3.840198 1.50000000 2.35163175
"""
p = Poscar.from_string(poscar_string)
site = p.structure[1]
self.assertArrayAlmostEqual(site.coords,
np.array([3.840198, 1.5, 2.35163175]) * 1.1)
def test_significant_figures(self):
si = 14
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
# Silicon structure for testing.
latt = [[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]]
struct = Structure(latt, [si, si], coords)
poscar = Poscar(struct)
expected_str = '''Si2
1.0
3.84 0.00 0.00
1.92 3.33 0.00
0.00 -2.22 3.14
Si
2
direct
0.00 0.00 0.00 Si
0.75 0.50 0.75 Si
'''
actual_str = poscar.get_string(significant_figures=2)
self.assertEqual(actual_str, expected_str, "Wrong POSCAR output!")
def test_str(self):
si = 14
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
# Silicon structure for testing.
latt = [[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]]
struct = Structure(latt, [si, si], coords)
poscar = Poscar(struct)
expected_str = '''Si2
1.0
3.840198 0.000000 0.000000
1.920099 3.325710 0.000000
0.000000 -2.217138 3.135509
Si
2
direct
0.000000 0.000000 0.000000 Si
0.750000 0.500000 0.750000 Si
'''
self.assertEqual(str(poscar), expected_str, "Wrong POSCAR output!")
# Vasp 4 type with symbols at the end.
poscar_string = """Test1
1.0
-3.840198 0.000000 0.000000
1.920099 3.325710 0.000000
0.000000 -2.217138 3.135509
1 1
direct
0.000000 0.000000 0.000000 Si
0.750000 0.500000 0.750000 F
"""
expected = """Test1
1.0
3.840198 -0.000000 -0.000000
-1.920099 -3.325710 -0.000000
-0.000000 2.217138 -3.135509
Si F
1 1
direct
0.000000 0.000000 0.000000 Si
0.750000 0.500000 0.750000 F
"""
poscar = Poscar.from_string(poscar_string)
self.assertEqual(str(poscar), expected)
def test_from_md_run(self):
# Parsing from an MD type run with velocities and predictor corrector data
p = Poscar.from_file(os.path.join(test_dir, "CONTCAR.MD"),
check_for_POTCAR=False)
self.assertAlmostEqual(np.sum(np.array(p.velocities)), 0.0065417961324)
self.assertEqual(p.predictor_corrector[0][0][0], 0.33387820E+00)
self.assertEqual(p.predictor_corrector[0][1][1], -0.10583589E-02)
def test_write_MD_poscar(self):
# Parsing from an MD type run with velocities and predictor corrector data
# And writing a new POSCAR from the new structure
p = Poscar.from_file(os.path.join(test_dir, "CONTCAR.MD"),
check_for_POTCAR=False)
tempfname = "POSCAR.testing.md"
p.write_file(tempfname)
p3 = Poscar.from_file(tempfname)
self.assertArrayAlmostEqual(p.structure.lattice.abc,
p3.structure.lattice.abc, 5)
self.assertArrayAlmostEqual(p.velocities,
p3.velocities, 5)
self.assertArrayAlmostEqual(p.predictor_corrector,
p3.predictor_corrector, 5)
self.assertEqual(p.predictor_corrector_preamble,
p3.predictor_corrector_preamble)
os.remove(tempfname)
def test_setattr(self):
filepath = os.path.join(test_dir, 'POSCAR')
poscar = Poscar.from_file(filepath)
self.assertRaises(ValueError, setattr, poscar, 'velocities',
[[0, 0, 0]])
poscar.selective_dynamics = np.array([[True, False, False]] * 24)
ans = """
LiFePO4
1.0
10.411767 0.000000 0.000000
0.000000 6.067172 0.000000
0.000000 0.000000 4.759490
Fe P O
4 4 16
Selective dynamics
direct
0.218728 0.750000 0.474867 T F F Fe
0.281272 0.250000 0.974867 T F F Fe
0.718728 0.750000 0.025133 T F F Fe
0.781272 0.250000 0.525133 T F F Fe
0.094613 0.250000 0.418243 T F F P
0.405387 0.750000 0.918243 T F F P
0.594613 0.250000 0.081757 T F F P
0.905387 0.750000 0.581757 T F F P
0.043372 0.750000 0.707138 T F F O
0.096642 0.250000 0.741320 T F F O
0.165710 0.046072 0.285384 T F F O
0.165710 0.453928 0.285384 T F F O
0.334290 0.546072 0.785384 T F F O
0.334290 0.953928 0.785384 T F F O
0.403358 0.750000 0.241320 T F F O
0.456628 0.250000 0.207138 T F F O
0.543372 0.750000 0.792862 T F F O
0.596642 0.250000 0.758680 T F F O
0.665710 0.046072 0.214616 T F F O
0.665710 0.453928 0.214616 T F F O
0.834290 0.546072 0.714616 T F F O
0.834290 0.953928 0.714616 T F F O
0.903358 0.750000 0.258680 T F F O
0.956628 0.250000 0.292862 T F F O"""
self.assertEqual(str(poscar).strip(), ans.strip())
def test_velocities(self):
si = 14
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
# Silicon structure for testing.
latt = [[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]]
struct = Structure(latt, [si, si], coords)
poscar = Poscar(struct)
poscar.set_temperature(900)
v = np.array(poscar.velocities)
for x in np.sum(v, axis=0):
self.assertAlmostEqual(x, 0, 7)
temperature = struct[0].specie.atomic_mass.to("kg") * \
np.sum(v ** 2) / (3 * const.k) * 1e10
self.assertAlmostEqual(temperature, 900, 4,
'Temperature instantiated incorrectly')
poscar.set_temperature(700)
v = np.array(poscar.velocities)
for x in np.sum(v, axis=0):
self.assertAlmostEqual(
x, 0, 7, 'Velocities initialized with a net momentum')
temperature = struct[0].specie.atomic_mass.to("kg") * \
np.sum(v ** 2) / (3 * const.k) * 1e10
self.assertAlmostEqual(temperature, 700, 4,
'Temperature instantiated incorrectly')
def test_write(self):
filepath = os.path.join(test_dir, 'POSCAR')
poscar = Poscar.from_file(filepath)
tempfname = "POSCAR.testing"
poscar.write_file(tempfname)
p = Poscar.from_file(tempfname)
self.assertArrayAlmostEqual(poscar.structure.lattice.abc,
p.structure.lattice.abc, 5)
os.remove(tempfname)
class IncarTest(unittest.TestCase):
def setUp(self):
file_name = os.path.join(test_dir, 'INCAR')
self.incar = Incar.from_file(file_name)
def test_init(self):
incar = self.incar
incar["LDAU"] = "T"
self.assertEqual(incar["ALGO"], "Damped", "Wrong Algo")
self.assertEqual(float(incar["EDIFF"]), 1e-4, "Wrong EDIFF")
self.assertEqual(type(incar["LORBIT"]), int)
def test_diff(self):
incar = self.incar
filepath1 = os.path.join(test_dir, 'INCAR')
incar1 = Incar.from_file(filepath1)
filepath2 = os.path.join(test_dir, 'INCAR.2')
incar2 = Incar.from_file(filepath2)
filepath3 = os.path.join(test_dir, 'INCAR.3')
incar3 = Incar.from_file(filepath2)
self.assertEqual(
incar1.diff(incar2),
{'Different': {
'NELM': {'INCAR1': None, 'INCAR2': 100},
'ISPIND': {'INCAR1': 2, 'INCAR2': None},
'LWAVE': {'INCAR1': True, 'INCAR2': False},
'LDAUPRINT': {'INCAR1': None, 'INCAR2': 1},
'MAGMOM': {'INCAR1': [6, -6, -6, 6, 0.6, 0.6, 0.6,
0.6, 0.6, 0.6, 0.6, 0.6,
0.6, 0.6, 0.6, 0.6, 0.6,
0.6, 0.6, 0.6, 0.6, 0.6, 0.6, 0.6],
'INCAR2': None},
'NELMIN': {'INCAR1': None, 'INCAR2': 3},
'ENCUTFOCK': {'INCAR1': 0.0, 'INCAR2': None},
'HFSCREEN': {'INCAR1': 0.207, 'INCAR2': None},
'LSCALU': {'INCAR1': False, 'INCAR2': None},
'ENCUT': {'INCAR1': 500, 'INCAR2': None},
'NSIM': {'INCAR1': 1, 'INCAR2': None},
'ICHARG': {'INCAR1': None, 'INCAR2': 1},
'NSW': {'INCAR1': 99, 'INCAR2': 51},
'NKRED': {'INCAR1': 2, 'INCAR2': None},
'NUPDOWN': {'INCAR1': 0, 'INCAR2': None},
'LCHARG': {'INCAR1': True, 'INCAR2': None},
'LPLANE': {'INCAR1': True, 'INCAR2': None},
'ISMEAR': {'INCAR1': 0, 'INCAR2': -5},
'NPAR': {'INCAR1': 8, 'INCAR2': 1},
'SYSTEM': {
'INCAR1': 'Id=[0] dblock_code=[97763-icsd] formula=[li mn (p o4)] sg_name=[p n m a]',
'INCAR2': 'Id=[91090] dblock_code=[20070929235612linio-59.53134651-vasp] formula=[li3 ni3 o6] sg_name=[r-3m]'},
'ALGO': {'INCAR1': 'Damped', 'INCAR2': 'Fast'},
'LHFCALC': {'INCAR1': True, 'INCAR2': None},
'TIME': {'INCAR1': 0.4, 'INCAR2': None}},
'Same': {'IBRION': 2, 'PREC': 'Accurate', 'ISIF': 3,
'LMAXMIX': 4,
'LREAL': 'Auto', 'ISPIN': 2, 'EDIFF': 0.0001,
'LORBIT': 11, 'SIGMA': 0.05}})
self.assertEqual(
incar1.diff(incar3),
{'Different': {
'NELM': {'INCAR1': None, 'INCAR2': 100},
'ISPIND': {'INCAR1': 2, 'INCAR2': None},
'LWAVE': {'INCAR1': True, 'INCAR2': False},
'LDAUPRINT': {'INCAR1': None, 'INCAR2': 1},
'MAGMOM': {'INCAR1': [6, -6, -6, 6, 0.6, 0.6, 0.6,
0.6, 0.6, 0.6, 0.6, 0.6,
0.6, 0.6, 0.6, 0.6, 0.6,
0.6, 0.6, 0.6, 0.6, 0.6, 0.6, 0.6],
'INCAR2': None},
'NELMIN': {'INCAR1': None, 'INCAR2': 3},
'ENCUTFOCK': {'INCAR1': 0.0, 'INCAR2': None},
'HFSCREEN': {'INCAR1': 0.207, 'INCAR2': None},
'LSCALU': {'INCAR1': False, 'INCAR2': None},
'ENCUT': {'INCAR1': 500, 'INCAR2': None},
'NSIM': {'INCAR1': 1, 'INCAR2': None},
'ICHARG': {'INCAR1': None, 'INCAR2': 1},
'NSW': {'INCAR1': 99, 'INCAR2': 51},
'NKRED': {'INCAR1': 2, 'INCAR2': None},
'NUPDOWN': {'INCAR1': 0, 'INCAR2': None},
'LCHARG': {'INCAR1': True, 'INCAR2': None},
'LPLANE': {'INCAR1': True, 'INCAR2': None},
'ISMEAR': {'INCAR1': 0, 'INCAR2': -5},
'NPAR': {'INCAR1': 8, 'INCAR2': 1},
'SYSTEM': {
'INCAR1': 'Id=[0] dblock_code=[97763-icsd] formula=[li mn (p o4)] sg_name=[p n m a]',
'INCAR2': 'Id=[91090] dblock_code=[20070929235612linio-59.53134651-vasp] formula=[li3 ni3 o6] sg_name=[r-3m]'},
'ALGO': {'INCAR1': 'Damped', 'INCAR2': 'Fast'},
'LHFCALC': {'INCAR1': True, 'INCAR2': None},
'TIME': {'INCAR1': 0.4, 'INCAR2': None}},
'Same': {'IBRION': 2, 'PREC': 'Accurate', 'ISIF': 3,
'LMAXMIX': 4,
'LREAL': 'Auto', 'ISPIN': 2, 'EDIFF': 0.0001,
'LORBIT': 11, 'SIGMA': 0.05}})
def test_as_dict_and_from_dict(self):
d = self.incar.as_dict()
incar2 = Incar.from_dict(d)
self.assertEqual(self.incar, incar2)
d["MAGMOM"] = [Magmom([1, 2, 3]).as_dict()]
incar3 = Incar.from_dict(d)
self.assertEqual(incar3["MAGMOM"], [Magmom([1, 2, 3])])
def test_write(self):
tempfname = "INCAR.testing"
self.incar.write_file(tempfname)
i = Incar.from_file(tempfname)
self.assertEqual(i, self.incar)
os.remove(tempfname)
def test_get_string(self):
s = self.incar.get_string(pretty=True, sort_keys=True)
ans = """ALGO = Damped
EDIFF = 0.0001
ENCUT = 500
ENCUTFOCK = 0.0
HFSCREEN = 0.207
IBRION = 2
ISIF = 3
ISMEAR = 0
ISPIN = 2
ISPIND = 2
LCHARG = True
LHFCALC = True
LMAXMIX = 4
LORBIT = 11
LPLANE = True
LREAL = Auto
LSCALU = False
LWAVE = True
MAGMOM = 1*6.0 2*-6.0 1*6.0 20*0.6
NKRED = 2
NPAR = 8
NSIM = 1
NSW = 99
NUPDOWN = 0
PREC = Accurate
SIGMA = 0.05
SYSTEM = Id=[0] dblock_code=[97763-icsd] formula=[li mn (p o4)] sg_name=[p n m a]
TIME = 0.4"""
self.assertEqual(s, ans)
def test_lsorbit_magmom(self):
magmom1 = [[0.0, 0.0, 3.0], [0, 1, 0], [2, 1, 2]]
magmom2 = [-1, -1, -1, 0, 0, 0, 0, 0]
magmom4 = [Magmom([1.0, 2.0, 2.0])]
ans_string1 = "LANGEVIN_GAMMA = 10 10 10\nLSORBIT = True\n" \
"MAGMOM = 0.0 0.0 3.0 0 1 0 2 1 2\n"
ans_string2 = "LANGEVIN_GAMMA = 10\nLSORBIT = True\n" \
"MAGMOM = 3*3*-1 3*5*0\n"
ans_string3 = "LSORBIT = False\nMAGMOM = 2*-1 2*9\n"
ans_string4_nolsorbit = "LANGEVIN_GAMMA = 10\nLSORBIT = False\nMAGMOM = 1*3.0\n"
ans_string4_lsorbit = "LANGEVIN_GAMMA = 10\nLSORBIT = True\nMAGMOM = 1.0 2.0 2.0\n"
incar = Incar({})
incar["MAGMOM"] = magmom1
incar["LSORBIT"] = "T"
incar["LANGEVIN_GAMMA"] = [10, 10, 10]
self.assertEqual(ans_string1, str(incar))
incar["MAGMOM"] = magmom2
incar["LSORBIT"] = "T"
incar["LANGEVIN_GAMMA"] = 10
self.assertEqual(ans_string2, str(incar))
incar["MAGMOM"] = magmom4
incar["LSORBIT"] = "F"
self.assertEqual(ans_string4_nolsorbit, str(incar))
incar["LSORBIT"] = "T"
self.assertEqual(ans_string4_lsorbit, str(incar))
incar = Incar.from_string(ans_string1)
self.assertEqual(incar["MAGMOM"],
[[0.0, 0.0, 3.0], [0, 1, 0], [2, 1, 2]])
self.assertEqual(incar["LANGEVIN_GAMMA"], [10, 10, 10])
incar = Incar.from_string(ans_string2)
self.assertEqual(incar["MAGMOM"], [[-1, -1, -1], [-1, -1, -1],
[-1, -1, -1], [0, 0, 0],
[0, 0, 0], [0, 0, 0],
[0, 0, 0], [0, 0, 0]])
self.assertEqual(incar["LANGEVIN_GAMMA"], [10])
incar = Incar.from_string(ans_string3)
self.assertFalse(incar["LSORBIT"])
self.assertEqual(incar["MAGMOM"], [-1, -1, 9, 9])
def test_quad_efg(self):
incar1 = Incar({})
incar1["LEFG"] = True
incar1["QUAD_EFG"] = [0.0, 146.6, -25.58]
ans_string1 = "LEFG = True\nQUAD_EFG = 0.0 146.6 -25.58\n"
self.assertEqual(ans_string1, str(incar1))
incar2 = Incar.from_string(ans_string1)
self.assertEqual(ans_string1, str(incar2))
def test_types(self):
incar_str = """ALGO = Fast
ECUT = 510
EDIFF = 1e-07
EINT = -0.85 0.85
IBRION = -1
ICHARG = 11
ISIF = 3
ISMEAR = 1
ISPIN = 1
LPARD = True
NBMOD = -3
PREC = Accurate
SIGMA = 0.1"""
i = Incar.from_string(incar_str)
self.assertIsInstance(i["EINT"], list)
self.assertEqual(i["EINT"][0], -0.85)
def test_proc_types(self):
self.assertEqual(Incar.proc_val("HELLO", "-0.85 0.85"), "-0.85 0.85")
class KpointsTest(PymatgenTest):
def test_init(self):
filepath = os.path.join(test_dir, 'KPOINTS.auto')
kpoints = Kpoints.from_file(filepath)
self.assertEqual(kpoints.kpts, [[10]], "Wrong kpoint lattice read")
filepath = os.path.join(test_dir, 'KPOINTS.cartesian')
kpoints = Kpoints.from_file(filepath)
self.assertEqual(kpoints.kpts,
[[0.25, 0, 0], [0, 0.25, 0], [0, 0, 0.25]],
"Wrong kpoint lattice read")
self.assertEqual(kpoints.kpts_shift, [0.5, 0.5, 0.5],
"Wrong kpoint shift read")
filepath = os.path.join(test_dir, 'KPOINTS')
kpoints = Kpoints.from_file(filepath)
self.kpoints = kpoints
self.assertEqual(kpoints.kpts, [[2, 4, 6]])
filepath = os.path.join(test_dir, 'KPOINTS.band')
kpoints = Kpoints.from_file(filepath)
self.assertIsNotNone(kpoints.labels)
self.assertEqual(kpoints.style, Kpoints.supported_modes.Line_mode)
kpoints_str = str(kpoints)
self.assertEqual(kpoints_str.split("\n")[3], "Reciprocal")
filepath = os.path.join(test_dir, 'KPOINTS.explicit')
kpoints = Kpoints.from_file(filepath)
self.assertIsNotNone(kpoints.kpts_weights)
self.assertEqual(str(kpoints).strip(), """Example file
4
Cartesian
0.0 0.0 0.0 1 None
0.0 0.0 0.5 1 None
0.0 0.5 0.5 2 None
0.5 0.5 0.5 4 None""")
filepath = os.path.join(test_dir, 'KPOINTS.explicit_tet')
kpoints = Kpoints.from_file(filepath)
self.assertEqual(kpoints.tet_connections, [(6, [1, 2, 3, 4])])
def test_style_setter(self):
filepath = os.path.join(test_dir, 'KPOINTS')
kpoints = Kpoints.from_file(filepath)
self.assertEqual(kpoints.style, Kpoints.supported_modes.Monkhorst)
kpoints.style = "G"
self.assertEqual(kpoints.style, Kpoints.supported_modes.Gamma)
def test_static_constructors(self):
kpoints = Kpoints.gamma_automatic([3, 3, 3], [0, 0, 0])
self.assertEqual(kpoints.style, Kpoints.supported_modes.Gamma)
self.assertEqual(kpoints.kpts, [[3, 3, 3]])
kpoints = Kpoints.monkhorst_automatic([2, 2, 2], [0, 0, 0])
self.assertEqual(kpoints.style, Kpoints.supported_modes.Monkhorst)
self.assertEqual(kpoints.kpts, [[2, 2, 2]])
kpoints = Kpoints.automatic(100)
self.assertEqual(kpoints.style, Kpoints.supported_modes.Automatic)
self.assertEqual(kpoints.kpts, [[100]])
filepath = os.path.join(test_dir, 'POSCAR')
poscar = Poscar.from_file(filepath)
kpoints = Kpoints.automatic_density(poscar.structure, 500)
self.assertEqual(kpoints.kpts, [[1, 3, 3]])
self.assertEqual(kpoints.style, Kpoints.supported_modes.Gamma)
kpoints = Kpoints.automatic_density(poscar.structure, 500, True)
self.assertEqual(kpoints.style, Kpoints.supported_modes.Gamma)
kpoints = Kpoints.automatic_density_by_vol(poscar.structure, 1000)
self.assertEqual(kpoints.kpts, [[6, 10, 13]])
self.assertEqual(kpoints.style, Kpoints.supported_modes.Gamma)
s = poscar.structure
s.make_supercell(3)
kpoints = Kpoints.automatic_density(s, 500)
self.assertEqual(kpoints.kpts, [[1, 1, 1]])
self.assertEqual(kpoints.style, Kpoints.supported_modes.Gamma)
def test_as_dict_from_dict(self):
k = Kpoints.monkhorst_automatic([2, 2, 2], [0, 0, 0])
d = k.as_dict()
k2 = Kpoints.from_dict(d)
self.assertEqual(k.kpts, k2.kpts)
self.assertEqual(k.style, k2.style)
self.assertEqual(k.kpts_shift, k2.kpts_shift)
def test_kpt_bands_as_dict_from_dict(self):
file_name = os.path.join(test_dir, 'KPOINTS.band')
k = Kpoints.from_file(file_name)
d = k.as_dict()
import json
json.dumps(d)
# This doesn't work
k2 = Kpoints.from_dict(d)
self.assertEqual(k.kpts, k2.kpts)
self.assertEqual(k.style, k2.style)
self.assertEqual(k.kpts_shift, k2.kpts_shift)
self.assertEqual(k.num_kpts, k2.num_kpts)
def test_pickle(self):
k = Kpoints.gamma_automatic()
pickle.dumps(k)
def test_automatic_kpoint(self):
# s = PymatgenTest.get_structure("Li2O")
p = Poscar.from_string("""Al1
1.0
2.473329 0.000000 1.427977
0.824443 2.331877 1.427977
0.000000 0.000000 2.855955
Al
1
direct
0.000000 0.000000 0.000000 Al""")
kpoints = Kpoints.automatic_density(p.structure, 1000)
self.assertArrayAlmostEqual(kpoints.kpts[0], [10, 10, 10])
class PotcarSingleTest(unittest.TestCase):
def setUp(self):
self.psingle = PotcarSingle.from_file(
os.path.join(test_dir, "POT_GGA_PAW_PBE", "POTCAR.Mn_pv.gz"))
def test_keywords(self):
data = {'VRHFIN': 'Mn: 3p4s3d', 'LPAW': True, 'DEXC': -.003,
'STEP': [20.000, 1.050],
'RPACOR': 2.080, 'LEXCH': 'PE',
'ENMAX': 269.865, 'QCUT': -4.454,
'TITEL': 'PAW_PBE Mn_pv 07Sep2000',
'LCOR': True, 'EAUG': 569.085,
'RMAX': 2.807,
'ZVAL': 13.000,
'EATOM': 2024.8347, 'NDATA': 100,
'LULTRA': False,
'QGAM': 8.907,
'ENMIN': 202.399,
'RCLOC': 1.725,
'RCORE': 2.300,
'RDEP': 2.338,
'IUNSCR': 1,
'RAUG': 1.300,
'POMASS': 54.938,
'RWIGS': 1.323}
self.assertEqual(self.psingle.keywords, data)
def test_nelectrons(self):
self.assertEqual(self.psingle.nelectrons, 13)
def test_electron_config(self):
config = self.psingle.electron_configuration
self.assertEqual(config[-1], (3, "p", 6))
def test_attributes(self):
for k in ['DEXC', 'RPACOR', 'ENMAX', 'QCUT', 'EAUG', 'RMAX',
'ZVAL', 'EATOM', 'NDATA', 'QGAM', 'ENMIN', 'RCLOC',
'RCORE', 'RDEP', 'RAUG', 'POMASS', 'RWIGS']:
self.assertIsNotNone(getattr(self.psingle, k))
def test_found_unknown_key(self):
with self.assertRaises(KeyError):
PotcarSingle.parse_functions['BAD_KEY']
def test_bad_value(self):
self.assertRaises(ValueError, PotcarSingle.parse_functions['ENMAX'],
"ThisShouldBeAFloat")
def test_hash(self):
self.assertEqual(self.psingle.get_potcar_hash(),
"fa52f891f234d49bb4cb5ea96aae8f98")
def test_from_functional_and_symbols(self):
test_potcar_dir = os.path.abspath(
os.path.join(os.path.dirname(__file__),
"..", "..", "..", "..", "test_files"))
SETTINGS["PMG_VASP_PSP_DIR"] = test_potcar_dir
p = PotcarSingle.from_symbol_and_functional("Li_sv", "PBE")
self.assertEqual(p.enmax, 271.649)
def test_functional_types(self):
self.assertEqual(self.psingle.functional, 'PBE')
self.assertEqual(self.psingle.functional_class, 'GGA')
self.assertEqual(self.psingle.potential_type, 'PAW')
psingle = PotcarSingle.from_file(os.path.join(test_dir, "POT_LDA_PAW",
"POTCAR.Fe.gz"))
self.assertEqual(psingle.functional, 'Perdew-Zunger81')
self.assertEqual(psingle.functional_class, 'LDA')
self.assertEqual(psingle.potential_type, 'PAW')
def test_default_functional(self):
p = PotcarSingle.from_symbol_and_functional("Fe")
self.assertEqual(p.functional_class, 'GGA')
SETTINGS["PMG_DEFAULT_FUNCTIONAL"] = "LDA"
p = PotcarSingle.from_symbol_and_functional("Fe")
self.assertEqual(p.functional_class, 'LDA')
def tearDown(self):
SETTINGS["PMG_DEFAULT_FUNCTIONAL"] = "PBE"
class PotcarTest(unittest.TestCase):
def setUp(self):
if "PMG_VASP_PSP_DIR" not in os.environ:
test_potcar_dir = os.path.abspath(
os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
"test_files"))
os.environ["PMG_VASP_PSP_DIR"] = test_potcar_dir
filepath = os.path.join(test_dir, 'POTCAR')
self.potcar = Potcar.from_file(filepath)
def test_init(self):
self.assertEqual(self.potcar.symbols, ["Fe", "P", "O"],
"Wrong symbols read in for POTCAR")
potcar = Potcar(["Fe_pv", "O"])
self.assertEqual(potcar[0].enmax, 293.238)
def test_potcar_map(self):
fe_potcar = zopen(os.path.join(test_dir, "POT_GGA_PAW_PBE",
"POTCAR.Fe_pv.gz")).read().decode(
"utf-8")
# specify V instead of Fe - this makes sure the test won't pass if the
# code just grabs the POTCAR from the config file (the config file would
# grab the V POTCAR)
potcar = Potcar(["V"], sym_potcar_map={"V": fe_potcar})
self.assertEqual(potcar.symbols, ["Fe_pv"], "Wrong symbols read in "
"for POTCAR")
def test_to_from_dict(self):
d = self.potcar.as_dict()
potcar = Potcar.from_dict(d)
self.assertEqual(potcar.symbols, ["Fe", "P", "O"])
def test_write(self):
tempfname = "POTCAR.testing"
self.potcar.write_file(tempfname)
p = Potcar.from_file(tempfname)
self.assertEqual(p.symbols, self.potcar.symbols)
os.remove(tempfname)
def test_set_symbol(self):
self.assertEqual(self.potcar.symbols, ["Fe", "P", "O"])
self.assertEqual(self.potcar[0].nelectrons, 8)
self.potcar.symbols = ["Fe_pv", "O"]
self.assertEqual(self.potcar.symbols, ["Fe_pv", "O"])
self.assertEqual(self.potcar[0].nelectrons, 14)
def test_default_functional(self):
p = Potcar(["Fe", "P"])
self.assertEqual(p[0].functional_class, 'GGA')
self.assertEqual(p[1].functional_class, 'GGA')
SETTINGS["PMG_DEFAULT_FUNCTIONAL"] = "LDA"
p = Potcar(["Fe", "P"])
self.assertEqual(p[0].functional_class, 'LDA')
self.assertEqual(p[1].functional_class, 'LDA')
def test_pickle(self):
pickle.dumps(self.potcar)
def tearDown(self):
SETTINGS["PMG_DEFAULT_FUNCTIONAL"] = "PBE"
class VaspInputTest(unittest.TestCase):
def setUp(self):
filepath = os.path.join(test_dir, 'INCAR')
incar = Incar.from_file(filepath)
filepath = os.path.join(test_dir, 'POSCAR')
poscar = Poscar.from_file(filepath)
if "PMG_VASP_PSP_DIR" not in os.environ:
test_potcar_dir = os.path.abspath(
os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
"test_files"))
os.environ["PMG_VASP_PSP_DIR"] = test_potcar_dir
filepath = os.path.join(test_dir, 'POTCAR')
potcar = Potcar.from_file(filepath)
filepath = os.path.join(test_dir, 'KPOINTS.auto')
kpoints = Kpoints.from_file(filepath)
self.vinput = VaspInput(incar, kpoints, poscar, potcar)
def test_to_from_dict(self):
d = self.vinput.as_dict()
vinput = VaspInput.from_dict(d)
comp = vinput["POSCAR"].structure.composition
self.assertEqual(comp, Composition("Fe4P4O16"))
def test_write(self):
tmp_dir = "VaspInput.testing"
self.vinput.write_input(tmp_dir)
filepath = os.path.join(tmp_dir, "INCAR")
incar = Incar.from_file(filepath)
self.assertEqual(incar["NSW"], 99)
for name in ("INCAR", "POSCAR", "POTCAR", "KPOINTS"):
os.remove(os.path.join(tmp_dir, name))
os.rmdir(tmp_dir)
def test_from_directory(self):
vi = VaspInput.from_directory(test_dir,
optional_files={"CONTCAR.Li2O": Poscar})
self.assertEqual(vi["INCAR"]["ALGO"], "Damped")
self.assertIn("CONTCAR.Li2O", vi)
d = vi.as_dict()
vinput = VaspInput.from_dict(d)
self.assertIn("CONTCAR.Li2O", vinput)
if __name__ == "__main__":
unittest.main()
|
matk86/pymatgen
|
pymatgen/io/vasp/tests/test_inputs.py
|
Python
|
mit
| 32,454
|
[
"VASP",
"pymatgen"
] |
dd18795fba96bdab71de9acfc40cb45a92688c44fb0dc3d8f1cd285f3c9a5d04
|
#!/usr/bin/env python
#
# Copyright (c) 2014, Steven Caron <steven@steven-caron.com> All rights reserved.
#
# FabricArnold Extension
# test_0003
import os
import time
import math
from arnold import *
testNumber = "test_0003"
print("[FabricArnold::TestSuite] Generating reference image for {0}...".format(testNumber))
start = time.clock()
AiBegin()
nbSpheres = 100000
radius = 2.0
for i in range(nbSpheres):
step = float(i)/float(nbSpheres)
theta = AI_PITIMES2 * step
x = radius * math.cos(theta)
z = radius * math.sin(theta)
# create a sphere
sphere = AiNode("sphere")
AiNodeSetStr(sphere, "name", "mysphere_{0}".format(i))
AiNodeSetFlt(sphere, "radius", step*0.5)
AiNodeSetPnt(sphere, "center", x, theta, z)
# create a lambert shader
lambert = AiNode("lambert")
AiNodeSetStr(lambert, "name", "myshader_{0}".format(i))
AiNodeSetRGB(lambert, "Kd_color", 0.0, 1.0, 0.0)
# assign the sphere's shader
AiNodeSetPtr(sphere, "shader", lambert)
# create a perspective camera
camera = AiNode("persp_camera")
AiNodeSetStr(camera, "name", "mycamera")
AiNodeSetPnt(camera, "position", 0.0, 5.0, 20.0)
AiNodeSetPnt(camera, "look_at", 0.0, 5.0, 0.0)
# create a point light
light = AiNode("point_light")
AiNodeSetStr(light, "name", "mylight")
AiNodeSetFlt(light, "exposure", 10.5)
AiNodeSetPnt(light, "position", 0.0, 20.0, 0.0)
# create a point light
light1 = AiNode("point_light")
AiNodeSetStr(light1, "name", "mylight_1")
AiNodeSetFlt(light1, "exposure", 10.5)
AiNodeSetPnt(light1, "position", 0.0, -20.0, 0.0)
# set render options
options = AiUniverseGetOptions()
AiNodeSetInt(options, "AA_samples", 4)
AiNodeSetInt(options, "xres", 320)
AiNodeSetInt(options, "yres", 240)
AiNodeSetPtr(options, "camera", camera)
# create an output driver
driver = AiNode("driver_jpeg")
AiNodeSetStr(driver, "name", "mydriver")
filename = os.path.join(os.getcwd(), testNumber, "reference.jpg")
AiNodeSetStr(driver, "filename", filename)
# create a gaussian filter node
gfilter = AiNode("gaussian_filter")
AiNodeSetStr(gfilter, "name", "myfilter")
# assign the driver and the filter to the outputs
outputs_array = AiArrayAllocate(1, 1, AI_TYPE_STRING)
AiArraySetStr(outputs_array, 0, "RGB RGB myfilter mydriver")
AiNodeSetArray(options, "outputs", outputs_array)
# render the scene
result = AiRender(AI_RENDER_MODE_CAMERA)
if result != AI_SUCCESS:
print("[FabricArnold::TestSuite] Error {0}".format(result))
AiEnd()
secs = time.clock() - start
print("Elapsed time: {0} seconds".format(secs))
|
wildparky/FabricArnold
|
tests/test_0003/reference.py
|
Python
|
bsd-3-clause
| 2,639
|
[
"Gaussian"
] |
199babc34f24b8a15b17ec50ed0409e92305fcf6d752e4c72032c543a311c78a
|
# -*- coding: utf-8 -*-
# Copyright 2007-2022 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
from collections.abc import MutableMapping
import copy
from contextlib import contextmanager
from datetime import datetime
import inspect
from itertools import product
import logging
import numbers
from pathlib import Path
import warnings
import dask.array as da
from matplotlib import pyplot as plt
import numpy as np
from pint import UndefinedUnitError
from scipy import integrate
from scipy import signal as sp_signal
import traits.api as t
import hyperspy
from hyperspy.axes import AxesManager
from hyperspy.api_nogui import _ureg
from hyperspy.drawing import mpl_hie, mpl_hse, mpl_he
from hyperspy.learn.mva import MVA, LearningResults
from hyperspy.io import assign_signal_subclass
import hyperspy.misc.utils
from hyperspy.misc.utils import DictionaryTreeBrowser
from hyperspy.drawing import signal as sigdraw
from hyperspy.defaults_parser import preferences
from hyperspy.misc.io.tools import ensure_directory
from hyperspy.misc.utils import iterable_not_string
from hyperspy.external.progressbar import progressbar
from hyperspy.exceptions import SignalDimensionError, DataDimensionError
from hyperspy.misc import rgb_tools
from hyperspy.misc.utils import underline, isiterable
from hyperspy.misc.hist_tools import histogram
from hyperspy.drawing.utils import animate_legend
from hyperspy.drawing.marker import markers_metadata_dict_to_markers
from hyperspy.misc.slicing import SpecialSlicers, FancySlicing
from hyperspy.misc.utils import slugify
from hyperspy.misc.utils import is_binned # remove in v2.0
from hyperspy.misc.utils import process_function_blockwise, guess_output_signal_size
from hyperspy.misc.utils import add_scalar_axis
from hyperspy.docstrings.signal import (
ONE_AXIS_PARAMETER, MANY_AXIS_PARAMETER, OUT_ARG, NAN_FUNC, OPTIMIZE_ARG,
RECHUNK_ARG, SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, MAX_WORKERS_ARG,
CLUSTER_SIGNALS_ARG, HISTOGRAM_BIN_ARGS, HISTOGRAM_MAX_BIN_ARGS, LAZY_OUTPUT_ARG)
from hyperspy.docstrings.plot import (BASE_PLOT_DOCSTRING, PLOT1D_DOCSTRING,
BASE_PLOT_DOCSTRING_PARAMETERS,
PLOT2D_KWARGS_DOCSTRING)
from hyperspy.docstrings.utils import REBIN_ARGS
from hyperspy.events import Events, Event
from hyperspy.interactive import interactive
from hyperspy.misc.signal_tools import (are_signals_aligned,
broadcast_signals)
from hyperspy.misc.math_tools import outer_nd, hann_window_nth_order, check_random_state
from hyperspy.exceptions import VisibleDeprecationWarning
_logger = logging.getLogger(__name__)
class ModelManager(object):
"""Container for models
"""
class ModelStub(object):
def __init__(self, mm, name):
self._name = name
self._mm = mm
self.restore = lambda: mm.restore(self._name)
self.remove = lambda: mm.remove(self._name)
self.pop = lambda: mm.pop(self._name)
self.restore.__doc__ = "Returns the stored model"
self.remove.__doc__ = "Removes the stored model"
self.pop.__doc__ = \
"Returns the stored model and removes it from storage"
def __repr__(self):
return repr(self._mm._models[self._name])
def __init__(self, signal, dictionary=None):
self._signal = signal
self._models = DictionaryTreeBrowser()
self._add_dictionary(dictionary)
def _add_dictionary(self, dictionary=None):
if dictionary is not None:
for k, v in dictionary.items():
if k.startswith('_') or k in ['restore', 'remove']:
raise KeyError("Can't add dictionary with key '%s'" % k)
k = slugify(k, True)
self._models.set_item(k, v)
setattr(self, k, self.ModelStub(self, k))
def _set_nice_description(self, node, names):
ans = {'date': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
'dimensions': self._signal.axes_manager._get_dimension_str(),
}
node.add_dictionary(ans)
for n in names:
node.add_node('components.' + n)
def _save(self, name, dictionary):
_abc = 'abcdefghijklmnopqrstuvwxyz'
def get_letter(models):
howmany = len(models)
if not howmany:
return 'a'
order = int(np.log(howmany) / np.log(26)) + 1
letters = [_abc, ] * order
for comb in product(*letters):
guess = "".join(comb)
if guess not in models.keys():
return guess
if name is None:
name = get_letter(self._models)
else:
name = self._check_name(name)
if name in self._models:
self.remove(name)
self._models.add_node(name)
node = self._models.get_item(name)
names = [c['name'] for c in dictionary['components']]
self._set_nice_description(node, names)
node.set_item('_dict', dictionary)
setattr(self, name, self.ModelStub(self, name))
def store(self, model, name=None):
"""If the given model was created from this signal, stores it
Parameters
----------
model : :py:class:`~hyperspy.model.BaseModel` (or subclass)
The model to store in the signal
name : str or None
The name for the model to be stored with
See also
--------
remove
restore
pop
"""
if model.signal is self._signal:
self._save(name, model.as_dictionary())
else:
raise ValueError("The model is created from a different signal, "
"you should store it there")
def _check_name(self, name, existing=False):
if not isinstance(name, str):
raise KeyError('Name has to be a string')
if name.startswith('_'):
raise KeyError('Name cannot start with "_" symbol')
if '.' in name:
raise KeyError('Name cannot contain dots (".")')
name = slugify(name, True)
if existing:
if name not in self._models:
raise KeyError(
"Model named '%s' is not currently stored" %
name)
return name
def remove(self, name):
"""Removes the given model
Parameters
----------
name : str
The name of the model to remove
See also
--------
restore
store
pop
"""
name = self._check_name(name, True)
delattr(self, name)
self._models.__delattr__(name)
def pop(self, name):
"""Returns the restored model and removes it from storage
Parameters
----------
name : str
The name of the model to restore and remove
See also
--------
restore
store
remove
"""
name = self._check_name(name, True)
model = self.restore(name)
self.remove(name)
return model
def restore(self, name):
"""Returns the restored model
Parameters
----------
name : str
The name of the model to restore
See also
--------
remove
store
pop
"""
name = self._check_name(name, True)
d = self._models.get_item(name + '._dict').as_dictionary()
return self._signal.create_model(dictionary=copy.deepcopy(d))
def __repr__(self):
return repr(self._models)
def __len__(self):
return len(self._models)
def __getitem__(self, name):
name = self._check_name(name, True)
return getattr(self, name)
class MVATools(object):
# TODO: All of the plotting methods here should move to drawing
def _plot_factors_or_pchars(self, factors, comp_ids=None,
calibrate=True, avg_char=False,
same_window=True, comp_label='PC',
img_data=None,
plot_shifts=True, plot_char=4,
cmap=plt.cm.gray, quiver_color='white',
vector_scale=1,
per_row=3, ax=None):
"""Plot components from PCA or ICA, or peak characteristics.
Parameters
----------
comp_ids : None, int, or list of ints
If None, returns maps of all components.
If int, returns maps of components with ids from 0 to given
int.
if list of ints, returns maps of components with ids in
given list.
calibrate : bool
If True, plots are calibrated according to the data in the
axes manager.
same_window : bool
If True, plots each factor to the same window. They are not scaled.
Default True.
comp_label : str
Title of the plot
cmap : a matplotlib colormap
The colormap used for factor images or any peak characteristic
scatter map overlay. Default is the matplotlib gray colormap
(``plt.cm.gray``).
Other Parameters
----------------
img_data : 2D numpy array,
The array to overlay peak characteristics onto. If None,
defaults to the average image of your stack.
plot_shifts : bool, default is True
If True, plots a quiver (arrow) plot showing the shifts for each
peak present in the component being plotted.
plot_char : None or int
If int, the id of the characteristic to plot as the colored
scatter plot.
Possible components are:
* 4: peak height
* 5: peak orientation
* 6: peak eccentricity
quiver_color : any color recognized by matplotlib
Determines the color of vectors drawn for
plotting peak shifts.
vector_scale : integer or None
Scales the quiver plot arrows. The vector is defined as one data
unit along the X axis. If shifts are small, set vector_scale so
that when they are multiplied by vector_scale, they are on the
scale of the image plot. If None, uses matplotlib's autoscaling.
Returns
-------
matplotlib figure or list of figure if same_window=False
"""
if same_window is None:
same_window = True
if comp_ids is None:
comp_ids = range(factors.shape[1])
elif not hasattr(comp_ids, '__iter__'):
comp_ids = range(comp_ids)
n = len(comp_ids)
if same_window:
rows = int(np.ceil(n / float(per_row)))
fig_list = []
if n < per_row:
per_row = n
if same_window and self.axes_manager.signal_dimension == 2:
f = plt.figure(figsize=(4 * per_row, 3 * rows))
else:
f = plt.figure()
for i in range(len(comp_ids)):
if self.axes_manager.signal_dimension == 1:
if same_window:
ax = plt.gca()
else:
if i > 0:
f = plt.figure()
plt.title('%s' % comp_label)
ax = f.add_subplot(111)
ax = sigdraw._plot_1D_component(
factors=factors,
idx=comp_ids[i],
axes_manager=self.axes_manager,
ax=ax,
calibrate=calibrate,
comp_label=comp_label,
same_window=same_window)
if same_window:
plt.legend(ncol=factors.shape[1] // 2, loc='best')
elif self.axes_manager.signal_dimension == 2:
if same_window:
ax = f.add_subplot(rows, per_row, i + 1)
else:
if i > 0:
f = plt.figure()
plt.title('%s' % comp_label)
ax = f.add_subplot(111)
sigdraw._plot_2D_component(factors=factors,
idx=comp_ids[i],
axes_manager=self.axes_manager,
calibrate=calibrate, ax=ax,
cmap=cmap, comp_label=comp_label)
if not same_window:
fig_list.append(f)
if same_window: # Main title for same window
title = '%s' % comp_label
if self.axes_manager.signal_dimension == 1:
plt.title(title)
else:
plt.suptitle(title)
try:
plt.tight_layout()
except BaseException:
pass
if not same_window:
return fig_list
else:
return f
def _plot_loadings(self, loadings, comp_ids, calibrate=True,
same_window=True, comp_label=None,
with_factors=False, factors=None,
cmap=plt.cm.gray, no_nans=False, per_row=3,
axes_decor='all'):
if same_window is None:
same_window = True
if comp_ids is None:
comp_ids = range(loadings.shape[0])
elif not hasattr(comp_ids, '__iter__'):
comp_ids = range(comp_ids)
n = len(comp_ids)
if same_window:
rows = int(np.ceil(n / float(per_row)))
fig_list = []
if n < per_row:
per_row = n
if same_window and self.axes_manager.signal_dimension == 2:
f = plt.figure(figsize=(4 * per_row, 3 * rows))
else:
f = plt.figure()
for i in range(n):
if self.axes_manager.navigation_dimension == 1:
if same_window:
ax = plt.gca()
else:
if i > 0:
f = plt.figure()
plt.title('%s' % comp_label)
ax = f.add_subplot(111)
elif self.axes_manager.navigation_dimension == 2:
if same_window:
ax = f.add_subplot(rows, per_row, i + 1)
else:
if i > 0:
f = plt.figure()
plt.title('%s' % comp_label)
ax = f.add_subplot(111)
sigdraw._plot_loading(
loadings, idx=comp_ids[i], axes_manager=self.axes_manager,
no_nans=no_nans, calibrate=calibrate, cmap=cmap,
comp_label=comp_label, ax=ax, same_window=same_window,
axes_decor=axes_decor)
if not same_window:
fig_list.append(f)
if same_window: # Main title for same window
title = '%s' % comp_label
if self.axes_manager.navigation_dimension == 1:
plt.title(title)
else:
plt.suptitle(title)
try:
plt.tight_layout()
except BaseException:
pass
if not same_window:
if with_factors:
return fig_list, self._plot_factors_or_pchars(
factors, comp_ids=comp_ids, calibrate=calibrate,
same_window=same_window, comp_label=comp_label,
per_row=per_row)
else:
return fig_list
else:
if self.axes_manager.navigation_dimension == 1:
plt.legend(ncol=loadings.shape[0] // 2, loc='best')
animate_legend(f)
if with_factors:
return f, self._plot_factors_or_pchars(factors,
comp_ids=comp_ids,
calibrate=calibrate,
same_window=same_window,
comp_label=comp_label,
per_row=per_row)
else:
return f
def _export_factors(self,
factors,
folder=None,
comp_ids=None,
multiple_files=True,
save_figures=False,
save_figures_format='png',
factor_prefix=None,
factor_format=None,
comp_label=None,
cmap=plt.cm.gray,
plot_shifts=True,
plot_char=4,
img_data=None,
same_window=False,
calibrate=True,
quiver_color='white',
vector_scale=1,
no_nans=True, per_row=3):
from hyperspy._signals.signal2d import Signal2D
from hyperspy._signals.signal1d import Signal1D
if multiple_files is None:
multiple_files = True
if factor_format is None:
factor_format = 'hspy'
# Select the desired factors
if comp_ids is None:
comp_ids = range(factors.shape[1])
elif not hasattr(comp_ids, '__iter__'):
comp_ids = range(comp_ids)
# Use the builtin bool: the np.bool alias is deprecated/removed in
# recent NumPy releases.
mask = np.zeros(factors.shape[1], dtype=bool)
for idx in comp_ids:
mask[idx] = 1
factors = factors[:, mask]
if save_figures is True:
plt.ioff()
fac_plots = self._plot_factors_or_pchars(factors,
comp_ids=comp_ids,
same_window=same_window,
comp_label=comp_label,
img_data=img_data,
plot_shifts=plot_shifts,
plot_char=plot_char,
cmap=cmap,
per_row=per_row,
quiver_color=quiver_color,
vector_scale=vector_scale)
for idx in range(len(comp_ids)):
filename = '%s_%02i.%s' % (factor_prefix, comp_ids[idx],
save_figures_format)
if folder is not None:
filename = Path(folder, filename)
ensure_directory(filename)
_args = {'dpi': 600,
'format': save_figures_format}
fac_plots[idx].savefig(filename, **_args)
plt.ion()
elif multiple_files is False:
if self.axes_manager.signal_dimension == 2:
# factor images
axes_dicts = []
axes = self.axes_manager.signal_axes[::-1]
shape = (axes[1].size, axes[0].size)
factor_data = np.rollaxis(
factors.reshape((shape[0], shape[1], -1)), 2)
axes_dicts.append(axes[0].get_axis_dictionary())
axes_dicts.append(axes[1].get_axis_dictionary())
axes_dicts.append({'name': 'factor_index',
'scale': 1.,
'offset': 0.,
'size': int(factors.shape[1]),
'units': 'factor',
'index_in_array': 0, })
s = Signal2D(factor_data,
axes=axes_dicts,
metadata={
'General': {'title': '%s from %s' % (
factor_prefix,
self.metadata.General.title),
}})
elif self.axes_manager.signal_dimension == 1:
axes = [self.axes_manager.signal_axes[0].get_axis_dictionary(),
{'name': 'factor_index',
'scale': 1.,
'offset': 0.,
'size': int(factors.shape[1]),
'units': 'factor',
'index_in_array': 0,
}]
axes[0]['index_in_array'] = 1
s = Signal1D(
factors.T, axes=axes, metadata={
"General": {
'title': '%s from %s' %
(factor_prefix, self.metadata.General.title), }})
filename = '%ss.%s' % (factor_prefix, factor_format)
if folder is not None:
filename = Path(folder, filename)
s.save(filename)
else: # Separate files
if self.axes_manager.signal_dimension == 1:
axis_dict = self.axes_manager.signal_axes[0].\
get_axis_dictionary()
axis_dict['index_in_array'] = 0
for dim, index in zip(comp_ids, range(len(comp_ids))):
s = Signal1D(factors[:, index],
axes=[axis_dict, ],
metadata={
"General": {'title': '%s from %s' % (
factor_prefix,
self.metadata.General.title),
}})
filename = '%s-%i.%s' % (factor_prefix,
dim,
factor_format)
if folder is not None:
filename = Path(folder, filename)
s.save(filename)
if self.axes_manager.signal_dimension == 2:
axes = self.axes_manager.signal_axes
axes_dicts = [axes[0].get_axis_dictionary(),
axes[1].get_axis_dictionary()]
axes_dicts[0]['index_in_array'] = 0
axes_dicts[1]['index_in_array'] = 1
factor_data = factors.reshape(
self.axes_manager._signal_shape_in_array + [-1, ])
for dim, index in zip(comp_ids, range(len(comp_ids))):
im = Signal2D(factor_data[..., index],
axes=axes_dicts,
metadata={
"General": {'title': '%s from %s' % (
factor_prefix,
self.metadata.General.title),
}})
filename = '%s-%i.%s' % (factor_prefix,
dim,
factor_format)
if folder is not None:
filename = Path(folder, filename)
im.save(filename)
def _export_loadings(self,
loadings,
folder=None,
comp_ids=None,
multiple_files=True,
loading_prefix=None,
loading_format="hspy",
save_figures_format='png',
comp_label=None,
cmap=plt.cm.gray,
save_figures=False,
same_window=False,
calibrate=True,
no_nans=True,
per_row=3):
from hyperspy._signals.signal2d import Signal2D
from hyperspy._signals.signal1d import Signal1D
if multiple_files is None:
multiple_files = True
if loading_format is None:
loading_format = 'hspy'
if comp_ids is None:
comp_ids = range(loadings.shape[0])
elif not hasattr(comp_ids, '__iter__'):
comp_ids = range(comp_ids)
mask = np.zeros(loadings.shape[0], dtype=bool)
for idx in comp_ids:
mask[idx] = 1
loadings = loadings[mask]
if save_figures is True:
plt.ioff()
sc_plots = self._plot_loadings(loadings, comp_ids=comp_ids,
calibrate=calibrate,
same_window=same_window,
comp_label=comp_label,
cmap=cmap, no_nans=no_nans,
per_row=per_row)
for idx in range(len(comp_ids)):
filename = '%s_%02i.%s' % (loading_prefix, comp_ids[idx],
save_figures_format)
if folder is not None:
filename = Path(folder, filename)
ensure_directory(filename)
_args = {'dpi': 600,
'format': save_figures_format}
sc_plots[idx].savefig(filename, **_args)
plt.ion()
elif multiple_files is False:
if self.axes_manager.navigation_dimension == 2:
axes_dicts = []
axes = self.axes_manager.navigation_axes[::-1]
shape = (axes[1].size, axes[0].size)
loading_data = loadings.reshape((-1, shape[0], shape[1]))
axes_dicts.append(axes[0].get_axis_dictionary())
axes_dicts[0]['index_in_array'] = 1
axes_dicts.append(axes[1].get_axis_dictionary())
axes_dicts[1]['index_in_array'] = 2
axes_dicts.append({'name': 'loading_index',
'scale': 1.,
'offset': 0.,
'size': int(loadings.shape[0]),
'units': 'factor',
'index_in_array': 0, })
s = Signal2D(loading_data,
axes=axes_dicts,
metadata={
"General": {'title': '%s from %s' % (
loading_prefix,
self.metadata.General.title),
}})
elif self.axes_manager.navigation_dimension == 1:
cal_axis = self.axes_manager.navigation_axes[0].\
get_axis_dictionary()
cal_axis['index_in_array'] = 1
axes = [{'name': 'loading_index',
'scale': 1.,
'offset': 0.,
'size': int(loadings.shape[0]),
'units': 'comp_id',
'index_in_array': 0, },
cal_axis]
s = Signal2D(loadings,
axes=axes,
metadata={
"General": {'title': '%s from %s' % (
loading_prefix,
self.metadata.General.title),
}})
filename = '%ss.%s' % (loading_prefix, loading_format)
if folder is not None:
filename = Path(folder, filename)
s.save(filename)
else: # Separate files
if self.axes_manager.navigation_dimension == 1:
axis_dict = self.axes_manager.navigation_axes[0].\
get_axis_dictionary()
axis_dict['index_in_array'] = 0
for dim, index in zip(comp_ids, range(len(comp_ids))):
s = Signal1D(loadings[index],
axes=[axis_dict, ])
filename = '%s-%i.%s' % (loading_prefix,
dim,
loading_format)
if folder is not None:
filename = Path(folder, filename)
s.save(filename)
elif self.axes_manager.navigation_dimension == 2:
axes_dicts = []
axes = self.axes_manager.navigation_axes[::-1]
shape = (axes[0].size, axes[1].size)
loading_data = loadings.reshape((-1, shape[0], shape[1]))
axes_dicts.append(axes[0].get_axis_dictionary())
axes_dicts[0]['index_in_array'] = 0
axes_dicts.append(axes[1].get_axis_dictionary())
axes_dicts[1]['index_in_array'] = 1
for dim, index in zip(comp_ids, range(len(comp_ids))):
s = Signal2D(loading_data[index, ...],
axes=axes_dicts,
metadata={
"General": {'title': '%s from %s' % (
loading_prefix,
self.metadata.General.title),
}})
filename = '%s-%i.%s' % (loading_prefix,
dim,
loading_format)
if folder is not None:
filename = Path(folder, filename)
s.save(filename)
def plot_decomposition_factors(self,
comp_ids=None,
calibrate=True,
same_window=True,
title=None,
cmap=plt.cm.gray,
per_row=3,
**kwargs,
):
"""Plot factors from a decomposition. In case of 1D signal axis, each
factor line can be toggled on and off by clicking on its
corresponding line in the legend.
Parameters
----------
comp_ids : None, int, or list (of ints)
If `comp_ids` is ``None``, maps of all components will be
returned if the `output_dimension` was defined when executing
:py:meth:`~hyperspy.learn.mva.MVA.decomposition`. Otherwise it
raises a :py:exc:`ValueError`.
If `comp_ids` is an int, maps of components with ids from 0 to
the given value will be returned. If `comp_ids` is a list of
ints, maps of components with ids contained in the list will be
returned.
calibrate : bool
If ``True``, calibrates plots where calibration is available
from the axes_manager. If ``False``, plots are in pixels/channels.
same_window : bool
If ``True``, plots each factor to the same window. They are
not scaled. Default is ``True``.
title : str
Title of the plot.
cmap : :py:class:`~matplotlib.colors.Colormap`
The colormap used for the factor images, or for peak
characteristics. Default is the matplotlib gray colormap
(``plt.cm.gray``).
per_row : int
The number of plots in each row, when the `same_window`
parameter is ``True``.
See also
--------
plot_decomposition_loadings, plot_decomposition_results
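Examples
--------
A minimal usage sketch (the signal ``s`` and the number of
components are illustrative):
>>> s.decomposition()
>>> s.plot_decomposition_factors(comp_ids=4)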
"""
if self.axes_manager.signal_dimension > 2:
raise NotImplementedError("This method cannot plot factors of "
"signals of dimension higher than 2."
"You can use "
"`plot_decomposition_results` instead.")
if self.learning_results.factors is None:
raise RuntimeError("No learning results found. A 'decomposition' "
"needs to be performed first.")
if same_window is None:
same_window = True
factors = self.learning_results.factors
if comp_ids is None:
if self.learning_results.output_dimension:
comp_ids = self.learning_results.output_dimension
else:
raise ValueError(
"Please provide the number of components to plot via the "
"`comp_ids` argument")
comp_label = kwargs.get("comp_label", None)
title = _change_API_comp_label(title, comp_label)
if title is None:
title = self._get_plot_title('Decomposition factors of',
same_window=same_window)
return self._plot_factors_or_pchars(factors,
comp_ids=comp_ids,
calibrate=calibrate,
same_window=same_window,
comp_label=title,
cmap=cmap,
per_row=per_row)
def plot_bss_factors(
self,
comp_ids=None,
calibrate=True,
same_window=True,
title=None,
cmap=plt.cm.gray,
per_row=3,
**kwargs,
):
"""Plot factors from blind source separation results. In case of 1D
signal axis, each factor line can be toggled on and off by clicking
on its corresponding line in the legend.
Parameters
----------
comp_ids : None, int, or list (of ints)
If `comp_ids` is ``None``, maps of all components will be
returned. If it is an int, maps of components with ids from 0 to
the given value will be returned. If `comp_ids` is a list of
ints, maps of components with ids contained in the list will be
returned.
calibrate : bool
If ``True``, calibrates plots where calibration is available
from the axes_manager. If ``False``, plots are in pixels/channels.
same_window : bool
if ``True``, plots each factor to the same window. They are
not scaled. Default is ``True``.
title : str
Title of the plot.
cmap : :py:class:`~matplotlib.colors.Colormap`
The colormap used for the factor images, or for peak
characteristics. Default is the matplotlib gray colormap
(``plt.cm.gray``).
per_row : int
The number of plots in each row, when the `same_window`
parameter is ``True``.
See also
--------
plot_bss_loadings, plot_bss_results
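Examples
--------
A minimal sketch, assuming a decomposition followed by blind source
separation has already been run on the (illustrative) signal ``s``:
>>> s.blind_source_separation(number_of_components=3)
>>> s.plot_bss_factors(comp_ids=3)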
"""
if self.axes_manager.signal_dimension > 2:
raise NotImplementedError("This method cannot plot factors of "
"signals of dimension higher than 2."
"You can use "
"`plot_decomposition_results` instead.")
if self.learning_results.bss_factors is None:
raise RuntimeError("No learning results found. A "
"'blind_source_separation' needs to be "
"performed first.")
if same_window is None:
same_window = True
factors = self.learning_results.bss_factors
comp_label = kwargs.get("comp_label", None)
title = _change_API_comp_label(title, comp_label)
if title is None:
title = self._get_plot_title('BSS factors of',
same_window=same_window)
return self._plot_factors_or_pchars(factors,
comp_ids=comp_ids,
calibrate=calibrate,
same_window=same_window,
comp_label=title,
per_row=per_row)
def plot_decomposition_loadings(self,
comp_ids=None,
calibrate=True,
same_window=True,
title=None,
with_factors=False,
cmap=plt.cm.gray,
no_nans=False,
per_row=3,
axes_decor='all',
**kwargs,
):
"""Plot loadings from a decomposition. In case of 1D navigation axis,
each loading line can be toggled on and off by clicking on its
corresponding line in the legend.
Parameters
----------
comp_ids : None, int, or list (of ints)
If `comp_ids` is ``None``, maps of all components will be
returned if the `output_dimension` was defined when executing
:py:meth:`~hyperspy.learn.mva.MVA.decomposition`.
Otherwise it raises a :py:exc:`ValueError`.
If `comp_ids` is an int, maps of components with ids from 0 to
the given value will be returned. If `comp_ids` is a list of
ints, maps of components with ids contained in the list will be
returned.
calibrate : bool
if ``True``, calibrates plots where calibration is available
from the axes_manager. If ``False``, plots are in pixels/channels.
same_window : bool
if ``True``, plots each factor to the same window. They are
not scaled. Default is ``True``.
title : str
Title of the plot.
with_factors : bool
If ``True``, also returns figure(s) with the factors for the
given comp_ids.
cmap : :py:class:`~matplotlib.colors.Colormap`
The colormap used for the loadings images, or for peak
characteristics. Default is the matplotlib gray colormap
(``plt.cm.gray``).
no_nans : bool
If ``True``, removes ``NaN``'s from the loading plots.
per_row : int
The number of plots in each row, when the `same_window`
parameter is ``True``.
axes_decor : str or None, optional
One of: ``'all'``, ``'ticks'``, ``'off'``, or ``None``
Controls how the axes are displayed on each image; default is
``'all'``
If ``'all'``, both ticks and axis labels will be shown.
If ``'ticks'``, no axis labels will be shown, but ticks/labels will.
If ``'off'``, all decorations and frame will be disabled.
If ``None``, no axis decorations will be shown, but ticks/frame
will.
See also
--------
plot_decomposition_factors, plot_decomposition_results
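Examples
--------
A minimal sketch (``s`` is an illustrative signal on which a
decomposition has already been run):
>>> s.plot_decomposition_loadings(comp_ids=4, axes_decor='ticks')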
"""
if self.axes_manager.navigation_dimension > 2:
raise NotImplementedError("This method cannot plot loadings of "
"dimension higher than 2."
"You can use "
"`plot_decomposition_results` instead.")
if self.learning_results.loadings is None:
raise RuntimeError("No learning results found. A 'decomposition' "
"needs to be performed first.")
if same_window is None:
same_window = True
loadings = self.learning_results.loadings.T
if with_factors:
factors = self.learning_results.factors
else:
factors = None
if comp_ids is None:
if self.learning_results.output_dimension:
comp_ids = self.learning_results.output_dimension
else:
raise ValueError(
"Please provide the number of components to plot via the "
"`comp_ids` argument")
comp_label = kwargs.get("comp_label", None)
title = _change_API_comp_label(title, comp_label)
if title is None:
title = self._get_plot_title(
'Decomposition loadings of', same_window=same_window)
return self._plot_loadings(
loadings,
comp_ids=comp_ids,
with_factors=with_factors,
factors=factors,
same_window=same_window,
comp_label=title,
cmap=cmap,
no_nans=no_nans,
per_row=per_row,
axes_decor=axes_decor)
def plot_bss_loadings(self,
comp_ids=None,
calibrate=True,
same_window=True,
title=None,
with_factors=False,
cmap=plt.cm.gray,
no_nans=False,
per_row=3,
axes_decor='all',
**kwargs,
):
"""Plot loadings from blind source separation results. In case of 1D
navigation axis, each loading line can be toggled on and off by
clicking on their corresponding line in the legend.
Parameters
----------
comp_ids : None, int, or list (of ints)
If `comp_ids` is ``None``, maps of all components will be
returned. If it is an int, maps of components with ids from 0 to
the given value will be returned. If `comp_ids` is a list of
ints, maps of components with ids contained in the list will be
returned.
calibrate : bool
if ``True``, calibrates plots where calibration is available
from the axes_manager. If ``False``, plots are in pixels/channels.
same_window : bool
If ``True``, plots each factor to the same window. They are
not scaled. Default is ``True``.
comp_label : str
Will be deprecated in 2.0, please use `title` instead
title : str
Title of the plot.
with_factors : bool
If `True`, also returns figure(s) with the factors for the
given `comp_ids`.
cmap : :py:class:`~matplotlib.colors.Colormap`
The colormap used for the loading images, or for peak
characteristics. Default is the matplotlib gray colormap
(``plt.cm.gray``).
no_nans : bool
If ``True``, removes ``NaN``'s from the loading plots.
per_row : int
The number of plots in each row, when the `same_window`
parameter is ``True``.
axes_decor : str or None, optional
One of: ``'all'``, ``'ticks'``, ``'off'``, or ``None``
Controls how the axes are displayed on each image;
default is ``'all'``
If ``'all'``, both ticks and axis labels will be shown
If ``'ticks'``, no axis labels will be shown, but ticks/labels will
If ``'off'``, all decorations and frame will be disabled
If ``None``, no axis decorations will be shown, but ticks/frame will
See also
--------
plot_bss_factors, plot_bss_results
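Examples
--------
A minimal sketch (``s`` is an illustrative signal on which blind
source separation has already been run):
>>> s.plot_bss_loadings(comp_ids=3, with_factors=True)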
"""
if self.axes_manager.navigation_dimension > 2:
raise NotImplementedError("This method cannot plot loadings of "
"dimension higher than 2."
"You can use "
"`plot_bss_results` instead.")
if self.learning_results.bss_loadings is None:
raise RuntimeError("No learning results found. A "
"'blind_source_separation' needs to be "
"performed first.")
if same_window is None:
same_window = True
comp_label = kwargs.get("comp_label", None)
title = _change_API_comp_label(title, comp_label)
if title is None:
title = self._get_plot_title(
'BSS loadings of', same_window=same_window)
loadings = self.learning_results.bss_loadings.T
if with_factors:
factors = self.learning_results.bss_factors
else:
factors = None
return self._plot_loadings(
loadings,
comp_ids=comp_ids,
with_factors=with_factors,
factors=factors,
same_window=same_window,
comp_label=title,
cmap=cmap,
no_nans=no_nans,
per_row=per_row,
axes_decor=axes_decor)
def _get_plot_title(self, base_title='Loadings', same_window=True):
title_md = self.metadata.General.title
title = "%s %s" % (base_title, title_md)
if title_md == '':  # remove the 'of' if 'title' is an empty string
title = title.replace(' of ', '')
if not same_window:
title = title.replace('loadings', 'loading')
return title
def export_decomposition_results(self, comp_ids=None,
folder=None,
calibrate=True,
factor_prefix='factor',
factor_format="hspy",
loading_prefix='loading',
loading_format="hspy",
comp_label=None,
cmap=plt.cm.gray,
same_window=False,
multiple_files=True,
no_nans=True,
per_row=3,
save_figures=False,
save_figures_format='png'):
"""Export results from a decomposition to any of the supported
formats.
Parameters
----------
comp_ids : None, int, or list (of ints)
If None, returns all components/loadings.
If an int, returns components/loadings with ids from 0 to the
given value.
If a list of ints, returns components/loadings with ids provided in
the given list.
folder : str or None
The path to the folder where the file will be saved.
If ``None``, the current folder is used by default.
factor_prefix : str
The prefix that any exported filenames for factors/components
begin with
factor_format : str
The extension of the format that you wish to save the factors to.
Default is ``'hspy'``. See `loading_format` for more details.
loading_prefix : str
The prefix that any exported filenames for factors/components
begin with
loading_format : str
The extension of the format that you wish to save to. default
is ``'hspy'``. The format determines the kind of output:
* For image formats (``'tif'``, ``'png'``, ``'jpg'``, etc.),
plots are created using the plotting flags as below, and saved
at 600 dpi. One plot is saved per loading.
* For multidimensional formats (``'rpl'``, ``'hspy'``), arrays
are saved in single files. All loadings are contained in the
one file.
* For spectral formats (``'msa'``), each loading is saved to a
separate file.
multiple_files : bool
If ``True``, one file will be created for each factor and loading.
Otherwise, only two files will be created, one for
the factors and another for the loadings. The default value can
be chosen in the preferences.
save_figures : bool
If ``True`` the same figures that are obtained when using the plot
methods will be saved with 600 dpi resolution
Note
----
The following parameters are only used when ``save_figures = True``:
Other Parameters
----------------
calibrate : :py:class:`bool`
If ``True``, calibrates plots where calibration is available
from the axes_manager. If ``False``, plots are in pixels/channels.
same_window : :py:class:`bool`
If ``True``, plots each factor to the same window.
comp_label : :py:class:`str`
the label that is either the plot title (if plotting in separate
windows) or the label in the legend (if plotting in the same window)
cmap : :py:class:`~matplotlib.colors.Colormap`
The colormap used for images, such as factors, loadings, or for peak
characteristics. Default is the matplotlib gray colormap
(``plt.cm.gray``).
per_row : :py:class:`int`
The number of plots in each row, when the `same_window`
parameter is ``True``.
save_figures_format : :py:class:`str`
The image format extension.
See also
--------
get_decomposition_factors, get_decomposition_loadings
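Examples
--------
A minimal sketch (the folder name is illustrative); with
``multiple_files=False`` this writes one file for the factors and
one for the loadings:
>>> s.decomposition(output_dimension=5)
>>> s.export_decomposition_results(folder='decomp_results',
...                                multiple_files=False)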
"""
factors = self.learning_results.factors
loadings = self.learning_results.loadings.T
self._export_factors(factors,
folder=folder,
comp_ids=comp_ids,
calibrate=calibrate,
multiple_files=multiple_files,
factor_prefix=factor_prefix,
factor_format=factor_format,
comp_label=comp_label,
save_figures=save_figures,
cmap=cmap,
no_nans=no_nans,
same_window=same_window,
per_row=per_row,
save_figures_format=save_figures_format)
self._export_loadings(loadings,
comp_ids=comp_ids, folder=folder,
calibrate=calibrate,
multiple_files=multiple_files,
loading_prefix=loading_prefix,
loading_format=loading_format,
comp_label=comp_label,
cmap=cmap,
save_figures=save_figures,
same_window=same_window,
no_nans=no_nans,
per_row=per_row)
def export_cluster_results(self,
cluster_ids=None,
folder=None,
calibrate=True,
center_prefix='cluster_center',
center_format="hspy",
membership_prefix='cluster_label',
membership_format="hspy",
comp_label=None,
cmap=plt.cm.gray,
same_window=False,
multiple_files=True,
no_nans=True,
per_row=3,
save_figures=False,
save_figures_format='png'):
"""Export results from a cluster analysis to any of the supported
formats.
Parameters
----------
cluster_ids : None, int, or list of ints
if None, returns all clusters/centers.
if int, returns clusters/centers with ids from 0 to the given int.
if list of ints, returns clusters/centers with ids in the given
list.
folder : str or None
The path to the folder where the file will be saved.
If ``None``, the current folder is used by default.
center_prefix : str
The prefix that any exported filenames for cluster centers
begin with.
center_format : str
The extension of the format that you wish to save to. Default is
``'hspy'``. See `membership_format` for more details.
membership_prefix : str
The prefix that any exported filenames for cluster labels
begin with.
membership_format : str
The extension of the format that you wish to save to. Default
is ``'hspy'``. The format determines the kind of output:
* For image formats (``'tif'``, ``'png'``, ``'jpg'``, etc.),
plots are created using the plotting flags as below, and saved
at 600 dpi. One plot is saved per loading.
* For multidimensional formats (``'rpl'``, ``'hspy'``), arrays
are saved in single files. All loadings are contained in the
one file.
* For spectral formats (``'msa'``), each loading is saved to a
separate file.
multiple_files : bool
If ``True``, one file will be created for each center and
membership map. Otherwise, only two files will be created, one for
the centers and another for the memberships. The default value can
be chosen in the preferences.
save_figures : bool
If True the same figures that are obtained when using the plot
methods will be saved with 600 dpi resolution
Note
----
The following parameters are only used when ``save_figures = True``:
Other Parameters
----------------
calibrate : bool
If ``True``, calibrates plots where calibration is available
from the axes_manager. If ``False``, plots are in pixels/channels.
same_window : bool
If ``True``, plots each center to the same window.
comp_label : str
The label that is either the plot title (if plotting in separate
windows) or the label in the legend (if plotting in the same
window).
cmap : :py:class:`~matplotlib.colors.Colormap`
The colormap used for the center images, or for peak
characteristics. Default is the matplotlib gray colormap
(``plt.cm.gray``).
per_row : int
The number of plots in each row, when the `same_window`
parameter is ``True``.
save_figures_format : str
The image format extension.
See Also
--------
get_cluster_signals, get_cluster_labels
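Examples
--------
A minimal sketch, assuming a cluster analysis has already been run
on the (illustrative) signal ``s``; figures are saved instead of
data files when ``save_figures=True``:
>>> s.export_cluster_results(save_figures=True,
...                          save_figures_format='png')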
"""
factors = self.learning_results.cluster_centers.T
loadings = self.learning_results.cluster_labels
self._export_factors(factors,
folder=folder,
comp_ids=cluster_ids,
calibrate=calibrate,
multiple_files=multiple_files,
factor_prefix=center_prefix,
factor_format=center_format,
comp_label=comp_label,
save_figures=save_figures,
cmap=cmap,
no_nans=no_nans,
same_window=same_window,
per_row=per_row,
save_figures_format=save_figures_format)
self._export_loadings(loadings,
comp_ids=cluster_ids,
folder=folder,
calibrate=calibrate,
multiple_files=multiple_files,
loading_prefix=membership_prefix,
loading_format=membership_format,
comp_label=comp_label,
cmap=cmap,
save_figures=save_figures,
same_window=same_window,
no_nans=no_nans,
per_row=per_row)
def export_bss_results(self,
comp_ids=None,
folder=None,
calibrate=True,
multiple_files=True,
save_figures=False,
factor_prefix='bss_factor',
factor_format="hspy",
loading_prefix='bss_loading',
loading_format="hspy",
comp_label=None, cmap=plt.cm.gray,
same_window=False,
no_nans=True,
per_row=3,
save_figures_format='png'):
"""Export results from ICA to any of the supported formats.
Parameters
----------
comp_ids : None, int, or list (of ints)
If None, returns all components/loadings.
If an int, returns components/loadings with ids from 0 to the
given value.
If a list of ints, returns components/loadings with ids provided in
the given list.
folder : str or None
The path to the folder where the file will be saved.
If ``None`` the current folder is used by default.
factor_prefix : str
The prefix that any exported filenames for factors/components
begin with
factor_format : str
The extension of the format that you wish to save the factors to.
Default is ``'hspy'``. See `loading_format` for more details.
loading_prefix : str
The prefix that any exported filenames for factors/components
begin with
loading_format : str
The extension of the format that you wish to save to. default
is ``'hspy'``. The format determines the kind of output:
* For image formats (``'tif'``, ``'png'``, ``'jpg'``, etc.),
plots are created using the plotting flags as below, and saved
at 600 dpi. One plot is saved per loading.
* For multidimensional formats (``'rpl'``, ``'hspy'``), arrays
are saved in single files. All loadings are contained in the
one file.
* For spectral formats (``'msa'``), each loading is saved to a
separate file.
multiple_files : bool
If ``True``, one file will be created for each factor and loading.
Otherwise, only two files will be created, one for
the factors and another for the loadings. The default value can
be chosen in the preferences.
save_figures : bool
If ``True``, the same figures that are obtained when using the plot
methods will be saved with 600 dpi resolution
Note
----
The following parameters are only used when ``save_figures = True``:
Other Parameters
----------------
calibrate : :py:class:`bool`
If ``True``, calibrates plots where calibration is available
from the axes_manager. If ``False``, plots are in pixels/channels.
same_window : :py:class:`bool`
If ``True``, plots each factor to the same window.
comp_label : :py:class:`str`
the label that is either the plot title (if plotting in separate
windows) or the label in the legend (if plotting in the same window)
cmap : :py:class:`~matplotlib.colors.Colormap`
The colormap used for images, such as factors, loadings, or
for peak characteristics. Default is the matplotlib gray colormap
(``plt.cm.gray``).
per_row : :py:class:`int`
The number of plots in each row, when the `same_window`
parameter is ``True``.
save_figures_format : :py:class:`str`
The image format extension.
See also
--------
get_bss_factors, get_bss_loadings
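Examples
--------
A minimal sketch (``s`` is an illustrative signal with blind source
separation results); with a spectral format such as ``'msa'``, one
file is written per factor:
>>> s.export_bss_results(factor_format='msa', multiple_files=True)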
"""
factors = self.learning_results.bss_factors
loadings = self.learning_results.bss_loadings.T
self._export_factors(factors,
folder=folder,
comp_ids=comp_ids,
calibrate=calibrate,
multiple_files=multiple_files,
factor_prefix=factor_prefix,
factor_format=factor_format,
comp_label=comp_label,
save_figures=save_figures,
cmap=cmap,
no_nans=no_nans,
same_window=same_window,
per_row=per_row,
save_figures_format=save_figures_format)
self._export_loadings(loadings,
comp_ids=comp_ids,
folder=folder,
calibrate=calibrate,
multiple_files=multiple_files,
loading_prefix=loading_prefix,
loading_format=loading_format,
comp_label=comp_label,
cmap=cmap,
save_figures=save_figures,
same_window=same_window,
no_nans=no_nans,
per_row=per_row,
save_figures_format=save_figures_format)
def _get_loadings(self, loadings):
if loadings is None:
raise RuntimeError("No learning results found.")
from hyperspy.api import signals
data = loadings.T.reshape(
(-1,) + self.axes_manager.navigation_shape[::-1])
if data.shape[0] > 1:
signal = signals.BaseSignal(
data,
axes=(
[{"size": data.shape[0], "navigate": True}] +
self.axes_manager._get_navigation_axes_dicts()))
for axis in signal.axes_manager._axes[1:]:
axis.navigate = False
else:
signal = self._get_navigation_signal(data.squeeze())
return signal
def _get_factors(self, factors):
if factors is None:
raise RuntimeError("No learning results found.")
signal = self.__class__(
factors.T.reshape((-1,) + self.axes_manager.signal_shape[::-1]),
axes=[{"size": factors.shape[-1], "navigate": True}] +
self.axes_manager._get_signal_axes_dicts())
signal.set_signal_type(self.metadata.Signal.signal_type)
for axis in signal.axes_manager._axes[1:]:
axis.navigate = False
return signal
def get_decomposition_loadings(self):
"""Return the decomposition loadings.
Returns
-------
signal : :py:class:`~hyperspy.signal.BaseSignal` (or subclass)
See also
--------
get_decomposition_factors, export_decomposition_results
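Examples
--------
A minimal sketch (``s`` is an illustrative signal with decomposition
results); the loadings are returned as a signal that can be plotted
or saved like any other:
>>> loadings = s.get_decomposition_loadings()
>>> loadings.plot()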
"""
if self.learning_results.loadings is None:
raise RuntimeError("Run a decomposition first.")
signal = self._get_loadings(self.learning_results.loadings)
signal.axes_manager._axes[0].name = "Decomposition component index"
signal.metadata.General.title = "Decomposition loadings of " + \
self.metadata.General.title
return signal
def get_decomposition_factors(self):
"""Return the decomposition factors.
Returns
-------
signal : :py:class:`~hyperspy.signal.BaseSignal` (or subclass)
See also
--------
get_decomposition_loadings, export_decomposition_results
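Examples
--------
A minimal sketch (``s`` is illustrative):
>>> factors = s.get_decomposition_factors()
>>> factors.plot()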
"""
if self.learning_results.factors is None:
raise RuntimeError("Run a decomposition first.")
signal = self._get_factors(self.learning_results.factors)
signal.axes_manager._axes[0].name = "Decomposition component index"
signal.metadata.General.title = ("Decomposition factors of " +
self.metadata.General.title)
return signal
def get_bss_loadings(self):
"""Return the blind source separation loadings.
Returns
-------
signal : :py:class:`~hyperspy.signal.BaseSignal` (or subclass)
See also
--------
get_bss_factors, export_bss_results
"""
signal = self._get_loadings(
self.learning_results.bss_loadings)
signal.axes_manager[0].name = "BSS component index"
signal.metadata.General.title = ("BSS loadings of " +
self.metadata.General.title)
return signal
def get_bss_factors(self):
"""Return the blind source separation factors.
Returns
-------
signal : :py:class:`~hyperspy.signal.BaseSignal` (or subclass)
See also
--------
get_bss_loadings, export_bss_results
"""
signal = self._get_factors(self.learning_results.bss_factors)
signal.axes_manager[0].name = "BSS component index"
signal.metadata.General.title = ("BSS factors of " +
self.metadata.General.title)
return signal
def plot_bss_results(self,
factors_navigator="smart_auto",
loadings_navigator="smart_auto",
factors_dim=2,
loadings_dim=2,):
"""Plot the blind source separation factors and loadings.
Unlike :py:meth:`~hyperspy.signal.MVATools.plot_bss_factors` and
:py:meth:`~hyperspy.signal.MVATools.plot_bss_loadings`,
this method displays one component at a time. Therefore it provides a
more compact visualization than the other two methods.
The loadings and factors are displayed in different windows and each
has its own navigator/sliders to navigate them if they are
multidimensional. The component index axis is synchronized between
the two.
Parameters
----------
factors_navigator : str, None, or :py:class:`~hyperspy.signal.BaseSignal` (or subclass)
One of: ``'smart_auto'``, ``'auto'``, ``None``, ``'spectrum'`` or a
:py:class:`~hyperspy.signal.BaseSignal` object.
``'smart_auto'`` (default) displays sliders if the navigation
dimension is less than 3. For a description of the other options
see the :py:meth:`~hyperspy.signal.BaseSignal.plot` documentation
for details.
loadings_navigator : str, None, or :py:class:`~hyperspy.signal.BaseSignal` (or subclass)
See the `factors_navigator` parameter
factors_dim : int
Currently HyperSpy cannot plot a signal when the signal dimension is
higher than two. Therefore, to visualize the BSS results when the
factors or the loadings have signal dimension greater than 2,
the data can be viewed as spectra (or images) by setting this
parameter to 1 (or 2). (The default is 2)
loadings_dim : int
See the ``factors_dim`` parameter
See also
--------
plot_bss_factors, plot_bss_loadings, plot_decomposition_results
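Examples
--------
A minimal sketch (``s`` is an illustrative signal on which blind
source separation has been run):
>>> s.plot_bss_results()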
"""
factors = self.get_bss_factors()
loadings = self.get_bss_loadings()
_plot_x_results(factors=factors, loadings=loadings,
factors_navigator=factors_navigator,
loadings_navigator=loadings_navigator,
factors_dim=factors_dim,
loadings_dim=loadings_dim)
def plot_decomposition_results(self,
factors_navigator="smart_auto",
loadings_navigator="smart_auto",
factors_dim=2,
loadings_dim=2):
"""Plot the decomposition factors and loadings.
Unlike :py:meth:`~hyperspy.signal.MVATools.plot_decomposition_factors`
and :py:meth:`~hyperspy.signal.MVATools.plot_decomposition_loadings`,
this method displays one component at a time. Therefore it provides a
more compact visualization than the other two methods. The loadings
and factors are displayed in different windows and each has its own
navigator/sliders to navigate them if they are multidimensional. The
component index axis is synchronized between the two.
Parameters
----------
factors_navigator : str, None, or :py:class:`~hyperspy.signal.BaseSignal` (or subclass)
One of: ``'smart_auto'``, ``'auto'``, ``None``, ``'spectrum'`` or a
:py:class:`~hyperspy.signal.BaseSignal` object.
``'smart_auto'`` (default) displays sliders if the navigation
dimension is less than 3. For a description of the other options
see the :py:meth:`~hyperspy.signal.BaseSignal.plot` documentation
for details.
loadings_navigator : str, None, or :py:class:`~hyperspy.signal.BaseSignal` (or subclass)
See the `factors_navigator` parameter
factors_dim : int
Currently HyperSpy cannot plot a signal when the signal dimension is
higher than two. Therefore, to visualize the BSS results when the
factors or the loadings have signal dimension greater than 2,
the data can be viewed as spectra (or images) by setting this
parameter to 1 (or 2). (The default is 2)
loadings_dim : int
See the ``factors_dim`` parameter
See also
--------
plot_decomposition_factors, plot_decomposition_loadings,
plot_bss_results
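Examples
--------
A minimal sketch (``s`` is illustrative):
>>> s.decomposition(output_dimension=3)
>>> s.plot_decomposition_results()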
"""
factors = self.get_decomposition_factors()
loadings = self.get_decomposition_loadings()
_plot_x_results(factors=factors, loadings=loadings,
factors_navigator=factors_navigator,
loadings_navigator=loadings_navigator,
factors_dim=factors_dim,
loadings_dim=loadings_dim)
def get_cluster_labels(self, merged=False):
"""Return cluster labels as a Signal.
Parameters
----------
merged : bool
If False, the cluster label signal has a navigation axis of length
`number_of_clusters`, and the signal along the navigation
direction is binary: 0 if the point is not in the cluster, 1 if it
is included. If True, the cluster labels are merged (no navigation
axes). The value of the signal at any point will be between -1 and
the number of clusters, where -1 represents the points that
were masked for cluster analysis, if any.
Returns
-------
signal : :py:class:`~hyperspy.signal.BaseSignal` (or subclass)
HyperSpy signal of cluster labels.
See Also
--------
get_cluster_signals
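Examples
--------
A minimal sketch, assuming a cluster analysis has already been run
on the (illustrative) signal ``s``; the merged form gives a single
overview map:
>>> labels = s.get_cluster_labels(merged=True)
>>> labels.plot()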
"""
if self.learning_results.cluster_labels is None:
raise RuntimeError(
"Cluster analysis needs to be performed first.")
if merged:
data = (np.arange(1, self.learning_results.number_of_clusters + 1)
[:, np.newaxis] *
self.learning_results.cluster_labels).sum(0) - 1
label_signal = self._get_loadings(data)
else:
label_signal = self._get_loadings(
self.learning_results.cluster_labels.T)
label_signal.axes_manager._axes[0].name = "Cluster index"
label_signal.metadata.General.title = (
"Cluster labels of " + self.metadata.General.title)
return label_signal
def _get_cluster_signals_factors(self, signal):
if self.learning_results.cluster_centroid_signals is None:
raise RuntimeError("Cluster analysis needs to be performed first.")
if signal == "mean":
members = self.learning_results.cluster_labels.sum(1, keepdims=True)
cs = self.learning_results.cluster_sum_signals / members
elif signal == "sum":
cs=self.learning_results.cluster_sum_signals
elif signal == "centroid":
cs=self.learning_results.cluster_centroid_signals
return cs
def get_cluster_signals(self, signal="mean"):
"""Return the cluster centers as a Signal.
Parameters
----------
%s
See Also
--------
get_cluster_labels
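Examples
--------
A minimal sketch (``s`` is an illustrative signal with cluster
results); the summed signal of each cluster is returned as a signal:
>>> cluster_sums = s.get_cluster_signals(signal="sum")
>>> cluster_sums.plot()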
"""
cs = self._get_cluster_signals_factors(signal=signal)
# Keep the result in its own name: rebinding `signal` here would
# make the f-string below interpolate the signal's repr instead of
# the requested statistic ("mean", "sum" or "centroid").
cluster_signal = self._get_factors(cs.T)
cluster_signal.axes_manager._axes[0].name = "Cluster index"
cluster_signal.metadata.General.title = (
f"Cluster {signal} signals of {self.metadata.General.title}")
return cluster_signal
get_cluster_signals.__doc__ %= (CLUSTER_SIGNALS_ARG)
def get_cluster_distances(self):
"""Euclidian distances to the centroid of each cluster
See Also
--------
get_cluster_signals
Returns
-------
signal
Hyperspy signal of cluster distances
"""
if self.learning_results.cluster_distances is None:
raise RuntimeError("Cluster analysis needs to be performed first.")
distance_signal = self._get_loadings(self.learning_results.cluster_distances.T)
distance_signal.axes_manager._axes[0].name = "Cluster index"
distance_signal.metadata.General.title = \
"Cluster distances of " + self.metadata.General.title
return distance_signal
def plot_cluster_signals(
self,
signal="mean",
cluster_ids=None,
calibrate=True,
same_window=True,
comp_label="Cluster centers",
per_row=3):
"""Plot centers from a cluster analysis.
Parameters
----------
%s
cluster_ids : None, int, or list of ints
if None, returns maps of all clusters.
if int, returns maps of clusters with ids from 0 to given
int.
if list of ints, returns maps of clusters with ids in
given list.
calibrate : bool
if True, calibrates plots where calibration is available
from the axes_manager. If False, plots are in pixels/channels.
same_window : bool
if True, plots each center to the same window. They are
not scaled.
comp_label : string
the label that is either the plot title (if plotting in
separate windows) or the label in the legend (if plotting
in the same window)
per_row : int
the number of plots in each row, when the same_window parameter is
True.
See Also
--------
plot_cluster_labels
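Examples
--------
A minimal sketch (``s`` is an illustrative signal with cluster
results):
>>> s.plot_cluster_signals(signal="centroid", per_row=2)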
"""
if self.axes_manager.signal_dimension > 2:
raise NotImplementedError("This method cannot plot factors of "
"signals of dimension higher than 2.")
cs = self._get_cluster_signals_factors(signal=signal)
if same_window is None:
same_window = True
factors = cs.T
if cluster_ids is None:
cluster_ids = range(factors.shape[1])
return self._plot_factors_or_pchars(factors,
comp_ids=cluster_ids,
calibrate=calibrate,
same_window=same_window,
comp_label=comp_label,
per_row=per_row)
plot_cluster_signals.__doc__ %= (CLUSTER_SIGNALS_ARG)
def plot_cluster_labels(
self,
cluster_ids=None,
calibrate=True,
same_window=True,
with_centers=False,
cmap=plt.cm.gray,
no_nans=False,
per_row=3,
axes_decor='all',
title=None,
**kwargs):
"""Plot cluster labels from a cluster analysis. In case of 1D navigation axis,
each label line can be toggled on and off by clicking on its
corresponding line in the legend.
Parameters
----------
cluster_ids : None, int, or list of ints
if None (default), returns maps of all cluster labels, using the
number of clusters defined when executing the cluster analysis.
if int, returns maps of cluster labels with ids from 0 to the
given int.
if list of ints, returns maps of cluster labels with ids in the
given list.
calibrate : bool
if True, calibrates plots where calibration is available
from the axes_manager. If False, plots are in pixels/channels.
same_window : bool
if True, plots each factor to the same window. They are
not scaled. Default is True.
title : string
Title of the plot.
with_centers : bool
If True, also returns figure(s) with the cluster centers for the
given cluster_ids.
cmap : matplotlib colormap
The colormap used for the factor image, or for peak
characteristics, the colormap used for the scatter plot of
some peak characteristic.
no_nans : bool
If True, removes NaN's from the loading plots.
per_row : int
the number of plots in each row, when the same_window
parameter is True.
axes_decor : {'all', 'ticks', 'off', None}, optional
Controls how the axes are displayed on each image; default is 'all'
If 'all', both ticks and axis labels will be shown
If 'ticks', no axis labels will be shown, but ticks/labels will
If 'off', all decorations and frame will be disabled
If None, no axis decorations will be shown, but ticks/frame will
See Also
--------
plot_cluster_signals, plot_cluster_results.
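Examples
--------
A minimal sketch (``s`` is illustrative); this plots the label maps
together with the corresponding cluster centers:
>>> s.plot_cluster_labels(with_centers=True)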
"""
if self.axes_manager.navigation_dimension > 2:
raise NotImplementedError("This method cannot plot labels of "
"dimension higher than 2."
"You can use "
"`plot_cluster_results` instead.")
if same_window is None:
same_window = True
labels = self.learning_results.cluster_labels.astype("uint")
if with_centers:
centers = self.learning_results.cluster_centers.T
else:
centers = None
if cluster_ids is None:
cluster_ids = range(labels.shape[0])
comp_label = kwargs.get("comp_label", None)
title = _change_API_comp_label(title, comp_label)
if title is None:
title = self._get_plot_title(
'Cluster labels of', same_window=same_window)
return self._plot_loadings(labels,
comp_ids=cluster_ids,
with_factors=with_centers,
factors=centers,
same_window=same_window,
comp_label=title,
cmap=cmap,
no_nans=no_nans,
per_row=per_row,
axes_decor=axes_decor)
def plot_cluster_distances(
self,
cluster_ids=None,
calibrate=True,
same_window=True,
with_centers=False,
cmap=plt.cm.gray,
no_nans=False,
per_row=3,
axes_decor='all',
title=None,
**kwargs):
"""Plot the euclidian distances to the centroid of each cluster.
In case of 1D navigation axis,
each line can be toggled on and off by clicking on the legended
line.
Parameters
----------
cluster_ids : None, int, or list of ints
if None (default), returns maps of all cluster distances, using
the number of clusters defined when executing the cluster
analysis.
if int, returns maps of cluster distances with ids from 0 to the
given int.
if list of ints, returns maps of cluster distances with ids in the
given list.
calibrate : bool
if True, calibrates plots where calibration is available
from the axes_manager. If False, plots are in pixels/channels.
same_window : bool
if True, plots each factor to the same window. They are
not scaled. Default is True.
title : string
Title of the plot.
with_centers : bool
If True, also returns figure(s) with the cluster centers for the
given cluster_ids.
cmap : matplotlib colormap
The colormap used for the factor image, or for peak
characteristics, the colormap used for the scatter plot of
some peak characteristic.
no_nans : bool
If True, removes NaN's from the loading plots.
per_row : int
the number of plots in each row, when the same_window
parameter is True.
axes_decor : {'all', 'ticks', 'off', None}, optional
Controls how the axes are displayed on each image; default is 'all'
If 'all', both ticks and axis labels will be shown
If 'ticks', no axis labels will be shown, but ticks/labels will
If 'off', all decorations and frame will be disabled
If None, no axis decorations will be shown, but ticks/frame will
See Also
--------
plot_cluster_signals, plot_cluster_results, plot_cluster_labels
"""
if self.axes_manager.navigation_dimension > 2:
raise NotImplementedError("This method cannot plot labels of "
"dimension higher than 2."
"You can use "
"`plot_cluster_results` instead.")
if same_window is None:
same_window = True
distances = self.learning_results.cluster_distances
if with_centers:
centers = self.learning_results.cluster_centers.T
else:
centers = None
if cluster_ids is None:
cluster_ids = range(distances.shape[0])
comp_label = kwargs.get("comp_label", None)
title = _change_API_comp_label(title, comp_label)
if title is None:
title = self._get_plot_title(
'Cluster distances of', same_window=same_window)
return self._plot_loadings(distances,
comp_ids=cluster_ids,
with_factors=with_centers,
factors=centers,
same_window=same_window,
comp_label=title,
cmap=cmap,
no_nans=no_nans,
per_row=per_row,
axes_decor=axes_decor)
def plot_cluster_results(self,
centers_navigator="smart_auto",
labels_navigator="smart_auto",
centers_dim=2,
labels_dim=2,
):
"""Plot the cluster labels and centers.
Unlike `plot_cluster_labels` and `plot_cluster_signals`, this
method displays one component at a time.
Therefore it provides a more compact visualization than the other
two methods. The labels and centers are displayed in different
windows and each has its own navigator/sliders to navigate them if
they are multidimensional. The component index axis is synchronized
between the two.
Parameters
----------
centers_navigator, labels_navigator : str, None or :py:class:`~hyperspy.signal.BaseSignal`
One of: ``'smart_auto'``, ``'auto'``, ``None``, ``'spectrum'`` or a
:py:class:`~hyperspy.signal.BaseSignal` object.
``'smart_auto'`` (default) displays sliders if the navigation
dimension is less than 3. For a description of the other options,
see the `plot` documentation for details.
centers_dim, labels_dim : int
Currently HyperSpy cannot plot signals of dimension higher than
two. Therefore, to visualize the clustering results when the
centers or the labels have signal dimension greater than 2,
the data can be viewed as spectra (or images) by setting this
parameter to 1 (or 2). (The default is 2)
See Also
--------
plot_cluster_signals, plot_cluster_labels.
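Examples
--------
A minimal sketch (``s`` is an illustrative signal on which a cluster
analysis has been run):
>>> s.plot_cluster_results(centers_navigator="slider")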
"""
centers = self.get_cluster_signals()
distances = self.get_cluster_distances()
self.get_cluster_labels(merged=True).plot()
_plot_x_results(factors=centers,
loadings=distances,
factors_navigator=centers_navigator,
loadings_navigator=labels_navigator,
factors_dim=centers_dim,
loadings_dim=labels_dim)
def _plot_x_results(factors, loadings, factors_navigator, loadings_navigator,
factors_dim, loadings_dim):
factors.axes_manager._axes[0] = loadings.axes_manager._axes[0]
if loadings.axes_manager.signal_dimension > 2:
loadings.axes_manager.set_signal_dimension(loadings_dim)
if factors.axes_manager.signal_dimension > 2:
factors.axes_manager.set_signal_dimension(factors_dim)
if (loadings_navigator == "smart_auto" and
loadings.axes_manager.navigation_dimension < 3):
loadings_navigator = "slider"
else:
loadings_navigator = "auto"
if (factors_navigator == "smart_auto" and
(factors.axes_manager.navigation_dimension < 3 or
loadings_navigator is not None)):
factors_navigator = None
else:
factors_navigator = "auto"
loadings.plot(navigator=loadings_navigator)
factors.plot(navigator=factors_navigator)
def _change_API_comp_label(title, comp_label):
if comp_label is not None:
if title is None:
title = comp_label
warnings.warn("The 'comp_label' argument will be deprecated "
"in 2.0, please use 'title' instead",
VisibleDeprecationWarning)
else:
warnings.warn("The 'comp_label' argument will be deprecated "
"in 2.0, Since you are already using the 'title'",
"argument, 'comp_label' is ignored.",
VisibleDeprecationWarning)
return title
class SpecialSlicersSignal(SpecialSlicers):
def __setitem__(self, i, j):
"""x.__setitem__(i, y) <==> x[i]=y
"""
if isinstance(j, BaseSignal):
j = j.data
array_slices = self.obj._get_array_slices(i, self.isNavigation)
self.obj.data[array_slices] = j
def __len__(self):
return self.obj.axes_manager.signal_shape[0]
class BaseSetMetadataItems(t.HasTraits):
def __init__(self, signal):
for key, value in self.mapping.items():
if signal.metadata.has_item(key):
setattr(self, value, signal.metadata.get_item(key))
self.signal = signal
def store(self, *args, **kwargs):
for key, value in self.mapping.items():
if getattr(self, value) != t.Undefined:
self.signal.metadata.set_item(key, getattr(self, value))
class BaseSignal(FancySlicing,
MVA,
MVATools,):
_dtype = "real"
_signal_dimension = -1
_signal_type = ""
_lazy = False
_alias_signal_types = []
_additional_slicing_targets = [
"metadata.Signal.Noise_properties.variance",
]
def __init__(self, data, **kwds):
"""Create a Signal from a numpy array.
Parameters
----------
data : :py:class:`numpy.ndarray`
The signal data. It can be an array of any dimensions.
axes : [dict/axes], optional
List of either dictionaries or axes objects to define the axes (see
the documentation of the :py:class:`~hyperspy.axes.AxesManager`
class for more details).
attributes : dict, optional
A dictionary whose items are stored as attributes.
metadata : dict, optional
A dictionary containing a set of parameters
that will be stored in the ``metadata`` attribute.
Some parameters might be mandatory in some cases.
original_metadata : dict, optional
A dictionary containing a set of parameters
that will be stored in the ``original_metadata`` attribute. It
typically contains all the parameters that have been
imported from the original data file.
ragged : bool or None, optional
Define whether the signal is ragged or not. Overwrite the
``ragged`` value in the ``attributes`` dictionary. If None, it does
nothing. Default is None.
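Examples
--------
A minimal sketch of constructing a signal directly from a numpy
array (the array contents are illustrative):
>>> import numpy as np
>>> import hyperspy.api as hs
>>> s = hs.signals.BaseSignal(np.arange(10))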
"""
# the 'full_initialisation' keyword is private API to be used by the
# _assign_subclass method. Purposely not exposed as public API.
# Its purpose is to avoid creating new attributes, which breaks events
# and to reduce overhead when changing 'signal_type'.
if kwds.get('full_initialisation', True):
self._create_metadata()
self.models = ModelManager(self)
self.learning_results = LearningResults()
kwds['data'] = data
self._plot = None
self.inav = SpecialSlicersSignal(self, True)
self.isig = SpecialSlicersSignal(self, False)
self.events = Events()
self.events.data_changed = Event("""
Event that triggers when the data has changed
The event triggers when the data is ready for consumption by any
process that depends on it as input. Plotted signals automatically
connect this Event to its `BaseSignal.plot()`.
Note: The event only fires at certain specific times, not every time
that the `BaseSignal.data` array changes values.
Arguments:
obj: The signal that owns the data.
""", arguments=['obj'])
self._load_dictionary(kwds)
def _create_metadata(self):
self.metadata = DictionaryTreeBrowser()
mp = self.metadata
mp.add_node("_HyperSpy")
mp.add_node("General")
mp.add_node("Signal")
mp._HyperSpy.add_node("Folding")
folding = mp._HyperSpy.Folding
folding.unfolded = False
folding.signal_unfolded = False
folding.original_shape = None
folding.original_axes_manager = None
self.original_metadata = DictionaryTreeBrowser()
self.tmp_parameters = DictionaryTreeBrowser()
def __repr__(self):
if self.metadata._HyperSpy.Folding.unfolded:
unfolded = "unfolded "
else:
unfolded = ""
string = '<'
string += self.__class__.__name__
string += ", title: %s" % self.metadata.General.title
string += ", %sdimensions: %s" % (
unfolded,
self.axes_manager._get_dimension_str())
string += '>'
return string
def _binary_operator_ruler(self, other, op_name):
exception_message = (
"Invalid dimensions for this operation")
if isinstance(other, BaseSignal):
# Both objects are signals
oam = other.axes_manager
sam = self.axes_manager
if sam.navigation_shape == oam.navigation_shape and \
sam.signal_shape == oam.signal_shape:
# They have the same signal shape.
# The signal axes are aligned, but there is
# no guarantee that the data axes are aligned, so we make sure
# that they are aligned for the operation.
sdata = self._data_aligned_with_axes
odata = other._data_aligned_with_axes
if op_name in INPLACE_OPERATORS:
self.data = getattr(sdata, op_name)(odata)
self.axes_manager._sort_axes()
return self
else:
ns = self._deepcopy_with_new_data(
getattr(sdata, op_name)(odata))
ns.axes_manager._sort_axes()
return ns
else:
# Different navigation and/or signal shapes
if not are_signals_aligned(self, other):
raise ValueError(exception_message)
else:
# They are broadcastable but have different number of axes
ns, no = broadcast_signals(self, other)
sdata = ns.data
odata = no.data
if op_name in INPLACE_OPERATORS:
# This should raise a ValueError if the operation
# changes the shape of the object on the left.
self.data = getattr(sdata, op_name)(odata)
self.axes_manager._sort_axes()
return self
else:
ns.data = getattr(sdata, op_name)(odata)
return ns
else:
# Second object is not a Signal
if op_name in INPLACE_OPERATORS:
getattr(self.data, op_name)(other)
return self
else:
return self._deepcopy_with_new_data(
getattr(self.data, op_name)(other))
def _unary_operator_ruler(self, op_name):
return self._deepcopy_with_new_data(getattr(self.data, op_name)())
def _check_signal_dimension_equals_one(self):
if self.axes_manager.signal_dimension != 1:
raise SignalDimensionError(self.axes_manager.signal_dimension, 1)
def _check_signal_dimension_equals_two(self):
if self.axes_manager.signal_dimension != 2:
raise SignalDimensionError(self.axes_manager.signal_dimension, 2)
def _deepcopy_with_new_data(self, data=None, copy_variance=False,
copy_navigator=False,
copy_learning_results=False):
"""Returns a deepcopy of itself replacing the data.
This method has an advantage over the default :py:func:`copy.deepcopy`
in that it does not copy the data, which can save memory.
Parameters
----------
data : None or :py:class:`numpy.ndarray`
copy_variance : bool
Whether to copy the variance of the signal to the new copy
copy_navigator : bool
Whether to copy the navigator of the signal to the new copy
copy_learning_results : bool
Whether to copy the learning_results of the signal to the new copy
Returns
-------
ns : :py:class:`~hyperspy.signal.BaseSignal` (or subclass)
The newly copied signal
"""
old_np = None
old_navigator = None
old_learning_results = None
try:
old_data = self.data
self.data = None
old_plot = self._plot
self._plot = None
old_models = self.models._models
if not copy_variance and "Noise_properties" in self.metadata.Signal:
old_np = self.metadata.Signal.Noise_properties
del self.metadata.Signal.Noise_properties
if not copy_navigator and self.metadata.has_item('_HyperSpy.navigator'):
old_navigator = self.metadata._HyperSpy.navigator
del self.metadata._HyperSpy.navigator
if not copy_learning_results:
old_learning_results = self.learning_results
del self.learning_results
self.models._models = DictionaryTreeBrowser()
ns = self.deepcopy()
ns.data = data
return ns
finally:
self.data = old_data
self._plot = old_plot
self.models._models = old_models
if old_np is not None:
self.metadata.Signal.Noise_properties = old_np
if old_navigator is not None:
self.metadata._HyperSpy.navigator = old_navigator
if old_learning_results is not None:
self.learning_results = old_learning_results
def as_lazy(self, copy_variance=True, copy_navigator=True,
copy_learning_results=True):
"""
Create a copy of the given Signal as a
:py:class:`~hyperspy._signals.lazy.LazySignal`.
Parameters
----------
copy_variance : bool
Whether or not to copy the variance from the original Signal to
the new lazy version. Default is True.
copy_navigator : bool
Whether or not to copy the navigator from the original Signal to
the new lazy version. Default is True.
copy_learning_results : bool
Whether to copy the learning_results from the original signal to
the new lazy version. Default is True.
Returns
-------
res : :py:class:`~hyperspy._signals.lazy.LazySignal`
The same signal, converted to be lazy
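        Examples
        --------
        A minimal sketch (assuming ``hs`` is the imported HyperSpy package):
        >>> s = hs.signals.Signal1D(np.zeros((4, 10)))
        >>> s_lazy = s.as_lazy()  # the data becomes a dask array
        >>> s_lazy._lazy
        True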
"""
res = self._deepcopy_with_new_data(
self.data,
copy_variance=copy_variance,
copy_navigator=copy_navigator,
copy_learning_results=copy_learning_results
)
res._lazy = True
res._assign_subclass()
return res
def _summary(self):
string = "\n\tTitle: "
string += self.metadata.General.title
if self.metadata.has_item("Signal.signal_type"):
string += "\n\tSignal type: "
string += self.metadata.Signal.signal_type
string += "\n\tData dimensions: "
string += str(self.axes_manager.shape)
string += "\n\tData type: "
string += str(self.data.dtype)
return string
def _print_summary(self):
print(self._summary())
@property
def data(self):
"""The underlying data structure as a :py:class:`numpy.ndarray` (or
:py:class:`dask.array.Array`, if the Signal is lazy)."""
return self._data
@data.setter
def data(self, value):
if not isinstance(value, da.Array):
value = np.asanyarray(value)
self._data = np.atleast_1d(value)
@property
def ragged(self):
return self.axes_manager._ragged
@ragged.setter
def ragged(self, value):
# nothing needs to be done!
if self.ragged == value:
return
if value:
if self.data.dtype != object:
raise ValueError("The array is not ragged.")
axes = [axis for axis in self.axes_manager.signal_axes
if axis.index_in_array not in list(range(self.data.ndim))]
self.axes_manager.remove(axes)
self.axes_manager.set_signal_dimension(0)
else:
if self._lazy:
raise NotImplementedError(
"Conversion of a lazy ragged signal to its non-ragged "
"counterpart is not supported. Make the required "
"non-ragged dask array manually and make a new lazy "
"signal."
)
error = "The signal can't be converted to a non-ragged signal."
try:
# Check that we can actually make a non-ragged array
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# As of numpy 1.20, it raises a VisibleDeprecationWarning
# and in the future, it will raise an error
data = np.array(self.data.tolist())
            except Exception:
                _logger.error(error)
                raise ValueError(error)
if data.dtype == object:
raise ValueError(error)
self.data = data
# Add axes which were previously in the ragged dimension
axes = [idx for idx in range(self.data.ndim) if
idx not in self.axes_manager.navigation_indices_in_array]
for index in axes:
axis = {'index_in_array':index, 'size':self.data.shape[index]}
self.axes_manager._append_axis(**axis)
self.axes_manager._update_attributes()
self.axes_manager._ragged = value
def _load_dictionary(self, file_data_dict):
"""Load data from dictionary.
Parameters
----------
file_data_dict : dict
A dictionary containing at least a 'data' keyword with an array of
arbitrary dimensions. Additionally the dictionary can contain the
following items:
* data: the signal data. It can be an array of any dimensions.
* axes: a dictionary to define the axes (see the documentation of
the :py:class:`~hyperspy.axes.AxesManager` class for more details).
* attributes: a dictionary whose items are stored as attributes.
            * metadata: a dictionary containing a set of parameters that will
              be stored in the `metadata` attribute. Some parameters might be
              mandatory in some cases.
            * original_metadata: a dictionary containing a set of parameters
              that will be stored in the `original_metadata` attribute. It
              typically contains all the parameters that have been
              imported from the original data file.
* ragged: a bool, defining whether the signal is ragged or not.
Overwrite the attributes['ragged'] entry
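        Examples
        --------
        A hedged sketch of the expected structure (the axis keys shown are a
        subset; see :py:class:`~hyperspy.axes.AxesManager` for the full set):
        >>> d = {'data': np.arange(10),
        ...      'axes': [{'size': 10, 'name': 'x', 'scale': 0.1}],
        ...      'metadata': {'General': {'title': 'Example'}}}
        >>> s = BaseSignal(np.arange(10))
        >>> s._load_dictionary(d)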
"""
self.data = file_data_dict['data']
oldlazy = self._lazy
attributes = file_data_dict.get('attributes', {})
ragged = file_data_dict.get('ragged')
if ragged is not None:
attributes['ragged'] = ragged
if 'axes' not in file_data_dict:
file_data_dict['axes'] = self._get_undefined_axes_list(
attributes.get('ragged', False))
self.axes_manager = AxesManager(file_data_dict['axes'])
# Setting `ragged` attributes requires the `axes_manager`
for key, value in attributes.items():
if hasattr(self, key):
if isinstance(value, dict):
for k, v in value.items():
setattr(getattr(self, key), k, v)
else:
setattr(self, key, value)
if 'models' in file_data_dict:
self.models._add_dictionary(file_data_dict['models'])
if 'metadata' not in file_data_dict:
file_data_dict['metadata'] = {}
if 'original_metadata' not in file_data_dict:
file_data_dict['original_metadata'] = {}
self.original_metadata.add_dictionary(
file_data_dict['original_metadata'])
self.metadata.add_dictionary(
file_data_dict['metadata'])
if "title" not in self.metadata.General:
self.metadata.General.title = ''
if (self._signal_type or not self.metadata.has_item("Signal.signal_type")):
self.metadata.Signal.signal_type = self._signal_type
if "learning_results" in file_data_dict:
self.learning_results.__dict__.update(
file_data_dict["learning_results"])
if self._lazy is not oldlazy:
self._assign_subclass()
# TODO: try to find a way to use dask ufuncs when called with lazy data (e.g.
# np.log(s) -> da.log(s.data) wrapped.
def __array__(self, dtype=None):
if dtype:
return self.data.astype(dtype)
else:
return self.data
def __array_wrap__(self, array, context=None):
signal = self._deepcopy_with_new_data(array)
if context is not None:
# ufunc, argument of the ufunc, domain of the ufunc
# In ufuncs with multiple outputs, domain indicates which output
# is currently being prepared (eg. see modf).
# In ufuncs with a single output, domain is 0
uf, objs, huh = context
def get_title(signal, i=0):
g = signal.metadata.General
if g.title:
return g.title
else:
return "Untitled Signal %s" % (i + 1)
title_strs = []
i = 0
for obj in objs:
if isinstance(obj, BaseSignal):
title_strs.append(get_title(obj, i))
i += 1
else:
title_strs.append(str(obj))
signal.metadata.General.title = "%s(%s)" % (
uf.__name__, ", ".join(title_strs))
return signal
def squeeze(self):
"""Remove single-dimensional entries from the shape of an array
and the axes. See :py:func:`numpy.squeeze` for more details.
Returns
-------
s : signal
A new signal object with single-entry dimensions removed
Examples
--------
        >>> s = hs.signals.Signal2D(np.random.random((2,1,1,6,8,8)))
        >>> s
        <Signal2D, title: , dimensions: (6, 1, 1, 2|8, 8)>
>>> s = s.squeeze()
>>> s
<Signal2D, title: , dimensions: (6, 2|8, 8)>
"""
# We deepcopy everything but data
self = self._deepcopy_with_new_data(self.data)
for ax in (self.axes_manager.signal_axes, self.axes_manager.navigation_axes):
for axis in reversed(ax):
if axis.size == 1:
self._remove_axis(axis.index_in_axes_manager)
self.data = self.data.squeeze()
return self
def _to_dictionary(self, add_learning_results=True, add_models=False,
add_original_metadata=True):
"""Returns a dictionary that can be used to recreate the signal.
All items but `data` are copies.
Parameters
----------
add_learning_results : bool, optional
Whether or not to include any multivariate learning results in
the outputted dictionary. Default is True.
add_models : bool, optional
Whether or not to include any models in the outputted dictionary.
Default is False
add_original_metadata : bool
Whether or not to include the original_medata in the outputted
dictionary. Default is True.
Returns
-------
dic : dict
The dictionary that can be used to recreate the signal
"""
dic = {'data': self.data,
'axes': self.axes_manager._get_axes_dicts(),
'metadata': copy.deepcopy(self.metadata.as_dictionary()),
'tmp_parameters': self.tmp_parameters.as_dictionary(),
'attributes': {'_lazy': self._lazy,
'ragged': self.axes_manager._ragged},
}
if add_original_metadata:
dic['original_metadata'] = copy.deepcopy(
self.original_metadata.as_dictionary()
)
if add_learning_results and hasattr(self, 'learning_results'):
dic['learning_results'] = copy.deepcopy(
self.learning_results.__dict__)
if add_models:
dic['models'] = self.models._models.as_dictionary()
return dic
def _get_undefined_axes_list(self, ragged=False):
"""Returns default list of axes construct from the data array shape."""
axes = []
for s in self.data.shape:
axes.append({'size': int(s), })
        # For a ragged signal with navigation dimension 0 and signal dimension
        # 0, we return an empty list to avoid getting a navigation axis of
        # size 1, which would be incorrect because it corresponds to the
        # ragged dimension.
if ragged and len(axes) == 1 and axes[0]['size'] == 1:
axes = []
return axes
def __call__(self, axes_manager=None, fft_shift=False):
if axes_manager is None:
axes_manager = self.axes_manager
indices = axes_manager._getitem_tuple
if self._lazy:
value = self._get_cache_dask_chunk(indices)
else:
value = self.data.__getitem__(indices)
value = np.atleast_1d(value)
if fft_shift:
value = np.fft.fftshift(value)
return value
@property
def navigator(self):
return self.metadata.get_item('_HyperSpy.navigator')
@navigator.setter
def navigator(self, navigator):
self.metadata.set_item('_HyperSpy.navigator', navigator)
def plot(self, navigator="auto", axes_manager=None, plot_markers=True,
**kwargs):
"""%s
%s
%s
%s
"""
if self.axes_manager.ragged:
raise RuntimeError("Plotting ragged signal is not supported.")
if self._plot is not None:
self._plot.close()
if 'power_spectrum' in kwargs:
if not np.issubdtype(self.data.dtype, np.complexfloating):
                raise ValueError('The parameter `power_spectrum` requires a '
                                 'signal with a complex data type.')
del kwargs['power_spectrum']
if axes_manager is None:
axes_manager = self.axes_manager
if self.is_rgbx is True:
if axes_manager.navigation_size < 2:
navigator = None
else:
navigator = "slider"
if axes_manager.signal_dimension == 0:
if axes_manager.navigation_dimension == 0:
# 0d signal without navigation axis: don't make a figure
# and instead, we display the value
print(self.data)
return
self._plot = mpl_he.MPL_HyperExplorer()
elif axes_manager.signal_dimension == 1:
# Hyperspectrum
self._plot = mpl_hse.MPL_HyperSignal1D_Explorer()
elif axes_manager.signal_dimension == 2:
self._plot = mpl_hie.MPL_HyperImage_Explorer()
else:
raise ValueError(
"Plotting is not supported for this view. "
"Try e.g. 's.transpose(signal_axes=1).plot()' for "
"plotting as a 1D signal, or "
"'s.transpose(signal_axes=(1,2)).plot()' "
"for plotting as a 2D signal.")
self._plot.axes_manager = axes_manager
self._plot.signal_data_function = self.__call__
if self.metadata.has_item("Signal.quantity"):
self._plot.quantity_label = self.metadata.Signal.quantity
if self.metadata.General.title:
title = self.metadata.General.title
self._plot.signal_title = title
elif self.tmp_parameters.has_item('filename'):
self._plot.signal_title = self.tmp_parameters.filename
def get_static_explorer_wrapper(*args, **kwargs):
if np.issubdtype(navigator.data.dtype, np.complexfloating):
return np.abs(navigator())
else:
return navigator()
def get_1D_sum_explorer_wrapper(*args, **kwargs):
navigator = self
# Sum over all but the first navigation axis.
am = navigator.axes_manager
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=UserWarning,
module='hyperspy'
)
navigator = navigator.sum(
am.signal_axes + am.navigation_axes[1:]
)
return np.nan_to_num(navigator.data).squeeze()
def get_dynamic_explorer_wrapper(*args, **kwargs):
navigator.axes_manager.indices = self.axes_manager.indices[
navigator.axes_manager.signal_dimension:]
navigator.axes_manager._update_attributes()
if np.issubdtype(navigator().dtype, np.complexfloating):
return np.abs(navigator())
else:
return navigator()
if not isinstance(navigator, BaseSignal) and navigator == "auto":
if self.navigator is not None:
navigator = self.navigator
elif (self.axes_manager.navigation_dimension > 1 and
np.any(np.array([not axis.is_uniform for axis in
self.axes_manager.navigation_axes]))):
navigator = "slider"
elif (self.axes_manager.navigation_dimension == 1 and
self.axes_manager.signal_dimension == 1):
if (self.axes_manager.navigation_axes[0].is_uniform and
self.axes_manager.signal_axes[0].is_uniform):
navigator = "data"
else:
navigator = "spectrum"
elif self.axes_manager.navigation_dimension > 0:
if self.axes_manager.signal_dimension == 0:
navigator = self.deepcopy()
else:
navigator = interactive(
self.sum,
self.events.data_changed,
self.axes_manager.events.any_axis_changed,
self.axes_manager.signal_axes)
if navigator.axes_manager.navigation_dimension == 1:
navigator = interactive(
navigator.as_signal1D,
navigator.events.data_changed,
navigator.axes_manager.events.any_axis_changed, 0)
else:
navigator = interactive(
navigator.as_signal2D,
navigator.events.data_changed,
navigator.axes_manager.events.any_axis_changed,
(0, 1))
else:
navigator = None
# Navigator properties
if axes_manager.navigation_axes:
            # Check first if we have a signal to avoid comparison of a signal
            # with a string
if isinstance(navigator, BaseSignal):
def is_shape_compatible(navigation_shape, shape):
return (navigation_shape == shape or
navigation_shape[:2] == shape or
(navigation_shape[0],) == shape
)
# Static navigator
if is_shape_compatible(axes_manager.navigation_shape,
navigator.axes_manager.signal_shape):
self._plot.navigator_data_function = get_static_explorer_wrapper
# Static transposed navigator
elif is_shape_compatible(axes_manager.navigation_shape,
navigator.axes_manager.navigation_shape):
navigator = navigator.T
self._plot.navigator_data_function = get_static_explorer_wrapper
# Dynamic navigator
elif (axes_manager.navigation_shape ==
navigator.axes_manager.signal_shape +
navigator.axes_manager.navigation_shape):
self._plot.navigator_data_function = get_dynamic_explorer_wrapper
else:
raise ValueError(
"The dimensions of the provided (or stored) navigator "
"are not compatible with this signal.")
elif navigator == "slider":
self._plot.navigator_data_function = "slider"
elif navigator is None:
self._plot.navigator_data_function = None
elif navigator == "data":
if np.issubdtype(self.data.dtype, np.complexfloating):
self._plot.navigator_data_function = lambda axes_manager=None: np.abs(
self.data)
else:
self._plot.navigator_data_function = lambda axes_manager=None: self.data
elif navigator == "spectrum":
self._plot.navigator_data_function = get_1D_sum_explorer_wrapper
else:
                raise ValueError(
                    'navigator must be one of "spectrum", "data", "auto", '
                    '"slider", None, or a Signal instance')
self._plot.plot(**kwargs)
self.events.data_changed.connect(self.update_plot, [])
p = self._plot.signal_plot if self._plot.signal_plot else self._plot.navigator_plot
p.events.closed.connect(
lambda: self.events.data_changed.disconnect(self.update_plot),
[])
if plot_markers:
if self.metadata.has_item('Markers'):
self._plot_permanent_markers()
plot.__doc__ %= (BASE_PLOT_DOCSTRING, BASE_PLOT_DOCSTRING_PARAMETERS,
PLOT1D_DOCSTRING, PLOT2D_KWARGS_DOCSTRING)
def save(self, filename=None, overwrite=None, extension=None, **kwds):
"""Saves the signal in the specified format.
The function gets the format from the specified extension (see
:ref:`supported-formats` in the User Guide for more information):
* ``'hspy'`` for HyperSpy's HDF5 specification
* ``'rpl'`` for Ripple (useful to export to Digital Micrograph)
* ``'msa'`` for EMSA/MSA single spectrum saving.
* ``'unf'`` for SEMPER unf binary format.
* ``'blo'`` for Blockfile diffraction stack saving.
* Many image formats such as ``'png'``, ``'tiff'``, ``'jpeg'``...
If no extension is provided the default file format as defined
in the `preferences` is used.
        Please note that not all formats support saving datasets of
        arbitrary dimensions, e.g. ``'msa'`` only supports 1D data, and
        blockfiles only support image stacks with a `navigation_dimension` < 2.
Each format accepts a different set of parameters. For details
see the specific format documentation.
Parameters
----------
filename : str or None
If None (default) and `tmp_parameters.filename` and
`tmp_parameters.folder` are defined, the
filename and path will be taken from there. A valid
extension can be provided e.g. ``'my_file.rpl'``
(see `extension` parameter).
        overwrite : None or bool
            If None and the file exists, the user is queried whether to
            overwrite it. If True (False), the file is (is not) overwritten
            if it exists.
extension : None or str
The extension of the file that defines the file format.
Allowable string values are: {``'hspy'``, ``'hdf5'``, ``'rpl'``,
``'msa'``, ``'unf'``, ``'blo'``, ``'emd'``, and common image
extensions e.g. ``'tiff'``, ``'png'``, etc.}
``'hspy'`` and ``'hdf5'`` are equivalent. Use ``'hdf5'`` if
compatibility with HyperSpy versions older than 1.2 is required.
If ``None``, the extension is determined from the following list in
this order:
i) the filename
ii) `Signal.tmp_parameters.extension`
iii) ``'hspy'`` (the default extension)
chunks : tuple or True or None (default)
HyperSpy, Nexus and EMD NCEM format only. Define chunks used when
saving. The chunk shape should follow the order of the array
(``s.data.shape``), not the shape of the ``axes_manager``.
If None and lazy signal, the dask array chunking is used.
If None and non-lazy signal, the chunks are estimated automatically
to have at least one chunk per signal space.
            If True, the chunking is determined by the h5py ``guess_chunk``
            function.
        save_original_metadata : bool, default False
            Nexus file only. Option to save hyperspy.original_metadata with
            the signal. A loaded Nexus file may contain a large amount of
            original metadata, which you may wish to omit on saving.
        use_default : bool, default False
Nexus file only. Define the default dataset in the file.
If set to True the signal or first signal in the list of signals
will be defined as the default (following Nexus v3 data rules).
write_dataset : bool, optional
Only for hspy files. If True, write the dataset, otherwise, don't
write it. Useful to save attributes without having to write the
whole dataset. Default is True.
close_file : bool, optional
Only for hdf5-based files and some zarr store. Close the file after
writing. Default is True.
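        Examples
        --------
        A minimal sketch (file names are illustrative only):
        >>> s = hs.signals.Signal1D(np.zeros(10))
        >>> s.save('spectrum.hspy', overwrite=True)  # HyperSpy HDF5 format
        >>> s.save('spectrum', extension='msa', overwrite=True)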
"""
if filename is None:
if (self.tmp_parameters.has_item('filename') and
self.tmp_parameters.has_item('folder')):
filename = Path(
self.tmp_parameters.folder,
self.tmp_parameters.filename)
extension = (self.tmp_parameters.extension
if not extension
else extension)
elif self.metadata.has_item('General.original_filename'):
filename = self.metadata.General.original_filename
else:
raise ValueError('File name not defined')
if not isinstance(filename, MutableMapping):
filename = Path(filename)
if extension is not None:
filename = filename.with_suffix(f".{extension}")
hyperspy.io.save(filename, self, overwrite=overwrite, **kwds)
def _replot(self):
if self._plot is not None:
if self._plot.is_active:
self.plot()
def update_plot(self):
"""
If this Signal has been plotted, update the signal and navigator
plots, as appropriate.
"""
if self._plot is not None and self._plot.is_active:
if self._plot.signal_plot is not None:
self._plot.signal_plot.update()
if self._plot.navigator_plot is not None:
self._plot.navigator_plot.update()
def get_dimensions_from_data(self):
"""Get the dimension parameters from the Signal's underlying data.
Useful when the data structure was externally modified, or when the
        spectrum image was not loaded from a file.
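        Examples
        --------
        A minimal sketch:
        >>> s = BaseSignal(np.zeros((3, 3)))
        >>> s.data = np.zeros((5, 5))  # data replaced behind HyperSpy's back
        >>> s.get_dimensions_from_data()  # axis sizes updated to match (5, 5)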
"""
dc = self.data
for axis in self.axes_manager._axes:
axis.size = int(dc.shape[axis.index_in_array])
def crop(self, axis, start=None, end=None, convert_units=False):
"""Crops the data in a given axis. The range is given in pixels.
Parameters
----------
axis : int or str
Specify the data axis in which to perform the cropping
operation. The axis can be specified using the index of the
axis in `axes_manager` or the axis name.
start : int, float, or None
The beginning of the cropping interval. If type is ``int``,
the value is taken as the axis index. If type is ``float`` the index
is calculated using the axis calibration. If `start`/`end` is
``None`` the method crops from/to the low/high end of the axis.
end : int, float, or None
The end of the cropping interval. If type is ``int``,
the value is taken as the axis index. If type is ``float`` the index
is calculated using the axis calibration. If `start`/`end` is
``None`` the method crops from/to the low/high end of the axis.
convert_units : bool
Default is ``False``. If ``True``, convert the units using the
:py:meth:`~hyperspy.axes.AxesManager.convert_units` method
of the :py:class:`~hyperspy.axes.AxesManager`. If ``False``,
does nothing.
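        Examples
        --------
        A minimal sketch (``int`` limits are indices, ``float`` limits are
        calibrated axis values):
        >>> s = hs.signals.Signal1D(np.arange(100))
        >>> s.crop(axis=0, start=10, end=90)  # crops in place
        >>> s.data.shape
        (80,)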
"""
axis = self.axes_manager[axis]
i1, i2 = axis._get_index(start), axis._get_index(end)
# To prevent an axis error, which may confuse users
        if i1 is not None and i2 is not None and i1 == i2:
raise ValueError("The `start` and `end` values need to be "
"different.")
# We take a copy to guarantee the continuity of the data
self.data = self.data[
(slice(None),) * axis.index_in_array + (slice(i1, i2),
Ellipsis)]
axis.crop(i1, i2)
self.get_dimensions_from_data()
self.squeeze()
self.events.data_changed.trigger(obj=self)
if convert_units:
self.axes_manager.convert_units(axis)
def swap_axes(self, axis1, axis2, optimize=False):
"""Swap two axes in the signal.
Parameters
----------
axis1%s
axis2%s
%s
Returns
-------
s : :py:class:`~hyperspy.signal.BaseSignal` (or subclass)
A copy of the object with the axes swapped.
See also
--------
rollaxis
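        Examples
        --------
        A minimal sketch:
        >>> s = hs.signals.Signal1D(np.ones((3, 4, 5, 6)))
        >>> s
        <Signal1D, title: , dimensions: (5, 4, 3|6)>
        >>> s.swap_axes(0, 2)
        <Signal1D, title: , dimensions: (3, 4, 5|6)>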
"""
axis1 = self.axes_manager[axis1].index_in_array
axis2 = self.axes_manager[axis2].index_in_array
s = self._deepcopy_with_new_data(self.data.swapaxes(axis1, axis2))
am = s.axes_manager
am._update_trait_handlers(remove=True)
c1 = am._axes[axis1]
c2 = am._axes[axis2]
c1.slice, c2.slice = c2.slice, c1.slice
c1.navigate, c2.navigate = c2.navigate, c1.navigate
c1.is_binned, c2.is_binned = c2.is_binned, c1.is_binned
am._axes[axis1] = c2
am._axes[axis2] = c1
am._update_attributes()
am._update_trait_handlers(remove=False)
if optimize:
s._make_sure_data_is_contiguous()
return s
swap_axes.__doc__ %= (ONE_AXIS_PARAMETER, ONE_AXIS_PARAMETER, OPTIMIZE_ARG)
def rollaxis(self, axis, to_axis, optimize=False):
"""Roll the specified axis backwards, until it lies in a given position.
Parameters
----------
axis %s The axis to roll backwards.
The positions of the other axes do not change relative to one
another.
to_axis %s The axis is rolled until it lies before this other axis.
%s
Returns
-------
s : :py:class:`~hyperspy.signal.BaseSignal` (or subclass)
Output signal.
See also
--------
:py:func:`numpy.roll`, swap_axes
Examples
--------
        >>> s = hs.signals.Signal1D(np.ones((5,4,3,6)))
        >>> s
        <Signal1D, title: , dimensions: (3, 4, 5|6)>
        >>> s.rollaxis(3, 1)
        <Signal1D, title: , dimensions: (3, 4, 5|6)>
        >>> s.rollaxis(2,0)
        <Signal1D, title: , dimensions: (5, 3, 4|6)>
"""
axis = self.axes_manager[axis].index_in_array
to_index = self.axes_manager[to_axis].index_in_array
if axis == to_index:
return self.deepcopy()
new_axes_indices = hyperspy.misc.utils.rollelem(
[axis_.index_in_array for axis_ in self.axes_manager._axes],
index=axis,
to_index=to_index)
s = self._deepcopy_with_new_data(self.data.transpose(new_axes_indices))
s.axes_manager._axes = hyperspy.misc.utils.rollelem(
s.axes_manager._axes,
index=axis,
to_index=to_index)
s.axes_manager._update_attributes()
if optimize:
s._make_sure_data_is_contiguous()
return s
rollaxis.__doc__ %= (ONE_AXIS_PARAMETER, ONE_AXIS_PARAMETER, OPTIMIZE_ARG)
@property
def _data_aligned_with_axes(self):
"""Returns a view of `data` with is axes aligned with the Signal axes.
"""
if self.axes_manager.axes_are_aligned_with_data:
return self.data
else:
am = self.axes_manager
nav_iia_r = am.navigation_indices_in_array[::-1]
sig_iia_r = am.signal_indices_in_array[::-1]
data = self.data.transpose(nav_iia_r + sig_iia_r)
return data
def _validate_rebin_args_and_get_factors(self, new_shape=None, scale=None):
        if new_shape is None and scale is None:
            raise ValueError("One of new_shape or scale must be specified")
        elif new_shape is not None and scale is not None:
            raise ValueError(
                "Only one out of new_shape or scale should be specified. "
                "Not both.")
elif new_shape:
if len(new_shape) != len(self.data.shape):
raise ValueError("Wrong new_shape size")
for axis in self.axes_manager._axes:
if axis.is_uniform is False:
raise NotImplementedError(
"Rebinning of non-uniform axes is not yet implemented.")
new_shape_in_array = np.array([new_shape[axis.index_in_axes_manager]
for axis in self.axes_manager._axes])
factors = np.array(self.data.shape) / new_shape_in_array
else:
if len(scale) != len(self.data.shape):
raise ValueError("Wrong scale size")
for axis in self.axes_manager._axes:
if axis.is_uniform is False:
raise NotImplementedError(
"Rebinning of non-uniform axes is not yet implemented.")
factors = np.array([scale[axis.index_in_axes_manager]
for axis in self.axes_manager._axes])
return factors # Factors are in array order
def rebin(self, new_shape=None, scale=None, crop=True, dtype=None,
out=None):
"""
Rebin the signal into a smaller or larger shape, based on linear
interpolation. Specify **either** `new_shape` or `scale`. Scale of 1
means no binning and scale less than one results in up-sampling.
Parameters
----------
%s
%s
Returns
-------
s : :py:class:`~hyperspy.signal.BaseSignal` (or subclass)
The resulting cropped signal.
Raises
------
NotImplementedError
If trying to rebin over a non-uniform axis.
Examples
--------
>>> spectrum = hs.signals.EDSTEMSpectrum(np.ones([4, 4, 10]))
>>> spectrum.data[1, 2, 9] = 5
        >>> print(spectrum)
        <EDSTEMSpectrum, title: , dimensions: (4, 4|10)>
        >>> print('Sum = ', sum(sum(sum(spectrum.data))))
        Sum = 164.0
        >>> scale = [2, 2, 5]
        >>> test = spectrum.rebin(scale=scale)
        >>> print(test)
        <EDSTEMSpectrum, title: , dimensions: (2, 2|2)>
        >>> print('Sum = ', sum(sum(sum(test.data))))
        Sum = 164.0
        >>> s = hs.signals.Signal1D(np.ones((2, 5, 10), dtype=np.uint8))
>>> print(s)
<Signal1D, title: , dimensions: (5, 2|10)>
>>> print(s.data.dtype)
uint8
        Use dtype=np.uint16 to specify a dtype
>>> s2 = s.rebin(scale=(5, 2, 1), dtype=np.uint16)
>>> print(s2.data.dtype)
uint16
Use dtype="same" to keep the same dtype
>>> s3 = s.rebin(scale=(5, 2, 1), dtype="same")
>>> print(s3.data.dtype)
uint8
        By default, `dtype=None`, the dtype is determined by the behaviour of
        numpy.sum; in this case, an unsigned integer of the same precision as
        the platform integer.
>>> s4 = s.rebin(scale=(5, 2, 1))
>>> print(s4.data.dtype)
uint64
"""
# TODO: Adapt so that it works if a non_uniform_axis exists, but is not
# changed; for new_shape, a non_uniform_axis should be interpolated to a
# linear grid
factors = self._validate_rebin_args_and_get_factors(
new_shape=new_shape,
scale=scale,)
s = out or self._deepcopy_with_new_data(None, copy_variance=True)
data = hyperspy.misc.array_tools.rebin(
self.data, scale=factors, crop=crop, dtype=dtype)
if out:
if out._lazy:
out.data = data
else:
out.data[:] = data
else:
s.data = data
s.get_dimensions_from_data()
for axis, axis_src in zip(s.axes_manager._axes,
self.axes_manager._axes):
factor = factors[axis.index_in_array]
axis.scale = axis_src.scale * factor
axis.offset = axis_src.offset + (factor - 1) * axis_src.scale / 2
if s.metadata.has_item('Signal.Noise_properties.variance'):
if isinstance(s.metadata.Signal.Noise_properties.variance,
BaseSignal):
var = s.metadata.Signal.Noise_properties.variance
s.metadata.Signal.Noise_properties.variance = var.rebin(
new_shape=new_shape, scale=scale, crop=crop, out=out,
dtype=dtype)
if out is None:
return s
else:
out.events.data_changed.trigger(obj=out)
rebin.__doc__ %= (REBIN_ARGS, OUT_ARG)
def split(self,
axis='auto',
number_of_parts='auto',
step_sizes='auto'):
"""Splits the data into several signals.
The split can be defined by giving the `number_of_parts`, a homogeneous
step size, or a list of customized step sizes. By default (``'auto'``),
the function is the reverse of :py:func:`~hyperspy.misc.utils.stack`.
Parameters
----------
axis %s
If ``'auto'`` and if the object has been created with
:py:func:`~hyperspy.misc.utils.stack` (and ``stack_metadata=True``),
this method will return the former list of signals (information
stored in `metadata._HyperSpy.Stacking_history`).
If it was not created with :py:func:`~hyperspy.misc.utils.stack`,
the last navigation axis will be used.
number_of_parts : str or int
Number of parts in which the spectrum image will be split. The
splitting is homogeneous. When the axis size is not divisible
            by `number_of_parts`, the remainder data is lost without
            warning. If `number_of_parts` and `step_sizes` are both ``'auto'``,
`number_of_parts` equals the length of the axis,
`step_sizes` equals one, and the axis is suppressed from each
sub-spectrum.
step_sizes : str, list (of ints), or int
Size of the split parts. If ``'auto'``, the `step_sizes` equals one.
If an int is given, the splitting is homogeneous.
Examples
--------
        >>> s = hs.signals.Signal1D(np.random.random([4,3,2]))
>>> s
<Signal1D, title: , dimensions: (3, 4|2)>
>>> s.split()
[<Signal1D, title: , dimensions: (3 |2)>,
<Signal1D, title: , dimensions: (3 |2)>,
<Signal1D, title: , dimensions: (3 |2)>,
<Signal1D, title: , dimensions: (3 |2)>]
>>> s.split(step_sizes=2)
[<Signal1D, title: , dimensions: (3, 2|2)>,
<Signal1D, title: , dimensions: (3, 2|2)>]
>>> s.split(step_sizes=[1,2])
[<Signal1D, title: , dimensions: (3, 1|2)>,
<Signal1D, title: , dimensions: (3, 2|2)>]
Raises
------
NotImplementedError
If trying to split along a non-uniform axis.
Returns
-------
splitted : list
A list of the split signals
"""
if number_of_parts != 'auto' and step_sizes != 'auto':
raise ValueError(
"You can define step_sizes or number_of_parts but not both."
)
shape = self.data.shape
signal_dict = self._to_dictionary(add_learning_results=False)
if axis == 'auto':
mode = 'auto'
if hasattr(self.metadata._HyperSpy, 'Stacking_history'):
stack_history = self.metadata._HyperSpy.Stacking_history
axis_in_manager = stack_history.axis
step_sizes = stack_history.step_sizes
else:
axis_in_manager = self.axes_manager[-1 +
1j].index_in_axes_manager
else:
mode = 'manual'
axis_in_manager = self.axes_manager[axis].index_in_axes_manager
axis = self.axes_manager[axis_in_manager].index_in_array
len_axis = self.axes_manager[axis_in_manager].size
if self.axes_manager[axis].is_uniform is False:
raise NotImplementedError(
"Splitting of signals over a non-uniform axis is not implemented")
if number_of_parts == 'auto' and step_sizes == 'auto':
step_sizes = 1
number_of_parts = len_axis
elif step_sizes == 'auto':
if number_of_parts > shape[axis]:
raise ValueError(
"The number of parts is greater than the axis size."
)
step_sizes = ([shape[axis] // number_of_parts, ] * number_of_parts)
if isinstance(step_sizes, numbers.Integral):
step_sizes = [step_sizes] * int(len_axis / step_sizes)
splitted = []
cut_index = np.array([0] + step_sizes).cumsum()
axes_dict = signal_dict['axes']
for i in range(len(cut_index) - 1):
axes_dict[axis]['offset'] = self.axes_manager._axes[
axis].index2value(cut_index[i])
axes_dict[axis]['size'] = cut_index[i + 1] - cut_index[i]
data = self.data[
(slice(None), ) * axis +
(slice(cut_index[i], cut_index[i + 1]), Ellipsis)]
signal_dict['data'] = data
            splitted.append(self.__class__(**signal_dict))
if number_of_parts == len_axis \
or step_sizes == [1] * len_axis:
for i, signal1D in enumerate(splitted):
signal1D.data = signal1D.data[
signal1D.axes_manager._get_data_slice([(axis, 0)])]
signal1D._remove_axis(axis_in_manager)
if mode == 'auto' and hasattr(
self.original_metadata, 'stack_elements'):
for i, spectrum in enumerate(splitted):
se = self.original_metadata.stack_elements['element' + str(i)]
spectrum.metadata = copy.deepcopy(
se['metadata'])
spectrum.original_metadata = copy.deepcopy(
se['original_metadata'])
spectrum.metadata.General.title = se.metadata.General.title
return splitted
split.__doc__ %= (ONE_AXIS_PARAMETER)
def _unfold(self, steady_axes, unfolded_axis):
"""Modify the shape of the data by specifying the axes whose
        dimensions do not change and the axis over which the remaining axes will
be unfolded
Parameters
----------
steady_axes : list
            The indices of the axes whose dimensions do not change
unfolded_axis : int
The index of the axis over which all the rest of the axes (except
the steady axes) will be unfolded
See also
--------
fold
Notes
-----
WARNING: this private function does not modify the signal subclass
and it is intended for internal use only. To unfold use the public
:py:meth:`~hyperspy.signal.BaseSignal.unfold`,
:py:meth:`~hyperspy.signal.BaseSignal.unfold_navigation_space`,
:py:meth:`~hyperspy.signal.BaseSignal.unfold_signal_space` instead.
It doesn't make sense to perform an unfolding when `dim` < 2
"""
if self.data.squeeze().ndim < 2:
return
# We need to store the original shape and coordinates to be used
# by the fold function only if it has not been already stored by a
# previous unfold
folding = self.metadata._HyperSpy.Folding
if folding.unfolded is False:
folding.original_shape = self.data.shape
folding.original_axes_manager = self.axes_manager
folding.unfolded = True
new_shape = [1] * len(self.data.shape)
for index in steady_axes:
new_shape[index] = self.data.shape[index]
new_shape[unfolded_axis] = -1
self.data = self.data.reshape(new_shape)
self.axes_manager = self.axes_manager.deepcopy()
uname = ''
uunits = ''
to_remove = []
for axis, dim in zip(self.axes_manager._axes, new_shape):
if dim == 1:
                uname += ',' + str(axis)
                uunits += ',' + str(axis.units)
to_remove.append(axis)
ua = self.axes_manager._axes[unfolded_axis]
ua.name = str(ua) + uname
ua.units = str(ua.units) + uunits
ua.size = self.data.shape[unfolded_axis]
for axis in to_remove:
self.axes_manager.remove(axis.index_in_axes_manager)
self.data = self.data.squeeze()
self._assign_subclass()
def unfold(self, unfold_navigation=True, unfold_signal=True):
"""Modifies the shape of the data by unfolding the signal and
navigation dimensions separately
Parameters
----------
unfold_navigation : bool
Whether or not to unfold the navigation dimension(s) (default:
``True``)
unfold_signal : bool
Whether or not to unfold the signal dimension(s) (default:
``True``)
Returns
-------
needed_unfolding : bool
Whether or not one of the axes needed unfolding (and that
unfolding was performed)
Note
----
It doesn't make sense to perform an unfolding when the total number
of dimensions is < 2.
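        Examples
        --------
        A minimal sketch:
        >>> s = BaseSignal(np.random.random((16, 16, 256)))
        >>> s.unfold()  # returns True if any unfolding was performed
        True
        >>> s.fold()  # restores the original shape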
"""
unfolded = False
if unfold_navigation:
if self.unfold_navigation_space():
unfolded = True
if unfold_signal:
if self.unfold_signal_space():
unfolded = True
return unfolded
@contextmanager
def unfolded(self, unfold_navigation=True, unfold_signal=True):
"""Use this function together with a `with` statement to have the
signal be unfolded for the scope of the `with` block, before
automatically refolding when passing out of scope.
See also
--------
unfold, fold
Examples
--------
>>> import numpy as np
>>> s = BaseSignal(np.random.random((64,64,1024)))
        >>> with s.unfolded():
        ...     # Do whatever needs doing while unfolded here
        ...     pass
"""
unfolded = self.unfold(unfold_navigation, unfold_signal)
try:
yield unfolded
finally:
if unfolded is not False:
self.fold()
def unfold_navigation_space(self):
"""Modify the shape of the data to obtain a navigation space of
dimension 1
Returns
-------
needed_unfolding : bool
Whether or not the navigation space needed unfolding (and whether
it was performed)
"""
if self.axes_manager.navigation_dimension < 2:
needed_unfolding = False
else:
needed_unfolding = True
steady_axes = [
axis.index_in_array for axis in
self.axes_manager.signal_axes]
unfolded_axis = (
self.axes_manager.navigation_axes[0].index_in_array)
self._unfold(steady_axes, unfolded_axis)
if self.metadata.has_item('Signal.Noise_properties.variance'):
variance = self.metadata.Signal.Noise_properties.variance
if isinstance(variance, BaseSignal):
variance.unfold_navigation_space()
return needed_unfolding
def unfold_signal_space(self):
"""Modify the shape of the data to obtain a signal space of
dimension 1
Returns
-------
needed_unfolding : bool
Whether or not the signal space needed unfolding (and whether
it was performed)
"""
if self.axes_manager.signal_dimension < 2:
needed_unfolding = False
else:
needed_unfolding = True
steady_axes = [
axis.index_in_array for axis in
self.axes_manager.navigation_axes]
unfolded_axis = self.axes_manager.signal_axes[0].index_in_array
self._unfold(steady_axes, unfolded_axis)
self.metadata._HyperSpy.Folding.signal_unfolded = True
if self.metadata.has_item('Signal.Noise_properties.variance'):
variance = self.metadata.Signal.Noise_properties.variance
if isinstance(variance, BaseSignal):
variance.unfold_signal_space()
return needed_unfolding
def fold(self):
"""If the signal was previously unfolded, fold it back"""
folding = self.metadata._HyperSpy.Folding
        # Note that `==` must be used instead of `is True` because
        # if the value was loaded from a file its type can be np.bool_
        if folding.unfolded == True:
self.data = self.data.reshape(folding.original_shape)
self.axes_manager = folding.original_axes_manager
folding.original_shape = None
folding.original_axes_manager = None
folding.unfolded = False
folding.signal_unfolded = False
self._assign_subclass()
if self.metadata.has_item('Signal.Noise_properties.variance'):
variance = self.metadata.Signal.Noise_properties.variance
if isinstance(variance, BaseSignal):
variance.fold()
def _make_sure_data_is_contiguous(self):
if self.data.flags['C_CONTIGUOUS'] is False:
_logger.info("{0!r} data is replaced by its optimized copy, see "
"optimize parameter of ``Basesignal.transpose`` "
"for more information.".format(self))
self.data = np.ascontiguousarray(self.data)
def _iterate_signal(self, iterpath=None):
"""Iterates over the signal data. It is faster than using the signal
        iterator, because it avoids making a deepcopy of the metadata and other
attributes.
Parameters
----------
iterpath : None or str or iterable
Any valid iterpath supported by the axes_manager.
Returns
-------
numpy array when iterating over the navigation space
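        Examples
        --------
        A hedged sketch:
        >>> s = BaseSignal(np.arange(6).reshape(2, 3)).transpose(signal_axes=1)
        >>> for chunk in s._iterate_signal():
        ...     print(chunk)  # expected: [0 1 2] then [3 4 5]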
"""
original_index = self.axes_manager.indices
if iterpath is None:
_logger.warning('The default iterpath will change in HyperSpy 2.0.')
with self.axes_manager.switch_iterpath(iterpath):
self.axes_manager.indices = tuple(
[0 for _ in self.axes_manager.navigation_axes]
)
for _ in self.axes_manager:
yield self()
# restore original index
self.axes_manager.indices = original_index
def _cycle_signal(self):
"""Cycles over the signal data.
It is faster than using the signal iterator.
        Warning: this can produce an infinite loop.
"""
if self.axes_manager.navigation_size < 2:
while True:
yield self()
return # pragma: no cover
self._make_sure_data_is_contiguous()
axes = [axis.index_in_array for
axis in self.axes_manager.signal_axes]
if axes:
unfolded_axis = (
self.axes_manager.navigation_axes[0].index_in_array)
new_shape = [1] * len(self.data.shape)
for axis in axes:
new_shape[axis] = self.data.shape[axis]
new_shape[unfolded_axis] = -1
else: # signal_dimension == 0
new_shape = (-1, 1)
axes = [1]
unfolded_axis = 0
        # Warning! if the data is not contiguous it will make a copy!
data = self.data.reshape(new_shape)
getitem = [0] * len(data.shape)
for axis in axes:
getitem[axis] = slice(None)
i = 0
Ni = data.shape[unfolded_axis]
while True:
getitem[unfolded_axis] = i
            yield data[tuple(getitem)]
i += 1
i = 0 if i == Ni else i
def _remove_axis(self, axes):
am = self.axes_manager
axes = am[axes]
if not np.iterable(axes):
axes = (axes,)
if am.navigation_dimension + am.signal_dimension >= len(axes):
old_signal_dimension = am.signal_dimension
am.remove(axes)
if old_signal_dimension != am.signal_dimension:
self._assign_subclass()
if not self.axes_manager._axes and not self.ragged:
# Create a "Scalar" axis because the axis is the last one left and
# HyperSpy does not # support 0 dimensions
add_scalar_axis(self)
def _ma_workaround(self, s, function, axes, ar_axes, out):
# TODO: Remove if and when numpy.ma accepts tuple `axis`
# Basically perform unfolding, but only on data. We don't care about
# the axes since the function will consume it/them.
if not np.iterable(ar_axes):
ar_axes = (ar_axes,)
ar_axes = sorted(ar_axes)
new_shape = list(self.data.shape)
for index in ar_axes[1:]:
new_shape[index] = 1
new_shape[ar_axes[0]] = -1
data = self.data.reshape(new_shape).squeeze()
if out:
data = np.atleast_1d(function(data, axis=ar_axes[0],))
if data.shape == out.data.shape:
out.data[:] = data
out.events.data_changed.trigger(obj=out)
else:
raise ValueError(
"The output shape %s does not match the shape of "
"`out` %s" % (data.shape, out.data.shape))
else:
s.data = function(data, axis=ar_axes[0],)
s._remove_axis([ax.index_in_axes_manager for ax in axes])
return s
def _apply_function_on_data_and_remove_axis(self, function, axes,
out=None, **kwargs):
axes = self.axes_manager[axes]
if not np.iterable(axes):
axes = (axes,)
# Use out argument in numpy function when available for operations that
# do not return scalars in numpy.
        np_out = len(self.axes_manager._axes) != len(axes)
ar_axes = tuple(ax.index_in_array for ax in axes)
if len(ar_axes) == 0:
            # No axes provided, so no operation needs to be done, but we
            # still need to finish the execution of the function properly.
if out:
out.data[:] = self.data
out.events.data_changed.trigger(obj=out)
return
else:
return self
elif len(ar_axes) == 1:
ar_axes = ar_axes[0]
s = out or self._deepcopy_with_new_data(None)
if np.ma.is_masked(self.data):
return self._ma_workaround(s=s, function=function, axes=axes,
ar_axes=ar_axes, out=out)
if out:
if np_out:
function(self.data, axis=ar_axes, out=out.data,)
else:
data = np.atleast_1d(function(self.data, axis=ar_axes,))
if data.shape == out.data.shape:
out.data[:] = data
else:
raise ValueError(
"The output shape %s does not match the shape of "
"`out` %s" % (data.shape, out.data.shape))
out.events.data_changed.trigger(obj=out)
else:
s.data = np.atleast_1d(
function(self.data, axis=ar_axes,))
s._remove_axis([ax.index_in_axes_manager for ax in axes])
return s
def sum(self, axis=None, out=None, rechunk=True):
"""Sum the data over the given axes.
Parameters
----------
axis %s
%s
%s
Returns
-------
s : :py:class:`~hyperspy.signal.BaseSignal` (or subclasses)
A new Signal containing the sum of the provided Signal along the
specified axes.
Note
----
If you intend to calculate the numerical integral of an unbinned signal,
please use the :py:meth:`integrate1D` function instead. To avoid
        erroneous misuse of the `sum` function as an integral, it raises a warning
when working with an unbinned, non-uniform axis.
See also
--------
max, min, mean, std, var, indexmax, indexmin, valuemax, valuemin
Examples
--------
>>> import numpy as np
>>> s = BaseSignal(np.random.random((64,64,1024)))
>>> s.data.shape
(64,64,1024)
>>> s.sum(-1).data.shape
(64,64)
"""
if axis is None:
axis = self.axes_manager.navigation_axes
axes = self.axes_manager[axis]
if not np.iterable(axes):
axes = (axes,)
if any([not ax.is_uniform and not is_binned(self, ax) for ax in axes]):
warnings.warn("You are summing over an unbinned, non-uniform axis. "
"The result can not be used as an approximation of "
"the integral of the signal. For this functionality, "
"use integrate1D instead.")
return self._apply_function_on_data_and_remove_axis(
np.sum, axis, out=out, rechunk=rechunk)
sum.__doc__ %= (MANY_AXIS_PARAMETER, OUT_ARG, RECHUNK_ARG)
def max(self, axis=None, out=None, rechunk=True):
"""Returns a signal with the maximum of the signal along at least one
axis.
Parameters
----------
axis %s
%s
%s
Returns
-------
s : :py:class:`~hyperspy.signal.BaseSignal` (or subclasses)
A new Signal containing the maximum of the provided Signal over the
specified axes
See also
--------
min, sum, mean, std, var, indexmax, indexmin, valuemax, valuemin
Examples
--------
>>> import numpy as np
>>> s = BaseSignal(np.random.random((64,64,1024)))
>>> s.data.shape
(64,64,1024)
>>> s.max(-1).data.shape
(64,64)
"""
if axis is None:
axis = self.axes_manager.navigation_axes
return self._apply_function_on_data_and_remove_axis(
np.max, axis, out=out, rechunk=rechunk)
max.__doc__ %= (MANY_AXIS_PARAMETER, OUT_ARG, RECHUNK_ARG)
def min(self, axis=None, out=None, rechunk=True):
"""Returns a signal with the minimum of the signal along at least one
axis.
Parameters
----------
axis %s
%s
%s
Returns
-------
s : :py:class:`~hyperspy.signal.BaseSignal` (or subclasses)
A new Signal containing the minimum of the provided Signal over the
specified axes
See also
--------
max, sum, mean, std, var, indexmax, indexmin, valuemax, valuemin
Examples
--------
>>> import numpy as np
>>> s = BaseSignal(np.random.random((64,64,1024)))
>>> s.data.shape
(64,64,1024)
>>> s.min(-1).data.shape
(64,64)
"""
if axis is None:
axis = self.axes_manager.navigation_axes
return self._apply_function_on_data_and_remove_axis(
np.min, axis, out=out, rechunk=rechunk)
min.__doc__ %= (MANY_AXIS_PARAMETER, OUT_ARG, RECHUNK_ARG)
def mean(self, axis=None, out=None, rechunk=True):
"""Returns a signal with the average of the signal along at least one
axis.
Parameters
----------
axis %s
%s
%s
Returns
-------
s : :py:class:`~hyperspy.signal.BaseSignal` (or subclasses)
A new Signal containing the mean of the provided Signal over the
specified axes
See also
--------
max, min, sum, std, var, indexmax, indexmin, valuemax, valuemin
Examples
--------
>>> import numpy as np
>>> s = BaseSignal(np.random.random((64,64,1024)))
>>> s.data.shape
(64,64,1024)
>>> s.mean(-1).data.shape
(64,64)
"""
if axis is None:
axis = self.axes_manager.navigation_axes
return self._apply_function_on_data_and_remove_axis(
np.mean, axis, out=out, rechunk=rechunk)
mean.__doc__ %= (MANY_AXIS_PARAMETER, OUT_ARG, RECHUNK_ARG)
def std(self, axis=None, out=None, rechunk=True):
"""Returns a signal with the standard deviation of the signal along
at least one axis.
Parameters
----------
axis %s
%s
%s
Returns
-------
s : :py:class:`~hyperspy.signal.BaseSignal` (or subclasses)
A new Signal containing the standard deviation of the provided
Signal over the specified axes
See also
--------
max, min, sum, mean, var, indexmax, indexmin, valuemax, valuemin
Examples
--------
>>> import numpy as np
>>> s = BaseSignal(np.random.random((64,64,1024)))
>>> s.data.shape
(64,64,1024)
>>> s.std(-1).data.shape
(64,64)
"""
if axis is None:
axis = self.axes_manager.navigation_axes
return self._apply_function_on_data_and_remove_axis(
np.std, axis, out=out, rechunk=rechunk)
std.__doc__ %= (MANY_AXIS_PARAMETER, OUT_ARG, RECHUNK_ARG)
def var(self, axis=None, out=None, rechunk=True):
"""Returns a signal with the variances of the signal along at least one
axis.
Parameters
----------
axis %s
%s
%s
Returns
-------
s : :py:class:`~hyperspy.signal.BaseSignal` (or subclasses)
A new Signal containing the variance of the provided Signal over the
specified axes
See also
--------
max, min, sum, mean, std, indexmax, indexmin, valuemax, valuemin
Examples
--------
>>> import numpy as np
>>> s = BaseSignal(np.random.random((64,64,1024)))
>>> s.data.shape
(64,64,1024)
>>> s.var(-1).data.shape
(64,64)
"""
if axis is None:
axis = self.axes_manager.navigation_axes
return self._apply_function_on_data_and_remove_axis(
np.var, axis, out=out, rechunk=rechunk)
var.__doc__ %= (MANY_AXIS_PARAMETER, OUT_ARG, RECHUNK_ARG)
def nansum(self, axis=None, out=None, rechunk=True):
"""%s
"""
if axis is None:
axis = self.axes_manager.navigation_axes
axes = self.axes_manager[axis]
if not np.iterable(axes):
axes = (axes,)
if any([not ax.is_uniform for ax in axes]):
warnings.warn("You are summing over a non-uniform axis. The result "
"can not be used as an approximation of the "
"integral of the signal. For this functionaliy, "
"use integrate1D instead.")
return self._apply_function_on_data_and_remove_axis(
np.nansum, axis, out=out, rechunk=rechunk)
nansum.__doc__ %= (NAN_FUNC.format('sum'))
def nanmax(self, axis=None, out=None, rechunk=True):
"""%s
"""
if axis is None:
axis = self.axes_manager.navigation_axes
return self._apply_function_on_data_and_remove_axis(
np.nanmax, axis, out=out, rechunk=rechunk)
nanmax.__doc__ %= (NAN_FUNC.format('max'))
def nanmin(self, axis=None, out=None, rechunk=True):
"""%s"""
if axis is None:
axis = self.axes_manager.navigation_axes
return self._apply_function_on_data_and_remove_axis(
np.nanmin, axis, out=out, rechunk=rechunk)
nanmin.__doc__ %= (NAN_FUNC.format('min'))
def nanmean(self, axis=None, out=None, rechunk=True):
"""%s """
if axis is None:
axis = self.axes_manager.navigation_axes
return self._apply_function_on_data_and_remove_axis(
np.nanmean, axis, out=out, rechunk=rechunk)
nanmean.__doc__ %= (NAN_FUNC.format('mean'))
def nanstd(self, axis=None, out=None, rechunk=True):
"""%s"""
if axis is None:
axis = self.axes_manager.navigation_axes
return self._apply_function_on_data_and_remove_axis(
np.nanstd, axis, out=out, rechunk=rechunk)
nanstd.__doc__ %= (NAN_FUNC.format('std'))
def nanvar(self, axis=None, out=None, rechunk=True):
"""%s"""
if axis is None:
axis = self.axes_manager.navigation_axes
return self._apply_function_on_data_and_remove_axis(
np.nanvar, axis, out=out, rechunk=rechunk)
nanvar.__doc__ %= (NAN_FUNC.format('var'))
def diff(self, axis, order=1, out=None, rechunk=True):
"""Returns a signal with the `n`-th order discrete difference along
given axis. `i.e.` it calculates the difference between consecutive
values in the given axis: `out[n] = a[n+1] - a[n]`. See
:py:func:`numpy.diff` for more details.
Parameters
----------
axis %s
order : int
The order of the discrete difference.
%s
%s
Returns
-------
s : :py:class:`~hyperspy.signal.BaseSignal` (or subclasses) or None
            Note that the size of the data on the given ``axis`` decreases by
            the given ``order``. `i.e.` if ``axis`` is ``"x"`` and ``order``
            is 2 and the `x` dimension is N, the result's `x` dimension is
            N - 2.
Note
----
        If you intend to calculate the numerical derivative, please use the
        proper :py:meth:`derivative` function instead. To avoid erroneous
        misuse of the `diff` function as a derivative, it raises an error
        when working with a non-uniform axis.
See also
--------
derivative, integrate1D, integrate_simpson
Examples
--------
>>> import numpy as np
>>> s = BaseSignal(np.random.random((64,64,1024)))
>>> s.data.shape
(64,64,1024)
>>> s.diff(-1).data.shape
(64,64,1023)
"""
if not self.axes_manager[axis].is_uniform:
raise NotImplementedError(
"Performing a numerical difference on a non-uniform axis "
"is not implemented. Consider using `derivative` instead."
)
s = out or self._deepcopy_with_new_data(None)
data = np.diff(self.data, n=order,
axis=self.axes_manager[axis].index_in_array)
if out is not None:
out.data[:] = data
else:
s.data = data
axis2 = s.axes_manager[axis]
new_offset = self.axes_manager[axis].offset + (order * axis2.scale / 2)
axis2.offset = new_offset
s.get_dimensions_from_data()
if out is None:
return s
else:
out.events.data_changed.trigger(obj=out)
diff.__doc__ %= (ONE_AXIS_PARAMETER, OUT_ARG, RECHUNK_ARG)
def derivative(self, axis, order=1, out=None, **kwargs):
r"""Calculate the numerical derivative along the given axis,
with respect to the calibrated units of that axis.
For a function :math:`y = f(x)` and two consecutive values :math:`x_1`
and :math:`x_2`:
.. math::
\frac{df(x)}{dx} = \frac{y(x_2)-y(x_1)}{x_2-x_1}
Parameters
----------
axis %s
        order : int
The order of the derivative.
%s
**kwargs : dict
All extra keyword arguments are passed to :py:func:`numpy.gradient`
Returns
-------
        der : :py:class:`~hyperspy.signal.BaseSignal`
            The derivative of the signal. Note that, unlike :py:meth:`diff`,
            the data size along ``axis`` does not change, because
            :py:func:`numpy.gradient` uses central differences in the
            interior and one-sided differences at the boundaries.
Notes
-----
This function uses numpy.gradient to perform the derivative. See its
documentation for implementation details.
See also
--------
integrate1D, integrate_simpson
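        Examples
        --------
        A minimal sketch, differentiating a sampled sine wave:
        >>> x = np.linspace(0, 2 * np.pi, 100)
        >>> s = hs.signals.Signal1D(np.sin(x))
        >>> s.axes_manager[0].scale = x[1] - x[0]
        >>> der = s.derivative(axis=0)  # approximately cos(x)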
"""
# rechunk was a valid keyword up to HyperSpy 1.6
if "rechunk" in kwargs:
del kwargs["rechunk"]
n = order
der_data = self.data
while n:
der_data = np.gradient(
der_data, self.axes_manager[axis].axis,
axis=self.axes_manager[axis].index_in_array, **kwargs)
n -= 1
if out:
out.data = der_data
out.events.data_changed.trigger(obj=out)
else:
return self._deepcopy_with_new_data(der_data)
derivative.__doc__ %= (ONE_AXIS_PARAMETER, OUT_ARG)
def integrate_simpson(self, axis, out=None):
"""Calculate the integral of a Signal along an axis using
`Simpson's rule <https://en.wikipedia.org/wiki/Simpson%%27s_rule>`_.
Parameters
----------
axis %s
%s
Returns
-------
s : :py:class:`~hyperspy.signal.BaseSignal` (or subclasses)
A new Signal containing the integral of the provided Signal along
the specified axis.
See also
--------
derivative, integrate1D
Examples
--------
>>> import numpy as np
>>> s = BaseSignal(np.random.random((64,64,1024)))
>>> s.data.shape
(64,64,1024)
>>> s.integrate_simpson(-1).data.shape
(64,64)
"""
axis = self.axes_manager[axis]
s = out or self._deepcopy_with_new_data(None)
data = integrate.simps(y=self.data, x=axis.axis,
axis=axis.index_in_array)
if out is not None:
out.data[:] = data
out.events.data_changed.trigger(obj=out)
else:
s.data = data
s._remove_axis(axis.index_in_axes_manager)
return s
integrate_simpson.__doc__ %= (ONE_AXIS_PARAMETER, OUT_ARG)
def fft(self, shift=False, apodization=False, real_fft_only=False, **kwargs):
"""Compute the discrete Fourier Transform.
This function computes the discrete Fourier Transform over the signal
axes by means of the Fast Fourier Transform (FFT) as implemented in
numpy.
Parameters
----------
shift : bool, optional
If ``True``, the origin of FFT will be shifted to the centre
(default is ``False``).
apodization : bool or str
Apply an
`apodization window <http://mathworld.wolfram.com/ApodizationFunction.html>`_
before calculating the FFT in order to suppress streaks.
Valid string values are {``'hann'`` or ``'hamming'`` or ``'tukey'``}
If ``True`` or ``'hann'``, applies a Hann window.
If ``'hamming'`` or ``'tukey'``, applies Hamming or Tukey
windows, respectively (default is ``False``).
real_fft_only : bool, default False
If ``True`` and data is real-valued, uses :py:func:`numpy.fft.rfftn`
instead of :py:func:`numpy.fft.fftn`
**kwargs : dict
other keyword arguments are described in :py:func:`numpy.fft.fftn`
Returns
-------
s : :py:class:`~hyperspy._signals.complex_signal.ComplexSignal`
A Signal containing the result of the FFT algorithm
Raises
------
NotImplementedError
If performing FFT along a non-uniform axis.
Examples
--------
>>> im = hs.signals.Signal2D(scipy.misc.ascent())
>>> im.fft()
<ComplexSignal2D, title: FFT of , dimensions: (|512, 512)>
>>> # Use following to plot power spectrum of `im`:
>>> im.fft(shift=True, apodization=True).plot(power_spectrum=True)
Note
----
Requires a uniform axis. For further information see the documentation
of :py:func:`numpy.fft.fftn`
"""
if self.axes_manager.signal_dimension == 0:
raise AttributeError("Signal dimension must be at least one.")
        if apodization is True:
            apodization = 'hann'
if apodization:
im_fft = self.apply_apodization(window=apodization, inplace=False)
else:
im_fft = self
ax = self.axes_manager
axes = ax.signal_indices_in_array
if any([not axs.is_uniform for axs in self.axes_manager[axes]]):
raise NotImplementedError(
"Not implemented for non-uniform axes.")
use_real_fft = real_fft_only and (self.data.dtype.kind != 'c')
if use_real_fft:
fft_f = np.fft.rfftn
else:
fft_f = np.fft.fftn
if shift:
im_fft = self._deepcopy_with_new_data(np.fft.fftshift(
fft_f(im_fft.data, axes=axes, **kwargs), axes=axes))
else:
im_fft = self._deepcopy_with_new_data(
fft_f(self.data, axes=axes, **kwargs))
im_fft.change_dtype("complex")
im_fft.metadata.General.title = 'FFT of {}'.format(
im_fft.metadata.General.title)
im_fft.metadata.set_item('Signal.FFT.shifted', shift)
        if im_fft.metadata.has_item('Signal.quantity'):
            del im_fft.metadata.Signal.quantity
for axis in im_fft.axes_manager.signal_axes:
axis.scale = 1. / axis.size / axis.scale
axis.offset = 0.0
try:
units = _ureg.parse_expression(str(axis.units))**(-1)
axis.units = '{:~}'.format(units.units)
except UndefinedUnitError:
_logger.warning('Units are not set or cannot be recognized')
if shift:
axis.offset = -axis.high_value / 2.
return im_fft
def ifft(self, shift=None, return_real=True, **kwargs):
"""
Compute the inverse discrete Fourier Transform.
        This function computes the inverse of the discrete Fourier Transform
        over the signal axes by means of the Fast Fourier Transform (FFT) as
        implemented in numpy. By default, only the real part is returned.
Parameters
----------
shift : bool or None, optional
If ``None``, the shift option will be set to the original status
of the FFT using the value in metadata. If no FFT entry is
present in metadata, the parameter will be set to ``False``.
If ``True``, the origin of the FFT will be shifted to the centre.
If ``False``, the origin will be kept at (0, 0)
(default is ``None``).
return_real : bool, default True
If ``True``, returns only the real part of the inverse FFT.
            If ``False``, returns the full complex result.
**kwargs : dict
other keyword arguments are described in :py:func:`numpy.fft.ifftn`
        Returns
------
s : :py:class:`~hyperspy.signal.BaseSignal` (or subclasses)
A Signal containing the result of the inverse FFT algorithm
Raises
------
NotImplementedError
If performing IFFT along a non-uniform axis.
Examples
--------
>>> import scipy
>>> im = hs.signals.Signal2D(scipy.misc.ascent())
>>> imfft = im.fft()
>>> imfft.ifft()
<Signal2D, title: real(iFFT of FFT of ), dimensions: (|512, 512)>
Note
----
Requires a uniform axis. For further information see the documentation
of :py:func:`numpy.fft.ifftn`
"""
if self.axes_manager.signal_dimension == 0:
raise AttributeError("Signal dimension must be at least one.")
ax = self.axes_manager
axes = ax.signal_indices_in_array
if any([not axs.is_uniform for axs in self.axes_manager[axes]]):
raise NotImplementedError(
"Not implemented for non-uniform axes.")
if shift is None:
shift = self.metadata.get_item('Signal.FFT.shifted', False)
if shift:
im_ifft = self._deepcopy_with_new_data(np.fft.ifftn(
np.fft.ifftshift(self.data, axes=axes), axes=axes, **kwargs))
else:
im_ifft = self._deepcopy_with_new_data(np.fft.ifftn(
self.data, axes=axes, **kwargs))
im_ifft.metadata.General.title = 'iFFT of {}'.format(
im_ifft.metadata.General.title)
if im_ifft.metadata.has_item('Signal.FFT'):
del im_ifft.metadata.Signal.FFT
if return_real:
im_ifft = im_ifft.real
for axis in im_ifft.axes_manager.signal_axes:
axis.scale = 1. / axis.size / axis.scale
try:
units = _ureg.parse_expression(str(axis.units)) ** (-1)
axis.units = '{:~}'.format(units.units)
except UndefinedUnitError:
_logger.warning('Units are not set or cannot be recognized')
axis.offset = 0.
return im_ifft
def integrate1D(self, axis, out=None):
"""Integrate the signal over the given axis.
The integration is performed using
`Simpson's rule <https://en.wikipedia.org/wiki/Simpson%%27s_rule>`_ if
`axis.is_binned` is ``False`` and simple summation over the given axis
if ``True`` (along binned axes, the detector already provides
integrated counts per bin).
Parameters
----------
axis %s
%s
Returns
-------
s : :py:class:`~hyperspy.signal.BaseSignal` (or subclasses)
A new Signal containing the integral of the provided Signal along
the specified axis.
See also
--------
integrate_simpson, derivative
Examples
--------
>>> import numpy as np
>>> s = BaseSignal(np.random.random((64,64,1024)))
>>> s.data.shape
(64,64,1024)
>>> s.integrate1D(-1).data.shape
(64,64)
"""
if is_binned(self, axis=axis):
# in v2 replace by
# self.axes_manager[axis].is_binned
return self.sum(axis=axis, out=out)
else:
return self.integrate_simpson(axis=axis, out=out)
integrate1D.__doc__ %= (ONE_AXIS_PARAMETER, OUT_ARG)
def indexmin(self, axis, out=None, rechunk=True):
"""Returns a signal with the index of the minimum along an axis.
Parameters
----------
axis %s
%s
%s
Returns
-------
s : :py:class:`~hyperspy.signal.BaseSignal` (or subclasses)
A new Signal containing the indices of the minimum along the
specified axis. Note: the data `dtype` is always ``int``.
See also
--------
max, min, sum, mean, std, var, indexmax, valuemax, valuemin
Examples
--------
>>> import numpy as np
>>> s = BaseSignal(np.random.random((64,64,1024)))
>>> s.data.shape
(64,64,1024)
>>> s.indexmin(-1).data.shape
(64,64)
"""
return self._apply_function_on_data_and_remove_axis(
np.argmin, axis, out=out, rechunk=rechunk)
indexmin.__doc__ %= (ONE_AXIS_PARAMETER, OUT_ARG, RECHUNK_ARG)
def indexmax(self, axis, out=None, rechunk=True):
"""Returns a signal with the index of the maximum along an axis.
Parameters
----------
axis %s
%s
%s
Returns
-------
s : :py:class:`~hyperspy.signal.BaseSignal` (or subclasses)
A new Signal containing the indices of the maximum along the
specified axis. Note: the data `dtype` is always ``int``.
See also
--------
max, min, sum, mean, std, var, indexmin, valuemax, valuemin
Examples
--------
>>> import numpy as np
>>> s = BaseSignal(np.random.random((64,64,1024)))
>>> s.data.shape
(64,64,1024)
>>> s.indexmax(-1).data.shape
(64,64)
"""
return self._apply_function_on_data_and_remove_axis(
np.argmax, axis, out=out, rechunk=rechunk)
indexmax.__doc__ %= (ONE_AXIS_PARAMETER, OUT_ARG, RECHUNK_ARG)
def valuemax(self, axis, out=None, rechunk=True):
"""Returns a signal with the value of coordinates of the maximum along
an axis.
Parameters
----------
axis %s
%s
%s
Returns
-------
s : :py:class:`~hyperspy.signal.BaseSignal` (or subclasses)
A new Signal containing the calibrated coordinate values of the
maximum along the specified axis.
See also
--------
max, min, sum, mean, std, var, indexmax, indexmin, valuemin
Examples
--------
>>> import numpy as np
>>> s = BaseSignal(np.random.random((64,64,1024)))
>>> s.data.shape
(64,64,1024)
>>> s.valuemax(-1).data.shape
(64,64)
"""
idx = self.indexmax(axis)
data = self.axes_manager[axis].index2value(idx.data)
if out is None:
idx.data = data
return idx
else:
out.data[:] = data
out.events.data_changed.trigger(obj=out)
valuemax.__doc__ %= (ONE_AXIS_PARAMETER, OUT_ARG, RECHUNK_ARG)
def valuemin(self, axis, out=None, rechunk=True):
"""Returns a signal with the value of coordinates of the minimum along
an axis.
Parameters
----------
axis %s
%s
%s
Returns
-------
s : :py:class:`~hyperspy.signal.BaseSignal` (or subclasses)
A new Signal containing the calibrated coordinate values of the
minimum along the specified axis.
See also
--------
max, min, sum, mean, std, var, indexmax, indexmin, valuemax
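        Examples
        --------
        A minimal sketch mirroring :py:meth:`valuemax`:
        >>> import numpy as np
        >>> s = BaseSignal(np.random.random((64,64,1024)))
        >>> s.data.shape
        (64,64,1024)
        >>> s.valuemin(-1).data.shape
        (64,64)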
"""
idx = self.indexmin(axis)
data = self.axes_manager[axis].index2value(idx.data)
if out is None:
idx.data = data
return idx
else:
out.data[:] = data
out.events.data_changed.trigger(obj=out)
valuemin.__doc__ %= (ONE_AXIS_PARAMETER, OUT_ARG, RECHUNK_ARG)
def get_histogram(self, bins='fd', range_bins=None, max_num_bins=250, out=None,
**kwargs):
"""Return a histogram of the signal data.
More sophisticated algorithms for determining the bins can be used
        by passing a string as the ``bins`` argument. Apart from the extra
        ``'blocks'`` and ``'knuth'`` methods, the available algorithms are
        the same as in :py:func:`numpy.histogram`.
Note: The lazy version of the algorithm only supports ``"scott"``
and ``"fd"`` as a string argument for ``bins``.
Parameters
----------
%s
range_bins : tuple or None, optional
the minimum and maximum range for the histogram. If
`range_bins` is ``None``, (``x.min()``, ``x.max()``) will be used.
%s
%s
%s
**kwargs
other keyword arguments (weight and density) are described in
:py:func:`numpy.histogram`.
Returns
-------
hist_spec : :py:class:`~hyperspy._signals.signal1d.Signal1D`
A 1D spectrum instance containing the histogram.
See also
--------
* print_summary_statistics
* :py:func:`numpy.histogram`
* :py:func:`dask.histogram`
Examples
--------
>>> s = hs.signals.Signal1D(np.random.normal(size=(10, 100)))
>>> # Plot the data histogram
>>> s.get_histogram().plot()
>>> # Plot the histogram of the signal at the current coordinates
>>> s.get_current_signal().get_histogram().plot()
"""
from hyperspy import signals
data = self.data[~np.isnan(self.data)].flatten()
hist, bin_edges = histogram(
data,
bins=bins,
max_num_bins=max_num_bins,
range=range_bins,
**kwargs
)
if out is None:
hist_spec = signals.Signal1D(hist)
else:
hist_spec = out
if hist_spec.data.shape == hist.shape:
hist_spec.data[:] = hist
else:
hist_spec.data = hist
if isinstance(bins, str) and bins == 'blocks':
hist_spec.axes_manager.signal_axes[0].axis = bin_edges[:-1]
warnings.warn(
"The option `bins='blocks'` is not fully supported in this "
"version of HyperSpy. It should be used for plotting purposes "
"only.",
UserWarning,
)
else:
hist_spec.axes_manager[0].scale = bin_edges[1] - bin_edges[0]
hist_spec.axes_manager[0].offset = bin_edges[0]
hist_spec.axes_manager[0].size = hist.shape[-1]
hist_spec.axes_manager[0].name = 'value'
hist_spec.axes_manager[0].is_binned = True
hist_spec.metadata.General.title = (self.metadata.General.title +
" histogram")
if out is None:
return hist_spec
else:
out.events.data_changed.trigger(obj=out)
get_histogram.__doc__ %= (HISTOGRAM_BIN_ARGS, HISTOGRAM_MAX_BIN_ARGS, OUT_ARG, RECHUNK_ARG)
def map(
self,
function,
show_progressbar=None,
parallel=None,
max_workers=None,
inplace=True,
ragged=None,
output_signal_size=None,
output_dtype=None,
lazy_output=None,
**kwargs,
):
"""Apply a function to the signal data at all the navigation
coordinates.
The function must operate on numpy arrays. It is applied to the data at
        each navigation coordinate pixel-by-pixel. Any extra keyword arguments
are passed to the function. The keywords can take different values at
different coordinates. If the function takes an `axis` or `axes`
argument, the function is assumed to be vectorized and the signal axes
are assigned to `axis` or `axes`. Otherwise, the signal is iterated
over the navigation axes and a progress bar is displayed to monitor the
progress.
In general, only navigation axes (order, calibration, and number) are
guaranteed to be preserved.
Parameters
----------
function : :std:term:`function`
Any function that can be applied to the signal. This function should
not alter any mutable input arguments or input data. So do not do
operations which alter the input, without copying it first.
For example, instead of doing `image *= mask`, rather do
`image = image * mask`. Likewise, do not do `image[5, 5] = 10`
directly on the input data or arguments, but make a copy of it
first. For example via `image = copy.deepcopy(image)`.
%s
%s
inplace : bool, default True
If ``True``, the data is replaced by the result. Otherwise
a new Signal with the results is returned.
ragged : None or bool, default None
Indicates if the results for each navigation pixel are of identical
shape (and/or numpy arrays to begin with). If ``None``,
the output signal will be ragged only if the original signal is ragged.
output_signal_size : None, tuple
Since the size and dtype of the signal dimension of the output
signal can be different from the input signal, this output signal
            size must be calculated somehow. If both ``output_signal_size``
            and ``output_dtype`` are ``None``, they are determined
            automatically. If the automatic determination fails, specify
            them explicitly via ``output_signal_size`` and ``output_dtype``.
            The most common reason for failure is the signal size differing
            between navigation positions; in that case, use ``ragged=True``.
            The default is ``None``.
output_dtype : None, NumPy dtype
See docstring for output_signal_size for more information.
Default None.
%s
%s
**kwargs : dict
All extra keyword arguments are passed to the provided function
Notes
-----
If the function results do not have identical shapes, the result is an
array of navigation shape, where each element corresponds to the result
of the function (of arbitrary object type), called a "ragged array". As
such, most functions are not able to operate on the result and the data
should be used directly.
        This method is similar to Python's :py:func:`python:map`, which can
        also be used with a :py:class:`~hyperspy.signal.BaseSignal`
        instance for similar purposes. However, this method has the
        advantage of being faster because it iterates over the underlying
        numpy data array instead of the
        :py:class:`~hyperspy.signal.BaseSignal`.
Examples
--------
Apply a Gaussian filter to all the images in the dataset. The sigma
parameter is constant:
>>> import scipy.ndimage
>>> im = hs.signals.Signal2D(np.random.random((10, 64, 64)))
>>> im.map(scipy.ndimage.gaussian_filter, sigma=2.5)
Apply a Gaussian filter to all the images in the dataset. The signal
parameter is variable:
>>> im = hs.signals.Signal2D(np.random.random((10, 64, 64)))
>>> sigmas = hs.signals.BaseSignal(np.linspace(2, 5, 10)).T
>>> im.map(scipy.ndimage.gaussian_filter, sigma=sigmas)
Rotate the two signal dimensions, with different amount as a function
of navigation index. Delay the calculation by getting the output
lazily. The calculation is then done using the compute method.
>>> from scipy.ndimage import rotate
>>> s = hs.signals.Signal2D(np.random.random((5, 4, 40, 40)))
>>> s_angle = hs.signals.BaseSignal(np.linspace(0, 90, 20).reshape(5, 4)).T
>>> s.map(rotate, angle=s_angle, reshape=False, lazy_output=True)
>>> s.compute()
Rotate the two signal dimensions, with different amount as a function
of navigation index. In addition, the output is returned as a new
signal, instead of replacing the old signal.
>>> s = hs.signals.Signal2D(np.random.random((5, 4, 40, 40)))
>>> s_angle = hs.signals.BaseSignal(np.linspace(0, 90, 20).reshape(5, 4)).T
>>> s_rot = s.map(rotate, angle=s_angle, reshape=False, inplace=False)
Note
----
Currently requires a uniform axis.
"""
if lazy_output is None:
lazy_output = self._lazy
if ragged is None:
ragged = self.ragged
# Separate ndkwargs depending on if they are BaseSignals.
self_nav_shape = self.axes_manager.navigation_shape
ndkwargs = {}
ndkeys = [key for key in kwargs if isinstance(kwargs[key], BaseSignal)]
for key in ndkeys:
nd_nav_shape = kwargs[key].axes_manager.navigation_shape
if nd_nav_shape == self_nav_shape:
ndkwargs[key] = kwargs.pop(key)
elif nd_nav_shape == () or nd_nav_shape == (1,):
# This really isn't an iterating signal.
kwargs[key] = np.squeeze(kwargs[key].data)
else:
                raise ValueError(
                    f"The navigation_shape of the kwarg {key} "
                    f"<{nd_nav_shape}> must be consistent with the "
                    f"navigation_shape of the mapped signal "
                    f"<{self_nav_shape}>"
                )
# TODO: Consider support for non-uniform signal axis
if any([not ax.is_uniform for ax in self.axes_manager.signal_axes]):
_logger.warning(
"At least one axis of the signal is non-uniform. Can your "
"`function` operate on non-uniform axes?"
)
else:
# Check if the signal axes have inhomogeneous scales and/or units and
# display in warning if yes.
scale = set()
units = set()
for i in range(len(self.axes_manager.signal_axes)):
scale.add(self.axes_manager.signal_axes[i].scale)
units.add(self.axes_manager.signal_axes[i].units)
if len(units) != 1 or len(scale) != 1:
_logger.warning(
"The function you applied does not take into account "
"the difference of units and of scales in-between axes."
)
# If the function has an axis argument and the signal dimension is 1,
# we suppose that it can operate on the full array and we don't
# iterate over the coordinates.
fargs = []
try:
            # numpy ufuncs operate element-wise on the inputs and we don't
            # expect them to have an axis argument
if not isinstance(function, np.ufunc):
fargs = inspect.signature(function).parameters.keys()
else:
_logger.warning(
f"The function `{function.__name__}` can directly operate "
"on hyperspy signals and it is not necessary to use `map`."
)
except TypeError as error:
# This is probably a Cython function that is not supported by
# inspect.
_logger.warning(error)
if not ndkwargs and not lazy_output and (self.axes_manager.signal_dimension == 1 and
"axis" in fargs):
kwargs['axis'] = self.axes_manager.signal_axes[-1].index_in_array
result = self._map_all(function, inplace=inplace, **kwargs)
# If the function has an axes argument
# we suppose that it can operate on the full array and we don't
# iterate over the coordinates.
elif not ndkwargs and not lazy_output and "axes" in fargs and not parallel:
kwargs['axes'] = tuple([axis.index_in_array for axis in
self.axes_manager.signal_axes])
result = self._map_all(function, inplace=inplace, **kwargs)
else:
kwargs["output_signal_size"] = output_signal_size
kwargs["output_dtype"] = output_dtype
# Iteration over coordinates.
result = self._map_iterate(
function,
iterating_kwargs=ndkwargs,
show_progressbar=show_progressbar,
ragged=ragged,
inplace=inplace,
lazy_output=lazy_output,
max_workers=max_workers,
**kwargs,
)
if not inplace:
return result
else:
self.events.data_changed.trigger(obj=self)
map.__doc__ %= (SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, LAZY_OUTPUT_ARG, MAX_WORKERS_ARG)
def _map_all(self, function, inplace=True, **kwargs):
"""The function has to have either 'axis' or 'axes' keyword argument,
and hence support operating on the full dataset efficiently."""
newdata = function(self.data, **kwargs)
if inplace:
self.data = newdata
self._lazy = False
self._assign_subclass()
self.get_dimensions_from_data()
return None
else:
sig = self._deepcopy_with_new_data(newdata)
sig._lazy = False
sig._assign_subclass()
sig.get_dimensions_from_data()
return sig
def _map_iterate(
self,
function,
iterating_kwargs=None,
show_progressbar=None,
ragged=False,
inplace=True,
output_signal_size=None,
output_dtype=None,
lazy_output=None,
max_workers=None,
**kwargs,
):
if lazy_output is None:
lazy_output = self._lazy
if not self._lazy:
s_input = self.as_lazy()
else:
s_input = self
# unpacking keyword arguments
if iterating_kwargs is None:
iterating_kwargs = {}
        elif isinstance(iterating_kwargs, (tuple, list)):
            iterating_kwargs = dict(iterating_kwargs)
nav_indexes = s_input.axes_manager.navigation_indices_in_array
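        # ``map_blocks`` requires every chunk to span the full signal
        # dimensions so that ``function`` sees a complete signal at each
        # navigation position; rechunk below if that is not the case.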
chunk_span = np.equal(s_input.data.chunksize, s_input.data.shape)
chunk_span = [
chunk_span[i] for i in s_input.axes_manager.signal_indices_in_array
]
if not all(chunk_span):
_logger.info(
"The chunk size needs to span the full signal size, rechunking..."
)
old_sig = s_input.rechunk(inplace=False, nav_chunks=None)
else:
old_sig = s_input
os_am = old_sig.axes_manager
        # Infer the output dtype and signal size automatically when either
        # is not given explicitly.
        autodetermine = (output_signal_size is None or output_dtype is None)
        args, arg_keys = old_sig._get_iterating_kwargs(iterating_kwargs)
        if autodetermine:  # guess the output dtype and size from one signal
testing_kwargs = {}
for ikey, key in enumerate(arg_keys):
test_ind = (0,) * len(os_am.navigation_axes)
testing_kwargs[key] = np.squeeze(args[ikey][test_ind]).compute()
testing_kwargs = {**kwargs, **testing_kwargs}
test_data = np.array(old_sig.inav[(0,) * len(os_am.navigation_shape)].data.compute())
temp_output_signal_size, temp_output_dtype = guess_output_signal_size(
test_data=test_data,
function=function,
ragged=ragged,
**testing_kwargs,
)
if output_signal_size is None:
output_signal_size = temp_output_signal_size
if output_dtype is None:
output_dtype = temp_output_dtype
drop_axis, new_axis, axes_changed = self._get_drop_axis_new_axis(output_signal_size)
chunks = tuple([old_sig.data.chunks[i] for i in sorted(nav_indexes)]) + output_signal_size
mapped = da.map_blocks(
process_function_blockwise,
old_sig.data,
*args,
function=function,
nav_indexes=nav_indexes,
drop_axis=drop_axis,
new_axis=new_axis,
output_signal_size=output_signal_size,
dtype=output_dtype,
chunks=chunks,
arg_keys=arg_keys,
**kwargs
)
data_stored = False
if inplace:
if (
not self._lazy
and not lazy_output
and (mapped.shape == self.data.shape)
and (mapped.dtype == self.data.dtype)
):
# da.store is used to avoid unnecessary amount of memory usage.
# By using it here, the contents in mapped is written directly to
# the existing NumPy array, avoiding a potential doubling of memory use.
da.store(
mapped,
self.data,
dtype=mapped.dtype,
compute=True,
num_workers=max_workers,
)
data_stored = True
else:
self.data = mapped
self._lazy = lazy_output
sig = self
else:
sig = s_input._deepcopy_with_new_data(mapped)
am = sig.axes_manager
sig._lazy = lazy_output
if ragged:
axes_dicts = self.axes_manager._get_navigation_axes_dicts()
sig.axes_manager.__init__(axes_dicts)
sig.axes_manager._ragged = True
elif axes_changed:
am.remove(am.signal_axes[len(output_signal_size) :])
for ind in range(len(output_signal_size) - am.signal_dimension, 0, -1):
am._append_axis(size=output_signal_size[-ind], navigate=False)
if not ragged:
sig.axes_manager._ragged = False
if output_signal_size == () and am.navigation_dimension == 0:
add_scalar_axis(sig)
sig.get_dimensions_from_data()
sig._assign_subclass()
if not lazy_output:
if not data_stored:
sig.data = sig.data.compute(num_workers=max_workers)
return sig
def _get_drop_axis_new_axis(self, output_signal_size):
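        # Work out which array axes ``dask.array.map_blocks`` must drop and
        # which it must create so that the output block shape matches
        # ``output_signal_size``: if the number of signal axes changes, all
        # signal axes are dropped and new ones appended after the navigation
        # axes; if only some sizes change, those axes are dropped and
        # re-created in place.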
am = self.axes_manager
if output_signal_size == self.axes_manager.signal_shape:
drop_axis = None
new_axis = None
axes_changed = False
else:
axes_changed = True
if len(output_signal_size) != len(am.signal_shape):
drop_axis = am.signal_indices_in_array
nav_dim = am.navigation_dimension
new_axis = tuple(range(nav_dim, len(output_signal_size) + nav_dim))
else:
drop_axis = [it for (o, i, it) in zip(output_signal_size,
am.signal_shape,
am.signal_indices_in_array)
if o != i]
drop_axis = tuple(drop_axis)
new_axis = drop_axis
return drop_axis, new_axis, axes_changed
def _get_iterating_kwargs(self, iterating_kwargs):
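        # Align each iterating signal with this signal's navigation chunks
        # and pad missing signal dimensions with size-1 axes so that the
        # dask blocks broadcast against the mapped data.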
signal_dim_shape = self.axes_manager.signal_shape
nav_chunks = self._get_navigation_chunk_size()
args, arg_keys = (), ()
for key in iterating_kwargs:
if not isinstance(iterating_kwargs[key], BaseSignal):
iterating_kwargs[key] = BaseSignal(iterating_kwargs[key].T).T
_logger.warning(
"Passing arrays as keyword arguments can be ambiguous. "
"This is deprecated and will be removed in HyperSpy 2.0. "
"Pass signal instances instead."
)
if iterating_kwargs[key]._lazy:
if iterating_kwargs[key]._get_navigation_chunk_size() != nav_chunks:
iterating_kwargs[key].rechunk(nav_chunks=nav_chunks, sig_chunks=-1)
else:
iterating_kwargs[key] = iterating_kwargs[key].as_lazy()
iterating_kwargs[key].rechunk(nav_chunks=nav_chunks, sig_chunks=-1)
extra_dims = (len(signal_dim_shape) -
len(iterating_kwargs[key].axes_manager.signal_shape))
if extra_dims > 0:
old_shape = iterating_kwargs[key].data.shape
new_shape = old_shape + (1,)*extra_dims
args += (iterating_kwargs[key].data.reshape(new_shape), )
else:
args += (iterating_kwargs[key].data, )
arg_keys += (key,)
return args, arg_keys
def copy(self):
"""
Return a "shallow copy" of this Signal using the
standard library's :py:func:`~copy.copy` function. Note: this will
return a copy of the signal, but it will not duplicate the underlying
data in memory, and both Signals will reference the same data.
See Also
--------
:py:meth:`~hyperspy.signal.BaseSignal.deepcopy`
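        Examples
        --------
        A minimal sketch showing that the data buffer is shared between the
        original and the copy:
        >>> s = hs.signals.Signal1D(np.arange(5))
        >>> s2 = s.copy()
        >>> s2.data is s.data
        True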
"""
try:
backup_plot = self._plot
self._plot = None
return copy.copy(self)
finally:
self._plot = backup_plot
def __deepcopy__(self, memo):
dc = type(self)(**self._to_dictionary())
if isinstance(dc.data, np.ndarray):
dc.data = dc.data.copy()
# uncomment if we want to deepcopy models as well:
# dc.models._add_dictionary(
# copy.deepcopy(
# self.models._models.as_dictionary()))
# The Signal subclasses might change the view on init
# The following code just copies the original view
for oaxis, caxis in zip(self.axes_manager._axes,
dc.axes_manager._axes):
caxis.navigate = oaxis.navigate
if dc.metadata.has_item('Markers'):
temp_marker_dict = dc.metadata.Markers.as_dictionary()
markers_dict = markers_metadata_dict_to_markers(
temp_marker_dict,
dc.axes_manager)
dc.metadata.Markers = markers_dict
return dc
def deepcopy(self):
"""
Return a "deep copy" of this Signal using the
standard library's :py:func:`~copy.deepcopy` function. Note: this means
the underlying data structure will be duplicated in memory.
See Also
--------
:py:meth:`~hyperspy.signal.BaseSignal.copy`
"""
return copy.deepcopy(self)
def change_dtype(self, dtype, rechunk=True):
"""Change the data type of a Signal.
Parameters
----------
dtype : str or :py:class:`numpy.dtype`
Typecode string or data-type to which the Signal's data array is
cast. In addition to all the standard numpy :ref:`arrays.dtypes`,
HyperSpy supports four extra dtypes for RGB images: ``'rgb8'``,
``'rgba8'``, ``'rgb16'``, and ``'rgba16'``. Changing from and to
any ``rgb(a)`` `dtype` is more constrained than most other `dtype`
conversions. To change to an ``rgb(a)`` `dtype`,
the `signal_dimension` must be 1, and its size should be 3 (for
``rgb``) or 4 (for ``rgba``) `dtypes`. The original `dtype`
should be ``uint8`` or ``uint16`` if converting to ``rgb(a)8``
            or ``rgb(a)16``, and the `navigation_dimension` should be at
least 2. After conversion, the `signal_dimension` becomes 2. The
`dtype` of images with original `dtype` ``rgb(a)8`` or ``rgb(a)16``
can only be changed to ``uint8`` or ``uint16``, and the
`signal_dimension` becomes 1.
%s
Examples
--------
>>> s = hs.signals.Signal1D([1,2,3,4,5])
>>> s.data
array([1, 2, 3, 4, 5])
>>> s.change_dtype('float')
>>> s.data
array([ 1., 2., 3., 4., 5.])
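        A sketch of an RGB conversion, which per the constraints above needs
        a length-3 ``uint8`` signal axis and two navigation dimensions:
        >>> s = hs.signals.Signal1D(np.zeros((32, 32, 3), dtype='uint8'))
        >>> s.change_dtype('rgb8')
        >>> s
        <Signal2D, title: , dimensions: (|32, 32)>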
"""
if not isinstance(dtype, np.dtype):
if dtype in rgb_tools.rgb_dtypes:
if self.axes_manager.signal_dimension != 1:
raise AttributeError(
"Only 1D signals can be converted "
"to RGB images.")
if "8" in dtype and self.data.dtype.name != "uint8":
raise AttributeError(
"Only signals with dtype uint8 can be converted to "
"rgb8 images")
elif "16" in dtype and self.data.dtype.name != "uint16":
raise AttributeError(
"Only signals with dtype uint16 can be converted to "
"rgb16 images")
self.data = rgb_tools.regular_array2rgbx(self.data)
self.axes_manager.remove(-1)
self.axes_manager.set_signal_dimension(2)
self._assign_subclass()
return
else:
dtype = np.dtype(dtype)
if rgb_tools.is_rgbx(self.data) is True:
ddtype = self.data.dtype.fields["B"][0]
if ddtype != dtype:
                raise ValueError(
                    "It is only possible to change to %s." %
                    ddtype)
self.data = rgb_tools.rgbx2regular_array(self.data)
self.axes_manager._append_axis(
size=self.data.shape[-1],
scale=1,
offset=0,
name="RGB index",
navigate=False,)
self.axes_manager.set_signal_dimension(1)
self._assign_subclass()
return
else:
self.data = self.data.astype(dtype)
self._assign_subclass()
change_dtype.__doc__ %= (RECHUNK_ARG)
def estimate_poissonian_noise_variance(self,
expected_value=None,
gain_factor=None,
gain_offset=None,
correlation_factor=None):
r"""Estimate the Poissonian noise variance of the signal.
The variance is stored in the
`metadata.Signal.Noise_properties.variance` attribute.
The Poissonian noise variance is equal to the expected value. With the
default arguments, this method simply sets the variance attribute to
the given `expected_value`. However, more generally (although then the
noise is not strictly Poissonian), the variance may be proportional to
the expected value. Moreover, when the noise is a mixture of white
(Gaussian) and Poissonian noise, the variance is described by the
following linear model:
.. math::
\mathrm{Var}[X] = (a * \mathrm{E}[X] + b) * c
Where `a` is the `gain_factor`, `b` is the `gain_offset` (the Gaussian
noise variance) and `c` the `correlation_factor`. The correlation
factor accounts for correlation of adjacent signal elements that can
be modeled as a convolution with a Gaussian point spread function.
Parameters
----------
expected_value : :py:data:`None` or :py:class:`~hyperspy.signal.BaseSignal` (or subclasses)
If ``None``, the signal data is taken as the expected value. Note
that this may be inaccurate where the value of `data` is small.
gain_factor : None or float
`a` in the above equation. Must be positive. If ``None``, take the
value from `metadata.Signal.Noise_properties.Variance_linear_model`
if defined. Otherwise, suppose pure Poissonian noise (*i.e.*
``gain_factor=1``). If not ``None``, the value is stored in
`metadata.Signal.Noise_properties.Variance_linear_model`.
gain_offset : None or float
`b` in the above equation. Must be positive. If ``None``, take the
value from `metadata.Signal.Noise_properties.Variance_linear_model`
if defined. Otherwise, suppose pure Poissonian noise (*i.e.*
``gain_offset=0``). If not ``None``, the value is stored in
`metadata.Signal.Noise_properties.Variance_linear_model`.
correlation_factor : None or float
`c` in the above equation. Must be positive. If ``None``, take the
value from `metadata.Signal.Noise_properties.Variance_linear_model`
if defined. Otherwise, suppose pure Poissonian noise (*i.e.*
``correlation_factor=1``). If not ``None``, the value is stored in
`metadata.Signal.Noise_properties.Variance_linear_model`.
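        Examples
        --------
        A minimal sketch using the default pure Poissonian model, where the
        variance equals the expected value:
        >>> s = hs.signals.Signal1D(np.arange(10.))
        >>> s.estimate_poissonian_noise_variance()
        >>> variance = s.get_noise_variance()  # BaseSignal with the variance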
"""
if expected_value is None:
expected_value = self
dc = expected_value.data if expected_value._lazy else expected_value.data.copy()
if self.metadata.has_item(
"Signal.Noise_properties.Variance_linear_model"):
vlm = self.metadata.Signal.Noise_properties.Variance_linear_model
else:
self.metadata.add_node(
"Signal.Noise_properties.Variance_linear_model")
vlm = self.metadata.Signal.Noise_properties.Variance_linear_model
if gain_factor is None:
if not vlm.has_item("gain_factor"):
vlm.gain_factor = 1
gain_factor = vlm.gain_factor
if gain_offset is None:
if not vlm.has_item("gain_offset"):
vlm.gain_offset = 0
gain_offset = vlm.gain_offset
if correlation_factor is None:
if not vlm.has_item("correlation_factor"):
vlm.correlation_factor = 1
correlation_factor = vlm.correlation_factor
        if gain_offset < 0:
            raise ValueError("`gain_offset` must be non-negative.")
        if gain_factor < 0:
            raise ValueError("`gain_factor` must be non-negative.")
        if correlation_factor < 0:
            raise ValueError("`correlation_factor` must be non-negative.")
variance = self._estimate_poissonian_noise_variance(dc, gain_factor,
gain_offset,
correlation_factor)
variance = BaseSignal(variance, attributes={'_lazy': self._lazy})
variance.axes_manager = self.axes_manager
variance.metadata.General.title = ("Variance of " + self.metadata.General.title)
self.set_noise_variance(variance)
@staticmethod
def _estimate_poissonian_noise_variance(dc, gain_factor, gain_offset,
correlation_factor):
variance = (dc * gain_factor + gain_offset) * correlation_factor
variance = np.clip(variance, gain_offset * correlation_factor, np.inf)
return variance
def set_noise_variance(self, variance):
"""Set the noise variance of the signal.
Equivalent to ``s.metadata.set_item("Signal.Noise_properties.variance", variance)``.
Parameters
----------
variance : None or float or :py:class:`~hyperspy.signal.BaseSignal` (or subclasses)
Value or values of the noise variance. A value of None is
equivalent to clearing the variance.
Returns
-------
None
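        Examples
        --------
        A minimal sketch (the variance value is illustrative):
        >>> s = hs.signals.Signal1D(np.arange(10.))
        >>> s.set_noise_variance(0.1)
        >>> s.get_noise_variance()
        0.1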
"""
if isinstance(variance, BaseSignal):
if (
variance.axes_manager.navigation_shape
!= self.axes_manager.navigation_shape
):
raise ValueError(
"The navigation shape of the `variance` is "
"not equal to the navigation shape of the signal"
)
elif isinstance(variance, numbers.Number):
pass
elif variance is None:
pass
else:
raise ValueError(
"`variance` must be one of [None, float, "
f"hyperspy.signal.BaseSignal], not {type(variance)}."
)
self.metadata.set_item("Signal.Noise_properties.variance", variance)
def get_noise_variance(self):
"""Get the noise variance of the signal, if set.
Equivalent to ``s.metadata.Signal.Noise_properties.variance``.
Parameters
----------
None
Returns
-------
variance : None or float or :py:class:`~hyperspy.signal.BaseSignal` (or subclasses)
Noise variance of the signal, if set.
Otherwise returns None.
"""
if "Signal.Noise_properties.variance" in self.metadata:
return self.metadata.Signal.Noise_properties.variance
return None
def get_current_signal(self, auto_title=True, auto_filename=True):
"""Returns the data at the current coordinates as a
:py:class:`~hyperspy.signal.BaseSignal` subclass.
The signal subclass is the same as that of the current object. All the
axes navigation attributes are set to ``False``.
Parameters
----------
auto_title : bool
If ``True``, the current indices (in parentheses) are appended to
the title, separated by a space.
auto_filename : bool
If ``True`` and `tmp_parameters.filename` is defined
(which is always the case when the Signal has been read from a
file), the filename stored in the metadata is modified by
appending an underscore and the current indices in parentheses.
Returns
-------
cs : :py:class:`~hyperspy.signal.BaseSignal` (or subclass)
The data at the current coordinates as a Signal
Examples
--------
>>> im = hs.signals.Signal2D(np.zeros((2,3, 32,32)))
>>> im
<Signal2D, title: , dimensions: (3, 2, 32, 32)>
>>> im.axes_manager.indices = 2,1
>>> im.get_current_signal()
<Signal2D, title: (2, 1), dimensions: (32, 32)>
"""
metadata = self.metadata.deepcopy()
# Check if marker update
if metadata.has_item('Markers'):
marker_name_list = metadata.Markers.keys()
markers_dict = metadata.Markers.__dict__
for marker_name in marker_name_list:
marker = markers_dict[marker_name]['_dtb_value_']
if marker.auto_update:
marker.axes_manager = self.axes_manager
key_dict = {}
for key in marker.data.dtype.names:
key_dict[key] = marker.get_data_position(key)
marker.set_data(**key_dict)
class_ = hyperspy.io.assign_signal_subclass(
dtype=self.data.dtype,
signal_dimension=self.axes_manager.signal_dimension,
signal_type=self._signal_type,
lazy=False)
cs = class_(
self(),
axes=self.axes_manager._get_signal_axes_dicts(),
metadata=metadata.as_dictionary())
if cs.metadata.has_item('Markers'):
temp_marker_dict = cs.metadata.Markers.as_dictionary()
markers_dict = markers_metadata_dict_to_markers(
temp_marker_dict,
cs.axes_manager)
cs.metadata.Markers = markers_dict
if auto_filename is True and self.tmp_parameters.has_item('filename'):
cs.tmp_parameters.filename = (self.tmp_parameters.filename +
'_' +
str(self.axes_manager.indices))
cs.tmp_parameters.extension = self.tmp_parameters.extension
cs.tmp_parameters.folder = self.tmp_parameters.folder
if auto_title is True:
cs.metadata.General.title = (cs.metadata.General.title +
' ' + str(self.axes_manager.indices))
cs.axes_manager._set_axis_attribute_values("navigate", False)
return cs
def _get_navigation_signal(self, data=None, dtype=None):
"""Return a signal with the same axes as the navigation space.
Parameters
----------
data : None or :py:class:`numpy.ndarray`, optional
If ``None``, the resulting Signal data is an array of the same
`dtype` as the current one filled with zeros. If a numpy array,
the array must have the correct dimensions.
dtype : :py:class:`numpy.dtype`, optional
The desired data-type for the data array when `data` is ``None``,
e.g., ``numpy.int8``. The default is the data type of the current
signal data.
"""
from dask.array import Array
if data is not None:
ref_shape = (self.axes_manager._navigation_shape_in_array
if self.axes_manager.navigation_dimension != 0
else (1,))
if data.shape != ref_shape:
raise ValueError(
("data.shape %s is not equal to the current navigation "
"shape in array which is %s") %
(str(data.shape), str(ref_shape)))
else:
if dtype is None:
dtype = self.data.dtype
if self.axes_manager.navigation_dimension == 0:
data = np.array([0, ], dtype=dtype)
else:
data = np.zeros(
self.axes_manager._navigation_shape_in_array,
dtype=dtype)
if self.axes_manager.navigation_dimension == 0:
s = BaseSignal(data)
elif self.axes_manager.navigation_dimension == 1:
from hyperspy._signals.signal1d import Signal1D
s = Signal1D(data,
axes=self.axes_manager._get_navigation_axes_dicts())
elif self.axes_manager.navigation_dimension == 2:
from hyperspy._signals.signal2d import Signal2D
s = Signal2D(data,
axes=self.axes_manager._get_navigation_axes_dicts())
else:
s = BaseSignal(
data,
axes=self.axes_manager._get_navigation_axes_dicts())
s.axes_manager.set_signal_dimension(
self.axes_manager.navigation_dimension)
if isinstance(data, Array):
s = s.as_lazy()
return s
def _get_signal_signal(self, data=None, dtype=None):
"""Return a signal with the same axes as the signal space.
Parameters
----------
data : None or :py:class:`numpy.ndarray`, optional
If ``None``, the resulting Signal data is an array of the same
`dtype` as the current one filled with zeros. If a numpy array,
the array must have the correct dimensions.
dtype : :py:class:`numpy.dtype`, optional
The desired data-type for the data array when `data` is ``None``,
e.g., ``numpy.int8``. The default is the data type of the current
signal data.
"""
from dask.array import Array
if data is not None:
ref_shape = (self.axes_manager._signal_shape_in_array
if self.axes_manager.signal_dimension != 0
else (1,))
if data.shape != ref_shape:
raise ValueError(
"data.shape %s is not equal to the current signal shape in"
" array which is %s" % (str(data.shape), str(ref_shape)))
else:
if dtype is None:
dtype = self.data.dtype
if self.axes_manager.signal_dimension == 0:
data = np.array([0, ], dtype=dtype)
else:
data = np.zeros(
self.axes_manager._signal_shape_in_array,
dtype=dtype)
if self.axes_manager.signal_dimension == 0:
s = BaseSignal(data)
s.set_signal_type(self.metadata.Signal.signal_type)
else:
s = self.__class__(data,
axes=self.axes_manager._get_signal_axes_dicts())
if isinstance(data, Array):
s = s.as_lazy()
return s
def __iter__(self):
# Reset AxesManager iteration index
self.axes_manager.__iter__()
return self
def __next__(self):
next(self.axes_manager)
return self.get_current_signal()
def __len__(self):
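        # The length of a signal is its navigation size; a signal without a
        # navigation dimension behaves as a sequence of one item.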
nitem = int(self.axes_manager.navigation_size)
nitem = nitem if nitem > 0 else 1
return nitem
def as_signal1D(self, spectral_axis, out=None, optimize=True):
"""Return the Signal as a spectrum.
The chosen spectral axis is moved to the last index in the
array and the data is made contiguous for efficient iteration over
spectra. By default, the method ensures the data is stored optimally,
hence often making a copy of the data. See
:py:meth:`~hyperspy.signal.BaseSignal.transpose` for a more general
method with more options.
Parameters
----------
spectral_axis %s
%s
%s
See also
--------
as_signal2D, transpose, :py:func:`hyperspy.misc.utils.transpose`
Examples
--------
>>> img = hs.signals.Signal2D(np.ones((3,4,5,6)))
>>> img
<Signal2D, title: , dimensions: (4, 3, 6, 5)>
>>> img.as_signal1D(-1+1j)
<Signal1D, title: , dimensions: (6, 5, 4, 3)>
>>> img.as_signal1D(0)
<Signal1D, title: , dimensions: (6, 5, 3, 4)>
"""
sp = self.transpose(signal_axes=[spectral_axis], optimize=optimize)
if out is None:
return sp
else:
if out._lazy:
out.data = sp.data
else:
out.data[:] = sp.data
out.events.data_changed.trigger(obj=out)
as_signal1D.__doc__ %= (ONE_AXIS_PARAMETER, OUT_ARG,
OPTIMIZE_ARG.replace('False', 'True'))
def as_signal2D(self, image_axes, out=None, optimize=True):
"""Convert a signal to image (
:py:class:`~hyperspy._signals.signal2d.Signal2D`).
The chosen image axes are moved to the last indices in the
array and the data is made contiguous for efficient
iteration over images.
Parameters
----------
image_axes : tuple (of int, str or :py:class:`~hyperspy.axes.DataAxis`)
Select the image axes. Note that the order of the axes matters
and it is given in the "natural" i.e. `X`, `Y`, `Z`... order.
%s
%s
Raises
------
DataDimensionError
When `data.ndim` < 2
See also
--------
as_signal1D, transpose, :py:func:`hyperspy.misc.utils.transpose`
Examples
--------
>>> s = hs.signals.Signal1D(np.ones((2,3,4,5)))
>>> s
<Signal1D, title: , dimensions: (4, 3, 2, 5)>
>>> s.as_signal2D((0,1))
<Signal2D, title: , dimensions: (5, 2, 4, 3)>
        >>> s.as_signal2D((1,2))
<Signal2D, title: , dimensions: (4, 5, 3, 2)>
"""
if self.data.ndim < 2:
raise DataDimensionError(
"A Signal dimension must be >= 2 to be converted to a Signal2D")
im = self.transpose(signal_axes=image_axes, optimize=optimize)
if out is None:
return im
else:
if out._lazy:
out.data = im.data
else:
out.data[:] = im.data
out.events.data_changed.trigger(obj=out)
as_signal2D.__doc__ %= (OUT_ARG, OPTIMIZE_ARG.replace('False', 'True'))
def _assign_subclass(self):
mp = self.metadata
self.__class__ = assign_signal_subclass(
dtype=self.data.dtype,
signal_dimension=self.axes_manager.signal_dimension,
signal_type=mp.Signal.signal_type
if "Signal.signal_type" in mp
else self._signal_type,
lazy=self._lazy)
if self._alias_signal_types: # In case legacy types exist:
mp.Signal.signal_type = self._signal_type # set to default!
self.__init__(self.data, full_initialisation=False)
if self._lazy:
self._make_lazy()
def set_signal_type(self, signal_type=""):
"""Set the signal type and convert the current signal accordingly.
The ``signal_type`` attribute specifies the type of data that the signal
        contains, e.g. electron energy-loss spectroscopy data,
photoemission spectroscopy data, etc.
When setting `signal_type` to a "known" type, HyperSpy converts the
current signal to the most appropriate
:py:class:`hyperspy.signal.BaseSignal` subclass. Known signal types are
signal types that have a specialized
:py:class:`hyperspy.signal.BaseSignal` subclass associated, usually
providing specific features for the analysis of that type of signal.
HyperSpy ships with a minimal set of known signal types. External
packages can register extra signal types. To print a list of
registered signal types in the current installation, call
:py:meth:`hyperspy.utils.print_known_signal_types`, and see
the developer guide for details on how to add new signal_types.
A non-exhaustive list of HyperSpy extensions is also maintained
here: https://github.com/hyperspy/hyperspy-extensions-list.
Parameters
----------
signal_type : str, optional
If no arguments are passed, the ``signal_type`` is set to undefined
and the current signal converted to a generic signal subclass.
Otherwise, set the signal_type to the given signal
type or to the signal type corresponding to the given signal type
alias. Setting the signal_type to a known signal type (if exists)
is highly advisable. If none exists, it is good practice
to set signal_type to a value that best describes the data signal
type.
See Also
--------
* :py:meth:`hyperspy.utils.print_known_signal_types`
Examples
--------
Let's first print all known signal types:
>>> s = hs.signals.Signal1D([0, 1, 2, 3])
>>> s
<Signal1D, title: , dimensions: (|4)>
>>> hs.print_known_signal_types()
+--------------------+---------------------+--------------------+----------+
| signal_type | aliases | class name | package |
+--------------------+---------------------+--------------------+----------+
| DielectricFunction | dielectric function | DielectricFunction | hyperspy |
| EDS_SEM | | EDSSEMSpectrum | hyperspy |
| EDS_TEM | | EDSTEMSpectrum | hyperspy |
| EELS | TEM EELS | EELSSpectrum | hyperspy |
| hologram | | HologramImage | hyperspy |
| MySignal | | MySignal | hspy_ext |
+--------------------+---------------------+--------------------+----------+
        We can set the ``signal_type`` using the signal type name:
>>> s.set_signal_type("EELS")
>>> s
<EELSSpectrum, title: , dimensions: (|4)>
>>> s.set_signal_type("EDS_SEM")
>>> s
<EDSSEMSpectrum, title: , dimensions: (|4)>
or any of its aliases:
>>> s.set_signal_type("TEM EELS")
>>> s
<EELSSpectrum, title: , dimensions: (|4)>
To set the `signal_type` to `undefined`, simply call the method without arguments:
>>> s.set_signal_type()
>>> s
<Signal1D, title: , dimensions: (|4)>
"""
if signal_type is None:
warnings.warn(
"`s.set_signal_type(signal_type=None)` is deprecated. "
"Use `s.set_signal_type(signal_type='')` instead.",
VisibleDeprecationWarning
)
self.metadata.Signal.signal_type = signal_type
# _assign_subclass takes care of matching aliases with their
# corresponding signal class
self._assign_subclass()
def set_signal_origin(self, origin):
"""Set the `signal_origin` metadata value.
The `signal_origin` attribute specifies if the data was obtained
through experiment or simulation.
Parameters
----------
origin : str
Typically ``'experiment'`` or ``'simulation'``
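        Examples
        --------
        A minimal sketch:
        >>> s = hs.signals.Signal1D(np.arange(10))
        >>> s.set_signal_origin('experiment')
        >>> s.metadata.Signal.signal_origin
        'experiment'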
"""
self.metadata.Signal.signal_origin = origin
def print_summary_statistics(self, formatter="%.3g", rechunk=True):
"""Prints the five-number summary statistics of the data, the mean, and
the standard deviation.
        Prints the mean, standard deviation (std), maximum (max), minimum
        (min), first quartile (Q1), median, and third quartile (Q3). NaNs
        are removed from the calculations.
Parameters
----------
formatter : str
The number formatter to use for the output
%s
See also
--------
get_histogram
"""
_mean, _std, _min, _q1, _q2, _q3, _max = self._calculate_summary_statistics(
rechunk=rechunk)
print(underline("Summary statistics"))
print("mean:\t" + formatter % _mean)
print("std:\t" + formatter % _std)
print()
print("min:\t" + formatter % _min)
print("Q1:\t" + formatter % _q1)
print("median:\t" + formatter % _q2)
print("Q3:\t" + formatter % _q3)
print("max:\t" + formatter % _max)
print_summary_statistics.__doc__ %= (RECHUNK_ARG)
def _calculate_summary_statistics(self, **kwargs):
data = self.data
data = data[~np.isnan(data)]
_mean = np.nanmean(data)
_std = np.nanstd(data)
_min = np.nanmin(data)
_q1 = np.percentile(data, 25)
_q2 = np.percentile(data, 50)
_q3 = np.percentile(data, 75)
_max = np.nanmax(data)
return _mean, _std, _min, _q1, _q2, _q3, _max
@property
def is_rgba(self):
"""
Whether or not this signal is an RGB + alpha channel `dtype`.
"""
return rgb_tools.is_rgba(self.data)
@property
def is_rgb(self):
"""
Whether or not this signal is an RGB `dtype`.
"""
return rgb_tools.is_rgb(self.data)
@property
def is_rgbx(self):
"""
Whether or not this signal is either an RGB or RGB + alpha channel
`dtype`.
"""
return rgb_tools.is_rgbx(self.data)
def add_marker(
self, marker, plot_on_signal=True, plot_marker=True,
permanent=False, plot_signal=True, render_figure=True):
"""
Add one or several markers to the signal or navigator plot and plot
the signal, if not yet plotted (by default)
Parameters
----------
marker : :py:mod:`hyperspy.drawing.marker` object or iterable
The marker or iterable (list, tuple, ...) of markers to add.
See the :ref:`plot.markers` section in the User Guide if you want
to add a large number of markers as an iterable, since this will
be much faster. For signals with navigation dimensions,
the markers can be made to change for different navigation
indices. See the examples for info.
plot_on_signal : bool
If ``True`` (default), add the marker to the signal.
If ``False``, add the marker to the navigator
plot_marker : bool
If ``True`` (default), plot the marker.
permanent : bool
If ``False`` (default), the marker will only appear in the current
plot. If ``True``, the marker will be added to the
`metadata.Markers` list, and be plotted with
``plot(plot_markers=True)``. If the signal is saved as a HyperSpy
HDF5 file, the markers will be stored in the HDF5 signal and be
restored when the file is loaded.
Examples
--------
>>> import scipy.misc
>>> im = hs.signals.Signal2D(scipy.misc.ascent())
        >>> m = hs.markers.rectangle(x1=150, y1=100, x2=400,
        ...                          y2=400, color='red')
>>> im.add_marker(m)
Adding to a 1D signal, where the point will change
when the navigation index is changed:
>>> s = hs.signals.Signal1D(np.random.random((3, 100)))
>>> marker = hs.markers.point((19, 10, 60), (0.2, 0.5, 0.9))
>>> s.add_marker(marker, permanent=True, plot_marker=True)
Add permanent marker:
>>> s = hs.signals.Signal2D(np.random.random((100, 100)))
>>> marker = hs.markers.point(50, 60, color='red')
>>> s.add_marker(marker, permanent=True, plot_marker=True)
Add permanent marker to signal with 2 navigation dimensions.
The signal has navigation dimensions (3, 2), as the dimensions
        get flipped compared to the output of :py:func:`numpy.random.random`.
To add a vertical line marker which changes for different navigation
indices, the list used to make the marker must be a nested list:
2 lists with 3 elements each (2 x 3):
>>> s = hs.signals.Signal1D(np.random.random((2, 3, 10)))
>>> marker = hs.markers.vertical_line([[1, 3, 5], [2, 4, 6]])
>>> s.add_marker(marker, permanent=True)
Add permanent marker which changes with navigation position, and
do not add it to a current plot:
>>> s = hs.signals.Signal2D(np.random.randint(10, size=(3, 100, 100)))
>>> marker = hs.markers.point((10, 30, 50), (30, 50, 60), color='red')
>>> s.add_marker(marker, permanent=True, plot_marker=False)
>>> s.plot(plot_markers=True) #doctest: +SKIP
Removing a permanent marker:
>>> s = hs.signals.Signal2D(np.random.randint(10, size=(100, 100)))
>>> marker = hs.markers.point(10, 60, color='red')
>>> marker.name = "point_marker"
>>> s.add_marker(marker, permanent=True)
>>> del s.metadata.Markers.point_marker
Adding many markers as a list:
>>> from numpy.random import random
>>> s = hs.signals.Signal2D(np.random.randint(10, size=(100, 100)))
>>> marker_list = []
        >>> for i in range(100):
        ...     marker = hs.markers.point(random()*100, random()*100, color='red')
        ...     marker_list.append(marker)
>>> s.add_marker(marker_list, permanent=True)
"""
if isiterable(marker):
marker_list = marker
else:
marker_list = [marker]
markers_dict = {}
if permanent:
if not self.metadata.has_item('Markers'):
self.metadata.add_node('Markers')
marker_object_list = []
for marker_tuple in list(self.metadata.Markers):
marker_object_list.append(marker_tuple[1])
name_list = self.metadata.Markers.keys()
marker_name_suffix = 1
for m in marker_list:
marker_data_shape = m._get_data_shape()[::-1]
            if (len(marker_data_shape) != 0 and
                    marker_data_shape != self.axes_manager.navigation_shape):
                raise ValueError(
                    "The navigation shape of the marker must be empty or "
                    "the reverse of the navigation shape of this signal. "
                    "If the navigation dimensions of the signal are (2, 3), "
                    "the marker dimensions must be (3, 2).")
if (m.signal is not None) and (m.signal is not self):
                raise ValueError("Markers cannot be added to several signals")
m._plot_on_signal = plot_on_signal
if plot_marker:
if self._plot is None or not self._plot.is_active:
self.plot()
if m._plot_on_signal:
self._plot.signal_plot.add_marker(m)
else:
if self._plot.navigator_plot is None:
self.plot()
self._plot.navigator_plot.add_marker(m)
m.plot(render_figure=False)
if permanent:
for marker_object in marker_object_list:
if m is marker_object:
raise ValueError("Marker already added to signal")
name = m.name
temp_name = name
while temp_name in name_list:
temp_name = name + str(marker_name_suffix)
marker_name_suffix += 1
m.name = temp_name
markers_dict[m.name] = m
m.signal = self
marker_object_list.append(m)
name_list.append(m.name)
if not plot_marker and not permanent:
_logger.warning(
"plot_marker=False and permanent=False does nothing")
if permanent:
self.metadata.Markers = markers_dict
if plot_marker and render_figure:
self._render_figure()
def _render_figure(self, plot=['signal_plot', 'navigation_plot']):
for p in plot:
if hasattr(self._plot, p):
p = getattr(self._plot, p)
p.render_figure()
def _plot_permanent_markers(self):
marker_name_list = self.metadata.Markers.keys()
markers_dict = self.metadata.Markers.__dict__
for marker_name in marker_name_list:
marker = markers_dict[marker_name]['_dtb_value_']
if marker.plot_marker:
if marker._plot_on_signal:
self._plot.signal_plot.add_marker(marker)
else:
self._plot.navigator_plot.add_marker(marker)
marker.plot(render_figure=False)
self._render_figure()
def add_poissonian_noise(self, keep_dtype=True, random_state=None):
"""Add Poissonian noise to the data.
This method works in-place. The resulting data type is ``int64``.
If this is different from the original data type then a warning
is added to the log.
Parameters
----------
keep_dtype : bool, default True
If ``True``, keep the original data type of the signal data. For
example, if the data type was initially ``'float64'``, the result of
the operation (usually ``'int64'``) will be converted to
``'float64'``.
random_state : None or int or RandomState instance, default None
Seed for the random generator.
Note
----
This method uses :py:func:`numpy.random.poisson`
(or :py:func:`dask.array.random.poisson` for lazy signals)
to generate the Poissonian noise.
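        Examples
        --------
        A minimal sketch; the noise realization is random and, with the
        default ``keep_dtype=True``, the original data type is preserved:
        >>> s = hs.signals.Signal1D(np.ones(100) * 10.)
        >>> s.add_poissonian_noise(random_state=0)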
"""
kwargs = {}
random_state = check_random_state(random_state, lazy=self._lazy)
if self._lazy:
kwargs["chunks"] = self.data.chunks
original_dtype = self.data.dtype
self.data = random_state.poisson(lam=self.data, **kwargs)
if self.data.dtype != original_dtype:
if keep_dtype:
_logger.warning(
f"Changing data type from {self.data.dtype} "
f"to the original {original_dtype}"
)
# Don't change the object if possible
self.data = self.data.astype(original_dtype, copy=False)
else:
_logger.warning(
f"The data type changed from {original_dtype} "
f"to {self.data.dtype}"
)
self.events.data_changed.trigger(obj=self)
def add_gaussian_noise(self, std, random_state=None):
"""Add Gaussian noise to the data.
The operation is performed in-place (*i.e.* the data of the signal
is modified). This method requires the signal to have a float data type,
otherwise it will raise a :py:exc:`TypeError`.
Parameters
----------
std : float
The standard deviation of the Gaussian noise.
random_state : None or int or RandomState instance, default None
Seed for the random generator.
Note
----
This method uses :py:func:`numpy.random.normal` (or
:py:func:`dask.array.random.normal` for lazy signals)
to generate the noise.
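        Examples
        --------
        A minimal sketch on float data (the noise realization is random):
        >>> s = hs.signals.Signal1D(np.zeros(100))
        >>> s.add_gaussian_noise(std=1.0, random_state=0)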
"""
if self.data.dtype.char not in np.typecodes["AllFloat"]:
raise TypeError(
"`s.add_gaussian_noise()` requires the data to have "
f"a float datatype, but the current type is '{self.data.dtype}'. "
"To fix this issue, you can change the type using the "
"change_dtype method (e.g. s.change_dtype('float64'))."
)
kwargs = {}
random_state = check_random_state(random_state, lazy=self._lazy)
if self._lazy:
kwargs["chunks"] = self.data.chunks
noise = random_state.normal(loc=0, scale=std, size=self.data.shape, **kwargs)
if self._lazy:
# With lazy data we can't keep the same array object
self.data = self.data + noise
else:
# Don't change the object
self.data += noise
self.events.data_changed.trigger(obj=self)
def transpose(self, signal_axes=None,
navigation_axes=None, optimize=False):
"""Transposes the signal to have the required signal and navigation
axes.
Parameters
----------
signal_axes : None, int, or iterable type
The number (or indices) of axes to convert to signal axes
navigation_axes : None, int, or iterable type
The number (or indices) of axes to convert to navigation axes
%s
Note
----
        Unless both axes parameters (`signal_axes` and `navigation_axes`)
        are given iterables, generally one has to be ``None`` (i.e.
        "floating"). The other one then specifies either the required number
        of axes, or explicitly the indices of the axes, to move to the
        corresponding space.
If both are iterables, full control is given as long as all axes
are assigned to one space only.
See also
--------
T, as_signal2D, as_signal1D, :py:func:`hyperspy.misc.utils.transpose`
Examples
--------
>>> # just create a signal with many distinct dimensions
>>> s = hs.signals.BaseSignal(np.random.rand(1,2,3,4,5,6,7,8,9))
>>> s
<BaseSignal, title: , dimensions: (|9, 8, 7, 6, 5, 4, 3, 2, 1)>
>>> s.transpose() # swap signal and navigation spaces
<BaseSignal, title: , dimensions: (9, 8, 7, 6, 5, 4, 3, 2, 1|)>
>>> s.T # a shortcut for no arguments
<BaseSignal, title: , dimensions: (9, 8, 7, 6, 5, 4, 3, 2, 1|)>
        >>> # roll to move 5 axes into signal space
>>> s.transpose(signal_axes=5)
<BaseSignal, title: , dimensions: (4, 3, 2, 1|9, 8, 7, 6, 5)>
        >>> # roll to leave 3 axes in navigation space
>>> s.transpose(navigation_axes=3)
<BaseSignal, title: , dimensions: (3, 2, 1|9, 8, 7, 6, 5, 4)>
>>> # 3 explicitly defined axes in signal space
>>> s.transpose(signal_axes=[0, 2, 6])
<BaseSignal, title: , dimensions: (8, 6, 5, 4, 2, 1|9, 7, 3)>
>>> # A mix of two lists, but specifying all axes explicitly
>>> # The order of axes is preserved in both lists
>>> s.transpose(navigation_axes=[1, 2, 3, 4, 5, 8], signal_axes=[0, 6, 7])
<BaseSignal, title: , dimensions: (8, 7, 6, 5, 4, 1|9, 3, 2)>
"""
if self.axes_manager.ragged:
raise RuntimeError("Signal with ragged dimension can't be "
"transposed.")
am = self.axes_manager
ax_list = am._axes
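        # Resolve the requested split into two ordered tuples of axis
        # objects: an integer argument counts axes from the end of the axes
        # list, an iterable selects axes explicitly, and ``None`` means the
        # complementary set (or a full swap when both are ``None``).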
if isinstance(signal_axes, int):
if navigation_axes is not None:
raise ValueError("The navigation_axes are not None, even "
"though just a number was given for "
"signal_axes")
if len(ax_list) < signal_axes:
raise ValueError("Too many signal axes requested")
if signal_axes < 0:
raise ValueError("Can't have negative number of signal axes")
elif signal_axes == 0:
signal_axes = ()
navigation_axes = ax_list[::-1]
else:
navigation_axes = ax_list[:-signal_axes][::-1]
signal_axes = ax_list[-signal_axes:][::-1]
elif iterable_not_string(signal_axes):
signal_axes = tuple(am[ax] for ax in signal_axes)
if navigation_axes is None:
navigation_axes = tuple(ax for ax in ax_list
if ax not in signal_axes)[::-1]
elif iterable_not_string(navigation_axes):
# want to keep the order
navigation_axes = tuple(am[ax] for ax in navigation_axes)
intersection = set(signal_axes).intersection(navigation_axes)
if len(intersection):
raise ValueError("At least one axis found in both spaces:"
" {}".format(intersection))
if len(am._axes) != (len(signal_axes) + len(navigation_axes)):
raise ValueError("Not all current axes were assigned to a "
"space")
else:
raise ValueError("navigation_axes has to be None or an iterable"
" when signal_axes is iterable")
elif signal_axes is None:
if isinstance(navigation_axes, int):
if len(ax_list) < navigation_axes:
raise ValueError("Too many navigation axes requested")
if navigation_axes < 0:
raise ValueError(
"Can't have negative number of navigation axes")
elif navigation_axes == 0:
navigation_axes = ()
signal_axes = ax_list[::-1]
else:
signal_axes = ax_list[navigation_axes:][::-1]
navigation_axes = ax_list[:navigation_axes][::-1]
elif iterable_not_string(navigation_axes):
navigation_axes = tuple(am[ax] for ax in
navigation_axes)
signal_axes = tuple(ax for ax in ax_list
if ax not in navigation_axes)[::-1]
elif navigation_axes is None:
signal_axes = am.navigation_axes
navigation_axes = am.signal_axes
else:
raise ValueError(
"The passed navigation_axes argument is not valid")
else:
raise ValueError("The passed signal_axes argument is not valid")
# translate to axes idx from actual objects for variance
idx_sig = [ax.index_in_axes_manager for ax in signal_axes]
idx_nav = [ax.index_in_axes_manager for ax in navigation_axes]
# From now on we operate with axes in array order
signal_axes = signal_axes[::-1]
navigation_axes = navigation_axes[::-1]
# get data view
array_order = tuple(
ax.index_in_array for ax in navigation_axes)
array_order += tuple(ax.index_in_array for ax in signal_axes)
newdata = self.data.transpose(array_order)
res = self._deepcopy_with_new_data(newdata, copy_variance=True,
copy_learning_results=True)
# reconfigure the axes of the axesmanager:
ram = res.axes_manager
ram._update_trait_handlers(remove=True)
# _axes are ordered in array order
ram._axes = [ram._axes[i] for i in array_order]
for i, ax in enumerate(ram._axes):
if i < len(navigation_axes):
ax.navigate = True
else:
ax.navigate = False
ram._update_attributes()
ram._update_trait_handlers(remove=False)
res._assign_subclass()
var = res.get_noise_variance()
if isinstance(var, BaseSignal):
var = var.transpose(signal_axes=idx_sig,
navigation_axes=idx_nav,
optimize=optimize)
res.set_noise_variance(var)
if optimize:
res._make_sure_data_is_contiguous()
if res.metadata.has_item('Markers'):
# The markers might fail if the navigation dimensions are changed
# so the safest is simply to not carry them over from the
# previous signal.
del res.metadata.Markers
return res
transpose.__doc__ %= (OPTIMIZE_ARG)
@property
def T(self):
"""The transpose of the signal, with signal and navigation spaces
swapped. Enables calling
:py:meth:`~hyperspy.signal.BaseSignal.transpose` with the default
parameters as a property of a Signal.
"""
return self.transpose()
def apply_apodization(self, window='hann',
hann_order=None, tukey_alpha=0.5, inplace=False):
"""
Apply an `apodization window
<http://mathworld.wolfram.com/ApodizationFunction.html>`_ to a Signal.
Parameters
----------
window : str, optional
Select between {``'hann'`` (default), ``'hamming'``, or ``'tukey'``}
hann_order : None or int, optional
Only used if ``window='hann'``
If integer `n` is provided, a Hann window of `n`-th order will be
used. If ``None``, a first order Hann window is used.
Higher orders result in more homogeneous intensity distribution.
tukey_alpha : float, optional
Only used if ``window='tukey'`` (default is 0.5). From the
documentation of
:py:func:`scipy.signal.windows.tukey`:
- Shape parameter of the Tukey window, representing the
fraction of the window inside the cosine tapered region. If
zero, the Tukey window is equivalent to a rectangular window.
If one, the Tukey window is equivalent to a Hann window.
inplace : bool, optional
If ``True``, the apodization is applied in place, *i.e.* the signal
data will be substituted by the apodized one (default is
``False``).
Returns
-------
out : :py:class:`~hyperspy.signal.BaseSignal` (or subclasses), optional
If ``inplace=False``, returns the apodized signal of the same
type as the provided Signal.
Examples
--------
>>> import hyperspy.api as hs
>>> holo = hs.datasets.example_signals.object_hologram()
>>> holo.apply_apodization('tukey', tukey_alpha=0.1).plot()
"""
        if window in ('hanning', 'hann'):
            if hann_order:
                def window_function(m):
                    return hann_window_nth_order(m, hann_order)
            else:
                def window_function(m):
                    return np.hanning(m)
        elif window == 'hamming':
            def window_function(m):
                return np.hamming(m)
        elif window == 'tukey':
            def window_function(m):
                return sp_signal.tukey(m, tukey_alpha)
        else:
            raise ValueError(
                "Unsupported window '%s'; expected 'hann', 'hamming' or "
                "'tukey'." % window)
windows_1d = []
axes = np.array(self.axes_manager.signal_indices_in_array)
for axis, axis_index in zip(self.axes_manager.signal_axes, axes):
if isinstance(self.data, da.Array):
chunks = self.data.chunks[axis_index]
window_da = da.from_array(window_function(axis.size),
chunks=(chunks, ))
windows_1d.append(window_da)
else:
windows_1d.append(window_function(axis.size))
window_nd = outer_nd(*windows_1d).T
        # Prepare slicing so the window_nd array broadcasts against data of
        # higher dimensionality:
        if inplace:
            slice_w = []
            # Iterate over all dimensions of the data
            for i in range(self.data.ndim):
                if any(i == axes):
                    # Signal dimension: take all window elements along it
                    slice_w.append(slice(None))
                else:
                    # Navigation dimension: absent in the window, so insert a
                    # new axis for broadcasting
                    slice_w.append(None)
self.data = self.data * window_nd[tuple(slice_w)]
self.events.data_changed.trigger(obj=self)
else:
return self * window_nd
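    # A minimal broadcasting sketch for the in-place branch above
    # (illustrative shapes only): for data of shape (10, 256, 256) with
    # signal axes (1, 2), ``slice_w`` becomes
    # [None, slice(None), slice(None)], so the (256, 256) window is applied
    # as ``window_nd[None, :, :]`` and broadcast over the navigation axis.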
def _check_navigation_mask(self, mask):
"""
Check the shape of the navigation mask.
Parameters
----------
mask : numpy array or BaseSignal.
Mask to check the shape.
Raises
------
ValueError
If shape doesn't match the shape of the navigation dimension.
Returns
-------
None.
"""
if isinstance(mask, BaseSignal):
if mask.axes_manager.signal_dimension != 0:
raise ValueError("The navigation mask signal must have the "
"`signal_dimension` equal to 0.")
elif (mask.axes_manager.navigation_shape !=
self.axes_manager.navigation_shape):
raise ValueError("The navigation mask signal must have the "
"same `navigation_shape` as the current "
"signal.")
if isinstance(mask, np.ndarray) and (
mask.shape != self.axes_manager.navigation_shape):
raise ValueError("The shape of the navigation mask array must "
"match `navigation_shape`.")
def _check_signal_mask(self, mask):
"""
Check the shape of the signal mask.
Parameters
----------
mask : numpy array or BaseSignal.
Mask to check the shape.
Raises
------
ValueError
If shape doesn't match the shape of the signal dimension.
Returns
-------
None.
"""
if isinstance(mask, BaseSignal):
if mask.axes_manager.navigation_dimension != 0:
raise ValueError("The signal mask signal must have the "
"`navigation_dimension` equal to 0.")
elif (mask.axes_manager.signal_shape !=
self.axes_manager.signal_shape):
raise ValueError("The signal mask signal must have the same "
"`signal_shape` as the current signal.")
if isinstance(mask, np.ndarray) and (
mask.shape != self.axes_manager.signal_shape):
raise ValueError("The shape of signal mask array must match "
"`signal_shape`.")
ARITHMETIC_OPERATORS = (
"__add__",
"__sub__",
"__mul__",
"__floordiv__",
"__mod__",
"__divmod__",
"__pow__",
"__lshift__",
"__rshift__",
"__and__",
"__xor__",
"__or__",
"__mod__",
"__truediv__",
)
INPLACE_OPERATORS = (
"__iadd__",
"__isub__",
"__imul__",
"__itruediv__",
"__ifloordiv__",
"__imod__",
"__ipow__",
"__ilshift__",
"__irshift__",
"__iand__",
"__ixor__",
"__ior__",
)
COMPARISON_OPERATORS = (
"__lt__",
"__le__",
"__eq__",
"__ne__",
"__ge__",
"__gt__",
)
UNARY_OPERATORS = (
"__neg__",
"__pos__",
"__abs__",
"__invert__",
)
for name in ARITHMETIC_OPERATORS + INPLACE_OPERATORS + COMPARISON_OPERATORS:
exec(
("def %s(self, other):\n" % name) +
(" return self._binary_operator_ruler(other, \'%s\')\n" %
name))
exec("%s.__doc__ = np.ndarray.%s.__doc__" % (name, name))
exec("setattr(BaseSignal, \'%s\', %s)" % (name, name))
# The following commented line enables the operators with swapped
# operands. They should be defined only for commutative operators
# but for simplicity we don't support this at all atm.
# exec("setattr(BaseSignal, \'%s\', %s)" % (name[:2] + "r" + name[2:],
# name))
# Implement unary arithmetic operations
for name in UNARY_OPERATORS:
exec(
("def %s(self):" % name) +
(" return self._unary_operator_ruler(\'%s\')" % name))
exec("%s.__doc__ = int.%s.__doc__" % (name, name))
exec("setattr(BaseSignal, \'%s\', %s)" % (name, name))
|
jat255/hyperspy
|
hyperspy/signal.py
|
Python
|
gpl-3.0
| 259,911
|
[
"Gaussian"
] |
2a7d92f6f39231ad99e1f11a8ad5ab397ab29d7541805e3cd75a7cdf657c54b4
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: Qiming Sun <osirpt.sun@gmail.com>
# Junzi Liu <latrix1247@gmail.com>
# Susi Lehtola <susi.lehtola@gmail.com>
import copy
from functools import reduce
import numpy
import scipy.linalg
from pyscf import lib
from pyscf.gto import mole
from pyscf.lib import logger
from pyscf.lib.scipy_helper import pivoted_cholesky
from pyscf.scf import hf
from pyscf import __config__
LINEAR_DEP_THRESHOLD = getattr(__config__, 'scf_addons_remove_linear_dep_threshold', 1e-8)
CHOLESKY_THRESHOLD = getattr(__config__, 'scf_addons_cholesky_threshold', 1e-10)
FORCE_PIVOTED_CHOLESKY = getattr(__config__, 'scf_addons_force_cholesky', False)
LINEAR_DEP_TRIGGER = getattr(__config__, 'scf_addons_remove_linear_dep_trigger', 1e-10)
def smearing_(*args, **kwargs):
from pyscf.pbc.scf.addons import smearing_
return smearing_(*args, **kwargs)
def frac_occ_(mf, tol=1e-3):
'''
    Addon for SCF methods to assign fractional occupancy to degenerate
    occupied HOMOs.
Examples::
>>> mf = gto.M(atom='O 0 0 0; O 0 0 1', verbose=4).RHF()
>>> mf = scf.addons.frac_occ(mf)
>>> mf.run()
'''
from pyscf.scf import uhf, rohf
old_get_occ = mf.get_occ
mol = mf.mol
def guess_occ(mo_energy, nocc):
mo_occ = numpy.zeros_like(mo_energy)
if nocc:
sorted_idx = numpy.argsort(mo_energy)
homo = mo_energy[sorted_idx[nocc-1]]
lumo = mo_energy[sorted_idx[nocc]]
frac_occ_lst = abs(mo_energy - homo) < tol
integer_occ_lst = (mo_energy <= homo) & (~frac_occ_lst)
mo_occ[integer_occ_lst] = 1
degen = numpy.count_nonzero(frac_occ_lst)
frac = nocc - numpy.count_nonzero(integer_occ_lst)
mo_occ[frac_occ_lst] = float(frac) / degen
else:
homo = 0.0
lumo = 0.0
frac_occ_lst = numpy.zeros_like(mo_energy, dtype=bool)
return mo_occ, numpy.where(frac_occ_lst)[0], homo, lumo
get_grad = None
if isinstance(mf, uhf.UHF):
def get_occ(mo_energy, mo_coeff=None):
nocca, noccb = mol.nelec
mo_occa, frac_lsta, homoa, lumoa = guess_occ(mo_energy[0], nocca)
mo_occb, frac_lstb, homob, lumob = guess_occ(mo_energy[1], noccb)
if abs(homoa - lumoa) < tol or abs(homob - lumob) < tol:
mo_occ = numpy.array([mo_occa, mo_occb])
if len(frac_lstb):
logger.warn(mf, 'fraction occ = %6g for alpha orbitals %s '
'%6g for beta orbitals %s',
mo_occa[frac_lsta[0]], frac_lsta,
mo_occb[frac_lstb[0]], frac_lstb)
logger.info(mf, ' alpha HOMO = %.12g LUMO = %.12g', homoa, lumoa)
logger.info(mf, ' beta HOMO = %.12g LUMO = %.12g', homob, lumob)
logger.debug(mf, ' alpha mo_energy = %s', mo_energy[0])
logger.debug(mf, ' beta mo_energy = %s', mo_energy[1])
else:
logger.warn(mf, 'fraction occ = %6g for alpha orbitals %s ',
mo_occa[frac_lsta[0]], frac_lsta)
logger.info(mf, ' alpha HOMO = %.12g LUMO = %.12g', homoa, lumoa)
logger.debug(mf, ' alpha mo_energy = %s', mo_energy[0])
else:
mo_occ = old_get_occ(mo_energy, mo_coeff)
return mo_occ
elif isinstance(mf, rohf.ROHF):
def get_occ(mo_energy, mo_coeff=None):
nocca, noccb = mol.nelec
mo_occa, frac_lsta, homoa, lumoa = guess_occ(mo_energy, nocca)
mo_occb, frac_lstb, homob, lumob = guess_occ(mo_energy, noccb)
if abs(homoa - lumoa) < tol or abs(homob - lumob) < tol:
mo_occ = mo_occa + mo_occb
if len(frac_lstb):
logger.warn(mf, 'fraction occ = %6g for alpha orbitals %s '
'%6g for beta orbitals %s',
mo_occa[frac_lsta[0]], frac_lsta,
mo_occb[frac_lstb[0]], frac_lstb)
logger.info(mf, ' HOMO = %.12g LUMO = %.12g', homoa, lumoa)
logger.debug(mf, ' mo_energy = %s', mo_energy)
else:
logger.warn(mf, 'fraction occ = %6g for alpha orbitals %s ',
mo_occa[frac_lsta[0]], frac_lsta)
logger.info(mf, ' HOMO = %.12g LUMO = %.12g', homoa, lumoa)
logger.debug(mf, ' mo_energy = %s', mo_energy)
else:
mo_occ = old_get_occ(mo_energy, mo_coeff)
return mo_occ
def get_grad(mo_coeff, mo_occ, fock):
occidxa = mo_occ > 0
occidxb = mo_occ > 1
viridxa = ~occidxa
viridxb = ~occidxb
uniq_var_a = viridxa.reshape(-1,1) & occidxa
uniq_var_b = viridxb.reshape(-1,1) & occidxb
if getattr(fock, 'focka', None) is not None:
focka = fock.focka
fockb = fock.fockb
elif getattr(fock, 'ndim', None) == 3:
focka, fockb = fock
else:
focka = fockb = fock
focka = reduce(numpy.dot, (mo_coeff.T.conj(), focka, mo_coeff))
fockb = reduce(numpy.dot, (mo_coeff.T.conj(), fockb, mo_coeff))
g = numpy.zeros_like(focka)
g[uniq_var_a] = focka[uniq_var_a]
g[uniq_var_b] += fockb[uniq_var_b]
return g[uniq_var_a | uniq_var_b]
else: # RHF
def get_occ(mo_energy, mo_coeff=None):
nocc = (mol.nelectron+1) // 2 # n_docc + n_socc
mo_occ, frac_lst, homo, lumo = guess_occ(mo_energy, nocc)
n_docc = mol.nelectron // 2
n_socc = nocc - n_docc
if abs(homo - lumo) < tol or n_socc:
mo_occ *= 2
degen = len(frac_lst)
mo_occ[frac_lst] -= float(n_socc) / degen
logger.warn(mf, 'fraction occ = %6g for orbitals %s',
mo_occ[frac_lst[0]], frac_lst)
logger.info(mf, 'HOMO = %.12g LUMO = %.12g', homo, lumo)
logger.debug(mf, ' mo_energy = %s', mo_energy)
else:
mo_occ = old_get_occ(mo_energy, mo_coeff)
return mo_occ
mf.get_occ = get_occ
if get_grad is not None:
mf.get_grad = get_grad
return mf
frac_occ = frac_occ_
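# Illustrative sketch of the logic above (hypothetical numbers, not executed):
# for an RHF-like system with 7 electrons, nocc = (7+1)//2 = 4. With
#
#     mo_energy = numpy.array([-1.0, -0.5, -0.2, -0.2, 0.3])
#
# guess_occ marks orbitals 0-1 as integer-occupied and the degenerate pair
# 2-3 as fractional (frac/degen = 2/2 = 1 each); the RHF get_occ then doubles
# the occupations and removes the single open-shell electron evenly from the
# degenerate pair, giving mo_occ = [2, 2, 1.5, 1.5, 0].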
def dynamic_occ_(mf, tol=1e-3):
assert(isinstance(mf, hf.RHF))
old_get_occ = mf.get_occ
def get_occ(mo_energy, mo_coeff=None):
mol = mf.mol
nocc = mol.nelectron // 2
sort_mo_energy = numpy.sort(mo_energy)
lumo = sort_mo_energy[nocc]
if abs(sort_mo_energy[nocc-1] - lumo) < tol:
mo_occ = numpy.zeros_like(mo_energy)
mo_occ[mo_energy<lumo] = 2
lst = abs(mo_energy - lumo) < tol
mo_occ[lst] = 0
logger.warn(mf, 'set charge = %d', mol.charge+int(lst.sum())*2)
logger.info(mf, 'HOMO = %.12g LUMO = %.12g',
sort_mo_energy[nocc-1], sort_mo_energy[nocc])
logger.debug(mf, ' mo_energy = %s', sort_mo_energy)
else:
mo_occ = old_get_occ(mo_energy, mo_coeff)
return mo_occ
mf.get_occ = get_occ
return mf
dynamic_occ = dynamic_occ_
def dynamic_level_shift_(mf, factor=1.):
'''Dynamically change the level shift in each SCF cycle. The level shift
value is set to (HF energy change * factor)
'''
old_get_fock = mf.get_fock
last_e = [None]
def get_fock(h1e, s1e, vhf, dm, cycle=-1, diis=None,
diis_start_cycle=None, level_shift_factor=None, damp_factor=None):
if cycle >= 0 or diis is not None:
ehf =(numpy.einsum('ij,ji', h1e, dm) +
numpy.einsum('ij,ji', vhf, dm) * .5)
if last_e[0] is not None:
level_shift_factor = abs(ehf-last_e[0]) * factor
logger.info(mf, 'Set level shift to %g', level_shift_factor)
last_e[0] = ehf
return old_get_fock(h1e, s1e, vhf, dm, cycle, diis, diis_start_cycle,
level_shift_factor, damp_factor)
mf.get_fock = get_fock
return mf
dynamic_level_shift = dynamic_level_shift_
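# Usage sketch (hypothetical mol/mean-field objects, not executed here):
#
#     mf = dynamic_level_shift_(scf.RHF(mol), factor=0.5)
#     mf.kernel()
#
# On every cycle the shift is set to |E_HF(n) - E_HF(n-1)| * factor, so it
# decays automatically as the SCF approaches convergence.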
def float_occ_(mf):
'''
    For UHF, allow the Sz value to change during the SCF iterations.
    Determine the occupations of alpha and beta electrons based on the energy
    spectrum.
'''
from pyscf.scf import uhf
assert(isinstance(mf, uhf.UHF))
def get_occ(mo_energy, mo_coeff=None):
mol = mf.mol
ee = numpy.sort(numpy.hstack(mo_energy))
n_a = numpy.count_nonzero(mo_energy[0]<(ee[mol.nelectron-1]+1e-3))
n_b = mol.nelectron - n_a
if mf.nelec is None:
nelec = mf.mol.nelec
else:
nelec = mf.nelec
if n_a != nelec[0]:
logger.info(mf, 'change num. alpha/beta electrons '
' %d / %d -> %d / %d',
nelec[0], nelec[1], n_a, n_b)
mf.nelec = (n_a, n_b)
return uhf.UHF.get_occ(mf, mo_energy, mo_coeff)
mf.get_occ = get_occ
return mf
dynamic_sz_ = float_occ = float_occ_
def follow_state_(mf, occorb=None):
occstat = [occorb]
old_get_occ = mf.get_occ
def get_occ(mo_energy, mo_coeff=None):
if occstat[0] is None:
mo_occ = old_get_occ(mo_energy, mo_coeff)
else:
mo_occ = numpy.zeros_like(mo_energy)
s = reduce(numpy.dot, (occstat[0].T, mf.get_ovlp(), mo_coeff))
nocc = mf.mol.nelectron // 2
#choose a subset of mo_coeff, which maximizes <old|now>
idx = numpy.argsort(numpy.einsum('ij,ij->j', s, s))
mo_occ[idx[-nocc:]] = 2
logger.debug(mf, ' mo_occ = %s', mo_occ)
logger.debug(mf, ' mo_energy = %s', mo_energy)
occstat[0] = mo_coeff[:,mo_occ>0]
return mo_occ
mf.get_occ = get_occ
return mf
follow_state = follow_state_
def mom_occ_(mf, occorb, setocc):
'''Use maximum overlap method to determine occupation number for each orbital in every
iteration. It can be applied to unrestricted HF/KS and restricted open-shell
HF/KS.'''
from pyscf.scf import uhf, rohf
if isinstance(mf, uhf.UHF):
coef_occ_a = occorb[0][:, setocc[0]>0]
coef_occ_b = occorb[1][:, setocc[1]>0]
elif isinstance(mf, rohf.ROHF):
if mf.mol.spin != (numpy.sum(setocc[0]) - numpy.sum(setocc[1])):
raise ValueError('Wrong occupation setting for restricted open-shell calculation.')
coef_occ_a = occorb[:, setocc[0]>0]
coef_occ_b = occorb[:, setocc[1]>0]
else:
raise RuntimeError('Cannot support this class of instance %s' % mf)
log = logger.Logger(mf.stdout, mf.verbose)
def get_occ(mo_energy=None, mo_coeff=None):
if mo_energy is None: mo_energy = mf.mo_energy
if mo_coeff is None: mo_coeff = mf.mo_coeff
if isinstance(mf, rohf.ROHF): mo_coeff = numpy.array([mo_coeff, mo_coeff])
mo_occ = numpy.zeros_like(setocc)
nocc_a = int(numpy.sum(setocc[0]))
nocc_b = int(numpy.sum(setocc[1]))
s_a = reduce(numpy.dot, (coef_occ_a.T, mf.get_ovlp(), mo_coeff[0]))
s_b = reduce(numpy.dot, (coef_occ_b.T, mf.get_ovlp(), mo_coeff[1]))
#choose a subset of mo_coeff, which maximizes <old|now>
idx_a = numpy.argsort(numpy.einsum('ij,ij->j', s_a, s_a))[::-1]
idx_b = numpy.argsort(numpy.einsum('ij,ij->j', s_b, s_b))[::-1]
mo_occ[0][idx_a[:nocc_a]] = 1.
mo_occ[1][idx_b[:nocc_b]] = 1.
log.debug(' New alpha occ pattern: %s', mo_occ[0])
log.debug(' New beta occ pattern: %s', mo_occ[1])
if isinstance(mf.mo_energy, numpy.ndarray) and mf.mo_energy.ndim == 1:
log.debug1(' Current mo_energy(sorted) = %s', mo_energy)
else:
log.debug1(' Current alpha mo_energy(sorted) = %s', mo_energy[0])
log.debug1(' Current beta mo_energy(sorted) = %s', mo_energy[1])
if (int(numpy.sum(mo_occ[0])) != nocc_a):
log.error('mom alpha electron occupation numbers do not match: %d, %d',
nocc_a, int(numpy.sum(mo_occ[0])))
if (int(numpy.sum(mo_occ[1])) != nocc_b):
log.error('mom beta electron occupation numbers do not match: %d, %d',
nocc_b, int(numpy.sum(mo_occ[1])))
#output 1-dimension occupation number for restricted open-shell
if isinstance(mf, rohf.ROHF): mo_occ = mo_occ[0, :] + mo_occ[1, :]
return mo_occ
mf.get_occ = get_occ
return mf
mom_occ = mom_occ_
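# Usage sketch for a delta-SCF style calculation with MOM (hypothetical
# objects and indices, not executed): start from converged ground-state
# orbitals, swap an occupation, and let MOM keep the hole from collapsing:
#
#     occ = mf_gs.mo_occ.copy()
#     occ[0][homo], occ[0][lumo] = 0., 1.   # alpha HOMO -> LUMO excitation
#     mf_es = mom_occ_(scf.UHF(mol), mf_gs.mo_coeff, occ)
#     mf_es.kernel(mf_gs.make_rdm1(mf_gs.mo_coeff, occ))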
def project_mo_nr2nr(mol1, mo1, mol2):
r''' Project orbital coefficients from basis set 1 (C1 for mol1) to basis
set 2 (C2 for mol2).
.. math::
|\psi1\rangle = |AO1\rangle C1
        |\psi2\rangle = P |\psi1\rangle = |AO2\rangle S^{-1}\langle AO2|AO1\rangle C1 = |AO2\rangle C2
C2 = S^{-1}\langle AO2|AO1\rangle C1
There are three relevant functions:
:func:`project_mo_nr2nr` is the projection for non-relativistic (scalar) basis.
:func:`project_mo_nr2r` projects from non-relativistic to relativistic basis.
:func:`project_mo_r2r` is the projection between relativistic (spinor) basis.
'''
s22 = mol2.intor_symmetric('int1e_ovlp')
s21 = mole.intor_cross('int1e_ovlp', mol2, mol1)
if isinstance(mo1, numpy.ndarray) and mo1.ndim == 2:
return lib.cho_solve(s22, numpy.dot(s21, mo1), strict_sym_pos=False)
else:
return [lib.cho_solve(s22, numpy.dot(s21, x), strict_sym_pos=False)
for x in mo1]
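# Sanity sketch for the projection above (hypothetical mol1/mol2 objects,
# not executed): the returned C2 solves S22 C2 = S21 C1, i.e.
#
#     mo2 = project_mo_nr2nr(mol1, mo1, mol2)
#     s22 = mol2.intor_symmetric('int1e_ovlp')
#     s21 = mole.intor_cross('int1e_ovlp', mol2, mol1)
#     assert numpy.allclose(s22.dot(mo2), s21.dot(mo1))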
@lib.with_doc(project_mo_nr2nr.__doc__)
def project_mo_nr2r(mol1, mo1, mol2):
assert(not mol1.cart)
s22 = mol2.intor_symmetric('int1e_ovlp_spinor')
s21 = mole.intor_cross('int1e_ovlp_sph', mol2, mol1)
ua, ub = mol2.sph2spinor_coeff()
s21 = numpy.dot(ua.T.conj(), s21) + numpy.dot(ub.T.conj(), s21) # (*)
# mo2: alpha, beta have been summed in Eq. (*)
# so DM = mo2[:,:nocc] * 1 * mo2[:,:nocc].H
if isinstance(mo1, numpy.ndarray) and mo1.ndim == 2:
mo2 = numpy.dot(s21, mo1)
return lib.cho_solve(s22, mo2, strict_sym_pos=False)
else:
return [lib.cho_solve(s22, numpy.dot(s21, x), strict_sym_pos=False)
for x in mo1]
@lib.with_doc(project_mo_nr2nr.__doc__)
def project_mo_r2r(mol1, mo1, mol2):
s22 = mol2.intor_symmetric('int1e_ovlp_spinor')
t22 = mol2.intor_symmetric('int1e_spsp_spinor')
s21 = mole.intor_cross('int1e_ovlp_spinor', mol2, mol1)
t21 = mole.intor_cross('int1e_spsp_spinor', mol2, mol1)
n2c = s21.shape[1]
pl = lib.cho_solve(s22, s21, strict_sym_pos=False)
ps = lib.cho_solve(t22, t21, strict_sym_pos=False)
if isinstance(mo1, numpy.ndarray) and mo1.ndim == 2:
return numpy.vstack((numpy.dot(pl, mo1[:n2c]),
numpy.dot(ps, mo1[n2c:])))
else:
return [numpy.vstack((numpy.dot(pl, x[:n2c]),
numpy.dot(ps, x[n2c:]))) for x in mo1]
def project_dm_nr2nr(mol1, dm1, mol2):
r''' Project density matrix representation from basis set 1 (mol1) to basis
set 2 (mol2).
.. math::
|AO2\rangle DM_AO2 \langle AO2|
        = |AO2\rangle P DM_AO1 P^\dagger \langle AO2|
        DM_AO2 = P DM_AO1 P^\dagger
P = S_{AO2}^{-1}\langle AO2|AO1\rangle
There are three relevant functions:
:func:`project_dm_nr2nr` is the projection for non-relativistic (scalar) basis.
:func:`project_dm_nr2r` projects from non-relativistic to relativistic basis.
:func:`project_dm_r2r` is the projection between relativistic (spinor) basis.
'''
s22 = mol2.intor_symmetric('int1e_ovlp')
s21 = mole.intor_cross('int1e_ovlp', mol2, mol1)
p21 = lib.cho_solve(s22, s21, strict_sym_pos=False)
if isinstance(dm1, numpy.ndarray) and dm1.ndim == 2:
return reduce(numpy.dot, (p21, dm1, p21.conj().T))
else:
return lib.einsum('pi,nij,qj->npq', p21, dm1, p21.conj())
@lib.with_doc(project_dm_nr2nr.__doc__)
def project_dm_nr2r(mol1, dm1, mol2):
assert(not mol1.cart)
s22 = mol2.intor_symmetric('int1e_ovlp_spinor')
s21 = mole.intor_cross('int1e_ovlp_sph', mol2, mol1)
ua, ub = mol2.sph2spinor_coeff()
s21 = numpy.dot(ua.T.conj(), s21) + numpy.dot(ub.T.conj(), s21) # (*)
# mo2: alpha, beta have been summed in Eq. (*)
# so DM = mo2[:,:nocc] * 1 * mo2[:,:nocc].H
p21 = lib.cho_solve(s22, s21, strict_sym_pos=False)
if isinstance(dm1, numpy.ndarray) and dm1.ndim == 2:
return reduce(numpy.dot, (p21, dm1, p21.conj().T))
else:
return lib.einsum('pi,nij,qj->npq', p21, dm1, p21.conj())
@lib.with_doc(project_dm_nr2nr.__doc__)
def project_dm_r2r(mol1, dm1, mol2):
s22 = mol2.intor_symmetric('int1e_ovlp_spinor')
t22 = mol2.intor_symmetric('int1e_spsp_spinor')
s21 = mole.intor_cross('int1e_ovlp_spinor', mol2, mol1)
t21 = mole.intor_cross('int1e_spsp_spinor', mol2, mol1)
pl = lib.cho_solve(s22, s21, strict_sym_pos=False)
ps = lib.cho_solve(t22, t21, strict_sym_pos=False)
p21 = scipy.linalg.block_diag(pl, ps)
if isinstance(dm1, numpy.ndarray) and dm1.ndim == 2:
return reduce(numpy.dot, (p21, dm1, p21.conj().T))
else:
return lib.einsum('pi,nij,qj->npq', p21, dm1, p21.conj())
def canonical_orth_(S, thr=1e-7):
'''Löwdin's canonical orthogonalization'''
# Ensure the basis functions are normalized (symmetry-adapted ones are not!)
normlz = numpy.power(numpy.diag(S), -0.5)
Snorm = numpy.dot(numpy.diag(normlz), numpy.dot(S, numpy.diag(normlz)))
# Form vectors for normalized overlap matrix
Sval, Svec = numpy.linalg.eigh(Snorm)
X = Svec[:,Sval>=thr] / numpy.sqrt(Sval[Sval>=thr])
# Plug normalization back in
X = numpy.dot(numpy.diag(normlz), X)
return X
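# The X returned above satisfies X^H S X = I on the retained subspace; as a
# quick numerical check (illustrative only):
#
#     X = canonical_orth_(S, thr=1e-7)
#     assert numpy.allclose(reduce(numpy.dot, (X.conj().T, S, X)),
#                           numpy.eye(X.shape[1]))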
def partial_cholesky_orth_(S, canthr=1e-7, cholthr=1e-9):
'''Partial Cholesky orthogonalization for curing overcompleteness.
References:
Susi Lehtola, Curing basis set overcompleteness with pivoted
Cholesky decompositions, J. Chem. Phys. 151, 241102 (2019),
doi:10.1063/1.5139948.
Susi Lehtola, Accurate reproduction of strongly repulsive
interatomic potentials, Phys. Rev. A 101, 032504 (2020),
doi:10.1103/PhysRevA.101.032504.
'''
# Ensure the basis functions are normalized
normlz = numpy.power(numpy.diag(S), -0.5)
Snorm = numpy.dot(numpy.diag(normlz), numpy.dot(S, numpy.diag(normlz)))
# Sort the basis functions according to the Gershgorin circle
# theorem so that the Cholesky routine is well-initialized
odS = numpy.abs(Snorm)
numpy.fill_diagonal(odS, 0.0)
odSs = numpy.sum(odS, axis=0)
sortidx = numpy.argsort(odSs)
# Run the pivoted Cholesky decomposition
Ssort = Snorm[numpy.ix_(sortidx, sortidx)].copy()
c, piv, r_c = pivoted_cholesky(Ssort, tol=cholthr)
# The functions we're going to use are given by the pivot as
idx = sortidx[piv[:r_c]]
# Get the (un-normalized) sub-basis
Ssub = S[numpy.ix_(idx, idx)].copy()
# Orthogonalize sub-basis
Xsub = canonical_orth_(Ssub, thr=canthr)
# Full X
X = numpy.zeros((S.shape[0], Xsub.shape[1]))
X[idx,:] = Xsub
return X
def remove_linear_dep_(mf, threshold=LINEAR_DEP_THRESHOLD,
lindep=LINEAR_DEP_TRIGGER,
cholesky_threshold=CHOLESKY_THRESHOLD,
force_pivoted_cholesky=FORCE_PIVOTED_CHOLESKY):
'''
Args:
threshold : float
The threshold under which the eigenvalues of the overlap matrix are
discarded to avoid numerical instability.
lindep : float
The threshold that triggers the special treatment of the linear
dependence issue.
'''
s = mf.get_ovlp()
cond = numpy.max(lib.cond(s))
if cond < 1./lindep and not force_pivoted_cholesky:
return mf
logger.info(mf, 'Applying remove_linear_dep_ on SCF object.')
logger.debug(mf, 'Overlap condition number %g', cond)
if(cond < 1./numpy.finfo(s.dtype).eps and not force_pivoted_cholesky):
logger.info(mf, 'Using canonical orthogonalization with threshold {}'.format(threshold))
def eigh(h, s):
x = canonical_orth_(s, threshold)
xhx = reduce(numpy.dot, (x.T.conj(), h, x))
e, c = numpy.linalg.eigh(xhx)
c = numpy.dot(x, c)
return e, c
mf._eigh = eigh
else:
logger.info(mf, 'Using partial Cholesky orthogonalization '
'(doi:10.1063/1.5139948, doi:10.1103/PhysRevA.101.032504)')
logger.info(mf, 'Using threshold {} for pivoted Cholesky'.format(cholesky_threshold))
logger.info(mf, 'Using threshold {} to orthogonalize the subbasis'.format(threshold))
def eigh(h, s):
x = partial_cholesky_orth_(s, canthr=threshold, cholthr=cholesky_threshold)
xhx = reduce(numpy.dot, (x.T.conj(), h, x))
e, c = numpy.linalg.eigh(xhx)
c = numpy.dot(x, c)
return e, c
mf._eigh = eigh
return mf
remove_linear_dep = remove_linear_dep_
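# Usage sketch (hypothetical, not executed): for a basis with near linear
# dependencies, e.g. heavily augmented diffuse sets,
#
#     mf = remove_linear_dep_(scf.RHF(mol))
#     mf.kernel()
#
# replaces mf._eigh so the Roothaan equations are solved in the
# well-conditioned subbasis produced by canonical or partial Cholesky
# orthogonalization.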
def convert_to_uhf(mf, out=None, remove_df=False):
'''Convert the given mean-field object to the unrestricted HF/KS object
Note this conversion only changes the class of the mean-field object.
    The total energy and wave-function are the same as those of the input
    mf object. If mf is a second-order SCF (SOSCF) object, the SOSCF layer
    will be discarded and its underlying SCF object mf._scf converted.
Args:
mf : SCF object
Kwargs
remove_df : bool
Whether to convert the DF-SCF object to the normal SCF object.
This conversion is not applied by default.
Returns:
An unrestricted SCF object
'''
from pyscf import scf
from pyscf import dft
assert(isinstance(mf, hf.SCF))
logger.debug(mf, 'Converting %s to UHF', mf.__class__)
def update_mo_(mf, mf1):
if mf.mo_energy is not None:
if isinstance(mf, scf.uhf.UHF):
mf1.mo_occ = mf.mo_occ
mf1.mo_coeff = mf.mo_coeff
mf1.mo_energy = mf.mo_energy
elif getattr(mf, 'kpts', None) is None: # RHF/ROHF
mf1.mo_occ = numpy.array((mf.mo_occ>0, mf.mo_occ==2), dtype=numpy.double)
# ROHF orbital energies, not canonical UHF orbital energies
mo_ea = getattr(mf.mo_energy, 'mo_ea', mf.mo_energy)
mo_eb = getattr(mf.mo_energy, 'mo_eb', mf.mo_energy)
mf1.mo_energy = (mo_ea, mo_eb)
mf1.mo_coeff = (mf.mo_coeff, mf.mo_coeff)
else: # This to handle KRHF object
mf1.mo_occ = ([numpy.asarray(occ> 0, dtype=numpy.double)
for occ in mf.mo_occ],
[numpy.asarray(occ==2, dtype=numpy.double)
for occ in mf.mo_occ])
mo_ea = getattr(mf.mo_energy, 'mo_ea', mf.mo_energy)
mo_eb = getattr(mf.mo_energy, 'mo_eb', mf.mo_energy)
mf1.mo_energy = (mo_ea, mo_eb)
mf1.mo_coeff = (mf.mo_coeff, mf.mo_coeff)
return mf1
if isinstance(mf, scf.ghf.GHF):
raise NotImplementedError
elif out is not None:
assert(isinstance(out, scf.uhf.UHF))
out = _update_mf_without_soscf(mf, out, remove_df)
elif isinstance(mf, scf.uhf.UHF):
# Remove with_df for SOSCF method because the post-HF code checks the
        # attribute .with_df to identify whether an SCF object is a DF-SCF method.
        # with_df in SOSCF is used in the orbital hessian approximation only. For
        # the returned SCF object, whether with_df exists in SOSCF has no effect
        # on the mean-field energy and other properties.
if getattr(mf, '_scf', None):
return _update_mf_without_soscf(mf, copy.copy(mf._scf), remove_df)
else:
return copy.copy(mf)
else:
known_cls = {scf.hf.RHF : scf.uhf.UHF,
scf.rohf.ROHF : scf.uhf.UHF,
scf.hf_symm.RHF : scf.uhf_symm.UHF,
scf.hf_symm.ROHF : scf.uhf_symm.UHF,
dft.rks.RKS : dft.uks.UKS,
dft.roks.ROKS : dft.uks.UKS,
dft.rks_symm.RKS : dft.uks_symm.UKS,
dft.rks_symm.ROKS : dft.uks_symm.UKS}
out = _object_without_soscf(mf, known_cls, remove_df)
return update_mo_(mf, out)
def _object_without_soscf(mf, known_class, remove_df=False):
from pyscf.soscf import newton_ah
sub_classes = []
obj = None
for i, cls in enumerate(mf.__class__.__mro__):
if cls in known_class:
obj = known_class[cls](mf.mol)
break
else:
sub_classes.append(cls)
if obj is None:
raise NotImplementedError(
"Incompatible object types. Mean-field `mf` class not found in "
"`known_class` type.\n\nmf = '%s'\n\nknown_class = '%s'" %
(mf.__class__.__mro__, known_class))
if isinstance(mf, newton_ah._CIAH_SOSCF):
remove_df = (remove_df or
# The main SCF object is not a DFHF object
not getattr(mf._scf, 'with_df', None))
# Mimic the initialization procedure to restore the Hamiltonian
for cls in reversed(sub_classes):
class_name = cls.__name__
if '_DFHF' in class_name:
if not remove_df:
obj = obj.density_fit()
elif '_SGXHF' in class_name:
if not remove_df:
obj = obj.COSX()
elif '_X2C_SCF' in class_name:
obj = obj.x2c()
elif 'WithSolvent' in class_name:
obj = obj.ddCOSMO(mf.with_solvent)
elif 'QMMM' in class_name and getattr(mf, 'mm_mol', None):
from pyscf.qmmm.itrf import qmmm_for_scf
obj = qmmm_for_scf(obj, mf.mm_mol)
elif '_DFTD3' in class_name:
from pyscf.dftd3.itrf import dftd3
obj = dftd3(obj)
return _update_mf_without_soscf(mf, obj, remove_df)
def _update_mf_without_soscf(mf, out, remove_df=False):
from pyscf.soscf import newton_ah
mf_dic = dict(mf.__dict__)
# if mf is SOSCF object, avoid to overwrite the with_df method
# FIXME: it causes bug when converting pbc-SOSCF.
if isinstance(mf, newton_ah._CIAH_SOSCF):
mf_dic.pop('with_df', None)
out.__dict__.update(mf_dic)
if remove_df and getattr(out, 'with_df', None):
delattr(out, 'with_df')
return out
def convert_to_rhf(mf, out=None, remove_df=False):
'''Convert the given mean-field object to the restricted HF/KS object
Note this conversion only changes the class of the mean-field object.
    The total energy and wave-function are the same as those of the input
    mf object. If mf is a second-order SCF (SOSCF) object, the SOSCF layer
    will be discarded and its underlying SCF object mf._scf converted.
Args:
mf : SCF object
Kwargs
remove_df : bool
Whether to convert the DF-SCF object to the normal SCF object.
This conversion is not applied by default.
Returns:
        A restricted SCF object
'''
from pyscf import scf
from pyscf import dft
assert(isinstance(mf, hf.SCF))
logger.debug(mf, 'Converting %s to RHF', mf.__class__)
def update_mo_(mf, mf1):
if mf.mo_energy is not None:
if isinstance(mf, scf.hf.RHF): # RHF/ROHF/KRHF/KROHF
mf1.mo_occ = mf.mo_occ
mf1.mo_coeff = mf.mo_coeff
mf1.mo_energy = mf.mo_energy
elif getattr(mf, 'kpts', None) is None: # UHF
mf1.mo_occ = mf.mo_occ[0] + mf.mo_occ[1]
mf1.mo_energy = mf.mo_energy[0]
mf1.mo_coeff = mf.mo_coeff[0]
if getattr(mf.mo_coeff[0], 'orbsym', None) is not None:
mf1.mo_coeff = lib.tag_array(mf1.mo_coeff, orbsym=mf.mo_coeff[0].orbsym)
else: # KUHF
mf1.mo_occ = [occa+occb for occa, occb in zip(*mf.mo_occ)]
mf1.mo_energy = mf.mo_energy[0]
mf1.mo_coeff = mf.mo_coeff[0]
return mf1
if getattr(mf, 'nelec', None) is None:
nelec = mf.mol.nelec
else:
nelec = mf.nelec
if isinstance(mf, scf.ghf.GHF):
raise NotImplementedError
elif out is not None:
assert(isinstance(out, scf.hf.RHF))
out = _update_mf_without_soscf(mf, out, remove_df)
elif (isinstance(mf, scf.hf.RHF) or
(nelec[0] != nelec[1] and isinstance(mf, scf.rohf.ROHF))):
if getattr(mf, '_scf', None):
return _update_mf_without_soscf(mf, copy.copy(mf._scf), remove_df)
else:
return copy.copy(mf)
else:
if nelec[0] == nelec[1]:
known_cls = {scf.uhf.UHF : scf.hf.RHF ,
scf.uhf_symm.UHF : scf.hf_symm.RHF ,
dft.uks.UKS : dft.rks.RKS ,
dft.uks_symm.UKS : dft.rks_symm.RKS,
scf.rohf.ROHF : scf.hf.RHF ,
scf.hf_symm.ROHF : scf.hf_symm.RHF ,
dft.roks.ROKS : dft.rks.RKS ,
dft.rks_symm.ROKS: dft.rks_symm.RKS}
else:
known_cls = {scf.uhf.UHF : scf.rohf.ROHF ,
scf.uhf_symm.UHF : scf.hf_symm.ROHF ,
dft.uks.UKS : dft.roks.ROKS ,
dft.uks_symm.UKS : dft.rks_symm.ROKS}
out = _object_without_soscf(mf, known_cls, remove_df)
return update_mo_(mf, out)
def convert_to_ghf(mf, out=None, remove_df=False):
'''Convert the given mean-field object to the generalized HF/KS object
Note this conversion only changes the class of the mean-field object.
    The total energy and wave-function are the same as those of the input
    mf object. If mf is a second-order SCF (SOSCF) object, the SOSCF layer
    will be discarded and its underlying SCF object mf._scf converted.
Args:
mf : SCF object
Kwargs
remove_df : bool
Whether to convert the DF-SCF object to the normal SCF object.
This conversion is not applied by default.
Returns:
        A generalized SCF object
'''
from pyscf import scf
from pyscf import dft
assert(isinstance(mf, hf.SCF))
logger.debug(mf, 'Converting %s to GHF', mf.__class__)
def update_mo_(mf, mf1):
if mf.mo_energy is not None:
if isinstance(mf, scf.hf.RHF): # RHF
nao, nmo = mf.mo_coeff.shape
orbspin = get_ghf_orbspin(mf.mo_energy, mf.mo_occ, True)
mf1.mo_energy = numpy.empty(nmo*2)
mf1.mo_energy[orbspin==0] = mf.mo_energy
mf1.mo_energy[orbspin==1] = mf.mo_energy
mf1.mo_occ = numpy.empty(nmo*2)
mf1.mo_occ[orbspin==0] = mf.mo_occ > 0
mf1.mo_occ[orbspin==1] = mf.mo_occ == 2
mo_coeff = numpy.zeros((nao*2,nmo*2), dtype=mf.mo_coeff.dtype)
mo_coeff[:nao,orbspin==0] = mf.mo_coeff
mo_coeff[nao:,orbspin==1] = mf.mo_coeff
if getattr(mf.mo_coeff, 'orbsym', None) is not None:
orbsym = numpy.zeros_like(orbspin)
orbsym[orbspin==0] = mf.mo_coeff.orbsym
orbsym[orbspin==1] = mf.mo_coeff.orbsym
mo_coeff = lib.tag_array(mo_coeff, orbsym=orbsym)
mf1.mo_coeff = lib.tag_array(mo_coeff, orbspin=orbspin)
else: # UHF
nao, nmo = mf.mo_coeff[0].shape
orbspin = get_ghf_orbspin(mf.mo_energy, mf.mo_occ, False)
mf1.mo_energy = numpy.empty(nmo*2)
mf1.mo_energy[orbspin==0] = mf.mo_energy[0]
mf1.mo_energy[orbspin==1] = mf.mo_energy[1]
mf1.mo_occ = numpy.empty(nmo*2)
mf1.mo_occ[orbspin==0] = mf.mo_occ[0]
mf1.mo_occ[orbspin==1] = mf.mo_occ[1]
mo_coeff = numpy.zeros((nao*2,nmo*2), dtype=mf.mo_coeff[0].dtype)
mo_coeff[:nao,orbspin==0] = mf.mo_coeff[0]
mo_coeff[nao:,orbspin==1] = mf.mo_coeff[1]
if getattr(mf.mo_coeff[0], 'orbsym', None) is not None:
orbsym = numpy.zeros_like(orbspin)
orbsym[orbspin==0] = mf.mo_coeff[0].orbsym
orbsym[orbspin==1] = mf.mo_coeff[1].orbsym
mo_coeff = lib.tag_array(mo_coeff, orbsym=orbsym)
mf1.mo_coeff = lib.tag_array(mo_coeff, orbspin=orbspin)
return mf1
if out is not None:
assert(isinstance(out, scf.ghf.GHF))
out = _update_mf_without_soscf(mf, out, remove_df)
elif isinstance(mf, scf.ghf.GHF):
if getattr(mf, '_scf', None):
return _update_mf_without_soscf(mf, copy.copy(mf._scf), remove_df)
else:
return copy.copy(mf)
else:
known_cls = {scf.hf.RHF : scf.ghf.GHF,
scf.rohf.ROHF : scf.ghf.GHF,
scf.uhf.UHF : scf.ghf.GHF,
scf.hf_symm.RHF : scf.ghf_symm.GHF,
scf.hf_symm.ROHF : scf.ghf_symm.GHF,
scf.uhf_symm.UHF : scf.ghf_symm.GHF,
dft.rks.RKS : dft.gks.GKS,
dft.roks.ROKS : dft.gks.GKS,
dft.uks.UKS : dft.gks.GKS,
dft.rks_symm.RKS : dft.gks_symm.GKS,
dft.rks_symm.ROKS : dft.gks_symm.GKS,
dft.uks_symm.UKS : dft.gks_symm.GKS}
out = _object_without_soscf(mf, known_cls, remove_df)
return update_mo_(mf, out)
def get_ghf_orbspin(mo_energy, mo_occ, is_rhf=None):
'''Spin of each GHF orbital when the GHF orbitals are converted from
RHF/UHF orbitals
For RHF orbitals, the orbspin corresponds to first occupied orbitals then
unoccupied orbitals. In the occupied orbital space, if degenerated, first
alpha then beta, last the (open-shell) singly occupied (alpha) orbitals. In
the unoccupied orbital space, first the (open-shell) unoccupied (beta)
orbitals if applicable, then alpha and beta orbitals
For UHF orbitals, the orbspin corresponds to first occupied orbitals then
unoccupied orbitals.
'''
if is_rhf is None: # guess whether the orbitals are RHF orbitals
is_rhf = mo_energy[0].ndim == 0
if is_rhf:
nmo = mo_energy.size
nocc = numpy.count_nonzero(mo_occ >0)
nvir = nmo - nocc
ndocc = numpy.count_nonzero(mo_occ==2)
nsocc = nocc - ndocc
orbspin = numpy.array([0,1]*ndocc + [0]*nsocc + [1]*nsocc + [0,1]*nvir)
else:
nmo = mo_energy[0].size
nocca = numpy.count_nonzero(mo_occ[0]>0)
nvira = nmo - nocca
noccb = numpy.count_nonzero(mo_occ[1]>0)
nvirb = nmo - noccb
# round(6) to avoid numerical uncertainty in degeneracy
es = numpy.append(mo_energy[0][mo_occ[0] >0],
mo_energy[1][mo_occ[1] >0])
oidx = numpy.argsort(es.round(6))
es = numpy.append(mo_energy[0][mo_occ[0]==0],
mo_energy[1][mo_occ[1]==0])
vidx = numpy.argsort(es.round(6))
orbspin = numpy.append(numpy.array([0]*nocca+[1]*noccb)[oidx],
numpy.array([0]*nvira+[1]*nvirb)[vidx])
return orbspin
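# Worked example for the RHF branch above (illustrative): with
# mo_occ = [2, 2, 1, 0] we get ndocc = 2, nsocc = 1, nvir = 1, so
# orbspin = [0, 1, 0, 1, 0, 1, 0, 1] -- alpha/beta pairs for the doubly
# occupied orbitals, then the open-shell alpha, its empty beta partner,
# then alpha/beta pairs for the virtuals.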
del(LINEAR_DEP_THRESHOLD, LINEAR_DEP_TRIGGER)
def fast_newton(mf, mo_coeff=None, mo_occ=None, dm0=None,
auxbasis=None, dual_basis=None, **newton_kwargs):
    '''This is a wrapper function that combines several operations. It first
    sets up an initial guess from a density-fitting calculation, then passes
    that guess to the Newton solver and runs it.
Newton solver attributes [max_cycle_inner, max_stepsize, ah_start_tol,
ah_conv_tol, ah_grad_trust_region, ...] can be passed through **newton_kwargs.
'''
import copy
from pyscf.lib import logger
from pyscf import df
from pyscf.soscf import newton_ah
if auxbasis is None:
auxbasis = df.addons.aug_etb_for_dfbasis(mf.mol, 'ahlrichs', beta=2.5)
if dual_basis:
mf1 = mf.newton()
pmol = mf1.mol = newton_ah.project_mol(mf.mol, dual_basis)
mf1 = mf1.density_fit(auxbasis)
else:
mf1 = mf.newton().density_fit(auxbasis)
mf1.with_df._compatible_format = False
mf1.direct_scf_tol = 1e-7
if getattr(mf, 'grids', None):
from pyscf.dft import gen_grid
approx_grids = gen_grid.Grids(mf.mol)
approx_grids.verbose = 0
approx_grids.level = max(0, mf.grids.level-3)
mf1.grids = approx_grids
approx_numint = copy.copy(mf._numint)
mf1._numint = approx_numint
for key in newton_kwargs:
setattr(mf1, key, newton_kwargs[key])
if mo_coeff is None or mo_occ is None:
mo_coeff, mo_occ = mf.mo_coeff, mf.mo_occ
if dm0 is not None:
mo_coeff, mo_occ = mf1.from_dm(dm0)
elif mo_coeff is None or mo_occ is None:
logger.note(mf, '========================================================')
logger.note(mf, 'Generating initial guess with DIIS-SCF for newton solver')
logger.note(mf, '========================================================')
if dual_basis:
mf0 = copy.copy(mf)
mf0.mol = pmol
mf0 = mf0.density_fit(auxbasis)
else:
mf0 = mf.density_fit(auxbasis)
mf0.direct_scf_tol = 1e-7
mf0.conv_tol = 3.
mf0.conv_tol_grad = 1.
if mf0.level_shift == 0:
mf0.level_shift = .2
if getattr(mf, 'grids', None):
mf0.grids = approx_grids
mf0._numint = approx_numint
# Note: by setting small_rho_cutoff, dft.get_veff function may overwrite
        # approx_grids and approx_numint, which would further change the
        # corresponding mf1 grids and _numint. If an initial guess dm0 or
        # mo_coeff/mo_occ were given, dft.get_veff is not executed, so more
        # grid points may remain in approx_grids.
mf0.small_rho_cutoff = mf.small_rho_cutoff * 10
mf0.kernel()
mf1.with_df = mf0.with_df
mo_coeff, mo_occ = mf0.mo_coeff, mf0.mo_occ
if dual_basis:
if mo_occ.ndim == 2:
mo_coeff =(project_mo_nr2nr(pmol, mo_coeff[0], mf.mol),
project_mo_nr2nr(pmol, mo_coeff[1], mf.mol))
else:
mo_coeff = project_mo_nr2nr(pmol, mo_coeff, mf.mol)
mo_coeff, mo_occ = mf1.from_dm(mf.make_rdm1(mo_coeff,mo_occ))
mf0 = None
logger.note(mf, '============================')
logger.note(mf, 'Generating initial guess end')
logger.note(mf, '============================')
mf1.kernel(mo_coeff, mo_occ)
mf.converged = mf1.converged
mf.e_tot = mf1.e_tot
mf.mo_energy = mf1.mo_energy
mf.mo_coeff = mf1.mo_coeff
mf.mo_occ = mf1.mo_occ
# mf = copy.copy(mf)
# def mf_kernel(*args, **kwargs):
# logger.warn(mf, "fast_newton is a wrap function to quickly setup and call Newton solver. "
# "There's no need to call kernel function again for fast_newton.")
# del(mf.kernel) # warn once and remove circular depdence
# return mf.e_tot
# mf.kernel = mf_kernel
return mf
|
sunqm/pyscf
|
pyscf/scf/addons.py
|
Python
|
apache-2.0
| 40,634
|
[
"PySCF"
] |
5e0f0bdea57896e002e9ea0a2897a9d580de460c3421ddda0c1da8566c3fc8dd
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
import sys
from os import path
import urllib
import numpy
from ocw.dataset import Bounds
import ocw.data_source.local as local
import ocw.dataset_processor as dsp
import ocw.evaluation as evaluation
import ocw.metrics as metrics
import ocw.plotter as plotter
FILE_LEADER = "http://zipper.jpl.nasa.gov/dist/"
FILE_1 = "AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_tasmax.nc"
FILE_2 = "AFRICA_UC-WRF311_CTL_ERAINT_MM_50km-rg_1989-2008_tasmax.nc"
# Download some example NetCDF files for the evaluation
################################################################################
if not path.exists(FILE_1):
urllib.urlretrieve(FILE_LEADER + FILE_1, FILE_1)
if not path.exists(FILE_2):
urllib.urlretrieve(FILE_LEADER + FILE_2, FILE_2)
# Load the example datasets into OCW Dataset objects. We want to load
# the 'tasmax' variable values. We'll also name the datasets for use
# when plotting.
################################################################################
knmi_dataset = local.load_file(FILE_1, "tasmax")
wrf_dataset = local.load_file(FILE_2, "tasmax")
knmi_dataset.name = "knmi"
wrf_dataset.name = "wrf"
# Date values from loaded datasets might not always fall on reasonable days.
# With monthly data, we could have data falling on the 1st, 15th, or some other
# day of the month. Let's fix that real quick.
################################################################################
knmi_dataset = dsp.normalize_dataset_datetimes(knmi_dataset, 'monthly')
wrf_dataset = dsp.normalize_dataset_datetimes(wrf_dataset, 'monthly')
# We're only going to run this evaluation over a year's worth of data. We'll
# make a Bounds object and use it to subset our datasets.
################################################################################
subset = Bounds(-45, 42, -24, 60, datetime.datetime(1989, 1, 1), datetime.datetime(1989, 12, 1))
knmi_dataset = dsp.subset(subset, knmi_dataset)
wrf_dataset = dsp.subset(subset, wrf_dataset)
# Temporally re-bin the data into a monthly timestep.
################################################################################
knmi_dataset = dsp.temporal_rebin(knmi_dataset, datetime.timedelta(days=30))
wrf_dataset = dsp.temporal_rebin(wrf_dataset, datetime.timedelta(days=30))
# Spatially regrid the datasets onto a 1 degree grid.
################################################################################
# Get the bounds of the reference dataset and use them to create a new
# set of lat/lon values on a 1 degree step
min_lat, max_lat, min_lon, max_lon = knmi_dataset.spatial_boundaries()
new_lons = numpy.arange(min_lon, max_lon, 1)
new_lats = numpy.arange(min_lat, max_lat, 1)
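# Note: numpy.arange excludes its stop value, so max_lat/max_lon themselves
# are not included in the new 1 degree grid.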
# Spatially regrid datasets using the new_lats, new_lons numpy arrays
knmi_dataset = dsp.spatial_regrid(knmi_dataset, new_lats, new_lons)
wrf_dataset = dsp.spatial_regrid(wrf_dataset, new_lats, new_lons)
# Load the metrics that we want to use for the evaluation.
################################################################################
taylor_diagram = metrics.SpatialPatternTaylorDiagram()
# Create our new evaluation object. The knmi dataset is the evaluation's
# reference dataset. We then provide a list of 1 or more target datasets
# to use for the evaluation. In this case, we only want to use the wrf dataset.
# Then we pass a list of all the metrics that we want to use in the evaluation.
################################################################################
test_evaluation = evaluation.Evaluation(knmi_dataset, [wrf_dataset], [taylor_diagram])
test_evaluation.run()
# Pull out the evaluation results and prepare them for drawing a Taylor diagram.
################################################################################
taylor_data = test_evaluation.results[0]
# Draw our taylor diagram!
################################################################################
plotter.draw_taylor_diagram(taylor_data,
[wrf_dataset.name],
knmi_dataset.name,
fname='taylor_plot',
fmt='png',
frameon=False)
|
MJJoyce/climate
|
examples/taylor_diagram_example.py
|
Python
|
apache-2.0
| 5,081
|
[
"NetCDF"
] |
f11e06f04dc32f411ebc7a1ba04d95781f7947b359bcbcfc861ada06d8c65a3e
|
#!/usr/bin/env python
# ________ _______________ ____________
# ___ __/__ ____(_)_ /__ /______________ ___/_ /__________________ _______ ___
# __ / __ | /| / /_ /_ __/ __/ _ \_ ___/____ \_ __/_ ___/ _ \ __ `/_ __ `__ \
# _ / __ |/ |/ /_ / / /_ / /_ / __/ / ____/ // /_ _ / / __/ /_/ /_ / / / / /
# /_/ ____/|__/ /_/ \__/ \__/ \___//_/ /____/ \__/ /_/ \___/\__,_/ /_/ /_/ /_/
#
import tweepy
import threading, logging, time
from kafka.client import KafkaClient
from kafka.consumer import SimpleConsumer
from kafka.producer import SimpleProducer
import string
######################################################################
# Authentication details. To obtain these visit dev.twitter.com
######################################################################
consumer_key = 'aybpmREJAzUkbrF2f0cWg'#eWkgf0izE2qtN8Ftk5yrVpaaI
consumer_secret = 'OUbjAwEDTSqt4hJHxOETbaNWFr6he6cMT7wqI6ooQ1w'#BYYnkSEDx463mGzIxjSifxfXN6V1ggpfJaGBKlhRpUMuQ02lBX
access_token = '1355650081-A03VSt3vdGHq6QyVS6lBd6EdsUTLcyQSXQVmS2I'#1355650081-Mq5jok7mbcrIbTpqZPcMHgWjcymqSrG1kVaut39
access_token_secret = 'UgV99l4TgdC3qv56uSwknkjB7iUfV50qMIYRkPEbrJ7AG'#QovqxQnw0hSPrKwFIYLWct3Zv4MeGMash66IaOoFyXNWs
mytopic='dna'
######################################################################
#Create a handler for the streaming data that stays open...
######################################################################
class StdOutListener(tweepy.StreamListener):
#Handler
''' Handles data received from the stream. '''
######################################################################
#For each status event
######################################################################
def on_status(self, status):
# Prints the text of the tweet
#print '%d,%d,%d,%s,%s' % (status.user.followers_count, status.user.friends_count,status.user.statuses_count, status.user.id_str, status.user.screen_name)
# Schema changed to add the tweet text
print '%d,%d,%d,%s,%s' % (status.user.followers_count, status.user.friends_count,status.user.statuses_count, status.text, status.user.screen_name)
message = status.text + ',' + status.user.screen_name
msg = filter(lambda x: x in string.printable, message)
try:
producer.send_messages(mytopic, str(msg))
except Exception, e:
return True
return True
######################################################################
    #Suppress failures to keep the demo running... In a production setting,
    #handle these with a separate handler.
######################################################################
def on_error(self, status_code):
print('Got an error with status code: ' + str(status_code))
return True # To continue listening
def on_timeout(self):
print('Timeout...')
return True # To continue listening
######################################################################
#Main Loop Init
######################################################################
if __name__ == '__main__':
listener = StdOutListener()
#sign oath cert
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
    #uncomment to use the api in the stream for data send/retrieve algorithms
#api = tweepy.API(auth)
stream = tweepy.Stream(auth, listener)
######################################################################
    #Sample delivers a stream of 1% (random selection) of all tweets
######################################################################
client = KafkaClient("localhost:9092")
producer = SimpleProducer(client)
stream.sample()
######################################################################
#Custom Filter rules pull all traffic for those filters in real time.
    #Below are some examples; add or remove as needed...
######################################################################
#A Good demo stream of reasonable amount
#stream.filter(track=['actian', 'BigData', 'Hadoop', 'Predictive', 'Quantum', 'bigdata', 'Analytics', 'IoT'])
#Hadoop Summit following
#stream.filter(track=['actian', 'hadoop', 'hadoopsummit'])
|
ActianCorp/twitter-streaming
|
python/twitterStream.py
|
Python
|
apache-2.0
| 4,498
|
[
"VisIt"
] |
e984a7565168bada63a52dfbeebd5aa18a54113c22179c55672c26be79f50ccf
|
# Load dependencies
import ovito.io
import ovito.io.particles
# Load the native code modules
import CrystalAnalysis
# Register export formats.
ovito.io.export_file._formatTable["ca"] = CrystalAnalysis.CAExporter
|
srinath-chakravarthy/ovito
|
src/plugins/crystalanalysis/resources/python/ovito/io/crystalanalysis/__init__.py
|
Python
|
gpl-3.0
| 214
|
[
"OVITO"
] |
a1fa8ef2980b64e0d2faae7b44d917201b04c42f6fb0451f92f973ba1fa78c91
|
from common.hgraph.hgraph import NonterminalLabel
from common import log
import itertools
# Some general advice for reading this file:
#
# Every rule specifies some fragment of the object (graph, string or both) that
# is being parsed, as well as a visit order on the individual elements of that
# fragment (tokens or edges respectively). The number of elements already
# visited is called the "size" of this item, and an item with nothing left to
# visit is "closed". The visit order specifies an implicit binarization of the
# rule in question, by allowing the item to consume only one other object (which
# we call the "outside" of the item) at any given time.
#
# In consuming this object, we either "shift" a terminal element or "complete" a
# nonterminal (actually a closed chart item). Each of these steps produces a new
# chart item.
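# A toy illustration of the mechanics (hypothetical rule, not executed): for a
# rule whose RHS graph has triples [(a, 'sees', (b,)), (a, 'X$1', (c,))] and
# visit order [0, 1], a fresh item first shifts a matching terminal 'sees'
# edge from the input graph, extending its node mapping {a -> n1, b -> n2};
# it then completes against a closed item for the nonterminal X, merging that
# item's shifted edges and node mapping. After both steps the item is closed.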
class Item(object):
pass
class HergItem(Item):
"""
Chart item for a HRG parse.
"""
def __init__(self, rule, size=None, shifted=None, mapping=None, nodeset=None, nodelabels=False):
# by default start empty, with no part of the graph consumed
if size == None:
size = 0
if shifted == None:
shifted = frozenset()
if mapping == None:
mapping = dict()
if nodeset == None:
nodeset = frozenset()
self.rule = rule
self.size = size
self.shifted = shifted
self.mapping = mapping
self.nodeset = nodeset
self.rev_mapping = dict((val, key) for key, val in mapping.items())
self.nodelabels = nodelabels
# Store the nonterminal symbol and index of the previous complete
# on this item so we can rebuild the derivation easily
triples = rule.rhs1.triples(nodelabels = nodelabels)
self.outside_symbol = None
if size < len(triples):
# this item is not closed
self.outside_triple = triples[rule.rhs1_visit_order[size]]
self.outside_edge = self.outside_triple[1]
self.closed = False
self.outside_is_nonterminal = isinstance(self.outside_triple[1],
NonterminalLabel)
if self.outside_is_nonterminal:
# strip the index off of the nonterminal label
#self.outside_symbol = str(self.outside_triple[1])
#self.outside_symbol = self.outside_symbol[1:].split('[')[0]
self.outside_symbol = self.outside_triple[1].label
self.outside_nt_index = self.outside_triple[1].index
else:
# this item is closed
self.outside_triple = None
self.outside_edge = None
self.closed = True
self.outside_is_nonterminal = False
self.__cached_hash = None
def __hash__(self):
# memoize the hash function
if not self.__cached_hash:
self.__cached_hash = 2 * hash(self.rule) + 3 * self.size + \
5 * hash(self.shifted)
return self.__cached_hash
def __eq__(self, other):
return isinstance(other, HergItem) and \
other.rule == self.rule and \
other.size == self.size and \
other.shifted == self.shifted and \
other.mapping == self.mapping
def uniq_str(self):
"""
Produces a unique string representation of this item. When representing
charts in other formats (e.g. when writing a tiburon RTG file) we have to
represent this item as a string, which we build from the rule id and list of
nodes.
"""
return 'R%d__%s' % (self.rule.rule_id, self.uniq_cover_str())
def uniq_cover_str(self):
edges = set()
for head, elabel, tail in self.shifted:
if tail:
edges.add('%s:%s' % (head[0], ':'.join([x[0] for x in tail])))
else:
edges.add(head[0])
return ','.join(sorted(list(edges)))
def __repr__(self):
return 'HergItem(%d, %d, %s, %s)' % (self.rule.rule_id, self.size, self.rule.symbol, len(self.shifted))
def __str__(self):
return '[%d, %d/%d, %s, {%s}]' % (self.rule.rule_id,
self.size,
len(self.rule.rhs1.triples()),
self.outside_symbol,
str([x for x in self.shifted]))
def can_shift(self, new_edge):
"""
Determines whether new_edge matches the outside of this item, and can be
shifted.
"""
#print "SHIFT", self, "<---", new_edge
# can't shift into a closed item
if self.closed:
return False
# can't shift an edge that is already inside this item
if new_edge in self.shifted:
return False
olabel = self.outside_triple[1]
nlabel = new_edge[1]
    # make sure new_edge matches the outside label
if olabel != nlabel:
return False
# make sure new_edge preserves a consistent mapping between the nodes of the
# graph and the nodes of the rule
if self.nodelabels:
#print o1
o1, o1_label = self.outside_triple[0]
n1, n1_label = new_edge[0]
if o1_label != n1_label:
return False
else:
o1 = self.outside_triple[0]
n1 = new_edge[0]
if o1 in self.mapping and self.mapping[o1] != n1:
return False
    # If this node is not a node of this rule's RHS but belongs to a subgraph,
    # it needs to have a mapping; otherwise we can't attach.
if n1 in self.nodeset and n1 not in self.rev_mapping:
return False
if self.nodelabels:
if self.outside_triple[2]:
o2, o2_labels = zip(*self.outside_triple[2])
else: o2, o2_labels = [],[]
if new_edge[2]:
n2, n2_labels = zip(*new_edge[2])
else: n2, n2_labels = [],[]
if o2_labels != n2_labels:
return False
else:
o2 = self.outside_triple[2]
n2 = new_edge[2]
if len(o2) != len(n2):
return False
for i in range(len(o2)):
if o2[i] in self.mapping and self.mapping[o2[i]] != n2[i]:
return False
# Again, need to make sure this node is part of the rule RHS, not of a
# proper subgraph.
if n2[i] in self.nodeset and n2[i] not in self.rev_mapping:
return False
return True
def shift(self, new_edge):
"""
Creates the chart item resulting from a shift of new_edge. Assumes
can_shift returned true.
"""
olabel = self.outside_triple[1]
o1 = self.outside_triple[0][0] if self.nodelabels else self.outside_triple[0]
o2 = tuple(x[0] for x in self.outside_triple[2]) if self.nodelabels else self.outside_triple[2]
nlabel = new_edge[1]
n1 = new_edge[0][0] if self.nodelabels else new_edge[0]
n2 = tuple(x[0] for x in new_edge[2]) if self.nodelabels else new_edge[2]
new_nodeset = self.nodeset | set(n2) | set([n1])
assert len(o2) == len(n2)
new_size = self.size + 1
new_shifted = frozenset(self.shifted | set([new_edge]))
new_mapping = dict(self.mapping)
new_mapping[o1] = n1
for i in range(len(o2)):
new_mapping[o2[i]] = n2[i]
return HergItem(self.rule, new_size, new_shifted, new_mapping, new_nodeset, self.nodelabels)
def can_complete(self, new_item):
"""
Determines whether new_item matches the outside of this item (i.e. if the
nonterminals match and the node mappings agree).
"""
# can't add to a closed item
if self.closed:
#log.debug('fail bc closed')
return False
# can't shift an incomplete item
if not new_item.closed:
#log.debug('fail bc other not closed')
return False
# make sure labels agree
if not self.outside_is_nonterminal:
#log.debug('fail bc outside terminal')
return False
#Make sure items are disjoint
if any(edge in self.shifted for edge in new_item.shifted):
#log.debug('fail bc overlap')
return False
# make sure mappings agree
if self.nodelabels:
o1, o1label = self.outside_triple[0]
if self.outside_triple[2]:
o2, o2labels = zip(*self.outside_triple[2])
else:
o2, o2labels = [],[]
else:
o1 = self.outside_triple[0]
o2 = self.outside_triple[2]
if len(o2) != len(new_item.rule.rhs1.external_nodes):
#log.debug('fail bc hyperedge type mismatch')
return False
nroot = list(new_item.rule.rhs1.roots)[0]
#Check root label
if self.nodelabels and o1label != new_item.rule.rhs1.node_to_concepts[nroot]:
return False
if o1 in self.mapping and self.mapping[o1] != new_item.mapping[nroot]:
#log.debug('fail bc mismapping')
return False
real_nroot = new_item.mapping[nroot]
real_ntail = None
for i in range(len(o2)):
otail = o2[i]
ntail = new_item.rule.rhs1.rev_external_nodes[i]
#Check tail label
if self.nodelabels and o2labels[i] != new_item.rule.rhs1.node_to_concepts[ntail]:
return False
if otail in self.mapping and self.mapping[otail] != new_item.mapping[ntail]:
#log.debug('fail bc bad mapping in tail')
return False
for node in new_item.mapping.values():
if node in self.rev_mapping:
onode = self.rev_mapping[node]
if not (onode == o1 or onode in o2):
return False
return True
def complete(self, new_item):
"""
Creates the chart item resulting from completing with new_item. Assumes
can_complete returned true.
"""
olabel = self.outside_triple[1]
o1 = self.outside_triple[0][0] if self.nodelabels else self.outside_triple[0]
o2 = tuple(x[0] for x in self.outside_triple[2]) if self.nodelabels else self.outside_triple[2]
new_size = self.size + 1
new_shifted = frozenset(self.shifted | new_item.shifted)
new_mapping = dict(self.mapping)
new_mapping[o1] = new_item.mapping[list(new_item.rule.rhs1.roots)[0]]
for i in range(len(o2)):
otail = o2[i]
ntail = new_item.rule.rhs1.rev_external_nodes[i]
new_mapping[otail] = new_item.mapping[ntail]
new_nodeset = self.nodeset | new_item.nodeset
new = HergItem(self.rule, new_size, new_shifted, new_mapping, new_nodeset, self.nodelabels)
return new
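# Hedged usage sketch (added comment; `rule`, `edge`, and `sub_item` are hypothetical):
#
#   item = HergItem(rule)                      # fresh item, nothing consumed
#   if item.can_shift(edge):
#       item = item.shift(edge)                # consume one terminal edge
#   if item.can_complete(sub_item):
#       item = item.complete(sub_item)         # absorb a closed subderivation
#   done = item.closed                         # True once every RHS edge is covered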
class CfgItem(Item):
"""
Chart item for a CFG parse.
"""
def __init__(self, rule, size=None, i=None, j=None, nodelabels = False):
# until this item is associated with some span in the sentence, let i and j
# (the left and right boundaries) be -1
if size == None:
size = 0
if i == None:
i = -1
if j == None:
j = -1
self.rule = rule
self.i = i
self.j = j
self.size = size
self.shifted = []
assert len(rule.rhs1) != 0
if size == 0:
assert i == -1
assert j == -1
self.closed = False
self.outside_word = rule.rhs1[rule.rhs1_visit_order[0]]
elif size < len(rule.string):
self.closed = False
self.outside_word = rule.string[rule.rhs1_visit_order[self.size]]
else:
self.closed = True
self.outside_word = None
if self.outside_word and isinstance(self.outside_word, NonterminalLabel):
self.outside_is_nonterminal = True
self.outside_symbol = self.outside_word.label
self.outside_nt_index = self.outside_word.index
else:
self.outside_is_nonterminal = False
self.__cached_hash = None
def __hash__(self):
if not self.__cached_hash:
self.__cached_hash = 2 * hash(self.rule) + 3 * self.i + 5 * self.j
return self.__cached_hash
def __eq__(self, other):
return isinstance(other, CfgItem) and \
other.rule == self.rule and \
other.i == self.i and \
other.j == self.j and \
other.size == self.size
def __repr__(self):
return 'CfgItem(%d, %d, %s, (%d, %d))' % (self.rule.rule_id, self.size, str(self.closed), self.i, self.j)
def __str__(self):
return '[%s, %d/%d, (%d,%d)]' % (self.rule,
self.size,
len(self.rule.rhs1),
self.i,self.j)
def uniq_str(self):
"""
Produces a unique string representation of this item (see note on uniq_str
in HergItem above).
"""
return '%d__%d_%d' % (self.rule.rule_id, self.i, self.j)
def can_shift(self, word, index):
"""
Determines whether word matches the outside of this item (i.e. is adjacent
and has the right symbol) and can be shifted.
"""
if self.closed:
return False
if self.i == -1:
return True
if index == self.i - 1:
return self.outside_word == word
elif index == self.j:
return self.outside_word == word
return False
def shift(self, word, index):
"""
Creates the chart item resulting from a shift of the word at the given
index.
"""
if self.i == -1:
return CfgItem(self.rule, self.size+1, index, index+1)
elif index == self.i - 1:
return CfgItem(self.rule, self.size+1, self.i-1, self.j)
elif index == self.j:
return CfgItem(self.rule, self.size+1, self.i, self.j+1)
assert False
def can_complete(self, new_item):
"""
Determines whether new_item matches the outside of this item.
"""
if self.closed:
return False
if not new_item.closed:
return False
if self.outside_symbol != new_item.rule.symbol:
return False
return self.i == -1 or new_item.i == self.j #or new_item.j == self.i
def complete(self, new_item):
"""
Creates the chart item resulting from a completion with the given item.
"""
if self.i == -1:
return CfgItem(self.rule, self.size+1, new_item.i, new_item.j)
elif new_item.i == self.j:
return CfgItem(self.rule, self.size+1, self.i, new_item.j)
elif new_item.j == self.i:
return CfgItem(self.rule, self.size+1, new_item.i, self.j)
assert False
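# Added illustration (not in the original source): CfgItem spans only grow
# outward from an anchored interval. An item covering (i=2, j=4) may shift the
# word at index 1 (span becomes (1, 4)) or at index 4 (span becomes (2, 5));
# complete() likewise glues an adjacent closed item's span onto either side.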
class SynchronousItem(Item):
"""
Chart item for a synchronous CFG/HRG parse. (Just a wrapper for paired
CfgItem / HergItem.)
"""
def __init__(self, rule, item1class, item2class, item1 = None, item2 = None, nodelabels = False):
self.shifted = ([],[])
self.rule = rule
self.nodelabels = nodelabels
self.item1class = item1class
self.item2class = item2class
if item1:
self.item1 = item1
else:
self.item1 = item1class(rule.project_left(), nodelabels = nodelabels)
if item2:
self.item2 = item2
else:
self.item2 = item2class(rule.project_right(), nodelabels = nodelabels)
if self.item1.closed and self.item2.closed:
self.closed = True
else:
self.closed = False
# Now we potentially have two outsides---one in the graph and the other in
# the string. The visit order will guarantee that if we first consume all
# terminals in any order, the remainder of both string and graph visit
# orders will agree on the sequence in which to consume nonterminals. (See
# the Rule class.) Before consuming all terminals, it might be the case that
# one item has a terminal outside and the other a nonterminal; in that case
# we do not want an outside nonterminal associated with this item.
if item1class is CfgItem:
self.outside1_is_nonterminal = self.item1.outside_is_nonterminal
self.outside_object1 = self.item1.outside_word
else:
self.outside1_is_nonterminal = self.item1.outside_is_nonterminal
self.outside_object1= self.item1.outside_triple[1] if \
self.item1.outside_triple else None
if item2class is CfgItem:
self.outside2_is_nonterminal = self.item2.outside_is_nonterminal
self.outside_object2 = self.item2.outside_word
else:
self.outside2_is_nonterminal = self.item2.outside_is_nonterminal
self.outside_object2 = self.item2.outside_triple[1] if \
self.item2.outside_triple else None
self.outside_is_nonterminal = self.outside1_is_nonterminal and \
self.outside2_is_nonterminal
if self.outside_is_nonterminal:
assert self.outside_object1 == self.outside_object2
self.outside_symbol = self.item1.outside_symbol
self.outside_nt_index = self.item1.outside_nt_index
self.__cached_hash = None
def uniq_str(self):
"""
Produces a unique string representation of this item (see note on uniq_str
in HergItem above).
"""
if self.item1class is CfgItem:
item1cover = "%d,%d" % (self.item1.i, self.item1.j)
elif self.item1class is HergItem:
item1cover = self.item1.uniq_cover_str()
if self.item2class is CfgItem:
item2cover = "%d,%d" % (self.item2.i, self.item2.j)
elif self.item2class is HergItem:
item2cover = self.item2.uniq_cover_str()
return '%d__%s__%s' % (self.rule.rule_id, item1cover, item2cover)
def __hash__(self):
if not self.__cached_hash:
self.__cached_hash = 2 * hash(self.item1) + 7 * hash(self.item2)
return self.__cached_hash
def __eq__(self, other):
return isinstance(other, SynchronousItem) and other.item1 == self.item1 \
and other.item2 == self.item2
def __repr__(self):
return "(%s, %s, %s, %s)" % (self.item1.__repr__(), self.item2.__repr__(), str(self.item1.closed),str(self.item2.closed))
def can_shift_word1(self, word, index):
"""
Determines whether given word, index can be shifted onto the CFG item.
"""
assert isinstance(self.item1, CfgItem)
return self.item1.can_shift(word, index)
def can_shift_word2(self, word, index):
"""
Determines whether given word, index can be shifted onto the CFG item.
"""
assert isinstance(self.item2, CfgItem)
return self.item2.can_shift(word, index)
def shift_word1(self, word, index):
"""
Shifts onto the CFG item.
"""
assert isinstance(self.item1, CfgItem)
nitem = self.item1.shift(word, index)
self.shifted = (self.item1.shifted, self.item2.shifted)
return SynchronousItem(self.rule, self.item1class, self.item2class, nitem, self.item2, nodelabels = self.nodelabels)
def shift_word2(self, word, index):
"""
Shifts onto the CFG item.
"""
assert isinstance(self.item2, CfgItem)
nitem = self.item2.shift(word, index)
self.shifted = (self.item1.shifted, self.item2.shifted)
return SynchronousItem(self.rule, self.item1class, self.item2class, self.item1, nitem, nodelabels = self.nodelabels)
def can_shift_edge1(self, edge):
"""
Determines whether the given edge can be shifted onto the HERG item.
"""
assert isinstance(self.item1, HergItem)
self.shifted = (self.item1.shifted, self.item2.shifted)
return self.item1.can_shift(edge)
def can_shift_edge2(self, edge):
"""
Determines whether the given edge can be shifted onto the HERG item.
"""
assert isinstance(self.item2, HergItem)
self.shifted = (self.item1.shifted, self.item2.shifted)
return self.item2.can_shift(edge)
def shift_edge1(self, edge):
"""
Shifts onto the HERG item.
"""
nitem = self.item1.shift(edge)
return SynchronousItem(self.rule, self.item1class, self.item2class, nitem, self.item2, nodelabels = self.nodelabels)
def shift_edge2(self, edge):
"""
Shifts onto the HERG item.
"""
nitem = self.item2.shift(edge)
return SynchronousItem(self.rule, self.item1class, self.item2class, self.item1, nitem, nodelabels = self.nodelabels)
def can_complete(self, new_item):
"""
Determines whether given item can complete both sides.
"""
if not (self.item1.can_complete(new_item.item1) and
self.item2.can_complete(new_item.item2)):
return False
return True
def complete(self, new_item):
"""
Performs the synchronous completion, and gives back a new item.
"""
nitem1 = self.item1.complete(new_item.item1)
nitem2 = self.item2.complete(new_item.item2)
return SynchronousItem(self.rule, self.item1class, self.item2class, nitem1, nitem2, nodelabels = self.nodelabels)
|
isi-nlp/bolinas
|
parser/vo_item.py
|
Python
|
mit
| 19,950
|
[
"VisIt"
] |
f9e09ac79068e3354326aa468ba30d9aba9605dac39952f9d220fff562613de9
|
#!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2010 (ita)
"""
Classes and functions required for waf commands
"""
import os, imp, sys
from waflib import Utils, Errors, Logs
import waflib.Node
# the following 3 constants are updated on each new release (do not touch)
HEXVERSION=0x1060700
"""Constant updated on new releases"""
WAFVERSION="1.6.7"
"""Constant updated on new releases"""
WAFREVISION="11426"
"""Constant updated on new releases"""
ABI = 98
"""Version of the build data cache file format (used in :py:const:`waflib.Context.DBFILE`)"""
DBFILE = '.wafpickle-%d' % ABI
"""Name of the pickle file for storing the build data"""
APPNAME = 'APPNAME'
"""Default application name (used by ``waf dist``)"""
VERSION = 'VERSION'
"""Default application version (used by ``waf dist``)"""
TOP = 'top'
"""The variable name for the top-level directory in wscript files"""
OUT = 'out'
"""The variable name for the output directory in wscript files"""
WSCRIPT_FILE = 'wscript'
"""Name of the waf script files"""
launch_dir = ''
"""Directory from which waf has been called"""
run_dir = ''
"""Location of the wscript file to use as the entry point"""
top_dir = ''
"""Location of the project directory (top), if the project was configured"""
out_dir = ''
"""Location of the build directory (out), if the project was configured"""
waf_dir = ''
"""Directory containing the waf modules"""
local_repo = ''
"""Local repository containing additional Waf tools (plugins)"""
remote_repo = 'http://waf.googlecode.com/svn/'
"""
Remote directory containing downloadable waf tools. The missing tools can be downloaded by using::
$ waf configure --download
"""
remote_locs = ['branches/waf-%s/waflib/extras' % WAFVERSION, 'trunk/waflib/extras', 'trunk/waflib/Tools']
"""
Remote directories for use with :py:const:`waflib.Context.remote_repo`
"""
g_module = None
"""
Module representing the main wscript file (see :py:const:`waflib.Context.run_dir`)
"""
STDOUT = 1
STDERR = -1
BOTH = 0
classes = []
"""
List of :py:class:`waflib.Context.Context` subclasses that can be used as waf commands. The classes
are added automatically by a metaclass.
"""
def create_context(cmd_name, *k, **kw):
"""
Create a new :py:class:`waflib.Context.Context` instance corresponding to the given command.
Used in particular by :py:func:`waflib.Scripting.run_command`
:param cmd_name: command
:type cmd_name: string
:param k: arguments to give to the context class initializer
:type k: list
:param kw: keyword arguments to give to the context class initializer
:type k: dict
"""
global classes
for x in classes:
if x.cmd == cmd_name:
return x(*k, **kw)
ctx = Context(*k, **kw)
ctx.fun = cmd_name
return ctx
class store_context(type):
"""
Metaclass for storing the command classes into the list :py:const:`waflib.Context.classes`
Context classes must provide an attribute 'cmd' representing the command to execute
"""
def __init__(cls, name, bases, dict):
super(store_context, cls).__init__(name, bases, dict)
name = cls.__name__
if name == 'ctx' or name == 'Context':
return
try:
cls.cmd
except AttributeError:
raise Errors.WafError('Missing command for the context class %r (cmd)' % name)
if not getattr(cls, 'fun', None):
cls.fun = cls.cmd
global classes
classes.insert(0, cls)
ctx = store_context('ctx', (object,), {})
"""Base class for the :py:class:`waflib.Context.Context` classes"""
class Context(ctx):
"""
Default context for waf commands, and base class for new command contexts.
Context objects are passed to top-level functions::
def foo(ctx):
print(ctx.__class__.__name__) # waflib.Context.Context
Subclasses must define the attribute 'cmd':
:param cmd: command to execute as in ``waf cmd``
:type cmd: string
:param fun: function name to execute when the command is called
:type fun: string
.. inheritance-diagram:: waflib.Context.Context waflib.Build.BuildContext waflib.Build.InstallContext waflib.Build.UninstallContext waflib.Build.StepContext waflib.Build.ListContext waflib.Configure.ConfigurationContext waflib.Scripting.Dist waflib.Scripting.DistCheck waflib.Build.CleanContext
"""
errors = Errors
"""
Shortcut to :py:mod:`waflib.Errors` provided for convenience
"""
tools = {}
"""
A cache for modules (wscript files) read by :py:meth:`Context.Context.load`
"""
def __init__(self, **kw):
try:
rd = kw['run_dir']
except KeyError:
global run_dir
rd = run_dir
# binds the context to the nodes in use to avoid a context singleton
class node_class(waflib.Node.Node):
pass
self.node_class = node_class
self.node_class.__module__ = "waflib.Node"
self.node_class.__name__ = "Nod3"
self.node_class.ctx = self
self.root = self.node_class('', None)
self.cur_script = None
self.path = self.root.find_dir(rd)
self.stack_path = []
self.exec_dict = {'ctx':self, 'conf':self, 'bld':self, 'opt':self}
self.logger = None
def __hash__(self):
"""
Return a hash value for storing context objects in dicts or sets. The value is not persistent.
:return: hash value
:rtype: int
"""
return id(self)
def load(self, tool_list, *k, **kw):
"""
Load a Waf tool as a module, and try calling the function named :py:const:`waflib.Context.Context.fun` from it.
A ``tooldir`` value may be provided as a list of module paths.
:type tool_list: list of string or space-separated string
:param tool_list: list of Waf tools to use
"""
tools = Utils.to_list(tool_list)
path = Utils.to_list(kw.get('tooldir', ''))
for t in tools:
module = load_tool(t, path)
fun = getattr(module, kw.get('name', self.fun), None)
if fun:
fun(self)
def execute(self):
"""
Execute the command. Redefine this method in subclasses.
"""
global g_module
self.recurse([os.path.dirname(g_module.root_path)])
def pre_recurse(self, node):
"""
Method executed immediately before a folder is read by :py:meth:`waflib.Context.Context.recurse`. The node given is set
as an attribute ``self.cur_script``, and as the current path ``self.path``
:param node: script
:type node: :py:class:`waflib.Node.Node`
"""
self.stack_path.append(self.cur_script)
self.cur_script = node
self.path = node.parent
def post_recurse(self, node):
"""
Restore ``self.cur_script`` and ``self.path`` right after :py:meth:`waflib.Context.Context.recurse` terminates.
:param node: script
:type node: :py:class:`waflib.Node.Node`
"""
self.cur_script = self.stack_path.pop()
if self.cur_script:
self.path = self.cur_script.parent
def recurse(self, dirs, name=None, mandatory=True, once=True):
"""
Run user code from the supplied list of directories.
The directories can be either absolute, or relative to the directory
of the wscript file. The methods :py:meth:`waflib.Context.Context.pre_recurse` and :py:meth:`waflib.Context.Context.post_recurse`
are called immediately before and after a script has been executed.
:param dirs: List of directories to visit
:type dirs: list of string or space-separated string
:param name: Name of function to invoke from the wscript
:type name: string
:param mandatory: whether sub wscript files are required to exist
:type mandatory: bool
:param once: read the script file once for a particular context
:type once: bool
"""
try:
cache = self.recurse_cache
except:
cache = self.recurse_cache = {}
for d in Utils.to_list(dirs):
if not os.path.isabs(d):
# make relative paths absolute
d = os.path.join(self.path.abspath(), d)
WSCRIPT = os.path.join(d, WSCRIPT_FILE)
WSCRIPT_FUN = WSCRIPT + '_' + (name or self.fun)
node = self.root.find_node(WSCRIPT_FUN)
if node and (not once or node not in cache):
cache[node] = True
self.pre_recurse(node)
try:
function_code = node.read('rU')
exec(compile(function_code, node.abspath(), 'exec'), self.exec_dict)
finally:
self.post_recurse(node)
elif not node:
node = self.root.find_node(WSCRIPT)
if node and (not once or node not in cache):
cache[node] = True
self.pre_recurse(node)
try:
wscript_module = load_module(node.abspath())
user_function = getattr(wscript_module, (name or self.fun), None)
if not user_function:
if not mandatory:
continue
raise Errors.WafError('No function %s defined in %s' % (name or self.fun, node.abspath()))
user_function(self)
finally:
self.post_recurse(node)
elif not node:
if not mandatory:
continue
raise Errors.WafError('No wscript file in directory %s' % d)
def exec_command(self, cmd, **kw):
"""
Execute a command and return the exit status. If the context has the attribute 'log',
capture and log the process stderr/stdout for logging purposes::
def run(tsk):
ret = tsk.generator.bld.exec_command('touch foo.txt')
return ret
Do not confuse this method with :py:meth:`waflib.Context.Context.cmd_and_log` which is used to
return the standard output/error values.
:param cmd: command argument for subprocess.Popen
:param kw: keyword arguments for subprocess.Popen
"""
subprocess = Utils.subprocess
kw['shell'] = isinstance(cmd, str)
Logs.debug('runner: %r' % cmd)
Logs.debug('runner_env: kw=%s' % kw)
try:
if self.logger:
# warning: may deadlock with a lot of output (subprocess limitation)
self.logger.info(cmd)
kw['stdout'] = kw['stderr'] = subprocess.PIPE
p = subprocess.Popen(cmd, **kw)
(out, err) = p.communicate()
if out:
self.logger.debug('out: %s' % out.decode(sys.stdout.encoding or 'iso8859-1'))
if err:
self.logger.error('err: %s' % err.decode(sys.stdout.encoding or 'iso8859-1'))
return p.returncode
else:
p = subprocess.Popen(cmd, **kw)
return p.wait()
except OSError:
return -1
def cmd_and_log(self, cmd, **kw):
"""
Execute a command and return stdout if the execution is successful.
An exception is thrown when the exit status is non-0. In that case, both stderr and stdout
will be bound to the WafError object::
def configure(conf):
out = conf.cmd_and_log(['echo', 'hello'], output=waflib.Context.STDOUT, quiet=waflib.Context.BOTH)
(out, err) = conf.cmd_and_log(['echo', 'hello'], output=waflib.Context.BOTH)
try:
conf.cmd_and_log(['which', 'someapp'], output=waflib.Context.BOTH)
except Exception as e:
print(e.stdout, e.stderr)
:param cmd: args for subprocess.Popen
:param kw: keyword arguments for subprocess.Popen
"""
subprocess = Utils.subprocess
kw['shell'] = isinstance(cmd, str)
Logs.debug('runner: %r' % cmd)
if 'quiet' in kw:
quiet = kw['quiet']
del kw['quiet']
else:
quiet = None
if 'output' in kw:
to_ret = kw['output']
del kw['output']
else:
to_ret = STDOUT
kw['stdout'] = kw['stderr'] = subprocess.PIPE
if quiet is None:
self.to_log(cmd)
try:
p = subprocess.Popen(cmd, **kw)
(out, err) = p.communicate()
except Exception as e:
try:
self.to_log(str(err))
except:
pass
raise Errors.WafError('Execution failure', ex=e)
if not isinstance(out, str):
out = out.decode(sys.stdout.encoding or 'iso8859-1')
if not isinstance(err, str):
err = err.decode(sys.stdout.encoding or 'iso8859-1')
if out and quiet != STDOUT and quiet != BOTH:
self.to_log('out: %s' % out)
if err and quiet != STDERR and quiet != BOTH:
self.to_log('err: %s' % err)
if p.returncode:
e = Errors.WafError('command %r returned %r' % (cmd, p.returncode))
e.returncode = p.returncode
e.stderr = err
e.stdout = out
raise e
if to_ret == BOTH:
return (out, err)
elif to_ret == STDERR:
return err
return out
def fatal(self, msg, ex=None):
"""
Raise a configuration error to interrupt the execution immediately::
def configure(conf):
conf.fatal('a requirement is missing')
:param msg: message to display
:type msg: string
:param ex: optional exception object
:type ex: exception
"""
if self.logger:
self.logger.info('from %s: %s' % (self.path.abspath(), msg))
try:
msg = '%s\n(complete log in %s)' % (msg, self.logger.handlers[0].baseFilename)
except:
pass
raise self.errors.ConfigurationError(msg, ex=ex)
def to_log(self, msg):
"""
Log some information to the logger (if present), or to stderr. If the message is empty,
it is not printed::
def build(bld):
bld.to_log('starting the build')
When in doubt, override this method, or provide a logger on the context class.
:param msg: message
:type msg: string
"""
if not msg:
return
if self.logger:
self.logger.info(msg)
else:
sys.stderr.write(str(msg))
sys.stderr.flush()
def msg(self, msg, result, color=None):
"""
Print a configuration message of the form ``msg: result``.
The second part of the message will be in colors. The output
can be disabled easily by setting ``in_msg`` to a positive value::
def configure(conf):
self.in_msg = 1
conf.msg('Checking for library foo', 'ok')
# no output
:param msg: message to display to the user
:type msg: string
:param result: result to display
:type result: string or boolean
:param color: color to use, see :py:const:`waflib.Logs.colors_lst`
:type color: string
"""
self.start_msg(msg)
if not isinstance(color, str):
color = result and 'GREEN' or 'YELLOW'
self.end_msg(result, color)
def start_msg(self, msg):
"""
Print the beginning of a 'Checking for xxx' message. See :py:meth:`waflib.Context.Context.msg`
"""
try:
if self.in_msg:
self.in_msg += 1
return
except:
self.in_msg = 0
self.in_msg += 1
try:
self.line_just = max(self.line_just, len(msg))
except AttributeError:
self.line_just = max(40, len(msg))
for x in (self.line_just * '-', msg):
self.to_log(x)
Logs.pprint('NORMAL', "%s :" % msg.ljust(self.line_just), sep='')
def end_msg(self, result, color=None):
"""Print the end of a 'Checking for' message. See :py:meth:`waflib.Context.Context.msg`"""
self.in_msg -= 1
if self.in_msg:
return
defcolor = 'GREEN'
if result == True:
msg = 'ok'
elif result == False:
msg = 'not found'
defcolor = 'YELLOW'
else:
msg = str(result)
self.to_log(msg)
Logs.pprint(color or defcolor, msg)
def load_special_tools(self, var, ban=[]):
global waf_dir
lst = self.root.find_node(waf_dir).find_node('waflib/extras').ant_glob(var)
for x in lst:
if not x.name in ban:
load_tool(x.name.replace('.py', ''))
cache_modules = {}
"""
Dictionary holding already loaded modules, keyed by their absolute path.
The modules are added automatically by :py:func:`waflib.Context.load_module`
"""
def load_module(path):
"""
Load a source file as a python module.
:param path: file path
:type path: string
:return: Loaded Python module
:rtype: module
"""
try:
return cache_modules[path]
except KeyError:
pass
module = imp.new_module(WSCRIPT_FILE)
try:
code = Utils.readf(path, m='rU')
except (IOError, OSError):
raise Errors.WafError('Could not read the file %r' % path)
module_dir = os.path.dirname(path)
sys.path.insert(0, module_dir)
exec(compile(code, path, 'exec'), module.__dict__)
sys.path.remove(module_dir)
cache_modules[path] = module
return module
def load_tool(tool, tooldir=None):
"""
Import a Waf tool (python module), and store it in the dict :py:const:`waflib.Context.Context.tools`
:type tool: string
:param tool: Name of the tool
:type tooldir: list
:param tooldir: List of directories to search for the tool module
"""
tool = tool.replace('++', 'xx')
tool = tool.replace('java', 'javaw')
tool = tool.replace('compiler_cc', 'compiler_c')
if tooldir:
assert isinstance(tooldir, list)
sys.path = tooldir + sys.path
try:
__import__(tool)
ret = sys.modules[tool]
Context.tools[tool] = ret
return ret
finally:
for d in tooldir:
sys.path.remove(d)
else:
global waf_dir
try:
os.stat(os.path.join(waf_dir, 'waflib', 'extras', tool + '.py'))
d = 'waflib.extras.%s' % tool
except:
try:
os.stat(os.path.join(waf_dir, 'waflib', 'Tools', tool + '.py'))
d = 'waflib.Tools.%s' % tool
except:
d = tool # user has messed with sys.path
__import__(d)
ret = sys.modules[d]
Context.tools[tool] = ret
return ret
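# Hedged usage sketch (added comment; 'compiler_c' is only an example tool name):
#
#   ctx = create_context('configure')    # returns an instance of the registered
#                                         # context class, or a bare Context if
#                                         # no class claims that command
#   mod = load_tool('compiler_c')         # imports waflib.Tools.compiler_c and
#                                         # caches it in Context.tools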
|
Gnomescroll/Gnomescroll
|
server/waflib/Context.py
|
Python
|
gpl-3.0
| 16,338
|
[
"VisIt"
] |
c3201bc8c21c9041182a880764465ed266fdfb6d59f7091c3117c298b8d9b8ec
|
# Dump each file to a flattened array; the feature vector is all files interleaved
import glob
import os
import numpy as np
from scipy.io import netcdf as ncd
def extract_data(filename, var_name):
fp = ncd.netcdf_file(os.path.join('../data',filename))
d = fp.variables[var_name][:,:,:]
fp.close()
return d
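# Hedged usage sketch (added comment; the filename is illustrative):
#   d = extract_data("TMP_198501_f01.nc", "TMP_2maboveground")
#   # d is a numpy array holding the variable over its (time, lat, lon) grid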
if __name__ == '__main__':
drange = range(1985, 2010)
VAR = "TMP_2maboveground"
DIM = (1, 190, 384)
fstr = "TMP_*{}f{:02}.nc"
astr = "TMP_*_a.nc"
dates = sorted([(y,m) for m in range(1,13) for y in drange])
fnames = ["TMP_{0}{1:02d}".format(y, m) for y,m in dates]
dstamp= [y*100+m for y, m in dates]
with open("dataflat.txt", 'a') as df:
all_data = []
for fn, ds in zip(fnames, dstamp):
print fn
data = []
for i in range(1, 10):
filename = "{0}_f{1:02d}.nc".format(fn, i)
d = extract_data(filename, VAR)
data.append(d.flatten())
filename = "{0}_a.nc".format(fn)
d = extract_data(filename, VAR)
data.append(d.flatten())
all_data.append(np.vstack(data).T)
#np.savetxt(df, np.vstack(all_data).flatten(), fmt='%f4')
darr = np.vstack(all_data)
print darr.min(), darr.max()
|
story645/spfinal
|
dataexport.py
|
Python
|
bsd-3-clause
| 1,139
|
[
"NetCDF"
] |
d5770c0ee781aeb2bd31a35c7e5a2e14d3a431018ea360ba83b36ec3ae2c1ce4
|
#!/usr/bin/env python
import sys
import os
import re
import tempfile
import subprocess
import fileinput
import shutil
import optparse
import urllib2
import gzip
from ftplib import FTP
import tarfile
from galaxy.util.json import from_json_string, to_json_string
def stop_err(msg):
sys.stderr.write(msg)
sys.exit(1)
def fetch_databases(jar_path,genome_list=None):
snpDBs = dict()
(snpEff_dir,snpEff_jar) = os.path.split(jar_path)
databases_path = 'databases.out'
databases_output = open(databases_path,'w')
args = [ 'java','-jar', ]
args.append( snpEff_jar )
args.append( 'databases' )
# tmp_stderr = tempfile.NamedTemporaryFile( prefix = "tmp-data-manager-snpEff-stderr" )
# databases_output = open(databases_path)
# proc = subprocess.Popen( args=args, shell=False, cwd=snpEff_dir, stdout=databases_output.fileno(), stderr=tmp_stderr.fileno() )
proc = subprocess.Popen( args=args, shell=False, cwd=snpEff_dir, stdout=databases_output.fileno() )
return_code = proc.wait()
if return_code:
sys.exit( return_code )
databases_output.close()
try:
fh = open(databases_path,'r')
for i,line in enumerate(fh):
fields = line.split('\t')
if len(fields) >= 2:
genome_version = fields[0].strip()
if genome_list and genome_version not in genome_list:
continue
if genome_version.startswith("Genome") or genome_version.startswith("-"):
continue
description = fields[1].strip()
snpDBs[genome_version] = description
except Exception, e:
stop_err( 'Error parsing %s %s\n' % (databases_path, str( e )) )
else:
fh.close()
return snpDBs
def getOrganismNames(jar_path,genomes,organisms) :
genome_list = genomes.split(',')
organism_list = organisms.split(',') if organisms else []
if len(genome_list) != len(organism_list):
descriptions = []
snpDBdict = fetch_databases(jar_path, genome_list=genome_list)
for genome in snpDBdict:
descriptions.append(snpDBdict[genome] if genome in snpDBdict else genome)
return ','.join(descriptions)
return organisms
def getSnpeffVersion(jar_path):
snpeff_version = 'SnpEff ?.?'
(snpEff_dir,snpEff_jar) = os.path.split(jar_path)
stderr_path = 'snpeff.err'
stderr_fh = open(stderr_path,'w')
args = [ 'java','-jar', ]
args.append( snpEff_jar )
args.append( '-h' )
proc = subprocess.Popen( args=args, shell=False, cwd=snpEff_dir, stderr=stderr_fh.fileno() )
return_code = proc.wait()
if return_code != 255:
sys.exit( return_code )
stderr_fh.close()
fh = open(stderr_path,'r')
for line in fh:
m = re.match('^[Ss]npEff version (SnpEff)\s*(\d+\.\d+).*$',line)
if m:
snpeff_version = m.groups()[0] + m.groups()[1]
break
fh.close()
return snpeff_version
# Starting with SnpEff 4.1 the .bin files contain the SnpEff version:
# Example - the first 3 line of GRCh37.75/snpEffectPredictor.bin (uncompressed):
"""
SnpEff 4.1
CHROMOSOME 2 1 0 179197 GL000219.1 false
CHROMOSOME 3 1 0 81347269 HSCHR17_1 false
"""
def getSnpeffVersionFromFile(path):
snpeff_version = None
try:
fh = gzip.open(path, 'rb')
buf = fh.read(100)
lines = buf.splitlines()
m = re.match('^(SnpEff)\s+(\d+\.\d+).*$',lines[0].strip())
if m:
snpeff_version = m.groups()[0] + m.groups()[1]
fh.close()
except Exception, e:
stop_err( 'Error parsing SnpEff version from: %s %s\n' % (path,str( e )) )
return snpeff_version
"""
# Download human database 'hg19'
java -jar snpEff.jar download -v hg19
<command>java -jar \$SNPEFF_JAR_PATH/snpEff.jar download -c \$JAVA_JAR_PATH/snpEff.config $genomeVersion > $logfile </command>
snpEffectPredictor.bin
regulation_HeLa-S3.bin
regulation_pattern = 'regulation_(.+).bin'
"""
def download_database(data_manager_dict, target_directory, jar_path, config, genome_version, organism):
## get data_dir from config
##---
## Databases are stored here
## E.g.: Information for 'hg19' is stored in data_dir/hg19/
##
## Note: Since version 2.1 you can use tilde ('~') as first character to refer to your home directory
##---
#data_dir = ~/snpEff/data/
data_dir = target_directory
(snpEff_dir,snpEff_jar) = os.path.split(jar_path)
args = [ 'java','-jar' ]
args.append( jar_path )
args.append( 'download' )
args.append( '-c' )
args.append( config )
args.append( '-dataDir' )
args.append( data_dir )
args.append( '-v' )
args.append( genome_version )
proc = subprocess.Popen( args=args, shell=False, cwd=snpEff_dir )
return_code = proc.wait()
if return_code:
sys.exit( return_code )
## search data_dir/genome_version for files
regulation_pattern = 'regulation_(.+).bin'
# annotation files that are included in snpEff by a flag
annotations_dict = {'nextProt.bin' : '-nextprot','motif.bin': '-motif'}
genome_path = os.path.join(data_dir,genome_version)
snpeff_version = getSnpeffVersion(jar_path)
key = snpeff_version + '_' + genome_version
if os.path.isdir(genome_path):
for root, dirs, files in os.walk(genome_path):
for fname in files:
if fname.startswith('snpEffectPredictor'):
# if snpEffectPredictor.bin download succeeded
name = genome_version + (' : ' + organism if organism else '')
# version = getSnpeffVersionFromFile(os.path.join(root,fname))
data_table_entry = dict(key=key,version=snpeff_version,value=genome_version, name=name, path=data_dir)
_add_data_table_entry( data_manager_dict, 'snpeffv_genomedb', data_table_entry )
else:
m = re.match(regulation_pattern,fname)
if m:
name = m.groups()[0]
data_table_entry = dict(key=key,version=snpeff_version,genome=genome_version,value=name, name=name)
_add_data_table_entry( data_manager_dict, 'snpeffv_regulationdb', data_table_entry )
elif fname in annotations_dict:
value = annotations_dict[fname]
name = value.lstrip('-')
data_table_entry = dict(key=key,version=snpeff_version,genome=genome_version,value=value, name=name)
_add_data_table_entry( data_manager_dict, 'snpeffv_annotations', data_table_entry )
return data_manager_dict
def _add_data_table_entry( data_manager_dict, data_table, data_table_entry ):
data_manager_dict['data_tables'] = data_manager_dict.get( 'data_tables', {} )
data_manager_dict['data_tables'][data_table] = data_manager_dict['data_tables'].get( data_table, [] )
data_manager_dict['data_tables'][data_table].append( data_table_entry )
return data_manager_dict
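# Added illustration (not in the original source): after two calls with
# data_table='snpeffv_genomedb', data_manager_dict looks roughly like
#   {'data_tables': {'snpeffv_genomedb': [entry_dict_1, entry_dict_2]}}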
def main():
#Parse Command Line
parser = optparse.OptionParser()
parser.add_option( '-j', '--jar_path', dest='jar_path', action='store', type="string", default=None, help='snpEff.jar path' )
parser.add_option( '-c', '--config', dest='config', action='store', type="string", default=None, help='snpEff.config path' )
parser.add_option( '-g', '--genome_version', dest='genome_version', action='store', type="string", default=None, help='genome_version' )
parser.add_option( '-o', '--organism', dest='organism', action='store', type="string", default=None, help='organism name' )
(options, args) = parser.parse_args()
filename = args[0]
params = from_json_string( open( filename ).read() )
target_directory = params[ 'output_data' ][0]['extra_files_path']
os.mkdir( target_directory )
data_manager_dict = {}
#Create SnpEff Reference Data
for genome_version, organism in zip(options.genome_version.split(','), getOrganismNames(options.jar_path,options.genome_version,options.organism).split(',')):
download_database( data_manager_dict, target_directory, options.jar_path, options.config, genome_version, organism )
#save info to json file
open( filename, 'wb' ).write( to_json_string( data_manager_dict ) )
if __name__ == "__main__": main()
|
mr-c/tools-iuc
|
data_managers/data_manager_snpeff/data_manager/data_manager_snpEff_download.py
|
Python
|
mit
| 8,487
|
[
"Galaxy"
] |
0a1677f80ace212c88f05b193bdc1ee47bca84fb2fc58e4c4a3728b7d2111200
|
import numpy
import sklearn.cluster
import time
import scipy
import os
import audioFeatureExtraction as aF
import audioTrainTest as aT
import audioBasicIO
import matplotlib.pyplot as plt
from scipy.spatial import distance
import matplotlib.cm as cm
import sklearn.discriminant_analysis
import csv
import os.path
import sklearn
import hmmlearn.hmm
import cPickle
import glob
""" General utility functions """
def smoothMovingAvg(inputSignal, windowLen=11):
windowLen = int(windowLen)
if inputSignal.ndim != 1:
raise ValueError("smoothMovingAvg only accepts 1-dimension arrays.")
if inputSignal.size < windowLen:
raise ValueError("Input vector needs to be bigger than window size.")
if windowLen < 3:
return inputSignal
s = numpy.r_[2*inputSignal[0] - inputSignal[windowLen-1::-1], inputSignal, 2*inputSignal[-1]-inputSignal[-1:-windowLen:-1]]
w = numpy.ones(windowLen, 'd')
y = numpy.convolve(w/w.sum(), s, mode='same')
return y[windowLen:-windowLen+1]
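# Hedged usage sketch (added comment):
#   y = smoothMovingAvg(numpy.random.rand(200), windowLen=11)
#   # y has the same length as the input; edges are mirror-padded before averaging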
def selfSimilarityMatrix(featureVectors):
'''
This function computes the self-similarity matrix for a sequence of feature vectors.
ARGUMENTS:
- featureVectors: a numpy matrix (nDims x nVectors) whose i-th column corresponds to the i-th feature vector
RETURNS:
- S: the self-similarity matrix (nVectors x nVectors)
'''
[nDims, nVectors] = featureVectors.shape
[featureVectors2, MEAN, STD] = aT.normalizeFeatures([featureVectors.T])
featureVectors2 = featureVectors2[0].T
S = 1.0 - distance.squareform(distance.pdist(featureVectors2.T, 'cosine'))
return S
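# Hedged usage sketch (added; F is any (nDims x nVectors) feature matrix):
#   S = selfSimilarityMatrix(F)
#   # S is (nVectors x nVectors); S[i, j] close to 1 means frames i and j are similar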
def flags2segs(Flags, window):
'''
ARGUMENTS:
- Flags: a sequence of class flags (per time window)
- window: window duration (in seconds)
RETURNS:
- segs: a sequence of segment limits: segs[i, 0] and segs[i, 1] are the start and end points of segment i (in seconds)
- classes: a sequence of class flags: class[i] is the class ID of the i-th segment
'''
preFlag = 0
curFlag = 0
numOfSegments = 0
curVal = Flags[curFlag]
segsList = []
classes = []
while (curFlag < len(Flags) - 1):
stop = 0
preFlag = curFlag
preVal = curVal
while (stop == 0):
curFlag = curFlag + 1
tempVal = Flags[curFlag]
if ((tempVal != curVal) | (curFlag == len(Flags) - 1)): # stop
numOfSegments = numOfSegments + 1
stop = 1
curSegment = curVal
curVal = Flags[curFlag]
segsList.append((curFlag * window))
classes.append(preVal)
segs = numpy.zeros((len(segsList), 2))
for i in range(len(segsList)):
if i > 0:
segs[i, 0] = segsList[i-1]
segs[i, 1] = segsList[i]
return (segs, classes)
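# Added illustration (toy input, not from the original source):
#   segs, classes = flags2segs(['sp', 'sp', 'mu', 'mu', 'mu'], 1.0)
#   # segs ~ [[0., 2.], [2., 4.]], classes == ['sp', 'mu']; note that callers such
#   # as mtFileClassification patch the final endpoint to the true signal duration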
def segs2flags(segStart, segEnd, segLabel, winSize):
'''
This function converts segment endpoints and respective segment labels to fix-sized class labels.
ARGUMENTS:
- segStart: segment start points (in seconds)
- segEnd: segment endpoints (in seconds)
- segLabel: segment labels
- winSize: fix-sized window (in seconds)
RETURNS:
- flags: numpy array of class indices
- classNames: list of classnames (strings)
'''
flags = []
classNames = list(set(segLabel))
curPos = winSize / 2.0
while curPos < segEnd[-1]:
for i in range(len(segStart)):
if curPos > segStart[i] and curPos <= segEnd[i]:
break
flags.append(classNames.index(segLabel[i]))
curPos += winSize
return numpy.array(flags), classNames
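# Added illustration (toy input, not from the original source):
#   flags, names = segs2flags([0.0, 2.0], [2.0, 5.0], ['sp', 'mu'], 1.0)
#   # one flag per 1-second window: two windows labeled 'sp', three labeled 'mu'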
def computePreRec(CM, classNames):
'''
This function computes the Precision, Recall and F1 measures, given a confusion matrix
'''
numOfClasses = CM.shape[0]
if len(classNames) != numOfClasses:
print "Error in computePreRec! Confusion matrix and classNames list must be of the same size!"
return
Precision = []
Recall = []
F1 = []
for i, c in enumerate(classNames):
Precision.append(CM[i,i] / numpy.sum(CM[:,i]))
Recall.append(CM[i,i] / numpy.sum(CM[i,:]))
F1.append( 2 * Precision[-1] * Recall[-1] / (Precision[-1] + Recall[-1]))
return Recall, Precision, F1
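# Hedged usage sketch (added; CM has ground truth on rows, predictions on columns):
#   CM = numpy.array([[10., 2.], [3., 5.]])
#   Rec, Pre, F1 = computePreRec(CM, ['speech', 'music'])
#   # e.g. Pre[0] = 10/13 and Rec[0] = 10/12 for the 'speech' class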
def readSegmentGT(gtFile):
'''
This function reads a segmentation ground truth file, following a simple CSV format with the following columns:
<segment start>,<segment end>,<class label>
ARGUMENTS:
- gtFile: the path of the CSV segment file
RETURNS:
- segStart: a numpy array of segments' start positions
- segEnd: a numpy array of segments' ending positions
- segLabel: a list of respective class labels (strings)
'''
f = open(gtFile, "rb")
reader = csv.reader(f, delimiter=',')
segStart = []
segEnd = []
segLabel = []
for row in reader:
if len(row) == 3:
segStart.append(float(row[0]))
segEnd.append(float(row[1]))
#if row[2]!="other":
# segLabel.append((row[2]))
#else:
# segLabel.append("silence")
segLabel.append((row[2]))
return numpy.array(segStart), numpy.array(segEnd), segLabel
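# Added illustration: expected layout of a .segments ground-truth CSV file
# (values are made up):
#   0.00,3.20,speech
#   3.20,7.45,music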
def plotSegmentationResults(flagsInd, flagsIndGT, classNames, mtStep, ONLY_EVALUATE=False):
'''
This function plots statistics on the classification-segmentation results produced either by the fix-sized supervised method or the HMM method.
It also computes the overall accuracy achieved by the respective method if ground-truth is available.
'''
flags = [classNames[int(f)] for f in flagsInd]
(segs, classes) = flags2segs(flags, mtStep)
minLength = min(flagsInd.shape[0], flagsIndGT.shape[0])
if minLength > 0:
accuracy = numpy.sum(flagsInd[0:minLength] == flagsIndGT[0:minLength]) / float(minLength)
else:
accuracy = -1
if not ONLY_EVALUATE:
Duration = segs[-1, 1]
SPercentages = numpy.zeros((len(classNames), 1))
Percentages = numpy.zeros((len(classNames), 1))
AvDurations = numpy.zeros((len(classNames), 1))
for iSeg in range(segs.shape[0]):
SPercentages[classNames.index(classes[iSeg])] += (segs[iSeg, 1]-segs[iSeg, 0])
for i in range(SPercentages.shape[0]):
Percentages[i] = 100.0 * SPercentages[i] / Duration
S = sum(1 for c in classes if c == classNames[i])
if S > 0:
AvDurations[i] = SPercentages[i] / S
else:
AvDurations[i] = 0.0
for i in range(Percentages.shape[0]):
print classNames[i], Percentages[i], AvDurations[i]
font = {'size': 10}
plt.rc('font', **font)
fig = plt.figure()
ax1 = fig.add_subplot(211)
ax1.set_yticks(numpy.array(range(len(classNames))))
ax1.axis((0, Duration, -1, len(classNames)))
ax1.set_yticklabels(classNames)
ax1.plot(numpy.array(range(len(flagsInd))) * mtStep + mtStep / 2.0, flagsInd)
if flagsIndGT.shape[0] > 0:
ax1.plot(numpy.array(range(len(flagsIndGT))) * mtStep + mtStep / 2.0, flagsIndGT + 0.05, '--r')
plt.xlabel("time (seconds)")
if accuracy >= 0:
plt.title('Accuracy = {0:.1f}%'.format(100.0 * accuracy))
ax2 = fig.add_subplot(223)
plt.title("Classes percentage durations")
ax2.axis((0, len(classNames) + 1, 0, 100))
ax2.set_xticks(numpy.array(range(len(classNames) + 1)))
ax2.set_xticklabels([" "] + classNames)
ax2.bar(numpy.array(range(len(classNames))) + 0.5, Percentages)
ax3 = fig.add_subplot(224)
plt.title("Segment average duration per class")
ax3.axis((0, len(classNames)+1, 0, AvDurations.max()))
ax3.set_xticks(numpy.array(range(len(classNames) + 1)))
ax3.set_xticklabels([" "] + classNames)
ax3.bar(numpy.array(range(len(classNames))) + 0.5, AvDurations)
fig.tight_layout()
plt.show()
return accuracy
def evaluateSpeakerDiarization(flags, flagsGT):
minLength = min(flags.shape[0], flagsGT.shape[0])
flags = flags[0:minLength]
flagsGT = flagsGT[0:minLength]
uFlags = numpy.unique(flags)
uFlagsGT = numpy.unique(flagsGT)
# compute contigency table:
cMatrix = numpy.zeros((uFlags.shape[0], uFlagsGT.shape[0]))
for i in range(minLength):
cMatrix[int(numpy.nonzero(uFlags == flags[i])[0]), int(numpy.nonzero(uFlagsGT == flagsGT[i])[0])] += 1.0
Nc, Ns = cMatrix.shape
N_s = numpy.sum(cMatrix, axis=0)
N_c = numpy.sum(cMatrix, axis=1)
N = numpy.sum(cMatrix)
purityCluster = numpy.zeros((Nc, ))
puritySpeaker = numpy.zeros((Ns, ))
# compute cluster purity:
for i in range(Nc):
purityCluster[i] = numpy.max((cMatrix[i, :])) / (N_c[i])
for j in range(Ns):
puritySpeaker[j] = numpy.max((cMatrix[:, j])) / (N_s[j])
purityClusterMean = numpy.sum(purityCluster * N_c) / N
puritySpeakerMean = numpy.sum(puritySpeaker * N_s) / N
return purityClusterMean, puritySpeakerMean
def trainHMM_computeStatistics(features, labels):
'''
This function computes the statistics used to train an HMM joint segmentation-classification model
using a sequence of sequential features and respective labels
ARGUMENTS:
- features: a numpy matrix of feature vectors (numOfDimensions x numOfWindows)
- labels: a numpy array of class indices (numOfWindows x 1)
RETURNS:
- startprob: matrix of prior class probabilities (numOfClasses x 1)
- transmat: transition matrix (numOfClasses x numOfClasses)
- means: means matrix (numOfDimensions x 1)
- cov: deviation matrix (numOfDimensions x 1)
'''
uLabels = numpy.unique(labels)
nComps = len(uLabels)
nFeatures = features.shape[0]
if features.shape[1] < labels.shape[0]:
print "trainHMM warning: number of short-term feature vectors must be greater or equal to the labels length!"
labels = labels[0:features.shape[1]]
# compute prior probabilities:
startprob = numpy.zeros((nComps,))
for i, u in enumerate(uLabels):
startprob[i] = numpy.count_nonzero(labels == u)
startprob = startprob / startprob.sum() # normalize prior probabilities
# compute transition matrix:
transmat = numpy.zeros((nComps, nComps))
for i in range(labels.shape[0]-1):
transmat[int(labels[i]), int(labels[i + 1])] += 1
for i in range(nComps): # normalize rows of transition matrix:
transmat[i, :] /= transmat[i, :].sum()
means = numpy.zeros((nComps, nFeatures))
for i in range(nComps):
means[i, :] = numpy.matrix(features[:, numpy.nonzero(labels == uLabels[i])[0]].mean(axis=1))
cov = numpy.zeros((nComps, nFeatures))
for i in range(nComps):
#cov[i,:,:] = numpy.cov(features[:,numpy.nonzero(labels==uLabels[i])[0]]) # use this lines if HMM using full gaussian distributions are to be used!
cov[i, :] = numpy.std(features[:, numpy.nonzero(labels == uLabels[i])[0]], axis=1)
return startprob, transmat, means, cov
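# Hedged sketch (added; toy shapes only):
#   feats = numpy.random.rand(5, 100)                  # 5 dims x 100 windows
#   labs = numpy.array([0] * 50 + [1] * 50)
#   sp, tm, mu, sd = trainHMM_computeStatistics(feats, labs)
#   # sp: (2,), tm: (2, 2), mu: (2, 5), sd: (2, 5) for the two classes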
def trainHMM_fromFile(wavFile, gtFile, hmmModelName, mtWin, mtStep):
'''
This function trains a HMM model for segmentation-classification using a single annotated audio file
ARGUMENTS:
- wavFile: the path of the audio filename
- gtFile: the path of the ground truth filename
(a csv file of the form <segment start in seconds>,<segment end in seconds>,<segment label> in each row
- hmmModelName: the name of the HMM model to be stored
- mtWin: mid-term window size
- mtStep: mid-term window step
RETURNS:
- hmm: an object to the resulting HMM
- classNames: a list of classNames
After training, hmm, classNames, along with the mtWin and mtStep values are stored in the hmmModelName file
'''
[segStart, segEnd, segLabels] = readSegmentGT(gtFile) # read ground truth data
flags, classNames = segs2flags(segStart, segEnd, segLabels, mtStep) # convert to fix-sized sequence of flags
[Fs, x] = audioBasicIO.readAudioFile(wavFile) # read audio data
#F = aF.stFeatureExtraction(x, Fs, 0.050*Fs, 0.050*Fs);
[F, _] = aF.mtFeatureExtraction(x, Fs, mtWin * Fs, mtStep * Fs, round(Fs * 0.050), round(Fs * 0.050)) # feature extraction
startprob, transmat, means, cov = trainHMM_computeStatistics(F, flags) # compute HMM statistics (priors, transition matrix, etc)
hmm = hmmlearn.hmm.GaussianHMM(startprob.shape[0], "diag") # hmm training
hmm.startprob_ = startprob
hmm.transmat_ = transmat
hmm.means_ = means
hmm.covars_ = cov
fo = open(hmmModelName, "wb") # output to file
cPickle.dump(hmm, fo, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(classNames, fo, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(mtWin, fo, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(mtStep, fo, protocol=cPickle.HIGHEST_PROTOCOL)
fo.close()
return hmm, classNames
def trainHMM_fromDir(dirPath, hmmModelName, mtWin, mtStep):
'''
This function trains an HMM model for segmentation-classification using a directory where WAV files and .segments (ground-truth) files are stored
ARGUMENTS:
- dirPath: the path of the data diretory
- hmmModelName: the name of the HMM model to be stored
- mtWin: mid-term window size
- mtStep: mid-term window step
RETURNS:
- hmm: an object to the resulting HMM
- classNames: a list of classNames
After training, hmm, classNames, along with the mtWin and mtStep values are stored in the hmmModelName file
'''
flagsAll = numpy.array([])
classesAll = []
for i, f in enumerate(glob.glob(dirPath + os.sep + '*.wav')): # for each WAV file
wavFile = f
gtFile = f.replace('.wav', '.segments') # corresponding annotation file
if not os.path.isfile(gtFile): # if current WAV file does not have annotation -> skip
continue
[segStart, segEnd, segLabels] = readSegmentGT(gtFile) # read GT data
flags, classNames = segs2flags(segStart, segEnd, segLabels, mtStep) # convert to flags
for c in classNames: # update classnames:
if c not in classesAll:
classesAll.append(c)
[Fs, x] = audioBasicIO.readAudioFile(wavFile) # read audio data
[F, _] = aF.mtFeatureExtraction(x, Fs, mtWin * Fs, mtStep * Fs, round(Fs * 0.050), round(Fs * 0.050)) # feature extraction
lenF = F.shape[1]
lenL = len(flags)
MIN = min(lenF, lenL)
F = F[:, 0:MIN]
flags = flags[0:MIN]
flagsNew = []
for j, fl in enumerate(flags): # append features and labels
flagsNew.append(classesAll.index(classNames[flags[j]]))
flagsAll = numpy.append(flagsAll, numpy.array(flagsNew))
if i == 0:
Fall = F
else:
Fall = numpy.concatenate((Fall, F), axis=1)
startprob, transmat, means, cov = trainHMM_computeStatistics(Fall, flagsAll) # compute HMM statistics
hmm = hmmlearn.hmm.GaussianHMM(startprob.shape[0], "diag") # train HMM
hmm.startprob_ = startprob
hmm.transmat_ = transmat
hmm.means_ = means
hmm.covars_ = cov
fo = open(hmmModelName, "wb") # save HMM model
cPickle.dump(hmm, fo, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(classesAll, fo, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(mtWin, fo, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(mtStep, fo, protocol=cPickle.HIGHEST_PROTOCOL)
fo.close()
return hmm, classesAll
def hmmSegmentation(wavFileName, hmmModelName, PLOT=False, gtFileName=""):
[Fs, x] = audioBasicIO.readAudioFile(wavFileName) # read audio data
try:
fo = open(hmmModelName, "rb")
except IOError:
print "didn't find file"
return
try:
hmm = cPickle.load(fo)
classesAll = cPickle.load(fo)
mtWin = cPickle.load(fo)
mtStep = cPickle.load(fo)
finally:
fo.close()
#Features = audioFeatureExtraction.stFeatureExtraction(x, Fs, 0.050*Fs, 0.050*Fs); # feature extraction
[Features, _] = aF.mtFeatureExtraction(x, Fs, mtWin * Fs, mtStep * Fs, round(Fs * 0.050), round(Fs * 0.050))
flagsInd = hmm.predict(Features.T) # apply model
#for i in range(len(flagsInd)):
# if classesAll[flagsInd[i]]=="silence":
# flagsInd[i]=classesAll.index("speech")
# plot results
if os.path.isfile(gtFileName):
[segStart, segEnd, segLabels] = readSegmentGT(gtFileName)
flagsGT, classNamesGT = segs2flags(segStart, segEnd, segLabels, mtStep)
flagsGTNew = []
for j, fl in enumerate(flagsGT): # "align" labels with GT
if classNamesGT[flagsGT[j]] in classesAll:
flagsGTNew.append(classesAll.index(classNamesGT[flagsGT[j]]))
else:
flagsGTNew.append(-1)
CM = numpy.zeros((len(classNamesGT), len(classNamesGT)))
flagsIndGT = numpy.array(flagsGTNew)
for i in range(min(flagsInd.shape[0], flagsIndGT.shape[0])):
CM[int(flagsIndGT[i]),int(flagsInd[i])] += 1
else:
flagsIndGT = numpy.array([])
acc = plotSegmentationResults(flagsInd, flagsIndGT, classesAll, mtStep, not PLOT)
if acc >= 0:
print "Overall Accuracy: {0:.2f}".format(acc)
return (flagsInd, classNamesGT, acc, CM)
else:
return (flagsInd, classesAll, -1, -1)
def mtFileClassification(inputFile, modelName, modelType, plotResults=False, gtFile=""):
'''
This function performs mid-term classification of an audio stream.
Towards this end, supervised knowledge is used, i.e. a pre-trained classifier.
ARGUMENTS:
- inputFile: path of the input WAV file
- modelName: name of the classification model
- modelType: svm or knn depending on the classifier type
- plotResults: True if results are to be plotted using matplotlib along with a set of statistics
RETURNS:
- segs: a sequence of segment's endpoints: segs[i] is the endpoint of the i-th segment (in seconds)
- classes: a sequence of class flags: class[i] is the class ID of the i-th segment
'''
if not os.path.isfile(modelName):
print "mtFileClassificationError: input modelType not found!"
return (-1, -1, -1)
# Load classifier:
if modelType == 'svm':
[Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT] = aT.loadSVModel(modelName)
elif modelType == 'knn':
[Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT] = aT.loadKNNModel(modelName)
elif modelType == 'randomforest':
[Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT] = aT.loadRandomForestModel(modelName)
elif modelType == 'gradientboosting':
[Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT] = aT.loadGradientBoostingModel(modelName)
elif modelType == 'extratrees':
[Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT] = aT.loadExtraTreesModel(modelName)
if computeBEAT:
print "Model " + modelName + " contains long-term music features (beat etc) and cannot be used in segmentation"
return (-1, -1, -1)
[Fs, x] = audioBasicIO.readAudioFile(inputFile) # load input file
if Fs == -1: # could not read file
return (-1, -1, -1)
x = audioBasicIO.stereo2mono(x) # convert stereo (if) to mono
Duration = len(x) / Fs
# mid-term feature extraction:
[MidTermFeatures, _] = aF.mtFeatureExtraction(x, Fs, mtWin * Fs, mtStep * Fs, round(Fs * stWin), round(Fs * stStep))
flags = []
Ps = []
flagsInd = []
for i in range(MidTermFeatures.shape[1]): # for each feature vector (i.e. for each fix-sized segment):
curFV = (MidTermFeatures[:, i] - MEAN) / STD # normalize current feature vector
[Result, P] = aT.classifierWrapper(Classifier, modelType, curFV) # classify vector
flagsInd.append(Result)
flags.append(classNames[int(Result)]) # update class label matrix
Ps.append(numpy.max(P)) # update probability matrix
flagsInd = numpy.array(flagsInd)
# 1-window smoothing
for i in range(1, len(flagsInd) - 1):
if flagsInd[i-1] == flagsInd[i + 1]:
flagsInd[i] = flagsInd[i + 1]
(segs, classes) = flags2segs(flags, mtStep) # convert fix-sized flags to segments and classes
segs[-1] = len(x) / float(Fs)
# Load ground-truth:
if os.path.isfile(gtFile):
[segStartGT, segEndGT, segLabelsGT] = readSegmentGT(gtFile)
flagsGT, classNamesGT = segs2flags(segStartGT, segEndGT, segLabelsGT, mtStep)
flagsIndGT = []
for j, fl in enumerate(flagsGT): # "align" labels with GT
if classNamesGT[flagsGT[j]] in classNames:
flagsIndGT.append(classNames.index(classNamesGT[flagsGT[j]]))
else:
flagsIndGT.append(-1)
flagsIndGT = numpy.array(flagsIndGT)
CM = numpy.zeros((len(classNamesGT), len(classNamesGT)))
for i in range(min(flagsInd.shape[0], flagsIndGT.shape[0])):
CM[int(flagsIndGT[i]),int(flagsInd[i])] += 1
else:
CM = []
flagsIndGT = numpy.array([])
acc = plotSegmentationResults(flagsInd, flagsIndGT, classNames, mtStep, not plotResults)
if acc >= 0:
print "Overall Accuracy: {0:.3f}".format(acc)
return (flagsInd, classNamesGT, acc, CM)
else:
return (flagsInd, classNames, acc, CM)
def evaluateSegmentationClassificationDir(dirName, modelName, methodName):
flagsAll = numpy.array([])
classesAll = []
accuracys = []
for i, f in enumerate(glob.glob(dirName + os.sep + '*.wav')): # for each WAV file
wavFile = f
print wavFile
gtFile = f.replace('.wav', '.segments') # corresponding annotation file
if methodName.lower() in ["svm", "knn","randomforest","gradientboosting","extratrees"]:
flagsInd, classNames, acc, CMt = mtFileClassification(wavFile, modelName, methodName, False, gtFile)
else:
flagsInd, classNames, acc, CMt = hmmSegmentation(wavFile, modelName, False, gtFile)
if acc > -1:
if i==0:
CM = numpy.copy(CMt)
else:
CM = CM + CMt
accuracys.append(acc)
print CMt, classNames
print CM
[Rec, Pre, F1] = computePreRec(CMt, classNames)
CM = CM / numpy.sum(CM)
[Rec, Pre, F1] = computePreRec(CM, classNames)
print " - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - "
print "Average Accuracy: {0:.1f}".format(100.0*numpy.array(accuracys).mean())
print "Average Recall: {0:.1f}".format(100.0*numpy.array(Rec).mean())
print "Average Precision: {0:.1f}".format(100.0*numpy.array(Pre).mean())
print "Average F1: {0:.1f}".format(100.0*numpy.array(F1).mean())
print "Median Accuracy: {0:.1f}".format(100.0*numpy.median(numpy.array(accuracys)))
print "Min Accuracy: {0:.1f}".format(100.0*numpy.array(accuracys).min())
print "Max Accuracy: {0:.1f}".format(100.0*numpy.array(accuracys).max())
def silenceRemoval(x, Fs, stWin, stStep, smoothWindow=0.5, Weight=0.5, plot=False):
'''
Event Detection (silence removal)
ARGUMENTS:
- x: the input audio signal
- Fs: sampling freq
- stWin, stStep: window size and step in seconds
- smoothWindow: (optional) smooth window (in seconds)
- Weight: (optional) weight factor (0 < Weight < 1) the higher, the more strict
- plot: (optional) True if results are to be plotted
RETURNS:
- segmentLimits: list of segment limits in seconds (e.g. [[0.1, 0.9], [1.4, 3.0]] means that
the resulting segments span 0.1 to 0.9 seconds and 1.4 to 3.0 seconds)
'''
if Weight >= 1:
Weight = 0.99
if Weight <= 0:
Weight = 0.01
# Step 1: feature extraction
x = audioBasicIO.stereo2mono(x) # convert to mono
ShortTermFeatures = aF.stFeatureExtraction(x, Fs, stWin * Fs, stStep * Fs) # extract short-term features
# Step 2: train binary SVM classifier of low vs high energy frames
EnergySt = ShortTermFeatures[1, :] # keep only the energy short-term sequence (2nd feature)
E = numpy.sort(EnergySt) # sort the energy feature values:
L1 = int(len(E) / 10) # number of 10% of the total short-term windows
T1 = numpy.mean(E[0:L1]) + 0.000000000000001 # compute "lower" 10% energy threshold
T2 = numpy.mean(E[-L1:-1]) + 0.000000000000001 # compute "higher" 10% energy threshold
Class1 = ShortTermFeatures[:, numpy.where(EnergySt <= T1)[0]] # get all features that correspond to low energy
Class2 = ShortTermFeatures[:, numpy.where(EnergySt >= T2)[0]] # get all features that correspond to high energy
featuresSS = [Class1.T, Class2.T] # form the binary classification task and ...
[featuresNormSS, MEANSS, STDSS] = aT.normalizeFeatures(featuresSS) # normalize and ...
SVM = aT.trainSVM(featuresNormSS, 1.0) # train the respective SVM probabilistic model (ONSET vs SILENCE)
# Step 3: compute onset probability based on the trained SVM
ProbOnset = []
for i in range(ShortTermFeatures.shape[1]): # for each frame
curFV = (ShortTermFeatures[:, i] - MEANSS) / STDSS # normalize feature vector
ProbOnset.append(SVM.predict_proba(curFV.reshape(1,-1))[0][1]) # get SVM probability (that it belongs to the ONSET class)
ProbOnset = numpy.array(ProbOnset)
ProbOnset = smoothMovingAvg(ProbOnset, smoothWindow / stStep) # smooth probability
# Step 4A: detect onset frame indices:
ProbOnsetSorted = numpy.sort(ProbOnset) # find probability Threshold as a weighted average of top 10% and lower 10% of the values
Nt = ProbOnsetSorted.shape[0] / 10
T = (numpy.mean((1 - Weight) * ProbOnsetSorted[0:Nt]) + Weight * numpy.mean(ProbOnsetSorted[-Nt::]))
MaxIdx = numpy.where(ProbOnset > T)[0] # get the indices of the frames that satisfy the thresholding
i = 0
timeClusters = []
segmentLimits = []
# Step 4B: group frame indices to onset segments
while i < len(MaxIdx): # for each of the detected onset indices
curCluster = [MaxIdx[i]]
if i == len(MaxIdx)-1:
break
while MaxIdx[i+1] - curCluster[-1] <= 2:
curCluster.append(MaxIdx[i+1])
i += 1
if i == len(MaxIdx)-1:
break
i += 1
timeClusters.append(curCluster)
segmentLimits.append([curCluster[0] * stStep, curCluster[-1] * stStep])
# Step 5: Post process: remove very small segments:
minDuration = 0.2
segmentLimits2 = []
for s in segmentLimits:
if s[1] - s[0] > minDuration:
segmentLimits2.append(s)
segmentLimits = segmentLimits2
if plot:
timeX = numpy.arange(0, x.shape[0] / float(Fs), 1.0 / Fs)
plt.subplot(2, 1, 1)
plt.plot(timeX, x)
for s in segmentLimits:
plt.axvline(x=s[0])
plt.axvline(x=s[1])
plt.subplot(2, 1, 2)
plt.plot(numpy.arange(0, ProbOnset.shape[0] * stStep, stStep), ProbOnset)
plt.title('Signal')
for s in segmentLimits:
plt.axvline(x=s[0])
plt.axvline(x=s[1])
plt.title('SVM Probability')
plt.show()
return segmentLimits
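# A minimal usage sketch for silenceRemoval (hedged: 'sample.wav' and the window
# parameters below are illustrative choices, not values mandated by this module):
#
#   [Fs, x] = audioBasicIO.readAudioFile("sample.wav")
#   segments = silenceRemoval(x, Fs, 0.05, 0.05, smoothWindow=1.0, Weight=0.3)
#   for s in segments:
#       print "event: {0:.2f}s - {1:.2f}s".format(s[0], s[1])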
def speakerDiarization(fileName, numOfSpeakers, mtSize=2.0, mtStep=0.2, stWin=0.05, LDAdim=35, PLOT=False):
'''
ARGUMENTS:
- fileName: the name of the WAV file to be analyzed
- numOfSpeakers the number of speakers (clusters) in the recording (<=0 for unknown)
- mtSize (opt) mid-term window size
- mtStep (opt) mid-term window step
- stWin (opt) short-term window size
- LDAdim (opt) LDA dimension (0 for no LDA)
        - PLOT     (opt)    0 for not plotting the results, 1 for plotting
'''
[Fs, x] = audioBasicIO.readAudioFile(fileName)
x = audioBasicIO.stereo2mono(x)
Duration = len(x) / Fs
[Classifier1, MEAN1, STD1, classNames1, mtWin1, mtStep1, stWin1, stStep1, computeBEAT1] = aT.loadKNNModel(os.path.join("data","knnSpeakerAll"))
[Classifier2, MEAN2, STD2, classNames2, mtWin2, mtStep2, stWin2, stStep2, computeBEAT2] = aT.loadKNNModel(os.path.join("data","knnSpeakerFemaleMale"))
[MidTermFeatures, ShortTermFeatures] = aF.mtFeatureExtraction(x, Fs, mtSize * Fs, mtStep * Fs, round(Fs * stWin), round(Fs*stWin * 0.5))
MidTermFeatures2 = numpy.zeros((MidTermFeatures.shape[0] + len(classNames1) + len(classNames2), MidTermFeatures.shape[1]))
for i in range(MidTermFeatures.shape[1]):
curF1 = (MidTermFeatures[:, i] - MEAN1) / STD1
curF2 = (MidTermFeatures[:, i] - MEAN2) / STD2
[Result, P1] = aT.classifierWrapper(Classifier1, "knn", curF1)
[Result, P2] = aT.classifierWrapper(Classifier2, "knn", curF2)
MidTermFeatures2[0:MidTermFeatures.shape[0], i] = MidTermFeatures[:, i]
MidTermFeatures2[MidTermFeatures.shape[0]:MidTermFeatures.shape[0]+len(classNames1), i] = P1 + 0.0001
MidTermFeatures2[MidTermFeatures.shape[0] + len(classNames1)::, i] = P2 + 0.0001
MidTermFeatures = MidTermFeatures2 # TODO
# SELECT FEATURES:
#iFeaturesSelect = [8,9,10,11,12,13,14,15,16,17,18,19,20]; # SET 0A
#iFeaturesSelect = [8,9,10,11,12,13,14,15,16,17,18,19,20, 99,100]; # SET 0B
#iFeaturesSelect = [8,9,10,11,12,13,14,15,16,17,18,19,20, 68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,
# 97,98, 99,100]; # SET 0C
iFeaturesSelect = [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53] # SET 1A
#iFeaturesSelect = [8,9,10,11,12,13,14,15,16,17,18,19,20,41,42,43,44,45,46,47,48,49,50,51,52,53, 99,100]; # SET 1B
#iFeaturesSelect = [8,9,10,11,12,13,14,15,16,17,18,19,20,41,42,43,44,45,46,47,48,49,50,51,52,53, 68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98, 99,100]; # SET 1C
#iFeaturesSelect = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53]; # SET 2A
#iFeaturesSelect = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53, 99,100]; # SET 2B
#iFeaturesSelect = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53, 68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98, 99,100]; # SET 2C
#iFeaturesSelect = range(100); # SET 3
#MidTermFeatures += numpy.random.rand(MidTermFeatures.shape[0], MidTermFeatures.shape[1]) * 0.000000010
MidTermFeatures = MidTermFeatures[iFeaturesSelect, :]
(MidTermFeaturesNorm, MEAN, STD) = aT.normalizeFeatures([MidTermFeatures.T])
MidTermFeaturesNorm = MidTermFeaturesNorm[0].T
numOfWindows = MidTermFeatures.shape[1]
# remove outliers:
DistancesAll = numpy.sum(distance.squareform(distance.pdist(MidTermFeaturesNorm.T)), axis=0)
MDistancesAll = numpy.mean(DistancesAll)
iNonOutLiers = numpy.nonzero(DistancesAll < 1.2 * MDistancesAll)[0]
# TODO: Combine energy threshold for outlier removal:
#EnergyMin = numpy.min(MidTermFeatures[1,:])
#EnergyMean = numpy.mean(MidTermFeatures[1,:])
#Thres = (1.5*EnergyMin + 0.5*EnergyMean) / 2.0
#iNonOutLiers = numpy.nonzero(MidTermFeatures[1,:] > Thres)[0]
#print iNonOutLiers
perOutLier = (100.0 * (numOfWindows - iNonOutLiers.shape[0])) / numOfWindows
MidTermFeaturesNormOr = MidTermFeaturesNorm
MidTermFeaturesNorm = MidTermFeaturesNorm[:, iNonOutLiers]
# LDA dimensionality reduction:
if LDAdim > 0:
#[mtFeaturesToReduce, _] = aF.mtFeatureExtraction(x, Fs, mtSize * Fs, stWin * Fs, round(Fs*stWin), round(Fs*stWin));
# extract mid-term features with minimum step:
mtWinRatio = int(round(mtSize / stWin))
mtStepRatio = int(round(stWin / stWin))
mtFeaturesToReduce = []
numOfFeatures = len(ShortTermFeatures)
numOfStatistics = 2
#for i in range(numOfStatistics * numOfFeatures + 1):
for i in range(numOfStatistics * numOfFeatures):
mtFeaturesToReduce.append([])
for i in range(numOfFeatures): # for each of the short-term features:
curPos = 0
N = len(ShortTermFeatures[i])
while (curPos < N):
N1 = curPos
N2 = curPos + mtWinRatio
if N2 > N:
N2 = N
curStFeatures = ShortTermFeatures[i][N1:N2]
mtFeaturesToReduce[i].append(numpy.mean(curStFeatures))
mtFeaturesToReduce[i+numOfFeatures].append(numpy.std(curStFeatures))
curPos += mtStepRatio
mtFeaturesToReduce = numpy.array(mtFeaturesToReduce)
mtFeaturesToReduce2 = numpy.zeros((mtFeaturesToReduce.shape[0] + len(classNames1) + len(classNames2), mtFeaturesToReduce.shape[1]))
for i in range(mtFeaturesToReduce.shape[1]):
curF1 = (mtFeaturesToReduce[:, i] - MEAN1) / STD1
curF2 = (mtFeaturesToReduce[:, i] - MEAN2) / STD2
[Result, P1] = aT.classifierWrapper(Classifier1, "knn", curF1)
[Result, P2] = aT.classifierWrapper(Classifier2, "knn", curF2)
mtFeaturesToReduce2[0:mtFeaturesToReduce.shape[0], i] = mtFeaturesToReduce[:, i]
mtFeaturesToReduce2[mtFeaturesToReduce.shape[0]:mtFeaturesToReduce.shape[0] + len(classNames1), i] = P1 + 0.0001
mtFeaturesToReduce2[mtFeaturesToReduce.shape[0]+len(classNames1)::, i] = P2 + 0.0001
mtFeaturesToReduce = mtFeaturesToReduce2
mtFeaturesToReduce = mtFeaturesToReduce[iFeaturesSelect, :]
#mtFeaturesToReduce += numpy.random.rand(mtFeaturesToReduce.shape[0], mtFeaturesToReduce.shape[1]) * 0.0000010
(mtFeaturesToReduce, MEAN, STD) = aT.normalizeFeatures([mtFeaturesToReduce.T])
mtFeaturesToReduce = mtFeaturesToReduce[0].T
#DistancesAll = numpy.sum(distance.squareform(distance.pdist(mtFeaturesToReduce.T)), axis=0)
#MDistancesAll = numpy.mean(DistancesAll)
#iNonOutLiers2 = numpy.nonzero(DistancesAll < 3.0*MDistancesAll)[0]
#mtFeaturesToReduce = mtFeaturesToReduce[:, iNonOutLiers2]
Labels = numpy.zeros((mtFeaturesToReduce.shape[1], ));
LDAstep = 1.0
LDAstepRatio = LDAstep / stWin
#print LDAstep, LDAstepRatio
for i in range(Labels.shape[0]):
Labels[i] = int(i*stWin/LDAstepRatio);
clf = sklearn.discriminant_analysis.LinearDiscriminantAnalysis(n_components=LDAdim)
clf.fit(mtFeaturesToReduce.T, Labels)
MidTermFeaturesNorm = (clf.transform(MidTermFeaturesNorm.T)).T
if numOfSpeakers <= 0:
sRange = range(2, 10)
else:
sRange = [numOfSpeakers]
clsAll = []
silAll = []
centersAll = []
for iSpeakers in sRange:
k_means = sklearn.cluster.KMeans(n_clusters = iSpeakers)
k_means.fit(MidTermFeaturesNorm.T)
cls = k_means.labels_
means = k_means.cluster_centers_
# Y = distance.squareform(distance.pdist(MidTermFeaturesNorm.T))
clsAll.append(cls)
centersAll.append(means)
silA = []; silB = []
for c in range(iSpeakers): # for each speaker (i.e. for each extracted cluster)
clusterPerCent = numpy.nonzero(cls==c)[0].shape[0] / float(len(cls))
if clusterPerCent < 0.020:
silA.append(0.0)
silB.append(0.0)
else:
MidTermFeaturesNormTemp = MidTermFeaturesNorm[:,cls==c] # get subset of feature vectors
Yt = distance.pdist(MidTermFeaturesNormTemp.T) # compute average distance between samples that belong to the cluster (a values)
silA.append(numpy.mean(Yt)*clusterPerCent)
silBs = []
for c2 in range(iSpeakers): # compute distances from samples of other clusters
if c2!=c:
clusterPerCent2 = numpy.nonzero(cls==c2)[0].shape[0] / float(len(cls))
MidTermFeaturesNormTemp2 = MidTermFeaturesNorm[:,cls==c2]
Yt = distance.cdist(MidTermFeaturesNormTemp.T, MidTermFeaturesNormTemp2.T)
silBs.append(numpy.mean(Yt)*(clusterPerCent+clusterPerCent2)/2.0)
silBs = numpy.array(silBs)
silB.append(min(silBs)) # ... and keep the minimum value (i.e. the distance from the "nearest" cluster)
silA = numpy.array(silA);
silB = numpy.array(silB);
sil = []
for c in range(iSpeakers): # for each cluster (speaker)
sil.append( ( silB[c] - silA[c]) / (max(silB[c], silA[c])+0.00001) ) # compute silhouette
        silAll.append(numpy.mean(sil))                          # keep the AVERAGE SILHOUETTE
#silAll = silAll * (1.0/(numpy.power(numpy.array(sRange),0.5)))
    imax = numpy.argmax(silAll)                                 # position of the maximum silhouette value
nSpeakersFinal = sRange[imax] # optimal number of clusters
# generate the final set of cluster labels
# (important: need to retrieve the outlier windows: this is achieved by giving them the value of their nearest non-outlier window)
cls = numpy.zeros((numOfWindows,))
for i in range(numOfWindows):
j = numpy.argmin(numpy.abs(i-iNonOutLiers))
cls[i] = clsAll[imax][j]
# Post-process method 1: hmm smoothing
for i in range(1):
startprob, transmat, means, cov = trainHMM_computeStatistics(MidTermFeaturesNormOr, cls)
hmm = hmmlearn.hmm.GaussianHMM(startprob.shape[0], "diag") # hmm training
hmm.startprob_ = startprob
hmm.transmat_ = transmat
hmm.means_ = means; hmm.covars_ = cov
cls = hmm.predict(MidTermFeaturesNormOr.T)
# Post-process method 2: median filtering:
cls = scipy.signal.medfilt(cls, 13)
cls = scipy.signal.medfilt(cls, 11)
    sil = silAll[imax]                                          # final silhouette
classNames = ["speaker{0:d}".format(c) for c in range(nSpeakersFinal)];
# load ground-truth if available
    gtFile = fileName.replace('.wav', '.segments')              # path of the annotation (ground-truth) file
    if os.path.isfile(gtFile):                                  # if ground-truth exists
[segStart, segEnd, segLabels] = readSegmentGT(gtFile) # read GT data
flagsGT, classNamesGT = segs2flags(segStart, segEnd, segLabels, mtStep) # convert to flags
if PLOT:
fig = plt.figure()
if numOfSpeakers>0:
ax1 = fig.add_subplot(111)
else:
ax1 = fig.add_subplot(211)
ax1.set_yticks(numpy.array(range(len(classNames))))
ax1.axis((0, Duration, -1, len(classNames)))
ax1.set_yticklabels(classNames)
ax1.plot(numpy.array(range(len(cls)))*mtStep+mtStep/2.0, cls)
if os.path.isfile(gtFile):
if PLOT:
ax1.plot(numpy.array(range(len(flagsGT)))*mtStep+mtStep/2.0, flagsGT, 'r')
purityClusterMean, puritySpeakerMean = evaluateSpeakerDiarization(cls, flagsGT)
print "{0:.1f}\t{1:.1f}".format(100*purityClusterMean, 100*puritySpeakerMean)
if PLOT:
plt.title("Cluster purity: {0:.1f}% - Speaker purity: {1:.1f}%".format(100*purityClusterMean, 100*puritySpeakerMean) )
if PLOT:
plt.xlabel("time (seconds)")
#print sRange, silAll
if numOfSpeakers<=0:
plt.subplot(212)
plt.plot(sRange, silAll)
plt.xlabel("number of clusters");
plt.ylabel("average clustering's sillouette");
plt.show()
return cls
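# A minimal usage sketch for speakerDiarization (hedged: 'recording.wav' is an
# illustrative filename). The returned cls array holds one cluster (speaker)
# label per mid-term window, i.e. one label every mtStep seconds:
#
#   cls = speakerDiarization("recording.wav", numOfSpeakers=2, LDAdim=0, PLOT=False)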
def speakerDiarizationEvaluateScript(folderName, LDAs):
'''
This function prints the cluster purity and speaker purity for each WAV file stored in a provided directory (.SEGMENT files are needed as ground-truth)
ARGUMENTS:
- folderName: the full path of the folder where the WAV and SEGMENT (ground-truth) files are stored
- LDAs: a list of LDA dimensions (0 for no LDA)
'''
types = ('*.wav', )
wavFilesList = []
for files in types:
wavFilesList.extend(glob.glob(os.path.join(folderName, files)))
wavFilesList = sorted(wavFilesList)
# get number of unique speakers per file (from ground-truth)
N = []
for wavFile in wavFilesList:
gtFile = wavFile.replace('.wav', '.segments');
if os.path.isfile(gtFile):
[segStart, segEnd, segLabels] = readSegmentGT(gtFile) # read GT data
N.append(len(list(set(segLabels))))
else:
N.append(-1)
for l in LDAs:
print "LDA = {0:d}".format(l)
for i, wavFile in enumerate(wavFilesList):
speakerDiarization(wavFile, N[i], 2.0, 0.2, 0.05, l, PLOT = False)
print
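# A minimal usage sketch for speakerDiarizationEvaluateScript (hedged: the folder
# path is illustrative; 0 disables LDA and 35 is the default LDA dimension):
#
#   speakerDiarizationEvaluateScript("diarizationData/", [0, 35])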
def musicThumbnailing(x, Fs, shortTermSize=1.0, shortTermStep=0.5, thumbnailSize=10.0, Limit1 = 0, Limit2 = 1):
'''
This function detects instances of the most representative part of a music recording, also called "music thumbnails".
    A technique similar to the one proposed in [1] is used; however, a wider set of audio features is used instead of chroma features.
    In particular the following steps are followed:
     - Extract short-term audio features. Typical short-term window size: 1 second
     - Compute the self-similarity matrix, i.e. all pairwise similarities between feature vectors
     - Apply a diagonal mask as a moving average filter on the values of the self-similarity matrix.
       The size of the mask is equal to the desired thumbnail length.
     - Find the position of the maximum value of the new (filtered) self-similarity matrix.
       The audio segments that correspond to the diagonal around that position are the selected thumbnails
ARGUMENTS:
- x: input signal
- Fs: sampling frequency
- shortTermSize: window size (in seconds)
- shortTermStep: window step (in seconds)
         - thumbnailSize:    desired thumbnail size (in seconds)
         - Limit1, Limit2:   (optional) fractions of the recording length (0 to 1) that bound the search area
RETURNS:
- A1: beginning of 1st thumbnail (in seconds)
- A2: ending of 1st thumbnail (in seconds)
- B1: beginning of 2nd thumbnail (in seconds)
         - B2:               ending of 2nd thumbnail (in seconds)
         - S:                the filtered self-similarity matrix (returned as a fifth value)
USAGE EXAMPLE:
import audioFeatureExtraction as aF
[Fs, x] = basicIO.readAudioFile(inputFile)
        [A1, A2, B1, B2, S] = musicThumbnailing(x, Fs)
[1] Bartsch, M. A., & Wakefield, G. H. (2005). Audio thumbnailing of popular music using chroma-based representations.
Multimedia, IEEE Transactions on, 7(1), 96-104.
'''
x = audioBasicIO.stereo2mono(x);
# feature extraction:
stFeatures = aF.stFeatureExtraction(x, Fs, Fs*shortTermSize, Fs*shortTermStep)
# self-similarity matrix
S = selfSimilarityMatrix(stFeatures)
# moving filter:
M = int(round(thumbnailSize / shortTermStep))
B = numpy.eye(M,M)
S = scipy.signal.convolve2d(S, B, 'valid')
# post-processing (remove main diagonal elements)
MIN = numpy.min(S)
for i in range(S.shape[0]):
for j in range(S.shape[1]):
if abs(i-j) < 5.0 / shortTermStep or i > j:
S[i,j] = MIN;
# find max position:
S[0:int(Limit1*S.shape[0]), :] = MIN
S[:, 0:int(Limit1*S.shape[0])] = MIN
S[int(Limit2*S.shape[0])::, :] = MIN
S[:, int(Limit2*S.shape[0])::] = MIN
maxVal = numpy.max(S)
[I, J] = numpy.unravel_index(S.argmax(), S.shape)
#plt.imshow(S)
#plt.show()
# expand:
i1 = I; i2 = I
j1 = J; j2 = J
while i2-i1<M:
if i1 <=0 or j1<=0 or i2>=S.shape[0]-2 or j2>=S.shape[1]-2:
break
if S[i1-1, j1-1] > S[i2+1,j2+1]:
i1 -= 1
j1 -= 1
else:
i2 += 1
j2 += 1
return (shortTermStep*i1, shortTermStep*i2, shortTermStep*j1, shortTermStep*j2, S)
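# A short sketch of consuming the thumbnailing output (hedged: it extends the
# USAGE EXAMPLE from the docstring above; note the function also returns the
# filtered self-similarity matrix S as a fifth value):
#
#   [A1, A2, B1, B2, S] = musicThumbnailing(x, Fs)
#   thumb1 = x[int(A1 * Fs):int(A2 * Fs)]    # samples of the 1st thumbnail
#   thumb2 = x[int(B1 * Fs):int(B2 * Fs)]    # samples of the 2nd thumbnail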
|
muthu1993/InteliEQ
|
audioSegmentation.py
|
Python
|
apache-2.0
| 46,767
|
[
"Gaussian"
] |
e8600f9287df2f164a4999f3b9afe49444bab7e3773b89dd563bfdb63053bb9b
|
#!/usr/bin/env python
"""Read the contents of a single file containing DFT output and create a csv style file of information"""
from __future__ import print_function
import numpy as np
import os, sys
import PDielec.Utilities as Utilities
import PDielec.__init__
version = PDielec.__init__.__version__
def print_help():
print('p1reader -program program [-version] filename', file=sys.stderr)
print(' \"program\" must be one of \"abinit\", \"castep\", \"crystal\", \"gulp\" ', file=sys.stderr)
print(' \"phonopy\", \"qe\", \"vasp\", \"experiment\", \"auto\" ', file=sys.stderr)
print(' The default is auto, so the program tries to guess the package from ', file=sys.stderr)
print(' the contents of the directory. However this is not fool-proof! ', file=sys.stderr)
print(' If phonopy is used it must be followed by the QM package ', file=sys.stderr)
    print('          in auto mode, if the file was created by phonopy, VASP is assumed ', file=sys.stderr)
print(' -debug to switch on more debug information ', file=sys.stderr)
print(' -version print the version of PDielec library being used ', file=sys.stderr)
print(' Version ',version,file=sys.stderr)
exit()
def main():
# Start processing the directories
if len(sys.argv) <= 1 :
print_help()
filename = ''
tokens = sys.argv[1:]
ntokens = len(tokens)-1
itoken = -1
program = 'auto'
qmprogram = 'vasp'
debug = False
while itoken < ntokens:
itoken += 1
token = tokens[itoken]
token = token.replace('--','-')
if token == "-debug":
debug = True
elif token == "-help":
print_help()
elif token == "-version":
print(' Version ',version,file=sys.stderr)
exit()
elif token == "-program":
itoken += 1
program = tokens[itoken]
if program == 'phonopy':
itoken += 1
qmprogram = tokens[itoken]
else:
filename = tokens[itoken]
    if len(filename) < 1:
print('Please give a filename to be read in',file=sys.stderr)
exit()
if not program in ['auto','abinit','castep','crystal','gulp','qe','vasp','phonopy','experiment']:
print('Program is not recognised: ',program,file=sys.stderr)
exit()
if program == 'phonopy':
if not qmprogram in ['abinit','castep','crystal','gulp','qe','vasp']:
print('Phonopy QM program is not recognised: ',qmprogram,file=sys.stderr)
exit()
print(' QM program used by Phonopy is: ',qmprogram,file=sys.stderr)
print(' Program is ',program,file=sys.stderr)
if not os.path.isfile(filename):
print('Error file requested for analysis does not exist',filename,file=sys.stderr)
exit()
#
# If no program information was given try and work out what package created the outputfile
#
if program == "auto":
program = Utilities.find_program_from_name(filename)
#
# Print out what we are doing
#
print(' Analysing {} generated by {}'.format(filename,program),file=sys.stderr)
#
# Get the reader from the filename and package used to create it
#
reader = Utilities.get_reader(filename, program, qmprogram)
#
    # set the debug flag before reading the file
#
reader.debug = debug
#
# Now read the output file
#
reader.read_output()
#
# Test to make sure we have a functioning reader
#
reader.print_info()
return
if __name__ == "__main__":
main()
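# Example invocations (file names are illustrative; the option forms follow
# print_help() above, and 'auto' mode guesses the package from the file):
#
#   p1reader -program vasp OUTCAR
#   p1reader -program phonopy vasp phonopy_output_file
#   p1reader some_dft_output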
|
JohnKendrick/PDielec
|
PDielec/p1reader.py
|
Python
|
mit
| 3,741
|
[
"ABINIT",
"CASTEP",
"CRYSTAL",
"GULP",
"VASP",
"phonopy"
] |
9014523d6554bdf9e2bfcfc1ffb1e72d7a1fbab8bb770bcd57a36751bfc640e6
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# lswww v2.3.1 - A web spider library
# Copyright (C) 2006 Nicolas Surribas
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import sys
import re
import socket
import getopt
import os
import HTMLParser
import urllib
import urllib2
from distutils.sysconfig import get_python_lib
BASE_DIR = None
if '' in sys.path:
sys.path.remove('')
for python_dir in sys.path:
if os.path.isdir(os.path.join(python_dir, "wapiti")):
BASE_DIR = os.path.join(python_dir, "wapiti")
break
if not BASE_DIR:
for lib_dir in [get_python_lib(prefix="/usr/local"), get_python_lib()]:
if os.path.isdir(os.path.join(lib_dir, "wapiti")):
BASE_DIR = os.path.join(lib_dir, "wapiti")
sys.path.append(BASE_DIR)
break
if not BASE_DIR:
sys.path.append("")
if "__file__" in dir():
BASE_DIR = os.path.normpath(os.path.join(os.path.abspath(__file__), '..'))
else:
BASE_DIR = os.getcwd()
import httplib2
from htmlentitydefs import name2codepoint as n2cp
from xml.dom import minidom
from crawlerpersister import CrawlerPersister
import libcookie
import BeautifulSoup
class lswww:
"""
    lswww explores a website and extracts links and form fields.
Usage: python lswww.py http://server.com/base/url/ [options]
Supported options are:
-s <url>
--start <url>
To specify an url to start with
-x <url>
--exclude <url>
To exclude an url from the scan (for example logout scripts)
You can also use a wildcard (*)
        Example: -x "http://server/base/?page=*&module=test"
or -x http://server/base/admin/* to exclude a directory
-p <url_proxy>
--proxy <url_proxy>
To specify a proxy
        Example: -p http://proxy:port/
-c <cookie_file>
--cookie <cookie_file>
To use a cookie
-a <login%password>
--auth <login%password>
Set credentials for HTTP authentication
Doesn't work with Python 2.4
-r <parameter_name>
--remove <parameter_name>
Remove a parameter from URLs
-v <level>
--verbose <level>
Set verbosity level
0: only print results
1: print a dot for each url found (default)
2: print each url
-t <timeout>
--timeout <timeout>
Set the timeout (in seconds)
-n <limit>
--nice <limit>
Define a limit of urls to read with the same pattern
Use this option to prevent endless loops
Must be greater than 0
-i <file>
--continue <file>
        This parameter tells Wapiti to continue the scan from the specified
        file; this file should contain data from a previous scan.
        The file is optional; if it is not specified, Wapiti takes the default file
        from the \"scans\" folder.
-h
--help
To print this usage message
"""
SCOPE_DOMAIN = "domain"
SCOPE_FOLDER = "folder"
SCOPE_PAGE = "page"
SCOPE_DEFAULT = "default"
root = ""
server = ""
tobrowse = []
browsed = {}
proxy = ""
excluded = []
forms = []
uploads = []
allowed = ['php', 'html', 'htm', 'xml', 'xhtml', 'xht', 'xhtm',
'asp', 'aspx', 'php3', 'php4', 'php5', 'txt', 'shtm',
'shtml', 'phtm', 'phtml', 'jhtml', 'pl', 'jsp', 'cfm', 'cfml']
verbose = 0
cookie = ""
auth_basic = []
bad_params = []
timeout = 6
h = None
global_headers = {}
cookiejar = None
scope = None
link_encoding = {}
persister = None
# 0 means no limits
nice = 0
def __init__(self, root, crawlerFile=None):
if root.startswith("-"):
print _("First argument must be the root url !")
sys.exit(0)
if root.find("://") == -1:
root = "http://" + root
if(self.__checklink(root)):
print _("Invalid protocol:"), root.split("://")[0]
sys.exit(0)
if root[-1] != "/" and (root.split("://")[1]).find("/") == -1:
root += "/"
server = (root.split("://")[1]).split("/")[0]
self.root = root # Initial URL
self.server = server # Domain
self.scopeURL = root # Scope of the analysis
self.tobrowse.append(root)
self.persister = CrawlerPersister()
def setTimeOut(self, timeout = 6):
"""Set the timeout in seconds to wait for a page"""
self.timeout = timeout
def setProxy(self, proxy = ""):
"""Set proxy preferences"""
self.proxy = proxy
def setNice(self, nice=0):
"""Set the maximum of urls to visit with the same pattern"""
self.nice = nice
def setScope(self, scope):
self.scope = scope
if scope == self.SCOPE_FOLDER:
self.scopeURL = "/".join(self.root.split("/")[:-1]) + "/"
elif scope == self.SCOPE_DOMAIN:
self.scopeURL = "http://" + self.server
def addStartURL(self, url):
if(self.__checklink(url)):
print _("Invalid link argument") + ":", url
sys.exit(0)
if(self.__inzone(url) == 0):
self.tobrowse.append(url)
def addExcludedURL(self, url):
"""Add an url to the list of forbidden urls"""
self.excluded.append(url)
def setCookieFile(self, cookie):
"""Set the file to read the cookie from"""
self.cookie = cookie
def setAuthCredentials(self, auth_basic):
self.auth_basic = auth_basic
def addBadParam(self, bad_param):
self.bad_params.append(bad_param)
def browse(self, url):
"""Extract urls from a webpage and add them to the list of urls to browse if they aren't in the exclusion list"""
        # return an empty dictionary => won't be logged
# We don't need destination anchors
current = url.split("#")[0]
# Url without query string
current = current.split("?")[0]
# Get the dirname of the file
currentdir = "/".join(current.split("/")[:-1]) + "/"
        # The timeout must not be too long, so that big documents (for example a download script)
        # do not block the scan, and not too short, to give good results
socket.setdefaulttimeout(self.timeout)
try:
info, data = self.h.request(url, headers = self.cookiejar.headers_url(url))
except socket.timeout:
self.excluded.append(url)
return {}
except socket.error, msg:
if msg.errno == 111:
print _("Connection refused!")
self.excluded.append(url)
return {}
code = info['status']
if not self.link_encoding.has_key(url):
self.link_encoding[url] = ""
proto = url.split("://")[0]
if proto == "http" or proto == "https":
if not isinstance(proto, unicode): proto = unicode(proto)
# Check the content-type first
#if not u.info().get("Content-Type"):
if not info.has_key("content-type"):
# Sometimes there's no content-type... so we rely on the document extension
if (current.split(".")[-1] not in self.allowed) and current[-1] != "/":
return info
elif info["content-type"].find("text") == -1:
return info
page_encoding = BeautifulSoup.BeautifulSoup(data).originalEncoding
# Manage redirections
if info.has_key("location"):
redir = self.correctlink(info["location"], current, currentdir, proto)
if redir != None:
if(self.__inzone(redir) == 0):
self.link_encoding[redir] = self.link_encoding[url]
                    # Is the document already visited or forbidden ?
if (redir in self.browsed.keys()) or (redir in self.tobrowse) or \
self.isExcluded(redir):
pass
else:
# No -> Will browse it soon
self.tobrowse.append(redir)
if page_encoding != None:
htmlSource = unicode(data, page_encoding, "ignore")
else:
htmlSource = data
p = linkParser(url)
try:
p.feed(htmlSource)
except HTMLParser.HTMLParseError, err:
htmlSource = BeautifulSoup.BeautifulSoup(htmlSource).prettify()
if not isinstance(htmlSource, unicode) and page_encoding != None:
htmlSource = unicode(htmlSource, page_encoding, "ignore")
try:
p.reset()
p.feed(htmlSource)
except HTMLParser.HTMLParseError, err:
p = linkParser2(url, self.verbose)
p.feed(htmlSource)
        # Sometimes the page is badly coded but the parser doesn't see the error
        # So if we got no links we can force a correction of the page
if len(p.liens) == 0:
if page_encoding != None:
htmlSource = BeautifulSoup.BeautifulSoup(htmlSource).prettify(page_encoding)
else:
htmlSource = BeautifulSoup.BeautifulSoup(htmlSource).prettify()
try:
p.reset()
p.feed(htmlSource)
except HTMLParser.HTMLParseError, err:
p = linkParser2(url, self.verbose)
p.feed(htmlSource)
for lien in p.uploads:
self.uploads.append(self.correctlink(lien, current, currentdir, proto))
for lien in p.liens:
if page_encoding != None and not isinstance(lien, unicode):
lien = unicode(lien, page_encoding, "ignore")
lien = self.correctlink(lien, current, currentdir, proto)
if lien != None:
if(self.__inzone(lien) == 0):
                    # Is the document already visited or forbidden ?
if (lien in self.browsed.keys()) or (lien in self.tobrowse) or self.isExcluded(lien):
pass
elif self.nice > 0:
if self.__countMatches(lien) >= self.nice:
# don't waste time next time we found it
self.excluded.append(lien)
return {}
else:
self.tobrowse.append(lien)
else:
# No -> Will browse it soon
self.tobrowse.append(lien)
self.link_encoding[lien] = page_encoding
for form in p.forms:
action = self.correctlink(form[0], current, currentdir, proto)
if action == None: action = current
form = (action, form[1], url, page_encoding)
if form[0:3] not in [x[0:3] for x in self.forms]: self.forms.append(form)
        # We automatically exclude 404 urls
if code == "404":
self.excluded.append(url)
#return {} # exclude from scan but can be useful for some modules maybe
return info
def correctlink(self, lien, current, currentdir, proto):
"""Transform relatives urls in absolutes ones"""
# No leading or trailing whitespaces
lien = lien.strip()
if lien == "":
return current
if lien == "..":
lien = "../"
# bad protocols
llien = lien.lower()
if llien.find("telnet:", 0) == 0 or llien.find("ftp:", 0) == 0 or \
llien.find("mailto:", 0) == 0 or llien.find("javascript:", 0) == 0 or \
llien.find("news:", 0) == 0 or llien.find("file:", 0) == 0 or \
llien.find("gopher:", 0) == 0 or llien.find("irc:", 0) == 0:
return None
# Good protocols or relatives links
else:
# full url, nothing to do :)
if (lien.find("http://", 0) == 0) or (lien.find("https://", 0) == 0):
pass
else:
# root-url related link
if(lien[0] == '/'):
lien = proto + u"://" + self.server + lien
else:
# same page + query string
if(lien[0] == '?'):
lien = current + lien
# current directory related link
else:
lien = currentdir + lien
# No destination anchor
if lien.find("#") != -1:
lien = lien.split("#")[0]
# reorganize parameters in alphabetical order
if lien.find("?") != -1:
args = lien.split("?")[1]
if args.find("&") != -1 :
args = args.split("&")
args.sort()
args = [i for i in args if i != "" and i.find("=") >= 0]
for i in self.bad_params:
for j in args:
if j.startswith(i + "="): args.remove(j)
args = "&".join(args)
# a hack for auto-generated Apache directory index
if args in ["C=D;O=A", "C=D;O=D", "C=M;O=A", "C=M;O=D",
"C=N;O=A", "C=N;O=D", "C=S;O=A", "C=S;O=D"]:
lien = lien.split("?")[0]
else:
lien = lien.split("?")[0] + u"?" + args
# Remove the trailing '?' if its presence doesn't make sense
if lien[-1:] == "?":
lien = lien[:-1]
# remove useless slashes
if lien.find("?") != -1:
file = lien.split("?")[0]
file = re.sub("[^:]//+", "/", file)
if file[-2:] == "/.":
file = file[:-1]
lien = file + "?" + lien.split("?")[1]
else:
if lien[-2:] == "/.":
lien = lien[:-1]
        # links going to a parent directory (..)
while re.search("/([~:!,;a-zA-Z0-9\.\-+_]+)/\.\./", lien) != None:
lien = re.sub("/([~:!,;a-zA-Z0-9\.\-+_]+)/\.\./", "/", lien)
lien = re.sub("/\./", "/", lien)
# Everything is good here
return lien
def __checklink(self, url):
"""Verify the protocol"""
if (url.find("http://", 0) == 0) or (url.find("https://", 0) == 0):
return 0
else:
return 1
def __inzone(self, url):
"""Make sure the url is under the root url"""
if(url.find(self.scopeURL, 0) == 0):
return 0
else:
return 1
def isExcluded(self, url):
"""Return True if the url is not allowed to be scan"""
match = False
for regexp in self.excluded:
if self.__reWildcard(regexp, url):
match = True
return match
def __countMatches(self, url):
"""Return the number of known urls matching the pattern of the given url"""
matches = 0
if url.find("?") != -1:
if url.find("=") != -1:
i = 0
for x in range(0, url.count("=")):
start = url.find("=", i)
i = url.find("&", start)
if i != -1:
for u in self.browsed.keys():
if u.startswith(url[:start] + "=") and u.endswith(url[i:]):
matches += 1
else:
for u in self.browsed.keys():
if u.startswith(url[:start] + "="):
matches += 1
else:#QUERY_STRING
for a in [u for u in self.browsed.keys() if u.find("=") < 0]:
if a.startswith(url.split("?")[0]):
matches += 1
return matches
def __reWildcard(self, regexp, string):
"""Wildcard-based regular expression system"""
regexp = re.sub("\*+", "*", regexp)
match = True
if regexp.count("*") == 0:
if regexp == string:
return True
else:
return False
blocks = regexp.split("*")
start = ""
end = ""
if not regexp.startswith("*"):
start = blocks[0]
if not regexp.endswith("*"):
end = blocks[-1]
if start != "":
if string.startswith(start):
blocks = blocks[1:]
else:
return False
if end != "":
if string.endswith(end):
blocks = blocks[:-1]
else:
return False
blocks = [block for block in blocks if block != ""]
if blocks == []:
return match
for block in blocks:
i = string.find(block)
if i == -1: return False
string = string[i + len(block):]
return match
def go(self, crawlerFile):
proxy = None
if self.proxy != "":
(proxy_type, proxy_usr, proxy_pwd, proxy_host, proxy_port,
path, query, fragment) = httplib2.parse_proxy(self.proxy)
proxy = httplib2.ProxyInfo(proxy_type, proxy_host, proxy_port,
proxy_user = proxy_usr, proxy_pass = proxy_pwd)
self.h = httplib2.Http(cache = None, timeout = self.timeout,
proxy_info = proxy)
self.h.follow_redirects = False
self.cookiejar = libcookie.libcookie(self.server)
if os.path.isfile(self.cookie):
self.cookiejar.loadfile(self.cookie)
if self.auth_basic != []:
self.h.add_credentials(self.auth_basic[0], self.auth_basic[1])
# load of the crawler status if a file is passed to it.
if crawlerFile != None:
if self.persister.isDataForUrl(crawlerFile) == 1:
self.persister.loadXML(crawlerFile)
self.tobrowse = self.persister.getToBrose()
# TODO: change xml file for browsed urls
self.browsed = self.persister.getBrowsed()
self.forms = self.persister.getForms()
self.uploads = self.persister.getUploads()
print _("File") + " " + crawlerFile + " " + _("loaded, the scan continues") + ":"
if self.verbose == 2:
print " * " + _("URLs to browse")
for x in self.tobrowse:
print " + " + x
print " * " + _("URLs browsed")
for x in self.browsed.keys():
print " + " + x
else:
print _("File") + " " + crawlerFile + " " + _("not found, Wapiti will scan again the web site")
# while url list isn't empty, continue browsing
# if the user stop the scan with Ctrl+C, give him all found urls
# and they are saved in an XML file
try:
while len(self.tobrowse) > 0:
lien = self.tobrowse.pop(0)
if (lien not in self.browsed.keys() and lien not in self.excluded):
headers = self.browse(lien)
if headers != {}:
if not headers.has_key("link_encoding"):
if self.link_encoding.has_key(lien):
headers["link_encoding"] = self.link_encoding[lien]
self.browsed[lien] = headers
if self.verbose == 1:
sys.stderr.write('.')
elif self.verbose == 2:
print lien
if(self.scope == self.SCOPE_PAGE):
self.tobrowse = []
self.saveCrawlerData()
print ""
print " " + _("Notice") + " "
print "========"
print _("This scan has been saved in the file") + " " + self.persister.CRAWLER_DATA_DIR + '/' + self.server + ".xml"
print _("You can use it to perform attacks without scanning again the web site with the \"-k\" parameter")
except KeyboardInterrupt:
self.saveCrawlerData()
print ""
print " " + _("Notice") + " "
print "========"
print _("Scan stopped, the data has been saved in the file") + " " + self.persister.CRAWLER_DATA_DIR + '/' + self.server + ".xml"
print _("To continue this scan, you should launch Wapiti with the \"-i\" parameter")
pass
def verbosity(self, vb):
"""Set verbosity level"""
self.verbose = vb
def printLinks(self):
"""Print found URLs on standard output"""
l = self.browsed.keys()
l.sort()
sys.stderr.write("\n+ " + _("URLs") + ":\n")
for lien in l:
print lien
def printForms(self):
"""Print found forms on standard output"""
if self.forms != []:
sys.stderr.write("\n+ "+_("Forms Info") + ":\n")
for form in self.forms:
print _("From") + ":", form[2]
print _("To") + ":", form[0]
for k, v in form[1].items():
print "\t" + k, ":", v
print
def printUploads(self):
"""Print urls accepting uploads"""
if self.uploads != []:
sys.stderr.write("\n+ " + _("Upload Scripts") + ":\n")
for up in self.uploads:
print up
def exportXML(self,filename,encoding="UTF-8"):
"Export the urls and the forms found in an XML file."
xml = minidom.Document()
items = xml.createElement("items")
xml.appendChild(items)
for lien in self.browsed.keys():
get = xml.createElement("get")
get.setAttribute("url", lien)
items.appendChild(get)
for form in self.forms:
post = xml.createElement("post")
post.setAttribute("url", form[0])
post.setAttribute("referer", form[2])
for k, v in form[1].items():
var = xml.createElement("var")
var.setAttribute("name", k)
var.setAttribute("value", v)
post.appendChild(var)
items.appendChild(post)
for up in self.uploads:
upl = xml.createElement("upload")
upl.setAttribute("url", up)
items.appendChild(upl)
fd = open(filename,"w")
xml.writexml(fd, " ", " ", "\n", encoding)
fd.close()
def getLinks(self):
return self.browsed
def getForms(self):
return self.forms
def getUploads(self):
self.uploads.sort()
return self.uploads
def saveCrawlerData(self):
self.persister.setRootURL(self.root);
self.persister.setToBrose(self.tobrowse);
self.persister.setBrowsed(self.browsed);
self.persister.setForms (self.forms);
self.persister.setUploads(self.uploads);
self.persister.saveXML(self.persister.CRAWLER_DATA_DIR + '/' + self.server + '.xml')
class linkParser(HTMLParser.HTMLParser):
"""Extract urls in 'a' href HTML tags"""
def __init__(self, url = ""):
HTMLParser.HTMLParser.__init__(self)
self.liens = []
self.forms = []
self.form_values = {}
self.inform = 0
self.current_form_url = url
self.uploads = []
self.current_form_method = "get"
self.url = url
def handle_starttag(self, tag, attrs):
tmpdict = {}
val = None
for k, v in dict(attrs).items():
tmpdict[k.lower()] = v
if tag.lower() == 'a':
if "href" in tmpdict.keys():
self.liens.append(tmpdict['href'])
if tag.lower() == 'form':
self.inform = 1
self.form_values = {}
self.current_form_url = self.url
if "action" in tmpdict.keys():
self.liens.append(tmpdict['action'])
self.current_form_url = tmpdict['action']
# Forms use GET method by default
self.current_form_method = "get"
if "method" in tmpdict.keys():
if tmpdict["method"].lower() == "post":
self.current_form_method = "post"
if tag.lower() == 'input':
if self.inform == 1:
if "type" not in tmpdict.keys():
tmpdict["type"] = "text"
if "name" in tmpdict.keys():
if tmpdict['type'].lower() in ['text', 'password', 'radio',
'checkbox', 'hidden', 'submit', 'search']:
# use default value if present or set it to 'on'
if "value" in tmpdict.keys():
if tmpdict["value"] != "": val = tmpdict["value"]
else: val = u"on"
else: val = u"on"
self.form_values.update(dict([(tmpdict['name'], val)]))
if tmpdict['type'].lower() == "file":
self.uploads.append(self.current_form_url)
if tag.lower() in ["textarea", "select"]:
if self.inform == 1:
if "name" in tmpdict.keys():
self.form_values.update(dict([(tmpdict['name'], u'on')]))
if tag.lower() in ["frame", "iframe"]:
if "src" in tmpdict.keys():
self.liens.append(tmpdict['src'])
def handle_endtag(self, tag):
if tag.lower() == 'form':
self.inform = 0
if self.current_form_method == "post":
self.forms.append((self.current_form_url, self.form_values))
else:
l = ["=".join([k, v]) for k, v in self.form_values.items()]
l.sort()
self.liens.append(self.current_form_url.split("?")[0] + "?" + "&".join(l))
class linkParser2:
verbose = 0
"""Extract urls in 'a' href HTML tags"""
def __init__(self, url = "", verb = 0):
self.liens = []
self.forms = []
self.form_values = {}
self.inform = 0
self.current_form_url = ""
self.uploads = []
self.current_form_method = "get"
self.verbose = verb
def __findTagAttributes(self, tag):
attDouble = re.findall('<\w*[ ]| *(.*?)[ ]*=[ ]*"(.*?)"[ +|>]', tag)
attSingle = re.findall('<\w*[ ]| *(.*?)[ ]*=[ ]*\'(.*?)\'[ +|>]', tag)
attNone = re.findall('<\w*[ ]| *(.*?)[ ]*=[ ]*["|\']?(.*?)["|\']?[ +|>]', tag)
attNone.extend(attSingle)
attNone.extend(attDouble)
return attNone
def feed(self, htmlSource):
htmlSource = htmlSource.replace("\n", "")
htmlSource = htmlSource.replace("\r", "")
htmlSource = htmlSource.replace("\t", "")
links = re.findall('<a.*?>', htmlSource)
linkAttributes = []
for link in links:
linkAttributes.append(self.__findTagAttributes(link))
#Finding all the forms: getting the text from "<form..." to "...</form>"
#the array forms will contain all the forms of the page
forms = re.findall('<form.*?>.*?</form>', htmlSource)
formsAttributes = []
for form in forms:
formsAttributes.append(self.__findTagAttributes(form))
#Processing the forms, obtaining the method and all the inputs
#Also finding the method of the forms
inputsInForms = []
textAreasInForms = []
selectsInForms = []
for form in forms:
inputsInForms .append(re.findall('<input.*?>', form))
textAreasInForms.append(re.findall('<textarea.*?>', form))
selectsInForms .append(re.findall('<select.*?>', form))
#Extracting the attributes of the <input> tag as XML parser
inputsAttributes = []
for i in range(len(inputsInForms)):
inputsAttributes.append([])
for inputt in inputsInForms[i]:
inputsAttributes[i].append(self.__findTagAttributes(inputt))
selectsAttributes = []
for i in range(len(selectsInForms)):
selectsAttributes.append([])
for select in selectsInForms[i]:
selectsAttributes[i].append(self.__findTagAttributes(select))
textAreasAttributes = []
for i in range(len(textAreasInForms)):
textAreasAttributes.append([])
for textArea in textAreasInForms[i]:
textAreasAttributes[i].append(self.__findTagAttributes(textArea))
if(self.verbose == 3):
print "\n\n" + _("Forms")
print "====="
for i in range(len(forms)):
print _("Form") + " " + str(i)
tmpdict = {}
for k, v in dict(formsAttributes[i]).items():
tmpdict[k.lower()] = v
print " * " + _("Method") + ": " + self.__decode_htmlentities(tmpdict['action'])
print " * " + _("Intputs") + ": "
for j in range(len(inputsInForms[i])):
print " + " + inputsInForms[i][j]
for att in inputsAttributes[i][j]:
print " - " + str(att)
print " * " + _("Selects") + ": "
for j in range(len(selectsInForms[i])):
print " + " + selectsInForms[i][j]
for att in selectsAttributes[i][j]:
print " - " + str(att)
print " * " + _("TextAreas")+": "
for j in range(len(textAreasInForms[i])):
print " + " + textAreasInForms[i][j]
for att in textAreasAttributes[i][j]:
print " - " + str(att)
print "\n"+_("URLS")
print "===="
for i in range(len(links)):
tmpdict = {}
for k, v in dict(linkAttributes[i]).items():
tmpdict[k.lower()] = v
if "href" in tmpdict.keys():
self.liens.append(self.__decode_htmlentities(tmpdict['href']))
if(self.verbose == 3):
print self.__decode_htmlentities(tmpdict['href'])
for i in range(len(forms)):
tmpdict = {}
for k, v in dict(formsAttributes[i]).items():
tmpdict[k.lower()] = v
self.form_values = {}
if "action" in tmpdict.keys():
self.liens.append(self.__decode_htmlentities(tmpdict['action']))
self.current_form_url = self.__decode_htmlentities(tmpdict['action'])
# Forms use GET method by default
self.current_form_method = "get"
if "method" in tmpdict.keys():
if tmpdict["method"].lower() == "post":
self.current_form_method = "post"
for j in range(len(inputsAttributes[i])):
tmpdict = {}
for k, v in dict(inputsAttributes[i][j]).items():
tmpdict[k.lower()] = v
if "type" not in tmpdict.keys():
tmpdict["type"] = "text"
if "name" in tmpdict.keys():
if tmpdict['type'].lower() in \
['text', 'password', 'radio', 'checkbox', 'hidden',
'submit', 'search']:
# use default value if present or set it to 'on'
if "value" in tmpdict.keys():
if tmpdict["value"] != "": val = tmpdict["value"]
else: val = u"on"
else: val = u"on"
self.form_values.update(dict([(tmpdict['name'], val)]))
if tmpdict['type'].lower() == "file":
self.uploads.append(self.current_form_url)
for j in range(len(textAreasAttributes[i])):
tmpdict = {}
for k, v in dict(textAreasAttributes[i][j]).items():
tmpdict[k.lower()] = v
if "name" in tmpdict.keys():
self.form_values.update(dict([(tmpdict['name'], u'on')]))
for j in range(len(selectsAttributes[i])):
tmpdict = {}
for k, v in dict(selectsAttributes[i][j]).items():
tmpdict[k.lower()] = v
if "name" in tmpdict.keys():
self.form_values.update(dict([(tmpdict['name'], u'on')]))
if self.current_form_method == "post":
self.forms.append((self.current_form_url, self.form_values))
else:
l = ["=".join([k, v]) for k, v in self.form_values.items()]
l.sort()
self.liens.append(self.current_form_url.split("?")[0] + "?" + "&".join(l))
def __substitute_entity(self, match):
ent = match.group(2)
if match.group(1) == "#":
return unichr(int(ent))
else:
cp = n2cp.get(ent)
if cp:
return unichr(cp)
else:
return match.group()
def __decode_htmlentities(self, string):
entity_re = re.compile("&(#?)(\d{1,5}|\w{1,8});")
return entity_re.subn(self.__substitute_entity, string)[0]
def reset(self):
self.liens = []
self.forms = []
self.form_values = {}
self.inform = 0
self.current_form_url = ""
self.uploads = []
self.current_form_method = "get"
if __name__ == "__main__":
def _(text):
return text
try:
prox = ""
auth = []
xmloutput = ""
crawlerFile = None
if len(sys.argv)<2:
print lswww.__doc__
sys.exit(0)
if '-h' in sys.argv or '--help' in sys.argv:
print lswww.__doc__
sys.exit(0)
myls = lswww(sys.argv[1])
myls.verbosity(1)
try:
opts, args = getopt.getopt(sys.argv[2:], "hp:s:x:c:a:r:v:t:n:e:ib:",
["help", "proxy=", "start=", "exclude=", "cookie=", "auth=",
"remove=", "verbose=", "timeout=", "nice=", "export=", "continue",
"scope="])
except getopt.GetoptError, e:
print e
sys.exit(2)
for o, a in opts:
if o in ("-h", "--help"):
print lswww.__doc__
sys.exit(0)
if o in ("-s", "--start"):
if (a.find("http://", 0) == 0) or (a.find("https://", 0) == 0):
myls.addStartURL(a)
if o in ("-x", "--exclude"):
if (a.find("http://", 0) == 0) or (a.find("https://", 0) == 0):
myls.addExcludedURL(a)
if o in ("-p", "--proxy"):
myls.setProxy(a)
if o in ("-c", "--cookie"):
myls.setCookieFile(a)
if o in ("-r", "--remove"):
myls.addBadParam(a)
if o in ("-a", "--auth"):
if a.find("%") >= 0:
auth = [a.split("%")[0], a.split("%")[1]]
myls.setAuthCredentials(auth)
if o in ("-v", "--verbose"):
if str.isdigit(a):
myls.verbosity(int(a))
if o in ("-t", "--timeout"):
if str.isdigit(a):
myls.setTimeOut(int(a))
if o in ("-n", "--nice"):
if str.isdigit(a):
myls.setNice(int(a))
if o in ("-e", "--export"):
xmloutput = a
if o in ("-b", "--scope"):
myls.setScope(a)
if o in ("-i", "--continue"):
crawlerPersister = CrawlerPersister()
crawlerFile = crawlerPersister.CRAWLER_DATA_DIR + '/' + sys.argv[1].split("://")[1] + '.xml'
try:
opts, args = getopt.getopt(sys.argv[2:], "hp:s:x:c:a:r:v:t:n:e:i:b:",
["help", "proxy=", "start=", "exclude=", "cookie=", "auth=",
"remove=", "verbose=", "timeout=", "nice=", "export=", "continue=",
"scope="])
except getopt.GetoptError, e:
""
for o, a in opts:
if o in ("-i", "--continue"):
if a != '' and a[0] != '-':
crawlerFile = a
myls.go(crawlerFile)
myls.printLinks()
myls.printForms()
myls.printUploads()
if xmloutput != "":
myls.exportXML(xmloutput)
except SystemExit:
pass
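# Example invocation (host and option values are illustrative; see the class
# docstring for the full option list):
#
#   python lswww.py http://server.com/base/url/ -v 2 -n 50 -e urls.xml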
|
teknolab/teknolab-wapiti
|
wapiti/net/lswww.py
|
Python
|
gpl-2.0
| 32,330
|
[
"VisIt"
] |
6903c2381716ce6462d40ec835c9f3fdca3afdb1def828c3c6f7fd15af3ba07f
|
import os
from setuptools import setup, find_packages
from setuptools.extension import Extension
import numpy as np
here = os.path.abspath(os.path.dirname(__file__))
try:
README = open(os.path.join(here, 'README.rst')).read()
except IOError:
README = ''
# Test if rdkit is present with INCHI support
try:
from rdkit.Chem.inchi import INCHI_AVAILABLE
if not INCHI_AVAILABLE:
raise Exception('RDKit with INCHI support is required')
except ImportError:
raise Exception('RDKit with INCHI support is required')
# Test if pp is present
try:
import pp
except ImportError:
raise Exception('Parallel Python (pp) is required')
# Only use Cython if it is available, else just use the pre-generated files
try:
from Cython.Distutils import build_ext
source_ext = '.pyx'
cmdclass = {'build_ext': build_ext}
except ImportError:
# If missing can be created with 'cython magma/fragmentation_cy.pyx'
source_ext = '.c'
cmdclass = {}
ext_modules = [Extension('magma.fragmentation_cy',
['magma/fragmentation_cy' + source_ext])]
setup(
name='Magma',
version='1.3',
license='commercial',
author='Lars Ridder',
    author_email='lars.ridder@esciencecenter.nl',
url='http://www.esciencecenter.nl',
description='Ms Annotation based on in silico Generated Metabolites',
long_description=README,
classifiers=["Intended Audience :: Science/Research",
"Environment :: Console",
"Natural Language :: English",
"Operating System :: OS Independent",
"Topic :: Scientific/Engineering :: Chemistry",
],
packages=find_packages(),
install_requires=['sqlalchemy', 'lxml', 'numpy', 'requests', 'macauthlib', 'mock', 'nose', 'coverage'],
package_data={
'magma': ['data/*.smirks', 'script/reactor'],
},
entry_points={
'console_scripts': [
'magma = magma.script:main',
],
},
cmdclass=cmdclass,
ext_modules=ext_modules,
include_dirs=[np.get_include()],
)
|
NLeSC/MAGMa
|
job/setup.py
|
Python
|
apache-2.0
| 2,099
|
[
"RDKit"
] |
520146f7970cbfc84e50d5b3a32356c3ce1a53519ee1df438ab1b5a603714b2b
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import mock
import time
import unittest
import logging
import functools
from nose.tools import * # noqa: F403
import pytest
from framework.auth.core import Auth
from website import settings
import website.search.search as search
from website.search import elastic_search
from website.search.util import build_query
from website.search_migration.migrate import migrate
from osf.models import (
Retraction,
NodeLicense,
OSFGroup,
Tag,
Preprint,
QuickFilesNode,
)
from addons.wiki.models import WikiPage
from addons.osfstorage.models import OsfStorageFile
from scripts.populate_institutions import main as populate_institutions
from osf_tests import factories
from tests.base import OsfTestCase
from tests.test_features import requires_search
from tests.utils import run_celery_tasks
TEST_INDEX = 'test'
def query(term, raw=False):
results = search.search(build_query(term), index=elastic_search.INDEX, raw=raw)
return results
def query_collections(name):
term = 'category:collectionSubmission AND "{}"'.format(name)
return query(term, raw=True)
def query_user(name):
term = 'category:user AND "{}"'.format(name)
return query(term)
def query_file(name):
term = 'category:file AND "{}"'.format(name)
return query(term)
def query_tag_file(name):
term = 'category:file AND (tags:u"{}")'.format(name)
return query(term)
def retry_assertion(interval=0.3, retries=3):
def test_wrapper(func):
t_interval = interval
t_retries = retries
@functools.wraps(func)
def wrapped(*args, **kwargs):
try:
func(*args, **kwargs)
except AssertionError as e:
if retries:
time.sleep(t_interval)
retry_assertion(interval=t_interval, retries=t_retries - 1)(func)(*args, **kwargs)
else:
raise e
return wrapped
return test_wrapper
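# Note: retry_assertion is a decorator *factory* and must be called, even with
# default arguments. A sketch (the test body is illustrative):
#
#   @retry_assertion(interval=0.5, retries=5)
#   def test_eventually_indexed(self):
#       assert_equal(len(query_user('Some Name')['results']), 1)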
@pytest.mark.enable_search
@pytest.mark.enable_enqueue_task
class TestCollectionsSearch(OsfTestCase):
def setUp(self):
super(TestCollectionsSearch, self).setUp()
search.delete_index(elastic_search.INDEX)
search.create_index(elastic_search.INDEX)
self.user = factories.UserFactory(fullname='Salif Keita')
self.node_private = factories.NodeFactory(creator=self.user, title='Salif Keita: Madan', is_public=False)
self.node_public = factories.NodeFactory(creator=self.user, title='Salif Keita: Yamore', is_public=True)
self.node_one = factories.NodeFactory(creator=self.user, title='Salif Keita: Mandjou', is_public=True)
self.node_two = factories.NodeFactory(creator=self.user, title='Salif Keita: Tekere', is_public=True)
self.reg_private = factories.RegistrationFactory(title='Salif Keita: Madan', creator=self.user, is_public=False, archive=True)
self.reg_public = factories.RegistrationFactory(title='Salif Keita: Madan', creator=self.user, is_public=True, archive=True)
self.reg_one = factories.RegistrationFactory(title='Salif Keita: Madan', creator=self.user, is_public=True, archive=True)
self.provider = factories.CollectionProviderFactory()
self.reg_provider = factories.RegistrationProviderFactory()
self.collection_one = factories.CollectionFactory(creator=self.user, is_public=True, provider=self.provider)
self.collection_public = factories.CollectionFactory(creator=self.user, is_public=True, provider=self.provider)
self.collection_private = factories.CollectionFactory(creator=self.user, is_public=False, provider=self.provider)
self.reg_collection = factories.CollectionFactory(creator=self.user, provider=self.reg_provider, is_public=True)
self.reg_collection_private = factories.CollectionFactory(creator=self.user, provider=self.reg_provider, is_public=False)
def test_only_public_collections_submissions_are_searchable(self):
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 0)
self.collection_public.collect_object(self.node_private, self.user)
self.reg_collection.collect_object(self.reg_private, self.user)
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 0)
assert_false(self.node_one.is_collected)
assert_false(self.node_public.is_collected)
self.collection_one.collect_object(self.node_one, self.user)
self.collection_public.collect_object(self.node_public, self.user)
self.reg_collection.collect_object(self.reg_public, self.user)
assert_true(self.node_one.is_collected)
assert_true(self.node_public.is_collected)
assert_true(self.reg_public.is_collected)
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 3)
self.collection_private.collect_object(self.node_two, self.user)
self.reg_collection_private.collect_object(self.reg_one, self.user)
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 3)
def test_index_on_submission_privacy_changes(self):
# test_submissions_turned_private_are_deleted_from_index
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 0)
self.collection_public.collect_object(self.node_one, self.user)
self.collection_one.collect_object(self.node_one, self.user)
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 2)
with run_celery_tasks():
self.node_one.is_public = False
self.node_one.save()
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 0)
# test_submissions_turned_public_are_added_to_index
self.collection_public.collect_object(self.node_private, self.user)
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 0)
with run_celery_tasks():
self.node_private.is_public = True
self.node_private.save()
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 1)
def test_index_on_collection_privacy_changes(self):
# test_submissions_of_collection_turned_private_are_removed_from_index
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 0)
self.collection_public.collect_object(self.node_one, self.user)
self.collection_public.collect_object(self.node_two, self.user)
self.collection_public.collect_object(self.node_public, self.user)
self.reg_collection.collect_object(self.reg_public, self.user)
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 4)
with run_celery_tasks():
self.collection_public.is_public = False
self.collection_public.save()
self.reg_collection.is_public = False
self.reg_collection.save()
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 0)
# test_submissions_of_collection_turned_public_are_added_to_index
self.collection_private.collect_object(self.node_one, self.user)
self.collection_private.collect_object(self.node_two, self.user)
self.collection_private.collect_object(self.node_public, self.user)
self.reg_collection_private.collect_object(self.reg_public, self.user)
assert_true(self.node_one.is_collected)
assert_true(self.node_two.is_collected)
assert_true(self.node_public.is_collected)
assert_true(self.reg_public.is_collected)
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 0)
with run_celery_tasks():
self.collection_private.is_public = True
self.collection_private.save()
self.reg_collection.is_public = True
self.reg_collection.save()
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 4)
def test_collection_submissions_are_removed_from_index_on_delete(self):
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 0)
self.collection_public.collect_object(self.node_one, self.user)
self.collection_public.collect_object(self.node_two, self.user)
self.collection_public.collect_object(self.node_public, self.user)
self.reg_collection.collect_object(self.reg_public, self.user)
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 4)
self.collection_public.delete()
self.reg_collection.delete()
assert_true(self.collection_public.deleted)
assert_true(self.reg_collection.deleted)
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 0)
    def test_removed_submissions_are_removed_from_index(self):
self.collection_public.collect_object(self.node_one, self.user)
self.reg_collection.collect_object(self.reg_public, self.user)
assert_true(self.node_one.is_collected)
assert_true(self.reg_public.is_collected)
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 2)
self.collection_public.remove_object(self.node_one)
self.reg_collection.remove_object(self.reg_public)
assert_false(self.node_one.is_collected)
assert_false(self.reg_public.is_collected)
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 0)
def test_collection_submission_doc_structure(self):
self.collection_public.collect_object(self.node_one, self.user)
docs = query_collections('Keita')['results']
assert_equal(docs[0]['_source']['title'], self.node_one.title)
with run_celery_tasks():
self.node_one.title = 'Keita Royal Family of Mali'
self.node_one.save()
docs = query_collections('Keita')['results']
assert_equal(docs[0]['_source']['title'], self.node_one.title)
assert_equal(docs[0]['_source']['abstract'], self.node_one.description)
assert_equal(docs[0]['_source']['contributors'][0]['url'], self.user.url)
assert_equal(docs[0]['_source']['contributors'][0]['fullname'], self.user.fullname)
assert_equal(docs[0]['_source']['url'], self.node_one.url)
assert_equal(docs[0]['_source']['id'], '{}-{}'.format(self.node_one._id,
self.node_one.collecting_metadata_list[0].collection._id))
assert_equal(docs[0]['_source']['category'], 'collectionSubmission')
def test_search_updated_after_id_change(self):
self.provider.primary_collection.collect_object(self.node_one, self.node_one.creator)
with run_celery_tasks():
self.node_one.save()
term = f'provider:{self.provider._id}'
docs = search.search(build_query(term), index=elastic_search.INDEX, raw=True)
assert_equal(len(docs['results']), 1)
self.provider._id = 'new_id'
self.provider.save()
docs = query(f'provider:new_id', raw=True)['results']
assert_equal(len(docs), 1)
@pytest.mark.enable_search
@pytest.mark.enable_enqueue_task
class TestUserUpdate(OsfTestCase):
def setUp(self):
super(TestUserUpdate, self).setUp()
search.delete_index(elastic_search.INDEX)
search.create_index(elastic_search.INDEX)
self.user = factories.UserFactory(fullname='David Bowie')
def test_new_user(self):
# Verify that user has been added to Elastic Search
docs = query_user(self.user.fullname)['results']
assert_equal(len(docs), 1)
def test_new_user_unconfirmed(self):
user = factories.UnconfirmedUserFactory()
docs = query_user(user.fullname)['results']
assert_equal(len(docs), 0)
token = user.get_confirmation_token(user.username)
user.confirm_email(token)
user.save()
docs = query_user(user.fullname)['results']
assert_equal(len(docs), 1)
@retry_assertion
def test_change_name(self):
# Add a user, change her name, and verify that only the new name is
# found in search.
user = factories.UserFactory(fullname='Barry Mitchell')
fullname_original = user.fullname
user.fullname = user.fullname[::-1]
user.save()
docs_original = query_user(fullname_original)['results']
assert_equal(len(docs_original), 0)
docs_current = query_user(user.fullname)['results']
assert_equal(len(docs_current), 1)
def test_disabled_user(self):
# Test that disabled users are not in search index
user = factories.UserFactory(fullname='Bettie Page')
user.save()
# Ensure user is in search index
assert_equal(len(query_user(user.fullname)['results']), 1)
# Disable the user
user.is_disabled = True
user.save()
# Ensure user is not in search index
assert_equal(len(query_user(user.fullname)['results']), 0)
@pytest.mark.enable_quickfiles_creation
def test_merged_user(self):
user = factories.UserFactory(fullname='Annie Lennox')
merged_user = factories.UserFactory(fullname='Lisa Stansfield')
user.save()
merged_user.save()
assert_equal(len(query_user(user.fullname)['results']), 1)
assert_equal(len(query_user(merged_user.fullname)['results']), 1)
user.merge_user(merged_user)
assert_equal(len(query_user(user.fullname)['results']), 1)
assert_equal(len(query_user(merged_user.fullname)['results']), 0)
def test_employment(self):
user = factories.UserFactory(fullname='Helga Finn')
user.save()
institution = 'Finn\'s Fine Filers'
docs = query_user(institution)['results']
assert_equal(len(docs), 0)
user.jobs.append({
'institution': institution,
'title': 'The Big Finn',
})
user.save()
docs = query_user(institution)['results']
assert_equal(len(docs), 1)
def test_education(self):
user = factories.UserFactory(fullname='Henry Johnson')
user.save()
institution = 'Henry\'s Amazing School!!!'
docs = query_user(institution)['results']
assert_equal(len(docs), 0)
user.schools.append({
'institution': institution,
'degree': 'failed all classes',
})
user.save()
docs = query_user(institution)['results']
assert_equal(len(docs), 1)
def test_name_fields(self):
names = ['Bill Nye', 'William', 'the science guy', 'Sanford', 'the Great']
user = factories.UserFactory(fullname=names[0])
user.given_name = names[1]
user.middle_names = names[2]
user.family_name = names[3]
user.suffix = names[4]
user.save()
docs = [query_user(name)['results'] for name in names]
assert_equal(sum(map(len, docs)), len(docs)) # 1 result each
assert_true(all([user._id == doc[0]['id'] for doc in docs]))
@pytest.mark.enable_search
@pytest.mark.enable_enqueue_task
class TestProject(OsfTestCase):
def setUp(self):
super(TestProject, self).setUp()
search.delete_index(elastic_search.INDEX)
search.create_index(elastic_search.INDEX)
self.user = factories.UserFactory(fullname='John Deacon')
self.project = factories.ProjectFactory(title='Red Special', creator=self.user)
def test_new_project_private(self):
# Verify that a private project is not present in Elastic Search.
docs = query(self.project.title)['results']
assert_equal(len(docs), 0)
def test_make_public(self):
# Make project public, and verify that it is present in Elastic
# Search.
with run_celery_tasks():
self.project.set_privacy('public')
docs = query(self.project.title)['results']
assert_equal(len(docs), 1)
@pytest.mark.enable_search
@pytest.mark.enable_enqueue_task
class TestOSFGroup(OsfTestCase):
def setUp(self):
with run_celery_tasks():
super(TestOSFGroup, self).setUp()
search.delete_index(elastic_search.INDEX)
search.create_index(elastic_search.INDEX)
self.user = factories.UserFactory(fullname='John Deacon')
self.user_two = factories.UserFactory(fullname='Grapes McGee')
self.group = OSFGroup(
name='Cornbread',
creator=self.user,
)
self.group.save()
self.project = factories.ProjectFactory(is_public=True, creator=self.user, title='Biscuits')
self.project.save()
def test_create_osf_group(self):
title = 'Butter'
group = OSFGroup(name=title, creator=self.user)
group.save()
docs = query(title)['results']
assert_equal(len(docs), 1)
def test_set_group_name(self):
title = 'Eggs'
self.group.set_group_name(title)
self.group.save()
docs = query(title)['results']
assert_equal(len(docs), 1)
docs = query('Cornbread')['results']
assert_equal(len(docs), 0)
def test_add_member(self):
self.group.make_member(self.user_two)
docs = query('category:group AND "{}"'.format(self.user_two.fullname))['results']
assert_equal(len(docs), 1)
self.group.make_manager(self.user_two)
docs = query('category:group AND "{}"'.format(self.user_two.fullname))['results']
assert_equal(len(docs), 1)
self.group.remove_member(self.user_two)
docs = query('category:group AND "{}"'.format(self.user_two.fullname))['results']
assert_equal(len(docs), 0)
def test_connect_to_node(self):
self.project.add_osf_group(self.group)
docs = query('category:project AND "{}"'.format(self.group.name))['results']
assert_equal(len(docs), 1)
self.project.remove_osf_group(self.group)
docs = query('category:project AND "{}"'.format(self.group.name))['results']
assert_equal(len(docs), 0)
def test_remove_group(self):
group_name = self.group.name
self.project.add_osf_group(self.group)
docs = query('category:project AND "{}"'.format(group_name))['results']
assert_equal(len(docs), 1)
self.group.remove_group()
docs = query('category:project AND "{}"'.format(group_name))['results']
assert_equal(len(docs), 0)
docs = query(group_name)['results']
assert_equal(len(docs), 0)
@pytest.mark.enable_search
@pytest.mark.enable_enqueue_task
class TestPreprint(OsfTestCase):
def setUp(self):
with run_celery_tasks():
super(TestPreprint, self).setUp()
search.delete_index(elastic_search.INDEX)
search.create_index(elastic_search.INDEX)
self.user = factories.UserFactory(fullname='John Deacon')
self.preprint = Preprint(
title='Red Special',
description='We are the champions',
creator=self.user,
provider=factories.PreprintProviderFactory()
)
self.preprint.save()
self.file = OsfStorageFile.create(
target=self.preprint,
path='/panda.txt',
name='panda.txt',
materialized_path='/panda.txt')
self.file.save()
self.published_preprint = factories.PreprintFactory(
creator=self.user,
title='My Fairy King',
description='Under pressure',
)
def test_new_preprint_unsubmitted(self):
# Verify that an unsubmitted preprint is not present in Elastic Search.
title = 'Apple'
self.preprint.title = title
self.preprint.save()
docs = query(title)['results']
assert_equal(len(docs), 0)
def test_new_preprint_unpublished(self):
# Verify that an unpublished preprint is not present in Elastic Search.
title = 'Banana'
self.preprint = factories.PreprintFactory(creator=self.user, is_published=False, title=title)
assert self.preprint.title == title
docs = query(title)['results']
assert_equal(len(docs), 0)
def test_unsubmitted_preprint_primary_file(self):
        # Verify that an unsubmitted preprint's primary_file is not present in Elastic Search.
title = 'Cantaloupe'
self.preprint.title = title
self.preprint.set_primary_file(self.file, auth=Auth(self.user), save=True)
assert self.preprint.title == title
docs = query(title)['results']
assert_equal(len(docs), 0)
def test_publish_preprint(self):
title = 'Date'
self.preprint = factories.PreprintFactory(creator=self.user, is_published=False, title=title)
self.preprint.set_published(True, auth=Auth(self.preprint.creator), save=True)
assert self.preprint.title == title
docs = query(title)['results']
# Both preprint and primary_file showing up in Elastic
assert_equal(len(docs), 2)
def test_preprint_title_change(self):
title_original = self.published_preprint.title
new_title = 'New preprint title'
self.published_preprint.set_title(new_title, auth=Auth(self.user), save=True)
docs = query('category:preprint AND ' + title_original)['results']
assert_equal(len(docs), 0)
docs = query('category:preprint AND ' + new_title)['results']
assert_equal(len(docs), 1)
def test_preprint_description_change(self):
description_original = self.published_preprint.description
new_abstract = 'My preprint abstract'
self.published_preprint.set_description(new_abstract, auth=Auth(self.user), save=True)
docs = query(self.published_preprint.title)['results']
docs = query('category:preprint AND ' + description_original)['results']
assert_equal(len(docs), 0)
docs = query('category:preprint AND ' + new_abstract)['results']
assert_equal(len(docs), 1)
def test_set_preprint_private(self):
# Not currently an option for users, but can be used for spam
self.published_preprint.set_privacy('private', auth=Auth(self.user), save=True)
docs = query(self.published_preprint.title)['results']
# Both preprint and primary_file showing up in Elastic
assert_equal(len(docs), 0)
def test_set_primary_file(self):
# Only primary_file should be in index, if primary_file is changed, other files are removed from index.
self.file = OsfStorageFile.create(
target=self.published_preprint,
path='/panda.txt',
name='panda.txt',
materialized_path='/panda.txt')
self.file.save()
self.published_preprint.set_primary_file(self.file, auth=Auth(self.user), save=True)
docs = query(self.published_preprint.title)['results']
assert_equal(len(docs), 2)
assert_equal(docs[1]['name'], self.file.name)
def test_set_license(self):
license_details = {
'id': 'NONE',
'year': '2015',
'copyrightHolders': ['Iron Man']
}
title = 'Elderberry'
self.published_preprint.title = title
self.published_preprint.set_preprint_license(license_details, Auth(self.user), save=True)
assert self.published_preprint.title == title
docs = query(title)['results']
assert_equal(len(docs), 2)
assert_equal(docs[0]['license']['copyright_holders'][0], 'Iron Man')
assert_equal(docs[0]['license']['name'], 'No license')
def test_add_tags(self):
tags = ['stonecoldcrazy', 'just a poor boy', 'from-a-poor-family']
for tag in tags:
docs = query('tags:"{}"'.format(tag))['results']
assert_equal(len(docs), 0)
self.published_preprint.add_tag(tag, Auth(self.user), save=True)
for tag in tags:
docs = query('tags:"{}"'.format(tag))['results']
assert_equal(len(docs), 1)
def test_remove_tag(self):
tags = ['stonecoldcrazy', 'just a poor boy', 'from-a-poor-family']
for tag in tags:
self.published_preprint.add_tag(tag, Auth(self.user), save=True)
self.published_preprint.remove_tag(tag, Auth(self.user), save=True)
docs = query('tags:"{}"'.format(tag))['results']
assert_equal(len(docs), 0)
def test_add_contributor(self):
# Add a contributor, then verify that project is found when searching
# for contributor.
user2 = factories.UserFactory(fullname='Adam Lambert')
docs = query('category:preprint AND "{}"'.format(user2.fullname))['results']
assert_equal(len(docs), 0)
# with run_celery_tasks():
self.published_preprint.add_contributor(user2, save=True)
docs = query('category:preprint AND "{}"'.format(user2.fullname))['results']
assert_equal(len(docs), 1)
def test_remove_contributor(self):
# Add and remove a contributor, then verify that project is not found
# when searching for contributor.
user2 = factories.UserFactory(fullname='Brian May')
self.published_preprint.add_contributor(user2, save=True)
self.published_preprint.remove_contributor(user2, Auth(self.user))
docs = query('category:preprint AND "{}"'.format(user2.fullname))['results']
assert_equal(len(docs), 0)
def test_hide_contributor(self):
user2 = factories.UserFactory(fullname='Brian May')
self.published_preprint.add_contributor(user2)
self.published_preprint.set_visible(user2, False, save=True)
docs = query('category:preprint AND "{}"'.format(user2.fullname))['results']
assert_equal(len(docs), 0)
self.published_preprint.set_visible(user2, True, save=True)
docs = query('category:preprint AND "{}"'.format(user2.fullname))['results']
assert_equal(len(docs), 1)
def test_move_contributor(self):
user2 = factories.UserFactory(fullname='Brian May')
self.published_preprint.add_contributor(user2, save=True)
docs = query('category:preprint AND "{}"'.format(user2.fullname))['results']
assert_equal(len(docs), 1)
        assert_equal(docs[0]['contributors'][0]['fullname'], self.user.fullname)
        assert_equal(docs[0]['contributors'][1]['fullname'], user2.fullname)
self.published_preprint.move_contributor(user2, Auth(self.user), 0)
docs = query('category:preprint AND "{}"'.format(user2.fullname))['results']
assert_equal(len(docs), 1)
        assert_equal(docs[0]['contributors'][0]['fullname'], user2.fullname)
        assert_equal(docs[0]['contributors'][1]['fullname'], self.user.fullname)
def test_tag_aggregation(self):
tags = ['stonecoldcrazy', 'just a poor boy', 'from-a-poor-family']
for tag in tags:
self.published_preprint.add_tag(tag, Auth(self.user), save=True)
docs = query(self.published_preprint.title)['tags']
assert len(docs) == 3
for doc in docs:
assert doc['key'] in tags
@pytest.mark.enable_search
@pytest.mark.enable_enqueue_task
class TestNodeSearch(OsfTestCase):
def setUp(self):
super(TestNodeSearch, self).setUp()
with run_celery_tasks():
self.node = factories.ProjectFactory(is_public=True, title='node')
self.public_child = factories.ProjectFactory(parent=self.node, is_public=True, title='public_child')
self.private_child = factories.ProjectFactory(parent=self.node, title='private_child')
self.public_subchild = factories.ProjectFactory(parent=self.private_child, is_public=True)
self.node.node_license = factories.NodeLicenseRecordFactory()
self.node.save()
self.query = 'category:project & category:component'
@retry_assertion()
def test_node_license_added_to_search(self):
docs = query(self.query)['results']
node = [d for d in docs if d['title'] == self.node.title][0]
assert_in('license', node)
assert_equal(node['license']['id'], self.node.node_license.license_id)
    @unittest.skip('Elasticsearch latency seems to be causing these tests to fail randomly.')
@retry_assertion(retries=10)
    def test_node_license_propagates_to_children(self):
docs = query(self.query)['results']
child = [d for d in docs if d['title'] == self.public_child.title][0]
assert_in('license', child)
assert_equal(child['license'].get('id'), self.node.node_license.license_id)
child = [d for d in docs if d['title'] == self.public_subchild.title][0]
assert_in('license', child)
assert_equal(child['license'].get('id'), self.node.node_license.license_id)
    @unittest.skip('Elasticsearch latency seems to be causing these tests to fail randomly.')
@retry_assertion(retries=10)
def test_node_license_updates_correctly(self):
other_license = NodeLicense.objects.get(name='MIT License')
new_license = factories.NodeLicenseRecordFactory(node_license=other_license)
self.node.node_license = new_license
self.node.save()
docs = query(self.query)['results']
for doc in docs:
assert_equal(doc['license'].get('id'), new_license.license_id)
@pytest.mark.enable_search
@pytest.mark.enable_enqueue_task
class TestRegistrationRetractions(OsfTestCase):
def setUp(self):
super(TestRegistrationRetractions, self).setUp()
self.user = factories.UserFactory(fullname='Doug Bogie')
self.title = 'Red Special'
self.consolidate_auth = Auth(user=self.user)
self.project = factories.ProjectFactory(
title=self.title,
description='',
creator=self.user,
is_public=True,
)
self.registration = factories.RegistrationFactory(project=self.project, is_public=True)
@mock.patch('osf.models.registrations.Registration.archiving', mock.PropertyMock(return_value=False))
def test_retraction_is_searchable(self):
self.registration.retract_registration(self.user)
self.registration.retraction.state = Retraction.APPROVED
self.registration.retraction.save()
self.registration.save()
self.registration.retraction._on_complete(self.user)
docs = query('category:registration AND ' + self.title)['results']
assert_equal(len(docs), 1)
@mock.patch('osf.models.registrations.Registration.archiving', mock.PropertyMock(return_value=False))
def test_pending_retraction_wiki_content_is_searchable(self):
# Add unique string to wiki
wiki_content = {'home': 'public retraction test'}
for key, value in wiki_content.items():
docs = query(value)['results']
assert_equal(len(docs), 0)
with run_celery_tasks():
WikiPage.objects.create_for_node(self.registration, key, value, self.consolidate_auth)
# Query and ensure unique string shows up
docs = query(value)['results']
assert_equal(len(docs), 1)
# Query and ensure registration does show up
docs = query('category:registration AND ' + self.title)['results']
assert_equal(len(docs), 1)
# Retract registration
self.registration.retract_registration(self.user, '')
with run_celery_tasks():
self.registration.save()
self.registration.reload()
        # Query and ensure unique string in wiki still shows up (retraction is only pending)
docs = query('category:registration AND "{}"'.format(wiki_content['home']))['results']
assert_equal(len(docs), 1)
# Query and ensure registration does show up
docs = query('category:registration AND ' + self.title)['results']
assert_equal(len(docs), 1)
@mock.patch('osf.models.registrations.Registration.archiving', mock.PropertyMock(return_value=False))
def test_retraction_wiki_content_is_not_searchable(self):
# Add unique string to wiki
wiki_content = {'home': 'public retraction test'}
for key, value in wiki_content.items():
docs = query(value)['results']
assert_equal(len(docs), 0)
with run_celery_tasks():
WikiPage.objects.create_for_node(self.registration, key, value, self.consolidate_auth)
# Query and ensure unique string shows up
docs = query(value)['results']
assert_equal(len(docs), 1)
# Query and ensure registration does show up
docs = query('category:registration AND ' + self.title)['results']
assert_equal(len(docs), 1)
# Retract registration
self.registration.retract_registration(self.user, '')
self.registration.retraction.state = Retraction.APPROVED
with run_celery_tasks():
self.registration.retraction.save()
self.registration.save()
self.registration.update_search()
# Query and ensure unique string in wiki doesn't show up
docs = query('category:registration AND "{}"'.format(wiki_content['home']))['results']
assert_equal(len(docs), 0)
# Query and ensure registration does show up
docs = query('category:registration AND ' + self.title)['results']
assert_equal(len(docs), 1)
@pytest.mark.enable_search
@pytest.mark.enable_enqueue_task
class TestPublicNodes(OsfTestCase):
def setUp(self):
with run_celery_tasks():
super(TestPublicNodes, self).setUp()
self.user = factories.UserFactory(fullname='Doug Bogie')
self.title = 'Red Special'
self.consolidate_auth = Auth(user=self.user)
self.project = factories.ProjectFactory(
title=self.title,
description='',
creator=self.user,
is_public=True,
)
self.component = factories.NodeFactory(
parent=self.project,
description='',
title=self.title,
creator=self.user,
is_public=True
)
self.registration = factories.RegistrationFactory(
title=self.title,
description='',
creator=self.user,
is_public=True,
)
self.registration.archive_job.target_addons.clear()
self.registration.archive_job.status = 'SUCCESS'
self.registration.archive_job.save()
def test_make_private(self):
# Make project public, then private, and verify that it is not present
# in search.
with run_celery_tasks():
self.project.set_privacy('private')
docs = query('category:project AND ' + self.title)['results']
assert_equal(len(docs), 0)
with run_celery_tasks():
self.component.set_privacy('private')
docs = query('category:component AND ' + self.title)['results']
assert_equal(len(docs), 0)
def test_search_node_partial(self):
self.project.set_title('Blue Rider-Express', self.consolidate_auth)
with run_celery_tasks():
self.project.save()
find = query('Blue')['results']
assert_equal(len(find), 1)
def test_search_node_partial_with_sep(self):
self.project.set_title('Blue Rider-Express', self.consolidate_auth)
with run_celery_tasks():
self.project.save()
find = query('Express')['results']
assert_equal(len(find), 1)
def test_search_node_not_name(self):
self.project.set_title('Blue Rider-Express', self.consolidate_auth)
with run_celery_tasks():
self.project.save()
find = query('Green Flyer-Slow')['results']
assert_equal(len(find), 0)
def test_public_parent_title(self):
self.project.set_title('hello & world', self.consolidate_auth)
with run_celery_tasks():
self.project.save()
docs = query('category:component AND ' + self.title)['results']
assert_equal(len(docs), 1)
assert_equal(docs[0]['parent_title'], 'hello & world')
assert_true(docs[0]['parent_url'])
def test_make_parent_private(self):
        # Make parent of component public, then private, and verify that the
# component still appears but doesn't link to the parent in search.
with run_celery_tasks():
self.project.set_privacy('private')
docs = query('category:component AND ' + self.title)['results']
assert_equal(len(docs), 1)
assert_false(docs[0]['parent_title'])
assert_false(docs[0]['parent_url'])
def test_delete_project(self):
with run_celery_tasks():
self.component.remove_node(self.consolidate_auth)
docs = query('category:component AND ' + self.title)['results']
assert_equal(len(docs), 0)
with run_celery_tasks():
self.project.remove_node(self.consolidate_auth)
docs = query('category:project AND ' + self.title)['results']
assert_equal(len(docs), 0)
def test_change_title(self):
title_original = self.project.title
with run_celery_tasks():
self.project.set_title(
'Blue Ordinary', self.consolidate_auth, save=True
)
docs = query('category:project AND ' + title_original)['results']
assert_equal(len(docs), 0)
docs = query('category:project AND ' + self.project.title)['results']
assert_equal(len(docs), 1)
def test_add_tags(self):
tags = ['stonecoldcrazy', 'just a poor boy', 'from-a-poor-family']
with run_celery_tasks():
for tag in tags:
docs = query('tags:"{}"'.format(tag))['results']
assert_equal(len(docs), 0)
self.project.add_tag(tag, self.consolidate_auth, save=True)
for tag in tags:
docs = query('tags:"{}"'.format(tag))['results']
assert_equal(len(docs), 1)
def test_remove_tag(self):
tags = ['stonecoldcrazy', 'just a poor boy', 'from-a-poor-family']
for tag in tags:
self.project.add_tag(tag, self.consolidate_auth, save=True)
self.project.remove_tag(tag, self.consolidate_auth, save=True)
docs = query('tags:"{}"'.format(tag))['results']
assert_equal(len(docs), 0)
def test_update_wiki(self):
"""Add text to a wiki page, then verify that project is found when
searching for wiki text.
"""
wiki_content = {
'home': 'Hammer to fall',
'swag': '#YOLO'
}
for key, value in wiki_content.items():
docs = query(value)['results']
assert_equal(len(docs), 0)
with run_celery_tasks():
WikiPage.objects.create_for_node(self.project, key, value, self.consolidate_auth)
docs = query(value)['results']
assert_equal(len(docs), 1)
def test_clear_wiki(self):
# Add wiki text to page, then delete, then verify that project is not
# found when searching for wiki text.
wiki_content = 'Hammer to fall'
wp = WikiPage.objects.create_for_node(self.project, 'home', wiki_content, self.consolidate_auth)
with run_celery_tasks():
wp.update(self.user, '')
docs = query(wiki_content)['results']
assert_equal(len(docs), 0)
def test_add_contributor(self):
# Add a contributor, then verify that project is found when searching
# for contributor.
user2 = factories.UserFactory(fullname='Adam Lambert')
docs = query('category:project AND "{}"'.format(user2.fullname))['results']
assert_equal(len(docs), 0)
with run_celery_tasks():
self.project.add_contributor(user2, save=True)
docs = query('category:project AND "{}"'.format(user2.fullname))['results']
assert_equal(len(docs), 1)
def test_remove_contributor(self):
# Add and remove a contributor, then verify that project is not found
# when searching for contributor.
user2 = factories.UserFactory(fullname='Brian May')
self.project.add_contributor(user2, save=True)
self.project.remove_contributor(user2, self.consolidate_auth)
docs = query('category:project AND "{}"'.format(user2.fullname))['results']
assert_equal(len(docs), 0)
def test_hide_contributor(self):
user2 = factories.UserFactory(fullname='Brian May')
self.project.add_contributor(user2)
with run_celery_tasks():
self.project.set_visible(user2, False, save=True)
docs = query('category:project AND "{}"'.format(user2.fullname))['results']
assert_equal(len(docs), 0)
with run_celery_tasks():
self.project.set_visible(user2, True, save=True)
docs = query('category:project AND "{}"'.format(user2.fullname))['results']
assert_equal(len(docs), 1)
def test_wrong_order_search(self):
title_parts = self.title.split(' ')
title_parts.reverse()
title_search = ' '.join(title_parts)
docs = query(title_search)['results']
assert_equal(len(docs), 3)
def test_tag_aggregation(self):
tags = ['stonecoldcrazy', 'just a poor boy', 'from-a-poor-family']
with run_celery_tasks():
for tag in tags:
self.project.add_tag(tag, self.consolidate_auth, save=True)
docs = query(self.title)['tags']
assert len(docs) == 3
for doc in docs:
assert doc['key'] in tags
@pytest.mark.enable_search
@pytest.mark.enable_enqueue_task
class TestAddContributor(OsfTestCase):
# Tests of the search.search_contributor method
def setUp(self):
self.name1 = 'Roger1 Taylor1'
self.name2 = 'John2 Deacon2'
self.name3 = u'j\xc3\xb3ebert3 Smith3'
self.name4 = u'B\xc3\xb3bbert4 Jones4'
with run_celery_tasks():
super(TestAddContributor, self).setUp()
self.user = factories.UserFactory(fullname=self.name1)
self.user3 = factories.UserFactory(fullname=self.name3)
def test_unreg_users_dont_show_in_search(self):
unreg = factories.UnregUserFactory()
contribs = search.search_contributor(unreg.fullname)
assert_equal(len(contribs['users']), 0)
def test_unreg_users_do_show_on_projects(self):
with run_celery_tasks():
unreg = factories.UnregUserFactory(fullname='Robert Paulson')
self.project = factories.ProjectFactory(
title='Glamour Rock',
creator=unreg,
is_public=True,
)
results = query(unreg.fullname)['results']
assert_equal(len(results), 1)
def test_search_fullname(self):
# Searching for full name yields exactly one result.
contribs = search.search_contributor(self.name1)
assert_equal(len(contribs['users']), 1)
contribs = search.search_contributor(self.name2)
assert_equal(len(contribs['users']), 0)
def test_search_firstname(self):
# Searching for first name yields exactly one result.
contribs = search.search_contributor(self.name1.split(' ')[0])
assert_equal(len(contribs['users']), 1)
contribs = search.search_contributor(self.name2.split(' ')[0])
assert_equal(len(contribs['users']), 0)
def test_search_partial(self):
# Searching for part of first name yields exactly one
# result.
contribs = search.search_contributor(self.name1.split(' ')[0][:-1])
assert_equal(len(contribs['users']), 1)
contribs = search.search_contributor(self.name2.split(' ')[0][:-1])
assert_equal(len(contribs['users']), 0)
def test_search_fullname_special_character(self):
# Searching for a fullname with a special character yields
# exactly one result.
contribs = search.search_contributor(self.name3)
assert_equal(len(contribs['users']), 1)
contribs = search.search_contributor(self.name4)
assert_equal(len(contribs['users']), 0)
    def test_search_firstname_special_character(self):
# Searching for a first name with a special character yields
# exactly one result.
contribs = search.search_contributor(self.name3.split(' ')[0])
assert_equal(len(contribs['users']), 1)
contribs = search.search_contributor(self.name4.split(' ')[0])
assert_equal(len(contribs['users']), 0)
def test_search_partial_special_character(self):
# Searching for a partial name with a special character yields
        # exactly one result.
contribs = search.search_contributor(self.name3.split(' ')[0][:-1])
assert_equal(len(contribs['users']), 1)
contribs = search.search_contributor(self.name4.split(' ')[0][:-1])
assert_equal(len(contribs['users']), 0)
def test_search_profile(self):
orcid = '123456'
user = factories.UserFactory()
user.social['orcid'] = orcid
user.save()
contribs = search.search_contributor(orcid)
assert_equal(len(contribs['users']), 1)
assert_equal(len(contribs['users'][0]['social']), 1)
assert_equal(contribs['users'][0]['social']['orcid'], user.social_links['orcid'])
@pytest.mark.enable_search
@pytest.mark.enable_enqueue_task
class TestProjectSearchResults(OsfTestCase):
def setUp(self):
self.singular = 'Spanish Inquisition'
self.plural = 'Spanish Inquisitions'
self.possessive = 'Spanish\'s Inquisition'
with run_celery_tasks():
super(TestProjectSearchResults, self).setUp()
self.user = factories.UserFactory(fullname='Doug Bogie')
self.project_singular = factories.ProjectFactory(
title=self.singular,
creator=self.user,
is_public=True,
)
self.project_plural = factories.ProjectFactory(
title=self.plural,
creator=self.user,
is_public=True,
)
self.project_possessive = factories.ProjectFactory(
title=self.possessive,
creator=self.user,
is_public=True,
)
self.project_unrelated = factories.ProjectFactory(
title='Cardinal Richelieu',
creator=self.user,
is_public=True,
)
def test_singular_query(self):
# Verify searching for singular term includes singular,
# possessive and plural versions in results.
time.sleep(1)
results = query(self.singular)['results']
assert_equal(len(results), 3)
def test_plural_query(self):
# Verify searching for singular term includes singular,
# possessive and plural versions in results.
results = query(self.plural)['results']
assert_equal(len(results), 3)
def test_possessive_query(self):
# Verify searching for possessive term includes singular,
# possessive and plural versions in results.
results = query(self.possessive)['results']
assert_equal(len(results), 3)
def job(**kwargs):
keys = [
'title',
'institution',
'department',
'location',
'startMonth',
'startYear',
'endMonth',
'endYear',
'ongoing',
]
job = {}
for key in keys:
if key[-5:] == 'Month':
job[key] = kwargs.get(key, 'December')
elif key[-4:] == 'Year':
job[key] = kwargs.get(key, '2000')
else:
job[key] = kwargs.get(key, 'test_{}'.format(key))
return job
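# Hedged illustration (not part of the original tests): ``job`` backfills any
# field that is not supplied, so factories can be seeded with partial data.
# The helper name ``_job_example`` is hypothetical and is not collected by
# pytest (it does not start with ``test``).
def _job_example():
    j = job(institution='Oxford')
    assert j['institution'] == 'Oxford'   # explicit values are kept
    assert j['startMonth'] == 'December'  # *Month keys default to 'December'
    assert j['startYear'] == '2000'       # *Year keys default to '2000'
    assert j['title'] == 'test_title'     # other keys default to 'test_<key>'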
@pytest.mark.enable_search
@pytest.mark.enable_enqueue_task
class TestUserSearchResults(OsfTestCase):
def setUp(self):
with run_celery_tasks():
super(TestUserSearchResults, self).setUp()
self.user_one = factories.UserFactory(jobs=[job(institution='Oxford'),
job(institution='Star Fleet')],
fullname='Date Soong')
self.user_two = factories.UserFactory(jobs=[job(institution='Grapes la Picard'),
job(institution='Star Fleet')],
fullname='Jean-Luc Picard')
self.user_three = factories.UserFactory(jobs=[job(institution='Star Fleet'),
job(institution='Federation Medical')],
fullname='Beverly Crusher')
self.user_four = factories.UserFactory(jobs=[job(institution='Star Fleet')],
fullname='William Riker')
self.user_five = factories.UserFactory(jobs=[job(institution='Traveler intern'),
job(institution='Star Fleet Academy'),
job(institution='Star Fleet Intern')],
fullname='Wesley Crusher')
for i in range(25):
factories.UserFactory(jobs=[job()])
self.current_starfleet = [
self.user_three,
self.user_four,
]
self.were_starfleet = [
self.user_one,
self.user_two,
self.user_three,
self.user_four,
self.user_five
]
    @unittest.skip('Cannot guarantee it always passes')
def test_current_job_first_in_results(self):
results = query_user('Star Fleet')['results']
result_names = [r['names']['fullname'] for r in results]
current_starfleet_names = [u.fullname for u in self.current_starfleet]
for name in result_names[:2]:
assert_in(name, current_starfleet_names)
def test_had_job_in_results(self):
results = query_user('Star Fleet')['results']
result_names = [r['names']['fullname'] for r in results]
were_starfleet_names = [u.fullname for u in self.were_starfleet]
for name in result_names:
assert_in(name, were_starfleet_names)
@pytest.mark.enable_search
@pytest.mark.enable_enqueue_task
class TestSearchExceptions(OsfTestCase):
# Verify that the correct exception is thrown when the connection is lost
@classmethod
def setUpClass(cls):
logging.getLogger('website.project.model').setLevel(logging.CRITICAL)
super(TestSearchExceptions, cls).setUpClass()
if settings.SEARCH_ENGINE == 'elastic':
cls._client = search.search_engine.CLIENT
search.search_engine.CLIENT = None
@classmethod
def tearDownClass(cls):
super(TestSearchExceptions, cls).tearDownClass()
if settings.SEARCH_ENGINE == 'elastic':
search.search_engine.CLIENT = cls._client
@requires_search
def test_connection_error(self):
# Ensures that saving projects/users doesn't break as a result of connection errors
self.user = factories.UserFactory(fullname='Doug Bogie')
self.project = factories.ProjectFactory(
title='Tom Sawyer',
creator=self.user,
is_public=True,
)
self.user.save()
self.project.save()
@pytest.mark.enable_search
@pytest.mark.enable_enqueue_task
class TestSearchMigration(OsfTestCase):
# Verify that the correct indices are created/deleted during migration
@classmethod
def tearDownClass(cls):
super(TestSearchMigration, cls).tearDownClass()
search.create_index(settings.ELASTIC_INDEX)
def setUp(self):
super(TestSearchMigration, self).setUp()
populate_institutions(default_args=True)
self.es = search.search_engine.CLIENT
search.delete_index(settings.ELASTIC_INDEX)
search.create_index(settings.ELASTIC_INDEX)
self.user = factories.UserFactory(fullname='David Bowie')
self.project = factories.ProjectFactory(
title=settings.ELASTIC_INDEX,
creator=self.user,
is_public=True
)
self.preprint = factories.PreprintFactory(
creator=self.user
)
def test_first_migration_no_remove(self):
migrate(delete=False, remove=False, index=settings.ELASTIC_INDEX, app=self.app.app)
var = self.es.indices.get_aliases()
assert_equal(list(var[settings.ELASTIC_INDEX + '_v1']['aliases'].keys())[0], settings.ELASTIC_INDEX)
def test_multiple_migrations_no_remove(self):
for n in range(1, 21):
migrate(delete=False, remove=False, index=settings.ELASTIC_INDEX, app=self.app.app)
var = self.es.indices.get_aliases()
assert_equal(list(var[settings.ELASTIC_INDEX + '_v{}'.format(n)]['aliases'].keys())[0], settings.ELASTIC_INDEX)
def test_first_migration_with_remove(self):
migrate(delete=False, remove=True, index=settings.ELASTIC_INDEX, app=self.app.app)
var = self.es.indices.get_aliases()
assert_equal(list(var[settings.ELASTIC_INDEX + '_v1']['aliases'].keys())[0], settings.ELASTIC_INDEX)
def test_multiple_migrations_with_remove(self):
for n in range(1, 21, 2):
migrate(delete=False, remove=True, index=settings.ELASTIC_INDEX, app=self.app.app)
var = self.es.indices.get_aliases()
assert_equal(list(var[settings.ELASTIC_INDEX + '_v{}'.format(n)]['aliases'].keys())[0], settings.ELASTIC_INDEX)
migrate(delete=False, remove=True, index=settings.ELASTIC_INDEX, app=self.app.app)
var = self.es.indices.get_aliases()
assert_equal(list(var[settings.ELASTIC_INDEX + '_v{}'.format(n + 1)]['aliases'].keys())[0], settings.ELASTIC_INDEX)
assert not var.get(settings.ELASTIC_INDEX + '_v{}'.format(n))
def test_migration_institutions(self):
migrate(delete=True, index=settings.ELASTIC_INDEX, app=self.app.app)
count_query = {}
count_query['aggregations'] = {
'counts': {
'terms': {
'field': '_type',
}
}
}
institution_bucket_found = False
res = self.es.search(index=settings.ELASTIC_INDEX, doc_type=None, search_type='count', body=count_query)
for bucket in res['aggregations']['counts']['buckets']:
if bucket['key'] == u'institution':
institution_bucket_found = True
assert_equal(institution_bucket_found, True)
def test_migration_collections(self):
provider = factories.CollectionProviderFactory()
collection_one = factories.CollectionFactory(is_public=True, provider=provider)
collection_two = factories.CollectionFactory(is_public=True, provider=provider)
node = factories.NodeFactory(creator=self.user, title='Ali Bomaye', is_public=True)
collection_one.collect_object(node, self.user)
collection_two.collect_object(node, self.user)
assert node.is_collected
docs = query_collections('*')['results']
assert len(docs) == 2
docs = query_collections('Bomaye')['results']
assert len(docs) == 2
count_query = {}
count_query['aggregations'] = {
'counts': {
'terms': {
'field': '_type',
}
}
}
migrate(delete=True, index=settings.ELASTIC_INDEX, app=self.app.app)
docs = query_collections('*')['results']
assert len(docs) == 2
docs = query_collections('Bomaye')['results']
assert len(docs) == 2
res = self.es.search(index=settings.ELASTIC_INDEX, doc_type='collectionSubmission', search_type='count', body=count_query)
assert res['hits']['total'] == 2
@pytest.mark.enable_search
@pytest.mark.enable_enqueue_task
class TestSearchFiles(OsfTestCase):
def setUp(self):
super(TestSearchFiles, self).setUp()
self.node = factories.ProjectFactory(is_public=True, title='Otis')
self.osf_storage = self.node.get_addon('osfstorage')
self.root = self.osf_storage.get_root()
def test_search_file(self):
self.root.append_file('Shake.wav')
find = query_file('Shake.wav')['results']
assert_equal(len(find), 1)
def test_search_file_name_without_separator(self):
self.root.append_file('Shake.wav')
find = query_file('Shake')['results']
assert_equal(len(find), 1)
def test_delete_file(self):
file_ = self.root.append_file('I\'ve Got Dreams To Remember.wav')
find = query_file('I\'ve Got Dreams To Remember.wav')['results']
assert_equal(len(find), 1)
file_.delete()
find = query_file('I\'ve Got Dreams To Remember.wav')['results']
assert_equal(len(find), 0)
def test_add_tag(self):
file_ = self.root.append_file('That\'s How Strong My Love Is.mp3')
tag = Tag(name='Redding')
tag.save()
file_.tags.add(tag)
file_.save()
find = query_tag_file('Redding')['results']
assert_equal(len(find), 1)
def test_remove_tag(self):
file_ = self.root.append_file('I\'ve Been Loving You Too Long.mp3')
tag = Tag(name='Blue')
tag.save()
file_.tags.add(tag)
file_.save()
find = query_tag_file('Blue')['results']
assert_equal(len(find), 1)
file_.tags.remove(tag)
file_.save()
find = query_tag_file('Blue')['results']
assert_equal(len(find), 0)
def test_make_node_private(self):
self.root.append_file('Change_Gonna_Come.wav')
find = query_file('Change_Gonna_Come.wav')['results']
assert_equal(len(find), 1)
self.node.is_public = False
with run_celery_tasks():
self.node.save()
find = query_file('Change_Gonna_Come.wav')['results']
assert_equal(len(find), 0)
def test_make_private_node_public(self):
self.node.is_public = False
self.node.save()
self.root.append_file('Try a Little Tenderness.flac')
find = query_file('Try a Little Tenderness.flac')['results']
assert_equal(len(find), 0)
self.node.is_public = True
with run_celery_tasks():
self.node.save()
find = query_file('Try a Little Tenderness.flac')['results']
assert_equal(len(find), 1)
def test_delete_node(self):
node = factories.ProjectFactory(is_public=True, title='The Soul Album')
osf_storage = node.get_addon('osfstorage')
root = osf_storage.get_root()
root.append_file('The Dock of the Bay.mp3')
find = query_file('The Dock of the Bay.mp3')['results']
assert_equal(len(find), 1)
node.is_deleted = True
with run_celery_tasks():
node.save()
find = query_file('The Dock of the Bay.mp3')['results']
assert_equal(len(find), 0)
def test_file_download_url_guid(self):
file_ = self.root.append_file('Timber.mp3')
file_guid = file_.get_guid(create=True)
file_.save()
find = query_file('Timber.mp3')['results']
assert_equal(find[0]['guid_url'], '/' + file_guid._id + '/')
def test_file_download_url_no_guid(self):
file_ = self.root.append_file('Timber.mp3')
path = file_.path
deep_url = '/' + file_.target._id + '/files/osfstorage' + path + '/'
find = query_file('Timber.mp3')['results']
assert_not_equal(file_.path, '')
assert_equal(file_.path, path)
assert_equal(find[0]['guid_url'], None)
assert_equal(find[0]['deep_url'], deep_url)
@pytest.mark.enable_quickfiles_creation
def test_quickfiles_files_appear_in_search(self):
quickfiles = QuickFilesNode.objects.get(creator=self.node.creator)
quickfiles_osf_storage = quickfiles.get_addon('osfstorage')
quickfiles_root = quickfiles_osf_storage.get_root()
quickfiles_root.append_file('GreenLight.mp3')
find = query_file('GreenLight.mp3')['results']
assert_equal(len(find), 1)
assert find[0]['node_url'] == '/{}/quickfiles/'.format(quickfiles.creator._id)
@pytest.mark.enable_quickfiles_creation
    def test_qatest_quickfiles_files_do_not_appear_in_search(self):
quickfiles = QuickFilesNode.objects.get(creator=self.node.creator)
quickfiles_osf_storage = quickfiles.get_addon('osfstorage')
quickfiles_root = quickfiles_osf_storage.get_root()
file = quickfiles_root.append_file('GreenLight.mp3')
tag = Tag(name='qatest')
tag.save()
file.tags.add(tag)
file.save()
find = query_file('GreenLight.mp3')['results']
assert_equal(len(find), 0)
@pytest.mark.enable_quickfiles_creation
def test_quickfiles_spam_user_files_do_not_appear_in_search(self):
quickfiles = QuickFilesNode.objects.get(creator=self.node.creator)
quickfiles_osf_storage = quickfiles.get_addon('osfstorage')
quickfiles_root = quickfiles_osf_storage.get_root()
quickfiles_root.append_file('GreenLight.mp3')
self.node.creator.deactivate_account()
self.node.creator.confirm_spam()
self.node.creator.save()
find = query_file('GreenLight.mp3')['results']
assert_equal(len(find), 0)
|
CenterForOpenScience/osf.io
|
osf_tests/test_elastic_search.py
|
Python
|
apache-2.0
| 62,145
|
[
"Brian"
] |
512d140b27e633fc8edd7b26021632803807bf48cf6fc99d0992a16a285a6313
|
"""
Visualization functions for displaying spikes, filters, and cells.
"""
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import animation, cm, gridspec
from matplotlib.patches import Ellipse
from . import filtertools as ft
from .utils import plotwrapper
__all__ = ['raster', 'psth', 'raster_and_psth', 'spatial', 'temporal',
'plot_sta', 'play_sta', 'ellipse', 'plot_cells', 'play_rates']
@plotwrapper
def raster(spikes, labels, title='Spike raster', marker_string='ko', **kwargs):
"""
Plot a raster of spike times.
Parameters
----------
spikes : array_like
An array of spike times.
labels : array_like
An array of labels corresponding to each spike in spikes. For example,
this can indicate which cell or trial each spike came from. Spike times
are plotted on the x-axis, and labels on the y-axis.
title : string, optional
An optional title for the plot (Default: 'Spike raster').
marker_string : string, optional
The marker string passed to matplotlib's plot function (Default: 'ko').
ax : matplotlib.axes.Axes instance, optional
An optional axes onto which the data is plotted.
fig : matplotlib.figure.Figure instance, optional
An optional figure onto which the data is plotted.
kwargs : dict
Optional keyword arguments are passed to matplotlib's plot function.
Returns
-------
fig : matplotlib.figure.Figure
Matplotlib Figure object into which raster is plotted.
ax : matplotlib.axes.Axes
Matplotlib Axes object into which raster is plotted.
"""
assert len(spikes) == len(labels), "Spikes and labels must have the same length"
    _ = kwargs.pop('fig')
ax = kwargs.pop('ax')
# Plot the spikes
ax.plot(spikes, labels, marker_string, **kwargs)
# Labels, etc.
ax.set_title(title, fontdict={'fontsize': 24})
ax.set_xlabel('Time (s)', fontdict={'fontsize': 20})
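# Hedged usage sketch (not part of the original module): exercising ``raster``
# with synthetic data. The variable names and values below are illustrative
# assumptions, and the (fig, ax) return relies on the ``plotwrapper`` contract
# documented above.
def _example_raster():
    rng = np.random.RandomState(0)
    spike_times = rng.uniform(0.0, 10.0, size=200)  # 200 spikes over 10 s
    trial_ids = rng.randint(0, 20, size=200)        # trial label per spike
    fig, ax = raster(spike_times, trial_ids, title='Synthetic raster')
    plt.show()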
@plotwrapper
def psth(spikes, trial_length=None, binsize=0.01, **kwargs):
"""
Plot a PSTH from the given spike times.
Parameters
----------
spikes : array_like
An array of spike times.
trial_length : float
The length of each trial to stack, in seconds. If None (the
default), a single PSTH is plotted. If a float is passed, PSTHs
from each trial of the given length are averaged together before
plotting.
binsize : float
The size of bins used in computing the PSTH.
ax : matplotlib.axes.Axes instance, optional
An optional axes onto which the data is plotted.
fig : matplotlib.figure.Figure instance, optional
An optional figure onto which the data is plotted.
kwargs : dict
Keyword arguments passed to matplotlib's ``plot`` function.
Returns
-------
fig : matplotlib.figure.Figure
Matplotlib Figure object into which PSTH is plotted.
ax : matplotlib.axes.Axes
Matplotlib Axes object into which PSTH is plotted.
"""
_ = kwargs.pop('fig')
ax = kwargs.pop('ax')
# Input-checking
if not trial_length:
trial_length = spikes.max()
# Compute the histogram bins to use
ntrials = int(np.ceil(spikes.max() / trial_length))
basebins = np.arange(0, trial_length + binsize, binsize)
tbins = np.tile(basebins, (ntrials, 1)) + \
(np.tile(np.arange(0, ntrials),
(basebins.size, 1)).T * trial_length)
# Bin the spikes in each time bin
bspk = np.empty((tbins.shape[0], tbins.shape[1] - 1))
for trial in range(ntrials):
bspk[trial, :], _ = np.histogram(spikes, bins=tbins[trial, :])
    # Average the binned counts over trials, and divide by the binsize to get a rate in Hz
firing_rate = np.mean(bspk, axis=0) / binsize
# Plot the PSTH
ax.plot(tbins[0, :-1], firing_rate, color='k', marker=None,
linestyle='-', linewidth=2)
# Labels etc
ax.set_title('PSTH', fontsize=24)
ax.set_xlabel('Time (s)', fontsize=20)
ax.set_ylabel('Firing rate (Hz)', fontsize=20)
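# Hedged usage sketch (not part of the original module): ``psth`` stacks
# trials of ``trial_length`` seconds before averaging, so a 10 s spike train
# with trial_length=2.0 is averaged over ceil(10 / 2) = 5 trials. The
# synthetic spike times below are illustrative only.
def _example_psth():
    rng = np.random.RandomState(1)
    spikes = np.sort(rng.uniform(0.0, 10.0, size=500))
    fig, ax = psth(spikes, trial_length=2.0, binsize=0.05)
    plt.show()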
@plotwrapper
def raster_and_psth(spikes, trial_length=None, binsize=0.01, **kwargs):
"""
Plot a spike raster and a PSTH on the same set of axes.
Parameters
----------
spikes : array_like
An array of spike times.
trial_length : float
The length of each trial to stack, in seconds. If None (the default),
all spikes are plotted as part of the same trial.
binsize : float
The size of bins used in computing the PSTH.
ax : matplotlib.axes.Axes instance, optional
An optional axes onto which the data is plotted.
fig : matplotlib.figure.Figure instance, optional
An optional figure onto which the data is plotted.
kwargs : dict
Keyword arguments to matplotlib's ``plot`` function.
Returns
-------
fig : matplotlib.figure.Figure
Matplotlib Figure instance onto which the data is plotted.
ax : matplotlib.axes.Axes
Matplotlib Axes instance onto which the data is plotted.
"""
_ = kwargs.pop('fig')
ax = kwargs.pop('ax')
# Input-checking
if not trial_length:
trial_length = spikes.max()
# Compute the histogram bins to use
ntrials = int(np.ceil(spikes.max() / trial_length))
basebins = np.arange(0, trial_length + binsize, binsize)
tbins = np.tile(basebins, (ntrials, 1)) + \
(np.tile(np.arange(0, ntrials),
(basebins.size, 1)).T * trial_length)
# Bin the spikes in each time bin
bspk = np.empty((tbins.shape[0], tbins.shape[1] - 1))
for trial in range(ntrials):
bspk[trial, :], _ = np.histogram(spikes, bins=tbins[trial, :])
    # Average the binned counts over trials, and divide by the binsize to get a rate in Hz
firing_rate = np.mean(bspk, axis=0) / binsize
# Plot the PSTH
ax.plot(tbins[0, :-1], firing_rate, color='r', marker=None,
linestyle='-', linewidth=2)
ax.set_xlabel('Time (s)', fontdict={'fontsize': 20})
ax.set_ylabel('Firing rate (Hz)', color='r', fontdict={'fontsize': 20})
for tick in ax.get_yticklabels():
tick.set_color('r')
# Plot the raster
rastax = ax.twinx()
for trial in range(ntrials):
idx = np.bitwise_and(spikes > tbins[trial, 0],
spikes <= tbins[trial, -1])
rastax.plot(spikes[idx] - tbins[trial, 0],
trial * np.ones(spikes[idx].shape),
color='k', marker='.', linestyle='none')
rastax.set_ylabel('Trial #', color='k', fontdict={'fontsize': 20})
for tick in ax.get_yticklabels():
tick.set_color('k')
def play_sta(sta, repeat=True, frametime=100, cmap='seismic_r',
clim=None, dx=1.0):
"""
Plays a spatiotemporal spike-triggered average as a movie.
Parameters
----------
sta : array_like
Spike-triggered average array, shaped as ``(nt, nx, ny)``.
repeat : boolean, optional
Whether or not to repeat the animation (default is True).
frametime : float, optional
Length of time each frame is displayed for in milliseconds
(default is 100).
cmap : string, optional
Name of the colormap to use (Default: ``'seismic_r'``).
clim : array_like, optional
2-element color limit for animation; e.g. [0, 255].
dx : float, optional
The spatial sampling rate of the STA, setting the scale of the
x- and y-axes.
Returns
-------
anim : matplotlib animation object
"""
# mean subtract
X = sta.copy()
X -= X.mean()
# Initial frame
initial_frame = X[0]
# Set up the figure
fig = plt.figure()
plt.axis('equal')
spatial_range = (0.0, X.shape[1] * dx, 0.0, X.shape[2] * dx)
ax = plt.axes(xlim=spatial_range[:2],
ylim=spatial_range[2:])
img = plt.imshow(initial_frame, extent=spatial_range)
# Set up the colors
img.set_cmap(cmap)
img.set_interpolation('nearest')
if clim is not None:
img.set_clim(clim)
else:
maxval = np.max(np.abs(X))
img.set_clim([-maxval, maxval])
# Animation function (called sequentially)
def animate(i):
        ax.set_title('Frame {0:d}'.format(i + 1))
img.set_data(X[i])
# Call the animator
anim = animation.FuncAnimation(fig, animate, np.arange(X.shape[0]),
interval=frametime, repeat=repeat)
plt.show()
plt.draw()
return anim
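# Hedged usage sketch (not part of the original module): ``play_sta`` expects
# an (nt, nx, ny) array; the random array below is a stand-in for a real
# spike-triggered average.
def _example_play_sta():
    rng = np.random.RandomState(2)
    fake_sta = rng.standard_normal((25, 16, 16))  # 25 frames of a 16x16 STA
    anim = play_sta(fake_sta, frametime=50, dx=0.05)
    return anim  # keep a reference so the animation is not garbage-collected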
@plotwrapper
def spatial(filt, dx=1.0, maxval=None, **kwargs):
"""
Plot the spatial component of a full linear filter.
    If the given filter is 2D, it is assumed to be a purely spatial filter,
and is plotted directly. If the filter is 3D, it is decomposed into
its spatial and temporal components, and the spatial component is plotted.
Parameters
----------
filt : array_like
The filter whose spatial component is to be plotted. It may have
temporal components.
dx : float, optional
The spatial sampling rate of the STA, setting the scale of the
x- and y-axes.
maxval : float, optional
        The value to use as the minimal and maximal values when normalizing
        the colormap for this plot. See the ``plt.imshow()`` documentation
        for more details.
ax : matplotlib Axes object, optional
The axes on which to plot the data; defaults to creating a new figure.
Returns
-------
fig : matplotlib.figure.Figure
The figure onto which the spatial STA is plotted.
ax : matplotlib Axes object
Axes into which the spatial STA is plotted.
"""
_ = kwargs.pop('fig')
ax = kwargs.pop('ax')
if filt.ndim > 2:
spatial_filter, _ = ft.decompose(filt)
else:
spatial_filter = filt.copy()
# adjust color limits if necessary
if not maxval:
spatial_filter -= np.mean(spatial_filter)
maxval = np.max(np.abs(spatial_filter))
# plot the spatial component
spatial_range = (0.0, spatial_filter.shape[0] * dx,
0.0, spatial_filter.shape[1] * dx)
ax.imshow(spatial_filter,
cmap='seismic_r',
interpolation='nearest',
aspect='equal',
vmin=-maxval,
vmax=maxval,
extent=spatial_range,
**kwargs)
@plotwrapper
def temporal(time, filt, **kwargs):
"""
Plot the temporal component of a full linear filter.
If the given linear filter is 1D, it is assumed to be a temporal filter,
and is plotted directly. If the filter is 2 or 3D, it is decomposed into
its spatial and temporal components, and the temporal component is plotted.
Parameters
----------
time : array_like
A time vector to plot against.
filt : array_like
        The full filter to plot. May be more than 1D, but must match in size along
the first dimension with the ``time`` input.
ax : matplotlib Axes object, optional
the axes on which to plot the data; defaults to creating a new figure
Returns
-------
fig : matplotlib.figure.Figure
        The figure onto which the temporal STA is plotted.
    ax : matplotlib Axes object
        Axes into which the temporal STA is plotted.
"""
if filt.ndim > 1:
_, temporal_filter = ft.decompose(filt)
else:
temporal_filter = filt.copy()
kwargs['ax'].plot(time, temporal_filter,
linestyle='-', linewidth=2, color='LightCoral')
kwargs['ax'].plot([time[0], time[-1]], [0, 0],
linestyle=':', linewidth=2, color='k')
def plot_sta(time, sta, dx=1.0):
"""
Plot a linear filter.
    If the given filter is 1D, it is directly plotted. If it is 2D, it is
shown as an image, with space and time as its axes. If the filter is 3D,
it is decomposed into its spatial and temporal components, each of which
is plotted on its own axis.
Parameters
----------
time : array_like
A time vector to plot against.
    sta : array_like
        The filter to plot.
    dx : float, optional
        The spatial sampling rate of the STA, setting the scale of the
        x- and y-axes.
Returns
-------
fig : matplotlib.figure.Figure
The figure onto which the STA is plotted.
ax : matplotlib Axes object
        Axes into which the STA is plotted.
"""
# plot 1D temporal filter
if sta.ndim == 1:
fig = plt.figure()
fig, ax = temporal(time, sta, ax=fig.add_subplot(111))
# plot 2D spatiotemporal filter
elif sta.ndim == 2:
# normalize
stan = (sta - np.mean(sta)) / np.var(sta)
# create new axes
fig = plt.figure()
fig, ax = spatial(stan, dx=dx, ax=fig.add_subplot(111))
# plot 3D spatiotemporal filter
elif sta.ndim == 3:
# build the figure
fig = plt.figure()
gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1])
# decompose
spatial_profile, temporal_filter = ft.decompose(sta)
# plot spatial profile
_, axspatial = spatial(spatial_profile, dx=dx,
ax=fig.add_subplot(gs[0]))
# plot temporal profile
fig, axtemporal = temporal(time, temporal_filter,
ax=fig.add_subplot(gs[1]))
axtemporal.set_xlim(time[0], time[-1])
axtemporal.spines['right'].set_color('none')
axtemporal.spines['top'].set_color('none')
axtemporal.yaxis.set_ticks_position('left')
axtemporal.xaxis.set_ticks_position('bottom')
# return handles
ax = (axspatial, axtemporal)
else:
raise ValueError('The sta parameter has an invalid '
'number of dimensions (must be 1-3)')
return fig, ax
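# Hedged usage sketch (not part of the original module): ``plot_sta``
# dispatches on dimensionality, so a separable 3D toy filter exercises the
# ``ft.decompose`` path. All values below are illustrative assumptions.
def _example_plot_sta():
    time = np.linspace(-0.5, 0.0, 50)  # 50 samples of filter history
    temporal_kernel = np.sin(2 * np.pi * 4 * time) * np.exp(5 * time)
    bump = np.exp(-np.linspace(-2, 2, 16) ** 2)
    spatial_kernel = np.outer(bump, bump)  # 16x16 Gaussian-like RF
    sta = temporal_kernel[:, None, None] * spatial_kernel[None, :, :]
    fig, (axspatial, axtemporal) = plot_sta(time, sta, dx=0.1)
    plt.show()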
@plotwrapper
def ellipse(filt, sigma=2.0, alpha=0.8, fc='none', ec='black',
lw=3, dx=1.0, **kwargs):
"""
Plot an ellipse fitted to the given receptive field.
Parameters
----------
filt : array_like
A linear filter whose spatial extent is to be plotted. If this
is 2D, it is assumed to be the spatial component of the receptive
field. If it is 3D, it is assumed to be a full spatiotemporal
receptive field; the spatial component is extracted and plotted.
sigma : float, optional
Determines the threshold of the ellipse contours. This is
the standard deviation of a Gaussian fitted to the filter
at which the contours are plotted. Default is 2.0.
alpha : float, optional
The alpha blending value, between 0 (transparent) and
1 (opaque) (Default: 0.8).
fc : string, optional
Ellipse face color. (Default: none)
ec : string, optional
Ellipse edge color. (Default: black)
lw : int, optional
Line width. (Default: 3)
dx : float, optional
The spatial sampling rate of the STA, setting the scale of the
x- and y-axes.
ax : matplotlib Axes object, optional
The axes onto which the ellipse should be plotted.
Defaults to a new figure.
Returns
-------
fig : matplotlib.figure.Figure
The figure onto which the ellipse is plotted.
ax : matplotlib.axes.Axes
The axes onto which the ellipse is plotted.
"""
_ = kwargs.pop('fig')
ax = kwargs.pop('ax')
if filt.ndim == 2:
spatial_filter = filt.copy()
elif filt.ndim == 3:
spatial_filter = ft.decompose(filt)[0]
else:
raise ValueError('Linear filter must be 2- or 3-D')
# get the ellipse parameters
center, widths, theta = ft.get_ellipse(spatial_filter, sigma=sigma)
# compute parameters given spatial scale
center, widths = map(lambda x: np.asarray(x) * dx, (center, widths))
# create the ellipse
ell = Ellipse(xy=center, width=widths[0], height=widths[1], angle=theta,
alpha=alpha, ec=ec, fc=fc, lw=lw, **kwargs)
ax.add_artist(ell)
ax.set_xlim(0, spatial_filter.shape[0] * dx)
ax.set_ylim(0, spatial_filter.shape[1] * dx)
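# --- Illustrative usage sketch (not part of the original module) ---
# Fit and draw a 2-sigma ellipse on a synthetic Gaussian spatial profile, so
# that the underlying Gaussian fit in ft.get_ellipse is well posed.
def _demo_ellipse():
    xx, yy = np.meshgrid(np.arange(10), np.arange(10))
    spatial_rf = np.exp(-((xx - 5.0) ** 2 + (yy - 5.0) ** 2) / 8.0)
    fig, ax = ellipse(spatial_rf, sigma=2.0, dx=0.05)
    plt.show()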
@plotwrapper
def plot_cells(cells, dx=1.0, **kwargs):
"""
Plot the spatial receptive fields for multiple cells.
Parameters
----------
cells : list of array_like
A list of spatiotemporal receptive fields, each of which is
a spatiotemporal array.
dx : float, optional
The spatial sampling rate of the STA, setting the scale of the
x- and y-axes.
ax : matplotlib Axes object, optional
The axes onto which the ellipse should be plotted.
Defaults to a new figure.
Returns
    -------
fig : matplotlib.figure.Figure
The figure onto which the ellipses are plotted.
ax : matplotlib.axes.Axes
The axes onto which the ellipses are plotted.
"""
_ = kwargs.pop('fig')
ax = kwargs.pop('ax')
colors = cm.Set1(np.random.rand(len(cells),))
# for each cell
for color, sta in zip(colors, cells):
# get the spatial profile
try:
spatial_profile = ft.decompose(sta)[0]
except np.linalg.LinAlgError:
continue
# plot ellipse
try:
ellipse(spatial_profile, fc=color, ec=color,
lw=2, dx=dx, alpha=0.3, ax=ax)
except RuntimeError:
pass
def play_rates(rates, patches, num_levels=255, time=None,
repeat=True, frametime=100):
"""
    Plays a movie representation of the firing rate of a list of cells by
    coloring a list of patches in proportion to each cell's rate. This is
    useful, for example, in conjunction with ``plot_cells``, to animate the
    ellipses fitted to a set of receptive fields.
Parameters
----------
rates : array_like
        An ``(N, T)`` matrix of firing rates, where ``N`` is the number of
        cells and ``T`` the number of time points.
    patches : list
        A list of ``N`` matplotlib patch elements. The facecolor of these
        patches is altered according to the rate values.
    num_levels : int, optional
        The number of gray levels used to encode the rates (Default: 255).
    time : array_like, optional
        A time vector used in the title of each frame; defaults to the
        frame indices.
    repeat : bool, optional
        Whether the animation should loop (Default: True).
    frametime : float, optional
        The interval between frames in milliseconds (Default: 100).
Returns
-------
anim : matplotlib.animation.Animation
The object representing the full animation.
"""
# Validate input
if rates.ndim == 1:
rates = rates.reshape(1, -1)
if isinstance(patches, Ellipse):
patches = [patches]
N, T = rates.shape
# Approximate necessary colormap
colors = cm.gray(np.arange(num_levels))
rscale = np.round((num_levels - 1) * (rates - rates.min()) /
(rates.max() - rates.min())).astype('int').reshape(N, T)
# set up
fig = plt.gcf()
ax = plt.gca()
if time is None:
time = np.arange(T)
# Animation function (called sequentially)
def animate(t):
for i in range(N):
patches[i].set_facecolor(colors[rscale[i, t]])
ax.set_title('Time: %0.2f seconds' % (time[t]), fontsize=20)
# Call the animator
anim = animation.FuncAnimation(fig, animate,
np.arange(T), interval=frametime, repeat=repeat)
return anim
def anim_to_html(anim):
"""
Convert an animation into an embedable HTML element.
This converts the animation objects returned by ``play_sta()`` and
``play_rates()`` into an HTML tag that can be embedded, for example
in a Jupyter notebook.
    Parameters
    ----------
anim : matplotlib.animation.Animation
The animation object to embed.
Returns
-------
html : IPython.display.HTML
An HTML object with the encoded video. This can be directly embedded
into an IPython notebook.
    Raises
    ------
    ImportError
        If the IPython modules required to convert the animation are not
        installed.
"""
from IPython.display import HTML
return HTML(anim.to_html5_video())
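# --- Illustrative usage sketch (not part of the original module) ---
# Combine plot_cells, play_rates and anim_to_html to embed a rate movie in a
# notebook; `cells` and `rates` are hypothetical inputs supplied by the caller.
def _demo_rate_movie(cells, rates):
    fig, ax = plot_cells(cells)
    patches = ax.artists                 # the Ellipse artists added by plot_cells
    anim = play_rates(rates, patches, frametime=50)
    return anim_to_html(anim)            # requires IPython to be installed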
|
baccuslab/pyret
|
pyret/visualizations.py
|
Python
|
mit
| 19,921
|
[
"Gaussian"
] |
56e596dad980059edb13767a3901d3c72f8f17254c4a21b38e7cfc54c5f4dce9
|
#! /usr/bin/env python
# Copyright (C) 2016 Li Yao <yaoli95@outlook.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import pysam, pickle, repConfig
from Bio.Seq import transcribe, translate
from Bio.Alphabet import IUPAC
from database import mysqlConnection
aaTable = {
'A': 'Alanine', 'R': 'Arginine', 'N': 'Asparagine', 'D': 'Aspartic acid', 'C': 'Cysteine', 'Q': 'Glutamine',
'E': 'Glutamic acid', 'G': 'Glycine', 'H': 'Histidine', 'I': 'Isoleucine', 'L': 'Leucine', 'M': 'Methionine',
'F': 'Phenylalanine', 'P': 'Proline', 'S': 'Serine', 'T': 'Threonine','W': 'Tryptophan', 'Y': 'Tyrosine',
'V': 'Valine', 'K': 'Lysine', '*': 'Stop Codon'
}
sdf = open(repConfig.getConfig("datasets", "cdslibrary"))
seqDict = pickle.load(sdf)
ceTable = pysam.Tabixfile(repConfig.getConfig("datasets", "cdsbed"))
cnx, cursor = mysqlConnection()
def isMissenseMut(chr, pos, job):
global seqDict, ceTable
for hit in ceTable.fetch(reference=chr, start=int(pos), end=int(pos)+1, parser=pysam.asBed()):
chromosome, start, end, name, gene, strand, relStart = hit
tmp = name.split('_')
transcriptID = tmp[0]
rawSeq = seqDict[transcriptID]
if strand == '+':
relPos = int(pos) - int(start) + int(relStart)-1
else:
relPos = int(end) - int(pos) + int(relStart)
editedSeq = rawSeq[:relPos]+'G'+rawSeq[relPos+1:]
rawSeqr = transcribe(rawSeq)
rawPr = translate(rawSeqr)
editedSeqr = transcribe(editedSeq)
editedPr = translate(editedSeqr)
tag, rp, fa, ta = seqCompare(rawPr, editedPr)
if tag:
recordMissense(chr, pos, job, gene, transcriptID, rp, aaTable[fa], aaTable[ta])
            return 1
        else:
            return 0
    # no hit in the CDS table means the variant is not in a coding exon
    return 0
def seqCompare(raw, edited):
tag = 0
pos = 0
for index, aa in enumerate(raw):
if aa != edited[index]:
tag = 1
pos = index - 1
return tag, pos, aa, edited[index]
return 0, 0, 0, 0
def recordMissense(chr, pos, job, gene, transcript, relp, fromA, toA):
try:
sql = """INSERT INTO `%s` (`job`, `chromosome`, `position`, `gene`, `transcript`, `relpos`, `fr`, `to`) VALUES ('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s');""" % (repConfig.getConfig("datasets", "mist"), job, chr, pos, gene, transcript, relp, fromA, toA)
cursor.execute(sql)
cnx.commit()
except Exception, e:
print e
return 0
def factory(chr, pos, jobId):
counter = 0
for x in chr.index:
if isMissenseMut(chr.ix[x], pos.ix[x], jobId):
counter += 1
return counter
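# --- Illustrative driver sketch (not part of the original script) ---
# Assumes a pandas DataFrame with 'chrom' and 'pos' columns, matching the
# .index/.ix access pattern used by factory() above; names are hypothetical.
def example_run(variant_table, job_id):
    return factory(variant_table['chrom'], variant_table['pos'], job_id)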
|
RNAEDITINGPLUS/main
|
node/missenseOntology.py
|
Python
|
apache-2.0
| 3,650
|
[
"pysam"
] |
761f98db64da030307c1f5bc4be9345e5a6bcd5fc40e3323f3f230fcaf2758c2
|
# -*- coding: utf-8 -*-
"""Copyright 2015 Roger R Labbe Jr.
FilterPy library.
http://github.com/rlabbe/filterpy
Documentation at:
https://filterpy.readthedocs.org
Supporting book at:
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the readme.MD file
for more information.
"""
from __future__ import division
import numpy as np
from scipy.linalg import cholesky
class MerweScaledSigmaPoints(object):
def __init__(self, n, alpha, beta, kappa, sqrt_method=None, subtract=None):
""" Generates sigma points and weights according to Van der Merwe's
        2004 dissertation [1] for the UnscentedKalmanFilter class. It
        parametrizes the sigma points using alpha, beta, and kappa terms, and
is the version seen in most publications.
Unless you know better, this should be your default choice.
Parameters
----------
n : int
Dimensionality of the state. 2n+1 weights will be generated.
alpha : float
            Determines the spread of the sigma points around the mean.
Usually a small positive value (1e-3) according to [3].
beta : float
Incorporates prior knowledge of the distribution of the mean. For
Gaussian x beta=2 is optimal, according to [3].
kappa : float, default=0.0
Secondary scaling parameter usually set to 0 according to [4],
or to 3-n according to [5].
sqrt_method : function(ndarray), default=scipy.linalg.cholesky
Defines how we compute the square root of a matrix, which has
no unique answer. Cholesky is the default choice due to its
speed. Typically your alternative choice will be
scipy.linalg.sqrtm. Different choices affect how the sigma points
are arranged relative to the eigenvectors of the covariance matrix.
Usually this will not matter to you; if so the default cholesky()
yields maximal performance. As of van der Merwe's dissertation of
            2004 [6] this was not a well researched area so I have no advice
to give you.
If your method returns a triangular matrix it must be upper
triangular. Do not use numpy.linalg.cholesky - for historical
reasons it returns a lower triangular matrix. The SciPy version
does the right thing.
subtract : callable (x, y), optional
Function that computes the difference between x and y.
You will have to supply this if your state variable cannot support
            subtraction, such as angles (359-1 degrees is 2, not 358). x and y
are state vectors, not scalars.
Examples
--------
See my book Kalman and Bayesian Filters in Python
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
References
----------
.. [1] R. Van der Merwe "Sigma-Point Kalman Filters for Probabilitic
Inference in Dynamic State-Space Models" (Doctoral dissertation)
"""
self.n = n
self.alpha = alpha
self.beta = beta
self.kappa = kappa
if sqrt_method is None:
self.sqrt = cholesky
else:
self.sqrt = sqrt_method
if subtract is None:
self.subtract= np.subtract
else:
self.subtract = subtract
def num_sigmas(self):
""" Number of sigma points for each variable in the state x"""
return 2*self.n + 1
def sigma_points(self, x, P):
""" Computes the sigma points for an unscented Kalman filter
given the mean (x) and covariance(P) of the filter.
Returns tuple of the sigma points and weights.
Works with both scalar and array inputs:
sigma_points (5, 9, 2) # mean 5, covariance 9
sigma_points ([5, 2], 9*eye(2), 2) # means 5 and 2, covariance 9I
Parameters
----------
        x : array_like
            An array of the means of length n.
            Can be a scalar if 1D.
examples: 1, [1,2], np.array([1,2])
P : scalar, or np.array
Covariance of the filter. If scalar, is treated as eye(n)*P.
Returns
-------
sigmas : np.array, of size (n, 2n+1)
Two dimensional array of sigma points. Each column contains all of
the sigmas for one dimension in the problem space.
Ordered by Xi_0, Xi_{1..n}, Xi_{n+1..2n}
"""
assert self.n == np.size(x), "expected size {}, but size is {}".format(
self.n, np.size(x))
n = self.n
if np.isscalar(x):
x = np.asarray([x])
if np.isscalar(P):
P = np.eye(n)*P
else:
P = np.asarray(P)
lambda_ = self.alpha**2 * (n + self.kappa) - n
U = self.sqrt((lambda_ + n)*P)
sigmas = np.zeros((2*n+1, n))
sigmas[0] = x
for k in range(n):
sigmas[k+1] = self.subtract(x, -U[k])
sigmas[n+k+1] = self.subtract(x, U[k])
return sigmas
def weights(self):
""" Computes the weights for the scaled unscented Kalman filter.
Returns
-------
Wm : ndarray[2n+1]
weights for mean
Wc : ndarray[2n+1]
weights for the covariances
"""
n = self.n
lambda_ = self.alpha**2 * (n +self.kappa) - n
c = .5 / (n + lambda_)
Wc = np.full(2*n + 1, c)
Wm = np.full(2*n + 1, c)
Wc[0] = lambda_ / (n + lambda_) + (1 - self.alpha**2 + self.beta)
Wm[0] = lambda_ / (n + lambda_)
return Wm, Wc
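# --- Illustrative usage sketch (not part of the original module) ---
# Generate sigma points for a 2-D state and check that the weighted mean
# reproduces x; alpha/beta/kappa follow the guidance in the docstring above.
def _demo_merwe():
    points = MerweScaledSigmaPoints(n=2, alpha=1e-3, beta=2., kappa=0.)
    x = np.array([1., 2.])
    P = np.diag([0.1, 0.4])
    sigmas = points.sigma_points(x, P)          # shape (2n+1, n) = (5, 2)
    Wm, Wc = points.weights()
    assert np.allclose(np.dot(Wm, sigmas), x)   # unscented mean recovers x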
class JulierSigmaPoints(object):
def __init__(self,n, kappa, sqrt_method=None, subtract=None):
""" Generates sigma points and weights according to Simon J. Julier
        and Jeffery K. Uhlmann's original paper [1]. It parametrizes the sigma
        points using kappa.
Parameters
----------
n : int
Dimensionality of the state. 2n+1 weights will be generated.
kappa : float, default=0.
Scaling factor that can reduce high order errors. kappa=0 gives
the standard unscented filter. According to [Julier], if you set
kappa to 3-dim_x for a Gaussian x you will minimize the fourth
order errors in x and P.
sqrt_method : function(ndarray), default=scipy.linalg.cholesky
Defines how we compute the square root of a matrix, which has
no unique answer. Cholesky is the default choice due to its
speed. Typically your alternative choice will be
scipy.linalg.sqrtm. Different choices affect how the sigma points
are arranged relative to the eigenvectors of the covariance matrix.
Usually this will not matter to you; if so the default cholesky()
yields maximal performance. As of van der Merwe's dissertation of
            2004 [6] this was not a well researched area so I have no advice
to give you.
If your method returns a triangular matrix it must be upper
triangular. Do not use numpy.linalg.cholesky - for historical
reasons it returns a lower triangular matrix. The SciPy version
does the right thing.
subtract : callable (x, y), optional
Function that computes the difference between x and y.
You will have to supply this if your state variable cannot support
            subtraction, such as angles (359-1 degrees is 2, not 358). x and y
            are state vectors, not scalars.
References
----------
.. [1] Julier, Simon J.; Uhlmann, Jeffrey "A New Extension of the Kalman
Filter to Nonlinear Systems". Proc. SPIE 3068, Signal Processing,
Sensor Fusion, and Target Recognition VI, 182 (July 28, 1997)
"""
self.n = n
self.kappa = kappa
if sqrt_method is None:
self.sqrt = cholesky
else:
self.sqrt = sqrt_method
if subtract is None:
self.subtract= np.subtract
else:
self.subtract = subtract
def num_sigmas(self):
""" Number of sigma points for each variable in the state x"""
return 2*self.n + 1
def sigma_points(self, x, P):
r""" Computes the sigma points for an unscented Kalman filter
given the mean (x) and covariance(P) of the filter.
kappa is an arbitrary constant. Returns sigma points.
Works with both scalar and array inputs:
sigma_points (5, 9, 2) # mean 5, covariance 9
sigma_points ([5, 2], 9*eye(2), 2) # means 5 and 2, covariance 9I
Parameters
----------
        x : array_like
            An array of the means of length n.
            Can be a scalar if 1D.
examples: 1, [1,2], np.array([1,2])
P : scalar, or np.array
Covariance of the filter. If scalar, is treated as eye(n)*P.
Returns
-------
sigmas : np.array, of size (n, 2n+1)
2D array of sigma points :math:`\chi`. Each column contains all of
the sigmas for one dimension in the problem space. They
are ordered as:
.. math::
:nowrap:
\begin{eqnarray}
\chi[0] = &x \\
\chi[1..n] = &x + [\sqrt{(n+\kappa)P}]_k \\
\chi[n+1..2n] = &x - [\sqrt{(n+\kappa)P}]_k
\end{eqnarray}
"""
assert self.n == np.size(x)
n = self.n
if np.isscalar(x):
x = np.asarray([x])
n = np.size(x) # dimension of problem
if np.isscalar(P):
P = np.eye(n)*P
sigmas = np.zeros((2*n+1, n))
        # implements U'*U = (n+kappa)*P. scipy.linalg.cholesky returns an
        # upper-triangular matrix, so its rows can be accessed directly as U[k]
U = self.sqrt((n + self.kappa) * P)
sigmas[0] = x
for k in range(n):
sigmas[k+1] = self.subtract(x, -U[k])
sigmas[n+k+1] = self.subtract(x, U[k])
return sigmas
def weights(self):
""" Computes the weights for the unscented Kalman filter. In this
        formulation the weights for the mean and covariance are the same.
Returns
-------
Wm : ndarray[2n+1]
weights for mean
Wc : ndarray[2n+1]
weights for the covariances
"""
n = self.n
k = self.kappa
W = np.full(2*n+1, .5 / (n + k))
W[0] = k / (n+k)
return W, W
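# --- Illustrative usage sketch (not part of the original module) ---
# With kappa = 3 - n (the heuristic from the docstring), the centre point
# carries weight kappa / (n + kappa); here n = 2 gives W[0] = 1/3.
def _demo_julier():
    points = JulierSigmaPoints(n=2, kappa=1.)
    sigmas = points.sigma_points(np.zeros(2), np.eye(2))   # shape (5, 2)
    W, _ = points.weights()
    assert abs(W[0] - 1. / 3.) < 1e-12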
class SimplexSigmaPoints(object):
def __init__(self, n, alpha=1, sqrt_method=None, subtract=None):
""" Generates sigma points and weights according to the simplex
method presented in [1] DOI: 10.1051/cocv/2010006
Parameters
----------
n : int
Dimensionality of the state. n+1 weights will be generated.
sqrt_method : function(ndarray), default=scipy.linalg.cholesky
Defines how we compute the square root of a matrix, which has
no unique answer. Cholesky is the default choice due to its
speed. Typically your alternative choice will be
scipy.linalg.sqrtm
If your method returns a triangular matrix it must be upper
triangular. Do not use numpy.linalg.cholesky - for historical
reasons it returns a lower triangular matrix. The SciPy version
does the right thing.
subtract : callable (x, y), optional
Function that computes the difference between x and y.
You will have to supply this if your state variable cannot support
subtraction, such as angles (359-1 degreees is 2, not 358). x and y
are state vectors, not scalars.
References
----------
.. [1] Phillippe Moireau and Dominique Chapelle "Reduced-Order Unscented
Kalman Filtering with Application to Parameter Identification in
Large-Dimensional Systems"
"""
self.n = n
self.alpha = alpha
if sqrt_method is None:
self.sqrt = cholesky
else:
self.sqrt = sqrt_method
if subtract is None:
self.subtract= np.subtract
else:
self.subtract = subtract
def num_sigmas(self):
""" Number of sigma points for each variable in the state x"""
return self.n + 1
def sigma_points(self, x, P):
""" Computes the implex sigma points for an unscented Kalman filter
given the mean (x) and covariance(P) of the filter.
Returns tuple of the sigma points and weights.
Works with both scalar and array inputs:
sigma_points (5, 9, 2) # mean 5, covariance 9
sigma_points ([5, 2], 9*eye(2), 2) # means 5 and 2, covariance 9I
Parameters
----------
        x : array_like
            An array of the means of length n.
            Can be a scalar if 1D.
examples: 1, [1,2], np.array([1,2])
P : scalar, or np.array
Covariance of the filter. If scalar, is treated as eye(n)*P.
Returns
-------
sigmas : np.array, of size (n, n+1)
Two dimensional array of sigma points. Each column contains all of
the sigmas for one dimension in the problem space.
Ordered by Xi_0, Xi_{1..n}
"""
assert self.n == np.size(x), "expected size {}, but size is {}".format(
self.n, np.size(x))
n = self.n
if np.isscalar(x):
x = np.asarray([x])
x = x.reshape(-1, 1)
if np.isscalar(P):
P = np.eye(n)*P
else:
P = np.asarray(P)
U = self.sqrt(P)
lambda_ = n / (n + 1)
Istar = np.array([[-1/np.sqrt(2*lambda_), 1/np.sqrt(2*lambda_)]])
for d in range(2, n+1):
row = np.ones((1, Istar.shape[1] + 1)) * 1. / np.sqrt(lambda_*d*(d + 1))
row[0, -1] = -d / np.sqrt(lambda_ * d * (d + 1))
Istar = np.r_[np.c_[Istar, np.zeros((Istar.shape[0]))], row]
I = np.sqrt(n)*Istar
scaled_unitary = U.dot(I)
sigmas = self.subtract(x, -scaled_unitary)
return sigmas.T
def weights(self):
""" Computes the weights for the scaled unscented Kalman filter.
Returns
-------
Wm : ndarray[n+1]
weights for mean
Wc : ndarray[n+1]
weights for the covariances
"""
n = self.n
c = 1. / (n + 1)
W = np.full(n + 1, c)
return W, W
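# --- Illustrative usage sketch (not part of the original module) ---
# The simplex scheme needs only n + 1 points: for a 3-D state it yields four
# sigma points, each carrying the equal weight 1 / (n + 1).
def _demo_simplex():
    points = SimplexSigmaPoints(n=3)
    sigmas = points.sigma_points(np.zeros(3), np.eye(3))   # shape (4, 3)
    Wm, Wc = points.weights()                              # all entries 0.25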
|
BrianGasberg/filterpy
|
filterpy/kalman/sigma_points.py
|
Python
|
mit
| 14,788
|
[
"Gaussian"
] |
02a754926e028a3d23e694607d095b01fe7c170bcbdea8cdc798df83dd0b8bc7
|
from django.test import TestCase, tag
from django.core.exceptions import ObjectDoesNotExist
from edc_base import get_utcnow
from edc_base.tests import SiteTestCaseMixin
from ..models import SubjectScheduleHistory
from ..schedule import Schedule
from ..site_visit_schedules import site_visit_schedules
from ..subject_schedule import SubjectSchedule, SubjectScheduleError
from ..visit_schedule import VisitSchedule
from .models import SubjectConsent, OnSchedule, OffSchedule
class TestSubjectSchedule(SiteTestCaseMixin, TestCase):
def setUp(self):
site_visit_schedules._registry = {}
self.visit_schedule = VisitSchedule(
name='visit_schedule',
verbose_name='Visit Schedule',
offstudy_model='edc_visit_schedule.SubjectOffstudy',
death_report_model='edc_visit_schedule.DeathReport')
self.schedule = Schedule(
name='schedule',
onschedule_model='edc_visit_schedule.OnSchedule',
offschedule_model='edc_visit_schedule.OffSchedule',
appointment_model='edc_appointment.appointment',
consent_model='edc_visit_schedule.subjectconsent')
self.schedule3 = Schedule(
name='schedule_three',
onschedule_model='edc_visit_schedule.OnScheduleThree',
offschedule_model='edc_visit_schedule.OffScheduleThree',
appointment_model='edc_appointment.appointment',
consent_model='edc_visit_schedule.subjectconsent')
self.visit_schedule.add_schedule(self.schedule)
self.visit_schedule.add_schedule(self.schedule3)
site_visit_schedules.register(self.visit_schedule)
self.visit_schedule_two = VisitSchedule(
name='visit_schedule_two',
verbose_name='Visit Schedule Two',
offstudy_model='edc_visit_schedule.SubjectOffstudy',
death_report_model='edc_visit_schedule.DeathReport')
self.schedule_two_1 = Schedule(
name='schedule_two',
onschedule_model='edc_visit_schedule.OnScheduleTwo',
offschedule_model='edc_visit_schedule.OffScheduleTwo',
appointment_model='edc_appointment.appointment',
consent_model='edc_visit_schedule.subjectconsent')
self.schedule_two_2 = Schedule(
name='schedule_four',
onschedule_model='edc_visit_schedule.OnScheduleFour',
offschedule_model='edc_visit_schedule.OffScheduleFour',
appointment_model='edc_appointment.appointment',
consent_model='edc_visit_schedule.subjectconsent')
self.visit_schedule_two.add_schedule(self.schedule_two_1)
self.visit_schedule_two.add_schedule(self.schedule_two_2)
site_visit_schedules.register(self.visit_schedule_two)
self.subject_identifier = '111111'
SubjectConsent.objects.create(
subject_identifier=self.subject_identifier)
def test_onschedule_updates_history(self):
"""Asserts returns the correct instances for the schedule.
"""
for onschedule_model, schedule_name in [
('edc_visit_schedule.onscheduletwo', 'schedule_two'),
('edc_visit_schedule.onschedulefour', 'schedule_four')]:
with self.subTest(onschedule_model=onschedule_model,
schedule_name=schedule_name):
visit_schedule, schedule = site_visit_schedules.get_by_onschedule_model(
onschedule_model)
subject_schedule = SubjectSchedule(
visit_schedule=visit_schedule, schedule=schedule)
subject_schedule.put_on_schedule(
subject_identifier=self.subject_identifier,
onschedule_datetime=get_utcnow())
try:
SubjectScheduleHistory.objects.get(
subject_identifier=self.subject_identifier,
schedule_name=schedule_name)
except ObjectDoesNotExist:
self.fail('ObjectDoesNotExist unexpectedly raised')
    def test_multiple_consents(self):
        """Asserts that no error is raised if more than one consent
        exists for this subject.
        """
subject_identifier = 'ABCDEF'
SubjectConsent.objects.create(
subject_identifier=subject_identifier, version='1')
SubjectConsent.objects.create(
subject_identifier=subject_identifier, version='2')
visit_schedule, schedule = site_visit_schedules.get_by_onschedule_model(
'edc_visit_schedule.onscheduletwo')
subject_schedule = SubjectSchedule(
visit_schedule=visit_schedule,
schedule=schedule)
try:
subject_schedule.put_on_schedule(
subject_identifier=subject_identifier,
onschedule_datetime=get_utcnow())
except SubjectScheduleError:
self.fail('SubjectScheduleError unexpectedly raised.')
def test_resave(self):
"""Asserts returns the correct instances for the schedule.
"""
visit_schedule, schedule = site_visit_schedules.get_by_onschedule_model(
'edc_visit_schedule.onscheduletwo')
subject_schedule = SubjectSchedule(
visit_schedule=visit_schedule,
schedule=schedule)
subject_schedule.put_on_schedule(
subject_identifier=self.subject_identifier,
onschedule_datetime=get_utcnow())
subject_schedule.resave(subject_identifier=self.subject_identifier)
def test_put_on_schedule(self):
_, schedule = site_visit_schedules.get_by_onschedule_model(
'edc_visit_schedule.onschedule')
self.assertRaises(
ObjectDoesNotExist,
OnSchedule.objects.get,
subject_identifier=self.subject_identifier)
schedule.put_on_schedule(
subject_identifier=self.subject_identifier,
onschedule_datetime=get_utcnow())
try:
OnSchedule.objects.get(subject_identifier=self.subject_identifier)
except ObjectDoesNotExist:
self.fail('ObjectDoesNotExist unexpectedly raised')
def test_take_off_schedule(self):
visit_schedule = site_visit_schedules.get_visit_schedule(
visit_schedule_name='visit_schedule')
schedule = visit_schedule.schedules.get('schedule')
schedule.put_on_schedule(
subject_identifier=self.subject_identifier)
schedule.take_off_schedule(
subject_identifier=self.subject_identifier)
try:
OffSchedule.objects.get(subject_identifier=self.subject_identifier)
except ObjectDoesNotExist:
self.fail('ObjectDoesNotExist unexpectedly raised')
|
botswana-harvard/edc-visit-schedule
|
edc_visit_schedule/tests/test_subject_schedule.py
|
Python
|
gpl-2.0
| 6,789
|
[
"VisIt"
] |
635c77bb593c37a8f442f672424e87e1975c3df38b9b174ebbcc431ea9cfb617
|
from mayavi.core.api import registry
from simphony_mayavi.adapt2cuds import adapt2cuds
def load(filename, name=None, kind=None, rename_arrays=None):
""" Load the file data into a CUDS container.
Parameters
----------
filename : str
The file name of the file to load.
name : str
The name of the returned CUDS container. Default is 'CUDS container'.
kind : str
The kind {'mesh', 'lattice', 'particles'} of the container to return.
Default is None, where the function will use some heuristics to
infer the most appropriate type of CUDS container to return
(using adapt2cuds).
    rename_arrays : dict, optional
Dictionary mapping the array names used in the dataset object
to their related CUBA keywords that will be used in the returned
CUDS container.
.. note::
Only CUBA keywords are supported for array names so use this
option to provide a translation mapping to the CUBA keys.
"""
data_set = _read(filename)
return adapt2cuds(
data_set, name, kind, rename_arrays)
def _read(filename):
""" Find a suitable reader and read the tvtk.Dataset.
"""
metasource = registry.get_file_reader(filename)
if metasource is None:
message = 'No suitable reader found for file: {}'
raise RuntimeError(message.format(filename))
if metasource.factory is None:
source = metasource.get_callable()()
source.initialize(filename)
source.update()
reader = source.reader
else:
message = 'Mayavi reader that requires a scene is not supported : {}'
raise NotImplementedError(message.format(filename))
if len(source.outputs) != 1:
message = 'Only one output is expected from the reader'
raise RuntimeError(message)
return reader.output
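# --- Illustrative usage sketch (not part of the original module) ---
# Load a VTK file into a CUDS container; the file name and the array-to-CUBA
# mapping below are assumptions made for the example.
def _example_load():
    return load('lattice.vtk', name='my lattice',
                rename_arrays={'scalars': 'TEMPERATURE'})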
|
simphony/simphony-mayavi
|
simphony_mayavi/load.py
|
Python
|
bsd-2-clause
| 1,869
|
[
"Mayavi"
] |
dd291560aa56f790cb79e0f6a1b1da1627e8da515e1a9df9428befcbc8675fda
|
''' AlwaysProbingPolicy module
'''
from DIRAC import S_OK
from DIRAC.ResourceStatusSystem.PolicySystem.PolicyBase import PolicyBase
__RCSID__ = '$Id$'
class AlwaysProbingPolicy(PolicyBase):
'''
  The AlwaysProbingPolicy is a dummy module that can be used as an example;
  it always returns Probing status.
'''
@staticmethod
def _evaluate(commandResult):
'''
    It always returns Probing status; the default command is evaluated,
    but its output is completely ignored.
'''
policyResult = {'Status': 'Probing',
'Reason': 'AlwaysProbing'}
return S_OK(policyResult)
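# Illustrative sketch (not part of the original module): the command result is
# ignored, so any input yields the same Probing status wrapped in S_OK:
#
#   result = AlwaysProbingPolicy._evaluate(None)
#   assert result['Value'] == {'Status': 'Probing', 'Reason': 'AlwaysProbing'}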
################################################################################
# EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
|
fstagni/DIRAC
|
ResourceStatusSystem/Policy/AlwaysProbingPolicy.py
|
Python
|
gpl-3.0
| 776
|
[
"DIRAC"
] |
fb6c78ec19d302c2c93d09d6fbe0082b62fb08f4b9800664350c00d85b670d1e
|
# Copyright 2000-2004 Brad Chapman.
# Copyright 2001 Iddo Friedberg.
# Copyright 2007-2010 by Peter Cock.
# All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Classes for generic sequence alignment.
Contains classes to deal with generic sequence alignment stuff not
specific to a particular program or format.
"""
from __future__ import print_function
__docformat__ = "restructuredtext en" # Don't just use plain text in epydoc API pages!
# biopython
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio import Alphabet
class Alignment(object):
"""Represent a set of alignments (DEPRECATED).
This is a base class to represent alignments, which can be subclassed
to deal with an alignment in a specific format.
With the introduction of the MultipleSeqAlignment class in Bio.Align,
this base class is deprecated and is likely to be removed in future
releases of Biopython.
"""
def __init__(self, alphabet):
"""Initialize a new Alignment object.
Arguments:
- alphabet - The alphabet to use for the sequence objects that are
created. This alphabet must be a gapped type.
e.g.
>>> from Bio.Alphabet import IUPAC, Gapped
>>> align = Alignment(Gapped(IUPAC.unambiguous_dna, "-"))
>>> align.add_sequence("Alpha", "ACTGCTAGCTAG")
>>> align.add_sequence("Beta", "ACT-CTAGCTAG")
>>> align.add_sequence("Gamma", "ACTGCTAGATAG")
>>> print(align)
Gapped(IUPACUnambiguousDNA(), '-') alignment with 3 rows and 12 columns
ACTGCTAGCTAG Alpha
ACT-CTAGCTAG Beta
ACTGCTAGATAG Gamma
"""
import warnings
import Bio
warnings.warn("With the introduction of the MultipleSeqAlignment class in Bio.Align, this base class is deprecated and is likely to be removed in a future release of Biopython.", Bio.BiopythonDeprecationWarning)
if not (isinstance(alphabet, Alphabet.Alphabet)
or isinstance(alphabet, Alphabet.AlphabetEncoder)):
raise ValueError("Invalid alphabet argument")
self._alphabet = alphabet
# hold everything at a list of SeqRecord objects
self._records = []
def _str_line(self, record, length=50):
"""Returns a truncated string representation of a SeqRecord (PRIVATE).
This is a PRIVATE function used by the __str__ method.
"""
if record.seq.__class__.__name__ == "CodonSeq":
if len(record.seq) <= length:
return "%s %s" % (record.seq, record.id)
else:
return "%s...%s %s" \
% (record.seq[:length - 3], record.seq[-3:], record.id)
else:
if len(record.seq) <= length:
return "%s %s" % (record.seq, record.id)
else:
return "%s...%s %s" \
% (record.seq[:length - 6], record.seq[-3:], record.id)
def __str__(self):
"""Returns a multi-line string summary of the alignment.
This output is intended to be readable, but large alignments are
shown truncated. A maximum of 20 rows (sequences) and 50 columns
are shown, with the record identifiers. This should fit nicely on a
single screen. e.g.
>>> from Bio.Alphabet import IUPAC, Gapped
>>> align = Alignment(Gapped(IUPAC.unambiguous_dna, "-"))
>>> align.add_sequence("Alpha", "ACTGCTAGCTAG")
>>> align.add_sequence("Beta", "ACT-CTAGCTAG")
>>> align.add_sequence("Gamma", "ACTGCTAGATAG")
>>> print(align)
Gapped(IUPACUnambiguousDNA(), '-') alignment with 3 rows and 12 columns
ACTGCTAGCTAG Alpha
ACT-CTAGCTAG Beta
ACTGCTAGATAG Gamma
See also the alignment's format method.
"""
rows = len(self._records)
lines = ["%s alignment with %i rows and %i columns"
% (str(self._alphabet), rows, self.get_alignment_length())]
if rows <= 20:
lines.extend(self._str_line(rec) for rec in self._records)
else:
lines.extend(self._str_line(rec) for rec in self._records[:18])
lines.append("...")
lines.append(self._str_line(self._records[-1]))
return "\n".join(lines)
def __repr__(self):
"""Returns a representation of the object for debugging.
The representation cannot be used with eval() to recreate the object,
        which is usually possible with simple python objects. For example:
<Bio.Align.Generic.Alignment instance (2 records of length 14,
SingleLetterAlphabet()) at a3c184c>
The hex string is the memory address of the object, see help(id).
This provides a simple way to visually distinguish alignments of
the same size.
"""
# A doctest for __repr__ would be nice, but __class__ comes out differently
# if run via the __main__ trick.
return "<%s instance (%i records of length %i, %s) at %x>" % \
(self.__class__, len(self._records),
self.get_alignment_length(), repr(self._alphabet), id(self))
# This version is useful for doing eval(repr(alignment)),
# but it can be VERY long:
# return "%s(%s, %s)" \
# % (self.__class__, repr(self._records), repr(self._alphabet))
def format(self, format):
"""Returns the alignment as a string in the specified file format.
The format should be a lower case string supported as an output
format by Bio.AlignIO (such as "fasta", "clustal", "phylip",
"stockholm", etc), which is used to turn the alignment into a
string.
e.g.
>>> from Bio.Alphabet import IUPAC, Gapped
>>> align = Alignment(Gapped(IUPAC.unambiguous_dna, "-"))
>>> align.add_sequence("Alpha", "ACTGCTAGCTAG")
>>> align.add_sequence("Beta", "ACT-CTAGCTAG")
>>> align.add_sequence("Gamma", "ACTGCTAGATAG")
>>> print(align.format("fasta"))
>Alpha
ACTGCTAGCTAG
>Beta
ACT-CTAGCTAG
>Gamma
ACTGCTAGATAG
<BLANKLINE>
>>> print(align.format("phylip"))
3 12
Alpha ACTGCTAGCT AG
Beta ACT-CTAGCT AG
Gamma ACTGCTAGAT AG
<BLANKLINE>
For Python 2.6, 3.0 or later see also the built in format() function.
"""
# See also the __format__ added for Python 2.6 / 3.0, PEP 3101
# See also the SeqRecord class and its format() method using Bio.SeqIO
return self.__format__(format)
def __format__(self, format_spec):
"""Returns the alignment as a string in the specified file format.
This method supports the python format() function added in
Python 2.6/3.0. The format_spec should be a lower case
string supported by Bio.AlignIO as an output file format.
See also the alignment's format() method."""
if format_spec:
from Bio._py3k import StringIO
from Bio import AlignIO
handle = StringIO()
AlignIO.write([self], handle, format_spec)
return handle.getvalue()
else:
# Follow python convention and default to using __str__
return str(self)
def get_all_seqs(self):
"""Return all of the sequences involved in the alignment (DEPRECATED).
The return value is a list of SeqRecord objects.
This method is deprecated, as the Alignment object itself now offers
much of the functionality of a list of SeqRecord objects (e.g.
iteration or slicing to create a sub-alignment). Instead use the
Python builtin function list, i.e. my_list = list(my_align)
"""
import warnings
import Bio
warnings.warn("This method is deprecated, since the alignment object"
"now acts more like a list. Instead of calling "
"align.get_all_seqs() you can use list(align)",
Bio.BiopythonDeprecationWarning)
return self._records
def __iter__(self):
"""Iterate over alignment rows as SeqRecord objects.
e.g.
>>> from Bio.Alphabet import IUPAC, Gapped
>>> align = Alignment(Gapped(IUPAC.unambiguous_dna, "-"))
>>> align.add_sequence("Alpha", "ACTGCTAGCTAG")
>>> align.add_sequence("Beta", "ACT-CTAGCTAG")
>>> align.add_sequence("Gamma", "ACTGCTAGATAG")
>>> for record in align:
... print(record.id)
... print(record.seq)
Alpha
ACTGCTAGCTAG
Beta
ACT-CTAGCTAG
Gamma
ACTGCTAGATAG
"""
return iter(self._records)
def get_seq_by_num(self, number):
"""Retrieve a sequence by row number (DEPRECATED).
Returns:
- A Seq object for the requested sequence.
Raises:
- IndexError - If the specified number is out of range.
NOTE: This is a legacy method. In new code where you need to access
the rows of the alignment (i.e. the sequences) consider iterating
over them or accessing them as SeqRecord objects.
"""
import warnings
import Bio
warnings.warn("This is a legacy method and is likely to be removed in a future release of Biopython. In new code where you need to access the rows of the alignment (i.e. the sequences) consider iterating over them or accessing them as SeqRecord objects.", Bio.BiopythonDeprecationWarning)
return self._records[number].seq
def __len__(self):
"""Returns the number of sequences in the alignment.
Use len(alignment) to get the number of sequences (i.e. the number of
rows), and alignment.get_alignment_length() to get the length of the
longest sequence (i.e. the number of columns).
This is easy to remember if you think of the alignment as being like a
list of SeqRecord objects.
"""
return len(self._records)
def get_alignment_length(self):
"""Return the maximum length of the alignment.
All objects in the alignment should (hopefully) have the same
length. This function will go through and find this length
by finding the maximum length of sequences in the alignment.
>>> from Bio.Alphabet import IUPAC, Gapped
>>> align = Alignment(Gapped(IUPAC.unambiguous_dna, "-"))
>>> align.add_sequence("Alpha", "ACTGCTAGCTAG")
>>> align.add_sequence("Beta", "ACT-CTAGCTAG")
>>> align.add_sequence("Gamma", "ACTGCTAGATAG")
>>> align.get_alignment_length()
12
If you want to know the number of sequences in the alignment,
use len(align) instead:
>>> len(align)
3
"""
max_length = 0
for record in self._records:
if len(record.seq) > max_length:
max_length = len(record.seq)
return max_length
def add_sequence(self, descriptor, sequence, start=None, end=None,
weight=1.0):
"""Add a sequence to the alignment.
This doesn't do any kind of alignment, it just adds in the sequence
object, which is assumed to be prealigned with the existing
sequences.
Arguments:
- descriptor - The descriptive id of the sequence being added.
This will be used as the resulting SeqRecord's
.id property (and, for historical compatibility,
also the .description property)
- sequence - A string with sequence info.
- start - You can explicitly set the start point of the sequence.
This is useful (at least) for BLAST alignments, which can
just be partial alignments of sequences.
- end - Specify the end of the sequence, which is important
for the same reason as the start.
- weight - The weight to place on the sequence in the alignment.
By default, all sequences have the same weight. (0.0 =>
no weight, 1.0 => highest weight)
"""
new_seq = Seq(sequence, self._alphabet)
# We are now effectively using the SeqRecord's .id as
# the primary identifier (e.g. in Bio.SeqIO) so we should
# populate it with the descriptor.
# For backwards compatibility, also store this in the
# SeqRecord's description property.
new_record = SeqRecord(new_seq,
id=descriptor,
description=descriptor)
# hack! We really need to work out how to deal with annotations
# and features in biopython. Right now, I'll just use the
# generic annotations dictionary we've got to store the start
# and end, but we should think up something better. I don't know
# if I'm really a big fan of the LocatableSeq thing they've got
# in BioPerl, but I'm not positive what the best thing to do on
# this is...
if start:
new_record.annotations['start'] = start
if end:
new_record.annotations['end'] = end
# another hack to add weight information to the sequence
new_record.annotations['weight'] = weight
self._records.append(new_record)
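    # Illustrative sketch (not part of the original module): adding a partial
    # BLAST-style row with explicit coordinates and a reduced weight stores
    # them in the SeqRecord's annotations:
    #
    #   align.add_sequence("Hit1", "ACT-CTAG", start=101, end=108, weight=0.5)
    #   align[-1].annotations  # -> {'start': 101, 'end': 108, 'weight': 0.5}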
def get_column(self, col):
"""Returns a string containing a given column.
e.g.
>>> from Bio.Alphabet import IUPAC, Gapped
>>> align = Alignment(Gapped(IUPAC.unambiguous_dna, "-"))
>>> align.add_sequence("Alpha", "ACTGCTAGCTAG")
>>> align.add_sequence("Beta", "ACT-CTAGCTAG")
>>> align.add_sequence("Gamma", "ACTGCTAGATAG")
>>> align.get_column(0)
'AAA'
>>> align.get_column(3)
'G-G'
"""
# TODO - Support negative indices?
col_str = ''
assert col >= 0 and col <= self.get_alignment_length()
for rec in self._records:
col_str += rec.seq[col]
return col_str
def __getitem__(self, index):
"""Access part of the alignment.
We'll use the following example alignment here for illustration:
>>> from Bio.Alphabet import IUPAC, Gapped
>>> align = Alignment(Gapped(IUPAC.unambiguous_dna, "-"))
>>> align.add_sequence("Alpha", "ACTGCTAGCTAG")
>>> align.add_sequence("Beta", "ACT-CTAGCTAG")
>>> align.add_sequence("Gamma", "ACTGCTAGATAG")
>>> align.add_sequence("Delta", "ACTGCTTGCTAG")
>>> align.add_sequence("Epsilon", "ACTGCTTGATAG")
You can access a row of the alignment as a SeqRecord using an integer
index (think of the alignment as a list of SeqRecord objects here):
>>> first_record = align[0]
>>> print("%s %s" % (first_record.id, first_record.seq))
Alpha ACTGCTAGCTAG
>>> last_record = align[-1]
>>> print("%s %s" % (last_record.id, last_record.seq))
Epsilon ACTGCTTGATAG
        You can also use Python's slice notation to create a sub-alignment
containing only some of the SeqRecord objects:
>>> sub_alignment = align[2:5]
>>> print(sub_alignment)
Gapped(IUPACUnambiguousDNA(), '-') alignment with 3 rows and 12 columns
ACTGCTAGATAG Gamma
ACTGCTTGCTAG Delta
ACTGCTTGATAG Epsilon
This includes support for a step, i.e. align[start:end:step], which
can be used to select every second sequence:
>>> sub_alignment = align[::2]
>>> print(sub_alignment)
Gapped(IUPACUnambiguousDNA(), '-') alignment with 3 rows and 12 columns
ACTGCTAGCTAG Alpha
ACTGCTAGATAG Gamma
ACTGCTTGATAG Epsilon
Or to get a copy of the alignment with the rows in reverse order:
>>> rev_alignment = align[::-1]
>>> print(rev_alignment)
Gapped(IUPACUnambiguousDNA(), '-') alignment with 5 rows and 12 columns
ACTGCTTGATAG Epsilon
ACTGCTTGCTAG Delta
ACTGCTAGATAG Gamma
ACT-CTAGCTAG Beta
ACTGCTAGCTAG Alpha
Right now, these are the ONLY indexing operations supported. The use of
a second column based index is under discussion for a future update.
"""
if isinstance(index, int):
# e.g. result = align[x]
# Return a SeqRecord
return self._records[index]
elif isinstance(index, slice):
# e.g. sub_aling = align[i:j:k]
# Return a new Alignment using only the specified records.
# TODO - See Bug 2554 for changing the __init__ method
# to allow us to do this more cleanly.
sub_align = Alignment(self._alphabet)
sub_align._records = self._records[index]
return sub_align
elif len(index) == 2:
raise TypeError("Row and Column indexing is not currently supported,"
"but may be in future.")
else:
raise TypeError("Invalid index type.")
def _test():
"""Run the Bio.Align.Generic module's doctests."""
print("Running doctests...")
import doctest
doctest.testmod()
print("Done")
if __name__ == "__main__":
_test()
|
poojavade/Genomics_Docker
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/Bio/Align/Generic.py
|
Python
|
apache-2.0
| 17,526
|
[
"BLAST",
"BioPerl",
"Biopython"
] |
23be7a96bdce1cb69fde40197eeae2fa5828f4db8b1730992635af9d37ab5816
|
# -*- coding: utf-8 -*-
#
# hl_api_connections.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Functions for connection handling
"""
from .hl_api_helper import *
from .hl_api_nodes import Create
from .hl_api_info import GetStatus
from .hl_api_simulation import GetKernelStatus, SetKernelStatus
import numpy
@check_stack
@deprecated(alt_func_name='GetConnections')
def FindConnections(source, target=None, synapse_model=None,
synapse_type=None):
"""Return an array of identifiers for connections that match the
given parameters.
.. note:: Deprecated
FindConnections() is deprecated and will be removed in the future.
Use GetConnections() instead.
If target and/or synapse_model is/are given, they must be single
values, lists of length one or the same length as source.
Use GetStatus()/SetStatus() to inspect/modify the found
connections.
Parameters
----------
source : list
Source GIDs, only connections from these
pre-synaptic neurons are returned
target : list, optional
Target GIDs, only connections to these
post-synaptic neurons are returned
synapse_model : str, optional
Only connections with this synapse model are returned
synapse_type : str, optional
Only connections with this synapse type are returned
Returns
-------
    tuple:
        Connections as dictionaries with keys
        "source", "target_thread", "synapse_modelid" and "port"
Raises
------
kernel.NESTError
If the aliases 'synapse_type' and 'synapse_model' are used together.
Notes
-----
synapse_type is alias for synapse_model for backward compatibility
See also
--------
GetConnections
GetStatus
"""
if synapse_model is not None and synapse_type is not None:
raise kernel.NESTError(
"'synapse_type' is alias for 'synapse_model' and cannot "
"be used together with 'synapse_model'.")
if synapse_type is not None:
synapse_model = synapse_type
if target is None and synapse_model is None:
params = [{"source": s} for s in source]
elif target is None and synapse_model is not None:
synapse_model = broadcast(
synapse_model, len(source), (uni_str,), "synapse_model")
params = [{"source": s, "synapse_model": syn}
for s, syn in zip(source, synapse_model)]
elif target is not None and synapse_model is None:
target = broadcast(target, len(source), (int,), "target")
params = [{"source": s, "target": t} for s, t in zip(source, target)]
else: # target is not None and synapse_model is not None
target = broadcast(target, len(source), (int,), "target")
synapse_model = broadcast(
synapse_model, len(source), (uni_str,), "synapse_model")
params = [{"source": s, "target": t, "synapse_model": syn}
for s, t, syn in zip(source, target, synapse_model)]
sps(params)
sr("{FindConnections} Map Flatten")
result = ({
'source': int(src),
'target_thread': int(tt),
'synapse_modelid': int(sm),
'port': int(prt)
} for src, _, tt, sm, prt in spp())
return tuple(result)
@check_stack
def GetConnections(source=None, target=None, synapse_model=None,
synapse_label=None):
"""Return an array of connection identifiers.
Any combination of source, target, synapse_model and
synapse_label parameters is permitted.
Parameters
----------
source : list, optional
Source GIDs, only connections from these
pre-synaptic neurons are returned
target : list, optional
Target GIDs, only connections to these
post-synaptic neurons are returned
synapse_model : str, optional
Only connections with this synapse type are returned
synapse_label : int, optional
(non-negative) only connections with this synapse label are returned
Returns
-------
array:
Connections as 5-tuples with entries
(source-gid, target-gid, target-thread, synapse-id, port)
Notes
-----
Only connections with targets on the MPI process executing
the command are returned.
Raises
------
    TypeError
        If source or target is given but is not a list of GIDs.
"""
params = {}
if source is not None:
if not is_coercible_to_sli_array(source):
raise TypeError("source must be a list of GIDs")
params['source'] = source
if target is not None:
if not is_coercible_to_sli_array(target):
raise TypeError("target must be a list of GIDs")
params['target'] = target
if synapse_model is not None:
params['synapse_model'] = kernel.SLILiteral(synapse_model)
if synapse_label is not None:
params['synapse_label'] = synapse_label
sps(params)
sr("GetConnections")
return spp()
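# Illustrative usage sketch (not part of the original module; assumes an
# initialised NEST kernel and the built-in 'iaf_psc_alpha' model):
#
#   neurons = Create('iaf_psc_alpha', 10)
#   Connect(neurons, neurons, 'one_to_one')
#   conns = GetConnections(source=neurons)   # 5-tuples, local targets only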
@check_stack
@deprecated(alt_func_name='Connect')
def OneToOneConnect(pre, post, params=None, delay=None,
model="static_synapse"):
"""Make one-to-one connections between the nodes in
pre and the nodes in post.
.. note:: Deprecated
OneToOneConnect() is deprecated and will be removed in the future.
Use Connect() instead.
Parameters
----------
pre : list
Presynaptic nodes, as list of GIDs (same length as post)
post : list
Postsynaptic nodes, as list of GIDs (same length as pre)
params : dict or list of dicts or float or list of floats, optional
If given as a single dict or list of dicts (of same length as pre
and post), these are used as parameters for the connections.
If given as a a single float or as list of floats (of same length
as pre and post), the value is used as weight(s). In this case the
delay also has to be given as float or as list of floats.
delay : float or list of floats, optional
Delays of the connections
model : str, optional
Synapse model to use
Raises
------
kernel.NESTError
"""
if len(pre) != len(post):
raise kernel.NESTError("pre and post have to be the same length")
# pre post Connect
if params is None and delay is None:
for s, d in zip(pre, post):
sps(s)
sps(d)
sr('/%s Connect' % model)
# pre post params Connect
elif params is not None and delay is None:
params = broadcast(params, len(pre), (dict,), "params")
if len(params) != len(pre):
raise kernel.NESTError(
"params must be a dict, or list of dicts of length 1 "
" or len(pre).")
for s, d, p in zip(pre, post, params):
sps(s)
sps(d)
sps(p)
sr('/%s Connect' % model)
# pre post w d Connect
elif params is not None and delay is not None:
params = broadcast(params, len(pre), (float,), "params")
if len(params) != len(pre):
raise kernel.NESTError(
"params must be a float, or list of floats of length 1 "
"or len(pre) and will be used as weight(s).")
delay = broadcast(delay, len(pre), (float,), "delay")
if len(delay) != len(pre):
raise kernel.NESTError(
"delay must be a float, or list of floats of length 1 "
"or len(pre).")
for s, d, w, dl in zip(pre, post, params, delay):
sps(s)
sps(d)
sps(w)
sps(dl)
sr('/%s Connect' % model)
else:
raise kernel.NESTError("Both 'params' and 'delay' have to be given.")
@check_stack
@deprecated(alt_func_name='Connect')
def ConvergentConnect(pre, post, weight=None, delay=None,
model="static_synapse"):
"""Connect all neurons in pre to each neuron in post.
.. note:: Deprecated
ConvergentConnect() is deprecated and will be removed in the future.
Use Connect() instead.
Parameters
----------
pre : list
Presynaptic nodes, as list of GIDs (same length as post)
post : list
Postsynaptic nodes, as list of GIDs (same length as pre)
weight : float or list of floats, optional
If given as a a single float or as list of floats (of same length
as pre and post), the value is used as weight(s). In this case the
delay also has to be given as float or as list of floats.
delay : float or list of floats, optional
Delays of the connections
model : str, optional
Synapse model to use
Raises
------
kernel.NESTError
"""
if weight is None and delay is None:
for d in post:
sps(pre)
sps(d)
sr('/%s ConvergentConnect' % model)
elif weight is not None and delay is not None:
weight = broadcast(weight, len(pre), (float,), "weight")
if len(weight) != len(pre):
raise kernel.NESTError(
"weight must be a float, or sequence of floats of length 1 "
"or len(pre)")
delay = broadcast(delay, len(pre), (float,), "delay")
if len(delay) != len(pre):
raise kernel.NESTError(
"delay must be a float, or sequence of floats of length 1 "
"or len(pre)")
for d in post:
sps(pre)
sps(d)
sps(weight)
sps(delay)
sr('/%s ConvergentConnect' % model)
else:
raise kernel.NESTError("Both 'weight' and 'delay' have to be given.")
@check_stack
@deprecated(alt_func_name='Connect')
def RandomConvergentConnect(pre, post, n, weight=None, delay=None,
model="static_synapse", options=None):
"""Connect n randomly selected neurons from pre to each neuron in
post.
.. note:: Deprecated
RandomConvergentConnect() is deprecated and will be removed
in the future. Use Connect() instead.
Parameters
----------
pre : list
Presynaptic nodes, as list of GIDs (same length as post)
post : list
Postsynaptic nodes, as list of GIDs (same length as pre)
n : int
Number of presynaptic neurons to connect
weight : float or list of floats, optional
If given as a a single float or as list of floats (of same length
as pre and post), the value is used as weight(s). In this case the
delay also has to be given as float or as list of floats.
delay : float or list of floats, optional
Delays of the connections
model : str, optional
Synapse model to use
options : dict, optional
Options to the RandomConvergentConnect function:
'allow_autapses', 'allow_multapses'
Raises
------
kernel.NESTError
TypeError
"""
if not isinstance(n, int):
raise TypeError("number of neurons n should be an integer")
# store current options, set desired options
old_options = None
error = False
if options is not None:
old_options = sli_func('GetOptions', '/RandomConvergentConnect',
litconv=True)
del old_options['DefaultOptions'] # in the way when restoring
sli_func('SetOptions', '/RandomConvergentConnect', options,
litconv=True)
if weight is None and delay is None:
sli_func(
'/m Set /n Set /pre Set ' +
'{ pre exch n m RandomConvergentConnect } forall',
post, pre, n, '/'+model, litconv=True)
elif weight is not None and delay is not None:
weight = broadcast(weight, n, (float,), "weight")
if len(weight) != n:
raise kernel.NESTError(
"weight must be a float, or sequence of floats of "
"length 1 or n")
delay = broadcast(delay, n, (float,), "delay")
if len(delay) != n:
raise kernel.NESTError(
"delay must be a float, or sequence "
"of floats of length 1 or n")
sli_func(
'/m Set /d Set /w Set /n Set ' +
'/pre Set { pre exch n w d m RandomConvergentConnect } forall',
post, pre, n, weight, delay, '/'+model, litconv=True)
else:
error = True
# restore old options
if old_options is not None:
sli_func('SetOptions', '/RandomConvergentConnect', old_options,
litconv=True)
if error:
raise kernel.NESTError("Both 'weight' and 'delay' have to be given.")
@check_stack
@deprecated(alt_func_name='Connect')
def DivergentConnect(pre, post, weight=None, delay=None,
model="static_synapse"):
"""
Connect each neuron in pre to all neurons in post.
.. note:: Deprecated
DivergentConnect() is deprecated and will be removed
in the future. Use Connect() instead.
Parameters
----------
pre : list
Presynaptic nodes, as list of GIDs (same length as post)
post : list
Postsynaptic nodes, as list of GIDs (same length as pre)
weight : float or list of floats, optional
If given as a a single float or as list of floats (of same length
as pre and post), the value is used as weight(s). In this case the
delay also has to be given as float or as list of floats.
delay : float or list of floats, optional
Delays of the connections
model : str, optional
Synapse model to use
Raises
------
kernel.NESTError
"""
if weight is None and delay is None:
for s in pre:
sps(s)
sps(post)
sr('/%s DivergentConnect' % model)
elif weight is not None and delay is not None:
weight = broadcast(weight, len(post), (float,), "weight")
if len(weight) != len(post):
raise kernel.NESTError(
"weight must be a float, or sequence of floats of length "
"1 or len(post)")
delay = broadcast(delay, len(post), (float,), "delay")
if len(delay) != len(post):
raise kernel.NESTError(
"delay must be a float, or sequence of "
"floats of length 1 or len(post)")
cmd = '/%s DivergentConnect' % model
for s in pre:
sps(s)
sps(post)
sps(weight)
sps(delay)
sr(cmd)
else:
raise kernel.NESTError("Both 'weight' and 'delay' have to be given.")
@check_stack
def Connect(pre, post, conn_spec=None, syn_spec=None, model=None):
"""
Connect pre nodes to post nodes.
Nodes in pre and post are connected using the specified connectivity
(all-to-all by default) and synapse type (static_synapse by default).
Details depend on the connectivity rule.
Parameters
----------
pre : list
Presynaptic nodes, as list of GIDs
post : list
Postsynaptic nodes, as list of GIDs
conn_spec : str or dict, optional
Specifies connectivity rule, see below
syn_spec : str or dict, optional
Specifies synapse model, see below
model : str or dict, optional
alias for syn_spec for backward compatibility
Raises
------
    kernel.NESTError
        If the connectivity or synapse specification is invalid.
Notes
-----
    Connect does not iterate over subnets; it only connects explicitly
    specified nodes.
Connectivity specification (conn_spec)
--------------------------------------
Connectivity is specified either as a string containing the name of a
connectivity rule (default: 'all_to_all') or as a dictionary specifying
the rule and any mandatory rule-specific parameters (e.g. 'indegree').
In addition, switches setting permission for establishing
self-connections ('autapses', default: True) and multiple connections
between a pair of nodes ('multapses', default: True) can be contained
in the dictionary. Another switch enables the creation of symmetric
connections ('symmetric', default: False) by also creating connections
in the opposite direction.
Available rules and associated parameters
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- 'all_to_all' (default)
- 'one_to_one'
- 'fixed_indegree', 'indegree'
- 'fixed_outdegree', 'outdegree'
- 'fixed_total_number', 'N'
- 'pairwise_bernoulli', 'p'
Example conn-spec choices
~~~~~~~~~~~~~~~~~~~~~~~~~
- 'one_to_one'
- {'rule': 'fixed_indegree', 'indegree': 2500, 'autapses': False}
- {'rule': 'pairwise_bernoulli', 'p': 0.1}
Synapse specification (syn_spec)
--------------------------------------
The synapse model and its properties can be given either as a string
identifying a specific synapse model (default: 'static_synapse') or
as a dictionary specifying the synapse model and its parameters.
Available keys in the synapse specification dictionary are:
- 'model'
- 'weight'
- 'delay'
- 'receptor_type'
- any parameters specific to the selected synapse model.
All parameters are optional and if not specified, the default values
of the synapse model will be used. The key 'model' identifies the
synapse model, this can be one of NEST's built-in synapse models
or a user-defined model created via CopyModel().
If 'model' is not specified the default model 'static_synapse'
will be used.
All other parameters can be scalars, arrays or distributions.
    In the case of scalar parameters, all values must be doubles
except for 'receptor_type' which must be initialised with an integer.
Parameter arrays are only available for the rules 'one_to_one' and
'all_to_all':
- For 'one_to_one' the array has to be a one-dimensional
NumPy array with length len(pre).
- For 'all_to_all' the array has to be a two-dimensional NumPy array
with shape (len(post), len(pre)), therefore the rows describe the
target and the columns the source neurons.
Any distributed parameter must be initialised with a further dictionary
specifying the distribution type ('distribution', e.g. 'normal') and
any distribution-specific parameters (e.g. 'mu' and 'sigma').
To see all available distributions, run:
    nest.slirun('rdevdict info')
To get information on a particular distribution, e.g. 'binomial', run:
    nest.help('rdevdict::binomial')
Most common available distributions and associated parameters
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- 'normal' with 'mu', 'sigma'
- 'normal_clipped' with 'mu', 'sigma', 'low', 'high'
- 'lognormal' with 'mu', 'sigma'
- 'lognormal_clipped' with 'mu', 'sigma', 'low', 'high'
- 'uniform' with 'low', 'high'
- 'uniform_int' with 'low', 'high'
Example syn-spec choices
~~~~~~~~~~~~~~~~~~~~~~~~~
- 'stdp_synapse'
- {'weight': 2.4, 'receptor_type': 1}
- {'model': 'stdp_synapse',
'weight': 2.5,
'delay': {'distribution': 'uniform', 'low': 0.8, 'high': 2.5},
'alpha': {
'distribution': 'normal_clipped', 'low': 0.5,
'mu': 5.0, 'sigma': 1.0}
}
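    Usage sketch
    ~~~~~~~~~~~~
    A minimal, hedged example; the neuron model and population sizes are
    illustrative assumptions, not prescriptions:

        pre = nest.Create('iaf_psc_alpha', 10)
        post = nest.Create('iaf_psc_alpha', 10)
        nest.Connect(pre, post,
                     conn_spec={'rule': 'fixed_indegree', 'indegree': 2},
                     syn_spec={'model': 'static_synapse', 'weight': 2.0,
                               'delay': 1.5})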
"""
if model is not None:
deprecation_text = "".join([
"The argument 'model' is there for backward compatibility with ",
"the old Connect function and will be removed in a future",
"version of NEST. Please change the name of the keyword argument ",
"from 'model' to 'syn_spec'. For details, see the documentation ",
"at:\nhttp://www.nest-simulator.org/connection_management"
])
show_deprecation_warning("BackwardCompatibilityConnect",
text=deprecation_text)
if model is not None and syn_spec is not None:
raise kernel.NESTError(
"'model' is an alias for 'syn_spec' and cannot "
"be used together with 'syn_spec'.")
sps(pre)
sps(post)
# default rule
rule = 'all_to_all'
if conn_spec is not None:
sps(conn_spec)
if is_string(conn_spec):
rule = conn_spec
sr("cvlit")
elif isinstance(conn_spec, dict):
rule = conn_spec['rule']
else:
raise kernel.NESTError(
"conn_spec needs to be a string or dictionary.")
else:
sr('/Connect /conn_spec GetOption')
if model is not None:
syn_spec = model
if syn_spec is not None:
if is_string(syn_spec):
sps(syn_spec)
sr("cvlit")
elif isinstance(syn_spec, dict):
for key, value in syn_spec.items():
# if value is a list, it is converted to a numpy array
if isinstance(value, (list, tuple)):
value = numpy.asarray(value)
if isinstance(value, (numpy.ndarray, numpy.generic)):
if len(value.shape) == 1:
if rule == 'one_to_one':
if value.shape[0] != len(pre):
raise kernel.NESTError(
"'" + key + "' has to be an array of "
"dimension " + str(len(pre)) + ", a "
"scalar or a dictionary.")
else:
syn_spec[key] = value
else:
raise kernel.NESTError(
"'" + key + "' has the wrong type. "
"One-dimensional parameter arrays can "
"only be used in conjunction with rule "
"'one_to_one'.")
elif len(value.shape) == 2:
if rule == 'all_to_all':
if value.shape[0] != len(post) or \
value.shape[1] != len(pre):
raise kernel.NESTError(
"'" + key + "' has to be an array of "
"dimension " + str(len(post)) + "x" +
str(len(pre)) +
" (n_target x n_sources), " +
"a scalar or a dictionary.")
else:
syn_spec[key] = value.flatten()
else:
raise kernel.NESTError(
"'" + key + "' has the wrong type. "
"Two-dimensional parameter arrays can "
"only be used in conjunction with rule "
"'all_to_all'.")
sps(syn_spec)
else:
raise kernel.NESTError(
"syn_spec needs to be a string or dictionary.")
sr('Connect')
@check_stack
def DataConnect(pre, params=None, model="static_synapse"):
"""Connect neurons from lists of connection data.
Parameters
----------
pre : list
Presynaptic nodes, given as lists of GIDs or lists
of synapse status dictionaries. See below.
params : list, optional
See below
model : str, optional
Synapse model to use, see below
Raises
------
TypeError
Usage Variants
--------------
Variant 1
~~~~~~~~~
Connect each neuron in pre to the targets given in params,
using synapse type model.
- pre: [gid_1, ... gid_n]
- params: [ {param_1}, ..., {param_n} ]
- model= 'synapse_model'
The dictionaries param_1 to param_n must contain at least the
following keys:
- 'target'
- 'weight'
- 'delay'
Each key must resolve to a list or numpy.ndarray of values.
Depending on the synapse model, other parameters can be given
in the same format. All arrays in params must have the same
length as 'target'.
Variant 2
~~~~~~~~~
Connect neurons according to a list of synapse status dictionaries,
as obtained from GetStatus.
pre = [ {synapse_state1}, ..., {synapse_state_n}]
params=None
model=None
During connection, status dictionary misses will not raise errors,
even if the kernel property 'dict_miss_is_error' is True.
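    Example
    ~~~~~~~
    A hedged sketch of Variant 1; the GIDs and parameter values are
    purely illustrative:

        nest.DataConnect([1], [{'target': [2.0, 3.0],
                                'weight': [1.5, 2.5],
                                'delay': [1.0, 1.0]}])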
"""
if not is_coercible_to_sli_array(pre):
raise TypeError(
"pre must be a list of nodes or connection dictionaries")
if params is not None:
if not is_coercible_to_sli_array(params):
raise TypeError("params must be a list of dictionaries")
cmd = '({0}) DataConnect_i_D_s '.format(model)
for s, p in zip(pre, params):
sps(s)
sps(p)
sr(cmd)
else:
# Call the variant where all connections are given explicitly
# Disable dict checking, because most models can't re-use
# their own status dict
dict_miss = GetKernelStatus('dict_miss_is_error')
SetKernelStatus({'dict_miss_is_error': False})
sps(pre)
sr('DataConnect_a')
SetKernelStatus({'dict_miss_is_error': dict_miss})
@check_stack
@deprecated(alt_func_name='Connect')
def RandomDivergentConnect(pre, post, n, weight=None, delay=None,
model="static_synapse", options=None):
"""Connect each neuron in pre to n randomly selected neurons from
post.
.. note:: Deprecated
RandomDivergentConnect() is deprecated and will be removed
in the future. Use Connect() instead.
Parameters
----------
    pre : list
        Presynaptic nodes, as list of GIDs
    post : list
        Postsynaptic nodes, as list of GIDs
    n : int
        Number of postsynaptic neurons to connect each presynaptic
        neuron to
    weight : float or list of floats, optional
        If given as a single float or as a list of floats of length n,
        the value is used as weight(s). In this case the delay also has
        to be given as a float or as a list of floats of length n.
delay : float or list of floats, optional
Delays of the connections
model : str, optional
Synapse model to use
options : dict, optional
Options to the RandomDivergentConnect function:
'allow_autapses', 'allow_multapses'
Raises
------
kernel.NESTError
TypeError
"""
if not isinstance(n, int):
raise TypeError("number of neurons n should be an integer")
# store current options, set desired options
old_options = None
error = False
if options is not None:
old_options = sli_func('GetOptions', '/RandomDivergentConnect',
litconv=True)
del old_options['DefaultOptions'] # in the way when restoring
sli_func('SetOptions', '/RandomDivergentConnect', options,
litconv=True)
if weight is None and delay is None:
sli_func(
'/m Set /n Set /post Set ' +
'{ n post m RandomDivergentConnect } forall',
pre, post, n, '/'+model, litconv=True)
elif weight is not None and delay is not None:
weight = broadcast(weight, n, (float,), "weight")
if len(weight) != n:
raise kernel.NESTError(
"weight must be a float, or sequence of floats of length 1" +
" or n")
delay = broadcast(delay, n, (float,), "delay")
if len(delay) != n:
raise kernel.NESTError(
"delay must be a float, or sequence of floats of length 1" +
"or n")
sli_func(
'/m Set /d Set /w Set /n Set /post Set ' +
'{ n post w d m RandomDivergentConnect } forall',
pre, post, n, weight, delay, '/'+model, litconv=True)
else:
error = True
# restore old options
if old_options is not None:
sli_func('SetOptions', '/RandomDivergentConnect', old_options,
litconv=True)
if error:
raise kernel.NESTError("Both 'weight' and 'delay' have to be given.")
def _is_subnet_instance(gids):
"""Returns true if all gids point to subnet or derived type.
Parameters
----------
    gids : list
        List of GIDs to check
Returns
-------
bool:
true if all gids point to subnet or derived type
"""
try:
GetChildren(gids)
return True
except kernel.NESTError:
return False
@check_stack
def CGConnect(pre, post, cg, parameter_map=None, model="static_synapse"):
"""
Connect neurons from pre to neurons from post using connectivity
specified by the connection generator cg.
This function is only available if NEST was compiled with
support for libneurosim.
Parameters
----------
pre : list
must contain 1 subnet, or a list of GIDs
post : list
must contain 1 subnet, or a list of GIDs
cg : connection generator
libneurosim connection generator to use
parameter_map : dict, optional
Maps names of values such as weight and delay to
value set positions
model : str, optional
Synapse model to use
Raises
------
kernel.NESTError
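    Example
    -------
    A hedged sketch; the CSA library usage follows its documented API,
    but the connection fraction, weight and delay values are
    illustrative:

        import csa
        cg = csa.cset(csa.random(0.1), 10000.0, 1.0)
        nest.CGConnect(pre, post, cg,
                       parameter_map={'weight': 0, 'delay': 1})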
"""
sr("statusdict/have_libneurosim ::")
if not spp():
raise kernel.NESTError(
"NEST was not compiled with support for libneurosim: " +
"CGConnect is not available.")
if parameter_map is None:
parameter_map = {}
if _is_subnet_instance(pre[:1]):
if not _is_subnet_instance(post[:1]):
raise kernel.NESTError(
"if pre is a subnet, post also has to be a subnet")
if len(pre) > 1 or len(post) > 1:
raise kernel.NESTError(
"the length of pre and post has to be 1 if subnets " +
"are given")
sli_func('CGConnect', cg, pre[0], post[0],
parameter_map, '/'+model, litconv=True)
else:
sli_func('CGConnect', cg, pre, post,
parameter_map, '/'+model, litconv=True)
@check_stack
def CGParse(xml_filename):
"""Parse an XML file and return the correcponding connection
generator cg.
The library to provide the parsing can be selected
by CGSelectImplementation().
Parameters
----------
xml_filename : str
Filename of the xml file to parse.
Raises
------
kernel.NESTError
"""
sr("statusdict/have_libneurosim ::")
if not spp():
raise kernel.NESTError(
"NEST was not compiled with support for libneurosim: " +
"CGParse is not available.")
sps(xml_filename)
sr("CGParse")
return spp()
@check_stack
def CGSelectImplementation(tag, library):
"""Select a library to provide a parser for XML files and associate
an XML tag with the library.
XML files can be read by CGParse().
Parameters
----------
tag : str
XML tag to associate with the library
library : str
Library to use to parse XML files
Raises
------
kernel.NESTError
"""
sr("statusdict/have_libneurosim ::")
if not spp():
raise kernel.NESTError(
"NEST was not compiled with support for libneurosim: " +
"CGSelectImplementation is not available.")
sps(tag)
sps(library)
sr("CGSelectImplementation")
@check_stack
def DisconnectOneToOne(source, target, syn_spec):
"""Disconnect a currently existing synapse.
Parameters
----------
source : int
GID of presynaptic node
target : int
GID of postsynaptic node
syn_spec : str or dict
See Connect() for definition
"""
sps(source)
sps(target)
if syn_spec is not None:
sps(syn_spec)
if is_string(syn_spec):
sr("cvlit")
sr('Disconnect')
@check_stack
def Disconnect(pre, post, conn_spec, syn_spec):
"""Disconnect pre neurons from post neurons.
Neurons in pre and post are disconnected using the specified disconnection
rule (one-to-one by default) and synapse type (static_synapse by default).
Details depend on the disconnection rule.
Parameters
----------
pre : list
Presynaptic nodes, given as list of GIDs
post : list
Postsynaptic nodes, given as list of GIDs
conn_spec : str or dict
Disconnection rule, see below
syn_spec : str or dict
Synapse specifications, see below
conn_spec
---------
Apply the same rules as for connectivity specs in the Connect method
Possible choices of the conn_spec are
- 'one_to_one'
- 'all_to_all'
syn_spec
--------
The synapse model and its properties can be inserted either as a
string describing one synapse model (synapse models are listed in the
synapsedict) or as a dictionary as described below.
If no synapse model is specified the default model 'static_synapse'
will be used.
Available keys in the synapse dictionary are:
- 'model'
- 'weight'
    - 'delay'
- 'receptor_type'
- parameters specific to the synapse model chosen
All parameters are optional and if not specified will use the default
values determined by the current synapse model.
'model' determines the synapse type, taken from pre-defined synapse
types in NEST or manually specified synapses created via CopyModel().
All other parameters are not currently implemented.
Note: model is alias for syn_spec for backward compatibility.
Notes
-----
    Disconnect does not iterate over subnets; it only disconnects
    explicitly specified nodes.
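    Example
    -------
    A hedged sketch, assuming the corresponding connections were created
    earlier with Connect:

        nest.Disconnect(pre, post, conn_spec='one_to_one',
                        syn_spec='static_synapse')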
"""
sps(pre)
sr('cvgidcollection')
sps(post)
sr('cvgidcollection')
if conn_spec is not None:
sps(conn_spec)
if is_string(conn_spec):
sr("cvlit")
if syn_spec is not None:
sps(syn_spec)
if is_string(syn_spec):
sr("cvlit")
sr('Disconnect_g_g_D_D')
|
obreitwi/nest-simulator
|
pynest/nest/lib/hl_api_connections.py
|
Python
|
gpl-2.0
| 34,873
|
[
"NEURON"
] |
ea2dc6be38080df903d881e94b7829104da8aef290396ba9b37e3629de622ffd
|
#!/usr/bin/env python
################################################################################
# Copyright (C) 2014, 2015 GenAP, McGill University and Genome Quebec Innovation Centre
#
# This file is part of MUGQIC Pipelines.
#
# MUGQIC Pipelines is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MUGQIC Pipelines is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with MUGQIC Pipelines. If not, see <http://www.gnu.org/licenses/>.
################################################################################
# Python Standard Modules
import os
# MUGQIC Modules
from core.config import *
from core.job import *
def index(input):
return Job(
[input],
[input + ".bwt"],
[['bwa_mem', 'module_bwa']],
command="""\
bwa index \\
{input}""".format(
input=input
)
)
def mem(in1fastq, in2fastq=None, out_sam=None, read_group=None, ref=None, ini_section='bwa_mem'):
other_options = config.param(ini_section, 'other_options', required=False)
return Job(
[in1fastq, in2fastq, ref + ".bwt" if ref else None],
[out_sam],
[["bwa_mem", "module_bwa"]],
command="""\
bwa mem {other_options}{read_group} \\
{idxbase} \\
{in1fastq}{in2fastq}{out_sam}""".format(
other_options=" \\\n " + other_options if other_options else "",
read_group=" \\\n -R " + read_group if read_group else "",
idxbase=ref if ref else config.param(ini_section, 'genome_bwa_index', type='filepath'),
in1fastq=in1fastq,
in2fastq=" \\\n " + in2fastq if in2fastq else "",
out_sam=" \\\n > " + out_sam if out_sam else ""
),
removable_files=[out_sam]
)
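# Hedged usage sketch: building an alignment job with mem(). All file
# names, the read group and the reference are illustrative assumptions,
# not project defaults.
#
#   job = mem(
#       "sample_R1.fastq.gz",
#       in2fastq="sample_R2.fastq.gz",
#       out_sam="alignment/sample.sam",
#       read_group="'@RG\\tID:sample\\tSM:sample'",
#       ref="genome/genome.fa")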
|
ccmbioinfo/mugqic_pipelines
|
bfx/bwa.py
|
Python
|
lgpl-3.0
| 2,157
|
[
"BWA"
] |
5766f2a93c54d53a16608518cc0ac294957aa640b09964ccaacb25fddbd1f4da
|
# Copyright 2016 CERN. This software is distributed under the
# terms of the GNU General Public Licence version 3 (GPL Version 3),
# copied verbatim in the file LICENCE.md.
# In applying this licence, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization or
# submit itself to any jurisdiction.
# Project website: http://blond.web.cern.ch/
'''
**Module to generate distributions**
:Authors: **Danilo Quartullo**, **Helga Timko**, **Alexandre Lasheen**,
**Juan F. Esteban Mueller**, **Theodoros Argyropoulos**,
**Joel Repond**
'''
from __future__ import division, print_function, absolute_import
from builtins import str
from builtins import range
import numpy as np
import warnings
import copy
import gc
import matplotlib.pyplot as plt
from scipy.integrate import cumtrapz
from ..trackers.utilities import is_in_separatrix
from ..beam.profile import Profile, CutOptions
from ..trackers.utilities import potential_well_cut, minmax_location
from ..utils import bmath as bm
def matched_from_line_density(beam, full_ring_and_RF, line_density_input=None,
main_harmonic_option='lowest_freq',
TotalInducedVoltage=None, plot=False,
figdir='fig', half_option='first',
extraVoltageDict=None, n_iterations=100,
n_points_potential=1e4, n_points_grid=int(1e3),
dt_margin_percent=0.40, n_points_abel=1e4,
bunch_length=None, line_density_type=None,
line_density_exponent=None, seed=None,
process_pot_well = True):
'''
    *Function to generate a beam by inputting the line density. The
    distribution function is then reconstructed with the Abel transform
    and the particles randomly generated.*
'''
# Initialize variables depending on the accelerator parameters
slippage_factor = full_ring_and_RF.RingAndRFSection_list[0].eta_0[0]
eom_factor_dE = abs(slippage_factor) / (2*beam.beta**2. * beam.energy)
eom_factor_potential = (np.sign(slippage_factor) * beam.Particle.charge /
(full_ring_and_RF.RingAndRFSection_list[0].t_rev[0]))
#: *Number of points to be used in the potential well calculation*
n_points_potential = int(n_points_potential)
# Generate potential well
full_ring_and_RF.potential_well_generation(n_points=n_points_potential,
dt_margin_percent=dt_margin_percent,
main_harmonic_option=main_harmonic_option)
potential_well = full_ring_and_RF.potential_well
time_potential = full_ring_and_RF.potential_well_coordinates
extra_potential = 0
if extraVoltageDict is not None:
extra_voltage_time_input = extraVoltageDict['time_array']
extra_voltage_input = extraVoltageDict['voltage_array']
extra_potential_input = - (eom_factor_potential *
cumtrapz(extra_voltage_input,
dx=extra_voltage_time_input[1] -
extra_voltage_time_input[0], initial=0))
extra_potential = np.interp(time_potential, extra_voltage_time_input,
extra_potential_input)
if line_density_type != 'user_input':
# Time coordinates for the line density
n_points_line_den = int(1e4)
time_line_den = np.linspace(float(time_potential[0]), float(time_potential[-1]),
n_points_line_den)
line_den_resolution = time_line_den[1] - time_line_den[0]
# Normalizing the line density
line_density_ = line_density(time_line_den, line_density_type,
bunch_length, exponent=line_density_exponent,
bunch_position=(time_potential[0]+time_potential[-1])/2)
line_density_ -= np.min(line_density_)
line_density_ *= beam.n_macroparticles / np.sum(line_density_)
    elif line_density_type == 'user_input':
# Time coordinates for the line density
time_line_den = line_density_input['time_line_den']
n_points_line_den = len(time_line_den)
line_den_resolution = time_line_den[1] - time_line_den[0]
# Normalizing the line density
line_density_ = line_density_input['line_density']
line_density_ -= np.min(line_density_)
line_density_ *= beam.n_macroparticles / np.sum(line_density_)
else:
#GenerationError
raise RuntimeError('The input for the matched_from_line_density ' +
'function was not recognized')
induced_potential_final = 0
if TotalInducedVoltage is not None:
# Calculating the induced voltage
induced_voltage_object = copy.deepcopy(TotalInducedVoltage)
profile = induced_voltage_object.profile
        # Inputting new line density
profile.cut_options.cut_left = time_line_den[0] - 0.5*line_den_resolution
profile.cut_options.cut_right = time_line_den[-1] + 0.5*line_den_resolution
profile.cut_options.n_slices = n_points_line_den
profile.cut_options.cuts_unit = 's'
profile.cut_options.set_cuts()
profile.set_slices_parameters()
profile.n_macroparticles = line_density_
# Re-calculating the sources of wakes/impedances according to this
# slicing
induced_voltage_object.reprocess()
# Calculating the induced voltage
induced_voltage_object.induced_voltage_sum()
induced_voltage = induced_voltage_object.induced_voltage
# Calculating the induced potential
induced_potential = -(eom_factor_potential * cumtrapz(induced_voltage,
dx=profile.bin_size, initial=0))
# Centering the bunch in the potential well
for i in range(0, n_iterations):
if TotalInducedVoltage is not None:
# Interpolating the potential well
induced_potential_final = np.interp(time_potential,
profile.bin_centers, induced_potential)
# Induced voltage contribution
total_potential = (potential_well + induced_potential_final +
extra_potential)
# Potential well calculation around the separatrix
        if not process_pot_well:
time_potential_sep, potential_well_sep = time_potential, total_potential
else:
time_potential_sep, potential_well_sep = potential_well_cut(time_potential, total_potential)
minmax_positions_potential, minmax_values_potential = \
minmax_location(time_potential_sep, potential_well_sep)
minmax_positions_profile, minmax_values_profile = \
minmax_location(time_line_den[line_density_!=0],
line_density_[line_density_!=0])
n_minima_potential = len(minmax_positions_potential[0])
n_maxima_profile = len(minmax_positions_profile[1])
# Warnings
if n_maxima_profile > 1:
            print('Warning: the profile has several maxima; the highest one ' +
                  'is taken. Be sure the profile is smooth and not too noisy.')
max_profile_pos = minmax_positions_profile[1][np.where(
minmax_values_profile[1] == minmax_values_profile[1].max())]
else:
max_profile_pos = minmax_positions_profile[1]
if n_minima_potential > 1:
            print('Warning: the potential well has several minima; the ' +
                  'deepest one is taken. The induced potential is probably ' +
                  'splitting the potential well.')
min_potential_pos = minmax_positions_potential[0][np.where(
minmax_values_potential[0]==minmax_values_potential[0].min())]
else:
min_potential_pos = minmax_positions_potential[0]
# Moving the bunch (not for the last iteration if intensity effects
# are present)
if TotalInducedVoltage is None:
time_line_den -= max_profile_pos - min_potential_pos
max_profile_pos -= max_profile_pos - min_potential_pos
elif i != n_iterations-1:
time_line_den -= max_profile_pos - min_potential_pos
# Update profile
profile.cut_options.cut_left -= max_profile_pos - min_potential_pos
profile.cut_options.cut_right -= max_profile_pos - min_potential_pos
profile.cut_options.set_cuts()
profile.set_slices_parameters()
# Taking the first/second half of line density and potential
n_points_abel = int(n_points_abel)
abel_both_step = 1
if half_option == 'both':
abel_both_step = 2
distribution_function_average = np.zeros((n_points_abel,2))
hamiltonian_average = np.zeros((n_points_abel,2))
for abel_index in range(0, abel_both_step):
if half_option == 'first':
half_indexes = np.where((time_line_den >= time_line_den[0]) *
(time_line_den <= max_profile_pos))
if half_option == 'second':
half_indexes = np.where((time_line_den >= max_profile_pos) *
(time_line_den <= time_line_den[-1]))
if half_option == 'both' and abel_index == 0:
half_indexes = np.where((time_line_den >= time_line_den[0]) *
(time_line_den <= max_profile_pos))
if half_option == 'both' and abel_index == 1:
half_indexes = np.where((time_line_den >= max_profile_pos) *
(time_line_den <= time_line_den[-1]))
line_den_half = line_density_[half_indexes]
time_half = time_line_den[half_indexes]
potential_half = np.interp(time_half, time_potential_sep,
potential_well_sep)
potential_half = potential_half - np.min(potential_half)
# Derivative of the line density
line_den_diff = np.diff(line_den_half) / line_den_resolution
time_line_den_diff = time_half[:-1] + line_den_resolution / 2
line_den_diff = np.interp(time_half, time_line_den_diff, line_den_diff,
left=0, right=0)
# Interpolating the line density derivative and potential well for
# Abel transform
time_abel = np.linspace(float(time_half[0]), float(time_half[-1]), n_points_abel)
line_den_diff_abel = np.interp(time_abel, time_half, line_den_diff)
potential_abel = np.interp(time_abel, time_half, potential_half)
distribution_function_ = np.zeros(n_points_abel)
hamiltonian_coord = np.zeros(n_points_abel)
# Abel transform
warnings.filterwarnings("ignore")
if (half_option == 'first') or (half_option == 'both' and
abel_index == 0):
for i in range(0, n_points_abel):
integrand = (line_den_diff_abel[:i+1] /
np.sqrt(potential_abel[:i+1] - potential_abel[i]))
if len(integrand)>2:
integrand[-1] = integrand[-2] + (integrand[-2] -
integrand[-3])
elif len(integrand)>1:
integrand[-1] = integrand[-2]
else:
integrand = np.array([0])
distribution_function_[i] = (np.sqrt(eom_factor_dE) / np.pi *
np.trapz(integrand, dx=line_den_resolution))
hamiltonian_coord[i] = potential_abel[i]
if (half_option == 'second') or (half_option == 'both' and
abel_index == 1):
for i in range(0, n_points_abel):
integrand = (line_den_diff_abel[i:] /
np.sqrt(potential_abel[i:] - potential_abel[i]))
if len(integrand)>2:
integrand[0] = integrand[1] + (integrand[2] - integrand[1])
                elif len(integrand)>1:
integrand[0] = integrand[1]
else:
integrand = np.array([0])
distribution_function_[i] = -(np.sqrt(eom_factor_dE) / np.pi *
np.trapz(integrand, dx=line_den_resolution))
hamiltonian_coord[i] = potential_abel[i]
warnings.filterwarnings("default")
# Cleaning the distribution function from unphysical results
distribution_function_[np.isnan(distribution_function_)] = 0
distribution_function_[distribution_function_<0] = 0
if half_option == 'both':
hamiltonian_average[:,abel_index] = hamiltonian_coord
distribution_function_average[:,abel_index] = \
distribution_function_
if half_option == 'both':
hamiltonian_coord = hamiltonian_average[:,0]
distribution_function_ = (distribution_function_average[:,0] +
np.interp(hamiltonian_coord, hamiltonian_average[:,1],
distribution_function_average[:,1])) / 2
# Compute deltaE frame corresponding to the separatrix
max_potential = np.max(potential_half)
max_deltaE = np.sqrt(max_potential / eom_factor_dE)
# Initializing the grids by reducing the resolution to a
# n_points_grid*n_points_grid frame
time_for_grid = np.linspace(float(time_line_den[0]), float(time_line_den[-1]),
n_points_grid)
deltaE_for_grid = np.linspace(-float(max_deltaE), float(max_deltaE), n_points_grid)
potential_well_for_grid = np.interp(time_for_grid, time_potential_sep,
potential_well_sep)
potential_well_for_grid = (potential_well_for_grid -
potential_well_for_grid.min())
time_grid, deltaE_grid = np.meshgrid(time_for_grid, deltaE_for_grid)
potential_well_grid = np.meshgrid(potential_well_for_grid,
potential_well_for_grid)[0]
hamiltonian_grid = eom_factor_dE * deltaE_grid**2 + potential_well_grid
# Sort the distribution function and generate the density grid
hamiltonian_argsort = np.argsort(hamiltonian_coord)
hamiltonian_coord = hamiltonian_coord.take(hamiltonian_argsort)
distribution_function_ = distribution_function_.take(hamiltonian_argsort)
density_grid = np.interp(hamiltonian_grid, hamiltonian_coord,
distribution_function_)
density_grid[np.isnan(density_grid)] = 0
density_grid[density_grid<0] = 0
# Normalizing density
density_grid = density_grid / np.sum(density_grid)
reconstructed_line_den = np.sum(density_grid, axis=0)
    # Plotting the result
if plot:
plt.figure('Generated bunch')
plt.plot(time_line_den, line_density_)
plt.plot(time_for_grid, reconstructed_line_den /
np.max(reconstructed_line_den) * np.max(line_density_))
plt.title('Line densities')
if plot == 'show':
plt.show()
elif plot == 'savefig':
fign = figdir + '/generated_bunch.png'
plt.savefig(fign)
# Populating the bunch
populate_bunch(beam, time_grid, deltaE_grid, density_grid,
time_for_grid[1]-time_for_grid[0],
deltaE_for_grid[1]-deltaE_for_grid[0], seed)
if TotalInducedVoltage is not None:
        # Inputting new line density
profile.cut_options.cut_left = time_for_grid[0] - 0.5*(time_for_grid[1]-time_for_grid[0])
profile.cut_options.cut_right = time_for_grid[-1] + 0.5*(time_for_grid[1]-time_for_grid[0])
profile.cut_options.n_slices = n_points_grid
profile.cut_options.set_cuts()
profile.set_slices_parameters()
profile.n_macroparticles = reconstructed_line_den*beam.n_macroparticles
# Re-calculating the sources of wakes/impedances according to this
# slicing
induced_voltage_object.reprocess()
# Calculating the induced voltage
induced_voltage_object.induced_voltage_sum()
gc.collect()
return [hamiltonian_coord, distribution_function_], \
induced_voltage_object
else:
gc.collect()
return [hamiltonian_coord, distribution_function_],\
[time_line_den, line_density_]
def matched_from_distribution_function(beam, full_ring_and_RF,
distribution_function_input=None,
distribution_user_table=None,
main_harmonic_option='lowest_freq',
TotalInducedVoltage=None,
n_iterations=1, n_points_potential=1e4,
n_points_grid=int(1e3),
dt_margin_percent=0.40,
extraVoltageDict=None, seed=None,
distribution_exponent=None,
distribution_type=None,
emittance=None, bunch_length=None,
bunch_length_fit=None,
distribution_variable='Hamiltonian',
process_pot_well = True,
turn_number=0):
'''
    *Function to generate a beam by inputting the distribution function (by
choosing the type of distribution and the emittance).
The potential well is preprocessed to check for the min/max and center
the frame around the separatrix.
An error will be raised if there is not a full potential well (2 max
and 1 min at least), or if there are several wells (more than 2 max and
1 min, this case will be treated in the future).
An adjustable margin (40% by default) is applied in order to be able to
catch the min/max of the potential well that might be on the edge of the
    frame. The slippage factor should be updated to take higher orders into
    account. Outputs should be added in order for the user to check step by
    step if the bunch is going to be well generated. More detailed
    'step-by-step' documentation should be implemented.
The user can input a custom distribution function by setting the parameter
distribution_type = 'user_input' and passing the function in the
parameter distribution_options['function'], with the following definition:
distribution_function(action_array, dist_type, length, exponent=None).
The user can also add an input table by setting the parameter
distribution_type = 'user_input_table',
distribution_options['user_table_action'] = array of action (in H or in J)
and distribution_options['user_table_distribution']*
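    A hedged sketch of such a user-supplied function, matching the
    signature given above (the exponential form is illustrative only):

        def my_distribution(action_array, dist_type, length, exponent=None):
            return np.exp(-action_array / length)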
'''
# Loading the distribution function if provided by the user
if distribution_function_input is not None:
distribution_function_ = distribution_function_input
else:
distribution_function_ = distribution_function
# Initialize variables depending on the accelerator parameters
slippage_factor = full_ring_and_RF.RingAndRFSection_list[0].eta_0[turn_number]
beta = full_ring_and_RF.RingAndRFSection_list[0].rf_params.beta[turn_number]
energy = full_ring_and_RF.RingAndRFSection_list[0].rf_params.energy[turn_number]
eom_factor_dE = abs(slippage_factor) / (2*beta**2. * energy)
eom_factor_potential = (np.sign(slippage_factor) * beam.Particle.charge /
(full_ring_and_RF.RingAndRFSection_list[0].t_rev[turn_number]))
#: *Number of points to be used in the potential well calculation*
n_points_potential = int(n_points_potential)
# Generate potential well
full_ring_and_RF.potential_well_generation(turn=turn_number,
n_points=n_points_potential,
dt_margin_percent=dt_margin_percent,
main_harmonic_option=main_harmonic_option)
potential_well = full_ring_and_RF.potential_well
time_potential = full_ring_and_RF.potential_well_coordinates
induced_potential = 0
# Extra potential from previous bunches (for multi-bunch generation)
extra_potential = 0
if extraVoltageDict is not None:
extra_voltage_time_input = extraVoltageDict['time_array']
extra_voltage_input = extraVoltageDict['voltage_array']
extra_potential_input = -(eom_factor_potential *
cumtrapz(extra_voltage_input, dx=extra_voltage_time_input[1]-
extra_voltage_time_input[0], initial=0))
extra_potential = np.interp(time_potential, extra_voltage_time_input,
extra_potential_input)
total_potential = potential_well + induced_potential + extra_potential
if not TotalInducedVoltage:
n_iterations = 1
else:
induced_voltage_object = copy.deepcopy(TotalInducedVoltage)
profile = induced_voltage_object.profile
dE_trajectory = np.zeros(n_points_potential)
for i in range(n_iterations):
old_potential = copy.deepcopy(total_potential)
# Adding the induced potential to the RF potential
total_potential = (potential_well + induced_potential +
extra_potential)
sse = np.sqrt(np.sum((old_potential - total_potential)**2))
print('Matching the bunch... (iteration: ' + str(i) + ' and sse: ' +
str(sse) +')')
# Process the potential well in order to take a frame around the separatrix
        if not process_pot_well:
time_potential_sep, potential_well_sep = time_potential, total_potential
else:
time_potential_sep, potential_well_sep = potential_well_cut(time_potential, total_potential)
# Potential is shifted to put the minimum on 0
potential_well_sep = potential_well_sep - np.min(potential_well_sep)
# Compute deltaE frame corresponding to the separatrix
max_potential = np.max(potential_well_sep)
max_deltaE = np.sqrt(max_potential / eom_factor_dE)
# Initializing the grids by reducing the resolution to a
# n_points_grid*n_points_grid frame
time_potential_low_res = np.linspace(float(time_potential_sep[0]),
float(time_potential_sep[-1]),
n_points_grid)
time_resolution_low = (time_potential_low_res[1] -
time_potential_low_res[0])
deltaE_coord_array = np.linspace(-float(max_deltaE), float(max_deltaE),
n_points_grid)
potential_well_low_res = np.interp(time_potential_low_res,
time_potential_sep, potential_well_sep)
time_grid, deltaE_grid = np.meshgrid(time_potential_low_res,
deltaE_coord_array)
potential_well_grid = np.meshgrid(potential_well_low_res,
potential_well_low_res)[0]
# Computing the action J by integrating the dE trajectories
J_array_dE0 = np.zeros(n_points_grid)
full_ring_and_RF2 = copy.deepcopy(full_ring_and_RF)
for j in range(n_points_grid):
# Find left and right time coordinates for a given hamiltonian
# value
time_indexes = np.where(potential_well_low_res <=
potential_well_low_res[j])[0]
left_time = time_potential_low_res[np.max((0,time_indexes[0]))]
right_time = time_potential_low_res[np.min((time_indexes[-1],
n_points_grid-1))]
# Potential well calculation with high resolution in that frame
time_potential_high_res = np.linspace(float(left_time), float(right_time),
n_points_potential)
full_ring_and_RF2.potential_well_generation(
n_points=n_points_potential,
time_array=time_potential_high_res,
main_harmonic_option=main_harmonic_option)
pot_well_high_res = full_ring_and_RF2.potential_well
if TotalInducedVoltage is not None and i != 0:
induced_potential_hires = np.interp(time_potential_high_res,
time_potential, induced_potential +
extra_potential, left=0, right=0)
pot_well_high_res += induced_potential_hires
pot_well_high_res -= pot_well_high_res.min()
# Integration to calculate action
dE_trajectory[pot_well_high_res <= potential_well_low_res[j]] = \
np.sqrt((potential_well_low_res[j] -
pot_well_high_res[pot_well_high_res <=
potential_well_low_res[j]]) / eom_factor_dE)
dE_trajectory[pot_well_high_res > potential_well_low_res[j]] = 0
J_array_dE0[j] = 1 / np.pi * np.trapz(dE_trajectory,
dx=time_potential_high_res[1] - time_potential_high_res[0])
# Sorting the H and J functions to be able to interpolate J(H)
H_array_dE0 = potential_well_low_res
sorted_H_dE0 = H_array_dE0[H_array_dE0.argsort()]
sorted_J_dE0 = J_array_dE0[H_array_dE0.argsort()]
# Calculating the H and J grid
H_grid = eom_factor_dE * deltaE_grid**2 + potential_well_grid
J_grid = np.interp(H_grid, sorted_H_dE0, sorted_J_dE0, left=0,
right=np.inf)
# Choice of either H or J as the variable used
if distribution_variable == 'Action':
sorted_X_dE0 = sorted_J_dE0
X_grid = J_grid
elif distribution_variable == 'Hamiltonian':
sorted_X_dE0 = sorted_H_dE0
X_grid = H_grid
else:
#DistributionError
raise RuntimeError('The distribution_variable option was not ' +
'recognized')
# Computing bunch length as a function of H/J if needed
# Bunch length can be calculated as 4-rms, Gaussian fit, or FWHM
if bunch_length is not None:
X0 = X0_from_bunch_length(bunch_length, bunch_length_fit,
X_grid, sorted_X_dE0, n_points_grid,
time_potential_low_res, distribution_function_,
distribution_type, distribution_exponent, beam,
full_ring_and_RF)
elif emittance is not None:
if distribution_variable == 'Action':
X0 = emittance / (2*np.pi)
elif distribution_variable == 'Hamiltonian':
X0 = np.interp(emittance / (2*np.pi), sorted_J_dE0,
sorted_H_dE0)
# Computing the density grid
if distribution_user_table is None:
density_grid = distribution_function_(X_grid, distribution_type,
X0, distribution_exponent)
else:
density_grid = np.interp(X_grid,
distribution_user_table['user_table_action'],
distribution_user_table['user_table_distribution'])
# Normalizing the grid
density_grid[H_grid>np.max(H_array_dE0)] = 0
density_grid = density_grid / np.sum(density_grid)
# Calculating the line density
line_density_ = np.sum(density_grid, axis=0)
line_density_ *= beam.n_macroparticles / np.sum(line_density_)
# Induced voltage contribution
if TotalInducedVoltage is not None:
            # Inputting new line density
profile.cut_options.cut_left = time_potential_low_res[0] - 0.5*time_resolution_low
profile.cut_options.cut_right = time_potential_low_res[-1] + 0.5*time_resolution_low
profile.cut_options.n_slices = n_points_grid
profile.cut_options.cuts_unit = 's'
profile.cut_options.set_cuts()
profile.set_slices_parameters()
profile.n_macroparticles = line_density_
# Re-calculating the sources of wakes/impedances according to this
# slicing
induced_voltage_object.reprocess()
# Calculating the induced voltage
induced_voltage_object.induced_voltage_sum()
induced_voltage = induced_voltage_object.induced_voltage
# Calculating the induced potential
induced_potential_low_res = -(eom_factor_potential *
cumtrapz(induced_voltage,
dx=time_resolution_low,
initial=0))
induced_potential = np.interp(time_potential,
time_potential_low_res, induced_potential_low_res,
left=0, right=0)
del full_ring_and_RF2
gc.collect()
# Populating the bunch
populate_bunch(beam, time_grid, deltaE_grid, density_grid,
time_resolution_low, deltaE_coord_array[1] -
deltaE_coord_array[0], seed)
if TotalInducedVoltage is not None:
return [time_potential_low_res, line_density_], induced_voltage_object
else:
return [time_potential_low_res, line_density_]
def X0_from_bunch_length(bunch_length, bunch_length_fit, X_grid, sorted_X_dE0,
n_points_grid, time_potential_low_res,
distribution_function_, distribution_type,
distribution_exponent, beam, full_ring_and_RF):
'''
Function to find the corresponding H0 or J0 for a given bunch length.
Used by matched_from_distribution_function()
'''
tau = 0.0
# Initial values for iteration
X_low = sorted_X_dE0[0]
X_hi = sorted_X_dE0[-1]
X_min = sorted_X_dE0[0]
X_max = sorted_X_dE0[-1]
X_accuracy = (sorted_X_dE0[1] - sorted_X_dE0[0]) / 2.0
bin_size = (time_potential_low_res[1] - time_potential_low_res[0])
# Iteration to find H0/J0 from the bunch length
while np.abs(bunch_length-tau) > bin_size:
# Takes middle point of the interval [X_low,X_hi]
X0 = 0.5 * (X_low + X_hi)
if bunch_length_fit == 'full':
bunchIndices = np.where(np.sum(X_grid<=X0, axis=0))[0]
tau = (time_potential_low_res[bunchIndices][-1] -
time_potential_low_res[bunchIndices][0])
else:
# Calculating the line density for the parameter X0
density_grid = distribution_function_(X_grid,
distribution_type, X0, distribution_exponent)
density_grid = density_grid / np.sum(density_grid)
line_density_ = np.sum(density_grid, axis=0)
# Calculating the bunch length of that line density
if (line_density_ > 0).any():
tau = 4.0 * np.sqrt(np.sum((time_potential_low_res -
np.sum(line_density_ * time_potential_low_res) /
np.sum(line_density_))**2 * line_density_) /
np.sum(line_density_))
                if bunch_length_fit is not None:
profile = Profile(
beam, CutOptions=CutOptions(cut_left=time_potential_low_res[0] -
0.5*bin_size, cut_right=time_potential_low_res[-1] +
0.5*bin_size, n_slices=n_points_grid, RFSectionParameters=full_ring_and_RF.RingAndRFSection_list[0].rf_params))
# profile = Profile(
# full_ring_and_RF.RingAndRFSection_list[0].rf_params,
# beam, n_points_grid, cut_left=time_potential_low_res[0] -
# 0.5*bin_size , cut_right=time_potential_low_res[-1] +
# 0.5*bin_size)
profile.n_macroparticles = line_density_
if bunch_length_fit == 'gauss':
profile.bl_gauss = tau
profile.bp_gauss = np.sum(line_density_ *
time_potential_low_res) / np.sum(line_density_)
profile.gaussian_fit()
tau = profile.bl_gauss
elif bunch_length_fit == 'fwhm':
profile.fwhm()
tau = profile.bunchLength
# Update of the interval for the next iteration
if tau >= bunch_length:
X_hi = X0
else:
X_low = X0
if (X_max - X0) < X_accuracy:
print('WARNING: The bucket is too small to have the ' +
'desired bunch length! Input is %.2e, ' % (bunch_length) +
'the generation gave %.2e, ' % (tau) +
'the error is %.2e' % (bunch_length-tau))
break
if (X0-X_min) < X_accuracy:
print('WARNING: The desired bunch length is too small ' +
'to be generated accurately!')
# return 0.5 * (X_low + X_hi)
return X0
def populate_bunch(beam, time_grid, deltaE_grid, density_grid, time_step,
deltaE_step, seed):
'''
*Method to populate the bunch using a random number generator from the
particle density in phase space.*
'''
# Initialise the random number generator
np.random.seed(seed=seed)
# Generating particles randomly inside the grid cells according to the
# provided density_grid
indexes = np.random.choice(np.arange(0,np.size(density_grid)),
beam.n_macroparticles, p=density_grid.flatten())
# Randomize particles inside each grid cell (uniform distribution)
beam.dt = (np.ascontiguousarray(time_grid.flatten()[indexes] +
(np.random.rand(beam.n_macroparticles) - 0.5) * time_step)).astype(dtype=bm.precision.real_t, order='C', copy=False)
beam.dE = (np.ascontiguousarray(deltaE_grid.flatten()[indexes] +
(np.random.rand(beam.n_macroparticles) - 0.5) * deltaE_step)).astype(dtype=bm.precision.real_t, order='C', copy=False)
def distribution_function(action_array, dist_type, length, exponent=None):
'''
*Distribution function (formulas from Laclare).*
'''
if dist_type in ['binomial', 'waterbag', 'parabolic_amplitude',
'parabolic_line']:
if dist_type == 'waterbag':
exponent = 0
elif dist_type == 'parabolic_amplitude':
exponent = 1
elif dist_type == 'parabolic_line':
exponent = 0.5
warnings.filterwarnings("ignore")
distribution_function_ = (1 - action_array / length)**exponent
warnings.filterwarnings("default")
distribution_function_[action_array > length] = 0
return distribution_function_
elif dist_type == 'gaussian':
distribution_function_ = np.exp(- 2 * action_array / length)
return distribution_function_
else:
#DistributionError
raise RuntimeError('The dist_type option was not recognized')
def line_density(coord_array, dist_type, bunch_length, bunch_position=0,
exponent=None):
'''
*Line density*
'''
if dist_type in ['binomial', 'waterbag', 'parabolic_amplitude',
'parabolic_line']:
if dist_type == 'waterbag':
exponent = 0
elif dist_type == 'parabolic_amplitude':
exponent = 1
elif dist_type == 'parabolic_line':
exponent = 0.5
warnings.filterwarnings("ignore")
line_density_ = ((1 - (2.0 * (coord_array - bunch_position) /
bunch_length)**2)**(exponent+0.5))
warnings.filterwarnings("default")
line_density_[np.abs(coord_array-bunch_position) > bunch_length/2] = 0
return line_density_
elif dist_type == 'gaussian':
sigma = bunch_length/4
line_density_ = np.exp(-(coord_array-bunch_position)**2 / (2*sigma**2))
return line_density_
elif dist_type == 'cosine_squared':
warnings.filterwarnings("ignore")
line_density_ = ( np.cos(np.pi * (coord_array - bunch_position) /
bunch_length)**2 )
warnings.filterwarnings("default")
line_density_[np.abs(coord_array-bunch_position) > bunch_length/2] = 0
return line_density_
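# Hedged usage sketch for the two analytic helpers above; the time axis
# and bunch parameters are illustrative values only:
#
#   t = np.linspace(-2e-9, 2e-9, 1000)
#   lam = line_density(t, 'parabolic_amplitude', bunch_length=1.5e-9)
#   f = distribution_function(np.linspace(0., 1., 100), 'gaussian', 0.3)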
def bigaussian(Ring, RFStation, Beam, sigma_dt, sigma_dE = None, seed = None,
reinsertion = False):
r"""Function generating a Gaussian beam both in time and energy
coordinates. Fills Beam.dt and Beam.dE arrays.
Parameters
----------
Ring : class
A Ring type class
RFStation : class
An RFStation type class
Beam : class
A Beam type class
sigma_dt : float
R.m.s. extension of the Gaussian in time
sigma_dE : float (optional)
R.m.s. extension of the Gaussian in energy; default is None and will
match the energy coordinate according to bucket height and sigma_dt
seed : int (optional)
Fixed seed to have a reproducible distribution
reinsertion : bool (optional)
Re-insert particles that are generated outside the separatrix into the
bucket; default in False
"""
warnings.filterwarnings("once")
if Ring.n_sections > 1:
warnings.warn("WARNING in bigaussian(): the usage of several" +
" sections is not yet implemented. Ignoring" +
" all but the first!")
if RFStation.n_rf > 1:
warnings.warn("WARNING in bigaussian(): the usage of multiple RF" +
" systems is not yet implemented. Ignoring" +
" higher harmonics!")
counter = RFStation.counter[0]
harmonic = RFStation.harmonic[0,counter]
energy = RFStation.energy[counter]
beta = RFStation.beta[counter]
omega_rf = RFStation.omega_rf[0,counter]
phi_s = RFStation.phi_s[counter]
phi_rf = RFStation.phi_rf[0,counter]
eta0 = RFStation.eta_0[counter]
# RF wave is shifted by Pi below transition
if eta0<0:
phi_rf -= np.pi
# Calculate sigma_dE from sigma_dt using single-harmonic Hamiltonian
    if sigma_dE is None:
voltage = RFStation.charge* \
RFStation.voltage[0,counter]
eta0 = RFStation.eta_0[counter]
phi_b = omega_rf*sigma_dt + phi_s
sigma_dE = np.sqrt( voltage * energy * beta**2
* (np.cos(phi_b) - np.cos(phi_s) + (phi_b - phi_s) * np.sin(phi_s))
/ (np.pi * harmonic * np.fabs(eta0)) )
Beam.sigma_dt = sigma_dt
Beam.sigma_dE = sigma_dE
# Generate coordinates
np.random.seed(seed)
Beam.dt = sigma_dt*np.random.randn(Beam.n_macroparticles).astype(dtype=bm.precision.real_t, order='C', copy=False) + \
(phi_s - phi_rf)/omega_rf
Beam.dE = sigma_dE * \
np.random.randn(Beam.n_macroparticles).astype(
dtype=bm.precision.real_t, order='C')
# Re-insert if necessary
    if reinsertion:
itemindex = np.where(is_in_separatrix(Ring,
RFStation, Beam, Beam.dt, Beam.dE) == False)[0]
while itemindex.size != 0:
Beam.dt[itemindex] = sigma_dt*np.random.randn(itemindex.size).astype(dtype=bm.precision.real_t, order='C', copy=False) \
+ (phi_s - phi_rf)/omega_rf
Beam.dE[itemindex] = sigma_dE * \
np.random.randn(itemindex.size).astype(
dtype=bm.precision.real_t, order='C')
itemindex = np.where(is_in_separatrix(Ring,
RFStation, Beam, Beam.dt, Beam.dE) == False)[0]
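# Hedged usage sketch: Ring, RFStation and Beam objects are assumed to
# have been constructed beforehand; sigma_dt is an illustrative value.
#
#   bigaussian(ring, rf_station, beam, sigma_dt=0.2e-9, seed=1234,
#              reinsertion=True)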
|
blond-admin/BLonD
|
blond/beam/distributions.py
|
Python
|
gpl-3.0
| 41,361
|
[
"Gaussian"
] |
28b6308d1b7b52526530fc8a23d34d993a3ca45edade5e309e24e00e852ab540
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RAffypdnn(RPackage):
"""The package contains functions to perform the PDNN method
described by Li Zhang et al."""
homepage = "https://www.bioconductor.org/packages/affypdnn/"
url = "https://git.bioconductor.org/packages/affypdnn"
version('1.50.0', git='https://git.bioconductor.org/packages/affypdnn', commit='97ff68e9f51f31333c0330435ea23b212b3ed18a')
depends_on('r@3.4.0:3.4.9', when='@1.50.0')
depends_on('r-affy', type=('build', 'run'))
|
EmreAtes/spack
|
var/spack/repos/builtin/packages/r-affypdnn/package.py
|
Python
|
lgpl-2.1
| 1,740
|
[
"Bioconductor"
] |
6d75cb9bc9c4dbd30bf6d3f789ea425cb76038620ecbb57bc8f2653b4badda8b
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals
import unittest2 as unittest
import os
import warnings
from pymatgen.analysis.ewald import EwaldSummation, EwaldMinimizer
from pymatgen.io.vasp.inputs import Poscar
import numpy as np
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
class EwaldSummationTest(unittest.TestCase):
def test_init(self):
filepath = os.path.join(test_dir, 'POSCAR')
p = Poscar.from_file(filepath)
original_s = p.structure
s = original_s.copy()
s.add_oxidation_state_by_element({"Li": 1, "Fe": 2,
"P": 5, "O": -2})
ham = EwaldSummation(s, compute_forces=True)
self.assertAlmostEqual(ham.real_space_energy, -502.23549897772602, 4)
self.assertAlmostEqual(ham.reciprocal_space_energy, 6.1541071599534654, 4)
self.assertAlmostEqual(ham.point_energy, -620.22598358035918, 4)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.assertAlmostEqual(ham.total_energy, -1116.30737539811, 2)
self.assertAlmostEqual(ham.forces[0, 0], -1.98818620e-01, 4)
self.assertAlmostEqual(sum(sum(abs(ham.forces))), 915.925354346, 4,
"Forces incorrect")
self.assertAlmostEqual(sum(sum(ham.real_space_energy_matrix)),
ham.real_space_energy, 4)
self.assertAlmostEqual(sum(sum(ham.reciprocal_space_energy_matrix)),
ham.reciprocal_space_energy, 4)
self.assertAlmostEqual(sum(ham.point_energy_matrix), ham.point_energy,
4)
self.assertAlmostEqual(sum(sum(ham.total_energy_matrix)),
ham.total_energy, 2)
#note that forces are not individually tested, but should work fine.
self.assertRaises(ValueError, EwaldSummation, original_s)
#try sites with charge.
charges = []
for site in original_s:
if site.specie.symbol == "Li":
charges.append(1)
elif site.specie.symbol == "Fe":
charges.append(2)
elif site.specie.symbol == "P":
charges.append(5)
else:
charges.append(-2)
original_s.add_site_property('charge', charges)
ham2 = EwaldSummation(original_s)
self.assertAlmostEqual(ham2.real_space_energy, -502.23549897772602, 4)
class EwaldMinimizerTest(unittest.TestCase):
def test_init(self):
matrix = np.array([[-3., 3., 4., -0., 3., 3., 1., 14., 9., -4.],
[1., -3., -3., 12., -4., -1., 5., 11., 1., 12.],
[14., 7., 13., 15., 13., 5., -5., 10., 14., -2.],
[9., 13., 4., 1., 3., -4., 7., 0., 6., -4.],
[4., -4., 6., 1., 12., -4., -2., 13., 0., 6.],
[13., 7., -4., 12., -2., 9., 8., -5., 3., 1.],
[8., 1., 10., -4., -2., 4., 13., 12., -3., 13.],
[2., 11., 8., 1., -1., 5., -3., 4., 5., 0.],
[-0., 14., 4., 3., -1., -5., 7., -1., -1., 3.],
[2., -2., 10., 1., 6., -5., -3., 12., 0., 13.]])
m_list = [[.9, 4, [1, 2, 3, 4, 8], 'a'], [-1, 2, [5, 6, 7], 'b']]
e_min = EwaldMinimizer(matrix, m_list, 50)
self.assertEqual(len(e_min.output_lists), 15,
"Wrong number of permutations returned")
self.assertAlmostEqual(e_min.minimized_sum, 111.63, 3,
"Returned wrong minimum value")
self.assertEqual(len(e_min.best_m_list), 6,
"Returned wrong number of permutations")
if __name__ == "__main__":
unittest.main()
|
aykol/pymatgen
|
pymatgen/analysis/tests/test_ewald.py
|
Python
|
mit
| 3,985
|
[
"VASP",
"pymatgen"
] |
765e89d14cc19c1f70668cf6b339d55c1d5581d5ad1f641628eac7446bc1ca3a
|
#
# Copyright (c) 2009-2015, Jack Poulson
# All rights reserved.
#
# This file is part of Elemental and is under the BSD 2-Clause License,
# which can be found in the LICENSE file in the root directory, or at
# http://opensource.org/licenses/BSD-2-Clause
#
from El.core import *
from ctypes import CFUNCTYPE
# Special matrices
# ****************
# Deterministic
# =============
# Bull's head
# -----------
lib.ElBullsHead_c.argtypes = \
lib.ElBullsHead_z.argtypes = \
lib.ElBullsHeadDist_c.argtypes = \
lib.ElBullsHeadDist_z.argtypes = \
[c_void_p,iType]
def BullsHead(A,n):
args = [A.obj,n]
if type(A) is Matrix:
if A.tag == cTag: lib.ElBullsHead_c(*args)
elif A.tag == zTag: lib.ElBullsHead_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == cTag: lib.ElBullsHeadDist_c(*args)
elif A.tag == zTag: lib.ElBullsHeadDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Cauchy
# ------
lib.ElCauchy_s.argtypes = \
lib.ElCauchyDist_s.argtypes = \
[c_void_p,iType,POINTER(sType),iType,POINTER(sType)]
lib.ElCauchy_d.argtypes = \
lib.ElCauchyDist_d.argtypes = \
[c_void_p,iType,POINTER(dType),iType,POINTER(dType)]
lib.ElCauchy_c.argtypes = \
lib.ElCauchyDist_c.argtypes = \
[c_void_p,iType,POINTER(cType),iType,POINTER(cType)]
lib.ElCauchy_z.argtypes = \
lib.ElCauchyDist_z.argtypes = \
[c_void_p,iType,POINTER(zType),iType,POINTER(zType)]
def Cauchy(A,x,y):
xLen = len(x)
yLen = len(y)
xBuf = (TagToType(A.tag)*xLen)(*x)
yBuf = (TagToType(A.tag)*yLen)(*y)
  args = [A.obj,xLen,xBuf,yLen,yBuf]
if type(A) is Matrix:
if A.tag == sTag: lib.ElCauchy_s(*args)
elif A.tag == dTag: lib.ElCauchy_d(*args)
elif A.tag == cTag: lib.ElCauchy_c(*args)
elif A.tag == zTag: lib.ElCauchy_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == sTag: lib.ElCauchyDist_s(*args)
elif A.tag == dTag: lib.ElCauchyDist_d(*args)
elif A.tag == cTag: lib.ElCauchyDist_c(*args)
elif A.tag == zTag: lib.ElCauchyDist_z(*args)
else: DataExcept()
else: TypeExcept()
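# Hedged usage sketch (Matrix and dTag come from El.core; the node
# vectors are illustrative):
#
#   A = Matrix(dTag)
#   Cauchy(A, [1., 2., 3.], [4., 5., 6.])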
# Cauchy-like
# -----------
lib.ElCauchyLike_s.argtypes = \
lib.ElCauchyLikeDist_s.argtypes = \
[c_void_p,iType,POINTER(sType),iType,POINTER(sType),
iType,POINTER(sType),iType,POINTER(sType)]
lib.ElCauchyLike_d.argtypes = \
lib.ElCauchyLikeDist_d.argtypes = \
[c_void_p,iType,POINTER(dType),iType,POINTER(dType),
iType,POINTER(dType),iType,POINTER(dType)]
lib.ElCauchyLike_c.argtypes = \
lib.ElCauchyLikeDist_c.argtypes = \
[c_void_p,iType,POINTER(cType),iType,POINTER(cType),
iType,POINTER(cType),iType,POINTER(cType)]
lib.ElCauchyLike_z.argtypes = \
lib.ElCauchyLikeDist_z.argtypes = \
[c_void_p,iType,POINTER(zType),iType,POINTER(zType),
iType,POINTER(zType),iType,POINTER(zType)]
def CauchyLike(A,r,s,x,y):
rLen = len(r)
sLen = len(s)
xLen = len(x)
yLen = len(y)
rBuf = (TagToType(A.tag)*rLen)(*r)
sBuf = (TagToType(A.tag)*sLen)(*s)
xBuf = (TagToType(A.tag)*xLen)(*x)
yBuf = (TagToType(A.tag)*yLen)(*y)
args = [A.obj,rLen,rBuf,sLen,sBuf,xLen,xBuf,yLen,yBuf]
if type(A) is Matrix:
if A.tag == sTag: lib.ElCauchyLike_s(*args)
elif A.tag == dTag: lib.ElCauchyLike_d(*args)
elif A.tag == cTag: lib.ElCauchyLike_c(*args)
elif A.tag == zTag: lib.ElCauchyLike_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == sTag: lib.ElCauchyLikeDist_s(*args)
elif A.tag == dTag: lib.ElCauchyLikeDist_d(*args)
elif A.tag == cTag: lib.ElCauchyLikeDist_c(*args)
elif A.tag == zTag: lib.ElCauchyLikeDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Circulant
# ---------
lib.ElCirculant_i.argtypes = \
lib.ElCirculantDist_i.argtypes = \
[c_void_p,iType,POINTER(iType)]
lib.ElCirculant_s.argtypes = \
lib.ElCirculantDist_s.argtypes = \
[c_void_p,iType,POINTER(sType)]
lib.ElCirculant_d.argtypes = \
lib.ElCirculantDist_d.argtypes = \
[c_void_p,iType,POINTER(dType)]
lib.ElCirculant_c.argtypes = \
lib.ElCirculantDist_c.argtypes = \
[c_void_p,iType,POINTER(cType)]
lib.ElCirculant_z.argtypes = \
lib.ElCirculantDist_z.argtypes = \
[c_void_p,iType,POINTER(zType)]
def Circulant(A,a):
aLen = len(a)
aBuf = (TagToType(A.tag)*aLen)(*a)
args = [A.obj,aLen,aBuf]
if type(A) is Matrix:
if A.tag == iTag: lib.ElCirculant_i(*args)
elif A.tag == sTag: lib.ElCirculant_s(*args)
elif A.tag == dTag: lib.ElCirculant_d(*args)
elif A.tag == cTag: lib.ElCirculant_c(*args)
elif A.tag == zTag: lib.ElCirculant_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == iTag: lib.ElCirculantDist_i(*args)
elif A.tag == sTag: lib.ElCirculantDist_s(*args)
elif A.tag == dTag: lib.ElCirculantDist_d(*args)
elif A.tag == cTag: lib.ElCirculantDist_c(*args)
elif A.tag == zTag: lib.ElCirculantDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Demmel
# ------
lib.ElDemmel_s.argtypes = \
lib.ElDemmel_d.argtypes = \
lib.ElDemmel_c.argtypes = \
lib.ElDemmel_z.argtypes = \
lib.ElDemmelDist_s.argtypes = \
lib.ElDemmelDist_d.argtypes = \
lib.ElDemmelDist_c.argtypes = \
lib.ElDemmelDist_z.argtypes = \
[c_void_p,iType]
def Demmel(A,n):
args = [A.obj,n]
if type(A) is Matrix:
if A.tag == sTag: lib.ElDemmel_s(*args)
elif A.tag == dTag: lib.ElDemmel_d(*args)
elif A.tag == cTag: lib.ElDemmel_c(*args)
elif A.tag == zTag: lib.ElDemmel_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == sTag: lib.ElDemmelDist_s(*args)
elif A.tag == dTag: lib.ElDemmelDist_d(*args)
elif A.tag == cTag: lib.ElDemmelDist_c(*args)
elif A.tag == zTag: lib.ElDemmelDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Diagonal
# --------
lib.ElDiagonal_i.argtypes = \
lib.ElDiagonal_s.argtypes = \
lib.ElDiagonal_d.argtypes = \
lib.ElDiagonal_c.argtypes = \
lib.ElDiagonal_z.argtypes = \
lib.ElDiagonalDist_i.argtypes = \
lib.ElDiagonalDist_s.argtypes = \
lib.ElDiagonalDist_d.argtypes = \
lib.ElDiagonalDist_c.argtypes = \
lib.ElDiagonalDist_z.argtypes = \
lib.ElDiagonalSparse_i.argtypes = \
lib.ElDiagonalSparse_s.argtypes = \
lib.ElDiagonalSparse_d.argtypes = \
lib.ElDiagonalSparse_c.argtypes = \
lib.ElDiagonalSparse_z.argtypes = \
lib.ElDiagonalDistSparse_i.argtypes = \
lib.ElDiagonalDistSparse_s.argtypes = \
lib.ElDiagonalDistSparse_d.argtypes = \
lib.ElDiagonalDistSparse_c.argtypes = \
lib.ElDiagonalDistSparse_z.argtypes = \
[c_void_p,c_void_p]
def Diagonal(A,d):
args = [A.obj,d.obj]
if type(A) is Matrix:
if A.tag == iTag: lib.ElDiagonal_i(*args)
elif A.tag == sTag: lib.ElDiagonal_s(*args)
elif A.tag == dTag: lib.ElDiagonal_d(*args)
elif A.tag == cTag: lib.ElDiagonal_c(*args)
elif A.tag == zTag: lib.ElDiagonal_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == iTag: lib.ElDiagonalDist_i(*args)
elif A.tag == sTag: lib.ElDiagonalDist_s(*args)
elif A.tag == dTag: lib.ElDiagonalDist_d(*args)
elif A.tag == cTag: lib.ElDiagonalDist_c(*args)
elif A.tag == zTag: lib.ElDiagonalDist_z(*args)
else: DataExcept()
elif type(A) is SparseMatrix:
if A.tag == iTag: lib.ElDiagonalSparse_i(*args)
elif A.tag == sTag: lib.ElDiagonalSparse_s(*args)
elif A.tag == dTag: lib.ElDiagonalSparse_d(*args)
elif A.tag == cTag: lib.ElDiagonalSparse_c(*args)
elif A.tag == zTag: lib.ElDiagonalSparse_z(*args)
else: DataExcept()
elif type(A) is DistSparseMatrix:
if A.tag == iTag: lib.ElDiagonalDistSparse_i(*args)
elif A.tag == sTag: lib.ElDiagonalDistSparse_s(*args)
elif A.tag == dTag: lib.ElDiagonalDistSparse_d(*args)
elif A.tag == cTag: lib.ElDiagonalDistSparse_c(*args)
elif A.tag == zTag: lib.ElDiagonalDistSparse_z(*args)
else: DataExcept()
else: TypeExcept()
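# Usage sketch for Diagonal (an illustration, not part of the bindings): the
# second argument is another El matrix holding the diagonal entries, passed via
# its .obj handle. 'Ones' is defined later in this module.
#
#   d = Matrix(dTag)
#   Ones(d, 5, 1)     # length-5 vector of ones
#   A = Matrix(dTag)
#   Diagonal(A, d)    # A becomes the 5 x 5 identity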
# DruinskyToledo
# --------------
lib.ElDruinskyToledo_s.argtypes = \
lib.ElDruinskyToledo_d.argtypes = \
lib.ElDruinskyToledo_c.argtypes = \
lib.ElDruinskyToledo_z.argtypes = \
lib.ElDruinskyToledoDist_s.argtypes = \
lib.ElDruinskyToledoDist_d.argtypes = \
lib.ElDruinskyToledoDist_c.argtypes = \
lib.ElDruinskyToledoDist_z.argtypes = \
[c_void_p,iType]
def DruinskyToledo(A,k):
args = [A.obj,k]
if type(A) is Matrix:
if A.tag == sTag: lib.ElDruinskyToledo_s(*args)
elif A.tag == dTag: lib.ElDruinskyToledo_d(*args)
elif A.tag == cTag: lib.ElDruinskyToledo_c(*args)
elif A.tag == zTag: lib.ElDruinskyToledo_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == sTag: lib.ElDruinskyToledoDist_s(*args)
elif A.tag == dTag: lib.ElDruinskyToledoDist_d(*args)
elif A.tag == cTag: lib.ElDruinskyToledoDist_c(*args)
elif A.tag == zTag: lib.ElDruinskyToledoDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Dynamic regularization counter-example
# --------------------------------------
lib.ElDynamicRegCounter_s.argtypes = \
lib.ElDynamicRegCounter_d.argtypes = \
lib.ElDynamicRegCounter_c.argtypes = \
lib.ElDynamicRegCounter_z.argtypes = \
lib.ElDynamicRegCounterDist_s.argtypes = \
lib.ElDynamicRegCounterDist_d.argtypes = \
lib.ElDynamicRegCounterDist_c.argtypes = \
lib.ElDynamicRegCounterDist_z.argtypes = \
lib.ElDynamicRegCounterSparse_s.argtypes = \
lib.ElDynamicRegCounterSparse_d.argtypes = \
lib.ElDynamicRegCounterSparse_c.argtypes = \
lib.ElDynamicRegCounterSparse_z.argtypes = \
lib.ElDynamicRegCounterDistSparse_s.argtypes = \
lib.ElDynamicRegCounterDistSparse_d.argtypes = \
lib.ElDynamicRegCounterDistSparse_c.argtypes = \
lib.ElDynamicRegCounterDistSparse_z.argtypes = \
[c_void_p,iType]
def DynamicRegCounter(A,n):
args = [A.obj,n]
if type(A) is Matrix:
if A.tag == sTag: lib.ElDynamicRegCounter_s(*args)
elif A.tag == dTag: lib.ElDynamicRegCounter_d(*args)
elif A.tag == cTag: lib.ElDynamicRegCounter_c(*args)
elif A.tag == zTag: lib.ElDynamicRegCounter_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == sTag: lib.ElDynamicRegCounterDist_s(*args)
elif A.tag == dTag: lib.ElDynamicRegCounterDist_d(*args)
elif A.tag == cTag: lib.ElDynamicRegCounterDist_c(*args)
elif A.tag == zTag: lib.ElDynamicRegCounterDist_z(*args)
else: DataExcept()
elif type(A) is SparseMatrix:
if A.tag == sTag: lib.ElDynamicRegCounterSparse_s(*args)
elif A.tag == dTag: lib.ElDynamicRegCounterSparse_d(*args)
elif A.tag == cTag: lib.ElDynamicRegCounterSparse_c(*args)
elif A.tag == zTag: lib.ElDynamicRegCounterSparse_z(*args)
else: DataExcept()
elif type(A) is DistSparseMatrix:
if A.tag == sTag: lib.ElDynamicRegCounterDistSparse_s(*args)
elif A.tag == dTag: lib.ElDynamicRegCounterDistSparse_d(*args)
elif A.tag == cTag: lib.ElDynamicRegCounterDistSparse_c(*args)
elif A.tag == zTag: lib.ElDynamicRegCounterDistSparse_z(*args)
else: DataExcept()
else: TypeExcept()
# Egorov
# ------
lib.ElEgorov_c.argtypes = \
lib.ElEgorovDist_c.argtypes = \
[c_void_p,CFUNCTYPE(sType,iType,iType),iType]
lib.ElEgorov_z.argtypes = \
lib.ElEgorovDist_z.argtypes = \
[c_void_p,CFUNCTYPE(dType,iType,iType),iType]
def Egorov(A,phase,n):
cPhase = CFUNCTYPE(TagToType(Base(A.tag)),iType,iType)(phase)
args = [A.obj,cPhase,n]
if type(A) is Matrix:
if A.tag == cTag: lib.ElEgorov_c(*args)
elif A.tag == zTag: lib.ElEgorov_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == cTag: lib.ElEgorovDist_c(*args)
elif A.tag == zTag: lib.ElEgorovDist_z(*args)
else: DataExcept()
else: TypeExcept()
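# Usage sketch for Egorov (hedged example): 'phase' is an ordinary Python
# callable mapping integer indices (i,j) to a real phase; the wrapper above
# converts it into a ctypes callback of the base real type, and the resulting
# matrix has unit-modulus entries determined by that phase.
#
#   A = Matrix(zTag)
#   Egorov(A, lambda i, j: 0.5*i*j, 16)   # 16 x 16 complex matrix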
# Ehrenfest
# ---------
lib.ElEhrenfest_s.argtypes = \
lib.ElEhrenfest_d.argtypes = \
lib.ElEhrenfest_c.argtypes = \
lib.ElEhrenfest_z.argtypes = \
lib.ElEhrenfestDist_s.argtypes = \
lib.ElEhrenfestDist_d.argtypes = \
lib.ElEhrenfestDist_c.argtypes = \
lib.ElEhrenfestDist_z.argtypes = \
[c_void_p,iType]
def Ehrenfest(P,n):
args = [P.obj,n]
if type(P) is Matrix:
if P.tag == sTag: lib.ElEhrenfest_s(*args)
elif P.tag == dTag: lib.ElEhrenfest_d(*args)
elif P.tag == cTag: lib.ElEhrenfest_c(*args)
elif P.tag == zTag: lib.ElEhrenfest_z(*args)
else: DataExcept()
elif type(P) is DistMatrix:
if P.tag == sTag: lib.ElEhrenfestDist_s(*args)
elif P.tag == dTag: lib.ElEhrenfestDist_d(*args)
elif P.tag == cTag: lib.ElEhrenfestDist_c(*args)
elif P.tag == zTag: lib.ElEhrenfestDist_z(*args)
else: DataExcept()
else: TypeExcept()
lib.ElEhrenfestStationary_s.argtypes = \
lib.ElEhrenfestStationary_d.argtypes = \
lib.ElEhrenfestStationary_c.argtypes = \
lib.ElEhrenfestStationary_z.argtypes = \
lib.ElEhrenfestStationaryDist_s.argtypes = \
lib.ElEhrenfestStationaryDist_d.argtypes = \
lib.ElEhrenfestStationaryDist_c.argtypes = \
lib.ElEhrenfestStationaryDist_z.argtypes = \
[c_void_p,iType]
def EhrenfestStationary(PInf,n):
args = [PInf.obj,n]
if type(PInf) is Matrix:
if PInf.tag == sTag: lib.ElEhrenfestStationary_s(*args)
elif PInf.tag == dTag: lib.ElEhrenfestStationary_d(*args)
elif PInf.tag == cTag: lib.ElEhrenfestStationary_c(*args)
elif PInf.tag == zTag: lib.ElEhrenfestStationary_z(*args)
else: DataExcept()
elif type(PInf) is DistMatrix:
if PInf.tag == sTag: lib.ElEhrenfestStationaryDist_s(*args)
elif PInf.tag == dTag: lib.ElEhrenfestStationaryDist_d(*args)
elif PInf.tag == cTag: lib.ElEhrenfestStationaryDist_c(*args)
elif PInf.tag == zTag: lib.ElEhrenfestStationaryDist_z(*args)
else: DataExcept()
else: TypeExcept()
lib.ElEhrenfestDecay_s.argtypes = \
lib.ElEhrenfestDecay_d.argtypes = \
lib.ElEhrenfestDecay_c.argtypes = \
lib.ElEhrenfestDecay_z.argtypes = \
lib.ElEhrenfestDecayDist_s.argtypes = \
lib.ElEhrenfestDecayDist_d.argtypes = \
lib.ElEhrenfestDecayDist_c.argtypes = \
lib.ElEhrenfestDecayDist_z.argtypes = \
[c_void_p,iType]
def EhrenfestDecay(PInf,n):
args = [PInf.obj,n]
if type(PInf) is Matrix:
if PInf.tag == sTag: lib.ElEhrenfestDecay_s(*args)
elif PInf.tag == dTag: lib.ElEhrenfestDecay_d(*args)
elif PInf.tag == cTag: lib.ElEhrenfestDecay_c(*args)
elif PInf.tag == zTag: lib.ElEhrenfestDecay_z(*args)
else: DataExcept()
elif type(PInf) is DistMatrix:
if PInf.tag == sTag: lib.ElEhrenfestDecayDist_s(*args)
elif PInf.tag == dTag: lib.ElEhrenfestDecayDist_d(*args)
elif PInf.tag == cTag: lib.ElEhrenfestDecayDist_c(*args)
elif PInf.tag == zTag: lib.ElEhrenfestDecayDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Extended Kahan
# --------------
lib.ElExtendedKahan_s.argtypes = \
lib.ElExtendedKahan_c.argtypes = \
lib.ElExtendedKahanDist_s.argtypes = \
lib.ElExtendedKahanDist_c.argtypes = \
[c_void_p,iType,sType,sType]
lib.ElExtendedKahan_d.argtypes = \
lib.ElExtendedKahan_z.argtypes = \
lib.ElExtendedKahanDist_d.argtypes = \
lib.ElExtendedKahanDist_z.argtypes = \
[c_void_p,iType,dType,dType]
def ExtendedKahan(A,k,phi,mu):
args = [A.obj,k,phi,mu]
if type(A) is Matrix:
if A.tag == sTag: lib.ElExtendedKahan_s(*args)
elif A.tag == dTag: lib.ElExtendedKahan_d(*args)
elif A.tag == cTag: lib.ElExtendedKahan_c(*args)
elif A.tag == zTag: lib.ElExtendedKahan_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == sTag: lib.ElExtendedKahanDist_s(*args)
elif A.tag == dTag: lib.ElExtendedKahanDist_d(*args)
elif A.tag == cTag: lib.ElExtendedKahanDist_c(*args)
elif A.tag == zTag: lib.ElExtendedKahanDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Fiedler
# -------
lib.ElFiedler_s.argtypes = \
lib.ElFiedlerDist_s.argtypes = \
[c_void_p,iType,POINTER(sType)]
lib.ElFiedler_d.argtypes = \
lib.ElFiedlerDist_d.argtypes = \
[c_void_p,iType,POINTER(dType)]
lib.ElFiedler_c.argtypes = \
lib.ElFiedlerDist_c.argtypes = \
[c_void_p,iType,POINTER(cType)]
lib.ElFiedler_z.argtypes = \
lib.ElFiedlerDist_z.argtypes = \
[c_void_p,iType,POINTER(zType)]
def Fiedler(A,c):
cLen = len(c)
cBuf = (TagToType(A.tag)*cLen)(*c)
args = [A.obj,cLen,cBuf]
if type(A) is Matrix:
if A.tag == sTag: lib.ElFiedler_s(*args)
elif A.tag == dTag: lib.ElFiedler_d(*args)
elif A.tag == cTag: lib.ElFiedler_c(*args)
elif A.tag == zTag: lib.ElFiedler_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == sTag: lib.ElFiedlerDist_s(*args)
elif A.tag == dTag: lib.ElFiedlerDist_d(*args)
elif A.tag == cTag: lib.ElFiedlerDist_c(*args)
elif A.tag == zTag: lib.ElFiedlerDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Forsythe
# --------
lib.ElForsythe_i.argtypes = \
lib.ElForsytheDist_i.argtypes = \
[c_void_p,iType,iType,iType]
lib.ElForsythe_s.argtypes = \
lib.ElForsytheDist_s.argtypes = \
[c_void_p,iType,sType,sType]
lib.ElForsythe_d.argtypes = \
lib.ElForsytheDist_d.argtypes = \
[c_void_p,iType,dType,dType]
lib.ElForsythe_c.argtypes = \
lib.ElForsytheDist_c.argtypes = \
[c_void_p,iType,cType,cType]
lib.ElForsythe_z.argtypes = \
lib.ElForsytheDist_z.argtypes = \
[c_void_p,iType,zType,zType]
def Forsythe(J,n,alpha,lamb):
  args = [J.obj,n,alpha,lamb]
if type(J) is Matrix:
if J.tag == iTag: lib.ElForsythe_i(*args)
elif J.tag == sTag: lib.ElForsythe_s(*args)
elif J.tag == dTag: lib.ElForsythe_d(*args)
elif J.tag == cTag: lib.ElForsythe_c(*args)
elif J.tag == zTag: lib.ElForsythe_z(*args)
else: DataExcept()
elif type(J) is DistMatrix:
if J.tag == iTag: lib.ElForsytheDist_i(*args)
elif J.tag == sTag: lib.ElForsytheDist_s(*args)
elif J.tag == dTag: lib.ElForsytheDist_d(*args)
elif J.tag == cTag: lib.ElForsytheDist_c(*args)
elif J.tag == zTag: lib.ElForsytheDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Fox-Li
# ------
lib.ElFoxLi_c.argtypes = \
lib.ElFoxLiDist_c.argtypes = \
[c_void_p,iType,sType]
lib.ElFoxLi_z.argtypes = \
lib.ElFoxLiDist_z.argtypes = \
[c_void_p,iType,dType]
def FoxLi(A,n,omega=48.):
args = [A.obj,n,omega]
if type(A) is Matrix:
if A.tag == cTag: lib.ElFoxLi_c(*args)
elif A.tag == zTag: lib.ElFoxLi_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == cTag: lib.ElFoxLiDist_c(*args)
elif A.tag == zTag: lib.ElFoxLiDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Fourier
# -------
lib.ElFourier_c.argtypes = \
lib.ElFourier_z.argtypes = \
lib.ElFourierDist_c.argtypes = \
lib.ElFourierDist_z.argtypes = \
[c_void_p,iType]
def Fourier(A,n):
args = [A.obj,n]
if type(A) is Matrix:
if A.tag == cTag: lib.ElFourier_c(*args)
elif A.tag == zTag: lib.ElFourier_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == cTag: lib.ElFourierDist_c(*args)
elif A.tag == zTag: lib.ElFourierDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Fourier-Identity
# ----------------
lib.ElFourierIdentity_c.argtypes = \
lib.ElFourierIdentity_z.argtypes = \
lib.ElFourierIdentityDist_c.argtypes = \
lib.ElFourierIdentityDist_z.argtypes = \
[c_void_p,iType]
def FourierIdentity(A,n):
args = [A.obj,n]
if type(A) is Matrix:
if A.tag == cTag: lib.ElFourierIdentity_c(*args)
elif A.tag == zTag: lib.ElFourierIdentity_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == cTag: lib.ElFourierIdentityDist_c(*args)
elif A.tag == zTag: lib.ElFourierIdentityDist_z(*args)
else: DataExcept()
else: TypeExcept()
# GCD matrix
# ----------
lib.ElGCDMatrix_i.argtypes = \
lib.ElGCDMatrix_s.argtypes = \
lib.ElGCDMatrix_d.argtypes = \
lib.ElGCDMatrix_c.argtypes = \
lib.ElGCDMatrix_z.argtypes = \
lib.ElGCDMatrixDist_i.argtypes = \
lib.ElGCDMatrixDist_s.argtypes = \
lib.ElGCDMatrixDist_d.argtypes = \
lib.ElGCDMatrixDist_c.argtypes = \
lib.ElGCDMatrixDist_z.argtypes = \
[c_void_p,iType,iType]
def GCDMatrix(G,m,n):
args = [G.obj,m,n]
if type(G) is Matrix:
if G.tag == iTag: lib.ElGCDMatrix_i(*args)
elif G.tag == sTag: lib.ElGCDMatrix_s(*args)
elif G.tag == dTag: lib.ElGCDMatrix_d(*args)
elif G.tag == cTag: lib.ElGCDMatrix_c(*args)
elif G.tag == zTag: lib.ElGCDMatrix_z(*args)
else: DataExcept()
elif type(G) is DistMatrix:
if G.tag == iTag: lib.ElGCDMatrixDist_i(*args)
elif G.tag == sTag: lib.ElGCDMatrixDist_s(*args)
elif G.tag == dTag: lib.ElGCDMatrixDist_d(*args)
elif G.tag == cTag: lib.ElGCDMatrixDist_c(*args)
elif G.tag == zTag: lib.ElGCDMatrixDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Gear matrix
# -----------
lib.ElGear_i.argtypes = \
lib.ElGear_s.argtypes = \
lib.ElGear_d.argtypes = \
lib.ElGear_c.argtypes = \
lib.ElGear_z.argtypes = \
lib.ElGearDist_i.argtypes = \
lib.ElGearDist_s.argtypes = \
lib.ElGearDist_d.argtypes = \
lib.ElGearDist_c.argtypes = \
lib.ElGearDist_z.argtypes = \
[c_void_p,iType,iType,iType]
def Gear(G,n,s,t):
args = [G.obj,n,s,t]
if type(G) is Matrix:
if G.tag == iTag: lib.ElGear_i(*args)
elif G.tag == sTag: lib.ElGear_s(*args)
elif G.tag == dTag: lib.ElGear_d(*args)
elif G.tag == cTag: lib.ElGear_c(*args)
elif G.tag == zTag: lib.ElGear_z(*args)
else: DataExcept()
elif type(G) is DistMatrix:
if G.tag == iTag: lib.ElGearDist_i(*args)
elif G.tag == sTag: lib.ElGearDist_s(*args)
elif G.tag == dTag: lib.ElGearDist_d(*args)
elif G.tag == cTag: lib.ElGearDist_c(*args)
elif G.tag == zTag: lib.ElGearDist_z(*args)
else: DataExcept()
else: TypeExcept()
# GEPP Growth
# -----------
lib.ElGEPPGrowth_s.argtypes = \
lib.ElGEPPGrowth_d.argtypes = \
lib.ElGEPPGrowth_c.argtypes = \
lib.ElGEPPGrowth_z.argtypes = \
lib.ElGEPPGrowthDist_s.argtypes = \
lib.ElGEPPGrowthDist_d.argtypes = \
lib.ElGEPPGrowthDist_c.argtypes = \
lib.ElGEPPGrowthDist_z.argtypes = \
[c_void_p,iType]
def GEPPGrowth(A,n):
args = [A.obj,n]
if type(A) is Matrix:
if A.tag == sTag: lib.ElGEPPGrowth_s(*args)
elif A.tag == dTag: lib.ElGEPPGrowth_d(*args)
elif A.tag == cTag: lib.ElGEPPGrowth_c(*args)
elif A.tag == zTag: lib.ElGEPPGrowth_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == sTag: lib.ElGEPPGrowthDist_s(*args)
elif A.tag == dTag: lib.ElGEPPGrowthDist_d(*args)
elif A.tag == cTag: lib.ElGEPPGrowthDist_c(*args)
elif A.tag == zTag: lib.ElGEPPGrowthDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Golub/Klema/Stewart
# -------------------
lib.ElGKS_s.argtypes = \
lib.ElGKS_d.argtypes = \
lib.ElGKS_c.argtypes = \
lib.ElGKS_z.argtypes = \
lib.ElGKSDist_s.argtypes = \
lib.ElGKSDist_d.argtypes = \
lib.ElGKSDist_c.argtypes = \
lib.ElGKSDist_z.argtypes = \
[c_void_p,iType]
def GKS(A,n):
args = [A.obj,n]
if type(A) is Matrix:
if A.tag == sTag: lib.ElGKS_s(*args)
elif A.tag == dTag: lib.ElGKS_d(*args)
elif A.tag == cTag: lib.ElGKS_c(*args)
elif A.tag == zTag: lib.ElGKS_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == sTag: lib.ElGKSDist_s(*args)
elif A.tag == dTag: lib.ElGKSDist_d(*args)
elif A.tag == cTag: lib.ElGKSDist_c(*args)
elif A.tag == zTag: lib.ElGKSDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Grcar
# -----
lib.ElGrcar_i.argtypes = \
lib.ElGrcar_s.argtypes = \
lib.ElGrcar_d.argtypes = \
lib.ElGrcar_c.argtypes = \
lib.ElGrcar_z.argtypes = \
lib.ElGrcarDist_i.argtypes = \
lib.ElGrcarDist_s.argtypes = \
lib.ElGrcarDist_d.argtypes = \
lib.ElGrcarDist_c.argtypes = \
lib.ElGrcarDist_z.argtypes = \
[c_void_p,iType,iType]
def Grcar(A,n,k=3):
args = [A.obj,n,k]
if type(A) is Matrix:
if A.tag == iTag: lib.ElGrcar_i(*args)
elif A.tag == sTag: lib.ElGrcar_s(*args)
elif A.tag == dTag: lib.ElGrcar_d(*args)
elif A.tag == cTag: lib.ElGrcar_c(*args)
elif A.tag == zTag: lib.ElGrcar_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == iTag: lib.ElGrcarDist_i(*args)
elif A.tag == sTag: lib.ElGrcarDist_s(*args)
elif A.tag == dTag: lib.ElGrcarDist_d(*args)
elif A.tag == cTag: lib.ElGrcarDist_c(*args)
elif A.tag == zTag: lib.ElGrcarDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Haar
# ----
lib.ElHaar_s.argtypes = \
lib.ElHaar_d.argtypes = \
lib.ElHaar_c.argtypes = \
lib.ElHaar_z.argtypes = \
lib.ElHaarDist_s.argtypes = \
lib.ElHaarDist_d.argtypes = \
lib.ElHaarDist_c.argtypes = \
lib.ElHaarDist_z.argtypes = \
[c_void_p,iType]
def Haar(A,n):
args = [A.obj,n]
if type(A) is Matrix:
if A.tag == sTag: lib.ElHaar_s(*args)
elif A.tag == dTag: lib.ElHaar_d(*args)
elif A.tag == cTag: lib.ElHaar_c(*args)
elif A.tag == zTag: lib.ElHaar_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == sTag: lib.ElHaarDist_s(*args)
elif A.tag == dTag: lib.ElHaarDist_d(*args)
elif A.tag == cTag: lib.ElHaarDist_c(*args)
elif A.tag == zTag: lib.ElHaarDist_z(*args)
else: DataExcept()
else: TypeExcept()
lib.ElImplicitHaar_s.argtypes = \
lib.ElImplicitHaar_d.argtypes = \
lib.ElImplicitHaar_c.argtypes = \
lib.ElImplicitHaar_z.argtypes = \
lib.ElImplicitHaarDist_s.argtypes = \
lib.ElImplicitHaarDist_d.argtypes = \
lib.ElImplicitHaarDist_c.argtypes = \
lib.ElImplicitHaarDist_z.argtypes = \
[c_void_p,c_void_p,c_void_p,iType]
def ImplicitHaar(A,n):
if type(A) is Matrix:
t = Matrix(A.tag)
d = Matrix(Base(A.tag))
args = [A.obj,t.obj,d.obj,n]
if A.tag == sTag: lib.ElImplicitHaar_s(*args)
elif A.tag == dTag: lib.ElImplicitHaar_d(*args)
elif A.tag == cTag: lib.ElImplicitHaar_c(*args)
elif A.tag == zTag: lib.ElImplicitHaar_z(*args)
else: DataExcept()
return t, d
elif type(A) is DistMatrix:
t = DistMatrix(A.tag,MC,STAR,A.Grid())
d = DistMatrix(Base(A.tag),MC,STAR,A.Grid())
args = [A.obj,t.obj,d.obj,n]
if A.tag == sTag: lib.ElImplicitHaarDist_s(*args)
elif A.tag == dTag: lib.ElImplicitHaarDist_d(*args)
elif A.tag == cTag: lib.ElImplicitHaarDist_c(*args)
elif A.tag == zTag: lib.ElImplicitHaarDist_z(*args)
else: DataExcept()
return t, d
else: TypeExcept()
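# Usage sketch for ImplicitHaar (illustrative only): unlike most routines in
# this file it returns new matrices, namely the Householder scalars t and the
# diagonal d of the implicit representation.
#
#   A = Matrix(zTag)
#   t, d = ImplicitHaar(A, 100)   # implicit 100 x 100 Haar-distributed matrix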
# Hankel
# ------
lib.ElHankel_i.argtypes = \
lib.ElHankelDist_i.argtypes = \
[c_void_p,iType,iType,iType,POINTER(iType)]
lib.ElHankel_s.argtypes = \
lib.ElHankelDist_s.argtypes = \
[c_void_p,iType,iType,iType,POINTER(sType)]
lib.ElHankel_d.argtypes = \
lib.ElHankelDist_d.argtypes = \
[c_void_p,iType,iType,iType,POINTER(dType)]
lib.ElHankel_c.argtypes = \
lib.ElHankelDist_c.argtypes = \
[c_void_p,iType,iType,iType,POINTER(cType)]
lib.ElHankel_z.argtypes = \
lib.ElHankelDist_z.argtypes = \
[c_void_p,iType,iType,iType,POINTER(zType)]
def Hankel(A,m,n,a):
aLen = len(a)
aBuf = (TagToType(A.tag)*aLen)(*a)
args = [A.obj,m,n,aLen,aBuf]
if type(A) is Matrix:
if A.tag == iTag: lib.ElHankel_i(*args)
elif A.tag == sTag: lib.ElHankel_s(*args)
elif A.tag == dTag: lib.ElHankel_d(*args)
elif A.tag == cTag: lib.ElHankel_c(*args)
elif A.tag == zTag: lib.ElHankel_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == iTag: lib.ElHankelDist_i(*args)
elif A.tag == sTag: lib.ElHankelDist_s(*args)
elif A.tag == dTag: lib.ElHankelDist_d(*args)
elif A.tag == cTag: lib.ElHankelDist_c(*args)
elif A.tag == zTag: lib.ElHankelDist_z(*args)
else: DataExcept()
else: TypeExcept()
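# Usage sketch for Hankel (hedged): a Hankel matrix is constant along its
# anti-diagonals, so an m x n instance is determined by the m+n-1 values in 'a'.
#
#   A = Matrix(dTag)
#   Hankel(A, 2, 3, [1.,2.,3.,4.])   # 2 x 3 Hankel matrix from four values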
# Hanowa
# ------
lib.ElHanowa_i.argtypes = \
lib.ElHanowaDist_i.argtypes = \
[c_void_p,iType,iType]
lib.ElHanowa_s.argtypes = \
lib.ElHanowaDist_s.argtypes = \
[c_void_p,iType,sType]
lib.ElHanowa_d.argtypes = \
lib.ElHanowaDist_d.argtypes = \
[c_void_p,iType,dType]
lib.ElHanowa_c.argtypes = \
lib.ElHanowaDist_c.argtypes = \
[c_void_p,iType,cType]
lib.ElHanowa_z.argtypes = \
lib.ElHanowaDist_z.argtypes = \
[c_void_p,iType,zType]
def Hanowa(A,n,mu):
args = [A.obj,n,mu]
if type(A) is Matrix:
if A.tag == iTag: lib.ElHanowa_i(*args)
elif A.tag == sTag: lib.ElHanowa_s(*args)
elif A.tag == dTag: lib.ElHanowa_d(*args)
elif A.tag == cTag: lib.ElHanowa_c(*args)
elif A.tag == zTag: lib.ElHanowa_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == iTag: lib.ElHanowaDist_i(*args)
elif A.tag == sTag: lib.ElHanowaDist_s(*args)
elif A.tag == dTag: lib.ElHanowaDist_d(*args)
elif A.tag == cTag: lib.ElHanowaDist_c(*args)
elif A.tag == zTag: lib.ElHanowaDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Hatano-Nelson
# -------------
lib.ElHatanoNelson_s.argtypes = \
lib.ElHatanoNelsonDist_s.argtypes = \
[c_void_p,iType,sType,sType,sType,bType]
lib.ElHatanoNelson_d.argtypes = \
lib.ElHatanoNelsonDist_d.argtypes = \
[c_void_p,iType,dType,dType,dType,bType]
lib.ElHatanoNelson_c.argtypes = \
lib.ElHatanoNelsonDist_c.argtypes = \
[c_void_p,iType,cType,sType,cType,bType]
lib.ElHatanoNelson_z.argtypes = \
lib.ElHatanoNelsonDist_z.argtypes = \
[c_void_p,iType,zType,dType,zType,bType]
def HatanoNelson(A,n,center,radius,g,periodic=True):
args = [A.obj,n,center,radius,g,periodic]
if type(A) is Matrix:
if A.tag == sTag: lib.ElHatanoNelson_s(*args)
elif A.tag == dTag: lib.ElHatanoNelson_d(*args)
elif A.tag == cTag: lib.ElHatanoNelson_c(*args)
elif A.tag == zTag: lib.ElHatanoNelson_z(*args)
else: DataExcept()
  elif type(A) is DistMatrix:
if A.tag == sTag: lib.ElHatanoNelsonDist_s(*args)
elif A.tag == dTag: lib.ElHatanoNelsonDist_d(*args)
elif A.tag == cTag: lib.ElHatanoNelsonDist_c(*args)
elif A.tag == zTag: lib.ElHatanoNelsonDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Helmholtz
# ---------
lib.ElHelmholtz1D_s.argtypes = \
lib.ElHelmholtz1DDist_s.argtypes = \
lib.ElHelmholtz1DSparse_s.argtypes = \
lib.ElHelmholtz1DDistSparse_s.argtypes = \
[c_void_p,iType,sType]
lib.ElHelmholtz1D_d.argtypes = \
lib.ElHelmholtz1DDist_d.argtypes = \
lib.ElHelmholtz1DSparse_d.argtypes = \
lib.ElHelmholtz1DDistSparse_d.argtypes = \
[c_void_p,iType,dType]
lib.ElHelmholtz1D_c.argtypes = \
lib.ElHelmholtz1DDist_c.argtypes = \
lib.ElHelmholtz1DSparse_c.argtypes = \
lib.ElHelmholtz1DDistSparse_c.argtypes = \
[c_void_p,iType,cType]
lib.ElHelmholtz1D_z.argtypes = \
lib.ElHelmholtz1DDist_z.argtypes = \
lib.ElHelmholtz1DSparse_z.argtypes = \
lib.ElHelmholtz1DDistSparse_z.argtypes = \
[c_void_p,iType,zType]
def Helmholtz1D(H,nx,shift):
args = [H.obj,nx,shift]
  if type(H) is Matrix:
    if H.tag == sTag: lib.ElHelmholtz1D_s(*args)
    elif H.tag == dTag: lib.ElHelmholtz1D_d(*args)
    elif H.tag == cTag: lib.ElHelmholtz1D_c(*args)
    elif H.tag == zTag: lib.ElHelmholtz1D_z(*args)
    else: DataExcept()
  elif type(H) is DistMatrix:
    if H.tag == sTag: lib.ElHelmholtz1DDist_s(*args)
    elif H.tag == dTag: lib.ElHelmholtz1DDist_d(*args)
    elif H.tag == cTag: lib.ElHelmholtz1DDist_c(*args)
    elif H.tag == zTag: lib.ElHelmholtz1DDist_z(*args)
    else: DataExcept()
  elif type(H) is SparseMatrix:
    if H.tag == sTag: lib.ElHelmholtz1DSparse_s(*args)
    elif H.tag == dTag: lib.ElHelmholtz1DSparse_d(*args)
    elif H.tag == cTag: lib.ElHelmholtz1DSparse_c(*args)
    elif H.tag == zTag: lib.ElHelmholtz1DSparse_z(*args)
    else: DataExcept()
  elif type(H) is DistSparseMatrix:
    if H.tag == sTag: lib.ElHelmholtz1DDistSparse_s(*args)
    elif H.tag == dTag: lib.ElHelmholtz1DDistSparse_d(*args)
    elif H.tag == cTag: lib.ElHelmholtz1DDistSparse_c(*args)
    elif H.tag == zTag: lib.ElHelmholtz1DDistSparse_z(*args)
    else: DataExcept()
else: TypeExcept()
lib.ElHelmholtz2D_s.argtypes = \
lib.ElHelmholtz2DDist_s.argtypes = \
lib.ElHelmholtz2DSparse_s.argtypes = \
lib.ElHelmholtz2DDistSparse_s.argtypes = \
[c_void_p,iType,iType,sType]
lib.ElHelmholtz2D_d.argtypes = \
lib.ElHelmholtz2DDist_d.argtypes = \
lib.ElHelmholtz2DSparse_d.argtypes = \
lib.ElHelmholtz2DDistSparse_d.argtypes = \
[c_void_p,iType,iType,dType]
lib.ElHelmholtz2D_c.argtypes = \
lib.ElHelmholtz2DDist_c.argtypes = \
lib.ElHelmholtz2DSparse_c.argtypes = \
lib.ElHelmholtz2DDistSparse_c.argtypes = \
[c_void_p,iType,iType,cType]
lib.ElHelmholtz2D_z.argtypes = \
lib.ElHelmholtz2DDist_z.argtypes = \
lib.ElHelmholtz2DSparse_z.argtypes = \
lib.ElHelmholtz2DDistSparse_z.argtypes = \
[c_void_p,iType,iType,zType]
def Helmholtz2D(H,nx,ny,shift):
args = [H.obj,nx,ny,shift]
  if type(H) is Matrix:
    if H.tag == sTag: lib.ElHelmholtz2D_s(*args)
    elif H.tag == dTag: lib.ElHelmholtz2D_d(*args)
    elif H.tag == cTag: lib.ElHelmholtz2D_c(*args)
    elif H.tag == zTag: lib.ElHelmholtz2D_z(*args)
    else: DataExcept()
  elif type(H) is DistMatrix:
    if H.tag == sTag: lib.ElHelmholtz2DDist_s(*args)
    elif H.tag == dTag: lib.ElHelmholtz2DDist_d(*args)
    elif H.tag == cTag: lib.ElHelmholtz2DDist_c(*args)
    elif H.tag == zTag: lib.ElHelmholtz2DDist_z(*args)
    else: DataExcept()
  elif type(H) is SparseMatrix:
    if H.tag == sTag: lib.ElHelmholtz2DSparse_s(*args)
    elif H.tag == dTag: lib.ElHelmholtz2DSparse_d(*args)
    elif H.tag == cTag: lib.ElHelmholtz2DSparse_c(*args)
    elif H.tag == zTag: lib.ElHelmholtz2DSparse_z(*args)
    else: DataExcept()
  elif type(H) is DistSparseMatrix:
    if H.tag == sTag: lib.ElHelmholtz2DDistSparse_s(*args)
    elif H.tag == dTag: lib.ElHelmholtz2DDistSparse_d(*args)
    elif H.tag == cTag: lib.ElHelmholtz2DDistSparse_c(*args)
    elif H.tag == zTag: lib.ElHelmholtz2DDistSparse_z(*args)
else: DataExcept()
else: TypeExcept()
lib.ElHelmholtz3D_s.argtypes = \
lib.ElHelmholtz3DDist_s.argtypes = \
lib.ElHelmholtz3DSparse_s.argtypes = \
lib.ElHelmholtz3DDistSparse_s.argtypes = \
  [c_void_p,iType,iType,iType,sType]
lib.ElHelmholtz3D_d.argtypes = \
lib.ElHelmholtz3DDist_d.argtypes = \
lib.ElHelmholtz3DSparse_d.argtypes = \
lib.ElHelmholtz3DDistSparse_d.argtypes = \
  [c_void_p,iType,iType,iType,dType]
lib.ElHelmholtz3D_c.argtypes = \
lib.ElHelmholtz3DDist_c.argtypes = \
lib.ElHelmholtz3DSparse_c.argtypes = \
lib.ElHelmholtz3DDistSparse_c.argtypes = \
  [c_void_p,iType,iType,iType,cType]
lib.ElHelmholtz3D_z.argtypes = \
lib.ElHelmholtz3DDist_z.argtypes = \
lib.ElHelmholtz3DSparse_z.argtypes = \
lib.ElHelmholtz3DDistSparse_z.argtypes = \
  [c_void_p,iType,iType,iType,zType]
def Helmholtz3D(H,nx,ny,nz,shift):
args = [H.obj,nx,ny,nz,shift]
  if type(H) is Matrix:
    if H.tag == sTag: lib.ElHelmholtz3D_s(*args)
    elif H.tag == dTag: lib.ElHelmholtz3D_d(*args)
    elif H.tag == cTag: lib.ElHelmholtz3D_c(*args)
    elif H.tag == zTag: lib.ElHelmholtz3D_z(*args)
    else: DataExcept()
  elif type(H) is DistMatrix:
    if H.tag == sTag: lib.ElHelmholtz3DDist_s(*args)
    elif H.tag == dTag: lib.ElHelmholtz3DDist_d(*args)
    elif H.tag == cTag: lib.ElHelmholtz3DDist_c(*args)
    elif H.tag == zTag: lib.ElHelmholtz3DDist_z(*args)
    else: DataExcept()
  elif type(H) is SparseMatrix:
    if H.tag == sTag: lib.ElHelmholtz3DSparse_s(*args)
    elif H.tag == dTag: lib.ElHelmholtz3DSparse_d(*args)
    elif H.tag == cTag: lib.ElHelmholtz3DSparse_c(*args)
    elif H.tag == zTag: lib.ElHelmholtz3DSparse_z(*args)
    else: DataExcept()
  elif type(H) is DistSparseMatrix:
    if H.tag == sTag: lib.ElHelmholtz3DDistSparse_s(*args)
    elif H.tag == dTag: lib.ElHelmholtz3DDistSparse_d(*args)
    elif H.tag == cTag: lib.ElHelmholtz3DDistSparse_c(*args)
    elif H.tag == zTag: lib.ElHelmholtz3DDistSparse_z(*args)
    else: DataExcept()
else: TypeExcept()
# Helmholtz with PML
# ------------------
lib.ElHelmholtzPML1D_c.argtypes = \
lib.ElHelmholtzPML1DDist_c.argtypes = \
lib.ElHelmholtzPML1DSparse_c.argtypes = \
lib.ElHelmholtzPML1DDistSparse_c.argtypes = \
[c_void_p,iType,cType,iType,sType,sType]
lib.ElHelmholtzPML1D_z.argtypes = \
lib.ElHelmholtzPML1DDist_z.argtypes = \
lib.ElHelmholtzPML1DSparse_z.argtypes = \
lib.ElHelmholtzPML1DDistSparse_z.argtypes = \
[c_void_p,iType,zType,iType,dType,dType]
def HelmholtzPML1D(H,nx,omega,numPml,sigma,pmlExp):
args = [H.obj,nx,omega,numPml,sigma,pmlExp]
  if type(H) is Matrix:
    if H.tag == cTag: lib.ElHelmholtzPML1D_c(*args)
    elif H.tag == zTag: lib.ElHelmholtzPML1D_z(*args)
    else: DataExcept()
  elif type(H) is DistMatrix:
    if H.tag == cTag: lib.ElHelmholtzPML1DDist_c(*args)
    elif H.tag == zTag: lib.ElHelmholtzPML1DDist_z(*args)
    else: DataExcept()
  elif type(H) is SparseMatrix:
    if H.tag == cTag: lib.ElHelmholtzPML1DSparse_c(*args)
    elif H.tag == zTag: lib.ElHelmholtzPML1DSparse_z(*args)
    else: DataExcept()
  elif type(H) is DistSparseMatrix:
    if H.tag == cTag: lib.ElHelmholtzPML1DDistSparse_c(*args)
    elif H.tag == zTag: lib.ElHelmholtzPML1DDistSparse_z(*args)
    else: DataExcept()
else: TypeExcept()
lib.ElHelmholtzPML2D_c.argtypes = \
lib.ElHelmholtzPML2DDist_c.argtypes = \
lib.ElHelmholtzPML2DSparse_c.argtypes = \
lib.ElHelmholtzPML2DDistSparse_c.argtypes = \
[c_void_p,iType,iType,cType,iType,sType,sType]
lib.ElHelmholtzPML2D_z.argtypes = \
lib.ElHelmholtzPML2DDist_z.argtypes = \
lib.ElHelmholtzPML2DSparse_z.argtypes = \
lib.ElHelmholtzPML2DDistSparse_z.argtypes = \
[c_void_p,iType,iType,zType,iType,dType,dType]
def HelmholtzPML2D(H,nx,ny,omega,numPml,sigma,pmlExp):
args = [H.obj,nx,ny,omega,numPml,sigma,pmlExp]
  if type(H) is Matrix:
    if H.tag == cTag: lib.ElHelmholtzPML2D_c(*args)
    elif H.tag == zTag: lib.ElHelmholtzPML2D_z(*args)
    else: DataExcept()
  elif type(H) is DistMatrix:
    if H.tag == cTag: lib.ElHelmholtzPML2DDist_c(*args)
    elif H.tag == zTag: lib.ElHelmholtzPML2DDist_z(*args)
    else: DataExcept()
  elif type(H) is SparseMatrix:
    if H.tag == cTag: lib.ElHelmholtzPML2DSparse_c(*args)
    elif H.tag == zTag: lib.ElHelmholtzPML2DSparse_z(*args)
    else: DataExcept()
  elif type(H) is DistSparseMatrix:
    if H.tag == cTag: lib.ElHelmholtzPML2DDistSparse_c(*args)
    elif H.tag == zTag: lib.ElHelmholtzPML2DDistSparse_z(*args)
    else: DataExcept()
else: TypeExcept()
lib.ElHelmholtzPML3D_c.argtypes = \
lib.ElHelmholtzPML3DDist_c.argtypes = \
lib.ElHelmholtzPML3DSparse_c.argtypes = \
lib.ElHelmholtzPML3DDistSparse_c.argtypes = \
[c_void_p,iType,iType,iType,cType,iType,sType,sType]
lib.ElHelmholtzPML3D_z.argtypes = \
lib.ElHelmholtzPML3DDist_z.argtypes = \
lib.ElHelmholtzPML3DSparse_z.argtypes = \
lib.ElHelmholtzPML3DDistSparse_z.argtypes = \
[c_void_p,iType,iType,iType,zType,iType,dType,dType]
def HelmholtzPML3D(H,nx,ny,nz,omega,numPml,sigma,pmlExp):
args = [H.obj,nx,ny,nz,omega,numPml,sigma,pmlExp]
  if type(H) is Matrix:
    if H.tag == cTag: lib.ElHelmholtzPML3D_c(*args)
    elif H.tag == zTag: lib.ElHelmholtzPML3D_z(*args)
    else: DataExcept()
  elif type(H) is DistMatrix:
    if H.tag == cTag: lib.ElHelmholtzPML3DDist_c(*args)
    elif H.tag == zTag: lib.ElHelmholtzPML3DDist_z(*args)
    else: DataExcept()
  elif type(H) is SparseMatrix:
    if H.tag == cTag: lib.ElHelmholtzPML3DSparse_c(*args)
    elif H.tag == zTag: lib.ElHelmholtzPML3DSparse_z(*args)
    else: DataExcept()
  elif type(H) is DistSparseMatrix:
    if H.tag == cTag: lib.ElHelmholtzPML3DDistSparse_c(*args)
    elif H.tag == zTag: lib.ElHelmholtzPML3DDistSparse_z(*args)
    else: DataExcept()
else: TypeExcept()
# Hermitian from EVD
# ------------------
lib.ElHermitianFromEVD_s.argtypes = \
lib.ElHermitianFromEVD_d.argtypes = \
lib.ElHermitianFromEVD_c.argtypes = \
lib.ElHermitianFromEVD_z.argtypes = \
lib.ElHermitianFromEVDDist_s.argtypes = \
lib.ElHermitianFromEVDDist_d.argtypes = \
lib.ElHermitianFromEVDDist_c.argtypes = \
lib.ElHermitianFromEVDDist_z.argtypes = \
[c_uint,c_void_p,c_void_p,c_void_p]
def HermitianFromEVD(uplo,A,w,Z):
if type(A) is not type(w) or type(w) is not type(Z):
raise Exception('Types of {A,w,Z} must match')
if A.tag != Z.tag:
raise Exception('Datatypes of A and Z must match')
if w.tag != Base(Z.tag):
raise Exception('w must be of the base datatype of Z')
args = [uplo,A.obj,w.obj,Z.obj]
if type(Z) is Matrix:
if Z.tag == sTag: lib.ElHermitianFromEVD_s(*args)
elif Z.tag == dTag: lib.ElHermitianFromEVD_d(*args)
elif Z.tag == cTag: lib.ElHermitianFromEVD_c(*args)
elif Z.tag == zTag: lib.ElHermitianFromEVD_z(*args)
else: DataExcept()
elif type(Z) is DistMatrix:
if Z.tag == sTag: lib.ElHermitianFromEVDDist_s(*args)
elif Z.tag == dTag: lib.ElHermitianFromEVDDist_d(*args)
elif Z.tag == cTag: lib.ElHermitianFromEVDDist_c(*args)
elif Z.tag == zTag: lib.ElHermitianFromEVDDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Hermitian uniform spectrum
# --------------------------
lib.ElHermitianUniformSpectrum_s.argtypes = \
lib.ElHermitianUniformSpectrum_c.argtypes = \
lib.ElHermitianUniformSpectrumDist_s.argtypes = \
lib.ElHermitianUniformSpectrumDist_c.argtypes = \
[c_void_p,iType,sType,sType]
lib.ElHermitianUniformSpectrum_d.argtypes = \
lib.ElHermitianUniformSpectrum_z.argtypes = \
lib.ElHermitianUniformSpectrumDist_d.argtypes = \
lib.ElHermitianUniformSpectrumDist_z.argtypes = \
[c_void_p,iType,dType,dType]
def HermitianUniformSpectrum(A,n,lower=0,upper=1):
args = [A.obj,n,lower,upper]
if type(A) is Matrix:
if A.tag == sTag: lib.ElHermitianUniformSpectrum_s(*args)
elif A.tag == dTag: lib.ElHermitianUniformSpectrum_d(*args)
elif A.tag == cTag: lib.ElHermitianUniformSpectrum_c(*args)
elif A.tag == zTag: lib.ElHermitianUniformSpectrum_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == sTag: lib.ElHermitianUniformSpectrumDist_s(*args)
elif A.tag == dTag: lib.ElHermitianUniformSpectrumDist_d(*args)
elif A.tag == cTag: lib.ElHermitianUniformSpectrumDist_c(*args)
elif A.tag == zTag: lib.ElHermitianUniformSpectrumDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Hilbert
# -------
lib.ElHilbert_s.argtypes = \
lib.ElHilbert_d.argtypes = \
lib.ElHilbert_c.argtypes = \
lib.ElHilbert_z.argtypes = \
lib.ElHilbertDist_s.argtypes = \
lib.ElHilbertDist_d.argtypes = \
lib.ElHilbertDist_c.argtypes = \
lib.ElHilbertDist_z.argtypes = \
[c_void_p,iType]
def Hilbert(A,n):
args = [A.obj,n]
if type(A) is Matrix:
if A.tag == sTag: lib.ElHilbert_s(*args)
elif A.tag == dTag: lib.ElHilbert_d(*args)
elif A.tag == cTag: lib.ElHilbert_c(*args)
elif A.tag == zTag: lib.ElHilbert_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == sTag: lib.ElHilbertDist_s(*args)
elif A.tag == dTag: lib.ElHilbertDist_d(*args)
elif A.tag == cTag: lib.ElHilbertDist_c(*args)
elif A.tag == zTag: lib.ElHilbertDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Identity
# --------
lib.ElIdentity_i.argtypes = \
lib.ElIdentity_s.argtypes = \
lib.ElIdentity_d.argtypes = \
lib.ElIdentity_c.argtypes = \
lib.ElIdentity_z.argtypes = \
lib.ElIdentityDist_i.argtypes = \
lib.ElIdentityDist_s.argtypes = \
lib.ElIdentityDist_d.argtypes = \
lib.ElIdentityDist_c.argtypes = \
lib.ElIdentityDist_z.argtypes = \
lib.ElIdentitySparse_i.argtypes = \
lib.ElIdentitySparse_s.argtypes = \
lib.ElIdentitySparse_d.argtypes = \
lib.ElIdentitySparse_c.argtypes = \
lib.ElIdentitySparse_z.argtypes = \
lib.ElIdentityDistSparse_i.argtypes = \
lib.ElIdentityDistSparse_s.argtypes = \
lib.ElIdentityDistSparse_d.argtypes = \
lib.ElIdentityDistSparse_c.argtypes = \
lib.ElIdentityDistSparse_z.argtypes = \
[c_void_p,iType,iType]
def Identity(A,m,n):
args = [A.obj,m,n]
if type(A) is Matrix:
if A.tag == iTag: lib.ElIdentity_i(*args)
elif A.tag == sTag: lib.ElIdentity_s(*args)
elif A.tag == dTag: lib.ElIdentity_d(*args)
elif A.tag == cTag: lib.ElIdentity_c(*args)
elif A.tag == zTag: lib.ElIdentity_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == iTag: lib.ElIdentityDist_i(*args)
elif A.tag == sTag: lib.ElIdentityDist_s(*args)
elif A.tag == dTag: lib.ElIdentityDist_d(*args)
elif A.tag == cTag: lib.ElIdentityDist_c(*args)
elif A.tag == zTag: lib.ElIdentityDist_z(*args)
else: DataExcept()
elif type(A) is SparseMatrix:
if A.tag == iTag: lib.ElIdentitySparse_i(*args)
elif A.tag == sTag: lib.ElIdentitySparse_s(*args)
elif A.tag == dTag: lib.ElIdentitySparse_d(*args)
elif A.tag == cTag: lib.ElIdentitySparse_c(*args)
elif A.tag == zTag: lib.ElIdentitySparse_z(*args)
else: DataExcept()
elif type(A) is DistSparseMatrix:
if A.tag == iTag: lib.ElIdentityDistSparse_i(*args)
elif A.tag == sTag: lib.ElIdentityDistSparse_s(*args)
elif A.tag == dTag: lib.ElIdentityDistSparse_d(*args)
elif A.tag == cTag: lib.ElIdentityDistSparse_c(*args)
elif A.tag == zTag: lib.ElIdentityDistSparse_z(*args)
else: DataExcept()
else: TypeExcept()
# Jordan
# ------
lib.ElJordan_i.argtypes = \
lib.ElJordanDist_i.argtypes = \
[c_void_p,iType,iType]
lib.ElJordan_s.argtypes = \
lib.ElJordanDist_s.argtypes = \
[c_void_p,iType,sType]
lib.ElJordan_d.argtypes = \
lib.ElJordanDist_d.argtypes = \
[c_void_p,iType,dType]
lib.ElJordan_c.argtypes = \
lib.ElJordanDist_c.argtypes = \
[c_void_p,iType,cType]
lib.ElJordan_z.argtypes = \
lib.ElJordanDist_z.argtypes = \
[c_void_p,iType,zType]
def Jordan(J,n,lambPre):
lamb = TagToType(J.tag)(lambPre)
args = [J.obj,n,lamb]
if type(J) is Matrix:
if J.tag == iTag: lib.ElJordan_i(*args)
elif J.tag == sTag: lib.ElJordan_s(*args)
elif J.tag == dTag: lib.ElJordan_d(*args)
elif J.tag == cTag: lib.ElJordan_c(*args)
elif J.tag == zTag: lib.ElJordan_z(*args)
else: DataExcept()
elif type(J) is DistMatrix:
if J.tag == iTag: lib.ElJordanDist_i(*args)
elif J.tag == sTag: lib.ElJordanDist_s(*args)
elif J.tag == dTag: lib.ElJordanDist_d(*args)
elif J.tag == cTag: lib.ElJordanDist_c(*args)
elif J.tag == zTag: lib.ElJordanDist_z(*args)
else: DataExcept()
else: TypeExcept()
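# Usage sketch for Jordan (illustrative): the eigenvalue 'lambPre' is coerced
# to the matrix's scalar type via TagToType above, so a plain Python number
# suffices.
#
#   J = Matrix(dTag)
#   Jordan(J, 5, 2.)   # 5 x 5 Jordan block with eigenvalue 2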
# Jordan-Cholesky
# ---------------
lib.ElJordanCholesky_s.argtypes = \
lib.ElJordanCholesky_d.argtypes = \
lib.ElJordanCholesky_c.argtypes = \
lib.ElJordanCholesky_z.argtypes = \
lib.ElJordanCholeskyDist_s.argtypes = \
lib.ElJordanCholeskyDist_d.argtypes = \
lib.ElJordanCholeskyDist_c.argtypes = \
lib.ElJordanCholeskyDist_z.argtypes = \
lib.ElJordanCholeskySparse_s.argtypes = \
lib.ElJordanCholeskySparse_d.argtypes = \
lib.ElJordanCholeskySparse_c.argtypes = \
lib.ElJordanCholeskySparse_z.argtypes = \
lib.ElJordanCholeskyDistSparse_s.argtypes = \
lib.ElJordanCholeskyDistSparse_d.argtypes = \
lib.ElJordanCholeskyDistSparse_c.argtypes = \
lib.ElJordanCholeskyDistSparse_z.argtypes = \
[c_void_p,iType]
def JordanCholesky(A,n):
args = [A.obj,n]
if type(A) is Matrix:
if A.tag == sTag: lib.ElJordanCholesky_s(*args)
elif A.tag == dTag: lib.ElJordanCholesky_d(*args)
elif A.tag == cTag: lib.ElJordanCholesky_c(*args)
elif A.tag == zTag: lib.ElJordanCholesky_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == sTag: lib.ElJordanCholeskyDist_s(*args)
elif A.tag == dTag: lib.ElJordanCholeskyDist_d(*args)
elif A.tag == cTag: lib.ElJordanCholeskyDist_c(*args)
elif A.tag == zTag: lib.ElJordanCholeskyDist_z(*args)
else: DataExcept()
elif type(A) is SparseMatrix:
if A.tag == sTag: lib.ElJordanCholeskySparse_s(*args)
elif A.tag == dTag: lib.ElJordanCholeskySparse_d(*args)
elif A.tag == cTag: lib.ElJordanCholeskySparse_c(*args)
elif A.tag == zTag: lib.ElJordanCholeskySparse_z(*args)
else: DataExcept()
elif type(A) is DistSparseMatrix:
if A.tag == sTag: lib.ElJordanCholeskyDistSparse_s(*args)
elif A.tag == dTag: lib.ElJordanCholeskyDistSparse_d(*args)
elif A.tag == cTag: lib.ElJordanCholeskyDistSparse_c(*args)
elif A.tag == zTag: lib.ElJordanCholeskyDistSparse_z(*args)
else: DataExcept()
else: TypeExcept()
# Kahan
# -----
lib.ElKahan_s.argtypes = \
lib.ElKahanDist_s.argtypes = \
[c_void_p,iType,sType]
lib.ElKahan_d.argtypes = \
lib.ElKahanDist_d.argtypes = \
[c_void_p,iType,dType]
lib.ElKahan_c.argtypes = \
lib.ElKahanDist_c.argtypes = \
[c_void_p,iType,cType]
lib.ElKahan_z.argtypes = \
lib.ElKahanDist_z.argtypes = \
[c_void_p,iType,zType]
def Kahan(A,n,phi):
args = [A.obj,n,phi]
if type(A) is Matrix:
if A.tag == sTag: lib.ElKahan_s(*args)
elif A.tag == dTag: lib.ElKahan_d(*args)
elif A.tag == cTag: lib.ElKahan_c(*args)
elif A.tag == zTag: lib.ElKahan_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == sTag: lib.ElKahanDist_s(*args)
elif A.tag == dTag: lib.ElKahanDist_d(*args)
elif A.tag == cTag: lib.ElKahanDist_c(*args)
elif A.tag == zTag: lib.ElKahanDist_z(*args)
else: DataExcept()
else: TypeExcept()
# KMS
# ---
lib.ElKMS_i.argtypes = \
lib.ElKMSDist_i.argtypes = \
[c_void_p,iType,iType]
lib.ElKMS_s.argtypes = \
lib.ElKMSDist_s.argtypes = \
[c_void_p,iType,sType]
lib.ElKMS_d.argtypes = \
lib.ElKMSDist_d.argtypes = \
[c_void_p,iType,dType]
lib.ElKMS_c.argtypes = \
lib.ElKMSDist_c.argtypes = \
[c_void_p,iType,cType]
lib.ElKMS_z.argtypes = \
lib.ElKMSDist_z.argtypes = \
[c_void_p,iType,zType]
def KMS(K,n,rho):
args = [K.obj,n,rho]
if type(K) is Matrix:
if K.tag == iTag: lib.ElKMS_i(*args)
elif K.tag == sTag: lib.ElKMS_s(*args)
elif K.tag == dTag: lib.ElKMS_d(*args)
elif K.tag == cTag: lib.ElKMS_c(*args)
elif K.tag == zTag: lib.ElKMS_z(*args)
else: DataExcept()
elif type(K) is DistMatrix:
if K.tag == iTag: lib.ElKMSDist_i(*args)
elif K.tag == sTag: lib.ElKMSDist_s(*args)
elif K.tag == dTag: lib.ElKMSDist_d(*args)
elif K.tag == cTag: lib.ElKMSDist_c(*args)
elif K.tag == zTag: lib.ElKMSDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Laplacian
# ---------
lib.ElLaplacian1D_s.argtypes = \
lib.ElLaplacian1D_d.argtypes = \
lib.ElLaplacian1D_c.argtypes = \
lib.ElLaplacian1D_z.argtypes = \
lib.ElLaplacian1DDist_s.argtypes = \
lib.ElLaplacian1DDist_d.argtypes = \
lib.ElLaplacian1DDist_c.argtypes = \
lib.ElLaplacian1DDist_z.argtypes = \
lib.ElLaplacian1DSparse_s.argtypes = \
lib.ElLaplacian1DSparse_d.argtypes = \
lib.ElLaplacian1DSparse_c.argtypes = \
lib.ElLaplacian1DSparse_z.argtypes = \
lib.ElLaplacian1DDistSparse_s.argtypes = \
lib.ElLaplacian1DDistSparse_d.argtypes = \
lib.ElLaplacian1DDistSparse_c.argtypes = \
lib.ElLaplacian1DDistSparse_z.argtypes = \
[c_void_p,iType]
def Laplacian1D(L,nx):
args = [L.obj,nx]
if type(L) is Matrix:
if L.tag == sTag: lib.ElLaplacian1D_s(*args)
elif L.tag == dTag: lib.ElLaplacian1D_d(*args)
elif L.tag == cTag: lib.ElLaplacian1D_c(*args)
elif L.tag == zTag: lib.ElLaplacian1D_z(*args)
else: DataExcept()
elif type(L) is DistMatrix:
if L.tag == sTag: lib.ElLaplacian1DDist_s(*args)
elif L.tag == dTag: lib.ElLaplacian1DDist_d(*args)
elif L.tag == cTag: lib.ElLaplacian1DDist_c(*args)
elif L.tag == zTag: lib.ElLaplacian1DDist_z(*args)
else: DataExcept()
elif type(L) is SparseMatrix:
if L.tag == sTag: lib.ElLaplacian1DSparse_s(*args)
elif L.tag == dTag: lib.ElLaplacian1DSparse_d(*args)
elif L.tag == cTag: lib.ElLaplacian1DSparse_c(*args)
elif L.tag == zTag: lib.ElLaplacian1DSparse_z(*args)
else: DataExcept()
elif type(L) is DistSparseMatrix:
if L.tag == sTag: lib.ElLaplacian1DDistSparse_s(*args)
elif L.tag == dTag: lib.ElLaplacian1DDistSparse_d(*args)
elif L.tag == cTag: lib.ElLaplacian1DDistSparse_c(*args)
elif L.tag == zTag: lib.ElLaplacian1DDistSparse_z(*args)
else: DataExcept()
else: TypeExcept()
# TODO: Add sparse wrappers for the 2D and 3D Laplacians below
lib.ElLaplacian2D_s.argtypes = \
lib.ElLaplacian2D_d.argtypes = \
lib.ElLaplacian2D_c.argtypes = \
lib.ElLaplacian2D_z.argtypes = \
lib.ElLaplacian2DDist_s.argtypes = \
lib.ElLaplacian2DDist_d.argtypes = \
lib.ElLaplacian2DDist_c.argtypes = \
lib.ElLaplacian2DDist_z.argtypes = \
[c_void_p,iType,iType]
def Laplacian2D(L,nx,ny):
args = [L.obj,nx,ny]
if type(L) is Matrix:
if L.tag == sTag: lib.ElLaplacian2D_s(*args)
elif L.tag == dTag: lib.ElLaplacian2D_d(*args)
elif L.tag == cTag: lib.ElLaplacian2D_c(*args)
elif L.tag == zTag: lib.ElLaplacian2D_z(*args)
else: DataExcept()
elif type(L) is DistMatrix:
if L.tag == sTag: lib.ElLaplacian2DDist_s(*args)
elif L.tag == dTag: lib.ElLaplacian2DDist_d(*args)
elif L.tag == cTag: lib.ElLaplacian2DDist_c(*args)
elif L.tag == zTag: lib.ElLaplacian2DDist_z(*args)
else: DataExcept()
else: TypeExcept()
lib.ElLaplacian3D_s.argtypes = \
lib.ElLaplacian3D_d.argtypes = \
lib.ElLaplacian3D_c.argtypes = \
lib.ElLaplacian3D_z.argtypes = \
lib.ElLaplacian3DDist_s.argtypes = \
lib.ElLaplacian3DDist_d.argtypes = \
lib.ElLaplacian3DDist_c.argtypes = \
lib.ElLaplacian3DDist_z.argtypes = \
[c_void_p,iType,iType,iType]
def Laplacian3D(L,nx,ny,nz):
args = [L.obj,nx,ny,nz]
if type(L) is Matrix:
if L.tag == sTag: lib.ElLaplacian3D_s(*args)
elif L.tag == dTag: lib.ElLaplacian3D_d(*args)
elif L.tag == cTag: lib.ElLaplacian3D_c(*args)
elif L.tag == zTag: lib.ElLaplacian3D_z(*args)
else: DataExcept()
elif type(L) is DistMatrix:
if L.tag == sTag: lib.ElLaplacian3DDist_s(*args)
elif L.tag == dTag: lib.ElLaplacian3DDist_d(*args)
elif L.tag == cTag: lib.ElLaplacian3DDist_c(*args)
elif L.tag == zTag: lib.ElLaplacian3DDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Lauchli
# -------
lib.ElLauchli_i.argtypes = \
lib.ElLauchliDist_i.argtypes = \
[c_void_p,iType,iType]
lib.ElLauchli_s.argtypes = \
lib.ElLauchliDist_s.argtypes = \
[c_void_p,iType,sType]
lib.ElLauchli_d.argtypes = \
lib.ElLauchliDist_d.argtypes = \
[c_void_p,iType,dType]
lib.ElLauchli_c.argtypes = \
lib.ElLauchliDist_c.argtypes = \
[c_void_p,iType,cType]
lib.ElLauchli_z.argtypes = \
lib.ElLauchliDist_z.argtypes = \
[c_void_p,iType,zType]
def Lauchli(A,n,mu):
args = [A.obj,n,mu]
if type(A) is Matrix:
if A.tag == iTag: lib.ElLauchli_i(*args)
elif A.tag == sTag: lib.ElLauchli_s(*args)
elif A.tag == dTag: lib.ElLauchli_d(*args)
elif A.tag == cTag: lib.ElLauchli_c(*args)
elif A.tag == zTag: lib.ElLauchli_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == iTag: lib.ElLauchliDist_i(*args)
elif A.tag == sTag: lib.ElLauchliDist_s(*args)
elif A.tag == dTag: lib.ElLauchliDist_d(*args)
elif A.tag == cTag: lib.ElLauchliDist_c(*args)
elif A.tag == zTag: lib.ElLauchliDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Legendre
# --------
lib.ElLegendre_s.argtypes = \
lib.ElLegendre_d.argtypes = \
lib.ElLegendre_c.argtypes = \
lib.ElLegendre_z.argtypes = \
lib.ElLegendreDist_s.argtypes = \
lib.ElLegendreDist_d.argtypes = \
lib.ElLegendreDist_c.argtypes = \
lib.ElLegendreDist_z.argtypes = \
[c_void_p,iType]
def Legendre(A,n):
args = [A.obj,n]
if type(A) is Matrix:
if A.tag == sTag: lib.ElLegendre_s(*args)
elif A.tag == dTag: lib.ElLegendre_d(*args)
elif A.tag == cTag: lib.ElLegendre_c(*args)
elif A.tag == zTag: lib.ElLegendre_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == sTag: lib.ElLegendreDist_s(*args)
elif A.tag == dTag: lib.ElLegendreDist_d(*args)
elif A.tag == cTag: lib.ElLegendreDist_c(*args)
elif A.tag == zTag: lib.ElLegendreDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Lehmer
# ------
lib.ElLehmer_s.argtypes = \
lib.ElLehmer_d.argtypes = \
lib.ElLehmer_c.argtypes = \
lib.ElLehmer_z.argtypes = \
lib.ElLehmerDist_s.argtypes = \
lib.ElLehmerDist_d.argtypes = \
lib.ElLehmerDist_c.argtypes = \
lib.ElLehmerDist_z.argtypes = \
[c_void_p,iType]
def Lehmer(L,n):
args = [L.obj,n]
if type(L) is Matrix:
if L.tag == sTag: lib.ElLehmer_s(*args)
elif L.tag == dTag: lib.ElLehmer_d(*args)
elif L.tag == cTag: lib.ElLehmer_c(*args)
elif L.tag == zTag: lib.ElLehmer_z(*args)
else: DataExcept()
elif type(L) is DistMatrix:
if L.tag == sTag: lib.ElLehmerDist_s(*args)
elif L.tag == dTag: lib.ElLehmerDist_d(*args)
elif L.tag == cTag: lib.ElLehmerDist_c(*args)
elif L.tag == zTag: lib.ElLehmerDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Lotkin
# ------
lib.ElLotkin_s.argtypes = \
lib.ElLotkin_d.argtypes = \
lib.ElLotkin_c.argtypes = \
lib.ElLotkin_z.argtypes = \
lib.ElLotkinDist_s.argtypes = \
lib.ElLotkinDist_d.argtypes = \
lib.ElLotkinDist_c.argtypes = \
lib.ElLotkinDist_z.argtypes = \
[c_void_p,iType]
def Lotkin(A,n):
args = [A.obj,n]
if type(A) is Matrix:
if A.tag == sTag: lib.ElLotkin_s(*args)
elif A.tag == dTag: lib.ElLotkin_d(*args)
elif A.tag == cTag: lib.ElLotkin_c(*args)
elif A.tag == zTag: lib.ElLotkin_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == sTag: lib.ElLotkinDist_s(*args)
elif A.tag == dTag: lib.ElLotkinDist_d(*args)
elif A.tag == cTag: lib.ElLotkinDist_c(*args)
elif A.tag == zTag: lib.ElLotkinDist_z(*args)
else: DataExcept()
else: TypeExcept()
# MinIJ
# -----
lib.ElMinIJ_i.argtypes = \
lib.ElMinIJ_s.argtypes = \
lib.ElMinIJ_d.argtypes = \
lib.ElMinIJ_c.argtypes = \
lib.ElMinIJ_z.argtypes = \
lib.ElMinIJDist_i.argtypes = \
lib.ElMinIJDist_s.argtypes = \
lib.ElMinIJDist_d.argtypes = \
lib.ElMinIJDist_c.argtypes = \
lib.ElMinIJDist_z.argtypes = \
[c_void_p,iType]
def MinIJ(A,n):
args = [A.obj,n]
if type(A) is Matrix:
if A.tag == iTag: lib.ElMinIJ_i(*args)
elif A.tag == sTag: lib.ElMinIJ_s(*args)
elif A.tag == dTag: lib.ElMinIJ_d(*args)
elif A.tag == cTag: lib.ElMinIJ_c(*args)
elif A.tag == zTag: lib.ElMinIJ_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == iTag: lib.ElMinIJDist_i(*args)
elif A.tag == sTag: lib.ElMinIJDist_s(*args)
elif A.tag == dTag: lib.ElMinIJDist_d(*args)
elif A.tag == cTag: lib.ElMinIJDist_c(*args)
elif A.tag == zTag: lib.ElMinIJDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Normal from EVD
# ---------------
lib.ElNormalFromEVD_c.argtypes = \
lib.ElNormalFromEVD_z.argtypes = \
lib.ElNormalFromEVDDist_c.argtypes = \
lib.ElNormalFromEVDDist_z.argtypes = \
[c_void_p,c_void_p,c_void_p]
def NormalFromEVD(A,w,Z):
if type(A) is not type(w): raise Exception('Types of A and w must match')
if type(A) is not type(Z): raise Exception('Types of A and Z must match')
if Z.tag != A.tag: raise Exception('Datatypes of A and Z must match')
if w.tag != Base(A.tag): raise Exception('Base datatype of A must match w')
args = [A.obj,w.obj,Z.obj]
if type(A) is Matrix:
if A.tag == cTag: lib.ElNormalFromEVD_c(*args)
elif A.tag == zTag: lib.ElNormalFromEVD_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == cTag: lib.ElNormalFromEVDDist_c(*args)
elif A.tag == zTag: lib.ElNormalFromEVDDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Ones
# ----
lib.ElOnes_i.argtypes = \
lib.ElOnes_s.argtypes = \
lib.ElOnes_d.argtypes = \
lib.ElOnes_c.argtypes = \
lib.ElOnes_z.argtypes = \
lib.ElOnesDist_i.argtypes = \
lib.ElOnesDist_s.argtypes = \
lib.ElOnesDist_d.argtypes = \
lib.ElOnesDist_c.argtypes = \
lib.ElOnesDist_z.argtypes = \
lib.ElOnesDistMultiVec_i.argtypes = \
lib.ElOnesDistMultiVec_s.argtypes = \
lib.ElOnesDistMultiVec_d.argtypes = \
lib.ElOnesDistMultiVec_c.argtypes = \
lib.ElOnesDistMultiVec_z.argtypes = \
lib.ElOnesSparse_i.argtypes = \
lib.ElOnesSparse_s.argtypes = \
lib.ElOnesSparse_d.argtypes = \
lib.ElOnesSparse_c.argtypes = \
lib.ElOnesSparse_z.argtypes = \
lib.ElOnesDistSparse_i.argtypes = \
lib.ElOnesDistSparse_s.argtypes = \
lib.ElOnesDistSparse_d.argtypes = \
lib.ElOnesDistSparse_c.argtypes = \
lib.ElOnesDistSparse_z.argtypes = \
[c_void_p,iType,iType]
def Ones(A,m,n):
args = [A.obj,m,n]
if type(A) is Matrix:
if A.tag == iTag: lib.ElOnes_i(*args)
elif A.tag == sTag: lib.ElOnes_s(*args)
elif A.tag == dTag: lib.ElOnes_d(*args)
elif A.tag == cTag: lib.ElOnes_c(*args)
elif A.tag == zTag: lib.ElOnes_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == iTag: lib.ElOnesDist_i(*args)
elif A.tag == sTag: lib.ElOnesDist_s(*args)
elif A.tag == dTag: lib.ElOnesDist_d(*args)
elif A.tag == cTag: lib.ElOnesDist_c(*args)
elif A.tag == zTag: lib.ElOnesDist_z(*args)
else: DataExcept()
elif type(A) is DistMultiVec:
if A.tag == iTag: lib.ElOnesDistMultiVec_i(*args)
elif A.tag == sTag: lib.ElOnesDistMultiVec_s(*args)
elif A.tag == dTag: lib.ElOnesDistMultiVec_d(*args)
elif A.tag == cTag: lib.ElOnesDistMultiVec_c(*args)
elif A.tag == zTag: lib.ElOnesDistMultiVec_z(*args)
else: DataExcept()
elif type(A) is SparseMatrix:
if A.tag == iTag: lib.ElOnesSparse_i(*args)
elif A.tag == sTag: lib.ElOnesSparse_s(*args)
elif A.tag == dTag: lib.ElOnesSparse_d(*args)
elif A.tag == cTag: lib.ElOnesSparse_c(*args)
elif A.tag == zTag: lib.ElOnesSparse_z(*args)
else: DataExcept()
elif type(A) is DistSparseMatrix:
if A.tag == iTag: lib.ElOnesDistSparse_i(*args)
elif A.tag == sTag: lib.ElOnesDistSparse_s(*args)
elif A.tag == dTag: lib.ElOnesDistSparse_d(*args)
elif A.tag == cTag: lib.ElOnesDistSparse_c(*args)
elif A.tag == zTag: lib.ElOnesDistSparse_z(*args)
else: DataExcept()
else: TypeExcept()
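# Usage sketch for Ones (hedged: assumes the DistMultiVec class defined earlier
# in this module is constructible from a datatype tag): this wrapper also
# dispatches on the sparse and multi-vector classes, e.g.
#
#   x = DistMultiVec(dTag)
#   Ones(x, 10, 1)   # distributed length-10 vector of ones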
# 1-2-1 matrix
# ------------
lib.ElOneTwoOne_i.argtypes = \
lib.ElOneTwoOne_s.argtypes = \
lib.ElOneTwoOne_d.argtypes = \
lib.ElOneTwoOne_c.argtypes = \
lib.ElOneTwoOne_z.argtypes = \
lib.ElOneTwoOneDist_i.argtypes = \
lib.ElOneTwoOneDist_s.argtypes = \
lib.ElOneTwoOneDist_d.argtypes = \
lib.ElOneTwoOneDist_c.argtypes = \
lib.ElOneTwoOneDist_z.argtypes = \
[c_void_p,iType]
def OneTwoOne(A,n):
args = [A.obj,n]
if type(A) is Matrix:
if A.tag == iTag: lib.ElOneTwoOne_i(*args)
elif A.tag == sTag: lib.ElOneTwoOne_s(*args)
elif A.tag == dTag: lib.ElOneTwoOne_d(*args)
elif A.tag == cTag: lib.ElOneTwoOne_c(*args)
elif A.tag == zTag: lib.ElOneTwoOne_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == iTag: lib.ElOneTwoOneDist_i(*args)
elif A.tag == sTag: lib.ElOneTwoOneDist_s(*args)
elif A.tag == dTag: lib.ElOneTwoOneDist_d(*args)
elif A.tag == cTag: lib.ElOneTwoOneDist_c(*args)
elif A.tag == zTag: lib.ElOneTwoOneDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Parter
# ------
lib.ElParter_s.argtypes = \
lib.ElParter_d.argtypes = \
lib.ElParter_c.argtypes = \
lib.ElParter_z.argtypes = \
lib.ElParterDist_s.argtypes = \
lib.ElParterDist_d.argtypes = \
lib.ElParterDist_c.argtypes = \
lib.ElParterDist_z.argtypes = \
[c_void_p,iType]
def Parter(A,n):
args = [A.obj,n]
if type(A) is Matrix:
if A.tag == sTag: lib.ElParter_s(*args)
elif A.tag == dTag: lib.ElParter_d(*args)
elif A.tag == cTag: lib.ElParter_c(*args)
elif A.tag == zTag: lib.ElParter_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == sTag: lib.ElParterDist_s(*args)
elif A.tag == dTag: lib.ElParterDist_d(*args)
elif A.tag == cTag: lib.ElParterDist_c(*args)
elif A.tag == zTag: lib.ElParterDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Pei
# ---
lib.ElPei_s.argtypes = \
lib.ElPeiDist_s.argtypes = \
[c_void_p,iType,sType]
lib.ElPei_d.argtypes = \
lib.ElPeiDist_d.argtypes = \
[c_void_p,iType,dType]
lib.ElPei_c.argtypes = \
lib.ElPeiDist_c.argtypes = \
[c_void_p,iType,cType]
lib.ElPei_z.argtypes = \
lib.ElPeiDist_z.argtypes = \
[c_void_p,iType,zType]
def Pei(A,n,alpha):
args = [A.obj,n,alpha]
  if type(A) is Matrix:
    if A.tag == sTag: lib.ElPei_s(*args)
    elif A.tag == dTag: lib.ElPei_d(*args)
    elif A.tag == cTag: lib.ElPei_c(*args)
    elif A.tag == zTag: lib.ElPei_z(*args)
    else: DataExcept()
  elif type(A) is DistMatrix:
    if A.tag == sTag: lib.ElPeiDist_s(*args)
    elif A.tag == dTag: lib.ElPeiDist_d(*args)
    elif A.tag == cTag: lib.ElPeiDist_c(*args)
    elif A.tag == zTag: lib.ElPeiDist_z(*args)
    else: DataExcept()
else: TypeExcept()
# Redheffer
# ---------
lib.ElRedheffer_i.argtypes = \
lib.ElRedheffer_s.argtypes = \
lib.ElRedheffer_d.argtypes = \
lib.ElRedheffer_c.argtypes = \
lib.ElRedheffer_z.argtypes = \
lib.ElRedhefferDist_i.argtypes = \
lib.ElRedhefferDist_s.argtypes = \
lib.ElRedhefferDist_d.argtypes = \
lib.ElRedhefferDist_c.argtypes = \
lib.ElRedhefferDist_z.argtypes = \
[c_void_p,iType]
def Redheffer(A,n):
args = [A.obj,n]
if type(A) is Matrix:
if A.tag == iTag: lib.ElRedheffer_i(*args)
elif A.tag == sTag: lib.ElRedheffer_s(*args)
elif A.tag == dTag: lib.ElRedheffer_d(*args)
elif A.tag == cTag: lib.ElRedheffer_c(*args)
elif A.tag == zTag: lib.ElRedheffer_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == iTag: lib.ElRedhefferDist_i(*args)
elif A.tag == sTag: lib.ElRedhefferDist_s(*args)
elif A.tag == dTag: lib.ElRedhefferDist_d(*args)
elif A.tag == cTag: lib.ElRedhefferDist_c(*args)
elif A.tag == zTag: lib.ElRedhefferDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Riffle
# ------
lib.ElRiffle_s.argtypes = \
lib.ElRiffle_d.argtypes = \
lib.ElRiffle_c.argtypes = \
lib.ElRiffle_z.argtypes = \
lib.ElRiffleDist_s.argtypes = \
lib.ElRiffleDist_d.argtypes = \
lib.ElRiffleDist_c.argtypes = \
lib.ElRiffleDist_z.argtypes = \
[c_void_p,iType]
def Riffle(P,n):
args = [P.obj,n]
if type(P) is Matrix:
if P.tag == sTag: lib.ElRiffle_s(*args)
elif P.tag == dTag: lib.ElRiffle_d(*args)
elif P.tag == cTag: lib.ElRiffle_c(*args)
elif P.tag == zTag: lib.ElRiffle_z(*args)
else: DataExcept()
elif type(P) is DistMatrix:
if P.tag == sTag: lib.ElRiffleDist_s(*args)
elif P.tag == dTag: lib.ElRiffleDist_d(*args)
elif P.tag == cTag: lib.ElRiffleDist_c(*args)
elif P.tag == zTag: lib.ElRiffleDist_z(*args)
else: DataExcept()
else: TypeExcept()
lib.ElRiffleStationary_s.argtypes = \
lib.ElRiffleStationary_d.argtypes = \
lib.ElRiffleStationary_c.argtypes = \
lib.ElRiffleStationary_z.argtypes = \
lib.ElRiffleStationaryDist_s.argtypes = \
lib.ElRiffleStationaryDist_d.argtypes = \
lib.ElRiffleStationaryDist_c.argtypes = \
lib.ElRiffleStationaryDist_z.argtypes = \
[c_void_p,iType]
def RiffleStationary(P,n):
args = [P.obj,n]
if type(P) is Matrix:
if P.tag == sTag: lib.ElRiffleStationary_s(*args)
elif P.tag == dTag: lib.ElRiffleStationary_d(*args)
elif P.tag == cTag: lib.ElRiffleStationary_c(*args)
elif P.tag == zTag: lib.ElRiffleStationary_z(*args)
else: DataExcept()
elif type(P) is DistMatrix:
if P.tag == sTag: lib.ElRiffleStationaryDist_s(*args)
elif P.tag == dTag: lib.ElRiffleStationaryDist_d(*args)
elif P.tag == cTag: lib.ElRiffleStationaryDist_c(*args)
elif P.tag == zTag: lib.ElRiffleStationaryDist_z(*args)
else: DataExcept()
else: TypeExcept()
lib.ElRiffleDecay_s.argtypes = \
lib.ElRiffleDecay_d.argtypes = \
lib.ElRiffleDecay_c.argtypes = \
lib.ElRiffleDecay_z.argtypes = \
lib.ElRiffleDecayDist_s.argtypes = \
lib.ElRiffleDecayDist_d.argtypes = \
lib.ElRiffleDecayDist_c.argtypes = \
lib.ElRiffleDecayDist_z.argtypes = \
[c_void_p,iType]
def RiffleDecay(P,n):
args = [P.obj,n]
if type(P) is Matrix:
if P.tag == sTag: lib.ElRiffleDecay_s(*args)
elif P.tag == dTag: lib.ElRiffleDecay_d(*args)
elif P.tag == cTag: lib.ElRiffleDecay_c(*args)
elif P.tag == zTag: lib.ElRiffleDecay_z(*args)
else: DataExcept()
elif type(P) is DistMatrix:
if P.tag == sTag: lib.ElRiffleDecayDist_s(*args)
elif P.tag == dTag: lib.ElRiffleDecayDist_d(*args)
elif P.tag == cTag: lib.ElRiffleDecayDist_c(*args)
elif P.tag == zTag: lib.ElRiffleDecayDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Ris
# ---
lib.ElRis_s.argtypes = \
lib.ElRis_d.argtypes = \
lib.ElRis_c.argtypes = \
lib.ElRis_z.argtypes = \
lib.ElRisDist_s.argtypes = \
lib.ElRisDist_d.argtypes = \
lib.ElRisDist_c.argtypes = \
lib.ElRisDist_z.argtypes = \
[c_void_p,iType]
def Ris(A,n):
args = [A.obj,n]
if type(A) is Matrix:
if A.tag == sTag: lib.ElRis_s(*args)
elif A.tag == dTag: lib.ElRis_d(*args)
elif A.tag == cTag: lib.ElRis_c(*args)
elif A.tag == zTag: lib.ElRis_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == sTag: lib.ElRisDist_s(*args)
elif A.tag == dTag: lib.ElRisDist_d(*args)
elif A.tag == cTag: lib.ElRisDist_c(*args)
elif A.tag == zTag: lib.ElRisDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Toeplitz
# --------
lib.ElToeplitz_i.argtypes = [c_void_p,iType,iType,iType,POINTER(iType)]
lib.ElToeplitzDist_i.argtypes = [c_void_p,iType,iType,iType,POINTER(iType)]
lib.ElToeplitz_s.argtypes = [c_void_p,iType,iType,iType,POINTER(sType)]
lib.ElToeplitzDist_s.argtypes = [c_void_p,iType,iType,iType,POINTER(sType)]
lib.ElToeplitz_d.argtypes = [c_void_p,iType,iType,iType,POINTER(dType)]
lib.ElToeplitzDist_d.argtypes = [c_void_p,iType,iType,iType,POINTER(dType)]
lib.ElToeplitz_c.argtypes = [c_void_p,iType,iType,iType,POINTER(cType)]
lib.ElToeplitzDist_c.argtypes = [c_void_p,iType,iType,iType,POINTER(cType)]
lib.ElToeplitz_z.argtypes = [c_void_p,iType,iType,iType,POINTER(zType)]
lib.ElToeplitzDist_z.argtypes = [c_void_p,iType,iType,iType,POINTER(zType)]
def Toeplitz(A,m,n,a):
aLen = len(a)
aBuf = (TagToType(A.tag)*aLen)(*a)
args = [A.obj,m,n,aLen,aBuf]
if type(A) is Matrix:
if A.tag == iTag: lib.ElToeplitz_i(*args)
elif A.tag == sTag: lib.ElToeplitz_s(*args)
elif A.tag == dTag: lib.ElToeplitz_d(*args)
elif A.tag == cTag: lib.ElToeplitz_c(*args)
elif A.tag == zTag: lib.ElToeplitz_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == iTag: lib.ElToeplitzDist_i(*args)
elif A.tag == sTag: lib.ElToeplitzDist_s(*args)
elif A.tag == dTag: lib.ElToeplitzDist_d(*args)
elif A.tag == cTag: lib.ElToeplitzDist_c(*args)
elif A.tag == zTag: lib.ElToeplitzDist_z(*args)
else: DataExcept()
else: TypeExcept()
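# Usage sketch (hypothetical; assumes a double-precision Matrix has been
# constructed via this module's Matrix class):
#
#   A = Matrix(dTag)
#   Toeplitz(A, 3, 3, [9, 8, 7, 6, 5])
#
# The list populates the constant diagonals (presumably all m+n-1 of them);
# its length and ordering must match what the underlying ElToeplitz routine
# expects.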
# Trefethen-Embree
# ----------------
lib.ElTrefethenEmbree_c.argtypes = \
lib.ElTrefethenEmbree_z.argtypes = \
lib.ElTrefethenEmbreeDist_c.argtypes = \
lib.ElTrefethenEmbreeDist_z.argtypes = \
[c_void_p,iType]
def TrefethenEmbree(A,n):
args = [A.obj,n]
if type(A) is Matrix:
if A.tag == cTag: lib.ElTrefethenEmbree_c(*args)
elif A.tag == zTag: lib.ElTrefethenEmbree_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == cTag: lib.ElTrefethenEmbreeDist_c(*args)
elif A.tag == zTag: lib.ElTrefethenEmbreeDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Triangle
# --------
lib.ElTriangle_c.argtypes = \
lib.ElTriangle_z.argtypes = \
lib.ElTriangleDist_c.argtypes = \
lib.ElTriangleDist_z.argtypes = \
[c_void_p,iType]
def Triangle(A,n):
args = [A.obj,n]
if type(A) is Matrix:
if A.tag == cTag: lib.ElTriangle_c(*args)
elif A.tag == zTag: lib.ElTriangle_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == cTag: lib.ElTriangleDist_c(*args)
elif A.tag == zTag: lib.ElTriangleDist_z(*args)
else: DataExcept()
else: TypeExcept()
# TriW
# ----
lib.ElTriW_i.argtypes = [c_void_p,iType,iType,iType]
lib.ElTriW_s.argtypes = [c_void_p,iType,sType,iType]
lib.ElTriW_d.argtypes = [c_void_p,iType,dType,iType]
lib.ElTriW_c.argtypes = [c_void_p,iType,cType,iType]
lib.ElTriW_z.argtypes = [c_void_p,iType,zType,iType]
lib.ElTriWDist_i.argtypes = [c_void_p,iType,iType,iType]
lib.ElTriWDist_s.argtypes = [c_void_p,iType,sType,iType]
lib.ElTriWDist_d.argtypes = [c_void_p,iType,dType,iType]
lib.ElTriWDist_c.argtypes = [c_void_p,iType,cType,iType]
lib.ElTriWDist_z.argtypes = [c_void_p,iType,zType,iType]
def TriW(A,n,alpha,k):
args = [A.obj,n,alpha,k]
if type(A) is Matrix:
if A.tag == iTag: lib.ElTriW_i(*args)
elif A.tag == sTag: lib.ElTriW_s(*args)
elif A.tag == dTag: lib.ElTriW_d(*args)
elif A.tag == cTag: lib.ElTriW_c(*args)
elif A.tag == zTag: lib.ElTriW_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == iTag: lib.ElTriWDist_i(*args)
elif A.tag == sTag: lib.ElTriWDist_s(*args)
elif A.tag == dTag: lib.ElTriWDist_d(*args)
elif A.tag == cTag: lib.ElTriWDist_c(*args)
elif A.tag == zTag: lib.ElTriWDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Walsh
# -----
lib.ElWalsh_i.argtypes = \
lib.ElWalsh_s.argtypes = \
lib.ElWalsh_d.argtypes = \
lib.ElWalsh_c.argtypes = \
lib.ElWalsh_z.argtypes = \
lib.ElWalshDist_i.argtypes = \
lib.ElWalshDist_s.argtypes = \
lib.ElWalshDist_d.argtypes = \
lib.ElWalshDist_c.argtypes = \
lib.ElWalshDist_z.argtypes = \
[c_void_p,iType,bType]
def Walsh(A,k,binary=False):
args = [A.obj,k,binary]
if type(A) is Matrix:
if A.tag == iTag: lib.ElWalsh_i(*args)
elif A.tag == sTag: lib.ElWalsh_s(*args)
elif A.tag == dTag: lib.ElWalsh_d(*args)
elif A.tag == cTag: lib.ElWalsh_c(*args)
elif A.tag == zTag: lib.ElWalsh_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == iTag: lib.ElWalshDist_i(*args)
elif A.tag == sTag: lib.ElWalshDist_s(*args)
elif A.tag == dTag: lib.ElWalshDist_d(*args)
elif A.tag == cTag: lib.ElWalshDist_c(*args)
elif A.tag == zTag: lib.ElWalshDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Walsh-Identity
# --------------
lib.ElWalshIdentity_i.argtypes = \
lib.ElWalshIdentity_s.argtypes = \
lib.ElWalshIdentity_d.argtypes = \
lib.ElWalshIdentity_c.argtypes = \
lib.ElWalshIdentity_z.argtypes = \
lib.ElWalshIdentityDist_i.argtypes = \
lib.ElWalshIdentityDist_s.argtypes = \
lib.ElWalshIdentityDist_d.argtypes = \
lib.ElWalshIdentityDist_c.argtypes = \
lib.ElWalshIdentityDist_z.argtypes = \
[c_void_p,iType,bType]
def WalshIdentity(A,k,binary=False):
args = [A.obj,k,binary]
if type(A) is Matrix:
if A.tag == iTag: lib.ElWalshIdentity_i(*args)
elif A.tag == sTag: lib.ElWalshIdentity_s(*args)
elif A.tag == dTag: lib.ElWalshIdentity_d(*args)
elif A.tag == cTag: lib.ElWalshIdentity_c(*args)
elif A.tag == zTag: lib.ElWalshIdentity_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == iTag: lib.ElWalshIdentityDist_i(*args)
elif A.tag == sTag: lib.ElWalshIdentityDist_s(*args)
elif A.tag == dTag: lib.ElWalshIdentityDist_d(*args)
elif A.tag == cTag: lib.ElWalshIdentityDist_c(*args)
elif A.tag == zTag: lib.ElWalshIdentityDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Whale
# -----
lib.ElWhale_c.argtypes = \
lib.ElWhale_z.argtypes = \
lib.ElWhaleDist_c.argtypes = \
lib.ElWhaleDist_z.argtypes = \
[c_void_p,iType]
def Whale(A,n):
args = [A.obj,n]
if type(A) is Matrix:
if A.tag == cTag: lib.ElWhale_c(*args)
elif A.tag == zTag: lib.ElWhale_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == cTag: lib.ElWhaleDist_c(*args)
elif A.tag == zTag: lib.ElWhaleDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Wilkinson
# ---------
lib.ElWilkinson_i.argtypes = \
lib.ElWilkinson_s.argtypes = \
lib.ElWilkinson_d.argtypes = \
lib.ElWilkinson_c.argtypes = \
lib.ElWilkinson_z.argtypes = \
lib.ElWilkinsonDist_i.argtypes = \
lib.ElWilkinsonDist_s.argtypes = \
lib.ElWilkinsonDist_d.argtypes = \
lib.ElWilkinsonDist_c.argtypes = \
lib.ElWilkinsonDist_z.argtypes = \
[c_void_p,iType]
def Wilkinson(A,k):
args = [A.obj,k]
if type(A) is Matrix:
if A.tag == iTag: lib.ElWilkinson_i(*args)
elif A.tag == sTag: lib.ElWilkinson_s(*args)
elif A.tag == dTag: lib.ElWilkinson_d(*args)
elif A.tag == cTag: lib.ElWilkinson_c(*args)
elif A.tag == zTag: lib.ElWilkinson_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == iTag: lib.ElWilkinsonDist_i(*args)
elif A.tag == sTag: lib.ElWilkinsonDist_s(*args)
elif A.tag == dTag: lib.ElWilkinsonDist_d(*args)
elif A.tag == cTag: lib.ElWilkinsonDist_c(*args)
elif A.tag == zTag: lib.ElWilkinsonDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Zeros
# -----
lib.ElZeros_i.argtypes = \
lib.ElZeros_s.argtypes = \
lib.ElZeros_d.argtypes = \
lib.ElZeros_c.argtypes = \
lib.ElZeros_z.argtypes = \
lib.ElZerosDist_i.argtypes = \
lib.ElZerosDist_s.argtypes = \
lib.ElZerosDist_d.argtypes = \
lib.ElZerosDist_c.argtypes = \
lib.ElZerosDist_z.argtypes = \
lib.ElZerosSparse_i.argtypes = \
lib.ElZerosSparse_s.argtypes = \
lib.ElZerosSparse_d.argtypes = \
lib.ElZerosSparse_c.argtypes = \
lib.ElZerosSparse_z.argtypes = \
lib.ElZerosDistSparse_i.argtypes = \
lib.ElZerosDistSparse_s.argtypes = \
lib.ElZerosDistSparse_d.argtypes = \
lib.ElZerosDistSparse_c.argtypes = \
lib.ElZerosDistSparse_z.argtypes = \
lib.ElZerosDistMultiVec_i.argtypes = \
lib.ElZerosDistMultiVec_s.argtypes = \
lib.ElZerosDistMultiVec_d.argtypes = \
lib.ElZerosDistMultiVec_c.argtypes = \
lib.ElZerosDistMultiVec_z.argtypes = \
[c_void_p,iType,iType]
def Zeros(A,m,n):
args = [A.obj,m,n]
if type(A) is Matrix:
if A.tag == iTag: lib.ElZeros_i(*args)
elif A.tag == sTag: lib.ElZeros_s(*args)
elif A.tag == dTag: lib.ElZeros_d(*args)
elif A.tag == cTag: lib.ElZeros_c(*args)
elif A.tag == zTag: lib.ElZeros_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == iTag: lib.ElZerosDist_i(*args)
elif A.tag == sTag: lib.ElZerosDist_s(*args)
elif A.tag == dTag: lib.ElZerosDist_d(*args)
elif A.tag == cTag: lib.ElZerosDist_c(*args)
elif A.tag == zTag: lib.ElZerosDist_z(*args)
else: DataExcept()
elif type(A) is SparseMatrix:
if A.tag == iTag: lib.ElZerosSparse_i(*args)
elif A.tag == sTag: lib.ElZerosSparse_s(*args)
elif A.tag == dTag: lib.ElZerosSparse_d(*args)
elif A.tag == cTag: lib.ElZerosSparse_c(*args)
elif A.tag == zTag: lib.ElZerosSparse_z(*args)
else: DataExcept()
elif type(A) is DistSparseMatrix:
if A.tag == iTag: lib.ElZerosDistSparse_i(*args)
elif A.tag == sTag: lib.ElZerosDistSparse_s(*args)
elif A.tag == dTag: lib.ElZerosDistSparse_d(*args)
elif A.tag == cTag: lib.ElZerosDistSparse_c(*args)
elif A.tag == zTag: lib.ElZerosDistSparse_z(*args)
else: DataExcept()
elif type(A) is DistMultiVec:
if A.tag == iTag: lib.ElZerosDistMultiVec_i(*args)
elif A.tag == sTag: lib.ElZerosDistMultiVec_s(*args)
elif A.tag == dTag: lib.ElZerosDistMultiVec_d(*args)
elif A.tag == cTag: lib.ElZerosDistMultiVec_c(*args)
elif A.tag == zTag: lib.ElZerosDistMultiVec_z(*args)
else: DataExcept()
else: TypeExcept()
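# Note that Zeros is bound for the widest set of containers (Matrix,
# DistMatrix, SparseMatrix, DistSparseMatrix, and DistMultiVec), whereas most
# generators above only cover the dense sequential and distributed cases.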
# Random
# ======
# Bernoulli
# ---------
lib.ElBernoulli_i.argtypes = \
lib.ElBernoulli_s.argtypes = \
lib.ElBernoulli_d.argtypes = \
lib.ElBernoulli_c.argtypes = \
lib.ElBernoulli_z.argtypes = \
lib.ElBernoulliDist_i.argtypes = \
lib.ElBernoulliDist_s.argtypes = \
lib.ElBernoulliDist_d.argtypes = \
lib.ElBernoulliDist_c.argtypes = \
lib.ElBernoulliDist_z.argtypes = \
[c_void_p,iType,iType,dType]
def Bernoulli(A,m,n,p=0.5):
args = [A.obj,m,n,p]
if type(A) is Matrix:
if A.tag == iTag: lib.ElBernoulli_i(*args)
elif A.tag == sTag: lib.ElBernoulli_s(*args)
elif A.tag == dTag: lib.ElBernoulli_d(*args)
elif A.tag == cTag: lib.ElBernoulli_c(*args)
elif A.tag == zTag: lib.ElBernoulli_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == iTag: lib.ElBernoulliDist_i(*args)
elif A.tag == sTag: lib.ElBernoulliDist_s(*args)
elif A.tag == dTag: lib.ElBernoulliDist_d(*args)
elif A.tag == cTag: lib.ElBernoulliDist_c(*args)
elif A.tag == zTag: lib.ElBernoulliDist_z(*args)
else: DataExcept()
else: TypeExcept()
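# The Bernoulli probability p is always passed as a double (dType), regardless
# of the matrix datatype; it defaults to 0.5.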
# Gaussian
# --------
lib.ElGaussian_s.argtypes = [c_void_p,iType,iType,sType,sType]
lib.ElGaussian_d.argtypes = [c_void_p,iType,iType,dType,dType]
lib.ElGaussian_c.argtypes = [c_void_p,iType,iType,cType,sType]
lib.ElGaussian_z.argtypes = [c_void_p,iType,iType,zType,dType]
lib.ElGaussianDist_s.argtypes = [c_void_p,iType,iType,sType,sType]
lib.ElGaussianDist_d.argtypes = [c_void_p,iType,iType,dType,dType]
lib.ElGaussianDist_c.argtypes = [c_void_p,iType,iType,cType,sType]
lib.ElGaussianDist_z.argtypes = [c_void_p,iType,iType,zType,dType]
lib.ElGaussianDistMultiVec_s.argtypes = [c_void_p,iType,iType,sType,sType]
lib.ElGaussianDistMultiVec_d.argtypes = [c_void_p,iType,iType,dType,dType]
lib.ElGaussianDistMultiVec_c.argtypes = [c_void_p,iType,iType,cType,sType]
lib.ElGaussianDistMultiVec_z.argtypes = [c_void_p,iType,iType,zType,dType]
def Gaussian(A,m,n,meanPre=0,stddev=1):
mean = TagToType(A.tag)(meanPre)
args = [A.obj,m,n,mean,stddev]
if type(A) is Matrix:
if A.tag == sTag: lib.ElGaussian_s(*args)
elif A.tag == dTag: lib.ElGaussian_d(*args)
elif A.tag == cTag: lib.ElGaussian_c(*args)
elif A.tag == zTag: lib.ElGaussian_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == sTag: lib.ElGaussianDist_s(*args)
elif A.tag == dTag: lib.ElGaussianDist_d(*args)
elif A.tag == cTag: lib.ElGaussianDist_c(*args)
elif A.tag == zTag: lib.ElGaussianDist_z(*args)
else: DataExcept()
elif type(A) is DistMultiVec:
if A.tag == sTag: lib.ElGaussianDistMultiVec_s(*args)
elif A.tag == dTag: lib.ElGaussianDistMultiVec_d(*args)
elif A.tag == cTag: lib.ElGaussianDistMultiVec_c(*args)
elif A.tag == zTag: lib.ElGaussianDistMultiVec_z(*args)
else: DataExcept()
else: TypeExcept()
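# Usage sketch (hypothetical): fill a 100 x 50 double-precision matrix with
# N(0,1) samples.
#
#   A = Matrix(dTag)
#   Gaussian(A, 100, 50)   # meanPre=0, stddev=1 by default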
# Normal uniform spectrum
# -----------------------
lib.ElNormalUniformSpectrum_c.argtypes = [c_void_p,iType,cType,sType]
lib.ElNormalUniformSpectrum_z.argtypes = [c_void_p,iType,zType,dType]
lib.ElNormalUniformSpectrumDist_c.argtypes = [c_void_p,iType,cType,sType]
lib.ElNormalUniformSpectrumDist_z.argtypes = [c_void_p,iType,zType,dType]
def NormalUniformSpectrum(A,n,centerPre=0,radius=1):
center = TagToType(A.tag)(centerPre)
args = [A.obj,n,center,radius]
if type(A) is Matrix:
if A.tag == cTag: lib.ElNormalUniformSpectrum_c(*args)
elif A.tag == zTag: lib.ElNormalUniformSpectrum_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == cTag: lib.ElNormalUniformSpectrumDist_c(*args)
elif A.tag == zTag: lib.ElNormalUniformSpectrumDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Rademacher
# ----------
lib.ElRademacher_i.argtypes = \
lib.ElRademacher_s.argtypes = \
lib.ElRademacher_d.argtypes = \
lib.ElRademacher_c.argtypes = \
lib.ElRademacher_z.argtypes = \
lib.ElRademacherDist_i.argtypes = \
lib.ElRademacherDist_s.argtypes = \
lib.ElRademacherDist_d.argtypes = \
lib.ElRademacherDist_c.argtypes = \
lib.ElRademacherDist_z.argtypes = \
[c_void_p,iType,iType]
def Rademacher(A,m,n):
args = [A.obj,m,n]
if type(A) is Matrix:
if A.tag == iTag: lib.ElRademacher_i(*args)
elif A.tag == sTag: lib.ElRademacher_s(*args)
elif A.tag == dTag: lib.ElRademacher_d(*args)
elif A.tag == cTag: lib.ElRademacher_c(*args)
elif A.tag == zTag: lib.ElRademacher_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == iTag: lib.ElRademacherDist_i(*args)
elif A.tag == sTag: lib.ElRademacherDist_s(*args)
elif A.tag == dTag: lib.ElRademacherDist_d(*args)
elif A.tag == cTag: lib.ElRademacherDist_c(*args)
elif A.tag == zTag: lib.ElRademacherDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Three-valued
# ------------
lib.ElThreeValued_i.argtypes = \
lib.ElThreeValued_s.argtypes = \
lib.ElThreeValued_d.argtypes = \
lib.ElThreeValued_c.argtypes = \
lib.ElThreeValued_z.argtypes = \
lib.ElThreeValuedDist_i.argtypes = \
lib.ElThreeValuedDist_s.argtypes = \
lib.ElThreeValuedDist_d.argtypes = \
lib.ElThreeValuedDist_c.argtypes = \
lib.ElThreeValuedDist_z.argtypes = \
[c_void_p,iType,iType,dType]
def ThreeValued(A,m,n,p=2./3.):
args = [A.obj,m,n,p]
if type(A) is Matrix:
if A.tag == iTag: lib.ElThreeValued_i(*args)
elif A.tag == sTag: lib.ElThreeValued_s(*args)
elif A.tag == dTag: lib.ElThreeValued_d(*args)
elif A.tag == cTag: lib.ElThreeValued_c(*args)
elif A.tag == zTag: lib.ElThreeValued_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == iTag: lib.ElThreeValuedDist_i(*args)
elif A.tag == sTag: lib.ElThreeValuedDist_s(*args)
elif A.tag == dTag: lib.ElThreeValuedDist_d(*args)
elif A.tag == cTag: lib.ElThreeValuedDist_c(*args)
elif A.tag == zTag: lib.ElThreeValuedDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Uniform
# -------
lib.ElUniform_i.argtypes = \
lib.ElUniformDist_i.argtypes = \
lib.ElUniformDistMultiVec_i.argtypes = \
[c_void_p,iType,iType,iType,iType]
lib.ElUniform_s.argtypes = \
lib.ElUniformDist_s.argtypes = \
lib.ElUniformDistMultiVec_s.argtypes = \
[c_void_p,iType,iType,sType,sType]
lib.ElUniform_d.argtypes = \
lib.ElUniformDist_d.argtypes = \
lib.ElUniformDistMultiVec_d.argtypes = \
[c_void_p,iType,iType,dType,dType]
lib.ElUniform_c.argtypes = \
lib.ElUniformDist_c.argtypes = \
lib.ElUniformDistMultiVec_c.argtypes = \
[c_void_p,iType,iType,cType,sType]
lib.ElUniform_z.argtypes = \
lib.ElUniformDist_z.argtypes = \
lib.ElUniformDistMultiVec_z.argtypes = \
[c_void_p,iType,iType,zType,dType]
def Uniform(A,m,n,centerPre=0,radius=1):
center = TagToType(A.tag)(centerPre)
args = [A.obj,m,n,center,radius]
if type(A) is Matrix:
if A.tag == iTag: lib.ElUniform_i(*args)
elif A.tag == sTag: lib.ElUniform_s(*args)
elif A.tag == dTag: lib.ElUniform_d(*args)
elif A.tag == cTag: lib.ElUniform_c(*args)
elif A.tag == zTag: lib.ElUniform_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == iTag: lib.ElUniformDist_i(*args)
elif A.tag == sTag: lib.ElUniformDist_s(*args)
elif A.tag == dTag: lib.ElUniformDist_d(*args)
elif A.tag == cTag: lib.ElUniformDist_c(*args)
elif A.tag == zTag: lib.ElUniformDist_z(*args)
else: DataExcept()
elif type(A) is DistMultiVec:
if A.tag == iTag: lib.ElUniformDistMultiVec_i(*args)
elif A.tag == sTag: lib.ElUniformDistMultiVec_s(*args)
elif A.tag == dTag: lib.ElUniformDistMultiVec_d(*args)
elif A.tag == cTag: lib.ElUniformDistMultiVec_c(*args)
elif A.tag == zTag: lib.ElUniformDistMultiVec_z(*args)
else: DataExcept()
else: TypeExcept()
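# As with Gaussian, the center is converted via TagToType(A.tag), so complex
# matrices accept complex centers while the radius stays real.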
# Uniform Helmholtz Green's
# -------------------------
lib.ElUniformHelmholtzGreens_c.argtypes = \
lib.ElUniformHelmholtzGreensDist_c.argtypes = \
[c_void_p,iType,sType]
lib.ElUniformHelmholtzGreens_z.argtypes = \
lib.ElUniformHelmholtzGreensDist_z.argtypes = \
[c_void_p,iType,dType]
def UniformHelmholtzGreens(A,n,lamb):
args = [A.obj,n,lamb]
if type(A) is Matrix:
if A.tag == cTag: lib.ElUniformHelmholtzGreens_c(*args)
elif A.tag == zTag: lib.ElUniformHelmholtzGreens_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == cTag: lib.ElUniformHelmholtzGreensDist_c(*args)
elif A.tag == zTag: lib.ElUniformHelmholtzGreensDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Wigner
# ------
lib.ElWigner_s.argtypes = \
lib.ElWignerDist_s.argtypes = \
[c_void_p,iType,sType,sType]
lib.ElWigner_d.argtypes = \
lib.ElWignerDist_d.argtypes = \
[c_void_p,iType,dType,dType]
lib.ElWigner_c.argtypes = \
lib.ElWignerDist_c.argtypes = \
[c_void_p,iType,cType,sType]
lib.ElWigner_z.argtypes = \
lib.ElWignerDist_z.argtypes = \
[c_void_p,iType,zType,dType]
def Wigner(A,n,meanPre=0,stddev=1):
mean = TagToType(A.tag)(meanPre)
args = [A.obj,n,mean,stddev]
if type(A) is Matrix:
if A.tag == sTag: lib.ElWigner_s(*args)
elif A.tag == dTag: lib.ElWigner_d(*args)
elif A.tag == cTag: lib.ElWigner_c(*args)
elif A.tag == zTag: lib.ElWigner_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == sTag: lib.ElWignerDist_s(*args)
elif A.tag == dTag: lib.ElWignerDist_d(*args)
elif A.tag == cTag: lib.ElWignerDist_c(*args)
elif A.tag == zTag: lib.ElWignerDist_z(*args)
else: DataExcept()
else: TypeExcept()
|
mcopik/Elemental
|
python/matrices.py
|
Python
|
bsd-3-clause
| 87,208
|
[
"Gaussian"
] |
fbd02bbca64dd7ba89ffa5aba1398d53d913ff10c07845940bd16fed5da0d26a
|
#!/usr/bin/env python
"""
"""
import vtk
def main():
colors = vtk.vtkNamedColors()
fileName = get_program_parameters()
# Read the image.
readerFactory = vtk.vtkImageReader2Factory()
reader = readerFactory.CreateImageReader2(fileName)
reader.SetFileName(fileName)
reader.Update()
cast = vtk.vtkImageCast()
cast.SetInputConnection(reader.GetOutputPort())
cast.SetOutputScalarTypeToDouble()
# Get rid of the discrete scalars.
smooth = vtk.vtkImageGaussianSmooth()
smooth.SetInputConnection(cast.GetOutputPort())
smooth.SetStandardDeviations(0.8, 0.8, 0)
m1 = vtk.vtkSphere()
m1.SetCenter(310, 130, 0)
m1.SetRadius(0)
m2 = vtk.vtkSampleFunction()
m2.SetImplicitFunction(m1)
m2.SetModelBounds(0, 264, 0, 264, 0, 1)
m2.SetSampleDimensions(264, 264, 1)
m3 = vtk.vtkImageShiftScale()
m3.SetInputConnection(m2.GetOutputPort())
m3.SetScale(0.000095)
div = vtk.vtkImageMathematics()
div.SetInputConnection(0, smooth.GetOutputPort())
div.SetInputConnection(1, m3.GetOutputPort())
div.SetOperationToMultiply()
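    # The radius-0 sphere sampled above yields a (scaled) squared-distance
    # field from (310, 130); multiplying the smoothed image by this profile
    # boosts pixels far from the sensor, compensating for the attenuation
    # described in get_program_parameters().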
# Create the actors.
colorWindow = 256.0
colorLevel = 127.5
originalActor = vtk.vtkImageActor()
originalActor.GetMapper().SetInputConnection(cast.GetOutputPort())
originalActor.GetProperty().SetColorWindow(colorWindow)
originalActor.GetProperty().SetColorLevel(colorLevel)
filteredActor = vtk.vtkImageActor()
filteredActor.GetMapper().SetInputConnection(div.GetOutputPort())
# Define the viewport ranges.
# (xmin, ymin, xmax, ymax)
originalViewport = [0.0, 0.0, 0.5, 1.0]
filteredViewport = [0.5, 0.0, 1.0, 1.0]
# Setup the renderers.
originalRenderer = vtk.vtkRenderer()
originalRenderer.SetViewport(originalViewport)
originalRenderer.AddActor(originalActor)
originalRenderer.ResetCamera()
originalRenderer.SetBackground(colors.GetColor3d("SlateGray"))
filteredRenderer = vtk.vtkRenderer()
filteredRenderer.SetViewport(filteredViewport)
filteredRenderer.AddActor(filteredActor)
filteredRenderer.ResetCamera()
filteredRenderer.SetBackground(colors.GetColor3d("LightSlateGray"))
renderWindow = vtk.vtkRenderWindow()
renderWindow.SetSize(600, 300)
renderWindow.AddRenderer(originalRenderer)
renderWindow.AddRenderer(filteredRenderer)
renderWindowInteractor = vtk.vtkRenderWindowInteractor()
style = vtk.vtkInteractorStyleImage()
renderWindowInteractor.SetInteractorStyle(style)
renderWindowInteractor.SetRenderWindow(renderWindow)
renderWindowInteractor.Initialize()
renderWindowInteractor.Start()
def get_program_parameters():
import argparse
description = 'This MRI image illustrates attenuation that can occur due to sensor position.'
epilogue = '''
The artifact is removed by dividing by the attenuation profile determined manually.
'''
parser = argparse.ArgumentParser(description=description, epilog=epilogue,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('filename', help='AttenuationArtifact.pgm.')
args = parser.parse_args()
return args.filename
if __name__ == '__main__':
main()
|
lorensen/VTKExamples
|
src/Python/ImageProcessing/Attenuation.py
|
Python
|
apache-2.0
| 3,263
|
[
"VTK"
] |
38f18d53ba51764229fbe7742b88450b6b60e4bd18af5f68dd6046cdf0c20351
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 13 10:08:00 2014
@author: Fujitsu
"""
def UserDefined(Rawfile,Indicator,Ligand_index,Protein_index, Model_index,SpiltCriteria,
CV_Method,FeatureSelectionMode, Iteration, NumPermute, SpiltMethod):
import os
user = {}
user['Root'] = os.getcwd()
user['Rawfile'] = Rawfile
user['Indicator'] = Indicator
user['Ligand_index'] = Ligand_index[1:-1].split(',')
user['Protein_index'] = Protein_index[1:-1].split(',')
user['Model_index'] = Model_index[1:-1].split(',')
user['Spiltcriteria'] = SpiltCriteria
user['CV_Method'] = CV_Method
user['SelectionMode'] = FeatureSelectionMode
user['Iteration'] = Iteration
user['NumPermute'] = NumPermute
user['SpiltMethod'] = SpiltMethod
from time import gmtime, strftime
user['Date Started'] = strftime("%Y-%m-%d %H:%M:%S", gmtime())
return user
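# Usage sketch (all argument values below are placeholders, not validated
# defaults; the index arguments are bracketed strings per the parsing above):
#
#   user = UserDefined('mydata', 'run1', '[0]', '[1]', '[1]', 0.2,
#                      'LOO', 'auto', 100, 10, 'random')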
def AnalysisInputfile(user):
Root = user['Root']
Rawfile = user['Rawfile']
Proteingroup = user['Protein_index']
Ligandgroup = user['Ligand_index']
import csv
import numpy as np
fileName = Root+'/'+Rawfile+'.csv'
with open(fileName,'rb') as csvfile:
        dialect = csv.Sniffer().sniff(csvfile.read())  # sniff() returns a Dialect; has_header() returns a bool
csvfile.seek(0)
reader = csv.reader(csvfile, dialect)
h = next(reader)
data = []
for row in reader:
data.append(row)
data_array = np.array(data)
Yname = h.pop(-1)
Y_array = np.append(np.reshape(np.array(Yname),(1,1)),data_array[:,-1])
if len(np.unique(data_array[:,-1])) > 3: #regression
user['Datatype'] = 'Regression'
elif len(np.unique(data_array[:,-1])) == 3:
user['Datatype'] = 'Classification 3 classes'
elif len(np.unique(data_array[:,-1])) == 2:
user['Datatype'] = 'Classification 2 classes'
psmiles = [ind for ind, val in enumerate(h) if val[0:5] == 'Smile' or val[0:5] == 'smile']
psequence = [ind for ind, val in enumerate(h) if val[0:8] == 'Sequence' or val[0:8] == 'sequence']
if len(psmiles) == 0 and len(psequence) == 0:
print 'All Descriptors were prepared by user'
ise = [ind for ind,val in enumerate(h) if val == '']
if len(ise) != 0:
hi = np.reshape(np.array(h), (1,len(h)))
hf = np.reshape(np.array(Yname), (1,1))
h = np.append(hi,hf, axis=1)
data_array = np.append(h,data_array,axis=0)
Array_ligand = data_array[:,:ise[0]]
Array_Pro = data_array[:,ise[0]+1:-1]
else:
if user['Ligand_index'] == []:
Array_ligand = []
Array_Pro = data_array[:,:-1]
elif user['Protein_index'] == []:
Array_ligand = data_array[:,:-1]
Array_Pro = []
elif len(psmiles) == 1 and len(psequence) == 0:
print 'Ligand descriptors will be generated'
import Descriptors_Extraction as DE
data = data_array[:,psmiles[0]]
Array_ligand = DE.Ligand_gen(data, Ligandgroup)
px = [ind for ind,val in enumerate(h) if ind!=psmiles[0]]
hx = np.array(h)[px]
Array_Pro = np.append(np.reshape(hx,(1,len(hx))),data_array[:,px],axis=0)
elif len(psmiles) == 0 and len(psequence) == 1:
print 'Protein descriptors will be generated'
import Descriptors_Extraction as DE
data = data_array[:,psequence[0]]
Array_Pro = DE.Protein_gen(data,Proteingroup)
px = [ind for ind,val in enumerate(h) if ind!=psequence[0]]
hx = np.array(h)[px]
Array_ligand = np.append(np.reshape(hx,(1,len(hx))),data_array[:,px],axis=0)
elif len(psmiles) == 1 and len(psequence) == 1:
print 'Ligand & Protein descriptors will be generated'
import Descriptors_Extraction as DE
data1 = data_array[:,psmiles[0]]
data2 = data_array[:,psequence[0]]
Array_ligand = DE.Ligand_gen(data1,Ligandgroup)
        Array_Pro = DE.Protein_gen(data2,Proteingroup)  # data2 holds sequences, so use the protein generator
elif len(psmiles) == 2 and len(psequence) == 0:
print 'Two different Ligand descriptors will be generated'
import Descriptors_Extraction as DE
data1 = data_array[:,psmiles[0]]
data2 = data_array[:,psmiles[1]]
Array_ligand = DE.Ligand_gen(data1,Ligandgroup)
Array_Pro = DE.Ligand_gen(data2,Proteingroup)
elif len(psmiles) == 0 and len(psequence) == 2:
print 'Two different Protein descriptors will be generated'
import Descriptors_Extraction as DE
data1 = data_array[:,psequence[0]]
data2 = data_array[:,psequence[1]]
Array_ligand = DE.Protein_gen(data1,Ligandgroup)
Array_Pro = DE.Protein_gen(data2,Proteingroup)
    ################## Combine all arrays for saving ##############
emp = np.array([None for i in range(Array_Pro.shape[0])])
emp = np.reshape(emp, (emp.shape[0],1))
Array = np.append(Array_ligand, emp, axis=1)
Array = np.append(Array, Array_Pro, axis=1)
Array = np.append(Array, np.reshape(Y_array,(len(Y_array),1)), axis=1)
path = user['Root']
raw = user['Rawfile']
Indica = user['Indicator']
import os
try:
os.makedirs(path+'/'+Indica)
except OSError:
pass
with open(path+'/'+Indica+'/'+raw+'_complete'+'.csv', 'wb') as csvfile:
spam = csv.writer(csvfile,delimiter=',',quotechar='|', quoting=csv.QUOTE_MINIMAL )
for k in range(len(Array)):
spam.writerow(Array[k])
return Array_ligand, Array_Pro, Y_array, user
def Ligand_gen(data, Ligandgroup):
import os
import numpy as np
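    # The chdir below points at a machine-specific RDKit-enabled environment;
    # pydpi's PyDrug depends on RDKit being importable.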
    os.chdir(r'C:\Users\Fujitsu\Anaconda\envs\Rdkit')
from pydpi.pydrug import PyDrug
drug=PyDrug()
HL_list, D_list = [], []
for i in range(len(data)):
drug.ReadMolFromSmile(data[i])
keys, values = [],[]
for j in Ligandgroup:
if j == '0': #all descriptors 615
res = drug.GetAllDescriptor()
elif j == '1': # constitution 30
res = drug.GetConstitution()
elif j == '2': # topology 25
res = drug.GetTopology()
elif j == '3': #connectivity 44
res = drug.GetConnectivity()
elif j == '4': #E-state 237
res = drug.GetEstate()
elif j == '5': #kappa 7
res = drug.GetKappa()
elif j == '6': #Burden 64
res = drug.GetBurden()
elif j == '7': #information 21
res = drug.GetBasak()
elif j == '8': #Moreau-Boto 32
res = drug.GetMoreauBroto()
elif j == '9': #Moran 32
res = drug.GetMoran()
elif j == '10': #Geary 32
res = drug.GetGeary()
elif j == '11': #charge 25
res = drug.GetCharge()
elif j == '12': #property 6
res = drug.GetMolProperty()
elif j == '13': #MOE-type 60
res = drug.GetMOE()
keys.extend(res.viewkeys())
values.extend(res.viewvalues())
if i == 0:
HL_list = keys
D_list.append(values)
else:
D_list.append(values)
D_ligand = np.zeros((len(data),len(HL_list)), dtype=float)
for k in range(len(data)):
D_ligand[k,:] = D_list[k]
#Variance threshold std > 0.01
import Descriptors_Selection as DesSe
ind_var = DesSe.VarinceThreshold(D_ligand)
D_ligand = D_ligand[:,ind_var]
HL_list = np.array(HL_list)[ind_var]
# #Intra pearson's correlation p-value > 0.05
# ind_corr = DesSe.Correlation(D_ligand, Y.astype(np.float))
# D_ligand = D_ligand[:,ind_corr]
# HL_list = np.array(HL_list)[ind_corr]
H_ligand = np.reshape(HL_list,(1,len(HL_list)))
Array_ligand = np.append(H_ligand, D_ligand, axis=0)
return Array_ligand
def Protein_gen(data, Proteingroup):
import numpy as np
from pydpi.pypro import PyPro
protein = PyPro()
HP_list, D_list = [], []
for ii in range(len(data)):
p = data[ii]
protein.ReadProteinSequence(p)
keys, values = [],[]
for jj in Proteingroup:
if jj == '0': #All descriptors 2049
res = protein.GetALL()
elif jj == '1': #amino acid composition 20
res = protein.GetAAComp()
elif jj == '2': #dipeptide composition 400
res = protein.GetDPComp()
elif jj == '3': #Tripeptide composition 8000
res = protein.GetTPComp()
elif jj == '4': #Moreau-Broto autocorrelation 240
res = protein.GetMoreauBrotoAuto()
elif jj == '5': #Moran autocorrelation 240
res = protein.GetMoranAuto()
elif jj == '6': #Geary autocorrelation 240
res = protein.GetGearyAuto()
elif jj == '7': #composition,transition,distribution 21+21+105
res = protein.GetCTD()
elif jj == '8': #conjoint triad features 343
res = protein.GetTriad()
elif jj == '9': #sequence order coupling number 60
res = protein.GetSOCN(30)
elif jj == '10': #quasi-sequence order descriptors 100
res = protein.GetQSO()
elif jj == '11': #pseudo amino acid composition 50
res = protein.GetPAAC(30)
keys.extend(res.viewkeys())
values.extend(res.viewvalues())
if ii == 0:
HP_list = keys
D_list.append(values)
else:
D_list.append(values)
D_Pro = np.zeros((len(D_list),len(HP_list)), dtype=float)
for k in range(len(D_list)):
D_Pro[k,:] = D_list[k]
#Variance threshold std > 0.01
import Descriptors_Selection as DesSe
ind_var = DesSe.VarinceThreshold(D_Pro)
D_Pro = D_Pro[:,ind_var]
HP_list = np.array(HP_list)[ind_var]
H_Pro = np.reshape(HP_list,(1,len(HP_list)))
Array_Pro = np.append(H_Pro, D_Pro, axis=0)
return Array_Pro
#def LigandProteinExtraction(user):
# Rawfile = user['Rawfile']
# IndicatorName = user['Indicator']
# Proteingroup = user['Protein_index']
# Ligandgroup = user['Ligand_index']
#
# import os, csv, sys
# import numpy as np
#
# path = os.path.dirname(os.path.abspath(sys.argv[0]))
# fileName = path+'/'+ Rawfile +'.csv'
#
# with open(fileName,'rb') as csvfile:
# dialect = csv.Sniffer().has_header(csvfile.read())
# csvfile.seek(0)
# reader = csv.reader(csvfile, dialect)
# h = next(reader)
# data = []
# for row in reader:
# data.append(row)
# data_array = np.array(data)
#
#
# ind = [ind for ind, val in enumerate(h) if val == 'SMILES' or val == 'Sequence']
#
# if len(ind) == 0:
# print 'Descriptors were prepared by user'
#
# Y = data_array[:,-1]
# data = np.delete(data_array,0,axis=1)
# data = np.delete(data,-1,axis=1)
#
# h.pop(0)
# Yname = h.pop(-1)
#
# idx_pt = [inx for inx,val in enumerate(data[0,:]) if val.isdigit() == True]
# Protein = np.transpose(np.transpose(data)[idx_pt])
# Ligand = np.delete(data, idx_pt, axis=1)
#
# h_pt = np.array(h)[idx_pt]
# h_li = np.delete(np.array(h), idx_pt)
#
# Array_Pro = np.append(np.reshape(h_pt,(1,len(h_pt))),Protein,axis=0)
# Array_ligand = np.append(np.reshape(h_li,(1,len(h_li))),Ligand,axis=0)
# Y_array = np.append(np.reshape(np.array(Yname),(1,1)),Y)
#
# else:
# import sys
# import Descriptors_Selection as DesSe
# print '### No Descriptors. Automatic preparation is being processed ###'
#
# if len(Ligandgroup) == 0 or len(Proteingroup) == 0:
# sys.exit('Index groups of Descriptor must be determined')
#
# Smile = data_array[:,1]
# Sequence = data_array[:,3]
# Y = data_array[:,-1]
#
# ############ Protein desciptors ################
# import Descriptors_Extraction as DesEx
# D_Pro, HP_list = DesEx.ProteinDescriptors(Sequence,Proteingroup)
#
# #Variance threshold std > 0.01
# ind_var = DesSe.VarinceThreshold(D_Pro)
# D_Pro = D_Pro[:,ind_var]
# HP_list = np.array(HP_list)[ind_var]
#
# #Intra pearson's correlation p-value > 0.05
# ind_corr = DesSe.Correlation(D_Pro, Y.astype(np.float))
# D_Pro = D_Pro[:,ind_corr]
# HP_list = np.array(HP_list)[ind_corr]
#
# H_Pro = np.reshape(HP_list,(1,len(HP_list)))
# Array_Pro = np.append(H_Pro, D_Pro, axis=0)
#
# ############## Ligand descriptors ################
# from pydpi.pydrug import PyDrug
# drug=PyDrug()
#
# HL_list, D_list = [], []
#
# for i in range(len(Smile)):
# drug.ReadMolFromSmile(Smile[i])
# keys, values = [],[]
#
# for j in Ligandgroup:
# if j == '0': #all descriptors 615
# res = drug.GetAllDescriptor()
# elif j == '1': # constitution 30
# res = drug.GetConstitution()
# elif j == '2': # topology 25
# res = drug.GetTopology()
# elif j == '3': #connectivity 44
# res = drug.GetConnectivity()
# elif j == '4': #E-state 237
# res = drug.GetEstate()
# elif j == '5': #kappa 7
# res = drug.GetKappa()
# elif j == '6': #Burden 64
# res = drug.GetBurden()
# elif j == '7': #information 21
# res = drug.GetBasak()
# elif j == '8': #Moreau-Boto 32
# res = drug.GetMoreauBroto()
# elif j == '9': #Moran 32
# res = drug.GetMoran()
# elif j == '10': #Geary 32
# res = drug.GetGeary()
# elif j == '11': #charge 25
# res = drug.GetCharge()
# elif j == '12': #property 6
# res = drug.GetMolProperty()
# elif j == '13': #MOE-type 60
# res = drug.GetMOE()
#
# keys.extend(res.viewkeys())
# values.extend(res.viewvalues())
#
# if i == 0:
# HL_list = keys
# D_list.append(values)
# else:
# D_list.append(values)
#
# D_ligand = np.zeros((len(Smile),len(HL_list)), dtype=float)
# for k in range(len(Smile)):
# D_ligand[k,:] = D_list[k]
#
# #Variance threshold std > 0.01
# ind_var = DesSe.VarinceThreshold(D_ligand)
# D_ligand = D_ligand[:,ind_var]
# HL_list = np.array(HL_list)[ind_var]
#
# #Intra pearson's correlation p-value > 0.05
# ind_corr = DesSe.Correlation(D_ligand, Y.astype(np.float))
# D_ligand = D_ligand[:,ind_corr]
# HL_list = np.array(HL_list)[ind_corr]
#
# H_ligand = np.reshape(HL_list,(1,len(HL_list)))
# Array_ligand = np.append(H_ligand, D_ligand, axis=0)
#
#
#    ################## Combine all arrays for saving ##############
#
# Array = np.append(Array_Pro, Array_ligand, axis=1)
#
# Y_array = np.append(np.array(h[-1]), Y)
# Y_array = np.reshape(Y_array, (len(Y_array),1))
# Array = np.append(Array, Y_array, axis=1)
#
# try:
# os.makedirs(path+'/'+IndicatorName)
# except OSError:
# pass
#
# with open(path+'/'+IndicatorName+'/'+IndicatorName+'.csv', 'wb') as csvfile:
# spam = csv.writer(csvfile,delimiter=',',quotechar='|',
# quoting=csv.QUOTE_MINIMAL )
# for k in range(len(Array)):
# spam.writerow(Array[k])
#
#
# return Array_ligand, Array_Pro, Y_array
#
#def ProteinExtraction(user):
# RawfileName = user['Rawfile']
# IndicatorName = user['Indicator']
# Proteingroup = user['Protein_index']
#
# import os, csv, sys
# import numpy as np
# import Descriptors_Selection as DesSe
#
# path = os.path.dirname(os.path.abspath(sys.argv[0]))
# fileName = path+'/'+ RawfileName +'.csv'
#
# with open(fileName,'rb') as csvfile:
# dialect = csv.Sniffer().has_header(csvfile.read())
# csvfile.seek(0)
# reader = csv.reader(csvfile, dialect)
# h = next(reader)
# data = []
# for row in reader:
# data.append(row)
# data_array = np.array(data)
# Y = data_array[:,-1]
#
# ind_P1 = [ind for ind,val in enumerate(h) if val == 'Protein1_Sequence']
# ind_P2 = [ind for ind,val in enumerate(h) if val == 'Protein2_Sequence']
#
# data_P1 = data_array[:,ind_P1]
# data_P2 = data_array[:,ind_P2]
#
#
# S1,S2 = [],[]
# for i in range(len(data_P1)):
# A, AA = data_P1[i][0], data_P2[i][0]
# if A and AA == []:
# pass
# S1.append(A)
# S2.append(AA)
#
# import Descriptors_Extraction as DesEx
## ########## Performing Protein Block 1 #################
# PS1, H1 = DesEx.ProteinDescriptors(S1,Proteingroup)
# #Variance threshold std > 0.01
# ind_var = DesSe.VarinceThreshold(PS1)
# PS1 = PS1[:,ind_var]
# H1 = np.array(H1)[ind_var]
#
## #Intra pearson's correlation p-value > 0.05
### ind_corr = DesSe.Correlation(D_Pro, Y.astype(np.float))
### D_Pro = D_Pro[:,ind_corr]
### HP_list = np.array(HP_list)[ind_corr]
##
# H_Pro = np.reshape(H1,(1,len(H1)))
# Array_Pro = np.append(H_Pro, PS1, axis=0)
# Array_ligand = Array_Pro
#
# ########## Performing Protein Block 2 #################
# PS2, H2 = DesEx.ProteinDescriptors(S2,Proteingroup)
# #Variance threshold std > 0.01
# ind_var = DesSe.VarinceThreshold(PS2)
# PS2 = PS2[:,ind_var]
# H2 = np.array(H2)[ind_var]
#
## #Intra pearson's correlation p-value > 0.05
### ind_corr = DesSe.Correlation(D_Pro, Y.astype(np.float))
### D_Pro = D_Pro[:,ind_corr]
### HP_list = np.array(HP_list)[ind_corr]
##
# H_Pro = np.reshape(H2,(1,len(H2)))
# Array_Pro = np.append(H_Pro, PS2, axis=0)
#
# ####################### Feature selection using VIP ################
# import Descriptors_Selection as DS
#
# X1, Y1, H1 = DS.VIP_origin(PS1, Y, H_Pro)
# H1 = np.reshape(H1,(1,len(H1)))
# Array_ligand = np.append(H1, X1, axis=0)
#
# X2, Y2, H2 = DS.VIP_origin(PS2, Y, H_Pro)
# H2 = np.reshape(H2,(1,len(H2)))
# Array_Pro = np.append(H2, X2, axis=0)
#
# Y_array = np.append(np.array(h[-1]), Y2)
# Y_array = np.reshape(Y_array, (len(Y_array),1))
#
#
# ################## Comnbine All array for saving ##############
#
## Array = np.append(Array_Pro, Array_ligand, axis=1)
## Y_array = np.append(np.array(h[-1]), Y)
## Y_array = np.reshape(Y_array, (len(Y_array),1))
## Array = np.append(Array, Y_array, axis=1)
############################################################
## try:
## os.makedirs(path+'/'+IndicatorName)
## except OSError:
## pass
##
## with open(path+'/'+IndicatorName+'/'+IndicatorName+'.csv', 'wb') as csvfile:
## spam = csv.writer(csvfile,delimiter=',',quotechar='|', quoting=csv.QUOTE_MINIMAL )
## for k in range(len(Array)):
## spam.writerow(Array[k])
#
# return Array_ligand, Array_Pro, np.squeeze(Y_array)
#
|
Rnewbie/PCM
|
Descriptors_Extraction.py
|
Python
|
gpl-2.0
| 21,105
|
[
"MOE",
"RDKit"
] |
883c792b46bb0c98aae9bb98598ac52a721f1c60aa0b67ff5e56e1ff8553f965
|
from pymol.wizard import Wizard
from pymol import cmd
import pymol
import traceback
sele_prefix = "_mw"
sele_prefix_len = len(sele_prefix)
indi_sele = "_indicate_mw"
obj_prefix = "measure"
class Measurement(Wizard):
modes = [
'pairs',
'angle',
'dihed',
'polar',
'heavy',
'neigh',
'hbond',
]
mode_name = {
'polar':'Polar Neighbors',
'heavy':'Heavy Neighbors',
'neigh':'Neighbors',
'pairs':'Distances',
'angle':'Angles',
'dihed':'Dihedrals',
'hbond':'Polar Contacts',
}
neighbor_modes = [
'same',
'other',
'enabled',
'all',
'in_object',
'in_selection'
]
object_modes = [
'merge',
'overwr',
'append',
]
object_mode_name = {
'merge':'Merge With Previous',
'overwr':'Replace Previous',
'append':'Create New Object',
}
def __init__(self,_self=cmd):
Wizard.__init__(self,_self)
self.cmd.unpick()
self.cutoff = self.cmd.get_setting_float("neighbor_cutoff")
self.heavy_neighbor_cutoff = self.cmd.get_setting_float("heavy_neighbor_cutoff")
self.polar_neighbor_cutoff = self.cmd.get_setting_float("polar_neighbor_cutoff")
self.hbond_cutoff = self.cmd.get_setting_float("h_bond_cutoff_center")
        self.status = 0 # 0: no atoms selected; 1-3: number of atoms selected so far
self.error = None
self.object_name = None
# mode selection subsystem
self.mode = self.session.get('default_mode','pairs')
self.neighbor_target = ""
# TODO:
# make this a function, and call it when we call refresh wizard
# to update the object/selection list
smm = []
smm.append([ 2, 'Measurement Mode', '' ])
for a in self.modes:
if a in ("neigh", "polar", "heavy"):
smm.append([ 1, self.mode_name[a], self.neighbor_submenu(a)])
else:
smm.append([ 1, self.mode_name[a], 'cmd.get_wizard().set_mode("'+a+'")'])
self.menu['mode']=smm
# overwrite mode selection subsystem
self.object_mode = self.session.get('default_object_mode','append')
smm = []
smm.append([ 2, 'New Measurements?', '' ])
for a in self.object_modes:
smm.append([ 1, self.object_mode_name[a], 'cmd.get_wizard().set_object_mode("'+a+'")'])
self.menu['object_mode']=smm
# initially select atoms, but now users can change this
self.selection_mode = self.cmd.get_setting_legacy("mouse_selection_mode")
self.cmd.set("mouse_selection_mode",0) # set selection mode to atomic
self.cmd.deselect() # disable the active selection (if any)
self.mouse_mode = 0
def get_event_mask(self):
"""
Sets what this Wizard listens for. event_mask_dirty is too coarse an event,
        so we should consider something more fine-grained for mouse mode updates.
"""
return Wizard.event_mask_pick + Wizard.event_mask_select + Wizard.event_mask_dirty
def neighbor_objects(self,a):
"""
for neighbor selecting, populate the menu with these names
"""
list = self.cmd.get_names("public_objects",1)[0:25] # keep this practical
list = filter(lambda x:self.cmd.get_type(x)=="object:molecule",list)
result = [[ 2, 'Object: ', '']]
for b in list:
result.append( [ 1, b, 'cmd.get_wizard().set_neighbor_target("%s","%s")' % (a,b)])
return result
def neighbor_selections(self,a):
"""
get list of public selections for populating the menu
"""
list = self.cmd.get_names("public_selections",1)[0:25] # keep this practical
list = filter(lambda x:self.cmd.get_type(x)=="selection",list)
result = [[ 2, 'Selections: ', '']]
for b in list:
result.append( [ 1, b, 'cmd.get_wizard().set_neighbor_target("%s","%s")' % (a,b)])
return result
def neighbor_submenu(self,a,_self=cmd):
return [ [2, self.mode_name[a]+": ", ''],
[1, "in all objects", 'cmd.get_wizard().set_neighbor_target("'+a+'","all")'],
[1, "in object", self.neighbor_objects(a) ], # while this is a submenu this is also a command, so [1, ...]
[1, "in selection", self.neighbor_selections(a) ], # not [2, ...]
[1, "in other objects", 'cmd.get_wizard().set_neighbor_target("'+a+'","other")'],
[1, "in same object", 'cmd.get_wizard().set_neighbor_target("'+a+'", "same")'],
]
def _validate_instance(self):
Wizard._validate_instance(self)
if not hasattr(self,'meas_count'):
self.meas_count = self.session.get('meas_count',0)
def get_name(self,untaken=1,increment=1):
"""
get a name for the next measurement object
"""
self._validate_instance()
if increment or self.meas_count<1:
self.meas_count = self.meas_count + 1
obj_name = obj_prefix+"%02d"%self.meas_count
if untaken:
name_dict = {}
for tmp_name in cmd.get_names("all"):
name_dict[tmp_name] = None
while obj_name in name_dict:
self.meas_count = self.meas_count + 1
obj_name = obj_prefix+"%02d"%self.meas_count
return obj_name
# generic set routines
def set_neighbor_target(self,mode,target):
"""
sets the neighbor target in the menu
"""
self.set_mode(mode)
self.neighbor_target=target
self.status = 0
self.clear_input()
self.cmd.refresh_wizard()
def set_mode(self,mode):
"""
sets what we're measuring, distance, angle, dihedral, etc.
"""
if mode in self.modes:
self.mode = mode
# if setting mode, we're restarting the selection process
self.status = 0
self.clear_input()
if self.mode=='hbond':
self.cmd.set("mouse_selection_mode", 5)
self.cmd.refresh_wizard()
def set_object_mode(self,mode):
if mode in self.object_modes:
self.object_mode = mode
self.status = 0
self.cmd.refresh_wizard()
def get_panel(self):
return [
[ 1, 'Measurement',''],
[ 3, self.mode_name[self.mode],'mode'],
[ 3, self.object_mode_name[self.object_mode],'object_mode'],
[ 2, 'Delete Last Object' , 'cmd.get_wizard().delete_last()'],
[ 2, 'Delete All Measurements' , 'cmd.get_wizard().delete_all()'],
[ 2, 'Done','cmd.set_wizard()'],
]
def cleanup(self):
"""
restore user session how we found it
"""
self.session['default_mode'] = self.mode
self.session['default_object_mode'] = self.object_mode
self.clear_input()
self.cmd.set("mouse_selection_mode",self.selection_mode) # restore selection mode
def clear_input(self):
"""
delete our user selections for this wizard
"""
self.cmd.delete(sele_prefix+"*")
self.cmd.delete(indi_sele)
self.cmd.delete("pk1")
self.status = 0
def get_selection_name(self):
"""
Return a textual description of the mouse mode
"""
if self.cmd.get("mouse_selection_mode", quiet=1)=="0":
return ("atom","")
elif self.cmd.get("mouse_selection_mode", quiet=1)=="1":
return ("residue"," br. ")
elif self.cmd.get("mouse_selection_mode", quiet=1)=="2":
return ("chain", " bc. ")
elif self.cmd.get("mouse_selection_mode", quiet=1)=="3":
return ("segment", " bs. ")
elif self.cmd.get("mouse_selection_mode", quiet=1)=="4":
return ("object", " bo. ")
elif self.cmd.get("mouse_selection_mode", quiet=1)=="5":
return ("molecule", " bm. ")
elif self.cmd.get("mouse_selection_mode", quiet=1)=="6":
return ("C-alpha", " bca. ")
def get_prompt(self):
(what, code) = self.get_selection_name()
self.prompt = None
if self.mode in ['pairs', 'angle', 'dihed', 'hbond' ]:
if self.status==0:
self.prompt = [ 'Please click on the first %s...' % what]
elif self.status==1:
self.prompt = [ 'Please click on the second %s...' % what ]
elif self.status==2:
self.prompt = [ 'Please click on the third %s...' % what]
elif self.status==3:
self.prompt = [ 'Please click on the fourth %s...' % what]
elif self.mode in [ 'polar', 'neigh', 'heavy', 'surf' ]:
(what, code) = self.get_selection_name()
letterN=""
if what[0] in ('a', 'e', 'i', 'o', 'u'):
letterN = "n"
else:
letterN = ""
self.prompt = [ 'Please click a%s %s...' % (letterN, what)]
if self.error!=None:
self.prompt.append(self.error)
return self.prompt
def delete_last(self):
"""
Corresponds to the "Delete Last Object" menu button
"""
self._validate_instance()
if self.status==0:
if self.meas_count>0:
name = self.get_name(0,0)
self.cmd.delete(name)
self.meas_count = self.meas_count - 1
self.status=0
self.error = None
self.clear_input()
self.cmd.refresh_wizard()
def delete_all(self):
"""
Corresponds to the "Delete All Measurements" menu button
"""
self.meas_count = 0
self.cmd.delete(obj_prefix+"*")
self.status=0
self.error = None
self.clear_input()
self.cmd.refresh_wizard()
def do_select(self,name): # map selects into picks
self.cmd.unpick()
try:
self.cmd.select("pk1", name + " and not " + sele_prefix + "*") # note, using new object name wildcards
self.cmd.delete(name)
self.do_pick(0)
except pymol.CmdException:
if self.status:
sele_name = sele_prefix + str(self.status-1)
self.cmd.select(indi_sele, sele_name)
self.cmd.enable(indi_sele)
def do_pick(self,bondFlag):
# update pk1 based on current mouse mode
(what,code) = self.get_selection_name()
self.cmd.select( "(pk1)", code + "(pk1)")
if bondFlag:
self.error = "Error: please select an atom, not a bond."
print self.error
else:
reset = 1
sele_name = sele_prefix + str(self.status)
if self.mode == 'pairs':
if self.status==0:
self.cmd.select(sele_name,"(pk1)")
self.cmd.select(indi_sele, sele_name)
self.cmd.enable(indi_sele)
self.status = 1
self.error = None
elif self.status==1:
obj_name = self.get_name((self.object_mode=='append'),
(self.object_mode=='append'))
if self.object_mode=='merge':
reset = 0
self.cmd.dist(obj_name,"(v. and " + sele_prefix+"0)","(v. and (pk1))",reset=reset)
self.cmd.enable(obj_name)
self.clear_input()
self.status = 0
self.cmd.unpick()
elif self.mode == 'angle':
if self.status<2:
self.cmd.select(sele_name,"(pk1)")
self.cmd.unpick()
self.cmd.select(indi_sele, sele_name)
self.cmd.enable(indi_sele)
self.status = self.status + 1
self.error = None
else:
obj_name = self.get_name((self.object_mode=='append'),
(self.object_mode=='append'))
if self.object_mode=='merge':
reset = 0
self.cmd.angle(obj_name, "(v. and " + sele_prefix+"0)", "(v. and " + sele_prefix+"1)",
"(v. and (pk1))", reset=reset)
self.cmd.enable(obj_name)
self.clear_input()
self.status = 0
self.cmd.unpick()
elif self.mode == 'dihed':
if self.status<3:
self.cmd.select(sele_name,"(pk1)")
self.cmd.unpick()
self.cmd.select(indi_sele, sele_name)
self.cmd.enable(indi_sele)
self.status = self.status + 1
self.error = None
else:
obj_name = self.get_name((self.object_mode=='append'),
(self.object_mode=='append'))
if self.object_mode=='merge':
reset = 0
self.cmd.dihedral(obj_name, "(v. and " + sele_prefix+"0)", "(v. and " + sele_prefix+"1)",
"(v. and " + sele_prefix+"2)", "(v. and (pk1))", reset=reset)
self.cmd.enable(obj_name)
self.clear_input()
self.status = 0
self.cmd.unpick()
            elif self.mode == 'hbond':
if self.status==0:
self.cmd.select(sele_name,"(pk1)")
self.cmd.select(indi_sele, sele_name)
self.cmd.enable(indi_sele)
self.status = 1
self.error = None
elif self.status==1:
obj_name = self.get_name((self.object_mode=='append'),
(self.object_mode=='append'))
if self.object_mode=='merge':
reset = 0
self.cmd.dist(obj_name,"(v. and " + sele_prefix+"0)","(v. and (pk1))",mode=2,cutoff=self.hbond_cutoff,reset=reset)
self.cmd.enable(obj_name)
self.clear_input()
self.status = 0
self.cmd.unpick()
elif self.mode in ['neigh','polar','heavy']:
reset = 1
obj_name = self.get_name((self.object_mode=='append'),
(self.object_mode=='append'))
if self.object_mode=='merge':
reset = 0
cnt = 0
sel_mod = ""
if self.mode in ('neigh', 'polar', 'heavy'):
if self.neighbor_target=="same":
sel_mod = "bm. pk1"
elif self.neighbor_target=="other":
sel_mod = "(not bm. pk1)"
elif self.neighbor_target=="enabled":
sel_mod = "(enabled)"
elif self.neighbor_target=="all":
sel_mod = "all"
else:
sel_mod = "(%s)" % self.neighbor_target
cutoffType=0
if self.mode == 'neigh':
cnt = self.cmd.select(sele_prefix,
"(v. and (pk1 a; %f) and (not (nbr. pk1)) and (not (nbr. (nbr. pk1))) and (not (nbr. (nbr. (nbr. pk1)))) and (%s))"
%(self.cutoff, sel_mod))
cutoffType=self.cutoff
elif self.mode == 'polar':
cnt = self.cmd.select(sele_prefix,
"(v. and (pk1 a; %f) and (e. n,o) and (not (nbr. pk1)) and (not (nbr. (nbr. pk1))) and (not (nbr. (nbr. (nbr. pk1)))) and (%s))"
%(self.polar_neighbor_cutoff, sel_mod))
cutoffType = self.polar_neighbor_cutoff
elif self.mode == 'heavy':
cutoffType = self.heavy_neighbor_cutoff
cnt = self.cmd.select(sele_prefix,
"(v. and (pk1 a; %f) and (not h.) and (not (nbr. pk1)) and (not (nbr. (nbr. pk1))) and (not (nbr. (nbr. (nbr. pk1)))) and (%s))"
%(self.heavy_neighbor_cutoff, sel_mod))
if cnt:
self.cmd.dist(obj_name,"(pk1)",sele_prefix,cutoff=cutoffType,reset=reset)
else:
print " Wizard: No neighbors found."
self.clear_input()
self.cmd.unpick()
self.cmd.enable(obj_name)
self.cmd.refresh_wizard()
def do_dirty(self):
if self.mouse_mode != self.cmd.get("mouse_selection_mode"):
self.mouse_mode = self.cmd.get("mouse_selection_mode")
self.cmd.refresh_wizard()
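# Usage sketch: inside a PyMOL session this wizard is typically launched with
#
#   cmd.wizard("measurement")
#
# after which atom picks are routed through do_pick()/do_select() above.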
|
gratefulfrog/lib
|
python/pymol/wizard/measurement.py
|
Python
|
gpl-2.0
| 17,133
|
[
"PyMOL"
] |
b05eec2dfb42efc8eaccbd9b64e53e90c1ced47d51e6408444bd0c950fa4ee32
|
"""
Portable Executable (PE) 32 bit, little endian
Used on MSWindows systems (including DOS) for EXEs and DLLs
1999 paper:
http://download.microsoft.com/download/1/6/1/161ba512-40e2-4cc9-843a-923143f3456c/pecoff.doc
2006 with updates relevant for .NET:
http://download.microsoft.com/download/9/c/5/9c5b2167-8017-4bae-9fde-d599bac8184a/pecoff_v8.doc
"""
from construct import *
import time
import six
class UTCTimeStampAdapter(Adapter):
def _decode(self, obj, context):
return time.ctime(obj)
def _encode(self, obj, context):
return int(time.mktime(time.strptime(obj)))
def UTCTimeStamp(name):
return UTCTimeStampAdapter(ULInt32(name))
class NamedSequence(Adapter):
"""
creates a mapping between the elements of a sequence and their respective
names. this is useful for sequences of a variable length, where each
element in the sequence has a name (as is the case with the data
directories of the PE header)
"""
__slots__ = ["mapping", "rev_mapping"]
prefix = "unnamed_"
def __init__(self, subcon, mapping):
Adapter.__init__(self, subcon)
self.mapping = mapping
self.rev_mapping = dict((v, k) for k, v in mapping.items())
def _encode(self, obj, context):
d = obj.__dict__
obj2 = [None] * len(d)
for name, value in d.items():
if name in self.rev_mapping:
index = self.rev_mapping[name]
elif name.startswith("__"):
obj2.pop(-1)
continue
elif name.startswith(self.prefix):
index = int(name.split(self.prefix)[1])
else:
raise ValueError("no mapping defined for %r" % (name,))
obj2[index] = value
return obj2
def _decode(self, obj, context):
obj2 = Container()
for i, item in enumerate(obj):
if i in self.mapping:
name = self.mapping[i]
else:
name = "%s%d" % (self.prefix, i)
setattr(obj2, name, item)
return obj2
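# NamedSequence is used below to expose the PE data directories both by index
# and by name, e.g. parsed.optional_header.data_directories.import_table.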
msdos_header = Struct("msdos_header",
Magic("MZ"),
ULInt16("partPag"),
ULInt16("page_count"),
ULInt16("relocation_count"),
ULInt16("header_size"),
ULInt16("minmem"),
ULInt16("maxmem"),
ULInt16("relocation_stackseg"),
ULInt16("exe_stackptr"),
ULInt16("checksum"),
ULInt16("exe_ip"),
ULInt16("relocation_codeseg"),
ULInt16("table_offset"),
ULInt16("overlay"),
Padding(8),
ULInt16("oem_id"),
ULInt16("oem_info"),
Padding(20),
ULInt32("coff_header_pointer"),
Anchor("_assembly_start"),
OnDemand(
HexDumpAdapter(
Field("code",
lambda ctx: ctx.coff_header_pointer - ctx._assembly_start
)
)
),
)
symbol_table = Struct("symbol_table",
String("name", 8, padchar = six.b("\x00")),
ULInt32("value"),
Enum(ExprAdapter(SLInt16("section_number"),
encoder = lambda obj, ctx: obj + 1,
decoder = lambda obj, ctx: obj - 1,
),
UNDEFINED = -1,
ABSOLUTE = -2,
DEBUG = -3,
_default_ = Pass,
),
Enum(ULInt8("complex_type"),
NULL = 0,
POINTER = 1,
FUNCTION = 2,
ARRAY = 3,
),
Enum(ULInt8("base_type"),
NULL = 0,
VOID = 1,
CHAR = 2,
SHORT = 3,
INT = 4,
LONG = 5,
FLOAT = 6,
DOUBLE = 7,
STRUCT = 8,
UNION = 9,
ENUM = 10,
MOE = 11,
BYTE = 12,
WORD = 13,
UINT = 14,
DWORD = 15,
),
Enum(ULInt8("storage_class"),
END_OF_FUNCTION = 255,
NULL = 0,
AUTOMATIC = 1,
EXTERNAL = 2,
STATIC = 3,
REGISTER = 4,
EXTERNAL_DEF = 5,
LABEL = 6,
UNDEFINED_LABEL = 7,
MEMBER_OF_STRUCT = 8,
ARGUMENT = 9,
STRUCT_TAG = 10,
MEMBER_OF_UNION = 11,
UNION_TAG = 12,
TYPE_DEFINITION = 13,
UNDEFINED_STATIC = 14,
ENUM_TAG = 15,
MEMBER_OF_ENUM = 16,
REGISTER_PARAM = 17,
BIT_FIELD = 18,
BLOCK = 100,
FUNCTION = 101,
END_OF_STRUCT = 102,
FILE = 103,
SECTION = 104,
WEAK_EXTERNAL = 105,
),
ULInt8("number_of_aux_symbols"),
Array(lambda ctx: ctx.number_of_aux_symbols,
Bytes("aux_symbols", 18)
)
)
coff_header = Struct("coff_header",
Magic("PE\x00\x00"),
Enum(ULInt16("machine_type"),
UNKNOWN = 0x0,
AM33 = 0x1d3,
AMD64 = 0x8664,
ARM = 0x1c0,
EBC = 0xebc,
I386 = 0x14c,
IA64 = 0x200,
M32R = 0x9041,
MIPS16 = 0x266,
MIPSFPU = 0x366,
MIPSFPU16 = 0x466,
POWERPC = 0x1f0,
POWERPCFP = 0x1f1,
R4000 = 0x166,
SH3 = 0x1a2,
SH3DSP = 0x1a3,
SH4 = 0x1a6,
        SH5 = 0x1a8,
THUMB = 0x1c2,
WCEMIPSV2 = 0x169,
_default_ = Pass
),
ULInt16("number_of_sections"),
UTCTimeStamp("time_stamp"),
ULInt32("symbol_table_pointer"),
ULInt32("number_of_symbols"),
ULInt16("optional_header_size"),
FlagsEnum(ULInt16("characteristics"),
RELOCS_STRIPPED = 0x0001,
EXECUTABLE_IMAGE = 0x0002,
LINE_NUMS_STRIPPED = 0x0004,
LOCAL_SYMS_STRIPPED = 0x0008,
AGGRESSIVE_WS_TRIM = 0x0010,
LARGE_ADDRESS_AWARE = 0x0020,
MACHINE_16BIT = 0x0040,
BYTES_REVERSED_LO = 0x0080,
MACHINE_32BIT = 0x0100,
DEBUG_STRIPPED = 0x0200,
REMOVABLE_RUN_FROM_SWAP = 0x0400,
SYSTEM = 0x1000,
DLL = 0x2000,
UNIPROCESSOR_ONLY = 0x4000,
BIG_ENDIAN_MACHINE = 0x8000,
),
# symbol table
Pointer(lambda ctx: ctx.symbol_table_pointer,
Array(lambda ctx: ctx.number_of_symbols, symbol_table)
)
)
def PEPlusField(name):
return IfThenElse(name, lambda ctx: ctx.pe_type == "PE32_plus",
ULInt64(None),
ULInt32(None),
)
optional_header = Struct("optional_header",
# standard fields
Enum(ULInt16("pe_type"),
PE32 = 0x10b,
PE32_plus = 0x20b,
),
ULInt8("major_linker_version"),
ULInt8("minor_linker_version"),
ULInt32("code_size"),
ULInt32("initialized_data_size"),
ULInt32("uninitialized_data_size"),
ULInt32("entry_point_pointer"),
ULInt32("base_of_code"),
# only in PE32 files
If(lambda ctx: ctx.pe_type == "PE32",
ULInt32("base_of_data")
),
# WinNT-specific fields
PEPlusField("image_base"),
ULInt32("section_aligment"),
ULInt32("file_alignment"),
ULInt16("major_os_version"),
ULInt16("minor_os_version"),
ULInt16("major_image_version"),
ULInt16("minor_image_version"),
ULInt16("major_subsystem_version"),
ULInt16("minor_subsystem_version"),
Padding(4),
ULInt32("image_size"),
ULInt32("headers_size"),
ULInt32("checksum"),
Enum(ULInt16("subsystem"),
UNKNOWN = 0,
NATIVE = 1,
WINDOWS_GUI = 2,
WINDOWS_CUI = 3,
        POSIX_CUI = 7,
WINDOWS_CE_GUI = 9,
EFI_APPLICATION = 10,
EFI_BOOT_SERVICE_DRIVER = 11,
EFI_RUNTIME_DRIVER = 12,
EFI_ROM = 13,
XBOX = 14,
_default_ = Pass
),
FlagsEnum(ULInt16("dll_characteristics"),
NO_BIND = 0x0800,
WDM_DRIVER = 0x2000,
TERMINAL_SERVER_AWARE = 0x8000,
),
PEPlusField("reserved_stack_size"),
PEPlusField("stack_commit_size"),
PEPlusField("reserved_heap_size"),
PEPlusField("heap_commit_size"),
ULInt32("loader_flags"),
ULInt32("number_of_data_directories"),
NamedSequence(
Array(lambda ctx: ctx.number_of_data_directories,
Struct("data_directories",
ULInt32("address"),
ULInt32("size"),
)
),
mapping = {
0 : 'export_table',
1 : 'import_table',
2 : 'resource_table',
3 : 'exception_table',
4 : 'certificate_table',
5 : 'base_relocation_table',
6 : 'debug',
7 : 'architecture',
8 : 'global_ptr',
9 : 'tls_table',
10 : 'load_config_table',
11 : 'bound_import',
12 : 'import_address_table',
13 : 'delay_import_descriptor',
14 : 'complus_runtime_header',
}
),
)
section = Struct("section",
String("name", 8, padchar = six.b("\x00")),
ULInt32("virtual_size"),
ULInt32("virtual_address"),
ULInt32("raw_data_size"),
ULInt32("raw_data_pointer"),
ULInt32("relocations_pointer"),
ULInt32("line_numbers_pointer"),
ULInt16("number_of_relocations"),
ULInt16("number_of_line_numbers"),
FlagsEnum(ULInt32("characteristics"),
TYPE_REG = 0x00000000,
TYPE_DSECT = 0x00000001,
TYPE_NOLOAD = 0x00000002,
TYPE_GROUP = 0x00000004,
TYPE_NO_PAD = 0x00000008,
TYPE_COPY = 0x00000010,
CNT_CODE = 0x00000020,
CNT_INITIALIZED_DATA = 0x00000040,
CNT_UNINITIALIZED_DATA = 0x00000080,
LNK_OTHER = 0x00000100,
LNK_INFO = 0x00000200,
TYPE_OVER = 0x00000400,
LNK_REMOVE = 0x00000800,
LNK_COMDAT = 0x00001000,
MEM_FARDATA = 0x00008000,
MEM_PURGEABLE = 0x00020000,
MEM_16BIT = 0x00020000,
MEM_LOCKED = 0x00040000,
MEM_PRELOAD = 0x00080000,
ALIGN_1BYTES = 0x00100000,
ALIGN_2BYTES = 0x00200000,
ALIGN_4BYTES = 0x00300000,
ALIGN_8BYTES = 0x00400000,
ALIGN_16BYTES = 0x00500000,
ALIGN_32BYTES = 0x00600000,
ALIGN_64BYTES = 0x00700000,
ALIGN_128BYTES = 0x00800000,
ALIGN_256BYTES = 0x00900000,
ALIGN_512BYTES = 0x00A00000,
ALIGN_1024BYTES = 0x00B00000,
ALIGN_2048BYTES = 0x00C00000,
ALIGN_4096BYTES = 0x00D00000,
ALIGN_8192BYTES = 0x00E00000,
LNK_NRELOC_OVFL = 0x01000000,
MEM_DISCARDABLE = 0x02000000,
MEM_NOT_CACHED = 0x04000000,
MEM_NOT_PAGED = 0x08000000,
MEM_SHARED = 0x10000000,
MEM_EXECUTE = 0x20000000,
MEM_READ = 0x40000000,
MEM_WRITE = 0x80000000,
),
OnDemandPointer(lambda ctx: ctx.raw_data_pointer,
HexDumpAdapter(Field("raw_data", lambda ctx: ctx.raw_data_size))
),
OnDemandPointer(lambda ctx: ctx.line_numbers_pointer,
Array(lambda ctx: ctx.number_of_line_numbers,
Struct("line_numbers",
ULInt32("type"),
ULInt16("line_number"),
)
)
),
OnDemandPointer(lambda ctx: ctx.relocations_pointer,
Array(lambda ctx: ctx.number_of_relocations,
Struct("relocations",
ULInt32("virtual_address"),
ULInt32("symbol_table_index"),
ULInt16("type"),
)
)
),
)
pe32_file = Struct("pe32_file",
# headers
msdos_header,
coff_header,
Anchor("_start_of_optional_header"),
optional_header,
Anchor("_end_of_optional_header"),
    # pad out to the declared optional_header_size; use max() so the pad
    # length can never go negative (min() would skip the padding entirely)
    Padding(lambda ctx: max(0,
        ctx.coff_header.optional_header_size -
        ctx._end_of_optional_header +
        ctx._start_of_optional_header
        )
    ),
# sections
Array(lambda ctx: ctx.coff_header.number_of_sections, section)
)
if __name__ == "__main__":
print (pe32_file.parse_stream(open("../../../tests/NOTEPAD.EXE", "rb")))
print (pe32_file.parse_stream(open("../../../tests/sqlite3.dll", "rb")))
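    # Note: the OnDemand/OnDemandPointer fields above (MS-DOS stub code,
    # section raw data, relocations, line numbers) parse lazily, so these
    # calls stay cheap; a hypothetical access such as
    # pe.section[0].raw_data.value would trigger the actual read, provided
    # the stream is still open.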
|
mekolat/manachat
|
external/construct/formats/executable/pe32.py
|
Python
|
gpl-2.0
| 11,720
|
[
"MOE"
] |
9eac6bef21ffacb584386cd640b42d569af0b3955fda7ea00122673084f7d274
|
import glob
import pandas as pd
import numpy as np
pd.set_option('display.max_columns', 50)  # show up to 50 columns when printing
import os
os.chdir("/gpfs/commons/home/biederstedte-934/evan_projects/correct_phylo_files")
normalB = glob.glob("binary_position_RRBS_normal_B_cell*")
mcell = glob.glob("binary_position_RRBS_NormalBCD19pCD27mcell*")
pcell = glob.glob("binary_position_RRBS_NormalBCD19pCD27pcell*")
cd19cell = glob.glob("binary_position_RRBS_NormalBCD19pcell*")
cw154 = glob.glob("binary_position_RRBS_cw154*")
trito = glob.glob("binary_position_RRBS_trito_pool*")
print(len(normalB))
print(len(mcell))
print(len(pcell))
print(len(cd19cell))
print(len(cw154))
print(len(trito))
totalfiles = normalB + mcell + pcell + cd19cell + cw154 + trito
print(len(totalfiles))
df_list = []
for file in totalfiles:
df = pd.read_csv(file)
df = df.drop("Unnamed: 0", axis=1)
df["chromosome"] = df["position"].map(lambda x: str(x)[:5])
df = df[df["chromosome"] == "chr1_"]
df = df.drop("chromosome", axis=1)
df_list.append(df)
print(len(df_list))
total_matrix = pd.concat([df.set_index("position") for df in df_list], axis=1).reset_index().astype(object)
total_matrix = total_matrix.drop("index", axis=1)
len(total_matrix.columns)
total_matrix.columns = ["RRBS_normal_B_cell_A1_24_TAAGGCGA.ACAACC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ACCGCG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ACGTGG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.AGGATG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ATAGCG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ATCGAC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CAAGAG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CATGAC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CGGTAG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CTATTG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CTCAGC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GACACG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GCTGCC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GGCATC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GTGAGG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GTTGAG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TAGCGG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TATCTC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TCTCTG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TGACAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ACAACC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ACCGCG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ACTCAC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ATAGCG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CAAGAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CATGAC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CCTTCG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CGGTAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CTATTG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CTCAGC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GACACG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GCATTC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GGCATC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GTGAGG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GTTGAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TAGCGG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TATCTC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TCTCTG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TGACAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TGCTGC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACAACC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACCGCG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACGTGG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACTCAC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.AGGATG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ATAGCG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ATCGAC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CAAGAG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CATGAC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CGGTAG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CTATTG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GACACG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GCATTC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GCTGCC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GGCATC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GTGAGG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GTTGAG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.TAGCGG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.TATCTC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACAACC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACCGCG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACGTGG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACTCAC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.AGGATG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ATCGAC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CAAGAG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CATGAC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CCTTCG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CGGTAG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CTATTG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CTCAGC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GACACG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GCATTC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GCTGCC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GGCATC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GTTGAG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.TAGCGG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.TATCTC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACAACC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACCGCG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACGTGG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACTCAC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.AGGATG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ATAGCG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ATCGAC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CAAGAG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CATGAC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CGGTAG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CTATTG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CTCAGC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GACACG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GCATTC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GCTGCC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GGCATC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GTGAGG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.TAGCGG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.TATCTC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ACCGCG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ACGTGG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ACTCAC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.AGGATG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ATCGAC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CAAGAG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CATGAC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CCTTCG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CTATTG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CTCAGC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GCATTC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GCTGCC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GGCATC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GTGAGG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GTTGAG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.TCTCTG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ACCGCG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ACGTGG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ACTCAC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ATAGCG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ATCGAC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CAAGAG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CATGAC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CCTTCG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CTATTG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CTCAGC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GACACG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GCATTC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GCTGCC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GGCATC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GTGAGG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GTTGAG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.TAGCGG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.TATCTC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACAACC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACCGCG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACGTGG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACTCAC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.AGGATG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ATAGCG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ATCGAC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CAAGAG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CATGAC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CCTTCG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CGGTAG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CTATTG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CTCAGC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GACACG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GCATTC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GTGAGG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GTTGAG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.TATCTC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.TCTCTG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ACAACC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ACGTGG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ACTCAC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.AGGATG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ATAGCG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ATCGAC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CAAGAG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CATGAC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CCTTCG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CGGTAG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CTATTG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CTCAGC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.GACACG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.GTGAGG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.TAGCGG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.TATCTC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.TCTCTG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACAACC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACCGCG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACGTGG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACTCAC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.AGGATG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ATAGCG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ATCGAC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CAAGAG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CATGAC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CCTTCG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CGGTAG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CTATTG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CTCAGC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GACACG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GCATTC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GGCATC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GTGAGG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GTTGAG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.TAGCGG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.TATCTC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.TCTCTG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ACAACC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ACCGCG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ACTCAC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.AGGATG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ATAGCG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ATCGAC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CAAGAG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CATGAC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CCTTCG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CGGTAG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CTATTG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CTCAGC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GCATTC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GCTGCC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GGCATC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GTGAGG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GTTGAG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.TAGCGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACAACC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACCGCG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACGTGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACTCAC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.AGGATG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ATAGCG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ATCGAC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CAAGAG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CATGAC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CCTTCG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CGGTAG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CTATTG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CTCAGC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GACACG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GCATTC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GCTGCC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GGCATC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GTGAGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GTTGAG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.TAGCGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.TATCTC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.TCTCTG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.ACCGCG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.ACTCAC",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.ATAGCG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.CAAGAG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.CCTTCG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.CTATTG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.GACACG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.GTGAGG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.TAGCGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACAACC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACCGCG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACGTGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACTCAC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.AGGATG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ATAGCG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ATCGAC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CATGAC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CCTTCG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CGGTAG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CTATTG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CTCAGC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GACACG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GCATTC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GCTGCC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GGCATC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GTGAGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GTTGAG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.TAGCGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.TATCTC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.TCTCTG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACAACC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACCGCG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACGTGG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACTCAC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.AGGATG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ATAGCG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ATCGAC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CAAGAG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CATGAC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CCTTCG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CGGTAG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CTATTG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CTCAGC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GACACG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GCATTC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GCTGCC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GGCATC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GTTGAG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.TAGCGG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.TATCTC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.TCTCTG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACAACC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACCGCG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACGTGG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACTCAC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.AGGATG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ATAGCG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ATCGAC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CATGAC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CCTTCG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CGGTAG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CTATTG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CTCAGC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GACACG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GCATTC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GCTGCC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GGCATC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GTGAGG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.TAGCGG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.TATCTC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.TCTCTG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACAACC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACCGCG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACGTGG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACTCAC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.AGGATG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ATAGCG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ATCGAC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CAAGAG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CATGAC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CCTTCG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CGGTAG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CTATTG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CTCAGC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GACACG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GCATTC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GCTGCC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GGCATC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GTGAGG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GTTGAG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.TAGCGG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.TATCTC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.TCTCTG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACAACC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACCGCG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACGTGG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACTCAC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.AGGATG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ATAGCG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ATCGAC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CAAGAG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CATGAC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CCTTCG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CGGTAG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CTATTG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CTCAGC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GCATTC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GCTGCC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GGCATC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GTGAGG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GTTGAG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.TAGCGG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.TATCTC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.TCTCTG",
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACAACC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACCGCG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACGTGG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACTCAC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.AGGATG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ATAGCG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ATCGAC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CAAGAG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CATGAC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CCTTCG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CGGTAG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CTCAGC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GACACG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GCATTC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GCTGCC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GGCATC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GTGAGG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.TAGCGG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.TATCTC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.TCTCTG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ACAACC',
'RRBS_cw154_Tris_protease_CTCTCTAC.ACCGCG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ACGTGG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ACTCAC',
'RRBS_cw154_Tris_protease_CTCTCTAC.AGGATG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ATAGCG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ATCGAC',
'RRBS_cw154_Tris_protease_CTCTCTAC.CATGAC',
'RRBS_cw154_Tris_protease_CTCTCTAC.CCTTCG',
'RRBS_cw154_Tris_protease_CTCTCTAC.CGGTAG',
'RRBS_cw154_Tris_protease_CTCTCTAC.CTATTG',
'RRBS_cw154_Tris_protease_CTCTCTAC.CTCAGC',
'RRBS_cw154_Tris_protease_CTCTCTAC.GACACG',
'RRBS_cw154_Tris_protease_CTCTCTAC.GCATTC',
'RRBS_cw154_Tris_protease_CTCTCTAC.GCTGCC',
'RRBS_cw154_Tris_protease_CTCTCTAC.GGCATC',
'RRBS_cw154_Tris_protease_CTCTCTAC.GTGAGG',
'RRBS_cw154_Tris_protease_CTCTCTAC.GTTGAG',
'RRBS_cw154_Tris_protease_CTCTCTAC.TAGCGG',
'RRBS_cw154_Tris_protease_CTCTCTAC.TATCTC',
'RRBS_cw154_Tris_protease_CTCTCTAC.TCTCTG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACAACC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACCGCG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACGTGG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACTCAC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.AGGATG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ATAGCG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ATCGAC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CATGAC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CCTTCG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CGGTAG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CTATTG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CTCAGC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GACACG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GCATTC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GCTGCC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GGCATC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GTGAGG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GTTGAG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.TAGCGG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.TATCTC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.TCTCTG',
'RRBS_trito_pool_1_TAAGGCGA.ACAACC',
'RRBS_trito_pool_1_TAAGGCGA.ACGTGG',
'RRBS_trito_pool_1_TAAGGCGA.ACTCAC',
'RRBS_trito_pool_1_TAAGGCGA.ATAGCG',
'RRBS_trito_pool_1_TAAGGCGA.ATCGAC',
'RRBS_trito_pool_1_TAAGGCGA.CAAGAG',
'RRBS_trito_pool_1_TAAGGCGA.CATGAC',
'RRBS_trito_pool_1_TAAGGCGA.CCTTCG',
'RRBS_trito_pool_1_TAAGGCGA.CGGTAG',
'RRBS_trito_pool_1_TAAGGCGA.CTATTG',
'RRBS_trito_pool_1_TAAGGCGA.GACACG',
'RRBS_trito_pool_1_TAAGGCGA.GCATTC',
'RRBS_trito_pool_1_TAAGGCGA.GCTGCC',
'RRBS_trito_pool_1_TAAGGCGA.GGCATC',
'RRBS_trito_pool_1_TAAGGCGA.GTGAGG',
'RRBS_trito_pool_1_TAAGGCGA.GTTGAG',
'RRBS_trito_pool_1_TAAGGCGA.TAGCGG',
'RRBS_trito_pool_1_TAAGGCGA.TATCTC',
'RRBS_trito_pool_1_TAAGGCGA.TCTCTG',
'RRBS_trito_pool_1_TAAGGCGA.TGACAG',
'RRBS_trito_pool_1_TAAGGCGA.TGCTGC',
'RRBS_trito_pool_2_CGTACTAG.ACAACC',
'RRBS_trito_pool_2_CGTACTAG.ACGTGG',
'RRBS_trito_pool_2_CGTACTAG.ACTCAC',
'RRBS_trito_pool_2_CGTACTAG.AGGATG',
'RRBS_trito_pool_2_CGTACTAG.ATAGCG',
'RRBS_trito_pool_2_CGTACTAG.ATCGAC',
'RRBS_trito_pool_2_CGTACTAG.CAAGAG',
'RRBS_trito_pool_2_CGTACTAG.CATGAC',
'RRBS_trito_pool_2_CGTACTAG.CCTTCG',
'RRBS_trito_pool_2_CGTACTAG.CGGTAG',
'RRBS_trito_pool_2_CGTACTAG.CTATTG',
'RRBS_trito_pool_2_CGTACTAG.GACACG',
'RRBS_trito_pool_2_CGTACTAG.GCATTC',
'RRBS_trito_pool_2_CGTACTAG.GCTGCC',
'RRBS_trito_pool_2_CGTACTAG.GGCATC',
'RRBS_trito_pool_2_CGTACTAG.GTGAGG',
'RRBS_trito_pool_2_CGTACTAG.GTTGAG',
'RRBS_trito_pool_2_CGTACTAG.TAGCGG',
'RRBS_trito_pool_2_CGTACTAG.TATCTC',
'RRBS_trito_pool_2_CGTACTAG.TCTCTG',
'RRBS_trito_pool_2_CGTACTAG.TGACAG']
print(total_matrix.shape)
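# The three steps below: encode each methylation call as an int (0/1) with "?"
# for missing values, collapse every sample column into one contiguous
# character string, and prepend the sample name, giving name-plus-sequence
# rows in a PHYLIP-style layout (no site-count header is written).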
total_matrix = total_matrix.applymap(lambda x: int(x) if pd.notnull(x) else "?")
total_matrix = total_matrix.astype(str).apply(''.join)
tott = pd.Series(total_matrix.index.astype(str).str.cat(total_matrix.astype(str),' '))
tott.to_csv("total_chrom1.phy", header=None, index=None)
print(tott.shape)
|
evanbiederstedt/RRBSfun
|
trees/chrom_scripts/total_chr01.py
|
Python
|
mit
| 32,997
|
[
"MCell"
] |
3b25ea7953fa2c49ac8fef9316da71f753f26138fe53e48f971dd03976b3475b
|
'''
Code for method recommendation experiments for HCD Connect cases
Written by Mark Fuge and Bud Peters
For more information on the project visit:
http://ideal.umd.edu/projects/design_method_recommendation.html
To reproduce the experimental results just run:
python paper_experiments.py
In order to get the raw case study data, you'll need to download the data files
from a separate repository and then place them in a 'data' folder (or you can
change the 'data_path' variable below):
https://github.com/IDEALLab/hcdconnect_case_data
This experiment code is what was used to the produce the results in
Mark Fuge, Bud Peters, Alice Agogino, "Machine learning algorithms for recommending design methods." Journal of Mechanical Design 136 (10)
@article{fugeHCD2014JMD,
author = {Fuge, Mark and Peters, Bud and Agogino, Alice},
day = {18},
doi = {10.1115/1.4028102},
issn = {1050-0472},
journal = {Journal of Mechanical Design},
month = aug,
number = {10},
pages = {101103+},
title = {Machine Learning Algorithms for Recommending Design Methods},
url = {http://dx.doi.org/10.1115/1.4028102},
volume = {136},
year = {2014}
}
'''
from time import time
import csv
import re
import os
from operator import itemgetter
import numpy as np
import cPickle as pickle
import matplotlib.pylab as plt
from sklearn import svm, cross_validation, tree, ensemble
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import BernoulliNB
from sklearn.metrics import precision_recall_curve, average_precision_score, adjusted_mutual_info_score, adjusted_rand_score
from sklearn.multiclass import OneVsRestClassifier
from sklearn.grid_search import RandomizedSearchCV
from sklearn.cluster import SpectralClustering
from sklearn.covariance import GraphLassoCV, empirical_covariance
from sklearn.utils import resample
from scipy.stats import gamma,randint
from scipy.stats import scoreatpercentile as percentile
from collaborative_filter import *
from rec_utils import *
from rec_dummy import *
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer,TfidfVectorizer
from sklearn.preprocessing import Normalizer
from sklearn.decomposition import TruncatedSVD
import brewer2mpl
import matplotlib.colors
paired = brewer2mpl.get_map('Paired', 'qualitative', 10).mpl_colors
PuBuGn4 = brewer2mpl.get_map('PuBuGn', 'sequential', 5).mpl_colors
def load_hcd_cases(data_path):
''' Loads case study data for stories, methods, and cases
Assumes that methods and stories are correctly ordered by line number
'''
# Load Stories into an indexed array table
story_csv = csv.reader(open(data_path+'stories.csv'),delimiter = '|')
method_csv = csv.reader(open(data_path+'methods.csv'),delimiter = '|')
case_csv = csv.reader(open(data_path+'cases.csv'),delimiter = '|')
stories=[]
case_categories=[]
for story in story_csv:
stories.append((story[1].lower(),story[2].lower()))
# Just Focus Area
case_categories.append(story[8].lower().split(';'))
# Focus Area + User
#uid = ['IDEO' if story[5]=='IDEO.org' else 'noIDEO' ]
#case_categories.append(story[8].lower().split(';')+uid)
methods={}
for method in method_csv:
methods[int(method[0])]=[method[1],method[2]]
methods = methods.values()
cases=np.zeros((len(stories),len(methods)))
for story_id,method_id in case_csv:
cases[int(story_id)][int(method_id)]=1
# Now remove invalid cases:
ft=np.array([True if len(s[1])>6 else False for s in stories])
return np.array(stories)[ft],methods,np.array(cases)[ft],np.array(case_categories)[ft]
def get_case_mutual_information(case_binary_matrix):
'''
Calculates the mutual information between methods given a binary case matrix
    Output is a symmetric num_methods x num_methods matrix.
'''
num_cases,num_methods = case_binary_matrix.shape
MI = np.zeros(shape=(num_methods,num_methods))
for i in range(num_methods):
for j in range(i,num_methods):
c1 = case_binary_matrix[:,i]
c2 = case_binary_matrix[:,j]
MI[i][j] = adjusted_mutual_info_score(c1,c2)
MI[j][i] = MI[i][j]
return MI
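# Sanity check: adjusted_mutual_info_score of a non-constant vector with
# itself is 1.0, so the diagonal of MI is 1.0 and off-diagonal entries
# measure how strongly two methods co-occur across cases.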
# Utility function to report best scores
# from http://scikit-learn.org/stable/auto_examples/randomized_search.html
def opt_report(grid_scores, n_top=3):
top_scores = sorted(grid_scores, key=itemgetter(1), reverse=True)[:n_top]
for i, score in enumerate(top_scores):
print("Model with rank: {0}".format(i + 1))
print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
score.mean_validation_score,
np.std(score.cv_validation_scores)))
print("Parameters: {0}".format(score.parameters))
print("")
def rebalance_cases(cases):
''' Balances the positive and negative case results '''
ind = np.array([c[2]>0 for c in cases])
pos = cases[ind]
neg = cases[~ind]
np.random.shuffle(pos)
np.random.shuffle(neg)
if len(pos)<len(neg):
neg = neg[:len(pos)]
else:
pos = pos[:len(neg)]
cases=np.vstack([pos,neg])
np.random.shuffle(cases)
return cases
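# i.e. the majority class (positive or negative examples) is randomly
# downsampled to the size of the minority class, so a model trained on the
# result sees a 50/50 label split.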
# List some options for hyperparamer optimization
# Helpful little snippet for exploring gamma PDF functions
#x=np.linspace(0,0.02);h = plt.plot(x, gamma(13,scale=0.002).pdf(x)); plt.show()
hyper_params = {'CollaborativeFilter':
{'alpha': gamma(27,scale=0.04),#gamma(20,scale=0.10),#gamma(11,scale=0.15),#,
'beta' : gamma(13,scale=0.002),#gamma(20,scale=0.001),#,
'lambda_nu' : gamma(58,scale=0.025),#gamma(58,scale=0.025),#gamma(11,scale=0.15),#gamma(3,scale=0.4),
'lambda_b' : gamma(15,scale=0.008),#,#gamma(6,scale=0.05),#gamma(3,scale=0.4),
'num_latent' : randint(2,15)#[5,10,20,40]
},
'RandomForestClassifier':
{ 'estimator__n_estimators': [5,10,20,40,80],
'estimator__criterion' : ['gini','entropy'],
'estimator__min_samples_split' : randint(1,10),
'estimator__min_samples_leaf' : randint(1,10),
'estimator__bootstrap': [True, False]
},
'SVC':
{ 'estimator__C': gamma(3,scale=1.5),
'estimator__gamma' : gamma(3,scale=.15)
},
'LogisticRegression':
{ 'estimator__penalty': ['l2'],#['l1','l2'],
'estimator__C' : gamma(3,scale=1.5),
'estimator__fit_intercept' : [True],#[True, False],
'estimator__intercept_scaling' : gamma(3,scale=1.5)
},
'BernoulliNB':
{ 'estimator__alpha': gamma(30,scale=30)#gamma(3,scale=3)
},
'RandomClassifier':{ },
'Popularity':{ }
}
# Storage of the optimal parameters found during previous runs of the algorithms
# This is only to speed up testing and evaluation, as well as to best allow
# others to replicate the conditions we used in the paper
optimal_params = {'CollaborativeFilter':
{'alpha': 1.0,
'beta' : 0.021,
'lambda_nu' : 1.5,
'lambda_b' : 0.06,
'num_latent' : 11
},
'RandomForestClassifier':
{ 'estimator__n_estimators': 80,
'estimator__criterion' : 'entropy',
'estimator__min_samples_split' : 7,
'estimator__min_samples_leaf' : 5,
'estimator__bootstrap': False
},
'SVC':
{ 'estimator__C': 5.0,
'estimator__gamma' : 0.8
},
'LogisticRegression':
{ 'estimator__penalty': 'l2',
'estimator__C' : .45,
'estimator__fit_intercept' : True,
'estimator__intercept_scaling' : 7.5
},
'BernoulliNB':
{ 'estimator__alpha': 100000 #4
},
'RandomClassifier':{ },
'Popularity':{ }
}
def run_classifier(clf,features,cases,bottom_inds,optimize_hyperparams=False):
clf_name = clf.__class__.__name__
cases = np.array(cases)
# Set up the cross_validation study
if clf_name == 'CollaborativeFilter':
cases = np.array(preprocess_recommendations(cases))
cy = [c[2] for c in cases]
cases = np.array(cases)
m_ind=cases[:,1]
else:
cy = cases[:,0]
# Pre-Run Hyperparameter Optimization
if optimize_hyperparams:
param_dist = hyper_params[clf_name]
else:
opt_param_dist = optimal_params[clf_name]
num_iterations = 1 if optimize_hyperparams else 100
shuffle = cross_validation.StratifiedShuffleSplit(y=cy,
n_iter=num_iterations,
test_size=0.1,
random_state=None)
scores =[]; Y_pred = []; Y_true = []; m_test_inds=[]
# Run study
for i,(train_index, test_index) in enumerate(shuffle):
# Separate training/test set
if (i%10)==0:
print ' CV#%d of %d...'%(i,num_iterations)
Y_train, Y_test = (cases[train_index],cases[test_index])
# Fit and predict using the models
if clf_name == 'CollaborativeFilter':
# Split the training data into X and y vectors
#Y_train = rebalance_cases(Y_train)
X_train = Y_train[:,:-1]
Y_train = Y_train[:,-1]
X_test = Y_test[:,:-1]
Y_test = Y_test[:,-1]
m_test_ind = m_ind[test_index]
m_test_inds.append(m_test_ind)
if optimize_hyperparams:
# Run Parameter Search
n_iter_search = 2
random_search = RandomizedSearchCV(clf, param_distributions=param_dist,
n_iter=n_iter_search,
scoring='average_precision',
n_jobs=3,
refit=True,
cv=4,
verbose=1
)
start = time()
random_search.fit(X_train,Y_train)
                print("RandomizedSearchCV took %.2f minutes for %d candidate"
                      " parameter settings." % ((time() - start)/60.0, n_iter_search))
opt_report(random_search.grid_scores_,n_top=10)
Y_hat=random_search.best_estimator_.predict_proba(X_test)
else:
                clf.set_params(**opt_param_dist)
clf.fit(X_train,Y_train)
Y_hat=clf.predict_proba(X_test)
else:
X_train, X_test = (features[train_index],features[test_index])
ovr = OneVsRestClassifier(clf)
if optimize_hyperparams:
n_iter_search = 400
random_search = RandomizedSearchCV(ovr, param_distributions=param_dist,
n_iter=n_iter_search,
# Average precisions scoring
# doesn't seem to work in
# multi-label case
#scoring='average_precision',
scoring='log_loss',
n_jobs=3,
refit=True,
cv=4,
verbose=1
)
start = time()
random_search.fit(X_train,Y_train)
                print("RandomizedSearchCV took %.2f minutes for %d candidate"
                      " parameter settings." % ((time() - start)/60.0, n_iter_search))
opt_report(random_search.grid_scores_,n_top=10)
#clf = random_search.best_estimator_
Y_hat=random_search.best_estimator_.predict_proba(X_test)
else:
ovr.set_params(**opt_param_dist)
ovr.fit(X_train,Y_train)
Y_hat = ovr.predict_proba(X_test)
#Y_hat = clf.predict_proba(X_test)
# Collect the results
Y_pred.append(Y_hat)
Y_true.append(Y_test)
Y_true=np.vstack(Y_true)
Y_pred=np.vstack(Y_pred)
# Now do the overall AUC scoring
print 'Generating bootstrap samples...'
A=np.vstack([Y_true.flatten(),Y_pred.flatten()])
A=A.transpose()
auc_scores=[]
for j in range(1000):
B=resample(A)
auc_scores.append(average_precision_score(B[:,0], B[:,1]))
auc_scores=np.array(auc_scores)
# Now just test PR on the k least popular methods
if re.search('CollaborativeFilter',clf_name):
m_test_inds=np.vstack(m_test_inds)
m_test_ind = m_test_inds.flatten()
ix=np.in1d(m_test_ind.ravel(), bottom_inds).reshape(m_test_ind.shape)
A = np.vstack([Y_true.flatten()[ix],Y_pred.flatten()[ix]])
else:
A=np.vstack([Y_true[:,bottom_inds].flatten(),Y_pred[:,bottom_inds].flatten()])
A=A.transpose()
bottom_k_auc_scores=[]
for j in range(1000):
B=resample(A)
bottom_k_auc_scores.append(average_precision_score(B[:,0], B[:,1]))
bottom_k_auc_scores=np.array(bottom_k_auc_scores)
return Y_pred,Y_true, auc_scores, bottom_k_auc_scores
def run_clustering(methods, cases):
true_method_groups = [m[1] for m in methods]
edge_model = GraphLassoCV(alphas=4, n_refinements=5, n_jobs=3, max_iter=100)
edge_model.fit(cases)
CV = edge_model.covariance_
num_clusters=3
spectral = SpectralClustering(n_clusters=num_clusters,affinity='precomputed')
spectral.fit(np.asarray(CV))
spec_sort=np.argsort(spectral.labels_)
for i,m in enumerate(methods):
print "%s:%d\t%s"%(m[1],spectral.labels_[i],m[0])
print "Adj. Rand Score: %f"%adjusted_rand_score(spectral.labels_,true_method_groups)
def run_method_usage(methods,cases):
methods = [m[0] for m in methods]
# Bootstrap the percentage error bars:
percents =[]
for i in range(10000):
nc = resample(cases)
percents.append(100*np.sum(nc,axis=0)/len(nc))
percents=np.array(percents)
mean_percents = np.mean(percents,axis=0)
std_percents = np.std(percents,axis=0)*1.96
inds=np.argsort(mean_percents).tolist()
inds.reverse()
avg_usage = np.mean(mean_percents)
fig = plt.figure()
ax = fig.add_subplot(111)
x=np.arange(len(methods))
ax.plot(x,[avg_usage]*len(methods),'-',color='0.25',lw=1,alpha=0.2)
ax.bar(x, mean_percents[inds], 0.6, color=paired[0],linewidth=0,
yerr=std_percents[inds],ecolor=paired[1])
#ax.set_title('Method Occurrence')
ax.set_ylabel('Occurrence %',fontsize=30)
ax.set_xlabel('Method',fontsize=30)
ax.set_xticks(np.arange(len(methods)))
ax.set_xticklabels(np.array(methods)[inds],fontsize=8)
fig.autofmt_xdate()
fix_axes()
plt.tight_layout()
fig.savefig(figure_path+'method_occurrence.pdf', bbox_inches=0)
fig.show()
return inds,mean_percents[inds]
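# Note: the error bars above are 1.96 * the bootstrap standard deviation of
# the occurrence percentages, i.e. an approximate 95% confidence interval
# per method.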
# main script generates results and plots
if __name__ == "__main__":
print_output = True
plot_output = True
k=10 # the number of least popular methods to include in the reduced PR part
data_path='../data/'
results_path='results/'
figure_path='figures/'
# Check if needed directories exist, if not, create it
for path in [data_path, results_path, figure_path]:
if not os.path.exists(path):
os.makedirs(path)
# Load data
print 'Loading data...'
story_text, methods, cases,case_categories = load_hcd_cases(data_path)
dataset=[s[1] for s in story_text]
vectorizer = TfidfVectorizer(max_df=0.5,stop_words='english')
X = vectorizer.fit_transform(dataset)
lsa = TruncatedSVD(n_components=50,algorithm='arpack')
X = lsa.fit_transform(X)
classifier_features = Normalizer(copy=False).fit_transform(X)
# Clustering Part
print 'Running Clustering...'
run_clustering(methods, cases)
# Method Usage:
print 'Running Method Usage...'
inds,method_freq=run_method_usage(methods, cases)
bottom_inds=list(reversed(inds))[0:k]
# Recommender System part
if(plot_output):
PR_fig = setup_plots()
# Specify the classifiers
clfs = [
BernoulliNB(alpha=0.001),
LogisticRegression(C=0.02, penalty='l1', tol=0.001),
svm.SVC(C=1,kernel='rbf',probability=True),
ensemble.RandomForestClassifier(),
CollaborativeFilter(categories=False),
Popularity(),
RandomClassifier(),
CollaborativeFilter(categories=case_categories)
]
#plot_ops = ['k:','k--','k-','k-.','r-','b-','g-']
plot_ops = [{'linewidth':8.0, 'linestyle':'-','color':PuBuGn4[1]},
{'linewidth':8.0, 'linestyle':'-','color':PuBuGn4[2]},
{'linewidth':8.0, 'linestyle':'-','color':PuBuGn4[3]},
{'linewidth':8.0, 'linestyle':'-','color':PuBuGn4[4]},
{'linewidth':3.0, 'linestyle':'-','color':PuBuGn4[4]},
{'linewidth':3.0, 'linestyle':'-','color':PuBuGn4[3]},
{'linewidth':3.0, 'linestyle':'-','color':PuBuGn4[2]},
{'linewidth':3.0, 'linestyle':'-','color':PuBuGn4[1]},
]
plt.rc('axes',color_cycle = paired)
# For each classifier
print 'Running Classifiers:'
for i,clf in enumerate(clfs):
clf_name=clf.__class__.__name__
if clf_name=='CollaborativeFilter' and (clf.categories is not False):
clf_name+='+'
print ' '+str(clf_name)+'...'
try:
# If we can load the existing data file, skip this classifier
(Y_hat,Y_true,auc_scores,bottom_k_auc_scores) = pickle.load(open(results_path+'clf_results_%s.pickle'%clf_name,'rb'))
continue
except IOError:
# We haven't generated results yet, so run the classifier
# and get the classifiers predictions
Y_hat, Y_true, auc_scores, bottom_k_auc_scores = run_classifier(clf,classifier_features,cases, bottom_inds,
optimize_hyperparams=False)
# Save the data for next time
print ' saving data...'
pickle.dump((Y_hat,Y_true,auc_scores,bottom_k_auc_scores),
open(results_path+'clf_results_%s.pickle'%clf_name,'wb'))
finally:
if(plot_output):
print ' plotting data...'
# Plot the Precision Recall Curve
# Scikit's Precision Recall
p,r,thresh = precision_recall_curve(Y_true.flatten(), Y_hat.flatten())
plt.plot(r,p,label=clf_name,**plot_ops[i])
# Now get AUC bounds via bootstrap resampling
print ' AUC bootstrap resampling...'
print("[%.3f,%.3f]: %s AUC 95 bounds"%(percentile(auc_scores,2.5),percentile(auc_scores,97.5),clf_name))
print("[%.3f,%.3f]: %s AUC 95 bounds - bottom %d methods"%(percentile(bottom_k_auc_scores,2.5),percentile(bottom_k_auc_scores,97.5),clf_name,k))
# Save and display the overall figure
if(plot_output):
#plt.legend(loc=1,fontsize=20)
fix_legend(handlelength=7)
fix_axes()
plt.ylim(0,1)
plt.xlim(0,1)
plt.hold(False)
plt.tight_layout()
PR_fig.savefig(figure_path+'precision_recall.pdf', bbox_inches=0)
PR_fig.show()
# Now print out all the results
print "Printing PR-AUC Results for each classifier..."
print "Classifier & All & Bottom10"
clfs_auc=[]
clfs_bauc=[]
classifier_names=[]
for clf in clfs:
clf_name=clf.__class__.__name__
if clf_name=='CollaborativeFilter' and (clf.categories is not False):
clf_name+='+'
classifier_names.append(clf_name)
(Y_hat,Y_true,auc_scores,bottom_k_auc_scores) = pickle.load(open(results_path+'clf_results_%s.pickle'%clf_name,'rb'))
print "%s & %.3f & %.3f"%(clf_name,np.mean(auc_scores),np.mean(bottom_k_auc_scores))
clfs_auc.append(auc_scores)
clfs_bauc.append(bottom_k_auc_scores)
clfs_auc=np.asarray(clfs_auc)
clfs_bauc=np.asarray(clfs_bauc)
classifier_names = np.asarray(classifier_names)
print "Plotting PR-AUC results"
order = [7,6,0,1,2,5,3,4]
width = 0.3 # the width of the bars
classifier_names = classifier_names[order]
plot_means = np.median(clfs_auc,axis=1)[order]
plot_bmeans = np.median(clfs_bauc,axis=1)[order]
plot_bars = np.array([abs(percentile(auc,[2.5,97.5]) - percentile(auc,50)) for auc in clfs_auc[order]]).transpose()
plot_bbars = np.array([abs(percentile(bauc,[2.5,97.5]) - percentile(bauc,50)) for bauc in clfs_bauc[order]]).transpose()
fig = plt.figure(figsize=(10,6))
ax = fig.add_subplot(111)
plt.hold(True)
ax.bar(np.arange(8),plot_means,width ,color=paired[1],label='All Methods',
yerr=plot_bars, ecolor='k',linewidth=0,align='center')
ax.bar(np.arange(8)+width, plot_bmeans, width,color=paired[0],label='Bottom 10',
yerr=plot_bbars, ecolor='k',linewidth=0,align='center')
ax.set_ylabel('PR AUC',fontsize=30)
ax.tick_params(axis='y', which='major', labelsize=25)
ax.set_xticks(np.arange(len(order))+width)
ax.set_xticklabels(classifier_names,fontsize=20)
fig.autofmt_xdate()
fix_legend()
#bbox_to_anchor=(1.0, 1.0)
fix_axes()
plt.tight_layout()
fig.savefig(figure_path+'auc_compare.pdf', bbox_inches=0)
fig.show()
|
IDEALLab/design_method_recommendation_JMD_2014
|
paper_experiments.py
|
Python
|
apache-2.0
| 22,845
|
[
"VisIt"
] |
5672af6e77a578d0265daf4fea151f003fade1e3cf933474b0853b41fb89512e
|
from __future__ import absolute_import
from __future__ import division
from builtins import zip
from builtins import object
from .. import xtals
from ..misc import *
def fake_properties_calc_json(poscar_file):
    """Create a fake properties.calc.json dictionary from a poscar.
Fills all the fields with ideal coordinates, no displacements,
and no energy.
Parameters
----------
poscar_file : path to file
Returns
-------
dict
"""
with open(poscar_file,'r') as posf:
pos_lines=posf.readlines()
faker={}
species_tups=xtals.crystal._vasp5_lines_species(pos_lines)
types,numbers=zip(*species_tups)
faker["atom_type"]=types
faker["atoms_per_type"]=numbers
if xtals.crystal._vasp5_lines_is_cartesian(pos_lines):
faker["coord_mode"]="cartesian"
else:
faker["coord_mode"]="direct"
coords,selectives=xtals.crystal._vasp5_lines_raw_coordinates(pos_lines)
faker["relaxed_basis"]=coords.tolist()
faker["relaxed_energy"]=0.0
blanks=(coords*0).tolist()
    faker["relaxed_forces"]=blanks
abc=xtals.crystal._vasp5_lines_to_abc(pos_lines)
faker["relaxed_lattice"]=abc.tolist()
return faker
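# Minimal usage sketch (hypothetical POSCAR path): the returned dict carries
# ideal coordinates with zeroed energy and forces, so it satisfies
# Properties._required_keys() and can seed an "unrelaxed" reference record:
#
#     fake = fake_properties_calc_json("POSCAR")
#     props = Properties(fake)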
class Properties(object):
"""A simple wrapper around a properties.calc.json dictionary
with routines to access the members. This is for a *single*
calculation, not something like a barrier, where there's a
collection of images."""
def __init__(self, properties,ignore=[]):
"""Initialize with the json dictionary
Parameters
----------
properties : dict
ignore : list of keys that you don't care about
"""
for p in self._required_keys():
if p not in properties.keys() and p not in ignore:
raise ValueError("Missing key '{}' in dictionary!".format(p))
self._properties = properties
return
@staticmethod
def _required_keys():
"""Returns all the keys that should exist in the
properties.calc.json files
Returns
-------
list of str
"""
return [
"atom_type", "atoms_per_type", "coord_mode", "relaxed_basis",
"relaxed_forces", "relaxed_lattice"
]
@classmethod
def from_json(cls, filename, ignore=[]):
"""Initialize instance with a filename
Parameters
----------
filename : string or path
ignore : list of keys you don't care about
Returns
-------
Properties
"""
        filedict = json_from_file(filename)
        return cls(filedict, ignore)
@classmethod
def fake(cls, posfile):
"""Construct a fake set of properties by assigning
a value of zero everywhere possible, only paying
attention to the atoms of the specified poscar
file
Parameters
----------
posfile : str or path
Returns
-------
Properties
"""
faked=fake_properties_calc_json(posfile)
return cls(faked)
def to_json(self, filename):
"""Save the properties to a file
Parameters
----------
filename : string or path
Returns
-------
void
"""
json_to_file(self._properties, filename)
def species(self):
"""Returns the names of the atom types as
a list
Returns
-------
list of str
"""
return self._properties["atom_type"]
def num_species(self):
"""Returns list of how many of each atom
there are
Returns
-------
np.array int
"""
return np.array(self._properties["atoms_per_type"])
def total_atoms(self):
"""Returns the total number of atoms in the simulation
Returns
-------
int
"""
return np.sum(self.num_species())
def compositions(self):
"""Returns list of atomic compositions
Returns
-------
list of float
"""
return self.num_species() / self.total_atoms()
def composition(self, specie):
"""Return the composition of the specified
specie
Parameters
----------
specie : str
Returns
-------
float
"""
        return self.compositions()[self.species().index(specie)]
def energy(self):
"""Return the calculated vasp energy
Returns
-------
float
"""
return self._properties["relaxed_energy"]
def as_dict(self):
"""Return all the data as a dictionary
Returns
-------
dict
"""
return self._properties
class BarrierProperties(object):
"""Combines a list of Properties into a single object
so that it can calculate kra values."""
def _atomic_sanity_throw(self):
"""Ensure that all images have the same number of
each type of atom
Returns
-------
void
"""
for p in ["atom_type", "atoms_per_type"]:
value = self._image_properties[0].as_dict()[p]
for props in self._image_properties[1::]:
compare = props.as_dict()[p]
if value != compare:
                raise ValueError(
                    "The property '{}' is inconsistent throughout the images!".format(p)
                )
return
def __init__(self, image_properties):
"""Initialize with a list of Properties, in the correct
interpolation order
Parameters
----------
image_properties : TODO
"""
self._image_properties = image_properties
self._atomic_sanity_throw()
@classmethod
def from_json(cls, filename, ignore=[]):
"""Initialize from a json file
Parameters
----------
filename : str or path
ignore : a list of keys you don't care about
Returns
-------
BarrierProperties
"""
filedict = json_from_file(filename)
        keys = sorted(filedict.keys())  # dict views have no .sort() on Python 3
props = [Properties(filedict[k],ignore) for k in keys if k.isdigit()]
return cls(props)
def species(self):
return self._image_properties[0].species()
def num_species(self):
return self._image_properties[0].num_species()
def total_atoms(self):
return self._image_properties[0].total_atoms()
def compositions(self):
return self._image_properties[0].compositions()
def composition(self,specie):
return self._image_properties[0].composition(specie)
def kra(self):
"""Calculate the KRA value. At the moment the only way
this is done is by assuming that the middle image has
the highest barrier.
Returns
-------
float
"""
# if len(self._image_properties) % 2 == 0:
# raise ValueError(
# "Under the current implementation, an odd number of images is required"
# "to calculate the KRA value.")
midpoint=(self._image_properties[0].energy()+self._image_properties[-1].energy())/2
energies=np.array([im.energy() for im in self._image_properties])
maxen=np.max(energies)
return maxen-midpoint
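# Worked example for kra(): with image energies [0.0, 0.4, 0.1] the endpoint
# midpoint is (0.0 + 0.1) / 2 = 0.05, the path maximum is 0.4, and kra()
# returns 0.4 - 0.05 = 0.35 (in whatever energy units the calculator used).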
|
goirijo/thermoplotting
|
thermoplotting/casmfiles/properties.py
|
Python
|
mit
| 7,461
|
[
"CRYSTAL",
"VASP"
] |
cf5692c480442ab0105bb0c1e7cc1342611821dd03261990ba13d2039ab782ab
|
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Matti Hamalainen <msh@nmr.mgh.harvard.edu>
#
# License: BSD (3-clause)
from copy import deepcopy
from functools import partial
from gzip import GzipFile
import os
import os.path as op
import numpy as np
from scipy import sparse, linalg
from .io.constants import FIFF
from .io.meas_info import create_info
from .io.tree import dir_tree_find
from .io.tag import find_tag, read_tag
from .io.open import fiff_open
from .io.write import (start_block, end_block, write_int,
write_float_sparse_rcs, write_string,
write_float_matrix, write_int_matrix,
write_coord_trans, start_file, end_file, write_id)
from .bem import read_bem_surfaces, ConductorModel
from .surface import (read_surface, _create_surf_spacing, _get_ico_surface,
_tessellate_sphere_surf, _get_surf_neighbors,
_normalize_vectors, _get_solids, _triangle_neighbors,
complete_surface_info, _compute_nearest, fast_cross_3d,
mesh_dist)
from .utils import (get_subjects_dir, run_subprocess, has_freesurfer,
has_nibabel, check_fname, logger, verbose,
check_version, _get_call_line, warn, _check_fname)
from .parallel import parallel_func, check_n_jobs
from .transforms import (invert_transform, apply_trans, _print_coord_trans,
combine_transforms, _get_trans,
_coord_frame_name, Transform, _str_to_frame,
_ensure_trans, _read_fs_xfm)
from .externals.six import string_types
def _get_lut():
"""Get the FreeSurfer LUT."""
data_dir = op.join(op.dirname(__file__), 'data')
lut_fname = op.join(data_dir, 'FreeSurferColorLUT.txt')
dtype = [('id', '<i8'), ('name', 'U47'),
('R', '<i8'), ('G', '<i8'), ('B', '<i8'), ('A', '<i8')]
return np.genfromtxt(lut_fname, dtype=dtype)
def _get_lut_id(lut, label, use_lut):
"""Convert a label to a LUT ID number."""
if not use_lut:
return 1
assert isinstance(label, string_types)
mask = (lut['name'] == label)
assert mask.sum() == 1
return lut['id'][mask]
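# e.g. _get_lut_id(lut, 'Left-Hippocampus', True) returns that structure's
# numeric FreeSurfer ID from the lookup table, while use_lut=False collapses
# every label to 1.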
_src_kind_dict = {
'vol': 'volume',
'surf': 'surface',
'discrete': 'discrete',
}
class SourceSpaces(list):
    """Represent a list of source spaces.
Currently implemented as a list of dictionaries containing the source
space information
Parameters
----------
source_spaces : list
A list of dictionaries containing the source space information.
info : dict
Dictionary with information about the creation of the source space
file. Has keys 'working_dir' and 'command_line'.
Attributes
----------
info : dict
Dictionary with information about the creation of the source space
file. Has keys 'working_dir' and 'command_line'.
"""
def __init__(self, source_spaces, info=None): # noqa: D102
super(SourceSpaces, self).__init__(source_spaces)
if info is None:
self.info = dict()
else:
self.info = dict(info)
@verbose
def plot(self, head=False, brain=None, skull=None, subjects_dir=None,
trans=None, verbose=None):
"""Plot the source space.
Parameters
----------
head : bool
If True, show head surface.
brain : bool | str
If True, show the brain surfaces. Can also be a str for
surface type (e.g., 'pial', same as True). Default is None,
which means 'white' for surface source spaces and False otherwise.
skull : bool | str | list of str | list of dict | None
Whether to plot skull surface. If string, common choices would be
'inner_skull', or 'outer_skull'. Can also be a list to plot
multiple skull surfaces. If a list of dicts, each dict must
contain the complete surface info (such as you get from
:func:`mne.make_bem_model`). True is an alias of 'outer_skull'.
The subjects bem and bem/flash folders are searched for the 'surf'
files. Defaults to None, which is False for surface source spaces,
and True otherwise.
subjects_dir : string, or None
Path to SUBJECTS_DIR if it is not set in the environment.
trans : str | 'auto' | dict | None
The full path to the head<->MRI transform ``*-trans.fif`` file
produced during coregistration. If trans is None, an identity
matrix is assumed. This is only needed when the source space is in
head coordinates.
verbose : bool, str, int, or None
If not None, override default verbose level (see
:func:`mne.verbose` and :ref:`Logging documentation <tut_logging>`
for more).
Returns
-------
fig : instance of mlab Figure
The figure.
"""
from .viz import plot_alignment
surfaces = list()
bem = None
if brain is None:
brain = 'white' if any(ss['type'] == 'surf'
for ss in self) else False
if isinstance(brain, string_types):
surfaces.append(brain)
elif brain:
surfaces.append('brain')
if skull is None:
skull = False if self.kind == 'surface' else True
if isinstance(skull, string_types):
surfaces.append(skull)
elif skull is True:
surfaces.append('outer_skull')
elif skull is not False: # list
if isinstance(skull[0], dict): # bem
skull_map = {FIFF.FIFFV_BEM_SURF_ID_BRAIN: 'inner_skull',
FIFF.FIFFV_BEM_SURF_ID_SKULL: 'outer_skull',
FIFF.FIFFV_BEM_SURF_ID_HEAD: 'outer_skin'}
for this_skull in skull:
surfaces.append(skull_map[this_skull['id']])
bem = skull
else: # list of str
for surf in skull:
surfaces.append(surf)
if head:
surfaces.append('head')
if self[0]['coord_frame'] == FIFF.FIFFV_COORD_HEAD:
coord_frame = 'head'
if trans is None:
raise ValueError('Source space is in head coordinates, but no '
'head<->MRI transform was given. Please '
'specify the full path to the appropriate '
'*-trans.fif file as the "trans" parameter.')
else:
coord_frame = 'mri'
info = create_info(0, 1000., 'eeg')
return plot_alignment(
info, trans=trans, subject=self[0]['subject_his_id'],
subjects_dir=subjects_dir, surfaces=surfaces,
coord_frame=coord_frame, meg=(), eeg=False, dig=False, ecog=False,
bem=bem, src=self
)
def __repr__(self): # noqa: D105
ss_repr = []
for ss in self:
ss_type = ss['type']
r = _src_kind_dict[ss_type]
if ss_type == 'vol':
if 'seg_name' in ss:
r += " (%s)" % (ss['seg_name'],)
else:
r += ", shape=%s" % (ss['shape'],)
elif ss_type == 'surf':
r += (" (%s), n_vertices=%i" % (_get_hemi(ss)[0], ss['np']))
r += (', n_used=%i, coordinate_frame=%s'
% (ss['nuse'], _coord_frame_name(int(ss['coord_frame']))))
ss_repr.append('<%s>' % r)
return "<SourceSpaces: [%s]>" % ', '.join(ss_repr)
@property
def kind(self):
"""The kind of source space (surface, volume, discrete, mixed)."""
ss_types = list(set([ss['type'] for ss in self]))
if len(ss_types) != 1:
return 'mixed'
return _src_kind_dict[ss_types[0]]
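    # e.g. a SourceSpaces holding one 'surf' and one 'vol' entry reports
    # kind == 'mixed'; a list of purely 'surf' entries reports 'surface'.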
def __add__(self, other):
"""Combine source spaces."""
return SourceSpaces(list.__add__(self, other))
def copy(self):
"""Make a copy of the source spaces.
Returns
-------
src : instance of SourceSpaces
The copied source spaces.
"""
src = deepcopy(self)
return src
def save(self, fname, overwrite=False):
"""Save the source spaces to a fif file.
Parameters
----------
fname : str
File to write.
overwrite : bool
If True, the destination file (if it exists) will be overwritten.
If False (default), an error will be raised if the file exists.
"""
write_source_spaces(fname, self, overwrite)
@verbose
def export_volume(self, fname, include_surfaces=True,
include_discrete=True, dest='mri', trans=None,
mri_resolution=False, use_lut=True, verbose=None):
"""Export source spaces to nifti or mgz file.
Parameters
----------
fname : str
Name of nifti or mgz file to write.
include_surfaces : bool
If True, include surface source spaces.
include_discrete : bool
If True, include discrete source spaces.
dest : 'mri' | 'surf'
If 'mri' the volume is defined in the coordinate system of the
original T1 image. If 'surf' the coordinate system of the
FreeSurfer surface is used (Surface RAS).
trans : dict, str, or None
Either a transformation filename (usually made using mne_analyze)
or an info dict (usually opened using read_trans()).
If string, an ending of `.fif` or `.fif.gz` will be assumed to be
in FIF format, any other ending will be assumed to be a text file
            with a 4x4 transformation matrix (like the `--trans` MNE-C option).
Must be provided if source spaces are in head coordinates and
include_surfaces and mri_resolution are True.
mri_resolution : bool
If True, the image is saved in MRI resolution
(e.g. 256 x 256 x 256).
use_lut : bool
If True, assigns a numeric value to each source space that
corresponds to a color on the freesurfer lookup table.
verbose : bool, str, int, or None
If not None, override default verbose level (see
:func:`mne.verbose` and :ref:`Logging documentation <tut_logging>`
for more).
Notes
-----
This method requires nibabel.
"""
# import nibabel or raise error
try:
import nibabel as nib
except ImportError:
raise ImportError('This function requires nibabel.')
# Check coordinate frames of each source space
coord_frames = np.array([s['coord_frame'] for s in self])
# Raise error if trans is not provided when head coordinates are used
# and mri_resolution and include_surfaces are true
if (coord_frames == FIFF.FIFFV_COORD_HEAD).all():
coords = 'head' # all sources in head coordinates
if mri_resolution and include_surfaces:
if trans is None:
raise ValueError('trans containing mri to head transform '
'must be provided if mri_resolution and '
'include_surfaces are true and surfaces '
'are in head coordinates')
elif trans is not None:
logger.info('trans is not needed and will not be used unless '
'include_surfaces and mri_resolution are True.')
elif (coord_frames == FIFF.FIFFV_COORD_MRI).all():
coords = 'mri' # all sources in mri coordinates
if trans is not None:
logger.info('trans is not needed and will not be used unless '
'sources are in head coordinates.')
# Raise error if all sources are not in the same space, or sources are
# not in mri or head coordinates
else:
raise ValueError('All sources must be in head coordinates or all '
'sources must be in mri coordinates.')
# use lookup table to assign values to source spaces
logger.info('Reading FreeSurfer lookup table')
# read the lookup table
lut = _get_lut()
# Setup a dictionary of source types
src_types = dict(volume=[], surface=[], discrete=[])
# Populate dictionary of source types
for src in self:
# volume sources
if src['type'] == 'vol':
src_types['volume'].append(src)
# surface sources
elif src['type'] == 'surf':
src_types['surface'].append(src)
# discrete sources
elif src['type'] == 'discrete':
src_types['discrete'].append(src)
# raise an error if dealing with source type other than volume
# surface or discrete
else:
raise ValueError('Unrecognized source type: %s.' % src['type'])
# Get shape, inuse array and interpolation matrix from volume sources
inuse = 0
for ii, vs in enumerate(src_types['volume']):
# read the lookup table value for segmented volume
if 'seg_name' not in vs:
raise ValueError('Volume sources should be segments, '
'not the entire volume.')
# find the color value for this volume
id_ = _get_lut_id(lut, vs['seg_name'], use_lut)
if ii == 0:
# get the inuse array
if mri_resolution:
# read the mri file used to generate volumes
aseg_data = nib.load(vs['mri_file']).get_data()
# get the voxel space shape
shape3d = (vs['mri_height'], vs['mri_depth'],
vs['mri_width'])
else:
# get the volume source space shape
# read the shape in reverse order
# (otherwise results are scrambled)
shape3d = vs['shape'][2::-1]
if mri_resolution:
# get the values for this volume
use = id_ * (aseg_data == id_).astype(int).ravel('F')
else:
use = id_ * vs['inuse']
inuse += use
# Raise error if there are no volume source spaces
if np.array(inuse).ndim == 0:
raise ValueError('Source spaces must contain at least one volume.')
# create 3d grid in the MRI_VOXEL coordinate frame
# len of inuse array should match shape regardless of mri_resolution
assert len(inuse) == np.prod(shape3d)
# setup the image in 3d space
img = inuse.reshape(shape3d).T
# include surface and/or discrete source spaces
if include_surfaces or include_discrete:
# setup affine transform for source spaces
if mri_resolution:
# get the MRI to MRI_VOXEL transform
affine = invert_transform(vs['vox_mri_t'])
else:
# get the MRI to SOURCE (MRI_VOXEL) transform
affine = invert_transform(vs['src_mri_t'])
# modify affine if in head coordinates
if coords == 'head':
# read mri -> head transformation
mri_head_t = _get_trans(trans)[0]
# get the HEAD to MRI transform
head_mri_t = invert_transform(mri_head_t)
# combine transforms, from HEAD to MRI_VOXEL
affine = combine_transforms(head_mri_t, affine,
'head', 'mri_voxel')
# loop through the surface source spaces
if include_surfaces:
            # get the surface names (assumes left, right order; we may want
            # to add these names during source space generation)
surf_names = ['Left-Cerebral-Cortex', 'Right-Cerebral-Cortex']
for i, surf in enumerate(src_types['surface']):
# convert vertex positions from their native space
# (either HEAD or MRI) to MRI_VOXEL space
srf_rr = apply_trans(affine['trans'], surf['rr'])
# convert to numeric indices
ix_orig, iy_orig, iz_orig = srf_rr.T.round().astype(int)
# clip indices outside of volume space
ix_clip = np.maximum(np.minimum(ix_orig, shape3d[2] - 1),
0)
iy_clip = np.maximum(np.minimum(iy_orig, shape3d[1] - 1),
0)
iz_clip = np.maximum(np.minimum(iz_orig, shape3d[0] - 1),
0)
# compare original and clipped indices
n_diff = np.array((ix_orig != ix_clip, iy_orig != iy_clip,
iz_orig != iz_clip)).any(0).sum()
# generate use warnings for clipping
if n_diff > 0:
                    warn('%s surface vertices lie outside of volume space.'
                         ' Consider using a larger volume space.' % n_diff)
# get surface id or use default value
i = _get_lut_id(lut, surf_names[i], use_lut)
# update image to include surface voxels
img[ix_clip, iy_clip, iz_clip] = i
# loop through discrete source spaces
if include_discrete:
for i, disc in enumerate(src_types['discrete']):
# convert vertex positions from their native space
# (either HEAD or MRI) to MRI_VOXEL space
disc_rr = apply_trans(affine['trans'], disc['rr'])
# convert to numeric indices
ix_orig, iy_orig, iz_orig = disc_rr.T.astype(int)
# clip indices outside of volume space
ix_clip = np.maximum(np.minimum(ix_orig, shape3d[2] - 1),
0)
iy_clip = np.maximum(np.minimum(iy_orig, shape3d[1] - 1),
0)
iz_clip = np.maximum(np.minimum(iz_orig, shape3d[0] - 1),
0)
# compare original and clipped indices
n_diff = np.array((ix_orig != ix_clip, iy_orig != iy_clip,
iz_orig != iz_clip)).any(0).sum()
# generate use warnings for clipping
if n_diff > 0:
                    warn('%s discrete vertices lie outside of volume '
                         'space. Consider using a larger volume space.'
                         % n_diff)
# set default value
img[ix_clip, iy_clip, iz_clip] = 1
if use_lut:
logger.info('Discrete sources do not have values on '
'the lookup table. Defaulting to 1.')
# calculate affine transform for image (MRI_VOXEL to RAS)
if mri_resolution:
# MRI_VOXEL to MRI transform
transform = vs['vox_mri_t'].copy()
else:
# MRI_VOXEL to MRI transform
# NOTE: 'src' indicates downsampled version of MRI_VOXEL
transform = vs['src_mri_t'].copy()
if dest == 'mri':
# combine with MRI to RAS transform
transform = combine_transforms(transform, vs['mri_ras_t'],
transform['from'],
vs['mri_ras_t']['to'])
# now setup the affine for volume image
affine = transform['trans']
# make sure affine converts from m to mm
affine[:3] *= 1e3
# save volume data
# setup image for file
        if fname.endswith(('.nii', '.nii.gz')):  # save as nifti
# setup the nifti header
hdr = nib.Nifti1Header()
hdr.set_xyzt_units('mm')
# save the nifti image
img = nib.Nifti1Image(img, affine, header=hdr)
elif fname.endswith('.mgz'): # save as mgh
# convert to float32 (float64 not currently supported)
img = img.astype('float32')
# save the mgh image
img = nib.freesurfer.mghformat.MGHImage(img, affine)
else:
            raise ValueError('Unrecognized file extension')
# write image to file
nib.save(img, fname)
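# Example usage of SourceSpaces.export_volume (a minimal sketch, not part of
# the module; 'mixed-src.fif' and 'src_vol.nii' are hypothetical file names,
# and the source spaces must contain at least one volume source space):
#
#     src = read_source_spaces('mixed-src.fif')
#     src.export_volume('src_vol.nii', mri_resolution=True)
#
# A trans file is only required when the sources are in head coordinates and
# both include_surfaces and mri_resolution are True.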
def _add_patch_info(s):
"""Patch information in a source space.
Generate the patch information from the 'nearest' vector in
a source space. For vertex in the source space it provides
the list of neighboring vertices in the high resolution
triangulation.
Parameters
----------
s : dict
The source space.
"""
nearest = s['nearest']
if nearest is None:
s['pinfo'] = None
s['patch_inds'] = None
return
logger.info(' Computing patch statistics...')
indn = np.argsort(nearest)
nearest_sorted = nearest[indn]
steps = np.where(nearest_sorted[1:] != nearest_sorted[:-1])[0] + 1
starti = np.r_[[0], steps]
stopi = np.r_[steps, [len(nearest)]]
pinfo = list()
for start, stop in zip(starti, stopi):
pinfo.append(np.sort(indn[start:stop]))
s['pinfo'] = pinfo
# compute patch indices of the in-use source space vertices
patch_verts = nearest_sorted[steps - 1]
s['patch_inds'] = np.searchsorted(patch_verts, s['vertno'])
logger.info(' Patch information added...')
@verbose
def _read_source_spaces_from_tree(fid, tree, patch_stats=False,
verbose=None):
"""Read the source spaces from a FIF file.
Parameters
----------
fid : file descriptor
An open file descriptor.
tree : dict
The FIF tree structure if source is a file id.
patch_stats : bool, optional (default False)
Calculate and add cortical patch statistics to the surfaces.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
src : SourceSpaces
The source spaces.
"""
# Find all source spaces
spaces = dir_tree_find(tree, FIFF.FIFFB_MNE_SOURCE_SPACE)
if len(spaces) == 0:
raise ValueError('No source spaces found')
src = list()
for s in spaces:
logger.info(' Reading a source space...')
this = _read_one_source_space(fid, s)
logger.info(' [done]')
if patch_stats:
_complete_source_space_info(this)
src.append(this)
logger.info(' %d source spaces read' % len(spaces))
return SourceSpaces(src)
@verbose
def read_source_spaces(fname, patch_stats=False, verbose=None):
"""Read the source spaces from a FIF file.
Parameters
----------
fname : str
The name of the file, which should end with -src.fif or
-src.fif.gz.
patch_stats : bool, optional (default False)
Calculate and add cortical patch statistics to the surfaces.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
src : SourceSpaces
The source spaces.
See Also
--------
write_source_spaces, setup_source_space, setup_volume_source_space
"""
# be more permissive on read than write (fwd/inv can contain src)
check_fname(fname, 'source space', ('-src.fif', '-src.fif.gz',
'_src.fif', '_src.fif.gz',
'-fwd.fif', '-fwd.fif.gz',
'_fwd.fif', '_fwd.fif.gz',
'-inv.fif', '-inv.fif.gz',
'_inv.fif', '_inv.fif.gz'))
ff, tree, _ = fiff_open(fname)
with ff as fid:
src = _read_source_spaces_from_tree(fid, tree, patch_stats=patch_stats,
verbose=verbose)
src.info['fname'] = fname
node = dir_tree_find(tree, FIFF.FIFFB_MNE_ENV)
if node:
node = node[0]
for p in range(node['nent']):
kind = node['directory'][p].kind
pos = node['directory'][p].pos
tag = read_tag(fid, pos)
if kind == FIFF.FIFF_MNE_ENV_WORKING_DIR:
src.info['working_dir'] = tag.data
elif kind == FIFF.FIFF_MNE_ENV_COMMAND_LINE:
src.info['command_line'] = tag.data
return src
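# Example usage (a minimal sketch; 'sample-src.fif' is a hypothetical file
# previously written with write_source_spaces):
#
#     src = read_source_spaces('sample-src.fif')
#     print(src)       # per-hemisphere / per-volume summary
#     print(src.kind)  # 'surface', 'volume', 'discrete', or 'mixed'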
@verbose
def _read_one_source_space(fid, this, verbose=None):
"""Read one source space."""
FIFF_BEM_SURF_NTRI = 3104
FIFF_BEM_SURF_TRIANGLES = 3106
res = dict()
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_ID)
if tag is None:
res['id'] = int(FIFF.FIFFV_MNE_SURF_UNKNOWN)
else:
res['id'] = int(tag.data)
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_TYPE)
if tag is None:
raise ValueError('Unknown source space type')
else:
src_type = int(tag.data)
if src_type == FIFF.FIFFV_MNE_SPACE_SURFACE:
res['type'] = 'surf'
elif src_type == FIFF.FIFFV_MNE_SPACE_VOLUME:
res['type'] = 'vol'
elif src_type == FIFF.FIFFV_MNE_SPACE_DISCRETE:
res['type'] = 'discrete'
else:
raise ValueError('Unknown source space type (%d)' % src_type)
if res['type'] == 'vol':
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_VOXEL_DIMS)
if tag is not None:
res['shape'] = tuple(tag.data)
tag = find_tag(fid, this, FIFF.FIFF_COORD_TRANS)
if tag is not None:
res['src_mri_t'] = tag.data
parent_mri = dir_tree_find(this, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
if len(parent_mri) == 0:
# MNE 2.7.3 (and earlier) didn't store necessary information
# about volume coordinate translations. Although there is a
            # FIFF_COORD_TRANS in the higher level of the FIFF file, this
# doesn't contain all the info we need. Safer to return an
# error unless a user really wants us to add backward compat.
raise ValueError('Can not find parent MRI location. The volume '
'source space may have been made with an MNE '
'version that is too old (<= 2.7.3). Consider '
'updating and regenerating the inverse.')
mri = parent_mri[0]
for d in mri['directory']:
if d.kind == FIFF.FIFF_COORD_TRANS:
tag = read_tag(fid, d.pos)
trans = tag.data
if trans['from'] == FIFF.FIFFV_MNE_COORD_MRI_VOXEL:
res['vox_mri_t'] = tag.data
if trans['to'] == FIFF.FIFFV_MNE_COORD_RAS:
res['mri_ras_t'] = tag.data
tag = find_tag(fid, mri, FIFF.FIFF_MNE_SOURCE_SPACE_INTERPOLATOR)
if tag is not None:
res['interpolator'] = tag.data
else:
logger.info("Interpolation matrix for MRI not found.")
tag = find_tag(fid, mri, FIFF.FIFF_MNE_SOURCE_SPACE_MRI_FILE)
if tag is not None:
res['mri_file'] = tag.data
tag = find_tag(fid, mri, FIFF.FIFF_MRI_WIDTH)
if tag is not None:
res['mri_width'] = int(tag.data)
tag = find_tag(fid, mri, FIFF.FIFF_MRI_HEIGHT)
if tag is not None:
res['mri_height'] = int(tag.data)
tag = find_tag(fid, mri, FIFF.FIFF_MRI_DEPTH)
if tag is not None:
res['mri_depth'] = int(tag.data)
tag = find_tag(fid, mri, FIFF.FIFF_MNE_FILE_NAME)
if tag is not None:
res['mri_volume_name'] = tag.data
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NNEIGHBORS)
if tag is not None:
nneighbors = tag.data
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NEIGHBORS)
offset = 0
neighbors = []
for n in nneighbors:
neighbors.append(tag.data[offset:offset + n])
offset += n
res['neighbor_vert'] = neighbors
tag = find_tag(fid, this, FIFF.FIFF_COMMENT)
if tag is not None:
res['seg_name'] = tag.data
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS)
if tag is None:
raise ValueError('Number of vertices not found')
res['np'] = int(tag.data)
tag = find_tag(fid, this, FIFF_BEM_SURF_NTRI)
if tag is None:
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NTRI)
if tag is None:
res['ntri'] = 0
else:
res['ntri'] = int(tag.data)
else:
res['ntri'] = tag.data
tag = find_tag(fid, this, FIFF.FIFF_MNE_COORD_FRAME)
if tag is None:
raise ValueError('Coordinate frame information not found')
res['coord_frame'] = tag.data[0]
# Vertices, normals, and triangles
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_POINTS)
if tag is None:
raise ValueError('Vertex data not found')
res['rr'] = tag.data.astype(np.float) # double precision for mayavi
if res['rr'].shape[0] != res['np']:
raise ValueError('Vertex information is incorrect')
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NORMALS)
if tag is None:
raise ValueError('Vertex normals not found')
res['nn'] = tag.data.copy()
if res['nn'].shape[0] != res['np']:
raise ValueError('Vertex normal information is incorrect')
if res['ntri'] > 0:
tag = find_tag(fid, this, FIFF_BEM_SURF_TRIANGLES)
if tag is None:
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_TRIANGLES)
if tag is None:
raise ValueError('Triangulation not found')
else:
res['tris'] = tag.data - 1 # index start at 0 in Python
else:
res['tris'] = tag.data - 1 # index start at 0 in Python
if res['tris'].shape[0] != res['ntri']:
raise ValueError('Triangulation information is incorrect')
else:
res['tris'] = None
# Which vertices are active
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NUSE)
if tag is None:
res['nuse'] = 0
res['inuse'] = np.zeros(res['nuse'], dtype=np.int)
res['vertno'] = None
else:
res['nuse'] = int(tag.data)
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_SELECTION)
if tag is None:
raise ValueError('Source selection information missing')
res['inuse'] = tag.data.astype(np.int).T
if len(res['inuse']) != res['np']:
raise ValueError('Incorrect number of entries in source space '
'selection')
res['vertno'] = np.where(res['inuse'])[0]
# Use triangulation
tag1 = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NUSE_TRI)
tag2 = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_USE_TRIANGLES)
if tag1 is None or tag2 is None:
res['nuse_tri'] = 0
res['use_tris'] = None
else:
res['nuse_tri'] = tag1.data
res['use_tris'] = tag2.data - 1 # index start at 0 in Python
# Patch-related information
tag1 = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST)
tag2 = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST_DIST)
if tag1 is None or tag2 is None:
res['nearest'] = None
res['nearest_dist'] = None
else:
res['nearest'] = tag1.data
res['nearest_dist'] = tag2.data.T
_add_patch_info(res)
# Distances
tag1 = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_DIST)
tag2 = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_DIST_LIMIT)
if tag1 is None or tag2 is None:
res['dist'] = None
res['dist_limit'] = None
else:
res['dist'] = tag1.data
res['dist_limit'] = tag2.data
# Add the upper triangle
res['dist'] = res['dist'] + res['dist'].T
if (res['dist'] is not None):
logger.info(' Distance information added...')
tag = find_tag(fid, this, FIFF.FIFF_SUBJ_HIS_ID)
if tag is None:
res['subject_his_id'] = None
else:
res['subject_his_id'] = tag.data
return res
@verbose
def _complete_source_space_info(this, verbose=None):
"""Add more info on surface."""
# Main triangulation
logger.info(' Completing triangulation info...')
this['tri_area'] = np.zeros(this['ntri'])
r1 = this['rr'][this['tris'][:, 0], :]
r2 = this['rr'][this['tris'][:, 1], :]
r3 = this['rr'][this['tris'][:, 2], :]
this['tri_cent'] = (r1 + r2 + r3) / 3.0
this['tri_nn'] = fast_cross_3d((r2 - r1), (r3 - r1))
this['tri_area'] = _normalize_vectors(this['tri_nn']) / 2.0
logger.info('[done]')
# Selected triangles
logger.info(' Completing selection triangulation info...')
if this['nuse_tri'] > 0:
r1 = this['rr'][this['use_tris'][:, 0], :]
r2 = this['rr'][this['use_tris'][:, 1], :]
r3 = this['rr'][this['use_tris'][:, 2], :]
this['use_tri_cent'] = (r1 + r2 + r3) / 3.0
this['use_tri_nn'] = fast_cross_3d((r2 - r1), (r3 - r1))
this['use_tri_area'] = np.linalg.norm(this['use_tri_nn'], axis=1) / 2.
logger.info('[done]')
def find_source_space_hemi(src):
"""Return the hemisphere id for a source space.
Parameters
----------
src : dict
The source space to investigate
Returns
-------
hemi : int
Deduced hemisphere id
"""
xave = src['rr'][:, 0].sum()
if xave < 0:
hemi = int(FIFF.FIFFV_MNE_SURF_LEFT_HEMI)
else:
hemi = int(FIFF.FIFFV_MNE_SURF_RIGHT_HEMI)
return hemi
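# Example (sketch; 'src' is assumed to be one source-space dict, e.g.
# read_source_spaces(fname)[0]):
#
#     hemi = find_source_space_hemi(src)
#     # FIFFV_MNE_SURF_LEFT_HEMI if the mean x coordinate of the vertices
#     # is negative, FIFFV_MNE_SURF_RIGHT_HEMI otherwise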
def label_src_vertno_sel(label, src):
"""Find vertex numbers and indices from label.
Parameters
----------
label : Label
Source space label
src : dict
Source space
Returns
-------
vertices : list of length 2
Vertex numbers for lh and rh
src_sel : array of int (len(idx) = len(vertices[0]) + len(vertices[1]))
        Indices of the selected vertices in source space
"""
if src[0]['type'] != 'surf':
        raise Exception('Labels are only supported with surface source '
                        'spaces')
vertno = [src[0]['vertno'], src[1]['vertno']]
if label.hemi == 'lh':
vertno_sel = np.intersect1d(vertno[0], label.vertices)
src_sel = np.searchsorted(vertno[0], vertno_sel)
vertno[0] = vertno_sel
vertno[1] = np.array([], int)
elif label.hemi == 'rh':
vertno_sel = np.intersect1d(vertno[1], label.vertices)
src_sel = np.searchsorted(vertno[1], vertno_sel) + len(vertno[0])
vertno[0] = np.array([], int)
vertno[1] = vertno_sel
elif label.hemi == 'both':
vertno_sel_lh = np.intersect1d(vertno[0], label.lh.vertices)
src_sel_lh = np.searchsorted(vertno[0], vertno_sel_lh)
vertno_sel_rh = np.intersect1d(vertno[1], label.rh.vertices)
src_sel_rh = np.searchsorted(vertno[1], vertno_sel_rh) + len(vertno[0])
src_sel = np.hstack((src_sel_lh, src_sel_rh))
vertno = [vertno_sel_lh, vertno_sel_rh]
else:
raise Exception("Unknown hemisphere type")
return vertno, src_sel
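# Example (sketch; 'label' and 'src' are hypothetical, e.g. a Label from
# mne.read_label and a surface SourceSpaces instance):
#
#     vertices, src_sel = label_src_vertno_sel(label, src)
#     lh_vertno, rh_vertno = vertices
#     # src_sel indexes the rows of data defined on src that fall in label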
def _get_vertno(src):
return [s['vertno'] for s in src]
###############################################################################
# Write routines
@verbose
def _write_source_spaces_to_fid(fid, src, verbose=None):
"""Write the source spaces to a FIF file.
Parameters
----------
fid : file descriptor
An open file descriptor.
src : list
The list of source spaces.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
"""
for s in src:
logger.info(' Write a source space...')
start_block(fid, FIFF.FIFFB_MNE_SOURCE_SPACE)
_write_one_source_space(fid, s, verbose)
end_block(fid, FIFF.FIFFB_MNE_SOURCE_SPACE)
logger.info(' [done]')
logger.info(' %d source spaces written' % len(src))
@verbose
def write_source_spaces(fname, src, overwrite=False, verbose=None):
"""Write source spaces to a file.
Parameters
----------
fname : str
The name of the file, which should end with -src.fif or
-src.fif.gz.
src : SourceSpaces
The source spaces (as returned by read_source_spaces).
overwrite : bool
If True, the destination file (if it exists) will be overwritten.
If False (default), an error will be raised if the file exists.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
See Also
--------
read_source_spaces
"""
check_fname(fname, 'source space', ('-src.fif', '-src.fif.gz',
'_src.fif', '_src.fif.gz'))
_check_fname(fname, overwrite=overwrite)
fid = start_file(fname)
start_block(fid, FIFF.FIFFB_MNE)
if src.info:
start_block(fid, FIFF.FIFFB_MNE_ENV)
write_id(fid, FIFF.FIFF_BLOCK_ID)
data = src.info.get('working_dir', None)
if data:
write_string(fid, FIFF.FIFF_MNE_ENV_WORKING_DIR, data)
data = src.info.get('command_line', None)
if data:
write_string(fid, FIFF.FIFF_MNE_ENV_COMMAND_LINE, data)
end_block(fid, FIFF.FIFFB_MNE_ENV)
_write_source_spaces_to_fid(fid, src, verbose)
end_block(fid, FIFF.FIFFB_MNE)
end_file(fid)
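# Round-trip example (sketch; 'sample-src.fif' is a hypothetical file name
# and 'src' an existing SourceSpaces instance):
#
#     write_source_spaces('sample-src.fif', src, overwrite=True)
#     src_2 = read_source_spaces('sample-src.fif')
#     assert len(src_2) == len(src)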
def _write_one_source_space(fid, this, verbose=None):
"""Write one source space."""
if this['type'] == 'surf':
src_type = FIFF.FIFFV_MNE_SPACE_SURFACE
elif this['type'] == 'vol':
src_type = FIFF.FIFFV_MNE_SPACE_VOLUME
elif this['type'] == 'discrete':
src_type = FIFF.FIFFV_MNE_SPACE_DISCRETE
else:
raise ValueError('Unknown source space type (%s)' % this['type'])
write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_TYPE, src_type)
if this['id'] >= 0:
write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_ID, this['id'])
data = this.get('subject_his_id', None)
if data:
write_string(fid, FIFF.FIFF_SUBJ_HIS_ID, data)
write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, this['coord_frame'])
write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS, this['np'])
write_float_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_POINTS, this['rr'])
write_float_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NORMALS, this['nn'])
# Which vertices are active
write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_SELECTION, this['inuse'])
write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NUSE, this['nuse'])
if this['ntri'] > 0:
write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NTRI, this['ntri'])
write_int_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_TRIANGLES,
this['tris'] + 1)
if this['type'] != 'vol' and this['use_tris'] is not None:
# Use triangulation
write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NUSE_TRI, this['nuse_tri'])
write_int_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_USE_TRIANGLES,
this['use_tris'] + 1)
if this['type'] == 'vol':
neighbor_vert = this.get('neighbor_vert', None)
if neighbor_vert is not None:
nneighbors = np.array([len(n) for n in neighbor_vert])
neighbors = np.concatenate(neighbor_vert)
write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NNEIGHBORS, nneighbors)
write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NEIGHBORS, neighbors)
write_coord_trans(fid, this['src_mri_t'])
write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_VOXEL_DIMS, this['shape'])
start_block(fid, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
write_coord_trans(fid, this['mri_ras_t'])
write_coord_trans(fid, this['vox_mri_t'])
mri_volume_name = this.get('mri_volume_name', None)
if mri_volume_name is not None:
write_string(fid, FIFF.FIFF_MNE_FILE_NAME, mri_volume_name)
write_float_sparse_rcs(fid, FIFF.FIFF_MNE_SOURCE_SPACE_INTERPOLATOR,
this['interpolator'])
if 'mri_file' in this and this['mri_file'] is not None:
write_string(fid, FIFF.FIFF_MNE_SOURCE_SPACE_MRI_FILE,
this['mri_file'])
write_int(fid, FIFF.FIFF_MRI_WIDTH, this['mri_width'])
write_int(fid, FIFF.FIFF_MRI_HEIGHT, this['mri_height'])
write_int(fid, FIFF.FIFF_MRI_DEPTH, this['mri_depth'])
end_block(fid, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
# Patch-related information
if this['nearest'] is not None:
write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST, this['nearest'])
write_float_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST_DIST,
this['nearest_dist'])
# Distances
if this['dist'] is not None:
# Save only upper triangular portion of the matrix
dists = this['dist'].copy()
dists = sparse.triu(dists, format=dists.format)
write_float_sparse_rcs(fid, FIFF.FIFF_MNE_SOURCE_SPACE_DIST, dists)
write_float_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_DIST_LIMIT,
this['dist_limit'])
# Segmentation data
if this['type'] == 'vol' and ('seg_name' in this):
# Save the name of the segment
write_string(fid, FIFF.FIFF_COMMENT, this['seg_name'])
##############################################################################
# Head to MRI volume conversion
@verbose
def head_to_mri(pos, subject, mri_head_t, subjects_dir=None,
verbose=None):
"""Convert pos from head coordinate system to MRI ones.
This function converts to MRI RAS coordinates and not to surface
RAS.
Parameters
----------
pos : array, shape (n_pos, 3)
The coordinates (in m) in head coordinate system
subject : string
Name of the subject.
    mri_head_t : instance of Transform
MRI<->Head coordinate transformation
subjects_dir : string, or None
Path to SUBJECTS_DIR if it is not set in the environment.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
coordinates : array, shape (n_pos, 3)
        The MRI RAS coordinates (in mm) of pos
Notes
-----
This function requires either nibabel (in Python) or Freesurfer
(with utility "mri_info") to be correctly installed.
"""
import nibabel as nib
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
t1_fname = op.join(subjects_dir, subject, 'mri', 'T1.mgz')
head_mri_t = _ensure_trans(mri_head_t, 'head', 'mri')
mri_pos = apply_trans(head_mri_t, pos) * 1e3
t1 = nib.load(t1_fname)
vox2ras_tkr = t1.header.get_vox2ras_tkr()
ras2vox_tkr = linalg.inv(vox2ras_tkr)
vox2ras = t1.header.get_vox2ras()
mri_pos = apply_trans(ras2vox_tkr, mri_pos) # in vox
mri_pos = apply_trans(vox2ras, mri_pos) # in RAS
return mri_pos
##############################################################################
# Surface to MNI conversion
@verbose
def vertex_to_mni(vertices, hemis, subject, subjects_dir=None, mode=None,
verbose=None):
"""Convert the array of vertices for a hemisphere to MNI coordinates.
Parameters
----------
vertices : int, or list of int
Vertex number(s) to convert
hemis : int, or list of int
Hemisphere(s) the vertices belong to
subject : string
Name of the subject to load surfaces from.
subjects_dir : string, or None
Path to SUBJECTS_DIR if it is not set in the environment.
mode : string | None
Either 'nibabel' or 'freesurfer' for the software to use to
obtain the transforms. If None, 'nibabel' is tried first, falling
back to 'freesurfer' if it fails. Results should be equivalent with
either option, but nibabel may be quicker (and more pythonic).
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
coordinates : n_vertices x 3 array of float
The MNI coordinates (in mm) of the vertices
Notes
-----
This function requires either nibabel (in Python) or Freesurfer
(with utility "mri_info") to be correctly installed.
"""
if not has_freesurfer() and not has_nibabel():
raise RuntimeError('NiBabel (Python) or Freesurfer (Unix) must be '
'correctly installed and accessible from Python')
if not isinstance(vertices, list) and not isinstance(vertices, np.ndarray):
vertices = [vertices]
if not isinstance(hemis, list) and not isinstance(hemis, np.ndarray):
hemis = [hemis] * len(vertices)
if not len(hemis) == len(vertices):
raise ValueError('hemi and vertices must match in length')
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
surfs = [op.join(subjects_dir, subject, 'surf', '%s.white' % h)
for h in ['lh', 'rh']]
# read surface locations in MRI space
rr = [read_surface(s)[0] for s in surfs]
# take point locations in MRI space and convert to MNI coordinates
xfm = _read_talxfm(subject, subjects_dir, mode)
data = np.array([rr[h][v, :] for h, v in zip(hemis, vertices)])
return apply_trans(xfm['trans'], data)
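# Example (sketch; assumes a FreeSurfer subject 'sample' with lh/rh.white
# surfaces and talairach.xfm available under subjects_dir):
#
#     coords = vertex_to_mni(vertices=[123, 456], hemis=[0, 1],
#                            subject='sample', subjects_dir=subjects_dir)
#     # coords.shape == (2, 3), MNI coordinates in mm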
##############################################################################
# Volume to MNI conversion
@verbose
def head_to_mni(pos, subject, mri_head_t, subjects_dir=None,
verbose=None):
"""Convert pos from head coordinate system to MNI ones.
Parameters
----------
pos : array, shape (n_pos, 3)
The coordinates (in m) in head coordinate system
subject : string
Name of the subject.
    mri_head_t : instance of Transform
MRI<->Head coordinate transformation
subjects_dir : string, or None
Path to SUBJECTS_DIR if it is not set in the environment.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
coordinates : array, shape (n_pos, 3)
The MNI coordinates (in mm) of pos
Notes
-----
This function requires either nibabel (in Python) or Freesurfer
(with utility "mri_info") to be correctly installed.
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
# before we go from head to MRI (surface RAS)
head_mri_t = _ensure_trans(mri_head_t, 'head', 'mri')
coo_MRI_RAS = apply_trans(head_mri_t, pos)
# convert to MNI coordinates
xfm = _read_talxfm(subject, subjects_dir)
return apply_trans(xfm['trans'], coo_MRI_RAS * 1000)
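# Example (sketch; 'trans' is a hypothetical head<->MRI Transform, e.g.
# loaded with mne.read_trans, and 'pos' an (n, 3) array in meters):
#
#     mni = head_to_mni(pos, subject='sample', mri_head_t=trans,
#                       subjects_dir=subjects_dir)
#     # mni.shape == (n, 3), MNI coordinates in mm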
@verbose
def _read_talxfm(subject, subjects_dir, mode=None, verbose=None):
"""Read MNI transform from FreeSurfer talairach.xfm file.
Adapted from freesurfer m-files. Altered to deal with Norig
and Torig correctly.
"""
if mode is not None and mode not in ['nibabel', 'freesurfer']:
raise ValueError('mode must be "nibabel" or "freesurfer"')
fname = op.join(subjects_dir, subject, 'mri', 'transforms',
'talairach.xfm')
# Setup the RAS to MNI transform
ras_mni_t = Transform('ras', 'mni_tal', _read_fs_xfm(fname)[0])
# We want to get from Freesurfer surface RAS ('mri') to MNI ('mni_tal').
# This file only gives us RAS (non-zero origin) ('ras') to MNI ('mni_tal').
    # So we need to get the ras->mri transform from the MRI headers.
# To do this, we get Norig and Torig
# (i.e. vox_ras_t and vox_mri_t, respectively)
path = op.join(subjects_dir, subject, 'mri', 'orig.mgz')
if not op.isfile(path):
path = op.join(subjects_dir, subject, 'mri', 'T1.mgz')
if not op.isfile(path):
raise IOError('mri not found: %s' % path)
if has_nibabel():
use_nibabel = True
else:
use_nibabel = False
if mode == 'nibabel':
raise ImportError('Tried to import nibabel but failed, try using '
                              'mode=None or mode="freesurfer"')
# note that if mode == None, then we default to using nibabel
if use_nibabel is True and mode == 'freesurfer':
use_nibabel = False
if use_nibabel:
hdr = _get_mri_header(path)
n_orig = hdr.get_vox2ras()
t_orig = hdr.get_vox2ras_tkr()
else:
nt_orig = list()
for conv in ['--vox2ras', '--vox2ras-tkr']:
stdout, stderr = run_subprocess(['mri_info', conv, path])
stdout = np.fromstring(stdout, sep=' ').astype(float)
if not stdout.size == 16:
raise ValueError('Could not parse Freesurfer mri_info output')
nt_orig.append(stdout.reshape(4, 4))
n_orig, t_orig = nt_orig
# extract the MRI_VOXEL to RAS (non-zero origin) transform
vox_ras_t = Transform('mri_voxel', 'ras', n_orig)
# extract the MRI_VOXEL to MRI transform
vox_mri_t = Transform('mri_voxel', 'mri', t_orig)
# construct the MRI to RAS (non-zero origin) transform
mri_ras_t = combine_transforms(
invert_transform(vox_mri_t), vox_ras_t, 'mri', 'ras')
# construct the MRI to MNI transform
mri_mni_t = combine_transforms(mri_ras_t, ras_mni_t, 'mri', 'mni_tal')
return mri_mni_t
###############################################################################
# Creation and decimation
@verbose
def _check_spacing(spacing, verbose=None):
"""Check spacing parameter."""
# check to make sure our parameters are good, parse 'spacing'
space_err = ('"spacing" must be a string with values '
'"ico#", "oct#", or "all", and "ico" and "oct"'
'numbers must be integers')
if not isinstance(spacing, string_types) or len(spacing) < 3:
raise ValueError(space_err)
if spacing == 'all':
stype = 'all'
sval = ''
elif spacing[:3] == 'ico':
stype = 'ico'
sval = spacing[3:]
elif spacing[:3] == 'oct':
stype = 'oct'
sval = spacing[3:]
else:
raise ValueError(space_err)
try:
if stype in ['ico', 'oct']:
sval = int(sval)
elif stype == 'spacing': # spacing
sval = float(sval)
except Exception:
raise ValueError(space_err)
if stype == 'all':
logger.info('Include all vertices')
ico_surf = None
src_type_str = 'all'
else:
src_type_str = '%s = %s' % (stype, sval)
if stype == 'ico':
logger.info('Icosahedron subdivision grade %s' % sval)
ico_surf = _get_ico_surface(sval)
elif stype == 'oct':
logger.info('Octahedron subdivision grade %s' % sval)
ico_surf = _tessellate_sphere_surf(sval)
return stype, sval, ico_surf, src_type_str
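# Example of the parsing performed above (sketch):
#
#     stype, sval, ico_surf, s_str = _check_spacing('oct6')
#     # stype == 'oct', sval == 6, s_str == 'oct = 6', and ico_surf is the
#     # tessellated sphere used for vertex picking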
@verbose
def setup_source_space(subject, spacing='oct6', surface='white',
subjects_dir=None, add_dist=True, n_jobs=1,
verbose=None):
"""Set up bilateral hemisphere surface-based source space with subsampling.
Parameters
----------
subject : str
Subject to process.
spacing : str
The spacing to use. Can be ``'ico#'`` for a recursively subdivided
icosahedron, ``'oct#'`` for a recursively subdivided octahedron,
or ``'all'`` for all points.
surface : str
The surface to use.
subjects_dir : string, or None
Path to SUBJECTS_DIR if it is not set in the environment.
add_dist : bool
Add distance and patch information to the source space. This takes some
time so precomputing it is recommended.
n_jobs : int
Number of jobs to run in parallel. Will use at most 2 jobs
(one for each hemisphere).
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
src : SourceSpaces
The source space for each hemisphere.
See Also
--------
setup_volume_source_space
"""
cmd = ('setup_source_space(%s, spacing=%s, surface=%s, '
'subjects_dir=%s, add_dist=%s, verbose=%s)'
% (subject, spacing, surface, subjects_dir, add_dist, verbose))
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
surfs = [op.join(subjects_dir, subject, 'surf', hemi + surface)
for hemi in ['lh.', 'rh.']]
for surf, hemi in zip(surfs, ['LH', 'RH']):
if surf is not None and not op.isfile(surf):
raise IOError('Could not find the %s surface %s'
% (hemi, surf))
logger.info('Setting up the source space with the following parameters:\n')
logger.info('SUBJECTS_DIR = %s' % subjects_dir)
logger.info('Subject = %s' % subject)
logger.info('Surface = %s' % surface)
stype, sval, ico_surf, src_type_str = _check_spacing(spacing)
logger.info('')
del spacing
logger.info('>>> 1. Creating the source space...\n')
# mne_make_source_space ... actually make the source spaces
src = []
# pre-load ico/oct surf (once) for speed, if necessary
if stype != 'all':
logger.info('Doing the %shedral vertex picking...'
% (dict(ico='icosa', oct='octa')[stype],))
for hemi, surf in zip(['lh', 'rh'], surfs):
logger.info('Loading %s...' % surf)
# Setup the surface spacing in the MRI coord frame
if stype != 'all':
logger.info('Mapping %s %s -> %s (%d) ...'
% (hemi, subject, stype, sval))
s = _create_surf_spacing(surf, hemi, subject, stype, ico_surf,
subjects_dir)
logger.info('loaded %s %d/%d selected to source space (%s)'
% (op.split(surf)[1], s['nuse'], s['np'], src_type_str))
src.append(s)
logger.info('') # newline after both subject types are run
# Fill in source space info
hemi_ids = [FIFF.FIFFV_MNE_SURF_LEFT_HEMI, FIFF.FIFFV_MNE_SURF_RIGHT_HEMI]
for s, s_id in zip(src, hemi_ids):
# Add missing fields
s.update(dict(dist=None, dist_limit=None, nearest=None, type='surf',
nearest_dist=None, pinfo=None, patch_inds=None, id=s_id,
coord_frame=FIFF.FIFFV_COORD_MRI))
s['rr'] /= 1000.0
del s['tri_area']
del s['tri_cent']
del s['tri_nn']
del s['neighbor_tri']
# upconvert to object format from lists
src = SourceSpaces(src, dict(working_dir=os.getcwd(), command_line=cmd))
if add_dist:
add_source_space_distances(src, n_jobs=n_jobs, verbose=verbose)
# write out if requested, then return the data
logger.info('You are now one step closer to computing the gain matrix')
return src
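# Example usage (sketch; assumes the FreeSurfer 'sample' subject with
# lh/rh.white surfaces under subjects_dir):
#
#     src = setup_source_space('sample', spacing='oct6', surface='white',
#                              subjects_dir=subjects_dir, add_dist=False)
#     # two surface source spaces (lh, rh) in MRI coordinates, 'rr' in meters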
@verbose
def setup_volume_source_space(subject=None, pos=5.0, mri=None,
sphere=(0.0, 0.0, 0.0, 90.0), bem=None,
surface=None, mindist=5.0, exclude=0.0,
subjects_dir=None, volume_label=None,
add_interpolator=True, verbose=None):
"""Set up a volume source space with grid spacing or discrete source space.
Parameters
----------
subject : str | None
Subject to process. If None, the path to the mri volume must be
absolute. Defaults to None.
pos : float | dict
Positions to use for sources. If float, a grid will be constructed
with the spacing given by `pos` in mm, generating a volume source
space. If dict, pos['rr'] and pos['nn'] will be used as the source
space locations (in meters) and normals, respectively, creating a
discrete source space. NOTE: For a discrete source space (`pos` is
a dict), `mri` must be None.
mri : str | None
The filename of an MRI volume (mgh or mgz) to create the
interpolation matrix over. Source estimates obtained in the
volume source space can then be morphed onto the MRI volume
using this interpolator. If pos is a dict, this can be None.
sphere : ndarray, shape (4,) | ConductorModel
Define spherical source space bounds using origin and radius given
by (ox, oy, oz, rad) in mm. Only used if ``bem`` and ``surface``
are both None. Can also be a spherical ConductorModel, which will
use the origin and radius.
bem : str | None
Define source space bounds using a BEM file (specifically the inner
skull surface).
surface : str | dict | None
Define source space bounds using a FreeSurfer surface file. Can
also be a dictionary with entries `'rr'` and `'tris'`, such as
those returned by :func:`mne.read_surface`.
mindist : float
Exclude points closer than this distance (mm) to the bounding surface.
exclude : float
Exclude points closer than this distance (mm) from the center of mass
of the bounding surface.
subjects_dir : string, or None
Path to SUBJECTS_DIR if it is not set in the environment.
volume_label : str | list | None
Region of interest corresponding with freesurfer lookup table.
add_interpolator : bool
If True and ``mri`` is not None, then an interpolation matrix
will be produced.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
src : SourceSpaces
        A :class:`SourceSpaces` object containing one source space for each
        entry of ``volume_label``, or a single source space if
        ``volume_label`` was not specified.
See Also
--------
setup_source_space
Notes
-----
To create a discrete source space, `pos` must be a dict, 'mri' must be
None, and 'volume_label' must be None. To create a whole brain volume
source space, `pos` must be a float and 'mri' must be provided. To create
a volume source space from label, 'pos' must be a float, 'volume_label'
must be provided, and 'mri' must refer to a .mgh or .mgz file with values
corresponding to the freesurfer lookup-table (typically aseg.mgz).
"""
subjects_dir = get_subjects_dir(subjects_dir)
if bem is not None and surface is not None:
raise ValueError('Only one of "bem" and "surface" should be '
'specified')
if mri is not None:
if not op.isfile(mri):
if subject is None:
raise IOError('mri file "%s" not found' % mri)
mri = op.join(subjects_dir, subject, 'mri', mri)
if not op.isfile(mri):
raise IOError('mri file "%s" not found' % mri)
if isinstance(pos, dict):
raise ValueError('Cannot create interpolation matrix for '
'discrete source space, mri must be None if '
'pos is a dict')
if volume_label is not None:
if mri is None:
raise RuntimeError('"mri" must be provided if "volume_label" is '
'not None')
if not isinstance(volume_label, list):
volume_label = [volume_label]
# Check that volume label is found in .mgz file
volume_labels = get_volume_labels_from_aseg(mri)
for label in volume_label:
if label not in volume_labels:
raise ValueError('Volume %s not found in file %s. Double '
'check freesurfer lookup table.'
% (label, mri))
if isinstance(sphere, ConductorModel):
if not sphere['is_sphere'] or len(sphere['layers']) == 0:
raise ValueError('sphere, if a ConductorModel, must be spherical '
'with multiple layers, not a BEM or single-layer '
'sphere (got %s)' % (sphere,))
sphere = tuple(1000 * sphere['r0']) + (1000 *
sphere['layers'][0]['rad'],)
sphere = np.asarray(sphere, dtype=float)
if sphere.size != 4:
raise ValueError('"sphere" must be array_like with 4 elements, got: %s'
% (sphere,))
# triage bounding argument
if bem is not None:
logger.info('BEM file : %s', bem)
elif surface is not None:
if isinstance(surface, dict):
if not all(key in surface for key in ['rr', 'tris']):
raise KeyError('surface, if dict, must have entries "rr" '
'and "tris"')
# let's make sure we have geom info
complete_surface_info(surface, copy=False, verbose=False)
surf_extra = 'dict()'
elif isinstance(surface, string_types):
if not op.isfile(surface):
raise IOError('surface file "%s" not found' % surface)
surf_extra = surface
logger.info('Boundary surface file : %s', surf_extra)
else:
logger.info('Sphere : origin at (%.1f %.1f %.1f) mm'
% (sphere[0], sphere[1], sphere[2]))
logger.info(' radius : %.1f mm' % sphere[3])
# triage pos argument
if isinstance(pos, dict):
if not all(key in pos for key in ['rr', 'nn']):
raise KeyError('pos, if dict, must contain "rr" and "nn"')
pos_extra = 'dict()'
else: # pos should be float-like
try:
pos = float(pos)
except (TypeError, ValueError):
raise ValueError('pos must be a dict, or something that can be '
'cast to float()')
if not isinstance(pos, float):
logger.info('Source location file : %s', pos_extra)
logger.info('Assuming input in millimeters')
logger.info('Assuming input in MRI coordinates')
if isinstance(pos, float):
logger.info('grid : %.1f mm' % pos)
logger.info('mindist : %.1f mm' % mindist)
        pos /= 1000.0  # convert pos from mm to m
if exclude > 0.0:
logger.info('Exclude : %.1f mm' % exclude)
if mri is not None:
logger.info('MRI volume : %s' % mri)
    exclude /= 1000.0  # convert exclude from mm to m
logger.info('')
# Explicit list of points
if not isinstance(pos, float):
# Make the grid of sources
sp = _make_discrete_source_space(pos)
else:
# Load the brain surface as a template
if bem is not None:
# read bem surface in the MRI coordinate frame
surf = read_bem_surfaces(bem, s_id=FIFF.FIFFV_BEM_SURF_ID_BRAIN,
verbose=False)
logger.info('Loaded inner skull from %s (%d nodes)'
% (bem, surf['np']))
elif surface is not None:
if isinstance(surface, string_types):
# read the surface in the MRI coordinate frame
surf = read_surface(surface, return_dict=True)[-1]
else:
surf = surface
logger.info('Loaded bounding surface from %s (%d nodes)'
% (surface, surf['np']))
surf = deepcopy(surf)
surf['rr'] *= 1e-3 # must be converted to meters
else: # Load an icosahedron and use that as the surface
logger.info('Setting up the sphere...')
surf = dict(R=sphere[3] / 1000., r0=sphere[:3] / 1000.)
# Make the grid of sources in MRI space
if volume_label is not None:
sp = []
for label in volume_label:
vol_sp = _make_volume_source_space(surf, pos, exclude, mindist,
mri, label)
sp.append(vol_sp)
else:
sp = _make_volume_source_space(surf, pos, exclude, mindist, mri,
volume_label)
# Compute an interpolation matrix to show data in MRI_VOXEL coord frame
if not isinstance(sp, list):
sp = [sp]
if mri is not None:
for s in sp:
_add_interpolator(s, mri, add_interpolator)
elif sp[0]['type'] == 'vol':
# If there is no interpolator, it's actually a discrete source space
sp[0]['type'] = 'discrete'
for s in sp:
if 'vol_dims' in s:
del s['vol_dims']
# Save it
for s in sp:
s.update(dict(nearest=None, dist=None, use_tris=None, patch_inds=None,
dist_limit=None, pinfo=None, ntri=0, nearest_dist=None,
nuse_tri=0, tris=None, subject_his_id=subject))
sp = SourceSpaces(sp, dict(working_dir=os.getcwd(), command_line='None'))
return sp
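# Example usage (sketch): a volume source space on a 10 mm grid bounded by
# a sphere of radius 90 mm centered at the origin (no MRI interpolator):
#
#     vsrc = setup_volume_source_space(
#         'sample', pos=10.0, sphere=(0.0, 0.0, 0.0, 90.0),
#         subjects_dir=subjects_dir)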
def _make_voxel_ras_trans(move, ras, voxel_size):
"""Make a transformation from MRI_VOXEL to MRI surface RAS (i.e. MRI)."""
assert voxel_size.ndim == 1
assert voxel_size.size == 3
rot = ras.T * voxel_size[np.newaxis, :]
assert rot.ndim == 2
assert rot.shape[0] == 3
assert rot.shape[1] == 3
trans = np.c_[np.r_[rot, np.zeros((1, 3))], np.r_[move, 1.0]]
t = Transform('mri_voxel', 'mri', trans)
return t
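# Example (sketch): a 2 mm isotropic grid with identity orientation whose
# first voxel sits at the MRI-coordinate origin:
#
#     t = _make_voxel_ras_trans(move=np.zeros(3), ras=np.eye(3),
#                               voxel_size=np.full(3, 2e-3))
#     # t['trans'] maps voxel indices to MRI (surface RAS) positions in m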
def _make_discrete_source_space(pos, coord_frame='mri'):
"""Use a discrete set of source locs/oris to make src space.
Parameters
----------
pos : dict
Must have entries "rr" and "nn". Data should be in meters.
coord_frame : str
The coordinate frame in which the positions are given; default: 'mri'.
The frame must be one defined in transforms.py:_str_to_frame
Returns
-------
src : dict
The source space.
"""
# Check that coordinate frame is valid
if coord_frame not in _str_to_frame: # will fail if coord_frame not string
raise KeyError('coord_frame must be one of %s, not "%s"'
% (list(_str_to_frame.keys()), coord_frame))
coord_frame = _str_to_frame[coord_frame] # now an int
# process points (copy and cast)
rr = np.array(pos['rr'], float)
nn = np.array(pos['nn'], float)
    if not (rr.ndim == nn.ndim == 2 and rr.shape[0] == nn.shape[0] and
rr.shape[1] == nn.shape[1]):
raise RuntimeError('"rr" and "nn" must both be 2D arrays with '
'the same number of rows and 3 columns')
npts = rr.shape[0]
_normalize_vectors(nn)
nz = np.sum(np.sum(nn * nn, axis=1) == 0)
if nz != 0:
raise RuntimeError('%d sources have zero length normal' % nz)
logger.info('Positions (in meters) and orientations')
logger.info('%d sources' % npts)
# Ready to make the source space
sp = dict(coord_frame=coord_frame, type='discrete', nuse=npts, np=npts,
inuse=np.ones(npts, int), vertno=np.arange(npts), rr=rr, nn=nn,
id=-1)
return sp
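# Example (sketch): three discrete sources along +z (positions in meters):
#
#     pos = dict(rr=np.array([[0.00, 0.00, 0.05],
#                             [0.00, 0.01, 0.05],
#                             [0.01, 0.00, 0.05]]),
#                nn=np.array([[0., 0., 1.]] * 3))
#     sp = _make_discrete_source_space(pos)
#     # sp['type'] == 'discrete', sp['nuse'] == 3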
def _make_volume_source_space(surf, grid, exclude, mindist, mri=None,
volume_label=None, do_neighbors=True, n_jobs=1):
"""Make a source space which covers the volume bounded by surf."""
# Figure out the grid size in the MRI coordinate frame
if 'rr' in surf:
mins = np.min(surf['rr'], axis=0)
maxs = np.max(surf['rr'], axis=0)
cm = np.mean(surf['rr'], axis=0) # center of mass
maxdist = np.linalg.norm(surf['rr'] - cm, axis=1).max()
else:
mins = surf['r0'] - surf['R']
maxs = surf['r0'] + surf['R']
cm = surf['r0'].copy()
maxdist = surf['R']
# Define the sphere which fits the surface
logger.info('Surface CM = (%6.1f %6.1f %6.1f) mm'
% (1000 * cm[0], 1000 * cm[1], 1000 * cm[2]))
logger.info('Surface fits inside a sphere with radius %6.1f mm'
% (1000 * maxdist))
logger.info('Surface extent:')
for c, mi, ma in zip('xyz', mins, maxs):
logger.info(' %s = %6.1f ... %6.1f mm' % (c, 1000 * mi, 1000 * ma))
maxn = np.array([np.floor(np.abs(m) / grid) + 1 if m > 0 else -
np.floor(np.abs(m) / grid) - 1 for m in maxs], int)
minn = np.array([np.floor(np.abs(m) / grid) + 1 if m > 0 else -
np.floor(np.abs(m) / grid) - 1 for m in mins], int)
logger.info('Grid extent:')
for c, mi, ma in zip('xyz', minn, maxn):
logger.info(' %s = %6.1f ... %6.1f mm'
% (c, 1000 * mi * grid, 1000 * ma * grid))
# Now make the initial grid
ns = maxn - minn + 1
npts = np.prod(ns)
nrow = ns[0]
ncol = ns[1]
nplane = nrow * ncol
# x varies fastest, then y, then z (can use unravel to do this)
rr = np.meshgrid(np.arange(minn[2], maxn[2] + 1),
np.arange(minn[1], maxn[1] + 1),
np.arange(minn[0], maxn[0] + 1), indexing='ij')
x, y, z = rr[2].ravel(), rr[1].ravel(), rr[0].ravel()
rr = np.array([x * grid, y * grid, z * grid]).T
sp = dict(np=npts, nn=np.zeros((npts, 3)), rr=rr,
inuse=np.ones(npts, int), type='vol', nuse=npts,
coord_frame=FIFF.FIFFV_COORD_MRI, id=-1, shape=ns)
sp['nn'][:, 2] = 1.0
assert sp['rr'].shape[0] == npts
logger.info('%d sources before omitting any.', sp['nuse'])
# Exclude infeasible points
dists = np.linalg.norm(sp['rr'] - cm, axis=1)
bads = np.where(np.logical_or(dists < exclude, dists > maxdist))[0]
sp['inuse'][bads] = False
sp['nuse'] -= len(bads)
logger.info('%d sources after omitting infeasible sources.', sp['nuse'])
if 'rr' in surf:
_filter_source_spaces(surf, mindist, None, [sp], n_jobs)
else: # sphere
vertno = np.where(sp['inuse'])[0]
bads = (np.linalg.norm(sp['rr'][vertno] - surf['r0'], axis=-1) >=
surf['R'] - mindist / 1000.)
sp['nuse'] -= bads.sum()
sp['inuse'][vertno[bads]] = False
sp['vertno'] = np.where(sp['inuse'])[0]
del vertno
del surf
logger.info('%d sources remaining after excluding the sources outside '
'the surface and less than %6.1f mm inside.'
% (sp['nuse'], mindist))
if not do_neighbors:
if volume_label is not None:
            raise RuntimeError('volume_label is only supported when '
                               'do_neighbors is True')
return sp
k = np.arange(npts)
neigh = np.empty((26, npts), int)
neigh.fill(-1)
# Figure out each neighborhood:
# 6-neighborhood first
idxs = [z > minn[2], x < maxn[0], y < maxn[1],
x > minn[0], y > minn[1], z < maxn[2]]
offsets = [-nplane, 1, nrow, -1, -nrow, nplane]
for n, idx, offset in zip(neigh[:6], idxs, offsets):
n[idx] = k[idx] + offset
# Then the rest to complete the 26-neighborhood
# First the plane below
idx1 = z > minn[2]
idx2 = np.logical_and(idx1, x < maxn[0])
neigh[6, idx2] = k[idx2] + 1 - nplane
idx3 = np.logical_and(idx2, y < maxn[1])
neigh[7, idx3] = k[idx3] + 1 + nrow - nplane
idx2 = np.logical_and(idx1, y < maxn[1])
neigh[8, idx2] = k[idx2] + nrow - nplane
idx2 = np.logical_and(idx1, x > minn[0])
idx3 = np.logical_and(idx2, y < maxn[1])
neigh[9, idx3] = k[idx3] - 1 + nrow - nplane
neigh[10, idx2] = k[idx2] - 1 - nplane
idx3 = np.logical_and(idx2, y > minn[1])
neigh[11, idx3] = k[idx3] - 1 - nrow - nplane
idx2 = np.logical_and(idx1, y > minn[1])
neigh[12, idx2] = k[idx2] - nrow - nplane
idx3 = np.logical_and(idx2, x < maxn[0])
neigh[13, idx3] = k[idx3] + 1 - nrow - nplane
# Then the same plane
idx1 = np.logical_and(x < maxn[0], y < maxn[1])
neigh[14, idx1] = k[idx1] + 1 + nrow
idx1 = x > minn[0]
idx2 = np.logical_and(idx1, y < maxn[1])
neigh[15, idx2] = k[idx2] - 1 + nrow
idx2 = np.logical_and(idx1, y > minn[1])
neigh[16, idx2] = k[idx2] - 1 - nrow
idx1 = np.logical_and(y > minn[1], x < maxn[0])
    neigh[17, idx1] = k[idx1] + 1 - nrow
# Finally one plane above
idx1 = z < maxn[2]
idx2 = np.logical_and(idx1, x < maxn[0])
neigh[18, idx2] = k[idx2] + 1 + nplane
idx3 = np.logical_and(idx2, y < maxn[1])
neigh[19, idx3] = k[idx3] + 1 + nrow + nplane
idx2 = np.logical_and(idx1, y < maxn[1])
neigh[20, idx2] = k[idx2] + nrow + nplane
idx2 = np.logical_and(idx1, x > minn[0])
idx3 = np.logical_and(idx2, y < maxn[1])
neigh[21, idx3] = k[idx3] - 1 + nrow + nplane
neigh[22, idx2] = k[idx2] - 1 + nplane
idx3 = np.logical_and(idx2, y > minn[1])
neigh[23, idx3] = k[idx3] - 1 - nrow + nplane
idx2 = np.logical_and(idx1, y > minn[1])
neigh[24, idx2] = k[idx2] - nrow + nplane
idx3 = np.logical_and(idx2, x < maxn[0])
neigh[25, idx3] = k[idx3] + 1 - nrow + nplane
# Restrict sources to volume of interest
if volume_label is not None:
try:
import nibabel as nib
except ImportError:
raise ImportError("nibabel is required to read segmentation file.")
logger.info('Selecting voxels from %s' % volume_label)
# Read the segmentation data using nibabel
mgz = nib.load(mri)
mgz_data = mgz.get_data()
# Get the numeric index for this volume label
lut = _get_lut()
vol_id = _get_lut_id(lut, volume_label, True)
# Get indices for this volume label in voxel space
vox_bool = mgz_data == vol_id
# Get the 3 dimensional indices in voxel space
vox_xyz = np.array(np.where(vox_bool)).T
# Transform to RAS coordinates
# (use tkr normalization or volume won't align with surface sources)
trans = _get_mgz_header(mri)['vox2ras_tkr']
# Convert transform from mm to m
trans[:3] /= 1000.
rr_voi = apply_trans(trans, vox_xyz) # positions of VOI in RAS space
# Filter out points too far from volume region voxels
dists = _compute_nearest(rr_voi, sp['rr'], return_dists=True)[1]
# Maximum distance from center of mass of a voxel to any of its corners
maxdist = linalg.norm(trans[:3, :3].sum(0) / 2.)
bads = np.where(dists > maxdist)[0]
# Update source info
sp['inuse'][bads] = False
sp['vertno'] = np.where(sp['inuse'] > 0)[0]
sp['nuse'] = len(sp['vertno'])
sp['seg_name'] = volume_label
sp['mri_file'] = mri
# Update log
logger.info('%d sources remaining after excluding sources too far '
'from VOI voxels', sp['nuse'])
# Omit unused vertices from the neighborhoods
logger.info('Adjusting the neighborhood info...')
# remove non source-space points
log_inuse = sp['inuse'] > 0
neigh[:, np.logical_not(log_inuse)] = -1
# remove these points from neigh
vertno = np.where(log_inuse)[0]
sp['vertno'] = vertno
old_shape = neigh.shape
neigh = neigh.ravel()
checks = np.where(neigh >= 0)[0]
removes = np.logical_not(np.in1d(checks, vertno))
neigh[checks[removes]] = -1
neigh.shape = old_shape
neigh = neigh.T
# Thought we would need this, but C code keeps -1 vertices, so we will:
# neigh = [n[n >= 0] for n in enumerate(neigh[vertno])]
sp['neighbor_vert'] = neigh
# Set up the volume data (needed for creating the interpolation matrix)
r0 = minn * grid
voxel_size = grid * np.ones(3)
ras = np.eye(3)
sp['src_mri_t'] = _make_voxel_ras_trans(r0, ras, voxel_size)
sp['vol_dims'] = maxn - minn + 1
return sp
def _vol_vertex(width, height, jj, kk, pp):
return jj + width * kk + pp * (width * height)
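# _vol_vertex flattens (jj, kk, pp) grid indices with x varying fastest;
# e.g. for width=4, height=3, the voxel (jj=1, kk=2, pp=1) maps to
# 1 + 4 * 2 + 1 * (4 * 3) = 21.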
def _get_mri_header(fname):
"""Get MRI header using nibabel."""
import nibabel as nib
img = nib.load(fname)
try:
return img.header
except AttributeError: # old nibabel
return img.get_header()
def _get_mgz_header(fname):
"""Adapted from nibabel to quickly extract header info."""
if not fname.endswith('.mgz'):
raise IOError('Filename must end with .mgz')
header_dtd = [('version', '>i4'), ('dims', '>i4', (4,)),
('type', '>i4'), ('dof', '>i4'), ('goodRASFlag', '>i2'),
('delta', '>f4', (3,)), ('Mdc', '>f4', (3, 3)),
('Pxyz_c', '>f4', (3,))]
header_dtype = np.dtype(header_dtd)
with GzipFile(fname, 'rb') as fid:
hdr_str = fid.read(header_dtype.itemsize)
header = np.ndarray(shape=(), dtype=header_dtype,
buffer=hdr_str)
# dims
dims = header['dims'].astype(int)
dims = dims[:3] if len(dims) == 4 else dims
# vox2ras_tkr
delta = header['delta']
ds = np.array(delta, float)
ns = np.array(dims * ds) / 2.0
v2rtkr = np.array([[-ds[0], 0, 0, ns[0]],
[0, 0, ds[2], -ns[2]],
[0, -ds[1], 0, ns[1]],
[0, 0, 0, 1]], dtype=np.float32)
# ras2vox
d = np.diag(delta)
pcrs_c = dims / 2.0
Mdc = header['Mdc'].T
pxyz_0 = header['Pxyz_c'] - np.dot(Mdc, np.dot(d, pcrs_c))
M = np.eye(4, 4)
M[0:3, 0:3] = np.dot(Mdc, d)
M[0:3, 3] = pxyz_0.T
M = linalg.inv(M)
header = dict(dims=dims, vox2ras_tkr=v2rtkr, ras2vox=M)
return header
def _add_interpolator(s, mri_name, add_interpolator):
"""Compute a sparse matrix to interpolate the data into an MRI volume."""
# extract transformation information from mri
logger.info('Reading %s...' % mri_name)
header = _get_mgz_header(mri_name)
mri_width, mri_height, mri_depth = header['dims']
s.update(dict(mri_width=mri_width, mri_height=mri_height,
mri_depth=mri_depth))
trans = header['vox2ras_tkr'].copy()
trans[:3, :] /= 1000.0
s['vox_mri_t'] = Transform('mri_voxel', 'mri', trans) # ras_tkr
trans = linalg.inv(np.dot(header['vox2ras_tkr'], header['ras2vox']))
trans[:3, 3] /= 1000.0
s['mri_ras_t'] = Transform('mri', 'ras', trans) # ras
s['mri_volume_name'] = mri_name
nvox = mri_width * mri_height * mri_depth
if not add_interpolator:
s['interpolator'] = sparse.csr_matrix((nvox, s['np']))
return
_print_coord_trans(s['src_mri_t'], 'Source space : ')
_print_coord_trans(s['vox_mri_t'], 'MRI volume : ')
_print_coord_trans(s['mri_ras_t'], 'MRI volume : ')
#
# Convert MRI voxels from destination (MRI volume) to source (volume
# source space subset) coordinates
#
combo_trans = combine_transforms(s['vox_mri_t'],
invert_transform(s['src_mri_t']),
'mri_voxel', 'mri_voxel')
combo_trans['trans'] = combo_trans['trans'].astype(np.float32)
logger.info('Setting up interpolation...')
# Loop over slices to save (lots of) memory
# Note that it is the slowest incrementing index
# This is equivalent to using mgrid and reshaping, but faster
data = []
indices = []
indptr = np.zeros(nvox + 1, np.int32)
for p in range(mri_depth):
js = np.arange(mri_width, dtype=np.float32)
js = np.tile(js[np.newaxis, :],
(mri_height, 1)).ravel()
ks = np.arange(mri_height, dtype=np.float32)
ks = np.tile(ks[:, np.newaxis],
(1, mri_width)).ravel()
ps = np.empty((mri_height, mri_width), np.float32).ravel()
ps.fill(p)
r0 = np.c_[js, ks, ps]
del js, ks, ps
# Transform our vertices from their MRI space into our source space's
# frame (this is labeled as FIFFV_MNE_COORD_MRI_VOXEL, but it's
# really a subset of the entire volume!)
r0 = apply_trans(combo_trans['trans'], r0)
rn = np.floor(r0).astype(int)
maxs = (s['vol_dims'] - 1)[np.newaxis, :]
good = np.where(np.logical_and(np.all(rn >= 0, axis=1),
np.all(rn < maxs, axis=1)))[0]
rn = rn[good]
r0 = r0[good]
# now we take each MRI voxel *in this space*, and figure out how
# to make its value the weighted sum of voxels in the volume source
# space. This is a 3D weighting scheme based (presumably) on the
# fact that we know we're interpolating from one volumetric grid
# into another.
jj = rn[:, 0]
kk = rn[:, 1]
pp = rn[:, 2]
vss = np.empty((len(jj), 8), np.int32)
width = s['vol_dims'][0]
height = s['vol_dims'][1]
jjp1 = jj + 1
kkp1 = kk + 1
ppp1 = pp + 1
vss[:, 0] = _vol_vertex(width, height, jj, kk, pp)
vss[:, 1] = _vol_vertex(width, height, jjp1, kk, pp)
vss[:, 2] = _vol_vertex(width, height, jjp1, kkp1, pp)
vss[:, 3] = _vol_vertex(width, height, jj, kkp1, pp)
vss[:, 4] = _vol_vertex(width, height, jj, kk, ppp1)
vss[:, 5] = _vol_vertex(width, height, jjp1, kk, ppp1)
vss[:, 6] = _vol_vertex(width, height, jjp1, kkp1, ppp1)
vss[:, 7] = _vol_vertex(width, height, jj, kkp1, ppp1)
del jj, kk, pp, jjp1, kkp1, ppp1
uses = np.any(s['inuse'][vss], axis=1)
if uses.size == 0:
continue
vss = vss[uses].ravel() # vertex (col) numbers in csr matrix
indices.append(vss)
indptr[good[uses] + p * mri_height * mri_width + 1] = 8
del vss
# figure out weights for each vertex
r0 = r0[uses]
rn = rn[uses]
del uses, good
xf = r0[:, 0] - rn[:, 0].astype(np.float32)
yf = r0[:, 1] - rn[:, 1].astype(np.float32)
zf = r0[:, 2] - rn[:, 2].astype(np.float32)
omxf = 1.0 - xf
omyf = 1.0 - yf
omzf = 1.0 - zf
# each entry in the concatenation corresponds to a row of vss
data.append(np.array([omxf * omyf * omzf,
xf * omyf * omzf,
xf * yf * omzf,
omxf * yf * omzf,
omxf * omyf * zf,
xf * omyf * zf,
xf * yf * zf,
omxf * yf * zf], order='F').T.ravel())
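        # note: for each voxel the eight weights above are products of (f, 1 - f)
        # factors along the three axes, so per row they sum to 1.0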
del xf, yf, zf, omxf, omyf, omzf
# Compose the sparse matrix
indptr = np.cumsum(indptr, out=indptr)
indices = np.concatenate(indices)
data = np.concatenate(data)
s['interpolator'] = sparse.csr_matrix((data, indices, indptr),
shape=(nvox, s['np']))
logger.info(' %d/%d nonzero values [done]' % (len(data), nvox))
@verbose
def _filter_source_spaces(surf, limit, mri_head_t, src, n_jobs=1,
verbose=None):
"""Remove all source space points closer than a given limit (in mm)."""
if src[0]['coord_frame'] == FIFF.FIFFV_COORD_HEAD and mri_head_t is None:
raise RuntimeError('Source spaces are in head coordinates and no '
'coordinate transform was provided!')
# How close are the source points to the surface?
out_str = 'Source spaces are in '
if src[0]['coord_frame'] == FIFF.FIFFV_COORD_HEAD:
inv_trans = invert_transform(mri_head_t)
out_str += 'head coordinates.'
elif src[0]['coord_frame'] == FIFF.FIFFV_COORD_MRI:
out_str += 'MRI coordinates.'
else:
out_str += 'unknown (%d) coordinates.' % src[0]['coord_frame']
logger.info(out_str)
out_str = 'Checking that the sources are inside the bounding surface'
if limit > 0.0:
out_str += ' and at least %6.1f mm away' % (limit)
logger.info(out_str + ' (will take a few...)')
for s in src:
vertno = np.where(s['inuse'])[0] # can't trust s['vertno'] this deep
# Convert all points here first to save time
r1s = s['rr'][vertno]
if s['coord_frame'] == FIFF.FIFFV_COORD_HEAD:
r1s = apply_trans(inv_trans['trans'], r1s)
# Check that the source is inside surface (often the inner skull)
outside = _points_outside_surface(r1s, surf, n_jobs)
omit_outside = np.sum(outside)
# vectorized nearest using BallTree (or cdist)
omit = 0
if limit > 0.0:
dists = _compute_nearest(surf['rr'], r1s, return_dists=True)[1]
close = np.logical_and(dists < limit / 1000.0,
np.logical_not(outside))
omit = np.sum(close)
outside = np.logical_or(outside, close)
s['inuse'][vertno[outside]] = False
s['nuse'] -= (omit + omit_outside)
s['vertno'] = np.where(s['inuse'])[0]
if omit_outside > 0:
extras = [omit_outside]
extras += ['s', 'they are'] if omit_outside > 1 else ['', 'it is']
logger.info('%d source space point%s omitted because %s '
'outside the inner skull surface.' % tuple(extras))
if omit > 0:
extras = [omit]
            extras += ['s'] if omit > 1 else ['']
extras += [limit]
logger.info('%d source space point%s omitted because of the '
'%6.1f-mm distance limit.' % tuple(extras))
# Adjust the patch inds as well if necessary
if omit + omit_outside > 0:
_adjust_patch_info(s)
logger.info('Thank you for waiting.')
@verbose
def _adjust_patch_info(s, verbose=None):
"""Adjust patch information in place after vertex omission."""
if s.get('patch_inds') is not None:
if s['nearest'] is None:
# This shouldn't happen, but if it does, we can probably come
# up with a more clever solution
raise RuntimeError('Cannot adjust patch information properly, '
'please contact the mne-python developers')
_add_patch_info(s)
@verbose
def _points_outside_surface(rr, surf, n_jobs=1, verbose=None):
"""Check whether points are outside a surface.
Parameters
----------
rr : ndarray
Nx3 array of points to check.
surf : dict
Surface with entries "rr" and "tris".
Returns
-------
outside : ndarray
1D logical array of size N for which points are outside the surface.
"""
rr = np.atleast_2d(rr)
assert rr.shape[1] == 3
assert n_jobs > 0
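    # The accumulated signed solid-angle measure from _get_solids comes out at
    # (approximately) 2 * pi for points enclosed by the surface and 0 for points
    # outside it, so the deviation of the normalized sum from 1.0 below flags
    # exterior points.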
parallel, p_fun, _ = parallel_func(_get_solids, n_jobs)
tot_angles = parallel(p_fun(surf['rr'][tris], rr)
for tris in np.array_split(surf['tris'], n_jobs))
return np.abs(np.sum(tot_angles, axis=0) / (2 * np.pi) - 1.0) > 1e-5
@verbose
def _ensure_src(src, kind=None, verbose=None):
"""Ensure we have a source space."""
if isinstance(src, string_types):
if not op.isfile(src):
raise IOError('Source space file "%s" not found' % src)
logger.info('Reading %s...' % src)
src = read_source_spaces(src, verbose=False)
if not isinstance(src, SourceSpaces):
raise ValueError('src must be a string or instance of SourceSpaces')
if kind is not None:
if kind == 'surf':
surf = [s for s in src if s['type'] == 'surf']
if len(surf) != 2 or len(src) != 2:
raise ValueError('Source space must contain exactly two '
'surfaces.')
src = surf
return src
def _ensure_src_subject(src, subject):
src_subject = src[0].get('subject_his_id', None)
if subject is None:
subject = src_subject
if subject is None:
raise ValueError('source space is too old, subject must be '
'provided')
elif src_subject is not None and subject != src_subject:
raise ValueError('Mismatch between provided subject "%s" and subject '
'name "%s" in the source space'
% (subject, src_subject))
return subject
@verbose
def add_source_space_distances(src, dist_limit=np.inf, n_jobs=1, verbose=None):
"""Compute inter-source distances along the cortical surface.
This function will also try to add patch info for the source space.
It will only occur if the ``dist_limit`` is sufficiently high that all
points on the surface are within ``dist_limit`` of a point in the
source space.
Parameters
----------
src : instance of SourceSpaces
The source spaces to compute distances for.
dist_limit : float
The upper limit of distances to include (in meters).
Note: if limit < np.inf, scipy > 0.13 (bleeding edge as of
10/2013) must be installed.
n_jobs : int
Number of jobs to run in parallel. Will only use (up to) as many
cores as there are source spaces.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
src : instance of SourceSpaces
The original source spaces, with distance information added.
The distances are stored in src[n]['dist'].
Note: this function operates in-place.
Notes
-----
Requires scipy >= 0.11 (> 0.13 for `dist_limit < np.inf`).
This function can be memory- and CPU-intensive. On a high-end machine
(2012) running 6 jobs in parallel, an ico-5 (10242 per hemi) source space
takes about 10 minutes to compute all distances (`dist_limit = np.inf`).
With `dist_limit = 0.007`, computing distances takes about 1 minute.
We recommend computing distances once per source space and then saving
the source space to disk, as the computed distances will automatically be
stored along with the source space data for future use.
"""
from scipy.sparse.csgraph import dijkstra
n_jobs = check_n_jobs(n_jobs)
src = _ensure_src(src)
if not np.isscalar(dist_limit):
raise ValueError('limit must be a scalar, got %s' % repr(dist_limit))
if not check_version('scipy', '0.11'):
raise RuntimeError('scipy >= 0.11 must be installed (or > 0.13 '
                           'if dist_limit < np.inf)')
if not all(s['type'] == 'surf' for s in src):
raise RuntimeError('Currently all source spaces must be of surface '
'type')
if dist_limit < np.inf:
# can't do introspection on dijkstra function because it's Cython,
# so we'll just try quickly here
try:
dijkstra(sparse.csr_matrix(np.zeros((2, 2))), limit=1.0)
except TypeError:
raise RuntimeError('Cannot use "limit < np.inf" unless scipy '
'> 0.13 is installed')
parallel, p_fun, _ = parallel_func(_do_src_distances, n_jobs)
min_dists = list()
min_idxs = list()
logger.info('Calculating source space distances (limit=%s mm)...'
% (1000 * dist_limit))
for s in src:
connectivity = mesh_dist(s['tris'], s['rr'])
d = parallel(p_fun(connectivity, s['vertno'], r, dist_limit)
for r in np.array_split(np.arange(len(s['vertno'])),
n_jobs))
# deal with indexing so we can add patch info
min_idx = np.array([dd[1] for dd in d])
min_dist = np.array([dd[2] for dd in d])
midx = np.argmin(min_dist, axis=0)
range_idx = np.arange(len(s['rr']))
min_dist = min_dist[midx, range_idx]
min_idx = min_idx[midx, range_idx]
min_dists.append(min_dist)
min_idxs.append(min_idx)
# now actually deal with distances, convert to sparse representation
d = np.concatenate([dd[0] for dd in d]).ravel() # already float32
idx = d > 0
d = d[idx]
i, j = np.meshgrid(s['vertno'], s['vertno'])
i = i.ravel()[idx]
j = j.ravel()[idx]
d = sparse.csr_matrix((d, (i, j)),
shape=(s['np'], s['np']), dtype=np.float32)
s['dist'] = d
s['dist_limit'] = np.array([dist_limit], np.float32)
# Let's see if our distance was sufficient to allow for patch info
if not any(np.any(np.isinf(md)) for md in min_dists):
# Patch info can be added!
for s, min_dist, min_idx in zip(src, min_dists, min_idxs):
s['nearest'] = min_idx
s['nearest_dist'] = min_dist
_add_patch_info(s)
else:
logger.info('Not adding patch information, dist_limit too small')
return src
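# A minimal usage sketch (the subject name, spacing, and 7 mm limit below are
# illustrative, not required values):
#
#     src = setup_source_space('sample', spacing='oct6')
#     src = add_source_space_distances(src, dist_limit=0.007, n_jobs=2)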
def _do_src_distances(con, vertno, run_inds, limit):
"""Compute source space distances in chunks."""
from scipy.sparse.csgraph import dijkstra
if limit < np.inf:
func = partial(dijkstra, limit=limit)
else:
func = dijkstra
chunk_size = 20 # save memory by chunking (only a little slower)
lims = np.r_[np.arange(0, len(run_inds), chunk_size), len(run_inds)]
n_chunks = len(lims) - 1
# eventually we want this in float32, so save memory by only storing 32-bit
d = np.empty((len(run_inds), len(vertno)), np.float32)
min_dist = np.empty((n_chunks, con.shape[0]))
min_idx = np.empty((n_chunks, con.shape[0]), np.int32)
range_idx = np.arange(con.shape[0])
for li, (l1, l2) in enumerate(zip(lims[:-1], lims[1:])):
idx = vertno[run_inds[l1:l2]]
out = func(con, indices=idx)
midx = np.argmin(out, axis=0)
min_idx[li] = idx[midx]
min_dist[li] = out[midx, range_idx]
d[l1:l2] = out[:, vertno]
midx = np.argmin(min_dist, axis=0)
min_dist = min_dist[midx, range_idx]
min_idx = min_idx[midx, range_idx]
d[d == np.inf] = 0 # scipy will give us np.inf for uncalc. distances
return d, min_idx, min_dist
def get_volume_labels_from_aseg(mgz_fname, return_colors=False):
"""Return a list of names and colors of segmented volumes.
Parameters
----------
mgz_fname : str
Filename to read. Typically aseg.mgz or some variant in the freesurfer
pipeline.
return_colors : bool
        If True, also return the label colors.
Returns
-------
label_names : list of str
The names of segmented volumes included in this mgz file.
    label_colors : list of list
        The RGBA colors of the labels included in this mgz file.
Notes
-----
.. versionadded:: 0.9.0
"""
import nibabel as nib
# Read the mgz file using nibabel
mgz_data = nib.load(mgz_fname).get_data()
    # Get the unique label ids and look up their names and colors
    lut = _get_lut()
    ids = np.unique(mgz_data)
    label_names = [lut[lut['id'] == ii]['name'][0] for ii in ids]
    label_colors = [[lut[lut['id'] == ii]['R'][0],
                     lut[lut['id'] == ii]['G'][0],
                     lut[lut['id'] == ii]['B'][0],
                     lut[lut['id'] == ii]['A'][0]]
                    for ii in ids]
order = np.argsort(label_names)
label_names = [label_names[k] for k in order]
label_colors = [label_colors[k] for k in order]
if return_colors:
return label_names, label_colors
else:
return label_names
def get_volume_labels_from_src(src, subject, subjects_dir):
"""Return a list of Label of segmented volumes included in the src space.
Parameters
----------
src : instance of SourceSpaces
The source space containing the volume regions
    subject : str
        Subject name.
    subjects_dir : str
        FreeSurfer subjects directory.
Returns
-------
labels_aseg : list of Label
List of Label of segmented volumes included in src space.
"""
import os.path as op
import numpy as np
from . import Label
from . import get_volume_labels_from_aseg
# Read the aseg file
aseg_fname = op.join(subjects_dir, subject, 'mri', 'aseg.mgz')
if not op.isfile(aseg_fname):
raise IOError('aseg file "%s" not found' % aseg_fname)
all_labels_aseg = get_volume_labels_from_aseg(aseg_fname,
return_colors=True)
# Create a list of Label
if len(src) < 2:
raise ValueError('No vol src space in src')
if any(np.any(s['type'] != 'vol') for s in src[2:]):
raise ValueError('source spaces have to be of vol type')
labels_aseg = list()
for nr in range(2, len(src)):
vertices = src[nr]['vertno']
pos = src[nr]['rr'][src[nr]['vertno'], :]
roi_str = src[nr]['seg_name']
        try:
            ind = all_labels_aseg[0].index(roi_str)
            color = np.array(all_labels_aseg[1][ind]) / 255.0
        except ValueError:
            color = None  # unknown segment name; let Label pick a default color
if 'left' in roi_str.lower():
hemi = 'lh'
roi_str = roi_str.replace('Left-', '') + '-lh'
elif 'right' in roi_str.lower():
hemi = 'rh'
roi_str = roi_str.replace('Right-', '') + '-rh'
else:
hemi = 'both'
label = Label(vertices=vertices, pos=pos, hemi=hemi,
name=roi_str, color=color,
subject=subject)
labels_aseg.append(label)
return labels_aseg
def _get_hemi(s):
"""Get a hemisphere from a given source space."""
if s['type'] != 'surf':
raise RuntimeError('Only surface source spaces supported')
if s['id'] == FIFF.FIFFV_MNE_SURF_LEFT_HEMI:
return 'lh', 0, s['id']
elif s['id'] == FIFF.FIFFV_MNE_SURF_RIGHT_HEMI:
return 'rh', 1, s['id']
else:
raise ValueError('unknown surface ID %s' % s['id'])
def _get_vertex_map_nn(fro_src, subject_from, subject_to, hemi, subjects_dir,
to_neighbor_tri=None):
"""Get a nearest-neigbor vertex match for a given hemi src.
The to_neighbor_tri can optionally be passed in to avoid recomputation
if it's already available.
"""
# adapted from mne_make_source_space.c, knowing accurate=False (i.e.
# nearest-neighbor mode should be used)
logger.info('Mapping %s %s -> %s (nearest neighbor)...'
% (hemi, subject_from, subject_to))
regs = [op.join(subjects_dir, s, 'surf', '%s.sphere.reg' % hemi)
for s in (subject_from, subject_to)]
reg_fro, reg_to = [read_surface(r, return_dict=True)[-1] for r in regs]
if to_neighbor_tri is not None:
reg_to['neighbor_tri'] = to_neighbor_tri
if 'neighbor_tri' not in reg_to:
reg_to['neighbor_tri'] = _triangle_neighbors(reg_to['tris'],
reg_to['np'])
morph_inuse = np.zeros(len(reg_to['rr']), bool)
best = np.zeros(fro_src['np'], int)
ones = _compute_nearest(reg_to['rr'], reg_fro['rr'][fro_src['vertno']])
for v, one in zip(fro_src['vertno'], ones):
# if it were actually a proper morph map, we would do this, but since
# we know it's nearest neighbor list, we don't need to:
# this_mm = mm[v]
# one = this_mm.indices[this_mm.data.argmax()]
if morph_inuse[one]:
# Try the nearest neighbors
neigh = _get_surf_neighbors(reg_to, one) # on demand calc
was = one
one = neigh[np.where(~morph_inuse[neigh])[0]]
if len(one) == 0:
                raise RuntimeError('vertex %d would be used multiple times.'
                                   % was)
one = one[0]
logger.info('Source space vertex moved from %d to %d because of '
'double occupation.' % (was, one))
best[v] = one
morph_inuse[one] = True
return best
@verbose
def morph_source_spaces(src_from, subject_to, surf='white', subject_from=None,
subjects_dir=None, verbose=None):
"""Morph an existing source space to a different subject.
.. warning:: This can be used in place of morphing source estimates for
multiple subjects, but there may be consequences in terms
of dipole topology.
Parameters
----------
src_from : instance of SourceSpaces
Surface source spaces to morph.
subject_to : str
The destination subject.
surf : str
The brain surface to use for the new source space.
subject_from : str | None
The "from" subject. For most source spaces this shouldn't need
to be provided, since it is stored in the source space itself.
subjects_dir : str | None
Path to SUBJECTS_DIR if it is not set in the environment.
verbose : bool | str | int | None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
src : instance of SourceSpaces
The morphed source spaces.
Notes
-----
.. versionadded:: 0.10.0
"""
# adapted from mne_make_source_space.c
src_from = _ensure_src(src_from)
subject_from = _ensure_src_subject(src_from, subject_from)
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
src_out = list()
for fro in src_from:
hemi, idx, id_ = _get_hemi(fro)
to = op.join(subjects_dir, subject_to, 'surf', '%s.%s' % (hemi, surf,))
logger.info('Reading destination surface %s' % (to,))
to = read_surface(to, return_dict=True, verbose=False)[-1]
complete_surface_info(to, copy=False)
# Now we morph the vertices to the destination
# The C code does something like this, but with a nearest-neighbor
# mapping instead of the weighted one::
#
# >>> mm = read_morph_map(subject_from, subject_to, subjects_dir)
#
# Here we use a direct NN calculation, since picking the max from the
# existing morph map (which naively one might expect to be equivalent)
# differs for ~3% of vertices.
best = _get_vertex_map_nn(fro, subject_from, subject_to, hemi,
subjects_dir, to['neighbor_tri'])
for key in ('neighbor_tri', 'tri_area', 'tri_cent', 'tri_nn',
'use_tris'):
del to[key]
to['vertno'] = np.sort(best[fro['vertno']])
to['inuse'] = np.zeros(len(to['rr']), int)
to['inuse'][to['vertno']] = True
to['use_tris'] = best[fro['use_tris']]
to.update(nuse=len(to['vertno']), nuse_tri=len(to['use_tris']),
nearest=None, nearest_dist=None, patch_inds=None, pinfo=None,
dist=None, id=id_, dist_limit=None, type='surf',
coord_frame=FIFF.FIFFV_COORD_MRI, subject_his_id=subject_to,
rr=to['rr'] / 1000.)
src_out.append(to)
logger.info('[done]\n')
info = dict(working_dir=os.getcwd(),
command_line=_get_call_line(in_verbose=True))
return SourceSpaces(src_out, info=info)
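# A minimal usage sketch (the file and subject names are illustrative):
#
#     src_from = read_source_spaces('fsaverage-oct6-src.fif')
#     src_sample = morph_source_spaces(src_from, subject_to='sample')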
@verbose
def _get_morph_src_reordering(vertices, src_from, subject_from, subject_to,
subjects_dir=None, verbose=None):
"""Get the reordering indices for a morphed source space.
Parameters
----------
vertices : list
The vertices for the left and right hemispheres.
src_from : instance of SourceSpaces
The original source space.
subject_from : str
The source subject.
subject_to : str
The destination subject.
subjects_dir : string, or None
Path to SUBJECTS_DIR if it is not set in the environment.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
data_idx : ndarray, shape (n_vertices,)
The array used to reshape the data.
from_vertices : list
The right and left hemisphere vertex numbers for the "from" subject.
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
from_vertices = list()
data_idxs = list()
offset = 0
for ii, hemi in enumerate(('lh', 'rh')):
# Get the mapping from the original source space to the destination
# subject's surface vertex numbers
best = _get_vertex_map_nn(src_from[ii], subject_from, subject_to,
hemi, subjects_dir)
full_mapping = best[src_from[ii]['vertno']]
# Tragically, we might not have all of our vertno left (e.g. because
# some are omitted during fwd calc), so we must do some indexing magic:
# From all vertices, a subset could be chosen by fwd calc:
used_vertices = np.in1d(full_mapping, vertices[ii])
from_vertices.append(src_from[ii]['vertno'][used_vertices])
remaining_mapping = full_mapping[used_vertices]
if not np.array_equal(np.sort(remaining_mapping), vertices[ii]) or \
not np.in1d(vertices[ii], full_mapping).all():
raise RuntimeError('Could not map vertices, perhaps the wrong '
'subject "%s" was provided?' % subject_from)
# And our data have been implicitly remapped by the forced ascending
# vertno order in source spaces
implicit_mapping = np.argsort(remaining_mapping) # happens to data
data_idx = np.argsort(implicit_mapping) # to reverse the mapping
data_idx += offset # hemisphere offset
data_idxs.append(data_idx)
offset += len(implicit_mapping)
data_idx = np.concatenate(data_idxs)
# this one is really just a sanity check for us, should never be violated
# by users
assert np.array_equal(np.sort(data_idx),
np.arange(sum(len(v) for v in vertices)))
return data_idx, from_vertices
def _compare_source_spaces(src0, src1, mode='exact', nearest=True,
dist_tol=1.5e-3):
"""Compare two source spaces.
Note: this function is also used by forward/tests/test_make_forward.py
"""
from numpy.testing import (assert_allclose, assert_array_equal,
assert_equal, assert_)
from scipy.spatial.distance import cdist
if mode != 'exact' and 'approx' not in mode: # 'nointerp' can be appended
raise RuntimeError('unknown mode %s' % mode)
for si, (s0, s1) in enumerate(zip(src0, src1)):
# first check the keys
a, b = set(s0.keys()), set(s1.keys())
assert_equal(a, b, str(a ^ b))
for name in ['nuse', 'ntri', 'np', 'type', 'id']:
assert_equal(s0[name], s1[name], name)
for name in ['subject_his_id']:
if name in s0 or name in s1:
assert_equal(s0[name], s1[name], name)
for name in ['interpolator']:
if name in s0 or name in s1:
diffs = (s0['interpolator'] - s1['interpolator']).data
if len(diffs) > 0 and 'nointerp' not in mode:
                    # 10%
assert_(np.sqrt(np.mean(diffs ** 2)) < 0.10, name)
for name in ['nn', 'rr', 'nuse_tri', 'coord_frame', 'tris']:
if s0[name] is None:
assert_(s1[name] is None, name)
else:
if mode == 'exact':
assert_array_equal(s0[name], s1[name], name)
else: # 'approx' in mode
atol = 1e-3 if name == 'nn' else 1e-4
assert_allclose(s0[name], s1[name], rtol=1e-3, atol=atol,
err_msg=name)
for name in ['seg_name']:
if name in s0 or name in s1:
assert_equal(s0[name], s1[name], name)
# these fields will exist if patch info was added
if nearest:
for name in ['nearest', 'nearest_dist', 'patch_inds']:
if s0[name] is None:
assert_(s1[name] is None, name)
else:
assert_array_equal(s0[name], s1[name])
for name in ['pinfo']:
if s0[name] is None:
assert_(s1[name] is None, name)
else:
assert_(len(s0[name]) == len(s1[name]), name)
for p1, p2 in zip(s0[name], s1[name]):
assert_(all(p1 == p2), name)
if mode == 'exact':
for name in ['inuse', 'vertno', 'use_tris']:
assert_array_equal(s0[name], s1[name], err_msg=name)
for name in ['dist_limit']:
assert_(s0[name] == s1[name], name)
for name in ['dist']:
if s0[name] is not None:
assert_equal(s1[name].shape, s0[name].shape)
assert_(len((s0['dist'] - s1['dist']).data) == 0)
else: # 'approx' in mode:
# deal with vertno, inuse, and use_tris carefully
for ii, s in enumerate((s0, s1)):
assert_array_equal(s['vertno'], np.where(s['inuse'])[0],
'src%s[%s]["vertno"] != '
'np.where(src%s[%s]["inuse"])[0]'
% (ii, si, ii, si))
assert_equal(len(s0['vertno']), len(s1['vertno']))
agreement = np.mean(s0['inuse'] == s1['inuse'])
assert_(agreement >= 0.99, "%s < 0.99" % agreement)
if agreement < 1.0:
# make sure mismatched vertno are within 1.5mm
v0 = np.setdiff1d(s0['vertno'], s1['vertno'])
v1 = np.setdiff1d(s1['vertno'], s0['vertno'])
dists = cdist(s0['rr'][v0], s1['rr'][v1])
assert_allclose(np.min(dists, axis=1), np.zeros(len(v0)),
atol=dist_tol, err_msg='mismatched vertno')
if s0['use_tris'] is not None: # for "spacing"
assert_array_equal(s0['use_tris'].shape, s1['use_tris'].shape)
else:
assert_(s1['use_tris'] is None)
assert_(np.mean(s0['use_tris'] == s1['use_tris']) > 0.99)
# The above "if s0[name] is not None" can be removed once the sample
# dataset is updated to have a source space with distance info
for name in ['working_dir', 'command_line']:
if mode == 'exact':
assert_equal(src0.info[name], src1.info[name])
else: # 'approx' in mode:
if name in src0.info:
assert_(name in src1.info, '"%s" missing' % name)
else:
assert_(name not in src1.info, '"%s" should not exist' % name)
|
teonlamont/mne-python
|
mne/source_space.py
|
Python
|
bsd-3-clause
|
112114
|
["Mayavi"]
|
8ae24a65da703af8521caa06e780849d9494cd6482bb4dd4f9e1c9831134be6f
|
#!/usr/bin/env python
import cgi
import sqlite3
import random
with open("../templates/header.html", "r") as header:
print header.read()
with open("../templates/navbar.html", "r") as navbar:
print navbar.read()
#
# return HTML string for ingredient
#
def getIngredientHTML(index):
onSelectionString = ""
offSelectionString = ""
selectedString = "checked"
if ingredientRadioOn[index]:
onSelectionString = selectedString
else:
offSelectionString = selectedString
return """
<div class="col-xs-12 col-sm-6 col-md-4">
<div class="input-group">
<span class="input-group-btn">
<button class="btn btn-default" type="button" onclick="clearIngredient({1})">X</button>
</span>
<input type="text" class="form-control" id="ingredient-{1}-string" name="ingredient-{1}-string" value="{0}">
<div class="input-group-addon" title="Recipe must include ingredient">
<input id="ingredient-{1}-on" type="radio" name="ingredient-{1}" class="radio-button-default" value="on" {2}>
<label for="ingredient-{1}-on"><i class="fa fa-check-circle fa-lg"></i></label>
</div>
<div class="input-group-addon" title="Recipe cannot include ingredient">
<input id="ingredient-{1}-off" type="radio" name="ingredient-{1}" value="off" {3}>
<label for="ingredient-{1}-off"><i class="fa fa-ban fa-lg"></i></label>
</div>
</div>
</div>
""".format(ingredientNames[index], index, onSelectionString, offSelectionString)
# all ingredient labels
ingredientLabels = ["dairy", "cheese", "meat", "fish", "seafood", "poultry", "main protein", "vegetable", "fruit", "sugar", "sauce", "condiment",
"soup", "nut", "alcohol", "spice or herb", "spicy", "grain", "pasta", "wrapped meal", "pasta dish", "vegetable dish", "drink"]
#
# return HTML string for ingredient label
#
def getIngredientLabelHTML(index):
eitherSelectionString = ""
onSelectionString = ""
offSelectionString = ""
selectedString = "checked"
classString = ""
if ingredientLabelValues[index] == "":
eitherSelectionString = selectedString
classString = "filter-either"
elif ingredientLabelValues[index] == "on":
onSelectionString = selectedString
classString = "filter-on"
elif ingredientLabelValues[index] == "off":
offSelectionString = selectedString
classString = "filter-off"
return """
<div class="col-xs-12 col-sm-6 col-md-4">
<div class="radio-group">
<span title="Ingredient type is optional">
<input class="radio-button-default" id="ingredient-label-{0}-either" type="radio" name="ingredient-label-{0}" value="" {2}>
<label for="ingredient-label-{0}-either"><i class="fa fa-random fa-lg"></i></label>
</span>
<span title="Recipe must include ingredient type">
<input id="ingredient-label-{0}-on" type="radio" name="ingredient-label-{0}" value="on" {3}>
<label for="ingredient-label-{0}-on"><i class="fa fa-check-circle fa-lg"></i></label>
</span>
<span title="Recipe cannot include ingredient type">
<input id="ingredient-label-{0}-off" type="radio" name="ingredient-label-{0}" value="off" {4}>
<label for="ingredient-label-{0}-off"><i class="fa fa-ban fa-lg"></i></label>
</span>
<span id="ingredient-label-{0}-string" class="{5}">{1}</span>
</div>
</div>
""".format(ingredientLabels[index].replace(" ", "-"), ingredientLabels[index], \
eitherSelectionString, onSelectionString, offSelectionString, classString)
# all recipe labels (copy the ingredient labels so the appends below do not
# also mutate ingredientLabels)
recipeLabels = list(ingredientLabels)
recipeLabels.append("breakfast")
recipeLabels.append("dessert")
recipeLabels.append("bread")
#
# return HTML string for recipe label
#
def getRecipeLabelHTML(index):
eitherSelectionString = ""
onSelectionString = ""
offSelectionString = ""
selectedString = "checked"
classString = ""
if recipeLabelValues[index] == "":
eitherSelectionString = selectedString
classString = "filter-either"
elif recipeLabelValues[index] == "on":
onSelectionString = selectedString
classString = "filter-on"
elif recipeLabelValues[index] == "off":
offSelectionString = selectedString
classString = "filter-off"
return """
<div class="col-xs-12 col-sm-6 col-md-4">
<div class="radio-group">
<span title="Recipe type optional">
<input class="radio-button-default" id="recipe-label-{0}-either" type="radio" name="recipe-label-{0}" value="" {2}>
<label for="recipe-label-{0}-either"><i class="fa fa-random fa-lg"></i></label>
</span>
<span title="Recipe must be of type">
<input id="recipe-label-{0}-on" type="radio" name="recipe-label-{0}" value="on" {3}>
<label for="recipe-label-{0}-on"><i class="fa fa-check-circle fa-lg"></i></label>
</span>
<span title="Recipe cannot be of type">
<input id="recipe-label-{0}-off" type="radio" name="recipe-label-{0}" value="off" {4}>
<label for="recipe-label-{0}-off"><i class="fa fa-ban fa-lg"></i></label>
</span>
<span id="recipe-label-{0}-string" class="{5}">{1}</span>
</div>
</div>
""".format(recipeLabels[index].replace(" ", "-"), recipeLabels[index], \
eitherSelectionString, onSelectionString, offSelectionString, classString)
#
# print search form
#
def displaySearch(searchString):
print("""
<h2 class="large-margin-top" id="search-header">New Recipe Search</h2>
<form role="form" method="post" action="index.py#recipe-header" id="recipe-search-form">
<ul id="ingredient-tabs" class="nav nav-tabs nav-justified" role="tablist">
<li role="presentation" class="active">
<a href="#ingredients" aria-controls="ingredients" role="tab" data-toggle="tab">Ingredients</a>
</li>
<li role="presentation">
<a href="#ingredient-labels" aria-controls="ingredient-labels" role="tab" data-toggle="tab">Ingredient Types</a>
</li>
<li role="presentation">
<a href="#recipe-labels" aria-controls="recipe-labels" role="tab" data-toggle="tab">Recipe Types</a>
</li>
</ul>
<div class="tab-content">
<div role="tabpanel" class="tab-pane active" id="ingredients">
<div class="row">""")
for i in range(0, numIngredientInputs):
print(getIngredientHTML(i))
print('</div></div><div role="tabpanel" class="tab-pane" id="ingredient-labels"><div class="row">')
for i in range(0, len(ingredientLabels)):
print(getIngredientLabelHTML(i))
print('</div></div><div role="tabpanel" class="tab-pane" id="recipe-labels"><div class="row">')
for i in range(0, len(recipeLabels)):
print(getRecipeLabelHTML(i))
print("""
</div>
</div>
</div>
<div class="input-row">
<div class="input-group">
<input type="text" class="form-control" id="recipe-input" name="recipe-input" placeholder="Enter recipe name (optional)" value=\"""" + searchString + """\">
<div class="input-group-btn">
<button type="submit" class="btn btn-primary">Find recipes</button>
<button type="button" class="btn btn-default dropdown-toggle" data-toggle="dropdown" aria-haspopup="true"
aria-expanded="false">Reset <span class="caret"></span></button>
<ul class="dropdown-menu dropdown-menu-right">
<li><a href="#" onclick="clearSearch()">Clear Search</a></li>
<li><a href="#" onclick="resetFilters()">Reset Filters</a></li>
<li><a href="#" onclick="resetAll()">Reset All</a></li>
</ul>
</div>
</div>
</div>
<div class="hidden">
<input type="text" id="recipe-selection" name="recipe-selection">
<input type="text" id="transformation" name="transformation">
</div>
</form>
""")
#
# if string list is empty, return empty string (i.e. don't add a header), otherwise join strings in list with commas,
# possibly add " and" after/instead of final comma, and pad with <h4> tag
#
def formatListOfStringsAsHeader(headerString, stringList):
count = len(stringList)
if count == 0:
return ""
# add list of strings to header string
headerString += ", ".join(stringList)
# get index of final comma to add " and"
index = headerString.rfind(",", 0, -1)
if count > 2:
# insert " and" immediately after last comma
return "<h4>" + headerString[:index+1] + " and" + headerString[index+1:] + "</h4>"
elif count == 2:
# replace last comma with " and"
return "<h4>" + headerString[:index] + " and" + headerString[index+1:] + "</h4>"
else:
return "<h4>" + headerString + "</h4>"
#
# print list of all recipes and ingredients
#
def displaySearchResults(searchString):
# get lists of included and excluded ingredients
includeIngredients = []
excludeIngredients = []
for i in range(0, numIngredientInputs):
ingredientName = ingredientNames[i]
if ingredientName != "":
# add ingredient to list of included/excluded ingredients based on radio button
if ingredientRadioOn[i]:
includeIngredients.append(ingredientName)
else:
excludeIngredients.append(ingredientName)
# get lists of included and excluded ingredient labels
includeIngredientLabels = []
excludeIngredientLabels = []
for i in range(0, len(ingredientLabels)):
ingredientLabel = ingredientLabels[i]
if ingredientLabelValues[i] == "on":
includeIngredientLabels.append(ingredientLabel)
elif ingredientLabelValues[i] == "off":
excludeIngredientLabels.append(ingredientLabel)
# get lists of included and excluded recipe labels
includeRecipeLabels = []
excludeRecipeLabels = []
for i in range(0, len(recipeLabels)):
recipeLabel = recipeLabels[i]
if recipeLabelValues[i] == "on":
includeRecipeLabels.append(recipeLabel)
elif recipeLabelValues[i] == "off":
excludeRecipeLabels.append(recipeLabel)
# initiate query string
queryString = "SELECT Name FROM Recipes WHERE "
# add search input to query string
words = None
if searchString != "":
# split search string into words
words = searchString.split(" ")
# remove "", caused by extra spaces
while "" in words:
words.remove("")
# get query "WHERE" clause for each word
for word in words:
queryString += "Name Like '%" + word.replace("'", "''") + "%' AND "
queryString += "Id IN ( SELECT Id FROM Recipes "
# append excluded ingredient labels to query string
if len(excludeIngredientLabels) > 0:
queryString += "EXCEPT "
queryString += "SELECT Ingredients.RecipeId FROM Ingredients CROSS JOIN IngredientLabels ON IngredientLabels.IngredientId = Ingredients.Id WHERE IngredientLabels.Label IN ('{0}') ".format("', '".join(excludeIngredientLabels))
# append excluded recipe labels to query string
if len(excludeRecipeLabels) > 0:
queryString += "EXCEPT "
queryString += "SELECT Labels.RecipeId FROM Labels WHERE Labels.Label IN ('{0}') ".format("', '".join(excludeRecipeLabels))
# append excluded ingredients to query string
for excludeIngredient in excludeIngredients:
queryString += "EXCEPT "
queryString += "SELECT Ingredients.RecipeId FROM Ingredients WHERE Ingredients.Name LIKE '%{0}%' COLLATE NOCASE ".format(excludeIngredient.replace("'", "''"))
# append included ingredient labels to query string
for includeIngredientLabel in includeIngredientLabels:
queryString += "INTERSECT "
queryString += "SELECT Ingredients.RecipeId FROM Ingredients CROSS JOIN IngredientLabels ON IngredientLabels.IngredientId = Ingredients.Id WHERE IngredientLabels.Label = '{0}' ".format(includeIngredientLabel)
# append included recipe labels to query string
for includeRecipeLabel in includeRecipeLabels:
queryString += "INTERSECT "
queryString += "SELECT Labels.RecipeId FROM Labels WHERE Labels.Label = '{0}' ".format(includeRecipeLabel)
# append included ingredients to query string
for includeIngredient in includeIngredients:
queryString += "INTERSECT "
queryString += "SELECT Ingredients.RecipeId FROM Ingredients WHERE Ingredients.Name LIKE '%{0}%' COLLATE NOCASE ".format(includeIngredient.replace("'", "''"))
queryString += ") ORDER BY Name ASC"
# TODO for debugging SQLite query
# print("<b>{0}</b>".format(queryString))
# open database and get cursor
connection = sqlite3.connect('recipes.db')
cursor = connection.cursor()
# perform query and get recipes
cursor.execute(queryString)
allRecipes = cursor.fetchall()
# close connection
connection.close()
# get search result header
searchResultString = "All Recipes"
if words is not None:
searchResultString = ""
for word in words:
searchResultString += word.capitalize() + " "
searchResultString += "Recipes"
# print recipe names
print("""
<div class="row large-margin-top">
<div class="col-xs-12">
<h2>""" + searchResultString + """</h2>
<div class="center">""")
# print included ingredients header string
print(formatListOfStringsAsHeader("Containing ", includeIngredients))
# print excluded ingredients header string
print(formatListOfStringsAsHeader("Without ", excludeIngredients))
# print included ingredient labels header string
print(formatListOfStringsAsHeader("Containing ingredient types ", includeIngredientLabels))
# print excluded ingredient labels header string
print(formatListOfStringsAsHeader("Without ingredient types ", excludeIngredientLabels))
# print included recipe labels header string
print(formatListOfStringsAsHeader("Containing recipe types ", includeRecipeLabels))
# print excluded recipe labels header string
print(formatListOfStringsAsHeader("Without recipe types ", excludeRecipeLabels))
# print table opening tag
print('</div><table class="table table-striped">')
# print each recipe as table row
count=0
for recipeName in allRecipes:
        recipeName = recipeName[0].encode('utf-8')
print("""
<tr>
<td>""" + recipeName + """</td>
<td class="text-right">
<button class="btn btn-default" onclick="viewRecipe('""" + recipeName.replace("'", "\\'") + """')">View Recipe</button>
</td>
</tr>
""")
count+=1
        # display a max of 99 recipes
if count == 99:
break
# print table closing tag
print("</table></div></div>")
    # tell user to narrow search if the results were truncated at 99
if count == 99:
print("""
<div class="panel panel-warning">
<div class="panel-heading">
<h3 class="panel-title">Please narrow search</h3>
</div>
<div class="panel-body">
                The search results contain too many recipes to display (99+). Please narrow the search to view all results.
</div>
</div>
""")
#
# return recipe object loaded from database
#
def loadRecipe(recipeName):
# open database and get cursor
connection = sqlite3.connect('recipes.db')
connection.text_factory = str
cursor = connection.cursor()
cursor.execute("SELECT * FROM Recipes WHERE Name=?", (recipeName,))
recipeArray = cursor.fetchone()
    if recipeArray is None:
return None
recipe = {}
recipe["id"] = recipeArray[0]
recipe["name"] = recipeArray[1]
recipe["servings"] = recipeArray[2]
recipe["calories"] = recipeArray[3]
cursor.execute("SELECT Direction FROM Directions WHERE RecipeId=? ORDER BY Step ASC", (recipe["id"],))
directionsArray = cursor.fetchall()
recipe["directions"] = []
for directionTuple in directionsArray:
recipe["directions"].append(directionTuple[0])
cursor.execute("SELECT Footnote FROM Footnotes WHERE RecipeId=?", (recipe["id"],))
recipe["footnotes"] = cursor.fetchall()
cursor.execute("SELECT Label FROM Labels WHERE RecipeId=?", (recipe["id"],))
recipe["labels"] = cursor.fetchall()
cursor.execute("SELECT * FROM Ingredients WHERE RecipeId=? ORDER BY Name ASC", (recipe["id"],))
ingredientsArray = cursor.fetchall()
recipe["ingredients"] = []
for ingredientItem in ingredientsArray:
ingredient = {}
ingredient["ingredient"] = ingredientItem[2]
ingredient["amount"] = ingredientItem[3]
ingredient["unit"] = ingredientItem[4]
cursor.execute("SELECT Description FROM IngredientDescriptions WHERE IngredientId=?", (ingredientItem[0],))
data = cursor.fetchall()
ingredient["descriptions"]=[elt[0] for elt in data]
cursor.execute("SELECT Label FROM IngredientLabels WHERE IngredientId=?", (ingredientItem[0],))
data = cursor.fetchall()
ingredient["labels"]=[elt[0] for elt in data]
recipe["ingredients"].append(ingredient)
return recipe
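# a minimal usage sketch (the recipe name is hypothetical):
#
#     recipe = loadRecipe("Banana Bread")
#     if recipe is not None:
#         displayRecipe(recipe)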
#
# print single recipe
#
def displayRecipe(recipe):
transformationString = ""
if recipeTransformation != "":
transformationString = "<h4>Transformation: {0}</h4>".format(recipeTransformation)
# print recipe, servings, and calories
print("""
<div class="row center" id="recipe-header">
<div class="col-xs-12">
<h2>{0}</h2>
{4}
<div>Servings: {1}</div>
<div>Calories per serving: {2}</div>
<div><a target=blank href='http://allrecipes.com/recipe/{3}'>View on allrecipes.com</a></div>
</div>
</div>
<h4>Ingredients</h4>
<div class="table-responsive">
<table id="ingredients-table" class="table table-striped">
<tr>
                <th>Ingredient</th>
                <th>#</th>
                <th>Unit</th>
                <th>Description</th>
                <th>Labels</th>
</tr>""".format(recipe["name"], recipe["servings"], recipe["calories"], recipe["id"], transformationString))
# print list of ingredients
for ingredient in recipe["ingredients"]:
# print ingredient
print("""
<tr>
<td>{0}</td>
<td>{1:10.2f}</td>
<td>{2}</td>
<td>{3}</td>
<td>{4}</td>
</tr>
""".format(ingredient["ingredient"], ingredient["amount"], ingredient["unit"], ", ".join(ingredient["descriptions"]), ", ".join(ingredient["labels"])))
# print list of directions
print("""
</table>
</div>
<div class="row">
<div class="col-xs-12">
<h4>Directions</h4>
<ol>""")
for direction in recipe["directions"]:
print("<li>%s</li>" % (direction))
print("""
</ol>
</div>
</div>""")
    # if there is at least one footnote, print list of footnotes
if len(recipe["footnotes"]) > 0:
# print row and list opening tags
print("""
<div class="row">
<div class="col-xs-12">
<h4>Footnotes</h4>
<ul>""")
# print each footnote as list item
for footnote in recipe["footnotes"]:
print("<li>" + footnote[0] + "</li>")
# print row and list closing tags
print("""
</ul>
</div>
</div>""")
# print recipe transformations row and select opening tags
print("""
<div class="row">
<div class="col-xs-12">
<h4>Transform Recipe</h4>
<div class="input-group">
<select class="form-control" id="transformation-select" name="transformation-select">
""")
# print empty value with "None" as option for resetting transformation
print("<option value=''>None</option>")
# print each possible transformation as select option
transformations = ['American - New England', 'Chinese', 'French', 'German', 'Indian', 'Indonesian', 'Italian', 'Japanese',
'Mexican', 'Spanish', 'Thai', 'Turkish', 'Vegan', 'Vegetarian']
for transformation in transformations:
print("<option>"+transformation+"</option>")
# print recipe transformations row and select closing tags
print("""
</select>
<span class="input-group-btn">
<button class="btn btn-default" onclick="viewAndTransformRecipe('"""+ recipe["name"].replace("'", "\\'") + """')">Transform</button>
</span>
</div>
</div>
</div>
""")
#
# transform amount to cups based on amount and original unit
#
def transformToCups(amount, unit):
    # convert the given amount to cups; the original unit determines the factor
    if unit == "cups":
        return amount
    elif unit == "gallons":
        return amount * 16
    elif unit == "quarts":
        return amount * 4
    elif unit == "pints":
        return amount * 2
    elif unit == "ounces":
        # 8 fluid ounces per cup
        return amount / 8.0
    elif unit == "tablespoons":
        # 16 tablespoons per cup
        return amount / 16.0
    elif unit == "teaspoons":
        # 48 teaspoons per cup
        return amount / 48.0
    else:
        # unknown unit: pass the amount through unchanged
        return amount
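# e.g. transformToCups(2, "tablespoons") returns 0.125 and
# transformToCups(1, "quarts") returns 4; unknown units pass through unchanged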
#
# add ingredient to recipe, used for transformations
#
def addIngredientToRecipe(recipe, ingredientName, ingredientLabel):
totalLabelQuantity = 0
labelCount = 0
lastIngredientWithLabel = None
for ingredient in recipe["ingredients"]:
if ingredientLabel in ingredient["labels"]:
totalLabelQuantity += transformToCups(ingredient["amount"], ingredient["unit"])
labelCount += 1
lastIngredientWithLabel = ingredient["ingredient"]
# if no ingredients have the same label, don't add ingredient
if labelCount == 0:
return recipe
newIngredient = {}
newIngredient["ingredient"] = ingredientName
newIngredient["labels"] = [ingredientLabel]
newIngredient["descriptions"] = ["chopped"]
newIngredient["amount"] = totalLabelQuantity / labelCount
newIngredient["unit"] = "cups"
recipe["ingredients"].append(newIngredient)
for description in recipe["descriptions"]:
description[0].replace(lastIngredientWithLabel, lastIngredientWithLabel + " and " + ingredientName)
#
# function for transforming recipe
#
def transformRecipe(recipe, transformation):
# transform to vegetarian or vegan
if transformation == "Vegetarian" or transformation == "Vegan":
decreasedProteins = 0.0
for ingredient in recipe["ingredients"]:
originalIngredient = ingredient["ingredient"]
substitutionMade = True
if "poulty" in ingredient["labels"]:
ingredient["ingredient"] = "tofu"
elif "meat" in ingredient["labels"]:
ingredient["ingredient"] = "meatty mushrooms"
elif "fish" in ingredient["labels"]:
ingredient["ingredient"] = "mushrooms"
elif "seafood" in ingredient["labels"]:
ingredient["ingredient"] = "walnuts"
else:
substitutionMade = False
if substitutionMade:
decreasedProteins += 1
ingredient["amount"] /= 2.0
ingredient["labels"] = ["main protein"]
for direction in recipe["directions"]:
direction = direction.replace(originalIngredient, ingredient["ingredient"])
if decreasedProteins > 0:
vegetableMultiplier = 1 + decreasedProteins / 4.0
for ingredient in recipe["ingredients"]:
if "vegetable" in ingredient["labels"]:
ingredient["amount"] *= vegetableMultiplier
# transform to only vegan
if transformation == "Vegan":
for ingredient in recipe["ingredients"]:
originalIngredient = ingredient["ingredient"]
substitutionMade = True
if ingredient["ingredient"] == "honey":
ingredient["ingredient"] = "syrup"
elif ingredient["ingredient"] == "eggs":
ingredient["ingredient"] = "soy yogurt"
ingredient["amount"] /= 4.0
ingredient["unit"] = "cups"
elif ingredient["ingredient"] == "butter":
ingredient["ingredient"] = "margarine"
elif "dairy" in ingredient["labels"]:
ingredient["ingredient"] = "soy " + ingredient["ingredient"]
ingredient["labels"].remove("dairy")
else:
substitutionMade = False
if substitutionMade:
for i in range(0, len(recipe["directions"])):
recipe["directions"][i] = recipe["directions"][i].replace(originalIngredient, ingredient["ingredient"])
    # transform to a different cuisine
else:
popularDairy = []
popularMeats = []
popularPoultry = []
popularFish = []
popularSeafoods = []
popularCheeses = []
popularFruits = []
popularVegetables = []
popularSpices = []
popularDessertSpices = []
popularGrains = []
popularMainProteins = []
popularFlavorings = []
popularSauces = []
popularCondiments = []
popularSpicy = []
popularNuts = []
popularAlcohol = []
popularDrinks = []
popularPastas = []
popularCookingLiquids = []
if transformation == 'German':
popularFruits = ['apples', 'plums', 'strawberries', 'cherries']
popularFish = ['trout', 'pike', 'carp', 'tuna', 'mackerel', 'salmon']
popularSpices = [ 'parsley', 'thyme', 'laurel', 'chives', 'black pepper', 'nutmeg', 'caraway', 'basil', 'sage', 'oregano']
popularDessertSpices = ['cardamom', 'anise seed', 'cinnamon']
popularCondiments = ['mustard', 'horseradish']
popularAlcohol = ['beer', 'wine']
popularSpicy = ['horseradish']
elif transformation == 'French':
popularSeafoods = ['sardines', 'mussels', 'oysters', 'shrimp', 'calamari', 'scallops']
popularFish = ['cod', 'tuna', 'salmon', 'trout', 'herring']
popularVegetables = ['green beans', 'carrots', 'leeks', 'turnips', 'eggplants', 'zucchini', 'onions', 'tomatoes', 'mushrooms']
popularMeats = ['beef', 'veal', 'pork', 'lamb', 'horse']
popularPoultry = ['chicken', 'duck', 'goose']
popularFruits = ['oranges', 'tangerines', 'peaches', 'apricots', 'apples', 'pears', 'plums', 'cherries', 'strawberries',
'raspberries', 'blackberries', 'grapes', 'grapefruit', 'currants']
popularSpices = ['tarragon', 'rosemary', 'marjoram', 'lavender', 'thyme', 'fennel', 'sage']
elif transformation == 'Italian':
popularMeats = ['ham', 'sausage', 'pork', 'salami']
popularSeafoods = ['anchovies', 'sardines']
popularFish = ['tuna', 'cod']
popularVegetables = ['artichokes', 'eggplants', 'zucchinis', 'capers', 'olives', 'peppers', 'potatoes', 'corn']
popularPastas = ['penne', 'maccheroni', 'spaghetti', 'linguine', 'fusilli']
popularCookingLiquids = ['olive oil']
elif transformation == 'American - New England':
popularSeafoods = ['lobster', 'squid', 'crab', 'shellfish', 'scallops', 'oysters', 'clams']
popularFish = ['cod', 'salmon', 'flounder', 'haddock', 'bass', 'bluefish', 'tautog']
popularMeats = ['roast beef', 'salami', 'ham', 'moose', 'deer']
popularPoultry = ['turkey']
popularCheeses = ['cheddar', 'provolone']
popularFruits = ['raspberries', 'blueberries', 'cranberries', 'grapes', 'cherries']
            popularDessertSpices = ['nutmeg', 'ginger', 'cinnamon', 'cloves', 'allspice']
popularSpices = ['thyme', 'black pepper', 'sea salt', 'sage']
elif transformation == 'Indonesian':
popularGrains = ['rice', 'noodles']
            popularVegetables = ['cabbage', 'cauliflower', 'potato', 'carrot', 'shallots', 'cucumbers', 'spinach', 'corn', 'scallions']
popularSpices = ['garlic', 'black pepper', 'nutmeg', 'clove', 'cinnamon', 'ginger']
popularMainProteins = ['tofu']
            popularPoultry = ['chicken', 'duck', 'pigeon']
popularMeats = ['beef', 'goat', 'venison', 'deer', 'horse']
popularPastas = ['rice', 'noodles']
popularFish = ['tuna', 'mackerel', 'milkfish', 'snapper', 'swordfish', 'shark', 'stingray']
popularSeafoods = ['anchovies', 'squid', 'shrimp', 'crabs', 'mussels']
popularSauces = ['shrimp paste', 'peanut sauce', 'soy sauce']
popularNuts = ['peanuts']
popularDairy = ['coconut milk']
elif transformation == 'Chinese':
popularGrains = ['rice', 'noodles']
popularVegetables = ['cabbage', 'spinach', 'sprouts', 'watercress', 'celery', 'carrots', 'broccoli', 'scallions']
            popularSpices = ['ginger', 'garlic', 'white pepper', 'peppercorns', 'star anise', 'cinnamon', 'fennel', 'cilantro',
                             'parsley', 'cloves']
popularPastas = ['rice', 'noodles']
popularMainProteins = ['soybeans', 'tofu']
popularSauces = ['soy sauce']
popularAlcohol = ['white liquor']
popularDrinks = ['herb tea']
popularCookingLiquids = ['rice vinegar']
elif transformation == 'Indian':
popularFruits = ['mango', 'lemon', 'strawberry', 'orange', 'pineapple']
popularVegetables = ['peas', 'beans']
popularDessertSpices = ['cardamom', 'saffron', 'nutmeg']
popularSpices = ['chilli pepper', 'black mustard seed', 'cardamom', 'cumin', 'ginger', 'garlic', 'cardamom', 'cinnamon', 'clove']
popularPastas = ['rice']
popularMainProteins = ['lentils']
popularAlcohol = ['beer', 'rice beer']
popularDrinks = ['coffee', 'tea']
popularCookingLiquids = ['vegetable oil', 'peanut oil', 'mustard oil', 'coconut oil']
elif transformation == 'Japanese':
popularMeats = ['albacores', 'bass', 'catfish', 'cods', 'fish', 'flounder', 'grouper', 'haddock', 'halibut', 'mahi',
'monkfish', 'salmon', 'shark', 'snapper', 'sole', 'swordfishes', 'trouts', 'tunas', 'bluefish',
'bonito', 'rockfish', 'mackerel', 'naruto', 'drum', 'marlin', 'tilapia', 'carp', 'kingfish',
'mullets', 'whitefish', 'kippers', 'torsk', 'saltfish']
popularPoultry = ['anchovies', 'calamaris', 'clams', 'crabs', 'crabmeat', 'crawfish', 'lobsters', 'mussels',
'oysters', 'prawns', 'scallops', 'seafood', 'shrimps', 'squids', 'snails', 'shellfish', 'caviar']
popularVegetables = ['seaweed', 'greens', 'radishes', 'carrots', 'green beans']
popularPastas = ['rice', 'noodles']
popularSpices = ['miso', 'dashi', 'soy sauce', 'sake', 'mirin', 'vinegar', 'sugar', 'salt']
popularSauces = ['soy sauce']
popularAlcohol = ['beer', 'sake', 'whiskey']
popularDrinks = ['tea']
popularCookingLiquids = ['water']
popularSpicy = ['wasabi']
elif transformation == 'Mexican':
popularMeats = ['beef', 'pork', 'goat', 'sheep', 'venison']
popularPoultry = ['chicken']
popularFruits = ['guava', 'pears', 'sapote', 'mangoes', 'bananas', 'pineapples']
popularVegetables = ['corn', 'chile peppers', 'tomatoes', 'squashes', 'avocados']
popularSpices = ['chili pepper']
popularDessertSpices = ['cocoa']
popularGrains = ['tortillas']
popularMainProteins = ['beans']
popularFlavorings = ['vanilla']
popularSpicy = ['chilis']
popularAlcohol = ['beer', 'tequila']
popularDrinks = ['atole']
elif transformation == 'Spanish':
popularMeats = ['ham', 'lamb', 'bacon', 'sausages', 'pork', 'veal']
popularPoultry = ['goose', 'quail']
popularFish = ['bream', 'bonito', 'cod']
popularSeafoods = ['sardines', 'herring']
popularFruits = ['apples', 'pears', 'peaches', 'oranges', 'apricots']
popularVegetables = ['cabbage', 'olives', 'eggplant', 'bell peppers', 'onion', 'tomato']
popularSpices = ['garlic', 'salt']
popularMainProteins = ['beans']
popularSauces = ['romesco', 'aioli', 'bouillabaisse', 'picada']
popularCondiments = ['mayonnaise']
popularAlcohol = ['anise', 'wine', 'brandy']
popularCookingLiquids = ['olive oil']
elif transformation == 'Thai':
popularMeats = ['pork', 'beef', 'water buffalo']
popularPoultry = ['chicken', 'duck']
popularFish = ['tilapia', 'catfish']
popularSeafoods = ['prawns', 'cockles', 'shellfish']
popularFruits = [ 'papayas', 'jackfruit', 'mangoes', 'pineapples', 'apples', 'grapes', 'pears', 'peaches', 'strawberries']
popularVegetables = ['corn', 'squash', 'sweet potatoes', 'kale', 'cucumbers', 'tomatoes', 'bamboo', 'sprouts', 'eggplant']
popularSpices = ['garlic', 'galangal', 'cilantro', 'lemon grass', 'shallots', 'pepper', 'chilies', 'curry', 'peppercorns']
popularSauces = ['shrimp paste', 'fish sauce']
popularPastas = ['rice', 'noodles']
popularCookingLiquids = ['coconut oil']
elif transformation == 'Turkish':
popularDairy = ['yogurt']
popularMeats = ['lamb', 'beef', 'veal']
popularPoultry = ['chicken']
popularSeafoods = ['sardines', 'anchovies']
popularFruits = ['plums', 'apricots', 'pomegranates', 'pears', 'apples', 'grapes', 'figs']
popularVegetables = ['eggplants', 'green peppers', 'onions', 'garlic', 'lentils', 'beans', 'olives', 'tomatoes']
popularSpices = ['parsley', 'cumin', 'black pepper', 'paprika', 'mint', 'oregano', 'red pepper', 'allspice', 'thyme', 'salt']
popularMainProteins = ['legumes']
popularCondiments = ['jam', 'honey']
popularNuts = ['pistachios', 'chestnuts', 'almonds', 'hazelnuts', 'walnuts']
popularDrinks = ['Turkish tea']
popularCookingLiquids = ['olive oil', 'sunflower oil', 'canola oil', 'corn oil']
# check if it has must-haves for certain cuisines
hasSausage = False
hasTomatoes = False
for ingredient in recipe["ingredients"]:
# check if ingredient is a must-have
if not hasSausage and (ingredient["ingredient"] == "sausages" or ingredient["ingredient"] == "frankfurters" or ingredient["ingredient"] == "kielbasas"):
hasSausage = True
if not hasTomatoes and ingredient["ingredient"] == "tomatoes":
hasTomatoes = True
# set original ingredient and assume substitution will be made
originalIngredient = ingredient["ingredient"]
        # check whether the ingredient can be replaced and there is something to replace it with;
        # if so, pick a random ingredient from the popular list, then delete it from the list so it can't be reused in this recipe
if "cheese" in ingredient["labels"] and len(popularCheeses) > 0:
randIndex = random.randint(0, len(popularCheeses) - 1)
ingredient["ingredient"] = popularCheeses[randIndex] + " cheese"
del popularCheeses[randIndex]
elif "meat" in ingredient["labels"] and len(popularMeats) > 0:
# don't replace sausage in German transformations
if transformation == "German" and (ingredient["ingredient"] == "sausages" or ingredient["ingredient"] == "frankfurters"):
continue
randIndex = random.randint(0, len(popularMeats) - 1)
ingredient["ingredient"] = popularMeats[randIndex]
del popularMeats[randIndex]
                # the Japanese transformation substitutes fish for meat
                if transformation == 'Japanese':
                    ingredient["ingredient"] = "fish"
elif "poultry" in ingredient["labels"] and len(popularPoultry) > 0:
randIndex = random.randint(0, len(popularPoultry) - 1)
ingredient["ingredient"] = popularPoultry[randIndex]
del popularPoultry[randIndex]
                # the Japanese transformation substitutes seafood for poultry
                if transformation == 'Japanese':
                    ingredient["ingredient"] = "seafood"
elif "fish" in ingredient["labels"] and len(popularFish) > 0:
randIndex = random.randint(0, len(popularFish) - 1)
ingredient["ingredient"] = popularFish[randIndex]
del popularFish[randIndex]
elif "seafood" in ingredient["labels"] and len(popularSeafoods) > 0:
randIndex = random.randint(0, len(popularSeafoods) - 1)
ingredient["ingredient"] = popularSeafoods[randIndex]
del popularSeafoods[randIndex]
elif "main protein" in ingredient["labels"] and len(popularMainProteins) > 0:
randIndex = random.randint(0, len(popularMainProteins) - 1)
ingredient["ingredient"] = popularMainProteins[randIndex]
del popularMainProteins[randIndex]
elif "dairy" in ingredient["labels"] and len(popularDairy) > 0:
randIndex = random.randint(0, len(popularDairy) - 1)
ingredient["ingredient"] = popularDairy[randIndex]
del popularDairy[randIndex]
elif "fruit" in ingredient["labels"] and len(popularFruits) > 0:
randIndex = random.randint(0, len(popularFruits) - 1)
ingredient["ingredient"] = popularFruits[randIndex]
del popularFruits[randIndex]
elif "vegetable" in ingredient["labels"] and len(popularVegetables) > 0:
# don't replace tomatoes in Italian transformations
if transformation == "Italian" and ingredient["ingredient"] == "tomatoes":
continue
randIndex = random.randint(0, len(popularVegetables) - 1)
ingredient["ingredient"] = popularVegetables[randIndex]
del popularVegetables[randIndex]
elif "spice or herb" in ingredient["labels"]:
if ("dessert" in recipe["labels"] or "sugar" in recipe["labels"]):
if len(popularDesertSpices) > 0:
randIndex = random.randint(0, len(popularDesertSpices) - 1)
ingredient["ingredient"] = popularDesertSpices[randIndex]
del popularDesertSpices[randIndex]
else:
if len(popularSpices) > 0:
randIndex = random.randint(0, len(popularSpices) - 1)
ingredient["ingredient"] = popularSpices[randIndex]
del popularSpices[randIndex]
elif "grain" in ingredient["labels"] and len(popularGrains) > 0:
randIndex = random.randint(0, len(popularGrains) - 1)
ingredient["ingredient"] = popularGrains[randIndex]
del popularGrains[randIndex]
elif "nuts" in ingredient["labels"] and len(popularNuts) > 0:
randIndex = random.randint(0, len(popularNuts) - 1)
ingredient["ingredient"] = popularNuts[randIndex]
del popularNuts[randIndex]
elif "alcohol" in ingredient["labels"] and len(popularAlcohol) > 0:
randIndex = random.randint(0, len(popularAlcohol) - 1)
ingredient["ingredient"] = popularAlcohol[randIndex]
del popularAlcohol[randIndex]
elif "drink" in ingredient["labels"] and len(popularDrinks) > 0:
randIndex = random.randint(0, len(popularDrinks) - 1)
ingredient["ingredient"] = popularDrinks[randIndex]
del popularDrinks[randIndex]
elif "pasta" in ingredient["labels"] and len(popularPastas) > 0:
randIndex = random.randint(0, len(popularPastas) - 1)
ingredient["ingredient"] = popularPastas[randIndex]
del popularPastas[randIndex]
elif "sauce" in ingredient["labels"] and len(popularSauces) > 0:
randIndex = random.randint(0, len(popularSauces) - 1)
ingredient["ingredient"] = popularSauces[randIndex]
del popularSauces[randIndex]
elif "condiment" in ingredient["labels"] and len(popularCondiments) > 0:
randIndex = random.randint(0, len(popularCondiments) - 1)
ingredient["ingredient"] = popularCondiments[randIndex]
del popularCondiments[randIndex]
elif "spicy" in ingredient["labels"] and len(popularSpicy) > 0:
randIndex = random.randint(0, len(popularSpicy) - 1)
ingredient["ingredient"] = popularSpicy[randIndex]
del popularSpicy[randIndex]
elif "flavoring" in ingredient["labels"] and len(popularFlavorings) > 0:
randIndex = random.randint(0, len(popularFlavorings) - 1)
ingredient["ingredient"] = popularFlavorings[randIndex]
del popularFlavorings[randIndex]
elif "cooking liquid" in ingredient["labels"] and len(popularCookingLiquids) > 0:
randIndex = random.randint(0, len(popularCookingLiquids) - 1)
ingredient["ingredient"] = popularCookingLiquids[randIndex]
del popularCookingLiquids[randIndex]
# no substitution made for this ingredient, so continue
else:
continue
# substitute ingredient string in directions
for i in range(0, len(recipe["directions"])):
recipe["directions"][i] = recipe["directions"][i].replace(originalIngredient, ingredient["ingredient"])
if transformation == "German" and not hasSausage:
addIngredientToRecipe(recipe, "sausages", "meat")
if transformation == "Italian" and not hasTomatoes:
addIngredientToRecipe(recipe, "tomatoes", "vegetable")
return recipe
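# --- Illustrative sketch (added for clarity, not part of the original script) ---
# transformRecipe above assumes each recipe is a dict with "ingredients",
# "labels", and "directions" keys; the concrete values below are hypothetical.
def _exampleTransformInput():
    return {
        "ingredients": [
            {"ingredient": "chicken", "labels": ["poultry"]},
            {"ingredient": "basil", "labels": ["spice or herb"]},
        ],
        "labels": ["dinner"],
        "directions": ["Cook the chicken.", "Garnish with basil."],
    }
# e.g. transformRecipe(_exampleTransformInput(), "Thai") would swap the poultry
# and herb entries for popular Thai equivalents and rewrite the directions.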
#
#main program
#
try:
form = cgi.FieldStorage()
# get recipe search input, selected recipe, and selected transformation
searchPhrase = form.getvalue("recipe-input", "")
recipeSelection = form.getvalue("recipe-selection", "")
recipeTransformation = form.getvalue("transformation", "")
# get ingredient strings and whether "on" selected radio button
numIngredientInputs = 12
ingredientNames = []
ingredientRadioOn = []
for i in range(0, numIngredientInputs):
ingredientFormName = "ingredient-" + str(i)
ingredientRadioOn.append(form.getvalue(ingredientFormName, "on") == "on")
ingredientNames.append(form.getvalue(ingredientFormName + "-string", ""))
# get ingredient label radio button value
ingredientLabelValues = []
for ingredientLabel in ingredientLabels:
ingredientLabelValues.append(form.getvalue("ingredient-label-" + ingredientLabel.replace(" ", "-"), ""))
# get recipe label radio button value
recipeLabelValues = []
for recipeLabel in recipeLabels:
recipeLabelValues.append(form.getvalue("recipe-label-" + recipeLabel.replace(" ", "-"), ""))
# print header and link to github
print("""
<link href="/assets/css/view-recipes.css" rel="stylesheet">
<script src="/assets/js/view-recipes.js" type="text/javascript"></script>
<div id="headers">
<div class="title">
<h1>View Recipes</h1>
</div>
<div class="subtitle">
<h4>Search for, filter, and transform recipes</h4>
</div>
<div class="logo">
<img class="img-responsive" alt="View Recipes Icon" src="/assets/img/view-recipes/icon.png">
</div>
<div class="links">
<a href="http://kevin.broh-kahn.com/view-recipes#search-header"><h4>Browse</h4></a>
</div>
</div>
<div id="description" class="container-fluid">
<div class="row description-group">
<div class="col-sm-8 col-sm-offset-2">
<p>Are you looking for a fun new cake recipe? Need to find a good pasta recipe that doesn't use nuts? Want to make something using what's left in the cupboard? This <i>View Recipes</i> application can assist you with all your needs!</p>
<p><i>View Recipes</i> is a powerful tool built using natural language processing that can search for recipes by recipe name, recipe type, ingredient, or ingredient type. You can select ingredients or ingredient types to include/exclude from any recipe you see. Thanks to <a href="http://allrecipes.com/">allrecipes.com</a>, the source of all the recipes and ingredients you see, you can search through thousands of recipes to find something that will fit your liking!</p>
</div>
</div>
</div>
<div class="links" id="bottom-links">
<h4>Browse now</h4>
<div class="badge-container">
<a href="http://kevin.broh-kahn.com/view-recipes#search-header">
<img alt="Browse on kevin.broh-kahn.com" src="/assets/img/kevin.broh-kahn.com/icon.png">
</a>
</div>
<h4>View source</h4>
<div class="badge-container">
<a target="_blank" href="https://bitbucket.org/kbrohkahn/recipe-parser">
<img alt="View on Bitbucket" src="/assets/img/social-links/bitbucket_rgb_slate_45px.png">
</a>
<a target="_blank" href="https://github.com/kbrohkahn/recipe-parser">
<img alt="View on Github" src="/assets/img/social-links/GitHub_Logo_45px.png">
</a>
</div>
</div>
""")
try:
# TODO only use this when JSON file changes
#recreateDatabase()
# if recipe selected, load selected recipe
        if recipeSelection != "":
recipe = loadRecipe(recipeSelection)
if recipe is None:
print("<b>Error: recipe not found</b>")
else:
                if recipeTransformation != "":
recipe = transformRecipe(recipe, recipeTransformation)
displayRecipe(recipe)
# show search
displaySearch(searchPhrase)
# print loading results message
print('<div class="center" id="loading-search-results"><b>Loading search results...</b></div>')
# if exists, display recipe form search results
displaySearchResults(searchPhrase)
# print notice of recipes and link to allrecipes.com
print("""
<div class="row">
<div class="col-xs-12 text-center">
All recipes parsed from <a href="http://allrecipes.com/">allrecipes.com</a>
</div>
</div>
""")
except sqlite3.Error as e:
print("<b>Error %s:</b>" % e.args[0])
except:
cgi.print_exception()
with open("../templates/footer.html", "r") as footer:
    print(footer.read())
|
kbrohkahn/kevin.broh-kahn.com
|
view-recipes/index.py
|
Python
|
apache-2.0
| 41,516
|
[
"MOOSE"
] |
5234c772a0854d8ada1711da0c50a860f71868dd846e63dce7de016ee6a9dff0
|
'''
Created on Aug 5, 2014
@author: gearsad
'''
import vtk
import numpy
from math import sin,cos
from SceneObject import SceneObject
class LIDAR(SceneObject):
'''
A template for drawing a LIDAR point cloud.
Ref: http://stackoverflow.com/questions/7591204/how-to-display-point-cloud-in-vtk-in-different-colors
'''
# The point cloud data
vtkPointCloudPolyData = None
vtkPointCloudPoints = None
vtkPointCloudDepth = None
vtkPointCloudCells = None
    # The angular dimensions of the scan grid (theta x phi)
numThetaReadings = None
numPhiReadings = None
thetaRange = [0, 0]
phiRange = [0, 0]
def __init__(self, renderer, parent, minTheta, maxTheta, numThetaReadings, minPhi, maxPhi, numPhiReadings, minDepth, maxDepth, initialValue):
'''
Initialize the LIDAR point cloud.
'''
# Call the parent constructor
super(LIDAR,self).__init__(renderer, parent)
# Cache these
self.numPhiReadings = numPhiReadings
self.numThetaReadings = numThetaReadings
self.thetaRange = [minTheta, maxTheta]
self.phiRange = [minPhi, maxPhi]
# Create a point cloud with the data
self.vtkPointCloudPoints = vtk.vtkPoints()
self.vtkPointCloudDepth = vtk.vtkDoubleArray()
self.vtkPointCloudDepth.SetName("DepthArray")
self.vtkPointCloudCells = vtk.vtkCellArray()
self.vtkPointCloudPolyData = vtk.vtkPolyData()
# Set up the structure
self.vtkPointCloudPolyData.SetPoints(self.vtkPointCloudPoints)
self.vtkPointCloudPolyData.SetVerts(self.vtkPointCloudCells)
self.vtkPointCloudPolyData.GetPointData().SetScalars(self.vtkPointCloudDepth)
self.vtkPointCloudPolyData.GetPointData().SetActiveScalars("DepthArray")
# Build the initial structure
for x in xrange(0, self.numThetaReadings):
for y in xrange(0, self.numPhiReadings):
# Add the point
point = [1, 1, 1]
pointId = self.vtkPointCloudPoints.InsertNextPoint(point)
self.vtkPointCloudDepth.InsertNextValue(1)
self.vtkPointCloudCells.InsertNextCell(1)
self.vtkPointCloudCells.InsertCellPoint(pointId)
# Use the update method to initialize the points with a NumPy matrix
initVals = numpy.ones((numThetaReadings, numPhiReadings)) * initialValue
self.UpdatePoints(initVals)
# Now build the mapper and actor.
mapper = vtk.vtkPolyDataMapper()
        # VTK 6 changed the mapper input API: SetInputData replaces SetInput
if vtk.VTK_MAJOR_VERSION >= 6:
mapper.SetInputData(self.vtkPointCloudPolyData)
else:
mapper.SetInput(self.vtkPointCloudPolyData)
mapper.SetColorModeToDefault()
mapper.SetScalarRange(minDepth, maxDepth)
mapper.SetScalarVisibility(1)
self.vtkActor.SetMapper(mapper)
def UpdatePoints(self, points2DNPMatrix):
'''Update the points with a 2D array that is numThetaReadings x numPhiReadings containing the depth from the source'''
for x in xrange(0, self.numThetaReadings):
theta = (self.thetaRange[0] + float(x) * (self.thetaRange[1] - self.thetaRange[0]) / float(self.numThetaReadings)) / 180.0 * 3.14159
for y in xrange(0, self.numPhiReadings):
phi = (self.phiRange[0] + float(y) * (self.phiRange[1] - self.phiRange[0]) / float(self.numPhiReadings)) / 180.0 * 3.14159
r = points2DNPMatrix[x, y]
# Polar coordinates to Euclidean space
point = [r * sin(theta) * cos(phi), r * sin(phi), r * cos(theta) * cos(phi)]
pointId = y + x * self.numPhiReadings
self.vtkPointCloudPoints.SetPoint(pointId, point)
self.vtkPointCloudCells.Modified()
self.vtkPointCloudPoints.Modified()
self.vtkPointCloudDepth.Modified()
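# Hedged usage sketch (added; assumes a vtkRenderer and a SceneObject parent are
# created elsewhere -- all argument values below are illustrative only):
#
#   lidar = LIDAR(renderer, parent,
#                 minTheta=-45.0, maxTheta=45.0, numThetaReadings=90,
#                 minPhi=-15.0, maxPhi=15.0, numPhiReadings=30,
#                 minDepth=0.0, maxDepth=10.0, initialValue=5.0)
#   lidar.UpdatePoints(numpy.random.uniform(1.0, 10.0, (90, 30)))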
|
GearsAD/semisorted_arnerve
|
arnerve/scene/LIDAR.py
|
Python
|
mit
| 4,034
|
[
"VTK"
] |
b8f68ed4b9540c98526d6461912ef90d60b2f9dadd999d40d0a27899f270814b
|
"""methods_utils.py:
Some non-standard functions generic to moose.
This library may not be exposed to end-users. Intended for development by
the maintainer of this file.
Last modified: Sat Jan 18, 2014 05:01PM
"""
__author__ = "Dilawar Singh"
__copyright__ = "Copyright 2013, NCBS Bangalore"
__credits__ = ["NCBS Bangalore", "Bhalla Lab"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Dilawar Singh"
__email__ = "dilawars@ncbs.res.in"
__status__ = "Development"
import re
objPathPat = re.compile(r'(\/\w+\[\d+\])+?$')
def idPathToObjPath( idPath ):
""" Append a [0] if missing from idPath.
Id-paths do not have [0] at their end. This does not allow one to do
algebra properly.
"""
m = objPathPat.match( idPath )
    if m:
        return idPath
    return '{}[0]'.format(idPath)
def main():
p1 = '/cable[0]/comp_[1]/a'
p2 = '/cab[1]/comp/com'
p3 = '/cab[1]/p[2]/c[3]'
p4 = '/ca__b[1]/_p[2]/c[122]'
for p in [p1, p2, p3, p4]:
m = objPathPat.match(p)
if m:
print(m.group(0))
else:
print(("{} is invalid Obj path in moose".format( p )))
if __name__ == '__main__':
main()
|
subhacom/moose-core
|
python/moose/methods_utils.py
|
Python
|
gpl-3.0
| 1,287
|
[
"MOOSE"
] |
2c9606324a42f5c677355edcc4734162885362db657d0808f3de62f8559cd5a2
|
"""
Create and put Requests to move files.
List of operations:
#. ReplicateAndRegister LFNs
#. Check for Migration
#. Remove all other replicas for these files
"""
import os
from DIRAC import gLogger
from DIRAC.Core.Base.Script import Script
from DIRAC.Core.Utilities.List import breakListIntoChunks
from DIRAC.Core.Utilities.ReturnValues import returnSingleResult
from DIRAC.FrameworkSystem.private.standardLogging.LogLevels import LogLevels
from DIRAC.RequestManagementSystem.Client.File import File
from DIRAC.RequestManagementSystem.Client.Request import Request
from DIRAC.RequestManagementSystem.Client.Operation import Operation
sLog = gLogger.getSubLogger("CreateMoving")
class CreateMovingRequest:
"""Create the request to move files from one SE to another."""
def __init__(self):
"""Constructor."""
self.requests = []
self._reqClient = None
self._fcClient = None
self._requestName = "Moving_"
self.switches = {}
self.options = [
("L", "List", "File containing list of LFNs to move"),
("P", "Path", "LFN path to folder, all files in the folder will be moved"),
("S", "SourceSE", "Where to remove the LFNs from"),
("T", "TargetSE", "Where to move the LFNs to"),
("N", "Name", "Name of the Request"),
]
self.flags = [
("C", "CheckMigration", "Ensure the LFNs are migrated to tape before removing any replicas"),
("X", "Execute", "Put Requests, else dryrun"),
]
self.registerSwitchesAndParseCommandLine()
self.getLFNList()
self.getLFNMetadata()
@property
def fcClient(self):
"""Return FileCatalogClient."""
if not self._fcClient:
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
self._fcClient = FileCatalog()
return self._fcClient
@property
def reqClient(self):
"""Return RequestClient."""
if not self._reqClient:
from DIRAC.RequestManagementSystem.Client.ReqClient import ReqClient
self._reqClient = ReqClient()
return self._reqClient
@property
def dryRun(self):
"""Return dry run flag."""
return self.switches["DryRun"]
@property
def targetSE(self):
"""Return the list of targetSE."""
return self.switches["TargetSE"]
@property
def sourceSEs(self):
"""Return the list of sourceSEs."""
return self.switches["SourceSE"]
@property
def lfnFolderPath(self):
"""Return the lfn folder path where to find the files of the request."""
return self.switches.get("Path", None)
def registerSwitchesAndParseCommandLine(self):
"""Register the default plus additional parameters and parse options.
:param list options: list of three tuple for options to add to the script
:param list flags: list of three tuple for flags to add to the script
:param str opName
"""
for short, longOption, doc in self.options:
Script.registerSwitch(short + ":" if short else "", longOption + "=", doc)
for short, longOption, doc in self.flags:
Script.registerSwitch(short, longOption, doc)
self.switches[longOption] = False
Script.parseCommandLine()
if Script.getPositionalArgs():
Script.showHelp(exitCode=1)
for switch in Script.getUnprocessedSwitches():
for short, longOption, doc in self.options:
if switch[0] == short or switch[0].lower() == longOption.lower():
sLog.verbose("Found switch %r with value %r" % (longOption, switch[1]))
self.switches[longOption] = switch[1]
break
for short, longOption, doc in self.flags:
if switch[0] == short or switch[0].lower() == longOption.lower():
self.switches[longOption] = True
break
self.checkSwitches()
self.switches["DryRun"] = not self.switches.get("Execute", False)
self.switches["SourceSE"] = self.switches.get("SourceSE", "").split(",")
def checkSwitches(self):
"""Check the switches, set autoName if needed."""
if not self.switches.get("SourceSE"):
raise RuntimeError('Have to set "SourceSE"')
if not self.switches.get("TargetSE"):
raise RuntimeError('Have to set "TargetSE"')
if not self.switches.get("List") and not self.switches.get("Path"):
raise RuntimeError('Have to set "List" or "Path"')
def getLFNList(self):
"""Get list of LFNs.
Either read the provided file, or get the files found beneath the provided folder.
:returns: list of lfns
:raises: RuntimeError, ValueError
"""
if self.switches.get("List"):
if os.path.exists(self.switches.get("List")):
self.lfnList = list(
set([line.split()[0] for line in open(self.switches.get("List")).read().splitlines()])
)
else:
raise ValueError("%s not a file" % self.switches.get("List"))
elif self.lfnFolderPath:
path = self.lfnFolderPath
sLog.debug("Check if %r is a directory" % path)
isDir = returnSingleResult(self.fcClient.isDirectory(path))
sLog.debug("Result: %r" % isDir)
if not isDir["OK"] or not isDir["Value"]:
sLog.error("Path is not a directory", isDir.get("Message", ""))
raise RuntimeError("Path %r is not a directory" % path)
sLog.notice("Looking for files in %r" % path)
metaDict = {"SE": self.sourceSEs[0]} if self.switches.get("SourceOnly") else {}
lfns = self.fcClient.findFilesByMetadata(metaDict=metaDict, path=path)
if not lfns["OK"]:
sLog.error("Could not find files")
raise RuntimeError(lfns["Message"])
self.lfnList = lfns["Value"]
if self.lfnList:
sLog.notice("Will create request(s) with %d lfns" % len(self.lfnList))
if len(self.lfnList) == 1:
raise RuntimeError("Only 1 file in the list, aborting!")
return
raise ValueError('"Path" or "List" need to be provided!')
def getLFNMetadata(self):
"""Get the metadata for all the LFNs."""
metaData = self.fcClient.getFileMetadata(self.lfnList)
error = False
if not metaData["OK"]:
sLog.error("Unable to read metadata for lfns: %s" % metaData["Message"])
raise RuntimeError("Could not read metadata: %s" % metaData["Message"])
self.metaData = metaData["Value"]
for failedLFN, reason in self.metaData["Failed"].items():
sLog.error("skipping %s: %s" % (failedLFN, reason))
error = True
if error:
raise RuntimeError("Could not read all metadata")
for lfn in self.metaData["Successful"].keys():
sLog.verbose("found %s" % lfn)
def run(self):
"""Perform checks and create the request."""
from DIRAC.RequestManagementSystem.private.RequestValidator import RequestValidator
for count, lfnChunk in enumerate(breakListIntoChunks(self.lfnList, 100)):
if not lfnChunk:
sLog.error("LFN list is empty!!!")
return 1
requestName = "%s_%d" % (self.switches.get("Name"), count)
request = self.createRequest(requestName, lfnChunk)
valid = RequestValidator().validate(request)
if not valid["OK"]:
sLog.error("putRequest: request not valid", "%s" % valid["Message"])
return 1
else:
self.requests.append(request)
self.putOrRunRequests()
return 0
def createRequest(self, requestName, lfnChunk):
"""Create the Request."""
request = Request()
request.RequestName = requestName
replicate = Operation()
replicate.Type = "ReplicateAndRegister"
replicate.TargetSE = self.switches.get("TargetSE")
self.addLFNs(replicate, lfnChunk, addPFN=True)
request.addOperation(replicate)
if self.switches.get("CheckMigration"):
checkMigration = Operation()
checkMigration.Type = "CheckMigration"
checkMigration.TargetSE = self.switches.get("TargetSE")
self.addLFNs(checkMigration, lfnChunk, addPFN=True)
request.addOperation(checkMigration)
removeReplicas = Operation()
removeReplicas.Type = "RemoveReplica"
removeReplicas.TargetSE = ",".join(self.switches.get("SourceSE", []))
self.addLFNs(removeReplicas, lfnChunk)
request.addOperation(removeReplicas)
return request
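    # Note (added comment): a request built above executes ReplicateAndRegister,
    # then optionally CheckMigration, then RemoveReplica -- replicas are only
    # removed from the source SEs after the new copy exists (and, if requested,
    # has been confirmed migrated to tape).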
def addLFNs(self, operation, lfns, addPFN=False):
"""Add lfns to operation.
:param operation: the operation instance to which the files will be added
:param list lfns: list of lfns
:param bool addPFN: if true adds PFN to each File
"""
if not self.metaData:
self.getLFNMetadata()
for lfn in lfns:
metaDict = self.metaData["Successful"][lfn]
opFile = File()
opFile.LFN = lfn
if addPFN:
opFile.PFN = lfn
opFile.Size = metaDict["Size"]
if "Checksum" in metaDict:
# should check checksum type, now assuming Adler32 (metaDict['ChecksumType'] = 'AD')
opFile.Checksum = metaDict["Checksum"]
opFile.ChecksumType = "ADLER32"
operation.addFile(opFile)
def putOrRunRequests(self):
"""Run or put requests."""
requestIDs = []
if self.dryRun:
sLog.notice("Would have created %d requests" % len(self.requests))
for reqID, req in enumerate(self.requests):
sLog.notice("Request %d:" % reqID)
for opID, op in enumerate(req):
sLog.notice(" Operation %d: %s #lfn %d" % (opID, op.Type, len(op)))
return 0
for request in self.requests:
putRequest = self.reqClient.putRequest(request)
if not putRequest["OK"]:
sLog.error("unable to put request %r: %s" % (request.RequestName, putRequest["Message"]))
continue
requestIDs.append(str(putRequest["Value"]))
sLog.always("Request %r has been put to ReqDB for execution." % request.RequestName)
if requestIDs:
sLog.always("%d requests have been put to ReqDB for execution" % len(requestIDs))
sLog.always("RequestID(s): %s" % " ".join(requestIDs))
sLog.always("You can monitor the request status using the command: dirac-rms-request <requestName/ID>")
return 0
sLog.error("No requests created")
return 1
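# Hedged usage note (illustrative, not part of the original module): given the
# switches registered above, a dry run followed by a real submission might look
# roughly like
#   dirac-dms-create-moving-request --List lfns.txt --SourceSE SRC --TargetSE DST --Name MyMove --CheckMigration
# and then the same command again with --Execute to actually put the requests.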
@Script()
def main():
try:
CMR = CreateMovingRequest()
CMR.run()
except Exception as e:
if LogLevels.getLevelValue(sLog.getLevel()) <= LogLevels.VERBOSE:
sLog.exception("Failed to create Moving Request")
else:
sLog.error("ERROR: Failed to create Moving Request:", str(e))
exit(1)
exit(0)
if __name__ == "__main__":
main()
|
DIRACGrid/DIRAC
|
src/DIRAC/DataManagementSystem/scripts/dirac_dms_create_moving_request.py
|
Python
|
gpl-3.0
| 11,475
|
[
"DIRAC"
] |
9417ef1e8b582ed5b1f4f962a54d7a2bba368b0d74cce8249c08a90d99036a97
|
import os
import pymatgen as pmg
import pymatgen.io.nwchem as nwchem
from cage.landscape import LandscapeAnalyzer
from cage import Cage
from json import JSONDecodeError
"""
Small utility scripts that support the use of the cage package.
"""
# Elements to ignore for the surface facet determination
IGNORE = (pmg.Element('Li'), pmg.Element('Na'), pmg.Element('Mg'),
pmg.Element('H'), pmg.Element('I'), pmg.Element('Br'),
pmg.Element('Cl'), pmg.Element('F'))
OUTPUT_FILE = "result.out"
JOB_SCRIPT = "job_nwchem.sh"
def geo(output_file):
"""
Quickly write out the initial and the final configuration of a nwchem
optimization to xyz files.
Args:
output_file (str): Output file of the nwchem calculation.
"""
data = nwchem.NwOutput(output_file).data[-1]
data['molecules'][0].to(fmt='xyz', filename='initial_geo.xyz')
data['molecules'][-1].to(fmt='xyz', filename='final_geo.xyz')
def energy(output_file):
data = nwchem.NwOutput(output_file).data[-1]
print("Total energy = " + str(data["energies"][-1]))
def check_calculation(output):
"""
Script that checks if a calculation has completed successfully from the
output file(s).
Args:
output (str): Output file or directory
"""
# TODO Make this method recursively check subdirectories
if os.path.isdir(output):
dir_list = [directory for directory in os.listdir(output)
if os.path.isdir(directory)]
for directory in dir_list:
file = os.path.join(directory, OUTPUT_FILE)
try:
out = nwchem.NwOutput(file, fmt='json')
except JSONDecodeError:
try:
out = nwchem.NwOutput(file)
                except Exception:
                    raise IOError('Could not parse output file: ' + file)
except FileNotFoundError:
print("No output file found in " + directory)
try:
error = False
for data in out.data:
if data['has_error']:
error = True
print('File: ' + os.path.abspath(file))
if out.data[-1]['task_time'] != 0:
print('Calculation completed in ' + str(
out.data[-1]['task_time']) + 's')
else:
print(
'No timing information found. Calculation might not '
'have completed successfully.')
print('Calculation has error: ' + str(error))
except NameError:
print("No data found in file!")
else:
try:
out = nwchem.NwOutput(output, fmt='json')
except JSONDecodeError:
try:
out = nwchem.NwOutput(output)
            except Exception:
                raise IOError('Could not parse output file: ' + output)
try:
error = False
for data in out.data:
if data['has_error']:
error = True
print('File: ' + os.path.abspath(output))
if out.data[-1]['task_time'] != 0:
print('Calculation completed in ' + str(
out.data[-1]['task_time']) + 's')
else:
print('No timing information found. Calculation might not '
'have completed successfully.')
print('Calculation has error: ' + str(error))
except NameError:
print("No data found in file!")
def gather_landscape(directory):
"""
Gather the results from a landscape calculation.
Args:
directory (str): Directory which contains all the geometry directories
that describe the landscape.
"""
lands_analyzer = LandscapeAnalyzer.from_data(directory=directory)
lands_analyzer.to(os.path.join(directory, "landscape.json"))
def process_output(output):
"""
Process the results in an output file or all subdirectories in a directory.
Args:
output (str): File or directory that contains the output. If a
directory is provided, the output has to be in the subdirectories
of that directory.
"""
if os.path.isdir(output):
dir_list = [directory for directory in os.listdir(output)
if os.path.isdir(directory)]
for directory in dir_list:
print("Processing output in " +
os.path.join(directory, OUTPUT_FILE) +
"...")
out = nwchem.NwOutput(os.path.join(directory, OUTPUT_FILE))
try:
error = False
                for data in out.data:
                    if data['has_error']:
                        error = True
if error:
print("File: " + os.path.join(directory, OUTPUT_FILE) +
" contains errors!")
elif out.data[-1]['task_time'] == 0:
print('No timing information found in ' +
os.path.join(directory, OUTPUT_FILE) + ".")
else:
out.to_file(os.path.join(directory, 'data.json'))
except NameError:
print("No data found in file. ")
except IndexError:
print("Data is empty!")
else:
output = os.path.abspath(output)
print('Processing output in ' + output)
try:
out = nwchem.NwOutput(output)
        except Exception:
            raise IOError('Could not find proper nwchem output file.')
try:
error = False
            for data in out.data:
                if data['has_error']:
                    error = True
if error:
print("File: " + output + " contains errors!")
elif out.data[-1]['task_time'] == 0:
print('No timing information found in ' + output + ".")
else:
out.to_file(os.path.join(os.path.dirname(output),
'data.json'))
except NameError:
print("No data found in file. ")
except IndexError:
print("Data is empty!")
out.to_file(os.path.join(os.path.dirname(output), 'data.json'))
def search_and_reboot(directory):
"""
Look to a directory for calculations in its subdirectories, and reboot them
if they did not complete successfully.
Args:
directory (str):
"""
dir_list = [subdir for subdir in os.listdir(directory)
if os.path.isdir(subdir)]
for directory in dir_list:
print("Checking output in " + os.path.join(directory, OUTPUT_FILE) +
"...")
output = nwchem.NwOutput(os.path.join(directory, OUTPUT_FILE))
try:
error = False
for data in output.data:
if data['has_error']:
error = True
if error:
print("File: " + os.path.join(directory, OUTPUT_FILE) +
" contains errors! Simply rebooting is probably not "
"sufficient.")
if output.data[-1]['task_time'] == 0:
print('No timing information found in ' +
os.path.join(directory, OUTPUT_FILE) +
'. Rebooting calculation...')
os.system(
"sh -c 'cd " + directory + " && msub " + JOB_SCRIPT + " '")
except NameError:
print("No data found in file. Rebooting calculation...")
os.system("sh -c 'cd " + directory + " && msub " + JOB_SCRIPT
+ " '")
except IndexError:
print("Data is empty! Rebooting Calculation...")
os.system("sh -c 'cd " + directory + " && msub " + JOB_SCRIPT
+ " '")
def visualize_facets(filename):
"""
Visualize the facets of a molecule based on a structure file.
Args:
filename (str): Structure file of the molecule.
"""
filename = str(os.path.abspath(filename))
try:
# Load the POSCAR into a Cage
anion = Cage.from_poscar(filename)
except ValueError:
# If that fails, try other file formats supported by pymatgen
anion = Cage.from_file(filename)
anion.find_surface_facets(ignore=IGNORE)
facet_filename = "".join(filename.split("/")[-1].split(".")[0:-1])\
+ ".vesta"
anion.visualize_facets(facet_filename, ignore=IGNORE)
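# Hedged usage sketch (added; file and directory names are illustrative only):
#   check_calculation("result.out")        # report errors/timing for one run
#   process_output("landscape_dir")        # dump data.json per subdirectory
#   gather_landscape("landscape_dir")      # collect results into landscape.json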
|
mbercx/cage
|
cage/cli/commands/util.py
|
Python
|
mit
| 8,566
|
[
"NWChem",
"pymatgen"
] |
680ce095a8e502a62240e2f532897bd6832145424b8aca46cf620a86a7e11403
|
print("""
FEM simulation using getfem++ and siconos.
""")
import siconos.kernel as kernel
import numpy as np
import getfem as gf
from matplotlib.pyplot import *
class SiconosFem:
""" The set of matrices required by Siconos, from a Finite Element Model
"""
def __init__(self):
self.nbdof = 0
self.Mass = []
self.Stiff = []
self.H = []
self.q0 = []
self.RHS=[]
def fillH(pid,sic,nbdof,BOTTOM):
nb_contacts = np.size(pid)
rowH = 3 * nb_contacts
sic.H = np.zeros((rowH,nbdof))
dofbottom = mfu.basic_dof_on_region(BOTTOM)
for i in range(0,rowH,3):
sic.H[i,dofbottom[i]+2] = 1.0
sic.H[i+1,dofbottom[i]] = 1.0
sic.H[i+2,dofbottom[i]+1] = 1.0
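# Note (added comment): each contact point contributes three rows to H, in the
# order [normal, tangent 1, tangent 2]; the mesh z-axis (dof offset +2) is
# taken here as the contact normal against the horizontal ground plane.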
sico = SiconosFem()
# ===============================
# Model Parameters
# ===============================
E = 2.1e11 # Young modulus
Nu = 0.3 # Poisson coef.
# Lame coeff.
Lambda = E*Nu/((1+Nu)*(1-2*Nu))
Mu = E/(2*(1+Nu))
# Density
Rho=7800
Gravity = -9.81
t0 = 0.0 # start time
T = 0.5 # end time
h = 0.0005 # time step
e = 0.0 # restitution coefficient
mu=0.3 # Friction coefficient
theta = 0.5 # theta scheme
# ===============================
# Build FEM using getfem
# ===============================
# ==== The geometry and the mesh ====
dimX = 10.01 ; dimY = 10.01 ; dimZ = 3.01
stepX = 2.0 ; stepY = 2.0 ; stepZ = 1.5
x=np.arange(0,dimX,stepX)
y=np.arange(0,dimY,stepY)
z=np.arange(0,dimZ,stepZ)
m = gf.Mesh('regular simplices', x,y,z)
m.set('optimize_structure')
# Export the mesh to vtk
m.export_to_vtk("BlockMesh.vtk")
# Create MeshFem objects
# (i.e. assign elements onto the mesh for each variable)
mfu = gf.MeshFem(m,3) # displacement
mff = gf.MeshFem(m,1) # for plot von-mises
# assign the FEM
mfu.set_fem(gf.Fem('FEM_PK(3,1)'))
mff.set_fem(gf.Fem('FEM_PK_DISCONTINUOUS(3,1,0.01)'))
# mfu.export_to_vtk("BlockMeshDispl.vtk")
# ==== Set the integration method ====
mim = gf.MeshIm(m,gf.Integ('IM_TETRAHEDRON(5)'))
# ==== Summary ====
print(' ==================================== \n Mesh details: ')
print(' Problem dimension:', mfu.qdim(), '\n Number of elements: ', m.nbcvs(), '\n Number of nodes: ', m.nbpts())
print(' Number of dof: ', mfu.nbdof(), '\n Element type: ', mfu.fem()[0].char())
print(' ====================================')
# ==== Boundaries detection ====
allPoints = m.pts()
# Bottom points and faces
cbot = (abs(allPoints[2,:]) < 1e-6)
pidbot = np.compress(cbot,list(range(0,m.nbpts())))
fbot = m.faces_from_pid(pidbot)
BOTTOM = 1
m.set_region(BOTTOM,fbot)
# Top points and faces
ctop = (abs(allPoints[2,:]) > dimZ-stepZ)
pidtop = np.compress(ctop,list(range(0,m.nbpts())))
ftop = m.faces_from_pid(pidtop)
TOP = 2
m.set_region(TOP,ftop)
# Left points and faces
cleft = (abs(allPoints[0,:]) < 1e-6)
pidleft = np.compress(cleft,list(range(0,m.nbpts())))
fleft= m.faces_from_pid(pidleft)
LEFT = 3
m.set_region(LEFT,fleft)
# ==== Create getfem models ====
# We use two identical models, one to get the stiffness matrix and the rhs
# and the other to get the mass matrix.
#
md = gf.Model('real')
# The dof (displacements on nodes)
md.add_fem_variable('u',mfu)
# Add model constants
md.add_initialized_data('lambda',Lambda)
md.add_initialized_data('mu',Mu)
md.add_initialized_data('source_term',[0,0,-10])
md.add_initialized_data('push',[50000000.0,0.0,0.0])
md.add_initialized_data('rho',Rho)
md.add_initialized_data('gravity', Gravity)
md.add_initialized_data('weight',[0,0,Rho*Gravity])
# Build model (linear elasticity)
md.add_isotropic_linearized_elasticity_brick(mim,'u','lambda','mu')
# Add volumic/surfacic source terms
#md.add_source_term_brick(mim,'u','source_term',TOP)
md.add_source_term_brick(mim,'u','push',LEFT)
md.add_source_term_brick(mim,'u','weight')
# Assembly
md.assembly()
# Get stiffness matrix
sico.Stiff=md.tangent_matrix().full()
#
# Note: getfem returns sparse matrices. .full() means that we translate sparse to dense.
#
# Get right-hand side
sico.RHS = md.rhs()
# Get initial state
sico.initial_displacement = md.variable('u')
# Second model for the mass matrix
md2 = gf.Model('real')
md2.add_fem_variable('u',mfu)
md2.add_initialized_data('rho',Rho)
md2.add_mass_brick(mim,'u','rho')
md2.assembly()
# Get mass matrix
sico.Mass = md2.tangent_matrix().full()
# number of dof
sico.nbdof = mfu.nbdof()
sico.mfu=mfu
sico.mesh=m
# ===============================
# Here starts the Siconos stuff
# ===============================
#
# From getfem, we have Mass, Stiffness and RHS
# saved in object sico.
# H-Matrix
fillH(pidbot,sico,mfu.nbdof(),BOTTOM)
# =======================================
# Create the siconos Dynamical System
#
# Mass.ddot q + Kq = fExt
#
# q: dof vector (displacements)
# =======================================
# Initial displacement and velocity
v0 = np.zeros(sico.nbdof)
block = kernel.LagrangianLinearTIDS(sico.initial_displacement,v0,sico.Mass)
# set fExt and K
block.setFExtPtr(sico.RHS)
block.setKPtr(sico.Stiff)
# =======================================
# The interactions
# A contact is defined for each node at
# the bottom of the block
# =======================================
# Create one relation/interaction for each point
# in the bottom surface
# Each interaction is of size three with a
# relation between local coordinates at contact and global coordinates given by:
# y = Hq + b
# y = [ normal component, first tangent component, second tangent component]
#
# The friction-contact non-smooth law
nslaw = kernel.NewtonImpactFrictionNSL(e,e,mu,3)
diminter = 3
hh = np.zeros((diminter,sico.nbdof))
b = np.zeros(diminter)
b[0] = 0.0
k = 0
relation=[]
inter=[]
hh = np.zeros((diminter,sico.nbdof))
nbInter = pidbot.shape[0]
for i in range(nbInter):
# hh is a submatrix of sico.H with 3 rows.
hh[:,:] = sico.H[k:k+3,:]
k += 3
relation.append(kernel.LagrangianLinearTIR(hh,b))
inter.append(kernel.Interaction(diminter, nslaw, relation[i]))
nbInter=len(inter)
# =======================================
# The Model
# =======================================
blockModel = kernel.Model(t0,T)
# add the dynamical system to the non smooth dynamical system
blockModel.nonSmoothDynamicalSystem().insertDynamicalSystem(block)
# link the interactions and the dynamical system
for i in range(nbInter):
blockModel.nonSmoothDynamicalSystem().link(inter[i],block);
# =======================================
# The Simulation
# =======================================
# (1) OneStepIntegrators
OSI = kernel.Moreau(theta)
# (2) Time discretisation --
t = kernel.TimeDiscretisation(t0,h)
# (3) one step non smooth problem
osnspb = kernel.FrictionContact(3)
# (4) Simulation setup with (1) (2) (3)
s = kernel.TimeStepping(t)
s.insertIntegrator(OSI)
s.insertNonSmoothProblem(osnspb)
blockModel.setSimulation(s)
# simulation initialization
blockModel.initialize()
# the number of time steps
N = int((T - t0) / h)
# Get the values to be plotted
# ->saved in a matrix dataPlot
dataPlot = np.empty((N+1,9))
dataPlot[0, 0] = t0
dataPlot[0, 1] = block.q()[2]
nbNodes = sico.mesh.pts().shape[1]
k = 1
# time loop
while(s.hasNextEvent()):
s.computeOneStep()
name = 'friction'+str(k)+'.vtk'
dataPlot[k,0]=s.nextTime()
dataPlot[k,1]=block.q()[2]
# Post proc for paraview
md.to_variables(block.q())
VM=md.compute_isotropic_linearized_Von_Mises_or_Tresca('u','lambda','mu',mff)
dataPlot[k, 8] = VM[0]
#U = fem_model.variable('u')
sl = gf.Slice(('boundary',),sico.mfu,1)
sl.export_to_vtk(name, sico.mfu, block.q(),'Displacement', mff, VM, 'Von Mises Stress')
#print s.nextTime()
k += 1
s.nextStep()
#subplot(211)
#title('position')
#plot(dataPlot[:,0], dataPlot[:,1])
#grid()
#subplot(212)
#show()
|
bremond/siconos
|
examples/Mechanics/FEM/python/block_friction.py
|
Python
|
apache-2.0
| 7,780
|
[
"ParaView",
"VTK"
] |
8177345b0846c1d7e42d3ab5d498ac7da7447caec78561cee51117559f623a9f
|
from setuptools import setup
setup(name='suspenders',
version='0.2.6',
description='Allows the merging of alignments that have been annotated using pylapels into a single alignment that picks the highest quality alignment.',
url='http://code.google.com/p/suspenders',
author='James Holt',
author_email='holtjma@cs.unc.edu',
license='MIT',
packages=['MergeImprove'],
install_requires=['pysam', 'matplotlib'],
scripts=['bin/pysuspenders'],
zip_safe=False)
|
holtjma/suspenders
|
setup.py
|
Python
|
mit
| 515
|
[
"pysam"
] |
0c585afa8bf88bb79671c814eba69b24e78611c2a5e9022159f2d5dea84f9253
|
"""
Container page in Studio
"""
from bok_choy.page_object import PageObject
from bok_choy.promise import Promise, EmptyPromise
from common.test.acceptance.pages.studio import BASE_URL
from common.test.acceptance.pages.common.utils import click_css, confirm_prompt
from common.test.acceptance.pages.studio.utils import type_in_codemirror
class ContainerPage(PageObject):
"""
Container page in Studio
"""
NAME_SELECTOR = '.page-header-title'
NAME_INPUT_SELECTOR = '.page-header .xblock-field-input'
NAME_FIELD_WRAPPER_SELECTOR = '.page-header .wrapper-xblock-field'
ADD_MISSING_GROUPS_SELECTOR = '.notification-action-button[data-notification-action="add-missing-groups"]'
def __init__(self, browser, locator):
super(ContainerPage, self).__init__(browser)
self.locator = locator
@property
def url(self):
"""URL to the container page for an xblock."""
return "{}/container/{}".format(BASE_URL, self.locator)
@property
def name(self):
titles = self.q(css=self.NAME_SELECTOR).text
if titles:
return titles[0]
else:
return None
def is_browser_on_page(self):
def _xblock_count(class_name, request_token):
return len(self.q(css='{body_selector} .xblock.{class_name}[data-request-token="{request_token}"]'.format(
body_selector=XBlockWrapper.BODY_SELECTOR, class_name=class_name, request_token=request_token
)).results)
def _is_finished_loading():
is_done = False
# Get the request token of the first xblock rendered on the page and assume it is correct.
data_request_elements = self.q(css='[data-request-token]')
if len(data_request_elements) > 0:
request_token = data_request_elements.first.attrs('data-request-token')[0]
# Then find the number of Studio xblock wrappers on the page with that request token.
num_wrappers = len(self.q(css='{} [data-request-token="{}"]'.format(XBlockWrapper.BODY_SELECTOR, request_token)).results)
# Wait until all components have been loaded and marked as either initialized or failed.
# See:
# - common/static/js/xblock/core.js which adds the class "xblock-initialized"
# at the end of initializeBlock.
# - common/static/js/views/xblock.js which adds the class "xblock-initialization-failed"
# if the xblock threw an error while initializing.
num_initialized_xblocks = _xblock_count('xblock-initialized', request_token)
num_failed_xblocks = _xblock_count('xblock-initialization-failed', request_token)
is_done = num_wrappers == (num_initialized_xblocks + num_failed_xblocks)
return (is_done, is_done)
# First make sure that an element with the view-container class is present on the page,
# and then wait for the loading spinner to go away and all the xblocks to be initialized.
return (
self.q(css='body.view-container').present and
self.q(css='div.ui-loading.is-hidden').present and
Promise(_is_finished_loading, 'Finished rendering the xblock wrappers.').fulfill()
)
def wait_for_component_menu(self):
"""
Waits until the menu bar of components is present on the page.
"""
EmptyPromise(
lambda: self.q(css='div.add-xblock-component').present,
'Wait for the menu of components to be present'
).fulfill()
@property
def xblocks(self):
"""
Return a list of xblocks loaded on the container page.
"""
return self._get_xblocks()
@property
def inactive_xblocks(self):
"""
Return a list of inactive xblocks loaded on the container page.
"""
return self._get_xblocks(".is-inactive ")
@property
def active_xblocks(self):
"""
Return a list of active xblocks loaded on the container page.
"""
return self._get_xblocks(".is-active ")
@property
def publish_title(self):
"""
Returns the title as displayed on the publishing sidebar component.
"""
return self.q(css='.pub-status').first.text[0]
@property
def release_title(self):
"""
Returns the title before the release date in the publishing sidebar component.
"""
return self.q(css='.wrapper-release .title').first.text[0]
@property
def release_date(self):
"""
Returns the release date of the unit (with ancestor inherited from), as displayed
in the publishing sidebar component.
"""
return self.q(css='.wrapper-release .copy').first.text[0]
@property
def last_saved_text(self):
"""
Returns the last saved message as displayed in the publishing sidebar component.
"""
return self.q(css='.wrapper-last-draft').first.text[0]
@property
def last_published_text(self):
"""
Returns the last published message as displayed in the sidebar.
"""
return self.q(css='.wrapper-last-publish').first.text[0]
@property
def currently_visible_to_students(self):
"""
Returns True if the unit is marked as currently visible to students
(meaning that a warning is being displayed).
"""
warnings = self.q(css='.container-message .warning')
if not warnings.is_present():
return False
warning_text = warnings.first.text[0]
return warning_text == "Caution: The last published version of this unit is live. By publishing changes you will change the student experience."
def shows_inherited_staff_lock(self, parent_type=None, parent_name=None):
"""
Returns True if the unit inherits staff lock from a section or subsection.
"""
return self.q(css='.bit-publishing .wrapper-visibility .copy .inherited-from').visible
@property
def sidebar_visibility_message(self):
"""
Returns the text within the sidebar visibility section.
"""
return self.q(css='.bit-publishing .wrapper-visibility').first.text[0]
@property
def publish_action(self):
"""
Returns the link for publishing a unit.
"""
return self.q(css='.action-publish').first
def discard_changes(self):
"""
Discards draft changes (which will then re-render the page).
"""
click_css(self, 'a.action-discard', 0, require_notification=False)
confirm_prompt(self)
self.wait_for_ajax()
@property
def is_staff_locked(self):
""" Returns True if staff lock is currently enabled, False otherwise """
for attr in self.q(css='a.action-staff-lock>.fa').attrs('class'):
if 'fa-check-square-o' in attr:
return True
return False
def toggle_staff_lock(self, inherits_staff_lock=False):
"""
Toggles "hide from students" which enables or disables a staff-only lock.
Returns True if the lock is now enabled, else False.
"""
was_locked_initially = self.is_staff_locked
if not was_locked_initially:
self.q(css='a.action-staff-lock').first.click()
else:
click_css(self, 'a.action-staff-lock', 0, require_notification=False)
if not inherits_staff_lock:
confirm_prompt(self)
self.wait_for_ajax()
return not was_locked_initially
def view_published_version(self):
"""
Clicks "View Live Version", which will open the published version of the unit page in the LMS.
Switches the browser to the newly opened LMS window.
"""
self.q(css='.button-view').first.click()
self._switch_to_lms()
def preview(self):
"""
Clicks "Preview", which will open the draft version of the unit page in the LMS.
Switches the browser to the newly opened LMS window.
"""
self.q(css='.button-preview').first.click()
self._switch_to_lms()
def _switch_to_lms(self):
"""
Assumes LMS has opened-- switches to that window.
"""
browser_window_handles = self.browser.window_handles
# Switch to browser window that shows HTML Unit in LMS
# The last handle represents the latest windows opened
self.browser.switch_to_window(browser_window_handles[-1])
def _get_xblocks(self, prefix=""):
return self.q(css=prefix + XBlockWrapper.BODY_SELECTOR).map(
lambda el: XBlockWrapper(self.browser, el.get_attribute('data-locator'))).results
def duplicate(self, source_index):
"""
Duplicate the item with index source_index (based on vertical placement in page).
"""
click_css(self, 'a.duplicate-button', source_index)
def delete(self, source_index):
"""
Delete the item with index source_index (based on vertical placement in page).
Only visible items are counted in the source_index.
The index of the first item is 0.
"""
# Click the delete button
click_css(self, 'a.delete-button', source_index, require_notification=False)
# Click the confirmation dialog button
confirm_prompt(self)
def edit(self):
"""
Clicks the "edit" button for the first component on the page.
"""
return _click_edit(self, '.edit-button', '.xblock-studio_view')
def add_missing_groups(self):
"""
Click the "add missing groups" link.
Note that this does an ajax call.
"""
self.q(css=self.ADD_MISSING_GROUPS_SELECTOR).first.click()
self.wait_for_ajax()
# Wait until all xblocks rendered.
self.wait_for_page()
def missing_groups_button_present(self):
"""
Returns True if the "add missing groups" button is present.
"""
return self.q(css=self.ADD_MISSING_GROUPS_SELECTOR).present
def get_xblock_information_message(self):
"""
Returns an information message for the container page.
"""
return self.q(css=".xblock-message.information").first.text[0]
def is_inline_editing_display_name(self):
"""
Return whether this container's display name is in its editable form.
"""
return "is-editing" in self.q(css=self.NAME_FIELD_WRAPPER_SELECTOR).first.attrs("class")[0]
def get_category_tab_names(self, category_type):
"""
Returns list of tab name in a category.
Arguments:
category_type (str): category type
Returns:
list
"""
self.q(css='.add-xblock-component-button[data-type={}]'.format(category_type)).first.click()
return self.q(css='.{}-type-tabs>li>a'.format(category_type)).text
def get_category_tab_components(self, category_type, tab_index):
"""
Return list of component names in a tab in a category.
Arguments:
category_type (str): category type
tab_index (int): tab index in a category
Returns:
list
"""
css = '#tab{tab_index} button[data-category={category_type}] span'.format(
tab_index=tab_index,
category_type=category_type
)
return self.q(css=css).html
class XBlockWrapper(PageObject):
"""
A PageObject representing a wrapper around an XBlock child shown on the Studio container page.
"""
url = None
BODY_SELECTOR = '.studio-xblock-wrapper'
NAME_SELECTOR = '.xblock-display-name'
VALIDATION_SELECTOR = '.xblock-message.validation'
COMPONENT_BUTTONS = {
'basic_tab': '.editor-tabs li.inner_tab_wrap:nth-child(1) > a',
'advanced_tab': '.editor-tabs li.inner_tab_wrap:nth-child(2) > a',
'settings_tab': '.editor-modes .settings-button',
'save_settings': '.action-save',
}
def __init__(self, browser, locator):
super(XBlockWrapper, self).__init__(browser)
self.locator = locator
def is_browser_on_page(self):
return self.q(css='{}[data-locator="{}"]'.format(self.BODY_SELECTOR, self.locator)).present
def _bounded_selector(self, selector):
"""
Return `selector`, but limited to this particular `CourseOutlineChild` context
"""
return '{}[data-locator="{}"] {}'.format(
self.BODY_SELECTOR,
self.locator,
selector
)
@property
def student_content(self):
"""
Returns the text content of the xblock as displayed on the container page.
"""
return self.q(css=self._bounded_selector('.xblock-student_view'))[0].text
@property
def author_content(self):
"""
Returns the text content of the xblock as displayed on the container page.
(For blocks which implement a distinct author_view).
"""
return self.q(css=self._bounded_selector('.xblock-author_view'))[0].text
@property
def name(self):
titles = self.q(css=self._bounded_selector(self.NAME_SELECTOR)).text
if titles:
return titles[0]
else:
return None
@property
def children(self):
"""
Will return any first-generation descendant xblocks of this xblock.
"""
descendants = self.q(css=self._bounded_selector(self.BODY_SELECTOR)).map(
lambda el: XBlockWrapper(self.browser, el.get_attribute('data-locator'))).results
# Now remove any non-direct descendants.
grandkids = []
for descendant in descendants:
grandkids.extend(descendant.children)
grand_locators = [grandkid.locator for grandkid in grandkids]
return [descendant for descendant in descendants if descendant.locator not in grand_locators]
@property
def has_validation_message(self):
""" Is a validation warning/error/message shown? """
return self.q(css=self._bounded_selector(self.VALIDATION_SELECTOR)).present
def _validation_paragraph(self, css_class):
""" Helper method to return the <p> element of a validation warning """
return self.q(css=self._bounded_selector('{} p.{}'.format(self.VALIDATION_SELECTOR, css_class)))
@property
def has_validation_warning(self):
""" Is a validation warning shown? """
return self._validation_paragraph('warning').present
@property
def has_validation_error(self):
""" Is a validation error shown? """
return self._validation_paragraph('error').present
@property
# pylint: disable=invalid-name
def has_validation_not_configured_warning(self):
""" Is a validation "not configured" message shown? """
return self._validation_paragraph('not-configured').present
@property
def validation_warning_text(self):
""" Get the text of the validation warning. """
return self._validation_paragraph('warning').text[0]
@property
def validation_error_text(self):
""" Get the text of the validation error. """
return self._validation_paragraph('error').text[0]
@property
def validation_error_messages(self):
return self.q(css=self._bounded_selector('{} .xblock-message-item.error'.format(self.VALIDATION_SELECTOR))).text
@property
# pylint: disable=invalid-name
def validation_not_configured_warning_text(self):
""" Get the text of the validation "not configured" message. """
return self._validation_paragraph('not-configured').text[0]
@property
def preview_selector(self):
return self._bounded_selector('.xblock-student_view,.xblock-author_view')
@property
def has_group_visibility_set(self):
return self.q(css=self._bounded_selector('.wrapper-xblock.has-group-visibility-set')).is_present()
@property
def has_duplicate_button(self):
"""
Returns true if this xblock has a 'duplicate' button
"""
return self.q(css=self._bounded_selector('a.duplicate-button'))
@property
def has_delete_button(self):
"""
Returns true if this xblock has a 'delete' button
"""
return self.q(css=self._bounded_selector('a.delete-button'))
@property
def has_edit_visibility_button(self):
"""
Returns true if this xblock has an 'edit visibility' button
:return:
"""
return self.q(css=self._bounded_selector('.visibility-button')).is_present()
def go_to_container(self):
"""
Open the container page linked to by this xblock, and return
an initialized :class:`.ContainerPage` for that xblock.
"""
return ContainerPage(self.browser, self.locator).visit()
def edit(self):
"""
Clicks the "edit" button for this xblock.
"""
return _click_edit(self, '.edit-button', '.xblock-studio_view', self._bounded_selector)
def edit_visibility(self):
"""
Clicks the edit visibility button for this xblock.
"""
return _click_edit(self, '.visibility-button', '.xblock-visibility_view', self._bounded_selector)
def open_advanced_tab(self):
"""
Click on Advanced Tab.
"""
self._click_button('advanced_tab')
def open_basic_tab(self):
"""
Click on Basic Tab.
"""
self._click_button('basic_tab')
def open_settings_tab(self):
"""
If editing, click on the "Settings" tab
"""
self._click_button('settings_tab')
def set_field_val(self, field_display_name, field_value):
"""
If editing, set the value of a field.
"""
selector = '{} li.field label:contains("{}") + input'.format(self.editor_selector, field_display_name)
script = "$(arguments[0]).val(arguments[1]).change();"
self.browser.execute_script(script, selector, field_value)
def reset_field_val(self, field_display_name):
"""
If editing, reset the value of a field to its default.
"""
scope = '{} li.field label:contains("{}")'.format(self.editor_selector, field_display_name)
script = "$(arguments[0]).siblings('.setting-clear').click();"
self.browser.execute_script(script, scope)
def set_codemirror_text(self, text, index=0):
"""
Set the text of a CodeMirror editor that is part of this xblock's settings.
"""
type_in_codemirror(self, index, text, find_prefix='$("{}").find'.format(self.editor_selector))
def set_license(self, license_type):
"""
Uses the UI to set the course's license to the given license_type (str)
"""
css_selector = (
"ul.license-types li[data-license={license_type}] button"
).format(license_type=license_type)
self.wait_for_element_presence(
css_selector,
"{license_type} button is present".format(license_type=license_type)
)
self.q(css=css_selector).click()
def save_settings(self):
"""
Click on settings Save button.
"""
self._click_button('save_settings')
@property
def editor_selector(self):
return '.xblock-studio_view'
def _click_button(self, button_name):
"""
Click on a button as specified by `button_name`
Arguments:
button_name (str): button name
"""
self.q(css=self.COMPONENT_BUTTONS[button_name]).first.click()
self.wait_for_ajax()
def go_to_group_configuration_page(self):
"""
Go to the Group Configuration used by the component.
"""
self.q(css=self._bounded_selector('span.message-text a')).first.click()
def is_placeholder(self):
"""
Checks to see if the XBlock is rendered as a placeholder without a preview.
"""
return not self.q(css=self._bounded_selector('.wrapper-xblock article')).present
@property
def group_configuration_link_name(self):
"""
Get Group Configuration name from link.
"""
return self.q(css=self._bounded_selector('span.message-text a')).first.text[0]
def _click_edit(page_object, button_css, view_css, bounded_selector=lambda x: x):
"""
Click on the first editing button found and wait for the Studio editor to be present.
"""
page_object.q(css=bounded_selector(button_css)).first.click()
EmptyPromise(
lambda: page_object.q(css=view_css).present,
'Wait for the Studio editor to be present'
).fulfill()
return page_object
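# Hedged usage sketch (added; the locator string is hypothetical -- real ones
# come from a course fixture in the acceptance tests):
#
#   page = ContainerPage(browser, "block-v1:edX+Demo+2014+type@vertical+block@u1")
#   page.visit()
#   page.xblocks[0].edit()
#   page.publish_action.click()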
|
louyihua/edx-platform
|
common/test/acceptance/pages/studio/container.py
|
Python
|
agpl-3.0
| 20,916
|
[
"VisIt"
] |
f4667eac5d24c71148e8754496f84434aaa5aae25a4e33987ce3f1bf37781e4d
|
from gpaw.fd_operators import Gradient
import numpy as np
from gpaw.grid_descriptor import GridDescriptor
from gpaw.mpi import world
if world.size > 4:
# Grid is so small that domain decomposition cannot exceed 4 domains
assert world.size % 4 == 0
group, other = divmod(world.rank, 4)
ranks = np.arange(4*group, 4*(group+1))
domain_comm = world.new_communicator(ranks)
else:
domain_comm = world
gd = GridDescriptor((8, 1, 1), (8.0, 1.0, 1.0), comm=domain_comm)
a = gd.zeros()
dadx = gd.zeros()
a[:, 0, 0] = np.arange(gd.beg_c[0], gd.end_c[0])
gradx = Gradient(gd, v=0)
print a.itemsize, a.dtype, a.shape
print dadx.itemsize, dadx.dtype, dadx.shape
gradx.apply(a, dadx)
# a = [ 0. 1. 2. 3. 4. 5. 6. 7.]
#
# da
# -- = [-2.5 1. 1. 1. 1. 1. 1. -2.5]
# dx
dadx = gd.collect(dadx, broadcast=True)
assert dadx[3, 0, 0] == 1.0 and np.sum(dadx[:, 0, 0]) == 0.0
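# Note (added): the end values (-2.5 here) come from the finite-difference
# stencil wrapping across the periodic x boundary, where the linear ramp
# 0..7 jumps back to 0; the interior derivative is the expected 1.0.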
gd = GridDescriptor((1, 8, 1), (1.0, 8.0, 1.0), (1, 0, 1), comm=domain_comm)
dady = gd.zeros()
a = gd.zeros()
grady = Gradient(gd, v=1)
a[0, :, 0] = np.arange(gd.beg_c[1], gd.end_c[1]) - 1
grady.apply(a, dady)
# da
# -- = [0.5 1. 1. 1. 1. 1. -2.5]
# dy
dady = gd.collect(dady, broadcast=True)
assert dady[0, 0, 0] == 0.5 and np.sum(dady[0, :, 0]) == 3.0
# a GUC (general, non-orthorhombic unit cell) case
gd = GridDescriptor((1, 7, 1),
((1.0, 0.0, 0.0),
(5.0, 5.0, 0.0),
(0.0, 0.0, 0.7)), comm=domain_comm)
dady = gd.zeros()
grady = Gradient(gd, v=1)
a = gd.zeros()
a[0, :, 0] = np.arange(gd.beg_c[1], gd.end_c[1]) - 1
grady.apply(a, dady)
# da
# -- = [-3.5 1.4 1.4 1.4 1.4 1.4 -3.5]
# dy
dady = gd.collect(dady, broadcast=True)
assert dady[0, 0, 0] == -3.5 and abs(np.sum(dady[0, :, 0])) < 1E-12
|
qsnake/gpaw
|
gpaw/test/gradient.py
|
Python
|
gpl-3.0
| 1,766
|
[
"GPAW"
] |
dd8de563bb0dc36d3c55808ba9ee83170078bcac7c5fe36ae0afe6a824d01bf8
|
# -*- coding: utf-8 -*-
# Copyright 2012 splinter authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""
This snippet shows how to "test" a Facebook feature: the creation of an event.
It creates an event by going to http://www.facebook.com, logging in, and navigating to the "Create an event" page.
"""
import os
import unittest
import time
from splinter import Browser
class FacebookEventsTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.browser = Browser('firefox')
@classmethod
def tearDownClass(cls):
cls.browser.quit()
def do_login_if_need(self, username, password):
if self.browser.is_element_present_by_css('div.menu_login_container'):
self.browser.fill('email', username)
self.browser.fill('pass', password)
self.browser.find_by_css('div.menu_login_container input[type="submit"]').first.click()
assert self.browser.is_element_present_by_css('li#navAccount')
def test_create_event(self):
"Should be able to create an event"
# Open home and login
self.browser.visit("http://www.facebook.com")
self.do_login_if_need(username='user', password='pass')
# Go to events page
self.browser.find_by_css('li#navItem_events a').first.click()
# Click on "Create an event button"
self.browser.find_by_css('div.uiHeaderTop a.uiButton').first.click()
time.sleep(1)
# Uploading the picture
picture_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'img', 'turtles.jpg')
self.browser.find_by_css('div.eventEditUpload a.uiButton').first.click()
if not self.browser.is_element_present_by_css('iframe#upload_pic_frame', wait_time=10):
self.fail("The upload pic iframe didn't appear :(")
with self.browser.get_iframe('upload_pic_frame') as frame:
frame.attach_file('pic', picture_path)
time.sleep(10)
# Filling the form
self.browser.fill('event_startIntlDisplay', '5/21/2011')
self.browser.select('start_time_min', '480')
self.browser.fill('name', 'Splinter sprint')
self.browser.fill('location', 'Rio de Janeiro, Brazil')
self.browser.fill('desc', 'For more info, check out the #cobratem channel on freenode!')
self.browser.find_by_css('label.uiButton input[type="submit"]').first.click()
time.sleep(1)
        # Checking if the event was created and we were redirected to its page
title = self.browser.find_by_css('h1 span').first.text
assert title == 'Splinter sprint', title
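# Standard unittest entry point (an addition, not part of the original
# sample) so the test case can be run directly as a script.
if __name__ == '__main__':
    unittest.main()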
|
nikolas/splinter
|
samples/test_facebook_events.py
|
Python
|
bsd-3-clause
| 2,705
|
[
"VisIt"
] |
9ffc61b82b732b714a25e83905eba6b11807b342982a332c2514dfba2c8de15a
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module contains an algorithm to solve the Linear Assignment Problem.
It has the same functionality as linear_assignment.pyx, but is much slower
as it is vectorized in numpy rather than cython
"""
__author__ = "Will Richards"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Will Richards"
__email__ = "wrichards@mit.edu"
__date__ = "Jan 28, 2013"
import numpy as np
class LinearAssignment:
"""
This class finds the solution to the Linear Assignment Problem.
It finds a minimum cost matching between two sets, given a cost
matrix.
This class is an implementation of the LAPJV algorithm described in:
R. Jonker, A. Volgenant. A Shortest Augmenting Path Algorithm for
Dense and Sparse Linear Assignment Problems. Computing 38, 325-340
(1987)
.. attribute: min_cost:
The minimum cost of the matching
.. attribute: solution:
    The matching of the rows to columns, i.e. solution = [1, 2, 0]
would match row 0 to column 1, row 1 to column 2 and row 2
to column 0. Total cost would be c[0, 1] + c[1, 2] + c[2, 0]
"""
def __init__(self, costs, epsilon=1e-6):
"""
Args:
costs: The cost matrix of the problem. cost[i,j] should be the
cost of matching x[i] to y[j]. The cost matrix may be
rectangular
epsilon: Tolerance for determining if solution vector is < 0
"""
self.orig_c = np.array(costs, dtype=np.float64)
self.nx, self.ny = self.orig_c.shape
self.n = self.ny
self._inds = np.arange(self.n)
self.epsilon = abs(epsilon)
        # the cost matrix must have at least as many columns as rows;
        # rectangular matrices are padded to square below
if self.nx > self.ny:
raise ValueError("cost matrix must have at least as many columns as rows")
if self.nx == self.ny:
self.c = self.orig_c
else:
# Can run into precision issues if np.max is used as the fill value (since a
# value of this size doesn't necessarily end up in the solution). A value
# at least as large as the maximin is, however, guaranteed to appear so it
# is a safer choice. The fill value is not zero to avoid choosing the extra
# rows in the initial column reduction step
self.c = np.full((self.n, self.n), np.max(np.min(self.orig_c, axis=1)))
self.c[: self.nx] = self.orig_c
# initialize solution vectors
self._x = np.zeros(self.n, dtype=np.int_) - 1
self._y = self._x.copy()
# if column reduction doesn't find a solution, augment with shortest
# paths until one is found
if self._column_reduction():
self._augmenting_row_reduction()
# initialize the reduced costs
self._update_cred()
while -1 in self._x:
self._augment()
self.solution = self._x[: self.nx]
self._min_cost = None
@property
def min_cost(self):
"""
Returns the cost of the best assignment
"""
if self._min_cost:
return self._min_cost
self._min_cost = np.sum(self.c[np.arange(self.nx), self.solution])
return self._min_cost
def _column_reduction(self):
"""
Column reduction and reduction transfer steps from LAPJV algorithm
"""
# assign each column to its lowest cost row, ensuring that only row
# or column is assigned once
i1, j = np.unique(np.argmin(self.c, axis=0), return_index=True)
self._x[i1] = j
# if problem is solved, return
if len(i1) == self.n:
return False
self._y[j] = i1
# reduction_transfer
# tempc is array with previously assigned matchings masked
self._v = np.min(self.c, axis=0)
tempc = self.c.copy()
tempc[i1, j] = np.inf
mu = np.min(tempc[i1, :] - self._v[None, :], axis=1)
self._v[j] -= mu
return True
def _augmenting_row_reduction(self):
"""
Augmenting row reduction step from LAPJV algorithm
"""
unassigned = np.where(self._x == -1)[0]
for i in unassigned:
for _ in range(self.c.size):
# Time in this loop can be proportional to 1/epsilon
# This step is not strictly necessary, so cutoff early
# to avoid near-infinite loops
# find smallest 2 values and indices
temp = self.c[i] - self._v
j1 = np.argmin(temp)
u1 = temp[j1]
temp[j1] = np.inf
j2 = np.argmin(temp)
u2 = temp[j2]
if u1 < u2:
self._v[j1] -= u2 - u1
elif self._y[j1] != -1:
j1 = j2
k = self._y[j1]
if k != -1:
self._x[k] = -1
self._x[i] = j1
self._y[j1] = i
i = k
if k == -1 or abs(u1 - u2) < self.epsilon:
break
def _update_cred(self):
"""
Updates the reduced costs with the values from the
dual solution
"""
ui = self.c[self._inds, self._x] - self._v[self._x]
self.cred = self.c - ui[:, None] - self._v[None, :]
def _augment(self):
"""
Finds a minimum cost path and adds it to the matching
"""
# build a minimum cost tree
_pred, _ready, istar, j, mu = self._build_tree()
# update prices
self._v[_ready] += self._d[_ready] - mu
# augment the solution with the minimum cost path from the
# tree. Follows an alternating path along matched, unmatched
# edges from X to Y
while True:
i = _pred[j]
self._y[j] = i
k = j
j = self._x[i]
self._x[i] = k
if i == istar:
break
self._update_cred()
def _build_tree(self):
"""
Builds the tree finding an augmenting path. Alternates along
matched and unmatched edges between X and Y. The paths are
stored in _pred (new predecessor of nodes in Y), and
self._x and self._y
"""
# find unassigned i*
istar = np.argmin(self._x)
# compute distances
self._d = self.c[istar] - self._v
_pred = np.zeros(self.n, dtype=np.int_) + istar
# initialize sets
# READY: set of nodes visited and in the path (whose price gets
# updated in augment)
# SCAN: set of nodes at the bottom of the tree, which we need to
# look at
        # TODO: unvisited nodes
        _ready = np.zeros(self.n, dtype=bool)
        _scan = np.zeros(self.n, dtype=bool)
        _todo = np.ones(self.n, dtype=bool)
while True:
# populate scan with minimum reduced distances
if True not in _scan:
mu = np.min(self._d[_todo])
_scan[self._d == mu] = True
_todo[_scan] = False
j = np.argmin(self._y * _scan)
if self._y[j] == -1 and _scan[j]:
return _pred, _ready, istar, j, mu
# pick jstar from scan (scan always has at least 1)
_jstar = np.argmax(_scan)
# pick i associated with jstar
i = self._y[_jstar]
_scan[_jstar] = False
_ready[_jstar] = True
# find shorter distances
newdists = mu + self.cred[i, :]
shorter = np.logical_and(newdists < self._d, _todo)
# update distances
self._d[shorter] = newdists[shorter]
# update predecessors
_pred[shorter] = i
for j in np.nonzero(np.logical_and(self._d == mu, _todo))[0]:
if self._y[j] == -1:
return _pred, _ready, istar, j, mu
_scan[j] = True
_todo[j] = False
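# Minimal usage sketch (appended; not part of the original module): solve a
# small 3x3 assignment problem and inspect the matching and its cost.
if __name__ == "__main__":
    costs = np.array([[4.0, 1.0, 3.0],
                      [2.0, 0.0, 5.0],
                      [3.0, 2.0, 2.0]])
    la = LinearAssignment(costs)
    # The unique optimal matching is row 0 -> col 1, row 1 -> col 0,
    # row 2 -> col 2, for a total cost of 1 + 2 + 2 = 5.
    print(la.solution, la.min_cost)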
|
vorwerkc/pymatgen
|
pymatgen/optimization/linear_assignment_numpy.py
|
Python
|
mit
| 8,191
|
[
"pymatgen"
] |
fa96d4aeee952276f9821cc0814a5e51c3573aa34e9617c3fe756311af554828
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2005 Edgewall Software
# Copyright (C) 2003-2005 Jonas Borgström <jonas@edgewall.com>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Jonas Borgström <jonas@edgewall.com>
import os
from trac import db_default, util
from trac.config import *
from trac.core import Component, ComponentManager, implements, Interface, \
ExtensionPoint, TracError
from trac.db import DatabaseManager
from trac.versioncontrol import RepositoryManager
__all__ = ['Environment', 'IEnvironmentSetupParticipant', 'open_environment']
class IEnvironmentSetupParticipant(Interface):
"""Extension point interface for components that need to participate in the
creation and upgrading of Trac environments, for example to create
additional database tables."""
def environment_created():
"""Called when a new Trac environment is created."""
def environment_needs_upgrade(db):
"""Called when Trac checks whether the environment needs to be upgraded.
Should return `True` if this participant needs an upgrade to be
performed, `False` otherwise.
"""
def upgrade_environment(db):
"""Actually perform an environment upgrade.
Implementations of this method should not commit any database
transactions. This is done implicitly after all participants have
performed the upgrades they need without an error being raised.
"""
class Environment(Component, ComponentManager):
"""Trac stores project information in a Trac environment.
A Trac environment consists of a directory structure containing among other
things:
* a configuration file.
* an SQLite database (stores tickets, wiki pages...)
* Project specific templates and wiki macros.
* wiki and ticket attachments.
"""
setup_participants = ExtensionPoint(IEnvironmentSetupParticipant)
base_url = Option('trac', 'base_url', '',
"""Base URL of the Trac deployment.
        In most configurations, Trac will automatically reconstruct the URL
        that is used to access it. However, in more complex setups, usually
        involving running Trac behind an HTTP proxy, you may
need to use this option to force Trac to use the correct URL.""")
project_name = Option('project', 'name', 'My Project',
"""Name of the project.""")
project_description = Option('project', 'descr', 'My example project',
"""Short description of the project.""")
project_url = Option('project', 'url', 'http://example.org/',
"""URL of the main project web site.""")
project_footer = Option('project', 'footer',
'Visit the Trac open source project at<br />'
'<a href="http://trac.edgewall.org/">'
'http://trac.edgewall.org/</a>',
"""Page footer text (right-aligned).""")
project_icon = Option('project', 'icon', 'common/trac.ico',
"""URL of the icon of the project.""")
log_type = Option('logging', 'log_type', 'none',
"""Logging facility to use.
Should be one of (`none`, `file`, `stderr`, `syslog`, `winlog`).""")
log_file = Option('logging', 'log_file', 'trac.log',
"""If `log_type` is `file`, this should be a path to the log-file.""")
log_level = Option('logging', 'log_level', 'DEBUG',
"""Level of verbosity in log.
Should be one of (`CRITICAL`, `ERROR`, `WARN`, `INFO`, `DEBUG`).""")
log_format = Option('logging', 'log_format', None,
"""Custom logging format.
If nothing is set, the following will be used:
Trac[$(module)s] $(levelname)s: $(message)s
        In addition to regular key names supported by the Python logging
        library (see http://docs.python.org/lib/node422.html), one could use:
- $(path)s the path for the current environment
- $(basename)s the last path component of the current environment
- $(project)s the project name
Note the usage of `$(...)s` instead of `%(...)s` as the latter form
would be interpreted by the ConfigParser itself.
Example:
($(thread)d) Trac[$(basename)s:$(module)s] $(levelname)s: $(message)s
(since 0.11)""")
def __init__(self, path, create=False, options=[]):
"""Initialize the Trac environment.
@param path: the absolute path to the Trac environment
@param create: if `True`, the environment is created and populated with
default data; otherwise, the environment is expected to
already exist.
@param options: A list of `(section, name, value)` tuples that define
configuration options
"""
ComponentManager.__init__(self)
self.path = path
self.setup_config(load_defaults=create)
self.setup_log()
from trac.loader import load_components
load_components(self)
if create:
self.create(options)
else:
self.verify()
if create:
for setup_participant in self.setup_participants:
setup_participant.environment_created()
def component_activated(self, component):
"""Initialize additional member variables for components.
Every component activated through the `Environment` object gets three
member variables: `env` (the environment object), `config` (the
environment configuration) and `log` (a logger object)."""
component.env = self
component.config = self.config
component.log = self.log
def is_component_enabled(self, cls):
"""Implemented to only allow activation of components that are not
disabled in the configuration.
This is called by the `ComponentManager` base class when a component is
about to be activated. If this method returns false, the component does
not get activated."""
if not isinstance(cls, basestring):
component_name = (cls.__module__ + '.' + cls.__name__).lower()
else:
component_name = cls.lower()
rules = [(name.lower(), value.lower() in ('enabled', 'on'))
for name, value in self.config.options('components')]
rules.sort(lambda a, b: -cmp(len(a[0]), len(b[0])))
for pattern, enabled in rules:
if component_name == pattern or pattern.endswith('*') \
and component_name.startswith(pattern[:-1]):
return enabled
# versioncontrol components are enabled if the repository is configured
# FIXME: this shouldn't be hardcoded like this
if component_name.startswith('trac.versioncontrol.'):
return self.config.get('trac', 'repository_dir') != ''
# By default, all components in the trac package are enabled
return component_name.startswith('trac.')
def verify(self):
"""Verify that the provided path points to a valid Trac environment
directory."""
fd = open(os.path.join(self.path, 'VERSION'), 'r')
try:
assert fd.read(26) == 'Trac Environment Version 1'
finally:
fd.close()
def get_db_cnx(self):
"""Return a database connection from the connection pool."""
return DatabaseManager(self).get_connection()
def shutdown(self, tid=None):
"""Close the environment."""
RepositoryManager(self).shutdown(tid)
DatabaseManager(self).shutdown(tid)
def get_repository(self, authname=None):
"""Return the version control repository configured for this
environment.
@param authname: user name for authorization
"""
return RepositoryManager(self).get_repository(authname)
def create(self, options=[]):
"""Create the basic directory structure of the environment, initialize
the database and populate the configuration file with default values."""
def _create_file(fname, data=None):
fd = open(fname, 'w')
if data: fd.write(data)
fd.close()
# Create the directory structure
if not os.path.exists(self.path):
os.mkdir(self.path)
os.mkdir(self.get_log_dir())
os.mkdir(self.get_htdocs_dir())
os.mkdir(os.path.join(self.path, 'plugins'))
os.mkdir(os.path.join(self.path, 'wiki-macros'))
# Create a few files
_create_file(os.path.join(self.path, 'VERSION'),
'Trac Environment Version 1\n')
_create_file(os.path.join(self.path, 'README'),
'This directory contains a Trac environment.\n'
'Visit http://trac.edgewall.org/ for more information.\n')
# Setup the default configuration
os.mkdir(os.path.join(self.path, 'conf'))
_create_file(os.path.join(self.path, 'conf', 'trac.ini'))
self.setup_config(load_defaults=True)
for section, name, value in options:
self.config.set(section, name, value)
self.config.save()
# Create the database
DatabaseManager(self).init_db()
def get_version(self, db=None):
"""Return the current version of the database."""
if not db:
db = self.get_db_cnx()
cursor = db.cursor()
cursor.execute("SELECT value FROM system WHERE name='database_version'")
row = cursor.fetchone()
return row and int(row[0])
def setup_config(self, load_defaults=False):
"""Load the configuration file."""
self.config = Configuration(os.path.join(self.path, 'conf', 'trac.ini'))
if load_defaults:
for section, default_options in self.config.defaults().iteritems():
for name, value in default_options.iteritems():
if self.config.has_site_option(section, name):
value = None
self.config.set(section, name, value)
def get_templates_dir(self):
"""Return absolute path to the templates directory."""
return os.path.join(self.path, 'templates')
def get_htdocs_dir(self):
"""Return absolute path to the htdocs directory."""
return os.path.join(self.path, 'htdocs')
def get_log_dir(self):
"""Return absolute path to the log directory."""
return os.path.join(self.path, 'log')
def setup_log(self):
"""Initialize the logging sub-system."""
from trac.log import logger_factory
logtype = self.log_type
logfile = self.log_file
if logtype == 'file' and not os.path.isabs(logfile):
logfile = os.path.join(self.get_log_dir(), logfile)
format = self.log_format
if format:
format = format.replace('$(', '%(') \
.replace('%(path)s', self.path) \
.replace('%(basename)s', os.path.basename(self.path)) \
.replace('%(project)s', self.project_name)
self.log = logger_factory(logtype, logfile, self.log_level, self.path,
format=format)
def get_known_users(self, cnx=None):
"""Generator that yields information about all known users, i.e. users
that have logged in to this Trac environment and possibly set their name
and email.
This function generates one tuple for every user, of the form
(username, name, email) ordered alpha-numerically by username.
        @param cnx: the database connection; if omitted, a new connection is
retrieved
"""
if not cnx:
cnx = self.get_db_cnx()
cursor = cnx.cursor()
cursor.execute("SELECT DISTINCT s.sid, n.value, e.value "
"FROM session AS s "
" LEFT JOIN session_attribute AS n ON (n.sid=s.sid "
" and n.authenticated=1 AND n.name = 'name') "
" LEFT JOIN session_attribute AS e ON (e.sid=s.sid "
" AND e.authenticated=1 AND e.name = 'email') "
"WHERE s.authenticated=1 ORDER BY s.sid")
for username,name,email in cursor:
yield username, name, email
def backup(self, dest=None):
"""Simple SQLite-specific backup of the database.
@param dest: Destination file; if not specified, the backup is stored in
a file called db_name.trac_version.bak
"""
import shutil
db_str = self.config.get('trac', 'database')
if not db_str.startswith('sqlite:'):
raise EnvironmentError('Can only backup sqlite databases')
db_name = os.path.join(self.path, db_str[7:])
if not dest:
dest = '%s.%i.bak' % (db_name, self.get_version())
        shutil.copy(db_name, dest)
def needs_upgrade(self):
"""Return whether the environment needs to be upgraded."""
db = self.get_db_cnx()
for participant in self.setup_participants:
if participant.environment_needs_upgrade(db):
self.log.warning('Component %s requires environment upgrade',
participant)
return True
return False
def upgrade(self, backup=False, backup_dest=None):
"""Upgrade database.
        Each db version should have its own upgrade module, named
upgrades/dbN.py, where 'N' is the version number (int).
@param backup: whether or not to backup before upgrading
@param backup_dest: name of the backup file
@return: whether the upgrade was performed
"""
db = self.get_db_cnx()
upgraders = []
for participant in self.setup_participants:
if participant.environment_needs_upgrade(db):
upgraders.append(participant)
if not upgraders:
return False
if backup:
self.backup(backup_dest)
for participant in upgraders:
participant.upgrade_environment(db)
db.commit()
# Database schema may have changed, so close all connections
self.shutdown()
return True
class EnvironmentSetup(Component):
implements(IEnvironmentSetupParticipant)
# IEnvironmentSetupParticipant methods
def environment_created(self):
"""Insert default data into the database."""
db = self.env.get_db_cnx()
cursor = db.cursor()
for table, cols, vals in db_default.get_data(db):
cursor.executemany("INSERT INTO %s (%s) VALUES (%s)" % (table,
','.join(cols), ','.join(['%s' for c in cols])),
vals)
db.commit()
self._update_sample_config()
def environment_needs_upgrade(self, db):
dbver = self.env.get_version(db)
if dbver == db_default.db_version:
return False
elif dbver > db_default.db_version:
raise TracError, 'Database newer than Trac version'
return True
def upgrade_environment(self, db):
cursor = db.cursor()
dbver = self.env.get_version()
for i in range(dbver + 1, db_default.db_version + 1):
name = 'db%i' % i
try:
upgrades = __import__('upgrades', globals(), locals(), [name])
script = getattr(upgrades, name)
except AttributeError:
err = 'No upgrade module for version %i (%s.py)' % (i, name)
raise TracError, err
script.do_upgrade(self.env, i, cursor)
cursor.execute("UPDATE system SET value=%s WHERE "
"name='database_version'", (db_default.db_version,))
self.log.info('Upgraded database version from %d to %d',
dbver, db_default.db_version)
self._update_sample_config()
# Internal methods
def _update_sample_config(self):
from ConfigParser import ConfigParser
config = ConfigParser()
for section, options in self.config.defaults().items():
config.add_section(section)
for name, value in options.items():
config.set(section, name, value)
filename = os.path.join(self.env.path, 'conf', 'trac.ini.sample')
try:
fileobj = file(filename, 'w')
            try:
                config.write(fileobj)
            finally:
                fileobj.close()
self.log.info('Wrote sample configuration file with the new '
'settings and their default values: %s',
filename)
except IOError, e:
self.log.warn('Couldn\'t write sample configuration file (%s)', e,
exc_info=True)
def open_environment(env_path=None):
"""Open an existing environment object, and verify that the database is up
to date.
    @param env_path: absolute path to the environment directory; if omitted,
the value of the `TRAC_ENV` environment variable is used
@return: the `Environment` object
"""
if not env_path:
env_path = os.getenv('TRAC_ENV')
if not env_path:
raise TracError, 'Missing environment variable "TRAC_ENV". Trac ' \
'requires this variable to point to a valid Trac ' \
'environment.'
env = Environment(env_path)
if env.needs_upgrade():
raise TracError, 'The Trac Environment needs to be upgraded. Run ' \
                         '"trac-admin %s upgrade"' % env_path
return env
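# Usage sketch (the environment path is hypothetical):
#
#     env = open_environment('/var/trac/myproject')
#     db = env.get_db_cnx()
#     for username, name, email in env.get_known_users():
#         print username, name, email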
|
cyphactor/lifecyclemanager
|
testenv/trac-0.10.4/trac/env.py
|
Python
|
gpl-3.0
| 18,288
|
[
"VisIt"
] |
008122754771332d3436a2780661adcdbe90419b58fc6e7e1efb95771fbc6a59
|
import os, time
from datetime import datetime
import pyinsane.abstract as pyinsane
from PIL import Image
def getScanners():
devices = pyinsane.get_devices()
    # load pickled devices if none are connected (only for debugging)
if len(devices) == 0:
import pickle
f = open('devices.dump', 'rb')
devices = pickle.load(f)
f.close()
return devices
def getScanner(scanners, model):
device = None
for scanner in scanners:
if scanner.model == model:
device = scanner
#device.options['source'].constraint.remove("ADF")
return device
def getScans(model):
fileInfos = []
for f in os.listdir(app.root_path + '/scans/'):
if f.startswith(model) and (f.endswith('jpeg') or f.endswith('png') or f.endswith('tiff')):
size = os.path.getsize(app.root_path + '/scans/' + f)
ctime = time.ctime(os.path.getctime(app.root_path + '/scans/' + f))
fileInfos.append({"name":f, "size":size, "ctime":ctime})
return fileInfos
def scan(device, multiple, parameters):
# check if multiple scans possible
if (multiple == 1) and (not "ADF" in device.options['source'].constraint):
return
else:
device.options['source'].value = "ADF"
    if not DEBUG:
        # setting scan parameters is currently disabled while debugging
        #for p in parameters.keys():
        #    device.options[p].value = parameters[p]
        pass
# start scan
scan_session = device.scan(multiple=multiple)
# run scan session
if multiple:
try:
while True:
try:
scan_session.scan.read()
except EOFError:
pass
except StopIteration:
pass
# save images
path_base = "./scans/" + model + "-" + datetime.now().strftime("%Y%m%d-%H%M%S") + "_"
for i in range(0, len(scan_session.images)):
image = scan_session.images[i]
image.save(path + i + ".png")
else:
try:
while True:
scan_session.scan.read()
except EOFError:
pass
# save image
image = scan_session.images[0]
path = "./scans/" + model + "-" + datetime.now().strftime("%Y%m%d-%H%M%S") + ".png"
image.save(path)
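# Usage sketch (assumes at least one scanner is attached; the model string
# is hypothetical):
if __name__ == '__main__':
    devices = getScanners()
    for d in devices:
        print d.model
    device = getScanner(devices, 'SomeScannerModel')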
|
RoboMod/Online-Scanner-Interface
|
app/helpers.py
|
Python
|
gpl-2.0
| 2,455
|
[
"ADF"
] |
69b30f71a7953d373f970b7d7208f5f6ea3ab000ac92581c70bda30eb4365bd6
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import os
from fractions import Fraction
import numpy as np
from monty.design_patterns import cached_class
import textwrap
from pymatgen.core import Lattice
from pymatgen.electronic_structure.core import Magmom
from pymatgen.symmetry.groups import SymmetryGroup, in_array_list
from pymatgen.core.operations import MagSymmOp
from pymatgen.util.string import transformation_to_string
import sqlite3
from array import array
__author__ = "Matthew Horton, Shyue Ping Ong"
__copyright__ = "Copyright 2017, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Matthew Horton"
__email__ = "mkhorton@lbl.gov"
__status__ = "Beta"
__date__ = "Feb 2017"
MAGSYMM_DATA = os.path.join(os.path.dirname(__file__), "symm_data_magnetic.sqlite")
@cached_class
class MagneticSpaceGroup(SymmetryGroup):
def __init__(self, id):
"""
Initializes a MagneticSpaceGroup from its Belov, Neronova and
Smirnova (BNS) number supplied as a list or its label supplied
as a string. To create a magnetic structure in pymatgen, the
Structure.from_magnetic_spacegroup() method can be used, which
relies on this class.
The main difference between magnetic space groups and normal
crystallographic space groups is the inclusion of a time reversal
operator that acts on an atom's magnetic moment. This is
indicated by a prime symbol (') next to the respective symmetry
operation in its label, e.g. the standard crystallographic
space group Pnma has magnetic subgroups Pn'ma, Pnm'a, Pnma',
Pn'm'a, Pnm'a', Pn'ma', Pn'm'a'.
The magnetic space groups are classified as one of 4 types
where G = magnetic space group, and F = parent crystallographic
space group:
1. G=F no time reversal, i.e. the same as corresponding
crystallographic group
2. G=F+F1', "grey" groups, where avg. magnetic moment is zero,
e.g. a paramagnet in zero ext. mag. field
3. G=D+(F-D)1', where D is an equi-translation subgroup of F of
index 2, lattice translations do not include time reversal
4. G=D+(F-D)1', where D is an equi-class subgroup of F of index 2
There are two common settings for magnetic space groups, BNS
and OG. In case 4, the BNS setting != OG setting, and so a
transformation to go between the two settings is required:
specifically, the BNS setting is derived from D, and the OG
setting is derived from F.
This means that the OG setting refers to the unit cell if magnetic
order is neglected, and requires multiple unit cells to reproduce
the full crystal periodicity when magnetic moments are present.
This does not make the OG setting, in general, useful for
electronic structure calculations and the BNS setting is preferred.
However, this class does contain information on the OG setting and
can be initialized from OG labels or numbers if required.
Conventions: ITC monoclinic unique axis b, monoclinic cell choice 1,
hexagonal axis for trigonal groups, origin choice 2 for groups with
more than one origin choice (ISO-MAG).
Raw data comes from ISO-MAG, ISOTROPY Software Suite, iso.byu.edu
http://stokes.byu.edu/iso/magnetic_data.txt
with kind permission from Professor Branton Campbell, BYU
Data originally compiled from:
(1) Daniel B. Litvin, Magnetic Group Tables (International Union
of Crystallography, 2013) www.iucr.org/publ/978-0-9553602-2-0.
(2) C. J. Bradley and A. P. Cracknell, The Mathematical Theory of
Symmetry in Solids (Clarendon Press, Oxford, 1972).
See http://stokes.byu.edu/iso/magneticspacegroupshelp.php for more
information on magnetic symmetry.
:param id: BNS number supplied as list of 2 ints or BNS label as
str or index as int (1-1651) to iterate over all space groups"""
self._data = {}
# Datafile is stored as sqlite3 database since (a) it can be easily
# queried for various different indexes (BNS/OG number/labels) and (b)
# allows binary data to be stored in a compact form similar to that in
# the source data file, significantly reducing file size.
# Note that a human-readable JSON format was tested first but was 20x
# larger and required *much* longer initial loading times.
# retrieve raw data
db = sqlite3.connect(MAGSYMM_DATA)
c = db.cursor()
if isinstance(id, str):
id = "".join(id.split()) # remove any white space
c.execute('SELECT * FROM space_groups WHERE BNS_label=?;', (id, ))
elif isinstance(id, list):
c.execute('SELECT * FROM space_groups WHERE BNS1=? AND BNS2=?;', (id[0], id[1]))
elif isinstance(id, int):
# OG3 index is a 'master' index, going from 1 to 1651
c.execute('SELECT * FROM space_groups WHERE OG3=?;', (id, ))
raw_data = list(c.fetchone())
self._data['magtype'] = raw_data[0] # int from 1 to 4
self._data['bns_number'] = [raw_data[1], raw_data[2]]
self._data['bns_label'] = raw_data[3]
self._data['og_number'] = [raw_data[4], raw_data[5], raw_data[6]]
self._data['og_label'] = raw_data[7] # can differ from BNS_label
def _get_point_operator(idx):
'''Retrieve information on point operator (rotation matrix and Seitz label).'''
hex = self._data['bns_number'][0] >= 143 and self._data['bns_number'][0] <= 194
c.execute('SELECT symbol, matrix FROM point_operators WHERE idx=? AND hex=?;', (idx-1, hex))
op = c.fetchone()
op = {'symbol': op[0], 'matrix': np.array(op[1].split(','), dtype='f').reshape(3, 3)}
return op
def _parse_operators(b):
'''Parses compact binary representation into list of MagSymmOps.'''
if len(b) == 0: # e.g. if magtype != 4, OG setting == BNS setting, and b == [] for OG symmops
return None
raw_symops = [b[i:i+6] for i in range(0, len(b), 6)]
symops = []
for r in raw_symops:
point_operator = _get_point_operator(r[0])
translation_vec = [r[1]/r[4], r[2]/r[4], r[3]/r[4]]
time_reversal = r[5]
op = MagSymmOp.from_rotation_and_translation_and_time_reversal(rotation_matrix=point_operator['matrix'],
translation_vec=translation_vec,
time_reversal=time_reversal)
# store string representation, e.g. (2x|1/2,1/2,1/2)'
seitz = '({0}|{1},{2},{3})'.format(point_operator['symbol'],
Fraction(translation_vec[0]),
Fraction(translation_vec[1]),
Fraction(translation_vec[2]))
if time_reversal == -1:
seitz += '\''
symops.append({'op': op, 'str': seitz})
return symops
def _parse_wyckoff(b):
'''Parses compact binary representation into list of Wyckoff sites.'''
if len(b) == 0:
return None
wyckoff_sites = []
def get_label(idx):
if idx <= 25:
return chr(97+idx) # returns a-z when idx 0-25
else:
return 'alpha' # when a-z labels exhausted, use alpha, only relevant for a few space groups
o = 0 # offset
n = 1 # nth Wyckoff site
num_wyckoff = b[0]
while len(wyckoff_sites) < num_wyckoff:
m = b[1+o] # multiplicity
label = str(b[2+o]*m)+get_label(num_wyckoff-n)
sites = []
for j in range(m):
s = b[3+o+(j*22):3+o+(j*22)+22] # data corresponding to specific Wyckoff position
translation_vec = [s[0]/s[3], s[1]/s[3], s[2]/s[3]]
matrix = [[s[4], s[7], s[10]],
[s[5], s[8], s[11]],
[s[6], s[9], s[12]]]
matrix_magmom = [[s[13], s[16], s[19]],
[s[14], s[17], s[20]],
[s[15], s[18], s[21]]]
# store string representation, e.g. (x,y,z;mx,my,mz)
wyckoff_str = "({};{})".format(transformation_to_string(matrix, translation_vec),
transformation_to_string(matrix_magmom, c='m'))
sites.append({'translation_vec': translation_vec,
'matrix': matrix,
'matrix_magnetic': matrix_magmom,
'str': wyckoff_str})
# only keeping string representation of Wyckoff sites for now
# could do something else with these in future
wyckoff_sites.append({'label': label,
'str': ' '.join([s['str'] for s in sites])})
n += 1
o += m*22 + 2
return wyckoff_sites
def _parse_lattice(b):
'''Parses compact binary representation into list of lattice vectors/centerings.'''
if len(b) == 0:
return None
raw_lattice = [b[i:i+4] for i in range(0, len(b), 4)]
lattice = []
for r in raw_lattice:
lattice.append({'vector': [r[0]/r[3], r[1]/r[3], r[2]/r[3]],
'str': '({0},{1},{2})+'.format(Fraction(r[0]/r[3]).limit_denominator(),
Fraction(r[1]/r[3]).limit_denominator(),
Fraction(r[2]/r[3]).limit_denominator())})
return lattice
def _parse_transformation(b):
'''Parses compact binary representation into transformation between OG and BNS settings.'''
if len(b) == 0:
return None
# capital letters used here by convention,
# IUCr defines P and p specifically
P = [[b[0], b[3], b[6]],
[b[1], b[4], b[7]],
[b[2], b[5], b[8]]]
p = [b[9]/b[12], b[10]/b[12], b[11]/b[12]]
P = np.array(P).transpose()
P_string = transformation_to_string(P, components=('a', 'b', 'c'))
p_string = "{},{},{}".format(Fraction(p[0]).limit_denominator(),
Fraction(p[1]).limit_denominator(),
Fraction(p[2]).limit_denominator())
return P_string + ";" + p_string
for i in range(8, 15):
try:
raw_data[i] = array('b', raw_data[i]) # construct array from sql binary blobs
except:
# array() behavior changed, need to explicitly convert buffer to str in earlier Python
raw_data[i] = array('b', str(raw_data[i]))
self._data['og_bns_transform'] = _parse_transformation(raw_data[8])
self._data['bns_operators'] = _parse_operators(raw_data[9])
self._data['bns_lattice'] = _parse_lattice(raw_data[10])
self._data['bns_wyckoff'] = _parse_wyckoff(raw_data[11])
self._data['og_operators'] = _parse_operators(raw_data[12])
self._data['og_lattice'] = _parse_lattice(raw_data[13])
self._data['og_wyckoff'] = _parse_wyckoff(raw_data[14])
db.close()
@classmethod
def from_og(cls, id):
"""
Initialize from Opechowski and Guccione (OG) label or number.
:param id: OG number supplied as list of 3 ints or
or OG label as str
:return:
"""
db = sqlite3.connect(MAGSYMM_DATA)
c = db.cursor()
if isinstance(id, str):
c.execute('SELECT BNS_label FROM space_groups WHERE OG_label=?', (id, ))
elif isinstance(id, list):
c.execute('SELECT BNS_label FROM space_groups WHERE OG1=? and OG2=? and OG3=?', (id[0], id[1], id[2]))
bns_label = c.fetchone()[0]
db.close()
return cls(bns_label)
def __eq__(self, other):
return self._data == other._data
@property
def crystal_system(self):
i = self._data["bns_number"][0]
if i <= 2:
return "triclinic"
elif i <= 15:
return "monoclinic"
elif i <= 74:
return "orthorhombic"
elif i <= 142:
return "tetragonal"
elif i <= 167:
return "trigonal"
elif i <= 194:
return "hexagonal"
else:
return "cubic"
@property
def sg_symbol(self):
return self._data["bns_label"]
@property
def symmetry_ops(self):
"""
Retrieve magnetic symmetry operations of the space group.
:return: List of :class:`pymatgen.core.operations.MagSymmOp`
"""
ops = [op_data['op'] for op_data in self._data['bns_operators']]
# add lattice centerings
centered_ops = []
lattice_vectors = [l['vector'] for l in self._data['bns_lattice']]
for vec in lattice_vectors:
if not (np.array_equal(vec, [1, 0, 0])
or np.array_equal(vec, [0, 1, 0])
or np.array_equal(vec, [0, 0, 1])):
for op in ops:
new_vec = op.translation_vector + vec
new_op = MagSymmOp.from_rotation_and_translation_and_time_reversal(op.rotation_matrix,
translation_vec=new_vec,
time_reversal=op.time_reversal)
centered_ops.append(new_op)
ops = ops+centered_ops
return ops
def get_orbit(self, p, m, tol=1e-5):
"""
Returns the orbit for a point and its associated magnetic moment.
Args:
p: Point as a 3x1 array.
m: A magnetic moment, compatible with
:class:`pymatgen.electronic_structure.core.Magmom`
tol: Tolerance for determining if sites are the same. 1e-5 should
be sufficient for most purposes. Set to 0 for exact matching
(and also needed for symbolic orbits).
Returns:
(([array], [array])) Tuple of orbit for point and magnetic moments for orbit.
"""
orbit = []
orbit_magmoms = []
m = Magmom(m)
for o in self.symmetry_ops:
pp = o.operate(p)
pp = np.mod(np.round(pp, decimals=10), 1)
mm = o.operate_magmom(m)
if not in_array_list(orbit, pp, tol=tol):
orbit.append(pp)
orbit_magmoms.append(mm)
return orbit, orbit_magmoms
def is_compatible(self, lattice, tol=1e-5, angle_tol=5):
"""
Checks whether a particular lattice is compatible with the
*conventional* unit cell.
Args:
lattice (Lattice): A Lattice.
tol (float): The tolerance to check for equality of lengths.
angle_tol (float): The tolerance to check for equality of angles
in degrees.
"""
# function from pymatgen.symmetry.groups.SpaceGroup
abc, angles = lattice.lengths_and_angles
crys_system = self.crystal_system
def check(param, ref, tolerance):
return all([abs(i - j) < tolerance for i, j in zip(param, ref)
if j is not None])
if crys_system == "cubic":
a = abc[0]
return check(abc, [a, a, a], tol) and\
check(angles, [90, 90, 90], angle_tol)
elif crys_system == "hexagonal" or (crys_system == "trigonal" and
self.symbol.endswith("H")):
a = abc[0]
return check(abc, [a, a, None], tol)\
and check(angles, [90, 90, 120], angle_tol)
elif crys_system == "trigonal":
a = abc[0]
return check(abc, [a, a, a], tol)
elif crys_system == "tetragonal":
a = abc[0]
return check(abc, [a, a, None], tol) and\
check(angles, [90, 90, 90], angle_tol)
elif crys_system == "orthorhombic":
return check(angles, [90, 90, 90], angle_tol)
elif crys_system == "monoclinic":
return check(angles, [90, None, 90], angle_tol)
return True
def data_str(self, include_og=True):
"""
Get description of all data, including information for OG setting.
:return: str
"""
# __str__() omits information on OG setting to reduce confusion
# as to which set of symops are active, this property gives
# all stored data including OG setting
desc = {} # dictionary to hold description strings
# parse data into strings
desc['magtype'] = self._data['magtype']
desc['bns_number'] = ".".join(map(str, self._data["bns_number"]))
desc['bns_label'] = self._data["bns_label"]
desc['og_id'] = ("\t\tOG: " + ".".join(map(str, self._data["og_number"])) + " " + self._data["og_label"]
if include_og else '')
desc['bns_operators'] = ' '.join([op_data['str'] for op_data in self._data['bns_operators']])
desc['bns_lattice'] = (' '.join([lattice_data['str'] for lattice_data in self._data['bns_lattice'][3:]])
if len(self._data['bns_lattice']) > 3 else '') # don't show (1,0,0)+ (0,1,0)+ (0,0,1)+
desc['bns_wyckoff'] = '\n'.join([textwrap.fill(wyckoff_data['str'],
initial_indent=wyckoff_data['label']+" ",
subsequent_indent=" " * len(wyckoff_data['label']+" "),
break_long_words=False, break_on_hyphens=False)
for wyckoff_data in self._data['bns_wyckoff']])
desc['og_bns_transformation'] = ('OG-BNS Transform: ({})\n'.format(self._data['og_bns_transform'])
if desc['magtype'] == 4 and include_og else '')
bns_operators_prefix = "Operators{}: ".format(' (BNS)' if desc['magtype'] == 4 and include_og else '')
bns_wyckoff_prefix = "Wyckoff Positions{}: ".format(' (BNS)' if desc['magtype'] == 4 and include_og else '')
# apply textwrap on long lines
desc['bns_operators'] = textwrap.fill(desc['bns_operators'],
initial_indent=bns_operators_prefix,
subsequent_indent=" " * len(bns_operators_prefix),
break_long_words=False, break_on_hyphens=False)
description = ("BNS: {d[bns_number]} {d[bns_label]}{d[og_id]}\n"
"{d[og_bns_transformation]}"
"{d[bns_operators]}\n"
"{bns_wyckoff_prefix}{d[bns_lattice]}\n"
"{d[bns_wyckoff]}").format(d=desc, bns_wyckoff_prefix=bns_wyckoff_prefix)
if desc['magtype'] == 4 and include_og:
desc['og_operators'] = ' '.join([op_data['str'] for op_data in self._data['og_operators']])
# include all lattice vectors because (1,0,0)+ (0,1,0)+ (0,0,1)+
# not always present in OG setting
desc['og_lattice'] = ' '.join([lattice_data['str'] for lattice_data in self._data['og_lattice']])
desc['og_wyckoff'] = '\n'.join([textwrap.fill(wyckoff_data['str'],
initial_indent=wyckoff_data['label'] + " ",
subsequent_indent=" " * len(wyckoff_data['label'] + " "),
break_long_words=False, break_on_hyphens=False)
for wyckoff_data in self._data['og_wyckoff']])
og_operators_prefix = "Operators (OG): "
og_wyckoff_prefix = "Wyckoff Positions (OG): "
# apply textwrap on long lines
desc['og_operators'] = textwrap.fill(desc['og_operators'],
initial_indent=og_operators_prefix,
subsequent_indent=" " * len(og_operators_prefix),
break_long_words=False, break_on_hyphens=False)
description += ("\n{d[og_operators]}\n"
"Wyckoff Positions (OG): {d[og_lattice]}\n"
"{d[og_wyckoff]}").format(d=desc)
elif desc['magtype'] == 4:
description += '\nAlternative OG setting exists for this space group.'
return description
def __str__(self):
"""
String representation of the space group, specifying the setting
of the space group, its magnetic symmetry operators and Wyckoff
positions.
:return: str
"""
return self.data_str(include_og=False)
def _write_all_magnetic_space_groups_to_file(filename):
"""
Write all magnetic space groups to a human-readable text file.
Should contain same information as text files provided by ISO-MAG.
:param filename:
:return:
"""
s = ('Data parsed from raw data from:\n'
'ISO-MAG, ISOTROPY Software Suite, iso.byu.edu\n'
'http://stokes.byu.edu/iso/magnetic_data.txt\n'
'Used with kind permission from Professor Branton Campbell, BYU\n\n')
all_msgs = []
for i in range(1, 1652):
all_msgs.append(MagneticSpaceGroup(i))
for msg in all_msgs:
s += '\n{}\n\n--------\n'.format(msg.data_str())
f = open(filename, 'w')
f.write(s)
f.close()
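# Brief usage sketch (appended): "Pn'ma" is one of the magnetic subgroups of
# Pnma cited in the class docstring; the exact label string is assumed to
# match the BNS_label convention of the bundled database.
if __name__ == "__main__":
    msg = MagneticSpaceGroup("Pn'ma")
    print(msg.sg_symbol, msg.crystal_system)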
|
blondegeek/pymatgen
|
pymatgen/symmetry/maggroups.py
|
Python
|
mit
| 22,653
|
[
"CRYSTAL",
"pymatgen"
] |
7500f042bdcbcad2d8753eeb6341afb5d7b4d60deee961a5086b6acc19d753b2
|
#!/usr/bin/env python
"""Script to read a com file created by amboniom and make changes
required to oniom calculation."""
# python modules
import argparse
#our modules
import molecules
import gaussian
PARSER = argparse.ArgumentParser(
description = 'Reads the output of amboniom and makes changes for oniom',
formatter_class = argparse.RawTextHelpFormatter)
PARSER.add_argument('in_gaucom',
help = 'gaussian model file')
PARSER.add_argument('out_gaucom',
help = 'gaussian model file')
ARGS = PARSER.parse_args()
IN_GAUCOM = ARGS.in_gaucom
OUT_GAUCOM = ARGS.out_gaucom
def main():
in_file = gaussian.GaussianCom(IN_GAUCOM)
system = molecules.Molecule('system',in_file.atoms_list)
charge = system.get_charge()
charge_line = '{0} 1 {0} 1 {0} 1\n'.format(int(charge))
in_file.multiplicity_line = charge_line
route_section = """%nproc=8
%mem=1gb
# oniom(b3lyp/6-31g(d):amber=softonly)=embed geom(notest,connectivity)\n"""
in_file.route_section = route_section
for no, atom in enumerate(in_file.atoms_list):
if atom.resinfo.resname != 'WAT':
in_file.atoms_list[no].oniom.layer = 'H'
in_file.redo_connectivity_list()
in_file.write_to_file(OUT_GAUCOM)
if __name__ == "__main__":
main()
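# Typical invocation (a sketch; file names are hypothetical):
#
#     python amboniom_to_oniom.py model_from_amboniom.com model_for_oniom.com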
|
eduardoftoliveira/oniomMacGyver
|
omg/dock++/amboniom_to_oniom.py
|
Python
|
gpl-3.0
| 1,323
|
[
"Amber",
"Gaussian"
] |
c2ab464aa877040f93d6f0c997eb526dcd6fa9364a2562547bda5459c2b4876f
|
from rdkit import Chem
from rdkit.Chem import Descriptors
from rdkit.Chem import PandasTools
from rdkit.Chem import FilterCatalog
import pandas as pd
try:
# Import extra, proprietary functions
from websdf.extra import extra
except:
extra = None
params = FilterCatalog.FilterCatalogParams()
params.AddCatalog(FilterCatalog.FilterCatalogParams.FilterCatalogs.PAINS_A)
params.AddCatalog(FilterCatalog.FilterCatalogParams.FilterCatalogs.PAINS_B)
params.AddCatalog(FilterCatalog.FilterCatalogParams.FilterCatalogs.PAINS_C)
catalog = FilterCatalog.FilterCatalog(params)
PandasTools.molSize = (180,180)
def _clogSw(mol):
'''
Inspired by work by Christos Kannas presented at RDKit UGM 2013
Based on:
J. S. Delaney, Journal of Chemical Information and Modeling, 44, 1000-1005,
2004.
'''
MolWeight = Descriptors.MolWt(mol)
clogP = Descriptors.MolLogP(mol)
RotBonds = Descriptors.NumRotatableBonds(mol)
aromaticHeavyatoms = len(mol.GetSubstructMatches(Chem.MolFromSmarts("[a]")))
numAtoms = mol.GetNumAtoms()
AromProp = float(aromaticHeavyatoms) / numAtoms
# New clogSw with coefficients from Christos' presentation
clogSw_value = 0.233743817233 \
-0.74253027 * clogP \
-0.00676305 * MolWeight \
+0.01580559 * RotBonds \
-0.35483585 * AromProp
return clogSw_value
def _detect_pains(mol):
matches = catalog.GetMatches(mol)
if matches:
return ', '.join([x.GetDescription() for x in matches])
else:
return ''
def _calculate_descs(df, checks):
# Create a # column
df['#'] = range(len(df))
# Put # column on first position
df = df[['#'] + [x for x in list(df.columns) if x != '#']]
# For compatibility with RDKit older than 2015_03
# I think we can keep smiles column, since user can easily hide it from web interface
#if 'SMILES' in list(df.columns):
# df = df.drop(['SMILES'], axis=1)
# Recalculate SMILES
if 'SMILES' in checks:
df['SMILES'] = df.apply(lambda x: Chem.MolToSmiles(x['ROMol']), axis=1)
# Calculate MW
if 'MW' in checks:
df['MW'] = df['ROMol'].map(Descriptors.MolWt).round(decimals=2)
# Calculate logP
if 'logP' in checks:
df['logP'] = df['ROMol'].map(Descriptors.MolLogP).round(decimals=2)
# Calculate H-bond donors and acceptors
if 'HB' in checks:
df['HBA'] = df['ROMol'].map(Descriptors.NumHAcceptors)
df['HBD'] = df['ROMol'].map(Descriptors.NumHDonors)
# Calculate solubility
if 'logS' in checks:
df['logS'] = df['ROMol'].map(_clogSw).round(decimals=2)
# Detect PAINS
if 'PAINS' in checks:
df['PAINS'] = df['ROMol'].map(_detect_pains)
if 'recalc2d' in checks:
for x in df['ROMol']:
x.Compute2DCoords()
if 'removess' in checks:
PandasTools.RemoveSaltsFromFrame(df)
if 'svg' in checks:
PandasTools.molRepresentation = 'svg'
if 'extra' in checks:
if extra:
df = extra(df)
return df
def read_sdf(sdf, checks):
"""Reads sdf file and loads it in data frame"""
df = PandasTools.LoadSDF(sdf)
df = _calculate_descs(df,checks)
return df
def read_smi(smi, checks):
"""Reads smi file and loads it in data frame, works best with open babel format"""
df = pd.read_csv(smi, delimiter="\t", names=['SMILES', 'ID'])
PandasTools.AddMoleculeColumnToFrame(df, smilesCol='SMILES')
df = _calculate_descs(df,checks)
return df
def read_mol(molfile, checks):
"""Reads mol file and loads it in data frame"""
mol = Chem.MolFromMolBlock(molfile.read())
df = pd.DataFrame([mol], columns=['ROMol'])
df = _calculate_descs(df,checks)
return df
def read_mol2(molfile, checks):
"""Reads mol2 file and loads it in data frame"""
mol = Chem.MolFromMol2Block(molfile.read())
df = pd.DataFrame([mol], columns=['ROMol'])
df = _calculate_descs(df,checks)
return df
def read_smi_string(smi, checks):
df = pd.DataFrame({'SMILES':smi, 'ID':0}, index=[0])
PandasTools.AddMoleculeColumnToFrame(df, smilesCol='SMILES')
df = _calculate_descs(df,checks)
return df
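# Example sketch (appended): compute a few descriptors for phenol from a raw
# SMILES string; the check names come from _calculate_descs above.
if __name__ == '__main__':
    frame = read_smi_string('Oc1ccccc1', ['MW', 'logP', 'HB', 'logS', 'PAINS'])
    print(frame[['SMILES', 'MW', 'logP', 'logS']])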
|
samoturk/websdf
|
websdf/calculations.py
|
Python
|
bsd-3-clause
| 4,228
|
[
"Open Babel",
"RDKit"
] |
077685687ba7fb8a025c7d358c71027cc44d97aef832ed6475508342f0569fa1
|
from contextlib import contextmanager
import os
import sys
import unittest
from rdkit import RDConfig
from rdkit.Chem import FeatFinderCLI
from io import StringIO
class TestCase(unittest.TestCase):
def test_FeatFinderCLI(self):
smilesFile = os.path.join(RDConfig.RDDataDir, 'NCI', 'first_5K.smi')
featureFile = os.path.join(RDConfig.RDCodeDir, 'Chem', 'Pharm2D', 'test_data',
'BaseFeatures.fdef')
parser = FeatFinderCLI.initParser()
cmd = '-n 10 {0} {1}'.format(featureFile, smilesFile)
with outputRedirect() as (out, err):
args = parser.parse_args(cmd.split())
FeatFinderCLI.processArgs(args, parser)
out = out.getvalue()
err = err.getvalue()
self.assertIn('Mol-1', out)
self.assertIn('Acceptor-SingleAtomAcceptor', out)
self.assertIn('C(1)', out)
self.assertNotIn('Mol-11', out)
self.assertEqual(err, '')
cmd = '-n 2 -r {0} {1}'.format(featureFile, smilesFile)
with outputRedirect() as (out, err):
args = parser.parse_args(cmd.split())
FeatFinderCLI.processArgs(args, parser)
out = out.getvalue()
err = err.getvalue()
self.assertIn('Mol-1', out)
self.assertIn('Acceptor-SingleAtomAcceptor:', out)
self.assertIn('2, 3, 4', out)
self.assertNotIn('Mol-3', out)
self.assertEqual(err, '')
def test_FeatFinderCLIexceptions(self):
smilesFile = os.path.join(RDConfig.RDDataDir, 'NCI', 'first_5K.smi')
featureFile = os.path.join(RDConfig.RDCodeDir, 'Chem', 'Pharm2D', 'test_data',
'BaseFeatures.fdef')
parser = FeatFinderCLI.initParser()
cmd = '-n 10 {0} {1}'.format(smilesFile, smilesFile)
with self.assertRaises(SystemExit), outputRedirect() as (_, err):
args = parser.parse_args(cmd.split())
FeatFinderCLI.processArgs(args, parser)
self.assertIn('error', err.getvalue())
cmd = '-n 10 {0} {1}'.format(featureFile, 'incorrectFilename')
with self.assertRaises(SystemExit), outputRedirect() as (_, err):
args = parser.parse_args(cmd.split())
FeatFinderCLI.processArgs(args, parser)
self.assertIn('error', err.getvalue())
@contextmanager
def outputRedirect():
""" Redirect standard output and error to String IO and return """
try:
_stdout, _stderr = sys.stdout, sys.stderr
sys.stdout = sStdout = StringIO()
sys.stderr = sStderr = StringIO()
yield (sStdout, sStderr)
finally:
sys.stdout, sys.stderr = _stdout, _stderr
if __name__ == '__main__': # pragma: nocover
unittest.main()
|
bp-kelley/rdkit
|
rdkit/Chem/UnitTestFeatFinderCLI.py
|
Python
|
bsd-3-clause
| 2,752
|
[
"RDKit"
] |
dc873acb93e5299b65eea9ea09e832100d27b65414c6cb639ee5bc5b8f6c0543
|
#!/usr/bin/env python
__author__ = "Mike McCann"
__copyright__ = "Copyright 2011, MBARI"
__license__ = "GPL"
__maintainer__ = "Mike McCann"
__email__ = "mccann at mbari.org"
__status__ = "Development"
__doc__ = '''
Load small sample of data from OPeNDAP and other data sources at MBARI
for testing purposes. The collection should be sufficient to
provide decent test coverage for the STOQS application.
Mike McCann
MBARI Dec 28, 2011
@undocumented: __doc__ parser
@author: __author__
@status: __status__
@license: __license__
'''
import os
import sys
##parentDir = os.path.join(os.path.dirname(__file__), "../")
##sys.path.insert(0, parentDir) # settings.py is one dir up
from CANON import CANONLoader
# Assign input data sources
cl = CANONLoader('default', 'Initial Test Database',
description = 'Post-setup load of a single AUV mission',
x3dTerrains = {
'http://dods.mbari.org/terrain/x3d/Monterey25_10x/Monterey25_10x_scene.x3d': {
'position': '-2822317.31255 -4438600.53640 3786150.85474',
'orientation': '0.89575 -0.31076 -0.31791 1.63772',
'centerOfRotation': '-2711557.9403829873 -4331414.329506527 3801353.4691465236',
'VerticalExaggeration': '10',
'speed': '1',
}
},
grdTerrain = os.path.join(os.path.dirname(__file__), 'Monterey25.grd') # File expected in loaders directory
)
# Assign input data sets from OPeNDAP URLs pointing to Discrete Sampling Geometry CF-NetCDF sources
cl.dorado_base = 'http://dods.mbari.org/opendap/data/auvctd/surveys/2010/netcdf/'
cl.dorado_files = [ 'Dorado389_2010_300_00_300_00_decim.nc' ]
cl.dorado_parms = [ 'temperature', 'oxygen', 'nitrate', 'bbp420', 'bbp700', 'fl700_uncorr', 'salinity', 'biolume' ]
# Execute the load
cl.process_command_line()
if cl.args.test:
cl.loadDorado(stride=100)
elif cl.args.optimal_stride:
cl.loadDorado(stride=2)
else:
if cl.args.stride:
cl.logger.warn("Overriding stride parameter with a value of 1000 for this test load script")
cl.args.stride = 1000
cl.loadDorado(stride=cl.args.stride)
# Add any X3D Terrain information specified in the constructor to the database - must be done after a load is executed
cl.addTerrainResources()
print "All Done."
|
google-code-export/stoqs
|
loaders/loadTestData.py
|
Python
|
gpl-3.0
| 2,577
|
[
"NetCDF"
] |
fe892de82a0c91ca0f2ac8c096a30faf253374db374b7bd468f017c2dde26226
|
from Bio import SeqIO
from Bio.Seq import Seq, translate
from Bio.SeqFeature import SeqFeature, FeatureLocation
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet.IUPAC import IUPACUnambiguousDNA, IUPACProtein
from Bio.Blast import NCBIXML
from Bio.Blast.Applications import NcbiblastpCommandline
import sys, glob, os, sqlite3, subprocess
import json
import numpy as np
def loadProject(wd):
project = {}
with open("{d}project.json".format(d=wd),'r') as project_json:
project = json.load(project_json)
return project
def writeProject(wd,project):
with open("{d}project.json".format(d=wd),'w') as project_json:
json.dump(project,project_json)
def connect_db(db_file):
return sqlite3.connect(db_file)
def create_db(db_file):
if os.path.isfile(db_file):
os.system("rm -rf " + db_file)
con = connect_db(db_file)
con.execute('''
CREATE TABLE `phage` (
`id` INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
`name` INTEGER NOT NULL,
`orig_name` TEXT NOT NULL,
`organism` TEXT,
`definition` TEXT,
`golden` INTEGER NOT NULL,
`seq` TEXT NOT NULL
);
''')
con.execute('''
CREATE TABLE `gene` (
`id` INTEGER PRIMARY KEY AUTOINCREMENT,
`phage_id` INTEGER,
`start` INTEGER NOT NULL,
`orig_start` INTEGER NOT NULL,
`end` INTEGER NOT NULL,
`orig_end` INTEGER NOT NULL,
`product` TEXT NOT NULL,
`note` TEXT,
`locus_tag` TEXT NOT NULL,
`old_locus_tag` TEXT,
`translation` TEXT NOT NULL,
`cluster` INTEGER,
`adjusted` INTEGER,
`rev_comp` INTEGER,
FOREIGN KEY(`phage_id`) REFERENCES `phage`(`id`),
FOREIGN KEY(`cluster`) REFERENCES cluster(id)
);
''')
con.execute('''
CREATE TABLE `clustalo` (
`id` INTEGER PRIMARY KEY AUTOINCREMENT,
`query_id` INTEGER NOT NULL,
`subject_id` INTEGER NOT NULL,
`percent_ident` REAL NOT NULL,
FOREIGN KEY(`query_id`) REFERENCES gene(id),
FOREIGN KEY(`subject_id`) REFERENCES gene(id)
);
''')
con.execute('''
CREATE TABLE `blastp` (
`id` INTEGER PRIMARY KEY AUTOINCREMENT,
`query_id` INTEGER NOT NULL,
`subject_id` INTEGER NOT NULL,
`percent_ident` REAL NOT NULL,
`e_value` REAL NOT NULL,
`query_start` INTEGER NOT NULL,
`subject_start` INTEGER NOT NULL,
FOREIGN KEY(`query_id`) REFERENCES phage(id),
FOREIGN KEY(`subject_id`) REFERENCES phage(id)
);
''')
con.execute('''
CREATE TABLE `cluster` (
`id` INTEGER PRIMARY KEY AUTOINCREMENT,
`name` TEXT UNIQUE,
`adjusted` INTEGER
);
''')
return con
def insert_phage(db, phage):
cur = db.cursor()
cur.execute("INSERT INTO `phage` (name,orig_name,organism,definition,golden,seq) VALUES(?,?,?,?,0,?)",
(phage.name, phage.name, phage.annotations['organism'], phage.description, str(phage.seq)))
db.commit()
return cur.lastrowid
def insert_gene(db, gene, phage_id):
cur = db.cursor()
if "old_locus_tag" in gene.qualifiers.keys():
old_locus = gene.qualifiers["old_locus_tag"][0]
else:
old_locus = ""
if "note" in gene.qualifiers.keys():
note = gene.qualifiers["note"][0]
else:
note = ""
    # `rev_comp` records whether the gene lies on the reverse strand;
    # `product` defaults to an empty string when the qualifier is absent
    rev_comp = 0 if gene.strand == 1 else 1
    cur.execute("INSERT INTO `gene` (phage_id,start,orig_start,end,orig_end,product,note,locus_tag,old_locus_tag,translation,adjusted,rev_comp) "
                "VALUES(?,?,?,?,?,?,?,?,?,?,0,?)",
                (phage_id, gene.location.start, gene.location.start,
                 gene.location.end, gene.location.end,
                 gene.qualifiers.get("product", [""])[0], note,
                 gene.qualifiers["locus_tag"][0], old_locus,
                 str(gene.qualifiers["translation"][0]), rev_comp))
db.commit()
return cur.lastrowid
def insert_blastp_hits(db, hits):
cur = db.cursor()
cur.executemany("INSERT INTO `blastp` (query_id,subject_id,e_value,query_start,subject_start,percent_ident) "
"VALUES(?,?,?,?,?,?)", hits)
db.commit()
def insert_clustalo_percents(db, query, subject, identity):
cur = db.cursor()
cur.execute("INSERT INTO `clustalo` (query_id,subject_id,percent_ident) "
"VALUES(?,?,?)", (query, subject, identity))
db.commit()
def get_phage(db, id):
result = db.execute("SELECT * from `phage` where id = " + str(id))
if result.arraysize != 1:
raise Exception("Attempted to find ID " + str(id) + " but found improper number of results.")
phage = {}
for row in result:
phage['id'] = int(row[0])
phage['name'] = row[1]
phage['orig_name'] = row[2]
phage['organism'] = row[3]
phage['definition'] = row[4]
phage['golden'] = int(row[5])
phage['seq'] = row[6]
return phage
def get_gene(db, id):
    result = db.execute("SELECT * from `gene` where id = ?", (id,))
    rows = result.fetchall()
    # cursor.arraysize is only a fetchmany() hint, not a row count, so count the rows themselves
    if len(rows) != 1:
        raise Exception("Attempted to find ID " + str(id) + " but found improper number of results.")
    gene = {}
    for row in rows:
gene['id'] = int(row[0])
gene['phage_id'] = int(row[1])
gene['start'] = int(row[2])
gene['orig_start'] = int(row[3])
gene['end'] = int(row[4])
gene['orig_end'] = int(row[5])
gene['product'] = row[6]
        gene['note'] = row[7]  # column 7 is note; column 6 is product
gene['locus_tag'] = row[8]
gene['old_locus_tag'] = row[9]
gene['translation'] = row[10]
if row[11] is None:
gene['cluster'] = row[11]
else:
gene['cluster'] = int(row[11])
if row[12] == 0:
gene['adjusted'] = False
else:
gene['adjusted'] = True
if row[13] == 0:
gene['rev_comp'] = False
else:
gene['rev_comp'] = True
return gene
def get_all_genes(db):
result = db.execute("SELECT * from `gene`")
genes = []
for row in result:
gene = dict()
gene['id'] = int(row[0])
gene['phage_id'] = int(row[1])
gene['start'] = int(row[2])
gene['orig_start'] = int(row[3])
gene['end'] = int(row[4])
gene['orig_end'] = int(row[5])
gene['product'] = row[6]
        gene['note'] = row[7]  # column 7 is note; column 6 is product
gene['locus_tag'] = row[8]
gene['old_locus_tag'] = row[9]
gene['translation'] = row[10]
if row[11] is None:
gene['cluster'] = row[11]
else:
gene['cluster'] = int(row[11])
if row[12] == 0:
gene['adjusted'] = False
else:
gene['adjusted'] = True
if row[13] == 0:
gene['rev_comp'] = False
else:
gene['rev_comp'] = True
genes.append(gene)
return genes
def get_gene_ids(db):
result = db.execute("SELECT id from `gene` ORDER BY id")
ids = [row[0] for row in result]
    # Deprecated
'''
ids = []
for row in result:
ids.append(row[0])
'''
return ids
def get_blastp_hits(db, id, args):
result = db.execute("SELECT * from `blastp` where query_id = %d and e_value <= %s" %
(id, str(args.blastp_cutoff)))
hits = []
for row in result:
hit = {'type': "blastp", 'query_id': int(row[1]), 'subject_id': int(row[2]), 'ident': float(row[3]), 'e_value': float(row[4]),
'query_start': int(row[5]), 'subject_start': int(row[6])}
hits.append(hit)
return hits
def get_all_blastp_hits(db, id):
result = db.execute("SELECT * from `blastp` where query_id = %d" %
(id))
hits = []
for row in result:
hit = {'type': "blastp", 'query_id': int(row[1]), 'subject_id': int(row[2]), 'ident': float(row[3]), 'e_value': float(row[4]),
'query_start': int(row[5]), 'subject_start': int(row[6])}
hits.append(hit)
return hits
def get_clustalo_hits(db, id, args):
    # parentheses are required here: AND binds tighter than OR, so without them
    # any row matching query_id would pass regardless of percent_ident
    result = db.execute(
        "SELECT * from `clustalo` where (query_id = %d or subject_id = %d) and percent_ident >= %f" % (id, id, args.clustalo_cutoff))
hits = []
for row in result:
hit = {'type': "clustalo", 'query_id': int(row[1]), 'subject_id': int(row[2]), 'ident': float(row[3])}
hits.append(hit)
return hits
def get_all_hits(db, id, args):
hits = get_clustalo_hits(db, id, args)
for hit in get_blastp_hits(db, id, args):
hits.append(hit)
return hits
#creates a new cluster with a given name
def create_cluster(db, name):
cur = db.cursor()
cur.execute("INSERT INTO `cluster` (name,adjusted) VALUES('%s',0)" % name)
db.commit()
return cur.lastrowid
#sets the cluster for a gene
def update_gene_cluster(db, gene_id, cluster_id):
cur = db.cursor()
cur.execute("UPDATE `gene` set cluster = ? where id = ?",
(cluster_id, gene_id))
db.commit()
# gets all gene id's in a given cluster
def get_cluster_genes(db, cluster_id):
result = db.execute("SELECT id from `gene` where cluster = %d" % cluster_id)
cluster = [row[0] for row in result]
return cluster
#gets all the information about each gene in a cluster
def get_cluster(db,cluster_id):
result = db.execute("SELECT id from `gene` where cluster = %d" % cluster_id)
cluster = []
for row in result:
gene = get_gene(db,row[0])
#gene['hits'] = get_all_hits(db,gene['id'],args)
# USE ONLY THE BLASTP HITS
gene['hits'] = get_all_blastp_hits(db,gene['id'])
cluster.append(gene)
return cluster
# returns id of closest cluster, or id of newly created cluster
def get_closest_cluster(db, gene_id, args, i):
close_clusters = {}
hits = get_all_hits(db, gene_id, args)
checked = []
# go through all hits and if any of them are already in a cluster, it adds the cluster to the "close_clusters" dictionary
for hit in hits:
if hit['subject_id'] != gene_id:
hit_id = hit['subject_id']
else:
hit_id = hit['query_id']
if hit_id not in checked:
checked.append(hit_id)
hit_gene = get_gene(db, hit_id)
if hit_gene['cluster'] is not None:
if hit_gene['cluster'] not in close_clusters.keys():
close_clusters[hit_gene['cluster']] = [hit['ident']]
else:
close_clusters[hit_gene['cluster']].append(hit['ident'])
if len(close_clusters.keys()) > 0:
closest_cluster = -1
closest_identity = 0
for cluster_id in close_clusters.keys():
avg = np.mean(close_clusters[cluster_id])
if avg > closest_identity:
closest_cluster = cluster_id
closest_identity = avg
if closest_cluster == -1:
raise Exception("Closest cluster not identified.")
return closest_cluster
else:
return create_cluster(db, "cluster_%d" % i)
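# Illustrative sketch of the selection rule above (hypothetical identities,
# not real data): the candidate cluster with the highest mean hit identity wins.
#   close_clusters = {3: [62.0, 70.0], 7: [80.0]}   -> means 66.0 and 80.0
# so cluster 7 is returned; with no clustered hits at all, a new cluster is created.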
#returns the golden phages in order, from highest priority to lowest
#for golden numbers, just add 1 to the index of the phage_id in the list
def get_golden_phages(db):
result = db.execute("select id from phage where golden != 0 order by golden asc;")
golden_phages = [row[0] for row in result]
return golden_phages
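# e.g. if the golden column holds 1,2,3 for phages A,B,C, the query returns
# [idA, idB, idC] and each phage's golden number is its list index + 1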
def get_golden_genes(golden_phages,cluster):
golden_ids = {}
for gene in cluster:
if gene['phage_id'] in golden_phages:
golden_number = golden_phages.index(gene['phage_id'])
if golden_number not in golden_ids.keys():
golden_ids[golden_number] = []
golden_ids[golden_number].append(gene['id'])
return golden_ids
#makes the best possible adjustments for a given cluster, aligning any genes that do not belong to
def adjust_cluster(db,cluster,start_codons,stop_codons):
    #first we need to make a list of all the golden phage proteins that are in this cluster
golden_phages = get_golden_phages(db)
golden_genes = get_golden_genes(golden_phages, cluster)
revcomp_start_codons = ['CAT', 'CAC', 'CAA']
revcomp_stop_codons = ['CTA', 'TTA', 'TCA']
if len(golden_genes) == 0: #make sure there is at least one gene from the golden phage in the cluster
return
for gene in cluster:
if gene['phage_id'] not in golden_phages and gene['adjusted'] == 0:
            potentialStarts = [] # a list (not a set) so entries stay aligned with codonShift; multiple golden genes can yield several candidate starts
codonShift = []
farCodonShift = None
closeCodonShift = None
print
print "New Gene ", gene['id']
for index, gold_ids in enumerate(golden_genes.values()):
for gold_id in gold_ids:
blastp_hit = None
for hit in gene['hits']: #find hit in gene for that gold_id
if hit['subject_id'] == gold_id:
blastp_hit = hit
break
if blastp_hit is not None: # if we found a hit for that gene, continue
#print "Gene", gene['id'], "and gene", gold_id, "has hit", blastp_hit
golden_start = blastp_hit['subject_start']
gene_start = blastp_hit['query_start']
# our gene is too short and we need to move the start upstream
if gene_start == 1 and golden_start == 1:
print "They are already perfectly aligned!"
print(gene["id"])
elif gene_start == 1 and blastp_hit['ident'] > 50 and blastp_hit['e_value'] < 1e-9:
print "Too Short"
ideal_move_distance = np.abs(golden_start - gene_start)
newCloseStart, newFarStart, farCodonShift, closeCodonShift = tooShort(db, gene, ideal_move_distance, start_codons, stop_codons, revcomp_start_codons, revcomp_stop_codons)
if newCloseStart != None:
potentialStarts.append(newCloseStart)
codonShift.append(closeCodonShift)
if newFarStart != None:
potentialStarts.append(newFarStart)
codonShift.append(farCodonShift)
# our gene is too long and we need to trim it down
elif golden_start == 1 and blastp_hit['ident'] > 50 and blastp_hit['e_value'] < 1e-9:
print "Too Long"
ideal_move_distance = np.abs(gene_start - golden_start)
newCloseStart, newFarStart, farCodonShift, closeCodonShift = tooLong(db, gene, ideal_move_distance, start_codons, stop_codons, revcomp_start_codons,revcomp_stop_codons)
if newCloseStart != None:
potentialStarts.append(newCloseStart)
codonShift.append(closeCodonShift)
if newFarStart != None:
potentialStarts.append(newFarStart)
codonShift.append(farCodonShift)
# right now we do nothing...
else:
print "Neither one starts at 1..."
else:
print "Gene", gene['id'], "has no blastp hit for golden gene", gold_id, ##gene['hits']
            if potentialStarts: # if the list is not empty
print(potentialStarts)
bestStart = findBestStart(db, gene, potentialStarts, ideal_move_distance, codonShift)
                updateStart(db, gene['id'], bestStart, gene['rev_comp'])
def tooShort(db, gene, ideal_move_distance, start_codons, stop_codons, revcomp_start_codons, revcomp_stop_codons):
phage = get_phage(db, gene['phage_id'])
phageGenome = phage['seq']
currentStart = gene['start']
if not gene['rev_comp']:
print "Forward"
return tooShortForward(db, gene, ideal_move_distance, start_codons, stop_codons)
elif gene['rev_comp']:
print "Reverse Compliment"
return tooShortRevComp(db, gene, ideal_move_distance, revcomp_start_codons, revcomp_stop_codons)
def tooShortRevComp(db, gene, ideal_move_distance, start_codons, stop_codons):
# Init bestGeneStart
farBestGeneStart = None
closeBestGeneStart = None
farCodonShift = None
closeCodonShift = None
# Run through all the potential starts
    for i in xrange(1,ideal_move_distance*2): # doubled so we have equal search space on both sides
currentStart = gene['end'] + (3 * i) # increase our start 3 at a time
phage = get_phage(db, gene['phage_id'])
phageGenome = phage['seq']
codon = phageGenome[currentStart-3:currentStart]
##codon = codon[::-1] # reverse the codon
if codon in stop_codons:
print "Found stop codon at {}".format(currentStart)
break
if codon in start_codons and i > ideal_move_distance:
print "far"
farBestGeneStart = currentStart
farCodonShift = np.abs(gene['end'] - currentStart)
break
elif codon in start_codons and i <= ideal_move_distance:
print "on or before"
closeBestGeneStart = currentStart
closeCodonShift = np.abs(gene['end'] - currentStart)
return closeBestGeneStart, farBestGeneStart, farCodonShift, closeCodonShift
def tooShortForward(db, gene, ideal_move_distance, start_codons, stop_codons):
# Init bestGeneStart
farBestGeneStart = None
closeBestGeneStart = None
farCodonShift = None
closeCodonShift = None
# Run through all the potential starts
    for i in xrange(1,ideal_move_distance*2): # doubled so we have equal search space on both sides
currentStart = gene['start'] - (3 * i) # decrease our start 3 at a time
phage = get_phage(db, gene['phage_id'])
phageGenome = phage['seq']
codon = phageGenome[currentStart:currentStart+3]
if codon in stop_codons:
print "Found stop codon at {}".format(currentStart)
break
if codon in start_codons and i > ideal_move_distance:
print "far"
farBestGeneStart = currentStart
farCodonShift = np.abs(gene['start'] - currentStart)
break
elif codon in start_codons and i <= ideal_move_distance:
print "on or before"
closeBestGeneStart = currentStart
closeCodonShift = np.abs(gene['start'] - currentStart)
return closeBestGeneStart, farBestGeneStart, farCodonShift, closeCodonShift
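# Worked example of the search above (hypothetical coordinates): with
# gene['start'] = 300 and ideal_move_distance = 5, positions 297, 294, ... are
# tested in steps of 3; a start codon found at i <= 5 becomes the close
# candidate, the first one found at i > 5 the far candidate, and hitting a
# stop codon ends the search early.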
def tooLong(db, gene, ideal_move_distance, start_codons, stop_codons,revcomp_start_codons,revcomp_stop_codons):
phage = get_phage(db, gene['phage_id'])
phageGenome = phage['seq']
currentStart = gene['start']
if not gene['rev_comp']:
print "Forward"
return tooLongForward(db, gene, ideal_move_distance, start_codons, stop_codons)
elif gene['rev_comp']:
print "Reverse Compliment"
return tooLongRevComp(db, gene, ideal_move_distance, revcomp_start_codons, revcomp_stop_codons)
def tooLongRevComp(db, gene, ideal_move_distance, start_codons, stop_codons):
# Init bestGeneStart
farBestGeneStart = None
closeBestGeneStart = None
farCodonShift = None
closeCodonShift = None
# Run through all the potential starts
    for i in xrange(1,ideal_move_distance*2): # doubled so we have equal search space on both sides
currentStart = gene['end'] - (3 * i) # decrease our start 3 at a time
phage = get_phage(db, gene['phage_id'])
phageGenome = phage['seq']
codon = phageGenome[currentStart-3:currentStart] # codon is going backward
##codon = codon[::-1] # reverse the codon
if codon in stop_codons:
print "Found stop codon at {}".format(currentStart)
break
if codon in start_codons and i > ideal_move_distance:
print "far"
farBestGeneStart = currentStart
farCodonShift = np.abs(gene['end'] - currentStart)
break
elif codon in start_codons and i <= ideal_move_distance:
print "on or before"
closeBestGeneStart = currentStart
closeCodonShift = np.abs(gene['end'] - currentStart)
return closeBestGeneStart, farBestGeneStart, farCodonShift, closeCodonShift
def tooLongForward(db, gene, ideal_move_distance, start_codons, stop_codons):
# Init bestGeneStart
farBestGeneStart = None
closeBestGeneStart = None
farCodonShift = None
closeCodonShift = None
# Run through all the potential starts
    for i in xrange(1,ideal_move_distance*2): # doubled so we have equal search space on both sides
currentStart = gene['start'] + (3 * i) # increase our start 3 at a time
phage = get_phage(db, gene['phage_id'])
phageGenome = phage['seq']
codon = phageGenome[currentStart:currentStart+3] # codon is going forward
if codon in stop_codons:
print "Found stop codon at {}".format(currentStart)
break
if codon in start_codons and i > ideal_move_distance:
print "far"
farBestGeneStart = currentStart
farCodonShift = np.abs(gene['start'] - currentStart)
break
elif codon in start_codons and i <= ideal_move_distance:
print "on or before"
closeBestGeneStart = currentStart
closeCodonShift = np.abs(gene['start'] - currentStart)
return closeBestGeneStart, farBestGeneStart, farCodonShift, closeCodonShift
def updateStart(db, gene_id, newStart, rev_comp):
cur = db.cursor()
    if not rev_comp:
        cur.execute("UPDATE gene SET start = ?, adjusted = 1 WHERE id = ?", (newStart, gene_id))
    else:
        cur.execute("UPDATE gene SET end = ?, adjusted = 1 WHERE id = ?", (newStart, gene_id))
db.commit()
def findBestStart(db, gene, potentialStarts, ideal_move_distance, codonShift):
    # filter both lists together so each candidate start stays paired with its
    # codon shift; the best start is the one whose shift is closest to the
    # ideal move distance
    pairs = [(s, c) for (s, c) in zip(potentialStarts, codonShift) if isinstance(s, int)]
    starts = [s for (s, c) in pairs]
    diffs = [np.abs(c - ideal_move_distance) for (s, c) in pairs]
    return starts[np.argmin(diffs)]
def fillGap(db, cluster, phage):
golden_phages = get_golden_phages(db)
golden_genes = get_golden_genes(golden_phages, cluster)
tempGB = open("gapGenes.gb", "w")
new_phage = get_phage(db, phage)
new_phage_seq = new_phage["seq"]
new_phage_seq = Seq(new_phage_seq, IUPACUnambiguousDNA())
record = SeqRecord(new_phage_seq, id = "0", name = "Temp_Genome", description = "Temp_file")
if len(golden_genes) == 0: #make sure there is at least one gene from the golden phage in the cluster
return
if len(cluster) == len(golden_genes):
for gene in cluster:
            querySeq = gene["product"]  # the product column holds the protein translation (see insert_gene)
query = open("query.FASTA", "w")
query.write(">query\n{}".format(querySeq))
query.close()
frame1, frame2, frame3 = getReadingFrames(gene, db, phage)
print
print(gene["locus_tag"])
counter = 0
resultProtein = None
while counter < 3:
if counter == 0:
subjectProtein = str(frame1)
elif counter == 1:
subjectProtein = str(frame2)
elif counter == 2:
subjectProtein = str(frame3)
subject = open("subject.FASTA", "w")
subject.write(">subject\n{}".format(subjectProtein))
subject.close()
resultProtein = blastGap(gene, "query.FASTA", "subject.FASTA")
                if resultProtein is not None:
                    break
                else:
                    counter += 1
            if resultProtein is None:
                continue  # no reading frame produced a BLAST hit below the cutoff; skip this gene
            numAminoAcids = len(resultProtein)
resultProtein = Seq(resultProtein, IUPACProtein())
newFeature = makeNewGene(numAminoAcids, resultProtein, frame1, frame2, frame3, gene, new_phage_seq)
print(newFeature)
record.features.append(newFeature)
    # write the record once, after all gap genes have been added as features;
    # SeqIO.read below requires the file to contain exactly one record
    SeqIO.write(record, tempGB, "genbank")
    tempGB.close()
genome = SeqIO.read("gapGenes.gb", "genbank")
features = genome.features
for feature in features:
if feature.type == "CDS":
if feature.qualifiers['translation'][0] == '-':
feature.qualifiers['translation'][0] = feature.qualifiers['translation'][0][1:]
gene_id = insert_gene(db, feature, phage)
def blastGap(gene, query, subject):
e_value = 1e-5
gapBlast = NcbiblastpCommandline(query = query, subject = subject, outfmt=5, out='gap.xml')
stdout, stderr = gapBlast()
result_handle = open('gap.xml')
blast_record = NCBIXML.read(result_handle)
for alignment in blast_record.alignments:
for hsp in alignment.hsps:
if hsp.expect < e_value:
print("****ALIGNMENT****")
print("sequence:", alignment.title)
print("length", alignment.length)
print("e-value:", hsp.expect)
print(hsp.query[0:75] + "...")
print(hsp.match[0:75] + "...")
print(hsp.sbjct[0:75] + "...")
return hsp.sbjct
def getReadingFrames(gene, db, phage):
if gene['rev_comp'] != 1:
new_phage = get_phage(db, phage)
new_phage_seq = new_phage['seq']
seq = Seq(new_phage_seq, IUPACUnambiguousDNA())
frame1 = seq.translate()
frame2 = seq[1:(len(seq) - 1)]
frame2 = frame2.translate()
frame3 = seq[2:(len(seq) - 1)]
frame3 = frame3.translate()
else:
new_phage = get_phage(db, phage)
new_phage_seq = new_phage['seq']
seq = Seq(new_phage_seq, IUPACUnambiguousDNA())
seq = seq.reverse_complement()
frame1 = seq.translate()
frame2 = seq[1:(len(seq) - 1)]
frame2 = frame2.translate()
frame3 = seq[2:(len(seq) - 1)]
frame3 = frame3.translate()
return frame1, frame2, frame3
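# frame1/2/3 are the translations of the (possibly reverse-complemented)
# genome offset by 0, 1 and 2 nucleotides; makeNewGene below recovers the
# nucleotide start of a protein found in frame `counter` as counter + 3*start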
def makeNewGene(length, protein, frame1, frame2, frame3, gene, seq):
locusTag = "LG_{}".format(gene["locus_tag"])
    # search the three reading frames in order for the protein
    start = -1
    for counter, frame in enumerate((frame1, frame2, frame3)):
        start = frame.find(protein)
        if start != -1:
            break
if start != -1:
nucStart = counter + 3 * start
nucEnd = nucStart + 3 * length
if gene["rev_comp"] == 1:
geneSeq = seq[nucStart:nucEnd]
geneSeq = geneSeq.reverse_complement()
seq = seq.reverse_complement()
nucStart = seq.find(geneSeq)
nucEnd = nucStart + 3 * length
newFeature = SeqFeature(FeatureLocation(nucStart, nucEnd, strand = -1), type = "CDS")
newFeature.qualifiers["locus_tag"] = locusTag
newFeature.qualifiers["translation"] = protein
        else:
            newFeature = SeqFeature(FeatureLocation(nucStart, nucEnd, strand = 1), type = "CDS")
            newFeature.qualifiers["locus_tag"] = locusTag
            newFeature.qualifiers["translation"] = protein
    else:
        # no reading frame contained the protein; fail loudly rather than
        # returning an undefined newFeature
        raise Exception("Protein not found in any reading frame for locus " + locusTag)
    return newFeature
|
pjtatlow/geneysis
|
python/functions.py
|
Python
|
mit
| 28,887
|
[
"BLAST"
] |
04c260e5a935fa18ee5180b67f9627adc3d060fdf25cb84d2c47b6a55f03af97
|
#!/usr/bin/env python3
#  Copyright (C) 2016, 2017
# Max Planck Institute for Polymer Research
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#########################################################################################
# #
# ESPResSo++ Python script for F-AdResS protein in rigid water simulation including #
# a selfadjusting atomistic region (on the fly) #
# #
#########################################################################################
import mpi4py.MPI as MPI
import espressopp
from espressopp import Real3D
from espressopp.tools import gromacs
import math
import os
import time
import sys
from math import sqrt
import random
import logging
from datetime import datetime
# Performs simulation of fully atomistic peptide in aqueous solution, with a self-adjusting atomistic region
# Reads in peptide coord file (.gro) and topology (topol.top) written in gromacs format
# Assumes that in input file, peptide is listed before water
# Assumes there are no ions
# Uses force-based AdResS and thermodynamic force
# Assumes the atomistic region is defined such that the entire protein is always completely inside it
# Atomistic region is formed of a series of overlapping spheres
# The particles are stored in memory as follows:
# particles in protein each correspond to one coarse-grained particle and one atomistic particle (this is just because of the way particles are stored in espressopp, the protein is fully atomistic all the time anyway)
# solvent (water) molecules each correspond to one coarse-grained particle which maps to three atomistic particles
########################################################################
# 1. specification of the main system setup and simulation parameters #
########################################################################
# protein indices
atProtIndices = [x for x in range(1,94)] #1 to 93 inclusive
nProtAtoms = len(atProtIndices)
# indices of atoms in water molecules with adaptive resolution
atWaterIndices = [x for x in range(94,30628)] #water atoms, 94 to 30627 inclusive
nWaterAtoms = len(atWaterIndices)
nWaterAtomsPerMol = 3 #number of atoms per cg water bead
nWaterMols = nWaterAtoms//nWaterAtomsPerMol
particlePIDsADR = atProtIndices #atomistic indices of atoms at centres of spheres forming AdResS region
# input coordinates
inputcrdfile = "peptide.gro"
# atomistic forcefield
aatopfile = "topol.top"
# system parameters
# NB cutoff
nbCutoff = 1.25
# Interaction cutoff
intCutoff = 1.0
# VerletList skin size (also used for domain decomposition)
skin = 0.2 #nm
# the temperature of the system
temperatureConvFactor = 120.27239 # 1/(kB in kJ K-1 mol-1) (input vel should be in nm/ps), for converting from reduced units to K
temperature = 300.0 # Kelvin
temperature = float(temperature)/temperatureConvFactor #in units of kJ mol-1
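# e.g. 300 K / 120.27239 ~= 2.494 in these reduced (kJ/mol) units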
# time step for the velocity verlet integrator
dt = 0.001 #ps
nSteps = 1000 #total number of steps
nStepsPerOutput = 100 #frequency for printing energies and trajectory
nOutput = nSteps//nStepsPerOutput
# Parameters for size of AdResS dimensions
ex_size = 1.00
hy_size = 1.00
print('# radius of atomistic region = ',ex_size)
print('# thickness of hybrid region = ',hy_size)
trjfile = "trj.gro"
# print ESPResSo++ version and compile info
print('# ',espressopp.Version().info())
# print simulation parameters (useful to have them in a log file)
print("# nbCutoff = ", nbCutoff)
print("# intCutoff = ", intCutoff)
print("# skin = ", skin)
print("# dt = ", dt)
print("# nSteps = ", nSteps)
print("# output every ",nStepsPerOutput," steps")
########################################################################
# 2. read in coordinates and topology
########################################################################
## get info on (complete) atomistic system ##
print('# Reading gromacs top and gro files...')
# call gromacs parser for processing the top file (and included files) and the gro file
defaults, atTypes, atomtypesDict, atMasses, atCharges, atomtypeparameters, atBondtypes, bondtypeparams, atAngletypes, angletypeparams, atDihedraltypes, dihedraltypeparams, atImpropertypes, impropertypeparams, atExclusions, atOnefourpairslist, atX, atY, atZ, atVX, atVY, atVZ, atResnames, atResid, Lx, Ly, Lz = gromacs.read(inputcrdfile,aatopfile)
#initialize a map between atomtypes as integers and as strings
reverseAtomtypesDict = dict([(v, k) for k, v in atomtypesDict.items()])
# delete from atomtypeparams any types not in system, so as not to conflict with any new types created later
for k in list(atomtypeparameters):
if k not in atTypes:
print("# Deleting unused type ",k,"/",reverseAtomtypesDict[k]," from atomtypeparameters, atomtypesDict and reverseAtomtypesDict")
del atomtypeparameters[k]
atomtypekey = reverseAtomtypesDict[k]
del reverseAtomtypesDict[k]
del atomtypesDict[atomtypekey]
# system box size
box = (Lx, Ly, Lz)
print("# Box size = ", box)
nParticlesRead=len(atX)
print("# total number of particles read from atomistic config file = ",nParticlesRead)
print("# number of atomistic particles in protein = ",nProtAtoms)
print("# number of coarse-grained particles in protein = ",nProtAtoms)
print("# number of atomistic particles in solvent = ",nWaterAtoms)
print("# number of coarse-grained particles in solvent = ",nWaterMols)
nParticlesTotal=nProtAtoms*2+nWaterAtoms+nWaterMols
print("# total number of particles after setup = ",nParticlesTotal)
if (nParticlesRead != (nProtAtoms+nWaterAtoms)):
print("problem: no. particles in crd file != np. of atomistic particles specified")
print("values: ",nParticlesRead,nProtAtoms+nWaterAtoms)
quit()
particleX=[]
particleY=[]
particleZ=[]
particlePID=[]
particleTypes=[]
particleMasses=[]
particleCharges=[]
particleTypestring=[]
particleVX=[]
particleVY=[]
particleVZ=[]
#atomistic particles (protein and water)
for i in range(nProtAtoms+nWaterAtoms):
particlePID.append(i+1)
particleMasses.append(atMasses[i])
particleCharges.append(atCharges[i])
particleTypes.append(atTypes[i])
particleTypestring.append('atomistic__')
particleX.append(atX[i])
particleY.append(atY[i])
particleZ.append(atZ[i])
particleVX.append(atVX[i])
particleVY.append(atVY[i])
particleVZ.append(atVZ[i])
#cg protein particles (same as atomistic)
for i in range(int(nProtAtoms)):
particlePID.append(i+1+nProtAtoms+nWaterAtoms)
particleMasses.append(atMasses[i])
particleCharges.append(atCharges[i])
particleTypes.append(atTypes[i])
particleTypestring.append('cg_protein_')
particleX.append(atX[i])
particleY.append(atY[i])
particleZ.append(atZ[i])
particleVX.append(atVX[i])
particleVY.append(atVY[i])
particleVZ.append(atVZ[i])
#cg water particles
typeCG = max(reverseAtomtypesDict.keys())+2
reverseAtomtypesDict[typeCG]='WCG'
for i in range(int(nWaterMols)):
particlePID.append(i+1+nProtAtoms*2+nWaterAtoms)
indexO=atWaterIndices[3*i]-1
particleMasses.append(atMasses[indexO]+atMasses[indexO+1]+atMasses[indexO+2])
particleCharges.append(0.0)
particleTypes.append(typeCG)
particleTypestring.append('adres_cg___')
particleX.append(atX[indexO]) # put CG particle on O for the moment, later CG particle will be positioned in centre
particleY.append(atY[indexO])
particleZ.append(atZ[indexO])
particleVX.append(atVX[indexO]) # give CG particle velocity of O for the moment
particleVY.append(atVY[indexO])
particleVZ.append(atVZ[indexO])
print('# system total charge = ',sum(particleCharges[:nProtAtoms+nWaterAtoms]))
########################################################################
# 3. setup of the system, random number generator and parallelisation #
########################################################################
# create the basic system
system = espressopp.System()
# use the random number generator that is included within the ESPResSo++ package
xs = time.time()
seed = int(xs % int(xs) * 10000000000)
print("RNG Seed:", seed)
rng = espressopp.esutil.RNG()
rng.seed(seed)
system.rng = rng
# use orthorhombic periodic boundary conditions
system.bc = espressopp.bc.OrthorhombicBC(system.rng, box)
# set the skin size used for verlet lists and cell sizes
system.skin = skin
# get the number of CPUs to use
NCPUs = espressopp.MPI.COMM_WORLD.size
# calculate a regular 3D grid according to the number of CPUs available
nodeGrid = espressopp.tools.decomp.nodeGrid(NCPUs,box,nbCutoff,skin)
# calculate a 3D subgrid to speed up verlet list builds and communication
cellGrid = espressopp.tools.decomp.cellGrid(box, nodeGrid, nbCutoff, skin)
# create a domain decomposition particle storage with the calculated nodeGrid and cellGrid
system.storage = espressopp.storage.DomainDecompositionAdress(system, nodeGrid, cellGrid)
print("# NCPUs = ", NCPUs)
print("# nodeGrid = ", nodeGrid)
print("# cellGrid = ", cellGrid)
########################################################################
# 4. adding the particles and building the structure                  #
########################################################################
properties = ['id', 'type', 'pos', 'v', 'mass', 'q', 'adrat']
allParticles = []
tuples = []
#add particles in order CG1,AA11,AA12,AA13...CG2,AA21,AA22,AA23... etc.
mapAtToCgIndex = {}
#first adres particles
for i in range(int(nWaterMols)):
cgindex = i + nProtAtoms*2 + nWaterAtoms
tmptuple = [particlePID[cgindex]]
# first CG particle
allParticles.append([particlePID[cgindex],
particleTypes[cgindex],
Real3D(particleX[cgindex],particleY[cgindex],particleZ[cgindex]),
Real3D(particleVX[cgindex],particleVY[cgindex],particleVZ[cgindex]),
particleMasses[cgindex],particleCharges[cgindex],0])
# then AA particles
for j in range(int(nWaterAtomsPerMol)):
aaindex = i*nWaterAtomsPerMol + j + nProtAtoms
tmptuple.append(particlePID[aaindex])
allParticles.append([particlePID[aaindex],
particleTypes[aaindex],
Real3D(particleX[aaindex],particleY[aaindex],particleZ[aaindex]),
Real3D(particleVX[aaindex],particleVY[aaindex],particleVZ[aaindex]),
particleMasses[aaindex],particleCharges[aaindex],1])
mapAtToCgIndex[particlePID[aaindex]]=particlePID[cgindex]
tuples.append(tmptuple)
# then protein
for i in range(int(nProtAtoms)):
    allParticles.append([particlePID[i]+nProtAtoms+nWaterAtoms,particleTypes[i], # CG protein pid = atomistic pid + nProtAtoms + nWaterAtoms, matching the pid assignment above
Real3D(particleX[i],particleY[i],particleZ[i]),
Real3D(particleVX[i],particleVY[i],particleVZ[i]),
particleMasses[i],particleCharges[i],0])
allParticles.append([particlePID[i],particleTypes[i],
Real3D(particleX[i],particleY[i],particleZ[i]),
Real3D(particleVX[i],particleVY[i],particleVZ[i]),
particleMasses[i],particleCharges[i],1])
tuples.append([particlePID[i]+nProtAtoms+nWaterAtoms,particlePID[i]])
mapAtToCgIndex[particlePID[i]] = particlePID[i]+nProtAtoms+nWaterAtoms
system.storage.addParticles(allParticles, *properties)
# create FixedTupleList object
ftpl = espressopp.FixedTupleListAdress(system.storage)
ftpl.addTuples(tuples)
system.storage.setFixedTuplesAdress(ftpl)
system.storage.decompose()
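# Layout sketch following the pid arithmetic above (ids derived from the
# particle counts, not hard-coded): water molecule 0 maps CG pid 30721 to
# atoms [94, 95, 96], i.e. tuple [30721, 94, 95, 96]; protein atom 1 maps
# onto its own CG particle as tuple [30628, 1].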
########################################################################
# 5. setup of the integrator and simulation ensemble                  #
########################################################################
# use a velocity Verlet integration scheme
integrator = espressopp.integrator.VelocityVerlet(system)
# set the integration step
integrator.dt = dt
# use a thermostat if the temperature is set
if (temperature != None):
# create Langevin thermostat
thermostat = espressopp.integrator.LangevinThermostat(system)
# set Langevin friction constant
thermostat.gamma = 5.0 # units ps-1
print("# gamma for langevin thermostat = ",thermostat.gamma)
# set temperature
thermostat.temperature = temperature
# switch on for adres
thermostat.adress = True
print("# thermostat temperature = ", temperature*temperatureConvFactor)
# tell the integrator to use this thermostat
integrator.addExtension(thermostat)
else:
print("#No thermostat")
########################################################################
# 6. define atomistic and adres interactions
########################################################################
## adres interactions ##
print('# moving atomistic region composed of multiple spheres centered on each protein cg particle')
particlePIDsADR = [mapAtToCgIndex[pid] for pid in particlePIDsADR]
verletlist = espressopp.VerletListAdress(system, cutoff=nbCutoff, adrcut=nbCutoff,
dEx=ex_size, dHy=hy_size,
pids=particlePIDsADR, sphereAdr=True)
# set up LJ interaction according to the parameters read from the .top file
lj_adres_interaction=gromacs.setLennardJonesInteractions(system, defaults, atomtypeparameters, verletlist, intCutoff, adress=True, ftpl=ftpl)
# set up coulomb interactions according to the parameters read from the .top file
print('#Note: Reaction Field method is used for Coulomb interactions')
qq_adres_interaction=gromacs.setCoulombInteractions(system, verletlist, intCutoff, atTypes, epsilon1=1, epsilon2=67.5998, kappa=0, adress=True, ftpl=ftpl)
# set the CG potential for water (LJ interaction only; the QQ interaction has no CG equivalent, and the protein has no CG potential since it is always in the atomistic region)
# load CG interaction from table
fe="table_CGwat_CGwat.tab"
gromacs.convertTable("table_CGwat_CGwat.xvg", fe, 1, 1, 1, 1)
potCG = espressopp.interaction.Tabulated(itype=3, filename=fe, cutoff=intCutoff)
lj_adres_interaction.setPotentialCG(type1=typeCG, type2=typeCG, potential=potCG)
## bonded (fixed list) interactions for protein (actually between CG particles in AA region) ##
## set up LJ 1-4 interactions
cgOnefourpairslist=[]
for (a1,a2) in atOnefourpairslist:
cgOnefourpairslist.append((mapAtToCgIndex[a1],mapAtToCgIndex[a2]))
print('# ',len(cgOnefourpairslist),' 1-4 pairs in aa-hybrid region')
onefourlist = espressopp.FixedPairList(system.storage)
onefourlist.addBonds(cgOnefourpairslist)
lj14interaction=gromacs.setLennardJones14Interactions(system, defaults, atomtypeparameters, onefourlist, intCutoff)
# set up coulomb 1-4 interactions
qq14_interactions=gromacs.setCoulomb14Interactions(system, defaults, onefourlist, intCutoff, atTypes)
## set up bond interactions according to the parameters read from the .top file
# only for protein, not for water
cgBondtypes={}
for btkey in list(atBondtypes.keys()):
newBondtypes=[]
for (a1,a2) in atBondtypes[btkey]:
if (a1 in atProtIndices) and (a2 in atProtIndices):
newBondtypes.append((mapAtToCgIndex[a1],mapAtToCgIndex[a2]))
cgBondtypes[btkey]=newBondtypes
bondedinteractions=gromacs.setBondedInteractions(system, cgBondtypes, bondtypeparams)
# set up angle interactions according to the parameters read from the .top file
# only for protein, not for water
cgAngletypes={}
for atkey in list(atAngletypes.keys()):
newAngletypes=[]
for (a1,a2,a3) in atAngletypes[atkey]:
if (a1 in atProtIndices) and (a2 in atProtIndices) and (a3 in atProtIndices):
newAngletypes.append((mapAtToCgIndex[a1],mapAtToCgIndex[a2],mapAtToCgIndex[a3]))
cgAngletypes[atkey]=newAngletypes
angleinteractions=gromacs.setAngleInteractions(system, cgAngletypes, angletypeparams)
# set up dihedral interactions according to the parameters read from the .top file
cgDihedraltypes={}
for atkey in list(atDihedraltypes.keys()):
newDihedraltypes=[]
for (a1,a2,a3,a4) in atDihedraltypes[atkey]:
newDihedraltypes.append((mapAtToCgIndex[a1],mapAtToCgIndex[a2],mapAtToCgIndex[a3],mapAtToCgIndex[a4]))
cgDihedraltypes[atkey]=newDihedraltypes
dihedralinteractions=gromacs.setDihedralInteractions(system, cgDihedraltypes, dihedraltypeparams)
# set up improper interactions according to the parameters read from the .top file
cgImpropertypes={}
for atkey in list(atImpropertypes.keys()):
newImpropertypes=[]
for (a1,a2,a3,a4) in atImpropertypes[atkey]:
newImpropertypes.append((mapAtToCgIndex[a1],mapAtToCgIndex[a2],mapAtToCgIndex[a3],mapAtToCgIndex[a4]))
cgImpropertypes[atkey]=newImpropertypes
improperinteractions=gromacs.setImproperInteractions(system, cgImpropertypes, impropertypeparams)
cgExclusions = [] #previously existing atExclusions list was for atomistic protein, don't use it
#in espressopp, exclusions are handled at the CG particle level
for pair in atExclusions:
vp1 = mapAtToCgIndex[pair[0]]
vp2 = mapAtToCgIndex[pair[1]]
if vp1 == vp2: continue #all at interactions within one cg particle are excluded anyway
cgExclusions.append((vp1,vp2))
verletlist.exclude(cgExclusions)
print('# ',len(cgExclusions),' exclusions')
count = system.getNumberOfInteractions()
print('# ',count,' interactions defined')
# SETTLE water for rigid water
print('#Warning: settle set-up assumes water was listed first when tuples were constructed')
molidlist=[]
for wm in range(int(nWaterMols)): #assuming water==adres part, and water is listed first
molidlist.append(tuples[wm][0])
settlewaters = espressopp.integrator.Settle(system, ftpl, mO=15.9994, mH=1.008, distHH=0.1633, distOH=0.1)
settlewaters.addMolecules(molidlist)
integrator.addExtension(settlewaters)
print('# Settling ',len(molidlist), ' waters')
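# (distOH = 0.1 nm and distHH = 0.1633 nm correspond to SPC-like water
#  geometry, i.e. an H-O-H angle of about 109.47 degrees)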
# calculate number of degrees of freedom, for temperature calculation
# note that this will only work in a fully atomistic system
# espressopp doesn't calculate the number of dof correctly in force-based Adress
nconstr = nWaterAtoms
nAtoms = nWaterAtoms + nProtAtoms
ndof_unconstr = nAtoms*3-3
ndof_constr = ndof_unconstr-nconstr
dofTemperatureCorrFactor = float(ndof_unconstr)/float(ndof_constr)
print("# Correcting temperature for constraints, using factor: ",dofTemperatureCorrFactor)
print("# calculated using nAtoms = ",nAtoms, "nconstraints = ",nconstr," and ndof_constr = ",ndof_constr)
# add AdResS
adress = espressopp.integrator.Adress(system,verletlist,ftpl)
integrator.addExtension(adress)
# add thermodynamic force
print("# Adding Extension: external thermodynamic force using TDforce module...")
tabTF="tabletf-1-1.xvg"
thdforce = espressopp.integrator.TDforce(system,verletlist,startdist = 0.9, enddist = 2.1, edgeweightmultiplier = 20)
thdforce.addForce(itype=3,filename=tabTF,type=typeCG)
integrator.addExtension(thdforce)
# distribute atoms and CG molecules according to AdResS domain decomposition, place CG molecules in the center of mass
print('# Decomposing...')
espressopp.tools.AdressDecomp(system, integrator)
########################################################################
# 7. run #
########################################################################
temperature = espressopp.analysis.Temperature(system)
print("# starting run...")
#try:
# os.remove(trjfile)
#except OSError:
# pass
dump_conf_gro = espressopp.io.DumpGROAdress(system, ftpl, integrator, filename=trjfile,unfolded=True)
start_time = time.process_time()
print('Start time: ', str(datetime.now()))
print("i*dt,Eb, EAng, Edih, EImp, ELj, Elj14, EQQ, EQQ14, Etotal, T")
fmt='%5.5f %15.8g %15.8g %15.8g %15.8g %15.8g %15.8g %15.8g %15.8g %15.8f %15.8f\n'
integrator.run(0)
for k in range(int(nOutput)):
i=k*nStepsPerOutput
EQQ=0.0
EQQ14=0.0
ELj=0.0
ELj14=0.0
Eb = 0.0
EAng = 0.0
EDih = 0.0
EImp = 0.0
for bd in list(bondedinteractions.values()): Eb+=bd.computeEnergy()
for ang in list(angleinteractions.values()): EAng+=ang.computeEnergy()
for dih in list(dihedralinteractions.values()): EDih+=dih.computeEnergy()
for imp in list(improperinteractions.values()): EImp+=imp.computeEnergy()
ELj= lj_adres_interaction.computeEnergy()
ELj14 = lj14interaction.computeEnergy()
EQQ = qq_adres_interaction.computeEnergy()
EQQ14 = qq14_interactions.computeEnergy()
T = temperature.compute()
Etotal = Eb+EAng+EDih+EImp+EQQ+EQQ14+ELj+ELj14
print((fmt%(i*dt,Eb, EAng, EDih, EImp, ELj, ELj14, EQQ, EQQ14, Etotal, T*temperatureConvFactor*dofTemperatureCorrFactor)), end='')
sys.stdout.flush()
integrator.run(nStepsPerOutput)
particle = system.storage.getParticle(1)
if math.isnan(particle.pos[0]):
quit()
dump_conf_gro.dump()
end_time = time.process_time()
|
espressopp/espressopp
|
examples/adress/fadress_selfadjusting/peptide-adres-selfadjusting.py
|
Python
|
gpl-3.0
| 21,994
|
[
"ESPResSo",
"Gromacs"
] |
b495843f212a8623e689bb1f954f9d6809b7ffd1a5a9f022b2d4bb0f49813785
|
# Author: Felix Wiemann
# Contact: Felix_Wiemann@ososo.de
# Revision: $Revision: 4163 $
# Date: $Date: 2005-12-09 05:21:34 +0100 (Fri, 09 Dec 2005) $
# Copyright: This file has been placed in the public domain.
# This is a mapping of Unicode characters to LaTeX equivalents.
# The information has been extracted from
# <http://www.w3.org/2003/entities/xml/unicode.xml>, written by
# David Carlisle and Sebastian Rahtz.
#
# The extraction has been done by the "create_unimap.py" script
# located at <http://docutils.sf.net/tools/dev/create_unimap.py>.
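# A minimal usage sketch (assumed helper, not part of docutils): map each
# character through unicode_map and fall back to the character itself.
#   def to_latex(text):
#       return u''.join(unicode_map.get(ch, ch) for ch in text)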
unicode_map = {u'\xa0': '$~$',
u'\xa1': '{\\textexclamdown}',
u'\xa2': '{\\textcent}',
u'\xa3': '{\\textsterling}',
u'\xa4': '{\\textcurrency}',
u'\xa5': '{\\textyen}',
u'\xa6': '{\\textbrokenbar}',
u'\xa7': '{\\textsection}',
u'\xa8': '{\\textasciidieresis}',
u'\xa9': '{\\textcopyright}',
u'\xaa': '{\\textordfeminine}',
u'\xab': '{\\guillemotleft}',
u'\xac': '$\\lnot$',
u'\xad': '$\\-$',
u'\xae': '{\\textregistered}',
u'\xaf': '{\\textasciimacron}',
u'\xb0': '{\\textdegree}',
u'\xb1': '$\\pm$',
u'\xb2': '${^2}$',
u'\xb3': '${^3}$',
u'\xb4': '{\\textasciiacute}',
u'\xb5': '$\\mathrm{\\mu}$',
u'\xb6': '{\\textparagraph}',
u'\xb7': '$\\cdot$',
u'\xb8': '{\\c{}}',
u'\xb9': '${^1}$',
u'\xba': '{\\textordmasculine}',
u'\xbb': '{\\guillemotright}',
u'\xbc': '{\\textonequarter}',
u'\xbd': '{\\textonehalf}',
u'\xbe': '{\\textthreequarters}',
u'\xbf': '{\\textquestiondown}',
u'\xc0': '{\\`{A}}',
u'\xc1': "{\\'{A}}",
u'\xc2': '{\\^{A}}',
u'\xc3': '{\\~{A}}',
u'\xc4': '{\\"{A}}',
u'\xc5': '{\\AA}',
u'\xc6': '{\\AE}',
u'\xc7': '{\\c{C}}',
u'\xc8': '{\\`{E}}',
u'\xc9': "{\\'{E}}",
u'\xca': '{\\^{E}}',
u'\xcb': '{\\"{E}}',
u'\xcc': '{\\`{I}}',
u'\xcd': "{\\'{I}}",
u'\xce': '{\\^{I}}',
u'\xcf': '{\\"{I}}',
u'\xd0': '{\\DH}',
u'\xd1': '{\\~{N}}',
u'\xd2': '{\\`{O}}',
u'\xd3': "{\\'{O}}",
u'\xd4': '{\\^{O}}',
u'\xd5': '{\\~{O}}',
u'\xd6': '{\\"{O}}',
u'\xd7': '{\\texttimes}',
u'\xd8': '{\\O}',
u'\xd9': '{\\`{U}}',
u'\xda': "{\\'{U}}",
u'\xdb': '{\\^{U}}',
u'\xdc': '{\\"{U}}',
u'\xdd': "{\\'{Y}}",
u'\xde': '{\\TH}',
u'\xdf': '{\\ss}',
u'\xe0': '{\\`{a}}',
u'\xe1': "{\\'{a}}",
u'\xe2': '{\\^{a}}',
u'\xe3': '{\\~{a}}',
u'\xe4': '{\\"{a}}',
u'\xe5': '{\\aa}',
u'\xe6': '{\\ae}',
u'\xe7': '{\\c{c}}',
u'\xe8': '{\\`{e}}',
u'\xe9': "{\\'{e}}",
u'\xea': '{\\^{e}}',
u'\xeb': '{\\"{e}}',
u'\xec': '{\\`{\\i}}',
u'\xed': "{\\'{\\i}}",
u'\xee': '{\\^{\\i}}',
u'\xef': '{\\"{\\i}}',
u'\xf0': '{\\dh}',
u'\xf1': '{\\~{n}}',
u'\xf2': '{\\`{o}}',
u'\xf3': "{\\'{o}}",
u'\xf4': '{\\^{o}}',
u'\xf5': '{\\~{o}}',
u'\xf6': '{\\"{o}}',
u'\xf7': '$\\div$',
u'\xf8': '{\\o}',
u'\xf9': '{\\`{u}}',
u'\xfa': "{\\'{u}}",
u'\xfb': '{\\^{u}}',
u'\xfc': '{\\"{u}}',
u'\xfd': "{\\'{y}}",
u'\xfe': '{\\th}',
u'\xff': '{\\"{y}}',
u'\u0100': '{\\={A}}',
u'\u0101': '{\\={a}}',
u'\u0102': '{\\u{A}}',
u'\u0103': '{\\u{a}}',
u'\u0104': '{\\k{A}}',
u'\u0105': '{\\k{a}}',
u'\u0106': "{\\'{C}}",
u'\u0107': "{\\'{c}}",
u'\u0108': '{\\^{C}}',
u'\u0109': '{\\^{c}}',
u'\u010a': '{\\.{C}}',
u'\u010b': '{\\.{c}}',
u'\u010c': '{\\v{C}}',
u'\u010d': '{\\v{c}}',
u'\u010e': '{\\v{D}}',
u'\u010f': '{\\v{d}}',
u'\u0110': '{\\DJ}',
u'\u0111': '{\\dj}',
u'\u0112': '{\\={E}}',
u'\u0113': '{\\={e}}',
u'\u0114': '{\\u{E}}',
u'\u0115': '{\\u{e}}',
u'\u0116': '{\\.{E}}',
u'\u0117': '{\\.{e}}',
u'\u0118': '{\\k{E}}',
u'\u0119': '{\\k{e}}',
u'\u011a': '{\\v{E}}',
u'\u011b': '{\\v{e}}',
u'\u011c': '{\\^{G}}',
u'\u011d': '{\\^{g}}',
u'\u011e': '{\\u{G}}',
u'\u011f': '{\\u{g}}',
u'\u0120': '{\\.{G}}',
u'\u0121': '{\\.{g}}',
u'\u0122': '{\\c{G}}',
u'\u0123': '{\\c{g}}',
u'\u0124': '{\\^{H}}',
u'\u0125': '{\\^{h}}',
u'\u0126': '{{\\fontencoding{LELA}\\selectfont\\char40}}',
u'\u0127': '$\\Elzxh$',
u'\u0128': '{\\~{I}}',
u'\u0129': '{\\~{\\i}}',
u'\u012a': '{\\={I}}',
u'\u012b': '{\\={\\i}}',
u'\u012c': '{\\u{I}}',
u'\u012d': '{\\u{\\i}}',
u'\u012e': '{\\k{I}}',
u'\u012f': '{\\k{i}}',
u'\u0130': '{\\.{I}}',
u'\u0131': '{\\i}',
u'\u0132': '{IJ}',
u'\u0133': '{ij}',
u'\u0134': '{\\^{J}}',
u'\u0135': '{\\^{\\j}}',
u'\u0136': '{\\c{K}}',
u'\u0137': '{\\c{k}}',
u'\u0138': '{{\\fontencoding{LELA}\\selectfont\\char91}}',
u'\u0139': "{\\'{L}}",
u'\u013a': "{\\'{l}}",
u'\u013b': '{\\c{L}}',
u'\u013c': '{\\c{l}}',
u'\u013d': '{\\v{L}}',
u'\u013e': '{\\v{l}}',
u'\u013f': '{{\\fontencoding{LELA}\\selectfont\\char201}}',
u'\u0140': '{{\\fontencoding{LELA}\\selectfont\\char202}}',
u'\u0141': '{\\L}',
u'\u0142': '{\\l}',
u'\u0143': "{\\'{N}}",
u'\u0144': "{\\'{n}}",
u'\u0145': '{\\c{N}}',
u'\u0146': '{\\c{n}}',
u'\u0147': '{\\v{N}}',
u'\u0148': '{\\v{n}}',
u'\u0149': "{'n}",
u'\u014a': '{\\NG}',
u'\u014b': '{\\ng}',
u'\u014c': '{\\={O}}',
u'\u014d': '{\\={o}}',
u'\u014e': '{\\u{O}}',
u'\u014f': '{\\u{o}}',
u'\u0150': '{\\H{O}}',
u'\u0151': '{\\H{o}}',
u'\u0152': '{\\OE}',
u'\u0153': '{\\oe}',
u'\u0154': "{\\'{R}}",
u'\u0155': "{\\'{r}}",
u'\u0156': '{\\c{R}}',
u'\u0157': '{\\c{r}}',
u'\u0158': '{\\v{R}}',
u'\u0159': '{\\v{r}}',
u'\u015a': "{\\'{S}}",
u'\u015b': "{\\'{s}}",
u'\u015c': '{\\^{S}}',
u'\u015d': '{\\^{s}}',
u'\u015e': '{\\c{S}}',
u'\u015f': '{\\c{s}}',
u'\u0160': '{\\v{S}}',
u'\u0161': '{\\v{s}}',
u'\u0162': '{\\c{T}}',
u'\u0163': '{\\c{t}}',
u'\u0164': '{\\v{T}}',
u'\u0165': '{\\v{t}}',
u'\u0166': '{{\\fontencoding{LELA}\\selectfont\\char47}}',
u'\u0167': '{{\\fontencoding{LELA}\\selectfont\\char63}}',
u'\u0168': '{\\~{U}}',
u'\u0169': '{\\~{u}}',
u'\u016a': '{\\={U}}',
u'\u016b': '{\\={u}}',
u'\u016c': '{\\u{U}}',
u'\u016d': '{\\u{u}}',
u'\u016e': '{\\r{U}}',
u'\u016f': '{\\r{u}}',
u'\u0170': '{\\H{U}}',
u'\u0171': '{\\H{u}}',
u'\u0172': '{\\k{U}}',
u'\u0173': '{\\k{u}}',
u'\u0174': '{\\^{W}}',
u'\u0175': '{\\^{w}}',
u'\u0176': '{\\^{Y}}',
u'\u0177': '{\\^{y}}',
u'\u0178': '{\\"{Y}}',
u'\u0179': "{\\'{Z}}",
u'\u017a': "{\\'{z}}",
u'\u017b': '{\\.{Z}}',
u'\u017c': '{\\.{z}}',
u'\u017d': '{\\v{Z}}',
u'\u017e': '{\\v{z}}',
u'\u0192': '$f$',
u'\u0195': '{\\texthvlig}',
u'\u019e': '{\\textnrleg}',
u'\u01aa': '$\\eth$',
u'\u01ba': '{{\\fontencoding{LELA}\\selectfont\\char195}}',
u'\u01c2': '{\\textdoublepipe}',
u'\u01f5': "{\\'{g}}",
u'\u0250': '$\\Elztrna$',
u'\u0252': '$\\Elztrnsa$',
u'\u0254': '$\\Elzopeno$',
u'\u0256': '$\\Elzrtld$',
u'\u0258': '{{\\fontencoding{LEIP}\\selectfont\\char61}}',
u'\u0259': '$\\Elzschwa$',
u'\u025b': '$\\varepsilon$',
u'\u0261': '{g}',
u'\u0263': '$\\Elzpgamma$',
u'\u0264': '$\\Elzpbgam$',
u'\u0265': '$\\Elztrnh$',
u'\u026c': '$\\Elzbtdl$',
u'\u026d': '$\\Elzrtll$',
u'\u026f': '$\\Elztrnm$',
u'\u0270': '$\\Elztrnmlr$',
u'\u0271': '$\\Elzltlmr$',
u'\u0272': '{\\Elzltln}',
u'\u0273': '$\\Elzrtln$',
u'\u0277': '$\\Elzclomeg$',
u'\u0278': '{\\textphi}',
u'\u0279': '$\\Elztrnr$',
u'\u027a': '$\\Elztrnrl$',
u'\u027b': '$\\Elzrttrnr$',
u'\u027c': '$\\Elzrl$',
u'\u027d': '$\\Elzrtlr$',
u'\u027e': '$\\Elzfhr$',
u'\u027f': '{{\\fontencoding{LEIP}\\selectfont\\char202}}',
u'\u0282': '$\\Elzrtls$',
u'\u0283': '$\\Elzesh$',
u'\u0287': '$\\Elztrnt$',
u'\u0288': '$\\Elzrtlt$',
u'\u028a': '$\\Elzpupsil$',
u'\u028b': '$\\Elzpscrv$',
u'\u028c': '$\\Elzinvv$',
u'\u028d': '$\\Elzinvw$',
u'\u028e': '$\\Elztrny$',
u'\u0290': '$\\Elzrtlz$',
u'\u0292': '$\\Elzyogh$',
u'\u0294': '$\\Elzglst$',
u'\u0295': '$\\Elzreglst$',
u'\u0296': '$\\Elzinglst$',
u'\u029e': '{\\textturnk}',
u'\u02a4': '$\\Elzdyogh$',
u'\u02a7': '$\\Elztesh$',
u'\u02bc': "{'}",
u'\u02c7': '{\\textasciicaron}',
u'\u02c8': '$\\Elzverts$',
u'\u02cc': '$\\Elzverti$',
u'\u02d0': '$\\Elzlmrk$',
u'\u02d1': '$\\Elzhlmrk$',
u'\u02d2': '$\\Elzsbrhr$',
u'\u02d3': '$\\Elzsblhr$',
u'\u02d4': '$\\Elzrais$',
u'\u02d5': '$\\Elzlow$',
u'\u02d8': '{\\textasciibreve}',
u'\u02d9': '{\\textperiodcentered}',
u'\u02da': '{\\r{}}',
u'\u02db': '{\\k{}}',
u'\u02dc': '{\\texttildelow}',
u'\u02dd': '{\\H{}}',
u'\u02e5': '{\\tone{55}}',
u'\u02e6': '{\\tone{44}}',
u'\u02e7': '{\\tone{33}}',
u'\u02e8': '{\\tone{22}}',
u'\u02e9': '{\\tone{11}}',
u'\u0300': '{\\`}',
u'\u0301': "{\\'}",
u'\u0302': '{\\^}',
u'\u0303': '{\\~}',
u'\u0304': '{\\=}',
u'\u0306': '{\\u}',
u'\u0307': '{\\.}',
u'\u0308': '{\\"}',
u'\u030a': '{\\r}',
u'\u030b': '{\\H}',
u'\u030c': '{\\v}',
u'\u030f': '{\\cyrchar\\C}',
u'\u0311': '{{\\fontencoding{LECO}\\selectfont\\char177}}',
u'\u0318': '{{\\fontencoding{LECO}\\selectfont\\char184}}',
u'\u0319': '{{\\fontencoding{LECO}\\selectfont\\char185}}',
u'\u0321': '$\\Elzpalh$',
u'\u0322': '{\\Elzrh}',
u'\u0327': '{\\c}',
u'\u0328': '{\\k}',
u'\u032a': '$\\Elzsbbrg$',
u'\u032b': '{{\\fontencoding{LECO}\\selectfont\\char203}}',
u'\u032f': '{{\\fontencoding{LECO}\\selectfont\\char207}}',
u'\u0335': '{\\Elzxl}',
u'\u0336': '{\\Elzbar}',
u'\u0337': '{{\\fontencoding{LECO}\\selectfont\\char215}}',
u'\u0338': '{{\\fontencoding{LECO}\\selectfont\\char216}}',
u'\u033a': '{{\\fontencoding{LECO}\\selectfont\\char218}}',
u'\u033b': '{{\\fontencoding{LECO}\\selectfont\\char219}}',
u'\u033c': '{{\\fontencoding{LECO}\\selectfont\\char220}}',
u'\u033d': '{{\\fontencoding{LECO}\\selectfont\\char221}}',
u'\u0361': '{{\\fontencoding{LECO}\\selectfont\\char225}}',
u'\u0386': "{\\'{A}}",
u'\u0388': "{\\'{E}}",
u'\u0389': "{\\'{H}}",
u'\u038a': "{\\'{}{I}}",
u'\u038c': "{\\'{}O}",
u'\u038e': "$\\mathrm{'Y}$",
u'\u038f': "$\\mathrm{'\\Omega}$",
u'\u0390': '$\\acute{\\ddot{\\iota}}$',
u'\u0391': '$\\Alpha$',
u'\u0392': '$\\Beta$',
u'\u0393': '$\\Gamma$',
u'\u0394': '$\\Delta$',
u'\u0395': '$\\Epsilon$',
u'\u0396': '$\\Zeta$',
u'\u0397': '$\\Eta$',
u'\u0398': '$\\Theta$',
u'\u0399': '$\\Iota$',
u'\u039a': '$\\Kappa$',
u'\u039b': '$\\Lambda$',
u'\u039c': '$M$',
u'\u039d': '$N$',
u'\u039e': '$\\Xi$',
u'\u039f': '$O$',
u'\u03a0': '$\\Pi$',
u'\u03a1': '$\\Rho$',
u'\u03a3': '$\\Sigma$',
u'\u03a4': '$\\Tau$',
u'\u03a5': '$\\Upsilon$',
u'\u03a6': '$\\Phi$',
u'\u03a7': '$\\Chi$',
u'\u03a8': '$\\Psi$',
u'\u03a9': '$\\Omega$',
u'\u03aa': '$\\mathrm{\\ddot{I}}$',
u'\u03ab': '$\\mathrm{\\ddot{Y}}$',
u'\u03ac': "{\\'{$\\alpha$}}",
u'\u03ad': '$\\acute{\\epsilon}$',
u'\u03ae': '$\\acute{\\eta}$',
u'\u03af': '$\\acute{\\iota}$',
u'\u03b0': '$\\acute{\\ddot{\\upsilon}}$',
u'\u03b1': '$\\alpha$',
u'\u03b2': '$\\beta$',
u'\u03b3': '$\\gamma$',
u'\u03b4': '$\\delta$',
u'\u03b5': '$\\epsilon$',
u'\u03b6': '$\\zeta$',
u'\u03b7': '$\\eta$',
u'\u03b8': '{\\texttheta}',
u'\u03b9': '$\\iota$',
u'\u03ba': '$\\kappa$',
u'\u03bb': '$\\lambda$',
u'\u03bc': '$\\mu$',
u'\u03bd': '$\\nu$',
u'\u03be': '$\\xi$',
u'\u03bf': '$o$',
u'\u03c0': '$\\pi$',
u'\u03c1': '$\\rho$',
u'\u03c2': '$\\varsigma$',
u'\u03c3': '$\\sigma$',
u'\u03c4': '$\\tau$',
u'\u03c5': '$\\upsilon$',
u'\u03c6': '$\\varphi$',
u'\u03c7': '$\\chi$',
u'\u03c8': '$\\psi$',
u'\u03c9': '$\\omega$',
u'\u03ca': '$\\ddot{\\iota}$',
u'\u03cb': '$\\ddot{\\upsilon}$',
u'\u03cc': "{\\'{o}}",
u'\u03cd': '$\\acute{\\upsilon}$',
u'\u03ce': '$\\acute{\\omega}$',
u'\u03d0': '{\\Pisymbol{ppi022}{87}}',
u'\u03d1': '{\\textvartheta}',
u'\u03d2': '$\\Upsilon$',
u'\u03d5': '$\\phi$',
u'\u03d6': '$\\varpi$',
u'\u03da': '$\\Stigma$',
u'\u03dc': '$\\Digamma$',
u'\u03dd': '$\\digamma$',
u'\u03de': '$\\Koppa$',
u'\u03e0': '$\\Sampi$',
u'\u03f0': '$\\varkappa$',
u'\u03f1': '$\\varrho$',
u'\u03f4': '{\\textTheta}',
u'\u03f6': '$\\backepsilon$',
u'\u0401': '{\\cyrchar\\CYRYO}',
u'\u0402': '{\\cyrchar\\CYRDJE}',
u'\u0403': "{\\cyrchar{\\'\\CYRG}}",
u'\u0404': '{\\cyrchar\\CYRIE}',
u'\u0405': '{\\cyrchar\\CYRDZE}',
u'\u0406': '{\\cyrchar\\CYRII}',
u'\u0407': '{\\cyrchar\\CYRYI}',
u'\u0408': '{\\cyrchar\\CYRJE}',
u'\u0409': '{\\cyrchar\\CYRLJE}',
u'\u040a': '{\\cyrchar\\CYRNJE}',
u'\u040b': '{\\cyrchar\\CYRTSHE}',
u'\u040c': "{\\cyrchar{\\'\\CYRK}}",
u'\u040e': '{\\cyrchar\\CYRUSHRT}',
u'\u040f': '{\\cyrchar\\CYRDZHE}',
u'\u0410': '{\\cyrchar\\CYRA}',
u'\u0411': '{\\cyrchar\\CYRB}',
u'\u0412': '{\\cyrchar\\CYRV}',
u'\u0413': '{\\cyrchar\\CYRG}',
u'\u0414': '{\\cyrchar\\CYRD}',
u'\u0415': '{\\cyrchar\\CYRE}',
u'\u0416': '{\\cyrchar\\CYRZH}',
u'\u0417': '{\\cyrchar\\CYRZ}',
u'\u0418': '{\\cyrchar\\CYRI}',
u'\u0419': '{\\cyrchar\\CYRISHRT}',
u'\u041a': '{\\cyrchar\\CYRK}',
u'\u041b': '{\\cyrchar\\CYRL}',
u'\u041c': '{\\cyrchar\\CYRM}',
u'\u041d': '{\\cyrchar\\CYRN}',
u'\u041e': '{\\cyrchar\\CYRO}',
u'\u041f': '{\\cyrchar\\CYRP}',
u'\u0420': '{\\cyrchar\\CYRR}',
u'\u0421': '{\\cyrchar\\CYRS}',
u'\u0422': '{\\cyrchar\\CYRT}',
u'\u0423': '{\\cyrchar\\CYRU}',
u'\u0424': '{\\cyrchar\\CYRF}',
u'\u0425': '{\\cyrchar\\CYRH}',
u'\u0426': '{\\cyrchar\\CYRC}',
u'\u0427': '{\\cyrchar\\CYRCH}',
u'\u0428': '{\\cyrchar\\CYRSH}',
u'\u0429': '{\\cyrchar\\CYRSHCH}',
u'\u042a': '{\\cyrchar\\CYRHRDSN}',
u'\u042b': '{\\cyrchar\\CYRERY}',
u'\u042c': '{\\cyrchar\\CYRSFTSN}',
u'\u042d': '{\\cyrchar\\CYREREV}',
u'\u042e': '{\\cyrchar\\CYRYU}',
u'\u042f': '{\\cyrchar\\CYRYA}',
u'\u0430': '{\\cyrchar\\cyra}',
u'\u0431': '{\\cyrchar\\cyrb}',
u'\u0432': '{\\cyrchar\\cyrv}',
u'\u0433': '{\\cyrchar\\cyrg}',
u'\u0434': '{\\cyrchar\\cyrd}',
u'\u0435': '{\\cyrchar\\cyre}',
u'\u0436': '{\\cyrchar\\cyrzh}',
u'\u0437': '{\\cyrchar\\cyrz}',
u'\u0438': '{\\cyrchar\\cyri}',
u'\u0439': '{\\cyrchar\\cyrishrt}',
u'\u043a': '{\\cyrchar\\cyrk}',
u'\u043b': '{\\cyrchar\\cyrl}',
u'\u043c': '{\\cyrchar\\cyrm}',
u'\u043d': '{\\cyrchar\\cyrn}',
u'\u043e': '{\\cyrchar\\cyro}',
u'\u043f': '{\\cyrchar\\cyrp}',
u'\u0440': '{\\cyrchar\\cyrr}',
u'\u0441': '{\\cyrchar\\cyrs}',
u'\u0442': '{\\cyrchar\\cyrt}',
u'\u0443': '{\\cyrchar\\cyru}',
u'\u0444': '{\\cyrchar\\cyrf}',
u'\u0445': '{\\cyrchar\\cyrh}',
u'\u0446': '{\\cyrchar\\cyrc}',
u'\u0447': '{\\cyrchar\\cyrch}',
u'\u0448': '{\\cyrchar\\cyrsh}',
u'\u0449': '{\\cyrchar\\cyrshch}',
u'\u044a': '{\\cyrchar\\cyrhrdsn}',
u'\u044b': '{\\cyrchar\\cyrery}',
u'\u044c': '{\\cyrchar\\cyrsftsn}',
u'\u044d': '{\\cyrchar\\cyrerev}',
u'\u044e': '{\\cyrchar\\cyryu}',
u'\u044f': '{\\cyrchar\\cyrya}',
u'\u0451': '{\\cyrchar\\cyryo}',
u'\u0452': '{\\cyrchar\\cyrdje}',
u'\u0453': "{\\cyrchar{\\'\\cyrg}}",
u'\u0454': '{\\cyrchar\\cyrie}',
u'\u0455': '{\\cyrchar\\cyrdze}',
u'\u0456': '{\\cyrchar\\cyrii}',
u'\u0457': '{\\cyrchar\\cyryi}',
u'\u0458': '{\\cyrchar\\cyrje}',
u'\u0459': '{\\cyrchar\\cyrlje}',
u'\u045a': '{\\cyrchar\\cyrnje}',
u'\u045b': '{\\cyrchar\\cyrtshe}',
u'\u045c': "{\\cyrchar{\\'\\cyrk}}",
u'\u045e': '{\\cyrchar\\cyrushrt}',
u'\u045f': '{\\cyrchar\\cyrdzhe}',
u'\u0460': '{\\cyrchar\\CYROMEGA}',
u'\u0461': '{\\cyrchar\\cyromega}',
u'\u0462': '{\\cyrchar\\CYRYAT}',
u'\u0464': '{\\cyrchar\\CYRIOTE}',
u'\u0465': '{\\cyrchar\\cyriote}',
u'\u0466': '{\\cyrchar\\CYRLYUS}',
u'\u0467': '{\\cyrchar\\cyrlyus}',
u'\u0468': '{\\cyrchar\\CYRIOTLYUS}',
u'\u0469': '{\\cyrchar\\cyriotlyus}',
u'\u046a': '{\\cyrchar\\CYRBYUS}',
u'\u046c': '{\\cyrchar\\CYRIOTBYUS}',
u'\u046d': '{\\cyrchar\\cyriotbyus}',
u'\u046e': '{\\cyrchar\\CYRKSI}',
u'\u046f': '{\\cyrchar\\cyrksi}',
u'\u0470': '{\\cyrchar\\CYRPSI}',
u'\u0471': '{\\cyrchar\\cyrpsi}',
u'\u0472': '{\\cyrchar\\CYRFITA}',
u'\u0474': '{\\cyrchar\\CYRIZH}',
u'\u0478': '{\\cyrchar\\CYRUK}',
u'\u0479': '{\\cyrchar\\cyruk}',
u'\u047a': '{\\cyrchar\\CYROMEGARND}',
u'\u047b': '{\\cyrchar\\cyromegarnd}',
u'\u047c': '{\\cyrchar\\CYROMEGATITLO}',
u'\u047d': '{\\cyrchar\\cyromegatitlo}',
u'\u047e': '{\\cyrchar\\CYROT}',
u'\u047f': '{\\cyrchar\\cyrot}',
u'\u0480': '{\\cyrchar\\CYRKOPPA}',
u'\u0481': '{\\cyrchar\\cyrkoppa}',
u'\u0482': '{\\cyrchar\\cyrthousands}',
u'\u0488': '{\\cyrchar\\cyrhundredthousands}',
u'\u0489': '{\\cyrchar\\cyrmillions}',
u'\u048c': '{\\cyrchar\\CYRSEMISFTSN}',
u'\u048d': '{\\cyrchar\\cyrsemisftsn}',
u'\u048e': '{\\cyrchar\\CYRRTICK}',
u'\u048f': '{\\cyrchar\\cyrrtick}',
u'\u0490': '{\\cyrchar\\CYRGUP}',
u'\u0491': '{\\cyrchar\\cyrgup}',
u'\u0492': '{\\cyrchar\\CYRGHCRS}',
u'\u0493': '{\\cyrchar\\cyrghcrs}',
u'\u0494': '{\\cyrchar\\CYRGHK}',
u'\u0495': '{\\cyrchar\\cyrghk}',
u'\u0496': '{\\cyrchar\\CYRZHDSC}',
u'\u0497': '{\\cyrchar\\cyrzhdsc}',
u'\u0498': '{\\cyrchar\\CYRZDSC}',
u'\u0499': '{\\cyrchar\\cyrzdsc}',
u'\u049a': '{\\cyrchar\\CYRKDSC}',
u'\u049b': '{\\cyrchar\\cyrkdsc}',
u'\u049c': '{\\cyrchar\\CYRKVCRS}',
u'\u049d': '{\\cyrchar\\cyrkvcrs}',
u'\u049e': '{\\cyrchar\\CYRKHCRS}',
u'\u049f': '{\\cyrchar\\cyrkhcrs}',
u'\u04a0': '{\\cyrchar\\CYRKBEAK}',
u'\u04a1': '{\\cyrchar\\cyrkbeak}',
u'\u04a2': '{\\cyrchar\\CYRNDSC}',
u'\u04a3': '{\\cyrchar\\cyrndsc}',
u'\u04a4': '{\\cyrchar\\CYRNG}',
u'\u04a5': '{\\cyrchar\\cyrng}',
u'\u04a6': '{\\cyrchar\\CYRPHK}',
u'\u04a7': '{\\cyrchar\\cyrphk}',
u'\u04a8': '{\\cyrchar\\CYRABHHA}',
u'\u04a9': '{\\cyrchar\\cyrabhha}',
u'\u04aa': '{\\cyrchar\\CYRSDSC}',
u'\u04ab': '{\\cyrchar\\cyrsdsc}',
u'\u04ac': '{\\cyrchar\\CYRTDSC}',
u'\u04ad': '{\\cyrchar\\cyrtdsc}',
u'\u04ae': '{\\cyrchar\\CYRY}',
u'\u04af': '{\\cyrchar\\cyry}',
u'\u04b0': '{\\cyrchar\\CYRYHCRS}',
u'\u04b1': '{\\cyrchar\\cyryhcrs}',
u'\u04b2': '{\\cyrchar\\CYRHDSC}',
u'\u04b3': '{\\cyrchar\\cyrhdsc}',
u'\u04b4': '{\\cyrchar\\CYRTETSE}',
u'\u04b5': '{\\cyrchar\\cyrtetse}',
u'\u04b6': '{\\cyrchar\\CYRCHRDSC}',
u'\u04b7': '{\\cyrchar\\cyrchrdsc}',
u'\u04b8': '{\\cyrchar\\CYRCHVCRS}',
u'\u04b9': '{\\cyrchar\\cyrchvcrs}',
u'\u04ba': '{\\cyrchar\\CYRSHHA}',
u'\u04bb': '{\\cyrchar\\cyrshha}',
u'\u04bc': '{\\cyrchar\\CYRABHCH}',
u'\u04bd': '{\\cyrchar\\cyrabhch}',
u'\u04be': '{\\cyrchar\\CYRABHCHDSC}',
u'\u04bf': '{\\cyrchar\\cyrabhchdsc}',
u'\u04c0': '{\\cyrchar\\CYRpalochka}',
u'\u04c3': '{\\cyrchar\\CYRKHK}',
u'\u04c4': '{\\cyrchar\\cyrkhk}',
u'\u04c7': '{\\cyrchar\\CYRNHK}',
u'\u04c8': '{\\cyrchar\\cyrnhk}',
u'\u04cb': '{\\cyrchar\\CYRCHLDSC}',
u'\u04cc': '{\\cyrchar\\cyrchldsc}',
u'\u04d4': '{\\cyrchar\\CYRAE}',
u'\u04d5': '{\\cyrchar\\cyrae}',
u'\u04d8': '{\\cyrchar\\CYRSCHWA}',
u'\u04d9': '{\\cyrchar\\cyrschwa}',
u'\u04e0': '{\\cyrchar\\CYRABHDZE}',
u'\u04e1': '{\\cyrchar\\cyrabhdze}',
u'\u04e8': '{\\cyrchar\\CYROTLD}',
u'\u04e9': '{\\cyrchar\\cyrotld}',
u'\u2002': '{\\hspace{0.6em}}',
u'\u2003': '{\\hspace{1em}}',
u'\u2004': '{\\hspace{0.33em}}',
u'\u2005': '{\\hspace{0.25em}}',
u'\u2006': '{\\hspace{0.166em}}',
u'\u2007': '{\\hphantom{0}}',
u'\u2008': '{\\hphantom{,}}',
u'\u2009': '{\\hspace{0.167em}}',
u'\u200a': '$\\mkern1mu$',
u'\u2010': '{-}',
u'\u2013': '{\\textendash}',
u'\u2014': '{\\textemdash}',
u'\u2015': '{\\rule{1em}{1pt}}',
u'\u2016': '$\\Vert$',
u'\u2018': '{`}',
u'\u2019': "{'}",
u'\u201a': '{,}',
u'\u201b': '$\\Elzreapos$',
u'\u201c': '{\\textquotedblleft}',
u'\u201d': '{\\textquotedblright}',
u'\u201e': '{,,}',
u'\u2020': '{\\textdagger}',
u'\u2021': '{\\textdaggerdbl}',
u'\u2022': '{\\textbullet}',
u'\u2024': '{.}',
u'\u2025': '{..}',
u'\u2026': '{\\ldots}',
u'\u2030': '{\\textperthousand}',
u'\u2031': '{\\textpertenthousand}',
u'\u2032': "${'}$",
u'\u2033': "${''}$",
u'\u2034': "${'''}$",
u'\u2035': '$\\backprime$',
u'\u2039': '{\\guilsinglleft}',
u'\u203a': '{\\guilsinglright}',
u'\u2057': "$''''$",
u'\u205f': '{\\mkern4mu}',
u'\u2060': '{\\nolinebreak}',
u'\u20a7': '{\\ensuremath{\\Elzpes}}',
u'\u20ac': '{\\mbox{\\texteuro}}',
u'\u20db': '$\\dddot$',
u'\u20dc': '$\\ddddot$',
u'\u2102': '$\\mathbb{C}$',
u'\u210a': '{\\mathscr{g}}',
u'\u210b': '$\\mathscr{H}$',
u'\u210c': '$\\mathfrak{H}$',
u'\u210d': '$\\mathbb{H}$',
u'\u210f': '$\\hslash$',
u'\u2110': '$\\mathscr{I}$',
u'\u2111': '$\\mathfrak{I}$',
u'\u2112': '$\\mathscr{L}$',
u'\u2113': '$\\mathscr{l}$',
u'\u2115': '$\\mathbb{N}$',
u'\u2116': '{\\cyrchar\\textnumero}',
u'\u2118': '$\\wp$',
u'\u2119': '$\\mathbb{P}$',
u'\u211a': '$\\mathbb{Q}$',
u'\u211b': '$\\mathscr{R}$',
u'\u211c': '$\\mathfrak{R}$',
u'\u211d': '$\\mathbb{R}$',
u'\u211e': '$\\Elzxrat$',
u'\u2122': '{\\texttrademark}',
u'\u2124': '$\\mathbb{Z}$',
u'\u2126': '$\\Omega$',
u'\u2127': '$\\mho$',
u'\u2128': '$\\mathfrak{Z}$',
u'\u2129': '$\\ElsevierGlyph{2129}$',
u'\u212b': '{\\AA}',
u'\u212c': '$\\mathscr{B}$',
u'\u212d': '$\\mathfrak{C}$',
u'\u212f': '$\\mathscr{e}$',
u'\u2130': '$\\mathscr{E}$',
u'\u2131': '$\\mathscr{F}$',
u'\u2133': '$\\mathscr{M}$',
u'\u2134': '$\\mathscr{o}$',
u'\u2135': '$\\aleph$',
u'\u2136': '$\\beth$',
u'\u2137': '$\\gimel$',
u'\u2138': '$\\daleth$',
u'\u2153': '$\\textfrac{1}{3}$',
u'\u2154': '$\\textfrac{2}{3}$',
u'\u2155': '$\\textfrac{1}{5}$',
u'\u2156': '$\\textfrac{2}{5}$',
u'\u2157': '$\\textfrac{3}{5}$',
u'\u2158': '$\\textfrac{4}{5}$',
u'\u2159': '$\\textfrac{1}{6}$',
u'\u215a': '$\\textfrac{5}{6}$',
u'\u215b': '$\\textfrac{1}{8}$',
u'\u215c': '$\\textfrac{3}{8}$',
u'\u215d': '$\\textfrac{5}{8}$',
u'\u215e': '$\\textfrac{7}{8}$',
u'\u2190': '$\\leftarrow$',
u'\u2191': '$\\uparrow$',
u'\u2192': '$\\rightarrow$',
u'\u2193': '$\\downarrow$',
u'\u2194': '$\\leftrightarrow$',
u'\u2195': '$\\updownarrow$',
u'\u2196': '$\\nwarrow$',
u'\u2197': '$\\nearrow$',
u'\u2198': '$\\searrow$',
u'\u2199': '$\\swarrow$',
u'\u219a': '$\\nleftarrow$',
u'\u219b': '$\\nrightarrow$',
u'\u219c': '$\\arrowwaveright$',
u'\u219d': '$\\arrowwaveright$',
u'\u219e': '$\\twoheadleftarrow$',
u'\u21a0': '$\\twoheadrightarrow$',
u'\u21a2': '$\\leftarrowtail$',
u'\u21a3': '$\\rightarrowtail$',
u'\u21a6': '$\\mapsto$',
u'\u21a9': '$\\hookleftarrow$',
u'\u21aa': '$\\hookrightarrow$',
u'\u21ab': '$\\looparrowleft$',
u'\u21ac': '$\\looparrowright$',
u'\u21ad': '$\\leftrightsquigarrow$',
u'\u21ae': '$\\nleftrightarrow$',
u'\u21b0': '$\\Lsh$',
u'\u21b1': '$\\Rsh$',
u'\u21b3': '$\\ElsevierGlyph{21B3}$',
u'\u21b6': '$\\curvearrowleft$',
u'\u21b7': '$\\curvearrowright$',
u'\u21ba': '$\\circlearrowleft$',
u'\u21bb': '$\\circlearrowright$',
u'\u21bc': '$\\leftharpoonup$',
u'\u21bd': '$\\leftharpoondown$',
u'\u21be': '$\\upharpoonright$',
u'\u21bf': '$\\upharpoonleft$',
u'\u21c0': '$\\rightharpoonup$',
u'\u21c1': '$\\rightharpoondown$',
u'\u21c2': '$\\downharpoonright$',
u'\u21c3': '$\\downharpoonleft$',
u'\u21c4': '$\\rightleftarrows$',
u'\u21c5': '$\\dblarrowupdown$',
u'\u21c6': '$\\leftrightarrows$',
u'\u21c7': '$\\leftleftarrows$',
u'\u21c8': '$\\upuparrows$',
u'\u21c9': '$\\rightrightarrows$',
u'\u21ca': '$\\downdownarrows$',
u'\u21cb': '$\\leftrightharpoons$',
u'\u21cc': '$\\rightleftharpoons$',
u'\u21cd': '$\\nLeftarrow$',
u'\u21ce': '$\\nLeftrightarrow$',
u'\u21cf': '$\\nRightarrow$',
u'\u21d0': '$\\Leftarrow$',
u'\u21d1': '$\\Uparrow$',
u'\u21d2': '$\\Rightarrow$',
u'\u21d3': '$\\Downarrow$',
u'\u21d4': '$\\Leftrightarrow$',
u'\u21d5': '$\\Updownarrow$',
u'\u21da': '$\\Lleftarrow$',
u'\u21db': '$\\Rrightarrow$',
u'\u21dd': '$\\rightsquigarrow$',
u'\u21f5': '$\\DownArrowUpArrow$',
u'\u2200': '$\\forall$',
u'\u2201': '$\\complement$',
u'\u2202': '$\\partial$',
u'\u2203': '$\\exists$',
u'\u2204': '$\\nexists$',
u'\u2205': '$\\varnothing$',
u'\u2207': '$\\nabla$',
u'\u2208': '$\\in$',
u'\u2209': '$\\not\\in$',
u'\u220b': '$\\ni$',
u'\u220c': '$\\not\\ni$',
u'\u220f': '$\\prod$',
u'\u2210': '$\\coprod$',
u'\u2211': '$\\sum$',
u'\u2212': '{-}',
u'\u2213': '$\\mp$',
u'\u2214': '$\\dotplus$',
u'\u2216': '$\\setminus$',
u'\u2217': '${_\\ast}$',
u'\u2218': '$\\circ$',
u'\u2219': '$\\bullet$',
u'\u221a': '$\\surd$',
u'\u221d': '$\\propto$',
u'\u221e': '$\\infty$',
u'\u221f': '$\\rightangle$',
u'\u2220': '$\\angle$',
u'\u2221': '$\\measuredangle$',
u'\u2222': '$\\sphericalangle$',
u'\u2223': '$\\mid$',
u'\u2224': '$\\nmid$',
u'\u2225': '$\\parallel$',
u'\u2226': '$\\nparallel$',
u'\u2227': '$\\wedge$',
u'\u2228': '$\\vee$',
u'\u2229': '$\\cap$',
u'\u222a': '$\\cup$',
u'\u222b': '$\\int$',
u'\u222c': '$\\int\\!\\int$',
u'\u222d': '$\\int\\!\\int\\!\\int$',
u'\u222e': '$\\oint$',
u'\u222f': '$\\surfintegral$',
u'\u2230': '$\\volintegral$',
u'\u2231': '$\\clwintegral$',
u'\u2232': '$\\ElsevierGlyph{2232}$',
u'\u2233': '$\\ElsevierGlyph{2233}$',
u'\u2234': '$\\therefore$',
u'\u2235': '$\\because$',
u'\u2237': '$\\Colon$',
u'\u2238': '$\\ElsevierGlyph{2238}$',
u'\u223a': '$\\mathbin{{:}\\!\\!{-}\\!\\!{:}}$',
u'\u223b': '$\\homothetic$',
u'\u223c': '$\\sim$',
u'\u223d': '$\\backsim$',
u'\u223e': '$\\lazysinv$',
u'\u2240': '$\\wr$',
u'\u2241': '$\\not\\sim$',
u'\u2242': '$\\ElsevierGlyph{2242}$',
u'\u2243': '$\\simeq$',
u'\u2244': '$\\not\\simeq$',
u'\u2245': '$\\cong$',
u'\u2246': '$\\approxnotequal$',
u'\u2247': '$\\not\\cong$',
u'\u2248': '$\\approx$',
u'\u2249': '$\\not\\approx$',
u'\u224a': '$\\approxeq$',
u'\u224b': '$\\tildetrpl$',
u'\u224c': '$\\allequal$',
u'\u224d': '$\\asymp$',
u'\u224e': '$\\Bumpeq$',
u'\u224f': '$\\bumpeq$',
u'\u2250': '$\\doteq$',
u'\u2251': '$\\doteqdot$',
u'\u2252': '$\\fallingdotseq$',
u'\u2253': '$\\risingdotseq$',
u'\u2254': '{:=}',
u'\u2255': '$=:$',
u'\u2256': '$\\eqcirc$',
u'\u2257': '$\\circeq$',
u'\u2259': '$\\estimates$',
u'\u225a': '$\\ElsevierGlyph{225A}$',
u'\u225b': '$\\starequal$',
u'\u225c': '$\\triangleq$',
u'\u225f': '$\\ElsevierGlyph{225F}$',
u'\u2260': '$\\not =$',
u'\u2261': '$\\equiv$',
u'\u2262': '$\\not\\equiv$',
u'\u2264': '$\\leq$',
u'\u2265': '$\\geq$',
u'\u2266': '$\\leqq$',
u'\u2267': '$\\geqq$',
u'\u2268': '$\\lneqq$',
u'\u2269': '$\\gneqq$',
u'\u226a': '$\\ll$',
u'\u226b': '$\\gg$',
u'\u226c': '$\\between$',
u'\u226d': '$\\not\\kern-0.3em\\times$',
u'\u226e': '$\\not<$',
u'\u226f': '$\\not>$',
u'\u2270': '$\\not\\leq$',
u'\u2271': '$\\not\\geq$',
u'\u2272': '$\\lessequivlnt$',
u'\u2273': '$\\greaterequivlnt$',
u'\u2274': '$\\ElsevierGlyph{2274}$',
u'\u2275': '$\\ElsevierGlyph{2275}$',
u'\u2276': '$\\lessgtr$',
u'\u2277': '$\\gtrless$',
u'\u2278': '$\\notlessgreater$',
u'\u2279': '$\\notgreaterless$',
u'\u227a': '$\\prec$',
u'\u227b': '$\\succ$',
u'\u227c': '$\\preccurlyeq$',
u'\u227d': '$\\succcurlyeq$',
u'\u227e': '$\\precapprox$',
u'\u227f': '$\\succapprox$',
u'\u2280': '$\\not\\prec$',
u'\u2281': '$\\not\\succ$',
u'\u2282': '$\\subset$',
u'\u2283': '$\\supset$',
u'\u2284': '$\\not\\subset$',
u'\u2285': '$\\not\\supset$',
u'\u2286': '$\\subseteq$',
u'\u2287': '$\\supseteq$',
u'\u2288': '$\\not\\subseteq$',
u'\u2289': '$\\not\\supseteq$',
u'\u228a': '$\\subsetneq$',
u'\u228b': '$\\supsetneq$',
u'\u228e': '$\\uplus$',
u'\u228f': '$\\sqsubset$',
u'\u2290': '$\\sqsupset$',
u'\u2291': '$\\sqsubseteq$',
u'\u2292': '$\\sqsupseteq$',
u'\u2293': '$\\sqcap$',
u'\u2294': '$\\sqcup$',
u'\u2295': '$\\oplus$',
u'\u2296': '$\\ominus$',
u'\u2297': '$\\otimes$',
u'\u2298': '$\\oslash$',
u'\u2299': '$\\odot$',
u'\u229a': '$\\circledcirc$',
u'\u229b': '$\\circledast$',
u'\u229d': '$\\circleddash$',
u'\u229e': '$\\boxplus$',
u'\u229f': '$\\boxminus$',
u'\u22a0': '$\\boxtimes$',
u'\u22a1': '$\\boxdot$',
u'\u22a2': '$\\vdash$',
u'\u22a3': '$\\dashv$',
u'\u22a4': '$\\top$',
u'\u22a5': '$\\perp$',
u'\u22a7': '$\\truestate$',
u'\u22a8': '$\\forcesextra$',
u'\u22a9': '$\\Vdash$',
u'\u22aa': '$\\Vvdash$',
u'\u22ab': '$\\VDash$',
u'\u22ac': '$\\nvdash$',
u'\u22ad': '$\\nvDash$',
u'\u22ae': '$\\nVdash$',
u'\u22af': '$\\nVDash$',
u'\u22b2': '$\\vartriangleleft$',
u'\u22b3': '$\\vartriangleright$',
u'\u22b4': '$\\trianglelefteq$',
u'\u22b5': '$\\trianglerighteq$',
u'\u22b6': '$\\original$',
u'\u22b7': '$\\image$',
u'\u22b8': '$\\multimap$',
u'\u22b9': '$\\hermitconjmatrix$',
u'\u22ba': '$\\intercal$',
u'\u22bb': '$\\veebar$',
u'\u22be': '$\\rightanglearc$',
u'\u22c0': '$\\ElsevierGlyph{22C0}$',
u'\u22c1': '$\\ElsevierGlyph{22C1}$',
u'\u22c2': '$\\bigcap$',
u'\u22c3': '$\\bigcup$',
u'\u22c4': '$\\diamond$',
u'\u22c5': '$\\cdot$',
u'\u22c6': '$\\star$',
u'\u22c7': '$\\divideontimes$',
u'\u22c8': '$\\bowtie$',
u'\u22c9': '$\\ltimes$',
u'\u22ca': '$\\rtimes$',
u'\u22cb': '$\\leftthreetimes$',
u'\u22cc': '$\\rightthreetimes$',
u'\u22cd': '$\\backsimeq$',
u'\u22ce': '$\\curlyvee$',
u'\u22cf': '$\\curlywedge$',
u'\u22d0': '$\\Subset$',
u'\u22d1': '$\\Supset$',
u'\u22d2': '$\\Cap$',
u'\u22d3': '$\\Cup$',
u'\u22d4': '$\\pitchfork$',
u'\u22d6': '$\\lessdot$',
u'\u22d7': '$\\gtrdot$',
u'\u22d8': '$\\verymuchless$',
u'\u22d9': '$\\verymuchgreater$',
u'\u22da': '$\\lesseqgtr$',
u'\u22db': '$\\gtreqless$',
u'\u22de': '$\\curlyeqprec$',
u'\u22df': '$\\curlyeqsucc$',
u'\u22e2': '$\\not\\sqsubseteq$',
u'\u22e3': '$\\not\\sqsupseteq$',
u'\u22e5': '$\\Elzsqspne$',
u'\u22e6': '$\\lnsim$',
u'\u22e7': '$\\gnsim$',
u'\u22e8': '$\\precedesnotsimilar$',
u'\u22e9': '$\\succnsim$',
u'\u22ea': '$\\ntriangleleft$',
u'\u22eb': '$\\ntriangleright$',
u'\u22ec': '$\\ntrianglelefteq$',
u'\u22ed': '$\\ntrianglerighteq$',
u'\u22ee': '$\\vdots$',
u'\u22ef': '$\\cdots$',
u'\u22f0': '$\\upslopeellipsis$',
u'\u22f1': '$\\downslopeellipsis$',
u'\u2305': '{\\barwedge}',
u'\u2306': '$\\perspcorrespond$',
u'\u2308': '$\\lceil$',
u'\u2309': '$\\rceil$',
u'\u230a': '$\\lfloor$',
u'\u230b': '$\\rfloor$',
u'\u2315': '$\\recorder$',
u'\u2316': '$\\mathchar"2208$',
u'\u231c': '$\\ulcorner$',
u'\u231d': '$\\urcorner$',
u'\u231e': '$\\llcorner$',
u'\u231f': '$\\lrcorner$',
u'\u2322': '$\\frown$',
u'\u2323': '$\\smile$',
u'\u2329': '$\\langle$',
u'\u232a': '$\\rangle$',
u'\u233d': '$\\ElsevierGlyph{E838}$',
u'\u23a3': '$\\Elzdlcorn$',
u'\u23b0': '$\\lmoustache$',
u'\u23b1': '$\\rmoustache$',
u'\u2423': '{\\textvisiblespace}',
u'\u2460': '{\\ding{172}}',
u'\u2461': '{\\ding{173}}',
u'\u2462': '{\\ding{174}}',
u'\u2463': '{\\ding{175}}',
u'\u2464': '{\\ding{176}}',
u'\u2465': '{\\ding{177}}',
u'\u2466': '{\\ding{178}}',
u'\u2467': '{\\ding{179}}',
u'\u2468': '{\\ding{180}}',
u'\u2469': '{\\ding{181}}',
u'\u24c8': '$\\circledS$',
u'\u2506': '$\\Elzdshfnc$',
u'\u2519': '$\\Elzsqfnw$',
u'\u2571': '$\\diagup$',
u'\u25a0': '{\\ding{110}}',
u'\u25a1': '$\\square$',
u'\u25aa': '$\\blacksquare$',
u'\u25ad': '$\\fbox{~~}$',
u'\u25af': '$\\Elzvrecto$',
u'\u25b1': '$\\ElsevierGlyph{E381}$',
u'\u25b2': '{\\ding{115}}',
u'\u25b3': '$\\bigtriangleup$',
u'\u25b4': '$\\blacktriangle$',
u'\u25b5': '$\\vartriangle$',
u'\u25b8': '$\\blacktriangleright$',
u'\u25b9': '$\\triangleright$',
u'\u25bc': '{\\ding{116}}',
u'\u25bd': '$\\bigtriangledown$',
u'\u25be': '$\\blacktriangledown$',
u'\u25bf': '$\\triangledown$',
u'\u25c2': '$\\blacktriangleleft$',
u'\u25c3': '$\\triangleleft$',
u'\u25c6': '{\\ding{117}}',
u'\u25ca': '$\\lozenge$',
u'\u25cb': '$\\bigcirc$',
u'\u25cf': '{\\ding{108}}',
u'\u25d0': '$\\Elzcirfl$',
u'\u25d1': '$\\Elzcirfr$',
u'\u25d2': '$\\Elzcirfb$',
u'\u25d7': '{\\ding{119}}',
u'\u25d8': '$\\Elzrvbull$',
u'\u25e7': '$\\Elzsqfl$',
u'\u25e8': '$\\Elzsqfr$',
u'\u25ea': '$\\Elzsqfse$',
u'\u25ef': '$\\bigcirc$',
u'\u2605': '{\\ding{72}}',
u'\u2606': '{\\ding{73}}',
u'\u260e': '{\\ding{37}}',
u'\u261b': '{\\ding{42}}',
u'\u261e': '{\\ding{43}}',
u'\u263e': '{\\rightmoon}',
u'\u263f': '{\\mercury}',
u'\u2640': '{\\venus}',
u'\u2642': '{\\male}',
u'\u2643': '{\\jupiter}',
u'\u2644': '{\\saturn}',
u'\u2645': '{\\uranus}',
u'\u2646': '{\\neptune}',
u'\u2647': '{\\pluto}',
u'\u2648': '{\\aries}',
u'\u2649': '{\\taurus}',
u'\u264a': '{\\gemini}',
u'\u264b': '{\\cancer}',
u'\u264c': '{\\leo}',
u'\u264d': '{\\virgo}',
u'\u264e': '{\\libra}',
u'\u264f': '{\\scorpio}',
u'\u2650': '{\\sagittarius}',
u'\u2651': '{\\capricornus}',
u'\u2652': '{\\aquarius}',
u'\u2653': '{\\pisces}',
u'\u2660': '{\\ding{171}}',
u'\u2662': '$\\diamond$',
u'\u2663': '{\\ding{168}}',
u'\u2665': '{\\ding{170}}',
u'\u2666': '{\\ding{169}}',
u'\u2669': '{\\quarternote}',
u'\u266a': '{\\eighthnote}',
u'\u266d': '$\\flat$',
u'\u266e': '$\\natural$',
u'\u266f': '$\\sharp$',
u'\u2701': '{\\ding{33}}',
u'\u2702': '{\\ding{34}}',
u'\u2703': '{\\ding{35}}',
u'\u2704': '{\\ding{36}}',
u'\u2706': '{\\ding{38}}',
u'\u2707': '{\\ding{39}}',
u'\u2708': '{\\ding{40}}',
u'\u2709': '{\\ding{41}}',
u'\u270c': '{\\ding{44}}',
u'\u270d': '{\\ding{45}}',
u'\u270e': '{\\ding{46}}',
u'\u270f': '{\\ding{47}}',
u'\u2710': '{\\ding{48}}',
u'\u2711': '{\\ding{49}}',
u'\u2712': '{\\ding{50}}',
u'\u2713': '{\\ding{51}}',
u'\u2714': '{\\ding{52}}',
u'\u2715': '{\\ding{53}}',
u'\u2716': '{\\ding{54}}',
u'\u2717': '{\\ding{55}}',
u'\u2718': '{\\ding{56}}',
u'\u2719': '{\\ding{57}}',
u'\u271a': '{\\ding{58}}',
u'\u271b': '{\\ding{59}}',
u'\u271c': '{\\ding{60}}',
u'\u271d': '{\\ding{61}}',
u'\u271e': '{\\ding{62}}',
u'\u271f': '{\\ding{63}}',
u'\u2720': '{\\ding{64}}',
u'\u2721': '{\\ding{65}}',
u'\u2722': '{\\ding{66}}',
u'\u2723': '{\\ding{67}}',
u'\u2724': '{\\ding{68}}',
u'\u2725': '{\\ding{69}}',
u'\u2726': '{\\ding{70}}',
u'\u2727': '{\\ding{71}}',
u'\u2729': '{\\ding{73}}',
u'\u272a': '{\\ding{74}}',
u'\u272b': '{\\ding{75}}',
u'\u272c': '{\\ding{76}}',
u'\u272d': '{\\ding{77}}',
u'\u272e': '{\\ding{78}}',
u'\u272f': '{\\ding{79}}',
u'\u2730': '{\\ding{80}}',
u'\u2731': '{\\ding{81}}',
u'\u2732': '{\\ding{82}}',
u'\u2733': '{\\ding{83}}',
u'\u2734': '{\\ding{84}}',
u'\u2735': '{\\ding{85}}',
u'\u2736': '{\\ding{86}}',
u'\u2737': '{\\ding{87}}',
u'\u2738': '{\\ding{88}}',
u'\u2739': '{\\ding{89}}',
u'\u273a': '{\\ding{90}}',
u'\u273b': '{\\ding{91}}',
u'\u273c': '{\\ding{92}}',
u'\u273d': '{\\ding{93}}',
u'\u273e': '{\\ding{94}}',
u'\u273f': '{\\ding{95}}',
u'\u2740': '{\\ding{96}}',
u'\u2741': '{\\ding{97}}',
u'\u2742': '{\\ding{98}}',
u'\u2743': '{\\ding{99}}',
u'\u2744': '{\\ding{100}}',
u'\u2745': '{\\ding{101}}',
u'\u2746': '{\\ding{102}}',
u'\u2747': '{\\ding{103}}',
u'\u2748': '{\\ding{104}}',
u'\u2749': '{\\ding{105}}',
u'\u274a': '{\\ding{106}}',
u'\u274b': '{\\ding{107}}',
u'\u274d': '{\\ding{109}}',
u'\u274f': '{\\ding{111}}',
u'\u2750': '{\\ding{112}}',
u'\u2751': '{\\ding{113}}',
u'\u2752': '{\\ding{114}}',
u'\u2756': '{\\ding{118}}',
u'\u2758': '{\\ding{120}}',
u'\u2759': '{\\ding{121}}',
u'\u275a': '{\\ding{122}}',
u'\u275b': '{\\ding{123}}',
u'\u275c': '{\\ding{124}}',
u'\u275d': '{\\ding{125}}',
u'\u275e': '{\\ding{126}}',
u'\u2761': '{\\ding{161}}',
u'\u2762': '{\\ding{162}}',
u'\u2763': '{\\ding{163}}',
u'\u2764': '{\\ding{164}}',
u'\u2765': '{\\ding{165}}',
u'\u2766': '{\\ding{166}}',
u'\u2767': '{\\ding{167}}',
u'\u2776': '{\\ding{182}}',
u'\u2777': '{\\ding{183}}',
u'\u2778': '{\\ding{184}}',
u'\u2779': '{\\ding{185}}',
u'\u277a': '{\\ding{186}}',
u'\u277b': '{\\ding{187}}',
u'\u277c': '{\\ding{188}}',
u'\u277d': '{\\ding{189}}',
u'\u277e': '{\\ding{190}}',
u'\u277f': '{\\ding{191}}',
u'\u2780': '{\\ding{192}}',
u'\u2781': '{\\ding{193}}',
u'\u2782': '{\\ding{194}}',
u'\u2783': '{\\ding{195}}',
u'\u2784': '{\\ding{196}}',
u'\u2785': '{\\ding{197}}',
u'\u2786': '{\\ding{198}}',
u'\u2787': '{\\ding{199}}',
u'\u2788': '{\\ding{200}}',
u'\u2789': '{\\ding{201}}',
u'\u278a': '{\\ding{202}}',
u'\u278b': '{\\ding{203}}',
u'\u278c': '{\\ding{204}}',
u'\u278d': '{\\ding{205}}',
u'\u278e': '{\\ding{206}}',
u'\u278f': '{\\ding{207}}',
u'\u2790': '{\\ding{208}}',
u'\u2791': '{\\ding{209}}',
u'\u2792': '{\\ding{210}}',
u'\u2793': '{\\ding{211}}',
u'\u2794': '{\\ding{212}}',
u'\u2798': '{\\ding{216}}',
u'\u2799': '{\\ding{217}}',
u'\u279a': '{\\ding{218}}',
u'\u279b': '{\\ding{219}}',
u'\u279c': '{\\ding{220}}',
u'\u279d': '{\\ding{221}}',
u'\u279e': '{\\ding{222}}',
u'\u279f': '{\\ding{223}}',
u'\u27a0': '{\\ding{224}}',
u'\u27a1': '{\\ding{225}}',
u'\u27a2': '{\\ding{226}}',
u'\u27a3': '{\\ding{227}}',
u'\u27a4': '{\\ding{228}}',
u'\u27a5': '{\\ding{229}}',
u'\u27a6': '{\\ding{230}}',
u'\u27a7': '{\\ding{231}}',
u'\u27a8': '{\\ding{232}}',
u'\u27a9': '{\\ding{233}}',
u'\u27aa': '{\\ding{234}}',
u'\u27ab': '{\\ding{235}}',
u'\u27ac': '{\\ding{236}}',
u'\u27ad': '{\\ding{237}}',
u'\u27ae': '{\\ding{238}}',
u'\u27af': '{\\ding{239}}',
u'\u27b1': '{\\ding{241}}',
u'\u27b2': '{\\ding{242}}',
u'\u27b3': '{\\ding{243}}',
u'\u27b4': '{\\ding{244}}',
u'\u27b5': '{\\ding{245}}',
u'\u27b6': '{\\ding{246}}',
u'\u27b7': '{\\ding{247}}',
u'\u27b8': '{\\ding{248}}',
u'\u27b9': '{\\ding{249}}',
u'\u27ba': '{\\ding{250}}',
u'\u27bb': '{\\ding{251}}',
u'\u27bc': '{\\ding{252}}',
u'\u27bd': '{\\ding{253}}',
u'\u27be': '{\\ding{254}}',
u'\u27f5': '$\\longleftarrow$',
u'\u27f6': '$\\longrightarrow$',
u'\u27f7': '$\\longleftrightarrow$',
u'\u27f8': '$\\Longleftarrow$',
u'\u27f9': '$\\Longrightarrow$',
u'\u27fa': '$\\Longleftrightarrow$',
u'\u27fc': '$\\longmapsto$',
u'\u27ff': '$\\sim\\joinrel\\leadsto$',
u'\u2905': '$\\ElsevierGlyph{E212}$',
u'\u2912': '$\\UpArrowBar$',
u'\u2913': '$\\DownArrowBar$',
u'\u2923': '$\\ElsevierGlyph{E20C}$',
u'\u2924': '$\\ElsevierGlyph{E20D}$',
u'\u2925': '$\\ElsevierGlyph{E20B}$',
u'\u2926': '$\\ElsevierGlyph{E20A}$',
u'\u2927': '$\\ElsevierGlyph{E211}$',
u'\u2928': '$\\ElsevierGlyph{E20E}$',
u'\u2929': '$\\ElsevierGlyph{E20F}$',
u'\u292a': '$\\ElsevierGlyph{E210}$',
u'\u2933': '$\\ElsevierGlyph{E21C}$',
u'\u2936': '$\\ElsevierGlyph{E21A}$',
u'\u2937': '$\\ElsevierGlyph{E219}$',
u'\u2940': '$\\Elolarr$',
u'\u2941': '$\\Elorarr$',
u'\u2942': '$\\ElzRlarr$',
u'\u2944': '$\\ElzrLarr$',
u'\u2947': '$\\Elzrarrx$',
u'\u294e': '$\\LeftRightVector$',
u'\u294f': '$\\RightUpDownVector$',
u'\u2950': '$\\DownLeftRightVector$',
u'\u2951': '$\\LeftUpDownVector$',
u'\u2952': '$\\LeftVectorBar$',
u'\u2953': '$\\RightVectorBar$',
u'\u2954': '$\\RightUpVectorBar$',
u'\u2955': '$\\RightDownVectorBar$',
u'\u2956': '$\\DownLeftVectorBar$',
u'\u2957': '$\\DownRightVectorBar$',
u'\u2958': '$\\LeftUpVectorBar$',
u'\u2959': '$\\LeftDownVectorBar$',
u'\u295a': '$\\LeftTeeVector$',
u'\u295b': '$\\RightTeeVector$',
u'\u295c': '$\\RightUpTeeVector$',
u'\u295d': '$\\RightDownTeeVector$',
u'\u295e': '$\\DownLeftTeeVector$',
u'\u295f': '$\\DownRightTeeVector$',
u'\u2960': '$\\LeftUpTeeVector$',
u'\u2961': '$\\LeftDownTeeVector$',
u'\u296e': '$\\UpEquilibrium$',
u'\u296f': '$\\ReverseUpEquilibrium$',
u'\u2970': '$\\RoundImplies$',
u'\u297c': '$\\ElsevierGlyph{E214}$',
u'\u297d': '$\\ElsevierGlyph{E215}$',
u'\u2980': '$\\Elztfnc$',
u'\u2985': '$\\ElsevierGlyph{3018}$',
u'\u2986': '$\\Elroang$',
u'\u2993': '$<\\kern-0.58em($',
u'\u2994': '$\\ElsevierGlyph{E291}$',
u'\u2999': '$\\Elzddfnc$',
u'\u299c': '$\\Angle$',
u'\u29a0': '$\\Elzlpargt$',
u'\u29b5': '$\\ElsevierGlyph{E260}$',
u'\u29b6': '$\\ElsevierGlyph{E61B}$',
u'\u29ca': '$\\ElzLap$',
u'\u29cb': '$\\Elzdefas$',
u'\u29cf': '$\\LeftTriangleBar$',
u'\u29d0': '$\\RightTriangleBar$',
u'\u29dc': '$\\ElsevierGlyph{E372}$',
u'\u29eb': '$\\blacklozenge$',
u'\u29f4': '$\\RuleDelayed$',
u'\u2a04': '$\\Elxuplus$',
u'\u2a05': '$\\ElzThr$',
u'\u2a06': '$\\Elxsqcup$',
u'\u2a07': '$\\ElzInf$',
u'\u2a08': '$\\ElzSup$',
u'\u2a0d': '$\\ElzCint$',
u'\u2a0f': '$\\clockoint$',
u'\u2a10': '$\\ElsevierGlyph{E395}$',
u'\u2a16': '$\\sqrint$',
u'\u2a25': '$\\ElsevierGlyph{E25A}$',
u'\u2a2a': '$\\ElsevierGlyph{E25B}$',
u'\u2a2d': '$\\ElsevierGlyph{E25C}$',
u'\u2a2e': '$\\ElsevierGlyph{E25D}$',
u'\u2a2f': '$\\ElzTimes$',
u'\u2a34': '$\\ElsevierGlyph{E25E}$',
u'\u2a35': '$\\ElsevierGlyph{E25E}$',
u'\u2a3c': '$\\ElsevierGlyph{E259}$',
u'\u2a3f': '$\\amalg$',
u'\u2a53': '$\\ElzAnd$',
u'\u2a54': '$\\ElzOr$',
u'\u2a55': '$\\ElsevierGlyph{E36E}$',
u'\u2a56': '$\\ElOr$',
u'\u2a5e': '$\\perspcorrespond$',
u'\u2a5f': '$\\Elzminhat$',
u'\u2a63': '$\\ElsevierGlyph{225A}$',
u'\u2a6e': '$\\stackrel{*}{=}$',
u'\u2a75': '$\\Equal$',
u'\u2a7d': '$\\leqslant$',
u'\u2a7e': '$\\geqslant$',
u'\u2a85': '$\\lessapprox$',
u'\u2a86': '$\\gtrapprox$',
u'\u2a87': '$\\lneq$',
u'\u2a88': '$\\gneq$',
u'\u2a89': '$\\lnapprox$',
u'\u2a8a': '$\\gnapprox$',
u'\u2a8b': '$\\lesseqqgtr$',
u'\u2a8c': '$\\gtreqqless$',
u'\u2a95': '$\\eqslantless$',
u'\u2a96': '$\\eqslantgtr$',
u'\u2a9d': '$\\Pisymbol{ppi020}{117}$',
u'\u2a9e': '$\\Pisymbol{ppi020}{105}$',
u'\u2aa1': '$\\NestedLessLess$',
u'\u2aa2': '$\\NestedGreaterGreater$',
u'\u2aaf': '$\\preceq$',
u'\u2ab0': '$\\succeq$',
u'\u2ab5': '$\\precneqq$',
u'\u2ab6': '$\\succneqq$',
u'\u2ab7': '$\\precapprox$',
u'\u2ab8': '$\\succapprox$',
u'\u2ab9': '$\\precnapprox$',
u'\u2aba': '$\\succnapprox$',
u'\u2ac5': '$\\subseteqq$',
u'\u2ac6': '$\\supseteqq$',
u'\u2acb': '$\\subsetneqq$',
u'\u2acc': '$\\supsetneqq$',
u'\u2aeb': '$\\ElsevierGlyph{E30D}$',
u'\u2af6': '$\\Elztdcol$',
u'\u2afd': '${{/}\\!\\!{/}}$',
u'\u300a': '$\\ElsevierGlyph{300A}$',
u'\u300b': '$\\ElsevierGlyph{300B}$',
u'\u3018': '$\\ElsevierGlyph{3018}$',
u'\u3019': '$\\ElsevierGlyph{3019}$',
u'\u301a': '$\\openbracketleft$',
u'\u301b': '$\\openbracketright$',
u'\ufb00': '{ff}',
u'\ufb01': '{fi}',
u'\ufb02': '{fl}',
u'\ufb03': '{ffi}',
u'\ufb04': '{ffl}',
u'\U0001d400': '$\\mathbf{A}$',
u'\U0001d401': '$\\mathbf{B}$',
u'\U0001d402': '$\\mathbf{C}$',
u'\U0001d403': '$\\mathbf{D}$',
u'\U0001d404': '$\\mathbf{E}$',
u'\U0001d405': '$\\mathbf{F}$',
u'\U0001d406': '$\\mathbf{G}$',
u'\U0001d407': '$\\mathbf{H}$',
u'\U0001d408': '$\\mathbf{I}$',
u'\U0001d409': '$\\mathbf{J}$',
u'\U0001d40a': '$\\mathbf{K}$',
u'\U0001d40b': '$\\mathbf{L}$',
u'\U0001d40c': '$\\mathbf{M}$',
u'\U0001d40d': '$\\mathbf{N}$',
u'\U0001d40e': '$\\mathbf{O}$',
u'\U0001d40f': '$\\mathbf{P}$',
u'\U0001d410': '$\\mathbf{Q}$',
u'\U0001d411': '$\\mathbf{R}$',
u'\U0001d412': '$\\mathbf{S}$',
u'\U0001d413': '$\\mathbf{T}$',
u'\U0001d414': '$\\mathbf{U}$',
u'\U0001d415': '$\\mathbf{V}$',
u'\U0001d416': '$\\mathbf{W}$',
u'\U0001d417': '$\\mathbf{X}$',
u'\U0001d418': '$\\mathbf{Y}$',
u'\U0001d419': '$\\mathbf{Z}$',
u'\U0001d41a': '$\\mathbf{a}$',
u'\U0001d41b': '$\\mathbf{b}$',
u'\U0001d41c': '$\\mathbf{c}$',
u'\U0001d41d': '$\\mathbf{d}$',
u'\U0001d41e': '$\\mathbf{e}$',
u'\U0001d41f': '$\\mathbf{f}$',
u'\U0001d420': '$\\mathbf{g}$',
u'\U0001d421': '$\\mathbf{h}$',
u'\U0001d422': '$\\mathbf{i}$',
u'\U0001d423': '$\\mathbf{j}$',
u'\U0001d424': '$\\mathbf{k}$',
u'\U0001d425': '$\\mathbf{l}$',
u'\U0001d426': '$\\mathbf{m}$',
u'\U0001d427': '$\\mathbf{n}$',
u'\U0001d428': '$\\mathbf{o}$',
u'\U0001d429': '$\\mathbf{p}$',
u'\U0001d42a': '$\\mathbf{q}$',
u'\U0001d42b': '$\\mathbf{r}$',
u'\U0001d42c': '$\\mathbf{s}$',
u'\U0001d42d': '$\\mathbf{t}$',
u'\U0001d42e': '$\\mathbf{u}$',
u'\U0001d42f': '$\\mathbf{v}$',
u'\U0001d430': '$\\mathbf{w}$',
u'\U0001d431': '$\\mathbf{x}$',
u'\U0001d432': '$\\mathbf{y}$',
u'\U0001d433': '$\\mathbf{z}$',
u'\U0001d434': '$\\mathsl{A}$',
u'\U0001d435': '$\\mathsl{B}$',
u'\U0001d436': '$\\mathsl{C}$',
u'\U0001d437': '$\\mathsl{D}$',
u'\U0001d438': '$\\mathsl{E}$',
u'\U0001d439': '$\\mathsl{F}$',
u'\U0001d43a': '$\\mathsl{G}$',
u'\U0001d43b': '$\\mathsl{H}$',
u'\U0001d43c': '$\\mathsl{I}$',
u'\U0001d43d': '$\\mathsl{J}$',
u'\U0001d43e': '$\\mathsl{K}$',
u'\U0001d43f': '$\\mathsl{L}$',
u'\U0001d440': '$\\mathsl{M}$',
u'\U0001d441': '$\\mathsl{N}$',
u'\U0001d442': '$\\mathsl{O}$',
u'\U0001d443': '$\\mathsl{P}$',
u'\U0001d444': '$\\mathsl{Q}$',
u'\U0001d445': '$\\mathsl{R}$',
u'\U0001d446': '$\\mathsl{S}$',
u'\U0001d447': '$\\mathsl{T}$',
u'\U0001d448': '$\\mathsl{U}$',
u'\U0001d449': '$\\mathsl{V}$',
u'\U0001d44a': '$\\mathsl{W}$',
u'\U0001d44b': '$\\mathsl{X}$',
u'\U0001d44c': '$\\mathsl{Y}$',
u'\U0001d44d': '$\\mathsl{Z}$',
u'\U0001d44e': '$\\mathsl{a}$',
u'\U0001d44f': '$\\mathsl{b}$',
u'\U0001d450': '$\\mathsl{c}$',
u'\U0001d451': '$\\mathsl{d}$',
u'\U0001d452': '$\\mathsl{e}$',
u'\U0001d453': '$\\mathsl{f}$',
u'\U0001d454': '$\\mathsl{g}$',
u'\U0001d456': '$\\mathsl{i}$',
u'\U0001d457': '$\\mathsl{j}$',
u'\U0001d458': '$\\mathsl{k}$',
u'\U0001d459': '$\\mathsl{l}$',
u'\U0001d45a': '$\\mathsl{m}$',
u'\U0001d45b': '$\\mathsl{n}$',
u'\U0001d45c': '$\\mathsl{o}$',
u'\U0001d45d': '$\\mathsl{p}$',
u'\U0001d45e': '$\\mathsl{q}$',
u'\U0001d45f': '$\\mathsl{r}$',
u'\U0001d460': '$\\mathsl{s}$',
u'\U0001d461': '$\\mathsl{t}$',
u'\U0001d462': '$\\mathsl{u}$',
u'\U0001d463': '$\\mathsl{v}$',
u'\U0001d464': '$\\mathsl{w}$',
u'\U0001d465': '$\\mathsl{x}$',
u'\U0001d466': '$\\mathsl{y}$',
u'\U0001d467': '$\\mathsl{z}$',
u'\U0001d468': '$\\mathbit{A}$',
u'\U0001d469': '$\\mathbit{B}$',
u'\U0001d46a': '$\\mathbit{C}$',
u'\U0001d46b': '$\\mathbit{D}$',
u'\U0001d46c': '$\\mathbit{E}$',
u'\U0001d46d': '$\\mathbit{F}$',
u'\U0001d46e': '$\\mathbit{G}$',
u'\U0001d46f': '$\\mathbit{H}$',
u'\U0001d470': '$\\mathbit{I}$',
u'\U0001d471': '$\\mathbit{J}$',
u'\U0001d472': '$\\mathbit{K}$',
u'\U0001d473': '$\\mathbit{L}$',
u'\U0001d474': '$\\mathbit{M}$',
u'\U0001d475': '$\\mathbit{N}$',
u'\U0001d476': '$\\mathbit{O}$',
u'\U0001d477': '$\\mathbit{P}$',
u'\U0001d478': '$\\mathbit{Q}$',
u'\U0001d479': '$\\mathbit{R}$',
u'\U0001d47a': '$\\mathbit{S}$',
u'\U0001d47b': '$\\mathbit{T}$',
u'\U0001d47c': '$\\mathbit{U}$',
u'\U0001d47d': '$\\mathbit{V}$',
u'\U0001d47e': '$\\mathbit{W}$',
u'\U0001d47f': '$\\mathbit{X}$',
u'\U0001d480': '$\\mathbit{Y}$',
u'\U0001d481': '$\\mathbit{Z}$',
u'\U0001d482': '$\\mathbit{a}$',
u'\U0001d483': '$\\mathbit{b}$',
u'\U0001d484': '$\\mathbit{c}$',
u'\U0001d485': '$\\mathbit{d}$',
u'\U0001d486': '$\\mathbit{e}$',
u'\U0001d487': '$\\mathbit{f}$',
u'\U0001d488': '$\\mathbit{g}$',
u'\U0001d489': '$\\mathbit{h}$',
u'\U0001d48a': '$\\mathbit{i}$',
u'\U0001d48b': '$\\mathbit{j}$',
u'\U0001d48c': '$\\mathbit{k}$',
u'\U0001d48d': '$\\mathbit{l}$',
u'\U0001d48e': '$\\mathbit{m}$',
u'\U0001d48f': '$\\mathbit{n}$',
u'\U0001d490': '$\\mathbit{o}$',
u'\U0001d491': '$\\mathbit{p}$',
u'\U0001d492': '$\\mathbit{q}$',
u'\U0001d493': '$\\mathbit{r}$',
u'\U0001d494': '$\\mathbit{s}$',
u'\U0001d495': '$\\mathbit{t}$',
u'\U0001d496': '$\\mathbit{u}$',
u'\U0001d497': '$\\mathbit{v}$',
u'\U0001d498': '$\\mathbit{w}$',
u'\U0001d499': '$\\mathbit{x}$',
u'\U0001d49a': '$\\mathbit{y}$',
u'\U0001d49b': '$\\mathbit{z}$',
u'\U0001d49c': '$\\mathscr{A}$',
u'\U0001d49e': '$\\mathscr{C}$',
u'\U0001d49f': '$\\mathscr{D}$',
u'\U0001d4a2': '$\\mathscr{G}$',
u'\U0001d4a5': '$\\mathscr{J}$',
u'\U0001d4a6': '$\\mathscr{K}$',
u'\U0001d4a9': '$\\mathscr{N}$',
u'\U0001d4aa': '$\\mathscr{O}$',
u'\U0001d4ab': '$\\mathscr{P}$',
u'\U0001d4ac': '$\\mathscr{Q}$',
u'\U0001d4ae': '$\\mathscr{S}$',
u'\U0001d4af': '$\\mathscr{T}$',
u'\U0001d4b0': '$\\mathscr{U}$',
u'\U0001d4b1': '$\\mathscr{V}$',
u'\U0001d4b2': '$\\mathscr{W}$',
u'\U0001d4b3': '$\\mathscr{X}$',
u'\U0001d4b4': '$\\mathscr{Y}$',
u'\U0001d4b5': '$\\mathscr{Z}$',
u'\U0001d4b6': '$\\mathscr{a}$',
u'\U0001d4b7': '$\\mathscr{b}$',
u'\U0001d4b8': '$\\mathscr{c}$',
u'\U0001d4b9': '$\\mathscr{d}$',
u'\U0001d4bb': '$\\mathscr{f}$',
u'\U0001d4bd': '$\\mathscr{h}$',
u'\U0001d4be': '$\\mathscr{i}$',
u'\U0001d4bf': '$\\mathscr{j}$',
u'\U0001d4c0': '$\\mathscr{k}$',
u'\U0001d4c1': '$\\mathscr{l}$',
u'\U0001d4c2': '$\\mathscr{m}$',
u'\U0001d4c3': '$\\mathscr{n}$',
u'\U0001d4c5': '$\\mathscr{p}$',
u'\U0001d4c6': '$\\mathscr{q}$',
u'\U0001d4c7': '$\\mathscr{r}$',
u'\U0001d4c8': '$\\mathscr{s}$',
u'\U0001d4c9': '$\\mathscr{t}$',
u'\U0001d4ca': '$\\mathscr{u}$',
u'\U0001d4cb': '$\\mathscr{v}$',
u'\U0001d4cc': '$\\mathscr{w}$',
u'\U0001d4cd': '$\\mathscr{x}$',
u'\U0001d4ce': '$\\mathscr{y}$',
u'\U0001d4cf': '$\\mathscr{z}$',
u'\U0001d4d0': '$\\mathmit{A}$',
u'\U0001d4d1': '$\\mathmit{B}$',
u'\U0001d4d2': '$\\mathmit{C}$',
u'\U0001d4d3': '$\\mathmit{D}$',
u'\U0001d4d4': '$\\mathmit{E}$',
u'\U0001d4d5': '$\\mathmit{F}$',
u'\U0001d4d6': '$\\mathmit{G}$',
u'\U0001d4d7': '$\\mathmit{H}$',
u'\U0001d4d8': '$\\mathmit{I}$',
u'\U0001d4d9': '$\\mathmit{J}$',
u'\U0001d4da': '$\\mathmit{K}$',
u'\U0001d4db': '$\\mathmit{L}$',
u'\U0001d4dc': '$\\mathmit{M}$',
u'\U0001d4dd': '$\\mathmit{N}$',
u'\U0001d4de': '$\\mathmit{O}$',
u'\U0001d4df': '$\\mathmit{P}$',
u'\U0001d4e0': '$\\mathmit{Q}$',
u'\U0001d4e1': '$\\mathmit{R}$',
u'\U0001d4e2': '$\\mathmit{S}$',
u'\U0001d4e3': '$\\mathmit{T}$',
u'\U0001d4e4': '$\\mathmit{U}$',
u'\U0001d4e5': '$\\mathmit{V}$',
u'\U0001d4e6': '$\\mathmit{W}$',
u'\U0001d4e7': '$\\mathmit{X}$',
u'\U0001d4e8': '$\\mathmit{Y}$',
u'\U0001d4e9': '$\\mathmit{Z}$',
u'\U0001d4ea': '$\\mathmit{a}$',
u'\U0001d4eb': '$\\mathmit{b}$',
u'\U0001d4ec': '$\\mathmit{c}$',
u'\U0001d4ed': '$\\mathmit{d}$',
u'\U0001d4ee': '$\\mathmit{e}$',
u'\U0001d4ef': '$\\mathmit{f}$',
u'\U0001d4f0': '$\\mathmit{g}$',
u'\U0001d4f1': '$\\mathmit{h}$',
u'\U0001d4f2': '$\\mathmit{i}$',
u'\U0001d4f3': '$\\mathmit{j}$',
u'\U0001d4f4': '$\\mathmit{k}$',
u'\U0001d4f5': '$\\mathmit{l}$',
u'\U0001d4f6': '$\\mathmit{m}$',
u'\U0001d4f7': '$\\mathmit{n}$',
u'\U0001d4f8': '$\\mathmit{o}$',
u'\U0001d4f9': '$\\mathmit{p}$',
u'\U0001d4fa': '$\\mathmit{q}$',
u'\U0001d4fb': '$\\mathmit{r}$',
u'\U0001d4fc': '$\\mathmit{s}$',
u'\U0001d4fd': '$\\mathmit{t}$',
u'\U0001d4fe': '$\\mathmit{u}$',
u'\U0001d4ff': '$\\mathmit{v}$',
u'\U0001d500': '$\\mathmit{w}$',
u'\U0001d501': '$\\mathmit{x}$',
u'\U0001d502': '$\\mathmit{y}$',
u'\U0001d503': '$\\mathmit{z}$',
u'\U0001d504': '$\\mathfrak{A}$',
u'\U0001d505': '$\\mathfrak{B}$',
u'\U0001d507': '$\\mathfrak{D}$',
u'\U0001d508': '$\\mathfrak{E}$',
u'\U0001d509': '$\\mathfrak{F}$',
u'\U0001d50a': '$\\mathfrak{G}$',
u'\U0001d50d': '$\\mathfrak{J}$',
u'\U0001d50e': '$\\mathfrak{K}$',
u'\U0001d50f': '$\\mathfrak{L}$',
u'\U0001d510': '$\\mathfrak{M}$',
u'\U0001d511': '$\\mathfrak{N}$',
u'\U0001d512': '$\\mathfrak{O}$',
u'\U0001d513': '$\\mathfrak{P}$',
u'\U0001d514': '$\\mathfrak{Q}$',
u'\U0001d516': '$\\mathfrak{S}$',
u'\U0001d517': '$\\mathfrak{T}$',
u'\U0001d518': '$\\mathfrak{U}$',
u'\U0001d519': '$\\mathfrak{V}$',
u'\U0001d51a': '$\\mathfrak{W}$',
u'\U0001d51b': '$\\mathfrak{X}$',
u'\U0001d51c': '$\\mathfrak{Y}$',
u'\U0001d51e': '$\\mathfrak{a}$',
u'\U0001d51f': '$\\mathfrak{b}$',
u'\U0001d520': '$\\mathfrak{c}$',
u'\U0001d521': '$\\mathfrak{d}$',
u'\U0001d522': '$\\mathfrak{e}$',
u'\U0001d523': '$\\mathfrak{f}$',
u'\U0001d524': '$\\mathfrak{g}$',
u'\U0001d525': '$\\mathfrak{h}$',
u'\U0001d526': '$\\mathfrak{i}$',
u'\U0001d527': '$\\mathfrak{j}$',
u'\U0001d528': '$\\mathfrak{k}$',
u'\U0001d529': '$\\mathfrak{l}$',
u'\U0001d52a': '$\\mathfrak{m}$',
u'\U0001d52b': '$\\mathfrak{n}$',
u'\U0001d52c': '$\\mathfrak{o}$',
u'\U0001d52d': '$\\mathfrak{p}$',
u'\U0001d52e': '$\\mathfrak{q}$',
u'\U0001d52f': '$\\mathfrak{r}$',
u'\U0001d530': '$\\mathfrak{s}$',
u'\U0001d531': '$\\mathfrak{t}$',
u'\U0001d532': '$\\mathfrak{u}$',
u'\U0001d533': '$\\mathfrak{v}$',
u'\U0001d534': '$\\mathfrak{w}$',
u'\U0001d535': '$\\mathfrak{x}$',
u'\U0001d536': '$\\mathfrak{y}$',
u'\U0001d537': '$\\mathfrak{z}$',
u'\U0001d538': '$\\mathbb{A}$',
u'\U0001d539': '$\\mathbb{B}$',
u'\U0001d53b': '$\\mathbb{D}$',
u'\U0001d53c': '$\\mathbb{E}$',
u'\U0001d53d': '$\\mathbb{F}$',
u'\U0001d53e': '$\\mathbb{G}$',
u'\U0001d540': '$\\mathbb{I}$',
u'\U0001d541': '$\\mathbb{J}$',
u'\U0001d542': '$\\mathbb{K}$',
u'\U0001d543': '$\\mathbb{L}$',
u'\U0001d544': '$\\mathbb{M}$',
u'\U0001d546': '$\\mathbb{O}$',
u'\U0001d54a': '$\\mathbb{S}$',
u'\U0001d54b': '$\\mathbb{T}$',
u'\U0001d54c': '$\\mathbb{U}$',
u'\U0001d54d': '$\\mathbb{V}$',
u'\U0001d54e': '$\\mathbb{W}$',
u'\U0001d54f': '$\\mathbb{X}$',
u'\U0001d550': '$\\mathbb{Y}$',
u'\U0001d552': '$\\mathbb{a}$',
u'\U0001d553': '$\\mathbb{b}$',
u'\U0001d554': '$\\mathbb{c}$',
u'\U0001d555': '$\\mathbb{d}$',
u'\U0001d556': '$\\mathbb{e}$',
u'\U0001d557': '$\\mathbb{f}$',
u'\U0001d558': '$\\mathbb{g}$',
u'\U0001d559': '$\\mathbb{h}$',
u'\U0001d55a': '$\\mathbb{i}$',
u'\U0001d55b': '$\\mathbb{j}$',
u'\U0001d55c': '$\\mathbb{k}$',
u'\U0001d55d': '$\\mathbb{l}$',
u'\U0001d55e': '$\\mathbb{m}$',
u'\U0001d55f': '$\\mathbb{n}$',
u'\U0001d560': '$\\mathbb{o}$',
u'\U0001d561': '$\\mathbb{p}$',
u'\U0001d562': '$\\mathbb{q}$',
u'\U0001d563': '$\\mathbb{r}$',
u'\U0001d564': '$\\mathbb{s}$',
u'\U0001d565': '$\\mathbb{t}$',
u'\U0001d566': '$\\mathbb{u}$',
u'\U0001d567': '$\\mathbb{v}$',
u'\U0001d568': '$\\mathbb{w}$',
u'\U0001d569': '$\\mathbb{x}$',
u'\U0001d56a': '$\\mathbb{y}$',
u'\U0001d56b': '$\\mathbb{z}$',
u'\U0001d56c': '$\\mathslbb{A}$',
u'\U0001d56d': '$\\mathslbb{B}$',
u'\U0001d56e': '$\\mathslbb{C}$',
u'\U0001d56f': '$\\mathslbb{D}$',
u'\U0001d570': '$\\mathslbb{E}$',
u'\U0001d571': '$\\mathslbb{F}$',
u'\U0001d572': '$\\mathslbb{G}$',
u'\U0001d573': '$\\mathslbb{H}$',
u'\U0001d574': '$\\mathslbb{I}$',
u'\U0001d575': '$\\mathslbb{J}$',
u'\U0001d576': '$\\mathslbb{K}$',
u'\U0001d577': '$\\mathslbb{L}$',
u'\U0001d578': '$\\mathslbb{M}$',
u'\U0001d579': '$\\mathslbb{N}$',
u'\U0001d57a': '$\\mathslbb{O}$',
u'\U0001d57b': '$\\mathslbb{P}$',
u'\U0001d57c': '$\\mathslbb{Q}$',
u'\U0001d57d': '$\\mathslbb{R}$',
u'\U0001d57e': '$\\mathslbb{S}$',
u'\U0001d57f': '$\\mathslbb{T}$',
u'\U0001d580': '$\\mathslbb{U}$',
u'\U0001d581': '$\\mathslbb{V}$',
u'\U0001d582': '$\\mathslbb{W}$',
u'\U0001d583': '$\\mathslbb{X}$',
u'\U0001d584': '$\\mathslbb{Y}$',
u'\U0001d585': '$\\mathslbb{Z}$',
u'\U0001d586': '$\\mathslbb{a}$',
u'\U0001d587': '$\\mathslbb{b}$',
u'\U0001d588': '$\\mathslbb{c}$',
u'\U0001d589': '$\\mathslbb{d}$',
u'\U0001d58a': '$\\mathslbb{e}$',
u'\U0001d58b': '$\\mathslbb{f}$',
u'\U0001d58c': '$\\mathslbb{g}$',
u'\U0001d58d': '$\\mathslbb{h}$',
u'\U0001d58e': '$\\mathslbb{i}$',
u'\U0001d58f': '$\\mathslbb{j}$',
u'\U0001d590': '$\\mathslbb{k}$',
u'\U0001d591': '$\\mathslbb{l}$',
u'\U0001d592': '$\\mathslbb{m}$',
u'\U0001d593': '$\\mathslbb{n}$',
u'\U0001d594': '$\\mathslbb{o}$',
u'\U0001d595': '$\\mathslbb{p}$',
u'\U0001d596': '$\\mathslbb{q}$',
u'\U0001d597': '$\\mathslbb{r}$',
u'\U0001d598': '$\\mathslbb{s}$',
u'\U0001d599': '$\\mathslbb{t}$',
u'\U0001d59a': '$\\mathslbb{u}$',
u'\U0001d59b': '$\\mathslbb{v}$',
u'\U0001d59c': '$\\mathslbb{w}$',
u'\U0001d59d': '$\\mathslbb{x}$',
u'\U0001d59e': '$\\mathslbb{y}$',
u'\U0001d59f': '$\\mathslbb{z}$',
u'\U0001d5a0': '$\\mathsf{A}$',
u'\U0001d5a1': '$\\mathsf{B}$',
u'\U0001d5a2': '$\\mathsf{C}$',
u'\U0001d5a3': '$\\mathsf{D}$',
u'\U0001d5a4': '$\\mathsf{E}$',
u'\U0001d5a5': '$\\mathsf{F}$',
u'\U0001d5a6': '$\\mathsf{G}$',
u'\U0001d5a7': '$\\mathsf{H}$',
u'\U0001d5a8': '$\\mathsf{I}$',
u'\U0001d5a9': '$\\mathsf{J}$',
u'\U0001d5aa': '$\\mathsf{K}$',
u'\U0001d5ab': '$\\mathsf{L}$',
u'\U0001d5ac': '$\\mathsf{M}$',
u'\U0001d5ad': '$\\mathsf{N}$',
u'\U0001d5ae': '$\\mathsf{O}$',
u'\U0001d5af': '$\\mathsf{P}$',
u'\U0001d5b0': '$\\mathsf{Q}$',
u'\U0001d5b1': '$\\mathsf{R}$',
u'\U0001d5b2': '$\\mathsf{S}$',
u'\U0001d5b3': '$\\mathsf{T}$',
u'\U0001d5b4': '$\\mathsf{U}$',
u'\U0001d5b5': '$\\mathsf{V}$',
u'\U0001d5b6': '$\\mathsf{W}$',
u'\U0001d5b7': '$\\mathsf{X}$',
u'\U0001d5b8': '$\\mathsf{Y}$',
u'\U0001d5b9': '$\\mathsf{Z}$',
u'\U0001d5ba': '$\\mathsf{a}$',
u'\U0001d5bb': '$\\mathsf{b}$',
u'\U0001d5bc': '$\\mathsf{c}$',
u'\U0001d5bd': '$\\mathsf{d}$',
u'\U0001d5be': '$\\mathsf{e}$',
u'\U0001d5bf': '$\\mathsf{f}$',
u'\U0001d5c0': '$\\mathsf{g}$',
u'\U0001d5c1': '$\\mathsf{h}$',
u'\U0001d5c2': '$\\mathsf{i}$',
u'\U0001d5c3': '$\\mathsf{j}$',
u'\U0001d5c4': '$\\mathsf{k}$',
u'\U0001d5c5': '$\\mathsf{l}$',
u'\U0001d5c6': '$\\mathsf{m}$',
u'\U0001d5c7': '$\\mathsf{n}$',
u'\U0001d5c8': '$\\mathsf{o}$',
u'\U0001d5c9': '$\\mathsf{p}$',
u'\U0001d5ca': '$\\mathsf{q}$',
u'\U0001d5cb': '$\\mathsf{r}$',
u'\U0001d5cc': '$\\mathsf{s}$',
u'\U0001d5cd': '$\\mathsf{t}$',
u'\U0001d5ce': '$\\mathsf{u}$',
u'\U0001d5cf': '$\\mathsf{v}$',
u'\U0001d5d0': '$\\mathsf{w}$',
u'\U0001d5d1': '$\\mathsf{x}$',
u'\U0001d5d2': '$\\mathsf{y}$',
u'\U0001d5d3': '$\\mathsf{z}$',
u'\U0001d5d4': '$\\mathsfbf{A}$',
u'\U0001d5d5': '$\\mathsfbf{B}$',
u'\U0001d5d6': '$\\mathsfbf{C}$',
u'\U0001d5d7': '$\\mathsfbf{D}$',
u'\U0001d5d8': '$\\mathsfbf{E}$',
u'\U0001d5d9': '$\\mathsfbf{F}$',
u'\U0001d5da': '$\\mathsfbf{G}$',
u'\U0001d5db': '$\\mathsfbf{H}$',
u'\U0001d5dc': '$\\mathsfbf{I}$',
u'\U0001d5dd': '$\\mathsfbf{J}$',
u'\U0001d5de': '$\\mathsfbf{K}$',
u'\U0001d5df': '$\\mathsfbf{L}$',
u'\U0001d5e0': '$\\mathsfbf{M}$',
u'\U0001d5e1': '$\\mathsfbf{N}$',
u'\U0001d5e2': '$\\mathsfbf{O}$',
u'\U0001d5e3': '$\\mathsfbf{P}$',
u'\U0001d5e4': '$\\mathsfbf{Q}$',
u'\U0001d5e5': '$\\mathsfbf{R}$',
u'\U0001d5e6': '$\\mathsfbf{S}$',
u'\U0001d5e7': '$\\mathsfbf{T}$',
u'\U0001d5e8': '$\\mathsfbf{U}$',
u'\U0001d5e9': '$\\mathsfbf{V}$',
u'\U0001d5ea': '$\\mathsfbf{W}$',
u'\U0001d5eb': '$\\mathsfbf{X}$',
u'\U0001d5ec': '$\\mathsfbf{Y}$',
u'\U0001d5ed': '$\\mathsfbf{Z}$',
u'\U0001d5ee': '$\\mathsfbf{a}$',
u'\U0001d5ef': '$\\mathsfbf{b}$',
u'\U0001d5f0': '$\\mathsfbf{c}$',
u'\U0001d5f1': '$\\mathsfbf{d}$',
u'\U0001d5f2': '$\\mathsfbf{e}$',
u'\U0001d5f3': '$\\mathsfbf{f}$',
u'\U0001d5f4': '$\\mathsfbf{g}$',
u'\U0001d5f5': '$\\mathsfbf{h}$',
u'\U0001d5f6': '$\\mathsfbf{i}$',
u'\U0001d5f7': '$\\mathsfbf{j}$',
u'\U0001d5f8': '$\\mathsfbf{k}$',
u'\U0001d5f9': '$\\mathsfbf{l}$',
u'\U0001d5fa': '$\\mathsfbf{m}$',
u'\U0001d5fb': '$\\mathsfbf{n}$',
u'\U0001d5fc': '$\\mathsfbf{o}$',
u'\U0001d5fd': '$\\mathsfbf{p}$',
u'\U0001d5fe': '$\\mathsfbf{q}$',
u'\U0001d5ff': '$\\mathsfbf{r}$',
u'\U0001d600': '$\\mathsfbf{s}$',
u'\U0001d601': '$\\mathsfbf{t}$',
u'\U0001d602': '$\\mathsfbf{u}$',
u'\U0001d603': '$\\mathsfbf{v}$',
u'\U0001d604': '$\\mathsfbf{w}$',
u'\U0001d605': '$\\mathsfbf{x}$',
u'\U0001d606': '$\\mathsfbf{y}$',
u'\U0001d607': '$\\mathsfbf{z}$',
u'\U0001d608': '$\\mathsfsl{A}$',
u'\U0001d609': '$\\mathsfsl{B}$',
u'\U0001d60a': '$\\mathsfsl{C}$',
u'\U0001d60b': '$\\mathsfsl{D}$',
u'\U0001d60c': '$\\mathsfsl{E}$',
u'\U0001d60d': '$\\mathsfsl{F}$',
u'\U0001d60e': '$\\mathsfsl{G}$',
u'\U0001d60f': '$\\mathsfsl{H}$',
u'\U0001d610': '$\\mathsfsl{I}$',
u'\U0001d611': '$\\mathsfsl{J}$',
u'\U0001d612': '$\\mathsfsl{K}$',
u'\U0001d613': '$\\mathsfsl{L}$',
u'\U0001d614': '$\\mathsfsl{M}$',
u'\U0001d615': '$\\mathsfsl{N}$',
u'\U0001d616': '$\\mathsfsl{O}$',
u'\U0001d617': '$\\mathsfsl{P}$',
u'\U0001d618': '$\\mathsfsl{Q}$',
u'\U0001d619': '$\\mathsfsl{R}$',
u'\U0001d61a': '$\\mathsfsl{S}$',
u'\U0001d61b': '$\\mathsfsl{T}$',
u'\U0001d61c': '$\\mathsfsl{U}$',
u'\U0001d61d': '$\\mathsfsl{V}$',
u'\U0001d61e': '$\\mathsfsl{W}$',
u'\U0001d61f': '$\\mathsfsl{X}$',
u'\U0001d620': '$\\mathsfsl{Y}$',
u'\U0001d621': '$\\mathsfsl{Z}$',
u'\U0001d622': '$\\mathsfsl{a}$',
u'\U0001d623': '$\\mathsfsl{b}$',
u'\U0001d624': '$\\mathsfsl{c}$',
u'\U0001d625': '$\\mathsfsl{d}$',
u'\U0001d626': '$\\mathsfsl{e}$',
u'\U0001d627': '$\\mathsfsl{f}$',
u'\U0001d628': '$\\mathsfsl{g}$',
u'\U0001d629': '$\\mathsfsl{h}$',
u'\U0001d62a': '$\\mathsfsl{i}$',
u'\U0001d62b': '$\\mathsfsl{j}$',
u'\U0001d62c': '$\\mathsfsl{k}$',
u'\U0001d62d': '$\\mathsfsl{l}$',
u'\U0001d62e': '$\\mathsfsl{m}$',
u'\U0001d62f': '$\\mathsfsl{n}$',
u'\U0001d630': '$\\mathsfsl{o}$',
u'\U0001d631': '$\\mathsfsl{p}$',
u'\U0001d632': '$\\mathsfsl{q}$',
u'\U0001d633': '$\\mathsfsl{r}$',
u'\U0001d634': '$\\mathsfsl{s}$',
u'\U0001d635': '$\\mathsfsl{t}$',
u'\U0001d636': '$\\mathsfsl{u}$',
u'\U0001d637': '$\\mathsfsl{v}$',
u'\U0001d638': '$\\mathsfsl{w}$',
u'\U0001d639': '$\\mathsfsl{x}$',
u'\U0001d63a': '$\\mathsfsl{y}$',
u'\U0001d63b': '$\\mathsfsl{z}$',
u'\U0001d63c': '$\\mathsfbfsl{A}$',
u'\U0001d63d': '$\\mathsfbfsl{B}$',
u'\U0001d63e': '$\\mathsfbfsl{C}$',
u'\U0001d63f': '$\\mathsfbfsl{D}$',
u'\U0001d640': '$\\mathsfbfsl{E}$',
u'\U0001d641': '$\\mathsfbfsl{F}$',
u'\U0001d642': '$\\mathsfbfsl{G}$',
u'\U0001d643': '$\\mathsfbfsl{H}$',
u'\U0001d644': '$\\mathsfbfsl{I}$',
u'\U0001d645': '$\\mathsfbfsl{J}$',
u'\U0001d646': '$\\mathsfbfsl{K}$',
u'\U0001d647': '$\\mathsfbfsl{L}$',
u'\U0001d648': '$\\mathsfbfsl{M}$',
u'\U0001d649': '$\\mathsfbfsl{N}$',
u'\U0001d64a': '$\\mathsfbfsl{O}$',
u'\U0001d64b': '$\\mathsfbfsl{P}$',
u'\U0001d64c': '$\\mathsfbfsl{Q}$',
u'\U0001d64d': '$\\mathsfbfsl{R}$',
u'\U0001d64e': '$\\mathsfbfsl{S}$',
u'\U0001d64f': '$\\mathsfbfsl{T}$',
u'\U0001d650': '$\\mathsfbfsl{U}$',
u'\U0001d651': '$\\mathsfbfsl{V}$',
u'\U0001d652': '$\\mathsfbfsl{W}$',
u'\U0001d653': '$\\mathsfbfsl{X}$',
u'\U0001d654': '$\\mathsfbfsl{Y}$',
u'\U0001d655': '$\\mathsfbfsl{Z}$',
u'\U0001d656': '$\\mathsfbfsl{a}$',
u'\U0001d657': '$\\mathsfbfsl{b}$',
u'\U0001d658': '$\\mathsfbfsl{c}$',
u'\U0001d659': '$\\mathsfbfsl{d}$',
u'\U0001d65a': '$\\mathsfbfsl{e}$',
u'\U0001d65b': '$\\mathsfbfsl{f}$',
u'\U0001d65c': '$\\mathsfbfsl{g}$',
u'\U0001d65d': '$\\mathsfbfsl{h}$',
u'\U0001d65e': '$\\mathsfbfsl{i}$',
u'\U0001d65f': '$\\mathsfbfsl{j}$',
u'\U0001d660': '$\\mathsfbfsl{k}$',
u'\U0001d661': '$\\mathsfbfsl{l}$',
u'\U0001d662': '$\\mathsfbfsl{m}$',
u'\U0001d663': '$\\mathsfbfsl{n}$',
u'\U0001d664': '$\\mathsfbfsl{o}$',
u'\U0001d665': '$\\mathsfbfsl{p}$',
u'\U0001d666': '$\\mathsfbfsl{q}$',
u'\U0001d667': '$\\mathsfbfsl{r}$',
u'\U0001d668': '$\\mathsfbfsl{s}$',
u'\U0001d669': '$\\mathsfbfsl{t}$',
u'\U0001d66a': '$\\mathsfbfsl{u}$',
u'\U0001d66b': '$\\mathsfbfsl{v}$',
u'\U0001d66c': '$\\mathsfbfsl{w}$',
u'\U0001d66d': '$\\mathsfbfsl{x}$',
u'\U0001d66e': '$\\mathsfbfsl{y}$',
u'\U0001d66f': '$\\mathsfbfsl{z}$',
u'\U0001d670': '$\\mathtt{A}$',
u'\U0001d671': '$\\mathtt{B}$',
u'\U0001d672': '$\\mathtt{C}$',
u'\U0001d673': '$\\mathtt{D}$',
u'\U0001d674': '$\\mathtt{E}$',
u'\U0001d675': '$\\mathtt{F}$',
u'\U0001d676': '$\\mathtt{G}$',
u'\U0001d677': '$\\mathtt{H}$',
u'\U0001d678': '$\\mathtt{I}$',
u'\U0001d679': '$\\mathtt{J}$',
u'\U0001d67a': '$\\mathtt{K}$',
u'\U0001d67b': '$\\mathtt{L}$',
u'\U0001d67c': '$\\mathtt{M}$',
u'\U0001d67d': '$\\mathtt{N}$',
u'\U0001d67e': '$\\mathtt{O}$',
u'\U0001d67f': '$\\mathtt{P}$',
u'\U0001d680': '$\\mathtt{Q}$',
u'\U0001d681': '$\\mathtt{R}$',
u'\U0001d682': '$\\mathtt{S}$',
u'\U0001d683': '$\\mathtt{T}$',
u'\U0001d684': '$\\mathtt{U}$',
u'\U0001d685': '$\\mathtt{V}$',
u'\U0001d686': '$\\mathtt{W}$',
u'\U0001d687': '$\\mathtt{X}$',
u'\U0001d688': '$\\mathtt{Y}$',
u'\U0001d689': '$\\mathtt{Z}$',
u'\U0001d68a': '$\\mathtt{a}$',
u'\U0001d68b': '$\\mathtt{b}$',
u'\U0001d68c': '$\\mathtt{c}$',
u'\U0001d68d': '$\\mathtt{d}$',
u'\U0001d68e': '$\\mathtt{e}$',
u'\U0001d68f': '$\\mathtt{f}$',
u'\U0001d690': '$\\mathtt{g}$',
u'\U0001d691': '$\\mathtt{h}$',
u'\U0001d692': '$\\mathtt{i}$',
u'\U0001d693': '$\\mathtt{j}$',
u'\U0001d694': '$\\mathtt{k}$',
u'\U0001d695': '$\\mathtt{l}$',
u'\U0001d696': '$\\mathtt{m}$',
u'\U0001d697': '$\\mathtt{n}$',
u'\U0001d698': '$\\mathtt{o}$',
u'\U0001d699': '$\\mathtt{p}$',
u'\U0001d69a': '$\\mathtt{q}$',
u'\U0001d69b': '$\\mathtt{r}$',
u'\U0001d69c': '$\\mathtt{s}$',
u'\U0001d69d': '$\\mathtt{t}$',
u'\U0001d69e': '$\\mathtt{u}$',
u'\U0001d69f': '$\\mathtt{v}$',
u'\U0001d6a0': '$\\mathtt{w}$',
u'\U0001d6a1': '$\\mathtt{x}$',
u'\U0001d6a2': '$\\mathtt{y}$',
u'\U0001d6a3': '$\\mathtt{z}$',
u'\U0001d6a8': '$\\mathbf{\\Alpha}$',
u'\U0001d6a9': '$\\mathbf{\\Beta}$',
u'\U0001d6aa': '$\\mathbf{\\Gamma}$',
u'\U0001d6ab': '$\\mathbf{\\Delta}$',
u'\U0001d6ac': '$\\mathbf{\\Epsilon}$',
u'\U0001d6ad': '$\\mathbf{\\Zeta}$',
u'\U0001d6ae': '$\\mathbf{\\Eta}$',
u'\U0001d6af': '$\\mathbf{\\Theta}$',
u'\U0001d6b0': '$\\mathbf{\\Iota}$',
u'\U0001d6b1': '$\\mathbf{\\Kappa}$',
u'\U0001d6b2': '$\\mathbf{\\Lambda}$',
u'\U0001d6b3': '$M$',
u'\U0001d6b4': '$N$',
u'\U0001d6b5': '$\\mathbf{\\Xi}$',
u'\U0001d6b6': '$O$',
u'\U0001d6b7': '$\\mathbf{\\Pi}$',
u'\U0001d6b8': '$\\mathbf{\\Rho}$',
u'\U0001d6b9': '{\\mathbf{\\vartheta}}',
u'\U0001d6ba': '$\\mathbf{\\Sigma}$',
u'\U0001d6bb': '$\\mathbf{\\Tau}$',
u'\U0001d6bc': '$\\mathbf{\\Upsilon}$',
u'\U0001d6bd': '$\\mathbf{\\Phi}$',
u'\U0001d6be': '$\\mathbf{\\Chi}$',
u'\U0001d6bf': '$\\mathbf{\\Psi}$',
u'\U0001d6c0': '$\\mathbf{\\Omega}$',
u'\U0001d6c1': '$\\mathbf{\\nabla}$',
u'\U0001d6c2': '$\\mathbf{\\Alpha}$',
u'\U0001d6c3': '$\\mathbf{\\Beta}$',
u'\U0001d6c4': '$\\mathbf{\\Gamma}$',
u'\U0001d6c5': '$\\mathbf{\\Delta}$',
u'\U0001d6c6': '$\\mathbf{\\Epsilon}$',
u'\U0001d6c7': '$\\mathbf{\\Zeta}$',
u'\U0001d6c8': '$\\mathbf{\\Eta}$',
u'\U0001d6c9': '$\\mathbf{\\theta}$',
u'\U0001d6ca': '$\\mathbf{\\Iota}$',
u'\U0001d6cb': '$\\mathbf{\\Kappa}$',
u'\U0001d6cc': '$\\mathbf{\\Lambda}$',
u'\U0001d6cd': '$M$',
u'\U0001d6ce': '$N$',
u'\U0001d6cf': '$\\mathbf{\\Xi}$',
u'\U0001d6d0': '$O$',
u'\U0001d6d1': '$\\mathbf{\\Pi}$',
u'\U0001d6d2': '$\\mathbf{\\Rho}$',
u'\U0001d6d3': '$\\mathbf{\\varsigma}$',
u'\U0001d6d4': '$\\mathbf{\\Sigma}$',
u'\U0001d6d5': '$\\mathbf{\\Tau}$',
u'\U0001d6d6': '$\\mathbf{\\Upsilon}$',
u'\U0001d6d7': '$\\mathbf{\\Phi}$',
u'\U0001d6d8': '$\\mathbf{\\Chi}$',
u'\U0001d6d9': '$\\mathbf{\\Psi}$',
u'\U0001d6da': '$\\mathbf{\\Omega}$',
u'\U0001d6db': '$\\partial$',
u'\U0001d6dc': '$\\in$',
u'\U0001d6dd': '{\\mathbf{\\vartheta}}',
u'\U0001d6de': '{\\mathbf{\\varkappa}}',
u'\U0001d6df': '{\\mathbf{\\phi}}',
u'\U0001d6e0': '{\\mathbf{\\varrho}}',
u'\U0001d6e1': '{\\mathbf{\\varpi}}',
u'\U0001d6e2': '$\\mathsl{\\Alpha}$',
u'\U0001d6e3': '$\\mathsl{\\Beta}$',
u'\U0001d6e4': '$\\mathsl{\\Gamma}$',
u'\U0001d6e5': '$\\mathsl{\\Delta}$',
u'\U0001d6e6': '$\\mathsl{\\Epsilon}$',
u'\U0001d6e7': '$\\mathsl{\\Zeta}$',
u'\U0001d6e8': '$\\mathsl{\\Eta}$',
u'\U0001d6e9': '$\\mathsl{\\Theta}$',
u'\U0001d6ea': '$\\mathsl{\\Iota}$',
u'\U0001d6eb': '$\\mathsl{\\Kappa}$',
u'\U0001d6ec': '$\\mathsl{\\Lambda}$',
u'\U0001d6ed': '$M$',
u'\U0001d6ee': '$N$',
u'\U0001d6ef': '$\\mathsl{\\Xi}$',
u'\U0001d6f0': '$O$',
u'\U0001d6f1': '$\\mathsl{\\Pi}$',
u'\U0001d6f2': '$\\mathsl{\\Rho}$',
u'\U0001d6f3': '{\\mathsl{\\vartheta}}',
u'\U0001d6f4': '$\\mathsl{\\Sigma}$',
u'\U0001d6f5': '$\\mathsl{\\Tau}$',
u'\U0001d6f6': '$\\mathsl{\\Upsilon}$',
u'\U0001d6f7': '$\\mathsl{\\Phi}$',
u'\U0001d6f8': '$\\mathsl{\\Chi}$',
u'\U0001d6f9': '$\\mathsl{\\Psi}$',
u'\U0001d6fa': '$\\mathsl{\\Omega}$',
u'\U0001d6fb': '$\\mathsl{\\nabla}$',
u'\U0001d6fc': '$\\mathsl{\\Alpha}$',
u'\U0001d6fd': '$\\mathsl{\\Beta}$',
u'\U0001d6fe': '$\\mathsl{\\Gamma}$',
u'\U0001d6ff': '$\\mathsl{\\Delta}$',
u'\U0001d700': '$\\mathsl{\\Epsilon}$',
u'\U0001d701': '$\\mathsl{\\Zeta}$',
u'\U0001d702': '$\\mathsl{\\Eta}$',
u'\U0001d703': '$\\mathsl{\\Theta}$',
u'\U0001d704': '$\\mathsl{\\Iota}$',
u'\U0001d705': '$\\mathsl{\\Kappa}$',
u'\U0001d706': '$\\mathsl{\\Lambda}$',
u'\U0001d707': '$M$',
u'\U0001d708': '$N$',
u'\U0001d709': '$\\mathsl{\\Xi}$',
u'\U0001d70a': '$O$',
u'\U0001d70b': '$\\mathsl{\\Pi}$',
u'\U0001d70c': '$\\mathsl{\\Rho}$',
u'\U0001d70d': '$\\mathsl{\\varsigma}$',
u'\U0001d70e': '$\\mathsl{\\Sigma}$',
u'\U0001d70f': '$\\mathsl{\\Tau}$',
u'\U0001d710': '$\\mathsl{\\Upsilon}$',
u'\U0001d711': '$\\mathsl{\\Phi}$',
u'\U0001d712': '$\\mathsl{\\Chi}$',
u'\U0001d713': '$\\mathsl{\\Psi}$',
u'\U0001d714': '$\\mathsl{\\Omega}$',
u'\U0001d715': '$\\partial$',
u'\U0001d716': '$\\in$',
u'\U0001d717': '{\\mathsl{\\vartheta}}',
u'\U0001d718': '{\\mathsl{\\varkappa}}',
u'\U0001d719': '{\\mathsl{\\phi}}',
u'\U0001d71a': '{\\mathsl{\\varrho}}',
u'\U0001d71b': '{\\mathsl{\\varpi}}',
u'\U0001d71c': '$\\mathbit{\\Alpha}$',
u'\U0001d71d': '$\\mathbit{\\Beta}$',
u'\U0001d71e': '$\\mathbit{\\Gamma}$',
u'\U0001d71f': '$\\mathbit{\\Delta}$',
u'\U0001d720': '$\\mathbit{\\Epsilon}$',
u'\U0001d721': '$\\mathbit{\\Zeta}$',
u'\U0001d722': '$\\mathbit{\\Eta}$',
u'\U0001d723': '$\\mathbit{\\Theta}$',
u'\U0001d724': '$\\mathbit{\\Iota}$',
u'\U0001d725': '$\\mathbit{\\Kappa}$',
u'\U0001d726': '$\\mathbit{\\Lambda}$',
u'\U0001d727': '$M$',
u'\U0001d728': '$N$',
u'\U0001d729': '$\\mathbit{\\Xi}$',
u'\U0001d72a': '$O$',
u'\U0001d72b': '$\\mathbit{\\Pi}$',
u'\U0001d72c': '$\\mathbit{\\Rho}$',
u'\U0001d72d': '{\\mathbit{O}}',
u'\U0001d72e': '$\\mathbit{\\Sigma}$',
u'\U0001d72f': '$\\mathbit{\\Tau}$',
u'\U0001d730': '$\\mathbit{\\Upsilon}$',
u'\U0001d731': '$\\mathbit{\\Phi}$',
u'\U0001d732': '$\\mathbit{\\Chi}$',
u'\U0001d733': '$\\mathbit{\\Psi}$',
u'\U0001d734': '$\\mathbit{\\Omega}$',
u'\U0001d735': '$\\mathbit{\\nabla}$',
u'\U0001d736': '$\\mathbit{\\Alpha}$',
u'\U0001d737': '$\\mathbit{\\Beta}$',
u'\U0001d738': '$\\mathbit{\\Gamma}$',
u'\U0001d739': '$\\mathbit{\\Delta}$',
u'\U0001d73a': '$\\mathbit{\\Epsilon}$',
u'\U0001d73b': '$\\mathbit{\\Zeta}$',
u'\U0001d73c': '$\\mathbit{\\Eta}$',
u'\U0001d73d': '$\\mathbit{\\Theta}$',
u'\U0001d73e': '$\\mathbit{\\Iota}$',
u'\U0001d73f': '$\\mathbit{\\Kappa}$',
u'\U0001d740': '$\\mathbit{\\Lambda}$',
u'\U0001d741': '$M$',
u'\U0001d742': '$N$',
u'\U0001d743': '$\\mathbit{\\Xi}$',
u'\U0001d744': '$O$',
u'\U0001d745': '$\\mathbit{\\Pi}$',
u'\U0001d746': '$\\mathbit{\\Rho}$',
u'\U0001d747': '$\\mathbit{\\varsigma}$',
u'\U0001d748': '$\\mathbit{\\Sigma}$',
u'\U0001d749': '$\\mathbit{\\Tau}$',
u'\U0001d74a': '$\\mathbit{\\Upsilon}$',
u'\U0001d74b': '$\\mathbit{\\Phi}$',
u'\U0001d74c': '$\\mathbit{\\Chi}$',
u'\U0001d74d': '$\\mathbit{\\Psi}$',
u'\U0001d74e': '$\\mathbit{\\Omega}$',
u'\U0001d74f': '$\\partial$',
u'\U0001d750': '$\\in$',
u'\U0001d751': '{\\mathbit{\\vartheta}}',
u'\U0001d752': '{\\mathbit{\\varkappa}}',
u'\U0001d753': '{\\mathbit{\\phi}}',
u'\U0001d754': '{\\mathbit{\\varrho}}',
u'\U0001d755': '{\\mathbit{\\varpi}}',
u'\U0001d756': '$\\mathsfbf{\\Alpha}$',
u'\U0001d757': '$\\mathsfbf{\\Beta}$',
u'\U0001d758': '$\\mathsfbf{\\Gamma}$',
u'\U0001d759': '$\\mathsfbf{\\Delta}$',
u'\U0001d75a': '$\\mathsfbf{\\Epsilon}$',
u'\U0001d75b': '$\\mathsfbf{\\Zeta}$',
u'\U0001d75c': '$\\mathsfbf{\\Eta}$',
u'\U0001d75d': '$\\mathsfbf{\\Theta}$',
u'\U0001d75e': '$\\mathsfbf{\\Iota}$',
u'\U0001d75f': '$\\mathsfbf{\\Kappa}$',
u'\U0001d760': '$\\mathsfbf{\\Lambda}$',
u'\U0001d761': '$M$',
u'\U0001d762': '$N$',
u'\U0001d763': '$\\mathsfbf{\\Xi}$',
u'\U0001d764': '$O$',
u'\U0001d765': '$\\mathsfbf{\\Pi}$',
u'\U0001d766': '$\\mathsfbf{\\Rho}$',
u'\U0001d767': '{\\mathsfbf{\\vartheta}}',
u'\U0001d768': '$\\mathsfbf{\\Sigma}$',
u'\U0001d769': '$\\mathsfbf{\\Tau}$',
u'\U0001d76a': '$\\mathsfbf{\\Upsilon}$',
u'\U0001d76b': '$\\mathsfbf{\\Phi}$',
u'\U0001d76c': '$\\mathsfbf{\\Chi}$',
u'\U0001d76d': '$\\mathsfbf{\\Psi}$',
u'\U0001d76e': '$\\mathsfbf{\\Omega}$',
u'\U0001d76f': '$\\mathsfbf{\\nabla}$',
u'\U0001d770': '$\\mathsfbf{\\Alpha}$',
u'\U0001d771': '$\\mathsfbf{\\Beta}$',
u'\U0001d772': '$\\mathsfbf{\\Gamma}$',
u'\U0001d773': '$\\mathsfbf{\\Delta}$',
u'\U0001d774': '$\\mathsfbf{\\Epsilon}$',
u'\U0001d775': '$\\mathsfbf{\\Zeta}$',
u'\U0001d776': '$\\mathsfbf{\\Eta}$',
u'\U0001d777': '$\\mathsfbf{\\Theta}$',
u'\U0001d778': '$\\mathsfbf{\\Iota}$',
u'\U0001d779': '$\\mathsfbf{\\Kappa}$',
u'\U0001d77a': '$\\mathsfbf{\\Lambda}$',
u'\U0001d77b': '$M$',
u'\U0001d77c': '$N$',
u'\U0001d77d': '$\\mathsfbf{\\Xi}$',
u'\U0001d77e': '$O$',
u'\U0001d77f': '$\\mathsfbf{\\Pi}$',
u'\U0001d780': '$\\mathsfbf{\\Rho}$',
u'\U0001d781': '$\\mathsfbf{\\varsigma}$',
u'\U0001d782': '$\\mathsfbf{\\Sigma}$',
u'\U0001d783': '$\\mathsfbf{\\Tau}$',
u'\U0001d784': '$\\mathsfbf{\\Upsilon}$',
u'\U0001d785': '$\\mathsfbf{\\Phi}$',
u'\U0001d786': '$\\mathsfbf{\\Chi}$',
u'\U0001d787': '$\\mathsfbf{\\Psi}$',
u'\U0001d788': '$\\mathsfbf{\\Omega}$',
u'\U0001d789': '$\\partial$',
u'\U0001d78a': '$\\in$',
u'\U0001d78b': '{\\mathsfbf{\\vartheta}}',
u'\U0001d78c': '{\\mathsfbf{\\varkappa}}',
u'\U0001d78d': '{\\mathsfbf{\\phi}}',
u'\U0001d78e': '{\\mathsfbf{\\varrho}}',
u'\U0001d78f': '{\\mathsfbf{\\varpi}}',
u'\U0001d790': '$\\mathsfbfsl{\\Alpha}$',
u'\U0001d791': '$\\mathsfbfsl{\\Beta}$',
u'\U0001d792': '$\\mathsfbfsl{\\Gamma}$',
u'\U0001d793': '$\\mathsfbfsl{\\Delta}$',
u'\U0001d794': '$\\mathsfbfsl{\\Epsilon}$',
u'\U0001d795': '$\\mathsfbfsl{\\Zeta}$',
u'\U0001d796': '$\\mathsfbfsl{\\Eta}$',
u'\U0001d797': '$\\mathsfbfsl{\\vartheta}$',
u'\U0001d798': '$\\mathsfbfsl{\\Iota}$',
u'\U0001d799': '$\\mathsfbfsl{\\Kappa}$',
u'\U0001d79a': '$\\mathsfbfsl{\\Lambda}$',
u'\U0001d79b': '$M$',
u'\U0001d79c': '$N$',
u'\U0001d79d': '$\\mathsfbfsl{\\Xi}$',
u'\U0001d79e': '$O$',
u'\U0001d79f': '$\\mathsfbfsl{\\Pi}$',
u'\U0001d7a0': '$\\mathsfbfsl{\\Rho}$',
u'\U0001d7a1': '{\\mathsfbfsl{\\vartheta}}',
u'\U0001d7a2': '$\\mathsfbfsl{\\Sigma}$',
u'\U0001d7a3': '$\\mathsfbfsl{\\Tau}$',
u'\U0001d7a4': '$\\mathsfbfsl{\\Upsilon}$',
u'\U0001d7a5': '$\\mathsfbfsl{\\Phi}$',
u'\U0001d7a6': '$\\mathsfbfsl{\\Chi}$',
u'\U0001d7a7': '$\\mathsfbfsl{\\Psi}$',
u'\U0001d7a8': '$\\mathsfbfsl{\\Omega}$',
u'\U0001d7a9': '$\\mathsfbfsl{\\nabla}$',
u'\U0001d7aa': '$\\mathsfbfsl{\\Alpha}$',
u'\U0001d7ab': '$\\mathsfbfsl{\\Beta}$',
u'\U0001d7ac': '$\\mathsfbfsl{\\Gamma}$',
u'\U0001d7ad': '$\\mathsfbfsl{\\Delta}$',
u'\U0001d7ae': '$\\mathsfbfsl{\\Epsilon}$',
u'\U0001d7af': '$\\mathsfbfsl{\\Zeta}$',
u'\U0001d7b0': '$\\mathsfbfsl{\\Eta}$',
u'\U0001d7b1': '$\\mathsfbfsl{\\vartheta}$',
u'\U0001d7b2': '$\\mathsfbfsl{\\Iota}$',
u'\U0001d7b3': '$\\mathsfbfsl{\\Kappa}$',
u'\U0001d7b4': '$\\mathsfbfsl{\\Lambda}$',
u'\U0001d7b5': '$M$',
u'\U0001d7b6': '$N$',
u'\U0001d7b7': '$\\mathsfbfsl{\\Xi}$',
u'\U0001d7b8': '$O$',
u'\U0001d7b9': '$\\mathsfbfsl{\\Pi}$',
u'\U0001d7ba': '$\\mathsfbfsl{\\Rho}$',
u'\U0001d7bb': '$\\mathsfbfsl{\\varsigma}$',
u'\U0001d7bc': '$\\mathsfbfsl{\\Sigma}$',
u'\U0001d7bd': '$\\mathsfbfsl{\\Tau}$',
u'\U0001d7be': '$\\mathsfbfsl{\\Upsilon}$',
u'\U0001d7bf': '$\\mathsfbfsl{\\Phi}$',
u'\U0001d7c0': '$\\mathsfbfsl{\\Chi}$',
u'\U0001d7c1': '$\\mathsfbfsl{\\Psi}$',
u'\U0001d7c2': '$\\mathsfbfsl{\\Omega}$',
u'\U0001d7c3': '$\\partial$',
u'\U0001d7c4': '$\\in$',
u'\U0001d7c5': '{\\mathsfbfsl{\\vartheta}}',
u'\U0001d7c6': '{\\mathsfbfsl{\\varkappa}}',
u'\U0001d7c7': '{\\mathsfbfsl{\\phi}}',
u'\U0001d7c8': '{\\mathsfbfsl{\\varrho}}',
u'\U0001d7c9': '{\\mathsfbfsl{\\varpi}}',
u'\U0001d7ce': '$\\mathbf{0}$',
u'\U0001d7cf': '$\\mathbf{1}$',
u'\U0001d7d0': '$\\mathbf{2}$',
u'\U0001d7d1': '$\\mathbf{3}$',
u'\U0001d7d2': '$\\mathbf{4}$',
u'\U0001d7d3': '$\\mathbf{5}$',
u'\U0001d7d4': '$\\mathbf{6}$',
u'\U0001d7d5': '$\\mathbf{7}$',
u'\U0001d7d6': '$\\mathbf{8}$',
u'\U0001d7d7': '$\\mathbf{9}$',
u'\U0001d7d8': '$\\mathbb{0}$',
u'\U0001d7d9': '$\\mathbb{1}$',
u'\U0001d7da': '$\\mathbb{2}$',
u'\U0001d7db': '$\\mathbb{3}$',
u'\U0001d7dc': '$\\mathbb{4}$',
u'\U0001d7dd': '$\\mathbb{5}$',
u'\U0001d7de': '$\\mathbb{6}$',
u'\U0001d7df': '$\\mathbb{7}$',
u'\U0001d7e0': '$\\mathbb{8}$',
u'\U0001d7e1': '$\\mathbb{9}$',
u'\U0001d7e2': '$\\mathsf{0}$',
u'\U0001d7e3': '$\\mathsf{1}$',
u'\U0001d7e4': '$\\mathsf{2}$',
u'\U0001d7e5': '$\\mathsf{3}$',
u'\U0001d7e6': '$\\mathsf{4}$',
u'\U0001d7e7': '$\\mathsf{5}$',
u'\U0001d7e8': '$\\mathsf{6}$',
u'\U0001d7e9': '$\\mathsf{7}$',
u'\U0001d7ea': '$\\mathsf{8}$',
u'\U0001d7eb': '$\\mathsf{9}$',
u'\U0001d7ec': '$\\mathsfbf{0}$',
u'\U0001d7ed': '$\\mathsfbf{1}$',
u'\U0001d7ee': '$\\mathsfbf{2}$',
u'\U0001d7ef': '$\\mathsfbf{3}$',
u'\U0001d7f0': '$\\mathsfbf{4}$',
u'\U0001d7f1': '$\\mathsfbf{5}$',
u'\U0001d7f2': '$\\mathsfbf{6}$',
u'\U0001d7f3': '$\\mathsfbf{7}$',
u'\U0001d7f4': '$\\mathsfbf{8}$',
u'\U0001d7f5': '$\\mathsfbf{9}$',
u'\U0001d7f6': '$\\mathtt{0}$',
u'\U0001d7f7': '$\\mathtt{1}$',
u'\U0001d7f8': '$\\mathtt{2}$',
u'\U0001d7f9': '$\\mathtt{3}$',
u'\U0001d7fa': '$\\mathtt{4}$',
u'\U0001d7fb': '$\\mathtt{5}$',
u'\U0001d7fc': '$\\mathtt{6}$',
u'\U0001d7fd': '$\\mathtt{7}$',
u'\U0001d7fe': '$\\mathtt{8}$',
u'\U0001d7ff': '$\\mathtt{9}$'}
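
# --- Added usage sketch: the mapping above translates single Unicode
# characters into LaTeX fragments. The dictionary's actual name is bound
# where it is opened earlier in this module (assumed to be `unicode_map`
# in the example call below); the helper itself is self-contained and
# takes the mapping as an argument.
def unicode_to_latex(text, mapping):
    # Replace each character that has a LaTeX equivalent and pass every
    # other character through unchanged. (Note: this iterates per code
    # unit, so astral-plane keys like the \U0001d400 block need a wide
    # build or Python 3.)
    return u''.join(mapping.get(ch, ch) for ch in text)

# Example call (assumed dictionary name):
#   unicode_to_latex(u'x \u2208 \u211d', unicode_map)
#   -> u'x $\\in$ $\\mathbb{R}$'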
|
alon/polinax
|
libs/external_libs/docutils-0.4/docutils/writers/newlatex2e/unicode_map.py
|
Python
|
gpl-2.0
| 73,666
|
[
"Bowtie"
] |
f3c28e130bc5a45cd4801ec0544a85023ace4530f7f46560b15dc575914d216f
|
# -*- coding: utf-8 -*-
"""
Output Plugin for generic external encoders with piping.
Copyright (c) 2006-2008 by Nyaochi
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA, or visit
http://www.gnu.org/copyleft/gpl.html .
"""
from celib import *
class GenericEncoderPipingOutput(OutputModule):
def __init__(self):
self.name = 'extpipe'
self.is_utf8 = False
self.ext = ''
self.cmd = ''
self.doc = OutputModuleDocument()
self.doc.tools = (
'Any external encoder receiving an audio source from STDIN.',
)
self.doc.commands = None
self.doc.limitations = None
self.doc.tags = None
def handle_track(self, track, options):
args = []
args.append(track['input_cmdline'])
args.append('|')
args.append(track['output_cmdline'])
cmdline = args_to_string(args)
self.console.execute(cmdline)
i = 1
while track.has_key('output_cmdline' + str(i)):
self.console.execute(track['output_cmdline' + str(i)])
i += 1
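# A minimal sketch of the track dict this plugin consumes (hypothetical
# values; in practice the cueproc driver fills in these keys):
#
#   track = {
#       'input_cmdline':  'flac -dcs track01.flac',
#       'output_cmdline': 'lame - track01.mp3',
#   }
#
# handle_track() then executes "flac -dcs track01.flac | lame - track01.mp3"
# and afterwards any numbered 'output_cmdline1', 'output_cmdline2', ... keys.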
|
rinrinne/cueproc-alternative
|
src/ce_extpipe.py
|
Python
|
gpl-2.0
| 1,707
|
[
"VisIt"
] |
0ed4b00d84ec5d597741990a29265d9d3f360f69a2e965511a7520d09e54c835
|
import simtk.unit as u
from simtk.openmm import app
import simtk.openmm as mm
from openmoltools import gafftools, system_checker
ligand_name = "sustiva"
ligand_path = "./chemicals/%s/" % ligand_name
temperature = 300 * u.kelvin
friction = 0.3 / u.picosecond
timestep = 0.1 * u.femtosecond
prmtop = app.AmberPrmtopFile("%s/%s.prmtop" % (ligand_path, ligand_name))
inpcrt = app.AmberInpcrdFile("%s/%s.inpcrd" % (ligand_path, ligand_name))
system_prm = prmtop.createSystem(nonbondedMethod=app.NoCutoff, nonbondedCutoff=1.0*u.nanometers, constraints=None)
mol2 = gafftools.Mol2Parser("%s/%s.mol2" % (ligand_path, ligand_name))
top, xyz = mol2.to_openmm()
forcefield = app.ForceField("%s/%s.xml" % (ligand_path, ligand_name))
system_xml = forcefield.createSystem(top, nonbondedMethod=app.NoCutoff, nonbondedCutoff=1.0*u.nanometers, constraints=None)
integrator_xml = mm.LangevinIntegrator(temperature, friction, timestep)
simulation_xml = app.Simulation(top, system_xml, integrator_xml)
simulation_xml.context.setPositions(xyz)
integrator_prm = mm.LangevinIntegrator(temperature, friction, timestep)
simulation_prm = app.Simulation(prmtop.topology, system_prm, integrator_prm)
simulation_prm.context.setPositions(xyz)
checker = system_checker.SystemChecker(simulation_xml, simulation_prm)
checker.check_force_parameters()
energy0, energy1 = checker.check_energies()
abs((energy0 - energy1) / u.kilojoules_per_mole)
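# The line above evaluates the absolute ffxml-vs-prmtop energy discrepancy in
# kJ/mol but discards the result; in a standalone script one would typically
# report or assert on it (the tolerance below is a hypothetical choice):
#
#   delta = abs((energy0 - energy1) / u.kilojoules_per_mole)
#   assert delta < 1e-3, "energy mismatch: %f kJ/mol" % delta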
|
Clyde-fare/openmoltools
|
examples/test_example.py
|
Python
|
gpl-2.0
| 1,421
|
[
"OpenMM"
] |
301fefc97d7fa156c72134b995f58459c919d9951af748fbdd6df3aefb3b0419
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import ast
import logging
from sys import version_info
from .errors import ScriptSyntaxError
supported_nodes = ('arg', 'assert', 'assign', 'attribute', 'augassign',
'binop', 'boolop', 'break', 'call', 'compare',
'continue', 'delete', 'dict', 'ellipsis',
'excepthandler', 'expr', 'extslice', 'for',
'if', 'ifexp', 'index', 'interrupt',
'list', 'listcomp', 'module', 'name', 'nameconstant',
'num', 'pass', 'raise', 'repr',
'slice', 'str', 'subscript', 'try', 'tuple', 'unaryop',)
reserved_names = (
'eval', 'exec', 'print', '__getattribute'
)
_logger = logging.getLogger(__name__)
class ScriptValidator(ast.NodeVisitor):
def __init__(self):
self.node_handlers = dict(((node, getattr(self, "on_%s" % node))
for node in supported_nodes))
def validate(self, script, filename='<string>', mode='exec'):
node = ast.parse(script, filename, mode)
self.visit(node)
def generic_visit(self, node):
if node is None:
return
name = node.__class__.__name__.lower()
# print("Node: %s" % name)
if name not in supported_nodes:
raise ScriptSyntaxError("Not supported node: %r" % name)
try:
handler = self.node_handlers[name]
handler(node)
except KeyError:
raise ScriptSyntaxError("Not supported node: %r" % name)
except SyntaxError as ex:
            raise ScriptSyntaxError(str(ex))
def on_module(self, node):
for tnode in node.body:
self.visit(tnode)
def on_expr(self, node):
"expression"
return self.visit(node.value) # ('value',)
def on_index(self, node):
"index"
return self.visit(node.value) # ('value',)
def on_return(self, node): # ('value',)
"return statement: look for None, return special sentinal"
self.visit(node.value)
def on_repr(self, node):
"repr "
self.visit(node.value)
def on_pass(self, node):
"pass statement"
pass
def on_ellipsis(self, node):
"ellipses"
pass
# for break and continue: set the instance variable _interrupt
def on_interrupt(self, node): # ()
"interrupt handler"
pass
def on_break(self, node):
"break"
pass
def on_continue(self, node):
"continue"
pass
def on_assert(self, node): # ('test', 'msg')
"assert statement"
self.visit(node.test)
def on_list(self, node): # ('elt', 'ctx')
"list"
for e in node.elts:
self.visit(e)
def on_tuple(self, node): # ('elts', 'ctx')
"tuple"
self.on_list(node)
def on_dict(self, node): # ('keys', 'values')
"dictionary"
for k, v in zip(node.keys, node.values):
self.visit(k)
self.visit(v)
def on_num(self, node): # ('n',)
'return number'
pass
def on_str(self, node): # ('s',)
'return string'
pass
def on_name(self, node): # ('id', 'ctx')
""" Name node """
name = node.id
if name is None:
return
if name.startswith('__'):
raise RuntimeError("Name with double underscores is not allowed: %s" % name)
if name in reserved_names:
raise RuntimeError("Reserved name: %s" % name)
def on_nameconstant(self, node):
""" True, False, None in python >= 3.4 """
pass
def on_attribute(self, node): # ('value', 'attr', 'ctx')
"extract attribute"
pass
def on_assign(self, node): # ('targets', 'value')
"simple assignment"
self.visit(node.value)
def on_augassign(self, node): # ('target', 'op', 'value')
"augmented assign"
pass
def on_slice(self, node): # ():('lower', 'upper', 'step')
"simple slice"
self.visit(node.lower)
self.visit(node.upper)
self.visit(node.step)
def on_extslice(self, node): # ():('dims',)
"extended slice"
for tnode in node.dims:
self.visit(tnode)
def on_subscript(self, node): # ('value', 'slice', 'ctx')
"subscript handling -- one of the tricky parts"
val = self.visit(node.value)
nslice = self.visit(node.slice)
def on_delete(self, node): # ('targets',)
"delete statement"
pass
def on_unaryop(self, node): # ('op', 'operand')
"unary operator"
self.visit(node.operand)
def on_binop(self, node): # ('left', 'op', 'right')
"binary operator"
self.visit(node.left)
self.visit(node.right)
def on_boolop(self, node): # ('op', 'values')
"boolean operator"
for n in node.values:
self.visit(n)
def on_compare(self, node): # ('left', 'ops', 'comparators')
"comparison operators"
self.visit(node.left)
for op, rnode in zip(node.ops, node.comparators):
self.visit(rnode)
def on_print(self, node): # ('dest', 'values', 'nl')
""" note: implements Python2 style print statement, not
print() function. May need improvement...."""
self.visit(node.dest)
for tnode in node.values:
self.visit(tnode)
def on_if(self, node): # ('test', 'body', 'orelse')
"regular if-then-else statement"
self.visit(node.test)
for tnode in node.orelse:
self.visit(tnode)
for tnode in node.body:
self.visit(tnode)
def on_ifexp(self, node): # ('test', 'body', 'orelse')
"if expressions"
expr = node.orelse
self.visit(node.test)
self.visit(node.orelse)
self.visit(node.body)
def on_while(self, node): # ('test', 'body', 'orelse')
"while blocks"
self.visit(node.test)
for tnode in node.body:
self.visit(tnode)
for tnode in node.orelse:
self.visit(tnode)
def on_for(self, node): # ('target', 'iter', 'body', 'orelse')
"for blocks"
for tnode in node.body:
self.visit(tnode)
def on_listcomp(self, node): # ('elt', 'generators')
"list comprehension"
pass
def on_excepthandler(self, node): # ('type', 'name', 'body')
"exception handler..."
self.visit(node.type)
def on_try(self, node): # ('body', 'handlers', 'orelse', 'finalbody')
"try/except/else/finally blocks"
for tnode in node.body:
self.visit(tnode)
def on_raise(self, node): # ('type', 'inst', 'tback')
"raise statement: note difference for python 2 and 3"
if version_info[0] == 3:
excnode = node.exc
msgnode = node.cause
else:
excnode = node.type
msgnode = node.inst
self.visit(excnode)
self.visit(msgnode)
def on_call(self, node):
"function execution"
# ('func', 'args', 'keywords', 'starargs', 'kwargs')
self.visit(node.func)
def on_arg(self, node): # ('test', 'msg')
"arg for function definitions"
pass
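# A minimal usage sketch (hypothetical snippets; validate() raises
# ScriptSyntaxError for unsupported nodes and RuntimeError for reserved or
# double-underscore names, per the handlers above):
#
#   validator = ScriptValidator()
#   validator.validate("x = 1 + 2")        # accepted
#   validator.validate("eval('1 + 2')")    # rejected: reserved name 'eval'
#   validator.validate("import os")        # rejected: 'import' not supported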
|
nickchen-mitac/fork
|
src/ava/job/validator.py
|
Python
|
apache-2.0
| 7,468
|
[
"VisIt"
] |
0c58c95d41f6cbab6e8827fee344f29b083699ba740c89d3a767f67b2cb03a3f
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This module analyzes and estimates the distribution of averaged anomaly scores
from a CLA model. Given a new anomaly score `s`, estimates `P(score >= s)`.
The number `P(score >= s)` represents the likelihood of the current state of
predictability. For example, a likelihood of 0.01 or 1% means we see this much
predictability about one out of every 100 records. The number is not as unusual
as it seems. For records that arrive every minute, this means once every hour
and 40 minutes. A likelihood of 0.0001 or 0.01% means we see it once out of
10,000 records, or about once every 7 days.
USAGE
-----
There are two ways to use the code: using the AnomalyLikelihood helper class or
using the raw individual functions.
Helper Class
------------
The helper class AnomalyLikelihood is the easiest to use. To use it simply
create an instance and then feed it successive anomaly scores:
anomalyLikelihood = AnomalyLikelihood()
while still_have_data:
# Get anomaly score from model
    # Compute probability that an anomaly has occurred
anomalyProbability = anomalyLikelihood.anomalyProbability(
value, anomalyScore, timestamp)
Raw functions
-------------
There are two lower level functions, estimateAnomalyLikelihoods and
updateAnomalyLikelihoods. The details of these are described below.
"""
import math
import datetime
import numpy
from nupic.utils import MovingAverage
class AnomalyLikelihood(object):
"""
Helper class for running anomaly likelihood computation.
"""
def __init__(self, claLearningPeriod=300, estimationSamples=300):
"""
:param claLearningPeriod: the number of iterations required for the CLA to
learn the basic patterns in the dataset and for the anomaly score to 'settle
down'. The default is based on empirical observations but in reality this
could be larger for more complex domains. The downside if this is too large
is that real anomalies might get ignored and not flagged.
:param estimationSamples: the number of reasonable anomaly scores required
for the initial estimate of the Gaussian. The default of 300 records is
reasonable - we just need sufficient samples to get a decent estimate for
the Gaussian. It's unlikely you will need to tune this since the Gaussian is
re-estimated every 100 iterations.
Anomaly likelihood scores are reported at a flat 0.5 for claLearningPeriod +
estimationSamples iterations.
"""
self._iteration = 0
self._historicalScores = []
self._distribution = None
self._probationaryPeriod = claLearningPeriod + estimationSamples
self._claLearningPeriod = claLearningPeriod
# How often we re-estimate the Gaussian distribution. The ideal is to
# re-estimate every iteration but this is a performance hit. In general the
# system is not very sensitive to this number as long as it is small
# relative to the total number of records processed.
self._reestimationPeriod = 100
@staticmethod
def computeLogLikelihood(likelihood):
"""
Compute a log scale representation of the likelihood value. Since the
likelihood computations return low probabilities that often go into four 9's
or five 9's, a log value is more useful for visualization, thresholding,
etc.
"""
# The log formula is:
# Math.log(1.0000000001 - likelihood) / Math.log(1.0 - 0.9999999999)
return math.log(1.0000000001 - likelihood) / -23.02585084720009
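  # Worked example (a sketch, not part of the original API surface):
  # computeLogLikelihood(0.9999) = log(1.0000000001 - 0.9999) / -23.0259
  #                              ~= log(1e-4) / -23.0259 ~= 0.4,
  # i.e. "four 9's" of likelihood maps to roughly 0.4 on the log scale.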
def anomalyProbability(self, value, anomalyScore, timestamp=None):
"""
Return the probability that the current value plus anomaly score represents
an anomaly given the historical distribution of anomaly scores. The closer
the number is to 1, the higher the chance it is an anomaly.
Given the current metric value, plus the current anomaly score, output the
anomalyLikelihood for this record.
"""
if timestamp is None:
timestamp = datetime.datetime.now()
dataPoint = (timestamp, value, anomalyScore)
# We ignore the first probationaryPeriod data points
if len(self._historicalScores) < self._probationaryPeriod:
likelihood = 0.5
else:
# On a rolling basis we re-estimate the distribution
if ( (self._distribution is None) or
(self._iteration % self._reestimationPeriod == 0) ):
_, _, self._distribution = (
estimateAnomalyLikelihoods(
self._historicalScores,
skipRecords = self._claLearningPeriod)
)
likelihoods, _, self._distribution = (
updateAnomalyLikelihoods([dataPoint],
self._distribution)
)
likelihood = 1.0 - likelihoods[0]
# Before we exit update historical scores and iteration
self._historicalScores.append(dataPoint)
self._iteration += 1
return likelihood
#
# USAGE FOR LOW-LEVEL FUNCTIONS
# -----------------------------
#
# There are two primary interface routines:
#
# estimateAnomalyLikelihoods: batch routine, called initially and once in a
# while
# updateAnomalyLikelihoods: online routine, called for every new data point
#
# 1. Initially::
#
# likelihoods, avgRecordList, estimatorParams = \
# estimateAnomalyLikelihoods(metric_data)
#
# 2. Whenever you get new data::
#
# likelihoods, avgRecordList, estimatorParams = \
# updateAnomalyLikelihoods(data2, estimatorParams)
#
# 3. And again (make sure you use the new estimatorParams returned in the above
# call to updateAnomalyLikelihoods!)::
#
# likelihoods, avgRecordList, estimatorParams = \
# updateAnomalyLikelihoods(data3, estimatorParams)
#
# 4. Every once in a while update estimator with a lot of recent data::
#
# likelihoods, avgRecordList, estimatorParams = \
# estimateAnomalyLikelihoods(lots_of_metric_data)
#
#
# PARAMS
# ~~~~~~
#
# The parameters dict returned by the above functions has the following
# structure. Note: the client does not need to know the details of this.
#
# ::
#
# {
# "distribution": # describes the distribution
# {
# "name": STRING, # name of the distribution, such as 'normal'
# "mean": SCALAR, # mean of the distribution
# "variance": SCALAR, # variance of the distribution
#
# # There may also be some keys that are specific to the distribution
# },
#
# "historicalLikelihoods": [] # Contains the last windowSize likelihood
# # values returned
#
# "movingAverage": # stuff needed to compute a rolling average
# # of the anomaly scores
# {
# "windowSize": SCALAR, # the size of the averaging window
# "historicalValues": [], # list with the last windowSize anomaly
# # scores
# "total": SCALAR, # the total of the values in historicalValues
# },
#
# }
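#
# A minimal end-to-end sketch (hypothetical synthetic data, following the
# [timestamp, value, score] record format described above)::
#
#   import datetime
#   start = datetime.datetime(2013, 8, 10)
#   scores = [[start + datetime.timedelta(minutes=i), 6.0, 0.1]
#             for i in range(500)]
#   likelihoods, avgRecordList, params = estimateAnomalyLikelihoods(scores)
#   newData = [[start + datetime.timedelta(minutes=500), 6.0, 0.9]]
#   likelihoods, avgRecordList, params = updateAnomalyLikelihoods(newData,
#                                                                 params)
#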
def estimateAnomalyLikelihoods(anomalyScores,
averagingWindow=10,
skipRecords=0,
verbosity=0):
"""
Given a series of anomaly scores, compute the likelihood for each score. This
function should be called once on a bunch of historical anomaly scores for an
initial estimate of the distribution. It should be called again every so often
(say every 50 records) to update the estimate.
:param anomalyScores: a list of records. Each record is a list with the
following three elements: [timestamp, value, score]
Example::
[datetime.datetime(2013, 8, 10, 23, 0), 6.0, 1.0]
For best results, the list should be between 1000
and 10,000 records
:param averagingWindow: integer number of records to average over
:param skipRecords: integer specifying number of records to skip when
                      estimating distributions. If skipRecords >=
len(anomalyScores), a very broad distribution is returned
that makes everything pretty likely.
:param verbosity: integer controlling extent of printouts for debugging
0 = none
1 = occasional information
2 = print every record
:returns: 3-tuple consisting of:
- likelihoods
numpy array of likelihoods, one for each aggregated point
- avgRecordList
list of averaged input records
- params
a small JSON dict that contains the state of the estimator
"""
if verbosity > 1:
print "In estimateAnomalyLikelihoods."
print "Number of anomaly scores:", len(anomalyScores)
print "Skip records=", skipRecords
print "First 20:", anomalyScores[0:min(20, len(anomalyScores))]
if len(anomalyScores) == 0:
raise ValueError("Must have at least one anomalyScore")
# Compute averaged anomaly scores
aggRecordList, historicalValues, total = _anomalyScoreMovingAverage(
anomalyScores,
windowSize = averagingWindow,
verbosity = verbosity)
s = [r[2] for r in aggRecordList]
dataValues = numpy.array(s)
# Estimate the distribution of anomaly scores based on aggregated records
if len(aggRecordList) <= skipRecords:
distributionParams = nullDistribution(verbosity = verbosity)
else:
distributionParams = estimateNormal(dataValues[skipRecords:])
# HACK ALERT! The CLA model currently does not handle constant metric values
# very well (time of day encoder changes sometimes lead to unstable SDR's
# even though the metric is constant). Until this is resolved, we explicitly
# detect and handle completely flat metric values by reporting them as not
# anomalous.
s = [r[1] for r in aggRecordList]
metricValues = numpy.array(s)
metricDistribution = estimateNormal(metricValues[skipRecords:],
performLowerBoundCheck=False)
if metricDistribution["variance"] < 1.5e-5:
distributionParams = nullDistribution(verbosity = verbosity)
# Estimate likelihoods based on this distribution
likelihoods = numpy.array(dataValues, dtype=float)
for i, s in enumerate(dataValues):
likelihoods[i] = normalProbability(s, distributionParams)
# Filter likelihood values
filteredLikelihoods = numpy.array(
_filterLikelihoods(likelihoods) )
params = {
"distribution": distributionParams,
"movingAverage": {
"historicalValues": historicalValues,
"total": total,
"windowSize": averagingWindow,
},
"historicalLikelihoods":
list(likelihoods[-min(averagingWindow, len(likelihoods)):]),
}
if verbosity > 1:
print "Discovered params="
print params
print "Number of likelihoods:", len(likelihoods)
print "First 20 likelihoods:", (
filteredLikelihoods[0:min(20, len(filteredLikelihoods))] )
print "leaving estimateAnomalyLikelihoods"
return (filteredLikelihoods, aggRecordList, params)
def updateAnomalyLikelihoods(anomalyScores,
params,
verbosity=0): # pylint: disable=W0613
"""
Compute updated probabilities for anomalyScores using the given params.
:param anomalyScores: a list of records. Each record is a list with the
following three elements: [timestamp, value, score]
Example::
[datetime.datetime(2013, 8, 10, 23, 0), 6.0, 1.0]
:param params: the JSON dict returned by estimateAnomalyLikelihoods
:param verbosity: integer controlling extent of printouts for debugging
:type verbosity: int
:returns: 3-tuple consisting of:
- likelihoods
numpy array of likelihoods, one for each aggregated point
- avgRecordList
list of averaged input records
- params
an updated JSON object containing the state of this metric.
"""
if verbosity > 3:
print "In updateAnomalyLikelihoods."
print "Number of anomaly scores:", len(anomalyScores)
print "First 20:", anomalyScores[0:min(20, len(anomalyScores))]
print "Params:", params
if len(anomalyScores) == 0:
raise ValueError("Must have at least one anomalyScore")
if not isValidEstimatorParams(params):
raise ValueError("'params' is not a valid params structure")
# For backward compatibility.
if not params.has_key("historicalLikelihoods"):
params["historicalLikelihoods"] = [1.0]
# Compute moving averages of these new scores using the previous values
# as well as likelihood for these scores using the old estimator
historicalValues = params["movingAverage"]["historicalValues"]
total = params["movingAverage"]["total"]
windowSize = params["movingAverage"]["windowSize"]
aggRecordList = numpy.zeros(len(anomalyScores), dtype=float)
likelihoods = numpy.zeros(len(anomalyScores), dtype=float)
for i, v in enumerate(anomalyScores):
newAverage, historicalValues, total = (
MovingAverage.compute(historicalValues, total, v[2], windowSize)
)
aggRecordList[i] = newAverage
likelihoods[i] = normalProbability(newAverage, params["distribution"])
# Filter the likelihood values. First we prepend the historical likelihoods
# to the current set. Then we filter the values. We peel off the likelihoods
# to return and the last windowSize values to store for later.
likelihoods2 = params["historicalLikelihoods"] + list(likelihoods)
filteredLikelihoods = _filterLikelihoods(likelihoods2)
likelihoods[:] = filteredLikelihoods[-len(likelihoods):]
historicalLikelihoods = likelihoods2[-min(windowSize, len(likelihoods2)):]
# Update the estimator
newParams = {
"distribution": params["distribution"],
"movingAverage": {
"historicalValues": historicalValues,
"total": total,
"windowSize": windowSize,
},
"historicalLikelihoods": historicalLikelihoods,
}
assert len(newParams["historicalLikelihoods"]) <= windowSize
if verbosity > 3:
print "Number of likelihoods:", len(likelihoods)
print "First 20 likelihoods:", likelihoods[0:min(20, len(likelihoods))]
print "Leaving updateAnomalyLikelihoods."
return (likelihoods, aggRecordList, newParams)
def _filterLikelihoods(likelihoods,
redThreshold=0.99999, yellowThreshold=0.999):
"""
Filter the list of raw (pre-filtered) likelihoods so that we only preserve
sharp increases in likelihood. 'likelihoods' can be a numpy array of floats or
a list of floats.
:returns: A new list of floats likelihoods containing the filtered values.
"""
redThreshold = 1.0 - redThreshold
yellowThreshold = 1.0 - yellowThreshold
# The first value is untouched
filteredLikelihoods = [likelihoods[0]]
for i, v in enumerate(likelihoods[1:]):
if v <= redThreshold:
# Value is in the redzone
if likelihoods[i] > redThreshold:
# Previous value is not in redzone, so leave as-is
filteredLikelihoods.append(v)
else:
filteredLikelihoods.append(yellowThreshold)
else:
      # Value is not in the redzone, so leave as-is
filteredLikelihoods.append(v)
return filteredLikelihoods
def _anomalyScoreMovingAverage(anomalyScores,
windowSize=10,
verbosity=0,
):
"""
Given a list of anomaly scores return a list of averaged records.
anomalyScores is assumed to be a list of records of the form:
[datetime.datetime(2013, 8, 10, 23, 0), 6.0, 1.0]
  Each record in the returned list contains:
[datetime, value, averagedScore]
*Note:* we only average the anomaly score.
"""
historicalValues = []
total = 0.0
averagedRecordList = [] # Aggregated records
for record in anomalyScores:
# Skip (but log) records without correct number of entries
if len(record) != 3:
if verbosity >= 1:
print "Malformed record:", record
continue
avg, historicalValues, total = (
MovingAverage.compute(historicalValues, total, record[2], windowSize)
)
averagedRecordList.append( [record[0], record[1], avg] )
if verbosity > 2:
print "Aggregating input record:", record
print "Result:", [record[0], record[1], avg]
return averagedRecordList, historicalValues, total
def estimateNormal(sampleData, performLowerBoundCheck=True): # pylint: disable=W0613
"""
:param sampleData:
:type sampleData: Numpy array.
:param performLowerBoundCheck:
:type performLowerBoundCheck: bool
:returns: A dict containing the parameters of a normal distribution based on
the ``sampleData``.
"""
params = {
"name": "normal",
"mean": numpy.mean(sampleData),
"variance": numpy.var(sampleData),
}
if performLowerBoundCheck:
# Handle edge case of almost no deviations and super low anomaly scores. We
# find that such low anomaly means can happen, but then the slightest blip
# of anomaly score can cause the likelihood to jump up to red.
if params["mean"] < 0.03:
params["mean"] = 0.03
# Catch all for super low variance to handle numerical precision issues
if params["variance"] < 0.0003:
params["variance"] = 0.0003
# Compute standard deviation
if params["variance"] > 0:
params["stdev"] = math.sqrt(params["variance"])
else:
params["stdev"] = 0
return params
def nullDistribution(verbosity=0):
"""
:param verbosity: integer controlling extent of printouts for debugging
:type verbosity: int
:returns: A distribution that is very broad and makes every anomaly score
between 0 and 1 pretty likely.
"""
if verbosity>0:
print "Returning nullDistribution"
return {
"name": "normal",
"mean": 0.5,
"variance": 1e6,
"stdev": 1e3,
}
def normalProbability(x, distributionParams):
"""
Given the normal distribution specified in distributionParams, return
  the probability of getting samples > x.
  This is essentially the Q-function.
"""
# Distribution is symmetrical around mean
if x < distributionParams["mean"] :
xp = 2*distributionParams["mean"] - x
return 1.0 - normalProbability(xp, distributionParams)
# How many standard deviations above the mean are we, scaled by 10X for table
xs = 10*(x - distributionParams["mean"]) / distributionParams["stdev"]
  xs = int(round(xs))
if xs > 70:
return 0.0
else:
return Q[xs]
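# Example: with distributionParams {"mean": 0.5, "stdev": 0.1, ...}, a score
# of 0.7 lies two standard deviations above the mean, so xs = 20 and
# Q[20] ~= 0.0228 is returned; anything more than 7 standard deviations
# above the mean falls off the table and returns 0.0.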
def isValidEstimatorParams(p):
"""
:returns: ``True`` if ``p`` is a valid estimator params as might be returned
by ``estimateAnomalyLikelihoods()`` or ``updateAnomalyLikelihoods``,
``False`` otherwise. Just does some basic validation.
"""
if type(p) != type({}):
return False
if not p.has_key("distribution"):
return False
if not p.has_key("movingAverage"):
return False
dist = p["distribution"]
if not (dist.has_key("mean") and dist.has_key("name")
and dist.has_key("variance") and dist.has_key("stdev")
):
return False
return True
# Table lookup for Q function, from wikipedia
# http://en.wikipedia.org/wiki/Q-function
Q = numpy.zeros(71)
Q[0] = 0.500000000
Q[1] = 0.460172163
Q[2] = 0.420740291
Q[3] = 0.382088578
Q[4] = 0.344578258
Q[5] = 0.308537539
Q[6] = 0.274253118
Q[7] = 0.241963652
Q[8] = 0.211855399
Q[9] = 0.184060125
Q[10] = 0.158655254
Q[11] = 0.135666061
Q[12] = 0.115069670
Q[13] = 0.096800485
Q[14] = 0.080756659
Q[15] = 0.066807201
Q[16] = 0.054799292
Q[17] = 0.044565463
Q[18] = 0.035930319
Q[19] = 0.028716560
Q[20] = 0.022750132
Q[21] = 0.017864421
Q[22] = 0.013903448
Q[23] = 0.010724110
Q[24] = 0.008197536
Q[25] = 0.006209665
Q[26] = 0.004661188
Q[27] = 0.003466974
Q[28] = 0.002555130
Q[29] = 0.001865813
Q[30] = 0.001349898
Q[31] = 0.000967603
Q[32] = 0.000687138
Q[33] = 0.000483424
Q[34] = 0.000336929
Q[35] = 0.000232629
Q[36] = 0.000159109
Q[37] = 0.000107800
Q[38] = 0.000072348
Q[39] = 0.000048096
Q[40] = 0.000031671
# From here on use the approximation in http://cnx.org/content/m11537/latest/
Q[41] = 0.000021771135897
Q[42] = 0.000014034063752
Q[43] = 0.000008961673661
Q[44] = 0.000005668743475
Q[45] = 0.000003551942468
Q[46] = 0.000002204533058
Q[47] = 0.000001355281953
Q[48] = 0.000000825270644
Q[49] = 0.000000497747091
Q[50] = 0.000000297343903
Q[51] = 0.000000175930101
Q[52] = 0.000000103096834
Q[53] = 0.000000059836778
Q[54] = 0.000000034395590
Q[55] = 0.000000019581382
Q[56] = 0.000000011040394
Q[57] = 0.000000006164833
Q[58] = 0.000000003409172
Q[59] = 0.000000001867079
Q[60] = 0.000000001012647
Q[61] = 0.000000000543915
Q[62] = 0.000000000289320
Q[63] = 0.000000000152404
Q[64] = 0.000000000079502
Q[65] = 0.000000000041070
Q[66] = 0.000000000021010
Q[67] = 0.000000000010644
Q[68] = 0.000000000005340
Q[69] = 0.000000000002653
Q[70] = 0.000000000001305
|
chetan51/nupic
|
nupic/algorithms/anomaly_likelihood.py
|
Python
|
gpl-3.0
| 22,129
|
[
"Gaussian"
] |
ab046cc02e76ecbcbb906a218a108274a13bc886c2916e7cfe46f6e0564302d5
|
'''Module with EMC_writer class to save dense frames in EMC format'''
from __future__ import print_function
import os
import numpy as np
try:
import h5py
HDF5_MODE = True
except ImportError:
HDF5_MODE = False
class EMCWriter(object):
"""EMC file writer class
Provides interface to write dense integer photon count data to an emc file
__init__ arguments:
out_fname (string) - Output filename
num_pix (int) - Number of pixels in dense frame
The number of pixels is saved to the header and serves as a check since the
sparse format is in reference to a detector file.
Methods:
write_frame(frame, fraction=1.)
write_sparse_frame(place_ones, place_multi, count_multi)
finish_write()
The typical usage is as follows:
.. code-block:: python
with EMCWriter('photons.emc', num_pix) as emc:
for i in range(num_frames):
emc.write_frame(frame[i].ravel())
"""
def __init__(self, out_fname, num_pix, hdf5=True):
out_folder = os.path.dirname(out_fname)
self.h5_output = hdf5
if hdf5 and not HDF5_MODE:
print('Could not import h5py. Generating .emc output')
out_fname = os.path.splitext(out_fname)[0] + '.emc.'
self.h5_output = False
self.out_fname = out_fname
print('Writing emc file to', out_fname)
self.num_data = 0
self.num_pix = num_pix
self.mean_count = 0.
self.ones = []
self.multi = []
self._init_file(out_folder)
def __enter__(self):
return self
def __exit__(self, etype, val, traceback):
self.finish_write()
def _init_file(self, out_folder):
if self.h5_output:
self._h5f = h5py.File(self.out_fname, 'w')
self._h5f['num_pix'] = [self.num_pix]
vlentype = h5py.special_dtype(vlen=np.int32)
self._h5f.create_dataset('place_ones', (0,), maxshape=(None,),
chunks=(1,), dtype=vlentype)
self._h5f.create_dataset('place_multi', (0,), maxshape=(None,),
chunks=(1,), dtype=vlentype)
self._h5f.create_dataset('count_multi', (0,), maxshape=(None,),
chunks=(1,), dtype=vlentype)
self._fptrs = []
else:
temp_fnames = [os.path.join(out_folder, fname) + str(os.getpid())
for fname in ['.po.', '.pm.', '.cm.']]
self._fptrs = [open(fname, 'wb') for fname in temp_fnames]
def finish_write(self):
"""Cleanup and close emc file
This function writes the header and appends the temporary files.
It then deletes those temp files. This function should be run before
the script is exited.
"""
for fptr in self._fptrs:
fptr.close()
if self.h5_output:
self._h5f.close()
if self.num_data == 0:
print('No frames to write')
for fptr in self._fptrs:
os.system('rm ' + fptr.name)
return
self.mean_count /= self.num_data
print('num_data = %d, mean_count = %.4e' % (self.num_data, self.mean_count))
if not self.h5_output:
ones_arr = np.asarray(self.ones)
multi_arr = np.asarray(self.multi)
fptr = open(self.out_fname, 'wb')
header = np.zeros((256), dtype='i4')
header[0] = self.num_data
header[1] = self.num_pix
header.tofile(fptr)
ones_arr.astype('i4').tofile(fptr)
multi_arr.astype('i4').tofile(fptr)
fptr.close()
for fptr in self._fptrs:
os.system('cat ' + fptr.name + ' >> ' + self.out_fname)
os.system('rm ' + fptr.name)
def write_frame(self, frame, fraction=1., partition=1):
"""Write given frame to the file
Using temporary files, the sparsified version of the input is written.
Arguments:
frame (int array) - 1D dense array with photon counts in each pixel
fraction (float, optional) - What fraction of photons to write
partition (int, optional) - Partition frame into N sub-frames
If fraction is less than 1, then each photon is written randomly with \
        the probability = fraction. By default, all photons are written. This \
option is useful for performing tests with lower photons/frame.
"""
if len(frame.shape) != 1 or not np.issubdtype(frame.dtype, np.integer):
raise ValueError('write_frame needs 1D array of integers: '+
str(frame.shape)+' '+str(frame.dtype))
place_ones = np.where(frame == 1)[0]
place_multi = np.where(frame > 1)[0]
count_multi = frame[place_multi]
if fraction < 1. and partition > 1:
print('Can either split or reduce data frame')
return
elif partition > 1:
sel_ones = (np.random.random(len(place_ones))*int(partition)).astype('i4')
sel_multi = (np.random.random(count_multi.sum())*int(partition)).astype('i4')
sum_count_multi = count_multi.cumsum()
for i in range(int(partition)):
sp_count_multi = np.array([a.sum() for a in np.split(sel_multi == i, sum_count_multi)])[:-1]
sp_place_multi = place_multi[sp_count_multi > 0]
sp_count_multi = sp_count_multi[sp_count_multi > 0]
self._update_file(place_ones[sel_ones == i], sp_place_multi, sp_count_multi)
elif fraction < 1.:
sel = (np.random.random(len(place_ones)) < fraction)
place_ones = place_ones[sel]
sel = (np.random.random(count_multi.sum()) < fraction)
count_multi = np.array([a.sum() for a in np.split(sel, count_multi.cumsum())])[:-1]
place_multi = place_multi[count_multi > 0]
count_multi = count_multi[count_multi > 0]
self._update_file(place_ones, place_multi, count_multi)
else:
self._update_file(place_ones, place_multi, count_multi)
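    # Example: write_frame(frame, fraction=0.5) keeps each photon with
    # probability 0.5, while write_frame(frame, partition=4) randomly splits
    # one frame's photons into 4 consecutive sub-frame records.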
def write_sparse_frame(self, place_ones, place_multi, count_multi):
"""Write sparse frame to file
Arguments:
place_ones (int array) - List of pixel numbers with 1 photon
            place_multi (int array) - List of pixel numbers with more than 1 photon
count_multi (int array) - Number of photons in the place_multi pixels
len(place_multi) == len(count_multi)
"""
if len(place_multi) != len(count_multi):
raise ValueError('place_multi and count_multi should have equal lengths')
if not (np.issubdtype(place_ones.dtype, np.integer)
and np.issubdtype(place_multi.dtype, np.integer)
and np.issubdtype(count_multi.dtype, np.integer)):
raise ValueError('Arrays should be of integer type')
self._update_file(place_ones, place_multi, count_multi)
def _update_file(self, place_ones, place_multi, count_multi):
self.num_data += 1
self.mean_count += len(place_ones) + count_multi.sum()
self.ones.append(len(place_ones))
self.multi.append(len(place_multi))
if self.h5_output:
self._h5f['place_ones'].resize((self.num_data,))
self._h5f['place_ones'][-1] = place_ones.astype(np.int32)
self._h5f['place_multi'].resize((self.num_data,))
self._h5f['place_multi'][-1] = place_multi.astype(np.int32)
self._h5f['count_multi'].resize((self.num_data,))
self._h5f['count_multi'][-1] = count_multi.astype(np.int32)
else:
place_ones.astype(np.int32).tofile(self._fptrs[0])
place_multi.astype(np.int32).tofile(self._fptrs[1])
count_multi.astype(np.int32).tofile(self._fptrs[2])
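if __name__ == '__main__':
    # Minimal self-test sketch (hypothetical output filename): write two small
    # random dense frames and let the context manager close the file.
    demo_frames = np.random.poisson(0.1, size=(2, 64)).astype('i4')
    with EMCWriter('test_photons.emc', 64, hdf5=HDF5_MODE) as emc:
        for frame in demo_frames:
            emc.write_frame(frame)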
|
duaneloh/Dragonfly
|
utils/py_src/writeemc.py
|
Python
|
gpl-3.0
| 7,979
|
[
"MOE"
] |
7fb222019ab3d165c6279ca3b01748560d450579a0abb7b0afe5629e6880e59f
|
import numpy as np
from gpaw.utilities.blas import gemm
from gpaw.utilities import pack, unpack2
from gpaw.utilities.timing import nulltimer
class EmptyWaveFunctions:
def __nonzero__(self):
return False
def set_eigensolver(self, eigensolver):
pass
def set_orthonormalized(self, flag):
pass
def estimate_memory(self, mem):
mem.set('Unknown WFs', 0)
class WaveFunctions(EmptyWaveFunctions):
"""...
setups:
List of setup objects.
symmetry:
Symmetry object.
kpt_u:
List of **k**-point objects.
nbands: int
Number of bands.
nspins: int
Number of spins.
dtype: dtype
Data type of wave functions (float or complex).
bzk_kc: ndarray
Scaled **k**-points used for sampling the whole
Brillouin zone - values scaled to [-0.5, 0.5).
ibzk_kc: ndarray
Scaled **k**-points in the irreducible part of the
Brillouin zone.
weight_k: ndarray
Weights of the **k**-points in the irreducible part
of the Brillouin zone (summing up to 1).
kpt_comm:
MPI-communicator for parallelization over **k**-points.
"""
collinear = True
ncomp = 1
def __init__(self, gd, nvalence, setups, bd, dtype,
world, kd, timer=None):
if timer is None:
timer = nulltimer
self.gd = gd
self.nspins = kd.nspins
self.nvalence = nvalence
self.bd = bd
#self.nbands = self.bd.nbands #XXX
#self.mynbands = self.bd.mynbands #XXX
self.dtype = dtype
self.world = world
self.kd = kd
self.band_comm = self.bd.comm #XXX
self.timer = timer
self.rank_a = None
# XXX Remember to modify aseinterface when removing the following
# attributes from the wfs object
self.gamma = kd.gamma
self.kpt_comm = kd.comm
self.bzk_kc = kd.bzk_kc
self.ibzk_kc = kd.ibzk_kc
self.ibzk_qc = kd.ibzk_qc
self.weight_k = kd.weight_k
self.symmetry = kd.symmetry
self.nibzkpts = kd.nibzkpts
self.kpt_u = kd.create_k_points(self.gd)
self.eigensolver = None
self.positions_set = False
self.set_setups(setups)
def set_setups(self, setups):
self.setups = setups
def set_eigensolver(self, eigensolver):
self.eigensolver = eigensolver
def __nonzero__(self):
return True
def calculate_density_contribution(self, nt_sG):
"""Calculate contribution to pseudo density from wave functions."""
nt_sG.fill(0.0)
for kpt in self.kpt_u:
self.add_to_density_from_k_point(nt_sG, kpt)
self.band_comm.sum(nt_sG)
self.kpt_comm.sum(nt_sG)
self.timer.start('Symmetrize density')
for nt_G in nt_sG:
self.symmetry.symmetrize(nt_G, self.gd)
self.timer.stop('Symmetrize density')
def add_to_density_from_k_point(self, nt_sG, kpt):
self.add_to_density_from_k_point_with_occupation(nt_sG, kpt, kpt.f_n)
def get_orbital_density_matrix(self, a, kpt, n):
"""Add the nth band density from kpt to density matrix D_sp"""
ni = self.setups[a].ni
D_sii = np.zeros((self.nspins, ni, ni))
P_i = kpt.P_ani[a][n]
D_sii[kpt.s] += np.outer(P_i.conj(), P_i).real
D_sp = [pack(D_ii) for D_ii in D_sii]
return D_sp
def calculate_atomic_density_matrices_k_point(self, D_sii, kpt, a, f_n):
if kpt.rho_MM is not None:
P_Mi = kpt.P_aMi[a]
#P_Mi = kpt.P_aMi_sparse[a]
#ind = get_matrix_index(kpt.P_aMi_index[a])
#D_sii[kpt.s] += np.dot(np.dot(P_Mi.T.conj(), kpt.rho_MM),
# P_Mi).real
rhoP_Mi = np.zeros_like(P_Mi)
D_ii = np.zeros(D_sii[kpt.s].shape, kpt.rho_MM.dtype)
#gemm(1.0, P_Mi, kpt.rho_MM[ind.T, ind], 0.0, tmp)
gemm(1.0, P_Mi, kpt.rho_MM, 0.0, rhoP_Mi)
gemm(1.0, rhoP_Mi, P_Mi.T.conj().copy(), 0.0, D_ii)
D_sii[kpt.s] += D_ii.real
#D_sii[kpt.s] += dot(dot(P_Mi.T.conj().copy(),
# kpt.rho_MM[ind.T, ind]), P_Mi).real
else:
P_ni = kpt.P_ani[a]
D_sii[kpt.s] += np.dot(P_ni.T.conj() * f_n, P_ni).real
if hasattr(kpt, 'c_on'):
for ne, c_n in zip(kpt.ne_o, kpt.c_on):
d_nn = ne * np.outer(c_n.conj(), c_n)
D_sii[kpt.s] += np.dot(P_ni.T.conj(), np.dot(d_nn, P_ni)).real
def calculate_atomic_density_matrices(self, D_asp):
"""Calculate atomic density matrices from projections."""
f_un = [kpt.f_n for kpt in self.kpt_u]
self.calculate_atomic_density_matrices_with_occupation(D_asp, f_un)
def calculate_atomic_density_matrices_with_occupation(self, D_asp, f_un):
"""Calculate atomic density matrices from projections with
custom occupation f_un."""
# Varying f_n used in calculation of response part of GLLB-potential
for a, D_sp in D_asp.items():
ni = self.setups[a].ni
D_sii = np.zeros((len(D_sp), ni, ni))
for f_n, kpt in zip(f_un, self.kpt_u):
self.calculate_atomic_density_matrices_k_point(D_sii, kpt, a,
f_n)
D_sp[:] = [pack(D_ii) for D_ii in D_sii]
self.band_comm.sum(D_sp)
self.kpt_comm.sum(D_sp)
self.symmetrize_atomic_density_matrices(D_asp)
def symmetrize_atomic_density_matrices(self, D_asp):
if len(self.symmetry.op_scc) > 1:
all_D_asp = []
for a, setup in enumerate(self.setups):
D_sp = D_asp.get(a)
if D_sp is None:
ni = setup.ni
D_sp = np.empty((self.nspins * self.ncomp**2,
ni * (ni + 1) // 2))
self.gd.comm.broadcast(D_sp, self.rank_a[a])
all_D_asp.append(D_sp)
for s in range(self.nspins):
D_aii = [unpack2(D_sp[s]) for D_sp in all_D_asp]
for a, D_sp in D_asp.items():
setup = self.setups[a]
D_sp[s] = pack(setup.symmetrize(a, D_aii,
self.symmetry.a_sa))
def set_positions(self, spos_ac):
self.positions_set = False
rank_a = self.gd.get_ranks_from_positions(spos_ac)
"""
# If both old and new atomic ranks are present, start a blank dict if
# it previously didn't exist but it will needed for the new atoms.
if (self.rank_a is not None and rank_a is not None and
self.kpt_u[0].P_ani is None and (rank_a == self.gd.comm.rank).any()):
for kpt in self.kpt_u:
kpt.P_ani = {}
"""
if self.rank_a is not None and self.kpt_u[0].P_ani is not None:
self.timer.start('Redistribute')
requests = []
mynks = len(self.kpt_u)
flags = (self.rank_a != rank_a)
my_incoming_atom_indices = np.argwhere(np.bitwise_and(flags, \
rank_a == self.gd.comm.rank)).ravel()
my_outgoing_atom_indices = np.argwhere(np.bitwise_and(flags, \
self.rank_a == self.gd.comm.rank)).ravel()
for a in my_incoming_atom_indices:
# Get matrix from old domain:
ni = self.setups[a].ni
P_uni = np.empty((mynks, self.bd.mynbands, ni), self.dtype)
requests.append(self.gd.comm.receive(P_uni, self.rank_a[a],
tag=a, block=False))
for myu, kpt in enumerate(self.kpt_u):
assert a not in kpt.P_ani
kpt.P_ani[a] = P_uni[myu]
for a in my_outgoing_atom_indices:
# Send matrix to new domain:
P_uni = np.array([kpt.P_ani.pop(a) for kpt in self.kpt_u])
requests.append(self.gd.comm.send(P_uni, rank_a[a],
tag=a, block=False))
self.gd.comm.waitall(requests)
self.timer.stop('Redistribute')
self.rank_a = rank_a
if self.symmetry is not None:
self.symmetry.check(spos_ac)
def allocate_arrays_for_projections(self, my_atom_indices):
if not self.positions_set and self.kpt_u[0].P_ani is not None:
# Projections have been read from file - don't delete them!
pass
else:
for kpt in self.kpt_u:
kpt.P_ani = {}
for a in my_atom_indices:
ni = self.setups[a].ni
for kpt in self.kpt_u:
kpt.P_ani[a] = np.empty((self.bd.mynbands, ni), self.dtype)
def collect_eigenvalues(self, k, s):
return self.collect_array('eps_n', k, s)
def collect_occupations(self, k, s):
return self.collect_array('f_n', k, s)
def collect_array(self, name, k, s, subset=None):
"""Helper method for collect_eigenvalues and collect_occupations.
For the parallel case find the rank in kpt_comm that contains
the (k,s) pair, for this rank, collect on the corresponding
domain a full array on the domain master and send this to the
global master."""
kpt_u = self.kpt_u
kpt_rank, u = self.kd.get_rank_and_index(s, k)
if self.kpt_comm.rank == kpt_rank:
a_nx = getattr(kpt_u[u], name)
if subset is not None:
a_nx = a_nx[subset]
# Domain master send this to the global master
if self.gd.comm.rank == 0:
if self.band_comm.size == 1:
if kpt_rank == 0:
return a_nx
else:
self.kpt_comm.ssend(a_nx, 0, 1301)
else:
b_nx = self.bd.collect(a_nx)
if self.band_comm.rank == 0:
if kpt_rank == 0:
return b_nx
else:
self.kpt_comm.ssend(b_nx, 0, 1301)
elif self.world.rank == 0 and kpt_rank != 0:
# Only used to determine shape and dtype of receiving buffer:
a_nx = getattr(kpt_u[0], name)
if subset is not None:
a_nx = a_nx[subset]
b_nx = np.zeros((self.bd.nbands,) + a_nx.shape[1:],
dtype=a_nx.dtype)
self.kpt_comm.receive(b_nx, kpt_rank, 1301)
return b_nx
def collect_auxiliary(self, value, k, s, shape=1, dtype=float):
"""Helper method for collecting band-independent scalars/arrays.
For the parallel case find the rank in kpt_comm that contains
the (k,s) pair, for this rank, collect on the corresponding
domain a full array on the domain master and send this to the
global master."""
kpt_u = self.kpt_u
kpt_rank, u = self.kd.get_rank_and_index(s, k)
if self.kpt_comm.rank == kpt_rank:
if isinstance(value, str):
a_o = getattr(kpt_u[u], value)
else:
a_o = value[u] # assumed list
# Make sure data is a mutable object
a_o = np.asarray(a_o)
if a_o.dtype is not dtype:
a_o = a_o.astype(dtype)
# Domain master send this to the global master
if self.gd.comm.rank == 0:
if kpt_rank == 0:
return a_o
else:
self.kpt_comm.send(a_o, 0, 1302)
elif self.world.rank == 0 and kpt_rank != 0:
b_o = np.zeros(shape, dtype=dtype)
self.kpt_comm.receive(b_o, kpt_rank, 1302)
return b_o
def collect_projections(self, k, s):
"""Helper method for collecting projector overlaps across domains.
For the parallel case find the rank in kpt_comm that contains
the (k,s) pair, for this rank, send to the global master."""
kpt_rank, u = self.kd.get_rank_and_index(s, k)
natoms = len(self.rank_a) # it's a hack...
nproj = sum([setup.ni for setup in self.setups])
if self.world.rank == 0:
if kpt_rank == 0:
P_ani = self.kpt_u[u].P_ani
mynu = len(self.kpt_u)
all_P_ni = np.empty((self.bd.nbands, nproj), self.dtype)
for band_rank in range(self.band_comm.size):
nslice = self.bd.get_slice(band_rank)
i = 0
for a in range(natoms):
ni = self.setups[a].ni
if kpt_rank == 0 and band_rank == 0 and a in P_ani:
P_ni = P_ani[a]
else:
P_ni = np.empty((self.bd.mynbands, ni), self.dtype)
world_rank = (self.rank_a[a] +
kpt_rank * self.gd.comm.size *
self.band_comm.size +
band_rank * self.gd.comm.size)
self.world.receive(P_ni, world_rank, 1303 + a)
all_P_ni[nslice, i:i + ni] = P_ni
i += ni
assert i == nproj
return all_P_ni
elif self.kpt_comm.rank == kpt_rank: # plain else works too...
P_ani = self.kpt_u[u].P_ani
for a in range(natoms):
if a in P_ani:
self.world.ssend(P_ani[a], 0, 1303 + a)
def get_wave_function_array(self, n, k, s, realspace=True):
"""Return pseudo-wave-function array on master.
n: int
Global band index.
k: int
Global IBZ k-point index.
s: int
Spin index (0 or 1).
realspace: bool
Transform plane wave or LCAO expansion coefficients to real-space.
For the parallel case find the ranks in kd.comm and bd.comm
that contains to (n, k, s), and collect on the corresponding
domain a full array on the domain master and send this to the
global master."""
kpt_rank, u = self.kd.get_rank_and_index(s, k)
band_rank, myn = self.bd.who_has(n)
size = self.world.size
rank = self.world.rank
if (self.kpt_comm.rank == kpt_rank and
self.band_comm.rank == band_rank):
psit_G = self._get_wave_function_array(u, myn, realspace)
if realspace:
psit_G = self.gd.collect(psit_G)
if rank == 0:
return psit_G
# Domain master send this to the global master
if self.gd.comm.rank == 0:
self.world.ssend(psit_G, 0, 1398)
if rank == 0:
# allocate full wavefunction and receive
psit_G = self.empty(dtype=self.dtype, global_array=True,
realspace=realspace)
world_rank = (kpt_rank * self.gd.comm.size *
self.band_comm.size +
band_rank * self.gd.comm.size)
self.world.receive(psit_G, world_rank, 1398)
return psit_G
|
ajylee/gpaw-rtxs
|
gpaw/wavefunctions/base.py
|
Python
|
gpl-3.0
| 15,526
|
[
"GPAW"
] |
e4d73aeb71c49ac7e9021dd53e59cace2bf5bd8f52ababa1d1de295c1ee3c389
|
# Version: 0.12
"""
The Versioneer
==============
* like a rocketeer, but for versions!
* https://github.com/warner/python-versioneer
* Brian Warner
* License: Public Domain
* Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, and pypy
[![Build Status](https://travis-ci.org/warner/python-versioneer.png?branch=master)](https://travis-ci.org/warner/python-versioneer)
This is a tool for managing a recorded version number in distutils-based
python projects. The goal is to remove the tedious and error-prone "update
the embedded version string" step from your release process. Making a new
release should be as easy as recording a new tag in your version-control
system, and maybe making new tarballs.
## Quick Install
* `pip install versioneer` to somewhere on your $PATH
* run `versioneer-installer` in your source tree: this installs `versioneer.py`
* follow the instructions below (also in the `versioneer.py` docstring)
## Version Identifiers
Source trees come from a variety of places:
* a version-control system checkout (mostly used by developers)
* a nightly tarball, produced by build automation
* a snapshot tarball, produced by a web-based VCS browser, like github's
"tarball from tag" feature
* a release tarball, produced by "setup.py sdist", distributed through PyPI
Within each source tree, the version identifier (either a string or a number,
this tool is format-agnostic) can come from a variety of places:
* ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows
about recent "tags" and an absolute revision-id
* the name of the directory into which the tarball was unpacked
* an expanded VCS keyword ($Id$, etc)
* a `_version.py` created by some earlier build step
For released software, the version identifier is closely related to a VCS
tag. Some projects use tag names that include more than just the version
string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool
needs to strip the tag prefix to extract the version identifier. For
unreleased software (between tags), the version identifier should provide
enough information to help developers recreate the same tree, while also
giving them an idea of roughly how old the tree is (after version 1.2, before
version 1.3). Many VCS systems can report a description that captures this,
for example 'git describe --tags --dirty --always' reports things like
"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the
0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has
uncommitted changes).
The version identifier is used for multiple purposes:
* to allow the module to self-identify its version: `myproject.__version__`
* to choose a name and prefix for a 'setup.py sdist' tarball
## Theory of Operation
Versioneer works by adding a special `_version.py` file into your source
tree, where your `__init__.py` can import it. This `_version.py` knows how to
dynamically ask the VCS tool for version information at import time. However,
when you use "setup.py build" or "setup.py sdist", `_version.py` in the new
copy is replaced by a small static file that contains just the generated
version data.
`_version.py` also contains `$Revision$` markers, and the installation
process marks `_version.py` to have this marker rewritten with a tag name
during the "git archive" command. As a result, generated tarballs will
contain enough information to get the proper version.
## Installation
First, decide on values for the following configuration variables:
* `VCS`: the version control system you use. Currently accepts "git".
* `versionfile_source`:
A project-relative pathname into which the generated version strings should
be written. This is usually a `_version.py` next to your project's main
`__init__.py` file, so it can be imported at runtime. If your project uses
`src/myproject/__init__.py`, this should be `src/myproject/_version.py`.
This file should be checked in to your VCS as usual: the copy created below
by `setup.py versioneer` will include code that parses expanded VCS
keywords in generated tarballs. The 'build' and 'sdist' commands will
replace it with a copy that has just the calculated version string.
This must be set even if your project does not have any modules (and will
therefore never import `_version.py`), since "setup.py sdist" -based trees
still need somewhere to record the pre-calculated version strings. Anywhere
in the source tree should do. If there is a `__init__.py` next to your
`_version.py`, the `setup.py versioneer` command (described below) will
append some `__version__`-setting assignments, if they aren't already
present.
* `versionfile_build`:
Like `versionfile_source`, but relative to the build directory instead of
the source directory. These will differ when your setup.py uses
'package_dir='. If you have `package_dir={'myproject': 'src/myproject'}`,
then you will probably have `versionfile_build='myproject/_version.py'` and
`versionfile_source='src/myproject/_version.py'`.
If this is set to None, then `setup.py build` will not attempt to rewrite
any `_version.py` in the built tree. If your project does not have any
libraries (e.g. if it only builds a script), then you should use
`versionfile_build = None` and override `distutils.command.build_scripts`
to explicitly insert a copy of `versioneer.get_version()` into your
generated script.
* `tag_prefix`:
a string, like 'PROJECTNAME-', which appears at the start of all VCS tags.
If your tags look like 'myproject-1.2.0', then you should use
tag_prefix='myproject-'. If you use unprefixed tags like '1.2.0', this
should be an empty string.
* `parentdir_prefix`:
a string, frequently the same as tag_prefix, which appears at the start of
all unpacked tarball filenames. If your tarball unpacks into
'myproject-1.2.0', this should be 'myproject-'.
This tool provides one script, named `versioneer-installer`. That script does
one thing: write a copy of `versioneer.py` into the current directory.
To versioneer-enable your project:
* 1: Run `versioneer-installer` to copy `versioneer.py` into the top of your
source tree.
* 2: add the following lines to the top of your `setup.py`, with the
configuration values you decided earlier:
import versioneer
versioneer.VCS = 'git'
versioneer.versionfile_source = 'src/myproject/_version.py'
versioneer.versionfile_build = 'myproject/_version.py'
versioneer.tag_prefix = '' # tags are like 1.2.0
versioneer.parentdir_prefix = 'myproject-' # dirname like 'myproject-1.2.0'
* 3: add the following arguments to the setup() call in your setup.py:
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
* 4: now run `setup.py versioneer`, which will create `_version.py`, and will
modify your `__init__.py` (if one exists next to `_version.py`) to define
`__version__` (by calling a function from `_version.py`). It will also
modify your `MANIFEST.in` to include both `versioneer.py` and the generated
`_version.py` in sdist tarballs.
* 5: commit these changes to your VCS. To make sure you won't forget,
`setup.py versioneer` will mark everything it touched for addition.
## Post-Installation Usage
Once established, all uses of your tree from a VCS checkout should get the
current version string. All generated tarballs should include an embedded
version string (so users who unpack them will not need a VCS tool installed).
If you distribute your project through PyPI, then the release process should
boil down to two steps:
* 1: git tag 1.0
* 2: python setup.py register sdist upload
If you distribute it through github (i.e. users use github to generate
tarballs with `git archive`), the process is:
* 1: git tag 1.0
* 2: git push; git push --tags
Currently, all version strings must be based upon a tag. Versioneer will
report "unknown" until your tree has at least one tag in its history. This
restriction will be fixed eventually (see issue #12).
## Version-String Flavors
Code which uses Versioneer can learn about its version string at runtime by
importing `_version` from your main `__init__.py` file and running the
`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can
import the top-level `versioneer.py` and run `get_versions()`.
Both functions return a dictionary with different keys for different flavors
of the version string:
* `['version']`: condensed tag+distance+shortid+dirty identifier. For git,
this uses the output of `git describe --tags --dirty --always` but strips
the tag_prefix. For example "0.11-2-g1076c97-dirty" indicates that the tree
is like the "1076c97" commit but has uncommitted changes ("-dirty"), and
that this commit is two revisions ("-2-") beyond the "0.11" tag. For
released software (exactly equal to a known tag), the identifier will only
contain the stripped tag, e.g. "0.11".
* `['full']`: detailed revision identifier. For Git, this is the full SHA1
commit id, followed by "-dirty" if the tree contains uncommitted changes,
e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac-dirty".
Some variants are more useful than others. Including `full` in a bug report
should allow developers to reconstruct the exact code being tested (or
indicate the presence of local changes that should be shared with the
developers). `version` is suitable for display in an "about" box or a CLI
`--version` output: it can be easily compared against release notes and lists
of bugs fixed in various releases.
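For example, a tree two commits past a "1.0" tag might report (illustrative
values only; the hash is the sample id reused from above):

    versioneer.get_versions()
    # {'version': '1.0-2-g1076c97',
    #  'full': '1076c978a8d3cfc70f408fe5974aa6c092c949ac'}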
In the future, this will also include a
[PEP-0440](http://legacy.python.org/dev/peps/pep-0440/) -compatible flavor
(e.g. `1.2.post0.dev123`). This loses a lot of information (and has no room
for a hash-based revision id), but is safe to use in a `setup.py`
"`version=`" argument. It also enables tools like *pip* to compare version
strings and evaluate compatibility constraint declarations.
The `setup.py versioneer` command adds the following text to your
`__init__.py` to place a basic version in `YOURPROJECT.__version__`:
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
## Updating Versioneer
To upgrade your project to a new release of Versioneer, do the following:
* install the new Versioneer (`pip install -U versioneer` or equivalent)
* re-run `versioneer-installer` in your source tree to replace your copy of
`versioneer.py`
* edit `setup.py`, if necessary, to include any new configuration settings
indicated by the release notes
* re-run `setup.py versioneer` to replace `SRC/_version.py`
* commit any changed files
### Upgrading from 0.10 to 0.11
You must add a `versioneer.VCS = "git"` to your `setup.py` before re-running
`setup.py versioneer`. This will enable the use of additional version-control
systems (SVN, etc) in the future.
### Upgrading from 0.11 to 0.12
Nothing special.
## Future Directions
This tool is designed to be easily extended to other version-control
systems: all VCS-specific components are in separate directories like
src/git/ . The top-level `versioneer.py` script is assembled from these
components by running make-versioneer.py . In the future, make-versioneer.py
will take a VCS name as an argument, and will construct a version of
`versioneer.py` that is specific to the given VCS. It might also take the
configuration arguments that are currently provided manually during
installation by editing setup.py . Alternatively, it might go the other
direction and include code from all supported VCS systems, reducing the
number of intermediate scripts.
## License
To make Versioneer easier to embed, all its code is hereby released into the
public domain. The `_version.py` that it creates is also in the public
domain.
"""
import os
import sys
import re
import subprocess
import errno
from distutils.core import Command
from distutils.command.sdist import sdist as _sdist
from distutils.command.build import build as _build
# these configuration settings will be overridden by setup.py after it
# imports us
versionfile_source = 'schemator/_version.py'
versionfile_build = 'schemator/_version.py'
tag_prefix = 'v'
parentdir_prefix = 'schemator'
VCS = 'git'
# these dictionaries contain VCS-specific tools
LONG_VERSION_PY = {}
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
assert isinstance(commands, list)
p = None
for c in commands:
try:
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % args[0])
print(e)
return None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version >= '3':
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % args[0])
return None
return stdout
LONG_VERSION_PY['git'] = '''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.12 (https://github.com/warner/python-versioneer)
# these strings will be replaced by git during git-archive
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
# these strings are filled in when 'setup.py versioneer' creates _version.py
tag_prefix = "%(TAG_PREFIX)s"
parentdir_prefix = "%(PARENTDIR_PREFIX)s"
versionfile_source = "%(VERSIONFILE_SOURCE)s"
import os, sys, re, subprocess, errno
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
assert isinstance(commands, list)
p = None
for c in commands:
try:
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% args[0])
print(e)
return None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version >= '3':
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% args[0])
return None
return stdout
def versions_from_parentdir(parentdir_prefix, root, verbose=False):
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%%s', but '%%s' doesn't start with prefix '%%s'" %%
(root, dirname, parentdir_prefix))
return None
return {"version": dirname[len(parentdir_prefix):], "full": ""}
def git_get_keywords(versionfile_abs):
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs,"r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
def git_versions_from_keywords(keywords, tag_prefix, verbose=False):
if not keywords:
return {} # keyword-finding function failed to find keywords
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
return {} # unexpanded, so not in an unpacked git-archive tarball
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs-tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return { "version": r,
"full": keywords["full"].strip() }
# no suitable tags, so we use the full revision id
if verbose:
print("no suitable tags, using full revision id")
return { "version": keywords["full"].strip(),
"full": keywords["full"].strip() }
def git_versions_from_vcs(tag_prefix, root, verbose=False):
# this runs 'git' from the root of the source tree. This only gets called
# if the git-archive 'subst' keywords were *not* expanded, and
# _version.py hasn't already been rewritten with a short version string,
# meaning we're inside a checked out source tree.
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %%s" %% root)
return {}
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
stdout = run_command(GITS, ["describe", "--tags", "--dirty", "--always"],
cwd=root)
if stdout is None:
return {}
if not stdout.startswith(tag_prefix):
if verbose:
print("tag '%%s' doesn't start with prefix '%%s'" %% (stdout, tag_prefix))
return {}
tag = stdout[len(tag_prefix):]
stdout = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if stdout is None:
return {}
full = stdout.strip()
if tag.endswith("-dirty"):
full += "-dirty"
return {"version": tag, "full": full}
def get_versions(default={"version": "unknown", "full": ""}, verbose=False):
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
keywords = { "refnames": git_refnames, "full": git_full }
ver = git_versions_from_keywords(keywords, tag_prefix, verbose)
if ver:
return ver
try:
root = os.path.abspath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in range(len(versionfile_source.split(os.sep))):
root = os.path.dirname(root)
except NameError:
return default
return (git_versions_from_vcs(tag_prefix, root, verbose)
or versions_from_parentdir(parentdir_prefix, root, verbose)
or default)
'''
def git_get_keywords(versionfile_abs):
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
def git_versions_from_keywords(keywords, tag_prefix, verbose=False):
if not keywords:
return {} # keyword-finding function failed to find keywords
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
return {} # unexpanded, so not in an unpacked git-archive tarball
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full": keywords["full"].strip()}
# no suitable tags, so we use the full revision id
if verbose:
print("no suitable tags, using full revision id")
return {"version": keywords["full"].strip(),
"full": keywords["full"].strip()}
def git_versions_from_vcs(tag_prefix, root, verbose=False):
# this runs 'git' from the root of the source tree. This only gets called
# if the git-archive 'subst' keywords were *not* expanded, and
# _version.py hasn't already been rewritten with a short version string,
# meaning we're inside a checked out source tree.
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %s" % root)
return {}
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
stdout = run_command(GITS, ["describe", "--tags", "--dirty", "--always"],
cwd=root)
if stdout is None:
return {}
if not stdout.startswith(tag_prefix):
if verbose:
print("tag '%s' doesn't start with prefix '%s'" %
(stdout, tag_prefix))
return {}
tag = stdout[len(tag_prefix):]
stdout = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if stdout is None:
return {}
full = stdout.strip()
if tag.endswith("-dirty"):
full += "-dirty"
return {"version": tag, "full": full}
def do_vcs_install(manifest_in, versionfile_source, ipy):
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
files = [manifest_in, versionfile_source]
if ipy:
files.append(ipy)
try:
me = __file__
if me.endswith(".pyc") or me.endswith(".pyo"):
me = os.path.splitext(me)[0] + ".py"
versioneer_file = os.path.relpath(me)
except NameError:
versioneer_file = "versioneer.py"
files.append(versioneer_file)
present = False
try:
f = open(".gitattributes", "r")
for line in f.readlines():
if line.strip().startswith(versionfile_source):
if "export-subst" in line.strip().split()[1:]:
present = True
f.close()
except EnvironmentError:
pass
if not present:
f = open(".gitattributes", "a+")
f.write("%s export-subst\n" % versionfile_source)
f.close()
files.append(".gitattributes")
run_command(GITS, ["add", "--"] + files)
def versions_from_parentdir(parentdir_prefix, root, verbose=False):
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'" %
(root, dirname, parentdir_prefix))
return None
return {"version": dirname[len(parentdir_prefix):], "full": ""}
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.12) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
version_version = '%(version)s'
version_full = '%(full)s'
def get_versions(default={}, verbose=False):
return {'version': version_version, 'full': version_full}
"""
DEFAULT = {"version": "unknown", "full": "unknown"}
def versions_from_file(filename):
versions = {}
try:
with open(filename) as f:
for line in f.readlines():
mo = re.match("version_version = '([^']+)'", line)
if mo:
versions["version"] = mo.group(1)
mo = re.match("version_full = '([^']+)'", line)
if mo:
versions["full"] = mo.group(1)
except EnvironmentError:
return {}
return versions
def write_to_version_file(filename, versions):
with open(filename, "w") as f:
f.write(SHORT_VERSION_PY % versions)
print("set %s to '%s'" % (filename, versions["version"]))
def get_root():
try:
return os.path.dirname(os.path.abspath(__file__))
except NameError:
return os.path.dirname(os.path.abspath(sys.argv[0]))
def vcs_function(vcs, suffix):
return getattr(sys.modules[__name__], '%s_%s' % (vcs, suffix), None)
def get_versions(default=DEFAULT, verbose=False):
# returns dict with two keys: 'version' and 'full'
assert versionfile_source is not None, "please set versioneer.versionfile_source"
assert tag_prefix is not None, "please set versioneer.tag_prefix"
assert parentdir_prefix is not None, "please set versioneer.parentdir_prefix"
assert VCS is not None, "please set versioneer.VCS"
# I am in versioneer.py, which must live at the top of the source tree,
# which we use to compute the root directory. py2exe/bbfreeze/non-CPython
# don't have __file__, in which case we fall back to sys.argv[0] (which
# ought to be the setup.py script). We prefer __file__ since that's more
# robust in cases where setup.py was invoked in some weird way (e.g. pip)
root = get_root()
versionfile_abs = os.path.join(root, versionfile_source)
# extract version from first of _version.py, VCS command (e.g. 'git
# describe'), parentdir. This is meant to work for developers using a
# source checkout, for users of a tarball created by 'setup.py sdist',
# and for users of a tarball/zipball created by 'git archive' or github's
# download-from-tag feature or the equivalent in other VCSes.
get_keywords_f = vcs_function(VCS, "get_keywords")
versions_from_keywords_f = vcs_function(VCS, "versions_from_keywords")
if get_keywords_f and versions_from_keywords_f:
vcs_keywords = get_keywords_f(versionfile_abs)
ver = versions_from_keywords_f(vcs_keywords, tag_prefix)
if ver:
if verbose:
print("got version from expanded keyword %s" % ver)
return ver
ver = versions_from_file(versionfile_abs)
if ver:
if verbose:
print("got version from file %s %s" % (versionfile_abs, ver))
return ver
versions_from_vcs_f = vcs_function(VCS, "versions_from_vcs")
if versions_from_vcs_f:
ver = versions_from_vcs_f(tag_prefix, root, verbose)
if ver:
if verbose:
print("got version from VCS %s" % ver)
return ver
ver = versions_from_parentdir(parentdir_prefix, root, verbose)
if ver:
if verbose:
print("got version from parentdir %s" % ver)
return ver
if verbose:
print("got version from default %s" % default)
return default
def get_version(verbose=False):
return get_versions(verbose=verbose)["version"]
class cmd_version(Command):
description = "report generated version string"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
ver = get_version(verbose=True)
print("Version is currently: %s" % ver)
class cmd_build(_build):
def run(self):
versions = get_versions(verbose=True)
_build.run(self)
# now locate _version.py in the new build/ directory and replace it
# with an updated value
if versionfile_build:
target_versionfile = os.path.join(
self.build_lib, versionfile_build)
print("UPDATING %s" % target_versionfile)
os.unlink(target_versionfile)
with open(target_versionfile, "w") as f:
f.write(SHORT_VERSION_PY % versions)
if 'cx_Freeze' in sys.modules: # cx_freeze enabled?
from cx_Freeze.dist import build_exe as _build_exe
class cmd_build_exe(_build_exe):
def run(self):
versions = get_versions(verbose=True)
target_versionfile = versionfile_source
print("UPDATING %s" % target_versionfile)
os.unlink(target_versionfile)
with open(target_versionfile, "w") as f:
f.write(SHORT_VERSION_PY % versions)
_build_exe.run(self)
os.unlink(target_versionfile)
with open(versionfile_source, "w") as f:
assert VCS is not None, "please set versioneer.VCS"
LONG = LONG_VERSION_PY[VCS]
f.write(LONG % {"DOLLAR": "$",
"TAG_PREFIX": tag_prefix,
"PARENTDIR_PREFIX": parentdir_prefix,
"VERSIONFILE_SOURCE": versionfile_source,
})
class cmd_sdist(_sdist):
def run(self):
versions = get_versions(verbose=True)
self._versioneer_generated_versions = versions
# unless we update this, the command will keep using the old version
self.distribution.metadata.version = versions["version"]
return _sdist.run(self)
def make_release_tree(self, base_dir, files):
_sdist.make_release_tree(self, base_dir, files)
# now locate _version.py in the new base_dir directory (remembering
# that it may be a hardlink) and replace it with an updated value
target_versionfile = os.path.join(base_dir, versionfile_source)
print("UPDATING %s" % target_versionfile)
os.unlink(target_versionfile)
with open(target_versionfile, "w") as f:
f.write(SHORT_VERSION_PY % self._versioneer_generated_versions)
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
class cmd_update_files(Command):
description = "install/upgrade Versioneer files: __init__.py SRC/_version.py"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
print(" creating %s" % versionfile_source)
with open(versionfile_source, "w") as f:
assert VCS is not None, "please set versioneer.VCS"
LONG = LONG_VERSION_PY[VCS]
f.write(LONG % {"DOLLAR": "$",
"TAG_PREFIX": tag_prefix,
"PARENTDIR_PREFIX": parentdir_prefix,
"VERSIONFILE_SOURCE": versionfile_source,
})
ipy = os.path.join(os.path.dirname(versionfile_source), "__init__.py")
if os.path.exists(ipy):
try:
with open(ipy, "r") as f:
old = f.read()
except EnvironmentError:
old = ""
if INIT_PY_SNIPPET not in old:
print(" appending to %s" % ipy)
with open(ipy, "a") as f:
f.write(INIT_PY_SNIPPET)
else:
print(" %s unmodified" % ipy)
else:
print(" %s doesn't exist, ok" % ipy)
ipy = None
# Make sure both the top-level "versioneer.py" and versionfile_source
# (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
# they'll be copied into source distributions. Pip won't be able to
# install the package without this.
manifest_in = os.path.join(get_root(), "MANIFEST.in")
simple_includes = set()
try:
with open(manifest_in, "r") as f:
for line in f:
if line.startswith("include "):
for include in line.split()[1:]:
simple_includes.add(include)
except EnvironmentError:
pass
# That doesn't cover everything MANIFEST.in can do
# (http://docs.python.org/2/distutils/sourcedist.html#commands), so
# it might give some false negatives. Appending redundant 'include'
# lines is safe, though.
if "versioneer.py" not in simple_includes:
print(" appending 'versioneer.py' to MANIFEST.in")
with open(manifest_in, "a") as f:
f.write("include versioneer.py\n")
else:
print(" 'versioneer.py' already in MANIFEST.in")
if versionfile_source not in simple_includes:
print(" appending versionfile_source ('%s') to MANIFEST.in" %
versionfile_source)
with open(manifest_in, "a") as f:
f.write("include %s\n" % versionfile_source)
else:
print(" versionfile_source already in MANIFEST.in")
# Make VCS-specific changes. For git, this means creating/changing
# .gitattributes to mark _version.py for export-time keyword
# substitution.
do_vcs_install(manifest_in, versionfile_source, ipy)
def get_cmdclass():
cmds = {'version': cmd_version,
'versioneer': cmd_update_files,
'build': cmd_build,
'sdist': cmd_sdist,
}
if 'cx_Freeze' in sys.modules: # cx_freeze enabled?
cmds['build_exe'] = cmd_build_exe
del cmds['build']
return cmds
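# A hedged setup.py sketch (the project name 'myproject' and its paths are
# illustrative) showing how the hooks above are typically wired together:
#
#   import versioneer
#   versioneer.VCS = 'git'
#   versioneer.versionfile_source = 'myproject/_version.py'
#   versioneer.versionfile_build = 'myproject/_version.py'
#   versioneer.tag_prefix = 'v'
#   versioneer.parentdir_prefix = 'myproject-'
#
#   from distutils.core import setup
#   setup(name='myproject',
#         version=versioneer.get_version(),
#         cmdclass=versioneer.get_cmdclass())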
|
hgenru/python-schemator
|
versioneer.py
|
Python
|
mit
| 36,719
|
[
"Brian"
] |
7b7acba4d79b2eef35de501d0a2024755499d96bd17940828daed1f0707ab351
|
#!/usr/bin/env python
'''
setup board.h for chibios
'''
import argparse, sys, fnmatch, os, dma_resolver, shlex, pickle, re
import shutil
parser = argparse.ArgumentParser("chibios_pins.py")
parser.add_argument(
'-D', '--outdir', type=str, default=None, help='Output directory')
parser.add_argument(
'--bootloader', action='store_true', default=False, help='configure for bootloader')
parser.add_argument(
'hwdef', type=str, default=None, help='hardware definition file')
args = parser.parse_args()
# output variables for each pin
f4f7_vtypes = ['MODER', 'OTYPER', 'OSPEEDR', 'PUPDR', 'ODR', 'AFRL', 'AFRH']
f1_vtypes = ['CRL', 'CRH', 'ODR']
f1_input_sigs = ['RX', 'MISO', 'CTS']
f1_output_sigs = ['TX', 'MOSI', 'SCK', 'RTS', 'CH1', 'CH2', 'CH3', 'CH4']
af_labels = ['USART', 'UART', 'SPI', 'I2C', 'SDIO', 'SDMMC', 'OTG', 'JT', 'TIM', 'CAN']
vtypes = []
# number of pins in each port
pincount = {
'A': 16,
'B': 16,
'C': 16,
'D': 16,
'E': 16,
'F': 16,
'G': 16,
'H': 2,
'I': 0,
'J': 0,
'K': 0
}
ports = pincount.keys()
portmap = {}
# dictionary of all config lines, indexed by first word
config = {}
# list of all pins in config file order
allpins = []
# list of configs by type
bytype = {}
# list of configs by label
bylabel = {}
# list of SPI devices
spidev = []
# dictionary of ROMFS files
romfs = {}
# SPI bus list
spi_list = []
# all config lines in order
alllines = []
# allow for extra env vars
env_vars = {}
# build flags for ChibiOS makefiles
build_flags = []
# sensor lists
imu_list = []
compass_list = []
baro_list = []
mcu_type = None
def is_int(str):
'''check if a string is an integer'''
try:
int(str)
except Exception:
return False
return True
def error(str):
'''show an error and exit'''
print("Error: " + str)
sys.exit(1)
def get_mcu_lib(mcu):
'''get library file for the chosen MCU'''
import importlib
try:
return importlib.import_module(mcu)
except ImportError:
error("Unable to find module for MCU %s" % mcu)
def setup_mcu_type_defaults():
'''setup defaults for given mcu type'''
global pincount, ports, portmap, vtypes
lib = get_mcu_lib(mcu_type)
if hasattr(lib, 'pincount'):
pincount = lib.pincount
if mcu_series.startswith("STM32F1"):
vtypes = f1_vtypes
else:
vtypes = f4f7_vtypes
ports = pincount.keys()
# setup default as input pins
for port in ports:
portmap[port] = []
for pin in range(pincount[port]):
portmap[port].append(generic_pin(port, pin, None, 'INPUT', []))
def get_alt_function(mcu, pin, function):
'''return alternative function number for a pin'''
lib = get_mcu_lib(mcu)
if function.endswith('_TXINV') or function.endswith('_RXINV'):
# RXINV and TXINV are special labels for inversion pins, not alt-functions
return None
if hasattr(lib, "AltFunction_map"):
alt_map = lib.AltFunction_map
else:
# just check if Alt Func is available or not
for l in af_labels:
if function.startswith(l):
return 0
return None
if function and function.endswith("_RTS") and (
function.startswith('USART') or function.startswith('UART')):
# we do software RTS
return None
for l in af_labels:
if function.startswith(l):
s = pin + ":" + function
if not s in alt_map:
error("Unknown pin function %s for MCU %s" % (s, mcu))
return alt_map[s]
return None
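# A hedged usage sketch (the pin name and AF number are illustrative; real
# values come from the MCU library's AltFunction_map):
#   get_alt_function('STM32F405xx', 'PA9', 'USART1_TX') -> 7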
def have_type_prefix(ptype):
'''return True if we have a peripheral starting with the given peripheral type'''
for t in bytype.keys():
if t.startswith(ptype):
return True
return False
def get_ADC1_chan(mcu, pin):
'''return ADC1 channel for an analog pin'''
import importlib
try:
lib = importlib.import_module(mcu)
ADC1_map = lib.ADC1_map
except ImportError:
error("Unable to find ADC1_Map for MCU %s" % mcu)
if not pin in ADC1_map:
error("Unable to find ADC1 channel for pin %s" % pin)
return ADC1_map[pin]
class generic_pin(object):
'''class to hold pin definition'''
def __init__(self, port, pin, label, type, extra):
global mcu_series
self.portpin = "P%s%u" % (port, pin)
self.port = port
self.pin = pin
self.label = label
self.type = type
self.extra = extra
self.af = None
if type == 'OUTPUT':
self.sig_dir = 'OUTPUT'
else:
self.sig_dir = 'INPUT'
if mcu_series.startswith("STM32F1") and self.label is not None:
self.f1_pin_setup()
# check that labels and pin types are consistent
for prefix in ['USART', 'UART', 'TIM']:
if label is None or type is None:
continue
if type.startswith(prefix):
a1 = label.split('_')
a2 = type.split('_')
if a1[0] != a2[0]:
error("Peripheral prefix mismatch for %s %s %s" % (self.portpin, label, type))
def f1_pin_setup(self):
for l in af_labels:
if self.label.startswith(l):
if self.label.endswith(tuple(f1_input_sigs)):
self.sig_dir = 'INPUT'
self.extra.append('FLOATING')
elif self.label.endswith(tuple(f1_output_sigs)):
self.sig_dir = 'OUTPUT'
elif l == 'I2C':
self.sig_dir = 'OUTPUT'
else:
error("Unknown signal type %s:%s for %s!" % (self.portpin, self.label, mcu_type))
def has_extra(self, v):
'''return true if we have the given extra token'''
return v in self.extra
def extra_prefix(self, prefix):
'''find an extra token starting with the given prefix'''
for e in self.extra:
if e.startswith(prefix):
return e
return None
def extra_value(self, name, type=None, default=None):
'''find an extra value of given type'''
v = self.extra_prefix(name)
if v is None:
return default
if v[len(name)] != '(' or v[-1] != ')':
error("Badly formed value for %s: %s\n" % (name, v))
ret = v[len(name) + 1:-1]
if type is not None:
try:
ret = type(ret)
except Exception:
error("Badly formed value for %s: %s\n" % (name, ret))
return ret
def is_RTS(self):
'''return true if this is an RTS pin'''
if self.label and self.label.endswith("_RTS") and (
self.type.startswith('USART') or self.type.startswith('UART')):
return True
return False
def is_CS(self):
'''return true if this is a CS pin'''
return self.has_extra("CS") or self.type == "CS"
def get_MODER(self):
'''return one of ALTERNATE, OUTPUT, ANALOG, INPUT'''
if self.af is not None:
v = "ALTERNATE"
elif self.type == 'OUTPUT':
v = "OUTPUT"
elif self.type.startswith('ADC'):
v = "ANALOG"
elif self.is_CS():
v = "OUTPUT"
elif self.is_RTS():
v = "OUTPUT"
else:
v = "INPUT"
return "PIN_MODE_%s(%uU)" % (v, self.pin)
def get_OTYPER(self):
'''return one of PUSHPULL, OPENDRAIN'''
v = 'PUSHPULL'
if self.type.startswith('I2C'):
# default I2C to OPENDRAIN
v = 'OPENDRAIN'
values = ['PUSHPULL', 'OPENDRAIN']
for e in self.extra:
if e in values:
v = e
return "PIN_OTYPE_%s(%uU)" % (v, self.pin)
def get_OSPEEDR(self):
'''return one of SPEED_VERYLOW, SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH'''
# on STM32F4 these speeds correspond to 2MHz, 25MHz, 50MHz and 100MHz
values = ['SPEED_VERYLOW', 'SPEED_LOW', 'SPEED_MEDIUM', 'SPEED_HIGH']
v = 'SPEED_MEDIUM'
for e in self.extra:
if e in values:
v = e
return "PIN_O%s(%uU)" % (v, self.pin)
def get_PUPDR(self):
'''return one of FLOATING, PULLUP, PULLDOWN'''
values = ['FLOATING', 'PULLUP', 'PULLDOWN']
v = 'FLOATING'
if self.is_CS():
v = "PULLUP"
# generate pullups for UARTs
if (self.type.startswith('USART') or
self.type.startswith('UART')) and (
(self.label.endswith('_TX') or
self.label.endswith('_RX') or
self.label.endswith('_CTS') or
self.label.endswith('_RTS'))):
v = "PULLUP"
# generate pullups for SDIO and SDMMC
if (self.type.startswith('SDIO') or
self.type.startswith('SDMMC')) and (
(self.label.endswith('_D0') or
self.label.endswith('_D1') or
self.label.endswith('_D2') or
self.label.endswith('_D3') or
self.label.endswith('_CMD'))):
v = "PULLUP"
for e in self.extra:
if e in values:
v = e
return "PIN_PUPDR_%s(%uU)" % (v, self.pin)
def get_ODR_F1(self):
'''return one of LOW, HIGH'''
values = ['LOW', 'HIGH']
v = 'HIGH'
if self.type == 'OUTPUT':
v = 'LOW'
for e in self.extra:
if e in values:
v = e
# on some controllers the input pull-up/pull-down is selected via ODR
if self.type == "INPUT":
v = 'LOW'
if 'PULLUP' in self.extra:
v = "HIGH"
return "PIN_ODR_%s(%uU)" % (v, self.pin)
def get_ODR(self):
'''return one of LOW, HIGH'''
if mcu_series.startswith("STM32F1"):
return self.get_ODR_F1()
values = ['LOW', 'HIGH']
v = 'HIGH'
for e in self.extra:
if e in values:
v = e
return "PIN_ODR_%s(%uU)" % (v, self.pin)
def get_AFIO(self):
'''return AFIO'''
af = self.af
if af is None:
af = 0
return "PIN_AFIO_AF(%uU, %uU)" % (self.pin, af)
def get_AFRL(self):
'''return AFIO low 8'''
if self.pin >= 8:
return None
return self.get_AFIO()
def get_AFRH(self):
'''return AFIO high 8'''
if self.pin < 8:
return None
return self.get_AFIO()
def get_CR_F1(self):
'''return CR FLAGS for STM32F1xx'''
#Check Speed
if self.sig_dir != "INPUT" or self.af is not None:
speed_values = ['SPEED_LOW', 'SPEED_MEDIUM', 'SPEED_HIGH']
v = 'SPEED_MEDIUM'
for e in self.extra:
if e in speed_values:
v = e
speed_str = "PIN_%s(%uU) |" % (v, self.pin)
else:
speed_str = ""
if self.af is not None:
if self.label.endswith('_RX'):
# UART RX is configured as an input, and can be pull-up, pull-down or floating
if 'PULLUP' in self.extra or 'PULLDOWN' in self.extra:
v = 'PUD'
else:
v = "NOPULL"
else:
v = "AF_PP"
elif self.sig_dir == 'OUTPUT':
if 'OPENDRAIN' in self.extra:
v = 'OUTPUT_OD'
else:
v = "OUTPUT_PP"
elif self.type.startswith('ADC'):
v = "ANALOG"
else:
v = "PUD"
if 'FLOATING' in self.extra:
v = "NOPULL"
mode_str = "PIN_MODE_%s(%uU)" % (v, self.pin)
return "%s %s" % (speed_str, mode_str)
def get_CR(self):
'''return CR FLAGS'''
if mcu_series.startswith("STM32F1"):
return self.get_CR_F1()
if self.sig_dir != "INPUT":
speed_values = ['SPEED_LOW', 'SPEED_MEDIUM', 'SPEED_HIGH']
v = 'SPEED_MEDIUM'
for e in self.extra:
if e in speed_values:
v = e
speed_str = "PIN_%s(%uU) |" % (v, self.pin)
else:
speed_str = ""
#Check Alternate function
if self.type.startswith('I2C'):
v = "AF_OD"
elif self.sig_dir == 'OUTPUT':
if self.af is not None:
v = "AF_PP"
else:
v = "OUTPUT_PP"
elif self.type.startswith('ADC'):
v = "ANALOG"
elif self.is_CS():
v = "OUTPUT_PP"
elif self.is_RTS():
v = "OUTPUT_PP"
else:
v = "PUD"
if 'FLOATING' in self.extra:
v = "NOPULL"
mode_str = "PIN_MODE_%s(%uU)" % (v, self.pin)
return "%s %s" % (speed_str, mode_str)
def get_CRH(self):
if self.pin < 8:
return None
return self.get_CR()
def get_CRL(self):
if self.pin >= 8:
return None
return self.get_CR()
def __str__(self):
str = ''
if self.af is not None:
str += " AF%u" % self.af
if self.type.startswith('ADC1'):
str += " ADC1_IN%u" % get_ADC1_chan(mcu_type, self.portpin)
if self.extra_value('PWM', type=int):
str += " PWM%u" % self.extra_value('PWM', type=int)
return "P%s%u %s %s%s" % (self.port, self.pin, self.label, self.type,
str)
def get_config(name, column=0, required=True, default=None, type=None, spaces=False):
'''get a value from config dictionary'''
if not name in config:
if required and default is None:
error("missing required value %s in hwdef.dat" % name)
return default
if len(config[name]) < column + 1:
if not required:
return None
error("missing required value %s in hwdef.dat (column %u)" % (name,
column))
if spaces:
ret = ' '.join(config[name][column:])
else:
ret = config[name][column]
if type is not None:
if type == int and ret.startswith('0x'):
try:
ret = int(ret,16)
except Exception:
error("Badly formed config value %s (got %s)" % (name, ret))
else:
try:
ret = type(ret)
except Exception:
error("Badly formed config value %s (got %s)" % (name, ret))
return ret
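# A hedged usage sketch (the hwdef values are illustrative): given a line
# 'MCU STM32F4xx STM32F405xx' in hwdef.dat,
#   get_config('MCU')    -> 'STM32F4xx'
#   get_config('MCU', 1) -> 'STM32F405xx'
#   get_config('FLASH_SIZE_KB', type=int) -> 1024 (as an int)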
def get_mcu_config(name, required=False):
'''get a value from the mcu dictionary'''
lib = get_mcu_lib(mcu_type)
if not hasattr(lib, 'mcu'):
error("Missing mcu config for %s" % mcu_type)
if not name in lib.mcu:
if required:
error("Missing required mcu config %s for %s" % (name, mcu_type))
return None
return lib.mcu[name]
def enable_can(f):
'''setup for a CAN enabled board'''
f.write('#define HAL_WITH_UAVCAN 1\n')
env_vars['HAL_WITH_UAVCAN'] = '1'
def has_sdcard_spi():
'''check for sdcard connected to spi bus'''
for dev in spidev:
if dev[0] == 'sdcard':
return True
return False
def write_mcu_config(f):
'''write MCU config defines'''
f.write('// MCU type (ChibiOS define)\n')
f.write('#define %s_MCUCONF\n' % get_config('MCU'))
mcu_subtype = get_config('MCU', 1)
if mcu_subtype.endswith('xx'):
f.write('#define %s_MCUCONF\n\n' % mcu_subtype[:-2])
f.write('#define %s\n\n' % mcu_subtype)
f.write('// crystal frequency\n')
f.write('#define STM32_HSECLK %sU\n\n' % get_config('OSCILLATOR_HZ'))
f.write('// UART used for stdout (printf)\n')
if get_config('STDOUT_SERIAL', required=False):
f.write('#define HAL_STDOUT_SERIAL %s\n\n' % get_config('STDOUT_SERIAL'))
f.write('// baudrate used for stdout (printf)\n')
f.write('#define HAL_STDOUT_BAUDRATE %u\n\n' % get_config('STDOUT_BAUDRATE', type=int))
if have_type_prefix('SDIO'):
f.write('// SDIO available, enable POSIX filesystem support\n')
f.write('#define USE_POSIX\n\n')
f.write('#define HAL_USE_SDC TRUE\n')
build_flags.append('USE_FATFS=yes')
elif have_type_prefix('SDMMC'):
f.write('// SDMMC available, enable POSIX filesystem support\n')
f.write('#define USE_POSIX\n\n')
f.write('#define HAL_USE_SDC TRUE\n')
f.write('#define STM32_SDC_USE_SDMMC1 TRUE\n')
build_flags.append('USE_FATFS=yes')
elif has_sdcard_spi():
f.write('// MMC via SPI available, enable POSIX filesystem support\n')
f.write('#define USE_POSIX\n\n')
f.write('#define HAL_USE_MMC_SPI TRUE\n')
f.write('#define HAL_USE_SDC FALSE\n')
f.write('#define HAL_SDCARD_SPI_HOOK TRUE\n')
build_flags.append('USE_FATFS=yes')
else:
f.write('#define HAL_USE_SDC FALSE\n')
build_flags.append('USE_FATFS=no')
env_vars['DISABLE_SCRIPTING'] = True
if 'OTG1' in bytype:
f.write('#define STM32_USB_USE_OTG1 TRUE\n')
f.write('#define HAL_USE_USB TRUE\n')
f.write('#define HAL_USE_SERIAL_USB TRUE\n')
if 'OTG2' in bytype:
f.write('#define STM32_USB_USE_OTG2 TRUE\n')
if have_type_prefix('CAN') and not 'AP_PERIPH' in env_vars:
enable_can(f)
if get_config('PROCESS_STACK', required=False):
env_vars['PROCESS_STACK'] = get_config('PROCESS_STACK')
else:
env_vars['PROCESS_STACK'] = "0x2000"
if get_config('MAIN_STACK', required=False):
env_vars['MAIN_STACK'] = get_config('MAIN_STACK')
else:
env_vars['MAIN_STACK'] = "0x400"
if get_config('IOMCU_FW', required=False):
env_vars['IOMCU_FW'] = get_config('IOMCU_FW')
else:
env_vars['IOMCU_FW'] = 0
if get_config('PERIPH_FW', required=False):
env_vars['PERIPH_FW'] = get_config('PERIPH_FW')
else:
env_vars['PERIPH_FW'] = 0
# write any custom STM32 defines
for d in alllines:
if d.startswith('STM32_'):
f.write('#define %s\n' % d)
if d.startswith('define '):
f.write('#define %s\n' % d[7:])
flash_size = get_config('FLASH_SIZE_KB', type=int)
f.write('#define BOARD_FLASH_SIZE %u\n' % flash_size)
env_vars['BOARD_FLASH_SIZE'] = flash_size
f.write('#define CRT1_AREAS_NUMBER 1\n')
flash_reserve_start = get_config(
'FLASH_RESERVE_START_KB', default=16, type=int)
f.write('\n// location of loaded firmware\n')
f.write('#define FLASH_LOAD_ADDRESS 0x%08x\n' % (0x08000000 + flash_reserve_start*1024))
if args.bootloader:
f.write('#define FLASH_BOOTLOADER_LOAD_KB %u\n' % get_config('FLASH_BOOTLOADER_LOAD_KB', type=int))
f.write('\n')
ram_map = get_mcu_config('RAM_MAP', True)
f.write('// memory regions\n')
regions = []
for (address, size, flags) in ram_map:
regions.append('{(void*)0x%08x, 0x%08x, 0x%02x }' % (address, size*1024, flags))
f.write('#define HAL_MEMORY_REGIONS %s\n' % ', '.join(regions))
f.write('\n// CPU serial number (12 bytes)\n')
f.write('#define UDID_START 0x%08x\n\n' % get_mcu_config('UDID_START', True))
f.write('\n// APJ board ID (for bootloaders)\n')
f.write('#define APJ_BOARD_ID %s\n' % get_config('APJ_BOARD_ID'))
lib = get_mcu_lib(mcu_type)
build_info = lib.build
if mcu_series.startswith("STM32F1"):
cortex = "cortex-m3"
env_vars['CPU_FLAGS'] = ["-mcpu=%s" % cortex]
build_info['MCU'] = cortex
else:
cortex = "cortex-m4"
env_vars['CPU_FLAGS'] = [ "-mcpu=%s" % cortex, "-mfpu=fpv4-sp-d16", "-mfloat-abi=hard"]
build_info['MCU'] = cortex
if not args.bootloader:
env_vars['CPU_FLAGS'].append('-u_printf_float')
build_info['ENV_UDEFS'] = "-DCHPRINTF_USE_FLOAT=1"
# setup build variables
for v in build_info.keys():
build_flags.append('%s=%s' % (v, build_info[v]))
# setup for bootloader build
if args.bootloader:
f.write('''
#define HAL_BOOTLOADER_BUILD TRUE
#define HAL_USE_ADC FALSE
#define HAL_USE_EXT FALSE
#define HAL_NO_UARTDRIVER
#define HAL_NO_PRINTF
#define HAL_NO_CCM
#define CH_DBG_STATISTICS FALSE
#define CH_CFG_USE_TM FALSE
#define CH_CFG_USE_REGISTRY FALSE
#define CH_CFG_USE_WAITEXIT FALSE
#define CH_CFG_USE_DYNAMIC FALSE
#define CH_CFG_USE_MEMPOOLS FALSE
#define CH_CFG_USE_OBJ_FIFOS FALSE
#define CH_DBG_FILL_THREADS FALSE
#define CH_CFG_USE_SEMAPHORES FALSE
#define CH_CFG_USE_HEAP FALSE
#define CH_CFG_USE_MUTEXES FALSE
#define CH_CFG_USE_CONDVARS FALSE
#define CH_CFG_USE_CONDVARS_TIMEOUT FALSE
#define CH_CFG_USE_EVENTS FALSE
#define CH_CFG_USE_EVENTS_TIMEOUT FALSE
#define CH_CFG_USE_MESSAGES FALSE
#define CH_CFG_USE_MAILBOXES FALSE
#define CH_CFG_USE_FACTORY FALSE
#define CH_CFG_USE_MEMCORE FALSE
#define HAL_USE_I2C FALSE
#define HAL_USE_PWM FALSE
''')
def write_ldscript(fname):
'''write ldscript.ld for this board'''
flash_size = get_config('FLASH_USE_MAX_KB', type=int, default=0)
if flash_size == 0:
flash_size = get_config('FLASH_SIZE_KB', type=int)
# space to reserve for bootloader and storage at start of flash
flash_reserve_start = get_config(
'FLASH_RESERVE_START_KB', default=16, type=int)
env_vars['FLASH_RESERVE_START_KB'] = str(flash_reserve_start)
# space to reserve for storage at end of flash
flash_reserve_end = get_config('FLASH_RESERVE_END_KB', default=0, type=int)
# ram layout
ram_map = get_mcu_config('RAM_MAP', True)
flash_base = 0x08000000 + flash_reserve_start * 1024
if not args.bootloader:
flash_length = flash_size - (flash_reserve_start + flash_reserve_end)
else:
flash_length = get_config('FLASH_BOOTLOADER_LOAD_KB', type=int)
print("Generating ldscript.ld")
f = open(fname, 'w')
f.write('''/* generated ldscript.ld */
MEMORY
{
flash : org = 0x%08x, len = %uK
ram0 : org = 0x%08x, len = %uk
}
INCLUDE common.ld
''' % (flash_base, flash_length, ram_map[0][0], ram_map[0][1]))
def copy_common_linkerscript(outdir, hwdef):
dirpath = os.path.dirname(hwdef)
shutil.copy(os.path.join(dirpath, "../common/common.ld"),
os.path.join(outdir, "common.ld"))
def write_USB_config(f):
'''write USB config defines'''
if not have_type_prefix('OTG'):
return
f.write('// USB configuration\n')
f.write('#define HAL_USB_VENDOR_ID %s\n' % get_config('USB_VENDOR', default=0x0483)) # default to ST
f.write('#define HAL_USB_PRODUCT_ID %s\n' % get_config('USB_PRODUCT', default=0x5740))
f.write('#define HAL_USB_STRING_MANUFACTURER "%s"\n' % get_config("USB_STRING_MANUFACTURER", default="ArduPilot"))
default_product = "%BOARD%"
if args.bootloader:
default_product += "-BL"
f.write('#define HAL_USB_STRING_PRODUCT "%s"\n' % get_config("USB_STRING_PRODUCT", default=default_product))
f.write('#define HAL_USB_STRING_SERIAL "%s"\n' % get_config("USB_STRING_SERIAL", default="%SERIAL%"))
f.write('\n\n')
def write_SPI_table(f):
'''write SPI device table'''
f.write('\n// SPI device table\n')
devlist = []
for dev in spidev:
if len(dev) != 7:
print("Badly formed SPIDEV line %s" % dev)
name = '"' + dev[0] + '"'
bus = dev[1]
devid = dev[2]
cs = dev[3]
mode = dev[4]
lowspeed = dev[5]
highspeed = dev[6]
if not bus.startswith('SPI') or not bus in spi_list:
error("Bad SPI bus in SPIDEV line %s" % dev)
if not devid.startswith('DEVID') or not is_int(devid[5:]):
error("Bad DEVID in SPIDEV line %s" % dev)
if not cs in bylabel or not bylabel[cs].is_CS():
error("Bad CS pin in SPIDEV line %s" % dev)
if not mode in ['MODE0', 'MODE1', 'MODE2', 'MODE3']:
error("Bad MODE in SPIDEV line %s" % dev)
if not lowspeed.endswith('*MHZ') and not lowspeed.endswith('*KHZ'):
error("Bad lowspeed value %s in SPIDEV line %s" % (lowspeed, dev))
if not highspeed.endswith('*MHZ') and not highspeed.endswith('*KHZ'):
error("Bad highspeed value %s in SPIDEV line %s" % (highspeed,
dev))
cs_pin = bylabel[cs]
pal_line = 'PAL_LINE(GPIO%s,%uU)' % (cs_pin.port, cs_pin.pin)
devidx = len(devlist)
f.write(
'#define HAL_SPI_DEVICE%-2u SPIDesc(%-17s, %2u, %2u, %-19s, SPIDEV_%s, %7s, %7s)\n'
% (devidx, name, spi_list.index(bus), int(devid[5:]), pal_line,
mode, lowspeed, highspeed))
devlist.append('HAL_SPI_DEVICE%u' % devidx)
f.write('#define HAL_SPI_DEVICE_LIST %s\n\n' % ','.join(devlist))
def write_SPI_config(f):
'''write SPI config defines'''
global spi_list
for t in bytype.keys():
if t.startswith('SPI'):
spi_list.append(t)
spi_list = sorted(spi_list)
if len(spi_list) == 0:
f.write('#define HAL_USE_SPI FALSE\n')
return
devlist = []
for dev in spi_list:
n = int(dev[3:])
devlist.append('HAL_SPI%u_CONFIG' % n)
f.write(
'#define HAL_SPI%u_CONFIG { &SPID%u, %u, STM32_SPI_SPI%u_DMA_STREAMS }\n'
% (n, n, n, n))
f.write('#define HAL_SPI_BUS_LIST %s\n\n' % ','.join(devlist))
write_SPI_table(f)
def parse_spi_device(dev):
'''parse a SPI:xxx device item'''
a = dev.split(':')
if len(a) != 2:
error("Bad SPI device: %s" % dev)
return 'hal.spi->get_device("%s")' % a[1]
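# A hedged usage sketch (the device name 'ms5611' is illustrative):
#   parse_spi_device('SPI:ms5611') -> 'hal.spi->get_device("ms5611")'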
def parse_i2c_device(dev):
'''parse a I2C:xxx:xxx device item'''
a = dev.split(':')
if len(a) != 3:
error("Bad I2C device: %s" % dev)
busaddr = int(a[2],base=0)
if a[1] == 'ALL_EXTERNAL':
return ('FOREACH_I2C_EXTERNAL(b)', 'GET_I2C_DEVICE(b,0x%02x)' % (busaddr))
elif a[1] == 'ALL_INTERNAL':
return ('FOREACH_I2C_INTERNAL(b)', 'GET_I2C_DEVICE(b,0x%02x)' % (busaddr))
elif a[1] == 'ALL':
return ('FOREACH_I2C(b)', 'GET_I2C_DEVICE(b,0x%02x)' % (busaddr))
busnum = int(a[1])
return ('', 'GET_I2C_DEVICE(%u,0x%02x)' % (busnum, busaddr))
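# A hedged usage sketch (bus numbers and addresses are illustrative):
#   parse_i2c_device('I2C:1:0x77')   -> ('', 'GET_I2C_DEVICE(1,0x77)')
#   parse_i2c_device('I2C:ALL:0x77') -> ('FOREACH_I2C(b)', 'GET_I2C_DEVICE(b,0x77)')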
def seen_str(dev):
'''return string representation of device for checking for duplicates'''
return str(dev[:2])
def write_IMU_config(f):
'''write IMU config defines'''
global imu_list
devlist = []
wrapper = ''
seen = set()
for dev in imu_list:
if seen_str(dev) in seen:
error("Duplicate IMU: %s" % seen_str(dev))
seen.add(seen_str(dev))
driver = dev[0]
for i in range(1,len(dev)):
if dev[i].startswith("SPI:"):
dev[i] = parse_spi_device(dev[i])
elif dev[i].startswith("I2C:"):
(wrapper, dev[i]) = parse_i2c_device(dev[i])
n = len(devlist)+1
devlist.append('HAL_INS_PROBE%u' % n)
f.write(
'#define HAL_INS_PROBE%u %s ADD_BACKEND(AP_InertialSensor_%s::probe(*this,%s))\n'
% (n, wrapper, driver, ','.join(dev[1:])))
if len(devlist) > 0:
f.write('#define HAL_INS_PROBE_LIST %s\n\n' % ';'.join(devlist))
def write_MAG_config(f):
'''write MAG config defines'''
global compass_list
devlist = []
seen = set()
for dev in compass_list:
if seen_str(dev) in seen:
error("Duplicate MAG: %s" % seen_str(dev))
seen.add(seen_str(dev))
driver = dev[0]
probe = 'probe'
wrapper = ''
a = driver.split(':')
driver = a[0]
if len(a) > 1 and a[1].startswith('probe'):
probe = a[1]
for i in range(1,len(dev)):
if dev[i].startswith("SPI:"):
dev[i] = parse_spi_device(dev[i])
elif dev[i].startswith("I2C:"):
(wrapper, dev[i]) = parse_i2c_device(dev[i])
n = len(devlist)+1
devlist.append('HAL_MAG_PROBE%u' % n)
f.write(
'#define HAL_MAG_PROBE%u %s ADD_BACKEND(DRIVER_%s, AP_Compass_%s::%s(%s))\n'
% (n, wrapper, driver, driver, probe, ','.join(dev[1:])))
if len(devlist) > 0:
f.write('#define HAL_MAG_PROBE_LIST %s\n\n' % ';'.join(devlist))
def write_BARO_config(f):
'''write barometer config defines'''
global baro_list
devlist = []
seen = set()
for dev in baro_list:
if seen_str(dev) in seen:
error("Duplicate BARO: %s" % seen_str(dev))
seen.add(seen_str(dev))
driver = dev[0]
probe = 'probe'
wrapper = ''
a = driver.split(':')
driver = a[0]
if len(a) > 1 and a[1].startswith('probe'):
probe = a[1]
for i in range(1,len(dev)):
if dev[i].startswith("SPI:"):
dev[i] = parse_spi_device(dev[i])
elif dev[i].startswith("I2C:"):
(wrapper, dev[i]) = parse_i2c_device(dev[i])
if dev[i].startswith('hal.i2c_mgr'):
dev[i] = 'std::move(%s)' % dev[i]
n = len(devlist)+1
devlist.append('HAL_BARO_PROBE%u' % n)
f.write(
'#define HAL_BARO_PROBE%u %s ADD_BACKEND(AP_Baro_%s::%s(*this,%s))\n'
% (n, wrapper, driver, probe, ','.join(dev[1:])))
if len(devlist) > 0:
f.write('#define HAL_BARO_PROBE_LIST %s\n\n' % ';'.join(devlist))
def get_gpio_bylabel(label):
'''get GPIO(n) setting on a pin label, or -1'''
p = bylabel.get(label)
if p is None:
return -1
return p.extra_value('GPIO', type=int, default=-1)
def get_extra_bylabel(label, name, default=None):
'''get extra setting for a label by name'''
p = bylabel.get(label)
if p is None:
return default
return p.extra_value(name, type=str, default=default)
def write_UART_config(f):
'''write UART config defines'''
if get_config('UART_ORDER', required=False) is None:
return
uart_list = config['UART_ORDER']
f.write('\n// UART configuration\n')
# write out driver declarations for HAL_ChibOS_Class.cpp
devnames = "ABCDEFGH"
sdev = 0
idx = 0
for dev in uart_list:
if dev == 'EMPTY':
f.write('#define HAL_UART%s_DRIVER Empty::UARTDriver uart%sDriver\n' %
(devnames[idx], devnames[idx]))
else:
f.write(
'#define HAL_UART%s_DRIVER ChibiOS::UARTDriver uart%sDriver(%u)\n'
% (devnames[idx], devnames[idx], sdev))
sdev += 1
idx += 1
for idx in range(len(uart_list), len(devnames)):
f.write('#define HAL_UART%s_DRIVER Empty::UARTDriver uart%sDriver\n' %
(devnames[idx], devnames[idx]))
if 'IOMCU_UART' in config:
f.write('#define HAL_WITH_IO_MCU 1\n')
idx = len(uart_list)
f.write('#define HAL_UART_IOMCU_IDX %u\n' % idx)
f.write(
'#define HAL_UART_IO_DRIVER ChibiOS::UARTDriver uart_io(HAL_UART_IOMCU_IDX)\n'
)
uart_list.append(config['IOMCU_UART'][0])
f.write('#define HAL_HAVE_SERVO_VOLTAGE 1\n') # assume the IOMCU guarantees servo voltage monitoring
else:
f.write('#define HAL_WITH_IO_MCU 0\n')
f.write('\n')
need_uart_driver = False
OTG2_index = None
devlist = []
for dev in uart_list:
if dev.startswith('UART'):
n = int(dev[4:])
elif dev.startswith('USART'):
n = int(dev[5:])
elif dev.startswith('OTG'):
n = int(dev[3:])
elif dev.startswith('EMPTY'):
continue
else:
error("Invalid element %s in UART_ORDER" % dev)
devlist.append('HAL_%s_CONFIG' % dev)
if dev + "_RTS" in bylabel:
p = bylabel[dev + '_RTS']
rts_line = 'PAL_LINE(GPIO%s,%uU)' % (p.port, p.pin)
else:
rts_line = "0"
if dev.startswith('OTG2'):
f.write(
'#define HAL_%s_CONFIG {(BaseSequentialStream*) &SDU2, true, false, 0, 0, false, 0, 0}\n'
% dev)
OTG2_index = uart_list.index(dev)
elif dev.startswith('OTG'):
f.write(
'#define HAL_%s_CONFIG {(BaseSequentialStream*) &SDU1, true, false, 0, 0, false, 0, 0}\n'
% dev)
else:
need_uart_driver = True
f.write(
"#define HAL_%s_CONFIG { (BaseSequentialStream*) &SD%u, false, "
% (dev, n))
if mcu_series.startswith("STM32F1"):
f.write("%s, " % rts_line)
else:
f.write("STM32_%s_RX_DMA_CONFIG, STM32_%s_TX_DMA_CONFIG, %s, " %
(dev, dev, rts_line))
# add inversion pins, if any
f.write("%d, " % get_gpio_bylabel(dev + "_RXINV"))
f.write("%s, " % get_extra_bylabel(dev + "_RXINV", "POL", "0"))
f.write("%d, " % get_gpio_bylabel(dev + "_TXINV"))
f.write("%s}\n" % get_extra_bylabel(dev + "_TXINV", "POL", "0"))
if OTG2_index is not None:
f.write('#define HAL_OTG2_UART_INDEX %d\n' % OTG2_index)
f.write('''
#if HAL_WITH_UAVCAN
#ifndef HAL_OTG2_PROTOCOL
#define HAL_OTG2_PROTOCOL SerialProtocol_SLCAN
#endif
#define HAL_SERIAL%d_PROTOCOL HAL_OTG2_PROTOCOL
#define HAL_SERIAL%d_BAUD 115200
#endif
''' % (OTG2_index, OTG2_index))
f.write('#define HAL_HAVE_DUAL_USB_CDC 1\n')
f.write('#define HAL_UART_DEVICE_LIST %s\n\n' % ','.join(devlist))
if not need_uart_driver and not args.bootloader:
f.write('''
#ifndef HAL_USE_SERIAL
#define HAL_USE_SERIAL HAL_USE_SERIAL_USB
#endif
''')
def write_UART_config_bootloader(f):
'''write UART config defines'''
if get_config('UART_ORDER', required=False) is None:
return
uart_list = config['UART_ORDER']
f.write('\n// UART configuration\n')
devlist = []
have_uart = False
OTG2_index = None
for u in uart_list:
if u.startswith('OTG2'):
devlist.append('(BaseChannel *)&SDU2')
OTG2_index = uart_list.index(u)
elif u.startswith('OTG'):
devlist.append('(BaseChannel *)&SDU1')
else:
unum = int(u[-1])
devlist.append('(BaseChannel *)&SD%u' % unum)
have_uart = True
f.write('#define BOOTLOADER_DEV_LIST %s\n' % ','.join(devlist))
if OTG2_index is not None:
f.write('#define HAL_OTG2_UART_INDEX %d\n' % OTG2_index)
if not have_uart:
f.write('''
#ifndef HAL_USE_SERIAL
#define HAL_USE_SERIAL FALSE
#endif
''')
def write_I2C_config(f):
'''write I2C config defines'''
if not have_type_prefix('I2C'):
print("No I2C peripherals")
f.write('''
#ifndef HAL_USE_I2C
#define HAL_USE_I2C FALSE
#endif
''')
return
if not 'I2C_ORDER' in config:
print("Missing I2C_ORDER config")
return
i2c_list = config['I2C_ORDER']
f.write('// I2C configuration\n')
if len(i2c_list) == 0:
error("I2C_ORDER invalid")
devlist = []
# write out config structures
for dev in i2c_list:
if not dev.startswith('I2C') or dev[3] not in "1234":
error("Bad I2C_ORDER element %s" % dev)
n = int(dev[3:])
devlist.append('HAL_I2C%u_CONFIG' % n)
f.write('''
#if defined(STM32_I2C_I2C%u_RX_DMA_STREAM) && defined(STM32_I2C_I2C%u_TX_DMA_STREAM)
#define HAL_I2C%u_CONFIG { &I2CD%u, STM32_I2C_I2C%u_RX_DMA_STREAM, STM32_I2C_I2C%u_TX_DMA_STREAM, HAL_GPIO_PIN_I2C%u_SCL, HAL_GPIO_PIN_I2C%u_SDA }
#else
#define HAL_I2C%u_CONFIG { &I2CD%u, SHARED_DMA_NONE, SHARED_DMA_NONE, HAL_GPIO_PIN_I2C%u_SCL, HAL_GPIO_PIN_I2C%u_SDA }
#endif
'''
% (n, n, n, n, n, n, n, n, n, n, n, n))
f.write('\n#define HAL_I2C_DEVICE_LIST %s\n\n' % ','.join(devlist))
def parse_timer(str):
'''parse a timer channel string, e.g. TIM8_CH2N'''
result = re.match(r'TIM([0-9]*)_CH([1234])(N?)', str)
if result:
tim = int(result.group(1))
chan = int(result.group(2))
compl = result.group(3) == 'N'
if tim < 1 or tim > 17:
error("Bad timer number %s in %s" % (tim, str))
return (tim, chan, compl)
else:
error("Bad timer definition %s" % str)
def write_PWM_config(f):
'''write PWM config defines'''
rc_in = None
rc_in_int = None
alarm = None
pwm_out = []
pwm_timers = []
for l in bylabel.keys():
p = bylabel[l]
if p.type.startswith('TIM'):
if p.has_extra('RCIN'):
rc_in = p
elif p.has_extra('RCININT'):
rc_in_int = p
elif p.has_extra('ALARM'):
alarm = p
else:
if p.extra_value('PWM', type=int) is not None:
pwm_out.append(p)
if p.type not in pwm_timers:
pwm_timers.append(p.type)
if not pwm_out and not alarm:
print("No PWM output defined")
f.write('''
#ifndef HAL_USE_PWM
#define HAL_USE_PWM FALSE
#endif
''')
if rc_in is not None:
(n, chan, compl) = parse_timer(rc_in.label)
if compl:
# it is an inverted channel
f.write('#define HAL_RCIN_IS_INVERTED\n')
if chan not in [1, 2]:
error(
"Bad channel number, only channel 1 and 2 supported for RCIN")
f.write('// RC input config\n')
f.write('#define HAL_USE_ICU TRUE\n')
f.write('#define STM32_ICU_USE_TIM%u TRUE\n' % n)
f.write('#define RCIN_ICU_TIMER ICUD%u\n' % n)
f.write('#define RCIN_ICU_CHANNEL ICU_CHANNEL_%u\n' % chan)
f.write('#define STM32_RCIN_DMA_STREAM STM32_TIM_TIM%u_CH%u_DMA_STREAM\n' % (n, chan))
f.write('#define STM32_RCIN_DMA_CHANNEL STM32_TIM_TIM%u_CH%u_DMA_CHAN\n' % (n, chan))
f.write('\n')
if rc_in_int is not None:
(n, chan, compl) = parse_timer(rc_in_int.label)
if compl:
error('Complementary channel is not supported for RCININT %s' % rc_in_int.label)
f.write('// RC input config\n')
f.write('#define HAL_USE_EICU TRUE\n')
f.write('#define STM32_EICU_USE_TIM%u TRUE\n' % n)
f.write('#define RCININT_EICU_TIMER EICUD%u\n' % n)
f.write('#define RCININT_EICU_CHANNEL EICU_CHANNEL_%u\n' % chan)
f.write('\n')
if alarm is not None:
(n, chan, compl) = parse_timer(alarm.label)
if compl:
error("Complementary channel is not supported for ALARM %s" % alarm.label)
f.write('\n')
f.write('// Alarm PWM output config\n')
f.write('#define STM32_PWM_USE_TIM%u TRUE\n' % n)
f.write('#define STM32_TIM%u_SUPPRESS_ISR\n' % n)
chan_mode = [
'PWM_OUTPUT_DISABLED', 'PWM_OUTPUT_DISABLED',
'PWM_OUTPUT_DISABLED', 'PWM_OUTPUT_DISABLED'
]
chan_mode[chan - 1] = 'PWM_OUTPUT_ACTIVE_HIGH'
pwm_clock = 1000000
period = 1000
f.write('''#define HAL_PWM_ALARM \\
{ /* pwmGroup */ \\
%u, /* Timer channel */ \\
{ /* PWMConfig */ \\
%u, /* PWM clock frequency. */ \\
%u, /* Initial PWM period. */ \\
NULL, /* no callback */ \\
{ /* Channel Config */ \\
{%s, NULL}, \\
{%s, NULL}, \\
{%s, NULL}, \\
{%s, NULL} \\
}, \\
0, 0 \\
}, \\
&PWMD%u /* PWMDriver* */ \\
}\n''' %
(chan-1, pwm_clock, period, chan_mode[0],
chan_mode[1], chan_mode[2], chan_mode[3], n))
else:
f.write('\n')
f.write('// No Alarm output pin defined\n')
f.write('#undef HAL_PWM_ALARM\n')
f.write('\n')
f.write('// PWM timer config\n')
for t in sorted(pwm_timers):
n = int(t[3:])
f.write('#define STM32_PWM_USE_TIM%u TRUE\n' % n)
f.write('#define STM32_TIM%u_SUPPRESS_ISR\n' % n)
f.write('\n')
f.write('// PWM output config\n')
groups = []
for t in sorted(pwm_timers):
group = len(groups) + 1
n = int(t[3:])
chan_list = [255, 255, 255, 255]
chan_mode = [
'PWM_OUTPUT_DISABLED', 'PWM_OUTPUT_DISABLED',
'PWM_OUTPUT_DISABLED', 'PWM_OUTPUT_DISABLED'
]
alt_functions = [ 0, 0, 0, 0 ]
pal_lines = [ '0', '0', '0', '0' ]
for p in pwm_out:
if p.type != t:
continue
(n, chan, compl) = parse_timer(p.label)
pwm = p.extra_value('PWM', type=int)
chan_list[chan - 1] = pwm - 1
if compl:
chan_mode[chan - 1] = 'PWM_COMPLEMENTARY_OUTPUT_ACTIVE_HIGH'
else:
chan_mode[chan - 1] = 'PWM_OUTPUT_ACTIVE_HIGH'
alt_functions[chan - 1] = p.af
pal_lines[chan - 1] = 'PAL_LINE(GPIO%s, %uU)' % (p.port, p.pin)
groups.append('HAL_PWM_GROUP%u' % group)
if n in [1, 8]:
# only the advanced timers do 8MHz clocks
advanced_timer = 'true'
else:
advanced_timer = 'false'
pwm_clock = 1000000
period = 20000 * pwm_clock / 1000000
f.write('''#if defined(STM32_TIM_TIM%u_UP_DMA_STREAM) && defined(STM32_TIM_TIM%u_UP_DMA_CHAN)
# define HAL_PWM%u_DMA_CONFIG true, STM32_TIM_TIM%u_UP_DMA_STREAM, STM32_TIM_TIM%u_UP_DMA_CHAN
#else
# define HAL_PWM%u_DMA_CONFIG false, 0, 0
#endif\n''' % (n, n, n, n, n, n))
f.write('''#define HAL_PWM_GROUP%u { %s, \\
{%u, %u, %u, %u}, \\
/* Group Initial Config */ \\
{ \\
%u, /* PWM clock frequency. */ \\
%u, /* Initial PWM period 20ms. */ \\
NULL, /* no callback */ \\
{ \\
/* Channel Config */ \\
{%s, NULL}, \\
{%s, NULL}, \\
{%s, NULL}, \\
{%s, NULL} \\
}, 0, 0}, &PWMD%u, \\
HAL_PWM%u_DMA_CONFIG, \\
{ %u, %u, %u, %u }, \\
{ %s, %s, %s, %s }}\n''' %
(group, advanced_timer,
chan_list[0], chan_list[1], chan_list[2], chan_list[3],
pwm_clock, period,
chan_mode[0], chan_mode[1], chan_mode[2], chan_mode[3],
n, n,
alt_functions[0], alt_functions[1], alt_functions[2], alt_functions[3],
pal_lines[0], pal_lines[1], pal_lines[2], pal_lines[3]))
f.write('#define HAL_PWM_GROUPS %s\n\n' % ','.join(groups))
def write_ADC_config(f):
'''write ADC config defines'''
f.write('// ADC config\n')
adc_chans = []
for l in bylabel:
p = bylabel[l]
if not p.type.startswith('ADC'):
continue
chan = get_ADC1_chan(mcu_type, p.portpin)
scale = p.extra_value('SCALE', default=None)
if p.label == 'VDD_5V_SENS':
f.write('#define ANALOG_VCC_5V_PIN %u\n' % chan)
f.write('#define HAL_HAVE_BOARD_VOLTAGE 1\n')
if p.label == 'FMU_SERVORAIL_VCC_SENS':
f.write('#define FMU_SERVORAIL_ADC_CHAN %u\n' % chan)
f.write('#define HAL_HAVE_SERVO_VOLTAGE 1\n')
adc_chans.append((chan, scale, p.label, p.portpin))
adc_chans = sorted(adc_chans)
vdd = get_config('STM32_VDD')
if vdd[-1] == 'U':
vdd = vdd[:-1]
vdd = float(vdd) * 0.01
f.write('#define HAL_ANALOG_PINS { \\\n')
for (chan, scale, label, portpin) in adc_chans:
scale_str = '%.2f/4096' % vdd
if scale is not None and scale != '1':
scale_str = scale + '*' + scale_str
f.write('{ %2u, %12s }, /* %s %s */ \\\n' % (chan, scale_str, portpin,
label))
f.write('}\n\n')
def write_GPIO_config(f):
'''write GPIO config defines'''
f.write('// GPIO config\n')
gpios = []
gpioset = set()
for l in bylabel:
p = bylabel[l]
gpio = p.extra_value('GPIO', type=int)
if gpio is None:
continue
if gpio in gpioset:
error("Duplicate GPIO value %u" % gpio)
gpioset.add(gpio)
# see if it is also a PWM pin
pwm = p.extra_value('PWM', type=int, default=0)
port = p.port
pin = p.pin
gpios.append((gpio, pwm, port, pin, p))
gpios = sorted(gpios)
for (gpio, pwm, port, pin, p) in gpios:
f.write('#define HAL_GPIO_LINE_GPIO%u PAL_LINE(GPIO%s, %2uU)\n' % (gpio, port, pin))
f.write('#define HAL_GPIO_PINS { \\\n')
for (gpio, pwm, port, pin, p) in gpios:
f.write('{ %3u, true, %2u, PAL_LINE(GPIO%s, %2uU)}, /* %s */ \\\n' %
(gpio, pwm, port, pin, p))
# and write #defines for use by config code
f.write('}\n\n')
f.write('// full pin define list\n')
last_label = None
for l in sorted(list(set(bylabel.keys()))):
p = bylabel[l]
label = p.label
label = label.replace('-', '_')
if label == last_label:
continue
last_label = label
f.write('#define HAL_GPIO_PIN_%-20s PAL_LINE(GPIO%s,%uU)\n' %
(label, p.port, p.pin))
f.write('\n')
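# Illustrative output: a hypothetical line "PB0 LED OUTPUT GPIO(90)" would
# produce defines along these lines (padding approximate):
#   #define HAL_GPIO_LINE_GPIO90 PAL_LINE(GPIOB,  0U)
#   #define HAL_GPIO_PIN_LED                  PAL_LINE(GPIOB,0U)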
def bootloader_path():
# always embed a bootloader if it is available
this_dir = os.path.realpath(__file__)
rootdir = os.path.relpath(os.path.join(this_dir, "../../../../.."))
hwdef_dirname = os.path.basename(os.path.dirname(args.hwdef))
bootloader_filename = "%s_bl.bin" % (hwdef_dirname,)
bootloader_path = os.path.join(rootdir,
"Tools",
"bootloaders",
bootloader_filename)
if os.path.exists(bootloader_path):
return os.path.realpath(bootloader_path)
return None
def add_bootloader():
    '''add the bootloader to ROMFS'''
bp = bootloader_path()
if bp is not None:
romfs["bootloader.bin"] = bp
def write_ROMFS(outdir):
'''create ROMFS embedded header'''
romfs_list = []
for k in romfs.keys():
romfs_list.append((k, romfs[k]))
env_vars['ROMFS_FILES'] = romfs_list
def setup_apj_IDs():
'''setup the APJ board IDs'''
env_vars['APJ_BOARD_ID'] = get_config('APJ_BOARD_ID')
env_vars['APJ_BOARD_TYPE'] = get_config('APJ_BOARD_TYPE', default=mcu_type)
def write_peripheral_enable(f):
'''write peripheral enable lines'''
f.write('// peripherals enabled\n')
for type in sorted(bytype.keys()):
if type.startswith('USART') or type.startswith('UART'):
dstr = 'STM32_SERIAL_USE_%-6s' % type
f.write('#ifndef %s\n' % dstr)
f.write('#define %s TRUE\n' % dstr)
f.write('#endif\n')
if type.startswith('SPI'):
f.write('#define STM32_SPI_USE_%s TRUE\n' % type)
if type.startswith('OTG'):
f.write('#define STM32_USB_USE_%s TRUE\n' % type)
if type.startswith('I2C'):
f.write('#define STM32_I2C_USE_%s TRUE\n' % type)
def get_dma_exclude(periph_list):
'''return list of DMA devices to exclude from DMA'''
dma_exclude = []
for periph in periph_list:
if periph not in bylabel:
continue
p = bylabel[periph]
if p.has_extra('NODMA'):
dma_exclude.append(periph)
return dma_exclude
def write_hwdef_header(outfilename):
'''write hwdef header file'''
print("Writing hwdef setup in %s" % outfilename)
f = open(outfilename, 'w')
f.write('''/*
generated hardware definitions from hwdef.dat - DO NOT EDIT
*/
#pragma once
#ifndef TRUE
#define TRUE 1
#endif
#ifndef FALSE
#define FALSE 0
#endif
''')
write_mcu_config(f)
write_USB_config(f)
write_SPI_config(f)
write_ADC_config(f)
write_GPIO_config(f)
write_IMU_config(f)
write_MAG_config(f)
write_BARO_config(f)
write_peripheral_enable(f)
setup_apj_IDs()
dma_resolver.write_dma_header(f, periph_list, mcu_type,
dma_exclude=get_dma_exclude(periph_list),
dma_priority=get_config('DMA_PRIORITY',default='TIM* SPI*', spaces=True),
dma_noshare=get_config('DMA_NOSHARE',default='', spaces=True))
if not args.bootloader:
write_PWM_config(f)
write_I2C_config(f)
write_UART_config(f)
else:
write_UART_config_bootloader(f)
add_bootloader()
if len(romfs) > 0:
f.write('#define HAL_HAVE_AP_ROMFS_EMBEDDED_H 1\n')
if mcu_series.startswith('STM32F1'):
f.write('''
/*
* I/O ports initial setup, this configuration is established soon after reset
* in the initialization code.
* Please refer to the STM32 Reference Manual for details.
*/
#define PIN_MODE_OUTPUT_PP(n) (0 << (((n) & 7) * 4))
#define PIN_MODE_OUTPUT_OD(n) (4 << (((n) & 7) * 4))
#define PIN_MODE_AF_PP(n) (8 << (((n) & 7) * 4))
#define PIN_MODE_AF_OD(n) (12 << (((n) & 7) * 4))
#define PIN_MODE_ANALOG(n) (0 << (((n) & 7) * 4))
#define PIN_MODE_NOPULL(n) (4 << (((n) & 7) * 4))
#define PIN_MODE_PUD(n) (8 << (((n) & 7) * 4))
#define PIN_SPEED_MEDIUM(n) (1 << (((n) & 7) * 4))
#define PIN_SPEED_LOW(n) (2 << (((n) & 7) * 4))
#define PIN_SPEED_HIGH(n) (3 << (((n) & 7) * 4))
#define PIN_ODR_HIGH(n) (1 << (((n) & 15)))
#define PIN_ODR_LOW(n) (0 << (((n) & 15)))
#define PIN_PULLUP(n) (1 << (((n) & 15)))
#define PIN_PULLDOWN(n) (0 << (((n) & 15)))
#define PIN_UNDEFINED(n) PIN_INPUT_PUD(n)
''')
else:
f.write('''
/*
* I/O ports initial setup, this configuration is established soon after reset
* in the initialization code.
* Please refer to the STM32 Reference Manual for details.
*/
#define PIN_MODE_INPUT(n) (0U << ((n) * 2U))
#define PIN_MODE_OUTPUT(n) (1U << ((n) * 2U))
#define PIN_MODE_ALTERNATE(n) (2U << ((n) * 2U))
#define PIN_MODE_ANALOG(n) (3U << ((n) * 2U))
#define PIN_ODR_LOW(n) (0U << (n))
#define PIN_ODR_HIGH(n) (1U << (n))
#define PIN_OTYPE_PUSHPULL(n) (0U << (n))
#define PIN_OTYPE_OPENDRAIN(n) (1U << (n))
#define PIN_OSPEED_VERYLOW(n) (0U << ((n) * 2U))
#define PIN_OSPEED_LOW(n) (1U << ((n) * 2U))
#define PIN_OSPEED_MEDIUM(n) (2U << ((n) * 2U))
#define PIN_OSPEED_HIGH(n) (3U << ((n) * 2U))
#define PIN_PUPDR_FLOATING(n) (0U << ((n) * 2U))
#define PIN_PUPDR_PULLUP(n) (1U << ((n) * 2U))
#define PIN_PUPDR_PULLDOWN(n) (2U << ((n) * 2U))
#define PIN_AFIO_AF(n, v) ((v) << (((n) % 8U) * 4U))
''')
for port in sorted(ports):
f.write("/* PORT%s:\n" % port)
for pin in range(pincount[port]):
p = portmap[port][pin]
if p.label is not None:
f.write(" %s\n" % p)
f.write("*/\n\n")
if pincount[port] == 0:
# handle blank ports
for vtype in vtypes:
f.write("#define VAL_GPIO%s_%-7s 0x0\n" % (port,
vtype))
f.write("\n\n\n")
continue
for vtype in vtypes:
f.write("#define VAL_GPIO%s_%-7s (" % (p.port, vtype))
first = True
for pin in range(pincount[port]):
p = portmap[port][pin]
modefunc = getattr(p, "get_" + vtype)
v = modefunc()
if v is None:
continue
if not first:
f.write(" | \\\n ")
f.write(v)
first = False
if first:
# there were no pin definitions, use 0
f.write("0")
f.write(")\n\n")
def build_peripheral_list():
'''build a list of peripherals for DMA resolver to work on'''
peripherals = []
done = set()
prefixes = ['SPI', 'USART', 'UART', 'I2C']
for p in allpins:
type = p.type
if type in done:
continue
for prefix in prefixes:
if type.startswith(prefix):
ptx = type + "_TX"
prx = type + "_RX"
peripherals.append(ptx)
peripherals.append(prx)
if not ptx in bylabel:
bylabel[ptx] = p
if not prx in bylabel:
bylabel[prx] = p
if type.startswith('ADC'):
peripherals.append(type)
if type.startswith('SDIO') or type.startswith('SDMMC'):
if not mcu_series.startswith("STM32H7"):
peripherals.append(type)
if type.startswith('TIM'):
if p.has_extra('RCIN'):
label = p.label
if label[-1] == 'N':
label = label[:-1]
peripherals.append(label)
elif not p.has_extra('ALARM') and not p.has_extra('RCININT'):
# get the TIMn_UP DMA channels for DShot
label = type + '_UP'
if not label in peripherals and not p.has_extra('NODMA'):
peripherals.append(label)
done.add(type)
return peripherals
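# Example (hypothetical board): with USART1, SPI1, one ADC1 pin and a
# DShot-capable TIM4 output, this returns something like
#   ['USART1_TX', 'USART1_RX', 'SPI1_TX', 'SPI1_RX', 'ADC1', 'TIM4_UP']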
def write_env_py(filename):
'''write out env.py for environment variables to control the build process'''
# see if board has a defaults.parm file
defaults_filename = os.path.join(os.path.dirname(args.hwdef), 'defaults.parm')
if os.path.exists(defaults_filename) and not args.bootloader:
print("Adding defaults.parm")
env_vars['DEFAULT_PARAMETERS'] = os.path.abspath(defaults_filename)
# CHIBIOS_BUILD_FLAGS is passed to the ChibiOS makefile
env_vars['CHIBIOS_BUILD_FLAGS'] = ' '.join(build_flags)
pickle.dump(env_vars, open(filename, "wb"))
def romfs_add(romfs_filename, filename):
'''add a file to ROMFS'''
romfs[romfs_filename] = filename
def romfs_wildcard(pattern):
'''add a set of files to ROMFS by wildcard'''
base_path = os.path.join(os.path.dirname(__file__), '..', '..', '..', '..')
(pattern_dir, pattern) = os.path.split(pattern)
for f in os.listdir(os.path.join(base_path, pattern_dir)):
if fnmatch.fnmatch(f, pattern):
romfs[f] = os.path.join(pattern_dir, f)
def process_line(line):
'''process one line of pin definition file'''
global allpins, imu_list, compass_list, baro_list
a = shlex.split(line)
# keep all config lines for later use
alllines.append(line)
if a[0].startswith('P') and a[0][1] in ports and a[0] in config:
error("Pin %s redefined" % a[0])
config[a[0]] = a[1:]
if a[0] == 'MCU':
global mcu_type, mcu_series
mcu_type = a[2]
mcu_series = a[1]
setup_mcu_type_defaults()
if a[0].startswith('P') and a[0][1] in ports:
# it is a port/pin definition
try:
port = a[0][1]
pin = int(a[0][2:])
label = a[1]
type = a[2]
extra = a[3:]
except Exception:
error("Bad pin line: %s" % a)
return
p = generic_pin(port, pin, label, type, extra)
portmap[port][pin] = p
allpins.append(p)
if not type in bytype:
bytype[type] = []
bytype[type].append(p)
bylabel[label] = p
af = get_alt_function(mcu_type, a[0], label)
if af is not None:
p.af = af
if a[0] == 'SPIDEV':
spidev.append(a[1:])
if a[0] == 'IMU':
imu_list.append(a[1:])
if a[0] == 'COMPASS':
compass_list.append(a[1:])
if a[0] == 'BARO':
baro_list.append(a[1:])
if a[0] == 'ROMFS':
romfs_add(a[1],a[2])
if a[0] == 'ROMFS_WILDCARD':
romfs_wildcard(a[1])
if a[0] == 'undef':
print("Removing %s" % a[1])
config.pop(a[1], '')
bytype.pop(a[1],'')
bylabel.pop(a[1],'')
        # also remove all occurrences of defines in previous lines, if any
for line in alllines[:]:
if line.startswith('define') and a[1] == line.split()[1]:
alllines.remove(line)
newpins = []
for pin in allpins:
if pin.type == a[1]:
continue
if pin.label == a[1]:
continue
if pin.portpin == a[1]:
continue
newpins.append(pin)
allpins = newpins
if a[1] == 'IMU':
imu_list = []
if a[1] == 'COMPASS':
compass_list = []
if a[1] == 'BARO':
baro_list = []
if a[0] == 'env':
print("Adding environment %s" % ' '.join(a[1:]))
if len(a[1:]) < 2:
error("Bad env line for %s" % a[0])
env_vars[a[1]] = ' '.join(a[2:])
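# For reference, process_line() consumes whitespace-separated hwdef.dat
# entries; the lines below are hypothetical and only illustrate the grammar:
#   MCU STM32F7xx STM32F767xx
#   PA9 USART1_TX USART1
#   PC0 BATT_VOLTAGE_SENS ADC1 SCALE(10.1)
#   undef VDD_5V_SENS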
def process_file(filename):
'''process a hwdef.dat file'''
try:
f = open(filename, "r")
except Exception:
error("Unable to open file %s" % filename)
for line in f.readlines():
line = line.strip()
if len(line) == 0 or line[0] == '#':
continue
a = shlex.split(line)
if a[0] == "include" and len(a) > 1:
include_file = a[1]
if include_file[0] != '/':
dir = os.path.dirname(filename)
include_file = os.path.normpath(
os.path.join(dir, include_file))
print("Including %s" % include_file)
process_file(include_file)
else:
process_line(line)
# process input file
process_file(args.hwdef)
outdir = args.outdir
if outdir is None:
outdir = '/tmp'
if not "MCU" in config:
error("Missing MCU type in config")
mcu_type = get_config('MCU', 1)
print("Setup for MCU %s" % mcu_type)
# build a list of peripherals for the DMA resolver
periph_list = build_peripheral_list()
# write out hwdef.h
write_hwdef_header(os.path.join(outdir, "hwdef.h"))
# write out ldscript.ld
write_ldscript(os.path.join(outdir, "ldscript.ld"))
write_ROMFS(outdir)
# copy the shared linker script into the build directory; it must
# exist in the same directory as the ldscript.ld file we generate.
copy_common_linkerscript(outdir, args.hwdef)
write_env_py(os.path.join(outdir, "env.py"))
|
ethomas997/ardupilot
|
libraries/AP_HAL_ChibiOS/hwdef/scripts/chibios_hwdef.py
|
Python
|
gpl-3.0
| 58,725
|
[
"CRYSTAL"
] |
2ca0afa0fe4e32e73138b4b91cbf3a0517d07ab0422e7612f6868b0d3628b8d4
|
#!/usr/bin/python
import numpy as np
import pylab as pl
from auryntools import *
# This code snippet assumes that you have run the example simulation
# sim_coba_binmon with default parameters.
# This generates spk output files under /tmp/
filename = "/tmp/coba.0.e.spk"
seconds = 0.1
sf = AurynBinarySpikeFile(filename)
spikes = np.array(sf.get_last(seconds))
pl.scatter(spikes[:,0], spikes[:,1])
pl.xlabel("Time [s]")
pl.ylabel("Neuron ID")
pl.show()
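# A quick population-rate estimate from the same array (sketch; the
# excitatory population size of sim_coba_binmon is assumed here to be
# 3200 -- adjust to your simulation):
# n_exc = 3200
# print("Mean rate: %.2f Hz" % (len(spikes) / (seconds * n_exc)))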
|
idiot-z/auryn
|
tools/python/simple_spike_raster.py
|
Python
|
gpl-3.0
| 466
|
[
"NEURON"
] |
9b3c2d6b0a3400fca3b25a3057192f70f97f06d36f96e04db688703da64cdb1a
|
# -*- coding: utf-8 -*-
#
# This file is part of cclib (http://cclib.github.io), a library for parsing
# and interpreting the results of computational chemistry packages.
#
# Copyright (C) 2006-2014, the cclib development team
#
# The library is free software, distributed under the terms of
# the GNU Lesser General Public version 2.1 or later. You should have
# received a copy of the license along with cclib. You can also access
# the full license online at http://www.gnu.org/copyleft/lgpl.html.
"""Calculation methods related to volume based on cclib data."""
from __future__ import print_function
import copy
import numpy
try:
from PyQuante.CGBF import CGBF
from cclib.bridge import cclib2pyquante
module_pyq = True
except ImportError:
module_pyq = False
try:
from pyvtk import *
from pyvtk.DataSetAttr import *
module_pyvtk = True
except ImportError:
module_pyvtk = False
from cclib.parser.utils import convertor
class Volume(object):
"""Represent a volume in space.
Required parameters:
origin -- the bottom left hand corner of the volume
topcorner -- the top right hand corner
spacing -- the distance between the points in the cube
Attributes:
data -- a numpy array of values for each point in the volume
(set to zero at initialisation)
numpts -- the numbers of points in the (x,y,z) directions
"""
def __init__(self, origin, topcorner, spacing):
self.origin = origin
self.spacing = spacing
self.topcorner = topcorner
self.numpts = []
for i in range(3):
self.numpts.append(int((self.topcorner[i]-self.origin[i])/self.spacing[i] + 1) )
self.data = numpy.zeros( tuple(self.numpts), "d")
def __str__(self):
"""Return a string representation."""
return "Volume %s to %s (density: %s)" % (self.origin, self.topcorner,
self.spacing)
def write(self, filename, format="Cube"):
"""Write the volume to file."""
format = format.upper()
        if format not in ["VTK", "CUBE"]:
            raise ValueError("Format must be either VTK or Cube")
elif format=="VTK":
self.writeasvtk(filename)
else:
self.writeascube(filename)
def writeasvtk(self, filename):
if not module_pyvtk:
raise Exception("You need to have pyvtk installed")
ranges = (numpy.arange(self.data.shape[2]),
numpy.arange(self.data.shape[1]),
numpy.arange(self.data.shape[0]))
v = VtkData(RectilinearGrid(*ranges), "Test",
PointData(Scalars(self.data.ravel(), "from cclib", "default")))
v.tofile(filename)
def integrate(self):
boxvol = (self.spacing[0] * self.spacing[1] * self.spacing[2] *
convertor(1, "Angstrom", "bohr")**3)
return sum(self.data.ravel()) * boxvol
def integrate_square(self):
boxvol = (self.spacing[0] * self.spacing[1] * self.spacing[2] *
convertor(1, "Angstrom", "bohr")**3)
return sum(self.data.ravel()**2) * boxvol
def writeascube(self, filename):
# Remember that the units are bohr, not Angstroms
convert = lambda x : convertor(x, "Angstrom", "bohr")
ans = []
ans.append("Cube file generated by cclib")
ans.append("")
format = "%4d%12.6f%12.6f%12.6f"
origin = [convert(x) for x in self.origin]
ans.append(format % (0, origin[0], origin[1], origin[2]))
ans.append(format % (self.data.shape[0], convert(self.spacing[0]), 0.0, 0.0))
ans.append(format % (self.data.shape[1], 0.0, convert(self.spacing[1]), 0.0))
ans.append(format % (self.data.shape[2], 0.0, 0.0, convert(self.spacing[2])))
line = []
for i in range(self.data.shape[0]):
for j in range(self.data.shape[1]):
for k in range(self.data.shape[2]):
line.append(scinotation(self.data[i][j][k]))
if len(line)==6:
ans.append(" ".join(line))
line = []
if line:
ans.append(" ".join(line))
line = []
outputfile = open(filename, "w")
outputfile.write("\n".join(ans))
outputfile.close()
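# Minimal usage sketch for Volume (assumes a parsed ccData object `data`
# with atomcoords, gbasis and mocoeffs, much as in the __main__ block
# below; the grid bounds are arbitrary):
#   vol = Volume((-3.0, -3.0, -3.0), (3.0, 3.0, 3.0), spacing=(0.25, 0.25, 0.25))
#   homo = wavefunction(data.atomcoords[0], data.mocoeffs[0][data.homos[0]],
#                       data.gbasis, vol)
#   homo.write("homo.cube", format="Cube")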
def scinotation(num):
"""Write in scientific notation
>>> scinotation(1./654)
' 1.52905E-03'
>>> scinotation(-1./654)
'-1.52905E-03'
"""
ans = "%10.5E" % num
broken = ans.split("E")
exponent = int(broken[1])
if exponent<-99:
return " 0.000E+00"
if exponent<0:
sign="-"
else:
sign="+"
return ("%sE%s%s" % (broken[0],sign,broken[1][-2:])).rjust(12)
def getbfs(coords, gbasis):
"""Convenience function for both wavefunction and density based on PyQuante Ints.py."""
    # makepyquante comes from the cclib2pyquante bridge imported above
    mymol = cclib2pyquante.makepyquante(coords, [0 for x in coords])
sym2powerlist = {
'S' : [(0,0,0)],
'P' : [(1,0,0),(0,1,0),(0,0,1)],
'D' : [(2,0,0),(0,2,0),(0,0,2),(1,1,0),(0,1,1),(1,0,1)],
'F' : [(3,0,0),(2,1,0),(2,0,1),(1,2,0),(1,1,1),(1,0,2),
(0,3,0),(0,2,1),(0,1,2), (0,0,3)]
}
bfs = []
for i,atom in enumerate(mymol):
bs = gbasis[i]
for sym,prims in bs:
for power in sym2powerlist[sym]:
bf = CGBF(atom.pos(),power)
for expnt,coef in prims:
bf.add_primitive(expnt,coef)
bf.normalize()
bfs.append(bf)
return bfs
def wavefunction(coords, mocoeffs, gbasis, volume):
"""Calculate the magnitude of the wavefunction at every point in a volume.
Attributes:
coords -- the coordinates of the atoms
mocoeffs -- mocoeffs for one eigenvalue
gbasis -- gbasis from a parser object
volume -- a template Volume object (will not be altered)
"""
bfs = getbfs(coords, gbasis)
wavefn = copy.copy(volume)
wavefn.data = numpy.zeros( wavefn.data.shape, "d")
conversion = convertor(1,"bohr","Angstrom")
x = numpy.arange(wavefn.origin[0], wavefn.topcorner[0]+wavefn.spacing[0], wavefn.spacing[0]) / conversion
y = numpy.arange(wavefn.origin[1], wavefn.topcorner[1]+wavefn.spacing[1], wavefn.spacing[1]) / conversion
z = numpy.arange(wavefn.origin[2], wavefn.topcorner[2]+wavefn.spacing[2], wavefn.spacing[2]) / conversion
for bs in range(len(bfs)):
data = numpy.zeros( wavefn.data.shape, "d")
for i,xval in enumerate(x):
for j,yval in enumerate(y):
for k,zval in enumerate(z):
data[i, j, k] = bfs[bs].amp(xval,yval,zval)
numpy.multiply(data, mocoeffs[bs], data)
numpy.add(wavefn.data, data, wavefn.data)
return wavefn
def electrondensity(coords, mocoeffslist, gbasis, volume):
"""Calculate the magnitude of the electron density at every point in a volume.
Attributes:
coords -- the coordinates of the atoms
      mocoeffslist -- mocoeffs for all of the occupied eigenvalues
gbasis -- gbasis from a parser object
volume -- a template Volume object (will not be altered)
Note: mocoeffs is a list of numpy arrays. The list will be of length 1
for restricted calculations, and length 2 for unrestricted.
"""
bfs = getbfs(coords, gbasis)
density = copy.copy(volume)
density.data = numpy.zeros( density.data.shape, "d")
conversion = convertor(1,"bohr","Angstrom")
x = numpy.arange(density.origin[0], density.topcorner[0]+density.spacing[0], density.spacing[0]) / conversion
y = numpy.arange(density.origin[1], density.topcorner[1]+density.spacing[1], density.spacing[1]) / conversion
z = numpy.arange(density.origin[2], density.topcorner[2]+density.spacing[2], density.spacing[2]) / conversion
for mocoeffs in mocoeffslist:
for mocoeff in mocoeffs:
wavefn = numpy.zeros( density.data.shape, "d")
for bs in range(len(bfs)):
data = numpy.zeros( density.data.shape, "d")
for i,xval in enumerate(x):
for j,yval in enumerate(y):
tmp = []
for k,zval in enumerate(z):
tmp.append(bfs[bs].amp(xval, yval, zval))
data[i,j,:] = tmp
numpy.multiply(data, mocoeff[bs], data)
numpy.add(wavefn, data, wavefn)
density.data += wavefn**2
if len(mocoeffslist) == 1:
density.data = density.data*2. # doubly-occupied
return density
if __name__=="__main__":
try:
import psyco
psyco.full()
except ImportError:
pass
from cclib.io import ccopen
import logging
a = ccopen("../../../data/Gaussian/basicGaussian03/dvb_sp_basis.log")
a.logger.setLevel(logging.ERROR)
c = a.parse()
b = ccopen("../../../data/Gaussian/basicGaussian03/dvb_sp.out")
b.logger.setLevel(logging.ERROR)
d = b.parse()
vol = Volume( (-3.0,-6,-2.0), (3.0, 6, 2.0), spacing=(0.25,0.25,0.25) )
wavefn = wavefunction(d.atomcoords[0], d.mocoeffs[0][d.homos[0]],
c.gbasis, vol)
assert abs(wavefn.integrate())<1E-6 # not necessarily true for all wavefns
assert abs(wavefn.integrate_square() - 1.00)<1E-3 # true for all wavefns
print(wavefn.integrate(), wavefn.integrate_square())
vol = Volume( (-3.0,-6,-2.0), (3.0, 6, 2.0), spacing=(0.25,0.25,0.25) )
frontierorbs = [d.mocoeffs[0][(d.homos[0]-3):(d.homos[0]+1)]]
density = electrondensity(d.atomcoords[0], frontierorbs, c.gbasis, vol)
assert abs(density.integrate()-8.00)<1E-2
print("Combined Density of 4 Frontier orbitals=",density.integrate())
|
jchodera/cclib
|
src/cclib/method/volume.py
|
Python
|
lgpl-2.1
| 10,208
|
[
"Gaussian",
"VTK",
"cclib"
] |
1f78da6f52e829d537bdd2c6f5c190e1c8049285bfe6a515ca869b107fc9505e
|
# Copyright (C) 2004, Thomas Hamelryck (thamelry@binf.ku.dk)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Vector class, including rotation-related functions."""
import numpy
def m2rotaxis(m):
"""
    Return the angle, axis pair that corresponds to rotation matrix m.
"""
# Angle always between 0 and pi
# Sense of rotation is defined by axis orientation
t=0.5*(numpy.trace(m)-1)
t=max(-1, t)
t=min(1, t)
angle=numpy.arccos(t)
if angle<1e-15:
# Angle is 0
return 0.0, Vector(1,0,0)
elif angle<numpy.pi:
# Angle is smaller than pi
x=m[2,1]-m[1,2]
y=m[0,2]-m[2,0]
z=m[1,0]-m[0,1]
axis=Vector(x,y,z)
axis.normalize()
return angle, axis
else:
# Angle is pi - special case!
m00=m[0,0]
m11=m[1,1]
m22=m[2,2]
        # For a pi rotation, m = 2*axis*axis^T - I, so the diagonal gives
        # the axis components via m00-m11-m22+1 == 4*x*x (and cyclically).
        # Pick the largest diagonal element so the divisors stay away from 0.
        if m00>=m11 and m00>=m22:
            x=0.5*numpy.sqrt(m00-m11-m22+1)
            y=m[0,1]/(2*x)
            z=m[0,2]/(2*x)
        elif m11>=m22:
            y=0.5*numpy.sqrt(m11-m00-m22+1)
            x=m[0,1]/(2*y)
            z=m[1,2]/(2*y)
        else:
            z=0.5*numpy.sqrt(m22-m00-m11+1)
            x=m[0,2]/(2*z)
            y=m[1,2]/(2*z)
axis=Vector(x,y,z)
axis.normalize()
return numpy.pi, axis
def vector_to_axis(line, point):
"""
Returns the vector between a point and
    the closest point on a line (i.e. the perpendicular
projection of the point on the line).
@type line: L{Vector}
@param line: vector defining a line
@type point: L{Vector}
@param point: vector defining the point
"""
line=line.normalized()
np=point.norm()
angle=line.angle(point)
return point-line**(np*numpy.cos(angle))
def rotaxis2m(theta, vector):
"""
Calculate a left multiplying rotation matrix that rotates
theta rad around vector.
Example:
    >>> m=rotaxis(numpy.pi, Vector(1,0,0))
>>> rotated_vector=any_vector.left_multiply(m)
@type theta: float
@param theta: the rotation angle
@type vector: L{Vector}
@param vector: the rotation axis
@return: The rotation matrix, a 3x3 Numeric array.
"""
vector=vector.copy()
vector.normalize()
c=numpy.cos(theta)
s=numpy.sin(theta)
t=1-c
x,y,z=vector.get_array()
rot=numpy.zeros((3,3))
# 1st row
rot[0,0]=t*x*x+c
rot[0,1]=t*x*y-s*z
rot[0,2]=t*x*z+s*y
# 2nd row
rot[1,0]=t*x*y+s*z
rot[1,1]=t*y*y+c
rot[1,2]=t*y*z-s*x
# 3rd row
rot[2,0]=t*x*z-s*y
rot[2,1]=t*y*z+s*x
rot[2,2]=t*z*z+c
return rot
rotaxis=rotaxis2m
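# Example (sketch): build a rotation and recover its angle/axis pair with
# the two functions above; the axis choice is arbitrary.
#   axis = Vector(0, 0, 1)
#   m = rotaxis2m(numpy.pi/2, axis)
#   angle, recovered = m2rotaxis(m)   # angle ~ pi/2, recovered ~ axis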
def refmat(p,q):
"""
Return a (left multiplying) matrix that mirrors p onto q.
Example:
>>> mirror=refmat(p,q)
>>> qq=p.left_multiply(mirror)
>>> print q, qq # q and qq should be the same
@type p,q: L{Vector}
@return: The mirror operation, a 3x3 Numeric array.
"""
p.normalize()
q.normalize()
if (p-q).norm()<1e-5:
return numpy.identity(3)
pq=p-q
pq.normalize()
b=pq.get_array()
b.shape=(3, 1)
i=numpy.identity(3)
ref=i-2*numpy.dot(b, numpy.transpose(b))
return ref
def rotmat(p,q):
"""
Return a (left multiplying) matrix that rotates p onto q.
Example:
>>> r=rotmat(p,q)
>>> print q, p.left_multiply(r)
@param p: moving vector
@type p: L{Vector}
@param q: fixed vector
@type q: L{Vector}
@return: rotation matrix that rotates p onto q
@rtype: 3x3 Numeric array
"""
rot=numpy.dot(refmat(q, -p), refmat(p, -p))
return rot
def calc_angle(v1, v2, v3):
"""
Calculate the angle between 3 vectors
representing 3 connected points.
    @param v1, v2, v3: the three points that define the angle
@type v1, v2, v3: L{Vector}
@return: angle
@rtype: float
"""
v1=v1-v2
v3=v3-v2
return v1.angle(v3)
def calc_dihedral(v1, v2, v3, v4):
"""
Calculate the dihedral angle between 4 vectors
representing 4 connected points. The angle is in
]-pi, pi].
@param v1, v2, v3, v4: the four points that define the dihedral angle
@type v1, v2, v3, v4: L{Vector}
"""
ab=v1-v2
cb=v3-v2
db=v4-v3
u=ab**cb
v=db**cb
w=u**v
angle=u.angle(v)
# Determine sign of angle
try:
if cb.angle(w)>0.001:
angle=-angle
except ZeroDivisionError:
# dihedral=pi
pass
return angle
class Vector:
"3D vector"
def __init__(self, x, y=None, z=None):
if y is None and z is None:
# Array, list, tuple...
if len(x)!=3:
raise ValueError("Vector: x is not a "
"list/tuple/array of 3 numbers")
self._ar=numpy.array(x, 'd')
else:
# Three numbers
self._ar=numpy.array((x, y, z), 'd')
def __repr__(self):
x,y,z=self._ar
return "<Vector %.2f, %.2f, %.2f>" % (x,y,z)
def __neg__(self):
"Return Vector(-x, -y, -z)"
a=-self._ar
return Vector(a)
def __add__(self, other):
"Return Vector+other Vector or scalar"
if isinstance(other, Vector):
a=self._ar+other._ar
else:
a=self._ar+numpy.array(other)
return Vector(a)
def __sub__(self, other):
"Return Vector-other Vector or scalar"
if isinstance(other, Vector):
a=self._ar-other._ar
else:
a=self._ar-numpy.array(other)
return Vector(a)
def __mul__(self, other):
"Return Vector.Vector (dot product)"
return sum(self._ar*other._ar)
def __div__(self, x):
"Return Vector(coords/a)"
a=self._ar/numpy.array(x)
return Vector(a)
def __pow__(self, other):
"Return VectorxVector (cross product) or Vectorxscalar"
if isinstance(other, Vector):
a,b,c=self._ar
d,e,f=other._ar
c1=numpy.linalg.det(numpy.array(((b,c), (e,f))))
c2=-numpy.linalg.det(numpy.array(((a,c), (d,f))))
c3=numpy.linalg.det(numpy.array(((a,b), (d,e))))
return Vector(c1,c2,c3)
else:
a=self._ar*numpy.array(other)
return Vector(a)
def __getitem__(self, i):
return self._ar[i]
def __setitem__(self, i, value):
self._ar[i]=value
def norm(self):
"Return vector norm"
return numpy.sqrt(sum(self._ar*self._ar))
def normsq(self):
"Return square of vector norm"
return abs(sum(self._ar*self._ar))
def normalize(self):
"Normalize the Vector"
self._ar=self._ar/self.norm()
def normalized(self):
"Return a normalized copy of the Vector"
v=self.copy()
v.normalize()
return v
def angle(self, other):
"Return angle between two vectors"
n1=self.norm()
n2=other.norm()
c=(self*other)/(n1*n2)
# Take care of roundoff errors
c=min(c,1)
c=max(-1,c)
return numpy.arccos(c)
def get_array(self):
"Return (a copy of) the array of coordinates"
return numpy.array(self._ar)
def left_multiply(self, matrix):
"Return Vector=Matrix x Vector"
a=numpy.dot(matrix, self._ar)
return Vector(a)
def right_multiply(self, matrix):
"Return Vector=Vector x Matrix"
a=numpy.dot(self._ar, matrix)
return Vector(a)
def copy(self):
"Return a deep copy of the Vector"
return Vector(self._ar)
if __name__=="__main__":
from numpy.random import random
v1=Vector(0,0,1)
v2=Vector(0,0,0)
v3=Vector(0,1,0)
v4=Vector(1,1,0)
v4.normalize()
print v4
print calc_angle(v1, v2, v3)
dih=calc_dihedral(v1, v2, v3, v4)
# Test dihedral sign
assert(dih>0)
print "DIHEDRAL ", dih
ref=refmat(v1, v3)
rot=rotmat(v1, v3)
print v3
print v1.left_multiply(ref)
print v1.left_multiply(rot)
print v1.right_multiply(numpy.transpose(rot))
    # -
    print v1-v2
    print v1-1
    print v1-(1,2,3)
    # +
    print v1+v2
    print v1+3
    print v1+(1,2,3)
# *
print v1*v2
# /
print v1/2
print v1/(1,2,3)
# **
print v1**v2
print v1**2
print v1**(1,2,3)
# norm
print v1.norm()
# norm squared
print v1.normsq()
# setitem
v1[2]=10
print v1
# getitem
print v1[2]
print numpy.array(v1)
print "ROT"
angle=random()*numpy.pi
axis=Vector(random(3)-random(3))
axis.normalize()
m=rotaxis(angle, axis)
cangle, caxis=m2rotaxis(m)
print angle-cangle
print axis-caxis
print
|
BlogomaticProject/Blogomatic
|
opt/blog-o-matic/usr/lib/python/Bio/PDB/Vector.py
|
Python
|
gpl-2.0
| 9,097
|
[
"Biopython"
] |
516c7b7be4e9b4a5337e8e2c41b1d8528c4fa736824502f094c1c28cabd31e94
|