text
stringlengths 12
1.05M
| repo_name
stringlengths 5
86
| path
stringlengths 4
191
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 12
1.05M
| keyword
listlengths 1
23
| text_hash
stringlengths 64
64
|
|---|---|---|---|---|---|---|---|
# shamelessly copied from pliExpertInfo (Vali, Mirakels, Littlesat)
from enigma import iServiceInformation, iPlayableService
from Components.Converter.Converter import Converter
from Components.Element import cached
from Components.config import config
from Tools.Transponder import ConvertToHumanReadable
from Tools.GetEcmInfo import GetEcmInfo
from Poll import Poll
def addspace(text):
    """Return *text* with a single trailing space appended, or *text*
    unchanged when it is empty/falsy (used to join optional display fields)."""
    return text + " " if text else text
class PliExtraInfo(Poll, Converter, object):
    """Skin converter exposing tuner, transponder, codec and conditional-access
    (crypto) details for the currently playing service.

    The skin selects what to render through the converter argument
    (``self.type``), e.g. "CryptoBar", "TransponderInfo", "All", or one of
    the "CryptoCaid...Available/Selected" boolean probes (see ``ca_table``).
    """

    # Known satellite names keyed by orbital position (tenths of a degree).
    # Positions > 1800 are west of Greenwich (key is the raw position, the
    # fallback string converts 3600-pos to degrees west).
    _SAT_NAMES_WEST = {
        3592: 'Thor/Intelsat',
        3590: 'Thor/Intelsat',
        3560: 'Amos (4',
        3550: 'Atlantic Bird',
        3530: 'Nilesat/Atlantic Bird',
        3520: 'Atlantic Bird',
        3475: 'Atlantic Bird',
        3460: 'Express',
        3450: 'Telstar',
        3420: 'Intelsat',
        3380: 'Nss',
        3355: 'Intelsat',
        3325: 'Intelsat',
        3300: 'Hispasat',
        3285: 'Intelsat',
        3170: 'Intelsat',
        3150: 'Intelsat',
        3070: 'Intelsat',
        3045: 'Intelsat',
        3020: 'Intelsat 9',
        2990: 'Amazonas',
        2985: 'Echostar 3,12',
        2900: 'Star One',
        2880: 'AMC 6 (72',
        2875: 'Echostar 6',
        2860: 'Horizons',
        2830: 'Echostar 8',
        2810: 'AMC5',
        2780: 'NIMIQ 4',
        2690: 'NIMIQ 1',
        2630: 'Galaxy 19',
        2502: 'DirectTV 5',
        2500: 'Echostar 10,11',
        2412: 'DirectTV 7S',
        2410: 'Echostar 7 Anik F3',
        2391: 'Galaxy 23',
        2390: 'Echostar 9',
        2311: 'Ciel 2',
        2310: 'Galaxy 27',
        2120: 'Echostar 2',
    }
    # Positions 1..1800 are east of Greenwich.
    _SAT_NAMES_EAST = {
        1440: 'SuperBird 7,C2',
        1380: 'Telstar 18',
        1220: 'AsiaSat',
        1131: 'KoreaSat 5',
        1101: 'N-Sat 110',
        1100: 'BSat 1A,2A',
        1082: 'NSS 11',
        1056: 'Asiasat 3S',
        1030: 'Express A2',
        1006: 'AsiaSat 2',
        951: 'NSS 6',
        950: 'Insat 4B',
        917: 'Mesat',
        900: 'Yamal 201',
        881: 'ST1',
        850: 'Intelsat 709',
        830: 'Insat 4A',
        800: 'Express',
        785: 'ThaiCom 5',
        765: 'Telestar',
        750: 'Abs',
        720: 'Intelsat',
        705: 'Eutelsat W5',
        685: 'Intelsat',
        620: 'Intelsat 902',
        600: 'Intelsat 904',
        570: 'Nss',
        530: 'Express AM22',
        480: 'Eutelsat 2F2',
        450: 'Intelsat',
        420: 'Turksat 2A',
        400: 'Express AM1',
        390: 'Hellas Sat 2',
        380: 'Paksat 1',
        360: 'Eutelsat Sesat',
        335: 'Astra 1M',
        330: 'Eurobird 3',
        328: 'Galaxy 11',
        315: 'Astra 5A',
        310: 'Turksat',
        305: 'Arabsat',
        285: 'Eurobird 1',
        284: 'Eurobird/Astra',
        282: 'Eurobird/Astra',
        260: 'Badr 3/4',
        255: 'Eurobird 2',
        235: 'Astra 1E',
        216: 'Eutelsat W6',
        215: 'Eutelsat',
        210: 'AfriStar 1',
        192: 'Astra 1F',
        160: 'Eutelsat W2',
        130: 'Hot Bird 6,7A,8',
        100: 'Eutelsat W1',
        90: 'Eurobird 9',
        70: 'Eutelsat W3A',
        50: 'Sirius 4',
        48: 'Sirius 4',
        30: 'Telecom 2',
    }

    def __init__(self, type):
        Converter.__init__(self, type)
        Poll.__init__(self)
        self.type = type
        self.poll_interval = 1000  # refresh once per second
        self.poll_enabled = True
        # (first caid, last caid, system name, bar letter, always shown in bar)
        self.caid_data = (
            ("0x100", "0x1ff", "Seca", "S", True),
            ("0x500", "0x5ff", "Via", "V", True),
            ("0x600", "0x6ff", "Irdeto", "I", True),
            ("0x900", "0x9ff", "NDS", "Nd", True),
            ("0xb00", "0xbff", "Conax", "Co", True),
            ("0xd00", "0xdff", "CryptoW", "Cw", True),
            ("0xe00", "0xeff", "PowerVU", "P", False),
            ("0x1700", "0x17ff", "Beta", "B", True),
            ("0x1800", "0x18ff", "Nagra", "N", True),
            ("0x2600", "0x2600", "Biss", "Bi", False),
            ("0x4ae0", "0x4ae1", "Dre", "D", False),
            ("0x4aee", "0x4aee", "BulCrypt", "B1", False),
            ("0x5581", "0x5581", "BulCrypt", "B2", False)
        )
        # (converter type string, bar letter, True="Selected" probe / False="Available")
        self.ca_table = (
            ("CryptoCaidSecaAvailable", "S", False),
            ("CryptoCaidViaAvailable", "V", False),
            ("CryptoCaidIrdetoAvailable", "I", False),
            ("CryptoCaidNDSAvailable", "Nd", False),
            ("CryptoCaidConaxAvailable", "Co", False),
            ("CryptoCaidCryptoWAvailable", "Cw", False),
            ("CryptoCaidPowerVUAvailable", "P", False),
            ("CryptoCaidBetaAvailable", "B", False),
            ("CryptoCaidNagraAvailable", "N", False),
            ("CryptoCaidBissAvailable", "Bi", False),
            ("CryptoCaidDreAvailable", "D", False),
            ("CryptoCaidBulCrypt1Available", "B1", False),
            ("CryptoCaidBulCrypt2Available", "B2", False),
            ("CryptoCaidSecaSelected", "S", True),
            ("CryptoCaidViaSelected", "V", True),
            ("CryptoCaidIrdetoSelected", "I", True),
            ("CryptoCaidNDSSelected", "Nd", True),
            ("CryptoCaidConaxSelected", "Co", True),
            ("CryptoCaidCryptoWSelected", "Cw", True),
            ("CryptoCaidPowerVUSelected", "P", True),
            ("CryptoCaidBetaSelected", "B", True),
            ("CryptoCaidNagraSelected", "N", True),
            ("CryptoCaidBissSelected", "Bi", True),
            ("CryptoCaidDreSelected", "D", True),
            ("CryptoCaidBulCrypt1Selected", "B1", True),
            ("CryptoCaidBulCrypt2Selected", "B2", True),
        )
        self.ecmdata = GetEcmInfo()
        # Raw / human-readable frontend data; refreshed on evNewProgramInfo.
        self.feraw = self.fedata = self.updateFEdata = None

    def getCryptoInfo(self, info):
        """Cache the current ECM source, caid, provider id and ecm pid for a
        crypted service; reset to neutral values for FTA services."""
        if info.getInfo(iServiceInformation.sIsCrypted) == 1:
            data = self.ecmdata.getEcmData()
            self.current_source = data[0]
            self.current_caid = data[1]
            self.current_provid = data[2]
            self.current_ecmpid = data[3]
        else:
            self.current_source = ""
            self.current_caid = "0"
            self.current_provid = "0"
            self.current_ecmpid = "0"

    def createCryptoBar(self, info):
        """Build the multi-system crypto bar: one colored letter per CA system
        from caid_data, followed by a color reset."""
        res = ""
        available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
        for caid_entry in self.caid_data:
            lo = int(caid_entry[0], 16)
            hi = int(caid_entry[1], 16)
            if lo <= int(self.current_caid, 16) <= hi:
                color = "\c0000??00"  # currently decoding with this system
            else:
                color = "\c007?7?7?"  # not present
            try:
                # as in the original code, "signalled by the service" takes
                # precedence over "currently selected"
                for caid in available_caids:
                    if lo <= caid <= hi:
                        color = "\c00????00"
            except:
                # available_caids may be None / not iterable
                pass
            if color != "\c007?7?7?" or caid_entry[4]:
                if res:
                    res += " "
                res += color + caid_entry[3]
        res += "\c00??????"
        return res

    def _createCryptoIndicator(self, info, first, last, label):
        """Render a single CA-system indicator: *label* colored green when the
        currently decoding caid lies in [first, last], yellow when such a caid
        is signalled in the service's CAID list (overrides green, matching the
        original per-system methods), grey otherwise; color reset appended."""
        available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
        if first <= int(self.current_caid, 16) <= last:
            color = "\c004c7d3f"  # green
        else:
            color = "\c009?9?9?"  # grey
        try:
            for caid in available_caids:
                if first <= caid <= last:
                    color = "\c00eeee00"  # yellow
        except:
            # available_caids may be None / not iterable
            pass
        return color + label + "\c00??????"

    def createCryptoSeca(self, info):
        return self._createCryptoIndicator(info, 0x100, 0x1ff, 'S')

    def createCryptoVia(self, info):
        return self._createCryptoIndicator(info, 0x500, 0x5ff, 'V')

    def createCryptoIrdeto(self, info):
        return self._createCryptoIndicator(info, 0x600, 0x6ff, 'I')

    def createCryptoNDS(self, info):
        return self._createCryptoIndicator(info, 0x900, 0x9ff, 'NDS')

    def createCryptoConax(self, info):
        return self._createCryptoIndicator(info, 0xb00, 0xbff, 'CO')

    def createCryptoCryptoW(self, info):
        return self._createCryptoIndicator(info, 0xd00, 0xdff, 'CW')

    def createCryptoPowerVU(self, info):
        return self._createCryptoIndicator(info, 0xe00, 0xeff, 'P')

    def createCryptoBeta(self, info):
        return self._createCryptoIndicator(info, 0x1700, 0x17ff, 'B')

    def createCryptoNagra(self, info):
        return self._createCryptoIndicator(info, 0x1800, 0x18ff, 'N')

    def createCryptoBiss(self, info):
        # NOTE: wider range than caid_data's Biss entry (0x2600 only) — this
        # matches the original method.
        return self._createCryptoIndicator(info, 0x2600, 0x26ff, 'BI')

    def createCryptoDre(self, info):
        return self._createCryptoIndicator(info, 0x4ae0, 0x4ae1, 'DC')

    def createCryptoSpecial(self, info):
        """Return 'Name:caid:provid:sid:ecmpid' for the active CA system,
        'FTA:...' when no known system matches, or '' on error."""
        caid_name = "FTA"
        try:
            for caid_entry in self.caid_data:
                if int(caid_entry[0], 16) <= int(self.current_caid, 16) <= int(caid_entry[1], 16):
                    caid_name = caid_entry[2]
                    break
            return caid_name + ":%04x:%04x:%04x:%04x" % (
                int(self.current_caid, 16),
                int(self.current_provid, 16),
                info.getInfo(iServiceInformation.sSID),
                int(self.current_ecmpid, 16))
        except:
            pass
        return ""

    def createResolution(self, info):
        """Return e.g. '1280x720p50'; empty string when no video is present."""
        xres = info.getInfo(iServiceInformation.sVideoWidth)
        if xres == -1:
            return ""
        yres = info.getInfo(iServiceInformation.sVideoHeight)
        # sProgressive 0/1 -> i/p; other values pick the blank entries
        mode = ("i", "p", "", " ")[info.getInfo(iServiceInformation.sProgressive)]
        fps = str((info.getInfo(iServiceInformation.sFrameRate) + 500) / 1000)
        if int(fps) <= 0:
            fps = ""
        return str(xres) + "x" + str(yres) + mode + fps

    def createVideoCodec(self, info):
        # sVideoType indexes this tuple; -1 (unknown) picks the trailing "".
        return ("MPEG2", "MPEG4", "MPEG1", "MPEG4-II", "VC1", "VC1-SM", "")[info.getInfo(iServiceInformation.sVideoType)]

    def createPIDInfo(self, info):
        """Return 'Pids:video:audio:pcr:sid' with negative (missing) pids
        clamped to 0."""
        vpid = max(info.getInfo(iServiceInformation.sVideoPID), 0)
        apid = max(info.getInfo(iServiceInformation.sAudioPID), 0)
        pcrpid = max(info.getInfo(iServiceInformation.sPCRPID), 0)
        sidpid = max(info.getInfo(iServiceInformation.sSID), 0)
        return "Pids:%04d:%04d:%04d:%05d" % (vpid, apid, pcrpid, sidpid)

    def createTransponderInfo(self, fedata, feraw):
        """One-line transponder summary: system, frequency, polarization,
        symbol rate, FEC, modulation and orbital position."""
        return (addspace(self.createTunerSystem(fedata))
                + addspace(self.createFrequency(feraw))
                + addspace(self.createPolarization(fedata))
                + addspace(self.createSymbolRate(fedata, feraw))
                + addspace(self.createFEC(fedata, feraw))
                + addspace(self.createModulation(fedata))
                + self.createOrbPos(feraw))

    def createFrequency(self, feraw):
        """Frequency in MHz for DVB-T, otherwise in kHz-derived units."""
        frequency = feraw.get("frequency")
        if frequency:
            if "DVB-T" in feraw.get("tuner_type"):
                return str(int(frequency / 1000000 + 0.5))
            else:
                return str(int(frequency / 1000 + 0.5))
        return ""

    def createSymbolRate(self, fedata, feraw):
        """Bandwidth for DVB-T, symbol rate (in kSym/s) otherwise."""
        if "DVB-T" in feraw.get("tuner_type"):
            bandwidth = fedata.get("bandwidth")
            if bandwidth:
                return bandwidth
        else:
            symbolrate = fedata.get("symbol_rate")
            if symbolrate:
                return str(symbolrate / 1000)
        return ""

    def createPolarization(self, fedata):
        return fedata.get("polarization_abbreviation") or ""

    def createFEC(self, fedata, feraw):
        """LP-HP code-rate pair for DVB-T, inner FEC otherwise."""
        if "DVB-T" in feraw.get("tuner_type"):
            code_rate_lp = fedata.get("code_rate_lp")
            code_rate_hp = fedata.get("code_rate_hp")
            if code_rate_lp and code_rate_hp:
                return code_rate_lp + "-" + code_rate_hp
        else:
            fec = fedata.get("fec_inner")
            if fec:
                return fec
        return ""

    def createModulation(self, fedata):
        """Constellation for terrestrial tuners, modulation otherwise."""
        if fedata.get("tuner_type") == _("Terrestrial"):
            constellation = fedata.get("constellation")
            if constellation:
                return constellation
        else:
            modulation = fedata.get("modulation")
            if modulation:
                return modulation
        return ""

    def createTunerType(self, feraw):
        return feraw.get("tuner_type") or ""

    def createTunerSystem(self, fedata):
        return fedata.get("system") or ""

    def createOrbPos(self, feraw):
        """Orbital position as e.g. '19.2° E' / '5.0° W' (UTF-8 degree sign)."""
        orbpos = feraw.get("orbital_position")
        if orbpos > 1800:
            return str((float(3600 - orbpos)) / 10.0) + "\xc2\xb0 W"
        elif orbpos > 0:
            return str((float(orbpos)) / 10.0) + "\xc2\xb0 E"
        return ""

    def createOrbPosOrTunerSystem(self, fedata, feraw):
        """Orbital position when available (satellite), tuner system otherwise."""
        orbpos = self.createOrbPos(feraw)
        # fixed: original compared with 'is not ""' (identity, not equality)
        if orbpos != "":
            return orbpos
        return self.createTunerSystem(fedata)

    def createTransponderName(self, feraw):
        """Human-readable satellite name for the transponder's orbital
        position, falling back to the numeric position in degrees E/W."""
        orbpos = feraw.get("orbital_position")
        if orbpos > 1800:
            return self._SAT_NAMES_WEST.get(orbpos, str((float(3600 - orbpos)) / 10.0) + "W")
        elif orbpos > 0:
            return self._SAT_NAMES_EAST.get(orbpos, str((float(orbpos)) / 10.0) + "E")
        return ""

    def createProviderName(self, info):
        return info.getInfoString(iServiceInformation.sProvider)

    @cached
    def getText(self):
        service = self.source.service
        if service is None:
            return ""
        info = service and service.info()
        if not info:
            return ""

        if self.type == "CryptoInfo":
            self.getCryptoInfo(info)
            if int(config.usage.show_cryptoinfo.getValue()) > 0:
                return addspace(self.createCryptoBar(info)) + self.createCryptoSpecial(info)
            return (addspace(self.createCryptoBar(info))
                    + addspace(self.current_source)
                    + self.createCryptoSpecial(info))

        # Per-system crypto renderers, all guarded by the same config switch.
        # ("CryptoPowerVU" intentionally absent — it never had a branch.)
        crypto_handlers = {
            "CryptoBar": self.createCryptoBar,
            "CryptoSeca": self.createCryptoSeca,
            "CryptoVia": self.createCryptoVia,
            "CryptoIrdeto": self.createCryptoIrdeto,
            "CryptoNDS": self.createCryptoNDS,
            "CryptoConax": self.createCryptoConax,
            "CryptoCryptoW": self.createCryptoCryptoW,
            "CryptoBeta": self.createCryptoBeta,
            "CryptoNagra": self.createCryptoNagra,
            "CryptoBiss": self.createCryptoBiss,
            "CryptoDre": self.createCryptoDre,
            "CryptoSpecial": self.createCryptoSpecial,
        }
        handler = crypto_handlers.get(self.type)
        if handler is not None:
            if int(config.usage.show_cryptoinfo.getValue()) > 0:
                self.getCryptoInfo(info)
                return handler(info)
            return ""

        if self.type == "ResolutionString":
            return self.createResolution(info)
        if self.type == "VideoCodec":
            return self.createVideoCodec(info)

        # Everything below needs frontend data; refresh it when flagged.
        if self.updateFEdata:
            feinfo = service.frontendInfo()
            if feinfo:
                self.feraw = feinfo.getAll(True)
                if self.feraw:
                    self.fedata = ConvertToHumanReadable(self.feraw)
        feraw = self.feraw
        fedata = self.fedata
        if not feraw or not fedata:
            return ""

        if self.type == "All":
            self.getCryptoInfo(info)
            if int(config.usage.show_cryptoinfo.getValue()) > 0:
                return (addspace(self.createProviderName(info)) + self.createTransponderInfo(fedata, feraw) + "\n"
                        + addspace(self.createCryptoBar(info)) + addspace(self.createCryptoSpecial(info)) + "\n"
                        + addspace(self.createPIDInfo(info)) + addspace(self.createVideoCodec(info)) + self.createResolution(info))
            return (addspace(self.createProviderName(info)) + self.createTransponderInfo(fedata, feraw) + "\n"
                    + addspace(self.createCryptoBar(info)) + self.current_source + "\n"
                    + addspace(self.createCryptoSpecial(info)) + addspace(self.createVideoCodec(info)) + self.createResolution(info))
        if self.type == "ServiceInfo":
            return (addspace(self.createProviderName(info)) + addspace(self.createTunerSystem(fedata))
                    + addspace(self.createFrequency(feraw)) + addspace(self.createPolarization(fedata))
                    + addspace(self.createSymbolRate(fedata, feraw)) + addspace(self.createFEC(fedata, feraw))
                    + addspace(self.createModulation(fedata)) + addspace(self.createOrbPos(feraw))
                    + addspace(self.createVideoCodec(info)) + self.createResolution(info))
        if self.type == "TransponderInfo2line":
            # NOTE(review): createFrequency is passed fedata here (feraw
            # everywhere else) — kept as in the original; verify intent.
            return (addspace(self.createProviderName(info)) + addspace(self.createTunerSystem(fedata))
                    + addspace(self.createTransponderName(feraw)) + '\n'
                    + self.createFrequency(fedata) + addspace(" MHz") + addspace(self.createPolarization(fedata))
                    + addspace(self.createSymbolRate(fedata, feraw)) + self.createModulation(fedata) + '-'
                    + addspace(self.createFEC(fedata, feraw)))
        if self.type == "TransponderInfo":
            return self.createTransponderInfo(fedata, feraw)
        if self.type == "TransponderFrequency":
            return self.createFrequency(feraw)
        if self.type == "TransponderSymbolRate":
            return self.createSymbolRate(fedata, feraw)
        if self.type == "TransponderPolarization":
            return self.createPolarization(fedata)
        if self.type == "TransponderFEC":
            return self.createFEC(fedata, feraw)
        if self.type == "TransponderModulation":
            return self.createModulation(fedata)
        if self.type == "OrbitalPosition":
            return self.createOrbPos(feraw)
        if self.type == "TunerType":
            return self.createTunerType(feraw)
        if self.type == "TunerSystem":
            return self.createTunerSystem(fedata)
        if self.type == "OrbitalPositionOrTunerSystem":
            return self.createOrbPosOrTunerSystem(fedata, feraw)
        if self.type == "PIDInfo":
            return self.createPIDInfo(info)
        return _("invalid type")

    text = property(getText)

    @cached
    def getBool(self):
        """Boolean probe for the CryptoCaid...Available/Selected types."""
        service = self.source.service
        info = service and service.info()
        if not info:
            return False
        request_caid = None
        for entry in self.ca_table:
            if entry[0] == self.type:
                request_caid = entry[1]
                request_selected = entry[2]
                break
        if request_caid is None:
            return False
        if info.getInfo(iServiceInformation.sIsCrypted) != 1:
            return False
        data = self.ecmdata.getEcmData()
        if data is None:
            return False
        current_caid = data[1]
        available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
        for caid_entry in self.caid_data:
            if caid_entry[3] != request_caid:
                continue
            lo = int(caid_entry[0], 16)
            hi = int(caid_entry[1], 16)
            if request_selected:
                if lo <= int(current_caid, 16) <= hi:
                    return True
            else:  # "available" probe
                try:
                    for caid in available_caids:
                        if lo <= caid <= hi:
                            return True
                except:
                    pass
        return False

    boolean = property(getBool)

    def changed(self, what):
        """Refresh frontend data on new program info, drop it on service end,
        and propagate poll ticks only after a service has been seen."""
        if what[0] == self.CHANGED_SPECIFIC:
            self.updateFEdata = False
            if what[1] == iPlayableService.evNewProgramInfo:
                self.updateFEdata = True
            if what[1] == iPlayableService.evEnd:
                self.feraw = self.fedata = None
            Converter.changed(self, what)
        elif what[0] == self.CHANGED_POLL and self.updateFEdata is not None:
            self.updateFEdata = False
            Converter.changed(self, what)
|
Ophiuchus1312/enigma2-master
|
lib/python/Components/Converter/PliExtraInfo.py
|
Python
|
gpl-2.0
| 24,850
|
[
"Galaxy"
] |
b4872a49aa70b9c891a0880fa9a6f72d44d7683f3df2fd97e69b364ba5aeac64
|
#!/usr/bin/env python
#
# Appcelerator Titanium Module Packager
#
#
import os, subprocess, sys, glob, string
import zipfile
from datetime import date
# Work from the directory containing this script so all relative paths
# (manifest, LICENSE, build/, assets/) resolve correctly.
cwd = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
os.chdir(cwd)
# Manifest keys that must be present. ('copyright' appears twice in the
# original list; harmless for membership tests.)
required_module_keys = ['name','version','moduleid','description','copyright','license','copyright','platform','minsdk']
# Default manifest values: matching these triggers a "please update" warning.
module_defaults = {
    'description':'My module',
    'author': 'Your Name',
    'license' : 'Specify your license',
    'copyright' : 'Copyright (c) %s by Your Company' % str(date.today().year),
}
# Placeholder text shipped in LICENSE; its presence triggers a warning.
module_license_default = "TODO: place your license here and we'll include it in the module distribution"
def find_sdk(config):
    """Resolve the Titanium SDK location from the xcconfig, expanding both
    '~' and environment variables in the configured path."""
    raw_path = config['TITANIUM_SDK']
    return os.path.expandvars(os.path.expanduser(raw_path))
def replace_vars(config, token):
    """Expand every $(KEY) placeholder in *token* using values from *config*.

    Stops at the first unterminated '$(' or unknown key, leaving the rest
    of the token untouched (same behavior as the original).
    """
    idx = token.find('$(')
    while idx != -1:
        idx2 = token.find(')', idx + 2)
        if idx2 == -1:
            break  # unterminated placeholder: give up
        key = token[idx + 2:idx2]
        # 'key in config' replaces config.has_key(key), which was removed in Python 3
        if key not in config:
            break
        token = token.replace('$(%s)' % key, config[key])
        idx = token.find('$(')
    return token
def read_ti_xcconfig():
    """Parse titanium.xcconfig into a dict, expanding $(VAR) references
    against earlier entries as each line is read."""
    # 'with' closes the handle; the original leaked the open file object
    with open(os.path.join(cwd, 'titanium.xcconfig')) as f:
        contents = f.read()
    config = {}
    for line in contents.splitlines(False):
        line = line.strip()
        if line[0:2] == '//':  # skip xcconfig comment lines
            continue
        idx = line.find('=')
        if idx > 0:
            key = line[0:idx].strip()
            value = line[idx + 1:].strip()
            config[key] = replace_vars(config, value)
    return config
def generate_doc(config):
    """Render every file in documentation/ from Markdown to HTML.

    Returns a list of one-entry {filename: html} dicts, or None when the
    documentation directory is missing.
    """
    docdir = os.path.join(cwd, 'documentation')
    if not os.path.exists(docdir):
        # print as a call: same output on Python 2, valid syntax on Python 3
        print("Couldn't find documentation file at: %s" % docdir)
        return None
    try:
        import markdown2 as markdown
    except ImportError:
        import markdown
    documentation = []
    for file in os.listdir(docdir):
        if file in ignoreFiles or os.path.isdir(os.path.join(docdir, file)):
            continue
        # read via 'with' so the handle is closed (the original leaked it)
        with open(os.path.join(docdir, file)) as f:
            md = f.read()
        html = markdown.markdown(md)
        documentation.append({file: html})
    return documentation
def compile_js(manifest, config):
    # Compile the module's CommonJS JavaScript into the generated iOS assets
    # router class so it ships inside the binary rather than as a loose file.
    # No-op when the expected bundled JS file is absent.
    js_file = os.path.join(cwd, 'assets', 'com.dezinezync.ticolorart.js')
    if not os.path.exists(js_file):
        return
    from compiler import Compiler
    try:
        import json
    except:
        # Python < 2.6 fallback
        import simplejson as json
    compiler = Compiler(cwd, manifest['moduleid'], manifest['name'], 'commonjs')
    root_asset, module_assets = compiler.compile_module()
    # Objective-C snippets spliced into the assets class: the root asset
    # returns range 0; module assets are resolved through the path->index map.
    root_asset_content = """
%s
	return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[0]);
""" % root_asset
    module_asset_content = """
%s
	NSNumber *index = [map objectForKey:path];
	if (index == nil) {
		return nil;
	}
	return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[index.integerValue]);
""" % module_assets
    from tools import splice_code
    assets_router = os.path.join(cwd, 'Classes', 'ComDezinezyncTicolorartModuleAssets.m')
    splice_code(assets_router, 'asset', root_asset_content)
    splice_code(assets_router, 'resolve_asset', module_asset_content)
    # Generate the exports after crawling all of the available JS source
    exports = open('metadata.json', 'w')
    json.dump({'exports': compiler.exports}, exports)
    exports.close()
def die(msg):
    """Print *msg* and abort the packager with exit status 1."""
    # print as a call: same output on Python 2, valid syntax on Python 3
    print(msg)
    sys.exit(1)
def warn(msg):
    """Print *msg* prefixed with [WARN]; non-fatal."""
    # print as a call: same output on Python 2, valid syntax on Python 3
    print("[WARN] %s" % msg)
def validate_license():
    """Warn when LICENSE still contains the boilerplate placeholder text."""
    # 'with' closes the handle; the original leaked the open file object
    with open(os.path.join(cwd, 'LICENSE')) as f:
        c = f.read()
    if c.find(module_license_default) != -1:
        warn('please update the LICENSE file with your license text before distributing')
def validate_manifest():
    """Parse and sanity-check the module manifest file.

    Returns (manifest_dict, path). Dies when the file or a required key is
    missing; warns when a value is still at its boilerplate default.
    """
    path = os.path.join(cwd, 'manifest')
    # Check existence BEFORE opening. The original opened first, so its
    # die() branch was unreachable — open() had already raised IOError.
    if not os.path.exists(path):
        die("missing %s" % path)
    manifest = {}
    with open(path) as f:  # 'with' closes the handle the original leaked
        for line in f:
            line = line.strip()
            if line[0:1] == '#':  # comment line
                continue
            if line.find(':') < 0:  # not a key:value line
                continue
            # split only on the FIRST ':' — the original's split(':') raised
            # ValueError for values containing colons (e.g. URLs)
            key, value = line.split(':', 1)
            manifest[key.strip()] = value.strip()
    for key in required_module_keys:
        # 'key in manifest' replaces has_key(), which was removed in Python 3
        if key not in manifest:
            die("missing required manifest key '%s'" % key)
        if key in module_defaults:
            defvalue = module_defaults[key]
            curvalue = manifest[key]
            if curvalue == defvalue:
                warn("please update the manifest key: '%s' to a non-default value" % key)
    return manifest, path
# File and directory names excluded when zipping module contents (zip_dir)
# and when rendering documentation (generate_doc).
ignoreFiles = ['.DS_Store','.gitignore','libTitanium.a','titanium.jar','README']
ignoreDirs = ['.DS_Store','.svn','.git','CVSROOT']
def zip_dir(zf, dir, basepath, ignore=None):
    """Recursively add *dir* to the open ZipFile *zf*, rebased under
    *basepath*, skipping ignored names and .pyc/.js artifacts.

    NOTE(review): the `ignore` parameter is accepted but never consulted
    (call sites pass ['README'], which is already covered by ignoreFiles);
    kept for call-site compatibility. Its default was a shared mutable
    list ([]) — changed to None to avoid the mutable-default pitfall.
    """
    for root, dirs, files in os.walk(dir):
        # prune ignored directory names in place so os.walk skips them
        for name in ignoreDirs:
            if name in dirs:
                dirs.remove(name)
        for file in files:
            if file in ignoreFiles:
                continue
            ext = os.path.splitext(file)
            if len(ext) == 2 and ext[1] == '.pyc':
                continue
            if len(ext) == 2 and ext[1] == '.js':
                continue
            from_ = os.path.join(root, file)
            # rebase the archive name: replace the leading dir with basepath
            to_ = from_.replace(dir, basepath, 1)
            zf.write(from_, to_)
def glob_libfiles():
    """Collect every Release-configuration static library under build/."""
    return [libpath for libpath in glob.glob('build/**/*.a')
            if 'Release-' in libpath]
def build_module(manifest, config):
    # Build the static library for device and simulator via xcodebuild,
    # then merge the per-arch products into one fat library with lipo.
    from tools import ensure_dev_path
    ensure_dev_path()
    rc = os.system("xcodebuild -sdk iphoneos -configuration Release")
    if rc != 0:
        die("xcodebuild failed")
    rc = os.system("xcodebuild -sdk iphonesimulator -configuration Release")
    if rc != 0:
        die("xcodebuild failed")
    # build the merged library using lipo
    moduleid = manifest['moduleid']
    libpaths = ''
    # space-separated list of every Release .a found under build/
    for libfile in glob_libfiles():
        libpaths += '%s ' % libfile
    os.system("lipo %s -create -output build/lib%s.a" % (libpaths, moduleid))
def package_module(manifest, mf, config):
    """Assemble the distributable <moduleid>-iphone-<version>.zip containing
    the manifest, fat library, rendered docs, asset/example/platform trees,
    LICENSE, module.xcconfig and the JS exports metadata."""
    moduleid = manifest['moduleid'].lower()
    version = manifest['version']
    modulezip = '%s-iphone-%s.zip' % (moduleid, version)
    if os.path.exists(modulezip):
        os.remove(modulezip)  # always rebuild from scratch
    zf = zipfile.ZipFile(modulezip, 'w', zipfile.ZIP_DEFLATED)
    modulepath = 'modules/iphone/%s/%s' % (moduleid, version)
    zf.write(mf, '%s/manifest' % modulepath)
    libname = 'lib%s.a' % moduleid
    zf.write('build/%s' % libname, '%s/%s' % (modulepath, libname))
    docs = generate_doc(config)
    if docs is not None:
        for doc in docs:
            # .items() works on Python 2 and 3 (.iteritems() is 2-only)
            for file, html in doc.items():
                # str.replace instead of string.replace (removed in Python 3)
                filename = file.replace('.md', '.html')
                zf.writestr('%s/documentation/%s' % (modulepath, filename), html)
    for dn in ('assets', 'example', 'platform'):
        if os.path.exists(dn):
            zip_dir(zf, dn, '%s/%s' % (modulepath, dn), ['README'])
    zf.write('LICENSE', '%s/LICENSE' % modulepath)
    zf.write('module.xcconfig', '%s/module.xcconfig' % modulepath)
    exports_file = 'metadata.json'
    if os.path.exists(exports_file):
        zf.write(exports_file, '%s/%s' % (modulepath, exports_file))
    zf.close()
if __name__ == '__main__':
    # Packaging pipeline: validate the manifest and license, load the
    # xcconfig, put the Titanium SDK's python helpers on sys.path, then
    # compile the JS assets, build the fat static library and zip it all up.
    manifest, mf = validate_manifest()
    validate_license()
    config = read_ti_xcconfig()
    sdk = find_sdk(config)
    sys.path.insert(0, os.path.join(sdk, 'iphone'))
    sys.path.append(os.path.join(sdk, "common"))
    compile_js(manifest, config)
    build_module(manifest, config)
    package_module(manifest, mf, config)
    sys.exit(0)
|
dezinezync/TiColorArt
|
build.py
|
Python
|
mit
| 6,789
|
[
"VisIt"
] |
5933f08361eeaeb8fd5b706ebdbef082fb9c983e6c2589606b750c66cc0a6615
|
# -*- coding: utf-8 -*-
"""Shortest paths and path lengths using A* ("A star") algorithm.
"""
# Copyright (C) 2004-2011 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
from heapq import heappush, heappop
from networkx import NetworkXError
import networkx as nx
__author__ = "\n".join(["Salim Fadhley <salimfadhley@gmail.com>",
"Matteo Dell'Amico <matteodellamico@gmail.com>"])
__all__ = ['astar_path', 'astar_path_length']
def astar_path(G, source, target, heuristic=None, weight='weight'):
    """Return a list of nodes in a shortest path between source and target
    using the A* ("A-star") algorithm.

    There may be more than one shortest path.  This returns only one.

    Parameters
    ----------
    G : NetworkX graph

    source : node
       Starting node for path

    target : node
       Ending node for path

    heuristic : function
       A function to evaluate the estimate of the distance
       from the a node to the target.  The function takes
       two nodes arguments and must return a number.

    weight: string, optional (default='weight')
       Edge data key corresponding to the edge weight.

    Raises
    ------
    NetworkXNoPath
        If no path exists between source and target.

    Examples
    --------
    >>> G=nx.path_graph(5)
    >>> print(nx.astar_path(G,0,4))
    [0, 1, 2, 3, 4]
    >>> G=nx.grid_graph(dim=[3,3])  # nodes are two-tuples (x,y)
    >>> def dist(a, b):
    ...    (x1, y1) = a
    ...    (x2, y2) = b
    ...    return ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5
    >>> print(nx.astar_path(G,(0,0),(2,2),dist))
    [(0, 0), (0, 1), (1, 1), (1, 2), (2, 2)]

    See Also
    --------
    shortest_path, dijkstra_path
    """
    if G.is_multigraph():
        raise NetworkXError("astar_path() not implemented for Multi(Di)Graphs")

    if heuristic is None:
        # The default heuristic is h=0 - same as Dijkstra's algorithm
        def heuristic(u, v):
            return 0

    # The queue stores priority, node, cost to reach, and parent.
    # Uses Python heapq to keep in priority order.
    # Add each node's hash to the queue to prevent the underlying heap from
    # attempting to compare the nodes themselves. The hash breaks ties in the
    # priority and is guarenteed unique for all nodes in the graph.
    queue = [(0, hash(source), source, 0, None)]

    # Maps enqueued nodes to distance of discovered paths and the
    # computed heuristics to target. We avoid computing the heuristics
    # more than once and inserting the node into the queue too many times.
    enqueued = {}
    # Maps explored nodes to parent closest to the source.
    explored = {}

    while queue:
        # Pop the smallest item from queue.
        _, __, curnode, dist, parent = heappop(queue)

        if curnode == target:
            # Reached the goal: reconstruct the path by following the
            # parent links stored in `explored` back to the source.
            path = [curnode]
            node = parent
            while node is not None:
                path.append(node)
                node = explored[node]
            path.reverse()
            return path

        if curnode in explored:
            # Stale queue entry: node already expanded via a path that was
            # at least as good (see comment on qcost below).
            continue

        explored[curnode] = parent

        for neighbor, w in G[curnode].items():
            if neighbor in explored:
                continue
            # missing weight attribute defaults to 1
            ncost = dist + w.get(weight, 1)
            if neighbor in enqueued:
                qcost, h = enqueued[neighbor]
                # if qcost < ncost, a longer path to neighbor remains
                # enqueued. Removing it would need to filter the whole
                # queue, it's better just to leave it there and ignore
                # it when we visit the node a second time.
                if qcost <= ncost:
                    continue
            else:
                h = heuristic(neighbor, target)
            enqueued[neighbor] = ncost, h
            heappush(queue, (ncost + h, hash(neighbor), neighbor,
                             ncost, curnode))

    raise nx.NetworkXNoPath("Node %s not reachable from %s" % (source, target))
def astar_path_length(G, source, target, heuristic=None, weight='weight'):
    """Return the length of the shortest path between source and target using
    the A* ("A-star") algorithm.
    Parameters
    ----------
    G : NetworkX graph
    source : node
       Starting node for path
    target : node
       Ending node for path
    heuristic : function
       A function to evaluate the estimate of the distance
       from a node to the target.  The function takes
       two node arguments and must return a number.
    weight: string, optional (default='weight')
       Edge data key corresponding to the edge weight.
    Raises
    ------
    NetworkXNoPath
        If no path exists between source and target.
    See Also
    --------
    astar_path
    """
    # BUG FIX: `weight` was previously not forwarded to astar_path(), so the
    # search always minimized the default 'weight' key while the returned sum
    # used the caller's key -- a non-default weight could yield a length for
    # a path that is not shortest under that weight.
    path = astar_path(G, source, target, heuristic, weight)
    return sum(G[u][v].get(weight, 1) for u, v in zip(path[:-1], path[1:]))
|
GbalsaC/bitnamiP
|
venv/lib/python2.7/site-packages/networkx/algorithms/shortest_paths/astar.py
|
Python
|
agpl-3.0
| 4,913
|
[
"VisIt"
] |
bbf94c497ea1a56966048c182ffe1a839239b2a78114ce13c177818db6dbf7e5
|
#__BEGIN_LICENSE__
# Copyright (c) 2015, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All rights reserved.
#
# The xGDS platform is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#__END_LICENSE__
import logging
import string
from django.shortcuts import render
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import permission_required
from django.contrib.auth.models import User
from django.core import mail
from django.conf import settings
from xgds_core.registerForms import UserRegistrationForm, EmailFeedbackForm
from rest_framework.authtoken.models import Token
from django.http import JsonResponse
# Plain-text body of the "new registration request" notification sent to the
# site managers.  $-placeholders are filled in by registerUser() via
# Template.substitute(); string.Template is used (rather than Django
# templating) because the text is simple key substitution only.
registration_email_template = string.Template(  # noqa
    """
Greetings, xGDS managers.
You have received a user registration request for $first_name $last_name.
Username: $username
Email: $email
$first_name says:
"$comments"
To activate this user, visit:
$url
The request came from $ip_address, and was referred by the form at $referrer
"""
)
def registerUser(request):
    """Handle the self-service account-registration form.

    GET renders an empty registration form.  POST validates the submission,
    creates a new *inactive* user, and emails the site managers an
    activation link (see activateUser).  The requester is told to wait for
    manager approval.

    Raises:
        Exception: for any HTTP method other than GET or POST.
    """
    if request.method not in ('GET', 'POST'):
        raise Exception("Invalid request method: " + request.method)
    # if request.user.is_authenticated:
    #     # Don't let them register again if they're already logged in.
    #     #return HttpResponseRedirect('/')
    #     return HttpResponse("You are already logged in.")
    if request.method == "GET":
        return render(request,
                      "registration/register.html",
                      {'register_form': UserRegistrationForm()}
                      )
    form = UserRegistrationForm(request.POST)
    if not form.is_valid():
        # Re-render the form so field errors are shown to the requester.
        logging.info("Create form validation failed.")
        return render(request,
                      "registration/register.html",
                      {'register_form': form})
    logging.info("Creating a new user")
    user_data = form.cleaned_data
    assert user_data.get('email')
    user = User.objects.create_user(user_data['username'], user_data['email'], user_data['password1'])
    user.first_name = user_data['first_name']
    user.last_name = user_data['last_name']
    # Accounts start deactivated; a manager must approve via the emailed link.
    user.is_active = False
    user.save()
    mail.mail_managers(
        'Registration request from %s %s (%s)' % (user.first_name, user.last_name, user.username),
        registration_email_template.substitute({
            'username': user.username,
            'first_name': user.first_name,
            'last_name': user.last_name,
            'email': user.email,
            'url': request.build_absolute_uri(reverse('user-activate', args=[user.id])),
            'comments': user_data['comments'],
            'ip_address': request.META['REMOTE_ADDR'],
            # BUG FIX: the Referer header is optional; direct indexing of
            # request.META['HTTP_REFERER'] raised KeyError (HTTP 500) for
            # clients that do not send it.
            'referrer': request.META.get('HTTP_REFERER', ''),
        }),
    )
    return render(request,
                  "registration/simple_message.html",
                  {'message': "You will receive an email notification at %s after a site manager approves your request." % user.email},
                  )
@permission_required('add_user')
def activateUser(request, user_id):
    """Activate the user with primary key *user_id* (manager-only action).

    Flips is_active, emails the user a login link, and notifies the other
    managers who performed the activation.  Idempotent: re-visiting the link
    for an already-active user only renders a message.
    """
    def render_message(msg):
        # Small helper: render msg in the generic simple_message template.
        return render(request,
                      "registration/simple_message.html",
                      {'message': msg},
                      )
    try:
        user = User.objects.get(id=user_id)
    except User.DoesNotExist:
        return render_message("No user with the given id")
    if user.is_active:
        return render_message("The user %s has already been activated. Someone must have gotten here first." % user.username)
    user.is_active = True
    user.save()
    # Tell the user their account is ready.  Note: $username is supplied to
    # substitute() but unused by this template; extra keys are harmless.
    mail.send_mail(
        settings.EMAIL_SUBJECT_PREFIX + "Your account has been activated",
        string.Template("""
Hi $first_name,
Your xGDS registration request has been approved. Click to log in!
$url
""").substitute({'username': user.username,
                 'first_name': user.first_name,
                 'url': request.build_absolute_uri(reverse('user-login'))}),
        settings.SERVER_EMAIL,
        [user.email],
    )
    # Audit trail: let all managers know who activated the account.
    mail.mail_managers(
        settings.EMAIL_SUBJECT_PREFIX + "The user %s was activated." % user.username,
        string.Template("""
The user $first_name $last_name ($username) was successfully activated by $adminuser.
""").substitute({'username': user.username,
                 'first_name': user.first_name,
                 'last_name': user.last_name,
                 'adminuser': request.user.username}),
    )
    return render_message("The user %s %s (%s) was successfully activated." % (user.first_name, user.last_name, user.username))
def email_feedback(request):
    """Render the feedback form (GET) or mail its contents to the managers (POST).

    If the sender supplies a reply-to address, it is prepended to the body
    and a copy of the message is sent to the sender's account email.
    """
    mail_sent = False
    if request.POST:
        form = EmailFeedbackForm(request.POST)
        if form.is_valid():
            cc = []
            content = form.cleaned_data['email_content']
            fromEmail = form.cleaned_data.get('reply_to', None)
            if fromEmail:
                cc = [request.user.email]
                content = fromEmail + ": " + content
            # BUG FIX: mail_managers() has no cc parameter -- its third
            # positional argument is fail_silently, so the old call passed
            # the cc list there.  A non-empty list was truthy, silently
            # suppressing send errors, and the requester never got a copy.
            mail.mail_managers(
                "XGDS USER FEEDBACK",
                content)
            if cc:
                # Send the requester their copy explicitly.
                mail.send_mail("XGDS USER FEEDBACK", content,
                               settings.SERVER_EMAIL, cc)
            mail_sent = True
    else:
        # GET: prefill reply_to with the requester's email if they have one.
        email = None
        if hasattr(request.user, 'email'):
            email = request.user.email
        form = EmailFeedbackForm(initial={'reply_to': email})
    return render(request,
                  'registration/email_feedback.html',
                  {'form': form,
                   'mail_sent': mail_sent},
                  )
def generateAuthToken(request, username):
    """Return JSON {'username', 'token'} holding a DRF auth token.

    A non-empty *username* selects that user; otherwise the token belongs
    to the requesting user.  The token is created on first use.
    """
    target = User.objects.get(username=username) if username else request.user
    token, _created = Token.objects.get_or_create(user=target)
    payload = {'username': target.username,
               'token': token.key}
    return JsonResponse(payload)
def renderTemplate(request, template_name):
    """Render *template_name* for any HTTP method (TemplateView.as_view
    does not support POST)."""
    return render(request, template_name)
|
xgds/xgds_core
|
xgds_core/register.py
|
Python
|
apache-2.0
| 6,851
|
[
"VisIt"
] |
41a28cac43c8ecf0f52321aad80c9b37d720c031e447a5335a01927a685131b5
|
#============================================================================
#
# Copyright (c) Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#============================================================================
import imp, sys, os, unittest
from __main__ import vtk, qt, ctk, slicer
from InteractiveSegmentTubesWidget import *
from InteractiveSegmentTubesLogic import *
class InteractiveSegmentTubes:
  """Slicer scripted-module descriptor for Interactive Segment Tubes.

  Registers the module metadata (title, category, contributors, help text,
  icon) on the *parent* object supplied by Slicer's module framework.  The
  actual GUI and processing live in the star-imported Widget/Logic modules.
  """
  def __init__(self, parent):
    # FIX: removed an unused `import string` that the original did here.
    parent.title = "Interactive Segment Tubes"
    parent.categories = ["TubeTK"]
    parent.contributors = ["Johan Andruejol (Kitware)"]
    parent.helpText = """
    <p>This module is an interactive gui wrapping around the <i>Segment Tubes</i>
    module. It automatically processes the seeds points, allowing the user to
    click its way through an image without having to worry about running the
    <i>Segment Tubes</i> CLI by hand.
    <p>To use, simply specify the <b>Input image</b>, the <b>Output tube</b> and
    the <b>Seed points</b> list. The seed list is the list that will contains
    all the seeds to process. Click the <b>Start</b> button to start
    processing seeds as they come.\n
    <p>Each new markup added to the list will be queued ('Queued' status) for
    processing. Once the <i>Segment Tubes</i> CLI processed the queued seeds,
    their status will change to 'Processed'. The new vessels will be merged to
    the <b>Output tube</b>."""
    parent.acknowledgementText = """"""
    # ICON_DIR is expected to come from the star-imported modules above --
    # TODO confirm it is defined there.
    iconPath = os.path.join(ICON_DIR, 'InteractiveSegmentTubesIcon.png')
    parent.icon = qt.QIcon(iconPath)
    self.parent = parent
|
KitwareMedical/VesselView
|
Modules/Scripted/InteractiveSegmentTubes/InteractiveSegmentTubes.py
|
Python
|
apache-2.0
| 2,133
|
[
"VTK"
] |
7561747797e5420b222a182d601b55f517d7019c099c9a8dbe074d91bf343cb9
|
#!/usr/bin/env python
# Copyright 2007, Michael J. Harms
# This program is distributed under General Public License v. 3. See the file
# COPYING for a copy of the license.
__description__ = "Adds polar hydrogens to a pdb file for a UHBD calculation."
__author__ = "Michael J. Harms"
__date__ = "070727"
import time, os
from math import sqrt
from .helper import container, cmdline, geometry
from .atom_renumber import pdbAtomRenumber
from .disulfide import pdbDisulfide
#from .charmm import interface
### HACK HACK HACK
class PdbAddHError(Exception):
    """General error raised by this module when hydrogen addition fails."""
def renameResidue(pdb,residue_name,residue_number):
    """Rename, in place, the residue whose id field (columns 21-26) equals
    *residue_number*, writing *residue_name* into columns 17-20.

    Assumes the residue's records are contiguous in *pdb*; returns the
    (mutated) list.
    """
    matching = [record for record in pdb if record[21:26] == residue_number]
    start = pdb.index(matching[0])
    for offset, old in enumerate(matching):
        # Keep cols 0-16, overwrite the 4-char residue-name field, keep the rest.
        pdb[start + offset] = old[:17] + "%-4s" % residue_name + old[21:]
    return pdb
def convertHis(pdb,his_types=None):
    """
    Rename HIS residues to HSD or HIS (1 or 2 tautomers).  If his_types is
    specified, use the integer codes it contains (1 -> HSD, 2 -> HIS,
    3 -> HSC); otherwise every histidine gets the 2 tautomer.
    """
    # Find histidines
    all_his = ["HIS ","HSD ","HSE ","HISA","HISB"]
    his_dict = {1:"HSD ",2:"HIS ",3:"HSC "}
    # One line per histidine: its backbone-nitrogen record.  BUG FIX: the
    # atom-name field l[13:16] is three columns wide, so the comparison
    # string must be "N" padded to three characters -- the old two-character
    # literal could never match and the function was a silent no-op.
    HIS_lines = [l for l in pdb if l[17:21] in all_his and l[13:16] == "N  "]
    if his_types is None:
        his_types = [2 for l in HIS_lines]
    # Translate the integer codes into residue-name strings.
    his_types = [his_dict[k] for k in his_types]
    # Rename each histidine residue to its chosen tautomer name.
    for index, HIS_line in enumerate(HIS_lines):
        residue_number = HIS_line[21:26]
        pdb = renameResidue(pdb,his_types[index],residue_number)
    return pdb
def convertResidues(pdb,atom_conv={},resid_conv={},atom_skip=[],resid_skip=[]):
    """
    Convert a pdb file to a CHARMM readable input: optionally rename atoms
    (atom_conv), rename residues (resid_conv), and drop records whose atom
    name is in atom_skip.

    Notes: the mutable default arguments are never mutated (read-only), and
    resid_skip is currently unused -- TODO confirm intended behavior.
    """
    atom_to_convert = list(atom_conv.keys())
    res_to_convert = list(resid_conv.keys())
    new_pdb = []
    for line in pdb:
        atom = line[12:16]
        if atom in atom_skip:
            continue
        elif atom in atom_to_convert:
            line = "%s%-4s%s" % (line[:12],atom_conv[atom],line[16:])
        res = line[17:21]
        # FIX: removed an unreachable duplicate `if atom in atom_skip:
        # continue` here -- skipped atoms already continued above.
        if res in res_to_convert:
            line = "%s%-4s%s" % (line[:17],resid_conv[res],line[21:])
        new_pdb.append(line)
    return new_pdb
def processCysteines(pdb):
    """
    Find disulfide bonds.  Add hydrogens to free cysteines.  Change name of
    disulfide bonded residues to CSS.

    Relies on pdbDisulfide() to classify cysteines and geometry.calcHG()
    to place the thiol hydrogen -- presumably both return residue ids /
    coordinates in the formats used below (TODO confirm in .disulfide and
    .helper.geometry).
    """
    # Find disulfide bonds
    free_cys, disulfide = pdbDisulfide(pdb)
    # Add hydrogens to free cysteines
    for cys in free_cys:
        # All records of this cysteine (residue id in columns 21-26).
        residue = [l for l in pdb if l[21:26] == cys]
        CB_line = [l for l in residue if l[13:16] == "CB "][0]
        SG_line = [l for l in residue if l[13:16] == "SG "][0]
        # x, y, z occupy three 8-column fields starting at column 30.
        CB_coord = [float(CB_line[30+8*i:38+8*i]) for i in range(3)]
        SG_coord = [float(SG_line[30+8*i:38+8*i]) for i in range(3)]
        HG_coord = geometry.calcHG(CB_coord,SG_coord)
        # Build a new ATOM record for the thiol hydrogen and insert it
        # directly after the SG record it is bonded to.
        HG_line = "ATOM %5i %-3s %-4s%5s %8.3F%8.3F%8.3F%23s%s \n" % \
                  (1,"HG","CYS",cys,HG_coord[0],HG_coord[1],HG_coord[2]," ","H")
        index = pdb.index(SG_line)
        pdb.insert(index+1,HG_line)
    # Rename CYS in disulfide bonds to CSS
    for cys in disulfide:
        pdb = renameResidue(pdb,"CSS",cys)
    return pdb
def processTerm(pdb):
    """
    Process termini.  Change name of N-terminal residues to RESN, name of
    C-terminal residues to RESC, and add C-terminal carboxyl hydrogen.

    Termini are detected from CHARMM-added atoms: HT1 marks an N-terminus,
    OXT a C-terminus.  geometry.calcHXT() places the carboxyl hydrogen --
    TODO confirm its argument order (C, O, OXT) in .helper.geometry.
    """
    # Change name of N-terminal residue
    HT1_lines = [l for l in pdb if l[13:16] == "HT1"]
    for HT1_line in HT1_lines:
        # Change name of N terminal residue to RESN (i.e. ALAN, ARGN, etc.)
        residue_name = HT1_line[16:20].strip()
        residue_name = "%4s" % (residue_name + "N")
        residue_number = HT1_line[21:26]
        pdb = renameResidue(pdb,residue_name,residue_number)
    # Add C-terminal hydrogens
    OXT_lines = [l for l in pdb if l[13:16] == "OXT"]
    for OXT_line in OXT_lines:
        # Grab residues in C-terminus
        last_residue = [l for l in pdb if l[21:26] == OXT_line[21:26]]
        C_line = [l for l in last_residue if l[13:16] == "C "][0]
        O_line = [l for l in last_residue if l[13:16] == "O "][0]
        # Calculate position of HXT from the three carboxyl-group atoms
        # (coordinates live in three 8-column fields starting at col 30).
        OXT_coord = [float(OXT_line[30+8*i:38+8*i]) for i in range(3)]
        C_coord = [float(C_line[30+8*i:38+8*i]) for i in range(3)]
        O_coord = [float(O_line[30+8*i:38+8*i]) for i in range(3)]
        HXT_coord = geometry.calcHXT(C_coord,O_coord,OXT_coord)
        # Convert position of HXT to a pdb line
        HXT_line = "ATOM %5i %-3s %9s %8.3F%8.3F%8.3F%23s%s \n" % \
                   (1,"HXT",OXT_line[17:26],
                    HXT_coord[0],HXT_coord[1],HXT_coord[2]," ","H")
        # Insert HXT directly after the OXT record it is bonded to.
        index = pdb.index(OXT_line)
        pdb.insert(index+1,HXT_line)
        # Change name of C terminal residue to RESC (i.e. ALAC, ARGC, etc.)
        residue_name = OXT_line[16:20].strip()
        residue_name = "%4s" % (residue_name + "C")
        residue_number = OXT_line[21:26]
        pdb = renameResidue(pdb,residue_name,residue_number)
    return pdb
def flipAtoms(pdb):
    """
    Flip the OD1/OD2 labels of ASP and OE1/OE2 labels of GLU residues,
    in place; returns the (mutated) list.
    """
    flip = {"ASP":("OD1","OD2"),
            "GLU":("OE1","OE2")}
    flip_keys = list(flip.keys())
    for index, line in enumerate(pdb):
        # BUG FIX: residue name occupies columns 17-19; the original sliced
        # [17:30], which could never equal a three-letter key, so nothing
        # was ever flipped.
        res = line[17:20]
        if res in flip_keys and line[13:16] in flip[res]:
            # BUG FIX: map the oxygen to its partner name (index-1 wraps
            # 0 -> -1 -> the other entry).  The original stored the raw
            # integer index instead of the atom name.
            new_atom = flip[res][flip[res].index(line[13:16]) - 1]
            pdb[index] = "%s%s%s" % (line[0:13],new_atom,line[16:])
    return pdb
def pdbAddH(pdb,pdb_id,uhbd_style=False,his_types=None,calc_type="single",
            keep_temp=False,hbond=False):
    """
    Add polar hydrogens to the structure using CHARMM for a UHBD calculation.

    pdb: list of raw pdb-file lines.  pdb_id: basename used for the residue
    renumbering sidecar file.  uhbd_style: post-process output into UHBD's
    non-standard pdb dialect.  his_types: optional list of histidine
    tautomer codes (see convertHis).  calc_type: "single" or "full".
    keep_temp/hbond are passed through to the CHARMM wash.
    Raises PdbAddHError on unknown calc_type or CHARMM failure.
    """
    # Residues to alter and skip during processing
    if calc_type == "single":
        pdb2charmm_resid = {"LYS ":"LYSN","ARG ":"ARGN","GLU ":"GLUH",
                            "ASP ":"ASPH","LYSH":"LYSN","LSN ":"LYSN"}
        charmm2pdb_resid = {"LYSN":"LYS ","ARGN":"ARG ","GLUH":"GLU ",
                            "ASPH":"ASP ","HIS ":"HISA","HSD ":"HISB"}
    elif calc_type == "full":
        pdb2charmm_resid = {"GLU ":"GLUH","ASP ":"ASPH","LYSH":"LYS ",
                            "LSN ":"LYS "}
        charmm2pdb_resid = {"GLUH":"GLU ","ASPH":"ASP ","HIS ":"HISA",
                            "HSD ":"HISB","HSC ":"HISA"}
    else:
        err = "Calculation type \"%s\" not recognized!" % calc_type
        raise PdbAddHError(err)
    charmm2pdb_atom_skip = [" HT3"]
    all_his = ["HIS ","HSD ","HSE ","HISA","HISB"]
    # Grab sequence and atoms from pdb file
    seq_lines = [l for l in pdb if l[0:6] == "SEQRES"]
    atom_lines = [l for l in pdb if l[0:6] == "ATOM "]
    # Create a pdb object that will find termini and renumber all atoms.  The
    # renumbering scheme is dumped to pdb_id_resid-conversion.txt
    pdb_obj = container.Structure(pdb_id,seq_lines,atom_lines)
    pdb_obj.renumberAtoms()
    pdb_obj.dumpNumberConversion("%s_resid-conversion.txt" % pdb_id)
    structure_list = pdb_obj.dumpStructures()
    # Convert residue names in structure
    for index, struct in enumerate(structure_list):
        tmp_struct = convertResidues(struct[0],resid_conv=pdb2charmm_resid)
        structure_list[index][0] = tmp_struct
    # Convert histidines to correct type (specified in tautomer file).  If
    # no tautomer file is specified, default HIS is passed to charmm
    if calc_type == "single":
        his_list = his_types
        for index, struct in enumerate(structure_list):
            if his_types != None:
                num_his = len([l for l in struct[0] if l[17:21] in all_his])
                try:
                    # Consume this structure's share of the tautomer codes.
                    his = his_list[:num_his]
                    his_list = his_list[num_his:]
                except IndexError:
                    # NOTE(review): slicing never raises IndexError, so this
                    # handler looks unreachable; and `raise` on the result of
                    # parser.error() is unusual -- confirm intent.
                    err = "Number of HIS in pdb and tautomer file do not match!"
                    raise cmdline.parser.error(err)
            else:
                his = None
            tmp_struct = convertHis(struct[0][:],his)
            structure_list[index][0] = tmp_struct
        # Make sure that all his where used
        # NOTE(review): `err` may be unbound here if the except above never
        # ran -- this would raise NameError, not the intended message.
        if his_types != None and len(his_list) != 0:
            raise cmdline.parser.error(err)
    # For full calculation, convert all histidines to charged form (HSC)
    elif calc_type == "full":
        for index, struct in enumerate(structure_list):
            num_his = len([l for l in struct[0] if l[17:21] in all_his])
            his = [3 for i in range(num_his)]
            tmp_struct = convertHis(struct[0][:],his)
            structure_list[index][0] = tmp_struct
    # Flip carboxyl atoms
    if calc_type == "full":
        for index, struct in enumerate(structure_list):
            structure_list[index][0] = flipAtoms(struct[0])
    # Use CHARMM to add hydrogens
    # NOTE(review): the `charmm` import at the top of this file is commented
    # out ("HACK HACK HACK"), so `charmm` and `interface` are undefined here
    # (NameError); the except clause below is also broken Python-2-style
    # syntax (`strerr` is undefined when the clause is evaluated).  Left
    # byte-identical -- fixing requires restoring the charmm interface.
    try:
        out_pdb = charmm.interface.charmmWash(structure_list,calc_type,
                                              keep_temp,hbond)
    except interface.CharmmInterfaceError(strerr):
        err = "Error in charmm!\n"
        err += "%s\n" % strerr
        raise PdbAddHError(err)
    # Deal with addH specific changes in cysteines, termini, and residue names
    out_pdb = processCysteines(out_pdb)
    out_pdb = convertResidues(out_pdb,resid_conv=charmm2pdb_resid,
                              atom_skip=charmm2pdb_atom_skip)
    out_pdb = processTerm(out_pdb)
    # Restore the original residue numbering recorded earlier.
    new_pdb = container.Structure("tmp",[],out_pdb)
    new_pdb.loadNumberConversion("%s_resid-conversion.txt" % pdb_id,"fixed")
    new_pdb.renumberAtoms()
    out = []
    for chain in new_pdb.chains:
        out.extend(chain.atom_lines)
    # Append TER (built from the last atom record) and END records.
    ter = out[-1]
    ter = "%s%s%54s\n" % ("TER ",ter[6:26]," ")
    out.append(ter)
    out.append("%-80s\n" % "END")
    if uhbd_style:
        out = [l for l in out if l[0:3] != "TER"]
        # UHBD takes a non-standard pdb file; atom names must be left-justified.
        out = ["%s%-4s%s" % (l[:12],l[12:16].strip(),l[16:]) for l in out]
        # UHDB also cannot handle chain identifiers, remove them
        out = ["%s %s" % (l[0:21],l[22:]) for l in out]
    # Add header and END
    out.insert(0,"%-79s\n" % "REMARK Polar hydrogens added by pdb_addH.py")
    out.insert(1,"%-79s\n" % ("REMARK Time: %s" % time.asctime()))
    return out
|
harmslab/pdbtools
|
pdbtools/addH.py
|
Python
|
gpl-3.0
| 10,679
|
[
"CHARMM"
] |
e38da8417ad2778dd6bc798aa8aae9206cb7ac1f076d8ae0813605ab76fdbe5b
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 18 21:40:28 2014
@author: Vespa
"""
import urllib2
import time
import sys
from zerocommon import *
def FindAnimate(dblist,inputname):
    """
    Return the ids (in sorted key order) of database entries whose title
    contains *inputname*, case-insensitively.
    """
    needle = inputname.lower()
    bestid = []
    for id in sorted(dblist):
        # BUG FIX: the original fed the raw user input to re.findall as a
        # pattern, so names containing regex metacharacters ("?", "+",
        # brackets, ...) crashed with a pattern-compile error.  A plain
        # substring test is what was intended.
        if needle in dblist[id].lower():
            bestid.append(id)
    return bestid
def ShowAnimateFound(name,idlist,dblist,quickmode = 0):
    """
    Show the matching titles and let the user choose one interactively.

    Returns 0 for "none / user quit", 1 when the single match is accepted,
    or the user's 1-based index into idlist otherwise.  quickmode skips the
    confirmation prompt when there is exactly one match.
    (Python 2: uses print statements and raw_input.)
    """
    if len(idlist) == 0:
        print name," Not Found!!"
        return 0
    if len(idlist) == 1:
        # Exact title match (or quickmode) auto-accepts the single hit.
        if name == dblist[idlist[0]] or quickmode:
            return 1
        else:
            print "\nDownload :",dblist[idlist[0]],"?[Y/N]"
            result = raw_input("")
            if result.lower() == "y":
                return 1
            else:
                return 0
    # Multiple matches: present a numbered menu.
    print "\nAnimate <<%s>> Found Are Listed Below,Select The Index You Want:\n"%(name)
    for (i,id) in zip(range(1,len(idlist)+1),idlist):
        print "[%d]:%s"%(i,dblist[id])
    print "[Q]:quit\n"
    while True:
        result = raw_input("Input:")
        if result.lower() == "q":
            return 0
        if not result.isdigit():
            print "Input a Number"
        elif not int(result) in range(1,len(idlist)+1):
            print "Out of Range!!"
        else:
            return int(result)
def ShowEpsInfo(EPSInfo):
    """
    Print the episode names of EPSInfo (a list of (url, name) pairs) as a
    numbered table, four entries per row.
    (Python 2: map() returns a list here.)
    """
    eps_list = map(lambda x:x[1],EPSInfo)
    m_str = ""
    for (eps,epsinfo) in zip(range(1,len(eps_list)+1),eps_list):
        strtemp = "[%d]:%s"%(eps,epsinfo)
        # Pad each cell to 15 columns; break the row after every 4th entry.
        m_str = m_str + strtemp.ljust(15)
        if not (eps % 4):
            m_str = m_str + "\n"
    print m_str
def GetChoice():
    """
    Prompt for an episode range and return it as a list of episode numbers:
    "2-10" -> [2..10], "17" -> [17], "all" -> range(-1, 1000) (acts as
    "everything"); any malformed input returns [].
    """
    while True:
        inputrange = raw_input("Input Download Range:(For Example:2-10 or 17 or all)")
        if inputrange.find('-')>=0:
            eps = inputrange.split('-')
            if len(eps)==2 and eps[0].isdigit() and eps[1].isdigit():
                if int(eps[1]) >= int(eps[0]):
                    return range(int(eps[0]),int(eps[1])+1)
        elif inputrange.isdigit():
            return [int(inputrange)]
        elif inputrange.lower()=="all":
            return range(-1,1000);#Infinite
        # Malformed "a-b" input (or a reversed range) falls through to here.
        return []
def GetEpsInfo(id):
    """
    Fetch the episode page for *id* and return a list of (url, eps_name)
    pairs scraped from its xunlei download links.

    *id* may be a numeric database id (".html" is appended) or a full page
    slug.  GetRE/toUTF8 come from zerocommon -- presumably GetRE(text, rx)
    returns re.findall-style tuples; TODO confirm.
    """
    if GetRE(id,r'^\d+$') != []:
        url = "http://dmxz.zerodm.tv/xiazai/"+id+".html"
    else:
        url = "http://dmxz.zerodm.tv/xiazai/"+id
    print url
    # Spoof a desktop browser UA; the site blocks the default urllib2 agent.
    headers = {'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'}
    req = urllib2.Request(url = url,headers = headers)
    content = toUTF8(urllib2.urlopen(req).read())
    regexp = r"<a\s*href=['\"](.+?xunlei.+?)['\"].*?>(.*?)</a>"
    return GetRE(content,regexp)
def GetDLURL_xunlei(pageurl):
    """
    Scrape xunlei direct-download URLs from *pageurl*.

    Returns a list of URLs with the literal "download" path segment replaced
    by the link's file name, or [] when the page cannot be fetched or
    contains no matching links.
    """
    try:
        content = urllib2.urlopen(pageurl).read()
    # NOTE(review): bare except is deliberate best-effort here (any fetch
    # failure yields []), but it also hides programming errors -- consider
    # narrowing to urllib2.URLError.
    except:
        return []
    regexp = r"href=\"(http://gdl\.lixian\.vip\.xunlei\.com/[^\"]*)\" download=\"([^\"]*)"
    urllist = GetRE(content,regexp)
    downloadlist = []
    if len(urllist) == 0:
        return []
    for url in urllist:
        # url is a (link, filename) tuple from the regex's two groups.
        url = url[0].replace("download",url[1])
        downloadlist.append(url)
    return downloadlist
def AnimateNameCheck(animate_name):
    """
    Strip characters that are illegal in file names ("/", ":", "?") from
    *animate_name* and return the result.
    """
    # str.replace is already a no-op when the character is absent, so the
    # original find() >= 0 guards were redundant.
    for bad in ("/", ":", "?"):
        animate_name = animate_name.replace(bad, "")
    return animate_name
def DownLoad(EPSInfo,eps_range,animate_name):
    """
    Resolve download URLs for the episodes of *EPSInfo* whose 1-based index
    is in *eps_range* and write them to "<name>.txt"; failed episodes are
    listed at the end of the file and printed for manual download.
    (Python 2; encodeFileName comes from zerocommon.)
    """
    animatename = AnimateNameCheck(encodeFileName(animate_name))
    # NOTE(review): file handle is closed manually at the end; a failure
    # mid-loop would leak it (py2 style, no `with`).
    downloadfile = open(animatename+".txt","w")
    EpsFailList = []
    for i in range(len(EPSInfo)):
        if i+1 in eps_range:
            print "Getting Download URL for ",EPSInfo[i][1],":",
            downloadlist = GetDLURL_xunlei(EPSInfo[i][0])
            if len(downloadlist) == 0:
                print "Get Url Fail!!"
                EpsFailList.append(i)
            else:
                print "Get Daze!"
                for url in downloadlist:
                    downloadfile.write(url+"\n\n")
            # Throttle successive page fetches to be polite to the server.
            if i != len(EPSInfo)-1:#LAST ONE DON'T NEED DELAY
                time.sleep(3)
    if len(EpsFailList) != 0:
        print "\n",animate_name,":\nItem(s) shown below get url fail,Please download it manually..."
        downloadfile.write(animate_name+":\nItem(s) shown below get url fail:\n")
        for eps in EpsFailList:
            print EPSInfo[eps][1]," ",
            downloadfile.write(EPSInfo[eps][1]+":\n"+EPSInfo[eps][0]+"\n")
        print "\n"
    downloadfile.close()
def DownloadSingleAnimate(dblist,argv):
    """
    Interactive flow for one title: search *dblist* for *argv*, let the user
    pick a match and an episode range, then download its URL list.
    """
    idlist = FindAnimate(dblist,argv)
    # ShowAnimateFound returns a 1-based index (0 = quit), hence the -1.
    choice = ShowAnimateFound(argv,idlist,dblist)-1
    if choice != -1:
        print "Getting Info of ",dblist[idlist[choice]],"......."
        EPSInfo = GetEpsInfo(idlist[choice])
        ShowEpsInfo(EPSInfo)
        Eps_Range = GetChoice()
        DownLoad(EPSInfo,Eps_Range,dblist[idlist[choice]])
def downloadFile(dblist,filename):
    """
    Batch mode: read one title per line from *filename*, resolve each
    against *dblist* (quickmode, auto-accepting single matches), then
    download ALL episodes of every resolved title.
    """
    AnimateList = {}
    for animateName in open(filename,'r'):
        # Strip line endings manually (handles both \n and \r\n files).
        if animateName.find('\n')>=0:
            animateName = animateName.replace('\n','')
        if animateName.find('\r')>=0:
            animateName = animateName.replace('\r','')
        idlist = FindAnimate(dblist,animateName)
        choice = ShowAnimateFound(animateName,idlist,dblist,1)-1
        if choice != -1:
            AnimateList[idlist[choice]]= dblist[idlist[choice]]
    for id in AnimateList:
        EPSInfo = GetEpsInfo(id)
        # range(-1, 1000) acts as "all episodes" (see GetChoice).
        Eps_Range = range(-1,1000)
        print 'downloading ',AnimateList[id],'...'
        DownLoad(EPSInfo,Eps_Range,AnimateList[id])
    print "Finished"
def main(argv):
    """
    Entry point: with no argument print usage; a ".txt" argument triggers
    batch mode (downloadFile), anything else is treated as a single title.
    (GetAnimateList/toUTF8 come from zerocommon.)
    """
    if len(argv) == 1:
        print """
Usages:
========================================================
Single Animate:
    python zerodm.py AnimateName
Animate in file:
    python zerodm.py downloadlist.txt
========================================================
Learn more detail,please visit: www.kylen314.com/archives/5729"""
        return
    dblist = GetAnimateList()
    if dblist == {}:
        print "Database Read Fail!"
        return
    # Dispatch on the argument's extension.
    command = argv[1][-4:].lower()
    if command == ".txt":
        downloadFile(dblist,argv[1])
    else:
        DownloadSingleAnimate(dblist,toUTF8(argv[1]))
if __name__ == '__main__':
    main(sys.argv)
|
Vespa314/zerodm-download
|
Zerodm_PY/zerodm.py
|
Python
|
mit
| 6,658
|
[
"VisIt"
] |
a583bfaccaa8d1945e81f66b616a1b2f4434e4cdb1644684b879d0fbce9636cf
|
#!/usr/bin/env python3
from netcdfTools import *
import sys
import argparse
import numpy as np
'''
Description:
'''
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = #
def checkVariables(vnames, vDict):
  """Terminate with a diagnostic unless every name in vnames is a key of vDict."""
  for name in vnames:
    if( name not in vDict.keys() ):
      sys.exit(' Variable {} not found from variable list: {}'.format(name, vDict.keys()))
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = #
def unaryOpr( v, opStr ):
  """Apply an optional unary operation ('^2', 'abs' or 'sqrt') to array v.

  The operation is performed in place (out=v); v itself is returned.
  None or an unrecognized opStr leaves v untouched.
  """
  inplace_ops = {
    '^2':   lambda a: np.power(a, 2, out=a),
    'abs':  lambda a: np.abs(a, out=a),
    'sqrt': lambda a: np.sqrt(a, out=a),
  }
  if( opStr in inplace_ops ):
    inplace_ops[opStr](v)
  return v
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = #
def binaryOpr( v1, v2, opStr ):
  """Combine arrays v1 and v2 with '+', '-', '*' or '/'.

  Division adds a small offset (1e-6) to the denominator to avoid
  divide-by-zero.  Any other operator terminates the program.
  """
  if( opStr == '+' ):
    return v1 + v2
  if( opStr == '-' ):
    return v1 - v2
  if( opStr == '/' ):
    return v1 / (v2 + 1.e-6)
  if( opStr == '*' ):
    return v1 * v2
  sys.exit('Unrecognized binary operator {}: Exiting ...'.format(opStr))
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = #
def unaryOprUnit( unit1 , opStr ):
  """Return the unit string resulting from applying unary op opStr to unit1.

  None leaves the unit unchanged; unknown operators terminate with a
  diagnostic (consistent with binaryOpr).
  """
  if( opStr is None ):
    return unit1
  if( opStr == '^2'):
    unitout = '({})^2'.format(unit1)
  elif( opStr == 'abs' ):
    unitout = unit1
  elif( opStr == 'sqrt'):
    unitout = '({})^(1/2)'.format(unit1)
  else:
    # BUG FIX: an unrecognized operator previously fell through to
    # `return unitout` and raised UnboundLocalError.
    sys.exit('Unrecognized unary operator {}: Exiting ...'.format(opStr))
  return unitout
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = #
def binaryOprUnit( unit1, unit2, opStr ):
  """Return the unit string for the result of v1 <opStr> v2.

  '+'/'-' keep unit1; '/' and '*' compose the two units.  Unknown operators
  terminate with a diagnostic (consistent with binaryOpr).
  """
  if( opStr == '+' ):
    return unit1
  elif( opStr == '-' ):
    return unit1
  elif( opStr == '/' ):
    return '{}({})^-1'.format(unit1,unit2)
  elif( opStr == '*' ):
    if( unit1 == unit2 ): unitout = '({})^2'.format(unit1)
    else:                 unitout = '{} {}'.format(unit1,unit2)
    return unitout
  # BUG FIX: an unrecognized operator previously fell through to
  # `return unitout` and raised UnboundLocalError.
  sys.exit('Unrecognized binary operator {}: Exiting ...'.format(opStr))
#==========================================================#
# Command-line interface: two input NetCDF files, per-file unary operators
# and scale factors, one binary operator, and output variable names.
parser = argparse.ArgumentParser(prog='binaryOperateNetCdf.py')
parser.add_argument("-f1", "--filename1", metavar='FILE1', type=str,
                    help="First NetCDF file.")
parser.add_argument("-f2", "--filename2", metavar='FILE2', type=str,
                    help="Second NetCDF file.")
parser.add_argument("-fo", "--fileout", type=str, required=True,
                    help="Name of the output netCDF file.")
parser.add_argument("-vn1", "--varNames1", metavar='VN1', type=str, nargs='+',\
                    help="Names of variables from f1 dataset.")
parser.add_argument("-vn2", "--varNames2", metavar='VN2', type=str, nargs='+',\
                    help="Names of variables from f2 dataset.")
parser.add_argument("-vno", "--varOutNames", metavar='VNO', type=str, nargs='+',\
                    help="Names of output variables. Their number must match with VN1.")
parser.add_argument("-dn", "--derivNames",type=str, nargs='+', metavar='DN', default=None,\
                    help="(Optional) Names of derived coordinates to output to same file.")
parser.add_argument("-s1", "--scale1", type=float, default=1.0,
                    help="Scale factor for file 1 dataset.")
parser.add_argument("-s2", "--scale2", type=float, default=1.0,
                    help="Scale factor for file 2 dataset.")
parser.add_argument("-op", "--binaryOperator", type=str, choices=['+','-','/','*'],
                    help="Binary operator: v1 <op> v2.")
parser.add_argument("-uop1", "--unaryOperator1", type=str, choices=['^2','abs','sqrt'], default=None,
                    help="Unary operator for file 1 dataset.")
parser.add_argument("-uop2", "--unaryOperator2", type=str, choices=['^2','abs','sqrt'], default=None,
                    help="Unary operator for file 2 dataset.")
args = parser.parse_args()
#==========================================================#
# Short aliases for the parsed command-line arguments used below.
fn1 = args.filename1
fn2 = args.filename2
fileout = args.fileout
vn1 = args.varNames1
vn2 = args.varNames2
vno = args.varOutNames
dn = args.derivNames
s1 = args.scale1
s2 = args.scale2
uop1 = args.unaryOperator1
uop2 = args.unaryOperator2
biop = args.binaryOperator
'''
Establish two boolean variables which indicate whether the created variable is an
independent or dependent variable in function createNetcdfVariable().
'''
parameter = True; variable = False
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = #
# Sanity checks: either one v2 variable is broadcast against all v1
# variables (N2 == 1), or the two lists pair up one-to-one.
N1 = len(vn1); N2 = len(vn2); NO = len(vno)
if( N1 == N2 ):
  pass
elif( (N1 > N2) and (N2 == 1) ):
  pass
else:
  sys.exit(' Incompatible number of variables: N1={} & N2={}. If N1 > N2, then N2 == 1 is required.'.format(N1,N2))
if( N1 != NO ):
  sys.exit(' The number of output variable names NO={} must match N1={}. Exiting ...'.format(NO,N1))
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = #
# Open both input datasets (helpers come from netcdfTools).
ds1, v1D, u1D = netcdfDataset2(fn1) # vD: variableDict, uD: unitDict
ds2, v2D, u2D = netcdfDataset2(fn2)
checkVariables( vn1, v1D )
checkVariables( vn2, v2D )
vstr = vn1[0] # We can use the first variable as the coords should match.
# Coordinate names are taken from the first variable's dimension tuple
# (assumes a (time, z, y, x) ordering -- TODO confirm for all inputs).
tn = v1D[vstr][0]; zn = v1D[vstr][1]; yn = v1D[vstr][2]; xn = v1D[vstr][3]
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = #
# Create a NETCDF output dataset (dso) for writing out the data.
dso = netcdfOutputDataset( fileout )
# Read and copy the four coordinate variables from dataset 1.
dD1 = dict()
dD2 = dict()
time, time_dims = read1DVariableFromDataset(tn, vstr , ds1, 0, 0, 1 ) # All values.
tv = createNetcdfVariable( dso, time, tn, len(time), u1D[tn],'f4', (tn,), parameter )
x, x_dims = read1DVariableFromDataset( xn, vstr, ds1, 0, 0, 1 )
xv = createNetcdfVariable( dso, x , xn , len(x) , u1D[xn],'f4', (xn,), parameter )
y, y_dims = read1DVariableFromDataset( yn, vstr, ds1, 0, 0, 1 )
yv = createNetcdfVariable( dso, y , yn , len(y) , u1D[yn],'f4', (yn,), parameter )
z, z_dims = read1DVariableFromDataset( zn, vstr, ds1, 0, 0, 1 )
zv = createNetcdfVariable( dso, z , zn , len(z) , u1D[zn],'f4', (zn,), parameter )
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = #
# Include additional (derived) coordinates into the output file.
if( dn ):
  for di in dn:
    if( di in v1D.keys() ):
      dc = ds1.variables[di][:]
      uD = u1D[di]
      vD = v1D[di]
    elif( di in v2D.keys() ):
      dc = ds2.variables[di][:]
      uD = u2D[di]
      vD = v2D[di]
    else:
      sys.exit('Error: {} not found variable lists. Exiting ...'.format(di))
    dc_dims = np.shape( dc )
    dv = createNetcdfVariable( dso, dc, di, None, uD, 'f4', vD, variable )
    dc = None
# Release coordinate arrays; they are already written to dso.
time = None; x = None; y = None; z = None
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = #
# Load each requested variable, scale it, and apply its unary operator.
for vi in vn1:
  vt , _ = read3DVariableFromDataset( vi, ds1, 0, 0, 0, 1 ) # All values.
  if( s1 != 1.0 ): vt *= s1
  vt = unaryOpr( vt, uop1 )
  dD1[vi] = vt
for vi in vn2:
  vt , _ = read3DVariableFromDataset( vi, ds2, 0, 0, 0, 1 ) # All values.
  if( s2 != 1.0 ): vt *= s2
  vt = unaryOpr( vt, uop2 )
  dD2[vi] = vt
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = #
# Combine the paired variables and write each result; pop() frees the
# source arrays as they are consumed.
for i in range( N1 ):
  if( N2 != 1 ):
    j = i
    vo = binaryOpr( dD1.pop(vn1[i]), dD2.pop(vn2[j]) , biop )
  else:
    j = 0
    vo = binaryOpr( dD1.pop(vn1[i]), dD2[vn2[0]] , biop )
  # NOTE(review): the unit is always taken from vn2[0] even when j = i
  # (paired N1 == N2 > 1 case) -- likely should be vn2[j]; confirm.
  unit12 = binaryOprUnit( u1D[vn1[i]] , u2D[vn2[0]] , biop )
  vv = createNetcdfVariable( dso, vo, vno[i], None, unit12, 'f4',(tn,zn,yn,xn,) , variable )
  vo = None
netcdfWriteAndClose(dso)
|
mjsauvinen/P4UL
|
pyNetCDF/binaryOperateNetCdf.py
|
Python
|
mit
| 7,087
|
[
"NetCDF"
] |
859f4d40c7bed2b1476912ff9ae953d1ec580e5ff345f7be0bde522375290306
|
# Copyright (c) 2014, Alan Saul
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
import unittest
import GPy
from GPy.models import GradientChecker
import functools
import inspect
from GPy.likelihoods import link_functions
from functools import partial
fixed_seed = 7
#np.seterr(divide='raise')
def dparam_partial(inst_func, *args):
    """
    Wrap a bound instance method so that one named parameter of its owner
    can be tweaked before every call (the shape checkgrad requires).

    inst_func: a bound method of the object whose parameter we want to vary
    args: the remaining positional arguments (for example the f or Y being
        used) forwarded to inst_func on every call

    The returned callable takes (param_val, param_name); it assigns
    ``inst_func.__self__[param_name] = param_val`` and then evaluates
    ``inst_func(*args)``.
    """
    def _set_param_and_call(param_val, param_name, inst_func, args):
        # Set the parameter directly on the owning object, then evaluate.
        inst_func.__self__[param_name] = param_val
        return inst_func(*args)
    return functools.partial(_set_param_and_call, inst_func=inst_func, args=args)
def dparam_checkgrad(func, dfunc, params, params_names, args, constraints=None, randomize=False, verbose=False):
    """
    checkgrad expects a f: R^N -> R^1 and df: R^N -> R^N
    However if we are holding other parameters fixed and moving something else
    We need to check the gradient of each of the fixed parameters
    (f and y for example) separately, whilst moving another parameter.
    Otherwise f: gives back R^N and
    df: gives back R^NxM where M is
    The number of parameters and N is the number of data
    Need to take a slice out from f and a slice out of df

    Returns True only if every per-parameter, per-datum gradient check passes.
    """
    print("\n{} likelihood: {} vs {}".format(func.__self__.__class__.__name__,
                                             func.__name__, dfunc.__name__))
    partial_f = dparam_partial(func, *args)
    partial_df = dparam_partial(dfunc, *args)
    gradchecking = True
    zipped_params = zip(params, params_names)
    for param_ind, (param_val, param_name) in enumerate(zipped_params):
        # Check one parameter at a time, make sure it is 2d (as some gradients
        # only return arrays) then strip out the parameter
        f_ = partial_f(param_val, param_name)
        df_ = partial_df(param_val, param_name)
        # Reshape to a 3d matrix, i.e. (?, N, D) regardless of whether ? is
        # num_params or not
        f_ = f_.reshape(-1, f_.shape[0], f_.shape[1])
        df_ = df_.reshape(-1, f_.shape[0], f_.shape[1])
        # Get the number of f and number of dimensions
        fnum = f_.shape[-2]
        fdim = f_.shape[-1]
        dfnum = df_.shape[-2]
        for fixed_val in range(dfnum):
            # dlik and dlik_dvar give back 1 value for each
            f_ind = min(fnum, fixed_val + 1) - 1
            print("fnum: {} dfnum: {} f_ind: {} fixed_val: {}".format(fnum, dfnum, f_ind, fixed_val))
            # Make a grad checker with this param moving; the parameter is set
            # directly with __setitem__, set_params is NOT being called.
            # Reshape the output to (num_params, N, D), then pull out the
            # relevant parameter-findex and checkgrad one index at a time.
            # NOTE: renamed from `func`/`dfunc` so the outer arguments are not
            # shadowed inside the loop.
            func_slice = lambda p_val: partial_f(p_val, param_name).reshape(-1, fnum, fdim)[param_ind, f_ind, :]
            dfunc_slice = lambda d_val: partial_df(d_val, param_name).reshape(-1, fnum, fdim)[param_ind, fixed_val, :]
            grad = GradientChecker(func_slice, dfunc_slice, param_val, [param_name])
            if constraints is not None:
                for constrain_param, constraint in constraints:
                    if grad.grep_param_names(constrain_param):
                        constraint(constrain_param, grad)
                    else:
                        print("parameter didn't exist")
                        print(constrain_param, " ", constraint)
            if randomize:
                grad.randomize()
            if verbose:
                print(grad)
            # The original ran checkgrad three times in a row (once discarded,
            # twice in duplicated if-blocks); a single call is sufficient.
            if not grad.checkgrad(verbose=True):
                gradchecking = False
    return gradchecking
from nose.tools import with_setup
class TestNoiseModels(object):
"""
Generic model checker
"""
def setUp(self):
    """Build the synthetic data sets and the table of likelihoods to check."""
    np.random.seed(fixed_seed)
    self.N = 15
    self.D = 3
    self.X = np.random.rand(self.N, self.D)*10
    self.real_std = 0.1
    noise = np.random.randn(*self.X[:, 0].shape)*self.real_std
    self.Y = (np.sin(self.X[:, 0]*2*np.pi) + noise)[:, None]
    self.f = np.random.rand(self.N, 1)
    # np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # int is the documented replacement and is behaviourally identical here.
    self.binary_Y = np.asarray(np.random.rand(self.N) > 0.5, dtype=int)[:, None]
    self.binary_Y[self.binary_Y == 0.0] = -1.0
    self.positive_Y = np.exp(self.Y.copy())
    tmp = np.round(self.X[:, 0]*3-3)[:, None] + np.random.randint(0, 3, self.X.shape[0])[:, None]
    self.integer_Y = np.where(tmp > 0, tmp, 0)
    # Per-datum trial counts and success probabilities for the binomial data.
    self.ns = np.random.poisson(50, size=self.N)[:, None]
    p = np.abs(np.cos(2*np.pi*self.X + np.random.normal(scale=.2, size=(self.N, self.D)))).mean(1)
    self.binomial_Y = np.array([np.random.binomial(int(self.ns[i]), p[i]) for i in range(p.shape[0])])[:, None]
    self.var = 0.2
    self.deg_free = 4.0
    #Make a bigger step as lower bound can be quite curved
    self.step = 1e-4
    """
    Dictionary where we nest models we would like to check
        Name: {
            "model": model_instance,
            "grad_params": {
                "names": [names_of_params_we_want, to_grad_check],
                "vals": [values_of_params, to_start_at],
                "constrain": [constraint_wrappers, listed_here]
            },
            "laplace": boolean_of_whether_model_should_work_for_laplace,
            "ep": boolean_of_whether_model_should_work_for_ep,
            "link_f_constraints": [constraint_wrappers, listed_here]
        }
    """
    self.noise_models = {"Student_t_default": {
        "model": GPy.likelihoods.StudentT(deg_free=self.deg_free, sigma2=self.var),
        "grad_params": {
            "names": [".*t_scale2"],
            "vals": [self.var],
            "constraints": [(".*t_scale2", self.constrain_positive), (".*deg_free", self.constrain_fixed)]
        },
        "laplace": True
        },
        #"Student_t_deg_free": {
            #"model": GPy.likelihoods.StudentT(deg_free=self.deg_free, sigma2=self.var),
            #"grad_params": {
                #"names": [".*deg_free"],
                #"vals": [self.deg_free],
                #"constraints": [(".*t_scale2", self.constrain_fixed), (".*deg_free", self.constrain_positive)]
            #},
            #"laplace": True
        #},
        "Student_t_1_var": {
            "model": GPy.likelihoods.StudentT(deg_free=self.deg_free, sigma2=self.var),
            "grad_params": {
                "names": [".*t_scale2"],
                "vals": [1.0],
                "constraints": [(".*t_scale2", self.constrain_positive), (".*deg_free", self.constrain_fixed)]
            },
            "laplace": True
        },
        # FIXME: This is a known failure point, when the degrees of freedom
        # are very small, and the variance is relatively small, the
        # likelihood is log-concave and problems occur
        # "Student_t_small_deg_free": {
        #     "model": GPy.likelihoods.StudentT(deg_free=1.5, sigma2=self.var),
        #     "grad_params": {
        #         "names": [".*t_scale2"],
        #         "vals": [self.var],
        #         "constraints": [(".*t_scale2", self.constrain_positive), (".*deg_free", self.constrain_fixed)]
        #     },
        #     "laplace": True
        # },
        "Student_t_small_var": {
            "model": GPy.likelihoods.StudentT(deg_free=self.deg_free, sigma2=self.var),
            "grad_params": {
                "names": [".*t_scale2"],
                "vals": [0.001],
                "constraints": [(".*t_scale2", self.constrain_positive), (".*deg_free", self.constrain_fixed)]
            },
            "laplace": True
        },
        "Student_t_large_var": {
            "model": GPy.likelihoods.StudentT(deg_free=self.deg_free, sigma2=self.var),
            "grad_params": {
                "names": [".*t_scale2"],
                "vals": [10.0],
                "constraints": [(".*t_scale2", self.constrain_positive), (".*deg_free", self.constrain_fixed)]
            },
            "laplace": True
        },
        "Student_t_approx_gauss": {
            "model": GPy.likelihoods.StudentT(deg_free=1000, sigma2=self.var),
            "grad_params": {
                "names": [".*t_scale2"],
                "vals": [self.var],
                "constraints": [(".*t_scale2", self.constrain_positive), (".*deg_free", self.constrain_fixed)]
            },
            "laplace": True
        },
        "Gaussian_default": {
            "model": GPy.likelihoods.Gaussian(variance=self.var),
            "grad_params": {
                "names": [".*variance"],
                "vals": [self.var],
                "constraints": [(".*variance", self.constrain_positive)]
            },
            "laplace": True,
            "ep": False, # FIXME: Should be True when we have it working again
            "variational_expectations": True,
        },
        "Gaussian_log": {
            "model": GPy.likelihoods.Gaussian(gp_link=link_functions.Log(), variance=self.var),
            "grad_params": {
                "names": [".*variance"],
                "vals": [self.var],
                "constraints": [(".*variance", self.constrain_positive)]
            },
            "laplace": True,
            "variational_expectations": True
        },
        #"Gaussian_probit": {
            #"model": GPy.likelihoods.gaussian(gp_link=link_functions.Probit(), variance=self.var, D=self.D, N=self.N),
            #"grad_params": {
                #"names": ["noise_model_variance"],
                #"vals": [self.var],
                #"constraints": [constrain_positive]
            #},
            #"laplace": True
        #},
        #"Gaussian_log_ex": {
            #"model": GPy.likelihoods.gaussian(gp_link=link_functions.Log_ex_1(), variance=self.var, D=self.D, N=self.N),
            #"grad_params": {
                #"names": ["noise_model_variance"],
                #"vals": [self.var],
                #"constraints": [constrain_positive]
            #},
            #"laplace": True
        #},
        "Bernoulli_default": {
            "model": GPy.likelihoods.Bernoulli(),
            "link_f_constraints": [partial(self.constrain_bounded, lower=0, upper=1)],
            "laplace": True,
            "Y": self.binary_Y,
            "ep": True, # FIXME: Should be True when we have it working again
            "variational_expectations": True
        },
        "Exponential_default": {
            "model": GPy.likelihoods.Exponential(),
            "link_f_constraints": [self.constrain_positive],
            "Y": self.positive_Y,
            "laplace": True,
        },
        "Poisson_default": {
            "model": GPy.likelihoods.Poisson(),
            "link_f_constraints": [self.constrain_positive],
            "Y": self.integer_Y,
            "laplace": True,
            "ep": False #Should work though...
        },
        "Binomial_default": {
            "model": GPy.likelihoods.Binomial(),
            "link_f_constraints": [partial(self.constrain_bounded, lower=0, upper=1)],
            "Y": self.binomial_Y,
            "Y_metadata": {'trials': self.ns},
            "laplace": True,
        },
        #,
        #GAMMA needs some work!"Gamma_default": {
            #"model": GPy.likelihoods.Gamma(),
            #"link_f_constraints": [constrain_positive],
            #"Y": self.positive_Y,
            #"laplace": True
        #}
    }
####################################################
# Constraint wrappers so we can just list them off #
####################################################
def constrain_fixed(self, regex, model):
    """Fix all parameters of *model* matching *regex* at their current value."""
    model[regex].constrain_fixed()
def constrain_negative(self, regex, model):
    """Constrain all matching parameters of *model* to be negative."""
    model[regex].constrain_negative()
def constrain_positive(self, regex, model):
    """Constrain all matching parameters of *model* to be positive."""
    model[regex].constrain_positive()
def constrain_fixed_below(self, regex, model, up_to):
    """Fix the first *up_to* entries of the matching parameter array."""
    model[regex][0:up_to].constrain_fixed()
def constrain_fixed_above(self, regex, model, above):
    """Fix all entries of the matching parameter array from *above* onwards."""
    model[regex][above:].constrain_fixed()
def constrain_bounded(self, regex, model, lower, upper):
    """
    Constrain matching parameters to [lower, upper].

    Used like: partial(constrain_bounded, lower=0, upper=1)
    """
    model[regex].constrain_bounded(lower, upper)
def tearDown(self):
    """Drop the data arrays created in setUp so each test starts clean."""
    self.Y = None
    self.f = None
    self.X = None
def test_scale2_models(self):
    """Nose-style generator: yield every applicable check for every model
    listed in self.noise_models, with that model's data and constraints."""
    self.setUp()
    for name, attributes in self.noise_models.items():
        model = attributes["model"]
        # Optional entries default to empty/False; the original if/else
        # ladders (and a dead `constrain_positive = []` local) are replaced
        # with dict lookups that produce the same values.
        grad_params = attributes.get("grad_params", {})
        param_vals = grad_params.get("vals", [])
        param_names = grad_params.get("names", [])
        param_constraints = grad_params.get("constraints", [])
        link_f_constraints = attributes.get("link_f_constraints", [])
        Y = attributes["Y"].copy() if "Y" in attributes else self.Y.copy()
        f = attributes["f"].copy() if "f" in attributes else self.f.copy()
        Y_metadata = attributes["Y_metadata"].copy() if "Y_metadata" in attributes else None
        laplace = attributes.get("laplace", False)
        ep = attributes.get("ep", False)
        var_exp = attributes.get("variational_expectations", False)
        #if len(param_vals) > 1:
            #raise NotImplementedError("Cannot support multiple params in likelihood yet!")
        #Required by all
        #Normal derivatives
        yield self.t_logpdf, model, Y, f, Y_metadata
        yield self.t_dlogpdf_df, model, Y, f, Y_metadata
        yield self.t_d2logpdf_df2, model, Y, f, Y_metadata
        #Link derivatives
        yield self.t_dlogpdf_dlink, model, Y, f, Y_metadata, link_f_constraints
        yield self.t_d2logpdf_dlink2, model, Y, f, Y_metadata, link_f_constraints
        if laplace:
            #Laplace only derivatives
            yield self.t_d3logpdf_df3, model, Y, f, Y_metadata
            yield self.t_d3logpdf_dlink3, model, Y, f, Y_metadata, link_f_constraints
            #Params
            yield self.t_dlogpdf_dparams, model, Y, f, Y_metadata, param_vals, param_names, param_constraints
            yield self.t_dlogpdf_df_dparams, model, Y, f, Y_metadata, param_vals, param_names, param_constraints
            yield self.t_d2logpdf2_df2_dparams, model, Y, f, Y_metadata, param_vals, param_names, param_constraints
            #Link params
            yield self.t_dlogpdf_link_dparams, model, Y, f, Y_metadata, param_vals, param_names, param_constraints
            yield self.t_dlogpdf_dlink_dparams, model, Y, f, Y_metadata, param_vals, param_names, param_constraints
            yield self.t_d2logpdf2_dlink2_dparams, model, Y, f, Y_metadata, param_vals, param_names, param_constraints
            #laplace likelihood gradcheck
            yield self.t_laplace_fit_rbf_white, model, self.X, Y, f, Y_metadata, self.step, param_vals, param_names, param_constraints
        if ep:
            #ep likelihood gradcheck
            yield self.t_ep_fit_rbf_white, model, self.X, Y, f, Y_metadata, self.step, param_vals, param_names, param_constraints
        if var_exp:
            #Need to specify mu and var!
            yield self.t_varexp, model, Y, Y_metadata
            yield self.t_dexp_dmu, model, Y, Y_metadata
            yield self.t_dexp_dvar, model, Y, Y_metadata
    self.tearDown()
#############
# dpdf_df's #
#############
@with_setup(setUp, tearDown)
def t_logpdf(self, model, Y, f, Y_metadata):
    """Consistency check: prod(pdf) must equal exp(sum(logpdf))."""
    print("\n{}".format(inspect.stack()[0][3]))
    print(model)
    pdf_product = model.pdf(f.copy(), Y.copy(), Y_metadata=Y_metadata).prod()
    logpdf_total = model.logpdf(f.copy(), Y.copy(), Y_metadata=Y_metadata).sum()
    np.testing.assert_almost_equal(pdf_product, np.exp(logpdf_total))
@with_setup(setUp, tearDown)
def t_dlogpdf_df(self, model, Y, f, Y_metadata):
    """Gradcheck: dlogpdf_df is the derivative of logpdf w.r.t. f."""
    print("\n{}".format(inspect.stack()[0][3]))
    self.description = "\n{}".format(inspect.stack()[0][3])
    # The original wrapped the bound method in np.sum() before partial()
    # — np.sum of a method object is not a callable composition; use the
    # same direct-partial pattern as the sibling derivative tests.
    logpdf = functools.partial(model.logpdf, y=Y, Y_metadata=Y_metadata)
    dlogpdf_df = functools.partial(model.dlogpdf_df, y=Y, Y_metadata=Y_metadata)
    grad = GradientChecker(logpdf, dlogpdf_df, f.copy(), 'g')
    grad.randomize()
    print(model)
    assert grad.checkgrad(verbose=1)
@with_setup(setUp, tearDown)
def t_d2logpdf_df2(self, model, Y, f, Y_metadata):
    """Gradcheck: d2logpdf_df2 is the derivative of dlogpdf_df w.r.t. f."""
    print("\n{}".format(inspect.stack()[0][3]))
    first_deriv = functools.partial(model.dlogpdf_df, y=Y, Y_metadata=Y_metadata)
    second_deriv = functools.partial(model.d2logpdf_df2, y=Y, Y_metadata=Y_metadata)
    checker = GradientChecker(first_deriv, second_deriv, f.copy(), 'g')
    checker.randomize()
    print(model)
    assert checker.checkgrad(verbose=1)
@with_setup(setUp, tearDown)
def t_d3logpdf_df3(self, model, Y, f, Y_metadata):
    """Gradcheck: d3logpdf_df3 is the derivative of d2logpdf_df2 w.r.t. f."""
    print("\n{}".format(inspect.stack()[0][3]))
    d2logpdf_df2 = functools.partial(model.d2logpdf_df2, y=Y, Y_metadata=Y_metadata)
    d3logpdf_df3 = functools.partial(model.d3logpdf_df3, y=Y, Y_metadata=Y_metadata)
    grad = GradientChecker(d2logpdf_df2, d3logpdf_df3, f.copy(), 'g')
    grad.randomize()
    print(model)
    assert grad.checkgrad(verbose=1)
##############
# df_dparams #
##############
@with_setup(setUp, tearDown)
def t_dlogpdf_dparams(self, model, Y, f, Y_metadata):
    """Gradcheck logpdf against dlogpdf_dtheta for each likelihood parameter."""
    print("\n{}".format(inspect.stack()[0][3]))
    print(model)
    assert (
        dparam_checkgrad(model.logpdf, model.dlogpdf_dtheta,
            params, params_names, args=(f, Y, Y_metadata), constraints=param_constraints,
            randomize=False, verbose=True)
        )
@with_setup(setUp, tearDown)
def t_dlogpdf_df_dparams(self, model, Y, f, Y_metadata, params, params_names, param_constraints):
    """Gradcheck dlogpdf_df against dlogpdf_df_dtheta for each parameter."""
    print("\n{}".format(inspect.stack()[0][3]))
    print(model)
    assert (
        dparam_checkgrad(model.dlogpdf_df, model.dlogpdf_df_dtheta,
            params, params_names, args=(f, Y, Y_metadata), constraints=param_constraints,
            randomize=False, verbose=True)
        )
@with_setup(setUp, tearDown)
def t_d2logpdf2_df2_dparams(self, model, Y, f, Y_metadata, params, params_names, param_constraints):
    """Gradcheck d2logpdf_df2 against d2logpdf_df2_dtheta for each parameter."""
    print("\n{}".format(inspect.stack()[0][3]))
    print(model)
    assert (
        dparam_checkgrad(model.d2logpdf_df2, model.d2logpdf_df2_dtheta,
            params, params_names, args=(f, Y, Y_metadata), constraints=param_constraints,
            randomize=False, verbose=True)
        )
################
# dpdf_dlink's #
################
@with_setup(setUp, tearDown)
def t_dlogpdf_dlink(self, model, Y, f, Y_metadata, link_f_constraints):
    """Gradcheck: dlogpdf_dlink is the derivative of logpdf_link w.r.t. link(f)."""
    print("\n{}".format(inspect.stack()[0][3]))
    logpdf = functools.partial(model.logpdf_link, y=Y, Y_metadata=Y_metadata)
    dlogpdf_dlink = functools.partial(model.dlogpdf_dlink, y=Y, Y_metadata=Y_metadata)
    grad = GradientChecker(logpdf, dlogpdf_dlink, f.copy(), 'g')
    #Apply constraints to link_f values (e.g. positivity for Poisson)
    for constraint in link_f_constraints:
        constraint('g', grad)
    grad.randomize()
    print(grad)
    print(model)
    assert grad.checkgrad(verbose=1)
@with_setup(setUp, tearDown)
def t_d2logpdf_dlink2(self, model, Y, f, Y_metadata, link_f_constraints):
    """Gradcheck: d2logpdf_dlink2 is the derivative of dlogpdf_dlink."""
    print("\n{}".format(inspect.stack()[0][3]))
    dlogpdf_dlink = functools.partial(model.dlogpdf_dlink, y=Y, Y_metadata=Y_metadata)
    d2logpdf_dlink2 = functools.partial(model.d2logpdf_dlink2, y=Y, Y_metadata=Y_metadata)
    grad = GradientChecker(dlogpdf_dlink, d2logpdf_dlink2, f.copy(), 'g')
    #Apply constraints to link_f values
    for constraint in link_f_constraints:
        constraint('g', grad)
    grad.randomize()
    print(grad)
    print(model)
    assert grad.checkgrad(verbose=1)
@with_setup(setUp, tearDown)
def t_d3logpdf_dlink3(self, model, Y, f, Y_metadata, link_f_constraints):
    """Gradcheck: d3logpdf_dlink3 is the derivative of d2logpdf_dlink2."""
    print("\n{}".format(inspect.stack()[0][3]))
    d2logpdf_dlink2 = functools.partial(model.d2logpdf_dlink2, y=Y, Y_metadata=Y_metadata)
    d3logpdf_dlink3 = functools.partial(model.d3logpdf_dlink3, y=Y, Y_metadata=Y_metadata)
    grad = GradientChecker(d2logpdf_dlink2, d3logpdf_dlink3, f.copy(), 'g')
    #Apply constraints to link_f values
    for constraint in link_f_constraints:
        constraint('g', grad)
    grad.randomize()
    print(grad)
    print(model)
    assert grad.checkgrad(verbose=1)
#################
# dlink_dparams #
#################
@with_setup(setUp, tearDown)
def t_dlogpdf_link_dparams(self, model, Y, f, Y_metadata, params, param_names, param_constraints):
    """Gradcheck logpdf_link against dlogpdf_link_dtheta for each parameter."""
    print("\n{}".format(inspect.stack()[0][3]))
    print(model)
    assert (
        dparam_checkgrad(model.logpdf_link, model.dlogpdf_link_dtheta,
            params, param_names, args=(f, Y, Y_metadata), constraints=param_constraints,
            randomize=False, verbose=True)
        )
@with_setup(setUp, tearDown)
def t_dlogpdf_dlink_dparams(self, model, Y, f, Y_metadata, params, param_names, param_constraints):
    """Gradcheck dlogpdf_dlink against dlogpdf_dlink_dtheta for each parameter."""
    print("\n{}".format(inspect.stack()[0][3]))
    print(model)
    assert (
        dparam_checkgrad(model.dlogpdf_dlink, model.dlogpdf_dlink_dtheta,
            params, param_names, args=(f, Y, Y_metadata), constraints=param_constraints,
            randomize=False, verbose=True)
        )
@with_setup(setUp, tearDown)
def t_d2logpdf2_dlink2_dparams(self, model, Y, f, Y_metadata, params, param_names, param_constraints):
    """Gradcheck d2logpdf_dlink2 against d2logpdf_dlink2_dtheta for each parameter."""
    print("\n{}".format(inspect.stack()[0][3]))
    print(model)
    assert (
        dparam_checkgrad(model.d2logpdf_dlink2, model.d2logpdf_dlink2_dtheta,
            params, param_names, args=(f, Y, Y_metadata), constraints=param_constraints,
            randomize=False, verbose=True)
        )
################
# laplace test #
################
@with_setup(setUp, tearDown)
def t_laplace_fit_rbf_white(self, model, X, Y, f, Y_metadata, step, param_vals, param_names, constraints):
    """Gradcheck a full GP (RBF + fixed white kernel) under Laplace inference
    with this likelihood; re-run verbosely only when the quiet check fails."""
    print("\n{}".format(inspect.stack()[0][3]))
    np.random.seed(111)
    #Normalize
    # Y = Y/Y.max()
    white_var = 1e-4
    kernel = GPy.kern.RBF(X.shape[1]) + GPy.kern.White(X.shape[1])
    laplace_likelihood = GPy.inference.latent_function_inference.Laplace()
    m = GPy.core.GP(X.copy(), Y.copy(), kernel, likelihood=model, Y_metadata=Y_metadata, inference_method=laplace_likelihood)
    m.kern.white.constrain_fixed(white_var)
    #Set constraints
    for constrain_param, constraint in constraints:
        constraint(constrain_param, m)
    m.randomize()
    #Set params
    for param_num in range(len(param_names)):
        name = param_names[param_num]
        m[name] = param_vals[param_num]
    try:
        assert m.checkgrad(verbose=0, step=step)
    except AssertionError:
        # The bare `except:` here used to swallow KeyboardInterrupt/SystemExit
        # too; only a failed assertion should trigger the verbose re-check.
        assert m.checkgrad(verbose=1, step=step)
###########
# EP test #
###########
@with_setup(setUp, tearDown)
def t_ep_fit_rbf_white(self, model, X, Y, f, Y_metadata, step, param_vals, param_names, constraints):
    """Gradcheck a full GP (RBF + fixed white kernel) under EP inference."""
    print("\n{}".format(inspect.stack()[0][3]))
    #Normalize
    # Y = Y/Y.max()
    white_var = 1e-4
    kernel = GPy.kern.RBF(X.shape[1]) + GPy.kern.White(X.shape[1])
    ep_inf = GPy.inference.latent_function_inference.EP()
    m = GPy.core.GP(X.copy(), Y.copy(), kernel=kernel, likelihood=model, Y_metadata=Y_metadata, inference_method=ep_inf)
    m['.*white'].constrain_fixed(white_var)
    # Apply the requested starting value and constraint for each parameter.
    for param_num in range(len(param_names)):
        name = param_names[param_num]
        m[name] = param_vals[param_num]
        constraints[param_num](name, m)
    m.randomize()
    print(m)
    assert m.checkgrad(verbose=1, step=step)
################
# variational expectations #
################
@with_setup(setUp, tearDown)
def t_varexp(self, model, Y, Y_metadata):
    """Compare variational_expectations against a 50-point Gauss-Hermite
    quadrature of logpdf."""
    #Test that the analytic implementation (if it exists) matches the generic gauss
    #hermite implementation
    print("\n{}".format(inspect.stack()[0][3]))
    #Make mu and var (marginal means and variances of q(f)) draws from a GP
    k = GPy.kern.RBF(1).K(np.linspace(0,1,Y.shape[0])[:, None])
    L = GPy.util.linalg.jitchol(k)
    mu = L.dot(np.random.randn(*Y.shape))
    #Variance must be positive
    var = np.abs(L.dot(np.random.randn(*Y.shape))) + 0.01
    expectation = model.variational_expectations(Y=Y, m=mu, v=var, gh_points=None, Y_metadata=Y_metadata)[0]
    #Implementation of gauss hermite integration
    shape = mu.shape
    gh_x, gh_w= np.polynomial.hermite.hermgauss(50)
    m,v,Y = mu.flatten(), var.flatten(), Y.flatten()
    #make a grid of points: quadrature nodes transformed to each q(f) marginal
    X = gh_x[None,:]*np.sqrt(2.*v[:,None]) + m[:,None]
    #evaluate the likelhood for the grid. First ax indexes the data (and mu, var) and the second indexes the grid.
    # broadcast needs to be handled carefully.
    logp = model.logpdf(X, Y[:,None], Y_metadata=Y_metadata)
    #average over the gird to get derivatives of the Gaussian's parameters
    #division by pi comes from fact that for each quadrature we need to scale by 1/sqrt(pi)
    expectation_gh = np.dot(logp, gh_w)/np.sqrt(np.pi)
    expectation_gh = expectation_gh.reshape(*shape)
    np.testing.assert_almost_equal(expectation, expectation_gh, decimal=5)
@with_setup(setUp, tearDown)
def t_dexp_dmu(self, model, Y, Y_metadata):
    """Gradcheck the mean-gradient (second return value) of
    variational_expectations against its value (first return value)."""
    print("\n{}".format(inspect.stack()[0][3]))
    #Make mu and var (marginal means and variances of q(f)) draws from a GP
    k = GPy.kern.RBF(1).K(np.linspace(0,1,Y.shape[0])[:, None])
    L = GPy.util.linalg.jitchol(k)
    mu = L.dot(np.random.randn(*Y.shape))
    #Variance must be positive
    var = np.abs(L.dot(np.random.randn(*Y.shape))) + 0.01
    expectation = functools.partial(model.variational_expectations, Y=Y, v=var, gh_points=None, Y_metadata=Y_metadata)
    #Function to get the nth returned value
    def F(mu):
        return expectation(m=mu)[0]
    def dmu(mu):
        return expectation(m=mu)[1]
    grad = GradientChecker(F, dmu, mu.copy(), 'm')
    grad.randomize()
    print(grad)
    print(model)
    assert grad.checkgrad(verbose=1)
@with_setup(setUp, tearDown)
def t_dexp_dvar(self, model, Y, Y_metadata):
    """Gradcheck the variance-gradient (third return value) of
    variational_expectations against its value (first return value)."""
    print("\n{}".format(inspect.stack()[0][3]))
    #Make mu and var (marginal means and variances of q(f)) draws from a GP
    k = GPy.kern.RBF(1).K(np.linspace(0,1,Y.shape[0])[:, None])
    L = GPy.util.linalg.jitchol(k)
    mu = L.dot(np.random.randn(*Y.shape))
    #Variance must be positive
    var = np.abs(L.dot(np.random.randn(*Y.shape))) + 0.01
    expectation = functools.partial(model.variational_expectations, Y=Y, m=mu, gh_points=None, Y_metadata=Y_metadata)
    #Function to get the nth returned value
    def F(var):
        return expectation(v=var)[0]
    def dvar(var):
        return expectation(v=var)[2]
    grad = GradientChecker(F, dvar, var.copy(), 'v')
    # The variance input must stay positive while checkgrad perturbs it.
    self.constrain_positive('v', grad)
    #grad.randomize()
    print(grad)
    print(model)
    assert grad.checkgrad(verbose=1)
class LaplaceTests(unittest.TestCase):
"""
Specific likelihood tests, not general enough for the above tests
"""
def setUp(self):
    """Create synthetic 1-D data plus Student-t and Gaussian likelihoods."""
    np.random.seed(fixed_seed)
    self.N = 15
    self.D = 1
    self.X = np.random.rand(self.N, self.D)*10
    self.real_std = 0.1
    noise = np.random.randn(*self.X[:, 0].shape)*self.real_std
    self.Y = (np.sin(self.X[:, 0]*2*np.pi) + noise)[:, None]
    self.f = np.random.rand(self.N, 1)
    # The original assigned `self.var = 0.2` and immediately overwrote it
    # with a random draw; only the draw is kept.
    self.var = np.random.rand(1)
    self.stu_t = GPy.likelihoods.StudentT(deg_free=5, sigma2=self.var)
    #TODO: gaussians with on Identity link. self.gauss = GPy.likelihoods.Gaussian(gp_link=link_functions.Log(), variance=self.var)
    self.gauss = GPy.likelihoods.Gaussian(variance=self.var)
    #Make a bigger step as lower bound can be quite curved
    self.step = 1e-6
def tearDown(self):
    """Release the fixtures created in setUp."""
    self.stu_t = None
    self.gauss = None
    self.Y = None
    self.f = None
    self.X = None
def test_gaussian_d2logpdf_df2_2(self):
    """Gradcheck the Gaussian d2logpdf_df2 on a tiny (N=2) data set."""
    print("\n{}".format(inspect.stack()[0][3]))
    # Rebuild a minimal fixture; the setUp data is intentionally replaced.
    self.Y = None
    self.N = 2
    self.D = 1
    self.X = np.linspace(0, self.D, self.N)[:, None]
    self.real_std = 0.2
    noise = np.random.randn(*self.X.shape)*self.real_std
    self.Y = np.sin(self.X*2*np.pi) + noise
    self.f = np.random.rand(self.N, 1)
    dlogpdf_df = functools.partial(self.gauss.dlogpdf_df, y=self.Y)
    d2logpdf_df2 = functools.partial(self.gauss.d2logpdf_df2, y=self.Y)
    grad = GradientChecker(dlogpdf_df, d2logpdf_df2, self.f.copy(), 'g')
    grad.randomize()
    self.assertTrue(grad.checkgrad(verbose=1))
def test_laplace_log_likelihood(self):
    """Fit the same Gaussian-likelihood GP with exact inference and with
    Laplace inference and check that marginals, predictions and gradients
    agree (Laplace is exact for a Gaussian likelihood)."""
    debug = False
    real_std = 0.1
    initial_var_guess = 0.5
    #Start a function, any function
    X = np.linspace(0.0, np.pi*2, 100)[:, None]
    Y = np.sin(X) + np.random.randn(*X.shape)*real_std
    Y = Y/Y.max()
    #Yc = Y.copy()
    #Yc[75:80] += 1
    kernel1 = GPy.kern.RBF(X.shape[1]) + GPy.kern.White(X.shape[1])
    #FIXME: Make sure you can copy kernels when params is fixed
    #kernel2 = kernel1.copy()
    kernel2 = GPy.kern.RBF(X.shape[1]) + GPy.kern.White(X.shape[1])
    gauss_distr1 = GPy.likelihoods.Gaussian(variance=initial_var_guess)
    exact_inf = GPy.inference.latent_function_inference.ExactGaussianInference()
    m1 = GPy.core.GP(X, Y.copy(), kernel=kernel1, likelihood=gauss_distr1, inference_method=exact_inf)
    m1['.*white'].constrain_fixed(1e-6)
    m1['.*Gaussian_noise.variance'].constrain_bounded(1e-4, 10)
    m1.randomize()
    gauss_distr2 = GPy.likelihoods.Gaussian(variance=initial_var_guess)
    laplace_inf = GPy.inference.latent_function_inference.Laplace()
    m2 = GPy.core.GP(X, Y.copy(), kernel=kernel2, likelihood=gauss_distr2, inference_method=laplace_inf)
    m2['.*white'].constrain_fixed(1e-6)
    m2['.*Gaussian_noise.variance'].constrain_bounded(1e-4, 10)
    m2.randomize()
    if debug:
        print(m1)
        print(m2)
    optimizer = 'scg'
    print("Gaussian")
    m1.optimize(optimizer, messages=debug, ipython_notebook=False)
    print("Laplace Gaussian")
    m2.optimize(optimizer, messages=debug, ipython_notebook=False)
    if debug:
        print(m1)
        print(m2)
    # Copy the exact model's optimized parameters into the Laplace model so
    # the log-likelihood comparison is at identical hyperparameters.
    m2[:] = m1[:]
    #Predict for training points to get posterior mean and variance
    post_mean, post_var = m1.predict(X)
    post_mean_approx, post_var_approx, = m2.predict(X)
    if debug:
        from matplotlib import pyplot as pb
        pb.figure(5)
        pb.title('posterior means')
        pb.scatter(X, post_mean, c='g')
        pb.scatter(X, post_mean_approx, c='r', marker='x')
        pb.figure(6)
        pb.title('plot_f')
        m1.plot_f(fignum=6)
        m2.plot_f(fignum=6)
        fig, axes = pb.subplots(2, 1)
        fig.suptitle('Covariance matricies')
        a1 = pb.subplot(121)
        a1.matshow(m1.likelihood.covariance_matrix)
        a2 = pb.subplot(122)
        a2.matshow(m2.likelihood.covariance_matrix)
        pb.figure(8)
        pb.scatter(X, m1.likelihood.Y, c='g')
        pb.scatter(X, m2.likelihood.Y, c='r', marker='x')
    #Check Y's are the same
    np.testing.assert_almost_equal(m1.Y, m2.Y, decimal=5)
    #Check marginals are the same
    np.testing.assert_almost_equal(m1.log_likelihood(), m2.log_likelihood(), decimal=2)
    #Check marginals are the same with random
    m1.randomize()
    m2[:] = m1[:]
    np.testing.assert_almost_equal(m1.log_likelihood(), m2.log_likelihood(), decimal=2)
    #Check they are checkgradding
    #m1.checkgrad(verbose=1)
    #m2.checkgrad(verbose=1)
    self.assertTrue(m1.checkgrad(verbose=True))
    self.assertTrue(m2.checkgrad(verbose=True))
# Allow running this test module directly (outside the nose runner).
if __name__ == "__main__":
    print("Running unit tests")
    unittest.main()
|
dhhjx880713/GPy
|
GPy/testing/likelihood_tests.py
|
Python
|
bsd-3-clause
| 35,530
|
[
"Gaussian"
] |
98d4aad98432734a1edc0fd0e7db386764f44815284ce5906874e076df5037b7
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import glob
import os
import shutil
import tempfile
class Ncl(Package):
"""NCL is an interpreted language designed specifically for
scientific data analysis and visualization. Supports NetCDF 3/4,
GRIB 1/2, HDF 4/5, HDF-EOD 2/5, shapefile, ASCII, binary.
Numerous analysis functions are built-in."""
homepage = "https://www.ncl.ucar.edu"
version('6.4.0', 'a981848ddcaf1c263279648265f24766',
url='https://www.earthsystemgrid.org/download/fileDownload.html?logicalFileId=86b9bec2-fa01-11e6-a976-00c0f03d5b7c',
extension='tar.gz')
patch('spack_ncl.patch')
# Make ncl compile with hdf5 1.10
patch('hdf5.patch')
# ymake-filter's buffer may overflow
patch('ymake-filter.patch')
# This installation script is implemented according to this manual:
# http://www.ncl.ucar.edu/Download/build_from_src.shtml
variant('hdf4', default=False, description='Enable HDF4 support.')
variant('gdal', default=False, description='Enable GDAL support.')
variant('triangle', default=True, description='Enable Triangle support.')
variant('udunits2', default=True, description='Enable UDUNITS-2 support.')
variant('openmp', default=True, description='Enable OpenMP support.')
# Non-optional dependencies according to the manual:
depends_on('jpeg')
depends_on('netcdf')
depends_on('cairo+X')
# Extra dependencies that may be missing from build system:
depends_on('bison', type='build')
depends_on('flex+lex')
depends_on('libiconv')
depends_on('tcsh')
# Also, the manual says that ncl requires zlib, but that comes as a
# mandatory dependency of libpng, which is a mandatory dependency of cairo.
# The following dependencies are required, otherwise several components
# fail to compile:
depends_on('curl')
depends_on('libiconv')
depends_on('libx11')
depends_on('libxaw')
depends_on('libxmu')
# In Spack, we do not have an option to compile netcdf without netcdf-4
# support, so we will tell the ncl configuration script that we want
# support for netcdf-4, but the script assumes that hdf5 is compiled with
# szip support. We introduce this restriction with the following dependency
# statement.
depends_on('hdf5+szip')
depends_on('szip')
# ESMF is only required at runtime (for ESMF_regridding.ncl)
depends_on('esmf', type='run')
# In Spack, we also do not have an option to compile netcdf without DAP
# support, so we will tell the ncl configuration script that we have it.
# Some of the optional dependencies according to the manual:
depends_on('hdf', when='+hdf4')
depends_on('gdal', when='+gdal')
depends_on('udunits2', when='+udunits2')
# We need src files of triangle to appear in ncl's src tree if we want
# triangle's features.
resource(
name='triangle',
url='http://www.netlib.org/voronoi/triangle.zip',
md5='10aff8d7950f5e0e2fb6dd2e340be2c9',
placement='triangle_src',
when='+triangle')
def patch(self):
    """Rewrite the csh shebang of Configure and every config/ script so they
    run under whatever csh is first in PATH (Spack's tcsh)."""
    scripts = ['Configure']
    scripts.extend(glob.glob('config/*'))
    filter_file('^#!/bin/csh -f', '#!/usr/bin/env csh', *scripts)
@run_before('install')
def filter_sbang(self):
# Filter sbang before install so Spack's sbang hook can fix it up
files = glob.glob('ncarg2d/src/bin/scripts/*')
files += glob.glob('ncarview/src/bin/scripts/*')
files += glob.glob('ni/src/scripts/*')
csh = join_path(self.spec['tcsh'].prefix.bin, 'csh')
filter_file('^#!/bin/csh', '#!{0}'.format(csh), *files)
    def install(self, spec, prefix):
        """Configure and build NCL; the 'Everything' target also installs
        into the prefix supplied via the scripted configure answers."""
        # NCL builds Fortran as well as C sources, so both compilers are
        # mandatory.
        if (self.compiler.fc is None) or (self.compiler.cc is None):
            raise InstallError('NCL package requires both '
                               'C and Fortran compilers.')
        self.prepare_site_config()
        self.prepare_install_config()
        self.prepare_src_tree()
        # Build serially: the NCL build system is not parallel-safe here.
        make('Everything', parallel=False)
    def setup_environment(self, spack_env, run_env):
        # Export the installation root for users of the package; NCARG_ROOT
        # is the conventional variable NCL tools use to locate resources.
        run_env.set('NCARG_ROOT', self.spec.prefix)
    def prepare_site_config(self):
        """Write config/Spack, the site-specific build configuration.

        Collects compiler-dependent Fortran/C flags (plus the libraries
        needed when linking Fortran objects from C) and emits them as the
        cpp-style '#define' lines NCL's build system expects.
        """
        fc_flags = []
        cc_flags = []
        c2f_flags = []  # libraries required to call Fortran code from C
        if '+openmp' in self.spec:
            fc_flags.append(self.compiler.openmp_flag)
            cc_flags.append(self.compiler.openmp_flag)
        if self.compiler.name == 'gcc':
            # gfortran would otherwise reject some legacy constants in NCL
            fc_flags.append('-fno-range-check')
            c2f_flags.extend(['-lgfortran', '-lm'])
        elif self.compiler.name == 'intel':
            fc_flags.append('-fp-model precise')
            cc_flags.append('-fp-model precise')
            c2f_flags.extend(['-lifcore', '-lifport'])
        # Empty strings below collapse to "no line" for unset option groups.
        with open('./config/Spack', 'w') as f:
            f.writelines([
                '#define HdfDefines\n',
                '#define CppCommand \'/usr/bin/env cpp -traditional\'\n',
                '#define CCompiler cc\n',
                '#define FCompiler fc\n',
                ('#define CtoFLibraries ' + ' '.join(c2f_flags) + '\n'
                    if len(c2f_flags) > 0
                    else ''),
                ('#define CtoFLibrariesUser ' + ' '.join(c2f_flags) + '\n'
                    if len(c2f_flags) > 0
                    else ''),
                ('#define CcOptions ' + ' '.join(cc_flags) + '\n'
                    if len(cc_flags) > 0
                    else ''),
                ('#define FcOptions ' + ' '.join(fc_flags) + '\n'
                    if len(fc_flags) > 0
                    else ''),
                '#define BuildShared NO'
            ])
def prepare_install_config(self):
# Remove the results of the previous configuration attempts.
self.delete_files('./Makefile', './config/Site.local')
# Generate an array of answers that will be passed to the interactive
# configuration script.
config_answers = [
# Enter Return to continue
'\n',
# Build NCL?
'y\n',
# Parent installation directory :
'\'' + self.spec.prefix + '\'\n',
# System temp space directory :
'\'' + tempfile.gettempdir() + '\'\n',
# Build NetCDF4 feature support (optional)?
'y\n'
]
if '+hdf4' in self.spec:
config_answers.extend([
# Build HDF4 support (optional) into NCL?
'y\n',
# Also build HDF4 support (optional) into raster library?
'y\n',
# Did you build HDF4 with szip support?
'y\n' if self.spec.satisfies('^hdf+szip') else 'n\n'
])
else:
config_answers.extend([
# Build HDF4 support (optional) into NCL?
'n\n',
# Also build HDF4 support (optional) into raster library?
'n\n'
])
config_answers.extend([
# Build Triangle support (optional) into NCL
'y\n' if '+triangle' in self.spec else 'n\n',
# If you are using NetCDF V4.x, did you enable NetCDF-4 support?
'y\n',
# Did you build NetCDF with OPeNDAP support?
'y\n',
# Build GDAL support (optional) into NCL?
'y\n' if '+gdal' in self.spec else 'n\n',
# Build EEMD support (optional) into NCL?
'n\n',
# Build Udunits-2 support (optional) into NCL?
'y\n' if '+uduints2' in self.spec else 'n\n',
# Build Vis5d+ support (optional) into NCL?
'n\n',
# Build HDF-EOS2 support (optional) into NCL?
'n\n',
# Build HDF5 support (optional) into NCL?
'y\n',
# Build HDF-EOS5 support (optional) into NCL?
'n\n',
# Build GRIB2 support (optional) into NCL?
'n\n',
# Enter local library search path(s) :
# The paths will be passed by the Spack wrapper.
' \n',
# Enter local include search path(s) :
# All other paths will be passed by the Spack wrapper.
'\'' + join_path(self.spec['freetype'].prefix.include,
'freetype2') + '\'\n',
# Go back and make more changes or review?
'n\n',
# Save current configuration?
'y\n'
])
config_answers_filename = 'spack-config.in'
config_script = Executable('./Configure')
with open(config_answers_filename, 'w') as f:
f.writelines(config_answers)
with open(config_answers_filename, 'r') as f:
config_script(input=f)
    def prepare_src_tree(self):
        # Triangle is fetched as a separate resource (staged to
        # 'triangle_src'); its two source files must live inside ncl's hlu
        # source directory for the build to compile them.
        if '+triangle' in self.spec:
            triangle_src = join_path(self.stage.source_path, 'triangle_src')
            triangle_dst = join_path(self.stage.source_path, 'ni', 'src',
                                     'lib', 'hlu')
            shutil.copy(join_path(triangle_src, 'triangle.h'), triangle_dst)
            shutil.copy(join_path(triangle_src, 'triangle.c'), triangle_dst)
@staticmethod
def delete_files(*filenames):
for filename in filenames:
if os.path.exists(filename):
try:
os.remove(filename)
except OSError as e:
raise InstallError('Failed to delete file %s: %s' % (
e.filename, e.strerror))
|
skosukhin/spack
|
var/spack/repos/builtin/packages/ncl/package.py
|
Python
|
lgpl-2.1
| 10,763
|
[
"NetCDF"
] |
e805d152b62b7b2a11f9ea8ba1e38ec245c378b52b08e628ed65ba44c5b49cca
|
#! /usr/bin/env python
# Copyright (C) 2016 Li Yao <yaoli95@outlook.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from sigEngine import randSig, generateSSSig
import os, string, re, subprocess, pysam, time
from repConfig import getConfig
from random import Random
from database import mysqlConnection
from os import path
# Shared MySQL connection/cursor, used by recordSplicing() below.
con, cursor = mysqlConnection()
# Score cutoffs read from the project configuration (string.atof == float):
#   mesSCT        - minimum MaxEntScan score for a window to count as a site
#   mesVT         - relative MaxEntScan score change considered significant
#   bpThreshold   - minimum branch-point PWM score
#   bpVarThreshold- relative branch-point score change considered significant
filterThreshold = string.atof(getConfig("splicing", "mesSCT"))
mesVT = string.atof(getConfig("splicing", "mesVT"))
bpThreshold = string.atof(getConfig("splicing", "bpThreshold"))
bpVarThreshold = string.atof(getConfig("splicing", "bpVarThreshold"))
'''
Citation:
Eng L, Coutinho G, Nahas S, Yeo G, Tanouye R, Babaei M, Dork T, Burge C, Gatti RA. Nonclassical splicing mutations in the coding and noncoding regions of the ATM Gene: maximum entropy estimates of splice junction strengths. Hum Mutat. 2004 Jan; 23(1):67-76
FO Desmet, Hamroun D, Lalande M, Collod-Beroud G, Claustres M, Beroud C. Human Splicing Finder: an online bioinformatics tool to predict splicing signals. Nucleic Acid Research, 2009
'''
# Consensus motifs (currently unused by isChangeSS, which scores fixed
# windows instead of scanning for motif hits):
#splicingSite5Motif = re.compile("[CA]AGGT[AG]AG[ACGT]", re.IGNORECASE|re.DOTALL)
splicingSite5Motif = re.compile("[CA][ACGT][ACGT]GT[ACGT][ACGT][ACGT][ACGT]", re.IGNORECASE|re.DOTALL)
splicingSite3Motif = re.compile("[ATCG]{18}AG[AG][ATCG][ATCG]", re.IGNORECASE|re.DOTALL)
branchMotif = re.compile("[CGT][ACGT][CT][CT][CG]A[CGT]", re.IGNORECASE|re.DOTALL)
# Position weight matrix for the 7-base branch site, indexed
# [position][base]; used by bpPositionWeightMatrix().
branchSiteMatris = {
    0:{'A': 0.0, 'C':4.25, 'G':2.62, 'T':2.72},
    1:{'A': 0.0, 'C':6.87, 'G':2.29, 'T':3.5},
    2:{'A': 0.0, 'C':25.72, 'G':0.0, 'T':1.88},
    3:{'A': 0.0, 'C':6.05, 'G':0.0, 'T':15.09},
    4:{'A': 0.0, 'C':11.82, 'G':6.89, 'T':0.0},
    5:{'A': 29.63, 'C':0.0, 'G':0.0, 'T':0.0},
    6:{'A': 0.0, 'C':6.62, 'G':3.89, 'T':2.72},
}
# Human-readable names for the status codes stored by recordSplicing().
# NOTE(review): isChangeSS returns type-multiplied codes (e.g. 3*5=15 for a
# weakened 5' site) which do not all map onto these values -- confirm how
# downstream consumers decode them.
STATUSCODE = {
    'INACTIVE5SS': 1,
    'ENHENCED5SS': 2,
    'WEAKEND5SS': 3,
    'INACTIVE3SS': 4,
    'ENHENCED3SS': 5,
    'WEAKEND3SS': 6,
    'NEWBS': 7,
    'INACTIVEBS': 8,
    'WEAKENDBS': 9,
}
# Tabix-indexed intron track; queried by getIntronInfo().
intronInfo = pysam.Tabixfile(getConfig("datasets", "intronInfo"))
def callMaxEntScan3ss(sequence, sig):
    """Score 3' splice-site sequences with the MaxEntScan ss3 perl script.

    sequence -- a string or list of newline-terminated 23-base sequences
                ([20 intronic][3 exonic]); written verbatim to a temp file.
    sig      -- unique signature used to name the temp input file so that
                concurrent jobs do not collide.
    Returns the whitespace-split stdout of MaxEntScan (a list of tokens).
    """
    #if len(sequence) != 23:
    #    print "The 3ss sequence must be 23 bases long [20 bases in the intron][3 bases in the exon]"
    #    return 0
    fileName = path.join(getConfig("dispatch", "etpool"), "MaxEntScan_" + sig)
    # NOTE(review): 'file' shadows the Python 2 builtin of the same name.
    with open(fileName, 'w') as file:
        file.writelines(sequence)
    maxEntScan3 = subprocess.Popen(["perl", getConfig("program", "ss3"), fileName], shell=False, stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
    # read() blocks until the perl script closes stdout, so the scores are
    # complete before the temp file is removed below.
    score = maxEntScan3.stdout.read()
    # Best-effort cleanup: a failed delete must not discard the scores.
    try:
        os.remove(fileName)
    except Exception, e:
        print e
    return score.split()
def callMaxEntScan5ss(sequence, sig):
    """Score 5' splice-site sequences with the MaxEntScan ss5 perl script.

    sequence -- a string or list of newline-terminated 9-base sequences
                ([3 exonic][6 intronic]); written verbatim to a temp file.
    sig      -- unique signature used to name the temp input file so that
                concurrent jobs do not collide.
    Returns the whitespace-split stdout of MaxEntScan (a list of tokens).
    """
    fileName = path.join(getConfig("dispatch", "etpool"), "MaxEntScan_" + sig)
    # 'seqfile' instead of 'file': avoid shadowing the builtin
    with open(fileName, 'w') as seqfile:
        seqfile.writelines(sequence)
    maxEntScan5 = subprocess.Popen(["perl", getConfig("program", "ss5"), fileName], shell=False, stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
    score = maxEntScan5.stdout.read()
    # Best-effort cleanup, now consistent with callMaxEntScan3ss: a failed
    # delete must not discard the scores we already read.
    try:
        os.remove(fileName)
    except OSError as e:
        print(e)
    return score.split()
def isChangedSS(raw, edited, type, sig='isChangedSS'):
    '''Compare the MaxEntScan score of a single site before/after editing.

    Returns (runWell, changed, rawScore, editedScore): runWell is 1 when the
    edited sequence still scores as a splice site, changed is -1 when the
    edit weakens the site beyond mesVT, 1 when it strengthens it, else 0.

    Fixes over the original: the MaxEntScan helpers take a mandatory file
    signature, which was not passed (every call raised TypeError), and they
    return a *list* of score tokens, which was fed to string.atof directly;
    the first token is now used.  The new 'sig' parameter defaults to a
    fixed value, so existing callers keep working.
    '''
    runWell = 0
    changed = 0
    if type == 3:
        rawScore = callMaxEntScan3ss(raw, sig)
        editedScore = callMaxEntScan3ss(edited, sig)
    else:
        rawScore = callMaxEntScan5ss(raw, sig)
        editedScore = callMaxEntScan5ss(edited, sig)
    # the helpers return a list of whitespace-split tokens; a single input
    # sequence yields a single score in position 0
    rs = string.atof(rawScore[0])
    es = string.atof(editedScore[0])
    if es > string.atof(getConfig("splicing", "mesSCT")):
        runWell = 1
    if ((es - rs) / rs) < -1 * float(getConfig("splicing", "mesVT")):
        changed = -1
    elif ((es - rs) / rs) > float(getConfig("splicing", "mesVT")):
        changed = 1
    return (runWell, changed, rs, es)
def getIntronInfo(geneSymbol, chr, position):
    """Return the intron records overlapping *position* on chromosome *chr*.

    Queries the tabix-indexed intron track and returns a list of entries
    [rec_name, chromosome, start, end, strand, sequence] with newlines
    stripped from the sequence; the list is empty when nothing overlaps.
    geneSymbol is not used by the tabix lookup but kept for interface
    compatibility.
    """
    global intronInfo
    hits = []
    pos = int(position)
    records = intronInfo.fetch(reference=chr, start=pos, end=pos + 1,
                               parser=pysam.asBed())
    for record in records:
        chromosome, start, end, recName, geneSymbol, strand, sequence = record
        hits.append([recName, chromosome, int(start), int(end), strand,
                     sequence.replace('\n', '')])
    return hits
def isChangeSS(raw, edited, sig, type=3):
    '''Compare MaxEntScan scores of a splice-site window before/after editing.

    raw/edited -- the unedited and edited window sequences (9 bases for 5'
    sites, 23 for 3' sites); sig -- unique signature for the MaxEntScan temp
    file; type -- 5 or 3, selecting the scoring model.

    Returns (status, raw_score, edited_score, variation).  status is a
    multiple of *type*: 1*type site inactivated, 2*type enhanced, 3*type
    weakened, 4*type new site; (0, 0, 0, 0) means no reportable change.
    '''
    rawDict = []
    editDict = []
    filteredRawDict = {}
    filteredEditDict = {}
    filterThreshold = string.atof(getConfig("splicing", "mesSCT"))
    # A single fixed window per sequence is scored (motif scanning was
    # abandoned upstream).
    if type == 5:
        rawDict.append((0, 9, raw))
        editDict.append((0, 9, edited))
        rawScore = callMaxEntScan5ss([ss5[2]+"\n" for ss5 in rawDict], sig)
        editScore = callMaxEntScan5ss([ss5[2]+"\n" for ss5 in editDict], sig)
    else:
        rawDict.append((0, 9, raw))
        editDict.append((0, 9, edited))
        rawScore = callMaxEntScan3ss([ss3[2]+"\n" for ss3 in rawDict], sig)
        editScore = callMaxEntScan3ss([ss3[2]+"\n" for ss3 in editDict], sig)
    if (rawScore == editScore):
        return 0, 0, 0, 0
    # Index windows by "start,end" -> (score, sequence).  Raw windows are
    # kept unfiltered on purpose: an edited site that drops below the
    # threshold must still be seen (as an inactivation).
    for i in range(len(rawDict)):
        score = string.atof(rawScore[i])
        filteredRawDict[str(rawDict[i][0])+","+str(rawDict[i][1])] = (score, rawDict[i][2])
    for i in range(len(editDict)):
        score = string.atof(editScore[i])
        if score >= filterThreshold:
            filteredEditDict[str(editDict[i][0])+","+str(editDict[i][1])] = (score, editDict[i][2])
    for (k, v) in filteredRawDict.items():
        if k not in filteredEditDict:
            return (1*type, v[0], 0, 0)  # site inactivated by the edit
        elif (v[0] != filteredEditDict[k][0]):
            variation = (filteredEditDict[k][0] - v[0]) / v[0]
            if (variation > float(getConfig("splicing", "mesVT"))):
                return (2*type, v[0], filteredEditDict[k][0], variation)  # enhanced
            elif (variation < -1*float(getConfig("splicing", "mesVT"))):
                # The original nested a duplicate of this very condition
                # here, which made the 3*type "weakened" branch unreachable
                # and reported 1*type (inactivated) instead.
                return (3*type, v[0], filteredEditDict[k][0], variation)  # weakened
    for (k, v) in filteredEditDict.items():
        if k not in filteredRawDict:
            return (4*type, 0, v[0], 0)  # new splicing site
    return 0, 0, 0, 0
def defineAGEZ(seq, pos3SS):
    """Locate the AG exclusion zone (AGEZ) boundary upstream of a 3' site.

    Scans *seq* backwards starting 12 bases upstream of *pos3SS* and
    returns the index of the 'G' of the *second* 'AG' dinucleotide found
    (the first one is skipped).  Returns 0 when fewer than two 'AG's occur
    in the scanned region.  Matching is case-sensitive, exactly as before.
    """
    seen_first_ag = False
    for idx in range(pos3SS - 12, 0, -1):
        if seq[idx] == 'G' and seq[idx - 1] == 'A':
            if seen_first_ag:
                return idx
            seen_first_ag = True
    return 0
def bpPositionWeightMatrix(seq, start, end):
    """Score candidate branch-point heptamers in seq[start:end].

    Slides a 7-base window over the region and scores each window with the
    branch-site position weight matrix (branchSiteMatris, case-insensitive
    via upper()).  The score of the first window reaching bpThreshold is
    returned; 0 when no window qualifies.  The unused 'maxScore' local of
    the original has been removed.

    NOTE(review): window starts run over range(start, end - 7), so the last
    heptamer considered ends at index end - 2; if windows ending at end - 1
    should also count, the bound would be end - 6.  Preserved as-is pending
    confirmation.
    """
    score = 0.0
    for i in range(start, end - 7):
        for j in range(7):
            score += branchSiteMatris[j][seq[i + j].upper()]
        if score >= bpThreshold:
            return score
        score = 0.0
    return 0
def isChangeBranchSite(raw, edited, start, end):
    """Compare branch-site PWM scores of the raw and edited sequences.

    Returns (status, raw_score, edited_score, variation) with branch-site
    status codes: 16 new site, 17 site lost, 18 weakened, 19 enhanced;
    (0, 0, 0, 0) means no reportable change.
    """
    score_before = bpPositionWeightMatrix(raw, start, end)
    score_after = bpPositionWeightMatrix(edited, start, end)
    if score_before == 0:
        if score_after == 0:
            return 0, 0, 0, 0
        # a branch site appeared where none scored before
        return (16, score_before, score_after, score_after)
    if score_after == 0:
        # the previously scoring branch site no longer qualifies
        return (17, score_before, score_after, score_after)
    variation = (score_after - score_before) / score_before
    if variation < 0 and -1 * variation > bpVarThreshold:
        return (18, score_before, score_after, variation)
    if variation > 0 and variation > bpVarThreshold:
        return (19, score_before, score_after, variation)
    return 0, 0, 0, 0
def isMutChangeSplicing(geneName, chr, mutPos, raw='A', muted='G', jid=0):
    """Check whether the raw->muted base change at mutPos affects splicing.

    Every intron overlapping mutPos is examined; depending on where the
    mutation falls (5' splice-site window, 3' splice-site window, or the
    AGEZ/branch-site region) the corresponding isChangeSS /
    isChangeBranchSite comparison is run on the edited sequence and any hit
    is persisted via recordSplicing.  Returns 1 if anything was recorded,
    otherwise 0; returns (0, 0, 0, 0, 0) when no intron covers the position.
    """
    flag = 0
    ssRangeFlag = 0
    intronSet = getIntronInfo(geneName, chr, mutPos)
    uniSig = generateSSSig(geneName, mutPos, jid, raw+'>'+muted)
    # NOTE(review): getIntronInfo returns a list, never 0, so this guard is
    # dead; an empty list simply skips the loop below with the same effect.
    if intronSet == 0:
        return 0, 0, 0, 0, 0
    for intron in intronSet:
        # intron: [rec_name, chromosome, start, end, strand, sequence];
        # rec_name encodes transcript and intron order as tx.ver_x_order.
        orderOfIntron = intron[0].split('_')[2]
        transcript = intron[0].split('_')[0].split('.')[0]
        # The offsets below imply the stored sequence carries ~10 flanking
        # bases on each side of the intron -- TODO confirm against the
        # intronInfo track definition.
        rangeOf5SS = range(10, 16)
        rangeOf3SS = range(intron[3] - intron[2] - 10, intron[3] - intron[2]+10)
        ter5Of3SS = intron[3] - intron[2] + 6
        if intron[4] == '-':
            relativeMutPos = intron[3] - mutPos + 10
        else:
            relativeMutPos = mutPos - intron[2] + 9
        # Edited copy of the stored sequence with the mutant base substituted.
        mutSeq = intron[5][:relativeMutPos]+muted+intron[5][relativeMutPos+1:]
        #print mutPos, intron[5], mutSeq
        if relativeMutPos in rangeOf5SS:
            ssRangeFlag = 1
            # Sanity check: the stored base must match the reported raw base.
            if (intron[5][relativeMutPos].lower() != raw.lower()):
                print "Please check your sequence and mutation position(%s, %s, %s)" % (mutPos, intron[5][relativeMutPos].lower(), raw.lower())
                continue
                #return 0, 0, 0, 0, 0
            # 9-base 5' splice-site window: [3 exonic][6 intronic]
            ss5ret = list(isChangeSS(intron[5][7:16], mutSeq[7:16], uniSig, 5))
            if ss5ret != 0:
                ss5ret.append(orderOfIntron)
                #return ss5ret
            if ss5ret[0] != 0:
                flag = 1
                recordSplicing(geneName, chr, mutPos, ss5ret[0], ss5ret[1], ss5ret[2], ss5ret[3], ss5ret[4], jid, transcript)
        elif relativeMutPos in rangeOf3SS:
            ssRangeFlag = 1
            if (intron[5][relativeMutPos].lower() != raw.lower()):
                print "Please check your sequence and mutation position(%s, %s, %s)" % (mutPos, intron[5][relativeMutPos].lower(), raw.lower())
                #return 0, 0, 0, 0, 0
                continue
            # 23-base 3' splice-site window: [20 intronic][3 exonic]
            ss3ret = list(isChangeSS(intron[5][-30:-7], mutSeq[-30:-7], uniSig, 3))
            if ss3ret != 0:
                ss3ret.append(orderOfIntron)
                #return ss3ret
            if ss3ret[0] != 0:
                flag = 1
                recordSplicing(geneName, chr, mutPos, ss3ret[0], ss3ret[1], ss3ret[2], ss3ret[3], ss3ret[4], jid, transcript)
        else:
            # Branch-site region: between the AGEZ boundary and the 3' end.
            bs = defineAGEZ(intron[5], ter5Of3SS)
            if relativeMutPos in range(bs, ter5Of3SS):
                ssRangeFlag = 1
                bsRet = list(isChangeBranchSite(intron[5], mutSeq, bs, ter5Of3SS))
                bsRet.append(orderOfIntron)
                #return bsRet
                if bsRet[0] != 0:
                    flag = 1
                    recordSplicing(geneName, chr, mutPos, bsRet[0], bsRet[1], bsRet[2], bsRet[3], bsRet[4], jid, transcript)
    #return 0, 0, 0, 0, 0
    if ssRangeFlag == 0:
        print geneName, mutPos, 'not in ss region'
    return flag
def factory(gene, chr, pos, jobId, extend=0, ref=None, edited=None):
    """Run isMutChangeSplicing over every row of the given pandas series.

    gene/chr/pos (and, when extend == 1, ref/edited) are indexed alike;
    returns the number of rows for which a splicing change was recorded.
    """
    counter = 0
    for row in gene.index:
        if extend == 1:
            changed = isMutChangeSplicing(gene.ix[row], chr.ix[row],
                                          pos.ix[row], ref.ix[row],
                                          edited.ix[row], jid=jobId)
        else:
            changed = isMutChangeSplicing(gene.ix[row], chr.ix[row],
                                          pos.ix[row], jid=jobId)
        if changed != 0:
            counter += 1
    return counter
def recordSplicing(gene, chr, pos, t, r, n, v, order, jobId, transcript):
    """Persist one splicing event for job *jobId*.

    Inserts (gene, chromosome, position, status code t, raw score r, new
    score n, variation v, intron order, job id, transcript) into the
    splicing_event table.  The query now uses DB-API parameter binding
    instead of Python %-interpolation, so values are escaped by the driver
    (the original was vulnerable to SQL injection and broke on quotes).
    Errors are reported but not raised, as before.  Always returns 0.
    """
    try:
        query = ("INSERT INTO `splicing_event` (`gene`, `chromosome`, `pos`, "
                 "`type`, `raw_score`, `new_score`, `variation`, `order`, "
                 "`job`, `transcript`) "
                 "VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s);")
        cursor.execute(query, (gene, chr, pos, t, r, n, v, order, jobId,
                               transcript))
        con.commit()
    except Exception as e:
        print("Splicing Error:" + str(e))
    return 0
|
RNAEDITINGPLUS/main
|
node/splicingOntology.py
|
Python
|
apache-2.0
| 15,218
|
[
"pysam"
] |
bbef8cbe23a45f6ae62a9ec33a637e7a7dc9b38fdd4888e921136fcc451b3b8b
|
import logging
import textwrap
import os
import tempfile
import shutil
from subprocess import check_call
from subprocess import CalledProcessError
from ghtools.exceptions import GithubError
log = logging.getLogger(__name__)
def migrate(src, dst):
    """Copy the wiki of *src* over to *dst*.

    Repositories whose source has no wiki are skipped.  Otherwise the
    destination repo is patched to enable wikis and the wiki git repo is
    mirrored through a temporary checkout, which is always removed (and
    the working directory restored) afterwards.
    """
    source_repo = src.client.get('/repos/{0}'.format(src.org_repo)).json()
    if not source_repo['has_wiki']:
        log.info("Migrating %s to %s -> wiki (skipping)", src, dst)
        return

    dst.client.patch('/repos/{0}'.format(dst.org_repo),
                     data={'name': dst.repo, 'has_wiki': 'true'})

    workdir = tempfile.mkdtemp()
    original_cwd = os.getcwd()
    try:
        _migrate(src, dst, workdir)
    finally:
        shutil.rmtree(workdir)
        os.chdir(original_cwd)
def _migrate(src, dst, checkout):
    """Mirror-clone the source wiki into *checkout* and mirror-push it to dst.

    Raises GithubError pointing at the destination wiki URL when the push
    fails (typically because the destination wiki repo has not been created
    yet).
    """
    log.info("Migrating %s to %s -> wiki", src, dst)

    src_url = src.wiki_ssh_url
    log.debug("Migrating %s to %s -> wiki -> cloning from %s", src, dst, src_url)
    check_call(['git', 'clone', '--mirror', src_url, checkout])
    os.chdir(checkout)

    dst_url = dst.wiki_ssh_url
    log.debug("Migrating %s to %s -> wiki -> pushing to %s", src, dst, dst_url)
    check_call(['git', 'remote', 'add', 'dest', dst_url])

    try:
        check_call(['git', 'push', '--mirror', 'dest'])
    except CalledProcessError:
        # .json is a method on requests' Response (this module already calls
        # .json() in migrate()); subscripting the bound method raised
        # TypeError here instead of producing the HTML URL.
        message = textwrap.dedent(u"""
            The destination wiki does not exist, you will need to visit it at:
                {0}/wiki
            """.format(dst.client.get('/repos/{0}'.format(dst.org_repo)).json()['html_url']))
        raise GithubError(message)
|
alphagov/ghtools
|
ghtools/migrators/wiki.py
|
Python
|
mit
| 1,535
|
[
"VisIt"
] |
6d4877c8b84d31a03cb3a8639f5885dcaefe84d0c3c0f1b4d4d4f56f0518851c
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Getting Things GNOME! - a personal organizer for the GNOME desktop
# Copyright (c) 2008-2013 - Lionel Dricot & Bertrand Rousseau
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------------
""" Create tasks for the first run
Tasks should serve as a quick tutorial how GTG works """
from GTG.core.translations import _
from GTG.tools import cleanxml
from GTG.tools.tags import extract_tags_from_text
def populate():
    """Build the first-run tutorial task list.

    On the first run of GTG, populate the task list with tasks meant to
    act as a GTG tutorial: a root "Getting Started" task (0@1) plus eight
    subtasks (1@1-8@1) covering subtasks, tags, the Work View, plugins,
    bug reporting, the quick-add entry, synchronization services and
    search.  Returns the XML document built via cleanxml; all task text
    goes through _() so it is translatable.
    """
    doc, root = cleanxml.emptydoc("project")

    # Task 0@1: Getting started with GTG
    title1 = _("Getting Started With GTG")
    text1 = _(
        "Welcome to Getting Things GNOME!, your new task manager! In Getting "
        "Things GNOME! (GTG), everything is a task. From building a bridge"
        " over the Pacific Ocean to changing a light bulb or organizing a"
        " party!\n\n"
        "If you are new to GTG, please take the time to read this, as it will"
        " provide you useful information about how to use GTG to organize"
        " your everyday life.\n"
        "\n"
        "Creating and editing tasks:\n"
        "\n"
        "Using GTG is easy: you organize what you have to do by creating new"
        " tasks. To do this, simply press the &quot;New Task&quot; button, "
        "edit the task by describing it, set some parameters, and that's "
        "it! Once a task done, you can close it by pressing the &quot;Mark As"
        " Done&quot; button.\n"
        "\n"
        "In GTG, a task is automatically saved while you are editing it. No"
        " need to press any &quot;Save&quot; button! Try it: add some text to"
        " this task, close the window, and reopen it: your changes are still"
        " there!\n\n"
        "About subtasks:\n"
        "\n"
        "In life, you often get more things done by refining them in "
        "smaller, more operational tasks. GTG helps to do just this by "
        "defining &quot;subtasks&quot;. In GTG, those subtasks are "
        "considered as prerequisites that must be completed before being able"
        " to close their parent task.\n\n"
        "Therefore, in GTG, a task might host one or several subtasks. Those "
        "appear as links in the task description, just like the link below. "
        "To open and edit a subtask, simply click on its link! Try it"
        " yourself: open the following subtask:\n"
        "<subtask>1@1</subtask>\n"
        "\n"
        "Closing a task:\n"
        "\n"
        "In GTG, once you are done with a task, you can close it by pushing "
        "either the &quot;Mark as Done&quot; or the &quot;Dismiss&quot; "
        "button. Use the first one if the task is done, and the latter if you"
        " want to close it because it is not relevant anymore. Want to try it"
        "? Try to close the subtask above for instance!\n"
        "\n"
        "When you close a task, you will notice that all its subtasks will be"
        " automatically closed too! Indeed, GTG considers that if you have"
        " completed a given task, then you don't need to do its subtasks"
        " anymore (they were prerequisites, after all).\n"
        "\n"
        "Note that the tasks that you have marked as done or dismissed are "
        "listed in the &quot;Closed Tasks Pane&quot; which is hidden by"
        " default, but you can easily show it using the View menu.\n"
        "\n"""
        "Learn more about GTG:\n"
        "\n"
        "If you are interested in knowing more about "
        "GTG's other features, you will find more information here:\n"
        "<subtask>2@1</subtask>\n"
        "<subtask>3@1</subtask>\n"
        "<subtask>4@1</subtask>\n"
        "<subtask>5@1</subtask>\n"
        "<subtask>6@1</subtask>\n"
        "<subtask>7@1</subtask>\n"
        "<subtask>8@1</subtask>\n"
        "\n"
        "You can also browse GTG documentation by pressing F1 or opening it"
        " using the Help menu.\n"
        "\n"
        "We sincerely hope you will enjoy using GTG, and thank you for trying"
        " it out! Please send us bug reports and ideas for improvement using"
        " this web page: https://bugs.launchpad.net/gtg/+filebug If you want "
        "to get tips for using GTG or be informed about the newest features, "
        "also visit our blog at http://gtg.fritalk.com\n"
        "\n"
        "The GTG team.")
    task1 = addtask(doc, "0@1", title1, text1,
                    ["1@1", "2@1", "3@1", "4@1", "5@1", "6@1", "7@1", "8@1"])
    root.appendChild(task1)

    # Task 1@1: Learn to use subtasks
    title2 = _("Learn How To Use Subtasks")
    text2 = _(
        "A &quot;Subtask&quot; is something that you need to do first before "
        "being able to accomplish your task. In GTG, the purpose of subtasks "
        "is to cut down a task in smaller subtasks that are easier to achieve"
        " and to track down.\n\n"
        "To insert a subtask in the task description (this window, for "
        "instance), begin a line with &quot;-&quot;, then write the subtask "
        "title and press Enter.\n"
        "\n"
        "Try inserting one subtask below. Type &quot;- This is my first "
        "subtask!&quot;, for instance, and press Enter:\n"
        "\n"
        "\n"
        "\n"
        "Alternatively, you can also use the &quot;Insert Subtask&quot; "
        "button.\n\n"
        "Note that subtasks obey to some rules: first, a subtask's due date "
        "can never happen after its parent's due date and, second, when you "
        "mark a parent task as done, its subtasks will also be marked as "
        "done.\n\n"
        "And if you are not happy with your current tasks/subtasks "
        "organization, you can always change it by drag-and-dropping tasks on"
        " each other in the tasks list.")
    task2 = addtask(doc, "1@1", title2, text2, [])
    root.appendChild(task2)

    # Task 2@1: Learn to use tags
    title3 = _("Learn How To Use Tags")
    text3 = _(
        "In GTG, you use tags to sort your tasks. A tag is a simple word that"
        " begins with &quot;@&quot;.\n"
        "\n"
        "Try to type a word beginning with &quot;@&quot; here:\n"
        "\n"
        "Once it becomes yellow, it is a tag! And this tag is now linked to "
        "the task!\n"
        "\n"
        "Using the View menu, you can enable a sidebar which displays all the"
        " tags you are using. This allows you to easily see all tasks "
        "associated to a given tag.\n"
        "\n"
        "If you right-click on a tag in the sidebar, you can also edit it. It"
        " allows you to assign it a color or an icon for instance. This is "
        "handy if you want to quickly identify the tasks associated to a "
        "given tag in the task list!\n\n"
        "New tags are always added exclusively to the currently edited task, "
        "and never to its subtasks. However, when you create a new subtask, "
        "it will inherit its parent's tags.\n"
        "\n"
        "If you need a more advanced task organization, you can also create a"
        " hierarchy of tags by drag-and-dropping a tag onto another. This "
        "is useful when you want to regroup several tags together and see all"
        " related tasks easily. For instance, if you have two tags @money and"
        " @to_pay, and you drag @to_pay on @money, every task tagged with "
        "@to_pay will also appear when you select @money.")
    task3 = addtask(doc, "2@1", title3, text3, [])
    root.appendChild(task3)

    # Task 3@1: Using the Workview
    title4 = _("Learn How To Use The Work View")
    text4 = _(
        "If you press the &quot;Work View&quot; button, only actionable tasks"
        " will be displayed in your list.\n"
        "\n"
        "What is an actionable task? It's a task you can do directly, right "
        "now.\n\n"
        "It's a task that is already &quot;start-able&quot;, i.e. the start "
        "date is already over.\n"
        "\n"
        "It's a task that doesn't have open subtasks, i.e. you can do the "
        "task itself directly.\n"
        "\n"
        "It's a task that has a due date different than &quot;Someday&quot;, "
        "since this kind of date is reserved for things that needs more "
        "thoughts before being actionable.\n"
        "\n"
        "Thus, in short, the Work View shows you tasks that you can do right "
        "now. It's very useful when you want to get things done and to focus "
        "on the relevant tasks!\n"
        "\n"
        "If you use tags, you can right click on a tag in the sidebar and "
        "choose to hide tasks assigned to this particular tag in the Work "
        "View. It is very useful if you have a tag like &quot;@wait&quot; "
        "that you use for tasks blocked by some external event (i.e. a phone "
        "call you wait to receive).\n\n"
        "And finally, an important note regarding the Work View: since the "
        "Work View is updated instantaneously, if you edit your task while "
        "using the Work View, this task might disappear due to the change you"
        " just made (e.g. adding a subtask, adding a tag hidden in the Work "
        "View, etc.). To avoid this, it's better not to edit your task while "
        "using the Work View. ")
    task4 = addtask(doc, "3@1", title4, text4, [])
    root.appendChild(task4)

    # Task 4@1: Plugins (comment previously said 5@1, but the id is 4@1)
    title5 = _("Learn How To Use Plugins")
    text5 = _(
        "GTG has the ability to add plugins to extend its core functionality."
        "\n\n"
        "Some examples of the currently available plugins are the "
        "notification icon which displays a handy shortcut to GTG in your "
        "notification space, or the closed tasks remover which automatically "
        "deletes old tasks from your closed tasks list.\n"
        "\n"
        "You can find the Plugin Manager by selecting Edit in the Menu Bar, "
        "then clicking Plugins.")
    task5 = addtask(doc, "4@1", title5, text5, [])
    root.appendChild(task5)

    # Task 5@1: Reporting bugs
    title6 = _("Reporting Bugs")
    text6 = _(
        "GTG is still beta software. We like it and use it everyday but you "
        "will probably encounter some bugs will you do.\n"
        "\n"
        "Please, help us improving GTG by reporting them on our Launchpad "
        "page:https://bugs.launchpad.net/gtg/+filebug\n"
        "\n"
        "We need you to make this software better. Any contribution, any "
        "idea is welcome!\n"
        "\n"
        "If you have some trouble with GTG, we might be able to help you or "
        "to solve your problem really quickly.")
    task6 = addtask(doc, "5@1", title6, text6, [])
    root.appendChild(task6)

    # Task 6@1: Learn how to use the QuickAdd Entry
    title7 = _("Learn How To Use The Quick Add Entry")
    text7 = _(
        "The Quick Add Entry is the fastest way to create a new task. Use "
        "the check box in the View menu to enable and disable the entry field"
        ".\n\n"
        "To add a task simply type its title in the entry and press Enter. "
        "The task will be created and selected in the task browser. If a tag "
        "is selected in the Tags Sidebar, it will be applied to the task you "
        "created.\n\n"
        "You can also create a task in the Quick Add Entry and at the same "
        "time specify its tags, due and defer date. Follow these format rules"
        ":\n\n"
        "tags:tag1,tag2,tag3\n"
        "\n"
        "Using this you can apply as many tags as you wish using comma as "
        "separator. Note that any word in the title that begins with &quot;"
        "@&quot; will also be interpreted as a tag!\n"
        "\n"
        "due:date\n"
        "defer:date\n"
        "\n"
        "Using this you can apply a due date or a defer date. Dates can be "
        "formated as per your locale or yyyy-mm-dd (for example 2012-04-01) "
        "or yyyymmdd (20120401) "
        "or mmdd (0401 - the year being implicitly the current one) or today,"
        " tomorrow or a weekday name (due:monday means due next Monday). "
        "Dates which are added in this way will not appear in the task title."
        "\n\n"
        "Examples:\n"
        "\n"
        "buy stationary tags:purchases,office due:20120330 defer:tuesday\n"
        "\n"
        "The above example tells GTG to create a new task with the title "
        "&quot;buy stationary&quot;, under the tags &quot;purchases&quot; "
        "and &quot;office&quot;, with the due date March 30, 2012 and the "
        "start date next Tuesday.\n"
        "\n"
        "call mum tags:family,calls due:sunday defer:tomorrow\n"
        "\n"
        "The above example tells GTG to create a new task with the title "
        "&quot;call mum&quot;, under the tags &quot;family&quot; and "
        "&quot;calls&quot;, with the due date next Sunday and the start "
        "date tomorrow.")
    task7 = addtask(doc, "6@1", title7, text7, [])
    root.appendChild(task7)

    # Task 7@1: Learn How To Use Synchonization Services
    title8 = _("Learn How To Use Synchronization Services")
    text8 = _(
        "Synchronization Services allow GTG to synchronize (meaning to have "
        "access or to import) tasks, notes or bugs from other sites or "
        "services like Launchpad, Remember the Milk, Tomboy, etc.\n"
        "\n"
        "This can incredibly useful if, for instance, you want to access your"
        " tasks on several instances of GTG running on separate computers, or"
        " if you want to edit your tasks using an online service. GTG can "
        "also import tasks from specific sites like launchpad for instance, "
        "which allows you to manage the bug reports you're working on in GTG!"
        "\n\n"
        "To use Synchronization Services, use the Edit menu, and select "
        "&quot;Synchronization Services&quot;. You will then have the "
        "possibility to select among several online or local services "
        "from/to where you can import or export your tasks.\n"
        "\n"
        "If you want to know more about Synchronization Services, you can "
        "read more about them by in the dedicated documentation in GTG's help"
        " (use the Help menu or press F1 to get access to it).")
    task8 = addtask(doc, "7@1", title8, text8, [])
    root.appendChild(task8)

    # Task 8@1: Learn How To Search For Tasks
    title9 = _("Learn How To Search For Tasks")
    text9 = _(
        "To help you to find specific tasks more easily, GTG allows you to "
        "search for tasks based on their content.\n"
        "\n"
        "Searching for tasks is really easy: just type the words you are "
        "looking for in the Quick Add Entry, and select &quot;Search&quot; in"
        " the menu that will appear automatically.\n"
        "\n"
        "GTG stores your searches in the sidebar, under the &quot;Search"
        "&quot; section. You can thus always go back to a previous search "
        "need it. Search results are updated automatically, so you always get"
        " all the tasks matching your search request.\n"
        "\n"
        "GTG also saves all the search requests you have made until you "
        "explicitely delete them (which you can do by right-clicking on them "
        "and selecting &quot;Delete&quot;). That allows you to safely quit "
        "GTG without loosing your search requests. This can be very useful "
        "when you use the search features to identify specific tasks "
        "regularly!\n\n"
        "GTG search feature is really powerful and accept many parameters "
        "that allows you to search for very specific tasks. For instance, "
        "using the search query &quot;@errands !today&quot;, you can search "
        "for tasks with the @errands tag that must be done today. To learn "
        "more about those search query parameters, you can read the "
        "documentation available in GTG's help (press F1 or use the Help menu"
        " to get access to it).")
    task9 = addtask(doc, "8@1", title9, text9, [])
    root.appendChild(task9)

    return doc
def addtask(doc, ze_id, title, text, children):
    """Build one first-run <task> element and return it.

    Creates a "task" node in *doc*, marks it Active, tags it with every
    tag mentioned in *text*, then attaches title, subtask and content
    child nodes.  The caller is responsible for appending the returned
    element to the document root.
    """
    task_node = doc.createElement("task")
    task_node.setAttribute("id", ze_id)
    task_node.setAttribute("status", "Active")
    # Tags are harvested from the body text itself (e.g. "@tutorial").
    task_node.setAttribute("tags", ",".join(extract_tags_from_text(text)))
    cleanxml.addTextNode(doc, task_node, "title", title)
    for subtask_id in children:
        cleanxml.addTextNode(doc, task_node, "subtask", subtask_id)
    cleanxml.addTextNode(doc, task_node, "content", text)
    return task_node
|
shtrom/gtg
|
GTG/core/firstrun_tasks.py
|
Python
|
gpl-3.0
| 17,480
|
[
"VisIt"
] |
603bb1bee344695b11505c04a03d3c4c7fb00ee40e8850d48e1ced404ca2df07
|
#
# calculation of natural product-likeness as described in:
#
# Natural Product-likeness Score and Its Application for Prioritization of Compound Libraries
# Peter Ertl, Silvio Roggo, and Ansgar Schuffenhauer
# Journal of Chemical Information and Modeling, 48, 68-74 (2008)
# http://pubs.acs.org/doi/abs/10.1021/ci700286x
#
# for the training of this model only openly available data have been used
# ~50,000 natural products collected from various open databases
# ~1 million drug-like molecules from ZINC as a "non-NP background"
#
# peter ertl, august 2015
#
from __future__ import print_function
from rdkit import Chem
from rdkit.Chem import rdMolDescriptors
import sys,math,gzip,pickle
import os.path
def readNPModel(filename=os.path.join(os.path.dirname(__file__), 'publicnp.model.gz')):
    """Load the pickled natural-product fragment-score model.

    Parameters
    ----------
    filename : str
        Path to a gzip-compressed pickle; defaults to the publicnp model
        shipped next to this module.

    Returns
    -------
    The unpickled score table (maps Morgan fingerprint bits to score
    contributions, as consumed by scoreMol).
    """
    sys.stderr.write("reading NP model ...\n")
    # Use a context manager so the gzip handle is closed deterministically
    # (the previous version leaked the open file object).
    with gzip.open(filename) as f:
        fscore = pickle.load(f)
    sys.stderr.write("model in\n")
    return fscore
def scoreMol(mol, fscore):
    """Return the natural-product-likeness score of *mol*.

    Sums the per-bit contributions (looked up in *fscore*) of the
    molecule's Morgan radius-2 fingerprint bits, normalises by the atom
    count, and logarithmically damps scores outside [-4, 4].

    Raises ValueError if *mol* is None.
    """
    if mol is None:
        raise ValueError('invalid molecule')
    bits = rdMolDescriptors.GetMorganFingerprint(mol, 2).GetNonzeroElements()
    # Average per-atom contribution of the recognised fingerprint bits;
    # unknown bits contribute 0.
    score = sum(fscore.get(bit, 0) for bit in bits) / float(mol.GetNumAtoms())
    # Damp exotic outliers so a handful of rare fragments cannot dominate.
    if score > 4:
        score = 4. + math.log10(score - 4. + 1.)
    if score < -4:
        score = -4. - math.log10(-4. - score + 1.)
    return score
def processMols(fscore, suppl):
    """Score every molecule from *suppl* and print TSV rows to stdout.

    Each parsable molecule produces one line "smiles<TAB>name<TAB>score"
    (score formatted to 3 decimals); entries that failed to parse (None)
    are skipped.  Progress messages go to stderr.

    Parameters
    ----------
    fscore : mapping
        Fragment score table as returned by readNPModel.
    suppl : iterable
        Iterable of RDKit molecules (e.g. a SmilesMolSupplier); may
        yield None for unparsable records.
    """
    sys.stderr.write("calculating ...\n")
    # Removed the unused `count = {}` local and the unused enumerate index
    # that the original carried along.
    n = 0  # molecules actually scored (None entries excluded)
    for m in suppl:
        if m is None:
            continue
        n += 1
        score = "%.3f" % scoreMol(m, fscore)
        smiles = Chem.MolToSmiles(m, True)
        name = m.GetProp('_Name')
        print(smiles + "\t" + name + "\t" + score)
    sys.stderr.write("finished, " + str(n) + " molecules processed\n")
# CLI entry point: argv[1] is a headerless SMILES file (column 0 = SMILES,
# column 1 = name).  Scored molecules are written to stdout as TSV,
# progress messages to stderr.
if __name__=='__main__':
    fscore=readNPModel() # fills fscore
    suppl = Chem.SmilesMolSupplier(sys.argv[1],smilesColumn=0,nameColumn=1,titleLine=False)
    processMols(fscore,suppl)
#
# Copyright (c) 2015, Novartis Institutes for BioMedical Research Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Novartis Institutes for BioMedical Research Inc.
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
|
adalke/rdkit
|
Contrib/NP_Score/npscorer.py
|
Python
|
bsd-3-clause
| 3,652
|
[
"RDKit"
] |
f6e386473050ef3ad10b61c4e53870c062adda91d54a1f90d8d7d126f600b774
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from abc import ABCMeta
from pyspark import keyword_only, since
from pyspark.ml import Predictor, PredictionModel
from pyspark.ml.base import _PredictorParams
from pyspark.ml.param.shared import HasFeaturesCol, HasLabelCol, HasPredictionCol, HasWeightCol, \
Param, Params, TypeConverters, HasMaxIter, HasTol, HasFitIntercept, HasAggregationDepth, \
HasMaxBlockSizeInMB, HasRegParam, HasSolver, HasStepSize, HasSeed, HasElasticNetParam, \
HasStandardization, HasLoss, HasVarianceCol
from pyspark.ml.tree import _DecisionTreeModel, _DecisionTreeParams, \
_TreeEnsembleModel, _RandomForestParams, _GBTParams, _TreeRegressorParams
from pyspark.ml.util import JavaMLWritable, JavaMLReadable, HasTrainingSummary, \
GeneralJavaMLWritable
from pyspark.ml.wrapper import JavaEstimator, JavaModel, \
JavaPredictor, JavaPredictionModel, JavaWrapper
from pyspark.ml.common import inherit_doc
from pyspark.sql import DataFrame
# Public API of this module: each estimator is paired with the model class
# its fit() produces (plus the summary classes for GLM / linear regression).
__all__ = ['AFTSurvivalRegression', 'AFTSurvivalRegressionModel',
           'DecisionTreeRegressor', 'DecisionTreeRegressionModel',
           'GBTRegressor', 'GBTRegressionModel',
           'GeneralizedLinearRegression', 'GeneralizedLinearRegressionModel',
           'GeneralizedLinearRegressionSummary', 'GeneralizedLinearRegressionTrainingSummary',
           'IsotonicRegression', 'IsotonicRegressionModel',
           'LinearRegression', 'LinearRegressionModel',
           'LinearRegressionSummary', 'LinearRegressionTrainingSummary',
           'RandomForestRegressor', 'RandomForestRegressionModel',
           'FMRegressor', 'FMRegressionModel']
# Abstract marker base: pairs the Predictor fit/transform API with the
# shared predictor params; concrete regressors subclass this.
class Regressor(Predictor, _PredictorParams, metaclass=ABCMeta):
    """
    Regressor for regression tasks.
    .. versionadded:: 3.0.0
    """
    pass
# Abstract marker base for fitted regression models (the output of a
# Regressor's fit()); carries the shared predictor params.
class RegressionModel(PredictionModel, _PredictorParams, metaclass=ABCMeta):
    """
    Model produced by a ``Regressor``.
    .. versionadded:: 3.0.0
    """
    pass
# Internal marker base combining the Regressor API with the JVM-backed
# predictor wrapper; all Scala-implemented regressors derive from this.
class _JavaRegressor(Regressor, JavaPredictor, metaclass=ABCMeta):
    """
    Java Regressor for regression tasks.
    .. versionadded:: 3.0.0
    """
    pass
# Internal marker base for JVM-backed fitted regression models; mixed in
# with pyspark.ml.JavaModel by the concrete model classes below.
class _JavaRegressionModel(RegressionModel, JavaPredictionModel, metaclass=ABCMeta):
    """
    Java Model produced by a ``_JavaRegressor``.
    To be mixed in with :class:`pyspark.ml.JavaModel`
    .. versionadded:: 3.0.0
    """
    pass
class _LinearRegressionParams(_PredictorParams, HasRegParam, HasElasticNetParam, HasMaxIter,
                              HasTol, HasFitIntercept, HasStandardization, HasWeightCol, HasSolver,
                              HasAggregationDepth, HasLoss, HasMaxBlockSizeInMB):
    """
    Params for :py:class:`LinearRegression` and :py:class:`LinearRegressionModel`.
    .. versionadded:: 3.0.0
    """
    # Params declared here (rather than inherited) because their doc strings
    # and valid values are specific to linear regression.
    solver = Param(Params._dummy(), "solver", "The solver algorithm for optimization. Supported " +
                   "options: auto, normal, l-bfgs.", typeConverter=TypeConverters.toString)
    loss = Param(Params._dummy(), "loss", "The loss function to be optimized. Supported " +
                 "options: squaredError, huber.", typeConverter=TypeConverters.toString)
    # epsilon only applies when loss="huber" (M-estimator shape parameter).
    epsilon = Param(Params._dummy(), "epsilon", "The shape parameter to control the amount of " +
                    "robustness. Must be > 1.0. Only valid when loss is huber",
                    typeConverter=TypeConverters.toFloat)
    def __init__(self, *args):
        super(_LinearRegressionParams, self).__init__(*args)
        # Defaults mirror the Scala implementation's defaults.
        self._setDefault(maxIter=100, regParam=0.0, tol=1e-6, loss="squaredError", epsilon=1.35,
                         maxBlockSizeInMB=0.0)
    @since("2.3.0")
    def getEpsilon(self):
        """
        Gets the value of epsilon or its default value.
        """
        return self.getOrDefault(self.epsilon)
@inherit_doc
class LinearRegression(_JavaRegressor, _LinearRegressionParams, JavaMLWritable, JavaMLReadable):
    """
    Linear regression.
    The learning objective is to minimize the specified loss function, with regularization.
    This supports two kinds of loss:
    * squaredError (a.k.a squared loss)
    * huber (a hybrid of squared error for relatively small errors and absolute error for \
    relatively large ones, and we estimate the scale parameter from training data)
    This supports multiple types of regularization:
    * none (a.k.a. ordinary least squares)
    * L2 (ridge regression)
    * L1 (Lasso)
    * L2 + L1 (elastic net)
    .. versionadded:: 1.4.0
    Notes
    -----
    Fitting with huber loss only supports none and L2 regularization.
    Examples
    --------
    >>> from pyspark.ml.linalg import Vectors
    >>> df = spark.createDataFrame([
    ...     (1.0, 2.0, Vectors.dense(1.0)),
    ...     (0.0, 2.0, Vectors.sparse(1, [], []))], ["label", "weight", "features"])
    >>> lr = LinearRegression(regParam=0.0, solver="normal", weightCol="weight")
    >>> lr.setMaxIter(5)
    LinearRegression...
    >>> lr.getMaxIter()
    5
    >>> lr.setRegParam(0.1)
    LinearRegression...
    >>> lr.getRegParam()
    0.1
    >>> lr.setRegParam(0.0)
    LinearRegression...
    >>> model = lr.fit(df)
    >>> model.setFeaturesCol("features")
    LinearRegressionModel...
    >>> model.setPredictionCol("newPrediction")
    LinearRegressionModel...
    >>> model.getMaxIter()
    5
    >>> model.getMaxBlockSizeInMB()
    0.0
    >>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
    >>> abs(model.predict(test0.head().features) - (-1.0)) < 0.001
    True
    >>> abs(model.transform(test0).head().newPrediction - (-1.0)) < 0.001
    True
    >>> abs(model.coefficients[0] - 1.0) < 0.001
    True
    >>> abs(model.intercept - 0.0) < 0.001
    True
    >>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
    >>> abs(model.transform(test1).head().newPrediction - 1.0) < 0.001
    True
    >>> lr.setParams("vector")
    Traceback (most recent call last):
        ...
    TypeError: Method setParams forces keyword arguments.
    >>> lr_path = temp_path + "/lr"
    >>> lr.save(lr_path)
    >>> lr2 = LinearRegression.load(lr_path)
    >>> lr2.getMaxIter()
    5
    >>> model_path = temp_path + "/lr_model"
    >>> model.save(model_path)
    >>> model2 = LinearRegressionModel.load(model_path)
    >>> model.coefficients[0] == model2.coefficients[0]
    True
    >>> model.intercept == model2.intercept
    True
    >>> model.transform(test0).take(1) == model2.transform(test0).take(1)
    True
    >>> model.numFeatures
    1
    >>> model.write().format("pmml").save(model_path + "_2")
    """
    @keyword_only
    def __init__(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
                 maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True,
                 standardization=True, solver="auto", weightCol=None, aggregationDepth=2,
                 loss="squaredError", epsilon=1.35, maxBlockSizeInMB=0.0):
        """
        __init__(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
                 maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True, \
                 standardization=True, solver="auto", weightCol=None, aggregationDepth=2, \
                 loss="squaredError", epsilon=1.35, maxBlockSizeInMB=0.0)
        """
        super(LinearRegression, self).__init__()
        # Wrap the Scala estimator; all params are forwarded via setParams.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.regression.LinearRegression", self.uid)
        kwargs = self._input_kwargs
        self.setParams(**kwargs)
    @keyword_only
    @since("1.4.0")
    def setParams(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
                  maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True,
                  standardization=True, solver="auto", weightCol=None, aggregationDepth=2,
                  loss="squaredError", epsilon=1.35, maxBlockSizeInMB=0.0):
        """
        setParams(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
                  maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True, \
                  standardization=True, solver="auto", weightCol=None, aggregationDepth=2, \
                  loss="squaredError", epsilon=1.35, maxBlockSizeInMB=0.0)
        Sets params for linear regression.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)
    def _create_model(self, java_model):
        # Called by fit() to wrap the trained JVM model.
        return LinearRegressionModel(java_model)
    @since("2.3.0")
    def setEpsilon(self, value):
        """
        Sets the value of :py:attr:`epsilon`.
        """
        return self._set(epsilon=value)
    def setMaxIter(self, value):
        """
        Sets the value of :py:attr:`maxIter`.
        """
        return self._set(maxIter=value)
    def setRegParam(self, value):
        """
        Sets the value of :py:attr:`regParam`.
        """
        return self._set(regParam=value)
    def setTol(self, value):
        """
        Sets the value of :py:attr:`tol`.
        """
        return self._set(tol=value)
    def setElasticNetParam(self, value):
        """
        Sets the value of :py:attr:`elasticNetParam`.
        """
        return self._set(elasticNetParam=value)
    def setFitIntercept(self, value):
        """
        Sets the value of :py:attr:`fitIntercept`.
        """
        return self._set(fitIntercept=value)
    def setStandardization(self, value):
        """
        Sets the value of :py:attr:`standardization`.
        """
        return self._set(standardization=value)
    def setWeightCol(self, value):
        """
        Sets the value of :py:attr:`weightCol`.
        """
        return self._set(weightCol=value)
    def setSolver(self, value):
        """
        Sets the value of :py:attr:`solver`.
        """
        return self._set(solver=value)
    def setAggregationDepth(self, value):
        """
        Sets the value of :py:attr:`aggregationDepth`.
        """
        return self._set(aggregationDepth=value)
    def setLoss(self, value):
        """
        Sets the value of :py:attr:`loss`.
        """
        # BUG FIX: previously passed lossType=value, but this class defines
        # the param `loss` (not `lossType`), so setLoss() failed at runtime
        # instead of setting the loss function.
        return self._set(loss=value)
    @since("3.1.0")
    def setMaxBlockSizeInMB(self, value):
        """
        Sets the value of :py:attr:`maxBlockSizeInMB`.
        """
        return self._set(maxBlockSizeInMB=value)
class LinearRegressionModel(_JavaRegressionModel, _LinearRegressionParams, GeneralJavaMLWritable,
                            JavaMLReadable, HasTrainingSummary):
    """
    Model fitted by :class:`LinearRegression`.
    .. versionadded:: 1.4.0
    """
    # All accessors below delegate to the wrapped JVM model via _call_java.
    @property
    @since("2.0.0")
    def coefficients(self):
        """
        Model coefficients.
        """
        return self._call_java("coefficients")
    @property
    @since("1.4.0")
    def intercept(self):
        """
        Model intercept.
        """
        return self._call_java("intercept")
    @property
    @since("2.3.0")
    def scale(self):
        r"""
        The value by which :math:`\|y - X'w\|` is scaled down when loss is "huber", otherwise 1.0.
        """
        return self._call_java("scale")
    @property
    @since("2.0.0")
    def summary(self):
        """
        Gets summary (e.g. residuals, mse, r-squared ) of model on
        training set. An exception is thrown if
        `trainingSummary is None`.
        """
        if self.hasSummary:
            # Wrap the JVM-side training summary (only present right after fit()).
            return LinearRegressionTrainingSummary(super(LinearRegressionModel, self).summary)
        else:
            raise RuntimeError("No training summary available for this %s" %
                               self.__class__.__name__)
    def evaluate(self, dataset):
        """
        Evaluates the model on a test dataset.
        .. versionadded:: 2.0.0
        Parameters
        ----------
        dataset : :py:class:`pyspark.sql.DataFrame`
            Test dataset to evaluate model on, where dataset is an
            instance of :py:class:`pyspark.sql.DataFrame`
        """
        if not isinstance(dataset, DataFrame):
            raise ValueError("dataset must be a DataFrame but got %s." % type(dataset))
        java_lr_summary = self._call_java("evaluate", dataset)
        return LinearRegressionSummary(java_lr_summary)
class LinearRegressionSummary(JavaWrapper):
    """
    Linear regression results evaluated on a dataset.
    .. versionadded:: 2.0.0
    """
    # Read-only view over a JVM-side LinearRegressionSummary; every property
    # forwards to the corresponding Scala accessor via _call_java.
    @property
    @since("2.0.0")
    def predictions(self):
        """
        Dataframe outputted by the model's `transform` method.
        """
        return self._call_java("predictions")
    @property
    @since("2.0.0")
    def predictionCol(self):
        """
        Field in "predictions" which gives the predicted value of
        the label at each instance.
        """
        return self._call_java("predictionCol")
    @property
    @since("2.0.0")
    def labelCol(self):
        """
        Field in "predictions" which gives the true label of each
        instance.
        """
        return self._call_java("labelCol")
    @property
    @since("2.0.0")
    def featuresCol(self):
        """
        Field in "predictions" which gives the features of each instance
        as a vector.
        """
        return self._call_java("featuresCol")
    @property
    @since("2.0.0")
    def explainedVariance(self):
        r"""
        Returns the explained variance regression score.
        explainedVariance = :math:`1 - \frac{variance(y - \hat{y})}{variance(y)}`
        Notes
        -----
        This ignores instance weights (setting all to 1.0) from
        `LinearRegression.weightCol`. This will change in later Spark
        versions.
        For additional information see
        `Explained variation on Wikipedia \
        <http://en.wikipedia.org/wiki/Explained_variation>`_
        """
        return self._call_java("explainedVariance")
    @property
    @since("2.0.0")
    def meanAbsoluteError(self):
        """
        Returns the mean absolute error, which is a risk function
        corresponding to the expected value of the absolute error
        loss or l1-norm loss.
        Notes
        -----
        This ignores instance weights (setting all to 1.0) from
        `LinearRegression.weightCol`. This will change in later Spark
        versions.
        """
        return self._call_java("meanAbsoluteError")
    @property
    @since("2.0.0")
    def meanSquaredError(self):
        """
        Returns the mean squared error, which is a risk function
        corresponding to the expected value of the squared error
        loss or quadratic loss.
        Notes
        -----
        This ignores instance weights (setting all to 1.0) from
        `LinearRegression.weightCol`. This will change in later Spark
        versions.
        """
        return self._call_java("meanSquaredError")
    @property
    @since("2.0.0")
    def rootMeanSquaredError(self):
        """
        Returns the root mean squared error, which is defined as the
        square root of the mean squared error.
        Notes
        -----
        This ignores instance weights (setting all to 1.0) from
        `LinearRegression.weightCol`. This will change in later Spark
        versions.
        """
        return self._call_java("rootMeanSquaredError")
    @property
    @since("2.0.0")
    def r2(self):
        """
        Returns R^2, the coefficient of determination.
        Notes
        -----
        This ignores instance weights (setting all to 1.0) from
        `LinearRegression.weightCol`. This will change in later Spark
        versions.
        See also `Wikipedia coefficient of determination \
        <http://en.wikipedia.org/wiki/Coefficient_of_determination>`_
        """
        return self._call_java("r2")
    @property
    @since("2.4.0")
    def r2adj(self):
        """
        Returns Adjusted R^2, the adjusted coefficient of determination.
        Notes
        -----
        This ignores instance weights (setting all to 1.0) from
        `LinearRegression.weightCol`. This will change in later Spark versions.
        `Wikipedia coefficient of determination, Adjusted R^2 \
        <https://en.wikipedia.org/wiki/Coefficient_of_determination#Adjusted_R2>`_
        """
        return self._call_java("r2adj")
    @property
    @since("2.0.0")
    def residuals(self):
        """
        Residuals (label - predicted value)
        """
        return self._call_java("residuals")
    @property
    @since("2.0.0")
    def numInstances(self):
        """
        Number of instances in DataFrame predictions
        """
        return self._call_java("numInstances")
    @property
    @since("2.2.0")
    def degreesOfFreedom(self):
        """
        Degrees of freedom.
        """
        return self._call_java("degreesOfFreedom")
    @property
    @since("2.0.0")
    def devianceResiduals(self):
        """
        The weighted residuals, the usual residuals rescaled by the
        square root of the instance weights.
        """
        return self._call_java("devianceResiduals")
    # The three statistics below are only populated by the "normal" solver;
    # accessing them after l-bfgs training raises on the JVM side.
    @property
    def coefficientStandardErrors(self):
        """
        Standard error of estimated coefficients and intercept.
        This value is only available when using the "normal" solver.
        If :py:attr:`LinearRegression.fitIntercept` is set to True,
        then the last element returned corresponds to the intercept.
        .. versionadded:: 2.0.0
        See Also
        --------
        LinearRegression.solver
        """
        return self._call_java("coefficientStandardErrors")
    @property
    def tValues(self):
        """
        T-statistic of estimated coefficients and intercept.
        This value is only available when using the "normal" solver.
        If :py:attr:`LinearRegression.fitIntercept` is set to True,
        then the last element returned corresponds to the intercept.
        .. versionadded:: 2.0.0
        See Also
        --------
        LinearRegression.solver
        """
        return self._call_java("tValues")
    @property
    def pValues(self):
        """
        Two-sided p-value of estimated coefficients and intercept.
        This value is only available when using the "normal" solver.
        If :py:attr:`LinearRegression.fitIntercept` is set to True,
        then the last element returned corresponds to the intercept.
        .. versionadded:: 2.0.0
        See Also
        --------
        LinearRegression.solver
        """
        return self._call_java("pValues")
@inherit_doc
class LinearRegressionTrainingSummary(LinearRegressionSummary):
    """
    Linear regression training results. Currently, the training summary ignores the
    training weights except for the objective trace.
    .. versionadded:: 2.0.0
    """
    # Extends the evaluation summary with optimizer-trace information that
    # only exists for the training run (and only for the l-bfgs solver).
    @property
    def objectiveHistory(self):
        """
        Objective function (scaled loss + regularization) at each
        iteration.
        This value is only available when using the "l-bfgs" solver.
        .. versionadded:: 2.0.0
        See Also
        --------
        LinearRegression.solver
        """
        return self._call_java("objectiveHistory")
    @property
    def totalIterations(self):
        """
        Number of training iterations until termination.
        This value is only available when using the "l-bfgs" solver.
        .. versionadded:: 2.0.0
        See Also
        --------
        LinearRegression.solver
        """
        return self._call_java("totalIterations")
class _IsotonicRegressionParams(HasFeaturesCol, HasLabelCol, HasPredictionCol, HasWeightCol):
    """
    Params for :py:class:`IsotonicRegression` and :py:class:`IsotonicRegressionModel`.
    .. versionadded:: 3.0.0
    """
    # BUG FIX: the two doc-string fragments were concatenated without a
    # separating space, producing "...(true) orantitonic/decreasing..." in
    # explainParams() output.
    isotonic = Param(
        Params._dummy(), "isotonic",
        "whether the output sequence should be isotonic/increasing (true) or " +
        "antitonic/decreasing (false).", typeConverter=TypeConverters.toBoolean)
    featureIndex = Param(
        Params._dummy(), "featureIndex",
        "The index of the feature if featuresCol is a vector column, no effect otherwise.",
        typeConverter=TypeConverters.toInt)
    def __init__(self, *args):
        super(_IsotonicRegressionParams, self).__init__(*args)
        self._setDefault(isotonic=True, featureIndex=0)
    def getIsotonic(self):
        """
        Gets the value of isotonic or its default value.
        """
        return self.getOrDefault(self.isotonic)
    def getFeatureIndex(self):
        """
        Gets the value of featureIndex or its default value.
        """
        return self.getOrDefault(self.featureIndex)
@inherit_doc
class IsotonicRegression(JavaEstimator, _IsotonicRegressionParams, HasWeightCol,
                         JavaMLWritable, JavaMLReadable):
    # NOTE(review): HasWeightCol is listed here although _IsotonicRegressionParams
    # already inherits it; harmless (MRO dedupes) but redundant.
    """
    Currently implemented using parallelized pool adjacent violators algorithm.
    Only univariate (single feature) algorithm supported.
    .. versionadded:: 1.6.0
    Examples
    --------
    >>> from pyspark.ml.linalg import Vectors
    >>> df = spark.createDataFrame([
    ...     (1.0, Vectors.dense(1.0)),
    ...     (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
    >>> ir = IsotonicRegression()
    >>> model = ir.fit(df)
    >>> model.setFeaturesCol("features")
    IsotonicRegressionModel...
    >>> model.numFeatures
    1
    >>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
    >>> model.transform(test0).head().prediction
    0.0
    >>> model.predict(test0.head().features[model.getFeatureIndex()])
    0.0
    >>> model.boundaries
    DenseVector([0.0, 1.0])
    >>> ir_path = temp_path + "/ir"
    >>> ir.save(ir_path)
    >>> ir2 = IsotonicRegression.load(ir_path)
    >>> ir2.getIsotonic()
    True
    >>> model_path = temp_path + "/ir_model"
    >>> model.save(model_path)
    >>> model2 = IsotonicRegressionModel.load(model_path)
    >>> model.boundaries == model2.boundaries
    True
    >>> model.predictions == model2.predictions
    True
    >>> model.transform(test0).take(1) == model2.transform(test0).take(1)
    True
    """
    @keyword_only
    def __init__(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
                 weightCol=None, isotonic=True, featureIndex=0):
        """
        __init__(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
                 weightCol=None, isotonic=True, featureIndex=0):
        """
        super(IsotonicRegression, self).__init__()
        # Wrap the Scala estimator; params are forwarded via setParams.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.regression.IsotonicRegression", self.uid)
        kwargs = self._input_kwargs
        self.setParams(**kwargs)
    @keyword_only
    def setParams(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
                  weightCol=None, isotonic=True, featureIndex=0):
        """
        setParams(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
                  weightCol=None, isotonic=True, featureIndex=0):
        Set the params for IsotonicRegression.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)
    def _create_model(self, java_model):
        # Called by fit() to wrap the trained JVM model.
        return IsotonicRegressionModel(java_model)
    def setIsotonic(self, value):
        """
        Sets the value of :py:attr:`isotonic`.
        """
        return self._set(isotonic=value)
    def setFeatureIndex(self, value):
        """
        Sets the value of :py:attr:`featureIndex`.
        """
        return self._set(featureIndex=value)
    @since("1.6.0")
    def setFeaturesCol(self, value):
        """
        Sets the value of :py:attr:`featuresCol`.
        """
        return self._set(featuresCol=value)
    @since("1.6.0")
    def setPredictionCol(self, value):
        """
        Sets the value of :py:attr:`predictionCol`.
        """
        return self._set(predictionCol=value)
    @since("1.6.0")
    def setLabelCol(self, value):
        """
        Sets the value of :py:attr:`labelCol`.
        """
        return self._set(labelCol=value)
    @since("1.6.0")
    def setWeightCol(self, value):
        """
        Sets the value of :py:attr:`weightCol`.
        """
        return self._set(weightCol=value)
class IsotonicRegressionModel(JavaModel, _IsotonicRegressionParams, JavaMLWritable,
                              JavaMLReadable):
    """
    Model fitted by :class:`IsotonicRegression`.
    .. versionadded:: 1.6.0
    """
    @since("3.0.0")
    def setFeaturesCol(self, value):
        """
        Sets the value of :py:attr:`featuresCol`.
        """
        return self._set(featuresCol=value)
    @since("3.0.0")
    def setPredictionCol(self, value):
        """
        Sets the value of :py:attr:`predictionCol`.
        """
        return self._set(predictionCol=value)
    def setFeatureIndex(self, value):
        """
        Sets the value of :py:attr:`featureIndex`.
        """
        return self._set(featureIndex=value)
    # boundaries/predictions together define the fitted piecewise-linear
    # function: predictions[i] is the value at boundaries[i].
    @property
    @since("1.6.0")
    def boundaries(self):
        """
        Boundaries in increasing order for which predictions are known.
        """
        return self._call_java("boundaries")
    @property
    @since("1.6.0")
    def predictions(self):
        """
        Predictions associated with the boundaries at the same index, monotone because of isotonic
        regression.
        """
        return self._call_java("predictions")
    @property
    @since("3.0.0")
    def numFeatures(self):
        """
        Returns the number of features the model was trained on. If unknown, returns -1
        """
        return self._call_java("numFeatures")
    @since("3.0.0")
    def predict(self, value):
        """
        Predict label for the given features.
        """
        return self._call_java("predict", value)
class _DecisionTreeRegressorParams(_DecisionTreeParams, _TreeRegressorParams, HasVarianceCol):
    """
    Params for :py:class:`DecisionTreeRegressor` and :py:class:`DecisionTreeRegressionModel`.
    .. versionadded:: 3.0.0
    """
    def __init__(self, *args):
        super(_DecisionTreeRegressorParams, self).__init__(*args)
        # Defaults mirror the Scala implementation; "variance" is the only
        # impurity supported for regression trees.
        self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                         maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
                         impurity="variance", leafCol="", minWeightFractionPerNode=0.0)
@inherit_doc
class DecisionTreeRegressor(_JavaRegressor, _DecisionTreeRegressorParams, JavaMLWritable,
JavaMLReadable):
"""
`Decision tree <http://en.wikipedia.org/wiki/Decision_tree_learning>`_
learning algorithm for regression.
It supports both continuous and categorical features.
.. versionadded:: 1.4.0
Examples
--------
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame([
... (1.0, Vectors.dense(1.0)),
... (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
>>> dt = DecisionTreeRegressor(maxDepth=2)
>>> dt.setVarianceCol("variance")
DecisionTreeRegressor...
>>> model = dt.fit(df)
>>> model.getVarianceCol()
'variance'
>>> model.setLeafCol("leafId")
DecisionTreeRegressionModel...
>>> model.depth
1
>>> model.numNodes
3
>>> model.featureImportances
SparseVector(1, {0: 1.0})
>>> model.numFeatures
1
>>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
>>> model.predict(test0.head().features)
0.0
>>> result = model.transform(test0).head()
>>> result.prediction
0.0
>>> model.predictLeaf(test0.head().features)
0.0
>>> result.leafId
0.0
>>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
>>> model.transform(test1).head().prediction
1.0
>>> dtr_path = temp_path + "/dtr"
>>> dt.save(dtr_path)
>>> dt2 = DecisionTreeRegressor.load(dtr_path)
>>> dt2.getMaxDepth()
2
>>> model_path = temp_path + "/dtr_model"
>>> model.save(model_path)
>>> model2 = DecisionTreeRegressionModel.load(model_path)
>>> model.numNodes == model2.numNodes
True
>>> model.depth == model2.depth
True
>>> model.transform(test1).head().variance
0.0
>>> model.transform(test0).take(1) == model2.transform(test0).take(1)
True
>>> df3 = spark.createDataFrame([
... (1.0, 0.2, Vectors.dense(1.0)),
... (1.0, 0.8, Vectors.dense(1.0)),
... (0.0, 1.0, Vectors.sparse(1, [], []))], ["label", "weight", "features"])
>>> dt3 = DecisionTreeRegressor(maxDepth=2, weightCol="weight", varianceCol="variance")
>>> model3 = dt3.fit(df3)
>>> print(model3.toDebugString)
DecisionTreeRegressionModel...depth=1, numNodes=3...
"""
@keyword_only
def __init__(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="variance",
seed=None, varianceCol=None, weightCol=None, leafCol="",
minWeightFractionPerNode=0.0):
"""
__init__(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \
impurity="variance", seed=None, varianceCol=None, weightCol=None, \
leafCol="", minWeightFractionPerNode=0.0)
"""
super(DecisionTreeRegressor, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.regression.DecisionTreeRegressor", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
impurity="variance", seed=None, varianceCol=None, weightCol=None,
leafCol="", minWeightFractionPerNode=0.0):
"""
setParams(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \
impurity="variance", seed=None, varianceCol=None, weightCol=None, \
leafCol="", minWeightFractionPerNode=0.0)
Sets params for the DecisionTreeRegressor.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return DecisionTreeRegressionModel(java_model)
@since("1.4.0")
def setMaxDepth(self, value):
"""
Sets the value of :py:attr:`maxDepth`.
"""
return self._set(maxDepth=value)
@since("1.4.0")
def setMaxBins(self, value):
"""
Sets the value of :py:attr:`maxBins`.
"""
return self._set(maxBins=value)
@since("1.4.0")
def setMinInstancesPerNode(self, value):
"""
Sets the value of :py:attr:`minInstancesPerNode`.
"""
return self._set(minInstancesPerNode=value)
@since("3.0.0")
def setMinWeightFractionPerNode(self, value):
"""
Sets the value of :py:attr:`minWeightFractionPerNode`.
"""
return self._set(minWeightFractionPerNode=value)
@since("1.4.0")
def setMinInfoGain(self, value):
"""
Sets the value of :py:attr:`minInfoGain`.
"""
return self._set(minInfoGain=value)
@since("1.4.0")
def setMaxMemoryInMB(self, value):
"""
Sets the value of :py:attr:`maxMemoryInMB`.
"""
return self._set(maxMemoryInMB=value)
@since("1.4.0")
def setCacheNodeIds(self, value):
"""
Sets the value of :py:attr:`cacheNodeIds`.
"""
return self._set(cacheNodeIds=value)
@since("1.4.0")
def setImpurity(self, value):
"""
Sets the value of :py:attr:`impurity`.
"""
return self._set(impurity=value)
@since("1.4.0")
def setCheckpointInterval(self, value):
"""
Sets the value of :py:attr:`checkpointInterval`.
"""
return self._set(checkpointInterval=value)
def setSeed(self, value):
"""
Sets the value of :py:attr:`seed`.
"""
return self._set(seed=value)
    @since("3.0.0")
    def setWeightCol(self, value):
        """
        Sets the value of :py:attr:`weightCol`.

        Parameters
        ----------
        value
            New value for the ``weightCol`` param.
        """
        return self._set(weightCol=value)
    @since("2.0.0")
    def setVarianceCol(self, value):
        """
        Sets the value of :py:attr:`varianceCol`.

        Parameters
        ----------
        value
            New value for the ``varianceCol`` param.
        """
        return self._set(varianceCol=value)
@inherit_doc
class DecisionTreeRegressionModel(
    _JavaRegressionModel, _DecisionTreeModel, _DecisionTreeRegressorParams,
    JavaMLWritable, JavaMLReadable
):
    """
    Model fitted by :class:`DecisionTreeRegressor`.

    .. versionadded:: 1.4.0
    """

    @since("3.0.0")
    def setVarianceCol(self, value):
        """
        Sets the value of :py:attr:`varianceCol`.

        Parameters
        ----------
        value
            New value for the ``varianceCol`` param.
        """
        return self._set(varianceCol=value)

    @property
    def featureImportances(self):
        """
        Estimate of the importance of each feature.

        This generalizes the idea of "Gini" importance to other losses,
        following the explanation of Gini importance from "Random Forests" documentation
        by Leo Breiman and Adele Cutler, and following the implementation from scikit-learn.

        This feature importance is calculated as follows:
          - importance(feature j) = sum (over nodes which split on feature j) of the gain,
            where gain is scaled by the number of instances passing through node
          - Normalize importances for tree to sum to 1.

        .. versionadded:: 2.0.0

        Notes
        -----
        Feature importance for single decision trees can have high variance due to
        correlated predictor variables. Consider using a :py:class:`RandomForestRegressor`
        to determine feature importance instead.
        """
        # The computation happens on the JVM side; this only forwards the call.
        return self._call_java("featureImportances")
class _RandomForestRegressorParams(_RandomForestParams, _TreeRegressorParams):
    """
    Params for :py:class:`RandomForestRegressor` and :py:class:`RandomForestRegressionModel`.

    .. versionadded:: 3.0.0
    """

    def __init__(self, *args):
        """Initialize the mixin and register the shared default param values."""
        super(_RandomForestRegressorParams, self).__init__(*args)
        # Defaults live in this shared mixin so that estimator and model
        # report identical param defaults.
        self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                         maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
                         impurity="variance", subsamplingRate=1.0, numTrees=20,
                         featureSubsetStrategy="auto", leafCol="", minWeightFractionPerNode=0.0,
                         bootstrap=True)
@inherit_doc
class RandomForestRegressor(_JavaRegressor, _RandomForestRegressorParams, JavaMLWritable,
                            JavaMLReadable):
    """
    `Random Forest <http://en.wikipedia.org/wiki/Random_forest>`_
    learning algorithm for regression.
    It supports both continuous and categorical features.

    .. versionadded:: 1.4.0

    Examples
    --------
    >>> from numpy import allclose
    >>> from pyspark.ml.linalg import Vectors
    >>> df = spark.createDataFrame([
    ...     (1.0, Vectors.dense(1.0)),
    ...     (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
    >>> rf = RandomForestRegressor(numTrees=2, maxDepth=2)
    >>> rf.getMinWeightFractionPerNode()
    0.0
    >>> rf.setSeed(42)
    RandomForestRegressor...
    >>> model = rf.fit(df)
    >>> model.getBootstrap()
    True
    >>> model.getSeed()
    42
    >>> model.setLeafCol("leafId")
    RandomForestRegressionModel...
    >>> model.featureImportances
    SparseVector(1, {0: 1.0})
    >>> allclose(model.treeWeights, [1.0, 1.0])
    True
    >>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
    >>> model.predict(test0.head().features)
    0.0
    >>> model.predictLeaf(test0.head().features)
    DenseVector([0.0, 0.0])
    >>> result = model.transform(test0).head()
    >>> result.prediction
    0.0
    >>> result.leafId
    DenseVector([0.0, 0.0])
    >>> model.numFeatures
    1
    >>> model.trees
    [DecisionTreeRegressionModel...depth=..., DecisionTreeRegressionModel...]
    >>> model.getNumTrees
    2
    >>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
    >>> model.transform(test1).head().prediction
    0.5
    >>> rfr_path = temp_path + "/rfr"
    >>> rf.save(rfr_path)
    >>> rf2 = RandomForestRegressor.load(rfr_path)
    >>> rf2.getNumTrees()
    2
    >>> model_path = temp_path + "/rfr_model"
    >>> model.save(model_path)
    >>> model2 = RandomForestRegressionModel.load(model_path)
    >>> model.featureImportances == model2.featureImportances
    True
    >>> model.transform(test0).take(1) == model2.transform(test0).take(1)
    True
    """

    @keyword_only
    def __init__(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
                 maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                 maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
                 impurity="variance", subsamplingRate=1.0, seed=None, numTrees=20,
                 featureSubsetStrategy="auto", leafCol="", minWeightFractionPerNode=0.0,
                 weightCol=None, bootstrap=True):
        """
        __init__(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
                 maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
                 maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \
                 impurity="variance", subsamplingRate=1.0, seed=None, numTrees=20, \
                 featureSubsetStrategy="auto", leafCol="", minWeightFractionPerNode=0.0, \
                 weightCol=None, bootstrap=True)
        """
        super(RandomForestRegressor, self).__init__()
        # Create the Py4J wrapper around the Scala implementation.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.regression.RandomForestRegressor", self.uid)
        # _input_kwargs is populated by the @keyword_only decorator.
        kwargs = self._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    @since("1.4.0")
    def setParams(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
                  maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                  maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
                  impurity="variance", subsamplingRate=1.0, seed=None, numTrees=20,
                  featureSubsetStrategy="auto", leafCol="", minWeightFractionPerNode=0.0,
                  weightCol=None, bootstrap=True):
        """
        setParams(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
                  maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
                  maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \
                  impurity="variance", subsamplingRate=1.0, seed=None, numTrees=20, \
                  featureSubsetStrategy="auto", leafCol="", minWeightFractionPerNode=0.0, \
                  weightCol=None, bootstrap=True)
        Sets params for the RandomForestRegressor.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)

    def _create_model(self, java_model):
        """Wrap the fitted Py4J model object in a :class:`RandomForestRegressionModel`."""
        return RandomForestRegressionModel(java_model)

    def setMaxDepth(self, value):
        """
        Sets the value of :py:attr:`maxDepth`.
        """
        return self._set(maxDepth=value)

    def setMaxBins(self, value):
        """
        Sets the value of :py:attr:`maxBins`.
        """
        return self._set(maxBins=value)

    def setMinInstancesPerNode(self, value):
        """
        Sets the value of :py:attr:`minInstancesPerNode`.
        """
        return self._set(minInstancesPerNode=value)

    def setMinInfoGain(self, value):
        """
        Sets the value of :py:attr:`minInfoGain`.
        """
        return self._set(minInfoGain=value)

    def setMaxMemoryInMB(self, value):
        """
        Sets the value of :py:attr:`maxMemoryInMB`.
        """
        return self._set(maxMemoryInMB=value)

    def setCacheNodeIds(self, value):
        """
        Sets the value of :py:attr:`cacheNodeIds`.
        """
        return self._set(cacheNodeIds=value)

    @since("1.4.0")
    def setImpurity(self, value):
        """
        Sets the value of :py:attr:`impurity`.
        """
        return self._set(impurity=value)

    @since("1.4.0")
    def setNumTrees(self, value):
        """
        Sets the value of :py:attr:`numTrees`.
        """
        return self._set(numTrees=value)

    @since("3.0.0")
    def setBootstrap(self, value):
        """
        Sets the value of :py:attr:`bootstrap`.
        """
        return self._set(bootstrap=value)

    @since("1.4.0")
    def setSubsamplingRate(self, value):
        """
        Sets the value of :py:attr:`subsamplingRate`.
        """
        return self._set(subsamplingRate=value)

    @since("2.4.0")
    def setFeatureSubsetStrategy(self, value):
        """
        Sets the value of :py:attr:`featureSubsetStrategy`.
        """
        return self._set(featureSubsetStrategy=value)

    def setCheckpointInterval(self, value):
        """
        Sets the value of :py:attr:`checkpointInterval`.
        """
        return self._set(checkpointInterval=value)

    def setSeed(self, value):
        """
        Sets the value of :py:attr:`seed`.
        """
        return self._set(seed=value)

    @since("3.0.0")
    def setWeightCol(self, value):
        """
        Sets the value of :py:attr:`weightCol`.
        """
        return self._set(weightCol=value)

    @since("3.0.0")
    def setMinWeightFractionPerNode(self, value):
        """
        Sets the value of :py:attr:`minWeightFractionPerNode`.
        """
        return self._set(minWeightFractionPerNode=value)
class RandomForestRegressionModel(
    _JavaRegressionModel, _TreeEnsembleModel, _RandomForestRegressorParams,
    JavaMLWritable, JavaMLReadable
):
    """
    Model fitted by :class:`RandomForestRegressor`.

    .. versionadded:: 1.4.0
    """

    @property
    @since("2.0.0")
    def trees(self):
        """Trees in this ensemble. Warning: These have null parent Estimators."""
        # Each JVM tree handle is re-wrapped as a Python DecisionTreeRegressionModel.
        return [DecisionTreeRegressionModel(m) for m in list(self._call_java("trees"))]

    @property
    def featureImportances(self):
        """
        Estimate of the importance of each feature.

        Each feature's importance is the average of its importance across all trees in the
        ensemble. The importance vector is normalized to sum to 1. This method is suggested
        by Hastie et al.
        (Hastie, Tibshirani, Friedman. "The Elements of Statistical Learning, 2nd Edition." 2001.)
        and follows the implementation from scikit-learn.

        .. versionadded:: 2.0.0

        Examples
        --------
        DecisionTreeRegressionModel.featureImportances
        """
        return self._call_java("featureImportances")
class _GBTRegressorParams(_GBTParams, _TreeRegressorParams):
    """
    Params for :py:class:`GBTRegressor` and :py:class:`GBTRegressorModel`.

    .. versionadded:: 3.0.0
    """

    # Loss functions the Scala GBT implementation accepts (case-insensitive).
    supportedLossTypes = ["squared", "absolute"]

    lossType = Param(Params._dummy(), "lossType",
                     "Loss function which GBT tries to minimize (case-insensitive). " +
                     "Supported options: " + ", ".join(supportedLossTypes),
                     typeConverter=TypeConverters.toString)

    def __init__(self, *args):
        """Initialize the mixin and register the shared default param values."""
        super(_GBTRegressorParams, self).__init__(*args)
        self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                         maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0,
                         checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1,
                         impurity="variance", featureSubsetStrategy="all", validationTol=0.01,
                         leafCol="", minWeightFractionPerNode=0.0)

    @since("1.4.0")
    def getLossType(self):
        """
        Gets the value of lossType or its default value.
        """
        return self.getOrDefault(self.lossType)
@inherit_doc
class GBTRegressor(_JavaRegressor, _GBTRegressorParams, JavaMLWritable, JavaMLReadable):
    """
    `Gradient-Boosted Trees (GBTs) <http://en.wikipedia.org/wiki/Gradient_boosting>`_
    learning algorithm for regression.
    It supports both continuous and categorical features.

    .. versionadded:: 1.4.0

    Examples
    --------
    >>> from numpy import allclose
    >>> from pyspark.ml.linalg import Vectors
    >>> df = spark.createDataFrame([
    ...     (1.0, Vectors.dense(1.0)),
    ...     (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
    >>> gbt = GBTRegressor(maxDepth=2, seed=42, leafCol="leafId")
    >>> gbt.setMaxIter(5)
    GBTRegressor...
    >>> gbt.setMinWeightFractionPerNode(0.049)
    GBTRegressor...
    >>> gbt.getMaxIter()
    5
    >>> print(gbt.getImpurity())
    variance
    >>> print(gbt.getFeatureSubsetStrategy())
    all
    >>> model = gbt.fit(df)
    >>> model.featureImportances
    SparseVector(1, {0: 1.0})
    >>> model.numFeatures
    1
    >>> allclose(model.treeWeights, [1.0, 0.1, 0.1, 0.1, 0.1])
    True
    >>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
    >>> model.predict(test0.head().features)
    0.0
    >>> model.predictLeaf(test0.head().features)
    DenseVector([0.0, 0.0, 0.0, 0.0, 0.0])
    >>> result = model.transform(test0).head()
    >>> result.prediction
    0.0
    >>> result.leafId
    DenseVector([0.0, 0.0, 0.0, 0.0, 0.0])
    >>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
    >>> model.transform(test1).head().prediction
    1.0
    >>> gbtr_path = temp_path + "gbtr"
    >>> gbt.save(gbtr_path)
    >>> gbt2 = GBTRegressor.load(gbtr_path)
    >>> gbt2.getMaxDepth()
    2
    >>> model_path = temp_path + "gbtr_model"
    >>> model.save(model_path)
    >>> model2 = GBTRegressionModel.load(model_path)
    >>> model.featureImportances == model2.featureImportances
    True
    >>> model.treeWeights == model2.treeWeights
    True
    >>> model.transform(test0).take(1) == model2.transform(test0).take(1)
    True
    >>> model.trees
    [DecisionTreeRegressionModel...depth=..., DecisionTreeRegressionModel...]
    >>> validation = spark.createDataFrame([(0.0, Vectors.dense(-1.0))],
    ...                                    ["label", "features"])
    >>> model.evaluateEachIteration(validation, "squared")
    [0.0, 0.0, 0.0, 0.0, 0.0]
    >>> gbt = gbt.setValidationIndicatorCol("validationIndicator")
    >>> gbt.getValidationIndicatorCol()
    'validationIndicator'
    >>> gbt.getValidationTol()
    0.01
    """

    @keyword_only
    def __init__(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
                 maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                 maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0,
                 checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1, seed=None,
                 impurity="variance", featureSubsetStrategy="all", validationTol=0.01,
                 validationIndicatorCol=None, leafCol="", minWeightFractionPerNode=0.0,
                 weightCol=None):
        """
        __init__(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
                 maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
                 maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0, \
                 checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1, seed=None, \
                 impurity="variance", featureSubsetStrategy="all", validationTol=0.01, \
                 validationIndicatorCol=None, leafCol="", minWeightFractionPerNode=0.0, \
                 weightCol=None)
        """
        super(GBTRegressor, self).__init__()
        # Create the Py4J wrapper around the Scala implementation.
        self._java_obj = self._new_java_obj("org.apache.spark.ml.regression.GBTRegressor", self.uid)
        # _input_kwargs is populated by the @keyword_only decorator.
        kwargs = self._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    @since("1.4.0")
    def setParams(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
                  maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                  maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0,
                  checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1, seed=None,
                  impurity="variance", featureSubsetStrategy="all", validationTol=0.01,
                  validationIndicatorCol=None, leafCol="", minWeightFractionPerNode=0.0,
                  weightCol=None):
        """
        setParams(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
                  maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
                  maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0, \
                  checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1, seed=None, \
                  impurity="variance", featureSubsetStrategy="all", validationTol=0.01, \
                  validationIndicatorCol=None, leafCol="", minWeightFractionPerNode=0.0, \
                  weightCol=None)
        Sets params for Gradient Boosted Tree Regression.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)

    def _create_model(self, java_model):
        """Wrap the fitted Py4J model object in a :class:`GBTRegressionModel`."""
        return GBTRegressionModel(java_model)

    @since("1.4.0")
    def setMaxDepth(self, value):
        """
        Sets the value of :py:attr:`maxDepth`.
        """
        return self._set(maxDepth=value)

    @since("1.4.0")
    def setMaxBins(self, value):
        """
        Sets the value of :py:attr:`maxBins`.
        """
        return self._set(maxBins=value)

    @since("1.4.0")
    def setMinInstancesPerNode(self, value):
        """
        Sets the value of :py:attr:`minInstancesPerNode`.
        """
        return self._set(minInstancesPerNode=value)

    @since("1.4.0")
    def setMinInfoGain(self, value):
        """
        Sets the value of :py:attr:`minInfoGain`.
        """
        return self._set(minInfoGain=value)

    @since("1.4.0")
    def setMaxMemoryInMB(self, value):
        """
        Sets the value of :py:attr:`maxMemoryInMB`.
        """
        return self._set(maxMemoryInMB=value)

    @since("1.4.0")
    def setCacheNodeIds(self, value):
        """
        Sets the value of :py:attr:`cacheNodeIds`.
        """
        return self._set(cacheNodeIds=value)

    @since("1.4.0")
    def setImpurity(self, value):
        """
        Sets the value of :py:attr:`impurity`.
        """
        return self._set(impurity=value)

    @since("1.4.0")
    def setLossType(self, value):
        """
        Sets the value of :py:attr:`lossType`.
        """
        return self._set(lossType=value)

    @since("1.4.0")
    def setSubsamplingRate(self, value):
        """
        Sets the value of :py:attr:`subsamplingRate`.
        """
        return self._set(subsamplingRate=value)

    @since("2.4.0")
    def setFeatureSubsetStrategy(self, value):
        """
        Sets the value of :py:attr:`featureSubsetStrategy`.
        """
        return self._set(featureSubsetStrategy=value)

    @since("3.0.0")
    def setValidationIndicatorCol(self, value):
        """
        Sets the value of :py:attr:`validationIndicatorCol`.
        """
        return self._set(validationIndicatorCol=value)

    @since("1.4.0")
    def setMaxIter(self, value):
        """
        Sets the value of :py:attr:`maxIter`.
        """
        return self._set(maxIter=value)

    @since("1.4.0")
    def setCheckpointInterval(self, value):
        """
        Sets the value of :py:attr:`checkpointInterval`.
        """
        return self._set(checkpointInterval=value)

    @since("1.4.0")
    def setSeed(self, value):
        """
        Sets the value of :py:attr:`seed`.
        """
        return self._set(seed=value)

    @since("1.4.0")
    def setStepSize(self, value):
        """
        Sets the value of :py:attr:`stepSize`.
        """
        return self._set(stepSize=value)

    @since("3.0.0")
    def setWeightCol(self, value):
        """
        Sets the value of :py:attr:`weightCol`.
        """
        return self._set(weightCol=value)

    @since("3.0.0")
    def setMinWeightFractionPerNode(self, value):
        """
        Sets the value of :py:attr:`minWeightFractionPerNode`.
        """
        return self._set(minWeightFractionPerNode=value)
class GBTRegressionModel(
    _JavaRegressionModel, _TreeEnsembleModel, _GBTRegressorParams,
    JavaMLWritable, JavaMLReadable
):
    """
    Model fitted by :class:`GBTRegressor`.

    .. versionadded:: 1.4.0
    """

    @property
    def featureImportances(self):
        """
        Estimate of the importance of each feature.

        Each feature's importance is the average of its importance across all trees in the
        ensemble. The importance vector is normalized to sum to 1. This method is suggested
        by Hastie et al.
        (Hastie, Tibshirani, Friedman. "The Elements of Statistical Learning, 2nd Edition." 2001.)
        and follows the implementation from scikit-learn.

        .. versionadded:: 2.0.0

        Examples
        --------
        DecisionTreeRegressionModel.featureImportances
        """
        return self._call_java("featureImportances")

    @property
    @since("2.0.0")
    def trees(self):
        """Trees in this ensemble. Warning: These have null parent Estimators."""
        # Each JVM tree handle is re-wrapped as a Python DecisionTreeRegressionModel.
        return [DecisionTreeRegressionModel(m) for m in list(self._call_java("trees"))]

    def evaluateEachIteration(self, dataset, loss):
        """
        Method to compute error or loss for every iteration of gradient boosting.

        .. versionadded:: 2.4.0

        Parameters
        ----------
        dataset : :py:class:`pyspark.sql.DataFrame`
            Test dataset to evaluate model on, where dataset is an
            instance of :py:class:`pyspark.sql.DataFrame`
        loss : str
            The loss function used to compute error.
            Supported options: squared, absolute
        """
        return self._call_java("evaluateEachIteration", dataset, loss)
class _AFTSurvivalRegressionParams(_PredictorParams, HasMaxIter, HasTol, HasFitIntercept,
                                   HasAggregationDepth, HasMaxBlockSizeInMB):
    """
    Params for :py:class:`AFTSurvivalRegression` and :py:class:`AFTSurvivalRegressionModel`.

    .. versionadded:: 3.0.0
    """

    censorCol = Param(
        Params._dummy(), "censorCol",
        "censor column name. The value of this column could be 0 or 1. " +
        "If the value is 1, it means the event has occurred i.e. " +
        "uncensored; otherwise censored.", typeConverter=TypeConverters.toString)
    quantileProbabilities = Param(
        Params._dummy(), "quantileProbabilities",
        "quantile probabilities array. Values of the quantile probabilities array " +
        "should be in the range (0, 1) and the array should be non-empty.",
        typeConverter=TypeConverters.toListFloat)
    quantilesCol = Param(
        Params._dummy(), "quantilesCol",
        "quantiles column name. This column will output quantiles of " +
        "corresponding quantileProbabilities if it is set.",
        typeConverter=TypeConverters.toString)

    def __init__(self, *args):
        """Initialize the mixin and register the shared default param values."""
        super(_AFTSurvivalRegressionParams, self).__init__(*args)
        self._setDefault(censorCol="censor",
                         quantileProbabilities=[0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99],
                         maxIter=100, tol=1E-6, maxBlockSizeInMB=0.0)

    @since("1.6.0")
    def getCensorCol(self):
        """
        Gets the value of censorCol or its default value.
        """
        return self.getOrDefault(self.censorCol)

    @since("1.6.0")
    def getQuantileProbabilities(self):
        """
        Gets the value of quantileProbabilities or its default value.
        """
        return self.getOrDefault(self.quantileProbabilities)

    @since("1.6.0")
    def getQuantilesCol(self):
        """
        Gets the value of quantilesCol or its default value.
        """
        return self.getOrDefault(self.quantilesCol)
@inherit_doc
class AFTSurvivalRegression(_JavaRegressor, _AFTSurvivalRegressionParams,
                            JavaMLWritable, JavaMLReadable):
    """
    Accelerated Failure Time (AFT) Model Survival Regression

    Fit a parametric AFT survival regression model based on the Weibull distribution
    of the survival time.

    Notes
    -----
    For more information see Wikipedia page on
    `AFT Model <https://en.wikipedia.org/wiki/Accelerated_failure_time_model>`_

    Examples
    --------
    >>> from pyspark.ml.linalg import Vectors
    >>> df = spark.createDataFrame([
    ...     (1.0, Vectors.dense(1.0), 1.0),
    ...     (1e-40, Vectors.sparse(1, [], []), 0.0)], ["label", "features", "censor"])
    >>> aftsr = AFTSurvivalRegression()
    >>> aftsr.setMaxIter(10)
    AFTSurvivalRegression...
    >>> aftsr.getMaxIter()
    10
    >>> aftsr.clear(aftsr.maxIter)
    >>> model = aftsr.fit(df)
    >>> model.getMaxBlockSizeInMB()
    0.0
    >>> model.setFeaturesCol("features")
    AFTSurvivalRegressionModel...
    >>> model.predict(Vectors.dense(6.3))
    1.0
    >>> model.predictQuantiles(Vectors.dense(6.3))
    DenseVector([0.0101, 0.0513, 0.1054, 0.2877, 0.6931, 1.3863, 2.3026, 2.9957, 4.6052])
    >>> model.transform(df).show()
    +-------+---------+------+----------+
    |  label| features|censor|prediction|
    +-------+---------+------+----------+
    |    1.0|    [1.0]|   1.0|       1.0|
    |1.0E-40|(1,[],[])|   0.0|       1.0|
    +-------+---------+------+----------+
    ...
    >>> aftsr_path = temp_path + "/aftsr"
    >>> aftsr.save(aftsr_path)
    >>> aftsr2 = AFTSurvivalRegression.load(aftsr_path)
    >>> aftsr2.getMaxIter()
    100
    >>> model_path = temp_path + "/aftsr_model"
    >>> model.save(model_path)
    >>> model2 = AFTSurvivalRegressionModel.load(model_path)
    >>> model.coefficients == model2.coefficients
    True
    >>> model.intercept == model2.intercept
    True
    >>> model.scale == model2.scale
    True
    >>> model.transform(df).take(1) == model2.transform(df).take(1)
    True

    .. versionadded:: 1.6.0
    """

    @keyword_only
    def __init__(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
                 fitIntercept=True, maxIter=100, tol=1E-6, censorCol="censor",
                 quantileProbabilities=list([0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99]),  # noqa: B005
                 quantilesCol=None, aggregationDepth=2, maxBlockSizeInMB=0.0):
        """
        __init__(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
                 fitIntercept=True, maxIter=100, tol=1E-6, censorCol="censor", \
                 quantileProbabilities=[0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99], \
                 quantilesCol=None, aggregationDepth=2, maxBlockSizeInMB=0.0)
        """
        super(AFTSurvivalRegression, self).__init__()
        # Create the Py4J wrapper around the Scala implementation.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.regression.AFTSurvivalRegression", self.uid)
        # _input_kwargs is populated by the @keyword_only decorator.
        kwargs = self._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    @since("1.6.0")
    def setParams(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
                  fitIntercept=True, maxIter=100, tol=1E-6, censorCol="censor",
                  quantileProbabilities=list([0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99]),  # noqa: B005
                  quantilesCol=None, aggregationDepth=2, maxBlockSizeInMB=0.0):
        """
        setParams(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
                  fitIntercept=True, maxIter=100, tol=1E-6, censorCol="censor", \
                  quantileProbabilities=[0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99], \
                  quantilesCol=None, aggregationDepth=2, maxBlockSizeInMB=0.0)
        Sets params for AFT survival regression.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)

    def _create_model(self, java_model):
        """Wrap the fitted Py4J model object in an :class:`AFTSurvivalRegressionModel`."""
        return AFTSurvivalRegressionModel(java_model)

    @since("1.6.0")
    def setCensorCol(self, value):
        """
        Sets the value of :py:attr:`censorCol`.
        """
        return self._set(censorCol=value)

    @since("1.6.0")
    def setQuantileProbabilities(self, value):
        """
        Sets the value of :py:attr:`quantileProbabilities`.
        """
        return self._set(quantileProbabilities=value)

    @since("1.6.0")
    def setQuantilesCol(self, value):
        """
        Sets the value of :py:attr:`quantilesCol`.
        """
        return self._set(quantilesCol=value)

    @since("1.6.0")
    def setMaxIter(self, value):
        """
        Sets the value of :py:attr:`maxIter`.
        """
        return self._set(maxIter=value)

    @since("1.6.0")
    def setTol(self, value):
        """
        Sets the value of :py:attr:`tol`.
        """
        return self._set(tol=value)

    @since("1.6.0")
    def setFitIntercept(self, value):
        """
        Sets the value of :py:attr:`fitIntercept`.
        """
        return self._set(fitIntercept=value)

    @since("2.1.0")
    def setAggregationDepth(self, value):
        """
        Sets the value of :py:attr:`aggregationDepth`.
        """
        return self._set(aggregationDepth=value)

    @since("3.1.0")
    def setMaxBlockSizeInMB(self, value):
        """
        Sets the value of :py:attr:`maxBlockSizeInMB`.
        """
        return self._set(maxBlockSizeInMB=value)
class AFTSurvivalRegressionModel(_JavaRegressionModel, _AFTSurvivalRegressionParams,
                                 JavaMLWritable, JavaMLReadable):
    """
    Model fitted by :class:`AFTSurvivalRegression`.

    .. versionadded:: 1.6.0
    """

    @since("3.0.0")
    def setQuantileProbabilities(self, value):
        """
        Sets the value of :py:attr:`quantileProbabilities`.

        Parameters
        ----------
        value
            New value for the ``quantileProbabilities`` param.
        """
        return self._set(quantileProbabilities=value)

    @since("3.0.0")
    def setQuantilesCol(self, value):
        """
        Sets the value of :py:attr:`quantilesCol`.

        Parameters
        ----------
        value
            New value for the ``quantilesCol`` param.
        """
        return self._set(quantilesCol=value)

    @property
    @since("2.0.0")
    def coefficients(self):
        """
        Model coefficients.
        """
        return self._call_java("coefficients")

    @property
    @since("1.6.0")
    def intercept(self):
        """
        Model intercept.
        """
        return self._call_java("intercept")

    @property
    @since("1.6.0")
    def scale(self):
        """
        Model scale parameter.
        """
        return self._call_java("scale")

    @since("2.0.0")
    def predictQuantiles(self, features):
        """
        Predicted Quantiles

        Parameters
        ----------
        features
            Feature vector to predict quantiles for; forwarded to the JVM model.
        """
        return self._call_java("predictQuantiles", features)
class _GeneralizedLinearRegressionParams(_PredictorParams, HasFitIntercept, HasMaxIter,
                                         HasTol, HasRegParam, HasWeightCol, HasSolver,
                                         HasAggregationDepth):
    """
    Params for :py:class:`GeneralizedLinearRegression` and
    :py:class:`GeneralizedLinearRegressionModel`.

    .. versionadded:: 3.0.0
    """

    family = Param(Params._dummy(), "family", "The name of family which is a description of " +
                   "the error distribution to be used in the model. Supported options: " +
                   "gaussian (default), binomial, poisson, gamma and tweedie.",
                   typeConverter=TypeConverters.toString)
    link = Param(Params._dummy(), "link", "The name of link function which provides the " +
                 "relationship between the linear predictor and the mean of the distribution " +
                 "function. Supported options: identity, log, inverse, logit, probit, cloglog " +
                 "and sqrt.", typeConverter=TypeConverters.toString)
    linkPredictionCol = Param(Params._dummy(), "linkPredictionCol", "link prediction (linear " +
                              "predictor) column name", typeConverter=TypeConverters.toString)
    variancePower = Param(Params._dummy(), "variancePower", "The power in the variance function " +
                          "of the Tweedie distribution which characterizes the relationship " +
                          "between the variance and mean of the distribution. Only applicable " +
                          "for the Tweedie family. Supported values: 0 and [1, Inf).",
                          typeConverter=TypeConverters.toFloat)
    linkPower = Param(Params._dummy(), "linkPower", "The index in the power link function. " +
                      "Only applicable to the Tweedie family.",
                      typeConverter=TypeConverters.toFloat)
    # Overrides the shared HasSolver param to restrict the documented options to "irls".
    solver = Param(Params._dummy(), "solver", "The solver algorithm for optimization. Supported " +
                   "options: irls.", typeConverter=TypeConverters.toString)
    offsetCol = Param(Params._dummy(), "offsetCol", "The offset column name. If this is not set " +
                      "or empty, we treat all instance offsets as 0.0",
                      typeConverter=TypeConverters.toString)

    def __init__(self, *args):
        """Initialize the mixin and register the shared default param values."""
        super(_GeneralizedLinearRegressionParams, self).__init__(*args)
        self._setDefault(family="gaussian", maxIter=25, tol=1e-6, regParam=0.0, solver="irls",
                         variancePower=0.0, aggregationDepth=2)

    @since("2.0.0")
    def getFamily(self):
        """
        Gets the value of family or its default value.
        """
        return self.getOrDefault(self.family)

    @since("2.0.0")
    def getLinkPredictionCol(self):
        """
        Gets the value of linkPredictionCol or its default value.
        """
        return self.getOrDefault(self.linkPredictionCol)

    @since("2.0.0")
    def getLink(self):
        """
        Gets the value of link or its default value.
        """
        return self.getOrDefault(self.link)

    @since("2.2.0")
    def getVariancePower(self):
        """
        Gets the value of variancePower or its default value.
        """
        return self.getOrDefault(self.variancePower)

    @since("2.2.0")
    def getLinkPower(self):
        """
        Gets the value of linkPower or its default value.
        """
        return self.getOrDefault(self.linkPower)

    @since("2.3.0")
    def getOffsetCol(self):
        """
        Gets the value of offsetCol or its default value.
        """
        return self.getOrDefault(self.offsetCol)
@inherit_doc
class GeneralizedLinearRegression(_JavaRegressor, _GeneralizedLinearRegressionParams,
JavaMLWritable, JavaMLReadable):
"""
Generalized Linear Regression.
Fit a Generalized Linear Model specified by giving a symbolic description of the linear
predictor (link function) and a description of the error distribution (family). It supports
"gaussian", "binomial", "poisson", "gamma" and "tweedie" as family. Valid link functions for
each family is listed below. The first link function of each family is the default one.
* "gaussian" -> "identity", "log", "inverse"
* "binomial" -> "logit", "probit", "cloglog"
* "poisson" -> "log", "identity", "sqrt"
* "gamma" -> "inverse", "identity", "log"
* "tweedie" -> power link function specified through "linkPower". \
The default link power in the tweedie family is 1 - variancePower.
.. versionadded:: 2.0.0
Notes
-----
For more information see Wikipedia page on
`GLM <https://en.wikipedia.org/wiki/Generalized_linear_model>`_
Examples
--------
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame([
... (1.0, Vectors.dense(0.0, 0.0)),
... (1.0, Vectors.dense(1.0, 2.0)),
... (2.0, Vectors.dense(0.0, 0.0)),
... (2.0, Vectors.dense(1.0, 1.0)),], ["label", "features"])
>>> glr = GeneralizedLinearRegression(family="gaussian", link="identity", linkPredictionCol="p")
>>> glr.setRegParam(0.1)
GeneralizedLinearRegression...
>>> glr.getRegParam()
0.1
>>> glr.clear(glr.regParam)
>>> glr.setMaxIter(10)
GeneralizedLinearRegression...
>>> glr.getMaxIter()
10
>>> glr.clear(glr.maxIter)
>>> model = glr.fit(df)
>>> model.setFeaturesCol("features")
GeneralizedLinearRegressionModel...
>>> model.getMaxIter()
25
>>> model.getAggregationDepth()
2
>>> transformed = model.transform(df)
>>> abs(transformed.head().prediction - 1.5) < 0.001
True
>>> abs(transformed.head().p - 1.5) < 0.001
True
>>> model.coefficients
DenseVector([1.5..., -1.0...])
>>> model.numFeatures
2
>>> abs(model.intercept - 1.5) < 0.001
True
>>> glr_path = temp_path + "/glr"
>>> glr.save(glr_path)
>>> glr2 = GeneralizedLinearRegression.load(glr_path)
>>> glr.getFamily() == glr2.getFamily()
True
>>> model_path = temp_path + "/glr_model"
>>> model.save(model_path)
>>> model2 = GeneralizedLinearRegressionModel.load(model_path)
>>> model.intercept == model2.intercept
True
>>> model.coefficients[0] == model2.coefficients[0]
True
>>> model.transform(df).take(1) == model2.transform(df).take(1)
True
"""
@keyword_only
def __init__(self, *, labelCol="label", featuresCol="features", predictionCol="prediction",
family="gaussian", link=None, fitIntercept=True, maxIter=25, tol=1e-6,
regParam=0.0, weightCol=None, solver="irls", linkPredictionCol=None,
variancePower=0.0, linkPower=None, offsetCol=None, aggregationDepth=2):
"""
__init__(self, \\*, labelCol="label", featuresCol="features", predictionCol="prediction", \
family="gaussian", link=None, fitIntercept=True, maxIter=25, tol=1e-6, \
regParam=0.0, weightCol=None, solver="irls", linkPredictionCol=None, \
variancePower=0.0, linkPower=None, offsetCol=None, aggregationDepth=2)
"""
super(GeneralizedLinearRegression, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.regression.GeneralizedLinearRegression", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.0.0")
def setParams(self, *, labelCol="label", featuresCol="features", predictionCol="prediction",
family="gaussian", link=None, fitIntercept=True, maxIter=25, tol=1e-6,
regParam=0.0, weightCol=None, solver="irls", linkPredictionCol=None,
variancePower=0.0, linkPower=None, offsetCol=None, aggregationDepth=2):
"""
setParams(self, \\*, labelCol="label", featuresCol="features", predictionCol="prediction", \
family="gaussian", link=None, fitIntercept=True, maxIter=25, tol=1e-6, \
regParam=0.0, weightCol=None, solver="irls", linkPredictionCol=None, \
variancePower=0.0, linkPower=None, offsetCol=None, aggregationDepth=2)
Sets params for generalized linear regression.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return GeneralizedLinearRegressionModel(java_model)
    # --- Param setters -------------------------------------------------------
    # Each setter stores the value via Params._set; _set returns the estimator
    # itself, so these calls can be chained fluently.
    @since("2.0.0")
    def setFamily(self, value):
        """
        Sets the value of :py:attr:`family`.
        """
        return self._set(family=value)
    @since("2.0.0")
    def setLinkPredictionCol(self, value):
        """
        Sets the value of :py:attr:`linkPredictionCol`.
        """
        return self._set(linkPredictionCol=value)
    @since("2.0.0")
    def setLink(self, value):
        """
        Sets the value of :py:attr:`link`.
        """
        return self._set(link=value)
    @since("2.2.0")
    def setVariancePower(self, value):
        """
        Sets the value of :py:attr:`variancePower`.
        """
        return self._set(variancePower=value)
    @since("2.2.0")
    def setLinkPower(self, value):
        """
        Sets the value of :py:attr:`linkPower`.
        """
        return self._set(linkPower=value)
    @since("2.3.0")
    def setOffsetCol(self, value):
        """
        Sets the value of :py:attr:`offsetCol`.
        """
        return self._set(offsetCol=value)
    @since("2.0.0")
    def setMaxIter(self, value):
        """
        Sets the value of :py:attr:`maxIter`.
        """
        return self._set(maxIter=value)
    @since("2.0.0")
    def setRegParam(self, value):
        """
        Sets the value of :py:attr:`regParam`.
        """
        return self._set(regParam=value)
    @since("2.0.0")
    def setTol(self, value):
        """
        Sets the value of :py:attr:`tol`.
        """
        return self._set(tol=value)
    @since("2.0.0")
    def setFitIntercept(self, value):
        """
        Sets the value of :py:attr:`fitIntercept`.
        """
        return self._set(fitIntercept=value)
    @since("2.0.0")
    def setWeightCol(self, value):
        """
        Sets the value of :py:attr:`weightCol`.
        """
        return self._set(weightCol=value)
    @since("2.0.0")
    def setSolver(self, value):
        """
        Sets the value of :py:attr:`solver`.
        """
        return self._set(solver=value)
    @since("3.0.0")
    def setAggregationDepth(self, value):
        """
        Sets the value of :py:attr:`aggregationDepth`.
        """
        return self._set(aggregationDepth=value)
class GeneralizedLinearRegressionModel(_JavaRegressionModel, _GeneralizedLinearRegressionParams,
                                       JavaMLWritable, JavaMLReadable, HasTrainingSummary):
    """
    Model fitted by :class:`GeneralizedLinearRegression`.
    .. versionadded:: 2.0.0
    """
    @since("3.0.0")
    def setLinkPredictionCol(self, value):
        """
        Sets the value of :py:attr:`linkPredictionCol`.
        """
        return self._set(linkPredictionCol=value)
    @property
    @since("2.0.0")
    def coefficients(self):
        """
        Model coefficients.
        """
        return self._call_java("coefficients")
    @property
    @since("2.0.0")
    def intercept(self):
        """
        Model intercept.
        """
        return self._call_java("intercept")
    @property
    @since("2.0.0")
    def summary(self):
        """
        Gets summary (e.g. residuals, deviance, pValues) of model on
        training set. An exception is thrown if
        `trainingSummary is None`.
        """
        if self.hasSummary:
            # Wrap the JVM-side training summary in the Python wrapper class.
            return GeneralizedLinearRegressionTrainingSummary(
                super(GeneralizedLinearRegressionModel, self).summary)
        else:
            raise RuntimeError("No training summary available for this %s" %
                               self.__class__.__name__)
    def evaluate(self, dataset):
        """
        Evaluates the model on a test dataset.
        .. versionadded:: 2.0.0
        Parameters
        ----------
        dataset : :py:class:`pyspark.sql.DataFrame`
            Test dataset to evaluate model on, where dataset is an
            instance of :py:class:`pyspark.sql.DataFrame`
        """
        if not isinstance(dataset, DataFrame):
            raise ValueError("dataset must be a DataFrame but got %s." % type(dataset))
        java_glr_summary = self._call_java("evaluate", dataset)
        return GeneralizedLinearRegressionSummary(java_glr_summary)
class GeneralizedLinearRegressionSummary(JavaWrapper):
    """
    Generalized linear regression results evaluated on a dataset.
    .. versionadded:: 2.0.0
    """
    # Thin read-only wrapper: every member simply forwards to the
    # corresponding accessor on the JVM summary object.
    @property
    @since("2.0.0")
    def predictions(self):
        """
        Predictions output by the model's `transform` method.
        """
        return self._call_java("predictions")
    @property
    @since("2.0.0")
    def predictionCol(self):
        """
        Field in :py:attr:`predictions` which gives the predicted value of each instance.
        This is set to a new column name if the original model's `predictionCol` is not set.
        """
        return self._call_java("predictionCol")
    @property
    @since("2.2.0")
    def numInstances(self):
        """
        Number of instances in DataFrame predictions.
        """
        return self._call_java("numInstances")
    @property
    @since("2.0.0")
    def rank(self):
        """
        The numeric rank of the fitted linear model.
        """
        return self._call_java("rank")
    @property
    @since("2.0.0")
    def degreesOfFreedom(self):
        """
        Degrees of freedom.
        """
        return self._call_java("degreesOfFreedom")
    @property
    @since("2.0.0")
    def residualDegreeOfFreedom(self):
        """
        The residual degrees of freedom.
        """
        return self._call_java("residualDegreeOfFreedom")
    @property
    @since("2.0.0")
    def residualDegreeOfFreedomNull(self):
        """
        The residual degrees of freedom for the null model.
        """
        return self._call_java("residualDegreeOfFreedomNull")
    def residuals(self, residualsType="deviance"):
        """
        Get the residuals of the fitted model by type.
        .. versionadded:: 2.0.0
        Parameters
        ----------
        residualsType : str, optional
            The type of residuals which should be returned.
            Supported options: deviance (default), pearson, working, and response.
        """
        return self._call_java("residuals", residualsType)
    @property
    @since("2.0.0")
    def nullDeviance(self):
        """
        The deviance for the null model.
        """
        return self._call_java("nullDeviance")
    @property
    @since("2.0.0")
    def deviance(self):
        """
        The deviance for the fitted model.
        """
        return self._call_java("deviance")
    @property
    @since("2.0.0")
    def dispersion(self):
        """
        The dispersion of the fitted model.
        It is taken as 1.0 for the "binomial" and "poisson" families, and otherwise
        estimated by the residual Pearson's Chi-Squared statistic (which is defined as
        sum of the squares of the Pearson residuals) divided by the residual degrees of freedom.
        """
        return self._call_java("dispersion")
    @property
    @since("2.0.0")
    def aic(self):
        """
        Akaike's "An Information Criterion"(AIC) for the fitted model.
        """
        return self._call_java("aic")
@inherit_doc
class GeneralizedLinearRegressionTrainingSummary(GeneralizedLinearRegressionSummary):
    """
    Generalized linear regression training results.
    .. versionadded:: 2.0.0
    """
    # Extends the evaluation summary with training-only diagnostics
    # (iteration count, solver, coefficient statistics).
    @property
    @since("2.0.0")
    def numIterations(self):
        """
        Number of training iterations.
        """
        return self._call_java("numIterations")
    @property
    @since("2.0.0")
    def solver(self):
        """
        The numeric solver used for training.
        """
        return self._call_java("solver")
    @property
    @since("2.0.0")
    def coefficientStandardErrors(self):
        """
        Standard error of estimated coefficients and intercept.
        If :py:attr:`GeneralizedLinearRegression.fitIntercept` is set to True,
        then the last element returned corresponds to the intercept.
        """
        return self._call_java("coefficientStandardErrors")
    @property
    @since("2.0.0")
    def tValues(self):
        """
        T-statistic of estimated coefficients and intercept.
        If :py:attr:`GeneralizedLinearRegression.fitIntercept` is set to True,
        then the last element returned corresponds to the intercept.
        """
        return self._call_java("tValues")
    @property
    @since("2.0.0")
    def pValues(self):
        """
        Two-sided p-value of estimated coefficients and intercept.
        If :py:attr:`GeneralizedLinearRegression.fitIntercept` is set to True,
        then the last element returned corresponds to the intercept.
        """
        return self._call_java("pValues")
    def __repr__(self):
        return self._call_java("toString")
class _FactorizationMachinesParams(_PredictorParams, HasMaxIter, HasStepSize, HasTol,
                                   HasSolver, HasSeed, HasFitIntercept, HasRegParam, HasWeightCol):
    """
    Params for :py:class:`FMRegressor`, :py:class:`FMRegressionModel`, :py:class:`FMClassifier`
    and :py:class:`FMClassifierModel`.
    .. versionadded:: 3.0.0
    """
    # Param declarations: shared metadata (name, doc, type converter) used by
    # both the Python and JVM sides of the FM estimators/models.
    factorSize = Param(Params._dummy(), "factorSize", "Dimensionality of the factor vectors, " +
                       "which are used to get pairwise interactions between variables",
                       typeConverter=TypeConverters.toInt)
    fitLinear = Param(Params._dummy(), "fitLinear", "whether to fit linear term (aka 1-way term)",
                      typeConverter=TypeConverters.toBoolean)
    miniBatchFraction = Param(Params._dummy(), "miniBatchFraction", "fraction of the input data " +
                              "set that should be used for one iteration of gradient descent",
                              typeConverter=TypeConverters.toFloat)
    initStd = Param(Params._dummy(), "initStd", "standard deviation of initial coefficients",
                    typeConverter=TypeConverters.toFloat)
    solver = Param(Params._dummy(), "solver", "The solver algorithm for optimization. Supported " +
                   "options: gd, adamW. (Default adamW)", typeConverter=TypeConverters.toString)
    def __init__(self, *args):
        super(_FactorizationMachinesParams, self).__init__(*args)
        # Defaults mirror the Scala-side FactorizationMachinesParams defaults.
        self._setDefault(factorSize=8, fitIntercept=True, fitLinear=True, regParam=0.0,
                         miniBatchFraction=1.0, initStd=0.01, maxIter=100, stepSize=1.0,
                         tol=1e-6, solver="adamW")
    @since("3.0.0")
    def getFactorSize(self):
        """
        Gets the value of factorSize or its default value.
        """
        return self.getOrDefault(self.factorSize)
    @since("3.0.0")
    def getFitLinear(self):
        """
        Gets the value of fitLinear or its default value.
        """
        return self.getOrDefault(self.fitLinear)
    @since("3.0.0")
    def getMiniBatchFraction(self):
        """
        Gets the value of miniBatchFraction or its default value.
        """
        return self.getOrDefault(self.miniBatchFraction)
    @since("3.0.0")
    def getInitStd(self):
        """
        Gets the value of initStd or its default value.
        """
        return self.getOrDefault(self.initStd)
@inherit_doc
class FMRegressor(_JavaRegressor, _FactorizationMachinesParams, JavaMLWritable, JavaMLReadable):
"""
Factorization Machines learning algorithm for regression.
solver Supports:
* gd (normal mini-batch gradient descent)
* adamW (default)
.. versionadded:: 3.0.0
Examples
--------
>>> from pyspark.ml.linalg import Vectors
>>> from pyspark.ml.regression import FMRegressor
>>> df = spark.createDataFrame([
... (2.0, Vectors.dense(2.0)),
... (1.0, Vectors.dense(1.0)),
... (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
>>>
>>> fm = FMRegressor(factorSize=2)
>>> fm.setSeed(16)
FMRegressor...
>>> model = fm.fit(df)
>>> model.getMaxIter()
100
>>> test0 = spark.createDataFrame([
... (Vectors.dense(-2.0),),
... (Vectors.dense(0.5),),
... (Vectors.dense(1.0),),
... (Vectors.dense(4.0),)], ["features"])
>>> model.transform(test0).show(10, False)
+--------+-------------------+
|features|prediction |
+--------+-------------------+
|[-2.0] |-1.9989237712341565|
|[0.5] |0.4956682219523814 |
|[1.0] |0.994586620589689 |
|[4.0] |3.9880970124135344 |
+--------+-------------------+
...
>>> model.intercept
-0.0032501766849261557
>>> model.linear
DenseVector([0.9978])
>>> model.factors
DenseMatrix(1, 2, [0.0173, 0.0021], 1)
>>> model_path = temp_path + "/fm_model"
>>> model.save(model_path)
>>> model2 = FMRegressionModel.load(model_path)
>>> model2.intercept
-0.0032501766849261557
>>> model2.linear
DenseVector([0.9978])
>>> model2.factors
DenseMatrix(1, 2, [0.0173, 0.0021], 1)
>>> model.transform(test0).take(1) == model2.transform(test0).take(1)
True
"""
    @keyword_only
    def __init__(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
                 factorSize=8, fitIntercept=True, fitLinear=True, regParam=0.0,
                 miniBatchFraction=1.0, initStd=0.01, maxIter=100, stepSize=1.0,
                 tol=1e-6, solver="adamW", seed=None):
        """
        __init__(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
                 factorSize=8, fitIntercept=True, fitLinear=True, regParam=0.0, \
                 miniBatchFraction=1.0, initStd=0.01, maxIter=100, stepSize=1.0, \
                 tol=1e-6, solver="adamW", seed=None)
        """
        super(FMRegressor, self).__init__()
        # Create the JVM-side estimator this Python wrapper delegates to.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.regression.FMRegressor", self.uid)
        # Forward only the keywords the caller actually passed (captured by
        # @keyword_only in _input_kwargs) to setParams.
        kwargs = self._input_kwargs
        self.setParams(**kwargs)
    @keyword_only
    def setParams(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
                  factorSize=8, fitIntercept=True, fitLinear=True, regParam=0.0,
                  miniBatchFraction=1.0, initStd=0.01, maxIter=100, stepSize=1.0,
                  tol=1e-6, solver="adamW", seed=None):
        """
        setParams(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
                  factorSize=8, fitIntercept=True, fitLinear=True, regParam=0.0, \
                  miniBatchFraction=1.0, initStd=0.01, maxIter=100, stepSize=1.0, \
                  tol=1e-6, solver="adamW", seed=None)
        Sets Params for FMRegressor.
        """
        # Only explicitly-passed keywords are in _input_kwargs, so defaults
        # established elsewhere are preserved.
        kwargs = self._input_kwargs
        return self._set(**kwargs)
    def _create_model(self, java_model):
        # Wrap the fitted JVM model in its Python model class.
        return FMRegressionModel(java_model)
    # --- Param setters: each stores the value via Params._set and can be
    # chained fluently.
    @since("3.0.0")
    def setFactorSize(self, value):
        """
        Sets the value of :py:attr:`factorSize`.
        """
        return self._set(factorSize=value)
    @since("3.0.0")
    def setFitLinear(self, value):
        """
        Sets the value of :py:attr:`fitLinear`.
        """
        return self._set(fitLinear=value)
    @since("3.0.0")
    def setMiniBatchFraction(self, value):
        """
        Sets the value of :py:attr:`miniBatchFraction`.
        """
        return self._set(miniBatchFraction=value)
    @since("3.0.0")
    def setInitStd(self, value):
        """
        Sets the value of :py:attr:`initStd`.
        """
        return self._set(initStd=value)
    @since("3.0.0")
    def setMaxIter(self, value):
        """
        Sets the value of :py:attr:`maxIter`.
        """
        return self._set(maxIter=value)
    @since("3.0.0")
    def setStepSize(self, value):
        """
        Sets the value of :py:attr:`stepSize`.
        """
        return self._set(stepSize=value)
    @since("3.0.0")
    def setTol(self, value):
        """
        Sets the value of :py:attr:`tol`.
        """
        return self._set(tol=value)
    @since("3.0.0")
    def setSolver(self, value):
        """
        Sets the value of :py:attr:`solver`.
        """
        return self._set(solver=value)
    @since("3.0.0")
    def setSeed(self, value):
        """
        Sets the value of :py:attr:`seed`.
        """
        return self._set(seed=value)
    @since("3.0.0")
    def setFitIntercept(self, value):
        """
        Sets the value of :py:attr:`fitIntercept`.
        """
        return self._set(fitIntercept=value)
    @since("3.0.0")
    def setRegParam(self, value):
        """
        Sets the value of :py:attr:`regParam`.
        """
        return self._set(regParam=value)
class FMRegressionModel(_JavaRegressionModel, _FactorizationMachinesParams, JavaMLWritable,
                        JavaMLReadable):
    """
    Model fitted by :class:`FMRegressor`.
    .. versionadded:: 3.0.0
    """
    # Read-only accessors forwarding to the JVM model object.
    @property
    @since("3.0.0")
    def intercept(self):
        """
        Model intercept.
        """
        return self._call_java("intercept")
    @property
    @since("3.0.0")
    def linear(self):
        """
        Model linear term.
        """
        return self._call_java("linear")
    @property
    @since("3.0.0")
    def factors(self):
        """
        Model factor term.
        """
        return self._call_java("factors")
# Doctest driver: runs the module's docstring examples against a local
# SparkSession and exits non-zero on failure.
if __name__ == "__main__":
    import doctest
    import pyspark.ml.regression
    from pyspark.sql import SparkSession
    globs = pyspark.ml.regression.__dict__.copy()
    # The small batch size here ensures that we see multiple batches,
    # even in these small test examples:
    spark = SparkSession.builder\
        .master("local[2]")\
        .appName("ml.regression tests")\
        .getOrCreate()
    sc = spark.sparkContext
    globs['sc'] = sc
    globs['spark'] = spark
    import tempfile
    temp_path = tempfile.mkdtemp()
    globs['temp_path'] = temp_path
    try:
        # NOTE(review): if testmod() itself raises, failure_count is never
        # bound and the check below would raise NameError -- confirm intended.
        (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
        spark.stop()
    finally:
        # Always remove the temp directory used by the save/load doctests.
        from shutil import rmtree
        try:
            rmtree(temp_path)
        except OSError:
            pass
    if failure_count:
        sys.exit(-1)
|
witgo/spark
|
python/pyspark/ml/regression.py
|
Python
|
apache-2.0
| 91,643
|
[
"Gaussian"
] |
69303e8f30c2dac9fdadf3dfb5b181e30afc964b7895ba1a17d7b559b3b04fb8
|
########################################################################
# $HeadURL $
# File: CleanReqDBAgent.py
# Author: Krzysztof.Ciba@NOSPAMgmail.com
# Date: 2013/05/17 08:31:26
########################################################################
""" :mod: CleanReqDBAgent
=====================
.. module: CleanReqDBAgent
:synopsis: cleaning RequestDB from obsolete records and kicking assigned requests
.. moduleauthor:: Krzysztof.Ciba@NOSPAMgmail.com
cleaning ReqDB from obsolete records and kicking assigned requests
"""
__RCSID__ = "$Id: $"
# #
# @file CleanReqDBAgent.py
# @author Krzysztof.Ciba@NOSPAMgmail.com
# @date 2013/05/17 08:32:08
# @brief Definition of CleanReqDBAgent class.
# # imports
import datetime
# # from DIRAC
from DIRAC import S_OK, gMonitor
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.RequestManagementSystem.Client.ReqClient import ReqClient
from DIRAC.RequestManagementSystem.Client.Request import Request
AGENT_NAME = "RequestManagement/CleanReqDBAgent"
########################################################################
class CleanReqDBAgent( AgentModule ):
  """
  .. class:: CleanReqDBAgent

  Periodically cleans the RequestDB: requests stuck in 'Assigned' for longer
  than KICK_GRACE_HOURS are reset to 'Waiting', and 'Done' (optionally also
  'Failed') requests older than DEL_GRACE_DAYS are deleted.
  """
  # # deletion grace period in DAYS
  DEL_GRACE_DAYS = 60
  # # max number of requests deleted per cycle
  DEL_LIMIT = 100
  # # kick grace period in HOURS
  KICK_GRACE_HOURS = 1
  # # max number of requests kicked per cycle
  KICK_LIMIT = 100
  # # remove failed requests flag
  DEL_FAILED = False
  # # request client
  __requestClient = None

  def requestClient( self ):
    """ request client getter (lazily constructed, cached) """
    if not self.__requestClient:
      self.__requestClient = ReqClient()
    return self.__requestClient

  def initialize( self ):
    """ initialization: read agent options, register gMonitor activities """
    self.DEL_GRACE_DAYS = self.am_getOption( "DeleteGraceDays", self.DEL_GRACE_DAYS )
    self.log.info( "Delete grace period = %s days" % self.DEL_GRACE_DAYS )
    # "DeleleLimit" was a historical typo in the option name: read the
    # correctly spelled "DeleteLimit" first, but keep the misspelled name as
    # a fallback so existing configurations are still honoured.
    self.DEL_LIMIT = self.am_getOption( "DeleteLimit", self.am_getOption( "DeleleLimit", self.DEL_LIMIT ) )
    self.log.info( "Delete limit = %s request/cycle" % self.DEL_LIMIT )
    self.DEL_FAILED = self.am_getOption( "DeleteFailed", self.DEL_FAILED )
    self.log.info( "Delete failed requests: %s" % { True: "yes", False: "no"}[self.DEL_FAILED] )
    self.KICK_GRACE_HOURS = self.am_getOption( "KickGraceHours", self.KICK_GRACE_HOURS )
    self.log.info( "Kick assigned requests period = %s hours" % self.KICK_GRACE_HOURS )
    self.KICK_LIMIT = self.am_getOption( "KickLimit", self.KICK_LIMIT )
    self.log.info( "Kick limit = %s request/cycle" % self.KICK_LIMIT )
    # # gMonitor stuff
    gMonitor.registerActivity( "DeletedRequests", "Deleted finished requests",
                               "CleanReqDBAgent", "Requests/min", gMonitor.OP_SUM )
    gMonitor.registerActivity( "KickedRequests", "Assigned requests kicked",
                               "CleanReqDBAgent", "Requests/min", gMonitor.OP_SUM )
    return S_OK()

  def execute( self ):
    """ one cycle: kick stale 'Assigned' requests, then delete old finished ones """
    now = datetime.datetime.utcnow()
    kickTime = now - datetime.timedelta( hours = self.KICK_GRACE_HOURS )
    rmTime = now - datetime.timedelta( days = self.DEL_GRACE_DAYS )
    # # kick
    statusList = [ "Assigned" ]
    # BUGFIX: this query previously used self.DEL_LIMIT; KICK_LIMIT is the
    # configured cap for the kick phase.
    requestNamesList = self.requestClient().getRequestNamesList( statusList, self.KICK_LIMIT )
    if not requestNamesList["OK"]:
      self.log.error( "execute: %s" % requestNamesList["Message"] )
      return requestNamesList
    requestNamesList = requestNamesList["Value"]
    kicked = 0
    for requestName, status, lastUpdate in requestNamesList:
      if lastUpdate < kickTime:
        self.log.info( "execute: kick assigned request '%s'" % requestName )
        getRequest = self.requestClient().peekRequest( requestName )
        if not getRequest["OK"]:
          self.log.error( "execute: unable to read request '%s': %s" % ( requestName, getRequest["Message"] ) )
          continue
        getRequest = getRequest["Value"]
        if getRequest:
          # reset the stale request so it gets picked up again
          getRequest.Status = "Waiting"
          putRequest = self.requestClient().putRequest( getRequest )
          if not putRequest["OK"]:
            self.log.error( "execute: unable to put request '%s': %s" % ( requestName, putRequest["Message"] ) )
            continue
          else:
            self.log.verbose( "Kicked request %d" % putRequest['Value'] )
          kicked += 1
    # # delete
    statusList = [ "Done", "Failed" ] if self.DEL_FAILED else [ "Done" ]
    requestNamesList = self.requestClient().getRequestNamesList( statusList, self.DEL_LIMIT )
    if not requestNamesList["OK"]:
      self.log.error( "execute: %s" % requestNamesList["Message"] )
      return requestNamesList
    requestNamesList = requestNamesList["Value"]
    deleted = 0
    for requestName, status, lastUpdate in requestNamesList:
      if lastUpdate < rmTime:
        self.log.info( "execute: deleting request '%s' with status %s" % ( requestName, status ) )
        delRequest = self.requestClient().deleteRequest( requestName )
        if not delRequest["OK"]:
          self.log.error( "execute: unable to delete request '%s': %s" % ( requestName, delRequest["Message"] ) )
          continue
        deleted += 1
    gMonitor.addMark( "KickedRequests", kicked )
    gMonitor.addMark( "DeletedRequests", deleted )
    self.log.info( "execute: kicked assigned requests = %s" % kicked )
    self.log.info( "execute: deleted finished requests = %s" % deleted )
    return S_OK()
|
avedaee/DIRAC
|
RequestManagementSystem/Agent/CleanReqDBAgent.py
|
Python
|
gpl-3.0
| 5,377
|
[
"DIRAC"
] |
add03a273d244928703cd3ade0aa1801277e015e508522712cc2ba3a67ccd889
|
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
import scipy.interpolate as si
import scipy.ndimage as scim
import scipy.ndimage.interpolation as sii
import os
import os.path as osp
import cPickle as cp
#import Image
from PIL import Image
from poisson_reconstruct import blit_images
def sample_weighted(p_dict):
    """
    Draw one key from P_DICT, whose values are the sampling probabilities.
    """
    # Materialize keys and probabilities once so the index returned by
    # np.random.choice is guaranteed to line up with the key list (this also
    # keeps the function working on Python 3, where dict.keys()/values()
    # are views and cannot be indexed or passed as an array directly).
    keys = list(p_dict.keys())
    probs = list(p_dict.values())
    return keys[np.random.choice(len(keys), p=probs)]
class Layer(object):
    """
    One compositing layer: a full-image alpha mask plus an RGB color image.
    """

    def __init__(self, alpha, color):
        """
        alpha : (n x m) array -- alpha mask for the whole image.
        color : scalar (grayscale), 3-vector (RGB), (n x m) grayscale image,
                or (n x m x 3) RGB image.
        """
        # alpha for the whole image:
        assert alpha.ndim == 2
        self.alpha = alpha
        [n, m] = alpha.shape[:2]

        color = np.atleast_1d(np.array(color)).astype('uint8')
        # color for the image:
        if color.ndim == 1:  # constant color for the whole layer
            ncol = color.size
            if ncol == 1:  # grayscale layer
                self.color = color * np.ones((n, m, 3), 'uint8')
            elif ncol == 3:  # constant RGB
                self.color = np.ones((n, m, 3), 'uint8') * color[None, None, :]
            else:
                # BUGFIX: previously fell through silently, leaving
                # self.color unset for 1-d colors of length != 1 or 3.
                raise Exception("color datatype not understood: shape %s" % (color.shape,))
        elif color.ndim == 2:  # grayscale image
            self.color = np.repeat(color[:, :, None], repeats=3, axis=2).copy().astype('uint8')
        elif color.ndim == 3:  # rgb image
            self.color = color.copy().astype('uint8')
        else:
            # was: Python-2-only `print color.shape` followed by a bare raise;
            # the shape is now folded into the exception message instead.
            raise Exception("color datatype not understood: shape %s" % (color.shape,))
class FontColor(object):
    """
    Data-driven text color model: samples foreground/background color pairs
    from a pickled array of per-cluster RGB mean/std entries (COL_FILE).
    """
    def __init__(self, col_file):
        with open(col_file,'r') as f:
            self.colorsRGB = cp.load(f)
        self.ncol = self.colorsRGB.shape[0]
        # convert color-means from RGB to LAB for better nearest neighbour
        # computations:
        self.colorsLAB = np.r_[self.colorsRGB[:,0:3], self.colorsRGB[:,6:9]].astype('uint8')
        self.colorsLAB = np.squeeze(cv.cvtColor(self.colorsLAB[None,:,:],cv.COLOR_RGB2Lab))
    def sample_normal(self, col_mean, col_std):
        """
        sample from a normal distribution centered around COL_MEAN
        with standard deviation = COL_STD.
        """
        col_sample = col_mean + col_std * np.random.randn()
        return np.clip(col_sample, 0, 255).astype('uint8')
    def sample_from_data(self, bg_mat):
        """
        bg_mat : this is a nxmx3 RGB image.
        returns a tuple : (RGB_foreground, RGB_background)
        each of these is a 3-vector.
        """
        bg_orig = bg_mat.copy()
        # compare the background's mean LAB color against the color model:
        bg_mat = cv.cvtColor(bg_mat, cv.COLOR_RGB2Lab)
        bg_mat = np.reshape(bg_mat, (np.prod(bg_mat.shape[:2]),3))
        bg_mean = np.mean(bg_mat,axis=0)
        norms = np.linalg.norm(self.colorsLAB-bg_mean[None,:], axis=1)
        # choose a random color amongst the top 3 closest matches:
        #nn = np.random.choice(np.argsort(norms)[:3])
        nn = np.argmin(norms)
        ## nearest neighbour color:
        data_col = self.colorsRGB[np.mod(nn,self.ncol),:]
        # sample both halves of the pair (mean/std stored back-to-back):
        col1 = self.sample_normal(data_col[:3],data_col[3:6])
        col2 = self.sample_normal(data_col[6:9],data_col[9:12])
        if nn < self.ncol:
            return (col2, col1)
        else:
            # need to swap to make the second color close to the input backgroun color
            return (col1, col2)
    def mean_color(self, arr):
        # mean color of ARR, averaged in HSV space, returned as an RGB vector
        col = cv.cvtColor(arr, cv.COLOR_RGB2HSV)
        col = np.reshape(col, (np.prod(col.shape[:2]),3))
        col = np.mean(col,axis=0).astype('uint8')
        return np.squeeze(cv.cvtColor(col[None,None,:],cv.COLOR_HSV2RGB))
    def invert(self, rgb):
        # NOTE(review): for uint8 input `127 + rgb` wraps modulo 256 rather
        # than computing 255 - rgb -- presumably a deliberate "rough"
        # inversion; confirm before changing.
        rgb = 127 + rgb
        return rgb
    def complement(self, rgb_color):
        """
        return a color which is complementary to the RGB_COLOR.
        """
        col_hsv = np.squeeze(cv.cvtColor(rgb_color[None,None,:], cv.COLOR_RGB2HSV))
        col_hsv[0] = col_hsv[0] + 128 #uint8 mods to 255
        col_comp = np.squeeze(cv.cvtColor(col_hsv[None,None,:],cv.COLOR_HSV2RGB))
        return col_comp
    def triangle_color(self, col1, col2):
        """
        Returns a color which is "opposite" to both col1 and col2.
        """
        col1, col2 = np.array(col1), np.array(col2)
        col1 = np.squeeze(cv.cvtColor(col1[None,None,:], cv.COLOR_RGB2HSV))
        col2 = np.squeeze(cv.cvtColor(col2[None,None,:], cv.COLOR_RGB2HSV))
        h1, h2 = col1[0], col2[0]
        if h2 < h1 : h1,h2 = h2,h1 #swap
        dh = h2-h1
        if dh < 127: dh = 255-dh
        # place the hue midway along the larger arc between h1 and h2:
        col1[0] = h1 + dh/2
        return np.squeeze(cv.cvtColor(col1[None,None,:],cv.COLOR_HSV2RGB))
    def change_value(self, col_rgb, v_std=50):
        # re-sample the V (brightness) channel, biased away from its
        # current value (probability grows with distance from x/255):
        col = np.squeeze(cv.cvtColor(col_rgb[None,None,:], cv.COLOR_RGB2HSV))
        x = col[2]
        vs = np.linspace(0,1)
        ps = np.abs(vs - x/255.0)
        ps /= np.sum(ps)
        v_rand = np.clip(np.random.choice(vs,p=ps) + 0.1*np.random.randn(),0,1)
        col[2] = 255*v_rand
        return np.squeeze(cv.cvtColor(col[None,None,:],cv.COLOR_HSV2RGB))
class Colorize(object):
    def __init__(self, model_dir='data'):#, im_path):
        # # get a list of background-images:
        # imlist = [osp.join(im_path,f) for f in os.listdir(im_path)]
        # self.bg_list = [p for p in imlist if osp.isfile(p)]
        # data-driven foreground/background color sampler:
        self.font_color = FontColor(col_file=osp.join(model_dir,'models/colors_new.cp'))
        # probabilities of different text-effects:
        # NOTE(review): only p_drop_shadow and p_border are consulted in
        # process(); the others appear unused in this file.
        self.p_bevel = 0.05 # add bevel effect to text
        self.p_outline = 0.05 # just keep the outline of the text
        self.p_drop_shadow = 0.15
        self.p_border = 0.15
        self.p_displacement = 0.30 # add background-based bump-mapping
        self.p_texture = 0.0 # use an image for coloring text
    def drop_shadow(self, alpha, theta, shift, size, op=0.80):
        """
        alpha : alpha layer whose shadow need to be cast
        theta : [0,2pi] -- the shadow direction
        shift : shift in pixels of the shadow
        size : size of the GaussianBlur filter
        op : opacity of the shadow (multiplying factor)
        @return : alpha of the shadow layer
                  (it is assumed that the color is black/white)
        """
        # GaussianBlur requires an odd, positive kernel size:
        if size%2==0:
            size -= 1
            size = max(1,size)
        shadow = cv.GaussianBlur(alpha,(size,size),0)
        # displace the blurred mask along direction theta:
        [dx,dy] = shift * np.array([-np.sin(theta), np.cos(theta)])
        shadow = op*sii.shift(shadow, shift=[dx,dy],mode='constant',cval=0)
        return shadow.astype('uint8')
def border(self, alpha, size, kernel_type='RECT'):
"""
alpha : alpha layer of the text
size : size of the kernel
kernel_type : one of [rect,ellipse,cross]
@return : alpha layer of the border (color to be added externally).
"""
kdict = {'RECT':cv.MORPH_RECT, 'ELLIPSE':cv.MORPH_ELLIPSE,
'CROSS':cv.MORPH_CROSS}
kernel = cv.getStructuringElement(kdict[kernel_type],(size,size))
border = cv.dilate(alpha,kernel,iterations=1) # - alpha
return border
def blend(self,cf,cb,mode='normal'):
return cf
    def merge_two(self,fore,back,blend_type=None):
        """
        merge two FOREground and BACKground layers.
        ref: https://en.wikipedia.org/wiki/Alpha_compositing
        ref: Chapter 7 (pg. 440 and pg. 444):
             http://partners.adobe.com/public/developer/en/pdf/PDFReference.pdf
        """
        # normalize alphas to [0,1]:
        a_f = fore.alpha/255.0
        a_b = back.alpha/255.0
        c_f = fore.color
        c_b = back.color
        # resulting alpha of the standard "over" operator:
        a_r = a_f + a_b - a_f*a_b
        if blend_type != None:
            # composite with an explicit blend of the two colors:
            c_blend = self.blend(c_f, c_b, blend_type)
            c_r = ( ((1-a_f)*a_b)[:,:,None] * c_b
                    + ((1-a_b)*a_f)[:,:,None] * c_f
                    + (a_f*a_b)[:,:,None] * c_blend )
        else:
            # plain (non-premultiplied) alpha compositing:
            c_r = ( ((1-a_f)*a_b)[:,:,None] * c_b
                    + a_f[:,:,None]*c_f )
        return Layer((255*a_r).astype('uint8'), c_r.astype('uint8'))
def merge_down(self, layers, blends=None):
"""
layers : [l1,l2,...ln] : a list of LAYER objects.
l1 is on the top, ln is the bottom-most layer.
blend : the type of blend to use. Should be n-1.
use None for plain alpha blending.
Note : (1) it assumes that all the layers are of the SAME SIZE.
@return : a single LAYER type object representing the merged-down image
"""
nlayers = len(layers)
if nlayers > 1:
[n,m] = layers[0].alpha.shape[:2]
out_layer = layers[-1]
for i in range(-2,-nlayers-1,-1):
blend=None
if blends is not None:
blend = blends[i+1]
out_layer = self.merge_two(fore=layers[i], back=out_layer,blend_type=blend)
return out_layer
else:
return layers[0]
def resize_im(self, im, osize):
return np.array(Image.fromarray(im).resize(osize[::-1], Image.BICUBIC))
def occlude(self):
"""
somehow add occlusion to text.
"""
pass
    def color_border(self, col_text, col_bg):
        """
        Decide on a color for the border:
            - could be the same as text-color but lower/higher 'VALUE' component.
            - could be the same as bg-color but lower/higher 'VALUE'.
            - could be 'mid-way' color b/w text & bg colors.
        """
        choice = np.random.choice(3)
        # reduce the text patch to its mean HSV color:
        col_text = cv.cvtColor(col_text, cv.COLOR_RGB2HSV)
        col_text = np.reshape(col_text, (np.prod(col_text.shape[:2]),3))
        col_text = np.mean(col_text,axis=0).astype('uint8')
        vs = np.linspace(0,1)
        def get_sample(x):
            # sample a new channel value biased away from x/255:
            ps = np.abs(vs - x/255.0)
            ps /= np.sum(ps)
            v_rand = np.clip(np.random.choice(vs,p=ps) + 0.1*np.random.randn(),0,1)
            return 255*v_rand
        # first choose a color, then inc/dec its VALUE:
        if choice==0:
            # increase/decrease saturation:
            col_text[0] = get_sample(col_text[0]) # saturation
            col_text = np.squeeze(cv.cvtColor(col_text[None,None,:],cv.COLOR_HSV2RGB))
        elif choice==1:
            # get the complementary color to text:
            col_text = np.squeeze(cv.cvtColor(col_text[None,None,:],cv.COLOR_HSV2RGB))
            col_text = self.font_color.complement(col_text)
        else:
            # choose a mid-way color:
            col_bg = cv.cvtColor(col_bg, cv.COLOR_RGB2HSV)
            col_bg = np.reshape(col_bg, (np.prod(col_bg.shape[:2]),3))
            col_bg = np.mean(col_bg,axis=0).astype('uint8')
            col_bg = np.squeeze(cv.cvtColor(col_bg[None,None,:],cv.COLOR_HSV2RGB))
            col_text = np.squeeze(cv.cvtColor(col_text[None,None,:],cv.COLOR_HSV2RGB))
            col_text = self.font_color.triangle_color(col_text,col_bg)
        # now change the VALUE channel:
        col_text = np.squeeze(cv.cvtColor(col_text[None,None,:],cv.COLOR_RGB2HSV))
        col_text[2] = get_sample(col_text[2]) # value
        return np.squeeze(cv.cvtColor(col_text[None,None,:],cv.COLOR_HSV2RGB))
def color_text(self, text_arr, h, bg_arr):
"""
Decide on a color for the text:
- could be some other random image.
- could be a color based on the background.
this color is sampled from a dictionary built
from text-word images' colors. The VALUE channel
is randomized.
H : minimum height of a character
"""
bg_col,fg_col,i = 0,0,0
fg_col,bg_col = self.font_color.sample_from_data(bg_arr)
return Layer(alpha=text_arr, color=fg_col), fg_col, bg_col
    def process(self, text_arr, bg_arr, min_h):
        """
        text_arr : one alpha mask : nxm, uint8
        bg_arr : background image: nxmx3, uint8
        min_h : height of the smallest character (px)

        return text_arr blit onto bg_arr.
        """
        # decide on a color for the text:
        l_text, fg_col, bg_col = self.color_text(text_arr, min_h, bg_arr)
        # flat background layer using the background's mean color:
        bg_col = np.mean(np.mean(bg_arr,axis=0),axis=0)
        l_bg = Layer(alpha=255*np.ones_like(text_arr,'uint8'),color=bg_col)
        # slightly randomize the overall text opacity:
        l_text.alpha = l_text.alpha * np.clip(0.88 + 0.1*np.random.randn(), 0.72, 1.0)
        layers = [l_text]
        blends = []
        # add border:
        if np.random.rand() < self.p_border:
            # border thickness scales with the character height:
            if min_h <= 15 : bsz = 1
            elif 15 < min_h < 30: bsz = 3
            else: bsz = 5
            border_a = self.border(l_text.alpha, size=bsz)
            l_border = Layer(border_a, self.color_border(l_text.color,l_bg.color))
            layers.append(l_border)
            blends.append('normal')
        # add shadow:
        if np.random.rand() < self.p_drop_shadow:
            # shadow gaussian size:
            if min_h <= 15 : bsz = 1
            elif 15 < min_h < 30: bsz = 3
            else: bsz = 5
            # shadow angle:
            theta = np.pi/4 * np.random.choice([1,3,5,7]) + 0.5*np.random.randn()
            # shadow shift:
            if min_h <= 15 : shift = 2
            elif 15 < min_h < 30: shift = 7+np.random.randn()
            else: shift = 15 + 3*np.random.randn()
            # opacity:
            op = 0.50 + 0.1*np.random.randn()
            shadow = self.drop_shadow(l_text.alpha, theta, shift, 3*bsz, op)
            l_shadow = Layer(shadow, 0)
            layers.append(l_shadow)
            blends.append('normal')
        # bottom-most layer: flat mean-background color:
        l_bg = Layer(alpha=255*np.ones_like(text_arr,'uint8'), color=bg_col)
        layers.append(l_bg)
        blends.append('normal')
        l_normal = self.merge_down(layers,blends)
        # now do poisson image editing:
        l_bg = Layer(alpha=255*np.ones_like(text_arr,'uint8'), color=bg_arr)
        l_out = blit_images(l_normal.color,l_bg.color.copy())
        # plt.subplot(1,3,1)
        # plt.imshow(l_normal.color)
        # plt.subplot(1,3,2)
        # plt.imshow(l_bg.color)
        # plt.subplot(1,3,3)
        # plt.imshow(l_out)
        # plt.show()
        if l_out is None:
            # poisson recontruction produced
            # imperceptible text. In this case,
            # just do a normal blend:
            layers[-1] = l_bg
            return self.merge_down(layers,blends).color
        return l_out
    def check_perceptible(self, txt_mask, bg, txt_bg):
        """
        --- DEPRECATED; USE GRADIENT CHECKING IN POISSON-RECONSTRUCT INSTEAD ---

        checks if the text after merging with background
        is still visible.
        txt_mask (hxw) : binary image of text -- 255 where text is present
                                                 0 elsewhere
        bg (hxwx3) : original background image WITHOUT any text.
        txt_bg (hxwx3) : image with text.
        """
        bgo,txto = bg.copy(), txt_bg.copy()
        txt_mask = txt_mask.astype('bool')
        # compare in Lab space, only at text pixels:
        bg = cv.cvtColor(bg.copy(), cv.COLOR_RGB2Lab)
        txt_bg = cv.cvtColor(txt_bg.copy(), cv.COLOR_RGB2Lab)
        bg_px = bg[txt_mask,:]
        txt_px = txt_bg[txt_mask,:]
        bg_px[:,0] *= 100.0/255.0 #rescale - L channel
        txt_px[:,0] *= 100.0/255.0
        diff = np.linalg.norm(bg_px-txt_px,ord=None,axis=1)
        diff = np.percentile(diff,[10,30,50,70,90])
        print "color diff percentile :", diff
        return diff, (bgo,txto)
def color(self, bg_arr, text_arr, hs, place_order=None, pad=20):
    """
    Return colorized text image.

    bg_arr      : background image (grayscale or RGB).
    text_arr    : list of (n x m) numpy text alpha masks (uint8).
    hs          : list of minimum heights (scalar) of characters in each text-array.
    place_order : order in which the masks are composited (defaults to 0..N-1).
    pad         : maximum padding (pixels) added around each text patch.
    return      : nxmx3 rgb colorized text-image.
    """
    bg_arr = bg_arr.copy()
    if bg_arr.ndim == 2 or bg_arr.shape[2] == 1:  # grayscale image: replicate to 3 channels
        bg_arr = np.repeat(bg_arr[:, :, None], 3, 2)

    # get the canvas size:
    canvas_sz = np.array(bg_arr.shape[:2])

    # initialize the placement order:
    if place_order is None:
        place_order = np.array(xrange(len(text_arr)))

    rendered = []
    for i in place_order[::-1]:
        # get the "location" of the text in the image:
        ## this is the minimum x and y coordinates of text:
        loc = np.where(text_arr[i])
        lx, ly = np.min(loc[0]), np.min(loc[1])
        mx, my = np.max(loc[0]), np.max(loc[1])
        l = np.array([lx, ly])
        m = np.array([mx, my]) - l + 1
        text_patch = text_arr[i][l[0]:l[0]+m[0], l[1]:l[1]+m[1]]

        # figure out padding, clipped so the patch never runs off the canvas:
        ext = canvas_sz - (l + m)
        num_pad = pad*np.ones(4, dtype='int32')
        num_pad[:2] = np.minimum(num_pad[:2], l)
        num_pad[2:] = np.minimum(num_pad[2:], ext)
        text_patch = np.pad(text_patch, pad_width=((num_pad[0], num_pad[2]), (num_pad[1], num_pad[3])), mode='constant')
        l -= num_pad[:2]

        w, h = text_patch.shape
        bg = bg_arr[l[0]:l[0]+w, l[1]:l[1]+h, :]
        # colorize the patch against its local background
        rdr0 = self.process(text_patch, bg, hs[i])
        rendered.append(rdr0)

        bg_arr[l[0]:l[0]+w, l[1]:l[1]+h, :] = rdr0  # rendered[-1]
        # NOTE(review): this return sits inside the for-loop, so only the
        # first entry of place_order[::-1] is ever colorized per call --
        # confirm whether the early return is intentional before changing it.
        return bg_arr

    return bg_arr
|
ankush-me/SynthText
|
colorize3_poisson.py
|
Python
|
apache-2.0
| 17,259
|
[
"Gaussian"
] |
d90680078439cf60f5973f29985fe37c17315cb53c080b0b6f82ab8b519b089d
|
#!/usr/bin/env python
"""dirac-docs-build-commands.py
Build scripts documentation from the scripts docstrings. The scripts are not
very uniform
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
from diracdoctools.cmd.commandReference import run
from diracdoctools.Config import CLParser
# Parse the CLI options, build the scripts reference documentation, and
# propagate run()'s return code as the process exit status.
sys.exit(run(**(CLParser().optionDict())))
|
yujikato/DIRAC
|
docs/diracdoctools/scripts/dirac-docs-build-commands.py
|
Python
|
gpl-3.0
| 411
|
[
"DIRAC"
] |
cdf69cedf9364ee797e4cdee6fbbda870a52101a9aaa8ae7a647775abaa45490
|
#
# ColorHandPose3DNetwork - Network for estimating 3D Hand Pose from a single RGB Image
# Copyright (C) 2017 Christian Zimmermann
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function, unicode_literals
import tensorflow as tf
from tensorflow.python import pywrap_tensorflow
import numpy as np
import math
import cv2
class NetworkOps(object):
    """ Operations that are frequently used within networks. """
    # negative-branch slope shared by every leaky-ReLU activation below
    neg_slope_of_relu = 0.01

    @classmethod
    def leaky_relu(cls, tensor, name='relu'):
        """ Leaky ReLU: elementwise max(x, neg_slope_of_relu * x). """
        out_tensor = tf.maximum(tensor, cls.neg_slope_of_relu*tensor, name=name)
        return out_tensor

    @classmethod
    def conv(cls, in_tensor, layer_name, kernel_size, stride, out_chan, trainable=True):
        """ 2D convolution (SAME padding) with bias; variables are created
            under the variable scope layer_name. """
        with tf.variable_scope(layer_name):
            in_size = in_tensor.get_shape().as_list()

            strides = [1, stride, stride, 1]
            kernel_shape = [kernel_size, kernel_size, in_size[3], out_chan]

            # conv
            kernel = tf.get_variable('weights', kernel_shape, tf.float32,
                                     tf.contrib.layers.xavier_initializer_conv2d(), trainable=trainable, collections=['wd', 'variables', 'filters'])
            tmp_result = tf.nn.conv2d(in_tensor, kernel, strides, padding='SAME')

            # bias (small positive init)
            biases = tf.get_variable('biases', [kernel_shape[3]], tf.float32,
                                     tf.constant_initializer(0.0001), trainable=trainable, collections=['wd', 'variables', 'biases'])
            out_tensor = tf.nn.bias_add(tmp_result, biases, name='out')

            return out_tensor

    @classmethod
    def conv_relu(cls, in_tensor, layer_name, kernel_size, stride, out_chan, trainable=True):
        """ Convolution followed by a leaky ReLU. """
        tensor = cls.conv(in_tensor, layer_name, kernel_size, stride, out_chan, trainable)
        out_tensor = cls.leaky_relu(tensor, name='out')
        return out_tensor

    @classmethod
    def max_pool(cls, bottom, name='pool'):
        """ 2x2 max pooling with stride 2 and VALID padding. """
        pooled = tf.nn.max_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                                padding='VALID', name=name)
        return pooled

    @classmethod
    def upconv(cls, in_tensor, layer_name, output_shape, kernel_size, stride, trainable=True):
        """ Transposed convolution whose kernel is initialized to bilinear
            upsampling (see get_deconv_filter); channel count is preserved. """
        with tf.variable_scope(layer_name):
            in_size = in_tensor.get_shape().as_list()

            kernel_shape = [kernel_size, kernel_size, in_size[3], in_size[3]]
            strides = [1, stride, stride, 1]

            # conv
            kernel = cls.get_deconv_filter(kernel_shape, trainable)
            tmp_result = tf.nn.conv2d_transpose(value=in_tensor, filter=kernel, output_shape=output_shape,
                                                strides=strides, padding='SAME')

            # bias
            biases = tf.get_variable('biases', [kernel_shape[2]], tf.float32,
                                     tf.constant_initializer(0.0), trainable=trainable, collections=['wd', 'variables', 'biases'])
            out_tensor = tf.nn.bias_add(tmp_result, biases)
            return out_tensor

    @classmethod
    def upconv_relu(cls, in_tensor, layer_name, output_shape, kernel_size, stride, trainable=True):
        """ Transposed convolution followed by a leaky ReLU. """
        tensor = cls.upconv(in_tensor, layer_name, output_shape, kernel_size, stride, trainable)
        out_tensor = cls.leaky_relu(tensor, name='out')
        return out_tensor

    @staticmethod
    def get_deconv_filter(f_shape, trainable):
        """ Create the deconv weight variable initialized to bilinear
            interpolation, mapping each input channel to the same output
            channel (weights[:, :, i, i] = bilinear kernel). """
        width = f_shape[0]
        height = f_shape[1]
        f = math.ceil(width/2.0)
        c = (2 * f - 1 - f % 2) / (2.0 * f)
        bilinear = np.zeros([f_shape[0], f_shape[1]])
        for x in range(width):
            for y in range(height):
                # separable triangle (bilinear) weight
                value = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
                bilinear[x, y] = value
        weights = np.zeros(f_shape)
        for i in range(f_shape[2]):
            weights[:, :, i, i] = bilinear

        init = tf.constant_initializer(value=weights,
                                       dtype=tf.float32)
        return tf.get_variable(name="weights", initializer=init,
                               shape=weights.shape, trainable=trainable, collections=['wd', 'variables', 'filters'])

    @staticmethod
    def fully_connected(in_tensor, layer_name, out_chan, trainable=True):
        """ Fully connected layer on a rank-2 input, with NaN/Inf checks
            (tf.check_numerics) on both weights and biases. """
        with tf.variable_scope(layer_name):
            in_size = in_tensor.get_shape().as_list()
            assert len(in_size) == 2, 'Input to a fully connected layer must be a vector.'
            weights_shape = [in_size[1], out_chan]

            # weight matrix
            weights = tf.get_variable('weights', weights_shape, tf.float32,
                                      tf.contrib.layers.xavier_initializer(), trainable=trainable)
            weights = tf.check_numerics(weights, 'weights: %s' % layer_name)

            # bias
            biases = tf.get_variable('biases', [out_chan], tf.float32,
                                     tf.constant_initializer(0.0001), trainable=trainable)
            biases = tf.check_numerics(biases, 'biases: %s' % layer_name)

            out_tensor = tf.matmul(in_tensor, weights) + biases
            return out_tensor

    @classmethod
    def fully_connected_relu(cls, in_tensor, layer_name, out_chan, trainable=True):
        """ Fully connected layer followed by a leaky ReLU. """
        tensor = cls.fully_connected(in_tensor, layer_name, out_chan, trainable)
        out_tensor = tf.maximum(tensor, cls.neg_slope_of_relu*tensor, name='out')
        return out_tensor

    @staticmethod
    def dropout(in_tensor, keep_prob, evaluation):
        """ Dropout: Each neuron is dropped independently.
            When evaluation is true, the keep probability is forced to 1.0. """
        with tf.variable_scope('dropout'):
            tensor_shape = in_tensor.get_shape().as_list()
            out_tensor = tf.cond(evaluation,
                                 lambda: tf.nn.dropout(in_tensor, 1.0,
                                                       noise_shape=tensor_shape),
                                 lambda: tf.nn.dropout(in_tensor, keep_prob,
                                                       noise_shape=tensor_shape))
            return out_tensor

    @staticmethod
    def spatial_dropout(in_tensor, keep_prob, evaluation):
        """ Spatial dropout: Not each neuron is dropped independently, but feature map wise.
            The noise shape [batch, 1, 1, channels] drops entire feature maps. """
        with tf.variable_scope('spatial_dropout'):
            tensor_shape = in_tensor.get_shape().as_list()
            out_tensor = tf.cond(evaluation,
                                 lambda: tf.nn.dropout(in_tensor, 1.0,
                                                       noise_shape=tensor_shape),
                                 lambda: tf.nn.dropout(in_tensor, keep_prob,
                                                       noise_shape=[tensor_shape[0], 1, 1, tensor_shape[3]]))
            return out_tensor
def crop_image_from_xy(image, crop_location, crop_size, scale=1.0):
    """
    Crops an image. When factor is not given does an central crop.

    Inputs:
        image: 4D tensor, [batch, height, width, channels] which will be cropped in height and width dimension
        crop_location: tensor, [batch, 2] which represent the height and width location of the crop
        crop_size: int, describes the extension of the crop
        scale: scalar (or per-batch) zoom; the source window is crop_size/scale
               pixels wide and is resampled back to crop_size.
    Outputs:
        image_crop: 4D tensor, [batch, crop_size, crop_size, channels]
    """
    with tf.name_scope('crop_image_from_xy'):
        s = image.get_shape().as_list()
        # fix: removed leftover debug print of image.get_shape()
        assert len(s) == 4, "Image needs to be of shape [batch, width, height, channel]"

        scale = tf.reshape(scale, [-1])
        crop_location = tf.cast(crop_location, tf.float32)
        crop_location = tf.reshape(crop_location, [s[0], 2])
        crop_size = tf.cast(crop_size, tf.float32)

        # window extent in source pixels, centered on crop_location
        crop_size_scaled = crop_size / scale

        y1 = crop_location[:, 0] - crop_size_scaled//2
        y2 = y1 + crop_size_scaled
        x1 = crop_location[:, 1] - crop_size_scaled//2
        x2 = x1 + crop_size_scaled

        # crop_and_resize expects box coordinates normalized to [0, 1]
        y1 /= s[1]
        y2 /= s[1]
        x1 /= s[2]
        x2 /= s[2]

        boxes = tf.stack([y1, x1, y2, x2], -1)

        crop_size = tf.cast(tf.stack([crop_size, crop_size]), tf.int32)
        box_ind = tf.range(s[0])
        image_c = tf.image.crop_and_resize(tf.cast(image, tf.float32), boxes, box_ind, crop_size, name='crop')
        return image_c
def find_max_location(scoremap):
    """ Returns the coordinates of the given scoremap with maximum value.

        Accepts a 2D, 3D or 4D scoremap; the result is an integer tensor
        [batch, 2] holding the (row, col) argmax of each map. """
    with tf.variable_scope('find_max_location'):
        s = scoremap.get_shape().as_list()
        if len(s) == 4:
            scoremap = tf.squeeze(scoremap, [3])  # drop trailing channel dim
        if len(s) == 2:
            scoremap = tf.expand_dims(scoremap, 0)  # add a batch dim

        s = scoremap.get_shape().as_list()
        assert len(s) == 3, "Scoremap must be 3D."
        assert (s[0] < s[1]) and (s[0] < s[2]), "Scoremap must be [Batch, Width, Height]"

        # my meshgrid: X/Y hold the row/column index at every grid position
        x_range = tf.expand_dims(tf.range(s[1]), 1)
        y_range = tf.expand_dims(tf.range(s[2]), 0)
        X = tf.tile(x_range, [1, s[2]])
        Y = tf.tile(y_range, [s[1], 1])

        x_vec = tf.reshape(X, [-1])
        y_vec = tf.reshape(Y, [-1])
        scoremap_vec = tf.reshape(scoremap, [s[0], -1])
        # argmax over the flattened map, then look the 2D coordinates back up
        max_ind_vec = tf.cast(tf.argmax(scoremap_vec, dimension=1), tf.int32)

        xy_loc = list()
        for i in range(s[0]):
            x_loc = tf.reshape(x_vec[max_ind_vec[i]], [1])
            y_loc = tf.reshape(y_vec[max_ind_vec[i]], [1])
            xy_loc.append(tf.concat([x_loc, y_loc], 0))

        xy_loc = tf.stack(xy_loc, 0)
        return xy_loc
def single_obj_scoremap(scoremap):
    """ Applies my algorithm to figure out the most likely object from a given segmentation scoremap.

        Starting from the foreground argmax, the object mask is grown by
        repeated dilation masked with the rounded foreground map, which keeps
        the connected region containing the most confident pixel. """
    with tf.variable_scope('single_obj_scoremap'):
        filter_size = 21
        s = scoremap.get_shape().as_list()
        assert len(s) == 4, "Scoremap must be 4D."

        scoremap_softmax = tf.nn.softmax(scoremap)  # B, H, W, C --> normalizes across last dimension
        scoremap_fg = tf.reduce_max(scoremap_softmax[:, :, :, 1:], 3)  # B, H, W
        detmap_fg = tf.round(scoremap_fg)  # B, H, W

        # find maximum in the fg scoremap
        max_loc = find_max_location(scoremap_fg)

        # use maximum to start "growing" our objectmap
        objectmap_list = list()
        kernel_dil = tf.ones((filter_size, filter_size, 1)) / float(filter_size*filter_size)
        for i in range(s[0]):
            # create initial objectmap (put a one at the maximum)
            sparse_ind = tf.reshape(max_loc[i, :], [1, 2])  # reshape that its one point with 2dim
            objectmap = tf.sparse_to_dense(sparse_ind, [s[1], s[2]], 1.0)

            # grow the map by dilation and pixelwise and
            num_passes = max(s[1], s[2]) // (filter_size//2)  # number of passes needed to make sure the map can spread over the whole image
            for j in range(num_passes):
                objectmap = tf.reshape(objectmap, [1, s[1], s[2], 1])
                objectmap_dil = tf.nn.dilation2d(objectmap, kernel_dil, [1, 1, 1, 1], [1, 1, 1, 1], 'SAME')
                objectmap_dil = tf.reshape(objectmap_dil, [s[1], s[2]])
                # keep only dilated pixels that are also detected foreground
                objectmap = tf.round(tf.multiply(detmap_fg[i, :, :], objectmap_dil))

            objectmap = tf.reshape(objectmap, [s[1], s[2], 1])
            objectmap_list.append(objectmap)

        objectmap = tf.stack(objectmap_list)
        return objectmap
def calc_center_bb(binary_class_mask):
    """ Returns the center of mass coordinates for the given binary_class_mask.

        Returns (center [B, 2], bounding box [B, 2, 2], crop_size [B, 1]).
        Empty masks produce non-finite extrema and fall back to the fixed
        defaults [160, 160] / [100]. """
    with tf.variable_scope('calc_center_bb'):
        binary_class_mask = tf.cast(binary_class_mask, tf.int32)
        binary_class_mask = tf.equal(binary_class_mask, 1)
        s = binary_class_mask.get_shape().as_list()
        if len(s) == 4:
            binary_class_mask = tf.squeeze(binary_class_mask, [3])

        s = binary_class_mask.get_shape().as_list()
        assert len(s) == 3, "binary_class_mask must be 3D."
        assert (s[0] < s[1]) and (s[0] < s[2]), "binary_class_mask must be [Batch, Width, Height]"

        # my meshgrid
        x_range = tf.expand_dims(tf.range(s[1]), 1)
        y_range = tf.expand_dims(tf.range(s[2]), 0)
        X = tf.tile(x_range, [1, s[2]])
        Y = tf.tile(y_range, [s[1], 1])

        bb_list = list()
        center_list = list()
        crop_size_list = list()
        for i in range(s[0]):
            # coordinates of every mask pixel of this batch element
            X_masked = tf.cast(tf.boolean_mask(X, binary_class_mask[i, :, :]), tf.float32)
            Y_masked = tf.cast(tf.boolean_mask(Y, binary_class_mask[i, :, :]), tf.float32)

            x_min = tf.reduce_min(X_masked)
            x_max = tf.reduce_max(X_masked)
            y_min = tf.reduce_min(Y_masked)
            y_max = tf.reduce_max(Y_masked)

            start = tf.stack([x_min, y_min])
            end = tf.stack([x_max, y_max])
            bb = tf.stack([start, end], 1)
            bb_list.append(bb)

            center_x = 0.5*(x_max + x_min)
            center_y = 0.5*(y_max + y_min)
            center = tf.stack([center_x, center_y], 0)

            # empty masks yield inf/nan extrema -> substitute the default center
            center = tf.cond(tf.reduce_all(tf.is_finite(center)), lambda: center,
                             lambda: tf.constant([160.0, 160.0]))
            center.set_shape([2])
            center_list.append(center)

            crop_size_x = x_max - x_min
            crop_size_y = y_max - y_min
            # square crop: take the larger bounding-box extent
            crop_size = tf.expand_dims(tf.maximum(crop_size_x, crop_size_y), 0)
            crop_size = tf.cond(tf.reduce_all(tf.is_finite(crop_size)), lambda: crop_size,
                                lambda: tf.constant([100.0]))
            crop_size.set_shape([1])
            crop_size_list.append(crop_size)

        bb = tf.stack(bb_list)
        center = tf.stack(center_list)
        crop_size = tf.stack(crop_size_list)
        return center, bb, crop_size
def detect_keypoints(scoremaps):
    """ Performs detection per scoremap for the hands keypoints.

        Takes an [H, W, C] (or squeezable 4D) array of scoremaps and returns
        a [C, 2] array with the (row, col) argmax of every channel.
    """
    if len(scoremaps.shape) == 4:
        scoremaps = np.squeeze(scoremaps)
    shape = scoremaps.shape
    assert len(shape) == 3, "This function was only designed for 3D Scoremaps."
    assert (shape[2] < shape[1]) and (shape[2] < shape[0]), "Probably the input is not correct, because [H, W, C] is expected."

    coords = np.zeros((shape[2], 2))
    for kp in range(shape[2]):
        # flatten, argmax, and map the flat index back to 2D coordinates
        coords[kp] = np.unravel_index(np.argmax(scoremaps[:, :, kp]),
                                      (shape[0], shape[1]))
    return coords
def trafo_coords(keypoints_crop_coords, centers, scale, crop_size):
    """ Transforms crop-local keypoint coords into global image coordinates. """
    # work on a copy so the caller's array is left untouched
    global_coords = np.copy(keypoints_crop_coords)
    # undo the crop offset, undo the zoom factor, re-apply the crop center
    global_coords -= crop_size // 2
    global_coords /= scale
    global_coords += centers
    return global_coords
def plot_hand(coords_hw, axis, color_fixed=None, linewidth='1'):
    """ Plots a hand stick figure into a matplotlib figure.

        coords_hw   : [21, 2] keypoint coordinates in (row, col) order.
        axis        : matplotlib axis to draw into.
        color_fixed : optional single color used for every bone instead of
                      the per-bone color map.
        linewidth   : line width forwarded to matplotlib.
    """
    # 20-entry RGB color map, one color per bone, running from dark blue to dark red
    colors = np.array([[0., 0., 0.5],
                       [0., 0., 0.73172906],
                       [0., 0., 0.96345811],
                       [0., 0.12745098, 1.],
                       [0., 0.33137255, 1.],
                       [0., 0.55098039, 1.],
                       [0., 0.75490196, 1.],
                       [0.06008855, 0.9745098, 0.90765338],
                       [0.22454143, 1., 0.74320051],
                       [0.40164453, 1., 0.56609741],
                       [0.56609741, 1., 0.40164453],
                       [0.74320051, 1., 0.22454143],
                       [0.90765338, 1., 0.06008855],
                       [1., 0.82861293, 0.],
                       [1., 0.63979666, 0.],
                       [1., 0.43645606, 0.],
                       [1., 0.2476398, 0.],
                       [0.96345811, 0.0442992, 0.],
                       [0.73172906, 0., 0.],
                       [0.5, 0., 0.]])

    # define connections and colors of the bones
    # (each group of four bones chains keypoint 0 out to a fingertip --
    #  keypoint 0 is presumably the wrist/palm root; TODO confirm)
    bones = [((0, 4), colors[0, :]),
             ((4, 3), colors[1, :]),
             ((3, 2), colors[2, :]),
             ((2, 1), colors[3, :]),

             ((0, 8), colors[4, :]),
             ((8, 7), colors[5, :]),
             ((7, 6), colors[6, :]),
             ((6, 5), colors[7, :]),

             ((0, 12), colors[8, :]),
             ((12, 11), colors[9, :]),
             ((11, 10), colors[10, :]),
             ((10, 9), colors[11, :]),

             ((0, 16), colors[12, :]),
             ((16, 15), colors[13, :]),
             ((15, 14), colors[14, :]),
             ((14, 13), colors[15, :]),

             ((0, 20), colors[16, :]),
             ((20, 19), colors[17, :]),
             ((19, 18), colors[18, :]),
             ((18, 17), colors[19, :])]

    for connection, color in bones:
        coord1 = coords_hw[connection[0], :]
        coord2 = coords_hw[connection[1], :]
        coords = np.stack([coord1, coord2])
        if color_fixed is None:
            # note the axis swap: column (index 1) on x, row (index 0) on y
            axis.plot(coords[:, 1], coords[:, 0], color=color, linewidth=linewidth)
        else:
            axis.plot(coords[:, 1], coords[:, 0], color_fixed, linewidth=linewidth)
def plot_hand_3d(coords_xyz, axis, color_fixed=None, linewidth='1'):
    """ Plots a hand stick figure into a matplotlib figure.

        coords_xyz  : [21, 3] keypoint coordinates.
        axis        : matplotlib 3D axis to draw into.
        color_fixed : optional single color used for every bone instead of
                      the per-bone color map.
        linewidth   : line width forwarded to matplotlib.
    """
    # 20-entry RGB color map, one color per bone (same map as plot_hand)
    colors = np.array([[0., 0., 0.5],
                       [0., 0., 0.73172906],
                       [0., 0., 0.96345811],
                       [0., 0.12745098, 1.],
                       [0., 0.33137255, 1.],
                       [0., 0.55098039, 1.],
                       [0., 0.75490196, 1.],
                       [0.06008855, 0.9745098, 0.90765338],
                       [0.22454143, 1., 0.74320051],
                       [0.40164453, 1., 0.56609741],
                       [0.56609741, 1., 0.40164453],
                       [0.74320051, 1., 0.22454143],
                       [0.90765338, 1., 0.06008855],
                       [1., 0.82861293, 0.],
                       [1., 0.63979666, 0.],
                       [1., 0.43645606, 0.],
                       [1., 0.2476398, 0.],
                       [0.96345811, 0.0442992, 0.],
                       [0.73172906, 0., 0.],
                       [0.5, 0., 0.]])

    # define connections and colors of the bones (same topology as plot_hand)
    bones = [((0, 4), colors[0, :]),
             ((4, 3), colors[1, :]),
             ((3, 2), colors[2, :]),
             ((2, 1), colors[3, :]),

             ((0, 8), colors[4, :]),
             ((8, 7), colors[5, :]),
             ((7, 6), colors[6, :]),
             ((6, 5), colors[7, :]),

             ((0, 12), colors[8, :]),
             ((12, 11), colors[9, :]),
             ((11, 10), colors[10, :]),
             ((10, 9), colors[11, :]),

             ((0, 16), colors[12, :]),
             ((16, 15), colors[13, :]),
             ((15, 14), colors[14, :]),
             ((14, 13), colors[15, :]),

             ((0, 20), colors[16, :]),
             ((20, 19), colors[17, :]),
             ((19, 18), colors[18, :]),
             ((18, 17), colors[19, :])]

    for connection, color in bones:
        coord1 = coords_xyz[connection[0], :]
        coord2 = coords_xyz[connection[1], :]
        coords = np.stack([coord1, coord2])
        if color_fixed is None:
            axis.plot(coords[:, 0], coords[:, 1], coords[:, 2], color=color, linewidth=linewidth)
        else:
            axis.plot(coords[:, 0], coords[:, 1], coords[:, 2], color_fixed, linewidth=linewidth)

    # fixed camera orientation for all plots
    axis.view_init(azim=-90., elev=90.)
def plot_hand_2d(coords_hw, image, color_fixed=None, linewidth=2):
    """ Plots a hand stick figure into a matplotlib figure.

        Draws the bones directly into `image` (in place) with cv2.line.

        coords_hw   : [21, 2] keypoint coordinates in (row, col) order.
        image       : image array modified in place.
        color_fixed : optional single color used for every bone instead of
                      the per-bone color list.
        linewidth   : integer line thickness for cv2.line.
    """
    # 20-entry integer color tuples for cv2 drawing, one per bone.
    # NOTE(review): compared with the float color table in plot_hand (scaled
    # by 255), several entries here contain 1 where 255 would be expected
    # (e.g. (102, 1, 144) vs ~(102, 255, 144)) and some channel orders differ
    # -- confirm these values are intended before relying on the palette.
    colors = [(0, 0, 127),
              (0, 0, 187),
              (0, 0, 246),
              (0, 32, 255),
              (0, 85, 255),
              (0, 140, 255),
              (0, 192, 255),
              (15, 248, 231),
              (57, 255, 190),
              (102, 1, 144),
              (144, 1, 102),
              (190, 1, 57),
              (231, 1, 15),
              (1, 211, 0),
              (1, 163, 0),
              (1, 111, 0),
              (1, 63, 0),
              (246, 11, 0),
              (187, 0, 0),
              (127, 0, 0)]

    # define connections and colors of the bones (same topology as plot_hand)
    bones = [((0, 4), colors[0]),
             ((4, 3), colors[1]),
             ((3, 2), colors[2]),
             ((2, 1), colors[3]),

             ((0, 8), colors[4]),
             ((8, 7), colors[5]),
             ((7, 6), colors[6]),
             ((6, 5), colors[7]),

             ((0, 12), colors[8]),
             ((12, 11), colors[9]),
             ((11, 10), colors[10]),
             ((10, 9), colors[11]),

             ((0, 16), colors[12]),
             ((16, 15), colors[13]),
             ((15, 14), colors[14]),
             ((14, 13), colors[15]),

             ((0, 20), colors[16]),
             ((20, 19), colors[17]),
             ((19, 18), colors[18]),
             ((18, 17), colors[19])]

    for connection, color in bones:
        coord1 = coords_hw[connection[0], :]
        coord2 = coords_hw[connection[1], :]
        coords = np.stack([coord1, coord2])
        # cv2 expects integer (x, y) points -> swap (row, col) to (col, row)
        coord1_t = (int(coord1[1]), int(coord1[0]))
        coord2_t = (int(coord2[1]), int(coord2[0]))
        if color_fixed is None:
            cv2.line(image, coord2_t, coord1_t, color, linewidth)
        else:
            cv2.line(image, coord1_t, coord2_t, color_fixed, linewidth)
class LearningRateScheduler:
    """
    Provides scalar tensors at certain iteration as is needed for a multistep learning rate schedule.
    """
    def __init__(self, steps, values):
        # steps:  iteration boundaries at which the learning rate changes
        # values: learning rates used between consecutive boundaries
        self.steps = steps
        self.values = values

        assert len(steps)+1 == len(values), "There must be one more element in value as step."

    def get_lr(self, global_step):
        """ Returns the learning-rate tensor selected for the given global_step tensor. """
        with tf.name_scope('lr_scheduler'):
            if len(self.values) == 1:  # 1 value -> no step
                learning_rate = tf.constant(self.values[0])
            elif len(self.values) == 2:  # 2 values -> one step
                cond = tf.greater(global_step, self.steps[0])
                learning_rate = tf.where(cond, self.values[1], self.values[0])
            else:  # n values -> n-1 steps
                cond_first = tf.less(global_step, self.steps[0])

                cond_between = list()
                for ind, step in enumerate(range(0, len(self.steps)-1)):
                    cond_between.append(tf.logical_and(tf.less(global_step, self.steps[ind+1]),
                                                       tf.greater_equal(global_step, self.steps[ind])))

                cond_last = tf.greater_equal(global_step, self.steps[-1])

                cond_full = [cond_first]
                cond_full.extend(cond_between)
                cond_full.append(cond_last)

                # exactly one condition is true; select its value by masking
                # the others to zero and summing
                cond_vec = tf.stack(cond_full)
                lr_vec = tf.stack(self.values)

                learning_rate = tf.where(cond_vec, lr_vec, tf.zeros_like(lr_vec))

                learning_rate = tf.reduce_sum(learning_rate)

            return learning_rate
class EvalUtil:
    """ Util class for evaluation networks.

        Accumulates per-keypoint euclidean errors via feed() and summarizes
        them as EPE mean/median, PCK curves and AUC via get_measures().
    """
    def __init__(self, num_kp=21):
        # one error list per keypoint
        self.num_kp = num_kp
        self.data = [list() for _ in range(num_kp)]

    def feed(self, keypoint_gt, keypoint_vis, keypoint_pred):
        """ Used to feed data to the class. Stores the euclidean distance between gt and pred, when it is visible. """
        gt = np.squeeze(keypoint_gt)
        pred = np.squeeze(keypoint_pred)
        vis = np.squeeze(keypoint_vis).astype('bool')

        assert len(gt.shape) == 2
        assert len(pred.shape) == 2
        assert len(vis.shape) == 1

        # euclidean distance per keypoint
        dists = np.sqrt(np.sum(np.square(gt - pred), axis=1))

        for kp in range(gt.shape[0]):
            if vis[kp]:
                self.data[kp].append(dists[kp])

    def _get_pck(self, kp_id, threshold):
        """ Returns pck for one keypoint for the given threshold (None when no data). """
        if not self.data[kp_id]:
            return None
        errors = np.array(self.data[kp_id])
        return np.mean((errors <= threshold).astype('float'))

    def _get_epe(self, kp_id):
        """ Returns (mean, median) end point error for one keypoint (None, None when no data). """
        if not self.data[kp_id]:
            return None, None
        errors = np.array(self.data[kp_id])
        return np.mean(errors), np.median(errors)

    def get_measures(self, val_min, val_max, steps):
        """ Outputs the average mean and median error as well as the pck score. """
        thresholds = np.array(np.linspace(val_min, val_max, steps))
        # normalizer so a perfect PCK curve gives AUC 1.0
        norm_factor = np.trapz(np.ones_like(thresholds), thresholds)

        epe_means, epe_medians = list(), list()
        aucs, pck_curves = list(), list()
        for part_id in range(self.num_kp):
            mean, median = self._get_epe(part_id)
            if mean is None:
                # no valid measurement for this keypoint
                continue
            epe_means.append(mean)
            epe_medians.append(median)

            curve = np.array([self._get_pck(part_id, t) for t in thresholds])
            pck_curves.append(curve)
            aucs.append(np.trapz(curve, thresholds) / norm_factor)

        # average the per-keypoint statistics
        return (np.mean(np.array(epe_means)),
                np.mean(np.array(epe_medians)),
                np.mean(np.array(aucs)),
                np.mean(np.array(pck_curves), 0),
                thresholds)
def load_weights_from_snapshot(session, checkpoint_path, discard_list=None, rename_dict=None):
    """ Loads weights from a snapshot except the ones indicated with discard_list. Others are possibly renamed.

        session         : tf.Session the assign ops are run in.
        checkpoint_path : path to the checkpoint file.
        discard_list    : list of substrings; variables whose name contains one are skipped.
        rename_dict     : {old_substring: new_substring} replacements applied to remaining names.
    """
    reader = pywrap_tensorflow.NewCheckpointReader(checkpoint_path)
    var_to_shape_map = reader.get_variable_to_shape_map()

    # Remove everything from the discard list
    if discard_list is not None:
        num_disc = 0
        var_to_shape_map_new = dict()
        for k, v in var_to_shape_map.items():
            good = True
            for dis_str in discard_list:
                if dis_str in k:
                    good = False

            if good:
                var_to_shape_map_new[k] = v
            else:
                num_disc += 1
        var_to_shape_map = dict(var_to_shape_map_new)
        print('Discarded %d items' % num_disc)

    # rename everything according to rename_dict
    # (num_rename is tallied but never reported)
    num_rename = 0
    var_to_shape_map_new = dict()
    for name in var_to_shape_map.keys():
        new_name = name
        if rename_dict is not None:
            for rename_str in rename_dict.keys():
                if rename_str in name:
                    new_name = new_name.replace(rename_str, rename_dict[rename_str])
                    num_rename += 1
        # values change meaning here: from shapes to the actual checkpoint tensors
        var_to_shape_map_new[new_name] = reader.get_tensor(name)
    var_to_shape_map = dict(var_to_shape_map_new)

    init_op, init_feed = tf.contrib.framework.assign_from_values(var_to_shape_map)
    session.run(init_op, init_feed)
    print('Initialized %d variables from %s.' % (len(var_to_shape_map), checkpoint_path))
def calc_auc(x, y):
    """ Given x and y values it calculates the approx. integral and normalizes it: area under curve"""
    # trapezoidal integral of the curve divided by the integral of a
    # constant 1.0 curve over the same x range
    return np.trapz(y, x) / np.trapz(np.ones_like(y), x)
def get_stb_ref_curves():
    """ Hard-coded PCK reference curves of prior methods (thresholds in mm),
        each returned as a (thresholds, pck_values, legend_label) tuple. """
    curve_list = list()
    thresh_mm = np.array([20.0, 25, 30, 35, 40, 45, 50])
    # PSO baseline
    pso_b1 = np.array([0.32236842, 0.53947368, 0.67434211, 0.75657895, 0.80921053, 0.86513158, 0.89473684])
    curve_list.append((thresh_mm, pso_b1, 'PSO (AUC=%.3f)' % calc_auc(thresh_mm, pso_b1)))
    # ICPPSO baseline
    icppso_b1 = np.array([ 0.51973684, 0.64473684, 0.71710526, 0.77302632, 0.80921053, 0.84868421, 0.86842105])
    curve_list.append((thresh_mm, icppso_b1, 'ICPPSO (AUC=%.3f)' % calc_auc(thresh_mm, icppso_b1)))
    # CHPR baseline
    chpr_b1 = np.array([ 0.56578947, 0.71710526, 0.82236842, 0.88157895, 0.91447368, 0.9375, 0.96052632])
    curve_list.append((thresh_mm, chpr_b1, 'CHPR (AUC=%.3f)' % calc_auc(thresh_mm, chpr_b1)))
    return curve_list
|
dedoogong/asrada
|
HandPose_Detector/general.py
|
Python
|
apache-2.0
| 29,504
|
[
"NEURON"
] |
e612f6bd603a69b98a16797853b043a4b6c42a7d6788b0a028df2594d7ba7688
|
import vtk
import requests
# to support alternative baseURL
import os
import os.path
def add_arguments(parser):
    """ Register the command-line options used by this visualization on parser. """
    # (flag, help text, destination attribute)
    option_specs = [
        ("--table", "URI to serialized vtkTable", "tableURI"),
        ("--tree", "URI to serialized vtkTree", "treeURI"),
        ("--width", "desired width of render window", "width"),
        ("--height", "desired height of render window", "height"),
        ("--baseURL", "the protocol, hostname, and port of the girder instance", "baseURL"),
    ]
    for flag, help_text, dest in option_specs:
        parser.add_argument(flag, help=help_text, dest=dest)
def initialize(self, VTKWebApp, args):
    """ Create the default tree-heatmap pipeline (only once for all the session).

        Fetches a serialized vtkTable and vtkTree (via the girder base URL),
        builds a vtkTreeHeatmapItem, scales and centers it in the render
        window, and registers the window as the active VTK Web view.
    """
    # Create default pipeline (Only once for all the session)
    if not VTKWebApp.view:
        baseURL = args.baseURL

        # support for overriding the base URL via a baseURL.txt next to this script
        scriptDir = os.path.dirname(os.path.realpath(__file__))
        configPath = scriptDir + "/baseURL.txt"
        if os.path.isfile(configPath):
            # fix: use open() in a with-block instead of the Python-2-only
            # file() builtin, so the handle is closed even on error
            with open(configPath, "r") as f:
                baseURL = f.read().rstrip()

        # get our input data from romanesco
        r = requests.get(baseURL + args.tableURI, verify=False)
        tableJSON = r.json()
        tableStr = tableJSON["data"]
        r = requests.get(baseURL + args.treeURI, verify=False)
        treeJSON = r.json()
        treeStr = treeJSON["data"]

        # deserialize our input data
        tableReader = vtk.vtkTableReader()
        tableReader.ReadFromInputStringOn()
        tableReader.SetInputString(tableStr, len(tableStr))
        tableReader.Update()
        table = tableReader.GetOutput()

        treeReader = vtk.vtkTreeReader()
        treeReader.ReadFromInputStringOn()
        treeReader.SetInputString(treeStr, len(treeStr))
        treeReader.Update()
        tree = treeReader.GetOutput()

        # create our visualization item and load the data into it.
        treeHeatmapItem = vtk.vtkTreeHeatmapItem()
        treeHeatmapItem.SetTree(tree)
        treeHeatmapItem.SetTable(table)

        # detect if we are visualizing the results of a tree comparison
        if tree.GetVertexData().GetArray("property.differences"):
            treeHeatmapItem.GetDendrogram().SetColorArray("property.differences")
            treeHeatmapItem.GetDendrogram().SetLineWidth(2.0)

        # setup the window
        view = vtk.vtkContextView()
        view.GetRenderWindow().SetSize(int(args.width), int(args.height))
        view.GetRenderer().SetBackground(1, 1, 1)
        iren = view.GetInteractor()
        iren.SetRenderWindow(view.GetRenderWindow())

        transformItem = vtk.vtkContextTransform()
        transformItem.AddItem(treeHeatmapItem)
        transformItem.SetInteractive(1)

        view.GetScene().AddItem(transformItem)
        view.GetRenderWindow().SetMultiSamples(0)
        iren.Initialize()
        view.GetRenderWindow().Render()

        # adjust zoom so the item nicely fills the screen
        itemSize = [0, 0]
        treeHeatmapItem.GetSize(itemSize)
        itemSize.append(0)
        transformItem.GetTransform().MultiplyPoint(itemSize, itemSize)
        newWidth = view.GetScene().GetSceneWidth()
        newHeight = view.GetScene().GetSceneHeight()
        pageWidth = newWidth
        pageHeight = newHeight
        sx = pageWidth / itemSize[0]
        sy = pageHeight / itemSize[1]
        # pick the smaller scale factor so the whole item stays visible
        if sy < sx:
            scale = sy
        else:
            scale = sx
        # leave a margin: shrink harder when we would otherwise enlarge
        if scale > 1:
            scale = scale * 0.5
        else:
            scale = scale * 0.9
        transformItem.Scale(scale, scale)

        # center the item within the screen
        itemCenter = [0, 0]
        treeHeatmapItem.GetCenter(itemCenter)
        itemCenter.append(0)
        centerPt = vtk.vtkPoints2D()
        centerPt.InsertNextPoint(newWidth / 2.0, newHeight / 2.0)
        transformItem.GetTransform().InverseTransformPoints(centerPt, centerPt)
        sceneCenter = [0, 0]
        centerPt.GetPoint(0, sceneCenter)
        dx = -1 * (itemCenter[0] - sceneCenter[0])
        dy = -1 * (itemCenter[1] - sceneCenter[1])
        transformItem.Translate(dx, dy)

        # VTK Web application specific
        VTKWebApp.view = view.GetRenderWindow()
        self.Application.GetObjectIdMap().SetActiveObject("VIEW", view.GetRenderWindow())
|
lukejharmon/tangelohub
|
app/vtk_tree_heatmap.py
|
Python
|
apache-2.0
| 4,270
|
[
"VTK"
] |
0f640bee79489ba8af31239b4c1368fd592674a25e8e35be08612dde638f97bb
|
#
# Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
import platform, subprocess, sys, shlex
from os.path import join, sep
from argparse import ArgumentParser
import mx
import mx_gate
import mx_fastr_pkgs
import mx_fastr_compile
import mx_fastr_dists
import mx_fastr_junit
from mx_fastr_dists import FastRNativeProject, FastRTestNativeProject, FastRReleaseProject, FastRNativeRecommendedProject #pylint: disable=unused-import
import mx_copylib
import mx_fastr_mkgramrd
import os
'''
This is the launchpad for all the functions available for building/running/testing/analyzing
FastR. FastR can run with or without the Graal compiler enabled. As a convenience if the
graal-core suite is detected then the use of the Graal compiler is enabled without any
additional command line options being required to the mx command, i.e. it is as if --jdk jvmci
was passed as an mx global option.
'''

_fastr_suite = mx.suite('fastr')
'''
If this is None, then we run under the standard VM in interpreted mode only.
'''
_mx_graal = mx.suite("graal-core", fatalIfMissing=False)
_mx_sulong = mx.suite("sulong", fatalIfMissing=False)

# fully qualified entry-point classes for the FastR launchers
_r_command_package = 'com.oracle.truffle.r.engine'
_repl_command = 'com.oracle.truffle.tools.debug.shell.client.SimpleREPLClient'
# maps the (lower-cased) launcher command name to its main class
_command_class_dict = {'r': _r_command_package + ".shell.RCommand",
                       'rscript': _r_command_package + ".shell.RscriptCommand",
                       'rrepl': _repl_command,
                       'rembed': _r_command_package + ".shell.REmbedded",
                       }
# benchmarking support
def r_path():
    """ Path to the R launcher script inside the FastR suite directory. """
    return join(_fastr_suite.dir, 'bin', 'R')
def r_version():
    """ The GNU-R version string this FastR build corresponds to. """
    # Could figure this out dynamically
    return 'R-3.3.2'
def get_default_jdk():
    """ Returns the default JDK: jvmci-tagged when the Graal suite is present,
        otherwise whatever mx considers the default. """
    return mx.get_jdk(tag='jvmci' if _mx_graal else None)
def do_run_r(args, command, extraVmArgs=None, jdk=None, **kwargs):
    '''
    This is the basic function that runs a FastR process, where args have already been parsed.
    Args:
      args: a list of command arguments
      command: e.g. 'R', implicitly defines the entry class (can be None for AOT)
      extraVmArgs: additional vm arguments
      jdk: jdk (an mx.JDKConfig instance) to use
      **kwargs other keyword args understood by run_java
        nonZeroIsFatal: whether to terminate the execution run fails
        out,err possible redirects to collect output

    By default a non-zero return code will cause an mx.abort, unless nonZeroIsFatal=False
    The assumption is that the VM is already built and available.
    '''
    # Honour a caller-supplied environment; otherwise configure os.environ in place.
    env = kwargs['env'] if 'env' in kwargs else os.environ
    setREnvironment(env)
    if not jdk:
        jdk = get_default_jdk()
    dists = ['FASTR']
    if _mx_sulong:
        dists.append('SULONG')
    vmArgs = mx.get_runtime_jvm_args(dists, jdk=jdk)
    vmArgs += set_graal_options()
    vmArgs += _sulong_options()
    if extraVmArgs is None or not '-da' in extraVmArgs:
        # unless explicitly disabled we enable assertion checking
        vmArgs += ['-ea', '-esa']
    if extraVmArgs:
        vmArgs += extraVmArgs
    # drop any VM args that are inappropriate for the chosen jdk (e.g. JVMCI flags)
    vmArgs = _sanitize_vmArgs(jdk, vmArgs)
    if command:
        # the entry class goes last, before the R-level arguments
        vmArgs.append(_command_class_dict[command.lower()])
    return mx.run_java(vmArgs + args, jdk=jdk, **kwargs)
def r_classpath(args):
    # Print the classpath of the FASTR distribution (Python 2 print statement).
    print mx.classpath('FASTR', jdk=mx.get_jdk())
def _sanitize_vmArgs(jdk, vmArgs):
    '''
    jdk dependent analysis of vmArgs to remove those that are not appropriate for the
    chosen jdk. It is easier to allow clients to set anything they want and filter them
    out here.
    '''
    jvmci_jdk = jdk.tag is not None and 'jvmci' in jdk.tag
    jvmci_disabled = '-XX:-EnableJVMCI' in vmArgs
    xargs = []
    i = 0
    while i < len(vmArgs):
        vmArg = vmArgs[i]
        # the '-XX:-EnableJVMCI' marker itself is always dropped from the output
        if vmArg != '-XX:-EnableJVMCI':
            # NOTE(review): due to operator precedence this condition parses as
            # (startswith('-') and '-Dgraal' in arg) or ('JVMCI' in arg);
            # confirm that grouping is intended.
            if vmArg.startswith("-") and '-Dgraal' in vmArg or 'JVMCI' in vmArg:
                if not jvmci_jdk or jvmci_disabled:
                    # Graal/JVMCI-specific arg on a jdk where it cannot apply: skip it
                    i = i + 1
                    continue
            xargs.append(vmArg)
        i = i + 1
    return xargs
def set_graal_options():
    '''
    If Graal is enabled, set some options specific to FastR
    '''
    if not _mx_graal:
        return []
    return ['-Dgraal.InliningDepthError=500', '-Dgraal.EscapeAnalysisIterations=3', '-XX:JVMCINMethodSizeLimit=1000000']
def _sulong_options():
    """Return the extra VM options required when the Sulong suite is available."""
    if not _mx_sulong:
        return []
    return ['-Dfastr.ffi.factory.class=com.oracle.truffle.r.engine.interop.ffi.Truffle_RFFIFactory',
            '-XX:-UseJVMCIClassLoader']
def _get_ldpaths(env, lib_env_name):
    # Source R_HOME/etc/ldpaths in a shell and extract the value of
    # lib_env_name (e.g. LD_LIBRARY_PATH) from the resulting environment.
    # Aborts via mx.abort when the variable is not defined or the shell fails.
    ldpaths = os.path.join(env['R_HOME'], 'etc', 'ldpaths')
    command = ['bash', '-c', 'source ' + ldpaths + ' && env']
    try:
        proc = subprocess.Popen(command, stdout=subprocess.PIPE)
        # NOTE(review): iterating stdout yields str under Python 2; under
        # Python 3 these would be bytes and the comparison would never match.
        for line in proc.stdout:
            (key, _, value) = line.partition("=")
            if key == lib_env_name:
                return value.rstrip()
        # error if not found
        mx.abort('etc/ldpaths does not define ' + lib_env_name)
    except subprocess.CalledProcessError:
        mx.abort('error retrieving etc/ldpaths')
def setREnvironment(env=None):
    '''
    If R is run via mx, then the library path will not be set, whereas if it is
    run from 'bin/R' it will be, via etc/ldpaths.
    On Mac OS X El Capitan and beyond, this is moot as the variable is not
    passed down. It is TBD if we can avoid this on Linux.

    Mutates env (defaults to os.environ) in place.
    '''
    if not env:
        env = os.environ
    # This may have been set by a higher power
    if not 'R_HOME' in env:
        env['R_HOME'] = _fastr_suite.dir
    # Make sure that native code formats numbers consistently
    env['LC_NUMERIC'] = 'C'
    osname = platform.system()
    if osname != 'Darwin':
        lib_env = 'LD_LIBRARY_PATH'
        if lib_env in env:
            lib_value = env[lib_env]
        else:
            # derive the library path by sourcing etc/ldpaths
            lib_value = _get_ldpaths(env, lib_env)
        env[lib_env] = lib_value
def run_r(args, command, parser=None, extraVmArgs=None, jdk=None, **kwargs):
    '''
    Common function for running either R, Rscript (or rrepl).
    args are a list of strings that came after 'command' on the command line
    '''
    parser = parser if parser is not None else ArgumentParser(prog='mx ' + command)
    parser.add_argument('--J', dest='extraVmArgsList', action='append', help='extra Java VM arguments', metavar='@<args>')
    parser.add_argument('--jdk', action='store', help='jdk to use')
    ns, rargs = parser.parse_known_args(args)
    if ns.extraVmArgsList:
        # each --J @<args> group is shell-split and appended to extraVmArgs
        j_extraVmArgsList = split_j_args(ns.extraVmArgsList)
        if extraVmArgs is None:
            extraVmArgs = []
        extraVmArgs += j_extraVmArgsList
    if not jdk and ns.jdk:
        jdk = mx.get_jdk(tag=ns.jdk)
    # special cases normally handled in shell script startup
    if command == 'r' and len(rargs) > 0:
        if rargs[0] == 'RHOME':
            print _fastr_suite.dir
            sys.exit(0)
        elif rargs[0] == 'CMD':
            print 'CMD not implemented via mx, use: bin/R CMD ...'
            sys.exit(1)
    return do_run_r(rargs, command, extraVmArgs=extraVmArgs, jdk=jdk, **kwargs)
def split_j_args(extraVmArgsList):
    '''
    Turn a list of --J argument groups (each of the form '@<arg> <arg> ...')
    into a flat list of individual VM arguments.

    Returns an empty list when extraVmArgsList is None or empty.
    '''
    extraVmArgs = []
    if extraVmArgsList:
        for e in extraVmArgsList:
            # strip the leading '@' marker(s), then split like a shell would;
            # extend() replaces the redundant identity list-comprehension
            extraVmArgs.extend(shlex.split(e.lstrip('@')))
    return extraVmArgs
def rshell(args):
    '''run R shell'''
    return run_r(args, command='r')
def rscript(args, parser=None, **kwargs):
    '''run Rscript'''
    return run_r(args, command='rscript', parser=parser, **kwargs)
def rrepl(args, nonZeroIsFatal=True, extraVmArgs=None):
    '''run R repl'''
    # Previously nonZeroIsFatal and extraVmArgs were accepted but silently
    # ignored, and the exit status was dropped; forward them and return it.
    return run_r(args, 'rrepl', extraVmArgs=extraVmArgs, nonZeroIsFatal=nonZeroIsFatal)
def rembed(args, nonZeroIsFatal=True, extraVmArgs=None):
    '''run the REmbedded entry class (embedded-R support)'''
    # Previously nonZeroIsFatal and extraVmArgs were accepted but silently
    # ignored, and the exit status was dropped; forward them and return it.
    return run_r(args, 'rembed', extraVmArgs=extraVmArgs, nonZeroIsFatal=nonZeroIsFatal)
def _fastr_gate_runner(args, tasks):
    '''
    The specific additional gates tasks provided by FastR:
    1. Copyright check
    2. Check that ExpectedTestOutput file is in sync with unit tests
    3. Unit tests
    '''
    # FastR has custom copyright check
    with mx_gate.Task('Copyright check', tasks) as t:
        if t:
            if mx.checkcopyrights(['--primary']) != 0:
                t.abort('copyright errors')
    # check that the expected test output file is up to date
    with mx_gate.Task('UnitTests: ExpectedTestOutput file check', tasks) as t:
        if t:
            if junit(['--tests', _gate_unit_tests(), '--check-expected-output']) != 0:
                t.abort('unit tests expected output check failed')
    with mx_gate.Task('UnitTests: no specials', tasks) as t:
        if t:
            # run with the R 'specials' optimization disabled via -DR:-UseSpecials
            if junit(['--J', '@-DR:-UseSpecials', '--tests', _gate_noapps_unit_tests()]) != 0:
                t.abort('unit tests failed')
    with mx_gate.Task('UnitTests: with specials', tasks) as t:
        if t:
            if junit(['--tests', _gate_noapps_unit_tests()]) != 0:
                t.abort('unit tests failed')
    with mx_gate.Task('UnitTests: apps', tasks) as t:
        if t:
            if junit(['--tests', _apps_unit_tests()]) != 0:
                t.abort('unit tests failed')
# register the FastR-specific tasks with the mx gate machinery
mx_gate.add_gate_runner(_fastr_suite, _fastr_gate_runner)
def rgate(args):
    '''
    Run 'mx.gate' with given args (used in CI system).
    N.B. This will fail if run without certain exclusions; use the local
    'gate' command for that.
    '''
    mx_gate.gate(args)
def gate(args):
    '''Run 'mx.gate' with some standard tasks excluded as they currently fail'''
    # '-x -t <list>' excludes the named tasks from the gate run
    mx_gate.gate(args + ['-x', '-t', 'FindBugs,Checkheaders,Distribution Overlap Check,BuildJavaWithEcj'])
def _test_srcdir():
    """Return the source directory of the com.oracle.truffle.r.test project."""
    project_name = 'com.oracle.truffle.r.test'
    project_dir = mx.project(project_name).dir
    return join(project_dir, 'src', project_name.replace('.', sep))
def _junit_r_harness(args, vmArgs, jdk, junitArgs):
    # Build the custom junit RunListener argument string from the parsed
    # options, then launch the unit tests via mx.run_java.
    # always pass the directory where the expected output file should reside
    runlistener_arg = 'expected=' + _test_srcdir()
    # there should not be any unparsed arguments at this stage
    if args.remainder:
        mx.abort('unexpected arguments: ' + str(args.remainder).strip('[]') + '; did you forget --tests')
    def add_arg_separator():
        # can't update in Python 2.7
        # (a closure cannot rebind the outer local, so the caller reassigns:
        #  runlistener_arg = add_arg_separator())
        arg = runlistener_arg
        if len(arg) > 0:
            arg += ','
        return arg
    if args.gen_fastr_output:
        runlistener_arg = add_arg_separator()
        runlistener_arg += 'gen-fastr=' + args.gen_fastr_output
    if args.check_expected_output:
        # checking implies generating (in check mode)
        args.gen_expected_output = True
        runlistener_arg = add_arg_separator()
        runlistener_arg += 'check-expected'
    if args.gen_expected_output:
        runlistener_arg = add_arg_separator()
        runlistener_arg += 'gen-expected'
    if args.keep_trailing_whitespace:
        runlistener_arg = add_arg_separator()
        runlistener_arg += 'keep-trailing-whitespace'
    if args.gen_expected_quiet:
        runlistener_arg = add_arg_separator()
        runlistener_arg += 'gen-expected-quiet'
    if args.gen_diff_output:
        runlistener_arg = add_arg_separator()
        runlistener_arg += 'gen-diff=' + args.gen_diff_output
    if args.trace_tests:
        runlistener_arg = add_arg_separator()
        runlistener_arg += 'trace-tests'
#    if args.test_methods:
#        runlistener_arg = add_arg_separator()
#        runlistener_arg = 'test-methods=' + args.test_methods
    runlistener_arg = add_arg_separator()
    runlistener_arg += 'test-project-output-dir=' + mx.project('com.oracle.truffle.r.test').output_dir()
    # use a custom junit.RunListener
    runlistener = 'com.oracle.truffle.r.test.TestBase$RunListener'
    if len(runlistener_arg) > 0:
        runlistener += ':' + runlistener_arg
    junitArgs += ['--runlistener', runlistener]
    # on some systems a large Java stack seems necessary
    vmArgs += ['-Xss12m']
    # no point in printing errors to file when running tests (that contain errors on purpose)
    vmArgs += ['-DR:-PrintErrorStacktracesToFile']
    vmArgs += _sulong_options()
    setREnvironment()
    return mx.run_java(vmArgs + junitArgs, nonZeroIsFatal=False, jdk=jdk)
def junit(args):
    '''run R Junit tests'''
    parser = ArgumentParser(prog='r junit')
    parser.add_argument('--gen-expected-output', action='store_true', help='generate/update expected test output file')
    parser.add_argument('--gen-expected-quiet', action='store_true', help='suppress output on new tests being added')
    parser.add_argument('--keep-trailing-whitespace', action='store_true', help='keep trailing whitespace in expected test output file')
    parser.add_argument('--check-expected-output', action='store_true', help='check but do not update expected test output file')
    parser.add_argument('--gen-fastr-output', action='store', metavar='<path>', help='generate FastR test output file in given directory (e.g. ".")')
    parser.add_argument('--gen-diff-output', action='store', metavar='<path>', help='generate difference test output file in given directory (e.g. ".")')
    parser.add_argument('--trace-tests', action='store_true', help='trace the actual @Test methods as they are executed')
    # parser.add_argument('--test-methods', action='store', help='pattern to match test methods in test classes')
    # 'has_key' was removed in Python 3; the 'in' operator works in both 2 and 3
    if 'R_PROFILE_USER' in os.environ:
        mx.abort('unset R_PROFILE_USER before running unit tests')
    _unset_conflicting_envs()
    return mx_fastr_junit.junit(args, _junit_r_harness, parser=parser, jdk_default=get_default_jdk())
def junit_simple(args):
    # the simple (library/builtin/parser) unit tests only
    return mx.command_function('junit')(['--tests', _simple_unit_tests()] + args)
def junit_noapps(args):
    # everything in the gate except the 'apps' tests
    return mx.command_function('junit')(['--tests', _gate_noapps_unit_tests()] + args)
def junit_nopkgs(args):
    # simple plus nodes tests, excluding the R-package tests
    return mx.command_function('junit')(['--tests', ','.join([_simple_unit_tests(), _nodes_unit_tests()])] + args)
def junit_default(args):
    # currently the same test set as the gate (see _all_unit_tests)
    return mx.command_function('junit')(['--tests', _all_unit_tests()] + args)
def junit_gate(args):
    return mx.command_function('junit')(['--tests', _gate_unit_tests()] + args)
def _test_package():
return 'com.oracle.truffle.r.test'
def _test_subpackage(name):
return '.'.join((_test_package(), name))
def _simple_unit_tests():
return ','.join(map(_test_subpackage, ['library.base', 'library.stats', 'library.utils', 'library.fastr', 'builtins', 'functions', 'tck', 'parser', 'S4', 'rng', 'runtime.data']))
def _package_unit_tests():
return ','.join(map(_test_subpackage, ['rffi', 'rpackages']))
def _nodes_unit_tests():
return 'com.oracle.truffle.r.nodes.test'
def _apps_unit_tests():
return _test_subpackage('apps')
def _gate_noapps_unit_tests():
return ','.join([_simple_unit_tests(), _nodes_unit_tests(), _package_unit_tests()])
def _gate_unit_tests():
return ','.join([_gate_noapps_unit_tests(), _apps_unit_tests()])
def _all_unit_tests():
return _gate_unit_tests()
def testgen(args):
    '''generate the expected output for unit tests, and All/Failing test classes'''
    parser = ArgumentParser(prog='r testgen')
    parser.add_argument('--tests', action='store', default=_all_unit_tests(), help='pattern to match test classes')
    args = parser.parse_args(args)
    # check we are in the home directory
    if os.getcwd() != _fastr_suite.dir:
        mx.abort('must run rtestgen from FastR home directory')
    def need_version_check():
        # a GnuR version check is only needed when FASTR_TESTGEN_GNUR points
        # at an external GnuR installation ('in' replaces Python-2-only has_key)
        vardef = 'FASTR_TESTGEN_GNUR' in os.environ
        varval = os.environ['FASTR_TESTGEN_GNUR'] if vardef else None
        version_check = vardef and varval != 'internal'
        if version_check:
            rpath = join(varval, 'bin', 'R')
        else:
            rpath = None
        return version_check, rpath
    version_check, rpath = need_version_check()
    if version_check:
        # check the version of GnuR against FastR
        try:
            # BUG FIX: get_runtime_jvm_args returns a list of VM arguments;
            # it must be spliced into the argv list, not nested inside it.
            jvm_args = mx.get_runtime_jvm_args('com.oracle.truffle.r.runtime')
            fastr_version = subprocess.check_output([mx.get_jdk().java] + jvm_args + ['com.oracle.truffle.r.runtime.RVersionNumber'])
            gnur_version = subprocess.check_output([rpath, '--version'])
            if not gnur_version.startswith(fastr_version):
                mx.abort('R version is incompatible with FastR, please update to ' + fastr_version)
        except subprocess.CalledProcessError:
            mx.abort('RVersionNumber.main failed')
    # now just invoke junit with the appropriate options
    mx.log("generating expected output for packages: ")
    for pkg in args.tests.split(','):
        mx.log(" " + str(pkg))
    os.environ["TZDIR"] = "/usr/share/zoneinfo/"
    _unset_conflicting_envs()
    junit(['--tests', args.tests, '--gen-expected-output', '--gen-expected-quiet'])
def _unset_conflicting_envs():
# this can interfere with the recommended packages
if os.environ.has_key('R_LIBS_USER'):
del os.environ['R_LIBS_USER']
# the default must be vi for unit tests
if os.environ.has_key('EDITOR'):
del os.environ['EDITOR']
def unittest(args):
    # informational stub: points users at the real junit commands (Python 2 print)
    print "use 'junit --tests testclasses' or 'junitsimple' to run FastR unit tests"
def rbcheck(args):
    '''Checks FastR builtins against GnuR

    gnur-only: GnuR builtins not implemented in FastR (i.e. TODO list).
    fastr-only: FastR builtins not implemented in GnuR
    both-diff: implemented in both GnuR and FastR, but with difference
    in signature (e.g. visibility)
    both: implemented in both GnuR and FastR with matching signature

    If the option --filter is not given, shows all groups.
    Multiple groups can be combined: e.g. "--filter gnur-only,fastr-only"'''
    vmArgs = mx.get_runtime_jvm_args('com.oracle.truffle.r.test')
    # NOTE: mutates the caller's args list in place by appending the suite path
    args.append("--suite-path")
    args.append(mx.primary_suite().dir)
    vmArgs += ['com.oracle.truffle.r.test.tools.RBuiltinCheck']
    mx.run_java(vmArgs + args)
def rbdiag(args):
    '''Diagnoses FastR builtins

	-v		Verbose output including the list of unimplemented specializations
	-n		Ignore RNull as an argument type
	-m		Ignore RMissing as an argument type
    --mnonly		Uses the RMissing and RNull values as the only samples for the chimney-sweeping
    --noSelfTest	Does not perform the pipeline self-test using the generated samples as the intro to each chimney-sweeping. It has no effect when --mnonly is specified as the self-test is never performed in that case.
    --sweep		Performs the 'chimney-sweeping'. The sample combination selection method is determined automatically.
    --sweep=lite	Performs the 'chimney-sweeping'. The diagonal sample selection method is used.
    --sweep=total	Performs the 'chimney-sweeping'. The total sample selection method is used.
    --matchLevel=same	Outputs produced by FastR and GnuR must be same (default)
    --matchLevel=error	Outputs are considered matching if none or both outputs contain an error
    --maxSweeps=N	Sets the maximum number of sweeps
    --outMaxLev=N	Sets the maximum output detail level for report messages. Use 0 for the basic messages only.

	If no builtin is specified, all registered builtins are diagnosed.
	An external builtin is specified by the fully qualified name of its node class.

	Examples:

    	mx rbdiag
		mx rbdiag colSums colMeans -v
		mx rbdiag scan -m -n
    	mx rbdiag colSums --sweep
    	mx rbdiag com.oracle.truffle.r.library.stats.Rnorm
    '''
    vmArgs = mx.get_runtime_jvm_args('com.oracle.truffle.r.nodes.test')
    setREnvironment()
    # use the internal GnuR and a fixed zoneinfo location for reproducible output
    os.environ["FASTR_TESTGEN_GNUR"] = "internal"
    # this should work for Linux and Mac:
    os.environ["TZDIR"] = "/usr/share/zoneinfo/"
    vmArgs += ['com.oracle.truffle.r.nodes.test.RBuiltinDiagnostics']
    mx.run_java(vmArgs + args)
def _gnur_path():
    """Return the bin directory of the internally built GNU R."""
    native_project = mx.project('com.oracle.truffle.r.native')
    return join(native_project.dir, 'gnur', r_version(), 'bin')
def gnu_r(args):
    """Run the internally built GNU R executable with the given arguments."""
    r_exe = join(_gnur_path(), 'R')
    return mx.run([r_exe] + args, nonZeroIsFatal=False)
def gnu_rscript(args, env=None):
    """Run the internally built GNU Rscript executable.

    The env argument is used by pkgtest.
    """
    rscript_exe = join(_gnur_path(), 'Rscript')
    return mx.run([rscript_exe] + args, nonZeroIsFatal=False, env=env)
def nativebuild(args):
    '''
    force the build of part or all of the native project
    '''
    parser = ArgumentParser(prog='nativebuild')
    parser.add_argument('--all', action='store_true', help='clean and build everything, else just ffi')
    args = parser.parse_args(args)
    nativedir = mx.project('com.oracle.truffle.r.native').dir
    if args.all:
        # with shell=True the command must be a single string, not a list
        return subprocess.call('make clean && make', shell=True, cwd=nativedir)
    # remove the sentinel files so that only the ffi part is rebuilt
    ffidir = join(nativedir, 'fficall')
    for sentinel in ('jni.done', 'jniboot.done'):
        sentinel_path = join(ffidir, sentinel)
        if os.path.exists(sentinel_path):
            os.remove(sentinel_path)
    return mx.build(['--no-java'])
def mx_post_parse_cmd_line(opts):
    # mx hook: delegate post-parse processing to the FastR distributions module
    mx_fastr_dists.mx_post_parse_cmd_line(opts)
# Table of mx commands contributed by the FastR suite: name -> [function, usage].
_commands = {
    'r' : [rshell, '[options]'],
    'R' : [rshell, '[options]'],
    'rscript' : [rscript, '[options]'],
    'Rscript' : [rscript, '[options]'],
    'rtestgen' : [testgen, ''],
    'rgate' : [rgate, ''],
    'gate' : [gate, ''],
    'junit' : [junit, ['options']],
    'junitsimple' : [junit_simple, ['options']],
    'junitdefault' : [junit_default, ['options']],
    'junitgate' : [junit_gate, ['options']],
    'junitnoapps' : [junit_noapps, ['options']],
    'junitnopkgs' : [junit_nopkgs, ['options']],
    'unittest' : [unittest, ['options']],
    'rbcheck' : [rbcheck, '--filter [gnur-only,fastr-only,both,both-diff]'],
    'rbdiag' : [rbdiag, '(builtin)* [-v] [-n] [-m] [--sweep | --sweep=lite | --sweep=total] [--mnonly] [--noSelfTest] [--matchLevel=same | --matchLevel=error] [--maxSweeps=N] [--outMaxLev=N]'],
    'rrepl' : [rrepl, '[options]'],
    'rembed' : [rembed, '[options]'],
    'r-cp' : [r_classpath, '[options]'],
    'pkgtest' : [mx_fastr_pkgs.pkgtest, ['options']],
    'installpkgs' : [mx_fastr_pkgs.installpkgs, '[options]'],
    'mkgramrd': [mx_fastr_mkgramrd.mkgramrd, '[options]'],
    'rcopylib' : [mx_copylib.copylib, '[]'],
    'rupdatelib' : [mx_copylib.updatelib, '[]'],
    'gnu-r' : [gnu_r, '[]'],
    'gnu-rscript' : [gnu_rscript, '[]'],
    'nativebuild' : [nativebuild, '[]'],
    }
# merge in the commands defined by the native compilation support module
_commands.update(mx_fastr_compile._commands)
mx.update_commands(_fastr_suite, _commands)
|
jjfumero/fastr
|
mx.fastr/mx_fastr.py
|
Python
|
gpl-2.0
| 23,438
|
[
"VisIt"
] |
2b84b16bada7d9f433cb8d3430595d087acad1a4118eca1321866b5579383327
|
#!/usr/bin/env python
#
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from builtins import zip
from builtins import map
from builtins import range
from builtins import object
from past.utils import old_div
import os
import sys
from . import extras
import numpy.random_array as nrandom
import re, time
import tableio
import cosmopy
import math
import cosmology # from bpz
from . import astrometry
import fitsio
import bpz_mix
import numpy
# matplotlib stuff
import pylab
from matplotlib import rc
import MLab
import random
import aux
import scipy
import scipy.interpolate
import scipy.special
import scipy.misc.pilutil as pilutil
# Short aliases used throughout this module.
erf = scipy.special.erf  # normal distribution error function
sout = sys.stderr  # all progress messages go to stderr
#logfile = "logfile_%s.log" % time.strftime("%d%b%Y_%H:%M", time.localtime())
#sout = open(logfile,"w")
#print >>sys.stderr,"# Will write to %s" % logfile
# element-wise logical helpers for numpy mask construction
land = numpy.logical_and
lge = numpy.greater_equal
lle = numpy.less_equal
lor = numpy.logical_or
class BCGfinder(object):
""" A class to find clusters in the BCS Survey"""
    def __init__(self, catsfile, probsfile,
                 maglim=24.0,
                 zlim=1.2,
                 dz=0.05,
                 cosmo=(0.3, 0.7, 0.7),
                 zuse="ZB",  # Use ZB (Bayesian) or ML (Max Like)
                 outpath='plots',
                 path="/home/felipe/COMB-07",
                 evolfile="0_1gyr_hr_m62_salp.color"):
        """Load the photometric catalogs and set up the finder state.

        catsfile/probsfile: paths to the photometry catalog and the BPZ
        probability file. cosmo is an (Omega_m, Omega_lambda, h) tuple.
        maglim/zlim bound the objects kept; dz is the default photo-z window.
        """
        # Check for environ vars
        if not os.getenv('BCSPIPE'):
            os.environ['BCSPIPE'] = '/home/felipe/BCSPIPE'
        self.BCSPIPE = os.getenv('BCSPIPE')
        self.catsfile = catsfile
        self.probsfile = probsfile
        self.maglim = maglim
        self.zlim = zlim
        self.cosmo = cosmo
        # passive-evolution color file lives under $BCSPIPE/LIB/evol
        self.evolfile = os.path.join(self.BCSPIPE, "LIB/evol", evolfile)
        self.dz = dz
        self.zuse = zuse
        self.outpath = outpath
        self.path = path
        # Set the cosmology now
        self.cset = cosmopy.set(self.cosmo)
        self.Om = cosmo[0]
        self.OL = cosmo[1]
        self.h = cosmo[2]
        self.Ho = self.h * 100.0
        self.read_cat()     # Read catalogs avoiding faint, high-z and 99 objects
        self.read_probs()   # Read probs function of objects from catalogs
        self.get_absmags()  # We compute Abs Mags for each object
        # Not need, done by self.get_absmags()
        #self.get_evol() # Compute evol(z) for each object
        # Set the BCG masks to False, so we select BCGs on the first run
        self.BCG_masked = False
        self.BCG_probs = False
        # arcsec per pixel of the imaging data
        self.pixscale = 0.266
        # Check if the destination folder exists
        if os.path.exists(self.outpath):
            sout.write("# Will put files to: %s\n" % self.outpath)
        else:
            sout.write("# Will create new folder: %s" % self.outpath)
            os.mkdir(self.outpath)
        return
    ##################################################################
    # Define the sub-sample for BCGs candidates around a position
    ##################################################################
    def get_BCG_candidates_radec(self,
                                 ID,
                                 ra,
                                 dec,
                                 zo,
                                 Mr_limit=-22.71,
                                 p_lim=1e-4,
                                 i_lim=24,
                                 plot='yes',
                                 D_factor=1.0,
                                 dz=0.1):
        """Select BCG candidates near (ra, dec) [deg] at redshift zo.

        Builds (once) a global brightness/probability mask over the whole
        catalog, then applies per-call position and photo-z cuts, storing the
        selected candidates in the self.*_BCG attributes.
        Mr_limit is the r-band absolute-magnitude limit at z=0.1; p_lim the
        minimum BCG probability; D_factor scales the 250 kpc search radius.
        """
        t0 = time.time()
        RA = astrometry.dec2deg(old_div(ra, 15.))
        DEC = astrometry.dec2deg(dec)
        # Make some variables part of the class
        self.ra0 = ra
        self.dec0 = dec
        self.RA = RA
        self.DEC = DEC
        self.cID = ID  # Candidate's ID
        self.z_c = zo
        # The Abs mag limit @ z=0.1 in the i-band
        Mi_limit = cosmology.reobs('El_Benitez2003',
                                   m=Mr_limit,
                                   oldfilter="r_MOSAICII",
                                   newfilter="i_MOSAICII")
        #dz = 0.1
        z1 = zo - dz
        z2 = zo + dz
        star_lim = 0.50
        Dmin = D_factor * 250.0  # in kpc
        # Compute the largest angular distance closer to the lower redshift limit of the interval
        if zo <= 0.1:  # to avoid inf
            zcenter = zo
        else:
            zcenter = zo - dz
        dmin = old_div(
            astrometry.kpc2arc(zcenter, Dmin, self.cosmo),
            3600.)  # in degrees.
        print("# Will Use Dmin: %.2f @ zo: %s" % (dmin * 60.0, zcenter))
        # Evaluate the generic mask for BCG only once
        if not self.BCG_masked:
            # We also might want to take into account the error in the
            # photo-z (around ~0.05) that can change the value of the
            # distance modulus DM = 25 + 5*log10(dl) significantly,
            # about 1Mag a more at z=0.1 and below. The factor is :
            # 5log10(dL(z+dz)/dL(z)), where z is the objects's photo-z and
            # dz the error. So the limit is 5log10(dL(z+dz)/dL(z)) magnitude
            # fainter.
            # dL_up = self.cset.dlum(self.z_ph+self.dz)
            # dL_lo = self.cset.dlum(self.z_ph)
            # self.DM_factor = 5*numpy.log10(dL_up/dL_lo)
            #
            # We get the limit at the z_ph of each candidate, corrected by z=0.1
            Mr_BCG_limit = Mr_limit + self.ev_r - self.evf['r'](
                0.1)  #+ self.DM_factor
            Mi_BCG_limit = Mi_limit + self.ev_i - self.evf['i'](
                0.1)  #+ self.DM_factor
            # Evaluate the BCG Probability function, we get the limit for each object
            self.p = p_BCG(self.Mr, Mr_BCG_limit)
            sout.write(
                "# Selecting BCG candidates from %s objects... will do this only once\n"
                % (len(self.ra)))
            # per-band apparent-magnitude cuts, all anchored on i_lim
            mask_p = numpy.where(self.p >= p_lim, 1, 0)
            mask_g = numpy.where(self.g < i_lim + 5, 1, 0)
            mask_r = numpy.where(self.r < i_lim + 2, 1, 0)
            mask_i = numpy.where(self.i < i_lim, 1, 0)
            mask_z = numpy.where(self.z < i_lim + 1, 1, 0)
            # Avoid freakishly bright objects, 2.5 mags brighter than the M_BCG_limit
            mask_br = numpy.where(self.Mr > Mr_BCG_limit - 2.5, 1, 0)
            mask_bi = numpy.where(self.Mi > Mi_BCG_limit - 2.5, 1, 0)
            # Put a more strict cut in class_star for bcg candidates
            sout.write("# Avoiding CLASS_STAR > %s in BGCs\n" % star_lim)
            mask_star = numpy.where(self.class_star <= star_lim, 1, 0)
            # Construct the final mask now
            # NOTE(review): mask_i and mask_star are computed above but not
            # included in the product below — confirm whether that is intended.
            self.mask_BCG = mask_g * mask_r * mask_i * mask_z * mask_br * mask_bi * mask_p
            self.BCG_masked = True
            # Model color only once
            self.zx = numpy.arange(0.01, self.zlim, 0.01)
            self.gr_model = cosmology.color_z(sed='El_Benitez2003',
                                              filter_new='g_MOSAICII',
                                              filter_old='r_MOSAICII',
                                              z=self.zx,
                                              calibration='AB')
            self.ri_model = cosmology.color_z(sed='El_Benitez2003',
                                              filter_new='r_MOSAICII',
                                              filter_old='i_MOSAICII',
                                              z=self.zx,
                                              calibration='AB')
            self.iz_model = cosmology.color_z(sed='El_Benitez2003',
                                              filter_new='i_MOSAICII',
                                              filter_old='z_MOSAICII',
                                              z=self.zx,
                                              calibration='AB')
            sout.write(" \t Done: %s\n" % extras.elapsed_time_str(t0))
        # Now we select based on position and redshift.
        # These are the only two masks that depend on the position and
        # redshift and will be computed on each call.
        mask_zph = numpy.where(land(self.z_ph >= z1, self.z_ph <= z2), 1, 0)
        # Mask in positions
        #distance = numpy.sqrt( (ra - self.ra)**2 + (dec - self.dec)**2)
        distance = astrometry.circle_distance(ra,
                                              dec,
                                              self.ra,
                                              self.dec,
                                              units='deg')
        mask_pos = numpy.where(distance <= dmin, 1, 0)
        # Select the candidates now
        idx = numpy.where(mask_zph * mask_pos * self.mask_BCG == 1)
        # And pass up to to class
        self.idx_BCG = idx
        self.id_BCG = self.id[idx]
        self.ra_BCG = self.ra[idx]
        self.dec_BCG = self.dec[idx]
        self.p_BCG = self.p[idx]
        self.z_BCG = self.z_ph[idx]
        self.t_BCG = self.type[idx]
        self.N_BCG = len(idx[0])
        self.Mi_BCG = self.Mi[idx]
        self.Mr_BCG = self.Mr[idx]
        self.DM_BCG = self.DM[idx]  # distance modulus
        self.dang_BCG = self.dang[idx]  # angular-diameter distance
        self.zml_BCG = self.z_ml[idx]
        self.tml_BCG = self.t_ml[idx]
        self.zb_BCG = self.z_b[idx]
        self.tb_BCG = self.t_b[idx]
        self.class_BCG = self.class_star[idx]
        self.a_BCG = self.a_image[idx]
        self.b_BCG = self.b_image[idx]
        self.theta_BCG = self.theta[idx]
        # r,i-band stuff
        self.r_BCG = self.r[idx]
        self.i_BCG = self.i[idx]
        # Get the 1-sigma intervals
        self.z1_BCG = self.z1[idx]
        self.z2_BCG = self.z2[idx]
        # Get the tile name closest to that position
        iclose = numpy.argmin(distance)
        IDclose = self.id[iclose]
        self.tile = IDclose.split('_')[0]
        # The distance to the candidate's position for each BCG, in arcmin
        self.d_BCG = distance[idx] * 60.0
        # The radius used in the search in arcsec
        self.dmin = dmin * 60.0
        # The r-band Luminosity of the BCGs
        self.LBCG = self.Lr[idx]
        sout.write(
            "# Found %s BCG candidates with i<%s at zo:%s, (z1,z2) = %s-%s\n" %
            (self.N_BCG, i_lim, zo, z1, z2))
        # Optional, plot to see that we are getting the right stuff
        # NOTE(review): plot defaults to the truthy string 'yes', so plots are
        # made unless an explicitly falsy value is passed — confirm intended.
        if plot:
            self.BCG_plots(ID, zo, Mr_limit)
        return
    ##################################################################
    # Define the sub-sample for BCGs candidates around a position
    ##################################################################
    def get_BCG_ID(self, ID, Mr_limit=-22.71, p_lim=1e-4, SCSname=None):
        """Select the single object with catalog id ID as the BCG candidate.

        Like get_BCG_candidates_radec, but keyed on the object id instead of a
        sky position; fills the same self.*_BCG attributes. SCSname overrides
        the auto-generated 'SCSO_J...' designation when given.
        """
        t0 = time.time()
        # The Abs mag limit @ z=0.1 in the i-band
        Mi_limit = cosmology.reobs('El_Benitez2003',
                                   m=Mr_limit,
                                   oldfilter="r_MOSAICII",
                                   newfilter="i_MOSAICII")
        # Evaluate the generic mask for BCG only once
        if not self.BCG_probs:
            # We get the limit at the z_ph of each candidate, corrected by z=0.1
            Mr_BCG_limit = Mr_limit + self.ev_r - self.evf['r'](
                0.1)  #+ self.DM_factor
            Mi_BCG_limit = Mi_limit + self.ev_i - self.evf['i'](
                0.1)  #+ self.DM_factor
            # Evaluate the BCG Probability function, we get the limit for each object
            self.p = p_BCG(self.Mr, Mr_BCG_limit)
            self.BCG_probs = True
            # Model color only once
            self.zx = numpy.arange(0.01, self.zlim, 0.01)
            self.gr_model = cosmology.color_z(sed='El_Benitez2003',
                                              filter_new='g_MOSAICII',
                                              filter_old='r_MOSAICII',
                                              z=self.zx,
                                              calibration='AB')
            self.ri_model = cosmology.color_z(sed='El_Benitez2003',
                                              filter_new='r_MOSAICII',
                                              filter_old='i_MOSAICII',
                                              z=self.zx,
                                              calibration='AB')
            self.iz_model = cosmology.color_z(sed='El_Benitez2003',
                                              filter_new='i_MOSAICII',
                                              filter_old='z_MOSAICII',
                                              z=self.zx,
                                              calibration='AB')
            sout.write(" \t Done: %s\n" % extras.elapsed_time_str(t0))
        # And pass up to to class
        idx = numpy.where(self.id == ID)
        # The index number
        iBCG = idx  #[0]
        self.idx_BCG = idx  #[0]
        self.id_BCG = self.id[iBCG]
        self.ra_BCG = self.ra[iBCG]
        self.dec_BCG = self.dec[iBCG]
        self.p_BCG = self.p[iBCG]
        self.z_BCG = self.z_ph[iBCG]
        self.t_BCG = self.type[iBCG]
        self.N_BCG = len(idx[0])
        self.Mi_BCG = self.Mi[iBCG]
        self.Mr_BCG = self.Mr[iBCG]
        self.DM_BCG = self.DM[iBCG]  # distance modulus
        self.dang_BCG = self.dang[iBCG]  # angular-diameter distance
        self.zml_BCG = self.z_ml[iBCG]
        self.tml_BCG = self.t_ml[iBCG]
        self.zb_BCG = self.z_b[iBCG]
        self.tb_BCG = self.t_b[iBCG]
        self.class_BCG = self.class_star[iBCG]
        self.a_BCG = self.a_image[iBCG]
        self.b_BCG = self.b_image[iBCG]
        self.theta_BCG = self.theta[iBCG]
        # r,i-band stuff
        self.r_BCG = self.r[iBCG]
        self.i_BCG = self.i[iBCG]
        # Get the 1-sigma intervals
        self.z1_BCG = self.z1[iBCG]
        self.z2_BCG = self.z2[iBCG]
        # Get the tile name closest to that position
        self.tile = ID.split('_')[0]
        print(old_div(self.ra_BCG, 15))
        print(old_div(self.dec_BCG, 15))
        self.RA = astrometry.dec2deg(old_div(self.ra_BCG, 15.))[0]
        self.DEC = astrometry.dec2deg(self.dec_BCG)[0]
        # The SCS rootname for the files
        if SCSname:
            self.SCSname = SCSname
        else:
            RA = astrometry.dec2deg(old_div(self.ra_BCG[0], 15), sep="")
            DEC = astrometry.dec2deg(self.dec_BCG[0], sep="")
            # self.SCSname = "SCSO_J%s%s" % (RA[0:4],DEC[0:5])
            self.SCSname = "SCSO_J%s%s" % (RA[0:6],
                                           DEC[0:7])  # to avoid confusion
        # The r-band Luminosity of the BCGs
        self.LBCG = self.Lr[idx]
        # The distance to the candidate's position for each BCG, in arcmin
        self.d_BCG = 0.0
        sout.write("# Found BCG candidates for %s @ i:%s \n" % (ID, iBCG[0]))
    #######################
    # BCG plot diagnostics
    #######################
    def BCG_plots(self, ID, zo, Mr_limit):
        """Write a 6-panel diagnostic EPS plot for the selected BCGs.

        Panels: BCG probability vs Mr, Mr vs r, r vs z, and the three
        color-redshift tracks (g-r, r-i, i-z) against the elliptical model.
        Output file: <outpath>/<ID>_plots.eps
        """
        params = {
            'axes.labelsize': 12,
            # NOTE(review): 'text.fontsize' was removed in newer matplotlib
            # releases ('font.size'); confirm against the pinned version.
            'text.fontsize': 10,
            'legend.fontsize': 10,
            'xtick.labelsize': 10,
            'ytick.labelsize': 10,
            # The figure subplot parameters. All dimensions are fraction of the
            # figure width or height
            'figure.subplot.left':
            0.10,  # the left side of the subplots of the figure
            'figure.subplot.right':
            0.95,  # the right side of the subplots of the figure
            'figure.subplot.bottom':
            0.05,  # the bottom of the subplots of the figure
            'figure.subplot.top':
            0.95,  # the top of the subplots of the figure
            'figure.subplot.wspace':
            0.2,  # the amount of width reserved for blank space between subplots
            'figure.subplot.hspace':
            0.3,  # the amount of height reserved for white space between subplots
            #'font.size': 8,
            #'backend': 'ps',
            #'text.usetex': True,
            'figure.figsize': (11.5, 9)
        }
        pylab.rcParams.update(params)
        zx = self.zx
        idx = self.idx_BCG
        #######################
        # p_BCG(z) plot at zo
        #######################
        # evolution-correct the magnitude limit from z=0.1 to zo
        Mr_limit_zo = Mr_limit + self.evf['r'](zo) - self.evf['r'](0.1)
        M1 = Mr_limit_zo - 3
        M2 = Mr_limit_zo + 3
        M_x = numpy.arange(M1, M2, 0.05)
        P_x = p_BCG(M_x, Mr_limit_zo)
        pylab.figure(1)
        pylab.subplot(321)
        pylab.plot(M_x, P_x, 'k-')
        pylab.plot(self.Mr_BCG, self.p_BCG, 'ro')
        xx = numpy.asarray([Mr_limit_zo, Mr_limit_zo])
        yy = numpy.asarray([-3, 3])
        pylab.plot(xx, yy, '--')
        dx = [-2, -1, 1, +2]  # make dotted lines at N +/- mags
        for d in dx:
            pylab.plot(xx + d, yy, 'k:')
        pylab.xlabel("r-band Abs Mag")
        pylab.ylabel("BCG Probability p(M)")
        pylab.ylim(-0.02, 1.02)
        pylab.xlim(M1, M2)
        ################
        # M vs m plot
        ################
        dlum = self.cset.dlum(zx)
        dm = 25.0 + 5.0 * numpy.log10(dlum)  # distance modulus over zx
        Mo = Mr_limit + self.evf['r'](zx) - self.evf['r'](0.1)
        mo = Mo + dm + self.kcorr['r'](zx)
        M1 = Mo + 1.0
        M2 = Mo - 1.0
        pylab.subplot(323)
        pylab.plot(mo, Mo, 'k')
        pylab.plot(mo, M1, 'k:')
        pylab.plot(mo, M2, 'k:')
        pylab.plot(self.r_BCG, self.Mr_BCG, 'ro')
        pylab.xlabel("r-band magnitude")
        pylab.ylabel("Abs Magnitude")
        ################
        # m vs z plot
        ################
        pylab.subplot(325)
        pylab.plot(zx, mo, 'k--')
        pylab.plot(self.z_BCG, self.r_BCG, 'ro')
        pylab.ylabel("r-band magnitude")
        pylab.xlabel("Redshift")
        pylab.xlim(0.05, 0.9)
        pylab.ylim(13.5, 24.1)
        #############################
        # Color - redshift plots
        #############################
        gr = self.g_bpz[idx] - self.r_bpz[idx]
        ri = self.r_bpz[idx] - self.i_bpz[idx]
        iz = self.i_bpz[idx] - self.z_bpz[idx]
        # (g-r), model track with +/- 0.3 mag envelope
        pylab.subplot(322)
        pylab.plot(self.z_BCG, gr, 'ro')
        pylab.plot(zx, self.gr_model, 'k--')
        pylab.plot(zx, self.gr_model - 0.3, 'k:')
        pylab.plot(zx, self.gr_model + 0.3, 'k:')
        pylab.ylabel("g-r")
        pylab.xlim(0.05, 0.9)
        # (r-i)
        pylab.subplot(324)
        pylab.plot(self.z_BCG, ri, 'ro')
        pylab.plot(zx, self.ri_model, 'k--')
        pylab.plot(zx, self.ri_model - 0.3, 'k:')
        pylab.plot(zx, self.ri_model + 0.3, 'k:')
        pylab.ylabel("r-i")
        pylab.xlim(0.05, 0.9)
        # (i-z)
        pylab.subplot(326)
        pylab.plot(self.z_BCG, iz, 'ro')
        pylab.plot(zx, self.iz_model, 'k--')
        pylab.plot(zx, self.iz_model + 0.3, 'k:')
        pylab.plot(zx, self.iz_model - 0.3, 'k:')
        pylab.xlabel("Redshift")
        pylab.ylabel("i-z")
        pylab.xlim(0.05, 0.9)
        # Make the ps file
        pylab.savefig(os.path.join(self.outpath, "%s_plots.eps" % ID))
        pylab.close()
        return
    ##################################################################
    # Select neighbors around ra,dec and at the right brightness @ zo
    ##################################################################
    def get_BCG_neighbors(self, i, Mi_lim=-20.25, dz=0.05):
        """Return the catalog index tuple of galaxies around the i-th BCG.

        Neighbors must lie within 1 h^-1 Mpc, within +/- dz in photo-z, and be
        fainter than the BCG but brighter than the evolution-corrected Mi_lim.
        """
        t0 = time.time()
        sout.write("# Selecting Neighbors ")
        # Get the relevant info for ith BCG
        zo = self.z_BCG[i]
        ra0 = self.ra_BCG[i]
        dec0 = self.dec_BCG[i]
        Mi_BCG = self.Mi_BCG[i]
        # NOTE(review): DM is read here but not used below — confirm.
        DM = self.DM_BCG[i]
        # 1 - Select in position around ra0,dec0
        # Define 1h^-1 Mpc radius in degrees @ zo
        R1Mpc = 1000 * 1.0 / self.h  # in kpc
        rmin = old_div(
            astrometry.kpc2arc(zo, R1Mpc, self.cosmo), 3600.)  # in degrees.
        #dist = numpy.sqrt( (ra0 - self.ra)**2 + (dec0 - self.dec)**2)
        dist = astrometry.circle_distance(ra0,
                                          dec0,
                                          self.ra,
                                          self.dec,
                                          units='deg')
        mask_pos = numpy.where(dist < rmin, 1, 0)
        # 2 - Select in redshift
        #dz = self.dz*(1 + zo)
        z1 = zo - dz
        z2 = zo + dz
        mask_z = numpy.where(land(self.z_ph >= z1, self.z_ph <= z2), 1, 0)
        # 3 - Select in brightness
        Mi_lim_zo = Mi_lim + self.evf['i'](zo) - self.evf['i'](0.1)
        mask_L1 = numpy.where(self.Mi <= Mi_lim_zo, 1, 0)  # Faint cut > 0.4L*
        mask_L2 = numpy.where(self.Mi >= Mi_BCG, 1, 0)  # Bright cut < L_BCG
        # The final selection mask, position x redshift x Luminosity
        mask_sel = mask_pos * mask_L1 * mask_L2 * mask_z
        idx = numpy.where(mask_sel == 1)
        sout.write(" \t Done: %s\n" % extras.elapsed_time_str(t0))
        return idx
#########################################
# Read in the big catalog of photometry
#########################################
def read_cat(self):
cols = (1,
2,
23,
27,
26,
28,
29,
30,
3,
4, #5,
6,
7, #8,
9,
10, #11,
12,
13, #14,
15,
16,
17,
18,
19,
20,
21,
22,
31,
32,
33,
34)
t1 = time.time()
sout.write("# Reading cols:%s\n# Reading cats from: %s... \n" %
(cols, self.catsfile))
(ra,
dec,
z_b,
odds,
t_b,
z_ml,
t_ml,
chi,
g,
g_err, #g_sn,
r,
r_err, #r_sn,
i,
i_err, #i_sn,
z,
z_err, #z_sn,
g_bpz,
g_berr,
r_bpz,
r_berr,
i_bpz,
i_berr,
z_bpz,
z_berr, #) = tableio.get_data(self.catsfile,cols=cols)
class_star,
a_image,
b_image,
theta) = tableio.get_data(self.catsfile, cols=cols)
(id) = tableio.get_str(self.catsfile, cols=(0, ))
############################################
# Choose the photo-z to use, ml or bayesian
############################################
sout.write("# Will use %s redshifts\n" % self.zuse)
if self.zuse == "ML":
z_ph = z_ml
t = t_ml
elif self.zuse == "ZB":
z_ph = z_b
t = t_b
i_lim = self.maglim
odds_lim = 0.80
star_lim = 0.80
# Clean up according to BPZ
sout.write("# Avoiding magnitudes -99 and 99 in BPZ \n")
g_mask = numpy.where(lor(g_bpz == 99, g_bpz == -99), 0, 1)
r_mask = numpy.where(lor(r_bpz == 99, r_bpz == -99), 0, 1)
i_mask = numpy.where(lor(i_bpz == 99, i_bpz == -99), 0, 1)
z_mask = numpy.where(lor(z_bpz == 99, z_bpz == -99), 0, 1)
bpz_mask = g_mask * r_mask * i_mask * z_mask
# Clean up to avoid 99 values and very faint i_mag values
sout.write("# Avoiding magnitudes 99 in MAG_AUTO \n")
g_mask = numpy.where(g >= 99, 0, 1)
r_mask = numpy.where(r >= 99, 0, 1)
i_mask = numpy.where(i >= i_lim, 0, 1)
z_mask = numpy.where(z >= 99, 0, 1)
sout.write("# Avoiding magnitudes i > %s in MAG_AUTO \n" % i_lim)
# Clean by class_star
sout.write("# Avoiding CLASS_STAR > %s \n" % star_lim)
mask_star = numpy.where(class_star > star_lim, 0, 1)
# Clean up by odds
sout.write("# Avoiding ODDS < %s in BPZ \n" % odds_lim)
odds_mask = numpy.where(odds > odds_lim, 1, 0)
# Avoid z> zlim objects too.
sout.write("# Avoiding objects with z > %s " % self.zlim)
zp_mask = numpy.where(z_ph > self.zlim, 0, 1)
# The final 'good' mask
mask_good = g_mask * r_mask * i_mask * z_mask * zp_mask * odds_mask * mask_star
idx = numpy.where(mask_good == 1)
# Make ids a Char String in numarray
self.id = nstr.array(id)[idx]
# Only keep the 'good' one, avoid -99 and 99 values in BPZ mags
self.ra = ra[idx]
self.dec = dec[idx]
self.z_b = z_b[idx]
self.odds = odds[idx]
self.z_ml = z_ml[idx]
self.t_ml = t_ml[idx]
self.t_b = t_b[idx]
self.t_ml = t_ml[idx]
############################################
# Choose the photo-z to use, ml or bayesian
############################################
if self.zuse == "ML":
self.z_ph = self.z_ml
self.type = self.t_ml
elif self.zuse == "ZB":
self.z_ph = self.z_b
self.type = self.t_b
self.g = g[idx]
self.r = r[idx]
self.i = i[idx]
self.z = z[idx]
self.g_err = g_err[idx]
self.r_err = r_err[idx]
self.i_err = i_err[idx]
self.z_err = z_err[idx]
self.g_bpz = g_bpz[idx]
self.r_bpz = r_bpz[idx]
self.i_bpz = i_bpz[idx]
self.z_bpz = z_bpz[idx]
self.g_berr = g_berr[idx]
self.r_berr = r_berr[idx]
self.i_berr = i_berr[idx]
self.z_berr = z_berr[idx]
self.class_star = class_star[idx]
self.a_image = a_image[idx]
self.b_image = b_image[idx]
self.theta = theta[idx]
# Color of selected galaxies
self.gr = self.g_bpz - self.r_bpz
self.ri = self.r_bpz - self.i_bpz
self.iz = self.i_bpz - self.z_bpz
# Min and and max values in RA/DEC
self.ramin = self.ra.min()
self.ramax = self.ra.max()
self.decmin = self.dec.min()
self.decmax = self.dec.max()
self.idx_cat = idx
sout.write(" \t Done: %s\n" % extras.elapsed_time_str(t1))
return
####################################
# Read in the the probabilty file
####################################
def read_probs(self):
# The reg expresion to compile
regexp_point = re.compile(r"arange\("
r"(?P<z1>[0-9]+.[0-9]+),"
r"(?P<z2>[0-9]+.[0-9]+),"
r"(?P<dz>[0-9]+.[0-9]+)\)")
t0 = time.time()
sout.write("# Reading probs from :%s... " % self.probsfile)
# probability arrays
probs = []
for line in open(self.probsfile).readlines():
fields = line.split()
if fields[0][0] == "#":
point = regexp_point.search(line)
# Extract the information if a point was selected
if point:
z1 = float(point.group('z1'))
z2 = float(point.group('z2'))
dz = float(point.group('dz'))
zx = numpy.arange(z1, z2, dz)
continue
ID = fields[0]
probs.append(numpy.asarray(list(map(float, fields[1:]))))
# Transform the list into an N array
p_z = numpy.asarray(probs)
# select same galaxies as in catalogs we just read
self.p_z = p_z[self.idx_cat][:]
self.zx = zx
sout.write(" \t Done: %s\n" % extras.elapsed_time_str(t0))
t1 = time.time()
# Get the 1-sigma z1, z2 limits for each galaxy
# Cumulatibe P(<z) function for each selected galaxy
self.Psum = numpy.cumsum(self.p_z, axis=1)
sout.write("# Getting +/- 1sigma (z1,z2) limits for each galaxy ")
self.z1 = self.ra * 0.0
self.z2 = self.ra * 0.0
# One by one in the list
for i in range(len(self.ra)):
i1 = numpy.where(self.Psum[i, :] >= 0.159)[0][0]
i2 = numpy.where(self.Psum[i, :] > 0.842)[0][0]
self.z1[i] = self.zx[i1]
self.z2[i] = self.zx[i2]
sout.write(" \t Done: %s\n" % extras.elapsed_time_str(t1))
return
################################################
# Get the absolute magnitudes for each object
################################################
    def get_absmags(self):
        """Compute absolute magnitudes, evolution corrections and solar
        luminosities for every selected galaxy.

        Uses k-corrections and evolution interpolated from a BC03 model
        via KEfit(self.evolfile).  Stores Mg..Mz, ev_g..ev_z, Lg..Lz
        (with errors) and keeps the interpolators on the instance as
        self.kcorr and self.evf.
        """
        # Distance modulus from the luminosity distance; also keep the
        # angular-diameter distance for later use
        self.dlum = self.cset.dlum(self.z_ph)
        self.dang = self.cset.dang(self.z_ph)
        self.DM = 25.0 + 5.0 * numpy.log10(self.dlum)
        t0 = time.time()
        # Get the absolute magnitudes, *** not including evolution ***, only Kcorr
        # We use a BPZ E's template for Kcorr
        #sout.write("# Computing absolute magnitudes interpolating Kcorr ")
        #k = Kcorr_fit(sed='El_Benitez2003')
        # Alternatively we can get both the kcorr and the evol from
        # the *.color file from BC03 *.ised file
        sout.write(
            "# Computing absolute magnitudes interpolating konly from BC03 model \n")
        k, ev = KEfit(self.evolfile)
        # M = m - DM - K(z), per band
        self.Mg = self.g - self.DM - k['g'](self.z_ph)
        self.Mr = self.r - self.DM - k['r'](self.z_ph)
        self.Mi = self.i - self.DM - k['i'](self.z_ph)
        self.Mz = self.z - self.DM - k['z'](self.z_ph)
        sout.write("# Computing evolution ev(z) for each galaxy ")
        self.ev_g = ev['g'](self.z_ph)
        self.ev_r = ev['r'](self.z_ph)
        self.ev_i = ev['i'](self.z_ph)
        self.ev_z = ev['z'](self.z_ph)
        # Solar absolute magnitudes per band, used to convert to L/Lsun
        # taken from http://www.ucolick.org/~cnaw/sun.html
        self.Msun = {}
        self.Msun['g'] = 5.11
        self.Msun['r'] = 4.65
        self.Msun['i'] = 4.54
        self.Msun['z'] = 4.52
        # Mags k-corrected to z=0.25 as done in Reyes et al 2009
        Mg = self.g - self.DM - k['g'](self.z_ph) + k['g'](0.25)
        Mr = self.r - self.DM - k['r'](self.z_ph) + k['r'](0.25)
        Mi = self.i - self.DM - k['i'](self.z_ph) + k['i'](0.25)
        Mz = self.z - self.DM - k['z'](self.z_ph) + k['z'](0.25)
        # Luminosities in solar units, L = 10^(-0.4 (M - Msun))
        self.Lg = 10.0**(-0.4 * (Mg - self.Msun['g']))
        self.Lr = 10.0**(-0.4 * (Mr - self.Msun['r']))
        self.Li = 10.0**(-0.4 * (Mi - self.Msun['i']))
        self.Lz = 10.0**(-0.4 * (Mz - self.Msun['z']))
        # Error propagation: dL = L * dm / (2.5 / ln 10) = L * dm / 1.0857
        self.Lg_err = self.Lg * self.g_err / 1.0857
        self.Lr_err = self.Lr * self.r_err / 1.0857
        self.Li_err = self.Li * self.i_err / 1.0857
        self.Lz_err = self.Lz * self.z_err / 1.0857
        # Pass the interpolators up to the class
        self.kcorr = k
        self.evf = ev
        sout.write(" \t Done: %s\n" % extras.elapsed_time_str(t0))
        return
# #########################################################################
# Not needed, all done now at self.get_absmag()
#
# ###########################################################
# # Get the evolutionay correction for each object's redshift
# ###########################################################
# def get_evol(self):
# t0 = time.time()
# sout.write("# Computing evolution")
# ev = evolfit(self.evolfile)
# self.evf = ev # pass the function to the class
# self.ev_g = numpy.asarray(ev['g'](self.z_ph))
# self.ev_r = numpy.asarray(ev['r'](self.z_ph))
# self.ev_i = numpy.asarray(ev['i'](self.z_ph))
# self.ev_z = numpy.asarray(ev['z'](self.z_ph))
# sout.write(" \t Done: %s\n" % extras.elapsed_time_str(t0))
############################################################################
###########################################################################
# integrates the p_z Bayesian probability between the zlow and zhigh interval
###########################################################################
def p_z_int(self, z1, z2, idx=None):
#sout.write("# Integrating p(z) between %s -- %s ..." % (z1,z2))
# the limits of integration of the probabilty
i1 = numpy.where(self.zx >= z1)[0][0]
i2 = numpy.where(self.zx >= z2)[0][0]
#dz = abs(self.zx[1]-self.zx[0])
if idx:
p_int = numpy.sum(self.p_z[idx][:, i1:i2], axis=1)
else:
p_int = numpy.sum(self.p_z[:, i1:i2], axis=1)
#sout.write(" Done\n")
return p_int
################################################
# Set the coeffs of P(r) and normalize it unity
################################################
def set_Pr(self, zo, dang, ro=0.180, n=1.e5): # ro in [Mpc]
# Set the cosmology
#dang = self.cset.dang(zo)[0]
#dang = 1.0
# Set rc and rmax
#scale = 180.*60/math.pi
scale = old_div(180.0, math.pi) # Mpc to degrees
self.rc = ro * scale / dang
#self.rmax = 10.0*self.rc
self.rmax = 1.0 * scale / dang
# Normalize the function to unity
#sout.write("# Normalizing P(r,z) at z=%s... " % zo)
r1 = 0.
r2 = self.rmax * 1.1
dr = old_div((r2 - r1), n) # arcmins in 10^3 steps
rx = numpy.arange(r1, r2, dr)
self.Pn = 1.0 # reset normalization before normalizing
self.Pn = 2.0 * math.pi * (self.P_r(rx) * rx * dr).sum()
#print self.Pn
#print self.P_sum()
#sout.write(" Done\n")
return
#####################################
# Profile weight, r is in arcmins
#####################################
def P_r(self, r):
rc = self.rc
rmax = self.rmax
#r = extras.asnumpy.r)
#r = asarray(r) # fix to work with numpy.and numpy
r = numpy.asarray(r)
Pr = r * 0.0
idx = numpy.where(r <= rmax) # P(r) = 0 for r > rmax
ri = r[idx]
Pr[idx] = old_div(1.0, numpy.sqrt(1 + (old_div(ri, rc))**2)) - old_div(
1.0, numpy.sqrt(1 + (old_div(rmax, rc))**2))
return old_div(Pr, self.Pn)
##############################################
# Normalize phi and set the Lum weight, mstar
##############################################
def set_L(self, zo, alpha=-0.5, n=1.e5):
self.alpha = alpha
self.mstar = mi_star(zo, self.cosmo)
# intergration limits for normalization
m2 = self.maglim
m1 = 10.0
dm = old_div(abs(m2 - m1), n)
# Normalize the schecter luminosity function -- GET phi*
sout.write("# Normalizing L(m,z) at z=%s... " % zo)
m = numpy.arange(m1, m2, dm)
phi = PHI(m, self.mstar, self.alpha)
self.phinorm = (phi * dm).sum()
sout.write(" Done\n")
return
####################
# Luminosity weight
####################
def L(self, m):
# Background number of galaxies from number counts Yasuda et al (2001)
b = 0.537 * 10**(-0.4 * (m - 20.))
# Postman Luminosity weight
phi = PHI(m, self.mstar, self.alpha)
L = phi / b / self.phinorm
return L
# Just to check the normalization
def P_sum(self):
r1 = 0.
r2 = self.rmax * 1.1
dr = old_div((r2 - r1), 1000.) # arcmins
rx = numpy.arange(r1, r2, dr)
return 2 * math.pi * (self.P_r(rx) * rx * dr).sum()
############################################################################
# make the jpeg for each candidate and label each accordingly, A, B, C, etc.
############################################################################
def make_jpeg(self, k=None):
t0 = time.time()
# Order is important for color image
filters = ('i', 'r', 'g')
# Use the candidates (ra,dec) as position, default
if k == None:
xo = self.RA
yo = self.DEC
#zo = self.z_c
zo = self.zcl
# Use the ranked (A) BCG as the center
else:
xo = astrometry.dec2deg(old_div(self.ra_BCG[k], 15.))
yo = astrometry.dec2deg(self.dec_BCG[k])
zo = self.z_BCG[k]
zo = self.zcl
# Find out tile name for ranking BCG ID name
tile = self.tile
# Size of the jpeg in pixels depending on the redshift
size = old_div(1000, self.h) # 1h^1Mpc [in kpc]
dx = old_div(astrometry.kpc2arc(zo, size, self.cosmo), self.pixscale)
# Avoid crashing getfits, it fails for size ~ 1900 pix and above
if dx > 1900:
dx = 1900
# Call imhead instead
dx = int(dx)
dy = dx
sout.write("# %.3f Mpc @ z=%s is %s x %s pixels\n" %
(old_div(size, 1000.0), zo, dx, dy))
# Cut the fits files using getfits
files = ""
for filter in filters:
fitsfile = os.path.join(self.path, tile,
tile + filter + "_ext.fits")
fitsout = os.path.join(self.outpath,
"tmp_%s%s.fits" % (tile, filter))
cmd = "getfits %s %s %s %s %s -o %s > /dev/null 2>&1 " % (
fitsfile, xo, yo, dx, dy, fitsout)
os.system(cmd)
files = files + fitsout + " "
# Keep names in the class
self.jpeg_name = os.path.join(self.outpath, "%s.jpg" % self.cID)
self.tiff_name = os.path.join(self.outpath, "%s.tif" % self.cID)
self.fitsfile = fitsout
self.tmp_fits = files
self.dx = dx
self.dy = dy
# Make the color tiff
conf = os.path.join(os.environ['BCSPIPE'], 'LIB/stiff.conf')
opts = {}
opts["OUTFILE_NAME"] = self.tiff_name
opts["BINNING"] = 1
opts['GAMMA'] = 2.0 + (zo - 0.2) # For better contrast at higher z
opts['MAX_LEVEL'] = 0.99 - old_div((zo - 0.2), 50.)
opts['VERBOSE_TYPE'] = "QUIET"
cmd = "stiff "
cmd = cmd + " %s -c %s " % (files, conf)
for param, value in list(opts.items()):
cmd = cmd + "-%s %s " % (param, value)
os.system(cmd)
# Make the color jpg
cmd = "convert %s %s" % (self.tiff_name, self.jpeg_name)
os.system(cmd)
return
############################################################################
# make the jpeg for each candidate and label each accordingly, A, B, C, etc.
############################################################################
def make_jpeg_ID(self, k=0):
t0 = time.time()
# Order is important for color image
filters = ('i', 'r', 'g')
xo = astrometry.dec2deg(old_div(self.ra_BCG[k], 15.))
yo = astrometry.dec2deg(self.dec_BCG[k])
#zo = self.z_BCG[k]
zo = self.zcl
# Find out tile name for ranking BCG ID name
tile = self.tile
# Size of the jpeg in pixels depending on the redshift
size = old_div(1000, self.h) # 1h^1Mpc [in kpc]
dx = 2 * astrometry.kpc2arc(zo, size, self.cosmo) / self.pixscale
dx = int(dx)
dy = dx
sout.write("# 2 x %.3f Mpc @ z=%s is %s x %s pixels\n" %
(old_div(size, 1000.0), zo, dx, dy))
# Cut the fits files using getfits
files = ""
for filter in filters:
fitsfile = os.path.join(self.path, tile,
tile + filter + "_ext.fits")
fitsout = os.path.join(self.outpath,
"tmp_%s%s.fits" % (tile, filter))
files = files + fitsout + " "
xpix, ypix = astrometry.rd2xy(self.ra_BCG[k], self.dec_BCG[k],
fitsfile)
cutfits(fitsfile, xpix, ypix, dx, dy, fitsout)
# Keep names in the class
self.jpeg_name = os.path.join(self.outpath, "%s.jpg" % self.SCSname)
self.tiff_name = os.path.join(self.outpath, "%s.tif" % self.SCSname)
self.fitsfile = fitsout
self.tmp_fits = files
self.dx = dx
self.dy = dy
# Make the color tiff
conf = os.path.join(os.environ['BCSPIPE'], 'LIB/stiff.conf')
opts = {}
opts["OUTFILE_NAME"] = self.tiff_name
opts["BINNING"] = 1
opts['GAMMA'] = 2.0 + (zo - 0.2) # For better contrast at higher z
opts['MAX_LEVEL'] = 0.99 - old_div((zo - 0.2), 50.)
opts['VERBOSE_TYPE'] = "QUIET"
cmd = "stiff "
cmd = cmd + " %s -c %s " % (files, conf)
for param, value in list(opts.items()):
cmd = cmd + "-%s %s " % (param, value)
os.system(cmd)
# Make the color jpg
cmd = "convert %s %s" % (self.tiff_name, self.jpeg_name)
os.system(cmd)
return
##############################
# Draw the BGCs in the jpeg
##############################
def ellipse_BCGs(self, index=None):
sout.write("# Drawing ellipses + info for %s\n" % self.cID)
self.jpg_array = pilutil.imread(self.jpeg_name)
self.jpg_region = self.jpg_array #[y1:y2, x1:x2, :]
(ny, nx, nz) = self.jpg_array.shape
x1 = 0.05
y1 = 0.05
dsx = old_div((0.95 - x1), 3.)
dsy = old_div((0.99 - y1), 4.)
dx = 3.0 * dsx
dy = 3.0 * dsy
y1 = dsy
fig = pylab.figure(1, figsize=(9, 10))
ax1 = pylab.axes([x1, y1, dx, dy])
pylab.imshow(self.jpg_region)
# Change ax to arcmin
self.ax_to_arcmin(ds=1.0)
pylab.xlabel("x[arcmin]")
pylab.ylabel("y[arcmin]")
# Add a green cross at the center of the candidate's position
pylab.plot([old_div(nx, 2.0)], [old_div(ny, 2.0)],
'gx',
markersize=15,
markeredgewidth=1.0)
# Radius of search in from arcmin --> pixels
radius = self.dmin * 60.0 / self.pixscale
# And a circle in the search area
C = PCircle((old_div(nx, 2.0), old_div(ny, 2.0)),
radius,
resolution=80,
fill=0,
edgecolor="white",
linestyle='dashed',
linewidth=0.3)
ax1.add_patch(C)
# Label with radius search size and singal of detection
ax1.text(
old_div(nx, 20.),
old_div(ny, 10.),
"Dm=%.2f'\nSn=%.2f" % (self.dmin, self.Sn),
color='white',
#family='monospace',
horizontalalignment='left',
fontsize=11)
# If no BCGs, plot simple and exit
if index == None:
RA = self.RA
DEC = self.DEC
#zo = self.z_c
zo = self.zc
pylab.title("%s --- %s, %s -- zc:%.1f (NO BCGs)" %
(self.cID, RA, DEC, zo),
fontsize=10)
pylab.savefig(os.path.join(self.outpath, "%s_finder.png" %
self.cID))
pylab.close()
# Clean up some files
os.system("rm %s %s" % (self.jpeg_name, self.tiff_name))
# Remove temporary fits files
os.system("rm %s" % self.tmp_fits)
return
RA = astrometry.dec2deg(old_div(self.ra_BCG[index[0]], 15.0))
DEC = astrometry.dec2deg(self.dec_BCG[index[0]])
zo = self.z_BCG[index[0]]
pylab.title("%s --- %s, %s -- z:%.3f (A)" % (self.cID, RA, DEC, zo),
fontsize=10)
# construct the ellipse for the current display
label = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L',
'M', 'N', 'O', 'Q', 'R', 'S', 'T']
j = 0
for i in index[0:7]:
ra = self.ra_BCG[i]
dec = self.dec_BCG[i]
a = self.a_BCG[i]
b = self.b_BCG[i]
theta = self.theta_BCG[i] #*math.pi/180.0
(xo, yo) = astrometry.rd2xy(ra, dec, self.fitsfile)
# Change the referece pixel to reflect jpg standards where the
# origin is at (0,ny), is the upper left corner
yo = ny - yo
E = PEllipse((xo, yo), (a, b),
resolution=80,
angle=theta,
fill=0,
edgecolor="red",
linewidth=0.5)
ax1.add_patch(E)
#angle = random.randint(0,360)
angle = theta
xsh = 3 * a * math.cos(angle * math.pi / 180.)
ysh = 3 * b * math.sin(angle * math.pi / 180.)
# Draw labels, A,B,C, etc.
ax1.annotate(label[j],
xy=(xo, yo),
xycoords='data',
xytext=(xsh, ysh),
textcoords='offset points',
arrowprops=dict(arrowstyle="->",
connectionstyle="arc3,rad=.2",
edgecolor='white',
shrinkA=0.0,
linewidth=0.5),
bbox=dict(boxstyle="round",
fc="1.0",
edgecolor=(1., .5, .5),
fill=1,
alpha=0.4))
#bbox=dict(boxstyle="round", fc=(1.0, 0.7, 0.7), ec=(1., .5, .5),alpha=0.5),
#bbox=dict(boxstyle="round", fc=(1.0, 0.0, 0.0), ec=(1., .5, .5),alpha=0.5),
#arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=.2",edgecolor='red'),color='white')
j = j + 1
# The sub-plot with the info text
ya = 0
yb = 10
x1 = 0.05
y1 = 0.01
dx = dx
dy = dsy - 0.05
ax2 = pylab.axes([x1, y1, dx, dy])
# Header with info
x = 0
y = yb - 2
header = " %-2s %6s %6s %5s %5s %6s %7s %7s %8s %8s %8s %6s" % (
'', 'z_B', 'z_ML', 'Ngal', 'N200', 'R200', 'M_r', 'm_i', 'p(BCG)',
'P(color)', 'P(total)', 'D\"')
ax2.text(x,
y,
header,
family='monospace',
horizontalalignment='left',
fontsize=11)
j = 0
format = " %-2s %6.3f %6.3f %5d %5d %6.2f %7.2f %7.2f %8.3f %8.1f %8.1f %6.2f"
for i in index[0:7]:
y = yb - 3 - j
vars = (label[j], self.zb_BCG[i], self.zml_BCG[i], self.Ngal[i],
self.N200[i], self.R200[i], self.Mr_BCG[i], self.i_BCG[i],
self.p_BCG[i], self.P_color[i], self.P_total[i],
self.d_BCG[i])
texto = format % vars
ax2.text(x,
y,
texto,
family='monospace',
horizontalalignment='left',
fontsize=11)
j = j + 1
pylab.axis('off')
pylab.ylim(ya, yb)
pylab.savefig(os.path.join(self.outpath, "%s_finder.png" % self.cID))
pylab.close()
# Clean up some files
os.system("rm %s %s" % (self.jpeg_name, self.tiff_name))
# Remove temporary fits files
os.system("rm %s" % self.tmp_fits)
return
##########################################
# Write the BCGs candidates information
##########################################
def write_BCGs(self, k):
n = len(k)
label = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L',
'M', 'N', 'O', 'Q', 'R', 'S', 'T']
# Make ids a Char String in numarray
lab = nstr.array(label)
header = "# %2s %20s %6s %11s %11s %5s %5s %6s %7s %7s %8s %8s %7s %7s %6s" % (
'', 'ID', 'z', 'RA', 'DEC', 'Ngal', 'N200', 'R200\'', 'p(BCG)',
'P_z', 'P(color)', 'P(total)', 'M_r', 'm_i', 'D\"')
format = "%4s %20s %6.3f %11.6f %11.6f %5d %5d %6.2f %7.3f %7.1f %8.1f %8.1f %7.2f %7.2f %6.2f"
vars = (lab[0:n], self.id_BCG[k], self.z_BCG[k], self.ra_BCG[k],
self.dec_BCG[k], self.Ngal[k], self.N200[k], self.R200[k],
self.p_BCG[k], self.P_z[k], self.P_color[k], self.P_total[k],
self.Mr_BCG[k], self.i_BCG[k], self.d_BCG[k])
P_file = os.path.join(self.outpath, "%s_Ptotal.dat" % self.cID)
tableio.put_data(P_file, vars, format=format, header=header)
sout.write("# BCGs info in %s\n" % P_file)
return
#####################################
# Draw the BGCs and cluster members
#####################################
    def ellipse_members(self, k=0):
        """Draw the BCG (blue) and selected member galaxies (green) as
        ellipses on the cluster jpeg, add a 1 h^-1 Mpc circle and an
        info table, and save <SCSname>.png.

        k: index of the BCG used for the title and the table row.
        Side effects: writes the png and deletes the temporary
        jpeg/tiff/fits files created by make_jpeg_ID.
        """
        sout.write("# Drawing ellipses + info for %s\n" % self.SCSname)
        self.jpg_array = pilutil.imread(self.jpeg_name)
        self.jpg_region = self.jpg_array  #[y1:y2, x1:x2, :]
        (ny, nx, nz) = self.jpg_array.shape
        # Figure layout: image panel in the top 3/4, text table below
        x1 = 0.05
        y1 = 0.05
        dsx = old_div((0.95 - x1), 3.)
        dsy = old_div((0.99 - y1), 4.)
        dx = 3.0 * dsx
        dy = 3.0 * dsy
        y1 = dsy
        fig = pylab.figure(1, figsize=(9, 10))
        ax1 = pylab.axes([x1, y1, dx, dy])
        pylab.imshow(self.jpg_region)
        # Change ax to arcmin
        self.ax_to_arcmin(ds=1.0)
        pylab.xlabel("x[arcmin]")
        pylab.ylabel("y[arcmin]")
        RA = astrometry.dec2deg(old_div(self.ra_BCG[k], 15.0))
        DEC = astrometry.dec2deg(self.dec_BCG[k])
        zo = self.z_BCG[k]
        pylab.title("%s - %s, %s - z:%.3f Ngal:%d" %
                    (self.SCSname, RA, DEC, zo, self.N1Mpc),
                    fontsize=10)
        # Construct the ellipses for each member galaxy (indices from
        # select_members); the BCG itself is drawn in blue
        for i in self.iR1Mpc[0]:
            ra = self.ra[i]
            dec = self.dec[i]
            a = self.a_image[i]
            b = self.b_image[i]
            theta = self.theta[i]  #*math.pi/180.0
            (xo, yo) = astrometry.rd2xy(ra, dec, self.fitsfile)
            # Change the reference pixel to reflect jpg standards where
            # the origin is at (0,ny), the upper left corner
            yo = ny - yo
            if i == self.idx_BCG[0]:
                ec = 'blue'
            else:
                ec = 'green'
            E = PEllipse((xo, yo), (a, b),
                         resolution=80,
                         angle=theta,
                         fill=0,
                         edgecolor=ec,
                         linewidth=0.5)
            ax1.add_patch(E)
        # And a circle of 1 Mpc/h radius (degrees -> pixels)
        radius = self.r1Mpc * 3600.0 / self.pixscale
        C = PCircle((old_div(nx, 2.0), old_div(ny, 2.0)),
                    radius,
                    resolution=80,
                    fill=0,
                    edgecolor="white",
                    linestyle='dashed',
                    linewidth=0.5)
        ax1.add_patch(C)
        # The sub-plot with the info text
        ya = 0
        yb = 10
        x1 = 0.05
        y1 = 0.01
        dx = dx
        dy = dsy - 0.05
        ax2 = pylab.axes([x1, y1, dx, dy])
        # Header with info
        x = 0
        y = yb - 2
        header = "%6s %6s %5s %5s %6s %6s %7s %7s %8s %8s %8s" % (
            'z_B', 'z_ML', 'Ngal', 'N200', 'R200', 'r200', 'M_r', 'm_i',
            'p(BCG)', 'P(color)', 'P(total)')
        ax2.text(x,
                 y,
                 header,
                 family='monospace',
                 horizontalalignment='left',
                 fontsize=11)
        # One row of values for the k-th BCG / cluster measurements
        format = "%6.3f %6.3f %5d %5d %6.2f %6.2f %7.2f %7.2f %8.3f %8.1f %8.1f"
        y = yb - 3
        vars = (self.zb_BCG[k], self.zml_BCG[k], self.N1Mpc, self.N200,
                self.R200, self.r200 * 60, self.Mr_BCG[k], self.i_BCG[k],
                self.p_BCG[k], self.P_color, self.P_total)
        texto = format % vars
        ax2.text(x,
                 y,
                 texto,
                 family='monospace',
                 horizontalalignment='left',
                 fontsize=11)
        pylab.axis('off')
        pylab.ylim(ya, yb)
        pylab.savefig(
            os.path.join(self.outpath, "%s.png" % self.SCSname),
            dpi=300)
        pylab.close()
        # Clean up some files
        os.system("rm %s %s" % (self.jpeg_name, self.tiff_name))
        # Remove temporary fits files
        os.system("rm %s" % self.tmp_fits)
        return
##############################
# Change the axes to arcmins
###############################
def ax_to_arcmin(self, ds=1.0): # ds in arcmin
[xmin, xmax, ymin, ymax] = pylab.axis()
dx = self.dx
dy = self.dy
scale = old_div(self.pixscale, 60.) # in arcmin
xo = old_div((xmin + xmax), 2.0)
yo = old_div((ymin + ymax), 2.0)
s1 = int((old_div(-dx, 2.0)) * scale)
s2 = int((old_div(+dx, 2.0)) * scale)
sx = numpy.arange(s1, s2 + 0.05, ds)
xtext = []
xtick = []
for s in sx:
x = xo + old_div(s, scale) #+ ds/scale
xtick.append(x)
xtext.append("%.1f" % s)
s1 = int((old_div(-dy, 2.0)) * scale)
s2 = int((old_div(+dy, 2.0)) * scale)
sy = numpy.arange(s1, s2 + 0.05, ds)
ytext = []
ytick = []
for s in sy:
y = yo + old_div(s, scale) #+ ds/scale
ytick.append(y)
ytext.append("%.1f" % s)
pylab.yticks(ytick, tuple(ytext))
pylab.xticks(xtick, tuple(xtext))
# Make sure we plot everithing
pylab.xlim(xmin, xmax)
pylab.ylim(ymin, ymax)
return
########################################################
# Modified/updated from find_clusters_ext_auto.py
# Select galaxies around ID galaxy un redshift range
########################################################
    def select_members(self, i, dz=0.1, Mi_lim=-20.25):
        """Select cluster members around the i-th BCG and measure Ngal
        (within 1 h^-1 Mpc), N200 and R200.

        Members must lie within the radius, within +/- dz of the BCG
        photo-z, between the evolution-corrected faint limit and the BCG
        brightness, and survive an iterative 3-sigma color clip (g-r,
        r-i, i-z) defined from the core (r < 0.5 h^-1 Mpc).

        Returns (Ngal, N200, R200) and stores the member index arrays
        (iR1Mpc, i200, iR), radii and related quantities on the
        instance.
        """
        t0 = time.time()
        sout.write("# Selecting Cluster members... Ngal, N200, R200 ")
        # Get the relevant info for ith BCG
        zo = self.z_BCG[i]
        ra0 = self.ra_BCG[i]
        dec0 = self.dec_BCG[i]
        Mi_BCG = self.Mi_BCG[i]
        DM = self.DM_BCG[i]
        ID_BCG = self.id_BCG[i]
        # 1 - Select in position around ra0,dec0
        # Define 1h^-1 Mpc radius in degrees @ zo; the core is half that
        R1Mpc = 1000 * 1.0 / self.h  # in kpc
        r1Mpc = old_div(
            astrometry.kpc2arc(zo, R1Mpc, self.cosmo), 3600.)  # in degrees.
        rcore = old_div(r1Mpc, 2.0)
        dist = astrometry.circle_distance(ra0,
                                          dec0,
                                          self.ra,
                                          self.dec,
                                          units='deg')
        mask_R1Mpc = numpy.where(dist <= r1Mpc, 1, 0)
        mask_rcore = numpy.where(dist <= rcore, 1, 0)
        arcmin2Mpc = old_div(
            astrometry.arc2kpc(zo, 60.0, self.cosmo),
            1000.0)  # scale between arcmin and Mpc
        # 2 - Select in redshift
        #dz = self.dz*(1 + zo)
        z1 = zo - dz
        z2 = zo + dz
        mask_z = numpy.where(land(self.z_ph >= z1, self.z_ph <= z2), 1, 0)
        # 3 - Select in brightness
        Mi_lim_zo = Mi_lim + self.evf['i'](zo) - self.evf['i'](0.1)
        mask_L1 = numpy.where(self.Mi <= Mi_lim_zo, 1, 0)  # Faint cut > 0.4L*
        mask_L2 = numpy.where(self.Mi >= Mi_BCG, 1, 0)  # Bright cut < L_BCG
        # The final selection mask, position x redshift x Luminosity
        idx = numpy.where(mask_R1Mpc * mask_L1 * mask_L2 * mask_z == 1)
        idc = numpy.where(mask_rcore * mask_L1 * mask_L2 * mask_z == 1)
        # Short version handles for the colors
        gr = self.gr
        ri = self.ri
        iz = self.iz
        # Iterative 3-sigma color clipping on the core sample; stops when
        # no more galaxies are rejected
        Nsigma = 3.0
        loop = 1
        converge = False
        while not converge:
            # The conditions to apply (True = outlier in that color)
            c1 = numpy.abs(gr[idc] - gr[idc].mean()) > Nsigma * numpy.std(
                gr[idc], ddof=1)
            c2 = numpy.abs(ri[idc] - ri[idc].mean()) > Nsigma * numpy.std(
                ri[idc], ddof=1)
            c3 = numpy.abs(iz[idc] - iz[idc].mean()) > Nsigma * numpy.std(
                iz[idc], ddof=1)
            iclip = numpy.where(lor(c1, c2,
                                    c3))  # where any of the conditions fails
            if len(iclip[0]) > 0:
                # NOTE(review): numpy.delete flattens the where-tuple 'idc'
                # into a plain index array; later fancy indexing still
                # works, but the type of idc changes here -- confirm.
                idc = numpy.delete(idc, iclip[0])  # Removed failed ones
                converge = False
            else:
                converge = True
            loop = loop + 1
        sout.write(" \t Done: %s\n" % extras.elapsed_time_str(t0))
        # Build a full-catalog color-member mask from the clipped core
        # statistics (1 = consistent with the core colors)
        c1 = numpy.abs(self.gr - gr[idc].mean()) > Nsigma * numpy.std(gr[idc],
                                                                      ddof=1)
        c2 = numpy.abs(self.ri - ri[idc].mean()) > Nsigma * numpy.std(ri[idc],
                                                                      ddof=1)
        c3 = numpy.abs(self.iz - iz[idc].mean()) > Nsigma * numpy.std(iz[idc],
                                                                      ddof=1)
        mask_cm = numpy.where(lor(c1, c2, c3), 0, 1)  # where condition fails
        iR1Mpc = numpy.where(
            mask_R1Mpc * mask_L1 * mask_L2 * mask_z * mask_cm == 1)
        Ngal = len(iR1Mpc[0])
        sout.write("# Total: %s objects selected in 1h^-1Mpc around %s\n" %
                   (Ngal, self.id_BCG[i]))
        #############################################################################
        # We'll skip 200 measurement as they depend on the corrected values of Ngal
        # Now let's get R200 and N200 (empirical scaling with Ngal)
        R200 = 0.156 * (Ngal**0.6) / self.h  # In Mpc
        r200 = old_div(
            astrometry.kpc2arc(zo, R200 * 1000.0, self.cosmo),
            3600.)  # in degrees.
        mask_r200 = numpy.where(dist <= r200, 1, 0)
        i200 = numpy.where(
            mask_r200 * mask_L1 * mask_L2 * mask_z * mask_cm == 1)
        N200 = len(i200[0])
        self.i200 = i200
        self.N200 = N200
        self.R200 = R200
        self.r200 = r200
        self.L200 = self.Lr[i200].sum()
        ############################################################################
        # And the value for all galaxies up NxR1Mpc -- change if required.
        mask_R = numpy.where(dist <= 10 * r1Mpc, 1, 0)
        iR = numpy.where(mask_R * mask_L1 * mask_L2 * mask_z * mask_cm == 1)
        # Pass the member selections up to the class
        self.iR = iR
        self.iR1Mpc = iR1Mpc
        self.N1Mpc = Ngal
        self.r1Mpc = r1Mpc  # in degrees
        self.dist2BCG = dist
        self.arcmin2Mpc = arcmin2Mpc
        # Sort indices radially for galaxies < N*R1Mpc, will be used later
        i = numpy.argsort(self.dist2BCG[iR])
        self.ix_radial = iR[0][i]
        # We want to keep i200 and iR1Mpc to write out members.
        return Ngal, N200, R200  # iR1Mpc,i200
########################################################
# Modified/updated from find_clusters_ext_auto.py
# Select galaxies around ID galaxy un redshift range
########################################################
def select_members_redshift(self,
i,
dz=0.05,
Mi_lim=-20.25,
zo=None,
radius=1000.0,
weight=None):
t0 = time.time()
sout.write("# Selecting Cluster members... Ngal, N200, R200 ")
# Get the relevant info for ith BCG
ra0 = self.ra_BCG[i]
dec0 = self.dec_BCG[i]
Mi_BCG = self.Mi_BCG[i]
DM = self.DM_BCG[i]
ID_BCG = self.id_BCG[i]
if zo:
print("Will use z:%.3f for cluster" % zo)
else:
zo = self.z_BCG[i]
# 1 - Select in position around ra0,dec0
# Define 1h^-1 Mpc radius in degress @ zo
R1Mpc = radius * 1.0 / self.h # in kpc
r1Mpc = old_div(
astrometry.kpc2arc(zo, R1Mpc, self.cosmo), 3600.) # in degrees.
rcore = old_div(r1Mpc, 2.0)
dist = astrometry.circle_distance(ra0,
dec0,
self.ra,
self.dec,
units='deg')
mask_R1Mpc = numpy.where(dist <= r1Mpc, 1, 0)
mask_rcore = numpy.where(dist <= rcore, 1, 0)
arcmin2Mpc = old_div(
astrometry.arc2kpc(zo, 60.0, self.cosmo),
1000.0) # scale between arcmin and Mpc
# 2 - Select in redshift
#dz = self.dz*(1 + zo)
z1 = zo - dz
z2 = zo + dz
mask_z = numpy.where(land(self.z_ph >= z1, self.z_ph <= z2), 1, 0)
# 3 - Select in brightness
Mi_lim_zo = Mi_lim + self.evf['i'](zo) - self.evf['i'](0.1)
mask_L1 = numpy.where(self.Mi <= Mi_lim_zo, 1, 0) # Faint cut > 0.4L*
mask_L2 = numpy.where(self.Mi >= Mi_BCG, 1, 0) # Bright cut < L_BCG
# The final selection mask, position x redshift x Luminosity
idx = numpy.where(mask_R1Mpc * mask_L1 * mask_L2 * mask_z == 1)
idc = numpy.where(mask_rcore * mask_L1 * mask_L2 * mask_z == 1)
# Shot versions handles
gr = self.gr
ri = self.ri
iz = self.iz
# Some simple 3-sigma clipping defined using r< rcore
Nsigma = 3.0
loop = 1
converge = False
while not converge:
# The conditions to apply
c1 = numpy.abs(gr[idc] - gr[idc].mean()) > Nsigma * numpy.std(
gr[idc], ddof=1)
c2 = numpy.abs(ri[idc] - ri[idc].mean()) > Nsigma * numpy.std(
ri[idc], ddof=1)
c3 = numpy.abs(iz[idc] - iz[idc].mean()) > Nsigma * numpy.std(
iz[idc], ddof=1)
iclip = numpy.where(lor(c1, c2,
c3)) # where any of the conditions fails
if len(iclip[0]) > 0:
idc = numpy.delete(idc, iclip[0]) # Removed failed ones
converge = False
else:
converge = True
loop = loop + 1
# Put it back in case we missed the BCG
#if self.idx_BCG[0][0] not in idc[0]:
# i0 = numpy.append(self.idx_BCG[0][0],idc[0])
# idc = (i0,)
if weight:
# Get the weighted-average redshift within the core:
dz = 0.5 * numpy.abs(self.z2[idc] - self.z1[idc])
z_cl, z_clrms = aux.statsw(self.z_ph[idc], weight=old_div(1.0, dz))
else:
# Get the average redshift within the core:
z_cl = numpy.median(self.z_ph[idc])
z_clrms = self.z_ph[idc].std()
sout.write(" \t Done: %s\n" % extras.elapsed_time_str(t0))
# Or we can make a new mask where the condition's are true
c1 = numpy.abs(self.gr - gr[idc].mean()) > Nsigma * numpy.std(gr[idc],
ddof=1)
c2 = numpy.abs(self.ri - ri[idc].mean()) > Nsigma * numpy.std(ri[idc],
ddof=1)
c3 = numpy.abs(self.iz - iz[idc].mean()) > Nsigma * numpy.std(iz[idc],
ddof=1)
mask_cm = numpy.where(lor(c1, c2, c3), 0, 1) # where condition fails
iR1Mpc = numpy.where(
mask_R1Mpc * mask_L1 * mask_L2 * mask_z * mask_cm == 1)
Ngal = len(iR1Mpc[0])
sout.write("# Total: %s objects selected in %sh^-1Mpc around %s\n" %
(Ngal, old_div(radius, 1000.0), self.id_BCG[i]))
#############################################################################
# We'll skip 200 measurement as they depend on the corrected values of Ngal
# Now let's get R200 and N200
R200 = 0.156 * (Ngal**0.6) / self.h # In Mpc
r200 = old_div(
astrometry.kpc2arc(zo, R200 * 1000.0, self.cosmo),
3600.) # in degrees.
mask_r200 = numpy.where(dist <= r200, 1, 0)
i200 = numpy.where(
mask_r200 * mask_L1 * mask_L2 * mask_z * mask_cm == 1)
N200 = len(i200[0])
self.i200 = i200
self.N200 = N200
self.R200 = R200
self.r200 = r200
self.L200 = self.Lr[i200].sum()
############################################################################
# And the value for all galaxies up NxR1Mpc -- change if required.
mask_R = numpy.where(dist <= 10 * r1Mpc, 1, 0)
iR = numpy.where(mask_R * mask_L1 * mask_L2 * mask_z * mask_cm == 1)
# Pass up
self.iR = iR
self.iR1Mpc = iR1Mpc
self.N1Mpc = Ngal
self.r1Mpc = r1Mpc # in degress
self.dist2BCG = dist
self.arcmin2Mpc = arcmin2Mpc
# Sort indices radially for galaxies < N*R1Mpc, will be used later
i = numpy.argsort(self.dist2BCG[iR])
self.ix_radial = iR[0][i]
# We want to keep i200 and iR1Mpc to write out members.
return Ngal, N200, R200, z_cl, z_clrms
############################
# Header for the info file
############################
def write_head(self, k=0):
    """Assemble the informational header used by the cluster output files.

    Formats the BCG identity/position, cluster redshift, richness and
    radius values already stored on the instance and saves the resulting
    text in ``self.header``.

    k : index of the BCG/candidate to describe (default 0).
    """
    pieces = [
        "# catID : %s \n" % self.id_BCG[k],
        "# RA,DEC : %s %s \n" % (self.RA, self.DEC),
        "# ra,dec : %s %s \n" % (self.ra_BCG[k], self.dec_BCG[k]),
        "# zo(BCG): %6.3f \n" % self.z_BCG[k],
        "# zcl : %6.3f +/- %6.3f\n" % (self.zcl, self.zcl_err),
        "# Ngal (1Mpc/h) : %3d \n" % self.Ngal,
        "# N200 : %3d \n" % self.N200,
        "# Cosmology : %s, %s, %s \n" % self.cosmo,
        "# R(1Mpc/h) : %8.3f [arcmin] \n" % (self.r1Mpc * 60.0),
        "# R200 : %8.3f [arcmin]\n" % (self.r200 * 60.0),
        "# R200 : %8.3f [Mpc]\n" % self.R200,
        "# 1 arcmin @ zo : %8.3f [Mpc] \n" % self.arcmin2Mpc,
        "# \n",
    ]
    self.header = "".join(pieces)
    return
########################################
# Write the clusters members in a file
########################################
def write_members(self, k=0):
    """Write the radially sorted cluster members to <SCSname>.dat.

    NOTE: the ``k`` argument is immediately replaced by the radially
    sorted index array ``self.ix_radial`` (parameter kept only for
    compatibility with the original call signature).
    """
    k = self.ix_radial
    # Refresh self.header before writing the table
    self.write_head()
    # All E/S0 within N x R200 (kept for parity with the original code)
    iR = self.iR
    # Position key: 1 inside R1Mpc, 0 inside R200, -1 elsewhere
    self.keyR = self.dist2BCG * 0 - 1
    self.keyR[self.dist2BCG <= self.r200] = 0
    self.keyR[self.dist2BCG <= self.r1Mpc] = 1
    out_file = os.path.join(self.outpath, "%s.dat" % self.SCSname)
    col_titles = "# %-22s %12s %12s %8s %8s %8s %8s %8s %8s %10s %10s %8s %5s" % (
        ' catID', 'RA', 'DEC', 'z_b', 'z_ml', 'm_r', 'm_i', 'Mr', 'Mi',
        'Lr[Mo]', 'Li[Mo]', 'd[arcmin]', 'R1Mpc')
    # Renamed locals: `format` and `vars` shadowed Python builtins
    row_fmt = "%-24s %12.6f %12.6f %8.3f %8.3f %8.3f %8.3f %8.3f %8.3f %10.3e %10.3e %8.3f %5d"
    columns = (self.id[k], self.ra[k], self.dec[k], self.z_b[k],
               self.z_ml[k], self.r[k], self.i[k], self.Mr[k], self.Mi[k],
               self.Lr[k], self.Li[k], self.dist2BCG[k] * 60, self.keyR[k])
    tableio.put_data(out_file, columns, format=row_fmt,
                     header=self.header + col_titles)
    # The indexes radially sorted
    self.ix_radial = k
    return
##########################################
# Compute the Background for the clusters
##########################################
def background(self, k=0):
    """Estimate the local background and correct the cluster measurements.

    Builds radial surface-density profiles N(r) and L_r(r) in log-spaced
    annuli around the BCG, measures the mean background level in an outer
    annulus (nominally 4-9 x R1Mpc), and stores background-corrected
    quantities on the instance:

        Ngal_c, R200_c, r200_c, i200_c, N200_c, L200_c  (corrected values)
        d_Ngal, d_N200, d_L200                          (uncorrected errors)
        d_Ngal_c, d_N200_c, d_L200_c                    (corrected errors)
        M_N200, M_L200                                  (calibrated masses)

    Also writes a diagnostic N(r)/L(r) profile plot <SCSname>_prof.png.

    NOTE(review): ``k`` is unused; the cluster redshift self.zcl is used
    instead of z_BCG[k] (see the commented-out line below).
    """
    ixr = self.ix_radial
    #zo = self.z_BCG[k]
    zo = self.zcl
    # Store radially ordered
    r = self.dist2BCG[ixr] * 60.0  # in arcmin
    Lr = self.Lr[ixr]  # We do in the r-band as Reyes et al
    # Bin the Ngal/Lum data in log spacing
    n = 15
    rbin = mklogarray(0.0, r.max(), n)
    Nbin, rcenter = histo(r, rbin, center='yes')
    Lbin, rcenter = bin_data(r, Lr, rbin, center='yes')
    # Compute the area in each shell (annulus between consecutive edges)
    ir = numpy.indices(rbin.shape)[0]
    ir1 = ir[:-1]
    ir2 = ir[1:]
    r1 = rbin[ir1]
    r2 = rbin[ir2]
    abin = math.pi * (r2**2 - r1**2)
    PN = old_div(Nbin, abin)  # Number Surface density
    PL = old_div(Lbin, abin)  # Luminosity surface density
    # Compute the background median density both in Lum and Ngal
    # Between 4.0 - 9.0 r1Mpc
    R1 = 4.0 * self.r1Mpc * 60.0
    R2 = 9.0 * self.r1Mpc * 60.0
    # If the outer radius falls off the data, slide the annulus inward
    if R2 >= r.max():
        R2 = r2.max()
        R1 = R2 - 2.0 * self.r1Mpc * 60.0
    PN_bgr = PN[land(rcenter > R1, rcenter < R2)]
    PL_bgr = PL[land(rcenter > R1, rcenter < R2)]
    r_bgr = rcenter[land(rcenter > R1, rcenter < R2)]
    # Get the mean values for the Ngal and Lr profiles, which will
    # be the correction per arcmin^2
    PN_mean = numpy.mean(PN_bgr)
    PL_mean = numpy.mean(PL_bgr)
    # Total number in area
    N_bgr = PN_bgr.sum()
    L_bgr = PL_bgr.sum()
    area_bgr = math.pi * (R2**2 - R1**2)
    # Get the correction for Number of galaxies and Luminosoty
    # For R200 we need to recompute R200 and N200 based on new
    # R200 value.
    area_r1Mpc = math.pi * (self.r1Mpc * 60.)**2  # in arcmin2
    self.Ngal_c = self.Ngal - PN_mean * area_r1Mpc
    # Clamp: the background can exceed the raw counts in poor systems
    if self.Ngal_c < 0:
        self.Ngal_c = 0.0
    self.R200_c = 0.156 * (self.Ngal_c**0.6) / self.h  # In Mpc
    self.r200_c = old_div((old_div(self.R200_c, self.arcmin2Mpc)), 60.0)
    area_r200_c = math.pi * (self.r200_c * 60.)**2  # in arcmin2
    area_r200 = math.pi * (self.r200 * 60.)**2  # in arcmin2
    self.i200_c = numpy.where(self.dist2BCG[ixr] <= self.r200_c)
    self.N200_c = len(self.i200_c[0]) - PN_mean * area_r200_c
    self.L200_c = Lr[self.i200_c].sum(
    ) - 0.3 * PL_mean * area_r200_c  # 0.3 factor from old code, why????
    #print self.Ngal
    #print PN
    #print r1
    #print rcenter
    #print R1,R2
    #print r.min(),r.max()
    #print "PN_mean",PN_mean
    #print PN_bgr
    #print area_r1Mpc
    #print self.Ngal_c
    #print self.r200_c
    #print self.R200_c
    # Errors for uncorrected valyes (Poisson on counts)
    dL200 = self.Lr_err[self.i200].sum()
    self.d_Ngal = math.sqrt(self.Ngal)
    self.d_N200 = math.sqrt(self.N200)
    self.d_L200 = math.sqrt(dL200**2)
    # We estimate the errors: Poisson term + area-scaled background term
    dL200_c = self.Lr_err[self.i200_c].sum()
    self.d_Ngal_c2 = self.Ngal_c + (
        (old_div(area_r1Mpc, area_bgr))**2) * N_bgr
    self.d_N200_c2 = self.N200_c + (
        (old_div(area_r200_c, area_bgr))**2) * N_bgr
    self.d_L200_c2 = dL200_c**2 + (
        (old_div(area_r200_c, area_bgr))**2) * dL200_c**2
    # Avoid sqrt of negative number
    if self.d_Ngal_c2 < 0:
        self.d_Ngal_c = 0
    else:
        self.d_Ngal_c = math.sqrt(self.Ngal_c + ((old_div(
            area_r1Mpc, area_bgr))**2) * N_bgr)
    if self.d_N200_c2 < 0:
        self.d_N200_c = 0
    else:
        self.d_N200_c = math.sqrt(self.N200_c + ((old_div(
            area_r200_c, area_bgr))**2) * N_bgr)
    if self.d_L200_c2 < 0:
        self.d_L200_c = 0
    else:
        self.d_L200_c = math.sqrt(dL200_c**2 + ((old_div(
            area_r200_c, area_bgr))**2) * dL200_c**2)
    # Get the mass for corrected values
    (self.M_N200, self.M_L200) = Mass_calib(self.N200_c,
                                            self.L200_c,
                                            self.LBCG,
                                            zo,
                                            h=self.h)
    ####################################
    # Now plot the profiles + some info
    ####################################
    x_bg = [R1, R2]
    yN_bg = [PN_mean, PN_mean]
    yL_bg = [PL_mean, PL_mean]
    # Vertical markers for R1Mpc (red) and corrected R200 (yellow)
    xx = [self.r1Mpc * 60., self.r1Mpc * 60.]
    rr = [self.r200_c * 60., self.r200_c * 60.]
    yy = [PN.min(), PN.max()]
    pylab.figure(1, figsize=(8, 8))
    pylab.subplot(2, 1, 1)
    pylab.plot(rcenter, PN, 'k-')
    pylab.plot(rcenter, PN, 'ko')
    pylab.plot(xx, yy, 'r-')
    pylab.plot(rr, yy, 'y-')
    pylab.plot(x_bg, yN_bg, 'g--')
    pylab.text(rcenter[0],
               PN.min() * 1.2,
               self.SCSname,
               ha='left',
               size=10)
    pylab.text(self.r1Mpc * 60.0 * 1.1,
               PN.max() * 0.2,
               "R1Mpc",
               rotation='vertical',
               ha='left',
               size=10)
    pylab.text(self.r200_c * 60.0 * 1.1,
               PN.max() * 0.2,
               "R200",
               rotation='vertical',
               ha='left',
               size=10)
    pylab.xlabel(r'$r {\rm (arcmin)}$', fontsize=14)
    pylab.ylabel(r'$N(r) {\rm arcmin}^{-2}$', fontsize=14)
    pylab.loglog()
    pylab.subplot(2, 1, 2)
    pylab.plot(rcenter, PL, 'k-')
    pylab.plot(rcenter, PL, 'ko')
    yy = [PL.min(), PL.max()]
    pylab.plot(xx, yy, 'r-')
    pylab.plot(rr, yy, 'y-')
    pylab.plot(x_bg, yL_bg, 'g--')
    pylab.text(rcenter[0],
               PL.min() * 1.2,
               self.SCSname,
               ha='left',
               size=10)
    pylab.text(self.r1Mpc * 60.0 * 1.1,
               PL.max() * 0.2,
               "R1Mpc",
               rotation='vertical',
               ha='left',
               size=10)
    pylab.text(self.r200_c * 60.0 * 1.1,
               PL.max() * 0.2,
               "R200",
               rotation='vertical',
               ha='left',
               size=10)
    pylab.xlabel(r'$r {\rm (arcmin)}$', fontsize=14)
    pylab.ylabel(r'$L(r) {\rm arcmin}^{-2}$', fontsize=14)
    outname = os.path.join(self.outpath, "%s_prof.png" % self.SCSname)
    pylab.loglog()
    # Best-effort save: keep going even if the figure cannot be written
    try:
        pylab.savefig(outname)
        pylab.close()
    except:
        print("** ERROR: Could not write %s ***" % outname)
        pylab.close()
    return
def write_info(self, k=0):
    """Write one-line summary files for the cluster.

    Produces two files in ``self.outpath``:
      <SCSname>.cinfo -- background-corrected quantities
      <SCSname>.oinfo -- uncorrected (raw) quantities

    k : index of the BCG to report (default 0).

    Fix: both files are now opened with context managers so the handles
    are closed even if a formatting error is raised mid-write.
    """
    header = ''
    header = header + "# %18s" % "name"
    header = header + "%14s %14s " % ('RA(deg)', 'DEC(deg)')
    #header = header + "%11s %11s " % ('RA','DEC')
    header = header + "%7s " * 2 % ('z_cl', 'err')
    header = header + "%7s " * 2 % ('zb', 'zml')
    header = header + "%7s " * 2 % ('R200_c', 'r200_c')
    header = header + "%7s " * 2 % ('Ngal_c', 'd_Ngal')
    header = header + "%7s " * 2 % ('N200_c', 'd_N200')
    header = header + "%9s " * 3 % ('L_BCG', 'L200_c', 'd_L200')
    header = header + "%9s " * 2 % ('M_N200', 'M_L200')
    header = header + "\n"
    # Corrected values
    cfile = os.path.join(self.outpath, "%s.cinfo" % self.SCSname)
    with open(cfile, "w") as c:
        c.write(header)
        c.write("%-20s" % self.SCSname)
        c.write("%14s %14s " % (self.ra_BCG[k], self.dec_BCG[k]))
        #c.write("%11s %11s " % (self.RA,self.DEC))
        c.write("%7.3f " * 2 % (self.zcl, self.zcl_err))
        c.write("%7.3f " * 2 % (self.zb_BCG[k], self.zml_BCG[k]))
        c.write("%7.2f " * 2 % (self.R200_c, self.r200_c * 60.0))
        c.write("%7.2f " * 2 % (self.Ngal_c, self.d_Ngal_c))
        c.write("%7.2f " * 2 % (self.N200_c, self.d_N200_c))
        c.write("%9.2e " * 3 % (self.LBCG, self.L200_c, self.d_L200_c))
        c.write("%9.2e " * 2 % (self.M_N200, self.M_L200))
        c.write("%4s " % self.zuse)
        c.write("\n")
    header = ''
    header = header + "# %18s" % " name"
    header = header + "%14s %14s " % ('RA(deg)', 'DEC(deg)')
    #header = header + "%11s %11s " % ('RA','DEC')
    header = header + "%7s " * 2 % ('z_cl', 'err')
    header = header + "%7s " * 2 % ('zb', 'zml')
    header = header + "%7s " * 2 % ('R200', 'r200')
    header = header + "%7s " * 2 % ('Ngal', 'd_Ngal')
    header = header + "%7s " * 2 % ('N200', 'd_N200')
    header = header + "%9s " * 3 % ('L_BCG', 'L200', 'd_L200')
    header = header + "%9s " * 2 % ('M_N200', 'M_L200')
    header = header + "\n"
    # UN-corrected values
    ofile = os.path.join(self.outpath, "%s.oinfo" % self.SCSname)
    with open(ofile, "w") as o:
        o.write(header)
        o.write("%-20s" % self.SCSname)
        o.write("%14s %14s " % (self.ra_BCG[k], self.dec_BCG[k]))
        #o.write("%11s %11s " % (self.RA,self.DEC))
        o.write("%7.3f " * 2 % (self.zcl, self.zcl_err))
        o.write("%7.3f " * 2 % (self.zb_BCG[k], self.zml_BCG[k]))
        o.write("%7.2f " * 2 % (self.R200, self.r200 * 60.0))
        o.write("%7.2f " * 2 % (self.Ngal, self.d_Ngal))
        o.write("%7.2f " * 2 % (self.N200, self.d_N200))
        o.write("%9.2e " * 3 % (self.LBCG, self.L200, self.d_L200))
        # Masses for the uncorrected values computed on the fly
        o.write("%9.2e " * 2 % Mass_calib(self.N200,
                                          self.L200,
                                          self.LBCG,
                                          self.z_BCG[0],
                                          h=self.h))
        o.write("%4s " % self.zuse)
        o.write("\n")
    return
# #########################################################
# # Compute Kcorr array to make linear interpolation later
# #########################################################
# def Kcorr_fit(sed='El_Benitez2003'):
# import scipy
# import scipy.interpolate
# k = {}
# zx = numpy.arange(0.0, 2.0, 0.005)
# kg = bpz_mix.Kcorr(zx,sed,'g_MOSAICII')
# kr = bpz_mix.Kcorr(zx,sed,'r_MOSAICII')
# ki = bpz_mix.Kcorr(zx,sed,'i_MOSAICII')
# kz = bpz_mix.Kcorr(zx,sed,'z_MOSAICII')
# k['g'] = scipy.interpolate.interp1d(zx,kg)
# k['r'] = scipy.interpolate.interp1d(zx,kr)
# k['i'] = scipy.interpolate.interp1d(zx,ki)
# k['z'] = scipy.interpolate.interp1d(zx,kz)
# return k
##################################################################
# Read both kcorrection k(z) and evolution ev(z) from BC03 model
##################################################################
def KEfit(modelfile):
    """Read K(z) and evolution Ev(z) corrections from a BC03 model file.

    Returns a pair of dicts (k, e) keyed by band ('g', 'r', 'i', 'z'),
    each value being a scipy interp1d interpolator in redshift.
    """
    import scipy
    import scipy.interpolate
    import tableio
    sout.write("# Getting K(z) and Ev(z) corrections from file: %s\n" %
               modelfile)
    # Column 0 is z; columns 10-13 are K per band, 14-17 the evolution term
    data = tableio.get_data(modelfile,
                            cols=(0, 10, 11, 12, 13, 14, 15, 16, 17))
    z = data[0]
    k = {}
    e = {}
    for j, band in enumerate(('g', 'r', 'i', 'z')):
        # K-only correction at each age SED
        k[band] = scipy.interpolate.interp1d(z, data[1 + j])
        # Evolution term alone
        e[band] = scipy.interpolate.interp1d(z, data[5 + j])
    return k, e
############################################
# Read evolution ev(z) only from BC03 model
############################################
def evolfit(modelfile):
    """Read the evolution-only term ev(z) from a BC03 model file.

    Returns a dict keyed by band ('g', 'r', 'i', 'z') of scipy interp1d
    interpolators in redshift.
    """
    import scipy
    import scipy.interpolate
    import tableio
    # Column 0 is z; columns 14-17 hold the per-band evolution term
    data = tableio.get_data(modelfile, cols=(0, 14, 15, 16, 17))
    z = data[0]
    e = {}
    for j, band in enumerate(('g', 'r', 'i', 'z')):
        e[band] = scipy.interpolate.interp1d(z, data[1 + j])
    return e
######################################
# BCG Probability function
# p = 0 for M dimmer than Mlimit
# p = 1 for M brighter than Mlimit
######################################
def p_BCG(M, Mlim, b=0.4, zp=0.5):
    """BCG probability function.

    Approaches 0 for M dimmer than Mlim and 1 for M brighter than Mlim;
    thin wrapper over F_BCG evaluated at the magnitude offset M - Mlim.
    """
    offset = M - Mlim
    return F_BCG(offset, b, zp)
################################################################
# BCG priot aux function,
#################################################################
def F_BCG(x, b=0.4, zp=0.5):
    """BCG prior auxiliary function.

    Smooth step going from ~1 (bright, x << 0) to ~0 (dim, x >> 0),
    recentered so that F_BCG(0) == zp (e.g. zp=0.5 gives 50% at x = 0,
    zp=0.682 recenters at 68.2%).

    x  : magnitude offset M - Mlim (scalar or numpy array)
    b  : softness of the transition
    zp : function value at x = 0

    Fix: the py2 ``old_div`` shim is replaced by plain true division
    (both operands are floats, so the result is unchanged).
    """
    # Shift so that exp(-10**(b*dx)) == zp at x == 0
    dx = math.log10(-math.log(zp)) / b
    u = x + dx
    phi = numpy.exp(-10**(b * u))
    return phi
#######################################################################
# Modified Schechter magnitude function from Postman et al (2001)
# uses alpha+2 rather than alpha+1 because of the extra 10^-0.4(m-m*)
# phi = (10^(-0.4(m-m*)))^(alpha+1) * exp[-10^(-0.4(m-m*))]
# PHI = phi*10^(-0.4(m-m*))
#######################################################################
def PHI(m, mstar, alpha):
    """Modified Schechter magnitude function from Postman et al. (2001).

    Uses (alpha + 2) rather than the usual (alpha + 1) because of the
    extra 10^-0.4(m-m*) factor:
        phi = (10^(-0.4(m-m*)))^(alpha+1) * exp[-10^(-0.4(m-m*))]
        PHI = phi * 10^(-0.4(m-m*))
    """
    lum_ratio = 10**(-0.4 * (m - mstar))
    return lum_ratio**(alpha + 2) * numpy.exp(-lum_ratio)
###########################################
# Get m_star aparent mangnitude in i-band
###########################################
def mi_star(z, cosmo=(0.3, 0.7, 0.7)):
    """Apparent i-band magnitude of an m* galaxy at redshift z.

    Evolves a B-band M* relation linearly in z, transforms it to the
    MOSAIC-II i band with an elliptical template, and adds the distance
    modulus for the given (OmegaM, OmegaL, h) cosmology.
    """
    # Set the cosmology and get the luminosity distance
    c = cosmopy.set(cosmo)
    dlum = c.dlum(z)[0]
    # B-band M* with linear redshift evolution
    Mb_star = -19.43 - 1.01 * z
    Mi_star = cosmology.reobs('El_Benitez2003',
                              m=Mb_star,
                              oldfilter="B_Johnson",
                              newfilter="i_MOSAICII")
    # Distance modulus: 5*log10(dlum) + 25 (dlum presumably in Mpc)
    dist_mod = 5.0 * math.log10(dlum) + 25
    return Mi_star + dist_mod
###################################################
# Read in the info file with candidates positions
###################################################
def read_candidates(file):
    """Read a candidate-list file and return per-candidate dictionaries.

    Each non-comment row is expected as:
        ID  <col1> <col2>  RA  DEC  S(z1) S(z2) ...
    where columns 6+ are detection significances evaluated on the fixed
    redshift grid zx = 0.1 ... 0.8 (step 0.1).

    Returns (ra, dec, zo, sn), dicts keyed by candidate ID:
      ra, dec : position (decimal degrees, columns 4 and 5)
      zo      : redshift of the maximum significance
      sn      : the maximum significance itself

    Fixes: blank lines no longer crash (``line[0]`` on ""), ``zmax`` is
    reset per row (it previously leaked from the previous row, or was
    unbound on the first row, when no significance columns were present),
    and the bare ``except`` is narrowed to ValueError.
    """
    ra = {}
    dec = {}
    sn = {}
    zo = {}
    zx = numpy.arange(0.1, 0.9, 0.1)
    for line in open(file).readlines():
        # Skip blank lines and comments
        if not line.strip() or line[0] == "#":
            continue
        vals = line.split()
        ID = vals[0]
        ra[ID] = float(vals[3])
        dec[ID] = float(vals[4])
        si_max = -99.
        zmax = zx[0]  # deterministic default if no significances follow
        for i, s in enumerate(vals[5:]):
            try:
                si = float(s)
            except ValueError:
                # Malformed numbers count as zero significance
                si = 0.0
            if si > si_max:
                si_max = si
                zmax = zx[i]
        sn[ID] = si_max
        zo[ID] = zmax
    return ra, dec, zo, sn
####################################################
# Fake an ellipse using an N-sided polygon
#####################################################
import matplotlib.patches
import math
Polygon = matplotlib.patches.Polygon
def PEllipse(center, axes, resolution=100, angle=0.0, **kwargs):
    """Fake an ellipse using an N-sided matplotlib Polygon patch.

    center     : (xo, yo) ellipse center
    axes       : (A, B) semi-axes along x and y before rotation
    resolution : number of polygon vertices
    angle      : rotation angle in degrees
    kwargs     : passed through to matplotlib.patches.Polygon

    Returns a matplotlib.patches.Polygon instance.

    Fix: the futurize placeholder parameter names ``xxx_todo_changeme``
    (left over from py2 tuple parameters, which were never callable by
    keyword) are replaced with descriptive names.
    """
    (xo, yo) = center
    (A, B) = axes
    # hack carried over from the original: the sign flip makes the
    # rotation direction come out right on the sky plots
    theta = -angle * math.pi / 180.
    t = 2 * math.pi / resolution * numpy.arange(resolution)
    xtmp = A * numpy.cos(t)
    ytmp = B * numpy.sin(t)
    # Rotate the ellipse and translate it to the requested center
    x = xtmp * math.cos(theta) - ytmp * math.sin(theta) + xo
    y = xtmp * math.sin(theta) + ytmp * math.cos(theta) + yo
    return Polygon(list(zip(x, y)), **kwargs)
##############################
# A circle as a polygon too
###############################
def PCircle(center, radius, resolution=100, **kwargs):
    """A circle faked as an N-sided matplotlib Polygon patch.

    center     : (xo, yo) circle center
    radius     : circle radius (same units as the axes)
    resolution : number of polygon vertices
    kwargs     : passed through to matplotlib.patches.Polygon

    Returns a matplotlib.patches.Polygon instance.

    Fix: the futurize placeholder parameter name ``xxx_todo_changeme2``
    (left over from a py2 tuple parameter) is replaced with ``center``.
    """
    (xo, yo) = center
    t = 2 * math.pi / resolution * numpy.arange(resolution)
    x = radius * numpy.cos(t) + xo
    y = radius * numpy.sin(t) + yo
    return Polygon(list(zip(x, y)), **kwargs)
##########################################################
# Cuts fits file around (xo,yo)
# Uses getfits (faster) or imcopy(iraf) for bigger files
##########################################################
def cutfits(fitsin, xo, yo, dx, dy, fitsout):
    """Cut a (dx, dy) pixel stamp centered on (xo, yo) from fitsin into
    fitsout.

    Uses IRAF's imcopy for large cutouts and the external ``getfits``
    command-line tool for small ones.

    NOTE(review): the getfits branch builds a shell command by string
    interpolation (os.system); fine for trusted pipeline arguments, but
    unsafe for untrusted input -- consider subprocess.run with a list.
    """
    # Avoid crashing getfits as it fails for size ~ 1900 pix and above,
    # use iraf's imhead insted
    if dx > 1900 or dy > 1900:
        from .pyraf import iraf
        # Integer pixel section [i1:i2,j1:j2] centered on (xo, yo)
        i1 = int(xo - old_div(dx, 2))
        i2 = int(xo + old_div(dx, 2))
        j1 = int(yo - old_div(dy, 2))
        j2 = int(yo + old_div(dy, 2))
        section = "[%s:%s,%s:%s]" % (i1, i2, j1, j2)
        iraf.imcopy("%s%s" % (fitsin, section), fitsout, verb=0)
    else:
        # getfits takes the center and size directly; output silenced
        cmd = "getfits %s %s %s %s %s -o %s > /dev/null 2>&1 " % (
            fitsin, xo, yo, dx, dy, fitsout)
        os.system(cmd)
    return
#######################################
# make an array with power law growth
########################################
def mklogarray(x1, x2, n):
    """Build an array of n+1 bin edges from x1 to x2 with power-law
    (logarithmic) spacing.

    x1 > 0  : edges are x1 * (x2/x1)**(i/n), i = 0..n
    x1 == 0 : first edge is 0; the remaining spacing follows a power law
              in (x2 + 1) so the log spacing is defined at zero
    x1 < 0  : unsupported; prints an error and returns None

    Fix: the py2 ``old_div`` shim is replaced by true division (callers
    in this module pass floats, e.g. mklogarray(0.0, r.max(), n); note
    that integer arguments now use true rather than floor division).
    """
    if x1 > 0:
        i = numpy.indices((n + 1, ))[0] * 1.0
        x = x1 * (x2 / x1)**(i / n)
        #dx = x1*( (x2/x1)**(i/n) - (x2/x1)**((i-1)/n))
    elif x1 == 0:
        i = numpy.indices((n, ))[0] * 1.0 + 1
        x = numpy.zeros((n + 1, ))
        #x[1:] = x2**(i/n)
        # Power-law increments in (x2 + 1); cumulative sum gives edges
        dx = (x2 + 1)**(i / n) - (x2 + 1)**((i - 1) / n)
        x[1:] = dx.cumsum()
    else:
        print("ERROR, x < 0")
        return
    return x
#################################################
# Make histogram using xbin, gives the same
# results as numpy.histogram
#################################################
def histo(x, xbin, center=None):
    """Histogram of x over the bin edges xbin.

    Per the original author's note this matches numpy.histogram for
    these inputs: the first bin is closed on both sides [e0, e1], the
    later bins are half-open (e_i, e_i+1].

    Returns (nbin, xbin); when ``center`` is truthy, xbin is replaced by
    the n bin centers so both outputs have length len(xbin) - 1.

    Fixes: the bare ``int16`` and ``land`` module aliases are replaced
    with the explicit numpy.int16 / numpy.logical_and (land is the
    module's logical_and shorthand), and the py2 old_div shim is dropped
    (float operands; true division is identical).
    """
    n = len(xbin) - 1
    nbin = numpy.zeros(n).astype(numpy.int16)
    for i in range(n):
        if i == 0:
            nbin[i] = len(
                numpy.where(numpy.logical_and(x >= xbin[i],
                                              x <= xbin[i + 1]))[0])
        else:
            nbin[i] = len(
                numpy.where(numpy.logical_and(x > xbin[i],
                                              x <= xbin[i + 1]))[0])
    # Center and reduce to n-1
    if center:
        ix = numpy.indices(xbin.shape)[0]
        i1 = ix[:-1]
        i2 = ix[1:]
        dx = xbin[i2] - xbin[i1]
        xbin = xbin[:-1] + dx / 2.0
    return nbin, xbin
################################################################
# Bin data in y(n) acoording to x(n) using bin spacing in xbin
###############################################################
def bin_data(x, y, xbin, center=None):
    """Sum y(n) into bins of x(n) defined by the edge array xbin.

    Binning matches histo(): first bin closed on both sides, later bins
    half-open (e_i, e_i+1].

    Returns (ybin, xbin); when ``center`` is truthy, xbin is replaced by
    the n bin centers so both outputs have length len(xbin) - 1.

    Fixes: bare ``float64`` and ``land`` module aliases replaced with
    numpy.float64 / numpy.logical_and, and the py2 old_div shim dropped
    (float operands; true division is identical).
    """
    n = len(xbin) - 1
    ybin = numpy.zeros(n).astype(numpy.float64)
    for i in range(n):
        if i == 0:
            idx = numpy.where(
                numpy.logical_and(x >= xbin[i], x <= xbin[i + 1]))
        else:
            idx = numpy.where(
                numpy.logical_and(x > xbin[i], x <= xbin[i + 1]))
        ybin[i] = y[idx].sum()
    # Center and reduce to n-1
    if center:
        ix = numpy.indices(xbin.shape)[0]
        i1 = ix[:-1]
        i2 = ix[1:]
        dx = xbin[i2] - xbin[i1]
        xbin = xbin[:-1] + dx / 2.0
    return ybin, xbin
def get_R200(Ngal, h=0.7):
    """Cluster radius R200 in Mpc from the richness scaling
    R200 = 0.156 * Ngal**0.6 / h."""
    r200_mpc = 0.156 * (Ngal**0.6) / h
    return r200_mpc
def Mass123(N200, L200, LBCG, h=0.7):
    """Cluster mass estimates from N200, L200 and LBCG scaling relations.

    Luminosities are given in solar units and internally rescaled to
    10^10 h^-2 Lsun (the original comment said 10^11, but Lscale is
    1e10); returned masses are in Msun (relations normalized to
    10^14 h^-1 Msun).

    Returns (M_N200, M_L200, M_LBCG).

    Fix: py2 ``old_div`` shim replaced by true division (all
    denominators are floats, so the results are unchanged).
    """
    # Scale to convert Lum into units of 10^10 * h^-2
    Lscale = 1.e10 / h**2
    Mscale = 1.e14 / h
    L200 = L200 / Lscale
    LBCG = LBCG / Lscale
    M_N200 = Mscale * 1.43 * (N200 / 20.)**1.20
    M_L200 = Mscale * 1.72 * (L200 / 20.)**1.55
    M_LBCG = Mscale * 1.07 * (LBCG / 5.)**1.10
    return M_N200, M_L200, M_LBCG
def Mass_calib_old(N200, L200, LBCG, z, h=0.7):
    """Older mass calibration from richness/luminosity scaling relations.

    Uses two parameter sets split at z = 0.23. Luminosities are in solar
    units (internally rescaled to 10^10 h^-2 Lsun); masses are returned
    in Msun, normalized to 10^14 h^-1.

    Returns (M_N200, M_L200).

    Fix: py2 ``old_div`` shim replaced by true division (all
    denominators are floats, so the results are unchanged).
    """
    # The best fit parameters
    if z < 0.23:
        M0_N = 1.27
        M0_L = 1.81
        alphaN = 1.20
        alphaL = 1.27
        gammaN = 0.71
        gammaL = 0.40
        aN = 1.54
        aL = 7.77
        bN = 0.41
        bL = 0.67
    else:
        M0_N = 1.57
        M0_L = 1.76
        alphaN = 1.12
        alphaL = 1.30
        gammaN = 0.34
        gammaL = 0.26
        aN = 1.64
        aL = 7.92
        bN = 0.43
        bL = 0.66
    # Luminosities to units of 10^10 h^-2 Lsun
    Lscale = 1.e10 / h**2
    Mscale = 1.e14 / h
    L200 = L200 / Lscale
    LBCG = LBCG / Lscale
    # Expected BCG luminosity at this richness/luminosity
    LBCG_N = aN * N200**bN
    LBCG_L = aL * L200**bL
    M_N200 = Mscale * M0_N * ((N200 / 20.0)**alphaN) * (LBCG / LBCG_N)**gammaN
    M_L200 = Mscale * M0_L * ((L200 / 40.0)**alphaL) * (LBCG / LBCG_L)**gammaL
    return M_N200, M_L200
def Mass_calib(N200, L200, LBCG, z, h=0.7):
    """Mass calibration from richness/luminosity scaling relations.

    Uses two parameter sets split at z = 0.23; differs from
    Mass_calib_old by the h**-2 scaling on aN/aL and the revised aL
    values (the old ones are kept as '# bad value' comments).
    Luminosities are in solar units (internally rescaled to
    10^10 h^-2 Lsun); masses are returned in Msun, normalized to
    10^14 h^-1.

    Returns (M_N200, M_L200).

    Fix: py2 ``old_div`` shim replaced by true division (all
    denominators are floats, so the results are unchanged).
    """
    # The best fit parameters
    if z < 0.23:
        M0_N = 1.27
        M0_L = 1.81
        alphaN = 1.20
        alphaL = 1.27
        gammaN = 0.71
        gammaL = 0.40
        aN = 1.54 / h**2
        #aL = 7.77/h**2 # bad value
        aL = 0.61 / h**2
        bN = 0.41
        bL = 0.67
    else:
        M0_N = 1.57
        M0_L = 1.76
        alphaN = 1.12
        alphaL = 1.30
        gammaN = 0.34
        gammaL = 0.26
        aN = 1.64 / h**2
        #aL = 7.92/h**2 # bad value
        aL = 0.58 / h**2
        bN = 0.43
        bL = 0.66
    # Luminosities to units of 10^10 h^-2 Lsun
    L200 = L200 * (h**2) / 1.e10
    LBCG = LBCG * (h**2) / 1.e10
    #L200 = L200/1.e10
    #LBCG = LBCG/1.e10
    # Expected BCG luminosity at this richness/luminosity
    LBCG_N = aN * N200**bN
    LBCG_L = aL * L200**bL
    M_N200 = (1.e14 / h) * M0_N * (
        (N200 / 20.0)**alphaN) * (LBCG / LBCG_N)**gammaN
    M_L200 = (1.e14 / h) * M0_L * (
        (L200 / 40.0)**alphaL) * (LBCG / LBCG_L)**gammaL
    return M_N200, M_L200
def M_L200(L200, LBCG, z, h=0.7):
    """Cluster mass from the L200 scaling relation only.

    Returns the mass in Msun (normalized to 10^14 h^-1); luminosities
    are given in solar units and internally rescaled to 10^10 h^-2 Lsun.

    BUG FIX: the original referenced ``LBCG_L`` without ever defining it
    (a guaranteed NameError on every call); it is now computed as
    aL * L200**bL, matching the sibling Mass_calib/Mass_calib_old
    implementations. The py2 ``old_div`` shim is also replaced by true
    division (float operands; results unchanged).
    """
    # The best fit parameters
    if z < 0.23:
        M0_L = 1.81
        alphaL = 1.27
        gammaL = 0.40
        aL = 7.77 / h**2
        bL = 0.67
    else:
        M0_L = 1.76
        alphaL = 1.30
        gammaL = 0.26
        aL = 7.92 / h**2
        bL = 0.66
    # Luminosities to units of 10^10 h^-2 Lsun
    L200 = L200 * (h**2) / 1.e10
    LBCG = LBCG * (h**2) / 1.e10
    # Expected BCG luminosity at this L200 (was missing -> NameError)
    LBCG_L = aL * L200**bL
    M_L200 = (1.e14 / h) * M0_L * (
        (L200 / 40.0)**alphaL) * (LBCG / LBCG_L)**gammaL
    return M_L200
|
boada/planckClusters
|
MOSAICpipe/legacy/CLUSTERpipe/maxBCG.py
|
Python
|
mit
| 90,599
|
[
"Galaxy"
] |
ff10319df78aab075d844d90feeb9e3e0b9809a66d48af5e553a8b29023cd4e8
|
# Hidden Markov Model Implementation
import pylab as pyl
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy as scp
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
import ghmm
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_HMM/Variable_Stiffness_Variable_Velocity/')
from data_variable_hshv import Fmat_original_hshv
from data_variable_hslv import Fmat_original_hslv
from data_variable_lshv import Fmat_original_lshv
from data_variable_lslv import Fmat_original_lslv
# Scaling function
def scaling(mat):
    """Standardize the first (rows 0-50) and third (rows 102-152) blocks
    of a 153-row feature matrix.

    The middle block (rows 51-101) is deliberately left untouched: every
    scaling variant for it was commented out in the original code.
    Returns the three blocks stacked back into a single array.
    """
    def _zscore(block):
        # (x - mean) / std standardization over the whole block
        return (block - np.mean(block)) / np.std(block)

    force_block = _zscore(mat[0:51, 0:])
    middle_block = mat[51:102, 0:]        # intentionally NOT standardized
    motion_block = _zscore(mat[102:153, 0:])
    return np.vstack([force_block, middle_block, motion_block])
# Returns mu,sigma for 10 hidden-states from feature-vectors(123,35) for RF,SF,RM,SM models
def feature_to_mu_cov(fvec1,fvec2):
    """Per-state emission parameters for a 10-hidden-state HMM.

    Splits the (m, n) feature matrices into 10 consecutive row chunks
    (one per hidden state) and returns, for each state, the mean of the
    flattened chunk of each stream plus a covariance entry.

    Returns (mu_1, mu_2, cov) with shapes (10, 1), (10, 1), (10, 2, 2).

    NOTE(review): this is a Python 2 module (print statements); m/10 is
    integer floor division here.

    NOTE(review): np.cov is applied to the 1-D concatenation of the two
    flattened chunks, which yields a scalar variance that is broadcast
    into the whole 2x2 cov[index] slot -- a true 2x2 force/motion
    covariance (np.cov of the stacked pair) was presumably intended;
    confirm before reuse.
    """
    index = 0
    m,n = np.shape(fvec1)
    #print m,n
    mu_1 = np.zeros((10,1))
    mu_2 = np.zeros((10,1))
    cov = np.zeros((10,2,2))
    # Rows per hidden state (integer division in py2)
    DIVS = m/10
    while (index < 10):
        m_init = index*DIVS
        temp_fvec1 = fvec1[(m_init):(m_init+DIVS),0:]
        temp_fvec2 = fvec2[(m_init):(m_init+DIVS),0:]
        # Flatten each chunk to a 1-D vector of DIVS*n samples
        temp_fvec1 = np.reshape(temp_fvec1,DIVS*n)
        temp_fvec2 = np.reshape(temp_fvec2,DIVS*n)
        mu_1[index] = np.mean(temp_fvec1)
        mu_2[index] = np.mean(temp_fvec2)
        cov[index,:,:] = np.cov(np.concatenate((temp_fvec1,temp_fvec2),axis=0))
        if index == 0:
            # Debug output for the first state only
            print 'mean = ', mu_2[index]
            print 'mean = ', scp.mean(fvec2[(m_init):(m_init+DIVS),0:])
            print np.shape(np.concatenate((temp_fvec1,temp_fvec2),axis=0))
            print cov[index,:,:]
            print scp.std(fvec2[(m_init):(m_init+DIVS),0:])
            print scp.std(temp_fvec2)
        index = index+1
    return mu_1,mu_2,cov
if __name__ == '__main__':
# Scaling wrt all data
Fmat_rf_hshv = scaling(Fmat_original_hshv[:,0:15])
Fmat_rm_hshv = scaling(Fmat_original_hshv[:,15:17])
Fmat_sf_hshv = scaling(Fmat_original_hshv[:,17:30])
Fmat_sm_hshv = scaling(Fmat_original_hshv[:,30:40])
Fmat_hshv = np.matrix(np.column_stack((Fmat_rf_hshv,Fmat_rm_hshv,Fmat_sf_hshv,Fmat_sm_hshv)))
Fmat_rf_hslv = scaling(Fmat_original_hslv[:,0:15])
Fmat_rm_hslv = scaling(Fmat_original_hslv[:,15:30])
Fmat_sf_hslv = scaling(Fmat_original_hslv[:,30:45])
Fmat_sm_hslv = scaling(Fmat_original_hslv[:,45:57])
Fmat_hslv = np.matrix(np.column_stack((Fmat_rf_hslv,Fmat_rm_hslv,Fmat_sf_hslv,Fmat_sm_hslv)))
Fmat_rf_lshv = scaling(Fmat_original_lshv[:,0:15])
Fmat_rm_lshv = scaling(Fmat_original_lshv[:,15:16])
Fmat_sf_lshv = scaling(Fmat_original_lshv[:,16:23])
Fmat_sm_lshv = scaling(Fmat_original_lshv[:,23:34])
Fmat_lshv = np.matrix(np.column_stack((Fmat_rf_lshv,Fmat_rm_lshv,Fmat_sf_lshv,Fmat_sm_lshv)))
Fmat_rf_lslv = scaling(Fmat_original_lslv[:,0:15])
Fmat_rm_lslv = scaling(Fmat_original_lslv[:,15:28])
Fmat_sf_lslv = scaling(Fmat_original_lslv[:,28:38])
Fmat_sm_lslv = scaling(Fmat_original_lslv[:,38:49])
Fmat_lslv = np.matrix(np.column_stack((Fmat_rf_lslv,Fmat_rm_lslv,Fmat_sf_lslv,Fmat_sm_lslv)))
Fmat = np.matrix(np.column_stack((Fmat_hshv,Fmat_hslv,Fmat_lshv,Fmat_lslv)))
# HMM - Implementation:
F = ghmm.Float() # emission domain of this model
# A - Transition Matrix
A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.1, 0.25, 0.25, 0.2, 0.1, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.1, 0.3, 0.30, 0.20, 0.1, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.20, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.20, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.2, 0.30, 0.30, 0.20],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.2, 0.50, 0.30],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.4, 0.60],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 1.00]]
# pi - initial probabilities per state
pi = [0.1] * 10
# Confusion Matrix
cmat = np.zeros((4,4))
#############################################################################################################################################
# HSHV as testing set and Rest as training set
# Checking the Data-Matrix
mu_rf_force_hshv,mu_rf_motion_hshv,cov_rf_hshv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hslv[0:51,0:15], Fmat_lshv[0:51,0:15], Fmat_lslv[0:51,0:15])))), (np.matrix(np.column_stack((Fmat_hslv[102:153,0:15], Fmat_lshv[102:153,0:15], Fmat_lslv[102:153,0:15])))))
mu_rm_force_hshv,mu_rm_motion_hshv,cov_rm_hshv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hslv[0:51,15:30], Fmat_lshv[0:51,15:16], Fmat_lslv[0:51,15:28])))), (np.matrix(np.column_stack((Fmat_hslv[102:153,15:30], Fmat_lshv[102:153,15:16], Fmat_lslv[102:153,15:28])))))
mu_sf_force_hshv,mu_sf_motion_hshv,cov_sf_hshv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hslv[0:51,30:45], Fmat_lshv[0:51,16:23], Fmat_lslv[0:51,28:38])))), (np.matrix(np.column_stack((Fmat_hslv[102:153,30:45], Fmat_lshv[102:153,16:23], Fmat_lslv[102:153,28:38])))))
mu_sm_force_hshv,mu_sm_motion_hshv,cov_sm_hshv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hslv[0:51,45:57], Fmat_lshv[0:51,23:34], Fmat_lslv[0:51,38:49])))), (np.matrix(np.column_stack((Fmat_hslv[102:153,45:57], Fmat_lshv[102:153,23:34], Fmat_lslv[102:153,38:49])))))
# B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
B_rf_hshv = [0.0]*10
B_rm_hshv = [0.0]*10
B_sf_hshv = [0.0]*10
B_sm_hshv = [0.0]*10
for num_states in range(10):
B_rf_hshv[num_states] = [[mu_rf_force_hshv[num_states][0],mu_rf_motion_hshv[num_states][0]],[cov_rf_hshv[num_states][0][0],cov_rf_hshv[num_states][0][1],cov_rf_hshv[num_states][1][0],cov_rf_hshv[num_states][1][1]]]
B_rm_hshv[num_states] = [[mu_rm_force_hshv[num_states][0],mu_rm_motion_hshv[num_states][0]],[cov_rm_hshv[num_states][0][0],cov_rm_hshv[num_states][0][1],cov_rm_hshv[num_states][1][0],cov_rm_hshv[num_states][1][1]]]
B_sf_hshv[num_states] = [[mu_sf_force_hshv[num_states][0],mu_sf_motion_hshv[num_states][0]],[cov_sf_hshv[num_states][0][0],cov_sf_hshv[num_states][0][1],cov_sf_hshv[num_states][1][0],cov_sf_hshv[num_states][1][1]]]
B_sm_hshv[num_states] = [[mu_sm_force_hshv[num_states][0],mu_sm_motion_hshv[num_states][0]],[cov_sm_hshv[num_states][0][0],cov_sm_hshv[num_states][0][1],cov_sm_hshv[num_states][1][0],cov_sm_hshv[num_states][1][1]]]
print cov_sm_hshv[num_states][0][0],cov_sm_hshv[num_states][0][1],cov_sm_hshv[num_states][1][0],cov_sm_hshv[num_states][1][1]
print "----"
#print B_sm_hshv
#print mu_sm_motion_hshv
# generate RF, RM, SF, SM models from parameters
model_rf_hshv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_rf_hshv, pi) # Will be Trained
model_rm_hshv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_rm_hshv, pi) # Will be Trained
model_sf_hshv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_sf_hshv, pi) # Will be Trained
# --- HSHV held out as test fold: build and train per-class HMMs on the other three conditions ---
# NOTE(review): F, A, pi and B_sm_hshv are defined earlier in the file (outside this view).
model_sm_hshv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_sm_hshv, pi) # Will be Trained
# For Training
# Training sequences per class, pooled column-wise from the HSLV/LSHV/LSLV datasets.
# Rows 0:51 hold the force channel, rows 102:153 the motion channel of each trial.
# NOTE(review): column ranges encode each dataset's class boundaries
# (e.g. LSHV: rf=0:15, rm=15:16, sf=16:23, sm=23:34) -- confirm against the data layout.
total_seq_rf_force_hshv = np.matrix(np.column_stack((Fmat_hslv[0:51,0:15], Fmat_lshv[0:51,0:15], Fmat_lslv[0:51,0:15])))
total_seq_rm_force_hshv = np.matrix(np.column_stack((Fmat_hslv[0:51,15:30], Fmat_lshv[0:51,15:16], Fmat_lslv[0:51,15:28])))
total_seq_sf_force_hshv = np.matrix(np.column_stack((Fmat_hslv[0:51,30:45], Fmat_lshv[0:51,16:23], Fmat_lslv[0:51,28:38])))
total_seq_sm_force_hshv = np.matrix(np.column_stack((Fmat_hslv[0:51,45:57], Fmat_lshv[0:51,23:34], Fmat_lslv[0:51,38:49])))
total_seq_rf_motion_hshv = np.matrix(np.column_stack((Fmat_hslv[102:153,0:15], Fmat_lshv[102:153,0:15], Fmat_lslv[102:153,0:15])))
total_seq_rm_motion_hshv = np.matrix(np.column_stack((Fmat_hslv[102:153,15:30], Fmat_lshv[102:153,15:16], Fmat_lslv[102:153,15:28])))
total_seq_sf_motion_hshv = np.matrix(np.column_stack((Fmat_hslv[102:153,30:45], Fmat_lshv[102:153,16:23], Fmat_lslv[102:153,28:38])))
total_seq_sm_motion_hshv = np.matrix(np.column_stack((Fmat_hslv[102:153,45:57], Fmat_lshv[102:153,23:34], Fmat_lslv[102:153,38:49])))
# Buffers sized (102, n_trials): 51 force rows interleaved with 51 motion rows.
total_seq_rf_hshv = np.zeros((102,45))
total_seq_rm_hshv = np.zeros((102,29))
total_seq_sf_hshv = np.zeros((102,32))
total_seq_sm_hshv = np.zeros((102,34))
i = 0
j = 0
# Interleave: even rows = force sample j, odd rows = motion sample j, so each
# consecutive (force, motion) pair forms one 2-D observation after transposition.
while i < 102:
    total_seq_rf_hshv[i] = total_seq_rf_force_hshv[j]
    total_seq_rf_hshv[i+1] = total_seq_rf_motion_hshv[j]
    total_seq_rm_hshv[i] = total_seq_rm_force_hshv[j]
    total_seq_rm_hshv[i+1] = total_seq_rm_motion_hshv[j]
    total_seq_sf_hshv[i] = total_seq_sf_force_hshv[j]
    total_seq_sf_hshv[i+1] = total_seq_sf_motion_hshv[j]
    total_seq_sm_hshv[i] = total_seq_sm_force_hshv[j]
    total_seq_sm_hshv[i+1] = total_seq_sm_motion_hshv[j]
    j=j+1
    i=i+2
# Transpose so each trial becomes one training sequence (list of interleaved values).
train_seq_rf_hshv = (np.array(total_seq_rf_hshv).T).tolist()
train_seq_rm_hshv = (np.array(total_seq_rm_hshv).T).tolist()
train_seq_sf_hshv = (np.array(total_seq_sf_hshv).T).tolist()
train_seq_sm_hshv = (np.array(total_seq_sm_hshv).T).tolist()
#print train_seq_rf_hshv
final_ts_rf_hshv = ghmm.SequenceSet(F,train_seq_rf_hshv)
final_ts_rm_hshv = ghmm.SequenceSet(F,train_seq_rm_hshv)
final_ts_sf_hshv = ghmm.SequenceSet(F,train_seq_sf_hshv)
final_ts_sm_hshv = ghmm.SequenceSet(F,train_seq_sm_hshv)
# Baum-Welch re-estimation of each class model from its training sequences.
model_rf_hshv.baumWelch(final_ts_rf_hshv)
model_rm_hshv.baumWelch(final_ts_rm_hshv)
model_sf_hshv.baumWelch(final_ts_sf_hshv)
model_sm_hshv.baumWelch(final_ts_sm_hshv)
# For Testing
# Interleave force/motion rows of the held-out HSHV trials (40 columns = 40 trials).
total_seq_obj_hshv = np.zeros((102,40))
total_seq_obj_force_hshv = Fmat_hshv[0:51,:]
total_seq_obj_motion_hshv = Fmat_hshv[102:153,:]
i = 0
j = 0
while i < 102:
    total_seq_obj_hshv[i] = total_seq_obj_force_hshv[j]
    total_seq_obj_hshv[i+1] = total_seq_obj_motion_hshv[j]
    j=j+1
    i=i+2
# One-hot rows: classification outcome per trial for each candidate class.
rf_hshv = np.matrix(np.zeros(np.size(total_seq_obj_hshv,1)))
rm_hshv = np.matrix(np.zeros(np.size(total_seq_obj_hshv,1)))
sf_hshv = np.matrix(np.zeros(np.size(total_seq_obj_hshv,1)))
sm_hshv = np.matrix(np.zeros(np.size(total_seq_obj_hshv,1)))
k = 0
while (k < np.size(total_seq_obj_hshv,1)):
    test_seq_obj_hshv = (np.array(total_seq_obj_hshv[:,k]).T).tolist()
    new_test_seq_obj_hshv = np.array(test_seq_obj_hshv)
    #print new_test_seq_obj_hshv
    ts_obj_hshv = new_test_seq_obj_hshv
    #print np.shape(ts_obj_hshv)
    final_ts_obj_hshv = ghmm.EmissionSequence(F,ts_obj_hshv.tolist())
    # Find Viterbi Path
    path_rf_obj_hshv = model_rf_hshv.viterbi(final_ts_obj_hshv)
    path_rm_obj_hshv = model_rm_hshv.viterbi(final_ts_obj_hshv)
    path_sf_obj_hshv = model_sf_hshv.viterbi(final_ts_obj_hshv)
    path_sm_obj_hshv = model_sm_hshv.viterbi(final_ts_obj_hshv)
    # Classify by the model with the highest Viterbi log-likelihood (element [1]).
    obj_hshv = max(path_rf_obj_hshv[1],path_rm_obj_hshv[1],path_sf_obj_hshv[1],path_sm_obj_hshv[1])
    if obj_hshv == path_rf_obj_hshv[1]:
        rf_hshv[0,k] = 1
    elif obj_hshv == path_rm_obj_hshv[1]:
        rm_hshv[0,k] = 1
    elif obj_hshv == path_sf_obj_hshv[1]:
        sf_hshv[0,k] = 1
    else:
        sm_hshv[0,k] = 1
    k = k+1
#print rf_hshv.T
# Accumulate into the confusion matrix; HSHV class boundaries:
# rf=0:15, rm=15:17, sf=17:30, sm=30:40 (columns of Fmat_hshv).
cmat[0][0] = cmat[0][0] + np.sum(rf_hshv[0,0:15])
cmat[0][1] = cmat[0][1] + np.sum(rf_hshv[0,15:17])
cmat[0][2] = cmat[0][2] + np.sum(rf_hshv[0,17:30])
cmat[0][3] = cmat[0][3] + np.sum(rf_hshv[0,30:40])
cmat[1][0] = cmat[1][0] + np.sum(rm_hshv[0,0:15])
cmat[1][1] = cmat[1][1] + np.sum(rm_hshv[0,15:17])
cmat[1][2] = cmat[1][2] + np.sum(rm_hshv[0,17:30])
cmat[1][3] = cmat[1][3] + np.sum(rm_hshv[0,30:40])
cmat[2][0] = cmat[2][0] + np.sum(sf_hshv[0,0:15])
cmat[2][1] = cmat[2][1] + np.sum(sf_hshv[0,15:17])
cmat[2][2] = cmat[2][2] + np.sum(sf_hshv[0,17:30])
cmat[2][3] = cmat[2][3] + np.sum(sf_hshv[0,30:40])
cmat[3][0] = cmat[3][0] + np.sum(sm_hshv[0,0:15])
cmat[3][1] = cmat[3][1] + np.sum(sm_hshv[0,15:17])
cmat[3][2] = cmat[3][2] + np.sum(sm_hshv[0,17:30])
cmat[3][3] = cmat[3][3] + np.sum(sm_hshv[0,30:40])
#print cmat
#############################################################################################################################################
# HSLV as testing set and Rest as training set
# Initial (mu, cov) estimates per class for the HSLV fold, computed from the
# pooled HSHV/LSHV/LSLV training trials (force rows 0:51, motion rows 102:153).
mu_rf_force_hslv,mu_rf_motion_hslv,cov_rf_hslv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:51,0:15], Fmat_lshv[0:51,0:15], Fmat_lslv[0:51,0:15])))), (np.matrix(np.column_stack((Fmat_hshv[102:153,0:15], Fmat_lshv[102:153,0:15], Fmat_lslv[102:153,0:15])))))
mu_rm_force_hslv,mu_rm_motion_hslv,cov_rm_hslv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:51,15:17], Fmat_lshv[0:51,15:16], Fmat_lslv[0:51,15:28])))), (np.matrix(np.column_stack((Fmat_hshv[102:153,15:17], Fmat_lshv[102:153,15:16], Fmat_lslv[102:153,15:28])))))
# BUGFIX: the soft-fixed slice of Fmat_lshv was 17:23 here, but the LSHV
# soft-fixed class spans columns 16:23 everywhere else in this script (the
# training sequences below and the confusion-matrix accumulation for the LSHV
# fold).  Use 16:23 so the emission-matrix initialization is computed from the
# same trials the model is subsequently trained on.
mu_sf_force_hslv,mu_sf_motion_hslv,cov_sf_hslv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:51,17:30], Fmat_lshv[0:51,16:23], Fmat_lslv[0:51,28:38])))), (np.matrix(np.column_stack((Fmat_hshv[102:153,17:30], Fmat_lshv[102:153,16:23], Fmat_lslv[102:153,28:38])))))
mu_sm_force_hslv,mu_sm_motion_hslv,cov_sm_hslv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:51,30:40], Fmat_lshv[0:51,23:34], Fmat_lslv[0:51,38:49])))), (np.matrix(np.column_stack((Fmat_hshv[102:153,30:40], Fmat_lshv[102:153,23:34], Fmat_lslv[102:153,38:49])))))
# B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
B_rf_hslv = [0.0]*10
B_rm_hslv = [0.0]*10
B_sf_hslv = [0.0]*10
B_sm_hslv = [0.0]*10
# Each state's emission: 2-D mean [force, motion] and flattened 2x2 covariance.
for num_states in range(10):
    B_rf_hslv[num_states] = [[mu_rf_force_hslv[num_states][0],mu_rf_motion_hslv[num_states][0]],[cov_rf_hslv[num_states][0][0],cov_rf_hslv[num_states][0][1],cov_rf_hslv[num_states][1][0],cov_rf_hslv[num_states][1][1]]]
    B_rm_hslv[num_states] = [[mu_rm_force_hslv[num_states][0],mu_rm_motion_hslv[num_states][0]],[cov_rm_hslv[num_states][0][0],cov_rm_hslv[num_states][0][1],cov_rm_hslv[num_states][1][0],cov_rm_hslv[num_states][1][1]]]
    B_sf_hslv[num_states] = [[mu_sf_force_hslv[num_states][0],mu_sf_motion_hslv[num_states][0]],[cov_sf_hslv[num_states][0][0],cov_sf_hslv[num_states][0][1],cov_sf_hslv[num_states][1][0],cov_sf_hslv[num_states][1][1]]]
    B_sm_hslv[num_states] = [[mu_sm_force_hslv[num_states][0],mu_sm_motion_hslv[num_states][0]],[cov_sm_hslv[num_states][0][0],cov_sm_hslv[num_states][0][1],cov_sm_hslv[num_states][1][0],cov_sm_hslv[num_states][1][1]]]
    print cov_sm_hslv[num_states][0][0],cov_sm_hslv[num_states][0][1],cov_sm_hslv[num_states][1][0],cov_sm_hslv[num_states][1][1]
print "----"
#print B_sm_hslv
#print mu_sm_motion_hslv
# generate RF, RM, SF, SM models from parameters
model_rf_hslv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_rf_hslv, pi) # Will be Trained
model_rm_hslv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_rm_hslv, pi) # Will be Trained
model_sf_hslv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_sf_hslv, pi) # Will be Trained
model_sm_hslv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_sm_hslv, pi) # Will be Trained
# For Training
# Pool HSHV/LSHV/LSLV trials per class (force rows 0:51, motion rows 102:153).
total_seq_rf_force_hslv = np.matrix(np.column_stack((Fmat_hshv[0:51,0:15], Fmat_lshv[0:51,0:15], Fmat_lslv[0:51,0:15])))
total_seq_rm_force_hslv = np.matrix(np.column_stack((Fmat_hshv[0:51,15:17], Fmat_lshv[0:51,15:16], Fmat_lslv[0:51,15:28])))
total_seq_sf_force_hslv = np.matrix(np.column_stack((Fmat_hshv[0:51,17:30], Fmat_lshv[0:51,16:23], Fmat_lslv[0:51,28:38])))
total_seq_sm_force_hslv = np.matrix(np.column_stack((Fmat_hshv[0:51,30:40], Fmat_lshv[0:51,23:34], Fmat_lslv[0:51,38:49])))
total_seq_rf_motion_hslv = np.matrix(np.column_stack((Fmat_hshv[102:153,0:15], Fmat_lshv[102:153,0:15], Fmat_lslv[102:153,0:15])))
total_seq_rm_motion_hslv = np.matrix(np.column_stack((Fmat_hshv[102:153,15:17], Fmat_lshv[102:153,15:16], Fmat_lslv[102:153,15:28])))
total_seq_sf_motion_hslv = np.matrix(np.column_stack((Fmat_hshv[102:153,17:30], Fmat_lshv[102:153,16:23], Fmat_lslv[102:153,28:38])))
total_seq_sm_motion_hslv = np.matrix(np.column_stack((Fmat_hshv[102:153,30:40], Fmat_lshv[102:153,23:34], Fmat_lslv[102:153,38:49])))
total_seq_rf_hslv = np.zeros((102,45))
total_seq_rm_hslv = np.zeros((102,16))
total_seq_sf_hslv = np.zeros((102,30))
total_seq_sm_hslv = np.zeros((102,32))
i = 0
j = 0
# Interleave force/motion rows, as in the HSHV fold above.
while i < 102:
    total_seq_rf_hslv[i] = total_seq_rf_force_hslv[j]
    total_seq_rf_hslv[i+1] = total_seq_rf_motion_hslv[j]
    total_seq_rm_hslv[i] = total_seq_rm_force_hslv[j]
    total_seq_rm_hslv[i+1] = total_seq_rm_motion_hslv[j]
    total_seq_sf_hslv[i] = total_seq_sf_force_hslv[j]
    total_seq_sf_hslv[i+1] = total_seq_sf_motion_hslv[j]
    total_seq_sm_hslv[i] = total_seq_sm_force_hslv[j]
    total_seq_sm_hslv[i+1] = total_seq_sm_motion_hslv[j]
    j=j+1
    i=i+2
train_seq_rf_hslv = (np.array(total_seq_rf_hslv).T).tolist()
train_seq_rm_hslv = (np.array(total_seq_rm_hslv).T).tolist()
train_seq_sf_hslv = (np.array(total_seq_sf_hslv).T).tolist()
train_seq_sm_hslv = (np.array(total_seq_sm_hslv).T).tolist()
#print train_seq_rf_hslv
final_ts_rf_hslv = ghmm.SequenceSet(F,train_seq_rf_hslv)
final_ts_rm_hslv = ghmm.SequenceSet(F,train_seq_rm_hslv)
final_ts_sf_hslv = ghmm.SequenceSet(F,train_seq_sf_hslv)
final_ts_sm_hslv = ghmm.SequenceSet(F,train_seq_sm_hslv)
model_rf_hslv.baumWelch(final_ts_rf_hslv)
model_rm_hslv.baumWelch(final_ts_rm_hslv)
model_sf_hslv.baumWelch(final_ts_sf_hslv)
model_sm_hslv.baumWelch(final_ts_sm_hslv)
# For Testing
# Interleave force/motion rows of the held-out HSLV trials (57 columns = 57 trials).
total_seq_obj_hslv = np.zeros((102,57))
total_seq_obj_force_hslv = Fmat_hslv[0:51,:]
total_seq_obj_motion_hslv = Fmat_hslv[102:153,:]
i = 0
j = 0
while i < 102:
    total_seq_obj_hslv[i] = total_seq_obj_force_hslv[j]
    total_seq_obj_hslv[i+1] = total_seq_obj_motion_hslv[j]
    j=j+1
    i=i+2
# One-hot rows: classification outcome per trial for each candidate class.
rf_hslv = np.matrix(np.zeros(np.size(total_seq_obj_hslv,1)))
rm_hslv = np.matrix(np.zeros(np.size(total_seq_obj_hslv,1)))
sf_hslv = np.matrix(np.zeros(np.size(total_seq_obj_hslv,1)))
sm_hslv = np.matrix(np.zeros(np.size(total_seq_obj_hslv,1)))
k = 0
while (k < np.size(total_seq_obj_hslv,1)):
    test_seq_obj_hslv = (np.array(total_seq_obj_hslv[:,k]).T).tolist()
    new_test_seq_obj_hslv = np.array(test_seq_obj_hslv)
    #print new_test_seq_obj_hslv
    ts_obj_hslv = new_test_seq_obj_hslv
    #print np.shape(ts_obj_hslv)
    final_ts_obj_hslv = ghmm.EmissionSequence(F,ts_obj_hslv.tolist())
    # Find Viterbi Path
    path_rf_obj_hslv = model_rf_hslv.viterbi(final_ts_obj_hslv)
    path_rm_obj_hslv = model_rm_hslv.viterbi(final_ts_obj_hslv)
    path_sf_obj_hslv = model_sf_hslv.viterbi(final_ts_obj_hslv)
    path_sm_obj_hslv = model_sm_hslv.viterbi(final_ts_obj_hslv)
    # Classify by the model with the highest Viterbi log-likelihood.
    obj_hslv = max(path_rf_obj_hslv[1],path_rm_obj_hslv[1],path_sf_obj_hslv[1],path_sm_obj_hslv[1])
    if obj_hslv == path_rf_obj_hslv[1]:
        rf_hslv[0,k] = 1
    elif obj_hslv == path_rm_obj_hslv[1]:
        rm_hslv[0,k] = 1
    elif obj_hslv == path_sf_obj_hslv[1]:
        sf_hslv[0,k] = 1
    else:
        sm_hslv[0,k] = 1
    k = k+1
#print rf_hshv.T
# Accumulate into the confusion matrix; HSLV class boundaries:
# rf=0:15, rm=15:30, sf=30:45, sm=45:57 (columns of Fmat_hslv).
cmat[0][0] = cmat[0][0] + np.sum(rf_hslv[0,0:15])
cmat[0][1] = cmat[0][1] + np.sum(rf_hslv[0,15:30])
cmat[0][2] = cmat[0][2] + np.sum(rf_hslv[0,30:45])
cmat[0][3] = cmat[0][3] + np.sum(rf_hslv[0,45:57])
cmat[1][0] = cmat[1][0] + np.sum(rm_hslv[0,0:15])
cmat[1][1] = cmat[1][1] + np.sum(rm_hslv[0,15:30])
cmat[1][2] = cmat[1][2] + np.sum(rm_hslv[0,30:45])
cmat[1][3] = cmat[1][3] + np.sum(rm_hslv[0,45:57])
cmat[2][0] = cmat[2][0] + np.sum(sf_hslv[0,0:15])
cmat[2][1] = cmat[2][1] + np.sum(sf_hslv[0,15:30])
cmat[2][2] = cmat[2][2] + np.sum(sf_hslv[0,30:45])
cmat[2][3] = cmat[2][3] + np.sum(sf_hslv[0,45:57])
cmat[3][0] = cmat[3][0] + np.sum(sm_hslv[0,0:15])
cmat[3][1] = cmat[3][1] + np.sum(sm_hslv[0,15:30])
cmat[3][2] = cmat[3][2] + np.sum(sm_hslv[0,30:45])
cmat[3][3] = cmat[3][3] + np.sum(sm_hslv[0,45:57])
#print cmat
############################################################################################################################################
# LSHV as testing set and Rest as training set
# Initial (mu, cov) estimates per class for the LSHV fold (training = HSHV/HSLV/LSLV).
mu_rf_force_lshv,mu_rf_motion_lshv,cov_rf_lshv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:51,0:15], Fmat_hslv[0:51,0:15], Fmat_lslv[0:51,0:15])))), (np.matrix(np.column_stack((Fmat_hshv[102:153,0:15], Fmat_hslv[102:153,0:15], Fmat_lslv[102:153,0:15])))))
mu_rm_force_lshv,mu_rm_motion_lshv,cov_rm_lshv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:51,15:17], Fmat_hslv[0:51,15:30], Fmat_lslv[0:51,15:28])))), (np.matrix(np.column_stack((Fmat_hshv[102:153,15:17], Fmat_hslv[102:153,15:30], Fmat_lslv[102:153,15:28])))))
mu_sf_force_lshv,mu_sf_motion_lshv,cov_sf_lshv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:51,17:30], Fmat_hslv[0:51,30:45], Fmat_lslv[0:51,28:38])))), (np.matrix(np.column_stack((Fmat_hshv[102:153,17:30], Fmat_hslv[102:153,30:45], Fmat_lslv[102:153,28:38])))))
mu_sm_force_lshv,mu_sm_motion_lshv,cov_sm_lshv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:51,30:40], Fmat_hslv[0:51,45:57], Fmat_lslv[0:51,38:49])))), (np.matrix(np.column_stack((Fmat_hshv[102:153,30:40], Fmat_hslv[102:153,45:57], Fmat_lslv[102:153,38:49])))))
# B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
B_rf_lshv = [0.0]*10
B_rm_lshv = [0.0]*10
B_sf_lshv = [0.0]*10
B_sm_lshv = [0.0]*10
for num_states in range(10):
    B_rf_lshv[num_states] = [[mu_rf_force_lshv[num_states][0],mu_rf_motion_lshv[num_states][0]],[cov_rf_lshv[num_states][0][0],cov_rf_lshv[num_states][0][1],cov_rf_lshv[num_states][1][0],cov_rf_lshv[num_states][1][1]]]
    B_rm_lshv[num_states] = [[mu_rm_force_lshv[num_states][0],mu_rm_motion_lshv[num_states][0]],[cov_rm_lshv[num_states][0][0],cov_rm_lshv[num_states][0][1],cov_rm_lshv[num_states][1][0],cov_rm_lshv[num_states][1][1]]]
    B_sf_lshv[num_states] = [[mu_sf_force_lshv[num_states][0],mu_sf_motion_lshv[num_states][0]],[cov_sf_lshv[num_states][0][0],cov_sf_lshv[num_states][0][1],cov_sf_lshv[num_states][1][0],cov_sf_lshv[num_states][1][1]]]
    B_sm_lshv[num_states] = [[mu_sm_force_lshv[num_states][0],mu_sm_motion_lshv[num_states][0]],[cov_sm_lshv[num_states][0][0],cov_sm_lshv[num_states][0][1],cov_sm_lshv[num_states][1][0],cov_sm_lshv[num_states][1][1]]]
    print cov_sm_lshv[num_states][0][0],cov_sm_lshv[num_states][0][1],cov_sm_lshv[num_states][1][0],cov_sm_lshv[num_states][1][1]
print "----"
#print B_sm_lshv
#print mu_sm_motion_lshv
# generate RF, RM, SF, SM models from parameters
model_rf_lshv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_rf_lshv, pi) # Will be Trained
model_rm_lshv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_rm_lshv, pi) # Will be Trained
model_sf_lshv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_sf_lshv, pi) # Will be Trained
model_sm_lshv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_sm_lshv, pi) # Will be Trained
# For Training
total_seq_rf_force_lshv = np.matrix(np.column_stack((Fmat_hshv[0:51,0:15], Fmat_hslv[0:51,0:15], Fmat_lslv[0:51,0:15])))
total_seq_rm_force_lshv = np.matrix(np.column_stack((Fmat_hshv[0:51,15:17], Fmat_hslv[0:51,15:30], Fmat_lslv[0:51,15:28])))
total_seq_sf_force_lshv = np.matrix(np.column_stack((Fmat_hshv[0:51,17:30], Fmat_hslv[0:51,30:45], Fmat_lslv[0:51,28:38])))
total_seq_sm_force_lshv = np.matrix(np.column_stack((Fmat_hshv[0:51,30:40], Fmat_hslv[0:51,45:57], Fmat_lslv[0:51,38:49])))
total_seq_rf_motion_lshv = np.matrix(np.column_stack((Fmat_hshv[102:153,0:15], Fmat_hslv[102:153,0:15], Fmat_lslv[102:153,0:15])))
total_seq_rm_motion_lshv = np.matrix(np.column_stack((Fmat_hshv[102:153,15:17], Fmat_hslv[102:153,15:30], Fmat_lslv[102:153,15:28])))
total_seq_sf_motion_lshv = np.matrix(np.column_stack((Fmat_hshv[102:153,17:30], Fmat_hslv[102:153,30:45], Fmat_lslv[102:153,28:38])))
total_seq_sm_motion_lshv = np.matrix(np.column_stack((Fmat_hshv[102:153,30:40], Fmat_hslv[102:153,45:57], Fmat_lslv[102:153,38:49])))
total_seq_rf_lshv = np.zeros((102,45))
total_seq_rm_lshv = np.zeros((102,30))
total_seq_sf_lshv = np.zeros((102,38))
total_seq_sm_lshv = np.zeros((102,33))
i = 0
j = 0
# Interleave force/motion rows, as in the folds above.
while i < 102:
    total_seq_rf_lshv[i] = total_seq_rf_force_lshv[j]
    total_seq_rf_lshv[i+1] = total_seq_rf_motion_lshv[j]
    total_seq_rm_lshv[i] = total_seq_rm_force_lshv[j]
    total_seq_rm_lshv[i+1] = total_seq_rm_motion_lshv[j]
    total_seq_sf_lshv[i] = total_seq_sf_force_lshv[j]
    total_seq_sf_lshv[i+1] = total_seq_sf_motion_lshv[j]
    total_seq_sm_lshv[i] = total_seq_sm_force_lshv[j]
    total_seq_sm_lshv[i+1] = total_seq_sm_motion_lshv[j]
    j=j+1
    i=i+2
train_seq_rf_lshv = (np.array(total_seq_rf_lshv).T).tolist()
train_seq_rm_lshv = (np.array(total_seq_rm_lshv).T).tolist()
train_seq_sf_lshv = (np.array(total_seq_sf_lshv).T).tolist()
train_seq_sm_lshv = (np.array(total_seq_sm_lshv).T).tolist()
#print train_seq_rf_lshv
final_ts_rf_lshv = ghmm.SequenceSet(F,train_seq_rf_lshv)
final_ts_rm_lshv = ghmm.SequenceSet(F,train_seq_rm_lshv)
final_ts_sf_lshv = ghmm.SequenceSet(F,train_seq_sf_lshv)
final_ts_sm_lshv = ghmm.SequenceSet(F,train_seq_sm_lshv)
model_rf_lshv.baumWelch(final_ts_rf_lshv)
model_rm_lshv.baumWelch(final_ts_rm_lshv)
model_sf_lshv.baumWelch(final_ts_sf_lshv)
model_sm_lshv.baumWelch(final_ts_sm_lshv)
# For Testing
# Interleave force/motion rows of the held-out LSHV trials (34 columns = 34 trials).
total_seq_obj_lshv = np.zeros((102,34))
total_seq_obj_force_lshv = Fmat_lshv[0:51,:]
total_seq_obj_motion_lshv = Fmat_lshv[102:153,:]
i = 0
j = 0
while i < 102:
    total_seq_obj_lshv[i] = total_seq_obj_force_lshv[j]
    total_seq_obj_lshv[i+1] = total_seq_obj_motion_lshv[j]
    j=j+1
    i=i+2
rf_lshv = np.matrix(np.zeros(np.size(total_seq_obj_lshv,1)))
rm_lshv = np.matrix(np.zeros(np.size(total_seq_obj_lshv,1)))
sf_lshv = np.matrix(np.zeros(np.size(total_seq_obj_lshv,1)))
sm_lshv = np.matrix(np.zeros(np.size(total_seq_obj_lshv,1)))
k = 0
while (k < np.size(total_seq_obj_lshv,1)):
    test_seq_obj_lshv = (np.array(total_seq_obj_lshv[:,k]).T).tolist()
    new_test_seq_obj_lshv = np.array(test_seq_obj_lshv)
    #print new_test_seq_obj_lshv
    ts_obj_lshv = new_test_seq_obj_lshv
    #print np.shape(ts_obj_lshv)
    final_ts_obj_lshv = ghmm.EmissionSequence(F,ts_obj_lshv.tolist())
    # Find Viterbi Path
    path_rf_obj_lshv = model_rf_lshv.viterbi(final_ts_obj_lshv)
    path_rm_obj_lshv = model_rm_lshv.viterbi(final_ts_obj_lshv)
    path_sf_obj_lshv = model_sf_lshv.viterbi(final_ts_obj_lshv)
    path_sm_obj_lshv = model_sm_lshv.viterbi(final_ts_obj_lshv)
    # Classify by the model with the highest Viterbi log-likelihood.
    obj_lshv = max(path_rf_obj_lshv[1],path_rm_obj_lshv[1],path_sf_obj_lshv[1],path_sm_obj_lshv[1])
    if obj_lshv == path_rf_obj_lshv[1]:
        rf_lshv[0,k] = 1
    elif obj_lshv == path_rm_obj_lshv[1]:
        rm_lshv[0,k] = 1
    elif obj_lshv == path_sf_obj_lshv[1]:
        sf_lshv[0,k] = 1
    else:
        sm_lshv[0,k] = 1
    k = k+1
#print rf_lshv.T
# Accumulate into the confusion matrix; LSHV class boundaries:
# rf=0:15, rm=15:16, sf=16:23, sm=23:34 (columns of Fmat_lshv).
cmat[0][0] = cmat[0][0] + np.sum(rf_lshv[0,0:15])
cmat[0][1] = cmat[0][1] + np.sum(rf_lshv[0,15:16])
cmat[0][2] = cmat[0][2] + np.sum(rf_lshv[0,16:23])
cmat[0][3] = cmat[0][3] + np.sum(rf_lshv[0,23:34])
cmat[1][0] = cmat[1][0] + np.sum(rm_lshv[0,0:15])
cmat[1][1] = cmat[1][1] + np.sum(rm_lshv[0,15:16])
cmat[1][2] = cmat[1][2] + np.sum(rm_lshv[0,16:23])
cmat[1][3] = cmat[1][3] + np.sum(rm_lshv[0,23:34])
cmat[2][0] = cmat[2][0] + np.sum(sf_lshv[0,0:15])
cmat[2][1] = cmat[2][1] + np.sum(sf_lshv[0,15:16])
cmat[2][2] = cmat[2][2] + np.sum(sf_lshv[0,16:23])
cmat[2][3] = cmat[2][3] + np.sum(sf_lshv[0,23:34])
cmat[3][0] = cmat[3][0] + np.sum(sm_lshv[0,0:15])
cmat[3][1] = cmat[3][1] + np.sum(sm_lshv[0,15:16])
cmat[3][2] = cmat[3][2] + np.sum(sm_lshv[0,16:23])
cmat[3][3] = cmat[3][3] + np.sum(sm_lshv[0,23:34])
#print cmat
#############################################################################################################################################
# LSLV as testing set and Rest as training set
# Initial (mu, cov) estimates per class for the LSLV fold (training = HSHV/HSLV/LSHV).
mu_rf_force_lslv,mu_rf_motion_lslv,cov_rf_lslv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:51,0:15], Fmat_hslv[0:51,0:15], Fmat_lshv[0:51,0:15])))), (np.matrix(np.column_stack((Fmat_hshv[102:153,0:15], Fmat_hslv[102:153,0:15], Fmat_lshv[102:153,0:15])))))
mu_rm_force_lslv,mu_rm_motion_lslv,cov_rm_lslv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:51,15:17], Fmat_hslv[0:51,15:30], Fmat_lshv[0:51,15:16])))), (np.matrix(np.column_stack((Fmat_hshv[102:153,15:17], Fmat_hslv[102:153,15:30], Fmat_lshv[102:153,15:16])))))
mu_sf_force_lslv,mu_sf_motion_lslv,cov_sf_lslv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:51,17:30], Fmat_hslv[0:51,30:45], Fmat_lshv[0:51,16:23])))), (np.matrix(np.column_stack((Fmat_hshv[102:153,17:30], Fmat_hslv[102:153,30:45], Fmat_lshv[102:153,16:23])))))
mu_sm_force_lslv,mu_sm_motion_lslv,cov_sm_lslv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:51,30:40], Fmat_hslv[0:51,45:57], Fmat_lshv[0:51,23:34])))), (np.matrix(np.column_stack((Fmat_hshv[102:153,30:40], Fmat_hslv[102:153,45:57], Fmat_lshv[102:153,23:34])))))
# B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
B_rf_lslv = [0.0]*10
B_rm_lslv = [0.0]*10
B_sf_lslv = [0.0]*10
B_sm_lslv = [0.0]*10
for num_states in range(10):
    B_rf_lslv[num_states] = [[mu_rf_force_lslv[num_states][0],mu_rf_motion_lslv[num_states][0]],[cov_rf_lslv[num_states][0][0],cov_rf_lslv[num_states][0][1],cov_rf_lslv[num_states][1][0],cov_rf_lslv[num_states][1][1]]]
    B_rm_lslv[num_states] = [[mu_rm_force_lslv[num_states][0],mu_rm_motion_lslv[num_states][0]],[cov_rm_lslv[num_states][0][0],cov_rm_lslv[num_states][0][1],cov_rm_lslv[num_states][1][0],cov_rm_lslv[num_states][1][1]]]
    B_sf_lslv[num_states] = [[mu_sf_force_lslv[num_states][0],mu_sf_motion_lslv[num_states][0]],[cov_sf_lslv[num_states][0][0],cov_sf_lslv[num_states][0][1],cov_sf_lslv[num_states][1][0],cov_sf_lslv[num_states][1][1]]]
    B_sm_lslv[num_states] = [[mu_sm_force_lslv[num_states][0],mu_sm_motion_lslv[num_states][0]],[cov_sm_lslv[num_states][0][0],cov_sm_lslv[num_states][0][1],cov_sm_lslv[num_states][1][0],cov_sm_lslv[num_states][1][1]]]
    print cov_sm_lslv[num_states][0][0],cov_sm_lslv[num_states][0][1],cov_sm_lslv[num_states][1][0],cov_sm_lslv[num_states][1][1]
print "----"
#print B_sm_lslv
#print mu_sm_motion_lslv
# generate RF, RM, SF, SM models from parameters
model_rf_lslv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_rf_lslv, pi) # Will be Trained
model_rm_lslv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_rm_lslv, pi) # Will be Trained
model_sf_lslv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_sf_lslv, pi) # Will be Trained
model_sm_lslv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_sm_lslv, pi) # Will be Trained
# For Training
total_seq_rf_force_lslv = np.matrix(np.column_stack((Fmat_hshv[0:51,0:15], Fmat_hslv[0:51,0:15], Fmat_lshv[0:51,0:15])))
total_seq_rm_force_lslv = np.matrix(np.column_stack((Fmat_hshv[0:51,15:17], Fmat_hslv[0:51,15:30], Fmat_lshv[0:51,15:16])))
total_seq_sf_force_lslv = np.matrix(np.column_stack((Fmat_hshv[0:51,17:30], Fmat_hslv[0:51,30:45], Fmat_lshv[0:51,16:23])))
total_seq_sm_force_lslv = np.matrix(np.column_stack((Fmat_hshv[0:51,30:40], Fmat_hslv[0:51,45:57], Fmat_lshv[0:51,23:34])))
total_seq_rf_motion_lslv = np.matrix(np.column_stack((Fmat_hshv[102:153,0:15], Fmat_hslv[102:153,0:15], Fmat_lshv[102:153,0:15])))
total_seq_rm_motion_lslv = np.matrix(np.column_stack((Fmat_hshv[102:153,15:17], Fmat_hslv[102:153,15:30], Fmat_lshv[102:153,15:16])))
total_seq_sf_motion_lslv = np.matrix(np.column_stack((Fmat_hshv[102:153,17:30], Fmat_hslv[102:153,30:45], Fmat_lshv[102:153,16:23])))
total_seq_sm_motion_lslv = np.matrix(np.column_stack((Fmat_hshv[102:153,30:40], Fmat_hslv[102:153,45:57], Fmat_lshv[102:153,23:34])))
total_seq_rf_lslv = np.zeros((102,45))
total_seq_rm_lslv = np.zeros((102,18))
total_seq_sf_lslv = np.zeros((102,35))
total_seq_sm_lslv = np.zeros((102,33))
i = 0
j = 0
# Interleave force/motion rows, as in the folds above.
while i < 102:
    total_seq_rf_lslv[i] = total_seq_rf_force_lslv[j]
    total_seq_rf_lslv[i+1] = total_seq_rf_motion_lslv[j]
    total_seq_rm_lslv[i] = total_seq_rm_force_lslv[j]
    total_seq_rm_lslv[i+1] = total_seq_rm_motion_lslv[j]
    total_seq_sf_lslv[i] = total_seq_sf_force_lslv[j]
    total_seq_sf_lslv[i+1] = total_seq_sf_motion_lslv[j]
    total_seq_sm_lslv[i] = total_seq_sm_force_lslv[j]
    total_seq_sm_lslv[i+1] = total_seq_sm_motion_lslv[j]
    j=j+1
    i=i+2
train_seq_rf_lslv = (np.array(total_seq_rf_lslv).T).tolist()
train_seq_rm_lslv = (np.array(total_seq_rm_lslv).T).tolist()
train_seq_sf_lslv = (np.array(total_seq_sf_lslv).T).tolist()
train_seq_sm_lslv = (np.array(total_seq_sm_lslv).T).tolist()
#print train_seq_rf_lslv
final_ts_rf_lslv = ghmm.SequenceSet(F,train_seq_rf_lslv)
final_ts_rm_lslv = ghmm.SequenceSet(F,train_seq_rm_lslv)
final_ts_sf_lslv = ghmm.SequenceSet(F,train_seq_sf_lslv)
final_ts_sm_lslv = ghmm.SequenceSet(F,train_seq_sm_lslv)
model_rf_lslv.baumWelch(final_ts_rf_lslv)
model_rm_lslv.baumWelch(final_ts_rm_lslv)
model_sf_lslv.baumWelch(final_ts_sf_lslv)
model_sm_lslv.baumWelch(final_ts_sm_lslv)
# For Testing
# Interleave force/motion rows of the held-out LSLV trials (49 columns = 49 trials).
total_seq_obj_lslv = np.zeros((102,49))
total_seq_obj_force_lslv = Fmat_lslv[0:51,:]
total_seq_obj_motion_lslv = Fmat_lslv[102:153,:]
i = 0
j = 0
while i < 102:
    total_seq_obj_lslv[i] = total_seq_obj_force_lslv[j]
    total_seq_obj_lslv[i+1] = total_seq_obj_motion_lslv[j]
    j=j+1
    i=i+2
rf_lslv = np.matrix(np.zeros(np.size(total_seq_obj_lslv,1)))
rm_lslv = np.matrix(np.zeros(np.size(total_seq_obj_lslv,1)))
sf_lslv = np.matrix(np.zeros(np.size(total_seq_obj_lslv,1)))
sm_lslv = np.matrix(np.zeros(np.size(total_seq_obj_lslv,1)))
k = 0
while (k < np.size(total_seq_obj_lslv,1)):
    test_seq_obj_lslv = (np.array(total_seq_obj_lslv[:,k]).T).tolist()
    new_test_seq_obj_lslv = np.array(test_seq_obj_lslv)
    #print new_test_seq_obj_lslv
    ts_obj_lslv = new_test_seq_obj_lslv
    #print np.shape(ts_obj_lslv)
    # Find Viterbi Path
    final_ts_obj_lslv = ghmm.EmissionSequence(F,ts_obj_lslv.tolist())
    path_rf_obj_lslv = model_rf_lslv.viterbi(final_ts_obj_lslv)
    path_rm_obj_lslv = model_rm_lslv.viterbi(final_ts_obj_lslv)
    path_sf_obj_lslv = model_sf_lslv.viterbi(final_ts_obj_lslv)
    path_sm_obj_lslv = model_sm_lslv.viterbi(final_ts_obj_lslv)
    # Classify by the model with the highest Viterbi log-likelihood.
    obj_lslv = max(path_rf_obj_lslv[1],path_rm_obj_lslv[1],path_sf_obj_lslv[1],path_sm_obj_lslv[1])
    if obj_lslv == path_rf_obj_lslv[1]:
        rf_lslv[0,k] = 1
    elif obj_lslv == path_rm_obj_lslv[1]:
        rm_lslv[0,k] = 1
    elif obj_lslv == path_sf_obj_lslv[1]:
        sf_lslv[0,k] = 1
    else:
        sm_lslv[0,k] = 1
    k = k+1
#print rf_lslv.T
# Accumulate into the confusion matrix; LSLV class boundaries:
# rf=0:15, rm=15:28, sf=28:38, sm=38:49 (columns of Fmat_lslv).
cmat[0][0] = cmat[0][0] + np.sum(rf_lslv[0,0:15])
cmat[0][1] = cmat[0][1] + np.sum(rf_lslv[0,15:28])
cmat[0][2] = cmat[0][2] + np.sum(rf_lslv[0,28:38])
cmat[0][3] = cmat[0][3] + np.sum(rf_lslv[0,38:49])
cmat[1][0] = cmat[1][0] + np.sum(rm_lslv[0,0:15])
cmat[1][1] = cmat[1][1] + np.sum(rm_lslv[0,15:28])
cmat[1][2] = cmat[1][2] + np.sum(rm_lslv[0,28:38])
cmat[1][3] = cmat[1][3] + np.sum(rm_lslv[0,38:49])
cmat[2][0] = cmat[2][0] + np.sum(sf_lslv[0,0:15])
cmat[2][1] = cmat[2][1] + np.sum(sf_lslv[0,15:28])
cmat[2][2] = cmat[2][2] + np.sum(sf_lslv[0,28:38])
cmat[2][3] = cmat[2][3] + np.sum(sf_lslv[0,38:49])
cmat[3][0] = cmat[3][0] + np.sum(sm_lslv[0,0:15])
cmat[3][1] = cmat[3][1] + np.sum(sm_lslv[0,15:28])
cmat[3][2] = cmat[3][2] + np.sum(sm_lslv[0,28:38])
cmat[3][3] = cmat[3][3] + np.sum(sm_lslv[0,38:49])
#print cmat
############################################################################################################################################
# Plot the 4x4 confusion matrix accumulated over all four cross-validation folds.
Nlabels = 4
fig = pp.figure()
ax = fig.add_subplot(111)
figplot = ax.matshow(cmat, interpolation = 'nearest', origin = 'upper', extent=[0, Nlabels, 0, Nlabels])
ax.set_title('Performance of HMM Models')
pp.xlabel("Targets")
pp.ylabel("Predictions")
ax.set_xticks([0.5,1.5,2.5,3.5])
ax.set_xticklabels(['Rigid-Fixed', 'Rigid-Movable', 'Soft-Fixed', 'Soft-Movable'])
# matshow draws row 0 at the top, so y ticks are listed in reverse to keep label order.
ax.set_yticks([3.5,2.5,1.5,0.5])
ax.set_yticklabels(['Rigid-Fixed', 'Rigid-Movable', 'Soft-Fixed', 'Soft-Movable'])
figbar = fig.colorbar(figplot)
i = 0
while (i < 4):
    j = 0
    while (j < 4):
        # Annotate each cell with its count (y flipped to match the matshow orientation).
        pp.text(j+0.5,3.5-i,cmat[i][j])
        j = j+1
    i = i+1
pp.savefig('results_force_motion_10_states.png')
pp.show()
|
tapomayukh/projects_in_python
|
classification/Classification_with_HMM/Single_Contact_Classification/Variable_Stiffness_Variable_Velocity/HMM/with 0.5s/hmm_crossvalidation_force_motion_10_states_scaled_wrt_all_data.py
|
Python
|
mit
| 39,683
|
[
"Mayavi"
] |
6c712da08280b9eb0544d3ca60fac9bfa8f1d102e3ac83da618e6fed5d8b78b8
|
""" A set of utilities used in the WMS services
Requires the Nordugrid ARC plugins. In particular : nordugrid-arc-python
"""
from tempfile import mkdtemp
import shutil
from DIRAC import S_OK, S_ERROR, gLogger, gConfig
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import getQueue
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getGroupOption
from DIRAC.FrameworkSystem.Client.ProxyManagerClient import gProxyManager
from DIRAC.Resources.Computing.ComputingElementFactory import ComputingElementFactory
# List of files to be inserted/retrieved into/from pilot Output Sandbox
# first will be defined as StdOut in JDL and the second as StdErr
outputSandboxFiles = ["StdOut", "StdErr"]
# NOTE(review): presumably a timeout in seconds; it is not used within this view -- confirm at call sites
COMMAND_TIMEOUT = 60
###########################################################################
def getGridEnv():
    """Return the GridEnv value configured for the WorkloadManagement instance
    of the current DIRAC setup, or an empty string when any step is unset.
    """
    setup = gConfig.getValue("/DIRAC/Setup", "")
    if not setup:
        return ""
    instance = gConfig.getValue("/DIRAC/Setups/%s/WorkloadManagement" % setup, "")
    if not instance:
        return ""
    return gConfig.getValue("/Systems/WorkloadManagement/%s/GridEnv" % instance, "")
def getPilotCE(pilotDict):
    """Instantiate and return a CE bound to a pilot.

    Returns S_OK(ce) with a configured ComputingElement, or the S_ERROR
    result of the failing lookup/creation step.
    """
    queueRes = getQueue(pilotDict["GridSite"], pilotDict["DestinationSite"], pilotDict["Queue"])
    if not queueRes["OK"]:
        return queueRes
    queueDict = queueRes["Value"]
    queueDict["GridEnv"] = getGridEnv()
    # Scratch working directory for the CE instance.
    queueDict["WorkingDirectory"] = mkdtemp()
    ceRes = ComputingElementFactory().getCE(pilotDict["GridType"], pilotDict["DestinationSite"], queueDict)
    if not ceRes["OK"]:
        # CE creation failed: do not leak the scratch directory.
        shutil.rmtree(queueDict["WorkingDirectory"])
        return ceRes
    return S_OK(ceRes["Value"])
def getPilotProxy(pilotDict):
    """Get a proxy bound to a pilot.

    Returns S_OK(proxy) on success, S_ERROR otherwise.
    """
    ownerDN = pilotDict["OwnerDN"]
    ownerGroup = pilotDict["OwnerGroup"]
    vomsGroup = getGroupOption(ownerGroup, "VOMSRole", ownerGroup)
    result = gProxyManager.getPilotProxyFromVOMSGroup(ownerDN, vomsGroup)
    if result["OK"]:
        return S_OK(result["Value"])
    gLogger.error("Could not get proxy:", 'User "%s" Group "%s" : %s' % (ownerDN, vomsGroup, result["Message"]))
    return S_ERROR("Failed to get the pilot's owner proxy")
def getPilotRef(pilotReference, pilotDict):
    """Add the pilotStamp to the pilotReference, if the pilotStamp is in the dictionary,
    otherwise return unchanged pilotReference.
    """
    stamp = pilotDict["PilotStamp"]
    if not stamp:
        return S_OK(pilotReference)
    return S_OK(pilotReference + ":::" + stamp)
def killPilotsInQueues(pilotRefDict):
    """kill pilots queue by queue

    :params dict pilotRefDict: a dict of pilots in queues

    Returns the list of pilots whose kill command failed, or an S_ERROR-style
    dict if a queue/CE/proxy lookup fails.
    """
    ceFactory = ComputingElementFactory()
    failed = []
    for key, pilotDict in pilotRefDict.items():
        # Key encodes the queue identity: owner@@@group@@@site@@@ce@@@queue.
        owner, group, site, ceName, queue = key.split("@@@")
        result = getQueue(site, ceName, queue)
        if not result["OK"]:
            return result
        queueDict = result["Value"]
        gridType = pilotDict["GridType"]
        result = ceFactory.getCE(gridType, ceName, queueDict)
        if not result["OK"]:
            return result
        computingElement = result["Value"]
        # FIXME: quite hacky. Should be either removed, or based on some flag
        if gridType in ("CREAM", "ARC", "Globus", "HTCondorCE"):
            vomsGroup = getGroupOption(group, "VOMSRole", group)
            proxyRes = gProxyManager.getPilotProxyFromVOMSGroup(owner, vomsGroup)
            if not proxyRes["OK"]:
                gLogger.error("Could not get proxy:", 'User "%s" Group "%s" : %s' % (owner, vomsGroup, proxyRes["Message"]))
                return S_ERROR("Failed to get the pilot's owner proxy")
            computingElement.setProxy(proxyRes["Value"])
        pilotList = pilotDict["PilotList"]
        if not computingElement.killJob(pilotList)["OK"]:
            failed.extend(pilotList)
    return failed
|
DIRACGrid/DIRAC
|
src/DIRAC/WorkloadManagementSystem/Service/WMSUtilities.py
|
Python
|
gpl-3.0
| 3,998
|
[
"DIRAC"
] |
882ee5a7da55621bdd89c0a39de0797125c6ad2af015969b91b34fcbc2a9ad65
|
#!/usr/bin/env python
# polarizer.py - add Drude oscillators to LAMMPS data file.
# Agilio Padua <agilio.padua@univ-bpclermont.fr>
# Alain Dequidt <alain.dequidt@univ-bpclermont.fr>
# version 2017/02/08
import sys
import argparse
import random
from copy import deepcopy
usage = """Add Drude oscillators to LAMMPS data file.
Format of file containing specification of Drude oscillators:
# type dm/u dq/e k/(kJ/molA2) alpha/A3 thole
C3H 1.0 0.0 4184.0 2.051 2.6
...
* dm is the mass to place on the Drude particle (taken from its core),
* dq is the charge to place on the Drude particle (taken from its core),
* k is the harmonic force constant of the bond between core and Drude,
* alpha is the polarizability,
* thole is a parameter of the Thole damping function.
A Drude particle is created for each atom in the LAMMPS data file
that corresponds to an atom type given in the Drude file.
Since LAMMPS uses numbers for atom types in the data file, a comment
after each line in the Masses section has to be introduced to allow
identification of the atom types within the force field database:
Masses
1 12.011 # C3H
2 12.011 # CTO
...
This script will add new atom types, new bond types, new atoms and
new bonds to the data file.
It will also generate some commands to be included in the LAMMPS input script,
which are related to the topology and force field, namely fix drude,
pair_style and pair_coeff commands. For information on thermostating please
read the documentation of the DRUDE package.
This tool can also be used to revert a Drude-polarized data file to a
non-polarizable one.
"""
# keywords of header and main sections (from data.py in Pizza.py)
hkeywords = ["atoms", "ellipsoids", "lines", "triangles", "bodies",
"bonds", "angles", "dihedrals", "impropers",
"atom types", "bond types", "angle types", "dihedral types",
"improper types", "xlo xhi", "ylo yhi", "zlo zhi", "xy xz yz"]
skeywords = [["Masses", "atom types"],
["Pair Coeffs", "atom types"],
["Bond Coeffs", "bond types"], ["Angle Coeffs", "angle types"],
["Dihedral Coeffs", "dihedral types"],
["Improper Coeffs", "improper types"],
["BondBond Coeffs", "angle types"],
["BondAngle Coeffs", "angle types"],
["MiddleBondTorsion Coeffs", "dihedral types"],
["EndBondTorsion Coeffs", "dihedral types"],
["AngleTorsion Coeffs", "dihedral types"],
["AngleAngleTorsion Coeffs", "dihedral types"],
["BondBond13 Coeffs", "dihedral types"],
["AngleAngle Coeffs", "improper types"],
["Atoms", "atoms"], ["Velocities", "atoms"],
["Ellipsoids", "ellipsoids"],
["Lines", "lines"], ["Triangles", "triangles"],
["Bodies", "bodies"],
["Bonds", "bonds"],
["Angles", "angles"], ["Dihedrals", "dihedrals"],
["Impropers", "impropers"], ["Molecules", "atoms"]]
def massline(att):
    """Format one Masses-section line from an atom-type record
    (keys: 'id', 'm', 'type')."""
    return "{0:4d} {1:8.3f} # {2}\n".format(att['id'], att['m'], att['type'])
def bdtline(bdt):
    """Format one Bond Coeffs line from a bond-type record
    (keys: 'id', 'k', 'r0', 'note')."""
    fields = (bdt['id'], bdt['k'], bdt['r0'], bdt['note'])
    return "{0:4d} {1:12.6f} {2:12.6f} {3}\n".format(*fields)
def atomline(at):
    """Format one Atoms-section line (full atom style: id, molecule,
    type, charge, coordinates, trailing comment)."""
    template = ("{0:7d} {1:7d} {2:4d} {3:8.4f} {4:13.6e} {5:13.6e} "
                "{6:13.6e}  {7}\n")
    return template.format(at['n'], at['mol'], at['id'], at['q'],
                           at['x'], at['y'], at['z'], at['note'])
def bondline(bd):
    """Format one Bonds-section line from a bond record
    (keys: 'n', 'id', 'i', 'j', 'note')."""
    fields = (bd['n'], bd['id'], bd['i'], bd['j'], bd['note'])
    return "{0:7d} {1:4d} {2:7d} {3:7d} {4}\n".format(*fields)
def velline(at):
    """Format one Velocities-section line (atom id plus vx, vy, vz)."""
    fields = (at['n'], at['vx'], at['vy'], at['vz'])
    return "{0:7d} {1:13.6e} {2:13.6e} {3:13.6e} \n".format(*fields)
# --------------------------------------
class Data(object):
    """In-memory representation of a LAMMPS data file.

    Parsing follows data.py from Pizza.py: the header is read keyword by
    keyword, then each named section is stored as a list of raw text lines
    in self.sections.  polarize()/depolarize() add or remove Drude
    particles; lmpscript() emits the matching LAMMPS input commands.

    Changes vs. original: two leftover debug prints removed (one per
    Masses line in extract_pol, the atom-type map dump in depolarize),
    and the fatal-error exit in extract_nonpol now returns a nonzero
    status so callers/scripts can detect the failure.
    """

    def __init__(self, datafile):
        '''read LAMMPS data file (from data.py in Pizza.py)'''
        # containers filled later by extract_nonpol()/extract_pol()
        self.atomtypes = []
        self.bondtypes = []
        self.atoms = []
        self.bonds = []
        self.idmap = {}
        self.nselect = 1

        f = open(datafile, "r")
        self.title = f.readline()
        self.names = {}
        headers = {}
        # --- header: read until a line matches no header keyword ---
        while 1:
            line = f.readline().strip()
            if '#' in line:
                line = line[:line.index('#')].strip()
            if len(line) == 0:
                continue
            found = 0
            for keyword in hkeywords:
                if keyword in line:
                    found = 1
                    words = line.split()
                    if keyword == "xlo xhi" or keyword == "ylo yhi" or \
                       keyword == "zlo zhi":
                        headers[keyword] = (float(words[0]), float(words[1]))
                    elif keyword == "xy xz yz":
                        headers[keyword] = \
                            (float(words[0]), float(words[1]), float(words[2]))
                    else:
                        headers[keyword] = int(words[0])
            if not found:
                # first non-header line is a section title; fall through
                break
        # --- sections: each section length comes from its header count ---
        sections = {}
        while 1:
            if len(line) > 0:
                found = 0
                for pair in skeywords:
                    keyword, length = pair[0], pair[1]
                    if keyword == line:
                        found = 1
                        if length not in headers:
                            raise RuntimeError("data section {} "\
                                "has no matching header value".format(line))
                        f.readline()  # blank line after the section title
                        list_ = []
                        for _ in range(headers[length]):
                            list_.append(f.readline())
                        sections[keyword] = list_
                if not found:
                    raise RuntimeError("invalid section {} in data"\
                        " file".format(line))
            line = f.readline()
            if not line:
                break
            if '#' in line:
                line = line[:line.index('#')]
            line = line.strip()
        f.close()
        self.headers = headers
        self.sections = sections

    def write(self, filename):
        '''write out a LAMMPS data file (from data.py in Pizza.py)'''
        with open(filename, "w") as f:
            # self.title still carries its newline; the extra '\n' yields the
            # blank line LAMMPS expects after the title
            f.write(self.title + '\n')
            for keyword in hkeywords:
                if keyword in self.headers:
                    if keyword == "xlo xhi" or keyword == "ylo yhi" or \
                       keyword == "zlo zhi":
                        f.write("{0:f} {1:f} {2}\n".format(
                            self.headers[keyword][0],
                            self.headers[keyword][1], keyword))
                    elif keyword == "xy xz yz":
                        f.write("{0:f} {1:f} {2:f} {3}\n".format(
                            self.headers[keyword][0],
                            self.headers[keyword][1],
                            self.headers[keyword][2], keyword))
                    else:
                        f.write("{0:d} {1}\n".format(self.headers[keyword],
                                                     keyword))
            for pair in skeywords:
                keyword = pair[0]
                if keyword in self.sections:
                    f.write("\n{}\n\n".format(keyword))
                    for line in self.sections[keyword]:
                        f.write(line)

    def extract_nonpol(self):
        """extract atom and bond info from nonpolarizable data"""
        # extract atom IDs; the '#' comment in Masses must carry the
        # force-field type name (see usage text)
        missinglabels = False
        for line in self.sections['Masses']:
            tok = line.split()
            if len(tok) < 4:
                print("error: missing type for atom ID " + tok[0] +
                      " in Masses section")
                missinglabels = True
                continue
            atomtype = {}
            atomtype['id'] = int(tok[0])
            atomtype['m'] = float(tok[1])
            atomtype['type'] = tok[3]
            self.atomtypes.append(atomtype)
        if missinglabels:
            # bug fix: exit nonzero on this error path (was sys.exit(0))
            sys.exit(1)
        # extract atom registers
        for line in self.sections['Atoms']:
            tok = line.split()
            atom = {}
            atom['n'] = int(tok[0])
            atom['mol'] = int(tok[1])
            atom['id'] = int(tok[2])
            atom['q'] = float(tok[3])
            atom['x'] = float(tok[4])
            atom['y'] = float(tok[5])
            atom['z'] = float(tok[6])
            atom['note'] = ' '.join(tok[7:])
            self.atoms.append(atom)
            self.idmap[atom['n']] = atom
        if 'Velocities' in self.sections:
            for line in self.sections['Velocities']:
                tok = line.split()
                atom = self.idmap[int(tok[0])]
                atom['vx'] = float(tok[1])
                atom['vy'] = float(tok[2])
                atom['vz'] = float(tok[3])

    def polarize(self, drude):
        """add Drude particles"""
        if 'Pair Coeffs' in self.sections:
            raise RuntimeError("cannot polarize a data with Pair Coeffs")
        self.extract_nonpol()
        natom = self.headers['atoms']
        nbond = self.headers['bonds']
        nattype = self.headers['atom types']
        nbdtype = self.headers['bond types']
        # create new atom types (IDs) for Drude particles and modify cores;
        # Drude type ids are allocated after all existing type ids
        newattypes = []
        for att in self.atomtypes:
            att['dflag'] = 'n'
            for ddt in drude.types:
                if ddt['type'] == att['type']:
                    nattype += 1
                    newid = {}
                    newid['id'] = ddt['id'] = nattype
                    newid['m'] = ddt['dm']
                    # the Drude mass is taken from the core
                    att['m'] -= ddt['dm']
                    # label drude particles and cores
                    att['dflag'] = 'c'
                    newid['dflag'] = 'd'
                    newid['type'] = att['type'] + ' DP'
                    att['type'] += ' DC'
                    ddt['type'] += ' DC'
                    newattypes.append(newid)
                    break
        self.headers['atom types'] += len(newattypes)
        self.sections['Masses'] = []
        for att in self.atomtypes + newattypes:
            self.sections['Masses'].append(massline(att))
        # create new bond types for core-drude bonds
        newbdtypes = []
        for att in self.atomtypes:
            for ddt in drude.types:
                if ddt['type'] == att['type']:
                    nbdtype += 1
                    newbdtype = {}
                    newbdtype['id'] = ddt['bdid'] = nbdtype
                    newbdtype['k'] = ddt['k']
                    newbdtype['r0'] = 0.0
                    newbdtype['note'] = '# ' + ddt['type'] + '-DP'
                    newbdtypes.append(newbdtype)
                    break
        self.headers['bond types'] += len(newbdtypes)
        for bdt in newbdtypes:
            self.sections['Bond Coeffs'].append(bdtline(bdt))
        # create new atoms for Drude particles and new bonds with their cores;
        # fixed seed keeps the random displacements reproducible
        random.seed(123)
        newatoms = []
        newbonds = []
        for atom in self.atoms:
            atom['dflag'] = ''  # [c]ore, [d]rude, [n]on-polarizable
            atom['dd'] = 0      # partner drude or core
            for att in self.atomtypes:
                if att['id'] == atom['id']:
                    break
            for ddt in drude.types:
                if ddt['type'] == att['type']:
                    natom += 1
                    newatom = deepcopy(atom)
                    newatom['n'] = natom
                    self.idmap[natom] = newatom
                    newatom['id'] = ddt['id']
                    newatom['q'] = ddt['dq']
                    newatom['note'] = atom['note']
                    if '#' not in newatom['note']:
                        newatom['note'] += ' #'
                    newatom['note'] += ' DP'
                    newatom['dflag'] = 'd'
                    newatom['dd'] = atom['n']
                    # avoid superposition of cores and Drudes
                    newatom['x'] += 0.1 * (random.random() - 0.5)
                    newatom['y'] += 0.1 * (random.random() - 0.5)
                    newatom['z'] += 0.1 * (random.random() - 0.5)
                    if 'Velocities' in self.sections:
                        newatom['vx'] = atom['vx']
                        newatom['vy'] = atom['vy']
                        newatom['vz'] = atom['vz']
                    newatoms.append(newatom)
                    # the Drude charge is taken from the core
                    atom['q'] -= ddt['dq']
                    atom['dflag'] = 'c'
                    atom['dd'] = natom
                    if '#' not in atom['note']:
                        atom['note'] += ' #'
                    atom['note'] += ' DC'
                    nbond += 1
                    newbond = {}
                    newbond['n'] = nbond
                    newbond['id'] = ddt['bdid']
                    newbond['i'] = atom['n']
                    newbond['j'] = newatom['n']
                    newbond['note'] = '# ' + ddt['type'] + '-DP'
                    newbonds.append(newbond)
                    break
        self.headers['atoms'] += len(newatoms)
        self.headers['bonds'] += len(newbonds)
        self.sections['Atoms'] = []
        for atom in self.atoms + newatoms:
            self.sections['Atoms'].append(atomline(atom))
        for bond in newbonds:
            self.sections['Bonds'].append(bondline(bond))
        if 'Velocities' in self.sections:
            self.sections['Velocities'] = []
            for atom in self.atoms + newatoms:
                self.sections['Velocities'].append(velline(atom))
        # update list of atom IDs
        for att in newattypes:
            self.atomtypes.append(att)

    def extract_pol(self, drude):
        """extract atom, drude, bonds info from polarizable data"""
        # extract atom IDs; the DC/DP suffix in the Masses comments marks
        # cores and Drude particles
        for line in self.sections['Masses']:
            tok = line.split()
            atomtype = {}
            atomtype['id'] = int(tok[0])
            atomtype['m'] = float(tok[1])
            if len(tok) >= 4:
                atomtype['type'] = tok[3]
                atomtype['dflag'] = 'n'
                if tok[-1] == "DC":
                    atomtype['dflag'] = 'c'
                elif tok[-1] == "DP":
                    atomtype['dflag'] = 'd'
            else:
                raise RuntimeError("comments in Masses section required "\
                    "to identify cores (DC) and Drudes (DP)")
            self.atomtypes.append(atomtype)
        # extract bond type data
        for line in self.sections['Bond Coeffs']:
            tok = line.split()
            bondtype = {}
            bondtype['id'] = int(tok[0])
            bondtype['k'] = float(tok[1])
            bondtype['r0'] = float(tok[2])
            bondtype['note'] = ''.join([s + ' ' for s in tok[3:]]).strip()
            self.bondtypes.append(bondtype)
        # extract atom registers; strip the trailing DC tag from core notes
        for line in self.sections['Atoms']:
            tok = line.split()
            atom = {}
            atom['n'] = int(tok[0])
            atom['mol'] = int(tok[1])
            atom['id'] = int(tok[2])
            atom['q'] = float(tok[3])
            atom['x'] = float(tok[4])
            atom['y'] = float(tok[5])
            atom['z'] = float(tok[6])
            if tok[-1] == 'DC':
                atom['note'] = ' '.join(tok[7:-1])
            else:
                atom['note'] = ' '.join(tok[7:])
            self.atoms.append(atom)
            self.idmap[atom['n']] = atom
        if 'Velocities' in self.sections:
            for line in self.sections['Velocities']:
                tok = line.split()
                atom = self.idmap[int(tok[0])]
                atom['vx'] = float(tok[1])
                atom['vy'] = float(tok[2])
                atom['vz'] = float(tok[3])
        # extract bond data
        for line in self.sections['Bonds']:
            tok = line.split()
            bond = {}
            bond['n'] = int(tok[0])
            bond['id'] = int(tok[1])
            bond['i'] = int(tok[2])
            bond['j'] = int(tok[3])
            bond['note'] = ''.join([s + ' ' for s in tok[4:]]).strip()
            self.bonds.append(bond)

    def depolarize(self, drude):
        """remove Drude particles"""
        self.extract_pol(drude)
        # old-id -> new-id maps for the surviving (non-Drude) entities
        atom_tp_map = {}
        bond_tp_map = {}
        atom_map = {}
        bond_map = {}
        q = {}        # charge per atom number (Drude charges folded back)
        atom_tp = {}  # atom number -> type id
        m = {}        # mass per type id (Drude masses folded back, once)
        for att in self.atomtypes:
            if att['dflag'] != 'd':
                atom_tp_map[att['id']] = len(atom_tp_map) + 1
            m[att['id']] = att['m']
        for atom in self.atoms:
            if atom['id'] in atom_tp_map:
                atom_map[atom['n']] = len(atom_map) + 1
            q[atom['n']] = atom['q']
            atom_tp[atom['n']] = atom['id']
        for bond in self.bonds:
            if bond['i'] in atom_map and bond['j'] in atom_map:
                # bond between two kept atoms: keep it
                bond_map[bond['n']] = len(bond_map) + 1
                if bond['id'] not in bond_tp_map:
                    bond_tp_map[bond['id']] = len(bond_tp_map) + 1
            else:
                # core-Drude bond: fold the Drude charge back onto the core,
                # and (once per type, hence the pop) its mass too
                if bond['i'] in atom_map:
                    q[bond['i']] += q[bond['j']]
                    if atom_tp[bond['j']] in m:
                        m[atom_tp[bond['i']]] += m.pop(atom_tp[bond['j']])
                else:
                    q[bond['j']] += q[bond['i']]
                    if atom_tp[bond['i']] in m:
                        m[atom_tp[bond['j']]] += m.pop(atom_tp[bond['i']])
        # delete Drude entries and renumber the rest (reverse order so that
        # deletion by index stays valid)
        size = len(self.atomtypes)
        for iatom_tp in reversed(range(size)):
            att = self.atomtypes[iatom_tp]
            if att['id'] not in atom_tp_map:
                del self.atomtypes[iatom_tp]
            else:
                att['m'] = m[att['id']]
                att['id'] = atom_tp_map[att['id']]
        size = len(self.bondtypes)
        for ibond_tp in reversed(range(size)):
            bdt = self.bondtypes[ibond_tp]
            if bdt['id'] not in bond_tp_map:
                del self.bondtypes[ibond_tp]
            else:
                bdt['id'] = bond_tp_map[bdt['id']]
        size = len(self.atoms)
        for iatom in reversed(range(size)):
            atom = self.atoms[iatom]
            if atom['n'] not in atom_map:
                del self.atoms[iatom]
            else:
                atom['q'] = q[atom['n']]
                atom['n'] = atom_map[atom['n']]
                atom['id'] = atom_tp_map[atom['id']]
        size = len(self.bonds)
        for ibond in reversed(range(size)):
            bond = self.bonds[ibond]
            if bond['n'] not in bond_map:
                del self.bonds[ibond]
            else:
                bond['n'] = bond_map[bond['n']]
                bond['id'] = bond_tp_map[bond['id']]
                bond['i'] = atom_map[bond['i']]
                bond['j'] = atom_map[bond['j']]
        # rebuild the text sections and counts from the pruned registers
        self.sections['Atoms'] = []
        for atom in self.atoms:
            self.sections['Atoms'].append(atomline(atom))
        self.headers['atoms'] = len(self.atoms)
        self.sections['Masses'] = []
        for att in self.atomtypes:
            self.sections['Masses'].append(massline(att))
        self.headers['atom types'] = len(self.atomtypes)
        self.sections['Bonds'] = []
        for bond in self.bonds:
            self.sections['Bonds'].append(bondline(bond))
        self.headers['bonds'] = len(self.bonds)
        self.sections['Bond Coeffs'] = []
        for bdt in self.bondtypes:
            self.sections['Bond Coeffs'].append(bdtline(bdt))
        self.headers['bond types'] = len(self.bondtypes)
        if 'Velocities' in self.sections:
            self.sections['Velocities'] = []
            for atom in self.atoms:
                self.sections['Velocities'].append(velline(atom))

    def lmpscript(self, drude, outfile, thole = 2.6, cutoff = 12.0):
        """print lines for input script, including pair_style thole"""
        pairfile = "pair-drude.lmp"
        dfound = False
        for att in self.atomtypes:
            if att['dflag'] == 'd':
                dfound = True
                break
        if not dfound:
            print("# No polarizable atoms found.")
            return
        print("# Commands to include in the LAMMPS input script\n")
        print("# adapt the pair_style command as needed")
        print("pair_style hybrid/overlay ... coul/long/cs {0:.1f} "\
              "thole {1:.3f} {0:.1f}\n".format(cutoff, thole))
        print("# data file with Drude oscillators added")
        print("read_data {0}\n".format(outfile))
        print("# pair interactions with Drude particles written to file")
        print("# Thole damping recommended if more than 1 Drude per molecule")
        print("include {0}\n".format(pairfile))
        with open(pairfile, "w") as f:
            f.write("# interactions involving Drude particles\n")
            # att is the first 'd'-flagged type from the loop above; since
            # polarize() allocates Drude type ids after all core ids, the
            # range "{id}*" covers every Drude type
            f.write("pair_coeff * {0:3d}* coul/long/cs\n".format(att['id']))
            f.write("# Thole damping if more than 1 Drude per molecule\n")
            # Thole parameters for I,J pairs (I <= J, no mixing)
            ifound = False
            for atti in self.atomtypes:
                itype = atti['type'].split()[0]
                for ddt in drude.types:
                    dtype = ddt['type'].split()[0]
                    if dtype == itype:
                        alphai = ddt['alpha']
                        tholei = ddt['thole']
                        ifound = True
                        break
                jfound = False
                for attj in self.atomtypes:
                    if attj['id'] < atti['id']:
                        continue
                    jtype = attj['type'].split()[0]
                    for ddt in drude.types:
                        dtype = ddt['type'].split()[0]
                        if dtype == jtype:
                            alphaj = ddt['alpha']
                            tholej = ddt['thole']
                            jfound = True
                            break
                    if ifound and jfound:
                        # geometric mixing for alpha, arithmetic for thole;
                        # omit the thole value when it equals the global one
                        alphaij = (alphai * alphaj)**0.5
                        tholeij = (tholei + tholej) / 2.0
                        if tholeij == thole:
                            f.write("pair_coeff {0:4} {1:4} thole "\
                                    "{2:7.3f}\n".format(atti['id'], attj['id'],
                                                        alphaij))
                        else:
                            f.write("pair_coeff {0:4} {1:4} thole {2:7.3f} "\
                                    "{3:7.3f}\n".format(atti['id'],attj['id'],
                                                        alphaij, tholeij))
                    jfound = False
                ifound = False
        print("# atom groups convenient for thermostats (see package "
              "documentation), etc.")
        gatoms = gcores = gdrudes = ""
        for att in self.atomtypes:
            if att['dflag'] != 'd':
                gatoms += " {0}".format(att['id'])
            if att['dflag'] == 'c':
                gcores += " {0}".format(att['id'])
            if att['dflag'] == 'd':
                gdrudes += " {0}".format(att['id'])
        print("group ATOMS type" + gatoms)
        print("group CORES type" + gcores)
        print("group DRUDES type" + gdrudes)
        print("")
        print("# flag for each atom type: [C]ore, [D]rude, [N]on-polarizable")
        drudetypes = ""
        for att in self.atomtypes:
            drudetypes += " {0}".format(att['dflag'].upper())
        print("fix DRUDE all drude" + drudetypes)
        print("")
        print("# ATTENTION!")
        print("# * read_data may need 'extra/special/per/atom' keyword, "
              "LAMMPS will exit with a message.")
        print("# * If using fix shake the group-ID must not include "
              "Drude particles.")
        print("# Use group ATOMS for example.")
        print("# * Give all I<=J pair interactions, no mixing.")
        print("# * Pair style coul/long/cs from CORESHELL package is used "\
              "for interactions")
        print("# of Drude particles. Alternatively pair lj/cut/thole/long "\
              "could be used,")
        print("# avoiding hybrid/overlay and allowing mixing. See doc "\
              "pages.")
# --------------------------------------
# unit-conversion constants used by Drude below
kcal = 4.184  # kJ per kcal (real units -> kJ/mol)
eV = 96.485  # kJ/mol per eV (metal units -> kJ/mol)
fpe0 = 0.000719756  # (4 Pi eps0) in e^2/(kJ/mol A)
class Drude(object):
    """specification of drude oscillator types"""

    def __init__(self, drudefile, polar = '', positive = False, metal = False):
        """Read Drude oscillator parameters, one type per line.

        Columns: type, drude mass, drude charge, force constant k,
        polarisability alpha, thole parameter.  polar='q' recomputes the
        charge from k and alpha; polar='k' recomputes k from the charge
        and alpha.  `positive` selects the sign of the Drude charge and
        `metal` selects eV-based instead of kcal-based force constants.
        """
        self.types = []
        charge_sign = 1.0 if positive else -1.0
        # LAMMPS uses k/2 in its harmonic bond style, hence the factor 2
        spring_divisor = 2.0 * (eV if metal else kcal)
        with open(drudefile, "r") as f:
            for raw in f:
                raw = raw.strip()
                if not raw or raw.startswith('#'):
                    continue  # skip blanks and comment lines
                fields = raw.split()
                entry = {}
                entry['type'] = fields[0]
                entry['dm'] = float(fields[1])
                dq = float(fields[2])
                k = float(fields[3])
                alpha = float(fields[4])
                entry['alpha'] = alpha
                entry['thole'] = float(fields[5])
                if polar == 'q':
                    dq = (fpe0 * k * alpha)**0.5
                elif polar == 'k':
                    k = dq*dq / (fpe0 * alpha)
                entry['dq'] = charge_sign * abs(dq)
                entry['k'] = k / spring_divisor
                self.types.append(entry)
# --------------------------------------
def main():
    """Command-line entry point: parse options, then polarize (default)
    or depolarize (-d) a LAMMPS data file using a Drude parameter file."""
    parser = argparse.ArgumentParser(description = usage,
        formatter_class = argparse.RawTextHelpFormatter)
    parser.add_argument('-f', '--ffdrude', default = 'drude.dff',
                        help = 'Drude parameter file (default: drude.dff)')
    parser.add_argument('-t', '--thole', type = float, default = 2.6,
                        help = 'Thole damping parameter (default: 2.6)')
    parser.add_argument('-c', '--cutoff', type = float, default = 12.0,
                        help = 'distance cutoff/A (default: 12.0)')
    parser.add_argument('-q', '--qcalc', action = 'store_true',
                        help = 'Drude charges calculated from polarisability '\
                        '(default: q value from parameter file)')
    parser.add_argument('-k', '--kcalc', action = 'store_true',
                        help = 'Drude force constants calculated from '\
                        'polarisability (default: k value from parameter file)')
    parser.add_argument('-p', '--positive', action = 'store_true',
                        help = 'Drude particles have positive charge '\
                        '(default: negative charge)')
    parser.add_argument('-m', '--metal', action = 'store_true',
                        help = 'LAMMPS metal units (default: real units)')
    parser.add_argument('-d', '--depolarize', action = 'store_true',
                        help = 'remove Drude dipole polarization from '\
                        'LAMMPS data file')
    parser.add_argument('infile', help = 'input LAMMPS data file')
    parser.add_argument('outfile', help = 'output LAMMPS data file')
    args = parser.parse_args()

    if args.qcalc:
        polar = 'q'
    elif args.kcalc:
        # bug fix: this was 'p', which Drude.__init__ never checks (it only
        # handles 'q' and 'k'), so --kcalc silently had no effect
        polar = 'k'
    else:
        polar = ''

    data = Data(args.infile)
    drude = Drude(args.ffdrude, polar, args.positive, args.metal)
    if not args.depolarize:
        data.polarize(drude)
        data.lmpscript(drude, args.outfile, args.thole, args.cutoff)
    else:
        data.depolarize(drude)
    data.write(args.outfile)

if __name__ == '__main__':
    main()
|
akohlmey/lammps
|
tools/drude/polarizer.py
|
Python
|
gpl-2.0
| 28,457
|
[
"LAMMPS"
] |
833fdddbdea3acc8bb3c2e7dbedf295669d37e5fe6e1b437c8b75daff502438e
|
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import BaggingRegressor
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import RandomTreesEmbedding
from sklearn.neural_network import MLPRegressor
from sklearn.linear_model import ElasticNet
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import Imputer
from sklearn import metrics
import numpy as np
def get_gaussian_process_regressor():
    """Return a default Gaussian process regressor and its display name,
    each wrapped in a single-element list."""
    model = GaussianProcessRegressor()
    return [model], ['Gaussian Process']
def get_mlp_regressor(num_hidden_units=51):
    """Return an MLP regressor (hidden layer width = num_hidden_units)
    and its display name, each wrapped in a single-element list."""
    model = MLPRegressor(hidden_layer_sizes=num_hidden_units)
    return [model], ['Multi-Layer Perceptron']
def get_ensemble_models():
    """Return the ensemble regressors used as baselines (fixed random
    seeds) plus their display names, as two parallel lists."""
    named_models = [
        ('Random Forests', RandomForestRegressor(n_estimators=51, min_samples_leaf=5, min_samples_split=3, random_state=42)),
        ('Bagging', BaggingRegressor(n_estimators=51, random_state=42)),
        ('Extra Trees', ExtraTreesRegressor(n_estimators=71, random_state=42)),
        ('AdaBoost', AdaBoostRegressor(random_state=42)),
        ('Gradient Boost', GradientBoostingRegressor(n_estimators=101, random_state=42)),
    ]
    classifier_list = [model for _, model in named_models]
    classifier_name_list = [name for name, _ in named_models]
    return classifier_list, classifier_name_list
def get_linear_model():
    """Return a default ElasticNet regressor and its display name,
    each wrapped in a single-element list."""
    model = ElasticNet()
    return [model], ['Elastic Net']
# Report the standard sklearn regression metrics (MAE, median AE, MSE, R2)
# for an already-fitted model on a held-out test set.
# NOTE: Python 2 print statements -- this whole script targets Python 2.
def print_evaluation_metrics(trained_model,trained_model_name,X_test,y_test):
    print '--------- For Model : ', trained_model_name ,' ---------\n'
    predicted_values = trained_model.predict(X_test)
    print "Mean Absolute Error : ", metrics.mean_absolute_error(y_test,predicted_values)
    print "Median Absolute Error : ", metrics.median_absolute_error(y_test,predicted_values)
    print "Mean Squared Error : ", metrics.mean_squared_error(y_test,predicted_values)
    print "R2 Score : ", metrics.r2_score(y_test,predicted_values)
    print "---------------------------------------\n"
def label_encode_frame(dataframe):
    """Label-encode every string-valued column of *dataframe* in place.

    A column whose first entry is NaN is scanned (up to the first 1000
    rows) for a string value before deciding to encode, so sparsely
    filled string columns are still converted.  Returns the dataframe.
    """
    columns = dataframe.columns
    encoder = LabelEncoder()
    for column in columns:
        first = dataframe[column][0]
        # Bug fix: the original tested `type(first) is np.nan`, which is
        # always False (np.nan is a float *value*, not a type), so columns
        # starting with NaN were never encoded.  pd.isnull matches NaN/None.
        if pd.isnull(first):
            for i in range(len(dataframe)):
                if i > 1000:
                    break
                if type(dataframe[column][i]) is str:
                    dataframe[column] = encoder.fit_transform(dataframe[column].values)
                    break
        elif type(first) is str:
            dataframe[column] = encoder.fit_transform(dataframe[column].values)
    return dataframe
def spilt_date(list_of_date_string, separator='-', format='yyyy-mm-dd'):
    """Split 'yyyy-mm-dd' style date strings into three parallel lists:
    (months, days, years)."""
    months, days, years = [], [], []
    for raw in list_of_date_string:
        parts = raw.strip().split(separator)
        months.append(parts[1])
        days.append(parts[2])
        years.append(parts[0])
    return months, days, years
def isfloat(value):
    """Return True if *value* can be converted to float, else False."""
    try:
        float(value)
        return True
    except (TypeError, ValueError):
        # bug fix: was a bare `except`, which also swallowed
        # KeyboardInterrupt/SystemExit; only conversion errors belong here
        return False
def handle_mixed_data_types(dataframe):
    """Coerce mostly-numeric object columns of *dataframe* to floats.

    For each column, count entries convertible to float vs. not.  When
    numeric entries are the majority, replace each non-numeric entry with
    the column mean and convert the rest to float; otherwise leave the
    column's values untouched.  Returns the (mutated) dataframe.
    """
    def _to_float(value):
        # Inlined float check so this function is self-contained
        # (mirrors the module's isfloat helper).
        try:
            return float(value)
        except (TypeError, ValueError):
            return None

    for column_name in dataframe.columns:
        column_data = list(dataframe[column_name].values)
        float_count = 0
        float_sum = 0.0
        string_count = 0
        for data in column_data:
            if _to_float(data) is not None:
                float_count += 1.0
                float_sum += float(data)
            else:
                string_count += 1
        # bug fix: guard float_count > 0 -- the original divided by zero on
        # an empty column (0 >= 0 passed the majority test)
        if float_count and float_count >= string_count:
            mean = float_sum / float_count
            for index, value in enumerate(column_data):
                converted = _to_float(value)
                column_data[index] = mean if converted is None else converted
        dataframe[column_name] = column_data
    return dataframe
# --- Walmart "Sales in Stormy Weather" baseline pipeline ---
# Join train data with the store->station key and the weather table, encode
# the date, then fit and score each ensemble regressor on a held-out split.
weather_filename = 'weather.csv'
train_filename = 'train.csv'
key_filename = 'key.csv'
weather_frame = pd.read_csv(weather_filename)
train_frame = pd.read_csv(train_filename)
key_frame = pd.read_csv(key_filename)
# 'codesum'/'depart' are dropped before numeric coercion; the remaining
# weather columns go through the mixed-type cleanup below
weather_frame.drop(['codesum','depart'],axis=1,inplace=True)
weather_frame = handle_mixed_data_types(weather_frame)
# train rows -> weather station via key, then -> weather readings by
# (station, date)
final_frame = pd.merge(train_frame,key_frame,how='inner',left_on='store_nbr',right_on='store_nbr')
final_frame = pd.merge(final_frame,weather_frame,how='inner',left_on=['station_nbr','date'],right_on=['station_nbr','date'])
target_values = list(final_frame['units'].values)
# replace the raw date string with separate month/day/year columns
final_frame['month'], final_frame['day'], final_frame['year'] = spilt_date(list(final_frame['date'].values))
del final_frame['units']
del final_frame['date']
X_train,X_test,y_train,y_test = train_test_split(final_frame.values,target_values,test_size=0.2,random_state=42)
regressor_list,regressor_name_list = get_ensemble_models()
for regressor,regressor_name in zip(regressor_list,regressor_name_list):
    regressor.fit(X_train,y_train)
    print_evaluation_metrics(regressor,regressor_name,X_test,y_test)
|
rupakc/Kaggle-Compendium
|
Walmart Recruiting - Sales in Stormy Weather/storm-baseline.py
|
Python
|
mit
| 5,165
|
[
"Gaussian"
] |
9a9bcc53021db63f18545ce7184e6351ad8ef0143480741bdd3985babf3978ee
|
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
"""Tool for applying environment variable to paths."""
import os
import re
def __sub(match):
    """Substitute function for environment variables: return the value of
    the matched ${NAME} variable, or the original text when NAME is not
    set in the environment."""
    name = match.group('env')
    return os.environ.get(name, match.group())
def eval_path(path):
    """
    Import environment variables into paths.
    Inputs:
        path[str]: Path containing environment variable: e.g., ${MOOSE_DIR}/python

    Each ${NAME} whose variable is set is replaced by its value; unset
    variables are left as-is.
    """
    def expand(match):
        name = match.group('env')
        if name in os.environ:
            return os.environ[name]
        return match.group()
    return re.sub(r'\$\{(?P<env>.*?)\}', expand, path)
|
nuclear-wizard/moose
|
python/mooseutils/eval_path.py
|
Python
|
lgpl-2.1
| 805
|
[
"MOOSE"
] |
bce311a5fd87bdddde1bc0fa1347e4b04a0e7e4f97d6db4ee23b894ff7532129
|
#!/usr/bin/python
"""
This re-aligner script is part of the CLAM pipeline.
It takes bam file as input, and outputs a weighed bam file for multi-mapped reads.
Tested under python 2.7.3
"""
__author__ = 'Zijun Zhang'
__version__ = '1.0.0'
__email__ = 'zj.z@ucla.edu'
from optparse import OptionParser
import os, subprocess, shutil, sys, copy
import pysam
import numpy as np
from collections import defaultdict, deque
from time import strftime
import bisect, operator
import cPickle as pickle
import pybedtools
def main():
    """
    The main wrapper for CLAM re-aligner: parse options, pre-process the
    input BAM (filter multi-hits, build genomic regions and the
    read<->location maps), run the EM assignment per connected subgraph,
    then convert the weighted output to a sorted, indexed BAM.

    Fixes vs. original:
    - output file open modes were inverted: resume truncated ('w') the very
      file whose finished nodes had just been read, while a fresh run
      appended ('a'); now resume appends and a fresh run truncates.
    - an error log line referenced the undefined name obs_reads (NameError).
    - the samtools header command hardcoded filter100.sorted.bam instead of
      honoring --max-multihits as the rest of the code does.
    """
    # options parsing
    usage='usage: %prog <options> input_file.bam'
    parser=OptionParser(usage)
    parser.add_option('-o', dest='output_dir', default='./out_CLAM', help='Output file folder [Default %default]')
    parser.add_option('-t', dest='tmp_dir', default='./tmp_CLAM', help='Temporary file folder [Default %default]')
    parser.add_option('-w', dest='window_size', type='int', default=50, help='Local window size for EM [Default: %default]')
    parser.add_option('--max-multihits', dest='max_multihits', type='int', default=100, help='Discard reads mapped to more than <max_multihits> locations. [Default: %default]')
    parser.add_option('--min-unique-reads', dest='min_unique_reads', type='int', default=0, help='Discard genomic regions with less than <min_unique_reads> of unique reads. [Default: %default]')
    parser.add_option('--is-stranded', dest='is_stranded', default=False, action='store_true', help='Indicates if the reads are mapped with strand information. [Default: %default]')
    parser.add_option('--resume', dest='resume', action='store_true', default=False, help='Resume mode - skipping pre-processing [Default: %default]')
    parser.add_option('--verbose', dest='verbose', action='store_true', default=False, help='Verbose mode - print out all intermediate steps [Default: %default]')
    parser.add_option('--max-gap', dest='max_gaps', type='int', default=50, help='Maximum distance allowed in grouping reads. [Default: %default]')
    (options,args)=parser.parse_args()
    if len(args)<1:
        parser.error('Missing required input.')
    input_file=args[0]
    input_cov = args[1] if len(args)>1 else None
    output_dir=os.path.abspath(options.output_dir)
    tmp_dir=os.path.abspath(options.tmp_dir)
    verbose=options.verbose
    steps_finished=[]
    print_time_stamp('Program start.')
    # checking for output files that already exist; if non-exist, call
    # preprocessing subroutines; if resume is turned on, start from finished
    # files. NOTE: currently doesn't check if a file is created but truncated
    if options.resume:
        if os.path.isfile('%s/filter%d.sorted.bam' % (tmp_dir, options.max_multihits)):
            filter_filename='%s/filter%d.sorted.bam' % (tmp_dir,options.max_multihits)
            multiread_set=pickle.load(open(tmp_dir + '/multiread_set.pdata','rb'))
            steps_finished.append('filtered%d.sorted.bam' % options.max_multihits)
        if os.path.isfile(tmp_dir + '/genomic_regions_%s_%s.pdata' % (str(options.max_gaps), str(options.min_unique_reads))):
            genomic_regions=pickle.load(open(tmp_dir + '/genomic_regions_%s_%s.pdata' % (str(options.max_gaps), str(options.min_unique_reads) ),'rb'))
            location_to_reads=pickle.load(open(tmp_dir + '/loc2read.pdata','rb'))
            read_to_locations=pickle.load(open(tmp_dir + '/read2loc.pdata','rb'))
            steps_finished.extend(['genomic_regions.pdata', 'loc2read.pdata', 'read2loc.pdata'])
        if os.path.isfile(output_dir + '/CLAM_mapper.out'):
            steps_finished.append('CLAM_mapper.out')
        print_time_stamp('Resume mode On, found files: ' + ','.join(steps_finished))
    else:
        # fresh run: wipe any previous output/tmp folders
        if(os.path.isdir(tmp_dir)):
            shutil.rmtree(tmp_dir)
        if(os.path.isdir(output_dir)):
            shutil.rmtree(output_dir)
        os.mkdir(tmp_dir)
        os.mkdir(output_dir)
    write_parameter_log(options, args, output_dir)
    if not ( options.resume and 'filter_filename' in locals() ):
        filter_filename, multiread_set=filter_multihits(input_file, options.max_multihits, verbose, tmp_dir)
    bamfile=pysam.Samfile(filter_filename,'rb')
    # Construct genomic regions and read-location / location-read dictionary
    if not ( options.resume and 'genomic_regions.pdata' in steps_finished ):
        genomic_regions, location_to_reads, read_to_locations=get_genomic_regions(bamfile, options.max_gaps, verbose, tmp_dir, options.is_stranded, options.min_unique_reads, multiread_set)
    bamfile.close()
    if not input_cov is None:
        # NOTE(review): preserved_nodes is only produced by code that is
        # commented out upstream; this covariate path will raise NameError
        # if exercised -- confirm before using a covariate file.
        genomic_regions, read_to_locations, location_to_reads = update_by_filter(genomic_regions, read_to_locations, location_to_reads, preserved_nodes)
    print_time_stamp('Pre-process done.')
    if options.resume and 'CLAM_mapper.out' in steps_finished:
        nodes_finished = read_rm_out(output_dir+'/CLAM_mapper.out')
    else:
        nodes_finished=set()
    # Call EM model to assign multi-mapped reads
    print_time_stamp('EM start.')
    n_rounds=0
    # bug fix: modes were swapped -- resume must append to the partially
    # written output, a fresh run must truncate it (unbuffered, Python 2)
    out=open(output_dir + '/CLAM_mapper.out','a',0) if options.resume else open(output_dir + '/CLAM_mapper.out','w',0)
    seen=nodes_finished
    for chr_strand in genomic_regions:
        chr, strand = chr_strand.split(':')
        for start, end in genomic_regions[chr_strand]:
            node=chr_strand + ':' + str(start) + ':' + str(end)
            k = len(seen)
            if not (k+1) % 1000:
                print_time_stamp(str(k+1) + ' finished.')
            subgraph, seen=search_node_subg(node, location_to_reads, read_to_locations, seen)
            # singleton subgraphs carry no multi-mapping ambiguity
            if subgraph is None or len(subgraph)<2:
                continue
            node_track, multi_reads_weights = construct_track_lite(subgraph, location_to_reads, read_to_locations)
            n_rounds += 1
            if len(multi_reads_weights)<1:
                # bug fix: dropped str(len(obs_reads)) -- obs_reads is not
                # defined in this scope and raised NameError here
                print_time_stamp('Error occured for: ' + ','.join(subgraph) + ': No fully contained reads found.')
                continue
            if verbose:
                print_time_stamp('Round ' + str(n_rounds) + ': seen = ' + str(len(seen)) + '; current subgraph = ' + str(len(subgraph)) + '; obs reads = ' + str(len(multi_reads_weights)))
            new_reads_weights = runEM(node_track, multi_reads_weights, w=options.window_size)
            wrt_content = make_write_content(new_reads_weights)
            out.write(wrt_content)
    out.close()
    # write output files: sort the BED-like output, rebuild a SAM with the
    # original header, then convert to a sorted, indexed BAM
    print_time_stamp('Sorting output Bedfile.')
    subprocess.call(''' sort -k1,1 -k2,2n %s/CLAM_mapper.out > %s/CLAM_mapper.sorted.out ''' % (output_dir, output_dir), shell=True)
    # bug fix: was hardcoded 'filter100.sorted.bam'; honor --max-multihits
    header_cmd='samtools view -H ' + tmp_dir + '/filter%d.sorted.bam > ' % options.max_multihits + output_dir + '/sam_header.sam'
    subprocess.call(header_cmd, shell=True)
    body_cmd = ''' awk '{if($6=="+"){print $4"\t256\t"$1"\t"$2+1"\t0\t"$3-$2+1"M\t*\t0\t0\t*\t*\tAS:f:"$5}else{print $4"\t272\t"$1"\t"$2+1"\t0\t"$3-$2+1"M\t*\t0\t0\t*\t*\tAS:f:"$5 }}' ''' + output_dir + '/CLAM_mapper.sorted.out > ' + output_dir + '/CLAM_mapper.sorted.sam'
    subprocess.call(body_cmd, shell=True)
    makeBam_cmd = 'cat %s/sam_header.sam %s/CLAM_mapper.sorted.sam | samtools view -bS - > %s/assigned_multimapped_reads.bam' % (output_dir, output_dir,output_dir)
    subprocess.call(makeBam_cmd, shell=True)
    index_cmd = 'samtools index %s/assigned_multimapped_reads.bam' % output_dir
    subprocess.call(index_cmd, shell=True)
    print_time_stamp('Re-alignment is done.')
def write_parameter_log(options, args, output_dir):
    """
    Write paramter values to a log file, named by current time.
    """
    log_path = output_dir + '/CLAM_Aligner.Log.' + strftime("%Y%m%d_%H%M") + '.txt'
    # labels and order mirror the command-line options (note 'max-gap')
    option_fields = [
        ('resume', options.resume),
        ('verbose', options.verbose),
        ('output_dir', options.output_dir),
        ('tmp_dir', options.tmp_dir),
        ('window_size', options.window_size),
        ('max_multihits', options.max_multihits),
        ('is_stranded', options.is_stranded),
        ('max-gap', options.max_gaps),
    ]
    with open(log_path, 'w') as log:
        log.write('CLAM Re-aligner ' + __version__ + '\n')
        log.write('Args:\n' + '\n'.join(args) + '\n')
        for label, value in option_fields:
            log.write(label + ': ' + str(value) + '\n')
    return
def read_rm_out(filename):
    """Collect node names whose EM pass already finished (used with --resume).

    Scans a tab-delimited output file; rows with at least seven columns carry
    "name|flag" in column 4, and a flag of 'T' marks the node as done.
    """
    finished = set()
    with open(filename, 'r') as handle:
        for row in handle:
            fields = row.split('\t')
            if len(fields) >= 7:
                name, flag = fields[3].split('|')
                if flag == 'T':
                    finished.add(name)
    return finished
def read_cufflinks(filename):
    """Parse a Cufflinks FPKM tracking file into {gene_id: fpkm}.

    The first (header) line is skipped; column 1 is the gene id and
    column 10 holds the FPKM value.
    """
    with open(filename, 'r') as handle:
        handle.readline()  # drop the header row
        return {cols[0]: float(cols[9])
                for cols in (row.split('\t') for row in handle)}
def search_node_subg(node, location_to_reads, read_to_locations, seen):
    """Extract the connected subgraph containing `node` and record its members.

    Two genomic regions (nodes) are connected when at least one read maps to
    both.  Starting from `node`, a breadth-first traversal follows
    node -> reads -> nodes links until no new region is reached.

    Args:
        node: region id ("chr:strand:start:end") to start the traversal from.
        location_to_reads: dict mapping a region id to the reads it contains.
        read_to_locations: dict mapping a read name to the regions it hits.
        seen: set of regions already assigned to a previous subgraph;
            updated in place.

    Returns:
        (subgraph_nodes, seen) where subgraph_nodes is a list of region ids,
        or (None, seen) if `node` was already visited.
    """
    if node in seen:
        return None, seen
    tmp_net = set()
    queue = [node]
    while len(queue) > 0:
        # BUGFIX: the original used map(tmp_net.add, queue), which is a no-op
        # under Python 3 because map() became lazy; set.update is eager in
        # both Python 2 and 3.
        tmp_net.update(queue)
        seen.update(queue)
        new_queue = deque([])
        for x in queue:
            x_reads = location_to_reads[x]
            new_queue.extend(next_node for x_read in x_reads
                             for next_node in read_to_locations[x_read]
                             if next_node not in tmp_net)
        queue = list(set(new_queue))
    subg = list(set(tmp_net))
    return subg, seen
class Bit:
    """Binary Indexed (Fenwick) Tree over a genomic interval.

    Supports point updates and prefix sums in O(log n).  Adapted from
    http://www.geeksforgeeks.org/binary-indexed-tree-or-fenwick-tree-2/
    """

    def __init__(self, n):
        """Allocate a tree able to index positions 1..n (1-based)."""
        # Internal capacity is the smallest power of two strictly greater than n.
        capacity = 1
        while n >= capacity:
            capacity *= 2
        self.size = capacity
        self.array_size = n
        self.data = [0] * capacity

    def sum(self, i):
        """Return the prefix sum over positions 1..i, clamping i to array_size."""
        assert i >= 0
        if i == 0:
            return 0
        i = min(i, self.array_size)
        total = 0
        while i > 0:
            total += self.data[i]
            i -= i & -i
        return total

    def add(self, i, x):
        """Add value x at position i (1-based)."""
        assert i > 0
        while i < self.size:
            self.data[i] += x
            i += i & -i
def construct_track_lite(subgraph, location_to_reads, read_to_locations):
    """Build a coverage track (one Fenwick tree per region) for a subgraph.

    Each read contributes 1/len(read_nodes) of weight at its center position
    in every region it maps to; multi-mapped reads are additionally recorded
    so the EM step can re-weight them later.

    Returns:
        (node_track, multi_reads_weights): node_track maps a region id to a
        Bit tree of weighted read centers; multi_reads_weights maps
        read -> region -> [score, read_start_index, read_end_index], with
        indices 1-based within the region.
    """
    node_track = {}
    total_len = 0
    obs_reads = deque([])
    for node in subgraph:
        chr, strand, start, end = node.split(':')
        start, end = int(start), int(end)
        this_len = end - start + 1
        # One tree per region, sized to the region length.
        node_track[node] = Bit(this_len)
        node_reads = location_to_reads[node]
        obs_reads.extend(node_reads)
    # De-duplicate reads observed across regions (original ordering is lost).
    obs_reads = list(set(obs_reads))
    multi_reads_weights = defaultdict(dict)
    for i in range(len(obs_reads)):
        read = obs_reads[i]
        read_nodes = read_to_locations[read]
        # Uniform prior: split one unit of weight evenly over all mapped regions.
        read_score = 1.0 / len(read_nodes)
        is_multi = True if read_score != 1 else False
        for nd in read_nodes:
            chr, strand, nds, nde = nd.split(':')
            nds, nde = int(nds), int(nde)
            # read_to_locations stores tab-separated genomic start/end.
            rds, rde = [int(x) for x in read_nodes[nd].split('\t')]
            rlen = rde - rds + 1
            # Convert genomic coordinates to 1-based offsets within the region.
            read_ind = rds - nds + 1
            read_end = read_ind + rlen - 1
            read_center = int(np.ceil((read_ind + read_end) / 2.0))
            node_track[nd].add(read_center, read_score)
            if is_multi:
                multi_reads_weights[read][nd] = [read_score, read_ind, read_end]
    return(node_track, multi_reads_weights)
def runEM(node_track, multi_reads_weights, w=50, epsilon=1e-6, max_iter=100, verbose=True):
    """EM re-assignment of multi-mapped reads within one subgraph.

    Iteratively re-weights each multi-mapped read across its candidate
    regions in proportion to the local coverage in a +/-w window around the
    read center, until the summed squared score change drops below epsilon
    or max_iter iterations have run.

    Args:
        node_track: region id -> Bit coverage tree (from construct_track_lite).
        multi_reads_weights: read -> region -> [score, read_start, read_end].
        w: half-width (bases) of the local coverage window.
        epsilon: convergence threshold on the squared score residue.
        max_iter: maximum number of iterations.
        verbose: if True, log the residue every 10 iterations.

    Returns:
        multi_reads_weights, with scores updated in place.
    """
    iter = 0
    residue = 1
    while iter < max_iter and residue > epsilon:
        residue = 0
        reweight = defaultdict(dict)
        # Estimate re-distribution weight for each (read, region) pair from
        # the current coverage around the read center, clamped to the region.
        for read in multi_reads_weights:
            for nd in multi_reads_weights[read]:
                sz = node_track[nd].size - 1
                old_score, rind, rend = multi_reads_weights[read][nd]
                rcenter = int(np.ceil((rind + rend) / 2.0))
                reweight[read][nd] = max( 0, node_track[nd].sum(min(sz, rcenter + w)) - node_track[nd].sum(max(0,rcenter - w)) )
        # Normalize the weights per read and push the score deltas back into
        # the coverage tracks so later reads see the updated coverage.
        for read in reweight:
            dn = sum([reweight[read][x] for x in reweight[read]])
            if dn == 0:
                # No coverage anywhere near this read; avoid division by zero.
                # NOTE: Python 2 print syntax -- this module targets Python 2.
                print >> sys.stdout, 'error occured.'
                dn = 1
            for nd in reweight[read]:
                old_score, rind, rend = multi_reads_weights[read][nd]
                rcenter = int(np.ceil((rind+rend)/2.0))
                new_score = reweight[read][nd] / float(dn)
                node_track[nd].add(rcenter, new_score - old_score)
                residue += (old_score - new_score)**2
                multi_reads_weights[read][nd][0] = new_score
        if verbose and (not iter % 10 or iter == max_iter):
            print_time_stamp('Iter %d, residue = %f' % (iter, residue))
        iter += 1
    return multi_reads_weights
def get_genomic_regions(bamfile, distance, verbose, tmp_dir, is_stranded, min_unique_reads, multiread_set):
    """Cluster reads into candidate genomic regions, per chromosome and strand.

    A read joins the running cluster when its center is within `distance` of
    the cluster's last read center; a closed cluster is kept only if it holds
    more than one read and at least `min_unique_reads` reads not present in
    `multiread_set`.  Intermediates are also pickled into `tmp_dir`.

    Returns:
        (pileup, location_to_reads, read_to_locations): pileup maps
        "chr:strand" to a list of [start, end] intervals; location_to_reads
        maps a region id to the read names in it; read_to_locations maps a
        read name to {region id: tab-separated "start end"}.
    """
    pileup = defaultdict(list)
    location_to_reads = defaultdict(list)
    read_to_locations = defaultdict(dict)
    head = bamfile.header['SQ']
    chrs = [x['SN'] for x in head]
    print_time_stamp('Finding genomic regions with more than ' + str(min_unique_reads) + ' unique reads.')
    for chr in chrs:
        chr_aln = [x for x in bamfile.fetch(chr)]
        # Running clusters: [start, end, last_read_center], one per strand.
        tmp_cluster_pos = [0, 0, 0]
        tmp_cluster_neg = [0, 0, 0]
        tags_pos = []
        tags_neg = []
        if verbose:
            print_time_stamp('finding genomic regions on ' + chr + '..')
        for read in chr_aln:
            read_len = read.qlen if read.qlen > 0 else read.positions[-1] - read.positions[0] + 1
            if read_len > 100 or read.positions[-1] - read.positions[0] > 100:  # possibly a junction read, discard
                continue
            pos = [int(x) for x in read.positions]
            if read.is_reverse and is_stranded:  # add here to avoid negative strand if not stranded library
                if (pos[0] + pos[-1])/2 - tmp_cluster_neg[2] <= distance:  # compute distance using read centers.
                    tmp_cluster_neg[1] = max(pos[-1], tmp_cluster_neg[1])
                    tmp_cluster_neg[2] = (pos[0] + pos[-1])/2
                    tags_neg.append([read.qname, read.positions[0], read.positions[-1]])
                else:
                    # Close the current minus-strand cluster if it has enough unique reads.
                    if len(tags_neg) > 1 and sum([1 for x in tags_neg if not x[0] in multiread_set]) >= min_unique_reads:
                        node_name = chr + ':-:' + str(tmp_cluster_neg[0]) + ':' + str(tmp_cluster_neg[1])
                        pileup[chr + ':-'].append([tmp_cluster_neg[0], tmp_cluster_neg[1]])
                        location_to_reads[node_name].extend([x[0] for x in tags_neg])
                        for x_qname, x_pos0, x_pos1 in tags_neg:
                            read_to_locations[x_qname].update({node_name : str(x_pos0) + '\t' + str(x_pos1) })
                    # Start a new cluster seeded with the current read.
                    tmp_cluster_neg = [pos[0], pos[-1], (pos[0] + pos[-1])/2]
                    tags_neg = [ [read.qname, read.positions[0], read.positions[-1]] ]
            else:
                if (pos[0] + pos[-1])/2 - tmp_cluster_pos[2] <= distance:
                    tmp_cluster_pos[1] = max(pos[-1], tmp_cluster_pos[1])
                    tmp_cluster_pos[2] = (pos[0] + pos[-1])/2
                    tags_pos.append([read.qname, read.positions[0], read.positions[-1]])
                else:
                    # Close the current plus-strand (or unstranded) cluster.
                    if len(tags_pos) > 1 and sum([1 for x in tags_pos if not x[0] in multiread_set]) >= min_unique_reads:
                        node_name = chr + ':+:' + str(tmp_cluster_pos[0]) + ':' + str(tmp_cluster_pos[1])
                        pileup[chr + ':+'].append([tmp_cluster_pos[0], tmp_cluster_pos[1]])
                        location_to_reads[node_name].extend([x[0] for x in tags_pos])
                        for x_qname, x_pos0, x_pos1 in tags_pos:
                            read_to_locations[x_qname].update({node_name : str(x_pos0) + '\t' + str(x_pos1) })
                    tmp_cluster_pos = [pos[0], pos[-1], (pos[0] + pos[-1])/2]
                    tags_pos = [ [read.qname, read.positions[0], read.positions[-1]] ]
    print_time_stamp('Save genomic regions to file.')
    # Persist intermediates so a resumed run can skip this pass.
    pickle.dump(pileup, open(tmp_dir + '/genomic_regions_%s_%s.pdata' % (str(distance), str(min_unique_reads)),'wb'), -1)
    pickle.dump(location_to_reads, open(tmp_dir + '/loc2read.pdata','wb'), -1)
    pickle.dump(read_to_locations, open(tmp_dir + '/read2loc.pdata','wb'), -1)
    return pileup, location_to_reads, read_to_locations
def update_by_filter(genomic_regions, read_to_locations, location_to_reads, preserved_nodes):
    """Drop every node that did not pass the filtering step.

    Rebuilds the three bookkeeping structures so that only regions listed in
    `preserved_nodes` remain; entries whose nodes were all filtered out are
    simply absent from the returned defaultdicts.

    Returns:
        (filtered_genomic_regions, filtered_read_to_locations,
         filtered_location_to_reads)
    """
    kept_regions = defaultdict(list)
    kept_read_to_locations = defaultdict(dict)
    kept_location_to_reads = defaultdict(list)
    for chr_strand, intervals in genomic_regions.items():
        for start, end in intervals:
            if '%s:%s:%s' % (chr_strand, start, end) in preserved_nodes:
                kept_regions[chr_strand].append([start, end])
    for read, node_map in read_to_locations.items():
        for node, coords in node_map.items():
            if node in preserved_nodes:
                kept_read_to_locations[read][node] = coords
    for location, reads in location_to_reads.items():
        if location in preserved_nodes:
            kept_location_to_reads[location] = reads
    return (kept_regions, kept_read_to_locations, kept_location_to_reads)
def filter_by_covr(cov_filename, cov_site_min, cov_gene_min, gtffile, genomic_regions, tmp_dir):
    """Filter candidate regions by local read coverage and host-gene expression.

    A region survives when it contains at least `cov_site_min` reads strictly
    inside it in the coverage BAM, and when the RPKM of an overlapping gene
    (derived from `gtffile` transcripts) is at least `cov_gene_min`.  Either
    criterion is skipped when its threshold is <= 0.

    Returns:
        set of region ids ("chr:strand:start:end") passing both filters.
    """
    node_list = [ chr_strand+':'+str(start)+':'+str(end) for chr_strand in genomic_regions for start,end in genomic_regions[chr_strand] ]
    covfile = pysam.Samfile(cov_filename)
    gene_preserved = deque()
    site_preserved = deque()
    if cov_site_min > 0:
        k = 0
        for chr_strand in genomic_regions:
            chr, strand = chr_strand.split(':')
            print_time_stamp('filtering site: '+chr_strand)
            for start, end in genomic_regions[chr_strand]:
                k += 1
                if not k % 10000:
                    print_time_stamp('filtering site count: ' + str(k) + '/' + str(len(node_list)))
                node = chr_strand + ':' + str(start) + ':' + str(end)
                # Count reads starting strictly inside the region.
                num_reads = sum([1 for x in covfile.fetch(chr, int(start), int(end)) if x.pos>start and x.pos<end])
                if num_reads >= cov_site_min:
                    # BUGFIX: deque has no .add(); the original called
                    # site_preserved.add(node) and raised AttributeError.
                    site_preserved.append(node)
        site_preserved = set(site_preserved)
    else:
        site_preserved = set(node_list)
    if cov_gene_min > 0:
        genomic_regions_list = [ (chr_strand.split(':')[0], int(start), int(end), chr_strand+':'+':'.join([str(start),str(end)]), 'X', chr_strand.split(':')[1] ) for chr_strand in genomic_regions for start,end in genomic_regions[chr_strand]]
        genomic_regions_bed = pybedtools.BedTool(genomic_regions_list)
        gtf = pybedtools.BedTool(gtffile)
        overlap_transcripts = genomic_regions_bed.intersect(gtf, wo=True, s=True)
        overlap_transcripts.saveas(tmp_dir + '/genomic_regions.gtf.bed')
        total = len(overlap_transcripts)
        pybedtools.cleanup()
        del overlap_transcripts
        del gtf
        del genomic_regions_list
        # Library size in millions of mapped reads, for RPKM scaling.
        cov_scale = sum([int(x.split('\t')[2]) for x in pysam.idxstats(cov_filename).split('\n') if len(x)>0]) / 1000000.0
        gene_rpkm = {}
        k = 0
        with open(tmp_dir + '/genomic_regions.gtf.bed', 'r') as f:
            for ele in f:
                line = ele.split()
                k += 1
                if not k % 10000:
                    print_time_stamp('filtering gene RPKM: ' + str(k) + '/' + str(total))
                node = line[3]
                gene_id = line[9]
                if gene_id in gene_rpkm:
                    RPKM = gene_rpkm[gene_id]
                else:
                    chr, start, end = line[6], line[7], line[8]
                    transcript_count = covfile.count(chr, int(start), int(end))
                    # Exonic length in kb from the BED12 block sizes column.
                    block_sizes = [int(x) for x in line[16].split(',') if x!='']
                    gene_len = sum(block_sizes) / 1000.0
                    RPKM = transcript_count / cov_scale / gene_len
                    gene_rpkm[gene_id] = RPKM
                if RPKM >= cov_gene_min:
                    gene_preserved.append(node)
        gene_preserved = set(gene_preserved)
    else:
        gene_preserved = set(node_list)
    return gene_preserved.intersection(site_preserved)
def make_dependency_dict(tmp_dir, is_stranded):
    """Intersect candidate regions with the multi- and unique-read beds.

    Sorts reg.bed, then runs `bedtools intersect` twice (optionally
    strand-aware), leaving reg_multi.bed and reg_unique.bed in tmp_dir.
    All results are written to disk; nothing is returned.
    """
    subprocess.call('LC_COLLATE=C sort -k1,1 -k2,2n %s/reg.bed > %s/reg_sorted.bed' % (tmp_dir, tmp_dir), shell=True)
    strand_opt = '-s' if is_stranded else ''
    for tag in ('multi', 'unique'):
        cmd = 'bedtools intersect %s -a %s/reg_sorted.bed -b %s/%s.bed -wo -sorted > %s/reg_%s.bed' % (strand_opt, tmp_dir, tmp_dir, tag, tmp_dir, tag)
        subprocess.call(cmd, shell=True)
def print_time_stamp(msg):
    """Log `msg` to stderr with a '[YYYY-mm-dd HH:MM:SS] ' prefix.

    Uses sys.stderr.write instead of the Python 2 `print >>` statement so
    this logging helper works under both Python 2 and Python 3.
    """
    current_time = '[' + strftime("%Y-%m-%d %H:%M:%S") + '] '
    sys.stderr.write(current_time + msg + '\n')
def filter_multihits(filename, max_hits, verbose, tmp_dir):
    """Pre-process the input BAM: drop '_' contigs and over-mapped reads.

    Keeps alignments whose NH tag is below `max_hits` (reads without an NH
    tag are kept), sorts and indexes the result, and records which kept read
    names are multi-mapped.

    Returns:
        (sorted_filename, multiread_set): path to the filtered, sorted and
        indexed BAM, and the set of multi-mapped read names (also pickled to
        tmp_dir/multiread_set.pdata).
    """
    if verbose:
        print_time_stamp('filtering multi-mapped up to %d hits.' % max_hits)
    multiread_set = set()
    # awk keeps rows whose 2nd and 3rd fields lack '_'; on `samtools view -h`
    # output this drops alignments on underscore-named contigs (and the
    # matching header lines) -- e.g. chrUn_*/random contigs.
    subprocess.call("samtools view -h %s | awk '{if($2 !~ /_/ && $3 !~ /_/) {print}}' | samtools view -bS - > %s/filter_random.bam" % (filename, tmp_dir), shell=True)
    oldfile = pysam.Samfile(tmp_dir + '/filter_random.bam','rb')
    new_filename = os.path.abspath(tmp_dir + '/filter%d.bam' % max_hits)
    sorted_filename = os.path.abspath(tmp_dir + '/filter%d.sorted.bam' % max_hits)
    newfile = pysam.Samfile(new_filename, 'wb', template=oldfile)
    for aligned_read in oldfile:
        try:
            # Strictly-less-than: reads with max_hits or more hits are dropped.
            if aligned_read.opt("NH") < max_hits:
                newfile.write(aligned_read)
                if aligned_read.opt("NH")>1:
                    multiread_set.add(aligned_read.qname)
        except KeyError:
            # No NH tag present: keep the read (treated as uniquely mapped).
            newfile.write(aligned_read)
    oldfile.close()
    newfile.close()
    sort_cmd = 'samtools sort -T %s/ -o %s %s' % (tmp_dir, sorted_filename, new_filename)
    index_cmd = 'samtools index %s' % sorted_filename
    subprocess.call(sort_cmd, shell=True)
    subprocess.call(index_cmd, shell=True)
    # Remove the unsorted intermediates; only the sorted BAM is kept.
    subprocess.call('rm %s/filter_random.bam %s' % (tmp_dir, new_filename), shell=True)
    pickle.dump(multiread_set, open(tmp_dir + '/multiread_set.pdata', 'wb'), -1)
    return(sorted_filename, multiread_set)
def make_write_content(multi_reads_weights):
    """Render the re-assigned multi-read weights as BED-formatted text.

    Each (read, region) pair becomes one tab-separated line:
    chrom, genomic start, genomic end, read name, score, strand.
    """
    lines = []
    for read, node_scores in multi_reads_weights.items():
        for node, (score, rds, rde) in node_scores.items():
            chrom, strand, node_start, _node_end = node.split(':')
            # rds/rde are 1-based offsets within the region; map back to genome.
            read_start = int(node_start) + rds - 1
            read_end = read_start + (int(rde) - int(rds) + 1) - 1
            lines.append('\t'.join(
                [chrom, str(read_start), str(read_end), read, str(score), strand]) + '\n')
    return ''.join(lines)
# Script entry point: delegates to main() defined earlier in this module.
if __name__=='__main__':
    main()
|
Xinglab/CLAM
|
deprecated/CLAM.lite_aligner.py
|
Python
|
gpl-3.0
| 23,890
|
[
"pysam"
] |
0df89cef6b0866ead802db078d3d97bb364880809cad4025e68597839f2f3f30
|
# ----------------------------------------------------------------------------
# cocos2d
# Copyright (c) 2008-2012 Daniel Moisset, Ricardo Quesada, Rayentray Tappa,
# Lucio Torre
# Copyright (c) 2009-2015 Richard Jones, Claudio Canepa
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of cocos2d nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
"""Pre-defined Particle Systems"""
from __future__ import division, print_function, unicode_literals
__all__ = ['Fireworks', 'Spiral', 'Meteor', 'Sun', 'Fire', 'Galaxy', 'Flower', 'Explosion', 'Smoke']
from cocos.particle import ParticleSystem, Color
from cocos.euclid import Point2
class Fireworks(ParticleSystem):
    """Continuous upward spray of long-lived grey sparks pulled down by gravity."""

    # total particles
    total_particles = 3000
    # duration (-1: see ParticleSystem for semantics of unbounded emitters)
    duration = -1
    # gravity
    gravity = Point2(0, -90)
    # angle
    angle = 90
    angle_var = 20
    # radial
    radial_accel = 0
    radial_accel_var = 0
    # speed of particles
    speed = 180
    speed_var = 50
    # emitter variable position
    pos_var = Point2(0, 0)
    # life of particles
    life = 3.5
    life_var = 1
    # emits per frame
    emission_rate = total_particles / life
    # color of particles
    start_color = Color(0.5, 0.5, 0.5, 1.0)
    start_color_var = Color(0.5, 0.5, 0.5, 1.0)
    end_color = Color(0.1, 0.1, 0.1, 0.2)
    end_color_var = Color(0.1, 0.1, 0.1, 0.2)
    # size, in pixels
    size = 8.0
    size_var = 2.0
    # blend additive
    blend_additive = False
    # color modulate
    color_modulate = True
class Explosion(ParticleSystem):
    """One-shot burst: all particles emitted in all directions within 0.1 s."""

    # total particles
    total_particles = 700
    # duration
    duration = 0.1
    # gravity
    gravity = Point2(0, -90)
    # angle (360-degree variance: particles fly in every direction)
    angle = 90.0
    angle_var = 360.0
    # radial
    radial_accel = 0
    radial_accel_var = 0
    # speed of particles
    speed = 70.0
    speed_var = 40.0
    # emitter variable position
    pos_var = Point2(0, 0)
    # life of particles
    life = 5.0
    life_var = 2.0
    # emits per frame (whole budget spread over the short duration)
    emission_rate = total_particles / duration
    # color of particles
    start_color = Color(0.7, 0.2, 0.1, 1.0)
    start_color_var = Color(0.5, 0.5, 0.5, 0.0)
    end_color = Color(0.5, 0.5, 0.5, 0.0)
    end_color_var = Color(0.5, 0.5, 0.5, 0.0)
    # size, in pixels
    size = 15.0
    size_var = 10.0
    # blend additive
    blend_additive = False
    # color modulate
    color_modulate = True
class Fire(ParticleSystem):
    """Rising flame: large additive particles drifting up from a wide emitter."""

    # total particles
    total_particles = 250
    # duration
    duration = -1
    # gravity
    gravity = Point2(0, 0)
    # angle
    angle = 90.0
    angle_var = 10.0
    # radial
    radial_accel = 0
    radial_accel_var = 0
    # speed of particles
    speed = 60.0
    speed_var = 20.0
    # emitter variable position
    pos_var = Point2(40, 20)
    # life of particles
    life = 3.0
    life_var = 0.25
    # emits per frame
    emission_rate = total_particles / life
    # color of particles
    start_color = Color(0.76, 0.25, 0.12, 1.0)
    start_color_var = Color(0.0, 0.0, 0.0, 0.0)
    end_color = Color(0.0, 0.0, 0.0, 1.0)
    end_color_var = Color(0.0, 0.0, 0.0, 0.0)
    # size, in pixels
    size = 100.0
    size_var = 10.0
    # blend additive
    blend_additive = True
    # color modulate
    color_modulate = True
class Flower(ParticleSystem):
    """Petal-like swirl: omnidirectional particles with inward radial pull
    and a small tangential acceleration."""

    # total particles
    total_particles = 500
    # duration
    duration = -1
    # gravity
    gravity = Point2(0, 0)
    # angle
    angle = 90.0
    angle_var = 360.0
    # speed of particles
    speed = 80.0
    speed_var = 10.0
    # radial (negative: accelerates back toward the emitter)
    radial_accel = -60
    radial_accel_var = 0
    # tangential
    tangential_accel = 15.0
    tangential_accel_var = 0.0
    # emitter variable position
    pos_var = Point2(0, 0)
    # life of particles
    life = 4.0
    life_var = 1.0
    # emits per frame
    emission_rate = total_particles / life
    # color of particles
    start_color = Color(0.5, 0.5, 0.5, 1.0)
    start_color_var = Color(0.5, 0.5, 0.5, 0.0)
    end_color = Color(0.0, 0.0, 0.0, 1.0)
    end_color_var = Color(0.0, 0.0, 0.0, 0.0)
    # size, in pixels
    size = 30.0
    size_var = 0.0
    # blend additive
    blend_additive = True
    # color modulate
    color_modulate = True
class Sun(ParticleSystem):
    """Steady glow: short-lived orange particles emitted in all directions."""

    # total particles
    total_particles = 350
    # duration
    duration = -1
    # gravity
    gravity = Point2(0, 0)
    # angle
    angle = 90.0
    angle_var = 360.0
    # speed of particles
    speed = 20.0
    speed_var = 5.0
    # radial
    radial_accel = 0
    radial_accel_var = 0
    # tangential
    tangential_accel = 0.0
    tangential_accel_var = 0.0
    # emitter variable position
    pos_var = Point2(0, 0)
    # life of particles
    life = 1.0
    life_var = 0.5
    # emits per frame
    emission_rate = total_particles / life
    # color of particles
    start_color = Color(0.75, 0.25, 0.12, 1.0)
    start_color_var = Color(0.0, 0.0, 0.0, 0.0)
    end_color = Color(0.0, 0.0, 0.0, 0.0)
    end_color_var = Color(0.0, 0.0, 0.0, 0.0)
    # size, in pixels
    size = 40.0
    size_var = 0.0  # normalized from the confusing literal 00.0 (same value)
    # blend additive
    blend_additive = True
    # color modulate
    color_modulate = True
class Spiral(ParticleSystem):
    """Long-lived particles orbiting the emitter: strong inward radial pull
    balanced by tangential acceleration."""

    # total particles
    total_particles = 500
    # duration
    duration = -1
    # gravity
    gravity = Point2(0, 0)
    # angle
    angle = 90.0
    angle_var = 0.0
    # speed of particles
    speed = 150.0
    speed_var = 0.0
    # radial (negative: accelerates back toward the emitter)
    radial_accel = -380
    radial_accel_var = 0
    # tangential
    tangential_accel = 45.0
    tangential_accel_var = 0.0
    # emitter variable position
    pos_var = Point2(0, 0)
    # life of particles
    life = 12.0
    life_var = 0.0
    # emits per frame
    emission_rate = total_particles / life
    # color of particles
    start_color = Color(0.5, 0.5, 0.5, 1.0)
    start_color_var = Color(0.5, 0.5, 0.5, 0.0)
    end_color = Color(0.5, 0.5, 0.5, 1.0)
    end_color_var = Color(0.5, 0.5, 0.5, 0.0)
    # size, in pixels
    size = 20.0
    size_var = 10.0
    # blend additive
    blend_additive = True
    # color modulate
    color_modulate = True
class Meteor(ParticleSystem):
    """Teal additive particles swept up-and-left by the gravity vector."""

    # total particles
    total_particles = 150
    # duration
    duration = -1
    # gravity (pushes the tail toward the upper-left)
    gravity = Point2(-200, 100)
    # angle
    angle = 90.0
    angle_var = 360.0
    # speed of particles
    speed = 15.0
    speed_var = 5.0
    # radial
    radial_accel = 0
    radial_accel_var = 0
    # tangential
    tangential_accel = 0.0
    tangential_accel_var = 0.0
    # emitter variable position
    pos_var = Point2(0, 0)
    # life of particles
    life = 2.0
    life_var = 1.0
    # size, in pixels
    size = 60.0
    size_var = 10.0
    # emits per frame
    emission_rate = total_particles / life
    # color of particles
    start_color = Color(0.2, 0.7, 0.7, 1.0)
    start_color_var = Color(0.0, 0.0, 0.0, 0.2)
    end_color = Color(0.0, 0.0, 0.0, 1.0)
    end_color_var = Color(0.0, 0.0, 0.0, 0.0)
    # blend additive
    blend_additive = True
    # color modulate
    color_modulate = True
class Galaxy(ParticleSystem):
    """Blue swirl: inward radial acceleration balanced by tangential motion."""

    # total particles
    total_particles = 200
    # duration
    duration = -1
    # gravity
    gravity = Point2(0, 0)
    # angle
    angle = 90.0
    angle_var = 360.0
    # speed of particles
    speed = 60.0
    speed_var = 10.0
    # radial (negative: accelerates back toward the emitter)
    radial_accel = -80.0
    radial_accel_var = 0
    # tangential
    tangential_accel = 80.0
    tangential_accel_var = 0.0
    # emitter variable position
    pos_var = Point2(0, 0)
    # life of particles
    life = 4.0
    life_var = 1.0
    # size, in pixels
    size = 37.0
    size_var = 10.0
    # emits per frame
    emission_rate = total_particles / life
    # color of particles
    start_color = Color(0.12, 0.25, 0.76, 1.0)
    start_color_var = Color(0.0, 0.0, 0.0, 0.0)
    end_color = Color(0.0, 0.0, 0.0, 0.0)
    end_color_var = Color(0.0, 0.0, 0.0, 0.0)
    # blend additive
    blend_additive = True
    # color modulate
    color_modulate = True
class Smoke(ParticleSystem):
    """Slow upward drift of large, nearly transparent grey particles."""

    # total particles
    total_particles = 80
    # duration
    duration = -1
    # gravity
    gravity = Point2(0, 0)
    # angle
    angle = 90.0
    angle_var = 10.0
    # speed of particles
    speed = 25.0
    speed_var = 10.0
    # radial
    radial_accel = 5
    radial_accel_var = 0
    # tangential
    tangential_accel = 0.0
    tangential_accel_var = 0.0
    # emitter variable position
    pos_var = Point2(0.1, 0)
    # life of particles
    life = 4.0
    life_var = 1.0
    # size, in pixels
    size = 40.0
    size_var = 10.0
    # emits per frame
    emission_rate = total_particles / life
    # color of particles (alpha 0.1: faint, smoky look)
    start_color = Color(0.5, 0.5, 0.5, 0.1)
    start_color_var = Color(0, 0, 0, 0.1)
    end_color = Color(0.5, 0.5, 0.5, 0.1)
    end_color_var = Color(0, 0, 0, 0.1)
    # blend additive
    blend_additive = True
    # color modulate
    color_modulate = False
|
vyscond/cocos
|
cocos/particle_systems.py
|
Python
|
bsd-3-clause
| 10,473
|
[
"Galaxy"
] |
4f3f1f3df4b23c9a328c8a0b3acdf5724d5363722a5fc6394bd9a9faf253bef5
|
#!/usr/bin/env python
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#

'''
Largest CI coefficients
'''

from pyscf import gto, scf, mcscf, fci

mol = gto.Mole()
mol.build(
    verbose = 0,
    atom = 'N, 0., 0., 0. ; N, 0., 0., 1.4',
    basis = 'cc-pvdz',
    symmetry = True,
)
m = scf.RHF(mol)
m.kernel()

# CASSCF(6,6) active space for the N2 triple bond.
ncas = 6
nelec = 6
# Use the named active-space variables instead of repeating the literals 6, 6.
mc = mcscf.CASSCF(m, ncas, nelec)
mc.kernel()

# Output all determinant coefficients.
print(' det-alpha, det-beta, CI coefficients')
occslst = fci.cistring._gen_occslst(range(ncas), nelec//2)
for i,occsa in enumerate(occslst):
    for j,occsb in enumerate(occslst):
        print(' %s %s %.12f' % (occsa, occsb, mc.ci[i,j]))

# Only output determinants which have coefficients > 0.05
nelec = (3,3)  # 3 spin-up electrons and 3 spin-down electrons
print(' det-alpha, det-beta, CI coefficients')
for c,ia,ib in mc.fcisolver.large_ci(mc.ci, ncas, nelec, tol=.05, return_strs=False):
    print(' %s %s %.12f' % (ia, ib, c))
|
gkc1000/pyscf
|
examples/fci/11-large_ci.py
|
Python
|
apache-2.0
| 984
|
[
"PySCF"
] |
6d124bc5dc39892642be08b17ed05356a468e23f8af6a7e9e996aac2dbfa2a05
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for jax_dft.utils."""
from absl.testing import absltest
from absl.testing import parameterized
from jax.config import config
import jax.numpy as jnp
import numpy as np
from jax_dft import constants
from jax_dft import utils
# Set the default dtype as float64
config.update('jax_enable_x64', True)
class UtilsTest(parameterized.TestCase):
  @parameterized.parameters(
      (2, [4., 1., 0., 0.]),
      (-2, [0., 0., 1., 2.]),
  )
  def test_shift(self, offset, expected_output):
    """Shifting by +/-offset pads the vacated positions with zeros."""
    np.testing.assert_allclose(
        utils.shift(jnp.array([1., 2., 4., 1.]), offset=offset),
        expected_output)
  def test_get_dx(self):
    """get_dx returns the spacing of a uniform grid."""
    self.assertAlmostEqual(utils.get_dx(jnp.linspace(0, 1, 11)), 0.1)
  def test_get_dx_incorrect_ndim(self):
    """get_dx rejects non-1d grids with a descriptive ValueError."""
    with self.assertRaisesRegex(
        ValueError, 'grids.ndim is expected to be 1 but got 2'):
      utils.get_dx(jnp.array([[-0.1], [0.], [0.1]]))
  @parameterized.parameters(
      (0., 2., 1 / (np.sqrt(2 * np.pi) * 2)),
      (3., 0.5, 1 / (np.sqrt(2 * np.pi) * 0.5)),
  )
  def test_gaussian(self, center, sigma, expected_max_value):
    """Sampled Gaussian integrates to ~1 and peaks at 1/(sqrt(2*pi)*sigma)."""
    gaussian = utils.gaussian(
        grids=jnp.linspace(-10, 10, 201), center=center, sigma=sigma)
    # dx = 0.1 on this grid, so sum * 0.1 approximates the integral.
    self.assertAlmostEqual(float(jnp.sum(gaussian) * 0.1), 1, places=5)
    self.assertAlmostEqual(float(jnp.amax(gaussian)), expected_max_value)
  @parameterized.parameters(-1., 0., 1.)
  def test_soft_coulomb(self, center):
    """Soft-Coulomb interaction peaks with value 1 exactly at the center."""
    grids = jnp.linspace(-10, 10, 201)
    soft_coulomb_interaction = utils.soft_coulomb(grids - center)
    self.assertAlmostEqual(float(jnp.amax(soft_coulomb_interaction)), 1)
    self.assertAlmostEqual(
        float(grids[jnp.argmax(soft_coulomb_interaction)]), center)
  @parameterized.parameters(-1., 0., 1.)
  def test_exponential_coulomb(self, center):
    """Exponential-Coulomb peaks at the amplitude constant, at the center."""
    grids = jnp.linspace(-10, 10, 201)
    soft_coulomb_interaction = utils.exponential_coulomb(grids - center)
    self.assertAlmostEqual(
        float(jnp.amax(soft_coulomb_interaction)),
        constants.EXPONENTIAL_COULOMB_AMPLITUDE)
    self.assertAlmostEqual(
        float(grids[jnp.argmax(soft_coulomb_interaction)]), center)
  def test_get_atomic_chain_potential_soft_coulomb(self):
    """Spot-checks the soft-Coulomb chain potential at both edges and center."""
    potential = utils.get_atomic_chain_potential(
        grids=jnp.linspace(-10, 10, 201),
        locations=jnp.array([0., 1.]),
        nuclear_charges=jnp.array([2, 1]),
        interaction_fn=utils.soft_coulomb)
    # -2 / jnp.sqrt(10 ** 2 + 1) - 1 / jnp.sqrt(11 ** 2 + 1) = -0.28954318
    self.assertAlmostEqual(float(potential[0]), -0.28954318)
    # -2 / jnp.sqrt(0 ** 2 + 1) - 1 / jnp.sqrt(1 ** 2 + 1) = -2.70710678
    self.assertAlmostEqual(float(potential[100]), -2.70710678)
    # -2 / jnp.sqrt(10 ** 2 + 1) - 1 / jnp.sqrt(9 ** 2 + 1) = -0.30943896
    self.assertAlmostEqual(float(potential[200]), -0.30943896)
  def test_get_atomic_chain_potential_exponential_coulomb(self):
    """Spot-checks the exponential-Coulomb chain potential at three points."""
    potential = utils.get_atomic_chain_potential(
        grids=jnp.linspace(-10, 10, 201),
        locations=jnp.array([0., 1.]),
        nuclear_charges=jnp.array([2, 1]),
        interaction_fn=utils.exponential_coulomb)
    # -2 * 1.071295 * jnp.exp(-np.abs(10) / 2.385345) - 1.071295 * jnp.exp(
    #     -np.abs(11) / 2.385345) = -0.04302427
    self.assertAlmostEqual(float(potential[0]), -0.04302427)
    # -2 * 1.071295 * jnp.exp(-np.abs(0) / 2.385345) - 1.071295 * jnp.exp(
    #     -np.abs(1) / 2.385345) = -2.84702559
    self.assertAlmostEqual(float(potential[100]), -2.84702559)
    # -2 * 1.071295 * jnp.exp(-np.abs(10) / 2.385345) - 1.071295 * jnp.exp(
    #     -np.abs(9) / 2.385345) = -0.05699946
    self.assertAlmostEqual(float(potential[200]), -0.05699946)
  @parameterized.parameters(
      ([[-0.1], [0.], [0.1]], [1, 3], [1, 2],
       'grids.ndim is expected to be 1 but got 2'),
      ([-0.1, 0., 0.1], [[1], [3]], [1, 2],
       'locations.ndim is expected to be 1 but got 2'),
      ([-0.1, 0., 0.1], [1, 3], [[1], [2]],
       'nuclear_charges.ndim is expected to be 1 but got 2'),
  )
  def test_get_atomic_chain_potential_incorrect_ndim(
      self, grids, locations, nuclear_charges, expected_message):
    """Each non-1d input triggers its own descriptive ValueError."""
    with self.assertRaisesRegex(ValueError, expected_message):
      utils.get_atomic_chain_potential(
          grids=jnp.array(grids),
          locations=jnp.array(locations),
          nuclear_charges=jnp.array(nuclear_charges),
          interaction_fn=utils.exponential_coulomb)
@parameterized.parameters(
# One pair of soft coulomb interaction.
# 1 * 2 / jnp.sqrt((1 - 3) ** 2 + 1)
([1, 3], [1, 2], utils.soft_coulomb, 0.89442719),
# Three pairs of soft coulomb interaction.
# 1 * 1 / jnp.sqrt((1 + 2) ** 2 + 1)
# + 1 * 2 / jnp.sqrt((3 + 2) ** 2 + 1)
# + 1 * 2 / jnp.sqrt((3 - 1) ** 2 + 1)
([-2, 1, 3], [1, 1, 2], utils.soft_coulomb, 1.602887227),
# One pair of exponential interaction.
# 1 * 2 * 1.071295 * jnp.exp(-np.abs(1 - 3) / 2.385345)
([1, 3], [1, 2], utils.exponential_coulomb, 0.92641057),
# Three pairs of exponential interaction.
# 1 * 1 * 1.071295 * jnp.exp(-np.abs(1 + 2) / 2.385345)
# + 1 * 2 * 1.071295 * jnp.exp(-np.abs(3 + 2) / 2.385345)
# + 1 * 2 * 1.071295 * jnp.exp(-np.abs(3 - 1) / 2.385345)
([-2, 1, 3], [1, 1, 2], utils.exponential_coulomb, 1.49438414),
)
def test_get_nuclear_interaction_energy(
self, locations, nuclear_charges, interaction_fn, ecpected_energy):
self.assertAlmostEqual(
float(utils.get_nuclear_interaction_energy(
locations=jnp.array(locations),
nuclear_charges=jnp.array(nuclear_charges),
interaction_fn=interaction_fn)),
ecpected_energy)
@parameterized.parameters(
([[1, 3], [0, 0]], [[1, 2], [1, 1]],
utils.soft_coulomb, [0.89442719, 1.]),
([[1, 3], [0, 0]], [[1, 2], [1, 1]],
utils.exponential_coulomb, [0.92641057, 1.071295]),
)
def test_get_nuclear_interaction_energy_batch(
self, locations, nuclear_charges, interaction_fn, ecpected_energies):
np.testing.assert_allclose(
utils.get_nuclear_interaction_energy_batch(
locations=jnp.array(locations),
nuclear_charges=jnp.array(nuclear_charges),
interaction_fn=interaction_fn),
ecpected_energies)
  @parameterized.parameters(
      ([[1], [3]], [1, 2], 'locations.ndim is expected to be 1 but got 2'),
      ([1, 3], [[1], [2]],
       'nuclear_charges.ndim is expected to be 1 but got 2'),
  )
  def test_get_nuclear_interaction_energy_incorrect_ndim(
      self, locations, nuclear_charges, expected_message):
    """Non-1d locations or charges raise a descriptive ValueError."""
    with self.assertRaisesRegex(ValueError, expected_message):
      utils.get_nuclear_interaction_energy(
          locations=jnp.array(locations),
          nuclear_charges=jnp.array(nuclear_charges),
          interaction_fn=utils.exponential_coulomb)
  @parameterized.parameters(-0.1, 0.0, 0.1, 0.2, 0.3)
  def test_float_value_in_array_true(self, value):
    """Values present in the array are detected despite float representation."""
    self.assertTrue(utils._float_value_in_array(
        value, array=jnp.array([-0.1, 0.0, 0.1, 0.2, 0.3])))
  @parameterized.parameters(-0.15, 0.05, 0.12)
  def test_float_value_in_array_false(self, value):
    """Values absent from the array are reported as not contained."""
    self.assertFalse(utils._float_value_in_array(
        value, array=jnp.array([-0.1, 0.0, 0.1, 0.2, 0.3])))
  def test_flip_and_average_the_front_of_array_center_on_grids(self):
    """Symmetrizes the front of the array when the center lies on a grid point."""
    np.testing.assert_allclose(
        utils.flip_and_average(
            locations=jnp.array([-0.1, 0.3]),
            grids=jnp.array([-0.2, -0.1, 0., 0.1, 0.2, 0.3, 0.4, 0.5, 0.6]),
            array=jnp.array([0.1, 0.2, 0.6, 0.7, 0.2, 0.3, 0.5, 0.1, 0.8])),
        # The center is 0.1, which is the grid point with index 3.
        # The array on the grids [-0.2, -0.1, 0., 0.1, 0.2, 0.3, 0.4]
        # are flipped:
        # [0.1, 0.2, 0.6, 0.7, 0.2, 0.3, 0.5]
        # -> [0.5, 0.3, 0.2, 0.7, 0.6, 0.2, 0.1]
        # The averaged array is
        # [0.3, 0.25, 0.4, 0.7, 0.4, 0.25, 0.3]
        # Replace the corresponding range (slice(0, 7)) in the original array:
        # [0.1, 0.2, 0.6, 0.7, 0.2, 0.3, 0.5, 0.1, 0.8]
        # -> [0.3, 0.25, 0.4, 0.7, 0.4, 0.25, 0.3, 0.1, 0.8]
        [0.3, 0.25, 0.4, 0.7, 0.4, 0.25, 0.3, 0.1, 0.8])
  def test_flip_and_average_the_back_of_array_center_on_grids(self):
    """Symmetrizes the back of the array when the center lies on a grid point."""
    np.testing.assert_allclose(
        utils.flip_and_average(
            locations=jnp.array([0.4, 0.6]),
            grids=jnp.array([-0.2, -0.1, 0., 0.1, 0.2, 0.3, 0.4, 0.5, 0.6]),
            array=jnp.array([0.1, 0.2, 0.6, 0.7, 0.2, 0.3, 0.5, 0.1, 0.8])),
        # The center is 0.5, which is the grid point with index 7.
        # The array on the grids [0.4, 0.5, 0.6] are flipped:
        # [0.5, 0.1, 0.8]
        # -> [0.8, 0.1, 0.5]
        # The averaged array is
        # [0.65, 0.1, 0.65]
        # Replace the corresponding range (slice(6, 9)) in the original array:
        # [0.1, 0.2, 0.6, 0.7, 0.2, 0.3, 0.5, 0.1, 0.8]
        # -> [0.1, 0.2, 0.6, 0.7, 0.2, 0.3, 0.65, 0.1, 0.65]
        [0.1, 0.2, 0.6, 0.7, 0.2, 0.3, 0.65, 0.1, 0.65])
def test_flip_and_average_the_front_of_array_center_not_on_grids(self):
  """Flip-and-average about a center that falls between grid points (front)."""
  # Center of the locations is 0.05, between grid indices 2 and 3.  The first
  # six elements [0.1, 0.2, 0.6, 0.7, 0.2, 0.3] are averaged with their
  # reversal, giving [0.2, 0.2, 0.65, 0.65, 0.2, 0.2]; the tail is untouched.
  expected = [0.2, 0.2, 0.65, 0.65, 0.2, 0.2, 0.5, 0.1, 0.8]
  actual = utils.flip_and_average(
      locations=jnp.array([-0.1, 0.2]),
      grids=jnp.array([-0.2, -0.1, 0., 0.1, 0.2, 0.3, 0.4, 0.5, 0.6]),
      array=jnp.array([0.1, 0.2, 0.6, 0.7, 0.2, 0.3, 0.5, 0.1, 0.8]))
  np.testing.assert_allclose(actual, expected)
def test_flip_and_average_the_back_of_array_center_not_on_grids(self):
  """Flip-and-average about a center that falls between grid points (back)."""
  # Center of the locations is 0.45, between grid indices 6 and 7.  The last
  # four elements [0.3, 0.5, 0.1, 0.8] are averaged with their reversal
  # [0.8, 0.1, 0.5, 0.3], giving [0.55, 0.3, 0.3, 0.55]; head untouched.
  expected = [0.1, 0.2, 0.6, 0.7, 0.2, 0.55, 0.3, 0.3, 0.55]
  actual = utils.flip_and_average(
      locations=jnp.array([0.4, 0.5]),
      grids=jnp.array([-0.2, -0.1, 0., 0.1, 0.2, 0.3, 0.4, 0.5, 0.6]),
      array=jnp.array([0.1, 0.2, 0.6, 0.7, 0.2, 0.3, 0.5, 0.1, 0.8]))
  np.testing.assert_allclose(actual, expected)
def test_flip_and_average_location_not_on_grids(self):
  """A location that is not a grid point raises a descriptive ValueError."""
  with self.assertRaisesRegex(
      ValueError, r'Location 0\.25 is not on the grids'):
    utils.flip_and_average(
        # 0.25 does not appear in grids below.
        locations=jnp.array([0.0, 0.25]),
        grids=jnp.array([-0.1, 0.0, 0.1, 0.2, 0.3]),
        # The array contents are irrelevant for this check.
        array=jnp.array([0.1, 0.2, 0.6, 0.7, 0.2]))
def test_location_center_at_grids_center_point_true(self):
  """Locations whose midpoint equals the middle grid point are accepted."""
  grids = jnp.array([-0.4, -0.2, 0., 0.2, 0.4])
  self.assertTrue(
      utils.location_center_at_grids_center_point(
          locations=jnp.array([-0.5, 0.5]), grids=grids))
def test_location_center_at_grids_center_point_false(self):
  """Off-center locations or even-sized grids are rejected."""
  # Midpoint of the locations (0.05) differs from the grid center point.
  self.assertFalse(
      utils.location_center_at_grids_center_point(
          locations=jnp.array([-0.5, 0.6]),
          grids=jnp.array([-0.4, -0.2, 0., 0.2, 0.4])))
  # An even number of grid points has no single center point at all.
  self.assertFalse(
      utils.location_center_at_grids_center_point(
          locations=jnp.array([-0.5, 0.5]),
          grids=jnp.array([-0.4, -0.2, 0., 0.2])))
def test_compute_distances_between_nuclei(self):
  """Per-row distance between the selected pair of nuclei."""
  locations = np.array([
      [-1., 1., 3.5, 5.],
      [-4., 0., 3.5, 10.],
      [-2., -1., 3.5, 55.],
  ])
  np.testing.assert_allclose(
      utils.compute_distances_between_nuclei(
          locations=locations, nuclei_indices=(1, 2)),
      [2.5, 3.5, 4.5])
def test_compute_distances_between_nuclei_wrong_locations_ndim(self):
  """A 3d locations array is rejected with a clear error message."""
  bad_locations = np.array([  # ndim == 3; the values themselves are unused.
      [[-1.], [1.], [3.5], [5.]],
      [[-4.], [0.], [3.5], [10.]],
      [[-2.], [-1.], [3.5], [55.]],
  ])
  with self.assertRaisesRegex(
      ValueError, 'The ndim of locations is expected to be 2 but got 3'):
    utils.compute_distances_between_nuclei(
        locations=bad_locations,
        # Unused in this test.
        nuclei_indices=(1, 2))
def test_compute_distances_between_nuclei_wrong_nuclei_indices_size(self):
  """nuclei_indices must contain exactly two indices."""
  locations = np.array([  # Values are irrelevant; only the shape check runs.
      [-1., 1., 3.5, 5.],
      [-4., 0., 3.5, 10.],
      [-2., -1., 3.5, 55.],
  ])
  with self.assertRaisesRegex(
      ValueError, 'The size of nuclei_indices is expected to be 2 but got 4'):
    utils.compute_distances_between_nuclei(
        locations=locations,
        # Deliberately the wrong length.
        nuclei_indices=(1, 2, 3, 4))
# Allow running this test module directly as a script.
if __name__ == '__main__':
  absltest.main()
|
google-research/google-research
|
jax_dft/jax_dft/utils_test.py
|
Python
|
apache-2.0
| 14,150
|
[
"Gaussian"
] |
28cb9dfc973ca3119e68e10eb4235496266c58206f345dc7173a22d05fb1ebc1
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Brian Coca <bcoca@ansible.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
module: systemd
author:
- Ansible Core Team
version_added: "2.2"
short_description: Manage services
description:
- Controls systemd services on remote hosts.
options:
name:
description:
- Name of the service. This parameter takes the name of exactly one service to work with.
- When using in a chroot environment you always need to specify the full name i.e. (crond.service).
aliases: [ service, unit ]
state:
description:
- C(started)/C(stopped) are idempotent actions that will not run commands unless necessary.
C(restarted) will always bounce the service. C(reloaded) will always reload.
choices: [ reloaded, restarted, started, stopped ]
enabled:
description:
- Whether the service should start on boot. B(At least one of state and enabled are required.)
type: bool
force:
description:
- Whether to override existing symlinks.
type: bool
version_added: 2.6
masked:
description:
- Whether the unit should be masked or not, a masked unit is impossible to start.
type: bool
daemon_reload:
description:
- Run daemon-reload before doing any other operations, to make sure systemd has read any changes.
- When set to C(yes), runs daemon-reload even if the module does not start or stop anything.
type: bool
default: no
aliases: [ daemon-reload ]
daemon_reexec:
description:
- Run daemon_reexec command before doing any other operations, the systemd manager will serialize the manager state.
type: bool
default: no
aliases: [ daemon-reexec ]
version_added: "2.8"
user:
description:
- (deprecated) run ``systemctl`` talking to the service manager of the calling user, rather than the service manager
of the system.
- This option is deprecated and will eventually be removed in 2.11. The ``scope`` option should be used instead.
type: bool
default: no
scope:
description:
- run systemctl within a given service manager scope, either as the default system scope (system),
the current user's scope (user), or the scope of all users (global).
- "For systemd to work with 'user', the executing user must have its own instance of dbus started (systemd requirement).
The user dbus process is normally started during normal login, but not during the run of Ansible tasks.
Otherwise you will probably get a 'Failed to connect to bus: no such file or directory' error."
choices: [ system, user, global ]
default: system
version_added: "2.7"
no_block:
description:
- Do not synchronously wait for the requested operation to finish.
Enqueued job will continue without Ansible blocking on its completion.
type: bool
default: no
version_added: "2.3"
notes:
- Since 2.4, one of the following options is required 'state', 'enabled', 'masked', 'daemon_reload', ('daemon_reexec' since 2.8),
and all except 'daemon_reload' (and 'daemon_reexec' since 2.8) also require 'name'.
- Before 2.4 you always required 'name'.
- Globs are not supported in name, i.e ``postgres*.service``.
requirements:
- A system managed by systemd.
'''
EXAMPLES = '''
- name: Make sure a service is running
systemd:
state: started
name: httpd
- name: Stop service cron on debian, if running
systemd:
name: cron
state: stopped
- name: Restart service cron on centos, in all cases, also issue daemon-reload to pick up config changes
systemd:
state: restarted
daemon_reload: yes
name: crond
- name: Reload service httpd, in all cases
systemd:
name: httpd
state: reloaded
- name: Enable service httpd and ensure it is not masked
systemd:
name: httpd
enabled: yes
masked: no
- name: Enable a timer for dnf-automatic
systemd:
name: dnf-automatic.timer
state: started
enabled: yes
- name: Just force systemd to reread configs (2.4 and above)
systemd:
daemon_reload: yes
- name: Just force systemd to re-execute itself (2.8 and above)
systemd:
daemon_reexec: yes
'''
RETURN = '''
status:
description: A dictionary with the key=value pairs returned from `systemctl show`
returned: success
type: complex
sample: {
"ActiveEnterTimestamp": "Sun 2016-05-15 18:28:49 EDT",
"ActiveEnterTimestampMonotonic": "8135942",
"ActiveExitTimestampMonotonic": "0",
"ActiveState": "active",
"After": "auditd.service systemd-user-sessions.service time-sync.target systemd-journald.socket basic.target system.slice",
"AllowIsolate": "no",
"Before": "shutdown.target multi-user.target",
"BlockIOAccounting": "no",
"BlockIOWeight": "1000",
"CPUAccounting": "no",
"CPUSchedulingPolicy": "0",
"CPUSchedulingPriority": "0",
"CPUSchedulingResetOnFork": "no",
"CPUShares": "1024",
"CanIsolate": "no",
"CanReload": "yes",
"CanStart": "yes",
"CanStop": "yes",
"CapabilityBoundingSet": "18446744073709551615",
"ConditionResult": "yes",
"ConditionTimestamp": "Sun 2016-05-15 18:28:49 EDT",
"ConditionTimestampMonotonic": "7902742",
"Conflicts": "shutdown.target",
"ControlGroup": "/system.slice/crond.service",
"ControlPID": "0",
"DefaultDependencies": "yes",
"Delegate": "no",
"Description": "Command Scheduler",
"DevicePolicy": "auto",
"EnvironmentFile": "/etc/sysconfig/crond (ignore_errors=no)",
"ExecMainCode": "0",
"ExecMainExitTimestampMonotonic": "0",
"ExecMainPID": "595",
"ExecMainStartTimestamp": "Sun 2016-05-15 18:28:49 EDT",
"ExecMainStartTimestampMonotonic": "8134990",
"ExecMainStatus": "0",
"ExecReload": "{ path=/bin/kill ; argv[]=/bin/kill -HUP $MAINPID ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }",
"ExecStart": "{ path=/usr/sbin/crond ; argv[]=/usr/sbin/crond -n $CRONDARGS ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }",
"FragmentPath": "/usr/lib/systemd/system/crond.service",
"GuessMainPID": "yes",
"IOScheduling": "0",
"Id": "crond.service",
"IgnoreOnIsolate": "no",
"IgnoreOnSnapshot": "no",
"IgnoreSIGPIPE": "yes",
"InactiveEnterTimestampMonotonic": "0",
"InactiveExitTimestamp": "Sun 2016-05-15 18:28:49 EDT",
"InactiveExitTimestampMonotonic": "8135942",
"JobTimeoutUSec": "0",
"KillMode": "process",
"KillSignal": "15",
"LimitAS": "18446744073709551615",
"LimitCORE": "18446744073709551615",
"LimitCPU": "18446744073709551615",
"LimitDATA": "18446744073709551615",
"LimitFSIZE": "18446744073709551615",
"LimitLOCKS": "18446744073709551615",
"LimitMEMLOCK": "65536",
"LimitMSGQUEUE": "819200",
"LimitNICE": "0",
"LimitNOFILE": "4096",
"LimitNPROC": "3902",
"LimitRSS": "18446744073709551615",
"LimitRTPRIO": "0",
"LimitRTTIME": "18446744073709551615",
"LimitSIGPENDING": "3902",
"LimitSTACK": "18446744073709551615",
"LoadState": "loaded",
"MainPID": "595",
"MemoryAccounting": "no",
"MemoryLimit": "18446744073709551615",
"MountFlags": "0",
"Names": "crond.service",
"NeedDaemonReload": "no",
"Nice": "0",
"NoNewPrivileges": "no",
"NonBlocking": "no",
"NotifyAccess": "none",
"OOMScoreAdjust": "0",
"OnFailureIsolate": "no",
"PermissionsStartOnly": "no",
"PrivateNetwork": "no",
"PrivateTmp": "no",
"RefuseManualStart": "no",
"RefuseManualStop": "no",
"RemainAfterExit": "no",
"Requires": "basic.target",
"Restart": "no",
"RestartUSec": "100ms",
"Result": "success",
"RootDirectoryStartOnly": "no",
"SameProcessGroup": "no",
"SecureBits": "0",
"SendSIGHUP": "no",
"SendSIGKILL": "yes",
"Slice": "system.slice",
"StandardError": "inherit",
"StandardInput": "null",
"StandardOutput": "journal",
"StartLimitAction": "none",
"StartLimitBurst": "5",
"StartLimitInterval": "10000000",
"StatusErrno": "0",
"StopWhenUnneeded": "no",
"SubState": "running",
"SyslogLevelPrefix": "yes",
"SyslogPriority": "30",
"TTYReset": "no",
"TTYVHangup": "no",
"TTYVTDisallocate": "no",
"TimeoutStartUSec": "1min 30s",
"TimeoutStopUSec": "1min 30s",
"TimerSlackNSec": "50000",
"Transient": "no",
"Type": "simple",
"UMask": "0022",
"UnitFileState": "enabled",
"WantedBy": "multi-user.target",
"Wants": "system.slice",
"WatchdogTimestampMonotonic": "0",
"WatchdogUSec": "0",
}
''' # NOQA
import os
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.facts.system.chroot import is_chroot
from ansible.module_utils.service import sysv_exists, sysv_is_enabled, fail_if_missing
from ansible.module_utils._text import to_native
def is_running_service(service_status):
    """Return True when the unit is active or still in the process of activating."""
    return service_status['ActiveState'] in ('active', 'activating')
def is_deactivating_service(service_status):
    """Return True when the unit is currently deactivating (stopping)."""
    return service_status['ActiveState'] == 'deactivating'
def request_was_ignored(out):
    """Detect systemctl printing an 'ignoring request' notice instead of key=value output."""
    if '=' in out:
        # Normal `systemctl show` output always contains key=value pairs.
        return False
    return 'ignoring request' in out
def parse_systemctl_show(lines):
    """Parse `systemctl show` output lines into a dict.

    Values can span multiple lines.  Multi-line values appear to always be
    wrapped in {}, but a single-line value may *start* with '{' without ending
    in '}' (e.g. in Description=), so treating every such value as multi-line
    would swallow the rest of the output and make the unit look missing.
    Therefore only keys starting with 'Exec' (e.g. ExecStart=) — the only keys
    known to produce multi-line values — are allowed to continue across lines.

    :param lines: iterable of output lines from `systemctl show`.
    :return: dict mapping property names to their (stripped) values.
    """
    parsed = {}
    pending_key = None   # Exec* key whose multi-line value is being collected.
    pending_lines = []
    for line in lines:
        if pending_key is not None:
            # Accumulate until a line ending in '}' closes the value.
            pending_lines.append(line)
            if line.rstrip().endswith('}'):
                parsed[pending_key] = '\n'.join(pending_lines).strip()
                pending_key = None
                pending_lines = []
            continue
        if '=' not in line:
            # Not a property line; ignore.
            continue
        key, value = line.split('=', 1)
        if (key.startswith('Exec') and value.lstrip().startswith('{')
                and not value.rstrip().endswith('}')):
            # Start of a multi-line Exec* value.
            pending_key = key
            pending_lines = [value]
            continue
        parsed[key] = value.strip()
    return parsed
# ===========================================
# Main control flow
def main():
    """Module entry point: parse arguments and drive systemctl accordingly.

    Order of operations: daemon-reload / daemon-reexec first, then — for a
    named unit — discovery via ``systemctl show`` (with fallbacks), masking,
    boot-time enablement, and finally run-state changes.  Terminates through
    ``module.exit_json`` on success or ``module.fail_json`` on error.
    """
    # initialize
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str', aliases=['service', 'unit']),
            state=dict(type='str', choices=['reloaded', 'restarted', 'started', 'stopped']),
            enabled=dict(type='bool'),
            force=dict(type='bool'),
            masked=dict(type='bool'),
            daemon_reload=dict(type='bool', default=False, aliases=['daemon-reload']),
            daemon_reexec=dict(type='bool', default=False, aliases=['daemon-reexec']),
            user=dict(type='bool'),
            scope=dict(type='str', default='system', choices=['system', 'user', 'global']),
            no_block=dict(type='bool', default=False),
        ),
        supports_check_mode=True,
        required_one_of=[['state', 'enabled', 'masked', 'daemon_reload', 'daemon_reexec']],
        required_by=dict(
            state=('name', ),
            enabled=('name', ),
            masked=('name', ),
        ),
        mutually_exclusive=[['scope', 'user']],
    )
    unit = module.params['name']
    if unit is not None:
        # Glob patterns are explicitly unsupported; fail early with a clear message.
        for globpattern in (r"*", r"?", r"["):
            if globpattern in unit:
                module.fail_json(msg="This module does not currently support using glob patterns, found '%s' in service name: %s" % (globpattern, unit))
    systemctl = module.get_bin_path('systemctl', True)
    # systemctl --user needs XDG_RUNTIME_DIR; provide a default when the
    # environment (e.g. a non-login Ansible session) did not set one.
    if os.getenv('XDG_RUNTIME_DIR') is None:
        os.environ['XDG_RUNTIME_DIR'] = '/run/user/%s' % os.geteuid()
    ''' Set CLI options depending on params '''
    if module.params['user'] is not None:
        # handle user deprecation, mutually exclusive with scope
        module.deprecate("The 'user' option is being replaced by 'scope'", version='2.11', collection_name='ansible.builtin')
        if module.params['user']:
            module.params['scope'] = 'user'
        else:
            module.params['scope'] = 'system'
    # if scope is 'system' or None, we can ignore as there is no extra switch.
    # The other choices match the corresponding switch
    if module.params['scope'] != 'system':
        systemctl += " --%s" % module.params['scope']
    if module.params['no_block']:
        systemctl += " --no-block"
    if module.params['force']:
        systemctl += " --force"
    rc = 0
    out = err = ''
    result = dict(
        name=unit,
        changed=False,
        status=dict(),
    )
    # Run daemon-reload first, if requested
    if module.params['daemon_reload'] and not module.check_mode:
        (rc, out, err) = module.run_command("%s daemon-reload" % (systemctl))
        if rc != 0:
            module.fail_json(msg='failure %d during daemon-reload: %s' % (rc, err))
    # Run daemon-reexec
    if module.params['daemon_reexec'] and not module.check_mode:
        (rc, out, err) = module.run_command("%s daemon-reexec" % (systemctl))
        if rc != 0:
            module.fail_json(msg='failure %d during daemon-reexec: %s' % (rc, err))
    if unit:
        found = False
        is_initd = sysv_exists(unit)
        is_systemd = False
        # check service data, cannot error out on rc as it changes across versions, assume not found
        (rc, out, err) = module.run_command("%s show '%s'" % (systemctl, unit))
        if rc == 0 and not (request_was_ignored(out) or request_was_ignored(err)):
            # load return of systemctl show into dictionary for easy access and return
            if out:
                result['status'] = parse_systemctl_show(to_native(out).split('\n'))
                is_systemd = 'LoadState' in result['status'] and result['status']['LoadState'] != 'not-found'
                is_masked = 'LoadState' in result['status'] and result['status']['LoadState'] == 'masked'
                # Check for loading error
                if is_systemd and not is_masked and 'LoadError' in result['status']:
                    module.fail_json(msg="Error loading unit file '%s': %s" % (unit, result['status']['LoadError']))
        else:
            # `show` failed or was ignored (e.g. chroot); fall back to other probes.
            # list taken from man systemctl(1) for systemd 244
            valid_enabled_states = [
                "enabled",
                "enabled-runtime",
                "linked",
                "linked-runtime",
                "masked",
                "masked-runtime",
                "static",
                "indirect",
                "disabled",
                "generated",
                "transient"]
            (rc, out, err) = module.run_command("%s is-enabled '%s'" % (systemctl, unit))
            if out.strip() in valid_enabled_states:
                is_systemd = True
            else:
                # fallback list-unit-files as show does not work on some systems (chroot)
                # not used as primary as it skips some services (like those using init.d) and requires .service/etc notation
                (rc, out, err) = module.run_command("%s list-unit-files '%s'" % (systemctl, unit))
                if rc == 0:
                    is_systemd = True
                else:
                    # Check for systemctl command
                    module.run_command(systemctl, check_rc=True)
        # Does service exist?
        found = is_systemd or is_initd
        if is_initd and not is_systemd:
            module.warn('The service (%s) is actually an init script but the system is managed by systemd' % unit)
        # mask/unmask the service, if requested, can operate on services before they are installed
        if module.params['masked'] is not None:
            # state is not masked unless systemd affirms otherwise
            (rc, out, err) = module.run_command("%s is-enabled '%s'" % (systemctl, unit))
            masked = out.strip() == "masked"
            if masked != module.params['masked']:
                result['changed'] = True
                if module.params['masked']:
                    action = 'mask'
                else:
                    action = 'unmask'
                if not module.check_mode:
                    (rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
                    if rc != 0:
                        # some versions of system CAN mask/unmask non existing services, we only fail on missing if they don't
                        fail_if_missing(module, found, unit, msg='host')
        # Enable/disable service startup at boot if requested
        if module.params['enabled'] is not None:
            if module.params['enabled']:
                action = 'enable'
            else:
                action = 'disable'
            fail_if_missing(module, found, unit, msg='host')
            # do we need to enable the service?
            enabled = False
            (rc, out, err) = module.run_command("%s is-enabled '%s'" % (systemctl, unit))
            # check systemctl result or if it is a init script
            if rc == 0:
                enabled = True
            elif rc == 1:
                # if not a user or global user service and both init script and unit file exist stdout should have enabled/disabled, otherwise use rc entries
                if module.params['scope'] == 'system' and \
                        not module.params['user'] and \
                        is_initd and \
                        not out.strip().endswith('disabled') and \
                        sysv_is_enabled(unit):
                    enabled = True
            # default to current state
            result['enabled'] = enabled
            # Change enable/disable if needed
            if enabled != module.params['enabled']:
                result['changed'] = True
                if not module.check_mode:
                    (rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
                    if rc != 0:
                        module.fail_json(msg="Unable to %s service %s: %s" % (action, unit, out + err))
                result['enabled'] = not enabled
        # set service state if requested
        if module.params['state'] is not None:
            fail_if_missing(module, found, unit, msg="host")
            # default to desired state
            result['state'] = module.params['state']
            # What is current service state?
            if 'ActiveState' in result['status']:
                action = None
                if module.params['state'] == 'started':
                    if not is_running_service(result['status']):
                        action = 'start'
                elif module.params['state'] == 'stopped':
                    if is_running_service(result['status']) or is_deactivating_service(result['status']):
                        action = 'stop'
                else:
                    # restarted/reloaded: start if not running, otherwise
                    # derive the verb from the state name.
                    if not is_running_service(result['status']):
                        action = 'start'
                    else:
                        action = module.params['state'][:-2]  # remove 'ed' from restarted/reloaded
                    result['state'] = 'started'
                if action:
                    result['changed'] = True
                    if not module.check_mode:
                        (rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
                        if rc != 0:
                            module.fail_json(msg="Unable to %s service %s: %s" % (action, unit, err))
            # check for chroot
            elif is_chroot(module):
                module.warn("Target is a chroot. This can lead to false positives or prevent the init system tools from working.")
            else:
                # this should not happen?
                module.fail_json(msg="Service is in unknown state", status=result['status'])
    module.exit_json(**result)
if __name__ == '__main__':
main()
|
azaghal/ansible
|
lib/ansible/modules/systemd.py
|
Python
|
gpl-3.0
| 22,178
|
[
"Brian"
] |
bcd05735d94684afc48f54a40a584fe00a3397ee9bbcf44c06574f4ad1fe9750
|
import numpy as np
class Function(object):
    """A named callable that bundles a function with its derivatives.

    ``evaluators`` is an indexable collection where entry ``d`` evaluates the
    d-th derivative; entry 0 is the function itself.
    """

    def __init__(self, usage, name, evaluators):
        self.usage = usage          # e.g. 'basis' or 'cost'
        self.name = name            # human-readable identifier
        self._evaluator = evaluators

    def __call__(self, *args, d=0):
        """Evaluate the d-th derivative at *args (d=0 evaluates the function)."""
        evaluate = self._evaluator[d]
        return evaluate(*args)
# PARAMETERS shared by several basis functions below.
tau = 1  # Temperature / threshold unit of the logistic (sigmoid) basis.
alpha = 0.5  # Slope-scale for the parametrized rectified linear unit (leaky ReLU / ELU).
# BASIS FUNCTIONS: Regression
# Each Function pairs an activation with its Jacobian (np.diag of the
# elementwise derivative).  The lambdas reference `piecewise` /
# `piecewise_origin`, defined later in this module — safe because lambda
# bodies are only evaluated at call time.
basis_identity = Function('basis', 'identity',
                          [lambda x: x,
                           lambda x: np.diag(np.ones(x.shape))])
basis_binary = Function('basis', 'binary',
                        [lambda x: piecewise(x, 0, 1),
                         lambda x: np.diag(np.zeros(np.shape(x)))])
basis_relu = Function('basis', 'relu',
                      [lambda x: piecewise(x, alpha * x, x),
                       lambda x: np.diag(piecewise(x, alpha, 1))])
basis_exponent = Function('basis', 'exponent',
                          [lambda x: piecewise(x, alpha*(np.exp(x) - 1), x),
                           lambda x: np.diag(piecewise(x, alpha*np.exp(x), np.ones(np.shape(x))))])
basis_logistic = Function('basis', 'logistic',  # Commonly known as 'Sigmoid'
                          [lambda x: tau * (1 + np.exp(-x/tau))**-1,  # S
                           lambda x: np.diag(np.exp(x / tau) / (np.exp(x / tau) + 1) ** 2)])  # S * (1 - S)
basis_softplus = Function('basis', 'softplus',
                          [lambda x: np.log(1 + np.exp(x)),
                           lambda x: np.diag((1 + np.exp(-x))**-1)])
basis_gaussian = Function('basis', 'gaussian',
                          [lambda x: np.exp(-x**2),
                           lambda x: np.diag(-2 * x * np.exp(-x**2))])
basis_tanh = Function('basis', 'tanh',
                      [lambda x: np.tanh(x),
                       lambda x: np.diag(1 - np.tanh(x)**2)])
basis_arctan = Function('basis', 'arctan',
                        [lambda x: np.arctan(x),
                         lambda x: np.diag(1 / (x**2 + 1))])
basis_sinusoid = Function('basis', 'sinusoid',
                          [lambda x: np.sin(x),
                           lambda x: np.diag(np.cos(x))])
basis_sinc = Function('basis', 'sinc',
                      [lambda x: piecewise_origin(x, np.sin(x) / x, 0),
                       lambda x: np.diag(piecewise_origin(x, np.cos(x) / x - np.sin(x) / x**2, 0))])
basis_softsign = Function('basis', 'softsign',
                          [lambda x: x / (1 + np.abs(x)),
                           lambda x: np.diag(1 / (1 + np.abs(x))**2)])
basis_bent = Function('basis', 'bent',
                      [lambda x: (np.sqrt(x**2 + 1) - 1) / 2 + x,
                       lambda x: np.diag(x / (2*np.sqrt(x**2 + 1)) + 1)])
basis_log = Function('basis', 'log',
                     [lambda x: piecewise(x, np.log(1 + x), -np.log(1 - x)),
                      lambda x: np.diag(piecewise(x, 1 / (1 + x), 1 / (1 - x)))])
# BASIS FUNCTIONS: Classification
def softmax(x):
    """Numerically stable softmax: shift by the max before exponentiating."""
    shifted = np.exp(x - x.max())
    return shifted / shifted.sum()
basis_softmax = Function('basis', 'SMax',
                         [softmax,
                          # NOTE(review): for a 1-D input, softmax(x) @ softmax(x).T
                          # is a scalar inner product, so this computes diag(s) minus
                          # a scalar rather than the outer product s s^T of the usual
                          # softmax Jacobian — presumably inputs are column vectors;
                          # verify against the callers.
                          lambda x: np.diag(softmax(x)) - softmax(x) @ softmax(x).T])
# COST FUNCTIONS: evaluator 0 is the loss value, evaluator 1 its gradient
# with respect to the prediction P (O = observed/target, P = predicted).
cost_sum_squared = Function('cost', 'SSE',  # Same as RSS and SSR
                            [lambda O, P: sum((O - P)**2),
                             lambda O, P: -2 * (O - P)])
cost_cross_entropy = Function('cost', 'CEE',
                              # NOTE(review): this value is the NEGATIVE of the usual
                              # cross-entropy -(O log P + (1-O) log(1-P)); the gradient
                              # (P - O) matches the softmax/sigmoid + cross-entropy
                              # shortcut.  Confirm callers expect this sign convention.
                              [lambda O, P: (O * np.log(P)) + (1 - O) * np.log(1 - P),
                               lambda O, P: (P - O)])
def piecewise(x, lower, upper, thresh=0):
    """Elementwise piecewise selection around a threshold.

    Returns an array equal to ``lower`` where ``x < thresh``, ``upper`` where
    ``x > thresh``, and the original ``x`` values where ``x == thresh``.
    ``lower`` and ``upper`` may be scalars or arrays broadcastable against
    ``x`` (the previous ``type(...) == float/int`` checks mishandled NumPy
    scalar types; broadcasting via ``np.where`` covers all cases).

    Unlike the previous implementation, the input ``x`` is NOT mutated:
    writing the result into the caller's array was a latent aliasing bug —
    every basis function above would silently clobber its own input.

    :param x: input array (or array-like).
    :param lower: value(s) used where x < thresh.
    :param upper: value(s) used where x > thresh.
    :param thresh: the splitting threshold (default 0).
    :return: a new array; x is left untouched.
    """
    x = np.asarray(x)
    return np.where(x < thresh, lower, np.where(x > thresh, upper, x))
def piecewise_origin(x, outer, inner, origin=0):
    """Return ``inner`` where ``x == origin`` and ``outer`` everywhere else.

    ``outer``/``inner`` may be scalars or arrays broadcastable against ``x``
    (used e.g. by the sinc basis to patch the removable singularity at 0).

    Fixes two defects of the previous implementation:
    * it wrote ``inner`` into ``x`` *before* computing the non-origin indices,
      so whenever ``inner != origin`` those positions were immediately
      re-clobbered with ``outer``;
    * it mutated the caller's array in place (aliasing bug).

    :param x: input array (or array-like); left untouched.
    :param outer: value(s) used where x != origin.
    :param inner: value(s) used where x == origin.
    :param origin: the special point (default 0).
    :return: a new array.
    """
    x = np.asarray(x)
    return np.where(x == origin, inner, outer)
|
Shoeboxam/Neural_Network
|
MFP_Simple/Function.py
|
Python
|
mit
| 4,531
|
[
"Gaussian"
] |
7a063cf7ebd934e3012f041355376555c5f29bb42ff35d0848d794857e98cd6a
|
# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
from pyro.contrib.gp.parameterized import Parameterized
def _zero_mean_function(x):
    # Default GP mean function m(x) = 0; the scalar 0 broadcasts against the
    # covariance terms wherever the mean is added.
    return 0
class GPModel(Parameterized):
    r"""
    Base class for Gaussian Process models.

    The core of a Gaussian Process is a covariance function :math:`k` which governs
    the similarity between input points. Given :math:`k`, we can establish a
    distribution over functions :math:`f` by a multivariate normal distribution

    .. math:: p(f(X)) = \mathcal{N}(0, k(X, X)),

    where :math:`X` is any set of input points and :math:`k(X, X)` is a covariance
    matrix whose entries are outputs :math:`k(x, z)` of :math:`k` over input pairs
    :math:`(x, z)`. This distribution is usually denoted by

    .. math:: f \sim \mathcal{GP}(0, k).

    .. note:: Generally, beside a covariance matrix :math:`k`, a Gaussian Process can
        also be specified by a mean function :math:`m` (which is a zero-value function
        by default). In that case, its distribution will be

        .. math:: p(f(X)) = \mathcal{N}(m(X), k(X, X)).

    Gaussian Process models are :class:`~pyro.contrib.gp.util.Parameterized`
    subclasses. So its parameters can be learned, set priors, or fixed by using
    corresponding methods from :class:`~pyro.contrib.gp.util.Parameterized`. A typical
    way to define a Gaussian Process model is

        >>> X = torch.tensor([[1., 5, 3], [4, 3, 7]])
        >>> y = torch.tensor([2., 1])
        >>> kernel = gp.kernels.RBF(input_dim=3)
        >>> kernel.variance = pyro.nn.PyroSample(dist.Uniform(torch.tensor(0.5), torch.tensor(1.5)))
        >>> kernel.lengthscale = pyro.nn.PyroSample(dist.Uniform(torch.tensor(1.0), torch.tensor(3.0)))
        >>> gpr = gp.models.GPRegression(X, y, kernel)

    There are two ways to train a Gaussian Process model:

    + Using an MCMC algorithm (in module :mod:`pyro.infer.mcmc`) on :meth:`model` to
      get posterior samples for the Gaussian Process's parameters. For example:

        >>> hmc_kernel = HMC(gpr.model)
        >>> mcmc = MCMC(hmc_kernel, num_samples=10)
        >>> mcmc.run()
        >>> ls_name = "kernel.lengthscale"
        >>> posterior_ls = mcmc.get_samples()[ls_name]

    + Using a variational inference on the pair :meth:`model`, :meth:`guide`:

        >>> optimizer = torch.optim.Adam(gpr.parameters(), lr=0.01)
        >>> loss_fn = pyro.infer.TraceMeanField_ELBO().differentiable_loss
        >>>
        >>> for i in range(1000):
        ...     svi.step()  # doctest: +SKIP
        ...     optimizer.zero_grad()
        ...     loss = loss_fn(gpr.model, gpr.guide)  # doctest: +SKIP
        ...     loss.backward()  # doctest: +SKIP
        ...     optimizer.step()

    To give a prediction on new dataset, simply use :meth:`forward` like any PyTorch
    :class:`torch.nn.Module`:

        >>> Xnew = torch.tensor([[2., 3, 1]])
        >>> f_loc, f_cov = gpr(Xnew, full_cov=True)

    Reference:

    [1] `Gaussian Processes for Machine Learning`,
    Carl E. Rasmussen, Christopher K. I. Williams

    :param torch.Tensor X: A input data for training. Its first dimension is the number
        of data points.
    :param torch.Tensor y: An output data for training. Its last dimension is the
        number of data points.
    :param ~pyro.contrib.gp.kernels.kernel.Kernel kernel: A Pyro kernel object, which
        is the covariance function :math:`k`.
    :param callable mean_function: An optional mean function :math:`m` of this Gaussian
        process. By default, we use zero mean.
    :param float jitter: A small positive term which is added into the diagonal part of
        a covariance matrix to help stabilize its Cholesky decomposition.
    """

    def __init__(self, X, y, kernel, mean_function=None, jitter=1e-6):
        super().__init__()
        # Validates the X/y shapes and stores them (y may be None for
        # prior-only / unsupervised use).
        self.set_data(X, y)
        self.kernel = kernel
        # Fall back to the module-level zero mean function when none is given.
        self.mean_function = (
            mean_function if mean_function is not None else _zero_mean_function
        )
        # Added to the covariance diagonal by subclasses to keep the Cholesky
        # factorization numerically stable.
        self.jitter = jitter

    def model(self):
        """
        A "model" stochastic function. If ``self.y`` is ``None``, this method returns
        mean and variance of the Gaussian Process prior.
        """
        raise NotImplementedError

    def guide(self):
        """
        A "guide" stochastic function to be used in variational inference methods. It
        also gives posterior information to the method :meth:`forward` for prediction.
        """
        raise NotImplementedError

    def forward(self, Xnew, full_cov=False):
        r"""
        Computes the mean and covariance matrix (or variance) of Gaussian Process
        posterior on a test input data :math:`X_{new}`:

        .. math:: p(f^* \mid X_{new}, X, y, k, \theta),

        where :math:`\theta` are parameters of this model.

        .. note:: Model's parameters :math:`\theta` together with kernel's parameters
            have been learned from a training procedure (MCMC or SVI).

        :param torch.Tensor Xnew: A input data for testing. Note that
            ``Xnew.shape[1:]`` must be the same as ``X.shape[1:]``.
        :param bool full_cov: A flag to decide if we want to predict full covariance
            matrix or just variance.
        :returns: loc and covariance matrix (or variance) of :math:`p(f^*(X_{new}))`
        :rtype: tuple(torch.Tensor, torch.Tensor)
        """
        raise NotImplementedError

    def set_data(self, X, y=None):
        """
        Sets data for Gaussian Process models.

        Some examples to utilize this method are:

        .. doctest::
           :hide:

            >>> X = torch.tensor([[1., 5, 3], [4, 3, 7]])
            >>> y = torch.tensor([2., 1])
            >>> kernel = gp.kernels.RBF(input_dim=3)
            >>> kernel.variance = pyro.nn.PyroSample(dist.Uniform(torch.tensor(0.5), torch.tensor(1.5)))
            >>> kernel.lengthscale = pyro.nn.PyroSample(dist.Uniform(torch.tensor(1.0), torch.tensor(3.0)))

        + Batch training on a sparse variational model:

            >>> Xu = torch.tensor([[1., 0, 2]])  # inducing input
            >>> likelihood = gp.likelihoods.Gaussian()
            >>> vsgp = gp.models.VariationalSparseGP(X, y, kernel, Xu, likelihood)
            >>> optimizer = torch.optim.Adam(vsgp.parameters(), lr=0.01)
            >>> loss_fn = pyro.infer.TraceMeanField_ELBO().differentiable_loss
            >>> batched_X, batched_y = X.split(split_size=10), y.split(split_size=10)
            >>> for Xi, yi in zip(batched_X, batched_y):
            ...     optimizer.zero_grad()
            ...     vsgp.set_data(Xi, yi)
            ...     svi.step()  # doctest: +SKIP
            ...     loss = loss_fn(vsgp.model, vsgp.guide)  # doctest: +SKIP
            ...     loss.backward()  # doctest: +SKIP
            ...     optimizer.step()

        + Making a two-layer Gaussian Process stochastic function:

            >>> gpr1 = gp.models.GPRegression(X, None, kernel)
            >>> Z, _ = gpr1.model()
            >>> gpr2 = gp.models.GPRegression(Z, y, kernel)
            >>> def two_layer_model():
            ...     Z, _ = gpr1.model()
            ...     gpr2.set_data(Z, y)
            ...     return gpr2.model()

        References:

        [1] `Scalable Variational Gaussian Process Classification`,
        James Hensman, Alexander G. de G. Matthews, Zoubin Ghahramani

        [2] `Deep Gaussian Processes`,
        Andreas C. Damianou, Neil D. Lawrence

        :param torch.Tensor X: A input data for training. Its first dimension is the
            number of data points.
        :param torch.Tensor y: An output data for training. Its last dimension is the
            number of data points.
        """
        # X indexes data points along dim 0, y along its last dim; the counts
        # must agree (skipped entirely when y is None).
        if y is not None and X.size(0) != y.size(-1):
            raise ValueError(
                "Expected the number of input data points equal to the "
                "number of output data points, but got {} and {}.".format(
                    X.size(0), y.size(-1)
                )
            )
        self.X = X
        self.y = y

    def _check_Xnew_shape(self, Xnew):
        """
        Checks the correction of the shape of new data.

        :param torch.Tensor Xnew: A input data for testing. Note that
            ``Xnew.shape[1:]`` must be the same as ``self.X.shape[1:]``.
        """
        # Same rank as the training data...
        if Xnew.dim() != self.X.dim():
            raise ValueError(
                "Train data and test data should have the same "
                "number of dimensions, but got {} and {}.".format(
                    self.X.dim(), Xnew.dim()
                )
            )
        # ...and identical per-point feature shape (only the number of points,
        # dim 0, may differ).
        if self.X.shape[1:] != Xnew.shape[1:]:
            raise ValueError(
                "Train data and test data should have the same "
                "shape of features, but got {} and {}.".format(
                    self.X.shape[1:], Xnew.shape[1:]
                )
            )
|
uber/pyro
|
pyro/contrib/gp/models/model.py
|
Python
|
apache-2.0
| 8,928
|
[
"Gaussian"
] |
c4d1c46195863e0bfe1b6e3f0bd8e3d382b34ba672e1d1cf26effe4878412fcc
|
# Orca
#
# Copyright 2005-2009 Sun Microsystems Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Displays a GUI for the Orca Find window"""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2005-2009 Sun Microsystems Inc."
__license__ = "LGPL"
import os
import sys
from gi.repository import Gtk
import locale
from . import find
from . import guilabels
from . import orca_gtkbuilder
from . import orca_state
from . import orca_platform
OS = None
class OrcaFindGUI(orca_gtkbuilder.GtkBuilderWrapper):
    """GtkBuilder-backed dialog that collects the Orca Find options.

    Each signal handler below is wired (by name in the .ui file) to a
    widget in orca-find.ui.  The collected options are copied into
    orca_state.searchQuery when the user clicks the Find button, at which
    point the dialog is destroyed and the active script runs the search.
    """

    def __init__(self, fileName, windowName):
        """Initialize the Orca configuration GUI.

        Arguments:
        - fileName: name of the GtkBuilder file.
        - windowName: name of the component to get from the GtkBuilder file.
        """
        orca_gtkbuilder.GtkBuilderWrapper.__init__(self, fileName, windowName)

        # Initialize variables to None to keep pylint happy.
        #
        self.activeScript = None
        self.caseSensitive = None
        self.matchEntireWord = None
        self.searchBackwards = None
        self.searchString = None
        self.startAtTop = None
        self.windowWrap = None

    def init(self):
        """Reset all search options to their defaults."""
        # Initialize the dialog box controls.
        self.searchString = ""
        self.searchBackwards = False
        self.caseSensitive = False
        self.matchEntireWord = False
        self.windowWrap = True
        self.startAtTop = False
        # Remember the script that was active when the dialog was created;
        # the Find command is ultimately run against that script.
        self.activeScript = orca_state.activeScript

    def showGUI(self):
        """Show the Orca Find dialog. This assumes that the GUI has
        already been created.
        """
        findDialog = self.get_widget("findDialog")

        # Use the timestamp of the input event that triggered the dialog so
        # the window manager grants it focus; fall back to the current GTK
        # event time when no timestamp is available.
        ts = orca_state.lastInputEvent.timestamp
        if ts == 0:
            ts = Gtk.get_current_event_time()
        findDialog.present_with_time(ts)

        # Populate the dialog box from the previous searchQuery, should
        # one exist. Note: This is necessary because we are destroying
        # the dialog (rather than merely hiding it) before performing the
        # search.
        try:
            searchForEntry = self.get_widget("searchForEntry")
            searchForEntry.set_text(orca_state.searchQuery.searchString)
            searchForEntry.select_region(0, len(searchForEntry.get_text()))
            if orca_state.searchQuery.startAtTop:
                self.get_widget("topRadioButton").set_active(True)
            self.get_widget("matchCaseCheckbox").set_active(\
                orca_state.searchQuery.caseSensitive)
            self.get_widget("matchEntireWordCheckbox").set_active(\
                orca_state.searchQuery.matchEntireWord)
            self.get_widget("wrapAroundCheckbox").set_active(\
                orca_state.searchQuery.windowWrap)
            self.get_widget("searchBackwardsCheckbox").set_active(\
                orca_state.searchQuery.searchBackwards)
        except:
            # NOTE(review): bare except presumably guards against
            # orca_state.searchQuery being unset on first use; the dialog
            # then simply starts with default values -- confirm no other
            # errors should surface here.
            pass

    def searchForEntryChanged(self, widget):
        """Signal handler for the "changed" signal for the
        searchForEntry GtkEntry widget. The user has changed
        the string to be searched for.

        Arguments:
        - widget: the component that generated the signal.
        """
        self.searchString = widget.get_text()
        # The Find button is only clickable while there is a non-empty
        # search string.
        findButton = self.get_widget("findButton")
        if len(self.searchString) > 0:
            findButton.set_sensitive(True)
        else:
            findButton.set_sensitive(False)

    def startingPointChanged(self, widget):
        """Signal handler for the "toggled" signal for the
        currentLocationRadioButton or topRadioButton GtkRadioButton
        widgets. The user has toggled the starting point for the search.

        Arguments:
        - widget: the component that generated the signal.
        """
        # Both radio buttons share this handler; the label distinguishes
        # which one became active.
        if widget.get_active():
            if widget.get_label() == guilabels.FIND_START_AT_CURRENT_LOCATION:
                self.startAtTop = False
            else:
                self.startAtTop = True

    def matchCaseChecked(self, widget):
        """Signal handler for the "toggled" signal for the
        matchCaseCheckbox GtkCheckButton widget. The user has
        [un]checked the "Match Case" checkbox.

        Arguments:
        - widget: the component that generated the signal.
        """
        self.caseSensitive = widget.get_active()

    def matchEntireWordChecked(self, widget):
        """Signal handler for the "toggled" signal for the
        matchEntireWordCheckbox GtkCheckButton widget.
        The user has [un]checked the "Match entire word"
        checkbox.

        Arguments:
        - widget: the component that generated the signal.
        """
        self.matchEntireWord = widget.get_active()

    def searchBackwardsChecked(self, widget):
        """Signal handler for the "toggled" signal for the
        searchBackwardsCheckbox GtkCheckButton widget.
        The user has [un]checked the "Search backwards"
        checkbox.

        Arguments:
        - widget: the component that generated the signal.
        """
        self.searchBackwards = widget.get_active()

    def wrapAroundChecked(self, widget):
        """Signal handler for the "toggled" signal for the
        wrapAroundCheckbox GtkCheckButton widget. The user has
        [un]checked the "Wrap around" checkbox.

        Arguments:
        - widget: the component that generated the signal.
        """
        self.windowWrap = widget.get_active()

    def closeButtonClicked(self, widget):
        """Signal handler for the "clicked" signal for the cancelButton
        GtkButton widget. The user has clicked the Cancel button.
        Hide the dialog.

        Arguments:
        - widget: the component that generated the signal.
        """
        self.get_widget("findDialog").hide()

    def findButtonClicked(self, widget):
        """Signal handler for the "clicked" signal for the findButton
        GtkButton widget. The user has clicked the Find button.
        Call the method to begin the search.

        Arguments:
        - widget: the component that generated the signal.
        """
        # Snapshot every option into the shared search query so the active
        # script can perform the search after the dialog is gone.
        orca_state.searchQuery = find.SearchQuery()
        orca_state.searchQuery.searchString = self.searchString
        orca_state.searchQuery.searchBackwards = self.searchBackwards
        orca_state.searchQuery.caseSensitive = self.caseSensitive
        orca_state.searchQuery.matchEntireWord = self.matchEntireWord
        orca_state.searchQuery.startAtTop = self.startAtTop
        orca_state.searchQuery.windowWrap = self.windowWrap

        self.activeScript.findCommandRun = True

        # Merely hiding the dialog causes the find to take place before
        # the original window has fully regained focus.
        self.get_widget("findDialog").destroy()

    def findDialogDestroyed(self, widget):
        """Signal handler for the "destroyed" signal for the findDialog
        GtkWindow widget. Reset OS to None.

        Arguments:
        - widget: the component that generated the signal.
        """
        # Dropping the module-global singleton forces showFindUI() to
        # rebuild the dialog on next use.
        global OS
        OS = None
def showFindUI():
    """Present the Orca Find dialog, building it first if necessary.

    The constructed dialog is cached in the module-global OS; it is reset
    to None when the dialog is destroyed, so a fresh one is built on the
    next call.
    """
    global OS

    if not OS:
        pathParts = (orca_platform.prefix,
                     orca_platform.datadirname,
                     orca_platform.package,
                     "ui",
                     "orca-find.ui")
        OS = OrcaFindGUI(os.path.join(*pathParts), "findDialog")
        OS.init()

    OS.showGUI()
def main():
    """Standalone entry point: show the Find dialog and run the GTK main loop."""
    locale.setlocale(locale.LC_ALL, '')

    showFindUI()

    # Blocks until Gtk.main_quit() is called; then exit cleanly.
    Gtk.main()
    sys.exit(0)

if __name__ == "__main__":
    main()
|
ruibarreira/linuxtrail
|
usr/lib/python3/dist-packages/orca/orca_gui_find.py
|
Python
|
gpl-3.0
| 8,375
|
[
"ORCA"
] |
f4bc9b3c21aa439dd7c4ff9762a8d5bcf321853b07dc8624d7dcd19318ee7014
|
# Online haptic_map implementation
import pylab as pyl
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy as scp
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
import tf
import os
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.transforms as tr
import hrl_lib.matplotlib_util as mpu
import pickle
import unittest
import ghmm
import ghmmwrapper
import random
import time
from hrl_haptic_manipulation_in_clutter_msgs.msg import SkinContact
from hrl_haptic_manipulation_in_clutter_msgs.msg import TaxelArray
#from m3skin_ros.msg import TaxelArray as TaxelArray_Meka
from visualization_msgs.msg import Marker
from visualization_msgs.msg import MarkerArray
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/rapid_categorization/taxel_based/')
from data_variable_length_force_sample import Fmat_original, temp_num_fol, temp_num_trunk
def callback(data, callback_args):
    """Subscriber callback for one TaxelArray message.

    Transforms the per-taxel contact locations into /torso_lift_link
    coordinates, accumulates thresholded force magnitudes into the global
    per-taxel history ``fmags``, runs the HMM classification via
    test_data(), and publishes a colored marker for every taxel currently
    in contact.

    Arguments:
    - data: the incoming TaxelArray message.
    - callback_args: the tf.TransformListener passed at subscription time.
    """
    #rospy.loginfo('Getting data!')
    global start_time
    start_time = time.time()
    # Fixing Transforms
    tf_lstnr = callback_args
    sc = SkinContact()
    sc.header.frame_id = '/torso_lift_link' # has to be this and no other coord frame.
    sc.header.stamp = data.header.stamp
    t1, q1 = tf_lstnr.lookupTransform(sc.header.frame_id,
                                      data.header.frame_id,
                                      rospy.Time(0))
    t1 = np.matrix(t1).reshape(3,1)
    r1 = tr.quaternion_to_matrix(q1)
    # Gathering Force Data
    force_vectors = np.row_stack([data.values_x, data.values_y, data.values_z])
    fmags_instant = ut.norm(force_vectors)
    # Hand-tuned force threshold: magnitudes below 0.01 count as no contact.
    threshold = 0.01
    fmags_tuned = fmags_instant - threshold
    fmags_tuned[np.where(fmags_tuned<0)]=0
    fmags_instant_tuned = fmags_tuned
    # Grow the per-taxel force time series while in contact; reset the
    # series as soon as the taxel loses contact.
    global fmags
    for i in range(len(fmags_instant_tuned)):
        if fmags_instant_tuned[i] > 0.0:
            fmags[i].append(fmags_instant_tuned[i])
        else:
            fmags[i] = []
    # Calculating no. of contact regions with hand-tuned force threshold
    global total_contact
    total_contact = sum(i > 0 for i in fmags_instant_tuned)
    # Gathering Contact Data for Haptic Mapping: rotate + translate each
    # taxel center into the torso frame.
    global global_contact_vector
    for i in range(len(fmags_instant_tuned)):
        global_contact_vector[i] = r1*((np.column_stack([data.centers_x[i], data.centers_y[i], data.centers_z[i]])).T) + t1
    # Classify every taxel's accumulated series (updates taxel_FLAG).
    test_data()
    # Publish a marker for each taxel that is currently in contact
    # (flag > -1); the flag value selects the marker color.
    global taxel_FLAG
    for i in range(len(fmags_instant_tuned)):
        if taxel_FLAG[i] > -1:
            idx = taxel_FLAG[i]
            contact_info = global_contact_vector[i]
            pubdata(idx, contact_info)
def test_data():
    """Classify each taxel's force history with the two trained HMMs.

    For every taxel (of 384) with a non-empty force series, the Viterbi
    log-likelihood under the foliage model (model_ff) and the trunk model
    (model_tf) is compared and taxel_FLAG[i] is set to:
      -1 not in contact, 0 unknown (red), 1 foliage (green), 2 trunk (brown).
    Also appends a row of timing / contact-count statistics to the global
    quantitative_data array.
    """
    global exp_time
    # Do Stuff For Testing which basically returns which FLAG is true
    global taxel_FLAG # -1 for not in Contact, 0 for Unknown (Red), 1 for Foliage (green), 2 for Trunk (brown)
    global trunk_contact
    trunk_contact = 0
    global foliage_contact
    foliage_contact = 0
    global total_contact
    global other_contact
    other_contact = 0
    num_samples = []
    # For Testing
    global start_time
    global fmags
    for i in range(384):
        if (len(fmags[i]) > 0):
            ts_obj = fmags[i]
            final_ts_obj = ghmm.EmissionSequence(F,ts_obj)
            # Find Viterbi Path
            global model_ff
            global model_tf
            path_ff_obj = model_ff.viterbi(final_ts_obj)
            path_tf_obj = model_tf.viterbi(final_ts_obj)
            print path_ff_obj[1], path_tf_obj[1]
            # Compare the two Viterbi log-likelihoods; the margins (5 and
            # 20) are hand-tuned confidence thresholds for accepting a
            # foliage or trunk classification respectively.
            diff = abs(path_ff_obj[1]-path_tf_obj[1])
            obj = max(path_ff_obj[1],path_tf_obj[1])
            obj_min = min(abs(path_ff_obj[1]),abs(path_tf_obj[1]))
            if ((obj == path_ff_obj[1]) and (diff > 5)):
            #if ((obj == path_ff_obj[1])):
                #print 'Taxel', i, 'is Foliage !'
                taxel_FLAG[i] = 1
                foliage_contact = foliage_contact+1
                num_samples.append(len(fmags[i]))
            elif ((obj == path_tf_obj[1]) and (diff > 20)):
            #elif ((obj == path_tf_obj[1])):
                #print 'Taxel', i, 'is Trunk !'
                taxel_FLAG[i] = 2
                trunk_contact = trunk_contact+1
                num_samples.append(len(fmags[i]))
            #elif ((obj == path_tf_obj[1]):
                #print 'Taxel', i, 'is Uncertain'
                #taxel_FLAG[i] = 0
            else:
                taxel_FLAG[i] = 0
                #print 'Taxel', i, 'is Unknown'
                other_contact = other_contact+1
        else:
            #print 'Taxel', i, 'is not in Contact'
            taxel_FLAG[i] = -1
    # Record per-cycle statistics: elapsed experiment time, wall-clock
    # classification time, shortest classified series, and contact counts.
    time_taken = time.time()-start_time
    global quantitative_data
    exp_time = exp_time+0.01
    if len(num_samples) > 0:
        quant_instant_data = [exp_time, time_taken, min(num_samples), total_contact, trunk_contact, foliage_contact, other_contact]
    else:
        quant_instant_data = [exp_time, time_taken, 0, total_contact, trunk_contact, foliage_contact, other_contact]
    quantitative_data = np.row_stack([quantitative_data, quant_instant_data])
def getdata():
    """Initialize the ROS node, subscribe to the forearm taxel forces,
    and block in rospy.spin() until shutdown.

    The TransformListener is handed to callback() via callback_args so
    that contacts can be mapped into /torso_lift_link coordinates.
    """
    rospy.loginfo('Initializing the Node !')
    rospy.init_node('Online_Haptic_Map_Builder', anonymous=True)
    listener = tf.TransformListener()
    rospy.loginfo('Waiting to Subscribe to the Skin Message...')
    rospy.Subscriber("/skin_patch_forearm_right/taxels/forces",
                     TaxelArray, callback, callback_args=(listener))
    rospy.spin()
def pubdata(idx, contact_info):
    """Publish one classified contact as a colored sphere marker.

    Arguments:
    - idx: classification flag; 1 -> foliage (green), 2 -> trunk (brown),
      anything else -> unknown (red, alpha 0).
    - contact_info: 3x1 contact position in /torso_lift_link coordinates.
    """
    rospy.loginfo('Publishing data')
    marker = Marker()
    marker.ns = 'Haptic_Map_Markers'
    marker.header.frame_id = '/torso_lift_link'
    marker.type = marker.SPHERE
    marker.action = marker.ADD
    marker.scale.x = 0.02
    marker.scale.y = 0.02
    marker.scale.z = 0.02
    # Color lookup as (alpha, red, green, blue).
    if idx == 1:
        # Green for Foliage
        a, r, g, b = 1.0, 0.0, 1.0, 0.0
    elif idx == 2:
        # Brown for Trunk
        a, r, g, b = 1.0, 0.5, 0.25, 0.125
    else:
        # Red for Unknown (alpha kept at 0.0, as in the original)
        a, r, g, b = 0.0, 1.0, 0.0, 0.0
    marker.color.a = a
    marker.color.r = r
    marker.color.g = g
    marker.color.b = b
    marker.pose.orientation.w = 1.0
    marker.pose.position.x = contact_info[0]
    marker.pose.position.y = contact_info[1]
    marker.pose.position.z = contact_info[2]
    markerArray.markers.append(marker)
    # Renumber the marker IDs so each marker in the array stays unique.
    for new_id, m in enumerate(markerArray.markers):
        m.id = new_id
    # Publish the MarkerArray
    publisher.publish(markerArray)
    #rospy.sleep(0.01)
if __name__ == '__main__':

    # Visualization setup: classified contacts are published as a
    # MarkerArray on this topic.
    topic = 'visualization_marker_array'
    publisher = rospy.Publisher(topic, MarkerArray)
    markerArray = MarkerArray()

    print "Initializing the HMM Models"

    # HMM Implementation
    # Training data: Fmat holds the foliage trials first, then the trunk
    # trials (counts imported from data_variable_length_force_sample).
    Fmat = Fmat_original
    Foliage_Trials = temp_num_fol
    Trunk_Trials = temp_num_trunk

    # Getting mean / covariance
    # Each trial's force series is split into number_states equal segments;
    # per-state emission mean/std are estimated by pooling segments across
    # trials.  First pass: foliage trials.
    i = 0
    number_states = 10
    feature_1_final_data = [0.0]*number_states
    state_1 = [0.0]
    while (i < Foliage_Trials):
        data_length = len(Fmat[i])
        feature_length = data_length/1
        sample_length = feature_length/number_states
        Feature_1 = Fmat[i][0:feature_length]
        if i == 0:
            j = 0
            while (j < number_states):
                feature_1_final_data[j] = Feature_1[sample_length*j:sample_length*(j+1)]
                j=j+1
        else:
            j = 0
            while (j < number_states):
                state_1 = Feature_1[sample_length*j:sample_length*(j+1)]
                #print np.shape(state_1)
                #print np.shape(feature_1_final_data[j])
                feature_1_final_data[j] = feature_1_final_data[j]+state_1
                j=j+1
        i = i+1

    # Per-state Gaussian emission parameters for the foliage (ff) model.
    j = 0
    mu_ff_force = np.zeros((number_states,1))
    sigma_ff = np.zeros((number_states,1))
    while (j < number_states):
        mu_ff_force[j] = np.mean(feature_1_final_data[j])
        sigma_ff[j] = scp.std(feature_1_final_data[j])
        j = j+1

    # Second pass: trunk trials (same segmentation scheme).
    i = Foliage_Trials
    feature_1_final_data = [0.0]*number_states
    state_1 = [0.0]
    while (i < (Foliage_Trials + Trunk_Trials)):
        data_length = len(Fmat[i])
        feature_length = data_length/1
        sample_length = feature_length/number_states
        Feature_1 = Fmat[i][0:feature_length]
        if i == Foliage_Trials:
            j = 0
            while (j < number_states):
                feature_1_final_data[j] = Feature_1[sample_length*j:sample_length*(j+1)]
                j=j+1
        else:
            j = 0
            while (j < number_states):
                state_1 = Feature_1[sample_length*j:sample_length*(j+1)]
                feature_1_final_data[j] = feature_1_final_data[j]+state_1
                j=j+1
        i = i+1

    # Per-state Gaussian emission parameters for the trunk (tf) model.
    j = 0
    mu_tf_force = np.zeros((number_states,1))
    sigma_tf = np.zeros((number_states,1))
    while (j < number_states):
        mu_tf_force[j] = np.mean(feature_1_final_data[j])
        sigma_tf[j] = scp.std(feature_1_final_data[j])
        j = j+1

    # HMM - Implementation:
    # 10 Hidden States
    # Max. Force(For now), Contact Area(Not now), and Contact Motion(Not Now) as Continuous Gaussian Observations from each hidden state
    # Four HMM-Models for Rigid-Fixed, Soft-Fixed, Rigid-Movable, Soft-Movable
    # Transition probabilities obtained as upper diagonal matrix (to be trained using Baum_Welch)
    # For new objects, it is classified according to which model it represenst the closest..
    F = ghmm.Float() # emission domain of this model

    # A - Transition Matrix (left-to-right / upper-triangular topology;
    # only the matrix matching number_states is used).
    if number_states == 3:
        A = [[0.2, 0.5, 0.3],
             [0.0, 0.5, 0.5],
             [0.0, 0.0, 1.0]]
    elif number_states == 5:
        A = [[0.2, 0.35, 0.2, 0.15, 0.1],
             [0.0, 0.2, 0.45, 0.25, 0.1],
             [0.0, 0.0, 0.2, 0.55, 0.25],
             [0.0, 0.0, 0.0, 0.2, 0.8],
             [0.0, 0.0, 0.0, 0.0, 1.0]]
    elif number_states == 10:
        A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.05, 0.05, 0.05],
             [0.0, 0.1, 0.25, 0.25, 0.2, 0.1, 0.05, 0.05, 0.05, 0.05],
             [0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.05, 0.05, 0.05],
             [0.0, 0.0, 0.0, 0.1, 0.3, 0.30, 0.20, 0.1, 0.05, 0.05],
             [0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.20, 0.05, 0.05],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.20, 0.05],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.2, 0.30, 0.30, 0.20],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.2, 0.50, 0.30],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.4, 0.60],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 1.00]]
    elif number_states == 15:
        A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.05, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
             [0.0, 0.1, 0.25, 0.25, 0.2, 0.1, 0.05, 0.05, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
             [0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.05, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
             [0.0, 0.0, 0.0, 0.1, 0.3, 0.30, 0.20, 0.1, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
             [0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.20, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.15, 0.05, 0.01, 0.01, 0.01, 0.01, 0.01],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.1, 0.30, 0.30, 0.10, 0.05, 0.05, 0.05, 0.03, 0.02],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.1, 0.30, 0.30, 0.10, 0.05, 0.05, 0.05, 0.05],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.10, 0.10, 0.05],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.15, 0.10],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.1, 0.30, 0.30, 0.20, 0.10],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.1, 0.40, 0.30, 0.20],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.20, 0.50, 0.30],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.40, 0.60],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 1.00]]
    elif number_states == 20:
        A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
             [0.0, 0.1, 0.25, 0.25, 0.2, 0.1, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
             [0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
             [0.0, 0.0, 0.0, 0.1, 0.3, 0.30, 0.20, 0.09, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
             [0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.15, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.10, 0.05, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.1, 0.30, 0.20, 0.10, 0.05, 0.05, 0.05, 0.03, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.10, 0.05, 0.05, 0.05, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.05, 0.05, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.10, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.1, 0.30, 0.30, 0.10, 0.10, 0.02, 0.02, 0.02, 0.02, 0.02],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.1, 0.40, 0.30, 0.10, 0.02, 0.02, 0.02, 0.02, 0.02],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.20, 0.40, 0.20, 0.10, 0.04, 0.02, 0.02, 0.02],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.20, 0.40, 0.20, 0.10, 0.05, 0.03, 0.02],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.20, 0.40, 0.20, 0.10, 0.05, 0.05],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.20, 0.40, 0.20, 0.10, 0.10],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.20, 0.40, 0.20, 0.20],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.30, 0.50, 0.20],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.40, 0.60],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 1.00]]

    # B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
    B_ff = [0.0]*number_states
    B_tf = [0.0]*number_states
    for num_states in range(number_states):
        B_ff[num_states] = [mu_ff_force[num_states][0],sigma_ff[num_states][0]]
        B_tf[num_states] = [mu_tf_force[num_states][0],sigma_tf[num_states][0]]

    # pi - initial probabilities per state (uniform)
    if number_states == 3:
        pi = [1./3.] * 3
    elif number_states == 5:
        pi = [0.2] * 5
    elif number_states == 10:
        pi = [0.1] * 10
    elif number_states == 15:
        pi = [1./15.] * 15
    elif number_states == 20:
        pi = [0.05] * 20

    # generate FF, TF models from parameters
    model_ff = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_ff, pi) # Will be Trained
    model_tf = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_tf, pi) # Will be Trained

    # Flatten each trial's nested lists into a single observation sequence.
    total_seq = Fmat
    for i in range((Foliage_Trials + Trunk_Trials)):
        total_seq[i][:] = sum(total_seq[i][:],[])
    total_seq_ff = total_seq[0:Foliage_Trials]
    total_seq_tf = total_seq[Foliage_Trials:Foliage_Trials + Trunk_Trials]
    #print len(total_seq_ff)
    #print len(total_seq_tf)

    print "Training the HMM Models..."
    # Baum-Welch training of both models on their respective trial sets.
    train_seq_ff = total_seq_ff
    train_seq_tf = total_seq_tf
    final_ts_ff = ghmm.SequenceSet(F,train_seq_ff)
    final_ts_tf = ghmm.SequenceSet(F,train_seq_tf)
    model_ff.baumWelch(final_ts_ff)
    model_tf.baumWelch(final_ts_tf)
    print "Models Trained: Ready to Collect Data !"

    # Gather Data from Robot Online
    # Per-taxel state shared with the subscriber callback (384 taxels).
    taxel_FLAG = {}
    for i in range(384):
        taxel_FLAG[i] = -1 # -1 for not in Contact, 0 for Unknown (Red), 1 for Foliage (green), 2 for Trunk (brown)
    fmags = {}
    for i in range(384):
        fmags[i] = []
    global_contact_vector = {}
    for i in range(384):
        global_contact_vector[i] = []
    FLAG_Trunk = False
    FLAG_Foliage = False
    FLAG_Unknown = True
    total_contact = 0
    trunk_contact = 0
    foliage_contact = 0
    other_contact = 0
    start_time = 0.0
    exp_time = 0.0
    quantitative_data = [0,0,0,0,0,0,0]
    # Blocks in rospy.spin() until shutdown, then dump the statistics.
    getdata()
    ut.save_pickle(quantitative_data, '/home/tapo/svn/robot1_data/usr/tapo/data/rapid_categorization/Taxel_Based/Tests/foliage_reach_6.pkl')
    #ut.save_pickle(quantitative_data, '/home/tapo/svn/robot1_data/usr/tapo/data/rapid_categorization/Taxel_Based/Tests/trunk_reach_2.pkl')
|
tapomayukh/projects_in_python
|
rapid_categorization/haptic_map/online_haptic_map_taxel_based_quantitative.py
|
Python
|
mit
| 17,734
|
[
"Gaussian",
"Mayavi"
] |
fe4b071e5dce696a33edb5993a24e551d5d2cb713099175d154a73e26b4e5983
|
#!/usr/bin/env python3
import sys
import numpy as np
import argparse
import matplotlib.pyplot as plt
from plotTools import addToPlot
from spectraTools import spectraAnalysis
from netcdfTools import read3dDataFromNetCDF
from analysisTools import sensibleIds, groundOffset, calc_ts_entropy_profile
from utilities import filesFromList
'''
Description: A script to perform quadrant analysis on velocity data stored in a NETCDF file.
The analysis is performed for all points along a z-direction.
In case of PALM-generated results (featuring staggered grid), the velocity data must first be
interpolated onto cell-centers (i.e. scalar grid) with groupVectorDataNetCdf.py script.
Author: Mikko Auvinen
mikko.auvinen@helsinki.fi
University of Helsinki &
Finnish Meteorological Institute
'''
#==========================================================#
def resample(X, n=None):
    """Bootstrap-resample a 4D time series along its first (time) axis.

    Each spatial cell (k, j, i) is resampled independently: n time indices
    are drawn uniformly with replacement and used to build the resampled
    series.  (The previous version carried a dead `if(n is None)` branch
    inside the innermost loop; n is always resolved before the loop.)

    X: 4D array of shape (nt, nk, nj, ni).
    n: number of samples to draw; defaults to nt and is capped at nt.
    Returns the resampled array of shape (n, nk, nj, ni).
    """
    nt, nk, nj, ni = X.shape
    n = nt if (n is None) else min(n, nt)
    Xr = np.zeros((n, nk, nj, ni))
    #print(' Resampled array size = {} '.format( Xr.shape ))
    for i in range(ni):
        for j in range(nj):
            for k in range(nk):
                # Draw n time indices with replacement for this cell.
                ix = np.floor(np.random.rand(n) * nt).astype(int)
                Xr[:, k, j, i] = X[ix, k, j, i]
    return Xr
#==========================================================#
def calc_skew( V, axs=(0) ):
    """Return the sample skewness of V along axes axs.

    The series is centered by its time mean (axis 0) before calling
    scipy.stats.skew; centering does not change the skewness value, but is
    kept for parity with the original processing.  Fix: the centering is
    now done on a copy, so the caller's array is no longer mutated in
    place (the old `V -= mean` silently modified the input).

    V:   array with time on axis 0.
    axs: axis or axes handed to scipy.stats.skew.
    """
    import scipy.stats as st # contains st.entropy
    Vc = V - np.mean( V, axis=(0) )  # work on a copy; do not mutate V
    vo = st.skew( Vc, axis=axs )
    return vo
#==========================================================#
# Top-level driver: parse CLI options, read (or derive) the requested
# variable from each NetCDF file, reduce it to a vertical profile with the
# chosen statistic, optionally bootstrap-resample / write ascii, and plot.
sepStr = ' # = # = # = # = # = # = # = # = '
parser = argparse.ArgumentParser()
parser.add_argument("fileKey", default=None,\
  help="Search string for collecting files.")
parser.add_argument("-v", "--varname", type=str, default='u',\
  help="Name of the variable in NETCDF file. Default='u' ")
parser.add_argument("-m", "--mode", type=str, default='mean', \
  choices=['mean', 'std', 'var','skew','entropy','max'],\
  help="Mode: mean, std, var, or entropy.")
parser.add_argument("-rs", "--resample", action="store_true", default=False,\
  help="Include resampling of the time series.")
parser.add_argument("-Ns", "--Nsample", type=int, default=None,\
  help="Number of terms used in resampling. Default=None")
parser.add_argument("-n", "--normalize", action="store_true", default=False,\
  help="Normalize.")
parser.add_argument("-me", "--meanError", action="store_true", default=False,\
  help="Plot std of mean error (with std option).")
parser.add_argument("-p", "--printOn", action="store_true", default=False,\
  help="Print the numpy array data.")
parser.add_argument("-pp", "--printOnly", action="store_true", default=False,\
  help="Only print the numpy array data. Don't save.")
parser.add_argument("-wa", "--writeAscii", action="store_true", default=False,\
  help="Save profile data to an ascii file.")
parser.add_argument("-c", "--coarse", type=int, default=1,\
  help="Coarsening level. Int > 1.")
args = parser.parse_args()
#==========================================================#
# Rename ...
fileKey = args.fileKey
normalize = args.normalize
meanErrorOn = args.meanError
mode = args.mode
resampleOn = args.resample
Ns = args.Nsample
cl = abs(args.coarse)
varname = args.varname
writeAscii = args.writeAscii
#==========================================================#
# Obtain a list of files to include.
fileNos, fileList = filesFromList( fileKey+'*' )
fig = plt.figure(num=1, figsize=(12,10))
for fn in fileNos:
    # Derived variables (Umag, rotated U1/U2 components, wind direction,
    # TKE) are assembled from the raw u, v, w (and e) fields; any other
    # variable name is read directly from the NetCDF file.
    VNU = varname.upper()
    if('MAG' in VNU or 'U1' in VNU or 'U2' in VNU or 'DIR' in VNU):
        dataDict = read3dDataFromNetCDF( fileList[fn] , 'u', cl )
        u = dataDict['v']
        dataDict = read3dDataFromNetCDF( fileList[fn] , 'v', cl )
        v = dataDict['v']
        if('MAG' in VNU ): # vr := Umag
            vr = np.sqrt( u**2 + v**2 ); u = None; v = None
        else:
            # a = local mean-flow direction; 1e-5 avoids division by zero.
            um = np.mean( u, axis=(0) ); vm = np.mean( v, axis=(0) )
            a = np.arctan( vm/(um+1.e-5) ); um = None; vm = None
            if( 'U1' in VNU ):
                vr = u * np.cos(a) + v * np.sin(a)
            elif('U2' in VNU): # U2
                vr =-u * np.sin(a) + v * np.cos(a)
            else:# direction
                vr = np.arctan( v/(u+1.e-5) ) * (180./np.pi)
    elif('TKE' in VNU):
        # TKE = resolved part from u', v', w' plus (when available) the
        # subgrid-scale energy 'e'.
        try:
            dataDict = read3dDataFromNetCDF( fileList[fn] , 'e', cl )
            e_sgs = dataDict['v']
        except:
            print(' No e_sgs -> Result is RESOLVED TKE! ')
            e_sgs = None
        dataDict = read3dDataFromNetCDF( fileList[fn] , 'u', cl )
        u = dataDict['v']; up=u-np.mean(u, axis=0); u = None
        dataDict = read3dDataFromNetCDF( fileList[fn] , 'v', cl )
        v = dataDict['v']; vp=v-np.mean(v, axis=0); v = None
        dataDict = read3dDataFromNetCDF( fileList[fn] , 'w', cl )
        w = dataDict['v']; wp=w-np.mean(w, axis=0); w = None
        e_res = 0.5*(np.mean(up**2,axis=0)+np.mean(vp**2,axis=0)+np.mean(wp**2,axis=0))
        up = None; vp = None; wp = None
        # vr := TKE
        vr = e_res
        if( e_sgs is not None ): vr += e_sgs
    else:
        dataDict = read3dDataFromNetCDF( fileList[fn] , varname, cl )
        vr = dataDict['v']
    x = dataDict['x']; y = dataDict['y']; z = dataDict['z']
    time = dataDict['time']
    dataDict = None
    # Reduce over time and both horizontal directions -> vertical profile.
    axs = (0,2,3)
    #axs = (0)
    if( resampleOn ):
        vr2 = resample( vr, Ns )
    else:
        vr2 = None
    # Process data vr --> vp
    if( mode == 'mean'):
        vp = np.mean( vr, axis=axs ); zp = z
        if( vr2 is not None ): vp2 = np.mean( vr2, axis=axs )
        plotStr = ["mean({}) vs z ".format(varname), varname ,"z"]
    elif( mode == 'std'):
        vp = np.std( vr, axis=axs ); zp = z
        if( vr2 is not None ): vp2 = np.std( vr2, axis=axs )
        if(meanErrorOn):
            # Standard error of the mean: std / sqrt(N time samples).
            N = len( vr[:,0,0,0] )
            vmerr = vp/np.sqrt(N)
            if( len(vmerr.shape) == 3 ): vmerr = vmerr[:,0,0]
            plotStr = ["std. error of mean({}) vs z ".format(varname), varname ,"z"]
            fig = addToPlot(fig, vmerr, zp,'{}({}), {}'\
                .format('std error of mean',varname,fileList[fn].split('_')[-1]), plotStr, False )
            '''
            N2 = len( vr2[:,0,0,0] )
            vmerr2 = vp2/np.sqrt(N2)
            if( len(vmerr2.shape) == 3 ): vmerr2 = vmerr2[:,0,0]
            fig = addToPlot(fig, vmerr2, zp,'{}({}), {}'\
                .format('std error of mean',varname,fileList[fn]), plotStr, False )
            '''
        plotStr = ["std({}) vs z ".format(varname), varname ,"z"]
    elif( mode == 'var' ):
        vp = np.var( vr, axis=axs ); zp = z
        if( vr2 is not None ): vp2 = np.var( vr2, axis=axs )
        plotStr = ["var({}) vs z ".format(varname), varname ,"z"]
    elif( mode == 'entropy' ):
        # Hand-tuned logarithmic bin edges for the entropy estimate.
        nbins=24
        Bs = np.logspace(-0.75, 0.8, nbins, endpoint=True); Bs[-1] = 9.0
        vp = calc_ts_entropy_profile(vr, z, alpha=1., nbins=Bs); zp = z
        if( vr2 is not None ): vp2 = calc_ts_entropy_profile( vr2, z )
        plotStr = ["entropy({}) vs z ".format(varname), varname ,"z"]
    elif( mode == 'skew' ):
        vp = calc_skew( vr, axs ); zp = z
        if( vr2 is not None ): vp2 = calc_skew( vr2, axs )
        plotStr = ["skew({}) vs z ".format(varname), varname ,"z"]
    elif( mode == 'max'):
        vp = np.max( vr, axis=axs ); zp = z
        if( vr2 is not None ): vp2 = np.mean( vr2, axis=axs )
        plotStr = ["max({}) vs z ".format(varname), varname ,"z"]
    # ================================================================= #
    if( len(vp.shape) == 3 ):
        # NOTE(review): on Python 3 this division yields float indices, so
        # the try branch raises and the except fallback vp[:,0,0] is
        # always taken -- confirm whether a center-column pick (jx, ix as
        # ints) was intended here.
        jx,ix = np.array( vp.shape[1:] )/2
        try:    vp = vp[:,jx,ix]
        except: vp = vp[:,0,0]
    if( writeAscii ):
        print(' (2) Writing data to ascii file: {}_{}.dat'.format(varname,mode))
        print(' x.shape = {} vs y.shape = {}'.format(np.shape(zp), np.shape(vp)))
        hStr = ' {} '.format(varname)
        fstr = fileList[fn].split('_')[-1]
        fstr = fstr.split('.')[0]
        np.savetxt(varname+'_'+mode+'_'+fstr+'.dat', np.c_[zp, vp], header=hStr)
    if( vr2 is not None ):
        if( len(vp2.shape) == 3 ): vp2 = vp2[:,1,1]
    # Original profile (plus, when resampling, the resampled profile and
    # the absolute resampling error).
    fig = addToPlot(fig, vp, zp,' {}({}), {}, N = {}'\
        .format(mode,varname,fileList[fn].split('_')[-1], len(time)), plotStr, False )
    if( vr2 is not None ):
        fig = addToPlot(fig, vp2, zp,' {}({}), {}, Resampled with N = {}'\
            .format(mode,varname,fileList[fn].split('_')[-1], Ns), plotStr, False )
        fig = addToPlot(fig, np.abs(vp-vp2), zp,' {}({}), {}, Resampling error = |(v_o-v_rs)/v_o|'\
            .format(mode,varname,fileList[fn].split('_')[-1]), plotStr, False )
plt.legend(loc=0)
plt.show()
|
mjsauvinen/P4UL
|
pyNetCDF/dataAnalysisNetCdf.py
|
Python
|
mit
| 8,428
|
[
"NetCDF"
] |
c6c2b96d1145a7a8fa197407d266557d53dc05e13858612be365641ccb5a4122
|
#! /usr/bin/env python
"""
StochPyTools
============
Written by T.R. Maarleveld, Amsterdam, The Netherlands
E-mail: tmd200@users.sourceforge.net
Last Change: June 08, 2015
"""
import re,sys,copy
from stochpy import model_dir as stochpy_model_dir
from ..modules.PyscesMiniModel import PySCeS_Connector
try:
import numpy as np
np.seterr(divide = 'ignore') # catch the divide by zero error if species start at zero
except ImportError:
print("Make sure that the NumPy module is installed")
print("This program does not work without NumPy")
print("See http://numpy.scipy.org/ for more information about NumPy")
sys.exit()
class Species():
    """Lightweight attribute container for species amounts.

    Instances start empty; species attributes are attached dynamically at
    run time by the simulation modules.
    """
    def __init__(self):
        """Create an empty store for species amounts."""
        pass

__species__ = Species()
class StochPySSA_Shared():
    """
    Functionality shared by the StochPy SSA solver implementations: model
    parsing, species/rate selection, event handling, propensity evaluation
    (via strings compiled to bytecode and executed with exec), and output
    collection.
    """

    def Parse(self,model_file,model_dir,IsTauleaping=False,IsNRM=False,IsDelayed = False,IsSMM = False,IsQuiet=False):
        """
        Parses the PySCeS MDL input file, where the model is described

        Input:
         - *model_file* filename.psc
         - *model_dir* /home/user/Stochpy/pscmodels/filename.psc
        """
        try:
            self.parse = PySCeS_Connector(model_file,model_dir,IsTauleaping = IsTauleaping, IsNRM = IsNRM,IsDelayed = IsDelayed,IsSMM = IsSMM,IsQuiet=IsQuiet) # Parse model
            if self.parse._IsConverted:
                # the connector converted e.g. an SBML input to .psc format
                model_file += '.psc'
                model_dir = stochpy_model_dir
            self.N_matrix_transpose = copy.deepcopy(self.parse.N_matrix.transpose()) # June 5th 2012
            self.X_matrixinit = copy.deepcopy(self.parse.X_matrix.transpose()[0])  # initial species copy numbers
            self.rate_names = copy.deepcopy(self.parse.Mod.__reactions__)
            self.rate_pos = {r_id:j for j,r_id in enumerate(self.rate_names)} # Determine once for each rate its position
            self.n_reactions = len(self.parse.Mod.__reactions__)
            self.n_species = len(self.parse.species)
            self.fixed_species = copy.deepcopy(self.parse.Mod.__fixed_species__)
            self.__aDict__ = copy.deepcopy(self.parse.Mod.__aDict__) # support of assignments
            self.__eDict__ = copy.deepcopy(self.parse.Mod.__eDict__) # support of events (with triggers)
            # species_names = variable species + assigned species + fixed species (in that order)
            self.species_names = copy.deepcopy(self.parse.species)
            self.species_names += [species for species in list(self.__aDict__)]
            self.species_names += [species for species in self.fixed_species]
            self.species_pos = {s_id:i for i,s_id in enumerate(self.species_names)} # Determine once for each species (variable, assigned, fixed) its position
            if IsDelayed or IsSMM:
                self.N_matrix_transpose_reactants = copy.copy(self.parse.N_matrix_reactants.transpose()) #24-10-2013
                self.N_matrix_transpose_products = copy.copy(self.parse.N_matrix_products.transpose()) #24-10-2013
            if IsSMM:
                # If depends_on and reactants don't correspond, like in a net catalyzed reaction, then force consumption and production of the catalyst.
                # Result: update of the catalyst in the tau_arrays without showing in the N_matrix
                self.products = copy.deepcopy(self.parse.product_indices) # 28-10-2013 #Indices
                for j in range(self.n_reactions):
                    self.products[j].extend( list(set(self.parse.depends_on[j]) - set(self.parse.reactant_indices[j])) )
        except Exception as er:
            print(er)
            print("Error: StochPy failed parsing input file '{0:s}' from directory '{1:s}'".format(model_file, model_dir) )
            sys.exit()

    def SpeciesSelection(self):
        """ Prepare output indices (if specific species are selected) """
        self._IsSpeciesSelection = False
        if self.settings.species_selection:
            self.sim_output_indices = [0]  # column 0 of the output holds the time
            for s_id in self.settings.species_selection:
                self.sim_output_indices.append(self.species_pos[s_id] + 1) # (time on first index)
            self.sim_output_indices.append(-1)  # last column holds the fired-reaction index
            self._IsSpeciesSelection = True

    def RateSelection(self):
        """ Prepare output indices (if specific rates are selected) """
        self._IsRateSelection = False
        if self.settings.rate_selection:
            self.rate_output_indices = [0]  # column 0 of the propensity output holds the time
            for r_id in self.settings.rate_selection:
                self.rate_output_indices.append(self.rate_pos[r_id] + 1) # (time on first index)
            self._IsRateSelection = True

    def SetEvents(self):
        """ Initialize events: rewrite each event's code string to index the X_matrix and pre-compile it """
        self.__events__ = copy.deepcopy(self.parse.Mod.__events__) # deepcopy, very important! Augustus 21, 2014
        self._IsPerformEvent = False
        for ev in self.__events__:
            for s_id in sorted(self.species_names, reverse=True): # makes sure that the longest identifiers are replaced first
                if s_id not in self.fixed_species:
                    ev.code_string = ev.code_string.replace('self.mod.{0:s}'.format(s_id),'X_matrix[{0:d}]'.format(self.species_pos[s_id]) )
            ev.xcode = compile("self.state = {0:s}".format(ev.code_string),'event{0}'.format(ev),'exec')

    def Propensities(self,IsTauleaping=False):
        """
        Determines the propensities to fire for each reaction at the current time point. At t=0, all the rate equations are compiled.

        Input:
         - *IsTauleaping* (boolean) [default = False]
        """
        if self._IsInitial:
            # first call: build and compile one code object evaluating all rate equations
            code_str = self.volume_code + '\n' # 27-01-2014
            self.sim_a_mu = np.zeros([self.n_reactions]) # Initialize a(mu)
            for i in range(self.n_reactions):
                code_str += "r_vec[{0:d}]={1}\n".format(i,self.parse.propensities[i])
            self.req_eval_code = compile(code_str,"RateEqEvaluationCode","exec")
            [setattr(__species__,self.parse.species[s],self.X_matrix[s]) for s in range(self.n_species)] # Set species quantities
            [setattr(__species__,self.fixed_species[s],self.fixed_species_amount[s]) for s in range(len(self.fixed_species))]
            self._IsInitial = False
            #print(code_str)
        else:
            if not IsTauleaping:
                # only the species touched by the last reaction need refreshing
                [setattr(__species__,self.parse.species[s],self.X_matrix[s]) for s in self.species_to_update]
            else:
                [setattr(__species__,self.parse.species[s],self.X_matrix[s]) for s in range(self.n_species)] # Set species quantities
        self.rateFunc(self.req_eval_code,self.sim_a_mu) # Calc. Propensities
        assert self.sim_a_mu.min() >= 0, "Error: Negative propensities are found. Make sure that your rate equations are defined correctly!"
        # NOTE(review): abs() after asserting non-negativity looks redundant;
        # presumably a belt-and-braces guard against tiny negative round-off — confirm
        self.sim_a_mu = abs(self.sim_a_mu)
        self.sim_a_0 = self.sim_a_mu.sum()

    def BuildPropensityCodes(self, propensities = None): # 21-11-2013
        """
        Makes a list of compiled propensity codes for each reaction. If a reaction fires, its code is executed.

        Input:
         - *propensities*: optional argument for providing the propensities that should be pre-compiled. If none, *self.propensities* is used.
        """
        #Note2: This assumes that own reaction index is already inserted in the dep_graph.
        if not propensities: #26-11-2013
            propensities = self.parse.propensities
        self.propensity_codes = []
        for n,dependencies in enumerate(self.parse.dep_graph):
            # per-reaction code: only the propensities that depend on reaction n
            code_str = self.volume_code + '\n'
            code_str += '\n'.join(['r_vec[{0:d}]={1:s}'.format(i,propensities[i]) for i in dependencies])
            self.propensity_codes.append(compile(code_str,"PropensityEvalCode_{0}".format(n+1),"exec"))
        # extra final entry: re-evaluates all propensities at once
        code_str_all = self.volume_code + '\n'
        code_str_all += '\n'.join(['r_vec[{0:d}]={1:s}'.format(i,propensity) for i,propensity in enumerate(propensities)])
        self.propensity_codes.append(compile(code_str_all,"PropensityEvalAllCode","exec"))

    def HandleEvents(self,IsTauleapingStep=False):
        """
        Event handling

        We distinguish two types of events:
        1. time events where we reset the simulation time to the trigger time
        2. trigger events which can involve species copy numbers, ..., ..., and also time.
        """
        self._IsPerformEvent = False
        # NOTE(review): self.__events__ can be mutated (remove) while being
        # iterated below; events after a removed one may be skipped this pass — confirm intended
        for ev in self.__events__:
            IsTrigger = ev(self.sim_t,self.X_matrix)
            IsModify = False
            if IsTrigger:
                if '_TIME_' in ev.symbols and len(ev.symbols) == 1: # pure time event
                    n = re.search("\d*\.\d+|\d+",ev.formula)  # extract the trigger time from the formula
                    ev.reset()
                    self._IsTimeEvent = True # 10-04-2014
                    if not ev(10**-99,self.X_matrix): # _TIME_ > 3.0
                        # rewind the clock to the exact trigger time and undo the overshooting step
                        self.sim_t = float(n.group(0))
                        self.__events__.remove(ev)
                        IsModify = True
                        if np.isnan(self.reaction_index): # reaction_index = nan, ignore nothing happened (probably the end time is reached)
                            pass
                        elif not IsTauleapingStep:
                            self.X_matrix -= self.N_matrix_transpose[self.reaction_index] # reset reaction
                        elif IsTauleapingStep:
                            self.X_matrix -= np.dot(self.parse.N_matrix,self.K_vector).ravel() # reset reactions
                    else: # _TIME < 3.0, these can fire as long as it's valid
                        IsModify = True
                        ev.reset()
                else: # Trigger event
                    IsModify = True
            if IsModify:
                # apply the event's assignments to variable and fixed species
                for s_id in list(self.__eDict__[ev.name]['assignments']):
                    if s_id not in self.fixed_species:
                        s_index = self.species_pos[s_id]
                        try:
                            self.X_matrix[s_index] = float(int(self.__eDict__[ev.name]['assignments'][s_id])) # convert to int
                        except ValueError:
                            raise ValueError("Invalid assignment '{0:s}' for identifier {1:s}".format(self.__eDict__[ev.name]['assignments'][s_id],s_id))
                    else:
                        s_index = self.fixed_species.index(s_id)
                        self.fixed_species_amount[s_index] = float(self.__eDict__[ev.name]['assignments'][s_id]) # march 11, 2015: do not convert to integer, because it could be a parameter which does not have to be an integer
                        setattr(__species__,s_id, self.fixed_species_amount[s_index])
                self._IsPerformEvent = True # SBML event
                self.reaction_index = np.nan  # mark the output row as event-driven rather than reaction-driven

    def AssignmentRules(self):
        """
        Builds the assignment rules # updated version 06/08/14
        http://sbml.org/Software/libSBML/docs/java-api/org/sbml/libsbml/AssignmentRule.html
        """
        code_string = """"""
        if self.sim_t == 0:
            # first call: determine which species-matrix indices feed the assignment formulas
            self.assignment_labels = list(self.__aDict__)
            self.assignment_species = np.zeros(len(self.__aDict__))
            self._assignment_rules = [] # indices of species matrix species used for assignments
            for s_id in self.parse.species:
                for assign_species in list(self.__aDict__):
                    if s_id in self.__aDict__[assign_species]['formula']: # if 'normal' species in assignment relationship
                        index = self.species_pos[s_id]
                        if index not in self._assignment_rules:
                            self._assignment_rules.append(index)
        # bind current species values, then evaluate each assignment formula
        for index in self._assignment_rules:
            species_value = self.X_matrix[index]
            code_string += "{0:s}={1}\n".format(self.parse.species[index],species_value)
        for i,species in enumerate(self.__aDict__):
            code_string += "self.assignment_species[{0:d}]={1}\n".format(i,self.__aDict__[species]['formula'])
        self.rateFunc(code_string,self.assignment_species)

    def rateFunc(self,rate_eval_code,r_vec):
        """
        Calculate propensities from the compiled rate equations

        Input:
         - *rate_eval_code* compiled rate equations
         - *r_vec* output for the calculated propensities
        """
        try:
            # the compiled code writes its results into r_vec in place
            exec(rate_eval_code)
        except Exception as er:
            print(er)
            print("Error: Propensities cannot be determined. Please check if all variable species amounts are initialized")
            sys.exit()

    def Initial_Conditions(self,IsTauleaping = False):
        """ This function initiates the output format with the initial concentrations """
        if self._IsTrackPropensities:
            output_init = self.sim_a_mu.tolist()
            output_init.insert(0,self.sim_t)
            if self._IsRateSelection:
                output_init = [output_init[j] for j in self.rate_output_indices]
            self.propensities_output.append(output_init)
        # row layout: time, variable species, assigned species, fixed species[, fired reaction]
        output_init = [self.sim_t]
        for init in self.X_matrix: # Output at t = 0
            assert init >= 0, "Error: StochPy detected (initial) negative species amounts."
            output_init.append(int(init))
        if self.__aDict__ != {}:
            self.AssignmentRules()
            output_init += [value for value in self.assignment_species]
        for amount in self.fixed_species_amount:
            output_init.append(amount)
        if not IsTauleaping:
            output_init.append(np.NAN)  # no reaction has fired yet
        if self._IsSpeciesSelection:
            output_init = [output_init[i] for i in self.sim_output_indices]
        self.sim_output.append(output_init)
        self.V_output = [self._current_volume] # May 26, 2015

    def GenerateOutput(self,IsTauleaping = False,completion_delayed = False):
        """
        Add data of current state (species copy numbers, volume and propensities) to the output.

        Input:
         - *IsTauleaping* (boolean) [default = False]
         - *completion_delayed* (boolean) [default = False]

        Different output is generated for the tauleaping method and if there are completion delays
        """
        if completion_delayed:
            r_index = - (self.reaction_index + 1) # Completion reaction = - reaction index
        if not completion_delayed and not IsTauleaping:
            r_index = self.reaction_index + 1 # Initiation reaction = reaction index
        timestep_output = self.X_matrix.tolist()
        timestep_output += [amount for amount in self.fixed_species_amount]
        if self.__aDict__ != {}:
            self.AssignmentRules()
            timestep_output += [value for value in self.assignment_species]
        timestep_output.insert(0,self.sim_t)
        if not IsTauleaping:
            if not self._IsPerformEvent:
                timestep_output.append(r_index)
            else:
                timestep_output.append(np.nan)  # event steps carry no reaction index
        if self._IsSpeciesSelection:
            timestep_output = [timestep_output[i] for i in self.sim_output_indices]
        self.sim_output.append(timestep_output)
        self.V_output.append(self._current_volume)
        if self._IsTrackPropensities:
            output_step = self.sim_a_mu.tolist()
            output_step.insert(0,self.sim_t)
            if self._IsRateSelection:
                output_step = [output_step[j] for j in self.rate_output_indices]
            self.propensities_output.append(output_step)
|
SystemsBioinformatics/stochpy
|
stochpy/implementations/StochPyTools.py
|
Python
|
gpl-3.0
| 17,061
|
[
"PySCeS"
] |
765c373f94fdf00791b12d180133cdcf4f5562431c7e8adc4f66bd145a60b720
|
#!/usr/bin/env python
'''
Force the FCI solver of CASSCF solving particular spin state.
'''
from pyscf import scf
from pyscf import gto
from pyscf import mcscf
mol = gto.M(
atom = '''
8 0 0 0
8 0 0 1.1''',
basis = 'ccpvdz',
symmetry = True,
spin = 2,
)
mf = scf.RHF(mol)
mf.kernel()
# Specify CI wfn spatial symmetry by assigning fcisolver.wfnsym
mc = mcscf.CASSCF(mf, 8, 12)
mc.fcisolver.wfnsym = 'A2u'
mc.kernel()
print('Triplet Sigma_u^- %.15g ref = -149.383495797891' % mc.e_tot)
# Specify CI wfn spatial symmetry and spin symmetry
mc.fix_spin_(ss=6) # Quintet, ss = S*(S+1) = 6
mc.fcisolver.wfnsym = 'A2u'
mc.kernel()
print('Quintet Sigma_u^- %.15g ref = -148.920732172378' % mc.e_tot)
#
# Similarly, you can get ground state wfn of triplet Sz=0
#
mc = mcscf.CASSCF(mf, 8, 12)#(6,6))
#mc.fcisolver = fci.direct_spin1_symm.FCI(mol)
#fci.addons.fix_spin_(mc.fcisolver, ss=2)
#mc.fcisolver.wfnsym = 'A2g'
mc.kernel()
print('Triplet Sigma_g^- %.15g ref = -149.688656224059' % mc.e_tot)
#
# In the following example, without fix_spin_ decoration, it's probably unable
# to converge to the correct spin state.
#
mol = gto.M(
atom = 'Mn 0 0 0; Mn 0 0 2.5',
basis = 'ccpvdz',
symmetry = 1,
)
mf = scf.RHF(mol)
mf.set(level_shift=0.4).run()
mc = mcscf.CASCI(mf, 12, 14)
mc.fcisolver.max_cycle = 100
mo = mc.sort_mo_by_irrep({'A1g': 2, 'A1u': 2,
'E1uy': 1, 'E1ux': 1, 'E1gy': 1, 'E1gx': 1,
'E2uy': 1, 'E2ux': 1, 'E2gy': 1, 'E2gx': 1},
{'A1g': 5, 'A1u': 5,
'E1uy': 2, 'E1ux': 2, 'E1gy': 2, 'E1gx': 2})
mc.kernel(mo)
mc.fix_spin_(shift=.5, ss=0)
mc.kernel(mo)
|
gkc1000/pyscf
|
examples/mcscf/18-spatial_spin_symmetry.py
|
Python
|
apache-2.0
| 1,718
|
[
"PySCF"
] |
41a0cf729dc465ed25ee118e0f266c8387fadc03d6546920562cd6391c4b4a1c
|
#!/usr/bin/python
import itk
from sys import argv
from optparse import OptionParser, OptionGroup
class unsharpMaskImageFilter():
    """
    Mini ITK pipeline implementing unsharp masking:
    output = input + ammount * (input - gaussian_smooth(input)).

    The member filters are kept as attributes so the ITK pipeline objects
    stay alive for the lifetime of this wrapper.
    """

    def __init__(self, InputImage, sigmaArray, ammount):
        """
        Simple workflow implementing unsharp masking.

        :param InputImage: ITK image to sharpen
        :param sigmaArray: per-axis Gaussian smoothing sigmas
        :param ammount: scale factor applied to the high-frequency residue
        """
        im = itk.image(InputImage)
        InType = itk.class_(im)  # deduce the templated image type from the instance
        self.gaussianSmooth = itk.SmoothingRecursiveGaussianImageFilter[InType,InType].New(\
                InputImage,
                SigmaArray = sigmaArray)
        # high-frequency component: input - smoothed
        self.substract = itk.SubtractImageFilter[InType,InType,InType].New(\
                Input1 = InputImage,
                Input2 = self.gaussianSmooth.GetOutput())
        # scale the residue by 'ammount'
        self.shiftScale = itk.ShiftScaleImageFilter[InType,InType].New(\
                Input = self.substract.GetOutput(),
                Scale = ammount,
                Shift = 0)
        # add the scaled residue back onto the original image
        self.addFilter = itk.AddImageFilter[InType,InType,InType].New(\
                Input1 = self.shiftScale.GetOutput(),
                Input2 = InputImage)

    def GetOutput(self):
        """Run the pipeline and return the sharpened ITK image."""
        self.addFilter.Update()
        return self.addFilter.GetOutput()
def launchFilterMultichannel(options, args):
    """
    Multichannel unsharp-mask workflow: the grayscale workflow applied to each
    color channel separately. This function is limited to 3D RGB (unsigned
    char) images — the reader and writer types are hard-coded to RGBUC3.

    Fixes over the original: removed the dead `writer = ...New()` that was
    immediately shadowed before use, removed the unused pixel-type locals, and
    collapsed the triplicated per-channel wiring into a loop.

    :param options: parsed command-line options (inputFile, outputFile,
                    imageDim, sigmaArray, unsharpAmmount)
    :param args: positional command-line arguments (unused)
    """
    imageDim = options.imageDim
    ScalarImageType = itk.Image[itk.UC, imageDim]
    MCImageType = itk.Image.RGBUC3

    reader = itk.ImageFileReader.IRGBUC3.New(FileName = options.inputFile)

    # Split the RGB image apart, sharpen each channel with the grayscale
    # pipeline. Keep the extract filters referenced so the ITK pipeline
    # objects are not garbage collected before Update().
    extractors = []
    sharpened = []
    for channel in range(3):
        extract = itk.VectorIndexSelectionCastImageFilter[MCImageType,ScalarImageType].New(\
                Input = reader.GetOutput(),
                Index = channel)
        extractors.append(extract)
        sharpened.append(unsharpMaskImageFilter(extract.GetOutput(),
                                                sigmaArray = options.sigmaArray,
                                                ammount = options.unsharpAmmount))

    # Merge the sharpened channels back into a multichannel image.
    composeFilter = itk.ComposeImageFilter[ScalarImageType,MCImageType].New(\
            Input1 = sharpened[0].GetOutput(),
            Input2 = sharpened[1].GetOutput(),
            Input3 = sharpened[2].GetOutput())

    # And then write it
    writer = itk.ImageFileWriter[MCImageType].New(composeFilter, FileName = options.outputFile)
    writer.Update()
def launchFilterGrayscale(options, args):
    """
    Grayscale unsharp-mask workflow: read a float image, sharpen it, rescale
    the intensities to the 8-bit range [0, 255] and write the result.

    :param options: parsed command-line options (inputFile, outputFile,
                    imageDim, sigmaArray, unsharpAmmount)
    :param args: positional command-line arguments (unused)
    """
    dim = options.imageDim

    # Processing happens in float; output is cast to unsigned char.
    FloatImageType = itk.Image[itk.F, dim]
    ByteImageType = itk.Image[itk.UC, dim]

    # Read -> sharpen -> rescale to 8-bit -> write.
    reader = itk.ImageFileReader[FloatImageType].New(FileName = options.inputFile)
    sharpened = unsharpMaskImageFilter(reader.GetOutput(),
                                       sigmaArray = options.sigmaArray,
                                       ammount = options.unsharpAmmount)
    rescaler = itk.RescaleIntensityImageFilter[FloatImageType,ByteImageType].New(
        sharpened.GetOutput(), OutputMinimum=0, OutputMaximum=255)
    writer = itk.ImageFileWriter[ByteImageType].New(rescaler, FileName = options.outputFile)
    writer.Update()
def launchFilter(options, args):
    """
    Dispatch to the grayscale or multichannel workflow.

    Only 3D unsharp masking is possible here. For a 2D image a tool such as
    ImageMagick would be easier and faster.
    """
    if not options.multichannelWorkflow:
        launchFilterGrayscale(options, args)
        return
    launchFilterMultichannel(options, args)
def parseArgs():
    """
    Parse the command-line options for the unsharp-mask tool.

    Fixes over the original: the --unsharpAmmount help text was a copy-paste
    of the --sigmaArray help ("Sigma array used during gaussian smoothing");
    the previously empty help strings are filled in.

    :return: (options, args) tuple from optparse; exits with the usage text
             if either the input or the output file is missing.
    """
    usage = "python unsharpMaskFilter.py -i b.nii.gz -o c.nii.gz --sigmaArray 0.05 0.05 0.05 --unsharpAmmount 4"
    parser = OptionParser(usage = usage)
    parser.add_option('--imageDim', '-d', dest='imageDim', type='int',
            default=3, help='Dimensionality of the image (default: 3).')
    parser.add_option('--outputFile', '-o', dest='outputFile', type='str',
            default=None, help='Output image filename (required).')
    parser.add_option('--inputFile', '-i', dest='inputFile', type='str',
            default=None, help='Input image filename (required).')
    parser.add_option('--multichannelWorkflow', default=False,
            dest='multichannelWorkflow', action='store_const', const=True,
            help='Indicate that provided image is a RGB image and the RGB workflow has to be used.')
    parser.add_option('--sigmaArray', default=[1,1,1],
            type='float', nargs=3, dest='sigmaArray',
            help='Sigma array used during gaussian smoothing')
    parser.add_option('--unsharpAmmount', default=0.5,
            type='float', dest='unsharpAmmount',
            help='Scale factor applied to the high-frequency component (unsharp amount)')

    (options, args) = parser.parse_args()

    if (not options.outputFile) or (not options.inputFile):
        parser.print_help()
        exit(1)
    return (options, args)
if __name__ == '__main__':
    # Parse CLI arguments and run the selected (grayscale or RGB) workflow.
    options, args = parseArgs()
    launchFilter(options,args)
|
pmajka/poSSum
|
possum/dev_possum_unsharp_mask.py
|
Python
|
mit
| 6,096
|
[
"Gaussian"
] |
aa059905cbc277bbd15a3ff2509eff4211ef24da87fe1001af789a351d8dd579
|
# coding: utf-8
# Copyright (c) Materials Virtual Lab.
# Distributed under the terms of the BSD License.
"""
Implementation for `pmg query` CLI.
"""
import json
import re
from monty.serialization import dumpfn
from tabulate import tabulate
from pymatgen.ext.matproj import MPRester
def do_query(args):
    """
    Perform query to the Materials Project

    Depending on the flags, either writes structures to files, dumps entries
    to a serialized file, or prints a table of entries sorted by energy above
    hull.

    Args:
        args (dict): Args from argparse.
    """
    m = MPRester()
    try:
        # criteria may be a JSON dict (e.g. '{"elements": "Li"}') ...
        criteria = json.loads(args.criteria)
    except json.decoder.JSONDecodeError:
        # ... or a plain string query (e.g. a formula like 'Fe2O3')
        criteria = args.criteria

    if args.structure:
        # Write one structure file per matching material.
        count = 0
        for d in m.query(criteria, properties=["structure", "task_id"]):
            s = d["structure"]
            formula = re.sub(r"\s+", "", s.formula)  # strip spaces for the filename
            if args.structure == "poscar":
                fname = "POSCAR.%s_%s" % (d["task_id"], formula)
            else:
                fname = "%s-%s.%s" % (d["task_id"], formula, args.structure)
            s.to(filename=fname)
            count += 1
        print("%d structures written!" % count)
    elif args.entries:
        # Dump the computed entries to the given filename (format by extension).
        entries = m.get_entries(criteria)
        dumpfn(entries, args.entries)
        print("%d entries written to %s!" % (len(entries), args.entries))
    else:
        # Default: tabulate entries, sorted by energy above hull.
        props = ["e_above_hull", "spacegroup"]
        props += args.data
        entries = m.get_entries(criteria, property_data=props)
        t = []
        headers = [
            "mp-id",
            "Formula",
            "Spacegroup",
            "E/atom (eV)",
            "E above hull (eV)",
        ] + args.data
        for e in entries:
            row = [
                e.entry_id,
                e.composition.reduced_formula,
                e.data["spacegroup"]["symbol"],
                e.energy_per_atom,
                e.data["e_above_hull"],
            ]
            row += [e.data[s] for s in args.data]
            t.append(row)
        t = sorted(t, key=lambda x: x[headers.index("E above hull (eV)")])
        print(tabulate(t, headers=headers, tablefmt="pipe", floatfmt=".3f"))
|
gmatteo/pymatgen
|
pymatgen/cli/pmg_query.py
|
Python
|
mit
| 2,070
|
[
"pymatgen"
] |
27fb44d989f9188e29a625f22bb87f479db5a1c5cb7388150d583d1823ca694c
|
"""Distutils file for metagenomix."""
from setuptools import setup, find_packages
requires = ['BioPython']
setup(
name = 'metagenomix',
description = 'Metagenomic analysis pipeline',
url = 'github/pgnd-meta',
author = 'Ana Bulovic',
author_email = 'bulovic.ana@gmail.com',
license = 'MIT',
long_description = open('README.md').read(),
packages = find_packages(),
scripts = [],
package_data = {'meta.data': ['taxid2namerank', 'ncbi_tax_tree', 'NCBI.db']},
data_files = [('', ['README.md'])],
entry_points = {
'console_scripts': [
'ncbi-download = meta.data.NCBI:__download__',
'fq2fa = meta.io.seq:fq2fa'
]
},
install_requires=requires,
classifiers = [
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'License :: Freeware',
'Operating System :: Unix',
'Programming Language :: Python',
'Topic :: Scientific/Engineering :: Bio-Informatics'
],
)
|
abulovic/pgnd-meta
|
setup.py
|
Python
|
mit
| 1,033
|
[
"Biopython"
] |
ea99d08514312285a70a5e24cce2ee4168e4a6d25c127bb7acc937f915a8f270
|
#!/usr/bin/env python
from datetime import datetime, timedelta
from collections import OrderedDict
import calendar
import sys
from ecmwfapi import ECMWFDataServer
import time
from dateutil.relativedelta import *
start_time = time.time()
server = ECMWFDataServer()
def retrieve_interim(strtDate,endDate,latNorth,latSouth,lonEast,lonWest,grd,eraDir):
    """
    Retrieve ERA-Interim surface data, issuing one request (and writing one
    NetCDF file) per month between strtDate and endDate, extended by one
    month on either side so interpolation timestamps are always available.

    Fixes over the original: `xrange` and indexing into `OrderedDict().keys()`
    both fail on Python 3 (NameError / non-subscriptable dict_keys), while the
    rest of the file already uses Python 3 style print(); `range` and
    `list(...)` behave identically on Python 2 as well.

    Input:
     - *strtDate*, *endDate*  'YYYY-MM-DD' strings (or values convertible to them)
     - *latNorth*, *latSouth*, *lonEast*, *lonWest*  bounding box of the outer perimeter
     - *grd*  grid spacing in degrees
     - *eraDir*  output directory for the monthly NetCDF files
    """
    grd = str(grd)
    tol = float(grd)/2 # this tol adjusts the extent from the outer perimeter (ele.tif) to one based on grid centers (ERA).
    strtDate = str(strtDate)
    endDate = str(endDate)
    latNorth = str(float(latNorth) - tol)
    latSouth = str(float(latSouth) + tol)
    lonEast = str(float(lonEast) - tol)
    lonWest = str(float(lonWest) + tol)

    grid = str(grd) + "/" + str(grd)
    bbox = (str(latNorth) + "/" + str(lonWest) + "/" + str(latSouth) + "/" + str(lonEast))

    # download buffer of +/- 1 month to ensure all necessary timestamps are
    # there for interpolations and consistency between plevel and surf
    start = datetime.strptime(strtDate, "%Y-%m-%d") + relativedelta(months=-1)
    end = datetime.strptime(endDate, "%Y-%m-%d") + relativedelta(months=+1)
    # ordered, de-duplicated list of 'YYYY-MM' month labels covering the span
    dateList = list(OrderedDict(((start + timedelta(d)).strftime(r"%Y-%m"), None)
                                for d in range((end - start).days)))

    print("Retrieving ERA-Interim data")
    print("Bbox = " + bbox)
    print("Grid = " + grd)
    print("Start date = " , dateList[0])
    print("End date = " , dateList[-1])

    for date in dateList:
        strsplit = date.split('-' )
        year = int(strsplit[0])
        month = int(strsplit[1])
        startDate = "%04d%02d%02d" % (year, month, 1)
        numberOfDays = calendar.monthrange(year, month)[1]  # handles leap years
        lastDate = "%04d%02d%02d" % (year, month, numberOfDays)
        target = eraDir + "/interim_daily_SURF_%04d%02d.nc" % (year, month)
        requestDates = (startDate + "/TO/" + lastDate)
        interim_request(requestDates, target, grid, bbox)
def interim_request(requestDates, target, grid, bbox):
    """
    An ERA-Interim request for surface forecast data, written to NetCDF.

    Fix over the original: the request dict contained the "dataset" key twice
    (Python silently keeps only the last occurrence); the duplicate is removed.

    Input:
     - *requestDates*  'YYYYMMDD/TO/YYYYMMDD' date range string
     - *target*        output NetCDF filename
     - *grid*          'dx/dy' grid spacing string
     - *bbox*          'N/W/S/E' area string

    Change the keywords below to adapt it to your needs
    (eg to add or to remove levels, parameters, times etc).
    Request cost per day is 112 fields, 14.2326 Mbytes
    """
    server.retrieve({
        "dataset": "interim",
        "date": requestDates,
        "stream" : "oper",
        "levtype": "sfc",   # surface-level fields
        "param": "129.128/168.128/175.128/169.128/228.128/167.128/212.128",
        "step": "3/6/9/12",
        "grid": grid,
        "time": "00/12",
        "class": "ei",
        "format": "netcdf",
        "target": target,
        "type": "fc",       # forecast fields
        "area": bbox,
        'RESOL' : "AV",
    })
if __name__ == "__main__":
strtDate = str(sys.argv[1])
endDate = str(sys.argv[2])
latNorth = str(float(sys.argv[3]))
latSouth = str(float(sys.argv[4]))
lonEast = str(float(sys.argv[5]))
lonWest = str(float(sys.argv[6]))
grd = str(sys.argv[7])
eraDir = sys.argv[8]
retrieve_interim(strtDate,endDate,latNorth,latSouth,lonEast,lonWest,grd,eraDir)
print("--- %s seconds ---" % (time.time() - start_time))
|
joelfiddes/toposubv2
|
workdir/eraRetrieveSURFACE.py
|
Python
|
gpl-3.0
| 4,560
|
[
"NetCDF"
] |
6432cedaa22526d6c1adaf0607c3b4c7340fadb2f54213289f20229ef7fd5738
|
# Authors: Denis A. Engemann <denis.engemann@gmail.com>
#
# License: BSD (3-clause)
"""
Test the infomax algorithm.
Parts of this code are taken from scikit-learn
"""
import numpy as np
from numpy.testing import assert_almost_equal
from scipy import stats
from scipy import linalg
from mne.preprocessing.infomax_ import infomax
from mne.utils import requires_sklearn, run_tests_if_main
def center_and_norm(x, axis=-1):
    """ Centers and norms x **in place**

    Parameters
    -----------
    x: ndarray
        Array with an axis of observations (statistical units) measured on
        random variables.
    axis: int, optional
        Axis along which the mean and variance are calculated.
    """
    # rollaxis returns a view, so in-place ops below mutate the caller's array
    view = np.rollaxis(x, axis)
    view -= view.mean(axis=0)
    view /= view.std(axis=0)
@requires_sklearn
def test_infomax_blowup():
    """ Test the infomax algorithm blowup condition
    """
    from sklearn.decomposition import RandomizedPCA
    # scipy.stats uses the global RNG:
    np.random.seed(0)
    n_samples = 100
    # Generate two sources: a square wave and heavy-tailed Student-t noise
    s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
    s2 = stats.t.rvs(1, size=n_samples)
    s = np.c_[s1, s2].T
    center_and_norm(s)
    s1, s2 = s
    # Mixing angle
    phi = 0.6
    mixing = np.array([[np.cos(phi), np.sin(phi)],
                       [np.sin(phi), -np.cos(phi)]])
    m = np.dot(mixing, s)
    center_and_norm(m)
    # Whiten, unmix with a large learning rate (l_rate=0.1) to probe the
    # blowup/annealing path, then recover the sources
    X = RandomizedPCA(n_components=2, whiten=True).fit_transform(m.T)
    k_ = infomax(X, extended=True, l_rate=0.1)
    s_ = np.dot(k_, X.T)
    center_and_norm(s_)
    s1_, s2_ = s_
    # Check to see if the sources have been estimated
    # in the wrong order
    if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
        s2_, s1_ = s_
    # fix arbitrary sign indeterminacy of ICA components
    s1_ *= np.sign(np.dot(s1_, s1))
    s2_ *= np.sign(np.dot(s2_, s2))
    # Check that we have estimated the original sources
    assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
    assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
@requires_sklearn
def test_infomax_simple():
    """ Test the infomax algorithm on very simple data.
    """
    from sklearn.decomposition import RandomizedPCA
    rng = np.random.RandomState(0)
    # scipy.stats uses the global RNG:
    np.random.seed(0)
    n_samples = 500
    # Generate two sources: a square wave and heavy-tailed Student-t noise
    s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
    s2 = stats.t.rvs(1, size=n_samples)
    s = np.c_[s1, s2].T
    center_and_norm(s)
    s1, s2 = s
    # Mixing angle
    phi = 0.6
    mixing = np.array([[np.cos(phi), np.sin(phi)],
                       [np.sin(phi), -np.cos(phi)]])
    # exercise both the noiseless and the noisy mixture
    for add_noise in (False, True):
        m = np.dot(mixing, s)
        if add_noise:
            m += 0.1 * rng.randn(2, n_samples)
        center_and_norm(m)
        # test both extended (True) and standard (False) infomax
        algos = [True, False]
        for algo in algos:
            X = RandomizedPCA(n_components=2, whiten=True).fit_transform(m.T)
            k_ = infomax(X, extended=algo)
            s_ = np.dot(k_, X.T)
            center_and_norm(s_)
            s1_, s2_ = s_
            # Check to see if the sources have been estimated
            # in the wrong order
            if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
                s2_, s1_ = s_
            # fix arbitrary sign indeterminacy of ICA components
            s1_ *= np.sign(np.dot(s1_, s1))
            s2_ *= np.sign(np.dot(s2_, s2))
            # Check that we have estimated the original sources
            # (looser tolerance when noise was added)
            if not add_noise:
                assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
                assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
            else:
                assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1)
                assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1)
@requires_sklearn
def test_non_square_infomax():
    """ Test non-square infomax
    """
    from sklearn.decomposition import RandomizedPCA
    rng = np.random.RandomState(0)
    n_samples = 200
    # Generate two sources:
    t = np.linspace(0, 100, n_samples)
    s1 = np.sin(t)
    s2 = np.ceil(np.sin(np.pi * t))
    s = np.c_[s1, s2].T
    center_and_norm(s)
    s1, s2 = s
    # Mixing matrix: 6 observed channels from 2 sources (non-square case)
    n_observed = 6
    mixing = rng.randn(n_observed, 2)
    for add_noise in (False, True):
        m = np.dot(mixing, s)
        if add_noise:
            m += 0.1 * rng.randn(n_observed, n_samples)
        center_and_norm(m)
        # reduce back to 2 whitened components before unmixing
        pca = RandomizedPCA(n_components=2, whiten=True, random_state=rng)
        m = m.T
        m = pca.fit_transform(m)
        # we need extended since input signals are sub-gaussian
        unmixing_ = infomax(m, random_state=rng, extended=True)
        s_ = np.dot(unmixing_, m.T)
        # Check that the mixing model described in the docstring holds:
        mixing_ = linalg.pinv(unmixing_.T)
        assert_almost_equal(m, s_.T.dot(mixing_))
        center_and_norm(s_)
        s1_, s2_ = s_
        # Check to see if the sources have been estimated
        # in the wrong order
        if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
            s2_, s1_ = s_
        # fix arbitrary sign indeterminacy of ICA components
        s1_ *= np.sign(np.dot(s1_, s1))
        s2_ *= np.sign(np.dot(s2_, s2))
        # Check that we have estimated the original sources
        if not add_noise:
            assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
            assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
run_tests_if_main()  # run this module's tests when executed as a script
|
rajegannathan/grasp-lift-eeg-cat-dog-solution-updated
|
python-packages/mne-python-0.10/mne/preprocessing/tests/test_infomax.py
|
Python
|
bsd-3-clause
| 5,400
|
[
"Gaussian"
] |
0fdec7cb4af7ee78853827daaa578ad793bdd3a6ca4d0f93b3db85c772926ee8
|
###########################################################################
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
#
# This code generated (see starthinker/scripts for possible source):
# - Command: "python starthinker_ui/manage.py airflow"
#
###########################################################################
'''
--------------------------------------------------------------
Before running this Airflow module...
Install StarThinker in cloud composer ( recommended ):
From Release: pip install starthinker
From Open Source: pip install git+https://github.com/google/starthinker
Or push local code to the cloud composer plugins directory ( if pushing local code changes ):
source install/deploy.sh
4) Composer Menu
l) Install All
--------------------------------------------------------------
If any recipe task has "auth" set to "user" add user credentials:
1. Ensure an RECIPE['setup']['auth']['user'] = [User Credentials JSON]
OR
1. Visit Airflow UI > Admin > Connections.
2. Add an Entry called "starthinker_user", fill in the following fields. Last step paste JSON from authentication.
- Conn Type: Google Cloud Platform
- Project: Get from https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md
- Keyfile JSON: Get from: https://github.com/google/starthinker/blob/master/tutorials/deploy_commandline.md#optional-setup-user-credentials
--------------------------------------------------------------
If any recipe task has "auth" set to "service" add service credentials:
1. Ensure an RECIPE['setup']['auth']['service'] = [Service Credentials JSON]
OR
1. Visit Airflow UI > Admin > Connections.
2. Add an Entry called "starthinker_service", fill in the following fields. Last step paste JSON from authentication.
- Conn Type: Google Cloud Platform
- Project: Get from https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md
- Keyfile JSON: Get from: https://github.com/google/starthinker/blob/master/tutorials/cloud_service.md
--------------------------------------------------------------
CM360 Report Run
Trigger a CM report run
- Specify an account id.
- Specify either report name or report id to run.
--------------------------------------------------------------
This StarThinker DAG can be extended with any additional tasks from the following sources:
- https://google.github.io/starthinker/
- https://github.com/google/starthinker/tree/master/dags
'''
from starthinker.airflow.factory import DAG_Factory
# User-tunable recipe inputs; each key below is referenced by a
# {'field': ...} placeholder inside RECIPE and may be overridden at runtime.
INPUTS = {
  'auth_read':'user',  # Credentials used for reading data.
  'account':'',  # CM network id.
  'report_id':'',  # CM report id, empty if using name.
  'report_name':'',  # CM report name, empty if using id instead.
}
# Recipe executed by the DAG factory: a single 'dcm' task that triggers a
# CM360 report run ('report_run_only': True means no result download).
RECIPE = {
  'tasks':[
    {
      'dcm':{
        'auth':{'field':{'name':'auth_read','kind':'authentication','order':1,'default':'user','description':'Credentials used for reading data.'}},
        'report_run_only':True,
        'report':{
          'account':{'field':{'name':'account','kind':'integer','order':1,'default':'','description':'CM network id.'}},
          'report_id':{'field':{'name':'report_id','kind':'integer','order':2,'default':'','description':'CM report id, empty if using name.'}},
          'name':{'field':{'name':'report_name','kind':'string','order':3,'default':'','description':'CM report name, empty if using id instead.'}}
        }
      }
    }
  ]
}
# Build the Airflow DAG from the recipe; 'dcm_run' becomes the DAG id.
dag_maker = DAG_Factory('dcm_run', RECIPE, INPUTS)
dag = dag_maker.generate()

# When run as a script, print the equivalent StarThinker command line
# instead of registering the DAG with Airflow.
if __name__ == "__main__":
  dag_maker.print_commandline()
|
google/starthinker
|
dags/dcm_run_dag.py
|
Python
|
apache-2.0
| 4,285
|
[
"VisIt"
] |
b0305933cd000753dd323bc3a5388549c7eeffcfa02ac8dbec25d3d14dfe7125
|
# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''Morphometrics functions for neurons or neuron populations'''
import math
import numpy as np
from neurom.geom import bounding_box
from neurom.core.types import NeuriteType
from neurom.core.types import tree_type_checker as is_type
from neurom.core.dataformat import COLS
from neurom.core._neuron import iter_neurites, iter_segments
from neurom import morphmath
def neuron_population(nrns):
    '''Normalize input to a population: return the `.neurons` sequence when
    present, otherwise wrap the single neuron in a 1-tuple.'''
    if hasattr(nrns, 'neurons'):
        return nrns.neurons
    return (nrns,)
def soma_surface_area(nrn, neurite_type=NeuriteType.soma):
    '''Surface area of a neuron's soma.

    Note:
        The surface area is calculated by assuming the soma is spherical.
    '''
    assert neurite_type == NeuriteType.soma, 'Neurite type must be soma'
    radius = nrn.soma.radius
    return 4.0 * math.pi * radius * radius
def soma_surface_areas(nrn_pop, neurite_type=NeuriteType.soma):
    '''Surface areas of the somata in a population of neurons.

    Note:
        The surface area is calculated by assuming the soma is spherical.
    Note:
        A single neuron yields a single-element list with the surface
        area of its soma.
    '''
    assert neurite_type == NeuriteType.soma, 'Neurite type must be soma'
    return [soma_surface_area(neuron) for neuron in neuron_population(nrn_pop)]
def soma_radii(nrn_pop, neurite_type=NeuriteType.soma):
    '''Radii of the somata of a population of neurons.

    Note:
        A single neuron yields a single-element list with the radius
        of its soma.
    '''
    assert neurite_type == NeuriteType.soma, 'Neurite type must be soma'
    return [neuron.soma.radius for neuron in neuron_population(nrn_pop)]
def trunk_section_lengths(nrn, neurite_type=NeuriteType.all):
    '''List of lengths of trunk sections of neurites in a neuron.'''
    wanted = is_type(neurite_type)
    lengths = []
    for neurite in nrn.neurites:
        if wanted(neurite):
            lengths.append(morphmath.section_length(neurite.root_node.points))
    return lengths
def trunk_origin_radii(nrn, neurite_type=NeuriteType.all):
    '''Radii of the first trunk point of each matching neurite in a neuron.'''
    wanted = is_type(neurite_type)
    radii = []
    for neurite in nrn.neurites:
        if wanted(neurite):
            radii.append(neurite.root_node.points[0][COLS.R])
    return radii
def trunk_origin_azimuths(nrn, neurite_type=NeuriteType.all):
    '''Trunk origin azimuths of a neuron or population.

    The azimuth is the angle between the x-axis and the vector
    (initial tree point - soma center) projected on the x-z plane,
    in the range [-pi, pi] radians.
    '''
    wanted = is_type(neurite_type)

    def _azimuth(points, soma):
        '''Azimuth of one trunk: arctan2 of the z/x components.'''
        vec = morphmath.vector(points[0], soma.center)
        return np.arctan2(vec[COLS.Z], vec[COLS.X])

    azimuths = []
    for neuron in neuron_population(nrn):
        for neurite in neuron.neurites:
            if wanted(neurite):
                azimuths.append(_azimuth(neurite.root_node.points, neuron.soma))
    return azimuths
def trunk_origin_elevations(nrn, neurite_type=NeuriteType.all):
    '''Trunk origin elevations of a neuron or population.

    The elevation is the angle between the x-axis and the vector
    (initial tree point - soma center) on the x-y half-plane,
    in the range [-pi/2, pi/2] radians.

    Raises:
        ValueError: if a trunk origin coincides with the soma center.
    '''
    wanted = is_type(neurite_type)

    def _elevation(points, soma):
        '''Elevation of one trunk; rejects (near-)zero-length vectors.'''
        vec = morphmath.vector(points[0], soma.center)
        norm = np.linalg.norm(vec)
        if norm < np.finfo(type(norm)).eps:
            raise ValueError("Norm of vector between soma center and section is almost zero.")
        return np.arcsin(vec[COLS.Y] / norm)

    elevations = []
    for neuron in neuron_population(nrn):
        for neurite in neuron.neurites:
            if wanted(neurite):
                elevations.append(_elevation(neurite.root_node.points, neuron.soma))
    return elevations
def sholl_crossings(neurites, center, radii):
    '''Count Sholl crossings of neurites for each given radius.

    Args:
        neurites: neurite container (anything iter_neurites accepts) on which
            to perform the Sholl analysis
        center(point): center point from which segment distances are measured
        radii(iterable of floats): radii for which crossings will be counted

    Returns:
        Array of same length as radii, with a count of the number of crossings
        for the respective radius
    '''
    def _count_crossings(neurite, radius):
        '''Count segments of `neurite` that straddle the sphere of `radius`.'''
        # Compare squared distances so no sqrt is needed per segment.
        r2 = radius ** 2
        count = 0
        for start, end in iter_segments(neurite):
            start_dist2, end_dist2 = (morphmath.point_dist2(center, start),
                                      morphmath.point_dist2(center, end))
            # A crossing happens when r^2 lies between the two endpoint
            # distances, in either direction.
            count += int(start_dist2 <= r2 <= end_dist2 or
                         end_dist2 <= r2 <= start_dist2)
        return count

    return np.array([sum(_count_crossings(neurite, r)
                         for neurite in iter_neurites(neurites))
                     for r in radii])
def sholl_frequency(nrn, neurite_type=NeuriteType.all, step_size=10):
    '''Perform Sholl frequency calculations on a population of neurites.

    Args:
        nrn(morph): nrn or population
        neurite_type(NeuriteType): which neurites to operate on
        step_size(float): step size between Sholl radii

    Note:
        Given a neuron, the soma center is used for the concentric circles,
        which range from the soma radii, and the maximum radial distance
        in steps of `step_size`. When a population is given, the concentric
        circles range from the smallest soma radius to the largest radial neurite
        distance. Finally, each segment of the neuron is tested, so a neurite that
        bends back on itself, and crosses the same Sholl radius will get counted as
        having crossed multiple times.
    '''
    nrns = neuron_population(nrn)
    neurite_filter = is_type(neurite_type)

    # Collect (neurite, soma center) pairs and track the global radius range
    # over the whole population in a single pass.
    min_soma_edge = float('Inf')
    max_radii = 0
    neurites_list = []
    for neuron in nrns:
        neurites_list.extend(((neurites, neuron.soma.center)
                              for neurites in neuron.neurites
                              if neurite_filter(neurites)))

        min_soma_edge = min(min_soma_edge, neuron.soma.radius)
        # Bounding box gives the largest absolute coordinate, an upper bound
        # on the radial extent of the neuron.
        max_radii = max(max_radii, np.max(np.abs(bounding_box(neuron))))

    # One shared radius grid for the whole population; crossings accumulate
    # across neurons element-wise.
    radii = np.arange(min_soma_edge, max_radii + step_size, step_size)
    ret = np.zeros_like(radii)
    for neurites, center in neurites_list:
        ret += sholl_crossings(neurites, center, radii)

    return ret
|
juanchopanza/NeuroM
|
neurom/fst/_neuronfunc.py
|
Python
|
bsd-3-clause
| 8,297
|
[
"NEURON"
] |
8f60532af439ff5cc532ea2acf9f7212399f408142bfb84c1bc61d95c925780f
|
# PyVision License
#
# Copyright (c) 2006-2008 David S. Bolme
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither name of copyright holders nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from math import *
import scipy as sp
import scipy.ndimage as ndi
import numpy as np
import pyvision as pv
import cv2
def normalizeMeanStd(matrix):
    ''' TODO: deprecated please use meanStd.'''
    # Deprecated alias kept for backward compatibility: prints a notice
    # (Python 2 print statement) and delegates to meanStd.
    print '''normalizeMeanStd is deprecated. Please call as normalize.meanStd'''
    return meanStd(matrix)
def clipRange(matrix, min_val, max_val):
    ''' Clip every element of the input into the range [min_val, max_val].

    Accepts either a pv.Image or a numpy array and returns the same kind.
    (The previous docstring, "zero mean, one standard deviation", was a
    copy-paste from meanStd and did not describe this function.)
    '''
    is_image = False
    if isinstance(matrix, pv.Image):
        matrix = matrix.asMatrix2D()
        is_image = True
    # Otherwise, assume it is a numpy array.
    # np.clip applies both bounds in one vectorized pass, replacing the
    # original two mask-multiply passes with identical results.
    matrix = np.clip(matrix, min_val, max_val)
    if is_image:
        return pv.Image(matrix)
    return matrix
def meanStd(matrix):
    ''' Normalize to zero mean and unit standard deviation.

    Accepts either a pv.Image or a numpy array and returns the same kind.
    '''
    as_image = isinstance(matrix, pv.Image)
    if as_image:
        matrix = matrix.asMatrix2D()
    # Otherwise, assume it is a numpy array.
    centered = matrix - matrix.mean()
    scaled = (1.0 / centered.std()) * centered
    if as_image:
        return pv.Image(scaled)
    return scaled
def meanUnit(matrix):
    ''' Normalize to zero mean and unit Euclidean length.

    Accepts either a pv.Image or a numpy array and returns the same kind.
    '''
    as_image = isinstance(matrix, pv.Image)
    if as_image:
        matrix = matrix.asMatrix2D()
    matrix = matrix - matrix.mean()
    norm = sqrt((matrix * matrix).sum())
    # A zero matrix stays zero rather than dividing by zero.
    if norm > 0.0:
        matrix = (1.0 / norm) * matrix
    if as_image:
        return pv.Image(matrix)
    return matrix
def unit(matrix):
    ''' Scale the input to unit Euclidean length.

    Accepts either a pv.Image or a numpy array and returns the same kind.
    '''
    as_image = isinstance(matrix, pv.Image)
    if as_image:
        matrix = matrix.asMatrix2D()
    norm = sqrt((matrix * matrix).sum())
    # Clamp tiny norms to prevent divide by zero.
    norm = max(norm, 0.00001)
    matrix = (1.0 / norm) * matrix
    if as_image:
        return pv.Image(matrix)
    return matrix
def selfQuotientImage(matrix, sigma=5.0):
    '''
    Compute a self quotient image: the image divided by a Gaussian-smoothed
    copy of itself.

    Based on work by Wang et.al. "Self Quotient Image for Face Recognition" ICIP 2004
    '''
    as_image = isinstance(matrix, pv.Image)
    if as_image:
        matrix = matrix.asMatrix2D()
    assert matrix.min() >= 0
    # Offset by 1% of the max so the smoothed denominator can never be zero.
    matrix = matrix + 0.01 * matrix.max()
    smoothed = ndi.gaussian_filter(matrix, sigma)
    matrix = matrix / smoothed
    if as_image:
        return pv.Image(matrix)
    return matrix
def gaussianFilter(im, sigma):
    '''Smooth an image with a Gaussian kernel of standard deviation sigma.

    @returns: a pv.Image containing the smoothed result.
    '''
    # BUG FIX: the original body called cv.CreateImage/cv.Smooth from the
    # removed OpenCV 1.x `cv` module, which this file never imports (only
    # cv2 is imported) -- it raised NameError on every call.
    # cv2.GaussianBlur with ksize=(0, 0) derives the kernel size from sigma.
    # NOTE(review): assumes pv.Image exposes the cv2-style numpy view via
    # asOpenCV2() -- confirm against the pyvision version in use.
    cvim = cv2.GaussianBlur(im.asOpenCV2(), (0, 0), sigma)
    return pv.Image(cvim)
def highPassFilter(matrix, sigma):
    '''
    Compute a high pass filtered image, often used to reduce the effect
    of lighting.

    A low pass image is obtained by convolving with a Gaussian of radius
    sigma; subtracting it from the original leaves the high-frequency
    content, so low + high reconstructs the original image.

    @returns: high_pass_image
    '''
    as_image = isinstance(matrix, pv.Image)
    if as_image:
        matrix = matrix.asMatrix2D()
    matrix = matrix - ndi.gaussian_filter(matrix, sigma)
    if as_image:
        return pv.Image(matrix)
    return matrix
def lowPassFilter(matrix, sigma):
    '''
    Smooth the image by convolving with a Gaussian of radius sigma.
    Often used to reduce the effect of noise or of small registration
    errors.

    @returns: a pv.Image if the input was an image, otherwise a numpy
    array.
    '''
    as_image = isinstance(matrix, pv.Image)
    if as_image:
        matrix = matrix.asMatrix2D()
    matrix = ndi.gaussian_filter(matrix, sigma)
    if as_image:
        return pv.Image(matrix)
    return matrix
def bandPassFilter(matrix, sigma_low, sigma_high):
    '''
    Compute a band pass filtered image: the difference between a narrow
    Gaussian blur (sigma_high) and a wide Gaussian blur (sigma_low),
    keeping only frequencies between the two cutoffs. Often used to
    reduce the effect of lighting.

    sigma_low must be strictly greater than sigma_high.

    @returns: band_pass_image
    '''
    assert sigma_low > sigma_high
    as_image = isinstance(matrix, pv.Image)
    if as_image:
        matrix = matrix.asMatrix2D()
    narrow = ndi.gaussian_filter(matrix, sigma_high)
    wide = ndi.gaussian_filter(matrix, sigma_low)
    matrix = narrow - wide
    if as_image:
        return pv.Image(matrix)
    return matrix
|
wolfram2012/nimo
|
perception/ros_track_ssd/scripts/pyvision/other/normalize.py
|
Python
|
gpl-3.0
| 6,490
|
[
"Gaussian"
] |
a66e76fc26b6d6ac3b6ff1ccb54d6ca6ba9a3ecbb6a120a8b3bebfd0f660794b
|
# Orca
#
# Copyright 2009 Sun Microsystems Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Superclass of classes used to generate presentations for objects."""
__id__ = "$Id:$"
__version__ = "$Revision:$"
__date__ = "$Date:$"
__copyright__ = "Copyright (c) 2009 Sun Microsystems Inc."
__license__ = "LGPL"
import sys
import time
import traceback
import pyatspi
from . import braille
from . import debug
from . import messages
from . import settings
from .orca_i18n import _ # for gettext support
import collections
def _formatExceptionInfo(maxTBlevel=5):
cla, exc, trbk = sys.exc_info()
excName = cla.__name__
try:
excArgs = exc.args
except KeyError:
excArgs = "<no args>"
excTb = traceback.format_tb(trbk, maxTBlevel)
return (excName, excArgs, excTb)
# [[[WDW - general note -- for all the _generate* methods, it would be great if
# we could return an empty array if we can determine the method does not
# apply to the object. This would allow us to reduce the number of strings
# needed in formatting.py.]]]

# Prefix shared by all generator methods; Generator.__init__ scans dir(self)
# for attributes starting with this string to build its dispatch table.
#
METHOD_PREFIX = "_generate"
class Generator:
"""Takes accessible objects and generates a presentation for those
objects. See the generate method, which is the primary entry
point."""
# pylint: disable-msg=W0142
def __init__(self, script, mode):
# pylint: disable-msg=W0108
self._mode = mode
self._script = script
self._methodsDict = {}
for method in \
[z for z in [getattr(self, y).__get__(self, self.__class__) for y in [x for x in dir(self) if x.startswith(METHOD_PREFIX)]] if isinstance(z, collections.Callable)]:
name = method.__name__[len(METHOD_PREFIX):]
name = name[0].lower() + name[1:]
self._methodsDict[name] = method
self._verifyFormatting()
def _addGlobals(self, globalsDict):
"""Other things to make available from the formatting string.
"""
globalsDict['obj'] = None
globalsDict['role'] = None
globalsDict['pyatspi'] = pyatspi
    def _verifyFormatting(self):
        """Sanity-check every formatting string for this mode by eval'ing it
        against stub (empty-list) generator results. Purely diagnostic: it
        logs problems but does not affect Orca's behavior."""
        # Verify the formatting strings are OK. This is only
        # for verification and does not effect the function of
        # Orca at all.

        # Populate the entire globals with empty arrays
        # for the results of all the legal method names.
        #
        globalsDict = {}
        for key in list(self._methodsDict.keys()):
            globalsDict[key] = []
        self._addGlobals(globalsDict)

        for roleKey in self._script.formatting[self._mode]:
            for key in ["focused", "unfocused"]:
                try:
                    evalString = \
                        self._script.formatting[self._mode][roleKey][key]
                except:
                    # Not every role defines both focused and unfocused.
                    continue
                else:
                    if not evalString:
                        # It's legal to have an empty string.
                        #
                        continue
                    # Repeatedly eval the string; each NameError names a
                    # generator method we stub out and retry, until the
                    # string evaluates cleanly or fails for another reason.
                    while True:
                        try:
                            eval(evalString, globalsDict)
                            break
                        except NameError:
                            info = _formatExceptionInfo()
                            # Extract the missing identifier from the
                            # NameError message text.
                            arg = info[1][0]
                            arg = arg.replace("name '", "")
                            arg = arg.replace("' is not defined", "")
                            if arg not in self._methodsDict:
                                debug.printException(debug.LEVEL_SEVERE)
                                debug.println(
                                    debug.LEVEL_SEVERE,
                                    "Unable to find function for '%s'\n" % arg)
                            globalsDict[arg] = []
                        except:
                            debug.printException(debug.LEVEL_SEVERE)
                            debug.println(
                                debug.LEVEL_SEVERE,
                                "While processing '%s' '%s' '%s' '%s'" \
                                % (roleKey, key, evalString, globalsDict))
                            break
def _overrideRole(self, newRole, args):
"""Convenience method to allow you to temporarily override the role in
the args dictionary. This changes the role in args ags
returns the old role so you can pass it back to _restoreRole.
"""
oldRole = args.get('role', None)
args['role'] = newRole
return oldRole
def _restoreRole(self, oldRole, args):
"""Convenience method to restore the old role back in the args
dictionary. The oldRole should have been obtained from
_overrideRole. If oldRole is None, then the 'role' key/value
pair will be deleted from args.
"""
if oldRole:
args['role'] = oldRole
else:
del args['role']
    def generate(self, obj, **args):
        """Returns an array of strings (and possibly voice and audio
        specifications) that represent the complete presentation for the
        object.  The presentation to be generated depends highly upon the
        formatting strings in formatting.py.

        args is a dictionary that may contain any of the following:
        - alreadyFocused: if True, we're getting an object
          that previously had focus
        - priorObj: if set, represents the object that had focus before
          this object
        - includeContext: boolean (default=True) which says whether
          the context for an object should be included as a prefix
          and suffix
        - role: a role to override the object's role
        - formatType: the type of formatting, such as
          'focused', 'basicWhereAmI', etc.
        - forceMnemonic: boolean (default=False) which says if we
          should ignore the settings.enableMnemonicSpeaking setting
        - forceTutorial: boolean (default=False) which says if we
          should force a tutorial to be spoken or not
        """
        startTime = time.time()
        result = []
        globalsDict = {}
        self._addGlobals(globalsDict)
        globalsDict['obj'] = obj
        try:
            globalsDict['role'] = args.get('role', obj.getRole())
        except:
            # The accessible may have died; nothing we can present.
            msg = 'Cannot generate presentation for: %s. Aborting' % obj
            debug.println(debug.LEVEL_FINEST, msg)
            return result
        try:
            # We sometimes want to override the role.  We'll keep the
            # role in the args dictionary as a means to let us do so.
            #
            args['role'] = globalsDict['role']

            # We loop through the format string, catching each error
            # as we go.  Each error should always be a NameError,
            # where the name is the name of one of our generator
            # functions.  When we encounter this, we call the function
            # and get its results, placing them in the globals for the
            # the call to eval.
            #
            args['mode'] = self._mode
            if not args.get('formatType', None):
                if args.get('alreadyFocused', False):
                    args['formatType'] = 'focused'
                else:
                    args['formatType'] = 'unfocused'

            formatting = self._script.formatting.getFormat(**args)

            # Add in the context if this is the first time
            # we've been called.
            #
            if not args.get('recursing', False):
                if args.get('includeContext', True):
                    prefix = self._script.formatting.getPrefix(**args)
                    suffix = self._script.formatting.getSuffix(**args)
                    formatting = '%s + %s + %s' % (prefix, formatting, suffix)
                args['recursing'] = True
                firstTimeCalled = True
            else:
                firstTimeCalled = False

            details = debug.getAccessibleDetails(debug.LEVEL_ALL, obj)
            duration = "%.4f" % (time.time() - startTime)
            debug.println(debug.LEVEL_ALL, "\nPREPARATION TIME: %s" % duration)
            debug.println(
                debug.LEVEL_ALL,
                "generate %s for %s %s (args=%s) using '%s'" \
                % (self._mode,
                   args['formatType'],
                   details,
                   repr(args),
                   formatting))

            assert(formatting)
            # Eval-retry loop: each NameError identifies a generator
            # method whose result has not been computed yet; compute it,
            # store it in globalsDict, and retry until the format string
            # evaluates cleanly (see _verifyFormatting for the same idiom).
            while True:
                currentTime = time.time()
                try:
                    result = eval(formatting, globalsDict)
                    break
                except NameError:
                    result = []
                    info = _formatExceptionInfo()
                    # Extract the missing identifier from the NameError
                    # message text.
                    arg = info[1][0]
                    arg = arg.replace("name '", "")
                    arg = arg.replace("' is not defined", "")
                    if arg not in self._methodsDict:
                        debug.printException(debug.LEVEL_SEVERE)
                        debug.println(
                            debug.LEVEL_SEVERE,
                            "Unable to find function for '%s'\n" % arg)
                        break
                    globalsDict[arg] = self._methodsDict[arg](obj, **args)
                    duration = "%.4f" % (time.time() - currentTime)
                    debug.println(debug.LEVEL_ALL,
                                  "GENERATION TIME: %s ----> %s=%s" \
                                  % (duration, arg, repr(globalsDict[arg])))
        except:
            debug.printException(debug.LEVEL_SEVERE)
            result = []

        duration = "%.4f" % (time.time() - startTime)
        debug.println(debug.LEVEL_ALL, "COMPLETION TIME: %s" % duration)
        debug.println(debug.LEVEL_ALL, "generate %s results:" % self._mode)
        for element in result:
            debug.println(debug.LEVEL_ALL, "  %s" % element)

        return result
#####################################################################
# #
# Name, role, and label information #
# #
#####################################################################
def _generateRoleName(self, obj, **args):
"""Returns the role name for the object in an array of strings, with
the exception that the pyatspi.ROLE_UNKNOWN role will yield an
empty array. Note that a 'role' attribute in args will
override the accessible role of the obj.
"""
# Subclasses must override this.
return []
    def _generateName(self, obj, **args):
        """Returns an array of strings for use by speech and braille that
        represent the name of the object.  If the object is directly
        displaying any text, that text will be treated as the name.
        Otherwise, the accessible name of the object will be used.  If
        there is no accessible name, then the description of the
        object will be used.  This method will return an empty array
        if nothing can be found.  [[[WDW - I wonder if we should just
        have _generateName, _generateDescription,
        _generateDisplayedText, etc., that don't do any fallback.
        Then, we can allow the formatting to do the fallback (e.g.,
        'displayedText or name or description'). [[[JD to WDW - I
        needed a _generateDescription for whereAmI. :-) See below.
        """
        result = []
        name = self._script.utilities.displayedText(obj)
        if name:
            result.append(name)
        else:
            try:
                description = obj.description
            except (LookupError, RuntimeError):
                # The accessible may have died mid-query.
                return result
            if description:
                result.append(description)
        # To make the unlabeled icons in gnome-panel more accessible.
        # NOTE(review): `role` below is never read; the getRole() call is
        # kept because its LookupError/RuntimeError triggers the early
        # return -- removing the assignment would change that behavior.
        try:
            role = args.get('role', obj.getRole())
        except (LookupError, RuntimeError):
            return result
        if not result and obj.getRole() == pyatspi.ROLE_ICON \
           and obj.parent.getRole() == pyatspi.ROLE_PANEL:
            return self._generateName(obj.parent)

        return result
def _generatePlaceholderText(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the 'placeholder' text. This is typically text that
serves as a functional label and is found in a text widget until
that widget is given focus at which point the text is removed,
the assumption being that the user was able to see the text prior
to giving the widget focus.
"""
result = [x for x in obj.getAttributes() if x.startswith('placeholder-text:')]
return [x.replace('placeholder-text:', '') for x in result]
def _generateLabelAndName(self, obj, **args):
"""Returns the label and the name as an array of strings for speech
and braille. The name will only be present if the name is
different from the label.
"""
result = []
label = self._generateLabel(obj, **args)
name = self._generateName(obj, **args)
result.extend(label)
if not len(label):
result.extend(name)
elif len(name) and name[0] != label[0]:
result.extend(name)
return result
def _generateLabelOrName(self, obj, **args):
"""Returns the label as an array of strings for speech and braille.
If the label cannot be found, the name will be used instead.
If the name cannot be found, an empty array will be returned.
"""
result = []
result.extend(self._generateLabel(obj, **args))
if not result:
if obj.name and (len(obj.name)):
result.append(obj.name)
return result
def _generateDescription(self, obj, **args):
"""Returns an array of strings fo use by speech and braille that
represent the description of the object, if that description
is different from that of the name and label.
"""
result = []
label = self._script.utilities.displayedLabel(obj)
if obj.description and not obj.description in [obj.name, label]:
result.append(obj.description)
return result
def _generateLabel(self, obj, **args):
"""Returns the label for an object as an array of strings for use by
speech and braille. The label is determined by the displayedLabel
method of the script utility, and an empty array will be returned if
no label can be found.
"""
result = []
label = self._script.utilities.displayedLabel(obj)
if label:
result.append(label)
return result
#####################################################################
# #
# Image information #
# #
#####################################################################
def _generateImageDescription(self, obj, **args ):
"""Returns an array of strings for use by speech and braille that
represent the description of the image on the object, if it
exists. Otherwise, an empty array is returned.
"""
result = []
try:
image = obj.queryImage()
except NotImplementedError:
pass
else:
description = image.imageDescription
if description and len(description):
result.append(description)
return result
#####################################################################
# #
# State information #
# #
#####################################################################
def _generateAvailability(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the grayed/sensitivity/availability state of the
object, but only if it is insensitive (i.e., grayed out and
inactive). Otherwise, and empty array will be returned.
"""
result = []
if not args.get('mode', None):
args['mode'] = self._mode
args['stringType'] = 'insensitive'
if not obj.getState().contains(pyatspi.STATE_SENSITIVE):
result.append(self._script.formatting.getString(**args))
return result
def _generateRequired(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the required state of the object, but only if it is
required (i.e., it is in a dialog requesting input and the
user must give it a value). Otherwise, and empty array will
be returned.
"""
result = []
if not args.get('mode', None):
args['mode'] = self._mode
args['stringType'] = 'required'
if obj.getState().contains(pyatspi.STATE_REQUIRED) \
or (obj.getRole() == pyatspi.ROLE_RADIO_BUTTON \
and obj.parent.getState().contains(pyatspi.STATE_REQUIRED)):
result.append(self._script.formatting.getString(**args))
return result
def _generateReadOnly(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the read only state of this object, but only if it
is read only (i.e., it is a text area that cannot be edited).
"""
result = []
if not args.get('mode', None):
args['mode'] = self._mode
args['stringType'] = 'readonly'
if settings.presentReadOnlyText \
and self._script.utilities.isReadOnlyTextArea(obj):
result.append(self._script.formatting.getString(**args))
return result
def _generateCellCheckedState(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the checked state of the object. This is typically
for check boxes that are in a table. An empty array will be
returned if this is not a checkable cell.
"""
result = []
try:
action = obj.queryAction()
except NotImplementedError:
action = None
if action:
for i in range(0, action.nActions):
# Translators: this is the action name for
# the 'toggle' action. It must be the same
# string used in the *.po file for gail.
#
if action.getName(i) in ["toggle", _("toggle")]:
oldRole = self._overrideRole(pyatspi.ROLE_CHECK_BOX,
args)
result.extend(self.generate(obj, **args))
self._restoreRole(oldRole, args)
return result
def _generateCheckedState(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the checked state of the object. This is typically
for check boxes. [[[WDW - should we return an empty array if
we can guarantee we know this thing is not checkable?]]]
"""
result = []
if not args.get('mode', None):
args['mode'] = self._mode
args['stringType'] = 'checkbox'
indicators = self._script.formatting.getString(**args)
state = obj.getState()
if state.contains(pyatspi.STATE_INDETERMINATE):
result.append(indicators[2])
elif state.contains(pyatspi.STATE_CHECKED):
result.append(indicators[1])
else:
result.append(indicators[0])
return result
def _generateRadioState(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the checked state of the object. This is typically
for check boxes. [[[WDW - should we return an empty array if
we can guarantee we know this thing is not checkable?]]]
"""
result = []
if not args.get('mode', None):
args['mode'] = self._mode
args['stringType'] = 'radiobutton'
indicators = self._script.formatting.getString(**args)
state = obj.getState()
if state.contains(pyatspi.STATE_CHECKED):
result.append(indicators[1])
else:
result.append(indicators[0])
return result
def _generateChildWidget(self, obj, **args):
widgetRoles = [pyatspi.ROLE_CHECK_BOX,
pyatspi.ROLE_COMBO_BOX,
pyatspi.ROLE_PUSH_BUTTON,
pyatspi.ROLE_RADIO_BUTTON,
pyatspi.ROLE_SLIDER,
pyatspi.ROLE_TOGGLE_BUTTON]
isWidget = lambda x: x and x.getRole() in widgetRoles
# For GtkListBox, such as those found in the control center
if obj.parent and obj.parent.getRole() == pyatspi.ROLE_LIST_BOX:
widget = pyatspi.findDescendant(obj, isWidget)
if widget:
return self.generate(widget, includeContext=False)
return []
def _generateToggleState(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the checked state of the object. This is typically
for check boxes. [[[WDW - should we return an empty array if
we can guarantee we know this thing is not checkable?]]]
"""
result = []
if not args.get('mode', None):
args['mode'] = self._mode
args['stringType'] = 'togglebutton'
indicators = self._script.formatting.getString(**args)
state = obj.getState()
if state.contains(pyatspi.STATE_CHECKED) \
or state.contains(pyatspi.STATE_PRESSED):
result.append(indicators[1])
else:
result.append(indicators[0])
return result
    def _generateMenuItemCheckedState(self, obj, **args):
        """Returns an array of strings for use by speech and braille that
        represent the checked state of the menu item, only if it is
        checked. Otherwise, an empty array will be returned.
        """
        result = []
        if not args.get('mode', None):
            args['mode'] = self._mode
        # Menu items reuse the checkbox indicator strings.
        args['stringType'] = 'checkbox'
        indicators = self._script.formatting.getString(**args)
        if obj.getState().contains(pyatspi.STATE_CHECKED):
            # Translators: this represents the state of a checked menu item.
            #
            result.append(indicators[1])
        return result
    def _generateExpandableState(self, obj, **args):
        """Returns an array of strings for use by speech and braille that
        represent the expanded/collapsed state of an object, such as a
        tree node. If the object is not expandable, an empty array
        will be returned.
        """
        result = []
        if not args.get('mode', None):
            args['mode'] = self._mode
        args['stringType'] = 'expansion'
        # indicators[0] == collapsed, indicators[1] == expanded.
        indicators = self._script.formatting.getString(**args)
        state = obj.getState()
        if state.contains(pyatspi.STATE_EXPANDABLE):
            if state.contains(pyatspi.STATE_EXPANDED):
                result.append(indicators[1])
            else:
                result.append(indicators[0])
        return result
#####################################################################
# #
# Table interface information #
# #
#####################################################################
    def _generateRowHeader(self, obj, **args):
        """Returns an array of strings to be used in speech and braille that
        represent the row header for an object that is in a table, if
        it exists. Otherwise, an empty array is returned.
        """
        result = []
        # Do not return yourself as a header.
        #
        role = args.get('role', obj.getRole())
        if role in [pyatspi.ROLE_ROW_HEADER,
                    pyatspi.ROLE_TABLE_ROW_HEADER]:
            return result
        if not args.get('mode', None):
            args['mode'] = self._mode
        try:
            table = obj.parent.queryTable()
        except:
            # Parent has no table interface; nothing to present.
            pass
        else:
            index = self._script.utilities.cellIndex(obj)
            try:
                rowIndex = table.getRowAtIndex(index)
            except:
                rowIndex = -1
            if rowIndex >= 0:
                # Get the header information. In Java Swing, the
                # information is not exposed via the description
                # but is instead a header object, so we fall back
                # to that if it exists.
                #
                # [[[TODO: WDW - the more correct thing to do, I
                # think, is to look at the row header object.
                # We've been looking at the description for so
                # long, though, that we'll give the description
                # preference for now.]]]
                #
                desc = table.getRowDescription(rowIndex)
                if not desc:
                    header = table.getRowHeader(rowIndex)
                    if header:
                        desc = self._script.utilities.displayedText(header)
                if desc and len(desc):
                    text = desc
                    # In verbose speech (outside whereAmI) and in braille,
                    # append the localized "row header" role name.
                    if args['mode'] == 'speech':
                        if settings.speechVerbosityLevel \
                           == settings.VERBOSITY_LEVEL_VERBOSE \
                           and not args.get('formatType', None) \
                               in ['basicWhereAmI', 'detailedWhereAmI']:
                            text = desc + " " + self.getLocalizedRoleName(
                                obj, pyatspi.ROLE_ROW_HEADER)
                    elif args['mode'] == 'braille':
                        text = desc + " " + self.getLocalizedRoleName(
                            obj, pyatspi.ROLE_ROW_HEADER)
                    result.append(text)
        return result
def _generateColumnHeader(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) that represent the column header for an object
that is in a table, if it exists. Otherwise, an empty array
is returned.
"""
result = []
# Do not return yourself as a header.
#
try:
role = args.get('role', obj.getRole())
except:
role = None
if role in [pyatspi.ROLE_COLUMN_HEADER,
pyatspi.ROLE_TABLE_COLUMN_HEADER]:
return result
try:
table = obj.parent.queryTable()
except:
pass
else:
index = self._script.utilities.cellIndex(obj)
columnIndex = table.getColumnAtIndex(index)
if columnIndex >= 0:
# Get the header information. In Java Swing, the
# information is not exposed via the description
# but is instead a header object, so we fall back
# to that if it exists.
#
# [[[TODO: WDW - the more correct thing to do, I
# think, is to look at the column header object.
# We've been looking at the description for so
# long, though, that we'll give the description
# preference for now.]]]
#
desc = table.getColumnDescription(columnIndex)
if not desc:
header = table.getColumnHeader(columnIndex)
if header:
desc = self._script.utilities.displayedText(header)
if desc and len(desc):
text = desc
if args['mode'] == 'speech':
if settings.speechVerbosityLevel \
== settings.VERBOSITY_LEVEL_VERBOSE \
and not args.get('formatType', None) \
in ['basicWhereAmI', 'detailedWhereAmI']:
text = desc + " " + self.getLocalizedRoleName(
obj, pyatspi.ROLE_COLUMN_HEADER)
elif args['mode'] == 'braille':
text = desc + " " + self.getLocalizedRoleName(
obj, pyatspi.ROLE_COLUMN_HEADER)
result.append(text)
return result
    def _generateTableCell2ChildLabel(self, obj, **args):
        """Returns an array of strings for use by speech and braille for the
        label of a toggle in a table cell that has a special 2 child
        pattern that we run into. Otherwise, an empty array is
        returned.
        """
        result = []
        # If this table cell has 2 children and one of them has a
        # 'toggle' action and the other does not, then present this
        # as a checkbox where:
        # 1) we get the checked state from the cell with the 'toggle' action
        # 2) we get the label from the other cell.
        # See Orca bug #376015 for more details.
        #
        if obj.childCount == 2:
            cellOrder = []
            hasToggle = [False, False]
            for i, child in enumerate(obj):
                try:
                    action = child.queryAction()
                except NotImplementedError:
                    continue
                else:
                    for j in range(0, action.nActions):
                        # Translators: this is the action name for
                        # the 'toggle' action. It must be the same
                        # string used in the *.po file for gail.
                        #
                        if action.getName(j) in ["toggle", _("toggle")]:
                            hasToggle[i] = True
                            break
            if hasToggle[0] and not hasToggle[1]:
                cellOrder = [ 1, 0 ]
            elif not hasToggle[0] and hasToggle[1]:
                cellOrder = [ 0, 1 ]
            if cellOrder:
                for i in cellOrder:
                    # Present only the non-toggle child here; the toggle
                    # child is handled by _generateTableCell2ChildToggle.
                    if not hasToggle[i]:
                        result.extend(self.generate(obj[i], **args))
        return result
    def _generateTableCell2ChildToggle(self, obj, **args):
        """Returns an array of strings for use by speech and braille for the
        toggle value of a toggle in a table cell that has a special 2
        child pattern that we run into. Otherwise, an empty array is
        returned.
        """
        result = []
        # If this table cell has 2 children and one of them has a
        # 'toggle' action and the other does not, then present this
        # as a checkbox where:
        # 1) we get the checked state from the cell with the 'toggle' action
        # 2) we get the label from the other cell.
        # See Orca bug #376015 for more details.
        #
        if obj.childCount == 2:
            cellOrder = []
            hasToggle = [False, False]
            for i, child in enumerate(obj):
                try:
                    action = child.queryAction()
                except NotImplementedError:
                    continue
                else:
                    for j in range(0, action.nActions):
                        # Translators: this is the action name for
                        # the 'toggle' action. It must be the same
                        # string used in the *.po file for gail.
                        #
                        if action.getName(j) in ["toggle", _("toggle")]:
                            hasToggle[i] = True
                            break
            if hasToggle[0] and not hasToggle[1]:
                cellOrder = [ 1, 0 ]
            elif not hasToggle[0] and hasToggle[1]:
                cellOrder = [ 0, 1 ]
            if cellOrder:
                for i in cellOrder:
                    # Present only the toggle child here; the label child
                    # is handled by _generateTableCell2ChildLabel.
                    if hasToggle[i]:
                        result.extend(self.generate(obj[i], **args))
        return result
def _generateColumnHeaderIfToggleAndNoText(self, obj, **args):
"""If this table cell has a "toggle" action, and doesn't have any
label associated with it then also speak the table column
header. See Orca bug #455230 for more details.
"""
# If we're reading just a single cell in speech, the new
# header portion is going to give us this information.
#
if args['mode'] == 'speech' and not args.get('readingRow', False):
return []
result = []
try:
parentTable = obj.parent.queryTable()
except:
return result
try:
action = obj.queryAction()
label = self._script.utilities.displayedText(
self._script.utilities.realActiveDescendant(obj))
except NotImplementedError:
action = None
label = None
if action and (label == None or len(label) == 0):
index = self._script.utilities.cellIndex(obj)
column = parentTable.getColumnAtIndex(index)
for j in range(0, action.nActions):
# Translators: this is the action name for
# the 'toggle' action. It must be the same
# string used in the *.po file for gail.
#
if action.getName(j) in ["toggle",
_("toggle")]:
accHeader = \
parentTable.getColumnHeader(column)
result.append(accHeader.name)
return result
def _generateRealTableCell(self, obj, **args):
"""Orca has a feature to automatically read an entire row of a table
as the user arrows up/down the roles. This leads to
complexity in the code. This method is used to return an
array of strings for use by speech and braille for a single
table cell itself. The string, 'blank', is added for empty
cells.
"""
result = []
oldRole = self._overrideRole('REAL_ROLE_TABLE_CELL', args)
result.extend(self.generate(obj, **args))
self._restoreRole(oldRole, args)
return result
    def _generateTable(self, obj, **args):
        """Returns an array of strings for use by speech and braille to present
        the size of a table."""
        try:
            table = obj.queryTable()
        except:
            # Object has no table interface; present nothing.
            return []
        return [messages.tableSize(table.nRows, table.nColumns)]
    def _generateTableCellRow(self, obj, **args):
        """Orca has a feature to automatically read an entire row of a table
        as the user arrows up/down the roles. This leads to complexity in
        the code. This method is used to return an array of strings
        (and possibly voice and audio specifications) for an entire row
        in a table if that's what the user has requested and if the row
        has changed. Otherwise, it will return an array for just the
        current cell.
        """
        result = []
        try:
            parentTable = obj.parent.queryTable()
        except:
            parentTable = None
        isDetailedWhereAmI = args.get('formatType', None) == 'detailedWhereAmI'
        if (settings.readTableCellRow or isDetailedWhereAmI) and parentTable \
           and (not self._script.utilities.isLayoutOnly(obj.parent)):
            # NOTE(review): 'parent' is assigned but never used below.
            parent = obj.parent
            index = self._script.utilities.cellIndex(obj)
            row = parentTable.getRowAtIndex(index)
            column = parentTable.getColumnAtIndex(index)
            # This is an indication of whether we should speak all the
            # table cells (the user has moved focus up or down a row),
            # or just the current one (focus has moved left or right in
            # the same row).
            #
            presentAll = True
            if isDetailedWhereAmI:
                if parentTable.nColumns <= 1:
                    return result
            elif "lastRow" in self._script.pointOfReference \
                 and "lastColumn" in self._script.pointOfReference:
                pointOfReference = self._script.pointOfReference
                # Braille always shows the whole row; speech does so only
                # when the row changed, or when we wrapped to the first or
                # last row while staying in the same column.
                presentAll = \
                    (self._mode == 'braille') \
                    or \
                    ((pointOfReference["lastRow"] != row) \
                     or ((row == 0 or row == parentTable.nRows-1) \
                         and pointOfReference["lastColumn"] == column))
            if presentAll:
                args['readingRow'] = True
                if self._script.utilities.isTableRow(obj):
                    cells = [x for x in obj]
                else:
                    cells = [parentTable.getAccessibleAt(row, i) \
                             for i in range(parentTable.nColumns)]
                for cell in cells:
                    if not cell:
                        continue
                    state = cell.getState()
                    showing = state.contains(pyatspi.STATE_SHOWING)
                    if showing:
                        cellResult = self._generateRealTableCell(cell, **args)
                        # Separate adjacent cells with the configured
                        # delimiter when rendering braille.
                        if cellResult and result and self._mode == 'braille':
                            result.append(braille.Region(
                                settings.brailleTableCellDelimiter))
                        result.extend(cellResult)
            else:
                result.extend(self._generateRealTableCell(obj, **args))
        else:
            result.extend(self._generateRealTableCell(obj, **args))
        return result
#####################################################################
# #
# Text interface information #
# #
#####################################################################
def _generateCurrentLineText(self, obj, **args ):
"""Returns an array of strings for use by speech and braille
that represents the current line of text, if
this is a text object. [[[WDW - consider returning an empty
array if this is not a text object.]]]
"""
[text, caretOffset, startOffset] = self._script.getTextLineAtCaret(obj)
return [text]
def _generateDisplayedText(self, obj, **args ):
"""Returns an array of strings for use by speech and braille that
represents all the text being displayed by the object.
"""
displayedText = self._script.utilities.displayedText(obj)
if not displayedText:
return []
return [displayedText]
#####################################################################
# #
# Tree interface information #
# #
#####################################################################
def _generateNodeLevel(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represents the tree node level of the object, or an empty
array if the object is not a tree node.
"""
result = []
if not args.get('mode', None):
args['mode'] = self._mode
args['stringType'] = 'nodelevel'
level = self._script.utilities.nodeLevel(obj)
if level >= 0:
result.append(self._script.formatting.getString(**args)\
% (level + 1))
return result
#####################################################################
# #
# Value interface information #
# #
#####################################################################
def _generateValue(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represents the value of the object. This is typically the
numerical value, but may also be the text of the 'value'
attribute if it exists on the object. [[[WDW - we should
consider returning an empty array if there is no value.
"""
return [self._script.utilities.textForValue(obj)]
#####################################################################
# #
# Hierarchy and related dialog information #
# #
#####################################################################
def _generateApplicationName(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represents the name of the applicaton for the object.
"""
result = []
try:
result.append(obj.getApplication().name)
except:
pass
return result
def _generateNestingLevel(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the nesting level of an object in a list.
"""
result = []
if not args.get('mode', None):
args['mode'] = self._mode
args['stringType'] = 'nestinglevel'
nestingLevel = self._script.utilities.nestingLevel(obj)
if nestingLevel:
result.append(self._script.formatting.getString(**args)\
% nestingLevel)
return result
    def _generateRadioButtonGroup(self, obj, **args):
        """Returns an array of strings for use by speech and braille that
        represents the radio button group label for the object, or an
        empty array if the object has no such label.
        """
        result = []
        try:
            role = obj.getRole()
        except:
            role = None
        if role == pyatspi.ROLE_RADIO_BUTTON:
            # Prefer an explicit LABELLED_BY relation target.
            radioGroupLabel = None
            relations = obj.getRelationSet()
            for relation in relations:
                if (not radioGroupLabel) \
                    and (relation.getRelationType() \
                         == pyatspi.RELATION_LABELLED_BY):
                    radioGroupLabel = relation.getTarget(0)
                    break
            if radioGroupLabel:
                result.append(self._script.utilities.\
                                  displayedText(radioGroupLabel))
            else:
                # Fall back to the label of the nearest panel/filler
                # ancestor.
                parent = obj.parent
                while parent and (parent.parent != parent):
                    if parent.getRole() in [pyatspi.ROLE_PANEL,
                                            pyatspi.ROLE_FILLER]:
                        # NOTE(review): args are not forwarded to
                        # _generateLabelAndName here — confirm its
                        # defaults are appropriate for this call.
                        label = self._generateLabelAndName(parent)
                        if label:
                            result.extend(label)
                            break
                    parent = parent.parent
        return result
def _generateRealActiveDescendantDisplayedText(self, obj, **args ):
"""Objects, such as tables and trees, can represent individual cells
via a complicated nested hierarchy. This method returns an
array of strings for use by speech and braille that represents
the text actually being painted in the cell, if it can be
found. Otherwise, an empty array is returned.
"""
result = []
text = self._script.utilities.displayedText(
self._script.utilities.realActiveDescendant(obj))
if text:
result = [text]
return result
    def _generateRealActiveDescendantRoleName(self, obj, **args ):
        """Objects, such as tables and trees, can represent individual cells
        via a complicated nested hierarchy. This method returns an
        array of strings for use by speech and braille that represents
        the role of the object actually being painted in the cell.
        """
        rad = self._script.utilities.realActiveDescendant(obj)
        # Present the descendant's own role rather than the cell's.
        args['role'] = rad.getRole()
        return self._generateRoleName(rad, **args)
def _generateNamedContainingPanel(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represents the nearest ancestor of an object which is a named panel.
"""
result = []
parent = obj.parent
while parent and (parent.parent != parent):
if parent.getRole() == pyatspi.ROLE_PANEL:
label = self._generateLabelAndName(parent)
if label:
result.extend(label)
break
parent = parent.parent
return result
|
h4ck3rm1k3/orca-sonar
|
src/orca/generator.py
|
Python
|
lgpl-2.1
| 46,495
|
[
"ORCA"
] |
a1e0426a1af61357f2158e596c66bcdf98e6e672a5ba728909079884dba9f9c0
|
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import http.client as http
import glance_store
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
import webob
from glance.api import policy
from glance.api.v2 import policy as api_policy
from glance.common import exception
from glance.common import timeutils
from glance.common import utils
from glance.common import wsgi
import glance.db
import glance.gateway
from glance.i18n import _
import glance.notifier
import glance.schema
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class ImageMembersController(object):
    """API controller for the /v2/images/{image_id}/members resource."""

    def __init__(self, db_api=None, policy_enforcer=None, notifier=None,
                 store_api=None):
        # Each collaborator defaults to the standard glance implementation
        # when not injected (e.g. by tests).
        self.db_api = db_api or glance.db.get_api()
        self.policy = policy_enforcer or policy.Enforcer()
        self.notifier = notifier or glance.notifier.Notifier()
        self.store_api = store_api or glance_store
        self.gateway = glance.gateway.Gateway(self.db_api, self.store_api,
                                              self.notifier, self.policy)

    def _get_member_repo(self, req, image):
        """Return the member repository for *image*, translating a
        Forbidden error into an HTTP 403 response.
        """
        try:
            return self.gateway.get_member_repo(image, req.context,
                                                authorization_layer=False)
        except exception.Forbidden as e:
            msg = (_("Error fetching members of image %(image_id)s: "
                     "%(inner_msg)s") % {"image_id": image.image_id,
                                         "inner_msg": e.msg})
            LOG.warning(msg)
            raise webob.exc.HTTPForbidden(explanation=msg)

    def _lookup_image(self, req, image_id):
        """Fetch the image, mapping NotFound to 404 and Forbidden to 403."""
        image_repo = self.gateway.get_repo(
            req.context, authorization_layer=False)
        try:
            return image_repo.get(image_id)
        except exception.NotFound:
            msg = _("Image %s not found.") % image_id
            LOG.warning(msg)
            raise webob.exc.HTTPNotFound(explanation=msg)
        except exception.Forbidden:
            msg = _("You are not authorized to lookup image %s.") % image_id
            LOG.warning(msg)
            raise webob.exc.HTTPForbidden(explanation=msg)

    def _check_visibility_and_ownership(self, context, image,
                                        ownership_check=None):
        """Ensure *image* is shared and, for the given *ownership_check*
        ('create', 'update' or 'delete'), that the request context may
        perform that operation. Raises exception.Forbidden otherwise.
        """
        if image.visibility != 'shared':
            message = _("Only shared images have members.")
            raise exception.Forbidden(message)
        # NOTE(abhishekk): Ownership check only needs to performed while
        # adding new members to image
        owner = image.owner
        if not CONF.enforce_secure_rbac and not context.is_admin:
            if ownership_check == 'create':
                # Only the image owner may add members.
                if owner is None or owner != context.owner:
                    message = _("You are not permitted to create image "
                                "members for the image.")
                    raise exception.Forbidden(message)
            elif ownership_check == 'update':
                # Only the member (not the image owner) may change status.
                if context.owner == owner:
                    message = _("You are not permitted to modify 'status' "
                                "on this image member.")
                    raise exception.Forbidden(message)
            elif ownership_check == 'delete':
                # Only the image owner may remove members.
                if context.owner != owner:
                    message = _("You cannot delete image member.")
                    raise exception.Forbidden(message)

    def _lookup_member(self, req, image, member_id, member_repo=None):
        """Fetch a single member record, mapping NotFound to 404 and
        Forbidden to 403; reuses *member_repo* when the caller already
        has one.
        """
        if not member_repo:
            member_repo = self._get_member_repo(req, image)
        try:
            # NOTE(abhishekk): This will verify whether user has permission
            # to view image member or not.
            api_policy.MemberAPIPolicy(
                req.context,
                image,
                enforcer=self.policy).get_member()
            return member_repo.get(member_id)
        except (exception.NotFound):
            msg = (_("%(m_id)s not found in the member list of the image "
                     "%(i_id)s.") % {"m_id": member_id,
                                     "i_id": image.image_id})
            LOG.warning(msg)
            raise webob.exc.HTTPNotFound(explanation=msg)
        except exception.Forbidden:
            msg = (_("You are not authorized to lookup the members of the "
                     "image %s.") % image.image_id)
            LOG.warning(msg)
            raise webob.exc.HTTPForbidden(explanation=msg)

    @utils.mutating
    def create(self, req, image_id, member_id):
        """
        Adds a membership to the image.
        :param req: the Request object coming from the wsgi layer
        :param image_id: the image identifier
        :param member_id: the member identifier
        :returns: The response body is a mapping of the following form

        ::

            {'member_id': <MEMBER>,
             'image_id': <IMAGE>,
             'status': <MEMBER_STATUS>
             'created_at': ..,
             'updated_at': ..}

        """
        try:
            image = self._lookup_image(req, image_id)
            # Check for image visibility and ownership before getting member
            # repo
            # NOTE(abhishekk): Once we support RBAC policies we can remove
            # ownership check from here. This is added here just to maintain
            # behavior with and without auth layer.
            self._check_visibility_and_ownership(req.context, image,
                                                 ownership_check='create')
            member_repo = self._get_member_repo(req, image)
            # NOTE(abhishekk): This will verify whether user has permission
            # to accept membership or not.
            api_policy.MemberAPIPolicy(
                req.context,
                image,
                enforcer=self.policy).add_member()
            image_member_factory = self.gateway.get_image_member_factory(
                req.context, authorization_layer=False)
            new_member = image_member_factory.new_image_member(image,
                                                               member_id)
            member_repo.add(new_member)
            return new_member
        except exception.Invalid as e:
            raise webob.exc.HTTPBadRequest(explanation=e.msg)
        except exception.Forbidden:
            msg = _("Not allowed to create members for image %s.") % image_id
            LOG.warning(msg)
            raise webob.exc.HTTPForbidden(explanation=msg)
        except exception.Duplicate:
            msg = _("Member %(member_id)s is duplicated for image "
                    "%(image_id)s") % {"member_id": member_id,
                                       "image_id": image_id}
            LOG.warning(msg)
            raise webob.exc.HTTPConflict(explanation=msg)
        except exception.ImageMemberLimitExceeded as e:
            msg = (_("Image member limit exceeded for image %(id)s: %(e)s:")
                   % {"id": image_id,
                      "e": encodeutils.exception_to_unicode(e)})
            LOG.warning(msg)
            raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg)

    @utils.mutating
    def update(self, req, image_id, member_id, status):
        """
        Update the status of a member for a given image.
        :param req: the Request object coming from the wsgi layer
        :param image_id: the image identifier
        :param member_id: the member identifier
        :param status: the status of a member
        :returns: The response body is a mapping of the following form

        ::

            {'member_id': <MEMBER>,
             'image_id': <IMAGE>,
             'status': <MEMBER_STATUS>,
             'created_at': ..,
             'updated_at': ..}

        """
        try:
            image = self._lookup_image(req, image_id)
            # Check for image visibility and ownership before getting member
            # repo.
            # NOTE(abhishekk): Once we support RBAC policies we can remove
            # ownership check from here. This is added here just to maintain
            # behavior with and without auth layer.
            self._check_visibility_and_ownership(req.context, image,
                                                 ownership_check='update')
            member_repo = self._get_member_repo(req, image)
            member = self._lookup_member(req, image, member_id,
                                         member_repo=member_repo)
            # Verify the user may modify this membership's status.
            api_policy.MemberAPIPolicy(
                req.context,
                image,
                enforcer=self.policy).modify_member()
            member.status = status
            member_repo.save(member)
            return member
        except exception.Forbidden:
            msg = _("Not allowed to update members for image %s.") % image_id
            LOG.warning(msg)
            raise webob.exc.HTTPForbidden(explanation=msg)
        except ValueError as e:
            # e.g. an invalid status value rejected by the domain layer.
            msg = (_("Incorrect request: %s")
                   % encodeutils.exception_to_unicode(e))
            LOG.warning(msg)
            raise webob.exc.HTTPBadRequest(explanation=msg)

    def index(self, req, image_id):
        """
        Return a list of dictionaries indicating the members of the
        image, i.e., those tenants the image is shared with.
        :param req: the Request object coming from the wsgi layer
        :param image_id: The image identifier
        :returns: The response body is a mapping of the following form

        ::

            {'members': [
                {'member_id': <MEMBER>,
                 'image_id': <IMAGE>,
                 'status': <MEMBER_STATUS>,
                 'created_at': ..,
                 'updated_at': ..}, ..
            ]}

        """
        try:
            image = self._lookup_image(req, image_id)
            # Check for image visibility and ownership before getting member
            # repo.
            self._check_visibility_and_ownership(req.context, image)
            member_repo = self._get_member_repo(req, image)
            # NOTE(abhishekk): This will verify whether user has permission
            # to view image members or not. Each member will be checked with
            # get_member policy below.
            api_policy_check = api_policy.MemberAPIPolicy(
                req.context,
                image,
                enforcer=self.policy)
            api_policy_check.get_members()
        except exception.Forbidden as e:
            msg = (_("Not allowed to list members for image %(image_id)s: "
                     "%(inner_msg)s") % {"image_id": image.image_id,
                                         "inner_msg": e.msg})
            LOG.warning(msg)
            raise webob.exc.HTTPForbidden(explanation=msg)
        # Include only members passing the non-fatal get_member check.
        members = [
            member for member in member_repo.list() if api_policy_check.check(
                'get_member')]
        return dict(members=members)

    def show(self, req, image_id, member_id):
        """
        Returns the membership of the tenant wrt to the image_id specified.
        :param req: the Request object coming from the wsgi layer
        :param image_id: The image identifier
        :returns: The response body is a mapping of the following form

        ::

            {'member_id': <MEMBER>,
             'image_id': <IMAGE>,
             'status': <MEMBER_STATUS>
             'created_at': ..,
             'updated_at': ..}

        """
        try:
            image = self._lookup_image(req, image_id)
            # Check for image visibility and ownership before getting member
            # repo.
            self._check_visibility_and_ownership(req.context, image)
            return self._lookup_member(req, image, member_id)
        except exception.Forbidden as e:
            # Convert Forbidden to NotFound to prevent information
            # leakage.
            raise webob.exc.HTTPNotFound(explanation=e.msg)
        except webob.exc.HTTPForbidden as e:
            # Convert Forbidden to NotFound to prevent information
            # leakage.
            raise webob.exc.HTTPNotFound(explanation=e.explanation)

    @utils.mutating
    def delete(self, req, image_id, member_id):
        """
        Removes a membership from the image.
        """
        try:
            image = self._lookup_image(req, image_id)
            # Check for image visibility and ownership before getting member
            # repo.
            # NOTE(abhishekk): Once we support RBAC policies we can remove
            # ownership check from here. This is added here just to maintain
            # behavior with and without auth layer.
            self._check_visibility_and_ownership(req.context, image,
                                                 ownership_check='delete')
            member_repo = self._get_member_repo(req, image)
            member = self._lookup_member(req, image, member_id,
                                         member_repo=member_repo)
            # NOTE(abhishekk): This will verify whether user has permission
            # to delete image member or not.
            api_policy.MemberAPIPolicy(
                req.context,
                image,
                enforcer=self.policy).delete_member()
            member_repo.remove(member)
            return webob.Response(body='', status=http.NO_CONTENT)
        except exception.Forbidden:
            msg = _("Not allowed to delete members for image %s.") % image_id
            LOG.warning(msg)
            raise webob.exc.HTTPForbidden(explanation=msg)
class RequestDeserializer(wsgi.JSONRequestDeserializer):
    """Deserializes the JSON bodies of image-member API requests."""

    def __init__(self):
        super(RequestDeserializer, self).__init__()

    def _get_request_body(self, request):
        """Return the parsed request body, raising 400 when absent."""
        output = super(RequestDeserializer, self).default(request)
        if 'body' not in output:
            msg = _('Body expected in request.')
            raise webob.exc.HTTPBadRequest(explanation=msg)
        return output['body']

    def create(self, request):
        """Extract the member id from a {"member": "<id>"} body."""
        body = self._get_request_body(request)
        try:
            member_id = body['member']
            if not member_id:
                raise ValueError()
        except KeyError:
            msg = _("Member to be added not specified")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        except ValueError:
            msg = _("Member can't be empty")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        except TypeError:
            # Raised when the body is not a mapping (e.g. a list or string).
            msg = _('Expected a member in the form: '
                    '{"member": "image_id"}')
            raise webob.exc.HTTPBadRequest(explanation=msg)
        return dict(member_id=member_id)

    def update(self, request):
        """Extract the status from a {"status": "<status>"} body."""
        body = self._get_request_body(request)
        try:
            status = body['status']
        except KeyError:
            msg = _("Status not specified")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        except TypeError:
            # Raised when the body is not a mapping.
            msg = _('Expected a status in the form: '
                    '{"status": "status"}')
            raise webob.exc.HTTPBadRequest(explanation=msg)
        return dict(status=status)
class ResponseSerializer(wsgi.JSONResponseSerializer):
    """Serializes image-member views onto JSON HTTP responses."""

    def __init__(self, schema=None):
        super(ResponseSerializer, self).__init__()
        self.schema = schema or get_schema()

    def _format_image_member(self, member):
        """Return a schema-filtered dict view of a single image member."""
        member_view = {}
        attributes = ['member_id', 'image_id', 'status']
        for key in attributes:
            member_view[key] = getattr(member, key)
        member_view['created_at'] = timeutils.isotime(member.created_at)
        member_view['updated_at'] = timeutils.isotime(member.updated_at)
        member_view['schema'] = '/v2/schemas/member'
        member_view = self.schema.filter(member_view)
        return member_view

    def _write_json(self, response, view):
        """Serialize *view* as the JSON body of *response*."""
        body = jsonutils.dumps(view, ensure_ascii=False)
        response.unicode_body = body
        response.content_type = 'application/json'

    # create/update/show all return a single member view; the previously
    # triplicated bodies are consolidated into _write_json above.
    def create(self, response, image_member):
        self._write_json(response, self._format_image_member(image_member))

    def update(self, response, image_member):
        self._write_json(response, self._format_image_member(image_member))

    def index(self, response, image_members):
        image_members = image_members['members']
        image_members_view = [self._format_image_member(member)
                              for member in image_members]
        totalview = dict(members=image_members_view)
        totalview['schema'] = '/v2/schemas/members'
        self._write_json(response, totalview)

    def show(self, response, image_member):
        self._write_json(response, self._format_image_member(image_member))
# JSON-schema property definitions for a single image-member resource.
_MEMBER_SCHEMA = {
    'member_id': {
        'type': 'string',
        'description': _('An identifier for the image member (tenantId)')
    },
    'image_id': {
        'type': 'string',
        'description': _('An identifier for the image'),
        # UUID format: 8-4-4-4-12 hex digits.
        'pattern': ('^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}'
                    '-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$'),
    },
    'created_at': {
        'type': 'string',
        'description': _('Date and time of image member creation'),
        # TODO(brian-rosmaita): our jsonschema library doesn't seem to like the
        # format attribute, figure out why (and also fix in images.py)
        # 'format': 'date-time',
    },
    'updated_at': {
        'type': 'string',
        'description': _('Date and time of last modification of image member'),
        # 'format': 'date-time',
    },
    'status': {
        'type': 'string',
        'description': _('The status of this image member'),
        'enum': [
            'pending',
            'accepted',
            'rejected'
        ]
    },
    'schema': {
        'readOnly': True,
        'type': 'string'
    }
}
def get_schema():
    """Return the Schema describing a single image member."""
    # Deep-copy so callers cannot mutate the module-level template.
    return glance.schema.Schema('member', copy.deepcopy(_MEMBER_SCHEMA))
def get_collection_schema():
    """Return the CollectionSchema wrapping the single-member schema."""
    return glance.schema.CollectionSchema('members', get_schema())
def create_resource():
    """Image Members resource factory method"""
    return wsgi.Resource(ImageMembersController(),
                         RequestDeserializer(),
                         ResponseSerializer())
|
openstack/glance
|
glance/api/v2/image_members.py
|
Python
|
apache-2.0
| 19,582
|
[
"Brian"
] |
186bc85070462fbabe53841ed2827a6bbec22bf1c56010c22fd470be52b50a1d
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
urlpatterns = [
    # Static pages rendered directly from templates.
    url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name="home"),
    url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name="about"),
    # Django Admin, use {% url 'admin:index' %}
    url(settings.ADMIN_URL, include(admin.site.urls)),
    # User management
    url(r'^users/', include("reddit.users.urls", namespace="users")),
    url(r'^accounts/', include('allauth.urls')),
    # Your stuff: custom urls includes go here
    # static() serves user-uploaded media during development only
    # (it returns an empty list when DEBUG is off).
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # This allows the error pages to be debugged during development, just visit
    # these url in browser to see how these error pages look like.
    # Each route maps straight to Django's default error view.
    urlpatterns += [
        url(r'^400/$', default_views.bad_request),
        url(r'^403/$', default_views.permission_denied),
        url(r'^404/$', default_views.page_not_found),
        url(r'^500/$', default_views.server_error),
    ]
|
jacobparra/redditclone
|
config/urls.py
|
Python
|
bsd-3-clause
| 1,276
|
[
"VisIt"
] |
3b2a7bb9f02c701cabc3c989499bfabd097ee460bcfea416d683148a2cba9f72
|
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Interfaces to assorted Freesurfer utility programs.
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
>>> os.chdir(datadir)
"""
from __future__ import print_function, division, unicode_literals, absolute_import
from builtins import str, open
import os
import re
import shutil
from ... import logging
from ...utils.filemanip import fname_presuffix, split_filename
from ..base import (TraitedSpec, File, traits, OutputMultiPath, isdefined,
CommandLine, CommandLineInputSpec)
from .base import (FSCommand, FSTraitedSpec, FSSurfaceCommand,
FSScriptCommand, FSScriptOutputSpec,
FSTraitedSpecOpenMP, FSCommandOpenMP)
__docformat__ = 'restructuredtext'
# Maps each Freesurfer format name to the file extension it produces.
filemap = dict(cor='cor', mgh='mgh', mgz='mgz', minc='mnc',
               afni='brik', brik='brik', bshort='bshort',
               spm='img', analyze='img', analyze4d='img',
               bfloat='bfloat', nifti1='img', nii='nii',
               niigz='nii.gz', gii='gii')

# Format names accepted by the *_type traits of the interfaces below.
filetypes = ['cor', 'mgh', 'mgz', 'minc', 'analyze',
             'analyze4d', 'spm', 'afni', 'brik', 'bshort',
             'bfloat', 'sdt', 'outline', 'otl', 'gdf',
             'nifti1', 'nii', 'niigz']

# Formats inferred from the output filename alone; _format_arg emits no
# explicit type flag for these.
implicit_filetypes = ['gii']

logger = logging.getLogger('interface')
def copy2subjdir(cls, in_file, folder=None, basename=None, subject_id=None):
    """Copy an input file into the subjects directory.

    Parameters
    ----------
    cls : interface instance
        Object whose ``inputs`` may define ``subjects_dir`` and ``subject_id``.
    in_file :
        Input file path; returned unchanged when the trait is undefined.
    folder : str, optional
        Subfolder under the subject directory to copy into.
    basename : str, optional
        Destination file name (defaults to ``in_file``'s basename).
    subject_id : str, optional
        Overrides ``cls.inputs.subject_id``.

    Returns
    -------
    str
        The destination path, or ``in_file`` when it is not defined.
    """
    # Nothing to copy when the input trait is unset.
    if not isdefined(in_file):
        return in_file
    # Resolve the subjects directory, falling back to the cwd.
    if isdefined(cls.inputs.subjects_dir):
        subjects_dir = cls.inputs.subjects_dir
    else:
        subjects_dir = os.getcwd()
    # Resolve the subject id, with a generic default as a last resort.
    if not subject_id:
        if isdefined(cls.inputs.subject_id):
            subject_id = cls.inputs.subject_id
        else:
            subject_id = 'subject_id'
    # Identity comparison ("is None") instead of the original "== None".
    if basename is None:
        basename = os.path.basename(in_file)
    if folder is not None:
        out_dir = os.path.join(subjects_dir, subject_id, folder)
    else:
        out_dir = os.path.join(subjects_dir, subject_id)
    # Create the destination folder on demand.
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)
    out_file = os.path.join(out_dir, basename)
    # Only copy once; an existing destination is reused.
    if not os.path.isfile(out_file):
        shutil.copy(in_file, out_file)
    return out_file
def createoutputdirs(outputs):
    """Create all output directories. If not created, some freesurfer interfaces fail.

    ``outputs`` maps output names to file paths; the parent directory of each
    path is created when missing.
    """
    for output in list(outputs.values()):
        dirname = os.path.dirname(output)
        # Guard against bare filenames: their dirname is '' and os.makedirs('')
        # would raise, while there is nothing to create for them anyway.
        if dirname and not os.path.isdir(dirname):
            os.makedirs(dirname)
class SampleToSurfaceInputSpec(FSTraitedSpec):
    """Inputs for SampleToSurface (Freesurfer's mri_vol2surf)."""

    source_file = File(exists=True, mandatory=True, argstr="--mov %s",
                       desc="volume to sample values from")
    reference_file = File(exists=True, argstr="--ref %s",
                          desc="reference volume (default is orig.mgz)")

    hemi = traits.Enum("lh", "rh", mandatory=True, argstr="--hemi %s",
                       desc="target hemisphere")
    surface = traits.String(argstr="--surf %s", desc="target surface (default is white)")

    # Exactly one of these mutually-exclusive registration inputs is required.
    reg_xors = ["reg_file", "reg_header", "mni152reg"]
    reg_file = File(exists=True, argstr="--reg %s", mandatory=True, xor=reg_xors,
                    desc="source-to-reference registration file")
    reg_header = traits.Bool(argstr="--regheader %s", requires=["subject_id"],
                             mandatory=True, xor=reg_xors,
                             desc="register based on header geometry")
    mni152reg = traits.Bool(argstr="--mni152reg",
                            mandatory=True, xor=reg_xors,
                            desc="source volume is in MNI152 space")

    apply_rot = traits.Tuple(traits.Float, traits.Float, traits.Float,
                             argstr="--rot %.3f %.3f %.3f",
                             desc="rotation angles (in degrees) to apply to reg matrix")
    apply_trans = traits.Tuple(traits.Float, traits.Float, traits.Float,
                               argstr="--trans %.3f %.3f %.3f",
                               desc="translation (in mm) to apply to reg matrix")
    override_reg_subj = traits.Bool(argstr="--srcsubject %s", requires=["subject_id"],
                                    desc="override the subject in the reg file header")

    # The projection flag is assembled from these three traits in
    # SampleToSurface._format_arg.
    sampling_method = traits.Enum("point", "max", "average",
                                  mandatory=True, argstr="%s", xor=["projection_stem"],
                                  requires=["sampling_range", "sampling_units"],
                                  desc="how to sample -- at a point or at the max or average over a range")
    sampling_range = traits.Either(traits.Float,
                                   traits.Tuple(traits.Float, traits.Float, traits.Float),
                                   desc="sampling range - a point or a tuple of (min, max, step)")
    sampling_units = traits.Enum("mm", "frac", desc="sampling range type -- either 'mm' or 'frac'")
    projection_stem = traits.String(mandatory=True, xor=["sampling_method"],
                                    desc="stem for precomputed linear estimates and volume fractions")

    smooth_vol = traits.Float(argstr="--fwhm %.3f", desc="smooth input volume (mm fwhm)")
    smooth_surf = traits.Float(argstr="--surf-fwhm %.3f", desc="smooth output surface (mm fwhm)")

    interp_method = traits.Enum("nearest", "trilinear", argstr="--interp %s",
                                desc="interpolation method")

    cortex_mask = traits.Bool(argstr="--cortex", xor=["mask_label"],
                              desc="mask the target surface with hemi.cortex.label")
    mask_label = File(exists=True, argstr="--mask %s", xor=["cortex_mask"],
                      desc="label file to mask output with")

    float2int_method = traits.Enum("round", "tkregister", argstr="--float2int %s",
                                   desc="method to convert reg matrix values (default is round)")
    fix_tk_reg = traits.Bool(argstr="--fixtkreg", desc="make reg matrix round-compatible")

    subject_id = traits.String(desc="subject id")
    target_subject = traits.String(argstr="--trgsubject %s",
                                   desc="sample to surface of different subject than source")
    surf_reg = traits.Bool(argstr="--surfreg", requires=["target_subject"],
                           desc="use surface registration to target subject")
    ico_order = traits.Int(argstr="--icoorder %d", requires=["target_subject"],
                           desc="icosahedron order when target_subject is 'ico'")

    reshape = traits.Bool(argstr="--reshape", xor=["no_reshape"],
                          desc="reshape surface vector to fit in non-mgh format")
    no_reshape = traits.Bool(argstr="--noreshape", xor=["reshape"],
                             desc="do not reshape surface vector (default)")
    reshape_slices = traits.Int(argstr="--rf %d", desc="number of 'slices' for reshaping")

    scale_input = traits.Float(argstr="--scale %.3f",
                               desc="multiple all intensities by scale factor")
    frame = traits.Int(argstr="--frame %d", desc="save only one frame (0-based)")

    out_file = File(argstr="--o %s", genfile=True, desc="surface file to write")
    out_type = traits.Enum(filetypes + implicit_filetypes,
                           argstr="--out_type %s", desc="output file type")
    hits_file = traits.Either(traits.Bool, File(exists=True), argstr="--srchit %s",
                              desc="save image with number of hits at each voxel")
    hits_type = traits.Enum(filetypes, argstr="--srchit_type", desc="hits file type")
    vox_file = traits.Either(traits.Bool, File, argstr="--nvox %s",
                             desc="text file with the number of voxels intersecting the surface")
class SampleToSurfaceOutputSpec(TraitedSpec):
    """Outputs produced by SampleToSurface."""

    out_file = File(exists=True, desc="surface file")
    hits_file = File(exists=True, desc="image with number of hits at each voxel")
    vox_file = File(exists=True,
                    desc="text file with the number of voxels intersecting the surface")
class SampleToSurface(FSCommand):
    """Sample a volume to the cortical surface using Freesurfer's mri_vol2surf.

    You must supply a sampling method, range, and units.  You can project
    either a given distance (in mm) or a given fraction of the cortical
    thickness at that vertex along the surface normal from the target surface,
    and then set the value of that vertex to be either the value at that point
    or the average or maximum value found along the projection vector.

    By default, the surface will be saved as a vector with a length equal to the
    number of vertices on the target surface.  This is not a problem for Freesurfer
    programs, but if you intend to use the file with interfaces to another package,
    you must set the ``reshape`` input to True, which will factor the surface vector
    into a matrix with dimensions compatible with proper Nifti files.

    Examples
    --------
    >>> import nipype.interfaces.freesurfer as fs
    >>> sampler = fs.SampleToSurface(hemi="lh")
    >>> sampler.inputs.source_file = "cope1.nii.gz"
    >>> sampler.inputs.reg_file = "register.dat"
    >>> sampler.inputs.sampling_method = "average"
    >>> sampler.inputs.sampling_range = 1
    >>> sampler.inputs.sampling_units = "frac"
    >>> sampler.cmdline  # doctest: +ELLIPSIS +ALLOW_UNICODE
    'mri_vol2surf --hemi lh --o ...lh.cope1.mgz --reg register.dat --projfrac-avg 1.000 --mov cope1.nii.gz'
    >>> res = sampler.run() # doctest: +SKIP
    """
    _cmd = "mri_vol2surf"
    input_spec = SampleToSurfaceInputSpec
    output_spec = SampleToSurfaceOutputSpec

    def _format_arg(self, name, spec, value):
        """Assemble the compound projection/registration flags."""
        if name == "sampling_method":
            # Compose --proj{dist,frac}[-max|-avg] <range> from three traits.
            # (Renamed local from "range", which shadowed the builtin.)
            sample_range = self.inputs.sampling_range
            units = self.inputs.sampling_units
            if units == "mm":
                units = "dist"
            if isinstance(sample_range, tuple):
                sample_range = "%.3f %.3f %.3f" % sample_range
            else:
                sample_range = "%.3f" % sample_range
            method = dict(point="", max="-max", average="-avg")[value]
            return "--proj%s%s %s" % (units, method, sample_range)

        if name == "reg_header":
            return spec.argstr % self.inputs.subject_id
        if name == "override_reg_subj":
            return spec.argstr % self.inputs.subject_id
        if name in ["hits_file", "vox_file"]:
            # A bare True means "generate a name for me".
            return spec.argstr % self._get_outfilename(name)
        if name == "out_type":
            if isdefined(self.inputs.out_file):
                _, base, ext = split_filename(self._get_outfilename())
                if ext != filemap[value]:
                    if ext in filemap.values():
                        # Requested format contradicts a recognized extension:
                        # refuse rather than write a mislabeled file.
                        raise ValueError(
                            "Cannot create {} file with extension "
                            "{}".format(value, ext))
                    else:
                        # warning() instead of the deprecated warn() alias.
                        logger.warning("Creating {} file with extension {}: "
                                       "{}{}".format(value, ext, base, ext))
            if value in implicit_filetypes:
                # Implicit formats are inferred from the filename; emit no flag.
                return ""
        return super(SampleToSurface, self)._format_arg(name, spec, value)

    def _get_outfilename(self, opt="out_file"):
        """Return the (possibly auto-generated) filename for ``opt``."""
        outfile = getattr(self.inputs, opt)
        if not isdefined(outfile) or isinstance(outfile, bool):
            if isdefined(self.inputs.out_type):
                if opt == "hits_file":
                    suffix = '_hits.' + filemap[self.inputs.out_type]
                else:
                    suffix = '.' + filemap[self.inputs.out_type]
            elif opt == "hits_file":
                suffix = "_hits.mgz"
            else:
                suffix = '.mgz'
            outfile = fname_presuffix(self.inputs.source_file,
                                      newpath=os.getcwd(),
                                      prefix=self.inputs.hemi + ".",
                                      suffix=suffix,
                                      use_ext=False)
        return outfile

    def _list_outputs(self):
        outputs = self._outputs().get()
        outputs["out_file"] = os.path.abspath(self._get_outfilename())
        hitsfile = self.inputs.hits_file
        if isdefined(hitsfile):
            # Resolve a bare True to the generated filename BEFORE storing it;
            # the original code stored the bool itself in the outputs.
            if isinstance(hitsfile, bool):
                hitsfile = self._get_outfilename("hits_file")
            outputs["hits_file"] = hitsfile
        voxfile = self.inputs.vox_file
        if isdefined(voxfile):
            if isinstance(voxfile, bool):
                voxfile = fname_presuffix(self.inputs.source_file,
                                          newpath=os.getcwd(),
                                          prefix=self.inputs.hemi + ".",
                                          suffix="_vox.txt",
                                          use_ext=False)
            outputs["vox_file"] = voxfile
        return outputs

    def _gen_filename(self, name):
        # Only out_file is auto-generated.
        if name == "out_file":
            return self._list_outputs()[name]
        return None
class SurfaceSmoothInputSpec(FSTraitedSpec):
    """Inputs for SurfaceSmooth (Freesurfer's mri_surf2surf)."""

    in_file = File(mandatory=True, argstr="--sval %s", desc="source surface file")
    subject_id = traits.String(mandatory=True, argstr="--s %s", desc="subject id of surface file")
    hemi = traits.Enum("lh", "rh", argstr="--hemi %s", mandatory=True, desc="hemisphere to operate on")
    # Smoothing is specified either as an effective FWHM or as an explicit
    # iteration count -- the two traits are mutually exclusive.
    fwhm = traits.Float(argstr="--fwhm %.4f", xor=["smooth_iters"],
                        desc="effective FWHM of the smoothing process")
    smooth_iters = traits.Int(argstr="--smooth %d", xor=["fwhm"],
                              desc="iterations of the smoothing process")
    cortex = traits.Bool(True, argstr="--cortex", usedefault=True, desc="only smooth within $hemi.cortex.label")
    reshape = traits.Bool(argstr="--reshape",
                          desc="reshape surface vector to fit in non-mgh format")
    out_file = File(argstr="--tval %s", genfile=True, desc="surface file to write")
class SurfaceSmoothOutputSpec(TraitedSpec):
    """Outputs produced by SurfaceSmooth."""

    out_file = File(exists=True, desc="smoothed surface file")
class SurfaceSmooth(FSCommand):
    """Smooth a surface image with mri_surf2surf.

    The surface is smoothed by an interative process of averaging the
    value at each vertex with those of its adjacent neighbors. You may supply
    either the number of iterations to run or a desired effective FWHM of the
    smoothing process. If the latter, the underlying program will calculate
    the correct number of iterations internally.

    .. seealso::

        SmoothTessellation() Interface
            For smoothing a tessellated surface (e.g. in gifti or .stl)

    Examples
    --------
    >>> import nipype.interfaces.freesurfer as fs
    >>> smoother = fs.SurfaceSmooth()
    >>> smoother.inputs.in_file = "lh.cope1.mgz"
    >>> smoother.inputs.subject_id = "subj_1"
    >>> smoother.inputs.hemi = "lh"
    >>> smoother.inputs.fwhm = 5
    >>> smoother.cmdline # doctest: +ELLIPSIS +ALLOW_UNICODE
    'mri_surf2surf --cortex --fwhm 5.0000 --hemi lh --sval lh.cope1.mgz --tval ...lh.cope1_smooth5.mgz --s subj_1'
    >>> smoother.run() # doctest: +SKIP
    """
    _cmd = "mri_surf2surf"
    input_spec = SurfaceSmoothInputSpec
    output_spec = SurfaceSmoothOutputSpec

    def _list_outputs(self):
        """Derive a default output path from in_file and the smoothing amount."""
        outputs = self._outputs().get()
        outputs["out_file"] = self.inputs.out_file
        if not isdefined(outputs["out_file"]):
            in_file = self.inputs.in_file
            if isdefined(self.inputs.fwhm):
                kernel = self.inputs.fwhm
            else:
                kernel = self.inputs.smooth_iters
            # NOTE(review): kernel may be a float (fwhm) but is formatted with
            # %d, truncating toward zero; this matches the doctest ("_smooth5").
            outputs["out_file"] = fname_presuffix(in_file,
                                                  suffix="_smooth%d" % kernel,
                                                  newpath=os.getcwd())
        return outputs

    def _gen_filename(self, name):
        # Only out_file is auto-generated.
        if name == "out_file":
            return self._list_outputs()[name]
        return None
class SurfaceTransformInputSpec(FSTraitedSpec):
    """Inputs for SurfaceTransform (Freesurfer's mri_surf2surf)."""

    # The source is either a scalar/overlay surface file or an annotation
    # file -- the two traits are mutually exclusive.
    source_file = File(exists=True, mandatory=True, argstr="--sval %s",
                       xor=['source_annot_file'],
                       desc="surface file with source values")
    source_annot_file = File(exists=True, mandatory=True,
                             argstr="--sval-annot %s",
                             xor=['source_file'],
                             desc="surface annotation file")
    source_subject = traits.String(mandatory=True, argstr="--srcsubject %s",
                                   desc="subject id for source surface")
    hemi = traits.Enum("lh", "rh", argstr="--hemi %s", mandatory=True,
                       desc="hemisphere to transform")
    target_subject = traits.String(mandatory=True, argstr="--trgsubject %s",
                                   desc="subject id of target surface")
    target_ico_order = traits.Enum(1, 2, 3, 4, 5, 6, 7,
                                   argstr="--trgicoorder %d",
                                   desc=("order of the icosahedron if "
                                         "target_subject is 'ico'"))
    source_type = traits.Enum(filetypes, argstr='--sfmt %s',
                              requires=['source_file'],
                              desc="source file format")
    target_type = traits.Enum(filetypes + implicit_filetypes, argstr='--tfmt %s',
                              desc="output format")
    reshape = traits.Bool(argstr="--reshape",
                          desc="reshape output surface to conform with Nifti")
    reshape_factor = traits.Int(argstr="--reshape-factor",
                                desc="number of slices in reshaped image")
    out_file = File(argstr="--tval %s", genfile=True,
                    desc="surface file to write")
class SurfaceTransformOutputSpec(TraitedSpec):
    """Outputs produced by SurfaceTransform."""

    out_file = File(exists=True, desc="transformed surface file")
class SurfaceTransform(FSCommand):
    """Transform a surface file from one subject to another via a spherical registration.

    Both the source and target subject must reside in your Subjects Directory,
    and they must have been processed with recon-all, unless you are transforming
    to one of the icosahedron meshes.

    Examples
    --------
    >>> from nipype.interfaces.freesurfer import SurfaceTransform
    >>> sxfm = SurfaceTransform()
    >>> sxfm.inputs.source_file = "lh.cope1.nii.gz"
    >>> sxfm.inputs.source_subject = "my_subject"
    >>> sxfm.inputs.target_subject = "fsaverage"
    >>> sxfm.inputs.hemi = "lh"
    >>> sxfm.run() # doctest: +SKIP
    """
    _cmd = "mri_surf2surf"
    input_spec = SurfaceTransformInputSpec
    output_spec = SurfaceTransformOutputSpec

    def _format_arg(self, name, spec, value):
        """Validate target_type against the chosen out_file extension."""
        if name == "target_type":
            if isdefined(self.inputs.out_file):
                _, base, ext = split_filename(self._list_outputs()['out_file'])
                if ext != filemap[value]:
                    if ext in filemap.values():
                        # The requested format contradicts a recognized
                        # extension; refuse rather than mislabel the file.
                        raise ValueError(
                            "Cannot create {} file with extension "
                            "{}".format(value, ext))
                    else:
                        logger.warn("Creating {} file with extension {}: "
                                    "{}{}".format(value, ext, base, ext))
            if value in implicit_filetypes:
                # Implicit formats are inferred from the filename; emit no flag.
                return ""
        return super(SurfaceTransform, self)._format_arg(name, spec, value)

    def _list_outputs(self):
        """Build a default output name: <source>.<target_subject><ext>."""
        outputs = self._outputs().get()
        outputs["out_file"] = self.inputs.out_file
        if not isdefined(outputs["out_file"]):
            if isdefined(self.inputs.source_file):
                source = self.inputs.source_file
            else:
                source = self.inputs.source_annot_file
            # Some recon-all files don't have a proper extension (e.g. "lh.thickness")
            # so we have to account for that here
            bad_extensions = [".%s" % e for e in ["area", "mid", "pial", "avg_curv", "curv", "inflated",
                                                  "jacobian_white", "orig", "nofix", "smoothwm", "crv",
                                                  "sphere", "sulc", "thickness", "volume", "white"]]
            use_ext = True
            if split_filename(source)[2] in bad_extensions:
                # Append a throwaway extension so fname_presuffix does not
                # mangle the pseudo-extension.
                source = source + ".stripme"
                use_ext = False
            ext = ""
            if isdefined(self.inputs.target_type):
                ext = "." + filemap[self.inputs.target_type]
                use_ext = False
            outputs["out_file"] = fname_presuffix(source,
                                                  suffix=".%s%s" % (self.inputs.target_subject, ext),
                                                  newpath=os.getcwd(),
                                                  use_ext=use_ext)
        else:
            outputs["out_file"] = os.path.abspath(self.inputs.out_file)
        return outputs

    def _gen_filename(self, name):
        # Only out_file is auto-generated.
        if name == "out_file":
            return self._list_outputs()[name]
        return None
class Surface2VolTransformInputSpec(FSTraitedSpec):
    """Inputs for Surface2VolTransform (Freesurfer's mri_surf2vol)."""

    # Either supply surface values or build a mask (mkmask) -- not both.
    source_file = File(exists=True, argstr='--surfval %s',
                       copyfile=False, mandatory=True, xor=['mkmask'],
                       desc='This is the source of the surface values')
    hemi = traits.Str(argstr='--hemi %s', mandatory=True,
                      desc='hemisphere of data')
    transformed_file = File(name_template="%s_asVol.nii", desc='Output volume',
                            argstr='--outvol %s',
                            name_source=['source_file'], hash_files=False)
    reg_file = File(exists=True, argstr='--volreg %s',
                    mandatory=True,
                    desc='tkRAS-to-tkRAS matrix   (tkregister2 format)',
                    xor=['subject_id'])
    template_file = File(exists=True, argstr='--template %s',
                         desc='Output template volume')
    mkmask = traits.Bool(desc='make a mask instead of loading surface values',
                         argstr='--mkmask', xor=['source_file'])
    vertexvol_file = File(name_template="%s_asVol_vertex.nii",
                          desc=('Path name of the vertex output volume, which '
                                'is the same as output volume except that the '
                                'value of each voxel is the vertex-id that is '
                                'mapped to that voxel.'),
                          argstr='--vtxvol %s', name_source=['source_file'],
                          hash_files=False)
    surf_name = traits.Str(argstr='--surf %s',
                           desc='surfname (default is white)')
    projfrac = traits.Float(argstr='--projfrac %s', desc='thickness fraction')
    subjects_dir = traits.Str(argstr='--sd %s',
                              desc=('freesurfer subjects directory defaults to '
                                    '$SUBJECTS_DIR'))
    subject_id = traits.Str(argstr='--identity %s', desc='subject id',
                            xor=['reg_file'])
class Surface2VolTransformOutputSpec(TraitedSpec):
    """Outputs produced by Surface2VolTransform."""

    transformed_file = File(exists=True,
                            desc='Path to output file if used normally')
    vertexvol_file = File(desc='vertex map volume path id. Optional')
class Surface2VolTransform(FSCommand):
    """Use FreeSurfer mri_surf2vol to apply a transform.

    Examples
    --------
    >>> from nipype.interfaces.freesurfer import Surface2VolTransform
    >>> xfm2vol = Surface2VolTransform()
    >>> xfm2vol.inputs.source_file = 'lh.cope1.mgz'
    >>> xfm2vol.inputs.reg_file = 'register.mat'
    >>> xfm2vol.inputs.hemi = 'lh'
    >>> xfm2vol.inputs.template_file = 'cope1.nii.gz'
    >>> xfm2vol.inputs.subjects_dir = '.'
    >>> xfm2vol.cmdline # doctest: +ALLOW_UNICODE
    'mri_surf2vol --hemi lh --volreg register.mat --surfval lh.cope1.mgz --sd . --template cope1.nii.gz --outvol lh.cope1_asVol.nii --vtxvol lh.cope1_asVol_vertex.nii'
    >>> res = xfm2vol.run()# doctest: +SKIP
    """
    # All behavior is declarative: argument assembly and output naming come
    # from the input spec's argstr/name_template metadata.
    _cmd = 'mri_surf2vol'
    input_spec = Surface2VolTransformInputSpec
    output_spec = Surface2VolTransformOutputSpec
class ApplyMaskInputSpec(FSTraitedSpec):
    """Inputs for ApplyMask (Freesurfer's mri_mask)."""

    in_file = File(exists=True, mandatory=True, position=-3, argstr="%s",
                   desc="input image (will be masked)")
    mask_file = File(exists=True, mandatory=True, position=-2, argstr="%s",
                     desc="image defining mask space")
    # NOTE(review): hash_files=True is unusual for a generated output name;
    # sibling interfaces in this file use hash_files=False -- confirm intent.
    out_file = File(name_source=['in_file'], name_template='%s_masked',
                    hash_files=True, keep_extension=True,
                    position=-1, argstr="%s",
                    desc="final image to write")
    xfm_file = File(exists=True, argstr="-xform %s",
                    desc="LTA-format transformation matrix to align mask with input")
    invert_xfm = traits.Bool(argstr="-invert", desc="invert transformation")
    xfm_source = File(exists=True, argstr="-lta_src %s", desc="image defining transform source space")
    xfm_target = File(exists=True, argstr="-lta_dst %s", desc="image defining transform target space")
    use_abs = traits.Bool(argstr="-abs", desc="take absolute value of mask before applying")
    mask_thresh = traits.Float(argstr="-T %.4f", desc="threshold mask before applying")
    keep_mask_deletion_edits = traits.Bool(
        argstr="-keep_mask_deletion_edits",
        desc="transfer voxel-deletion edits (voxels=1) from mask to out vol")
    transfer = traits.Int(argstr="-transfer %d",
                          desc="transfer only voxel value # from mask to out")
class ApplyMaskOutputSpec(TraitedSpec):
    """Outputs produced by ApplyMask."""

    out_file = File(exists=True, desc="masked image")
class ApplyMask(FSCommand):
    """Use Freesurfer's mri_mask to apply a mask to an image.

    The mask file need not be binarized; it can be thresholded above a given
    value before application. It can also optionally be transformed into input
    space with an LTA matrix.
    """
    # Fully declarative: arguments and output naming come from the input spec.
    _cmd = "mri_mask"
    input_spec = ApplyMaskInputSpec
    output_spec = ApplyMaskOutputSpec
class SurfaceSnapshotsInputSpec(FSTraitedSpec):
    """Inputs for SurfaceSnapshots (tksurfer screenshots)."""

    subject_id = traits.String(position=1, argstr="%s", mandatory=True,
                               desc="subject to visualize")
    hemi = traits.Enum("lh", "rh", position=2, argstr="%s", mandatory=True,
                       desc="hemisphere to visualize")
    surface = traits.String(position=3, argstr="%s", mandatory=True,
                            desc="surface to visualize")

    show_curv = traits.Bool(argstr="-curv", desc="show curvature", xor=["show_gray_curv"])
    show_gray_curv = traits.Bool(argstr="-gray", desc="show curvature in gray", xor=["show_curv"])

    overlay = File(exists=True, argstr="-overlay %s", desc="load an overlay volume/surface",
                   requires=["overlay_range"])
    # At most one way of registering the overlay to the surface.
    reg_xors = ["overlay_reg", "identity_reg", "mni152_reg"]
    overlay_reg = traits.File(exists=True, argstr="-overlay-reg %s", xor=reg_xors,
                              desc="registration matrix file to register overlay to surface")
    identity_reg = traits.Bool(argstr="-overlay-reg-identity", xor=reg_xors,
                               desc="use the identity matrix to register the overlay to the surface")
    mni152_reg = traits.Bool(argstr="-mni152reg", xor=reg_xors,
                             desc="use to display a volume in MNI152 space on the average subject")

    # Formatted into -fthresh / -fminmax / -fmid flags in _format_arg.
    overlay_range = traits.Either(traits.Float,
                                  traits.Tuple(traits.Float, traits.Float),
                                  traits.Tuple(traits.Float, traits.Float, traits.Float),
                                  desc="overlay range--either min, (min, max) or (min, mid, max)",
                                  argstr="%s")
    overlay_range_offset = traits.Float(argstr="-foffset %.3f",
                                        desc="overlay range will be symettric around offset value")

    truncate_overlay = traits.Bool(argstr="-truncphaseflag 1",
                                   desc="truncate the overlay display")
    reverse_overlay = traits.Bool(argstr="-revphaseflag 1",
                                  desc="reverse the overlay display")
    invert_overlay = traits.Bool(argstr="-invphaseflag 1",
                                 desc="invert the overlay display")
    demean_overlay = traits.Bool(argstr="-zm", desc="remove mean from overlay")

    annot_file = File(exists=True, argstr="-annotation %s", xor=["annot_name"],
                      desc="path to annotation file to display")
    annot_name = traits.String(argstr="-annotation %s", xor=["annot_file"],
                               desc="name of annotation to display (must be in $subject/label directory")

    label_file = File(exists=True, argstr="-label %s", xor=["label_name"],
                      desc="path to label file to display")
    label_name = traits.String(argstr="-label %s", xor=["label_file"],
                               desc="name of label to display (must be in $subject/label directory")

    colortable = File(exists=True, argstr="-colortable %s", desc="load colortable file")
    label_under = traits.Bool(argstr="-labels-under", desc="draw label/annotation under overlay")
    label_outline = traits.Bool(argstr="-label-outline", desc="draw label/annotation as outline")

    patch_file = File(exists=True, argstr="-patch %s", desc="load a patch")

    orig_suffix = traits.String(argstr="-orig %s", desc="set the orig surface suffix string")
    sphere_suffix = traits.String(argstr="-sphere %s", desc="set the sphere.reg suffix string")

    show_color_scale = traits.Bool(argstr="-colscalebarflag 1",
                                   desc="display the color scale bar")
    show_color_text = traits.Bool(argstr="-colscaletext 1",
                                  desc="display text in the color scale bar")

    six_images = traits.Bool(desc="also take anterior and posterior snapshots")

    screenshot_stem = traits.String(desc="stem to use for screenshot file names")
    stem_template_args = traits.List(traits.String, requires=["screenshot_stem"],
                                     desc="input names to use as arguments for a string-formated stem template")
    tcl_script = File(exists=True, argstr="%s", genfile=True,
                      desc="override default screenshot script")
class SurfaceSnapshotsOutputSpec(TraitedSpec):
    """Outputs produced by SurfaceSnapshots."""

    snapshots = OutputMultiPath(File(exists=True),
                                desc="tiff images of the surface from different perspectives")
class SurfaceSnapshots(FSCommand):
    """Use Tksurfer to save pictures of the cortical surface.

    By default, this takes snapshots of the lateral, medial, ventral,
    and dorsal surfaces.  See the ``six_images`` option to add the
    anterior and posterior surfaces.

    You may also supply your own tcl script (see the Freesurfer wiki for
    information on scripting tksurfer). The screenshot stem is set as the
    environment variable "_SNAPSHOT_STEM", which you can use in your
    own scripts.

    Node that this interface will not run if you do not have graphics
    enabled on your system.

    Examples
    --------
    >>> import nipype.interfaces.freesurfer as fs
    >>> shots = fs.SurfaceSnapshots(subject_id="fsaverage", hemi="lh", surface="pial")
    >>> shots.inputs.overlay = "zstat1.nii.gz"
    >>> shots.inputs.overlay_range = (2.3, 6)
    >>> shots.inputs.overlay_reg = "register.dat"
    >>> res = shots.run() # doctest: +SKIP
    """
    _cmd = "tksurfer"
    input_spec = SurfaceSnapshotsInputSpec
    output_spec = SurfaceSnapshotsOutputSpec

    def _format_arg(self, name, spec, value):
        """Translate compound traits into tksurfer command-line flags."""
        if name == "tcl_script":
            if not isdefined(value):
                return "-tcl snapshots.tcl"
            else:
                return "-tcl %s" % value
        elif name == "overlay_range":
            if isinstance(value, float):
                return "-fthresh %.3f" % value
            else:
                if len(value) == 2:
                    return "-fminmax %.3f %.3f" % value
                else:
                    # Input order is (min, mid, max); tksurfer takes
                    # -fminmax <min> <max> -fmid <mid>.
                    return "-fminmax %.3f %.3f -fmid %.3f" % (value[0], value[2], value[1])
        elif name == "annot_name" and isdefined(value):
            # Matching annot by name needs to strip the leading hemi and trailing
            # extension strings
            if value.endswith(".annot"):
                value = value[:-6]
            # Raw string for the regex (was an unescaped literal).
            if re.match(r"%s[\.\-_]" % self.inputs.hemi, value[:3]):
                value = value[3:]
            return "-annotation %s" % value
        return super(SurfaceSnapshots, self)._format_arg(name, spec, value)

    def _run_interface(self, runtime):
        """Resolve the screenshot stem, write the tcl script, and run tksurfer."""
        if not isdefined(self.inputs.screenshot_stem):
            stem = "%s_%s_%s" % (
                self.inputs.subject_id, self.inputs.hemi, self.inputs.surface)
        else:
            stem = self.inputs.screenshot_stem
            stem_args = self.inputs.stem_template_args
            if isdefined(stem_args):
                args = tuple([getattr(self.inputs, arg) for arg in stem_args])
                stem = stem % args
        # Check if the DISPLAY variable is set -- should avoid crashes (might not?)
        if "DISPLAY" not in os.environ:
            raise RuntimeError("Graphics are not enabled -- cannot run tksurfer")
        runtime.environ["_SNAPSHOT_STEM"] = stem
        self._write_tcl_script()
        runtime = super(SurfaceSnapshots, self)._run_interface(runtime)
        # If a display window can't be opened, this will crash on
        # aggregate_outputs.  Let's try to parse stderr and raise a
        # better exception here if that happened.
        errors = ["surfer: failed, no suitable display found",
                  "Fatal Error in tksurfer.bin: could not open display"]
        for err in errors:
            if err in runtime.stderr:
                self.raise_exception(runtime)
        # Tksurfer always (or at least always when you run a tcl script)
        # exits with a nonzero returncode.  We have to force it to 0 here.
        runtime.returncode = 0
        return runtime

    def _write_tcl_script(self):
        """Write snapshots.tcl, which drives tksurfer through the views."""
        script = ["save_tiff $env(_SNAPSHOT_STEM)-lat.tif",
                  "make_lateral_view",
                  "rotate_brain_y 180",
                  "redraw",
                  "save_tiff $env(_SNAPSHOT_STEM)-med.tif",
                  "make_lateral_view",
                  "rotate_brain_x 90",
                  "redraw",
                  "save_tiff $env(_SNAPSHOT_STEM)-ven.tif",
                  "make_lateral_view",
                  "rotate_brain_x -90",
                  "redraw",
                  "save_tiff $env(_SNAPSHOT_STEM)-dor.tif"]
        if isdefined(self.inputs.six_images) and self.inputs.six_images:
            script.extend(["make_lateral_view",
                           "rotate_brain_y 90",
                           "redraw",
                           "save_tiff $env(_SNAPSHOT_STEM)-pos.tif",
                           "make_lateral_view",
                           "rotate_brain_y -90",
                           "redraw",
                           "save_tiff $env(_SNAPSHOT_STEM)-ant.tif"])
        script.append("exit")
        # Context manager guarantees the handle is closed even on error
        # (the original used a bare open()/close() pair).
        with open("snapshots.tcl", "w") as fid:
            fid.write("\n".join(script))

    def _list_outputs(self):
        """Predict the snapshot filenames from the stem and view suffixes."""
        outputs = self._outputs().get()
        if not isdefined(self.inputs.screenshot_stem):
            stem = "%s_%s_%s" % (self.inputs.subject_id, self.inputs.hemi, self.inputs.surface)
        else:
            stem = self.inputs.screenshot_stem
            stem_args = self.inputs.stem_template_args
            if isdefined(stem_args):
                args = tuple([getattr(self.inputs, arg) for arg in stem_args])
                stem = stem % args
        snapshots = ["%s-lat.tif", "%s-med.tif", "%s-dor.tif", "%s-ven.tif"]
        if self.inputs.six_images:
            snapshots.extend(["%s-pos.tif", "%s-ant.tif"])
        snapshots = [self._gen_fname(f % stem, suffix="") for f in snapshots]
        outputs["snapshots"] = snapshots
        return outputs

    def _gen_filename(self, name):
        # Only the tcl script name is auto-generated.
        if name == "tcl_script":
            return "snapshots.tcl"
        return None
class ImageInfoInputSpec(FSTraitedSpec):
    """Inputs for ImageInfo (Freesurfer's mri_info)."""

    in_file = File(exists=True, position=1, argstr="%s", desc="image to query")
class ImageInfoOutputSpec(TraitedSpec):
    """Outputs parsed from mri_info's stdout."""

    info = traits.Any(desc="output of mri_info")
    out_file = File(exists=True, desc="text file with image information")
    data_type = traits.String(desc="image data type")
    file_format = traits.String(desc="file format")
    TE = traits.String(desc="echo time (msec)")
    TR = traits.String(desc="repetition time(msec)")
    TI = traits.String(desc="inversion time (msec)")
    dimensions = traits.Tuple(desc="image dimensions (voxels)")
    vox_sizes = traits.Tuple(desc="voxel sizes (mm)")
    orientation = traits.String(desc="image orientation")
    ph_enc_dir = traits.String(desc="phase encode direction")
class ImageInfo(FSCommand):
    """Query image metadata with Freesurfer's mri_info and parse the output."""

    _cmd = "mri_info"
    input_spec = ImageInfoInputSpec
    output_spec = ImageInfoOutputSpec

    def info_regexp(self, info, field, delim="\n"):
        """Return the value printed for ``field`` in mri_info output, or None.

        ``delim`` is the terminator that follows the value (newline by
        default, ", " for fields that share a line).
        """
        # Raw string for the regex (was an unescaped literal).
        m = re.search(r"%s\s*:\s+(.+?)%s" % (field, delim), info)
        if m:
            return m.group(1)
        else:
            return None

    def aggregate_outputs(self, runtime=None, needed_outputs=None):
        """Parse the captured stdout of mri_info into the output traits."""
        outputs = self._outputs()
        info = runtime.stdout
        outputs.info = info

        # Pulse sequence parameters
        for field in ["TE", "TR", "TI"]:
            fieldval = self.info_regexp(info, field, ", ")
            # Guard against a missing field: info_regexp returns None and the
            # original code crashed calling .endswith on it.
            if fieldval and fieldval.endswith(" msec"):
                fieldval = fieldval[:-5]
            setattr(outputs, field, fieldval)

        # Voxel info
        vox = self.info_regexp(info, "voxel sizes")
        vox = tuple(vox.split(", "))
        outputs.vox_sizes = vox
        dim = self.info_regexp(info, "dimensions")
        dim = tuple([int(d) for d in dim.split(" x ")])
        outputs.dimensions = dim

        outputs.orientation = self.info_regexp(info, "Orientation")
        outputs.ph_enc_dir = self.info_regexp(info, "PhEncDir")

        # File format and datatype are both keyed by "type"
        ftype, dtype = re.findall(r"%s\s*:\s+(.+?)\n" % "type", info)
        outputs.file_format = ftype
        outputs.data_type = dtype

        return outputs
class MRIsConvertInputSpec(FSTraitedSpec):
    """
    Uses Freesurfer's mris_convert to convert surface files to various formats
    """
    # Overlay/annotation inputs; each still requires a surface in in_file.
    annot_file = File(exists=True, argstr="--annot %s",
                      desc="input is annotation or gifti label data")
    parcstats_file = File(exists=True, argstr="--parcstats %s",
                          desc="infile is name of text file containing label/val pairs")
    label_file = File(exists=True, argstr="--label %s",
                      desc="infile is .label file, label is name of this label")
    scalarcurv_file = File(exists=True, argstr="-c %s",
                           desc="input is scalar curv overlay file (must still specify surface)")
    functional_file = File(exists=True, argstr="-f %s",
                           desc="input is functional time-series or other multi-frame data (must specify surface)")
    labelstats_outfile = File(exists=False, argstr="--labelstats %s",
                              desc="outfile is name of gifti file to which label stats will be written")
    # Boolean flags passed straight through to mris_convert.
    patch = traits.Bool(argstr="-p", desc="input is a patch, not a full surface")
    rescale = traits.Bool(argstr="-r", desc="rescale vertex xyz so total area is same as group average")
    normal = traits.Bool(argstr="-n", desc="output is an ascii file where vertex data")
    xyz_ascii = traits.Bool(argstr="-a", desc="Print only surface xyz to ascii file")
    vertex = traits.Bool(argstr="-v", desc="Writes out neighbors of a vertex in each row")
    scale = traits.Float(argstr="-s %.3f", desc="scale vertex xyz by scale")
    dataarray_num = traits.Int(argstr="--da_num %d", desc="if input is gifti, 'num' specifies which data array to use")
    talairachxfm_subjid = traits.String(argstr="-t %s", desc="apply talairach xfm of subject to vertex xyz")
    origname = traits.String(argstr="-o %s", desc="read orig positions")
    in_file = File(exists=True, mandatory=True, position=-2, argstr='%s', desc='File to read/convert')
    # Exactly one of out_file / out_datatype must be set (xor); the output
    # name is generated from out_datatype when out_file is omitted.
    out_file = File(argstr='%s', position=-1, genfile=True,
                    xor=['out_datatype'], mandatory=True,
                    desc='output filename or True to generate one')
    out_datatype = traits.Enum("asc", "ico", "tri", "stl", "vtk", "gii", "mgh", "mgz",
                               xor=['out_file'], mandatory=True,
                               desc="These file formats are supported: ASCII: .asc"
                               "ICO: .ico, .tri GEO: .geo STL: .stl VTK: .vtk GIFTI: .gii MGH surface-encoded 'volume': .mgh, .mgz")
    to_scanner = traits.Bool(argstr="--to-scanner",
                             desc="convert coordinates from native FS (tkr) coords to scanner coords")
    to_tkr = traits.Bool(argstr="--to-tkr",
                         desc="convert coordinates from scanner coords to native FS (tkr) coords")
class MRIsConvertOutputSpec(TraitedSpec):
    """
    Uses Freesurfer's mris_convert to convert surface files to various formats
    """
    # Absolute path of the converted surface (see MRIsConvert._list_outputs).
    converted = File(exists=True, desc='converted output surface')
class MRIsConvert(FSCommand):
    """
    Uses Freesurfer's mris_convert to convert surface files to various formats
    Example
    -------
    >>> import nipype.interfaces.freesurfer as fs
    >>> mris = fs.MRIsConvert()
    >>> mris.inputs.in_file = 'lh.pial'
    >>> mris.inputs.out_datatype = 'gii'
    >>> mris.run() # doctest: +SKIP
    """
    _cmd = 'mris_convert'
    input_spec = MRIsConvertInputSpec
    output_spec = MRIsConvertOutputSpec

    # Inputs consulted, in priority order, to derive a generated output name.
    _NAME_SOURCES = ('annot_file', 'parcstats_file', 'label_file',
                     'scalarcurv_file', 'functional_file', 'in_file')

    def _format_arg(self, name, spec, value):
        # Force out_file to an absolute path so the command writes where
        # the outputs say it will.
        if name == "out_file" and not os.path.isabs(value):
            value = os.path.abspath(value)
        return super(MRIsConvert, self)._format_arg(name, spec, value)

    def _list_outputs(self):
        outputs = self.output_spec().get()
        outputs["converted"] = os.path.abspath(self._gen_outfilename())
        return outputs

    def _gen_filename(self, name):
        return os.path.abspath(self._gen_outfilename()) if name == 'out_file' else None

    def _gen_outfilename(self):
        """Return out_file if set, else a name derived from the first
        defined input (same priority order as the original elif chain)."""
        if isdefined(self.inputs.out_file):
            return self.inputs.out_file
        for source in self._NAME_SOURCES:
            value = getattr(self.inputs, source)
            if isdefined(value):
                _, name, ext = split_filename(value)
                break
        return name + ext + "_converted." + self.inputs.out_datatype
class MRIsCombineInputSpec(FSTraitedSpec):
    """
    Uses Freesurfer's mris_convert to combine two surface files into one.
    """
    # BUG FIX: was File(Exists=True) — traits silently stores unknown
    # metadata keys, so the capitalized 'Exists' disabled the existence
    # check entirely. 'exists=True' restores the intended validation.
    in_files = traits.List(File(exists=True), maxlen=2, minlen=2,
                           mandatory=True, position=1, argstr='--combinesurfs %s',
                           desc='Two surfaces to be combined.')
    out_file = File(argstr='%s', position=-1, genfile=True,
                    mandatory=True,
                    desc='Output filename. Combined surfaces from in_files.')
class MRIsCombineOutputSpec(TraitedSpec):
    """
    Uses Freesurfer's mris_convert to combine two surface files into one.
    """
    # Note: mris_convert may prepend 'lh.' to the name; see
    # MRIsCombine._list_outputs for how that is mirrored here.
    out_file = File(exists=True, desc='Output filename. Combined surfaces from '
                    'in_files.')
class MRIsCombine(FSSurfaceCommand):
    """
    Uses Freesurfer's ``mris_convert`` to combine two surface files into one.
    For complete details, see the `mris_convert Documentation.
    <https://surfer.nmr.mgh.harvard.edu/fswiki/mris_convert>`_
    If given an ``out_file`` that does not begin with ``'lh.'`` or ``'rh.'``,
    ``mris_convert`` will prepend ``'lh.'`` to the file name.
    To avoid this behavior, consider setting ``out_file = './<filename>'``, or
    leaving out_file blank.
    In a Node/Workflow, ``out_file`` is interpreted literally.
    Example
    -------
    >>> import nipype.interfaces.freesurfer as fs
    >>> mris = fs.MRIsCombine()
    >>> mris.inputs.in_files = ['lh.pial', 'rh.pial']
    >>> mris.inputs.out_file = 'bh.pial'
    >>> mris.cmdline # doctest: +ALLOW_UNICODE
    'mris_convert --combinesurfs lh.pial rh.pial bh.pial'
    >>> mris.run() # doctest: +SKIP
    """
    _cmd = 'mris_convert'
    input_spec = MRIsCombineInputSpec
    output_spec = MRIsCombineOutputSpec

    def _list_outputs(self):
        outputs = self._outputs().get()
        # mris_convert --combinesurfs defaults to an 'lh.' prefix whenever
        # the requested name carries no path component and no hemisphere
        # prefix of its own; mirror that here so out_file matches reality.
        dirname, filename = os.path.split(self.inputs.out_file)
        if dirname == '' and not filename.startswith(('lh.', 'rh.')):
            filename = 'lh.' + filename
        outputs['out_file'] = os.path.abspath(os.path.join(dirname, filename))
        return outputs

    def _normalize_filenames(self):
        """ In a Node context, interpret out_file as a literal path to
        reduce surprise.
        """
        if not isdefined(self.inputs.out_file):
            return
        self.inputs.out_file = os.path.abspath(self.inputs.out_file)
class MRITessellateInputSpec(FSTraitedSpec):
    """
    Uses Freesurfer's mri_tessellate to create surfaces by tessellating a given input volume
    """
    in_file = File(exists=True, mandatory=True, position=-3, argstr='%s', desc='Input volume to tesselate voxels from.')
    label_value = traits.Int(position=-2, argstr='%d', mandatory=True,
                             desc='Label value which to tesselate from the input volume. (integer, if input is "filled.mgz" volume, 127 is rh, 255 is lh)')
    # When omitted, a name is generated from in_file and label_value.
    out_file = File(argstr='%s', position=-1, genfile=True, desc='output filename or True to generate one')
    tesselate_all_voxels = traits.Bool(argstr='-a', desc='Tessellate the surface of all voxels with different labels')
    use_real_RAS_coordinates = traits.Bool(argstr='-n', desc='Saves surface with real RAS coordinates where c_(r,a,s) != 0')
class MRITessellateOutputSpec(TraitedSpec):
    """
    Uses Freesurfer's mri_tessellate to create surfaces by tessellating a given input volume
    """
    surface = File(exists=True, desc='binary surface of the tessellation ')
class MRITessellate(FSCommand):
    """
    Uses Freesurfer's mri_tessellate to create surfaces by tessellating a given input volume
    Example
    -------
    >>> import nipype.interfaces.freesurfer as fs
    >>> tess = fs.MRITessellate()
    >>> tess.inputs.in_file = 'aseg.mgz'
    >>> tess.inputs.label_value = 17
    >>> tess.inputs.out_file = 'lh.hippocampus'
    >>> tess.run() # doctest: +SKIP
    """
    _cmd = 'mri_tessellate'
    input_spec = MRITessellateInputSpec
    output_spec = MRITessellateOutputSpec

    def _list_outputs(self):
        surface_path = os.path.abspath(self._gen_outfilename())
        outputs = self.output_spec().get()
        outputs['surface'] = surface_path
        return outputs

    def _gen_filename(self, name):
        # Only out_file is generated.
        return self._gen_outfilename() if name == 'out_file' else None

    def _gen_outfilename(self):
        """Use out_file when given, else derive <in_file>_<label_value>."""
        if isdefined(self.inputs.out_file):
            return self.inputs.out_file
        _, base, ext = split_filename(self.inputs.in_file)
        return '%s%s_%s' % (base, ext, self.inputs.label_value)
class MRIPretessInputSpec(FSTraitedSpec):
    """Input specification for :class:`MRIPretess` (``mri_pretess``)."""
    in_filled = File(exists=True, mandatory=True, position=-4, argstr='%s',
                     desc=('filled volume, usually wm.mgz'))
    # Accepts either a FreeSurfer label string (e.g. 'wm') or a numeric value.
    label = traits.Either(traits.Str('wm'), traits.Int(1), argstr='%s', default='wm',
                          mandatory=True, usedefault=True, position=-3,
                          desc=('label to be picked up, can be a Freesurfer\'s string like '
                                '\'wm\' or a label value (e.g. 127 for rh or 255 for lh)'))
    in_norm = File(exists=True, mandatory=True, position=-2, argstr='%s',
                   desc=('the normalized, brain-extracted T1w image. Usually norm.mgz'))
    # Auto-named from in_filled via name_source/name_template.
    out_file = File(position=-1, argstr='%s', name_source=['in_filled'], name_template='%s_pretesswm',
                    keep_extension=True, desc='the output file after mri_pretess.')
    nocorners = traits.Bool(False, argstr='-nocorners', desc=('do not remove corner configurations'
                                                              ' in addition to edge ones.'))
    keep = traits.Bool(False, argstr='-keep', desc=('keep WM edits'))
    test = traits.Bool(False, argstr='-test', desc=('adds a voxel that should be removed by '
                                                    'mri_pretess. The value of the voxel is set to that of an ON-edited WM, '
                                                    'so it should be kept with -keep. The output will NOT be saved.'))
class MRIPretessOutputSpec(TraitedSpec):
    """Output specification for :class:`MRIPretess`."""
    out_file = File(exists=True, desc='output file after mri_pretess')
class MRIPretess(FSCommand):
    """
    Uses Freesurfer's mri_pretess to prepare volumes to be tessellated.
    Description
    -----------
    Changes white matter (WM) segmentation so that the neighbors of all
    voxels labeled as WM have a face in common - no edges or corners
    allowed.
    Example
    -------
    >>> import nipype.interfaces.freesurfer as fs
    >>> pretess = fs.MRIPretess()
    >>> pretess.inputs.in_filled = 'wm.mgz'
    >>> pretess.inputs.in_norm = 'norm.mgz'
    >>> pretess.inputs.nocorners = True
    >>> pretess.cmdline # doctest: +ALLOW_UNICODE
    'mri_pretess -nocorners wm.mgz wm norm.mgz wm_pretesswm.mgz'
    >>> pretess.run() # doctest: +SKIP
    """
    # No custom output handling needed: out_file is auto-named via
    # name_source in the input spec.
    _cmd = 'mri_pretess'
    input_spec = MRIPretessInputSpec
    output_spec = MRIPretessOutputSpec
class MRIMarchingCubesInputSpec(FSTraitedSpec):
    """
    Uses Freesurfer's mri_mc to create surfaces by tessellating a given input volume
    """
    in_file = File(exists=True, mandatory=True, position=1, argstr='%s', desc='Input volume to tesselate voxels from.')
    label_value = traits.Int(position=2, argstr='%d', mandatory=True,
                             desc='Label value which to tesselate from the input volume. (integer, if input is "filled.mgz" volume, 127 is rh, 255 is lh)')
    connectivity_value = traits.Int(1, position=-1, argstr='%d', usedefault=True,
                                    desc='Alter the marching cubes connectivity: 1=6+,2=18,3=6,4=26 (default=1)')
    # './%s' forces the output into the working directory.
    out_file = File(argstr='./%s', position=-2, genfile=True, desc='output filename or True to generate one')
class MRIMarchingCubesOutputSpec(TraitedSpec):
    """
    Uses Freesurfer's mri_mc to create surfaces by tessellating a given input volume
    """
    surface = File(exists=True, desc='binary surface of the tessellation ')
class MRIMarchingCubes(FSCommand):
    """
    Uses Freesurfer's mri_mc to create surfaces by tessellating a given input volume
    Example
    -------
    >>> import nipype.interfaces.freesurfer as fs
    >>> mc = fs.MRIMarchingCubes()
    >>> mc.inputs.in_file = 'aseg.mgz'
    >>> mc.inputs.label_value = 17
    >>> mc.inputs.out_file = 'lh.hippocampus'
    >>> mc.run() # doctest: +SKIP
    """
    _cmd = 'mri_mc'
    input_spec = MRIMarchingCubesInputSpec
    output_spec = MRIMarchingCubesOutputSpec

    def _list_outputs(self):
        outputs = self.output_spec().get()
        outputs['surface'] = self._gen_outfilename()
        return outputs

    def _gen_filename(self, name):
        # Only out_file is generated.
        return self._gen_outfilename() if name == 'out_file' else None

    def _gen_outfilename(self):
        """Absolute out_file when set, else <in_file>_<label_value>."""
        if isdefined(self.inputs.out_file):
            return os.path.abspath(self.inputs.out_file)
        _, base, ext = split_filename(self.inputs.in_file)
        return os.path.abspath('%s%s_%s' % (base, ext, self.inputs.label_value))
class SmoothTessellationInputSpec(FSTraitedSpec):
    """
    This program smooths the tessellation of a surface using 'mris_smooth'
    """
    in_file = File(exists=True, mandatory=True, argstr='%s',
                   position=-2, copyfile=True,
                   desc='Input volume to tesselate voxels from.')
    curvature_averaging_iterations = traits.Int(argstr='-a %d', desc='Number of curvature averaging iterations (default=10)')
    smoothing_iterations = traits.Int(argstr='-n %d', desc='Number of smoothing iterations (default=10)')
    snapshot_writing_iterations = traits.Int(argstr='-w %d', desc='Write snapshot every "n" iterations')
    use_gaussian_curvature_smoothing = traits.Bool(argstr='-g', desc='Use Gaussian curvature smoothing')
    # NOTE(review): the trailing space in '%d ' appears deliberate so the two
    # bare Gaussian integers stay separated on the command line — confirm.
    gaussian_curvature_norm_steps = traits.Int(argstr='%d ', desc='Use Gaussian curvature smoothing')
    gaussian_curvature_smoothing_steps = traits.Int(argstr='%d', desc='Use Gaussian curvature smoothing')
    disable_estimates = traits.Bool(argstr='-nw', desc='Disables the writing of curvature and area estimates')
    normalize_area = traits.Bool(argstr='-area', desc='Normalizes the area after smoothing')
    use_momentum = traits.Bool(argstr='-m', desc='Uses momentum')
    out_file = File(argstr='%s', position=-1, genfile=True, desc='output filename or True to generate one')
    out_curvature_file = File(argstr='-c %s', desc='Write curvature to ?h.curvname (default "curv")')
    out_area_file = File(argstr='-b %s', desc='Write area to ?h.areaname (default "area")')
    seed = traits.Int(argstr="-seed %d",
                      desc="Seed for setting random number generator")
class SmoothTessellationOutputSpec(TraitedSpec):
    """
    This program smooths the tessellation of a surface using 'mris_smooth'
    """
    surface = File(exists=True, desc='Smoothed surface file ')
class SmoothTessellation(FSCommand):
    """
    This program smooths the tessellation of a surface using 'mris_smooth'
    .. seealso::
    SurfaceSmooth() Interface
    For smoothing a scalar field along a surface manifold
    Example
    -------
    >>> import nipype.interfaces.freesurfer as fs
    >>> smooth = fs.SmoothTessellation()
    >>> smooth.inputs.in_file = 'lh.hippocampus.stl'
    >>> smooth.run() # doctest: +SKIP
    """
    _cmd = 'mris_smooth'
    input_spec = SmoothTessellationInputSpec
    output_spec = SmoothTessellationOutputSpec

    def _list_outputs(self):
        outputs = self.output_spec().get()
        outputs['surface'] = self._gen_outfilename()
        return outputs

    def _gen_filename(self, name):
        # Only out_file is generated.
        return self._gen_outfilename() if name == 'out_file' else None

    def _gen_outfilename(self):
        """Absolute out_file when set, else <in_file>_smoothed<ext>."""
        if isdefined(self.inputs.out_file):
            return os.path.abspath(self.inputs.out_file)
        _, base, ext = split_filename(self.inputs.in_file)
        return os.path.abspath(base + '_smoothed' + ext)

    def _run_interface(self, runtime):
        # The return code alone is not a reliable failure signal here, so
        # inspect stderr and raise if it reports a failure.
        runtime = super(SmoothTessellation, self)._run_interface(runtime)
        if "failed" in runtime.stderr:
            self.raise_exception(runtime)
        return runtime
class MakeAverageSubjectInputSpec(FSTraitedSpec):
    """Input specification for :class:`MakeAverageSubject`."""
    # Joined with spaces into a single --subjects argument.
    subjects_ids = traits.List(traits.Str(), argstr='--subjects %s',
                               desc='freesurfer subjects ids to average',
                               mandatory=True, sep=' ')
    out_name = File('average', argstr='--out %s',
                    desc='name for the average subject', usedefault=True)
class MakeAverageSubjectOutputSpec(TraitedSpec):
    """Output specification for :class:`MakeAverageSubject`."""
    average_subject_name = traits.Str(desc='Output registration file')
class MakeAverageSubject(FSCommand):
    """Make an average freesurfer subject
    Examples
    --------
    >>> from nipype.interfaces.freesurfer import MakeAverageSubject
    >>> avg = MakeAverageSubject(subjects_ids=['s1', 's2'])
    >>> avg.cmdline # doctest: +ALLOW_UNICODE
    'make_average_subject --out average --subjects s1 s2'
    """
    _cmd = 'make_average_subject'
    input_spec = MakeAverageSubjectInputSpec
    output_spec = MakeAverageSubjectOutputSpec

    def _list_outputs(self):
        # The "output" is simply the subject name that was requested.
        listed = self.output_spec().get()
        listed['average_subject_name'] = self.inputs.out_name
        return listed
class ExtractMainComponentInputSpec(CommandLineInputSpec):
    """Input specification for :class:`ExtractMainComponent`."""
    in_file = File(exists=True, mandatory=True, argstr='%s', position=1,
                   desc='input surface file')
    # Auto-named '<in_file>.maincmp' when not set explicitly.
    out_file = File(name_template='%s.maincmp', name_source='in_file',
                    argstr='%s', position=2,
                    desc='surface containing main component')
class ExtractMainComponentOutputSpec(TraitedSpec):
    """Output specification for :class:`ExtractMainComponent`."""
    out_file = File(exists=True, desc='surface containing main component')
class ExtractMainComponent(CommandLine):
    """Extract the main component of a tesselated surface
    Examples
    --------
    >>> from nipype.interfaces.freesurfer import ExtractMainComponent
    >>> mcmp = ExtractMainComponent(in_file='lh.pial')
    >>> mcmp.cmdline # doctest: +ALLOW_UNICODE
    'mris_extract_main_component lh.pial lh.maincmp'
    """
    # out_file is auto-named by the input spec, so no custom output
    # handling is required.
    _cmd = 'mris_extract_main_component'
    input_spec = ExtractMainComponentInputSpec
    output_spec = ExtractMainComponentOutputSpec
class Tkregister2InputSpec(FSTraitedSpec):
    """Input specification for :class:`Tkregister2` (``tkregister2``)."""
    target_image = File(exists=True, argstr="--targ %s",
                        xor=['fstarg'],
                        desc='target volume')
    fstarg = traits.Bool(False, argstr='--fstarg',
                         xor=['target_image'],
                         desc='use subject\'s T1 as reference')
    moving_image = File(exists=True, mandatory=True, argstr="--mov %s",
                        desc='moving volume')
    # Input registration file options
    fsl_in_matrix = File(exists=True, argstr="--fsl %s",
                         desc='fsl-style registration input matrix')
    xfm = File(exists=True, argstr='--xfm %s',
               desc='use a matrix in MNI coordinates as initial registration')
    # FIX: desc was a copy of xfm's; an LTA file is not an MNI-coordinate
    # matrix.
    lta_in = File(exists=True, argstr='--lta %s',
                  desc='use a LTA-format matrix as initial registration')
    invert_lta_in = traits.Bool(requires=['lta_in'],
                                desc='Invert input LTA before applying')
    # Output registration file options; True means "generate a name".
    # FIX: typo 'resgitration' -> 'registration' in user-facing help.
    fsl_out = traits.Either(True, File, argstr='--fslregout %s',
                            desc='compute an FSL-compatible registration matrix')
    lta_out = traits.Either(True, File, argstr='--ltaout %s',
                            desc='output registration file (LTA format)')
    invert_lta_out = traits.Bool(argstr='--ltaout-inv', requires=['lta_in'],
                                 desc='Invert input LTA before applying')
    subject_id = traits.String(argstr="--s %s",
                               desc='freesurfer subject ID')
    noedit = traits.Bool(True, argstr="--noedit", usedefault=True,
                         desc='do not open edit window (exit)')
    reg_file = File('register.dat', usedefault=True,
                    mandatory=True, argstr='--reg %s',
                    desc='freesurfer-style registration file')
    # FIX: typo 'regstration' -> 'registration' in user-facing help.
    reg_header = traits.Bool(False, argstr='--regheader',
                             desc='compute registration from headers')
    fstal = traits.Bool(False, argstr='--fstal',
                        xor=['target_image', 'moving_image', 'reg_file'],
                        desc='set mov to be tal and reg to be tal xfm')
    movscale = traits.Float(argstr='--movscale %f',
                            desc='adjust registration matrix to scale mov')
class Tkregister2OutputSpec(TraitedSpec):
    """Output specification for :class:`Tkregister2`."""
    reg_file = File(exists=True, desc='freesurfer-style registration file')
    # Only populated when fsl_out / lta_out were requested.
    fsl_file = File(desc='FSL-style registration file')
    lta_file = File(desc='LTA-style registration file')
class Tkregister2(FSCommand):
    """
    Examples
    --------
    Get transform matrix between orig (*tkRAS*) and native (*scannerRAS*)
    coordinates in Freesurfer. Implements the first step of mapping surfaces
    to native space in `this guide
    <http://surfer.nmr.mgh.harvard.edu/fswiki/FsAnat-to-NativeAnat>`_.
    >>> from nipype.interfaces.freesurfer import Tkregister2
    >>> tk2 = Tkregister2(reg_file='T1_to_native.dat')
    >>> tk2.inputs.moving_image = 'T1.mgz'
    >>> tk2.inputs.target_image = 'structural.nii'
    >>> tk2.inputs.reg_header = True
    >>> tk2.cmdline # doctest: +ALLOW_UNICODE
    'tkregister2 --mov T1.mgz --noedit --reg T1_to_native.dat --regheader \
--targ structural.nii'
    >>> tk2.run() # doctest: +SKIP
    The example below uses tkregister2 without the manual editing
    stage to convert FSL-style registration matrix (.mat) to
    FreeSurfer-style registration matrix (.dat)
    >>> from nipype.interfaces.freesurfer import Tkregister2
    >>> tk2 = Tkregister2()
    >>> tk2.inputs.moving_image = 'epi.nii'
    >>> tk2.inputs.fsl_in_matrix = 'flirt.mat'
    >>> tk2.cmdline # doctest: +ALLOW_UNICODE
    'tkregister2 --fsl flirt.mat --mov epi.nii --noedit --reg register.dat'
    >>> tk2.run() # doctest: +SKIP
    """
    _cmd = "tkregister2"
    input_spec = Tkregister2InputSpec
    output_spec = Tkregister2OutputSpec

    def _format_arg(self, name, spec, value):
        # NOTE(review): rebinding `spec` to a plain string here looks
        # suspicious — the base class presumably expects a trait-spec object
        # with an `argstr` attribute, not a str. Verify against the base
        # _format_arg before touching; left as-is to preserve behavior.
        if name == 'lta_in' and self.inputs.invert_lta_in:
            spec = '--lta-inv %s'
        if name in ('fsl_out', 'lta_out') and value is True:
            # True means "auto-generate": substitute the computed filename.
            value = self._list_outputs()[name]
        return super(Tkregister2, self)._format_arg(name, spec, value)

    def _list_outputs(self):
        outputs = self._outputs().get()
        reg_file = os.path.abspath(self.inputs.reg_file)
        outputs['reg_file'] = reg_file
        cwd = os.getcwd()
        fsl_out = self.inputs.fsl_out
        if isdefined(fsl_out):
            if fsl_out is True:
                # Auto-name next to reg_file, with .mat extension, in cwd.
                outputs['fsl_file'] = fname_presuffix(
                    reg_file, suffix='.mat', newpath=cwd, use_ext=False)
            else:
                outputs['fsl_file'] = os.path.abspath(self.inputs.fsl_out)
        lta_out = self.inputs.lta_out
        if isdefined(lta_out):
            if lta_out is True:
                outputs['lta_file'] = fname_presuffix(
                    reg_file, suffix='.lta', newpath=cwd, use_ext=False)
            else:
                outputs['lta_file'] = os.path.abspath(self.inputs.lta_out)
        return outputs

    def _gen_outfilename(self):
        # NOTE(review): this method appears vestigial (copied from another
        # interface) — Tkregister2InputSpec defines neither out_file nor
        # in_file, so both branches would fail if ever called. Candidate
        # for removal; confirm nothing dispatches to it.
        if isdefined(self.inputs.out_file):
            return os.path.abspath(self.inputs.out_file)
        else:
            _, name, ext = split_filename(self.inputs.in_file)
            return os.path.abspath(name + '_smoothed' + ext)
class AddXFormToHeaderInputSpec(FSTraitedSpec):
    """Input specification for :class:`AddXFormToHeader`."""
    # required
    in_file = File(exists=True, mandatory=True, position=-2,
                   argstr="%s", desc="input volume")
    # transform file does NOT need to exist at the time if using copy_name
    transform = File(exists=False, mandatory=True, position=-3,
                     argstr="%s", desc="xfm file")
    out_file = File('output.mgz', position=-1, argstr="%s",
                    usedefault=True, desc="output volume")
    # optional
    copy_name = traits.Bool(
        argstr="-c", desc="do not try to load the xfmfile, just copy name")
    verbose = traits.Bool(argstr="-v", desc="be verbose")
class AddXFormToHeaderOutputSpec(TraitedSpec):
    """Output specification for :class:`AddXFormToHeader`."""
    out_file = File(exists=True, desc="output volume")
class AddXFormToHeader(FSCommand):
    """ Just adds specified xform to the volume header
    (!) WARNING: transform input **MUST** be an absolute path to a DataSink'ed transform or
    the output will reference a transform in the workflow cache directory!
    >>> from nipype.interfaces.freesurfer import AddXFormToHeader
    >>> adder = AddXFormToHeader()
    >>> adder.inputs.in_file = 'norm.mgz'
    >>> adder.inputs.transform = 'trans.mat'
    >>> adder.cmdline # doctest: +ALLOW_UNICODE
    'mri_add_xform_to_header trans.mat norm.mgz output.mgz'
    >>> adder.inputs.copy_name = True
    >>> adder.cmdline # doctest: +ALLOW_UNICODE
    'mri_add_xform_to_header -c trans.mat norm.mgz output.mgz'
    >>> adder.run() # doctest: +SKIP
    References:
    ----------
    [https://surfer.nmr.mgh.harvard.edu/fswiki/mri_add_xform_to_header]
    """
    _cmd = "mri_add_xform_to_header"
    input_spec = AddXFormToHeaderInputSpec
    output_spec = AddXFormToHeaderOutputSpec

    def _format_arg(self, name, spec, value):
        # Pass the transform path through verbatim: resolving it to an
        # absolute path here would bake the workflow cache location into
        # the header (see the class docstring warning).
        if name == 'transform':
            return value
        return super(AddXFormToHeader, self)._format_arg(name, spec, value)

    def _list_outputs(self):
        listed = self._outputs().get()
        listed["out_file"] = os.path.abspath(self.inputs.out_file)
        return listed
class CheckTalairachAlignmentInputSpec(FSTraitedSpec):
    """Input specification for :class:`CheckTalairachAlignment`."""
    # Exactly one of in_file / subject must be given (xor).
    in_file = File(argstr='-xfm %s', xor=['subject'], exists=True, mandatory=True, position=-1,
                   desc="specify the talairach.xfm file to check")
    subject = traits.String(argstr='-subj %s', xor=['in_file'], mandatory=True, position=-1,
                            desc="specify subject's name")
    # optional
    threshold = traits.Float(default=0.010, argstr='-T %.3f', desc="Talairach transforms for subjects with p-values <= T " +
                             "are considered as very unlikely default=0.010")
class CheckTalairachAlignmentOutputSpec(TraitedSpec):
    """Output specification for :class:`CheckTalairachAlignment`."""
    # The checker writes no file; the input path is echoed back.
    out_file = traits.File(
        exists=True, desc="The input file for CheckTalairachAlignment")
class CheckTalairachAlignment(FSCommand):
    """
    This program detects Talairach alignment failures
    Examples
    ========
    >>> from nipype.interfaces.freesurfer import CheckTalairachAlignment
    >>> checker = CheckTalairachAlignment()
    >>> checker.inputs.in_file = 'trans.mat'
    >>> checker.inputs.threshold = 0.005
    >>> checker.cmdline # doctest: +ALLOW_UNICODE
    'talairach_afd -T 0.005 -xfm trans.mat'
    >>> checker.run() # doctest: +SKIP
    """
    _cmd = "talairach_afd"
    input_spec = CheckTalairachAlignmentInputSpec
    output_spec = CheckTalairachAlignmentOutputSpec

    def _list_outputs(self):
        # talairach_afd produces no new file: report the checked input back.
        listed = self._outputs().get()
        listed['out_file'] = self.inputs.in_file
        return listed
class TalairachAVIInputSpec(FSTraitedSpec):
    """Input specification for :class:`TalairachAVI` (``talairach_avi``)."""
    in_file = File(argstr='--i %s', exists=True, mandatory=True,
                   desc="input volume")
    out_file = File(argstr='--xfm %s', mandatory=True, exists=False,
                    desc="output xfm file")
    # optional
    atlas = traits.String(
        argstr='--atlas %s', desc="alternate target atlas (in freesurfer/average dir)")
class TalairachAVIOutputSpec(TraitedSpec):
    """Output specification for :class:`TalairachAVI`."""
    out_file = traits.File(
        exists=False, desc="The output transform for TalairachAVI")
    out_log = traits.File(
        exists=False, desc="The output log file for TalairachAVI")
    # FIX: user-facing desc typo 'TaliarachAVI' -> 'TalairachAVI'.
    out_txt = traits.File(
        exists=False, desc="The output text file for TalairachAVI")
class TalairachAVI(FSCommand):
    """
    Front-end for Avi Snyders image registration tool. Computes the
    talairach transform that maps the input volume to the MNI average_305.
    This does not add the xfm to the header of the input file. When called
    by recon-all, the xfm is added to the header after the transform is
    computed.
    Examples
    ========
    >>> from nipype.interfaces.freesurfer import TalairachAVI
    >>> example = TalairachAVI()
    >>> example.inputs.in_file = 'norm.mgz'
    >>> example.inputs.out_file = 'trans.mat'
    >>> example.cmdline # doctest: +ALLOW_UNICODE
    'talairach_avi --i norm.mgz --xfm trans.mat'
    >>> example.run() # doctest: +SKIP
    """
    _cmd = "talairach_avi"
    input_spec = TalairachAVIInputSpec
    output_spec = TalairachAVIOutputSpec

    def _list_outputs(self):
        outputs = self._outputs().get()
        outputs['out_file'] = os.path.abspath(self.inputs.out_file)
        # talairach_avi always writes its log under this fixed name in cwd.
        outputs['out_log'] = os.path.abspath('talairach_avi.log')
        # The vox2vox text file lands next to the requested transform.
        out_dir = os.path.dirname(self.inputs.out_file)
        txt_name = 'talsrcimg_to_' + str(self.inputs.atlas) + 't4_vox2vox.txt'
        outputs['out_txt'] = os.path.join(out_dir, txt_name)
        return outputs
class TalairachQCInputSpec(FSTraitedSpec):
    """Input specification for :class:`TalairachQC` (``tal_QC_AZS``)."""
    log_file = File(argstr='%s', mandatory=True, exists=True,
                    position=0, desc="The log file for TalairachQC")
class TalairachQC(FSScriptCommand):
    """
    Examples
    ========
    >>> from nipype.interfaces.freesurfer import TalairachQC
    >>> qc = TalairachQC()
    >>> qc.inputs.log_file = 'dirs.txt'
    >>> qc.cmdline # doctest: +ALLOW_UNICODE
    'tal_QC_AZS dirs.txt'
    """
    # Output handling is inherited from FSScriptCommand/FSScriptOutputSpec.
    _cmd = "tal_QC_AZS"
    input_spec = TalairachQCInputSpec
    output_spec = FSScriptOutputSpec
class RemoveNeckInputSpec(FSTraitedSpec):
    """Input specification for :class:`RemoveNeck` (``mri_remove_neck``)."""
    in_file = File(argstr="%s", exists=True, mandatory=True,
                   position=-4, desc="Input file for RemoveNeck")
    # Auto-named '<in_file>_noneck' via name_source/name_template.
    out_file = File(argstr="%s", exists=False,
                    name_source=['in_file'], name_template="%s_noneck",
                    hash_files=False, keep_extension=True,
                    position=-1, desc="Output file for RemoveNeck")
    transform = File(argstr="%s", exists=True, mandatory=True,
                     position=-3, desc="Input transform file for RemoveNeck")
    template = File(argstr="%s", exists=True, mandatory=True,
                    position=-2, desc="Input template file for RemoveNeck")
    # optional
    radius = traits.Int(argstr="-radius %d", desc="Radius")
class RemoveNeckOutputSpec(TraitedSpec):
    """Output specification for :class:`RemoveNeck`."""
    out_file = File(exists=False, desc="Output file with neck removed")
class RemoveNeck(FSCommand):
    """
    Crops the neck out of the mri image
    Examples
    ========
    >>> from nipype.interfaces.freesurfer import RemoveNeck
    >>> remove_neck = RemoveNeck()
    >>> remove_neck.inputs.in_file = 'norm.mgz'
    >>> remove_neck.inputs.transform = 'trans.mat'
    >>> remove_neck.inputs.template = 'trans.mat'
    >>> remove_neck.cmdline # doctest: +ALLOW_UNICODE
    'mri_remove_neck norm.mgz trans.mat trans.mat norm_noneck.mgz'
    """
    _cmd = "mri_remove_neck"
    input_spec = RemoveNeckInputSpec
    output_spec = RemoveNeckOutputSpec

    def _gen_fname(self, name):
        # NOTE(review): returns a fixed 'nu_noneck.mgz' which does not match
        # the spec's name_template ('%s_noneck' from in_file) — looks like a
        # leftover from a recon-all-specific usage; confirm which path wins.
        if name == 'out_file':
            return os.path.abspath('nu_noneck.mgz')
        return None

    def _list_outputs(self):
        outputs = self._outputs().get()
        outputs["out_file"] = os.path.abspath(self.inputs.out_file)
        return outputs
class MRIFillInputSpec(FSTraitedSpec):
    """Input specification for :class:`MRIFill` (``mri_fill``)."""
    in_file = File(argstr="%s", mandatory=True, exists=True, position=-2,
                   desc="Input white matter file")
    out_file = File(argstr="%s", mandatory=True, exists=False, position=-1,
                    desc="Output filled volume file name for MRIFill")
    # optional
    segmentation = File(argstr="-segmentation %s", exists=True,
                        desc="Input segmentation file for MRIFill")
    transform = File(argstr="-xform %s", exists=True,
                     desc="Input transform file for MRIFill")
    log_file = File(argstr="-a %s", desc="Output log file for MRIFill")
class MRIFillOutputSpec(TraitedSpec):
    """Output specification for :class:`MRIFill`."""
    out_file = File(exists=False, desc="Output file from MRIFill")
    # Only populated when log_file was requested.
    log_file = File(desc="Output log file from MRIFill")
class MRIFill(FSCommand):
    """
    This program creates hemispheric cutting planes and fills white matter
    with specific values for subsequent surface tesselation.
    Examples
    ========
    >>> from nipype.interfaces.freesurfer import MRIFill
    >>> fill = MRIFill()
    >>> fill.inputs.in_file = 'wm.mgz' # doctest: +SKIP
    >>> fill.inputs.out_file = 'filled.mgz' # doctest: +SKIP
    >>> fill.cmdline # doctest: +SKIP
    'mri_fill wm.mgz filled.mgz'
    """
    _cmd = "mri_fill"
    input_spec = MRIFillInputSpec
    output_spec = MRIFillOutputSpec

    def _list_outputs(self):
        outputs = self._outputs().get()
        outputs["out_file"] = os.path.abspath(self.inputs.out_file)
        # The log is optional: only report it when the user asked for one.
        log = self.inputs.log_file
        if isdefined(log):
            outputs["log_file"] = os.path.abspath(log)
        return outputs
class MRIsInflateInputSpec(FSTraitedSpec):
    """Input specification for :class:`MRIsInflate` (``mris_inflate``)."""
    in_file = File(argstr="%s", position=-2, mandatory=True,
                   exists=True, copyfile=True,
                   desc="Input file for MRIsInflate")
    # Auto-named '<in_file>.inflated' via name_source/name_template.
    out_file = File(argstr="%s", position=-1, exists=False,
                    name_source=['in_file'], name_template="%s.inflated",
                    hash_files=False, keep_extension=True,
                    desc="Output file for MRIsInflate")
    # optional; out_sulc and no_save_sulc are mutually exclusive (xor).
    out_sulc = File( exists=False,
                     xor=['no_save_sulc'],
                     desc="Output sulc file")
    no_save_sulc = traits.Bool(argstr='-no-save-sulc',
                               xor=['out_sulc'],
                               desc="Do not save sulc file as output")
class MRIsInflateOutputSpec(TraitedSpec):
    """Output specification for :class:`MRIsInflate`."""
    out_file = File(exists=False, desc="Output file for MRIsInflate")
    # Only populated when -no-save-sulc was not used.
    out_sulc = File(exists=False, desc="Output sulc file")
class MRIsInflate(FSCommand):
    """
    This program will inflate a cortical surface.
    Examples
    ========
    >>> from nipype.interfaces.freesurfer import MRIsInflate
    >>> inflate = MRIsInflate()
    >>> inflate.inputs.in_file = 'lh.pial'
    >>> inflate.inputs.no_save_sulc = True
    >>> inflate.cmdline # doctest: +SKIP
    'mris_inflate -no-save-sulc lh.pial lh.inflated'
    """
    _cmd = 'mris_inflate'
    input_spec = MRIsInflateInputSpec
    output_spec = MRIsInflateOutputSpec

    def _list_outputs(self):
        outputs = self._outputs().get()
        outputs["out_file"] = os.path.abspath(self.inputs.out_file)
        if self.inputs.no_save_sulc:
            return outputs
        # mris_inflate writes a sulc file whenever -no-save-sulc is absent
        outputs["out_sulc"] = os.path.abspath(self.inputs.out_sulc)
        return outputs
class SphereInputSpec(FSTraitedSpecOpenMP):
    """Input specification for :class:`Sphere` (``mris_sphere``)."""
    in_file = File(argstr="%s", position=-2, copyfile=True,
                   mandatory=True, exists=True,
                   desc="Input file for Sphere")
    # Auto-named '<in_file>.sphere' via name_source/name_template.
    out_file = File(argstr="%s", position=-1, exists=False,
                    name_source=['in_file'], hash_files=False,
                    name_template='%s.sphere',
                    desc="Output file for Sphere")
    # optional
    seed = traits.Int(argstr="-seed %d",
                      desc="Seed for setting random number generator")
    magic = traits.Bool(argstr="-q",
                        desc="No documentation. Direct questions to analysis-bugs@nmr.mgh.harvard.edu")
    in_smoothwm = File( exists=True, copyfile=True,
                        desc="Input surface required when -q flag is not selected")
class SphereOutputSpec(TraitedSpec):
    """Output specification for :class:`Sphere`."""
    out_file = File(exists=False, desc="Output file for Sphere")
class Sphere(FSCommandOpenMP):
    """
    This program will add a template into an average surface
    Examples
    ========
    >>> from nipype.interfaces.freesurfer import Sphere
    >>> sphere = Sphere()
    >>> sphere.inputs.in_file = 'lh.pial'
    >>> sphere.cmdline # doctest: +ALLOW_UNICODE
    'mris_sphere lh.pial lh.sphere'
    """
    _cmd = 'mris_sphere'
    input_spec = SphereInputSpec
    output_spec = SphereOutputSpec

    def _list_outputs(self):
        listed = self._outputs().get()
        listed["out_file"] = os.path.abspath(self.inputs.out_file)
        return listed
class FixTopologyInputSpec(FSTraitedSpec):
    """Input specification for FixTopology (``mris_fix_topology``)."""
    in_orig = File(exists=True, mandatory=True,
                   desc="Undocumented input file <hemisphere>.orig")
    in_inflated = File(exists=True, mandatory=True,
                       desc="Undocumented input file <hemisphere>.inflated")
    in_brain = File(exists=True, mandatory=True,
                    desc="Implicit input brain.mgz")
    in_wm = File(exists=True, mandatory=True,
                 desc="Implicit input wm.mgz")
    hemisphere = traits.String(position=-1, argstr="%s", mandatory=True,
                               desc="Hemisphere being processed")
    subject_id = traits.String('subject_id', position=-2, argstr="%s",
                               mandatory=True, usedefault=True,
                               desc="Subject being processed")
    copy_inputs = traits.Bool(mandatory=True,
                              desc="If running as a node, set this to True " +
                              "otherwise, the topology fixing will be done " +
                              "in place.")
    # optional
    seed = traits.Int(argstr="-seed %d",
                      desc="Seed for setting random number generator")
    ga = traits.Bool(argstr="-ga",
                     desc="No documentation. Direct questions to analysis-bugs@nmr.mgh.harvard.edu")
    mgz = traits.Bool(argstr="-mgz",
                      desc="No documentation. Direct questions to analysis-bugs@nmr.mgh.harvard.edu")
    sphere = traits.File(argstr="-sphere %s",
                         desc="Sphere input file")
class FixTopologyOutputSpec(TraitedSpec):
    """Output specification for FixTopology (``mris_fix_topology``)."""
    out_file = File(exists=False, desc="Output file for FixTopology")
class FixTopology(FSCommand):
    """
    This program computes a mapping from the unit sphere onto the surface
    of the cortex from a previously generated approximation of the
    cortical surface, thus guaranteeing a topologically correct surface.

    Examples
    ========
    >>> from nipype.interfaces.freesurfer import FixTopology
    >>> ft = FixTopology()
    >>> ft.inputs.in_orig = 'lh.orig' # doctest: +SKIP
    >>> ft.inputs.in_inflated = 'lh.inflated' # doctest: +SKIP
    >>> ft.inputs.sphere = 'lh.qsphere.nofix' # doctest: +SKIP
    >>> ft.inputs.hemisphere = 'lh'
    >>> ft.inputs.subject_id = '10335'
    >>> ft.inputs.mgz = True
    >>> ft.inputs.ga = True
    >>> ft.cmdline # doctest: +SKIP
    'mris_fix_topology -ga -mgz -sphere qsphere.nofix 10335 lh'
    """
    _cmd = 'mris_fix_topology'
    input_spec = FixTopologyInputSpec
    output_spec = FixTopologyOutputSpec

    def run(self, **inputs):
        # When running as a node, stage the implicit inputs into a local
        # FreeSurfer subjects directory rooted at the working directory so
        # the command can locate them by convention.
        if self.inputs.copy_inputs:
            self.inputs.subjects_dir = os.getcwd()
            if 'subjects_dir' in inputs:
                inputs['subjects_dir'] = self.inputs.subjects_dir
            hemi = self.inputs.hemisphere
            copy2subjdir(self, self.inputs.sphere, folder='surf')
            # the orig file is edited in place, so point in_orig at the copy
            self.inputs.in_orig = copy2subjdir(self,
                                               self.inputs.in_orig,
                                               folder='surf',
                                               basename='{0}.orig'.format(hemi))
            copy2subjdir(self, self.inputs.in_inflated,
                         folder='surf',
                         basename='{0}.inflated'.format(hemi))
            copy2subjdir(self, self.inputs.in_brain,
                         folder='mri', basename='brain.mgz')
            copy2subjdir(self, self.inputs.in_wm,
                         folder='mri', basename='wm.mgz')
        return super(FixTopology, self).run(**inputs)

    def _format_arg(self, name, spec, value):
        if name == 'sphere':
            # get the basename and take out the hemisphere
            # (e.g. 'lh.qsphere.nofix' -> 'qsphere.nofix')
            suffix = os.path.basename(value).split('.', 1)[1]
            return spec.argstr % suffix
        return super(FixTopology, self)._format_arg(name, spec, value)

    def _list_outputs(self):
        # The command edits the orig surface in place; that file is the output.
        outputs = self._outputs().get()
        outputs["out_file"] = os.path.abspath(self.inputs.in_orig)
        return outputs
class EulerNumberInputSpec(FSTraitedSpec):
    """Input specification for EulerNumber (``mris_euler_number``)."""
    in_file = File(argstr="%s", position=-1, mandatory=True, exists=True,
                   desc="Input file for EulerNumber")
class EulerNumberOutputSpec(TraitedSpec):
    """Output specification for EulerNumber (``mris_euler_number``)."""
    out_file = File(exists=False, desc="Output file for EulerNumber")
class EulerNumber(FSCommand):
    """Compute the Euler number of a cortical surface
    (``mris_euler_number``).

    Examples
    ========
    >>> from nipype.interfaces.freesurfer import EulerNumber
    >>> ft = EulerNumber()
    >>> ft.inputs.in_file = 'lh.pial'
    >>> ft.cmdline # doctest: +ALLOW_UNICODE
    'mris_euler_number lh.pial'
    """
    _cmd = 'mris_euler_number'
    input_spec = EulerNumberInputSpec
    output_spec = EulerNumberOutputSpec

    def _list_outputs(self):
        # The declared output is simply the (absolute) input surface path.
        results = self._outputs().get()
        results['out_file'] = os.path.abspath(self.inputs.in_file)
        return results
class RemoveIntersectionInputSpec(FSTraitedSpec):
    """Input specification for RemoveIntersection (``mris_remove_intersection``)."""
    in_file = File(argstr="%s", position=-2, mandatory=True,
                   exists=True, copyfile=True,
                   desc="Input file for RemoveIntersection")
    out_file = File(argstr="%s", position=-1, exists=False,
                    name_source=['in_file'], name_template='%s',
                    hash_files=False, keep_extension=True,
                    desc="Output file for RemoveIntersection")
class RemoveIntersectionOutputSpec(TraitedSpec):
    """Output specification for RemoveIntersection (``mris_remove_intersection``)."""
    out_file = File(exists=False, desc="Output file for RemoveIntersection")
class RemoveIntersection(FSCommand):
    """Remove self-intersections from a surface
    (``mris_remove_intersection``).

    Examples
    ========
    >>> from nipype.interfaces.freesurfer import RemoveIntersection
    >>> ri = RemoveIntersection()
    >>> ri.inputs.in_file = 'lh.pial'
    >>> ri.cmdline # doctest: +ALLOW_UNICODE
    'mris_remove_intersection lh.pial lh.pial'
    """
    _cmd = 'mris_remove_intersection'
    input_spec = RemoveIntersectionInputSpec
    output_spec = RemoveIntersectionOutputSpec

    def _list_outputs(self):
        # Single declared output: absolute path of the corrected surface.
        results = self._outputs().get()
        results['out_file'] = os.path.abspath(self.inputs.out_file)
        return results
class MakeSurfacesInputSpec(FSTraitedSpec):
    """Input specification for MakeSurfaces (``mris_make_surfaces``)."""
    # required
    hemisphere = traits.Enum('lh', 'rh',
                             position=-1, argstr="%s", mandatory=True,
                             desc="Hemisphere being processed")
    subject_id = traits.String('subject_id', usedefault=True,
                               position=-2, argstr="%s", mandatory=True,
                               desc="Subject being processed")
    # implicit
    in_orig = File(exists=True, mandatory=True, argstr='-orig %s',
                   desc="Implicit input file <hemisphere>.orig")
    in_wm = File(exists=True, mandatory=True,
                 desc="Implicit input file wm.mgz")
    in_filled = File(exists=True, mandatory=True,
                     desc="Implicit input file filled.mgz")
    # optional
    in_white = File(exists=True, desc="Implicit input that is sometimes used")
    in_label = File(exists=True, xor=['noaparc'],
                    desc="Implicit input label/<hemisphere>.aparc.annot")
    orig_white = File(argstr="-orig_white %s", exists=True,
                      desc="Specify a white surface to start with")
    orig_pial = File(argstr="-orig_pial %s", exists=True, requires=['in_label'],
                     desc="Specify a pial surface to start with")
    fix_mtl = traits.Bool(argstr="-fix_mtl",
                          desc="Undocumented flag")
    no_white = traits.Bool(argstr="-nowhite",
                           desc="Undocumented flag")
    white_only = traits.Bool(argstr="-whiteonly",
                             desc="Undocumented flage")
    in_aseg = File(argstr="-aseg %s", exists=True,
                   desc="Input segmentation file")
    in_T1 = File(argstr="-T1 %s", exists=True,
                 desc="Input brain or T1 file")
    mgz = traits.Bool(
        argstr="-mgz",
        desc="No documentation. Direct questions to analysis-bugs@nmr.mgh.harvard.edu")
    noaparc = traits.Bool(
        argstr="-noaparc", xor=['in_label'],
        desc="No documentation. Direct questions to analysis-bugs@nmr.mgh.harvard.edu")
    maximum = traits.Float(
        argstr="-max %.1f", desc="No documentation (used for longitudinal processing)")
    longitudinal = traits.Bool(
        argstr="-long", desc="No documentation (used for longitudinal processing)")
    white = traits.String(argstr="-white %s",
                          desc="White surface name")
    copy_inputs = traits.Bool(
        desc="If running as a node, set this to True." +
        "This will copy the input files to the node " +
        "directory.")
class MakeSurfacesOutputSpec(TraitedSpec):
    """Output specification for MakeSurfaces (``mris_make_surfaces``)."""
    out_white = File(
        exists=False, desc="Output white matter hemisphere surface")
    out_curv = File(exists=False, desc="Output curv file for MakeSurfaces")
    out_area = File(exists=False, desc="Output area file for MakeSurfaces")
    out_cortex = File(exists=False, desc="Output cortex file for MakeSurfaces")
    out_pial = File(exists=False, desc="Output pial surface for MakeSurfaces")
    out_thickness = File(
        exists=False, desc="Output thickness file for MakeSurfaces")
class MakeSurfaces(FSCommand):
    """
    This program positions the tessellation of the cortical surface at the
    white matter surface, then the gray matter surface and generate
    surface files for these surfaces as well as a 'curvature' file for the
    cortical thickness, and a surface file which approximates layer IV of
    the cortical sheet.

    Examples
    ========
    >>> from nipype.interfaces.freesurfer import MakeSurfaces
    >>> makesurfaces = MakeSurfaces()
    >>> makesurfaces.inputs.hemisphere = 'lh'
    >>> makesurfaces.inputs.subject_id = '10335'
    >>> makesurfaces.inputs.in_orig = 'lh.pial'
    >>> makesurfaces.inputs.in_wm = 'wm.mgz'
    >>> makesurfaces.inputs.in_filled = 'norm.mgz'
    >>> makesurfaces.inputs.in_label = 'aparc+aseg.nii'
    >>> makesurfaces.inputs.in_T1 = 'T1.mgz'
    >>> makesurfaces.inputs.orig_pial = 'lh.pial'
    >>> makesurfaces.cmdline # doctest: +ALLOW_UNICODE
    'mris_make_surfaces -T1 T1.mgz -orig pial -orig_pial pial 10335 lh'
    """
    _cmd = 'mris_make_surfaces'
    input_spec = MakeSurfacesInputSpec
    output_spec = MakeSurfacesOutputSpec

    def run(self, **inputs):
        # When running as a node, stage all implicit inputs into a local
        # FreeSurfer subjects directory so the command finds them by
        # convention under mri/, surf/ and label/.
        if self.inputs.copy_inputs:
            self.inputs.subjects_dir = os.getcwd()
            if 'subjects_dir' in inputs:
                inputs['subjects_dir'] = self.inputs.subjects_dir
            copy2subjdir(self, self.inputs.in_wm,
                         folder='mri', basename='wm.mgz')
            copy2subjdir(self, self.inputs.in_filled,
                         folder='mri', basename='filled.mgz')
            copy2subjdir(self, self.inputs.in_white,
                         'surf', '{0}.white'.format(self.inputs.hemisphere))
            for originalfile in [self.inputs.in_aseg,
                                 self.inputs.in_T1]:
                copy2subjdir(self, originalfile, folder='mri')
            for originalfile in [self.inputs.orig_white,
                                 self.inputs.orig_pial,
                                 self.inputs.in_orig]:
                copy2subjdir(self, originalfile, folder='surf')
            if isdefined(self.inputs.in_label):
                copy2subjdir(self, self.inputs.in_label, 'label',
                             '{0}.aparc.annot'.format(self.inputs.hemisphere))
            else:
                # NOTE(review): raises OSError if the label directory
                # already exists -- presumably it never does at this point
                # in a fresh node directory; confirm.
                os.makedirs(os.path.join(self.inputs.subjects_dir,
                                         self.inputs.subject_id,
                                         'label'))
        return super(MakeSurfaces, self).run(**inputs)

    def _format_arg(self, name, spec, value):
        if name in ['in_T1', 'in_aseg']:
            # These inputs do not take full paths as inputs or even basenames
            basename = os.path.basename(value)
            # whent the -mgz flag is specified, it assumes the mgz extension
            if self.inputs.mgz:
                prefix = os.path.splitext(basename)[0]
            else:
                prefix = basename
            if prefix == 'aseg':
                return  # aseg is already the default
            return spec.argstr % prefix
        elif name in ['orig_white', 'orig_pial']:
            # these inputs do take full file paths or even basenames
            # only the suffix (e.g. 'lh.white' -> 'white') goes on the
            # command line
            basename = os.path.basename(value)
            suffix = basename.split('.')[1]
            return spec.argstr % suffix
        elif name == 'in_orig':
            if value.endswith('lh.orig') or value.endswith('rh.orig'):
                # {lh,rh}.orig inputs are not sepcified on command line
                return
            else:
                # if the input orig file is different than lh.orig or rh.orig
                # these inputs do take full file paths or even basenames
                basename = os.path.basename(value)
                suffix = basename.split('.')[1]
                return spec.argstr % suffix
        return super(MakeSurfaces, self)._format_arg(name, spec, value)

    def _list_outputs(self):
        outputs = self._outputs().get()
        # Outputs are saved in the surf directory
        dest_dir = os.path.join(self.inputs.subjects_dir,
                                self.inputs.subject_id, 'surf')
        # labels are saved in the label directory
        label_dir = os.path.join(
            self.inputs.subjects_dir, self.inputs.subject_id, 'label')
        if not self.inputs.no_white:
            outputs["out_white"] = os.path.join(
                dest_dir, str(self.inputs.hemisphere) + '.white')
        # The curv and area files must have the hemisphere names as a prefix
        outputs["out_curv"] = os.path.join(
            dest_dir, str(self.inputs.hemisphere) + '.curv')
        outputs["out_area"] = os.path.join(
            dest_dir, str(self.inputs.hemisphere) + '.area')
        # Something determines when a pial surface and thickness file is generated
        # but documentation doesn't say what.
        # The orig_pial input is just a guess
        if isdefined(self.inputs.orig_pial) or self.inputs.white == 'NOWRITE':
            outputs["out_curv"] = outputs["out_curv"] + ".pial"
            outputs["out_area"] = outputs["out_area"] + ".pial"
            outputs["out_pial"] = os.path.join(
                dest_dir, str(self.inputs.hemisphere) + '.pial')
            outputs["out_thickness"] = os.path.join(
                dest_dir, str(self.inputs.hemisphere) + '.thickness')
        else:
            # when a pial surface is NOT generated, the cortex label file
            # IS generated
            outputs["out_cortex"] = os.path.join(
                label_dir, str(self.inputs.hemisphere) + '.cortex.label')
        return outputs
class CurvatureInputSpec(FSTraitedSpec):
    """Input specification for Curvature (``mris_curvature``)."""
    in_file = File(argstr="%s", position=-2, mandatory=True, exists=True,
                   copyfile=True, desc="Input file for Curvature")
    # optional
    threshold = traits.Float(
        argstr="-thresh %.3f", desc="Undocumented input threshold")
    n = traits.Bool(argstr="-n",
                    desc="Undocumented boolean flag")
    averages = traits.Int(argstr="-a %d",
                          desc="Perform this number iterative averages of curvature measure before saving")
    save = traits.Bool(argstr="-w",
                       desc="Save curvature files (will only generate screen output without this option)")
    distances = traits.Tuple(traits.Int, traits.Int, argstr="-distances %d %d",
                             desc="Undocumented input integer distances")
    copy_input = traits.Bool(desc="Copy input file to current directory")
class CurvatureOutputSpec(TraitedSpec):
    """Output specification for Curvature (``mris_curvature``)."""
    out_mean = File(exists=False, desc="Mean curvature output file")
    out_gauss = File(exists=False, desc="Gaussian curvature output file")
class Curvature(FSCommand):
    """Compute the second fundamental form of a cortical surface with
    ``mris_curvature``, producing <hemi>.<surface>.H (mean curvature)
    and <hemi>.<surface>.K (Gaussian curvature).

    Examples
    ========
    >>> from nipype.interfaces.freesurfer import Curvature
    >>> curv = Curvature()
    >>> curv.inputs.in_file = 'lh.pial'
    >>> curv.inputs.save = True
    >>> curv.cmdline # doctest: +ALLOW_UNICODE
    'mris_curvature -w lh.pial'
    """
    _cmd = 'mris_curvature'
    input_spec = CurvatureInputSpec
    output_spec = CurvatureOutputSpec

    def _format_arg(self, name, spec, value):
        # When the input was copied locally, refer to it by basename.
        if name == 'in_file' and self.inputs.copy_input:
            return spec.argstr % os.path.basename(value)
        return super(Curvature, self)._format_arg(name, spec, value)

    def _list_outputs(self):
        # Outputs live next to the (possibly copied) input, with the
        # .H / .K suffixes appended by the command.
        source = self.inputs.in_file
        if self.inputs.copy_input:
            source = os.path.basename(source)
        base = os.path.abspath(source)
        results = self._outputs().get()
        results['out_mean'] = base + '.H'
        results['out_gauss'] = base + '.K'
        return results
class CurvatureStatsInputSpec(FSTraitedSpec):
    """Input specification for CurvatureStats (``mris_curvature_stats``)."""
    surface = File(argstr="-F %s", exists=True,
                   desc="Specify surface file for CurvatureStats")
    curvfile1 = File(argstr="%s", position=-2, mandatory=True, exists=True,
                     desc="Input file for CurvatureStats")
    curvfile2 = File(argstr="%s", position=-1, mandatory=True, exists=True,
                     desc="Input file for CurvatureStats")
    hemisphere = traits.Enum('lh', 'rh',
                             position=-3, argstr="%s", mandatory=True,
                             desc="Hemisphere being processed")
    subject_id = traits.String('subject_id', usedefault=True,
                               position=-4, argstr="%s", mandatory=True,
                               desc="Subject being processed")
    out_file = File(argstr="-o %s", exists=False,
                    name_source=['hemisphere'], name_template='%s.curv.stats',
                    hash_files=False, desc="Output curvature stats file")
    # optional
    min_max = traits.Bool(argstr="-m",
                          desc="Output min / max information for the processed curvature.")
    values = traits.Bool(argstr="-G",
                         desc="Triggers a series of derived curvature values")
    write = traits.Bool(argstr="--writeCurvatureFiles",
                        desc="Write curvature files")
    copy_inputs = traits.Bool(
        desc="If running as a node, set this to True." +
        "This will copy the input files to the node " +
        "directory.")
class CurvatureStatsOutputSpec(TraitedSpec):
    """Output specification for CurvatureStats (``mris_curvature_stats``)."""
    out_file = File(exists=False, desc="Output curvature stats file")
class CurvatureStats(FSCommand):
    """
    In its simplest usage, 'mris_curvature_stats' will compute a set
    of statistics on its input <curvFile>. These statistics are the
    mean and standard deviation of the particular curvature on the
    surface, as well as the results from several surface-based
    integrals.

    Additionally, 'mris_curvature_stats' can report the max/min
    curvature values, and compute a simple histogram based on
    all curvature values.

    Curvatures can also be normalised and constrained to a given
    range before computation.

    Principal curvature (K, H, k1 and k2) calculations on a surface
    structure can also be performed, as well as several functions
    derived from k1 and k2.

    Finally, all output to the console, as well as any new
    curvatures that result from the above calculations can be
    saved to a series of text and binary-curvature files.

    Examples
    ========
    >>> from nipype.interfaces.freesurfer import CurvatureStats
    >>> curvstats = CurvatureStats()
    >>> curvstats.inputs.hemisphere = 'lh'
    >>> curvstats.inputs.curvfile1 = 'lh.pial'
    >>> curvstats.inputs.curvfile2 = 'lh.pial'
    >>> curvstats.inputs.surface = 'lh.pial'
    >>> curvstats.inputs.out_file = 'lh.curv.stats'
    >>> curvstats.inputs.values = True
    >>> curvstats.inputs.min_max = True
    >>> curvstats.inputs.write = True
    >>> curvstats.cmdline # doctest: +ALLOW_UNICODE
    'mris_curvature_stats -m -o lh.curv.stats -F pial -G --writeCurvatureFiles subject_id lh pial pial'
    """
    _cmd = 'mris_curvature_stats'
    input_spec = CurvatureStatsInputSpec
    output_spec = CurvatureStatsOutputSpec

    def _format_arg(self, name, spec, value):
        # Surfaces and curvature files are referenced on the command line
        # by their suffix only (e.g. 'lh.pial' -> 'pial').
        if name in ('surface', 'curvfile1', 'curvfile2'):
            return spec.argstr % os.path.basename(value).split('.')[1]
        return super(CurvatureStats, self)._format_arg(name, spec, value)

    def _list_outputs(self):
        results = self._outputs().get()
        results['out_file'] = os.path.abspath(self.inputs.out_file)
        return results

    def run(self, **inputs):
        # Stage surface inputs into a local subjects dir when run as a node.
        if self.inputs.copy_inputs:
            self.inputs.subjects_dir = os.getcwd()
            if 'subjects_dir' in inputs:
                inputs['subjects_dir'] = self.inputs.subjects_dir
            for staged in (self.inputs.surface,
                           self.inputs.curvfile1,
                           self.inputs.curvfile2):
                copy2subjdir(self, staged, 'surf')
        return super(CurvatureStats, self).run(**inputs)
class JacobianInputSpec(FSTraitedSpec):
    """Input specification for Jacobian (``mris_jacobian``)."""
    # required
    in_origsurf = File(argstr="%s", position=-3, mandatory=True, exists=True,
                       desc="Original surface")
    in_mappedsurf = File(argstr="%s", position=-2, mandatory=True, exists=True,
                         desc="Mapped surface")
    # optional
    out_file = File(argstr="%s", exists=False, position=-1,
                    name_source=['in_origsurf'], hash_files=False,
                    name_template='%s.jacobian', keep_extension=False,
                    desc="Output Jacobian of the surface mapping")
class JacobianOutputSpec(TraitedSpec):
    """Output specification for Jacobian (``mris_jacobian``)."""
    out_file = File(
        exists=False, desc="Output Jacobian of the surface mapping")
class Jacobian(FSCommand):
    """Compute the Jacobian of a surface mapping (``mris_jacobian``).

    Examples
    ========
    >>> from nipype.interfaces.freesurfer import Jacobian
    >>> jacobian = Jacobian()
    >>> jacobian.inputs.in_origsurf = 'lh.pial'
    >>> jacobian.inputs.in_mappedsurf = 'lh.pial'
    >>> jacobian.cmdline # doctest: +ALLOW_UNICODE
    'mris_jacobian lh.pial lh.pial lh.jacobian'
    """
    _cmd = 'mris_jacobian'
    input_spec = JacobianInputSpec
    output_spec = JacobianOutputSpec

    def _list_outputs(self):
        # Single declared output: absolute path of the Jacobian file.
        results = self._outputs().get()
        results['out_file'] = os.path.abspath(self.inputs.out_file)
        return results
class MRIsCalcInputSpec(FSTraitedSpec):
    """Input specification for MRIsCalc (``mris_calc``)."""
    # required
    in_file1 = File(argstr="%s", position=-3, mandatory=True, exists=True,
                    desc="Input file 1")
    action = traits.String(argstr="%s", position=-2, mandatory=True,
                           desc="Action to perform on input file(s)")
    out_file = File(argstr="-o %s", mandatory=True,
                    desc="Output file after calculation")
    # optional; the second operand is exactly one of file / float / int
    in_file2 = File(argstr="%s", exists=True, position=-1,
                    xor=['in_float', 'in_int'], desc="Input file 2")
    in_float = traits.Float(argstr="%f", position=-1,
                            xor=['in_file2', 'in_int'], desc="Input float")
    in_int = traits.Int(argstr="%d", position=-1,
                        xor=['in_file2', 'in_float'], desc="Input integer")
class MRIsCalcOutputSpec(TraitedSpec):
    """Output specification for MRIsCalc (``mris_calc``)."""
    out_file = File(exists=False, desc="Output file after calculation")
class MRIsCalc(FSCommand):
    """
    'mris_calc' is a simple calculator that operates on FreeSurfer
    curvatures and volumes. In most cases, the calculator functions with
    three arguments: two inputs and an <ACTION> linking them. Some
    actions, however, operate with only one input <file1>. In all cases,
    the first input <file1> is the name of a FreeSurfer curvature overlay
    (e.g. rh.curv) or volume file (e.g. orig.mgz). For two inputs, the
    calculator first assumes that the second input is a file. If, however,
    this second input file doesn't exist, the calculator assumes it refers
    to a float number, which is then processed according to <ACTION>.Note:
    <file1> and <file2> should typically be generated on the same subject.

    Examples
    ========
    >>> from nipype.interfaces.freesurfer import MRIsCalc
    >>> example = MRIsCalc()
    >>> example.inputs.in_file1 = 'lh.area' # doctest: +SKIP
    >>> example.inputs.in_file2 = 'lh.area.pial' # doctest: +SKIP
    >>> example.inputs.action = 'add'
    >>> example.inputs.out_file = 'area.mid'
    >>> example.cmdline # doctest: +SKIP
    'mris_calc -o lh.area.mid lh.area add lh.area.pial'
    """
    _cmd = 'mris_calc'
    input_spec = MRIsCalcInputSpec
    output_spec = MRIsCalcOutputSpec

    def _list_outputs(self):
        # Single output: the file named by the '-o' argument.
        results = self._outputs().get()
        results['out_file'] = os.path.abspath(self.inputs.out_file)
        return results
class VolumeMaskInputSpec(FSTraitedSpec):
    """Input specification for VolumeMask (``mris_volmask``)."""
    left_whitelabel = traits.Int(argstr="--label_left_white %d", mandatory=True,
                                 desc="Left white matter label")
    left_ribbonlabel = traits.Int(argstr="--label_left_ribbon %d", mandatory=True,
                                  desc="Left cortical ribbon label")
    right_whitelabel = traits.Int(argstr="--label_right_white %d", mandatory=True,
                                  desc="Right white matter label")
    right_ribbonlabel = traits.Int(argstr="--label_right_ribbon %d", mandatory=True,
                                   desc="Right cortical ribbon label")
    lh_pial = File(mandatory=True, exists=True,
                   desc="Implicit input left pial surface")
    rh_pial = File(mandatory=True, exists=True,
                   desc="Implicit input right pial surface")
    lh_white = File(mandatory=True, exists=True,
                    desc="Implicit input left white matter surface")
    rh_white = File(mandatory=True, exists=True,
                    desc="Implicit input right white matter surface")
    aseg = File(exists=True,
                xor=['in_aseg'],
                desc="Implicit aseg.mgz segmentation. " +
                "Specify a different aseg by using the 'in_aseg' input.")
    subject_id = traits.String('subject_id', usedefault=True,
                               position=-1, argstr="%s", mandatory=True,
                               desc="Subject being processed")
    # optional
    in_aseg = File(argstr="--aseg_name %s",
                   exists=True, xor=['aseg'],
                   desc="Input aseg file for VolumeMask")
    save_ribbon = traits.Bool(argstr="--save_ribbon",
                              desc="option to save just the ribbon for the " +
                              "hemispheres in the format ?h.ribbon.mgz")
    copy_inputs = traits.Bool(desc="If running as a node, set this to True." +
                              "This will copy the implicit input files to the " +
                              "node directory.")
class VolumeMaskOutputSpec(TraitedSpec):
    """Output specification for VolumeMask (``mris_volmask``)."""
    out_ribbon = File(exists=False, desc="Output cortical ribbon mask")
    lh_ribbon = File(exists=False, desc="Output left cortical ribbon mask")
    rh_ribbon = File(exists=False, desc="Output right cortical ribbon mask")
class VolumeMask(FSCommand):
    """
    Computes a volume mask, at the same resolution as the
    <subject>/mri/brain.mgz. The volume mask contains 4 values: LH_WM
    (default 10), LH_GM (default 100), RH_WM (default 20), RH_GM (default
    200).

    The algorithm uses the 4 surfaces situated in <subject>/surf/
    [lh|rh].[white|pial] and labels voxels based on the
    signed-distance function from the surface.

    Examples
    ========
    >>> from nipype.interfaces.freesurfer import VolumeMask
    >>> volmask = VolumeMask()
    >>> volmask.inputs.left_whitelabel = 2
    >>> volmask.inputs.left_ribbonlabel = 3
    >>> volmask.inputs.right_whitelabel = 41
    >>> volmask.inputs.right_ribbonlabel = 42
    >>> volmask.inputs.lh_pial = 'lh.pial'
    >>> volmask.inputs.rh_pial = 'lh.pial'
    >>> volmask.inputs.lh_white = 'lh.pial'
    >>> volmask.inputs.rh_white = 'lh.pial'
    >>> volmask.inputs.subject_id = '10335'
    >>> volmask.inputs.save_ribbon = True
    >>> volmask.cmdline # doctest: +ALLOW_UNICODE
    'mris_volmask --label_left_ribbon 3 --label_left_white 2 --label_right_ribbon 42 --label_right_white 41 --save_ribbon 10335'
    """
    _cmd = 'mris_volmask'
    input_spec = VolumeMaskInputSpec
    output_spec = VolumeMaskOutputSpec

    def run(self, **inputs):
        # When running as a node, stage the implicit surface/volume inputs
        # into a local FreeSurfer subjects directory.
        if self.inputs.copy_inputs:
            self.inputs.subjects_dir = os.getcwd()
            if 'subjects_dir' in inputs:
                inputs['subjects_dir'] = self.inputs.subjects_dir
            copy2subjdir(self, self.inputs.lh_pial, 'surf', 'lh.pial')
            copy2subjdir(self, self.inputs.rh_pial, 'surf', 'rh.pial')
            copy2subjdir(self, self.inputs.lh_white, 'surf', 'lh.white')
            copy2subjdir(self, self.inputs.rh_white, 'surf', 'rh.white')
            copy2subjdir(self, self.inputs.in_aseg, 'mri')
            copy2subjdir(self, self.inputs.aseg, 'mri', 'aseg.mgz')
        return super(VolumeMask, self).run(**inputs)

    def _format_arg(self, name, spec, value):
        if name == 'in_aseg':
            # The command expects the aseg name without the '.mgz'
            # extension.  BUGFIX: the previous str.rstrip('.mgz') stripped
            # any trailing '.', 'm', 'g', 'z' characters (e.g.
            # 'norm.mgz' -> 'nor'); remove only the literal suffix.
            basename = os.path.basename(value)
            if basename.endswith('.mgz'):
                basename = basename[:-len('.mgz')]
            return spec.argstr % basename
        return super(VolumeMask, self)._format_arg(name, spec, value)

    def _list_outputs(self):
        # All outputs are written into the subject's mri directory.
        outputs = self._outputs().get()
        out_dir = os.path.join(self.inputs.subjects_dir,
                               self.inputs.subject_id, 'mri')
        outputs["out_ribbon"] = os.path.join(out_dir, 'ribbon.mgz')
        # per-hemisphere ribbons only exist with --save_ribbon
        if self.inputs.save_ribbon:
            outputs["rh_ribbon"] = os.path.join(out_dir, 'rh.ribbon.mgz')
            outputs["lh_ribbon"] = os.path.join(out_dir, 'lh.ribbon.mgz')
        return outputs
class ParcellationStatsInputSpec(FSTraitedSpec):
    """Input specification for ParcellationStats (``mris_anatomical_stats``)."""
    # required
    subject_id = traits.String('subject_id', usedefault=True,
                               position=-3, argstr="%s", mandatory=True,
                               desc="Subject being processed")
    hemisphere = traits.Enum('lh', 'rh',
                             position=-2, argstr="%s", mandatory=True,
                             desc="Hemisphere being processed")
    # implicit
    wm = File(mandatory=True, exists=True,
              desc="Input file must be <subject_id>/mri/wm.mgz")
    lh_white = File(mandatory=True, exists=True,
                    desc="Input file must be <subject_id>/surf/lh.white")
    rh_white = File(mandatory=True, exists=True,
                    desc="Input file must be <subject_id>/surf/rh.white")
    lh_pial = File(mandatory=True, exists=True,
                   desc="Input file must be <subject_id>/surf/lh.pial")
    rh_pial = File(mandatory=True, exists=True,
                   desc="Input file must be <subject_id>/surf/rh.pial")
    transform = File(mandatory=True, exists=True,
                     desc="Input file must be <subject_id>/mri/transforms/talairach.xfm")
    thickness = File(mandatory=True, exists=True,
                     desc="Input file must be <subject_id>/surf/?h.thickness")
    brainmask = File(mandatory=True, exists=True,
                     desc="Input file must be <subject_id>/mri/brainmask.mgz")
    aseg = File(mandatory=True, exists=True,
                desc="Input file must be <subject_id>/mri/aseg.presurf.mgz")
    ribbon = File(mandatory=True, exists=True,
                  desc="Input file must be <subject_id>/mri/ribbon.mgz")
    cortex_label = File(exists=True,
                        desc="implicit input file {hemi}.cortex.label")
    # optional
    surface = traits.String(position=-1, argstr="%s",
                            desc="Input surface (e.g. 'white')")
    mgz = traits.Bool(argstr="-mgz",
                      desc="Look for mgz files")
    in_cortex = traits.File(argstr="-cortex %s", exists=True,
                            desc="Input cortex label")
    in_annotation = traits.File(argstr="-a %s", exists=True, xor=['in_label'],
                                desc="compute properties for each label in the annotation file separately")
    # BUGFIX: xor previously listed 'in_annotatoin' (typo), which matches no
    # trait and silently disabled the mutual exclusion with in_annotation.
    in_label = traits.File(argstr="-l %s", exists=True, xor=['in_annotation', 'out_color'],
                           desc="limit calculations to specified label")
    tabular_output = traits.Bool(argstr="-b",
                                 desc="Tabular output")
    out_table = traits.File(argstr="-f %s", exists=False, genfile=True,
                            requires=['tabular_output'], desc="Table output to tablefile")
    out_color = traits.File(argstr="-c %s", exists=False, genfile=True, xor=['in_label'],
                            desc="Output annotation files's colortable to text file")
    copy_inputs = traits.Bool(desc="If running as a node, set this to True." +
                              "This will copy the input files to the node " +
                              "directory.")
    th3 = traits.Bool(argstr="-th3", requires=["cortex_label"],
                      desc="turns on new vertex-wise volume calc for mris_anat_stats")
class ParcellationStatsOutputSpec(TraitedSpec):
    """Output specification for ParcellationStats (``mris_anatomical_stats``)."""
    out_table = File(exists=False, desc="Table output to tablefile")
    out_color = File(exists=False,
                     desc="Output annotation files's colortable to text file")
class ParcellationStats(FSCommand):
    """
    This program computes a number of anatomical properties.

    Examples
    ========
    >>> from nipype.interfaces.freesurfer import ParcellationStats
    >>> import os
    >>> parcstats = ParcellationStats()
    >>> parcstats.inputs.subject_id = '10335'
    >>> parcstats.inputs.hemisphere = 'lh'
    >>> parcstats.inputs.wm = './../mri/wm.mgz' # doctest: +SKIP
    >>> parcstats.inputs.transform = './../mri/transforms/talairach.xfm' # doctest: +SKIP
    >>> parcstats.inputs.brainmask = './../mri/brainmask.mgz' # doctest: +SKIP
    >>> parcstats.inputs.aseg = './../mri/aseg.presurf.mgz' # doctest: +SKIP
    >>> parcstats.inputs.ribbon = './../mri/ribbon.mgz' # doctest: +SKIP
    >>> parcstats.inputs.lh_pial = 'lh.pial' # doctest: +SKIP
    >>> parcstats.inputs.rh_pial = 'lh.pial' # doctest: +SKIP
    >>> parcstats.inputs.lh_white = 'lh.white' # doctest: +SKIP
    >>> parcstats.inputs.rh_white = 'rh.white' # doctest: +SKIP
    >>> parcstats.inputs.thickness = 'lh.thickness' # doctest: +SKIP
    >>> parcstats.inputs.surface = 'white'
    >>> parcstats.inputs.out_table = 'lh.test.stats'
    >>> parcstats.inputs.out_color = 'test.ctab'
    >>> parcstats.cmdline # doctest: +SKIP
    'mris_anatomical_stats -c test.ctab -f lh.test.stats 10335 lh white'
    """
    _cmd = 'mris_anatomical_stats'
    input_spec = ParcellationStatsInputSpec
    output_spec = ParcellationStatsOutputSpec

    def run(self, **inputs):
        # When running as a node, stage all implicit inputs into a local
        # FreeSurfer subjects directory so the command can find them.
        if self.inputs.copy_inputs:
            self.inputs.subjects_dir = os.getcwd()
            if 'subjects_dir' in inputs:
                inputs['subjects_dir'] = self.inputs.subjects_dir
            copy2subjdir(self, self.inputs.lh_white, 'surf', 'lh.white')
            copy2subjdir(self, self.inputs.lh_pial, 'surf', 'lh.pial')
            copy2subjdir(self, self.inputs.rh_white, 'surf', 'rh.white')
            copy2subjdir(self, self.inputs.rh_pial, 'surf', 'rh.pial')
            copy2subjdir(self, self.inputs.wm, 'mri', 'wm.mgz')
            copy2subjdir(self, self.inputs.transform,
                         os.path.join('mri', 'transforms'),
                         'talairach.xfm')
            copy2subjdir(self, self.inputs.brainmask, 'mri', 'brainmask.mgz')
            copy2subjdir(self, self.inputs.aseg, 'mri', 'aseg.presurf.mgz')
            copy2subjdir(self, self.inputs.ribbon, 'mri', 'ribbon.mgz')
            copy2subjdir(self, self.inputs.thickness, 'surf',
                         '{0}.thickness'.format(self.inputs.hemisphere))
            if isdefined(self.inputs.cortex_label):
                copy2subjdir(
                    self, self.inputs.cortex_label, 'label',
                    '{0}.cortex.label'.format(self.inputs.hemisphere))
        # make sure the stats/label output directories exist before running
        createoutputdirs(self._list_outputs())
        return super(ParcellationStats, self).run(**inputs)

    def _gen_filename(self, name):
        # Generated filenames follow the conventions encoded in
        # _list_outputs below.
        if name in ['out_table', 'out_color']:
            return self._list_outputs()[name]
        return None

    def _list_outputs(self):
        outputs = self._outputs().get()
        if isdefined(self.inputs.out_table):
            outputs["out_table"] = os.path.abspath(self.inputs.out_table)
        else:
            # subject stats directory
            stats_dir = os.path.join(
                self.inputs.subjects_dir, self.inputs.subject_id, 'stats')
            if isdefined(self.inputs.in_annotation):
                # if out_table is not defined just tag .stats on the end
                # instead of .annot
                if self.inputs.surface == 'pial':
                    basename = os.path.basename(
                        self.inputs.in_annotation).replace('.annot', '.pial.stats')
                else:
                    basename = os.path.basename(
                        self.inputs.in_annotation).replace('.annot', '.stats')
            elif isdefined(self.inputs.in_label):
                # if out_table is not defined just tag .stats on the end
                # instead of .label
                if self.inputs.surface == 'pial':
                    basename = os.path.basename(
                        self.inputs.in_label).replace('.label', '.pial.stats')
                else:
                    basename = os.path.basename(
                        self.inputs.in_label).replace('.label', '.stats')
            else:
                basename = str(self.inputs.hemisphere) + '.aparc.annot.stats'
            outputs["out_table"] = os.path.join(stats_dir, basename)
        if isdefined(self.inputs.out_color):
            outputs["out_color"] = os.path.abspath(self.inputs.out_color)
        else:
            # subject label directory
            out_dir = os.path.join(self.inputs.subjects_dir,
                                   self.inputs.subject_id, 'label')
            if isdefined(self.inputs.in_annotation):
                # find the annotation name (if it exists)
                basename = os.path.basename(self.inputs.in_annotation)
                for item in ['lh.', 'rh.', 'aparc.', 'annot']:
                    basename = basename.replace(item, '')
                annot = basename
                # if the out_color table is not defined, one with the annotation
                # name will be created
                if 'BA' in annot:
                    outputs["out_color"] = os.path.join(out_dir, annot + 'ctab')
                else:
                    outputs["out_color"] = os.path.join(
                        out_dir, 'aparc.annot.' + annot + 'ctab')
            else:
                outputs["out_color"] = os.path.join(
                    out_dir, 'aparc.annot.ctab')
        return outputs
class ContrastInputSpec(FSTraitedSpec):
    """Input specification for ``pctsurfcon`` (see :class:`Contrast`)."""
    # required
    subject_id = traits.String('subject_id', argstr="--s %s", usedefault=True,
                               mandatory=True, desc="Subject being processed")
    hemisphere = traits.Enum('lh', 'rh',
                             argstr="--%s-only", mandatory=True,
                             desc="Hemisphere being processed")
    # implicit
    thickness = File(mandatory=True, exists=True,
                     desc="Input file must be <subject_id>/surf/?h.thickness")
    white = File(mandatory=True, exists=True,
                 desc="Input file must be <subject_id>/surf/<hemisphere>.white")
    # Use nipype's File (not traits.File) so these behave like the other
    # file traits in this spec (thickness, white, orig, rawavg).
    annotation = File(mandatory=True, exists=True,
                      desc="Input annotation file must be <subject_id>/label/<hemisphere>.aparc.annot")
    cortex = File(mandatory=True, exists=True,
                  desc="Input cortex label must be <subject_id>/label/<hemisphere>.cortex.label")
    orig = File(exists=True, mandatory=True,
                desc="Implicit input file mri/orig.mgz")
    rawavg = File(exists=True, mandatory=True,
                  desc="Implicit input file mri/rawavg.mgz")
    copy_inputs = traits.Bool(desc="If running as a node, set this to True." +
                              "This will copy the input files to the node " +
                              "directory.")
class ContrastOutputSpec(TraitedSpec):
    """Output specification for ``pctsurfcon`` (see :class:`Contrast`)."""
    # exists=False: these paths are predicted before the command runs
    out_contrast = File(exists=False,
                        desc="Output contrast file from Contrast")
    out_stats = File(exists=False,
                     desc="Output stats file from Contrast")
    out_log = File(exists=True,
                   desc="Output log from Contrast")
class Contrast(FSCommand):
    """
    Compute surface-wise gray/white contrast
    Examples
    ========
    >>> from nipype.interfaces.freesurfer import Contrast
    >>> contrast = Contrast()
    >>> contrast.inputs.subject_id = '10335'
    >>> contrast.inputs.hemisphere = 'lh'
    >>> contrast.inputs.white = 'lh.white' # doctest: +SKIP
    >>> contrast.inputs.thickness = 'lh.thickness' # doctest: +SKIP
    >>> contrast.inputs.annotation = '../label/lh.aparc.annot' # doctest: +SKIP
    >>> contrast.inputs.cortex = '../label/lh.cortex.label' # doctest: +SKIP
    >>> contrast.inputs.rawavg = '../mri/rawavg.mgz' # doctest: +SKIP
    >>> contrast.inputs.orig = '../mri/orig.mgz' # doctest: +SKIP
    >>> contrast.cmdline # doctest: +SKIP
    'pctsurfcon --lh-only --s 10335'
    """
    _cmd = 'pctsurfcon'
    input_spec = ContrastInputSpec
    output_spec = ContrastOutputSpec

    def run(self, **inputs):
        """Stage implicit inputs into the subjects dir, then run the command."""
        if self.inputs.copy_inputs:
            self.inputs.subjects_dir = os.getcwd()
            if 'subjects_dir' in inputs:
                inputs['subjects_dir'] = self.inputs.subjects_dir
            hemi = self.inputs.hemisphere
            # (value, subdirectory, destination basename) for each implicit
            # input that pctsurfcon expects to find in the subjects dir.
            staging = [
                (self.inputs.annotation, 'label',
                 '{0}.aparc.annot'.format(hemi)),
                (self.inputs.cortex, 'label',
                 '{0}.cortex.label'.format(hemi)),
                (self.inputs.white, 'surf', '{0}.white'.format(hemi)),
                (self.inputs.thickness, 'surf',
                 '{0}.thickness'.format(hemi)),
                (self.inputs.orig, 'mri', 'orig.mgz'),
                (self.inputs.rawavg, 'mri', 'rawavg.mgz'),
            ]
            for value, folder, basename in staging:
                copy2subjdir(self, value, folder, basename)
        # the output directories must exist before the command writes to them
        createoutputdirs(self._list_outputs())
        return super(Contrast, self).run(**inputs)

    def _list_outputs(self):
        """Predict output locations inside the subject directory."""
        outputs = self._outputs().get()
        subject_dir = os.path.join(
            self.inputs.subjects_dir, self.inputs.subject_id)
        hemi = str(self.inputs.hemisphere)
        outputs["out_contrast"] = os.path.join(
            subject_dir, 'surf', hemi + '.w-g.pct.mgh')
        outputs["out_stats"] = os.path.join(
            subject_dir, 'stats', hemi + '.w-g.pct.stats')
        outputs["out_log"] = os.path.join(
            subject_dir, 'scripts', 'pctsurfcon.log')
        return outputs
class RelabelHypointensitiesInputSpec(FSTraitedSpec):
    """Input specification for ``mri_relabel_hypointensities``."""
    # required
    lh_white = File(mandatory=True, exists=True, copyfile=True,
                    desc="Implicit input file must be lh.white")
    rh_white = File(mandatory=True, exists=True, copyfile=True,
                    desc="Implicit input file must be rh.white")
    aseg = File(argstr="%s", position=-3, mandatory=True, exists=True,
                desc="Input aseg file")
    surf_directory = traits.Directory('.', argstr="%s", position=-2, exists=True,
                                      usedefault=True,
                                      desc="Directory containing lh.white and rh.white")
    # out_file is auto-generated from the aseg filename (name_source)
    out_file = File(argstr="%s", position=-1, exists=False,
                    name_source=['aseg'], name_template='%s.hypos.mgz',
                    hash_files=False, keep_extension=False,
                    desc="Output aseg file")
class RelabelHypointensitiesOutputSpec(TraitedSpec):
    """Output specification for ``mri_relabel_hypointensities``."""
    # NOTE(review): argstr has no effect on an output spec trait — presumably
    # a copy/paste leftover from the input spec; confirm before removing.
    out_file = File(argstr="%s", exists=False,
                    desc="Output aseg file")
class RelabelHypointensities(FSCommand):
    """
    Relabel Hypointensities
    Examples
    ========
    >>> from nipype.interfaces.freesurfer import RelabelHypointensities
    >>> relabelhypos = RelabelHypointensities()
    >>> relabelhypos.inputs.lh_white = 'lh.pial'
    >>> relabelhypos.inputs.rh_white = 'lh.pial'
    >>> relabelhypos.inputs.surf_directory = '.'
    >>> relabelhypos.inputs.aseg = 'aseg.mgz'
    >>> relabelhypos.cmdline # doctest: +ALLOW_UNICODE
    'mri_relabel_hypointensities aseg.mgz . aseg.hypos.mgz'
    """
    _cmd = 'mri_relabel_hypointensities'
    input_spec = RelabelHypointensitiesInputSpec
    output_spec = RelabelHypointensitiesOutputSpec
    def _list_outputs(self):
        """Return the absolute path of the (auto-named) output aseg file."""
        outputs = self._outputs().get()
        # out_file is filled in by the name_source/name_template machinery
        outputs["out_file"] = os.path.abspath(self.inputs.out_file)
        return outputs
class Aparc2AsegInputSpec(FSTraitedSpec):
    """Input specification for ``mri_aparc2aseg`` (see :class:`Aparc2Aseg`)."""
    # required
    subject_id = traits.String('subject_id', argstr="--s %s", usedefault=True,
                               mandatory=True, desc="Subject being processed")
    out_file = File(argstr="--o %s", exists=False, mandatory=True,
                    desc="Full path of file to save the output segmentation in")
    # implicit
    lh_white = File(mandatory=True, exists=True,
                    desc="Input file must be <subject_id>/surf/lh.white")
    rh_white = File(mandatory=True, exists=True,
                    desc="Input file must be <subject_id>/surf/rh.white")
    lh_pial = File(mandatory=True, exists=True,
                   desc="Input file must be <subject_id>/surf/lh.pial")
    rh_pial = File(mandatory=True, exists=True,
                   desc="Input file must be <subject_id>/surf/rh.pial")
    lh_ribbon = File(mandatory=True, exists=True,
                     desc="Input file must be <subject_id>/mri/lh.ribbon.mgz")
    rh_ribbon = File(mandatory=True, exists=True,
                     desc="Input file must be <subject_id>/mri/rh.ribbon.mgz")
    ribbon = File(mandatory=True, exists=True,
                  desc="Input file must be <subject_id>/mri/ribbon.mgz")
    lh_annotation = File(mandatory=True, exists=True,
                         desc="Input file must be <subject_id>/label/lh.aparc.annot")
    rh_annotation = File(mandatory=True, exists=True,
                         desc="Input file must be <subject_id>/label/rh.aparc.annot")
    # optional
    filled = File(exists=True,
                  desc="Implicit input filled file. Only required with FS v5.3.")
    aseg = File(argstr="--aseg %s", exists=True,
                desc="Input aseg file")
    volmask = traits.Bool(argstr="--volmask",
                          desc="Volume mask flag")
    ctxseg = File(argstr="--ctxseg %s", exists=True,
                  desc="")
    label_wm = traits.Bool(argstr="--labelwm",
                           desc="""
                           For each voxel labeled as white matter in the aseg, re-assign
                           its label to be that of the closest cortical point if its
                           distance is less than dmaxctx
                           """)
    hypo_wm = traits.Bool(argstr="--hypo-as-wm",
                          desc="Label hypointensities as WM")
    rip_unknown = traits.Bool(argstr="--rip-unknown",
                              desc="Do not label WM based on 'unknown' corical label")
    a2009s = traits.Bool(argstr="--a2009s",
                         desc="Using the a2009s atlas")
    copy_inputs = traits.Bool(desc="If running as a node, set this to True." +
                              "This will copy the input files to the node " +
                              "directory.")
class Aparc2AsegOutputSpec(TraitedSpec):
    """Output specification for ``mri_aparc2aseg``."""
    # NOTE(review): argstr is unused on output spec traits — confirm.
    out_file = File(argstr="%s", desc="Output aseg file")
class Aparc2Aseg(FSCommand):
    """
    Maps the cortical labels from the automatic cortical parcellation
    (aparc) to the automatic segmentation volume (aseg). The result can be
    used as the aseg would. The algorithm is to find each aseg voxel
    labeled as cortex (3 and 42) and assign it the label of the closest
    cortical vertex. If the voxel is not in the ribbon (as defined by mri/
    lh.ribbon and rh.ribbon), then the voxel is marked as unknown (0).
    This can be turned off with --noribbon. The cortical parcellation is
    obtained from subject/label/hemi.aparc.annot which should be based on
    the curvature.buckner40.filled.desikan_killiany.gcs atlas. The aseg is
    obtained from subject/mri/aseg.mgz and should be based on the
    RB40_talairach_2005-07-20.gca atlas. If these atlases are used, then the
    segmentations can be viewed with tkmedit and the
    FreeSurferColorLUT.txt color table found in $FREESURFER_HOME. These
    are the default atlases used by recon-all.
    Examples
    ========
    >>> from nipype.interfaces.freesurfer import Aparc2Aseg
    >>> aparc2aseg = Aparc2Aseg()
    >>> aparc2aseg.inputs.lh_white = 'lh.pial'
    >>> aparc2aseg.inputs.rh_white = 'lh.pial'
    >>> aparc2aseg.inputs.lh_pial = 'lh.pial'
    >>> aparc2aseg.inputs.rh_pial = 'lh.pial'
    >>> aparc2aseg.inputs.lh_ribbon = 'label.mgz'
    >>> aparc2aseg.inputs.rh_ribbon = 'label.mgz'
    >>> aparc2aseg.inputs.ribbon = 'label.mgz'
    >>> aparc2aseg.inputs.lh_annotation = 'lh.pial'
    >>> aparc2aseg.inputs.rh_annotation = 'lh.pial'
    >>> aparc2aseg.inputs.out_file = 'aparc+aseg.mgz'
    >>> aparc2aseg.inputs.label_wm = True
    >>> aparc2aseg.inputs.rip_unknown = True
    >>> aparc2aseg.cmdline # doctest: +SKIP
    'mri_aparc2aseg --labelwm --o aparc+aseg.mgz --rip-unknown --s subject_id'
    """
    _cmd = 'mri_aparc2aseg'
    input_spec = Aparc2AsegInputSpec
    output_spec = Aparc2AsegOutputSpec

    def run(self, **inputs):
        """Stage implicit inputs into the subjects dir, then run the command."""
        if self.inputs.copy_inputs:
            self.inputs.subjects_dir = os.getcwd()
            if 'subjects_dir' in inputs:
                inputs['subjects_dir'] = self.inputs.subjects_dir
            # (value, subdirectory, destination basename); a basename of
            # None keeps the file's original name in the destination dir.
            staging = [
                (self.inputs.lh_white, 'surf', 'lh.white'),
                (self.inputs.lh_pial, 'surf', 'lh.pial'),
                (self.inputs.rh_white, 'surf', 'rh.white'),
                (self.inputs.rh_pial, 'surf', 'rh.pial'),
                (self.inputs.lh_ribbon, 'mri', 'lh.ribbon.mgz'),
                (self.inputs.rh_ribbon, 'mri', 'rh.ribbon.mgz'),
                (self.inputs.ribbon, 'mri', 'ribbon.mgz'),
                (self.inputs.aseg, 'mri', None),
                (self.inputs.filled, 'mri', 'filled.mgz'),
                (self.inputs.lh_annotation, 'label', None),
                (self.inputs.rh_annotation, 'label', None),
            ]
            for value, folder, basename in staging:
                if basename is None:
                    copy2subjdir(self, value, folder)
                else:
                    copy2subjdir(self, value, folder, basename)
        return super(Aparc2Aseg, self).run(**inputs)

    def _format_arg(self, name, spec, value):
        """Format aseg as a bare stem and out_file as an absolute path."""
        if name == 'aseg':
            # mri_aparc2aseg expects the aseg volume's stem, not a full path
            return spec.argstr % os.path.basename(value).replace('.mgz', '')
        if name == 'out_file':
            return spec.argstr % os.path.abspath(value)
        return super(Aparc2Aseg, self)._format_arg(name, spec, value)

    def _list_outputs(self):
        """Return the absolute path of the requested output segmentation."""
        outputs = self._outputs().get()
        outputs["out_file"] = os.path.abspath(self.inputs.out_file)
        return outputs
class Apas2AsegInputSpec(FSTraitedSpec):
    """Input specification for ``apas2aseg`` (see :class:`Apas2Aseg`)."""
    # required
    in_file = File(argstr="--i %s", mandatory=True, exists=True,
                   desc="Input aparc+aseg.mgz")
    out_file = File(argstr="--o %s", mandatory=True,
                    desc="Output aseg file")
class Apas2AsegOutputSpec(TraitedSpec):
    """Output specification for ``apas2aseg``."""
    # NOTE(review): argstr is unused on output spec traits — confirm.
    out_file = File(argstr="%s", exists=False,
                    desc="Output aseg file")
class Apas2Aseg(FSCommand):
    """
    Converts aparc+aseg.mgz into something like aseg.mgz by replacing the
    cortical segmentations 1000-1035 with 3 and 2000-2035 with 42. The
    advantage of this output is that the cortical label conforms to the
    actual surface (this is not the case with aseg.mgz).
    Examples
    ========
    >>> from nipype.interfaces.freesurfer import Apas2Aseg
    >>> apas2aseg = Apas2Aseg()
    >>> apas2aseg.inputs.in_file = 'aseg.mgz'
    >>> apas2aseg.inputs.out_file = 'output.mgz'
    >>> apas2aseg.cmdline # doctest: +ALLOW_UNICODE
    'apas2aseg --i aseg.mgz --o output.mgz'
    """
    _cmd = 'apas2aseg'
    input_spec = Apas2AsegInputSpec
    output_spec = Apas2AsegOutputSpec
    def _list_outputs(self):
        """Return the absolute path of the requested output aseg file."""
        outputs = self._outputs().get()
        outputs["out_file"] = os.path.abspath(self.inputs.out_file)
        return outputs
class MRIsExpandInputSpec(FSTraitedSpec):
    """Input specification for ``mris_expand`` (see :class:`MRIsExpand`)."""
    # Input spec derived from
    # https://github.com/freesurfer/freesurfer/blob/102e053/mris_expand/mris_expand.c
    in_file = File(
        exists=True, mandatory=True, argstr='%s', position=-3, copyfile=False,
        desc='Surface to expand')
    distance = traits.Float(
        mandatory=True, argstr='%g', position=-2,
        desc='Distance in mm or fraction of cortical thickness')
    out_name = traits.Str(
        'expanded', argstr='%s', position=-1, usedefault=True,
        desc=('Output surface file\n'
              'If no path, uses directory of `in_file`\n'
              'If no path AND missing "lh." or "rh.", derive from `in_file`'))
    thickness = traits.Bool(
        argstr='-thickness',
        desc='Expand by fraction of cortical thickness, not mm')
    thickness_name = traits.Str(
        argstr="-thickness_name %s", copyfile=False,
        desc=('Name of thickness file (implicit: "thickness")\n'
              'If no path, uses directory of `in_file`\n'
              'If no path AND missing "lh." or "rh.", derive from `in_file`'))
    pial = traits.Str(
        argstr='-pial %s', copyfile=False,
        desc=('Name of pial file (implicit: "pial")\n'
              'If no path, uses directory of `in_file`\n'
              'If no path AND missing "lh." or "rh.", derive from `in_file`'))
    sphere = traits.Str(
        'sphere', copyfile=False, usedefault=True,
        desc='WARNING: Do not change this trait')
    spring = traits.Float(argstr='-S %g', desc="Spring term (implicit: 0.05)")
    dt = traits.Float(argstr='-T %g', desc='dt (implicit: 0.25)')
    write_iterations = traits.Int(
        argstr='-W %d',
        desc='Write snapshots of expansion every N iterations')
    smooth_averages = traits.Int(
        argstr='-A %d',
        desc='Smooth surface with N iterations after expansion')
    nsurfaces = traits.Int(
        argstr='-N %d',
        desc='Number of surfacces to write during expansion')
    # # Requires dev version - Re-add when min_ver/max_ver support this
    # # https://github.com/freesurfer/freesurfer/blob/9730cb9/mris_expand/mris_expand.c
    # navgs = traits.Tuple(
    #     traits.Int, traits.Int,
    #     argstr='-navgs %d %d',
    #     desc=('Tuple of (n_averages, min_averages) parameters '
    #           '(implicit: (16, 0))'))
    # target_intensity = traits.Tuple(
    #     traits.Float, traits.File(exists=True),
    #     argstr='-intensity %g %s',
    #     desc='Tuple of intensity and brain volume to crop to target intensity')
class MRIsExpandOutputSpec(TraitedSpec):
    """Output specification for ``mris_expand``."""
    out_file = File(desc='Output surface file')
class MRIsExpand(FSSurfaceCommand):
    """
    Expands a surface (typically ?h.white) outwards while maintaining
    smoothness and self-intersection constraints.
    Examples
    ========
    >>> from nipype.interfaces.freesurfer import MRIsExpand
    >>> mris_expand = MRIsExpand(thickness=True, distance=0.5)
    >>> mris_expand.inputs.in_file = 'lh.white'
    >>> mris_expand.cmdline # doctest: +ALLOW_UNICODE
    'mris_expand -thickness lh.white 0.5 expanded'
    >>> mris_expand.inputs.out_name = 'graymid'
    >>> mris_expand.cmdline # doctest: +ALLOW_UNICODE
    'mris_expand -thickness lh.white 0.5 graymid'
    """
    _cmd = 'mris_expand'
    input_spec = MRIsExpandInputSpec
    output_spec = MRIsExpandOutputSpec

    def _list_outputs(self):
        """Derive the output surface path from in_file and out_name."""
        outputs = self._outputs().get()
        outputs['out_file'] = self._associated_file(
            self.inputs.in_file, self.inputs.out_name)
        return outputs

    def _normalize_filenames(self):
        """ Find full paths for pial, thickness and sphere files for copying
        """
        in_file = self.inputs.in_file
        # pial defaults to the implicit "pial" surface alongside in_file
        pial = self.inputs.pial
        if not isdefined(pial):
            pial = 'pial'
        self.inputs.pial = self._associated_file(in_file, pial)
        # thickness file only matters when expanding by thickness fraction
        if isdefined(self.inputs.thickness) and self.inputs.thickness:
            tname = self.inputs.thickness_name
            if not isdefined(tname):
                tname = 'thickness'
            self.inputs.thickness_name = self._associated_file(in_file, tname)
        self.inputs.sphere = self._associated_file(in_file, self.inputs.sphere)
class LTAConvertInputSpec(CommandLineInputSpec):
    """Input specification for ``lta_convert`` (see :class:`LTAConvert`)."""
    # Inputs
    # Exactly one input format may be given (mutually exclusive).
    _in_xor = ('in_lta', 'in_fsl', 'in_mni', 'in_reg', 'in_niftyreg', 'in_itk')
    in_lta = traits.Either(
        File(exists=True), 'identity.nofile', argstr='--inlta %s',
        mandatory=True, xor=_in_xor, desc='input transform of LTA type')
    in_fsl = File(
        exists=True, argstr='--infsl %s', mandatory=True, xor=_in_xor,
        desc='input transform of FSL type')
    in_mni = File(
        exists=True, argstr='--inmni %s', mandatory=True, xor=_in_xor,
        desc='input transform of MNI/XFM type')
    in_reg = File(
        exists=True, argstr='--inreg %s', mandatory=True, xor=_in_xor,
        desc='input transform of TK REG type (deprecated format)')
    in_niftyreg = File(
        exists=True, argstr='--inniftyreg %s', mandatory=True, xor=_in_xor,
        desc='input transform of Nifty Reg type (inverse RAS2RAS)')
    in_itk = File(
        exists=True, argstr='--initk %s', mandatory=True, xor=_in_xor,
        desc='input transform of ITK type')
    # Outputs
    # For each output: True means "write with the default filename",
    # a File value names the output explicitly.
    out_lta = traits.Either(
        traits.Bool, File, argstr='--outlta %s',
        desc='output linear transform (LTA Freesurfer format)')
    out_fsl = traits.Either(traits.Bool, File, argstr='--outfsl %s',
                            desc='output transform in FSL format')
    out_mni = traits.Either(traits.Bool, File, argstr='--outmni %s',
                            desc='output transform in MNI/XFM format')
    out_reg = traits.Either(traits.Bool, File, argstr='--outreg %s',
                            desc='output transform in reg dat format')
    out_itk = traits.Either(traits.Bool, File, argstr='--outitk %s',
                            desc='output transform in ITK format')
    # Optional flags
    invert = traits.Bool(argstr='--invert')
    ltavox2vox = traits.Bool(argstr='--ltavox2vox', requires=['out_lta'])
    source_file = File(exists=True, argstr='--src %s')
    target_file = File(exists=True, argstr='--trg %s')
    target_conform = traits.Bool(argstr='--trgconform')
class LTAConvertOutputSpec(TraitedSpec):
    """Output specification for ``lta_convert``."""
    out_lta = File(exists=True,
                   desc='output linear transform (LTA Freesurfer format)')
    out_fsl = File(exists=True, desc='output transform in FSL format')
    out_mni = File(exists=True, desc='output transform in MNI/XFM format')
    out_reg = File(exists=True, desc='output transform in reg dat format')
    out_itk = File(exists=True, desc='output transform in ITK format')
class LTAConvert(CommandLine):
    """Convert different transformation formats.
    Some formats may require you to pass an image if the geometry information
    is missing form the transform file format.
    For complete details, see the `lta_convert documentation.
    <https://ftp.nmr.mgh.harvard.edu/pub/docs/html/lta_convert.help.xml.html>`_
    """
    input_spec = LTAConvertInputSpec
    output_spec = LTAConvertOutputSpec
    _cmd = 'lta_convert'

    # Default output filename used when an out_* trait is set to True.
    _default_names = (('out_lta', 'out.lta'), ('out_fsl', 'out.mat'),
                      ('out_mni', 'out.xfm'), ('out_reg', 'out.dat'),
                      ('out_itk', 'out.txt'))

    def _format_arg(self, name, spec, value):
        """Substitute the predicted filename when an output is set to True."""
        if value is True and name.startswith('out_'):
            value = self._list_outputs()[name]
        return super(LTAConvert, self)._format_arg(name, spec, value)

    def _list_outputs(self):
        """Resolve requested outputs to absolute paths."""
        outputs = self.output_spec().get()
        for name, default in self._default_names:
            requested = getattr(self.inputs, name)
            if not requested:
                continue
            fname = default if requested is True else requested
            outputs[name] = os.path.abspath(fname)
        return outputs
|
mick-d/nipype
|
nipype/interfaces/freesurfer/utils.py
|
Python
|
bsd-3-clause
| 138,762
|
[
"Gaussian",
"VTK"
] |
fe52e21dc6385238c3ecb97a7427ba06dd8d01d417a4ae8d5a6252805c9766e9
|
# Copyright 2004-2015 Tom Rothamel <pytom@bishoujo.us>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# This file contains displayables that move, zoom, rotate, or otherwise
# transform displayables. (As well as displayables that support them.)
import math
import types #@UnresolvedImport
import renpy.display #@UnusedImport
from renpy.display.render import render
from renpy.display.layout import Container
import renpy.display.accelerator
# The null object that's used if we don't have a defined child.
null = None
def get_null():
    """Return the shared Null displayable, creating it lazily on first use."""
    global null
    if null is None:
        null = renpy.display.layout.Null()
    return null
# Convert a position from cartesian to polar coordinates.
def cartesian_to_polar(x, y, xaround, yaround):
    """
    Converts cartesian coordinates to polar coordinates.

    Returns (angle, radius), where angle is measured in degrees
    clockwise from straight up, relative to (xaround, yaround),
    and is always in [0, 360).
    """
    # Offsets relative to the center of rotation.
    off_x = x - xaround
    off_y = y - yaround
    radius = math.hypot(off_x, off_y)
    angle = math.atan2(off_x, -off_y) / math.pi * 180
    if angle < 0:
        angle += 360
    return angle, radius
def polar_to_cartesian(angle, radius, xaround, yaround):
    """
    Converts polar coordinates to cartesian coordinates.

    The angle is in degrees, clockwise from straight up, around
    (xaround, yaround).
    """
    theta = angle * math.pi / 180
    off_x = radius * math.sin(theta)
    off_y = -radius * math.cos(theta)
    # Preserve the numeric type (int vs float) of the center point.
    return (type(xaround)(xaround + off_x),
            type(yaround)(yaround + off_y))
def first_not_none(*args):
    """
    Returns the first argument that is not None, or None if every
    argument is None (or no arguments are given).
    """
    for arg in args:
        if arg is not None:
            return arg
    # Previously fell through to `return i`, which leaked the loop
    # variable and raised NameError when called with no arguments.
    return None
class TransformState(renpy.object.Object):
    """Holds the mutable set of transform properties for a Transform.

    Class-level defaults below double as upgrade values for instances
    unpickled from older saves that predate the corresponding field.
    """
    nearest = False
    xoffset = None
    yoffset = None
    inherited_xpos = None
    inherited_ypos = None
    inherited_xanchor = None
    inherited_yanchor = None
    transform_anchor = False
    additive = 0.0
    debug = None
    events = True
    crop_relative = False
    def __init__(self): # W0231
        self.alpha = 1
        self.nearest = False
        self.additive = 0.0
        self.rotate = None
        self.rotate_pad = True
        self.transform_anchor = False
        self.zoom = 1
        self.xzoom = 1
        self.yzoom = 1
        self.xpos = None
        self.ypos = None
        self.xanchor = None
        self.yanchor = None
        self.xoffset = 0
        self.yoffset = 0
        self.xaround = 0.0
        self.yaround = 0.0
        self.xanchoraround = 0.0
        self.yanchoraround = 0.0
        self.subpixel = False
        self.crop = None
        self.crop_relative = False
        self.corner1 = None
        self.corner2 = None
        self.size = None
        self.delay = 0
        self.debug = None
        self.events = True
        # Note: When adding a new property, we need to add it to:
        # - take_state
        # - diff
        # - renpy.atl.PROPERTIES
        # - Proxies in Transform
        # An xpos (etc) inherited from our child overrides an xpos inherited
        # from an old transform, but not an xpos set in the current transform.
        #
        # inherited_xpos stores the inherited_xpos, which is overridden by the
        # xpos, if not None.
        self.inherited_xpos = None
        self.inherited_ypos = None
        self.inherited_xanchor = None
        self.inherited_yanchor = None
    def take_state(self, ts):
        # Copy every (non-positional) property from ts into this state.
        self.nearest = ts.nearest
        self.alpha = ts.alpha
        self.additive = ts.additive
        self.rotate = ts.rotate
        self.rotate_pad = ts.rotate_pad
        self.transform_anchor = ts.transform_anchor
        self.zoom = ts.zoom
        self.xzoom = ts.xzoom
        self.yzoom = ts.yzoom
        self.xaround = ts.xaround
        self.yaround = ts.yaround
        self.xanchoraround = ts.xanchoraround
        self.yanchoraround = ts.yanchoraround
        self.crop = ts.crop
        self.crop_relative = ts.crop_relative
        self.corner1 = ts.corner1
        self.corner2 = ts.corner2
        self.size = ts.size
        self.debug = ts.debug
        self.events = ts.events
        # Take the computed position properties, not the
        # raw ones.
        (self.inherited_xpos,
         self.inherited_ypos,
         self.inherited_xanchor,
         self.inherited_yanchor,
         _,
         _,
         _) = ts.get_placement()
        self.xoffset = ts.xoffset
        self.yoffset = ts.yoffset
        self.subpixel = ts.subpixel
    # Returns a dict, with p -> (old, new) where p is a property that
    # has changed between this object and the new object.
    def diff(self, newts):
        rv = { }
        # Record prop if its value differs between old and new.
        def diff2(prop, new, old):
            if new != old:
                rv[prop] = (old, new)
        # Like diff2, but falls back to the inherited value when the
        # explicit value is None (used for the position properties).
        def diff4(prop, new, inherited_new, old, inherited_old):
            if new is None:
                new_value = inherited_new
            else:
                new_value = new
            if old is None:
                old_value = inherited_old
            else:
                old_value = old
            if new_value != old_value:
                rv[prop] = (old_value, new_value)
        diff2("nearest", newts.nearest, self.nearest)
        diff2("alpha", newts.alpha, self.alpha)
        diff2("additive", newts.additive, self.additive)
        diff2("rotate", newts.rotate, self.rotate)
        diff2("rotate_pad", newts.rotate_pad, self.rotate_pad)
        diff2("transform_anchor", newts.transform_anchor, self.transform_anchor)
        diff2("zoom", newts.zoom, self.zoom)
        diff2("xzoom", newts.xzoom, self.xzoom)
        diff2("yzoom", newts.yzoom, self.yzoom)
        diff2("xaround", newts.xaround, self.xaround)
        diff2("yaround", newts.yaround, self.yaround)
        diff2("xanchoraround", newts.xanchoraround, self.xanchoraround)
        diff2("yanchoraround", newts.yanchoraround, self.yanchoraround)
        diff2("subpixel", newts.subpixel, self.subpixel)
        diff2("crop", newts.crop, self.crop)
        diff2("crop_relative", newts.crop_relative, self.crop_relative)
        diff2("corner1", newts.corner1, self.corner1)
        diff2("corner2", newts.corner2, self.corner2)
        diff2("size", newts.size, self.size)
        diff4("xpos", newts.xpos, newts.inherited_xpos, self.xpos, self.inherited_xpos)
        diff4("xanchor", newts.xanchor, newts.inherited_xanchor, self.xanchor, self.inherited_xanchor)
        diff2("xoffset", newts.xoffset, self.xoffset)
        diff4("ypos", newts.ypos, newts.inherited_ypos, self.ypos, self.inherited_ypos)
        diff4("yanchor", newts.yanchor, newts.inherited_yanchor, self.yanchor, self.inherited_yanchor)
        diff2("yoffset", newts.yoffset, self.yoffset)
        diff2("debug", newts.debug, self.debug)
        diff2("events", newts.events, self.events)
        return rv
    def get_placement(self, cxoffset=0, cyoffset=0):
        # Explicit values win over inherited ones; offsets accumulate.
        return (
            first_not_none(self.xpos, self.inherited_xpos),
            first_not_none(self.ypos, self.inherited_ypos),
            first_not_none(self.xanchor, self.inherited_xanchor),
            first_not_none(self.yanchor, self.inherited_yanchor),
            self.xoffset + cxoffset,
            self.yoffset + cyoffset,
            self.subpixel,
            )
    # These update various properties.
    def get_xalign(self):
        return self.xpos
    def set_xalign(self, v):
        # xalign sets pos and anchor together.
        self.xpos = v
        self.xanchor = v
    xalign = property(get_xalign, set_xalign)
    def get_yalign(self):
        return self.ypos
    def set_yalign(self, v):
        # yalign sets pos and anchor together.
        self.ypos = v
        self.yanchor = v
    yalign = property(get_yalign, set_yalign)
    def get_around(self):
        return (self.xaround, self.yaround)
    def set_around(self, value):
        # around rotates the position only; anchor rotation is disabled.
        self.xaround, self.yaround = value
        self.xanchoraround, self.yanchoraround = None, None
    def set_alignaround(self, value):
        # alignaround rotates both the position and the anchor.
        self.xaround, self.yaround = value
        self.xanchoraround, self.yanchoraround = value
    around = property(get_around, set_around)
    alignaround = property(get_around, set_alignaround)
    def get_angle(self):
        # Angle of the current position around (xaround, yaround).
        xpos = first_not_none(self.xpos, self.inherited_xpos, 0)
        ypos = first_not_none(self.ypos, self.inherited_ypos, 0)
        angle, _radius = cartesian_to_polar(xpos, ypos, self.xaround, self.yaround)
        return angle
    def get_radius(self):
        xpos = first_not_none(self.xpos, self.inherited_xpos, 0)
        ypos = first_not_none(self.ypos, self.inherited_ypos, 0)
        _angle, radius = cartesian_to_polar(xpos, ypos, self.xaround, self.yaround)
        return radius
    def set_angle(self, value):
        # Keep the current radius; move the position to the new angle.
        xpos = first_not_none(self.xpos, self.inherited_xpos, 0)
        ypos = first_not_none(self.ypos, self.inherited_ypos, 0)
        _angle, radius = cartesian_to_polar(xpos, ypos, self.xaround, self.yaround)
        angle = value
        self.xpos, self.ypos = polar_to_cartesian(angle, radius, self.xaround, self.yaround)
        if self.xanchoraround:
            # When anchor rotation is enabled, move the anchor in lockstep.
            self.xanchor, self.yanchor = polar_to_cartesian(angle, radius, self.xaround, self.yaround)
    def set_radius(self, value):
        # Keep the current angle; move the position to the new radius.
        xpos = first_not_none(self.xpos, self.inherited_xpos, 0)
        ypos = first_not_none(self.ypos, self.inherited_ypos, 0)
        angle, _radius = cartesian_to_polar(xpos, ypos, self.xaround, self.yaround)
        radius = value
        self.xpos, self.ypos = polar_to_cartesian(angle, radius, self.xaround, self.yaround)
        if self.xanchoraround:
            self.xanchor, self.yanchor = polar_to_cartesian(angle, radius, self.xaround, self.yaround)
    angle = property(get_angle, set_angle)
    radius = property(get_radius, set_radius)
    def get_pos(self):
        return self.xpos, self.ypos
    def set_pos(self, value):
        self.xpos, self.ypos = value
    pos = property(get_pos, set_pos)
    def get_anchor(self):
        return self.xanchor, self.yanchor
    def set_anchor(self, value):
        self.xanchor, self.yanchor = value
    anchor = property(get_anchor, set_anchor)
    def get_align(self):
        return self.xpos, self.ypos
    def set_align(self, value):
        # align sets anchor and pos to the same pair.
        self.xanchor, self.yanchor = value
        self.xpos, self.ypos = value
    align = property(get_align, set_align)
    def get_offset(self):
        return self.xoffset, self.yoffset
    def set_offset(self, value):
        self.xoffset, self.yoffset = value
    offset = property(get_offset, set_offset)
    def set_xcenter(self, value):
        # Centers the displayable horizontally at the given position.
        self.xpos = value
        self.xanchor = 0.5
    def get_xcenter(self):
        return self.xpos
    def set_ycenter(self, value):
        # Centers the displayable vertically at the given position.
        self.ypos = value
        self.yanchor = 0.5
    def get_ycenter(self):
        return self.ypos
    xcenter = property(get_xcenter, set_xcenter)
    ycenter = property(get_ycenter, set_ycenter)
class Proxy(object):
    """
    A descriptor that forwards reads and writes of a Transform attribute
    to the identically-named attribute of the transform's `state` object.
    """

    def __init__(self, name):
        # The name of the attribute to forward to obj.state.
        self.name = name

    def __get__(self, obj, objtype):
        return getattr(obj.state, self.name)

    def __set__(self, obj, value):
        return setattr(obj.state, self.name, value)
class Transform(Container):
    """
    Documented in sphinx, because we can't scan this object.
    """
    __version__ = 5
    transform_event_responder = True
    # Proxying things over to our state.
    # Each Proxy forwards the attribute to self.state (a TransformState),
    # so reading/writing these on a Transform manipulates its state.
    nearest = Proxy("nearest")
    alpha = Proxy("alpha")
    additive = Proxy("additive")
    rotate = Proxy("rotate")
    rotate_pad = Proxy("rotate_pad")
    transform_anchor = Proxy("transform_anchor")
    zoom = Proxy("zoom")
    xzoom = Proxy("xzoom")
    yzoom = Proxy("yzoom")
    xpos = Proxy("xpos")
    ypos = Proxy("ypos")
    xanchor = Proxy("xanchor")
    yanchor = Proxy("yanchor")
    xalign = Proxy("xalign")
    yalign = Proxy("yalign")
    around = Proxy("around")
    alignaround = Proxy("alignaround")
    angle = Proxy("angle")
    radius = Proxy("radius")
    xaround = Proxy("xaround")
    yaround = Proxy("yaround")
    xanchoraround = Proxy("xanchoraround")
    yanchoraround = Proxy("yanchoraround")
    pos = Proxy("pos")
    anchor = Proxy("anchor")
    align = Proxy("align")
    crop = Proxy("crop")
    crop_relative = Proxy("crop_relative")
    corner1 = Proxy("corner1")
    corner2 = Proxy("corner2")
    size = Proxy("size")
    delay = Proxy("delay")
    xoffset = Proxy("xoffset")
    yoffset = Proxy("yoffset")
    offset = Proxy("offset")
    subpixel = Proxy("subpixel")
    xcenter = Proxy("xcenter")
    ycenter = Proxy("ycenter")
    debug = Proxy("debug")
    events = Proxy("events")
    def after_upgrade(self, version):
        """Migrate instances unpickled from saves made by older versions.

        `version` is the __version__ the instance was pickled with; each
        step below fills in fields introduced by a later version.
        """
        if version < 1:
            self.active = False
            self.state = TransformState()
            # Old saves stored positions directly on the Transform.
            self.state.xpos = self.xpos or 0
            self.state.ypos = self.ypos or 0
            self.state.xanchor = self.xanchor or 0
            self.state.yanchor = self.yanchor or 0
            self.state.alpha = self.alpha
            self.state.rotate = self.rotate
            self.state.zoom = self.zoom
            self.state.xzoom = self.xzoom
            self.state.yzoom = self.yzoom
            self.hide_request = False
            self.hide_response = True
        if version < 2:
            self.st = 0
            self.at = 0
        if version < 3:
            self.st_offset = 0
            self.at_offset = 0
            self.child_st_base = 0
        if version < 4:
            self.style_arg = 'transform'
        if version < 5:
            self.replaced_request = False
            self.replaced_response = True
DEFAULT_ARGUMENTS = {
"selected_activate" : { },
"selected_hover" : { },
"selected_idle" : { },
"selected_insensitive" : { },
"activate" : { },
"hover" : { },
"idle" : { },
"insensitive" : { },
"" : { },
}
# Compatibility with old versions of the class.
active = False
children = False
arguments = DEFAULT_ARGUMENTS
# Default before we set this.
child_size = (0, 0)
    def __init__(self,
                 child=None,
                 function=None,
                 style='transform',
                 focus=None,
                 default=False,
                 **kwargs):
        """Create a Transform.

        `child` - the displayable to transform (may be None).
        `function` - optional callable (state, st, at) that updates the
        state each frame; when None, default_function applies `kwargs`.
        `kwargs` - transform properties, optionally prefixed with a style
        prefix (e.g. hover_alpha); stored per-prefix in self.arguments.
        """
        self.kwargs = kwargs
        self.style_arg = style
        super(Transform, self).__init__(style=style, focus=focus, default=default)
        self.function = function
        child = renpy.easy.displayable_or_none(child)
        if child is not None:
            self.add(child)
        self.state = TransformState()
        if kwargs:
            # A map from prefix -> (prop -> value)
            self.arguments = { }
            # Fill self.arguments with a
            for k, v in kwargs.iteritems():
                prefix = ""
                prop = k
                # Peel words off the front of the keyword until the rest is
                # a known property name with a known (or empty) prefix.
                while True:
                    if prop in renpy.atl.PROPERTIES and (not prefix or prefix in Transform.DEFAULT_ARGUMENTS):
                        if prefix not in self.arguments:
                            self.arguments[prefix] = { }
                        self.arguments[prefix][prop] = v
                        break
                    new_prefix, _, prop = prop.partition("_")
                    if not prop:
                        raise Exception("Unknown transform property: %r" % k)
                    if prefix:
                        prefix = prefix + "_" + new_prefix
                    else:
                        prefix = new_prefix
            if "" in self.arguments:
                # Unprefixed properties apply to the state immediately.
                for k, v in self.arguments[""].iteritems():
                    setattr(self.state, k, v)
        else:
            self.arguments = None
        # This is the matrix transforming our coordinates into child coordinates.
        self.forward = None
        # Have we called the function at least once?
        self.active = False
        # Have we been requested to hide?
        self.hide_request = False
        # True if it's okay for us to hide.
        self.hide_response = True
        # Have we been requested to replaced?
        self.replaced_request = False
        # True if it's okay for us to replaced.
        self.replaced_response = True
        self.st = 0
        self.at = 0
        self.st_offset = 0
        self.at_offset = 0
        self.child_st_base = 0
def visit(self):
    """Returns the list of children this transform draws (possibly empty)."""
    child = self.child
    return [ ] if child is None else [ child ]
# The default function chooses entries from self.arguments that match
# the style prefix, and applies them to the state.
def default_function(self, state, st, at):
    """
    Applies the argument dicts matching the current style prefix - and
    every suffix of it, least-specific first, so more specific entries
    override - to `state`. Returns None (no redraw is scheduled).
    """

    if self.arguments is None:
        return None

    prefix = self.style.prefix.strip("_")
    prefixes = [ ]

    # Build ["", ..., suffix, full prefix], least specific first.
    while prefix:
        prefixes.insert(0, prefix)
        _, _, prefix = prefix.partition("_")

    prefixes.insert(0, "")

    for i in prefixes:
        d = self.arguments.get(i, None)

        if d is None:
            continue

        # items() rather than the Python-2-only iteritems(), for
        # Python 3 compatibility (same behavior on Python 2).
        for k, v in d.items():
            setattr(state, k, v)

    return None
def set_transform_event(self, event):
    """Propagates a transform event to the child, then to the base class."""
    if self.child is not None:
        self.child.set_transform_event(event)

    super(Transform, self).set_transform_event(event)
def take_state(self, t):
    """
    Takes the transformation state from object t into this object.
    Non-Transform objects are ignored.
    """

    if isinstance(t, Transform):
        self.state.take_state(t.state)

    # The arguments will be applied when the default function is
    # called.
def take_execution_state(self, t):
    """
    Takes the execution state from object t into this object. This is
    overridden by renpy.atl.TransformBase.
    """

    if not isinstance(t, Transform):
        return

    self.hide_request = t.hide_request
    self.replaced_request = t.replaced_request

    # Copy the position-related parts of the transform state.
    for field in ("xpos", "ypos", "xanchor", "yanchor"):
        setattr(self.state, field, getattr(t.state, field))

    self.child_st_base = t.child_st_base

    # Recurse into wrapped transforms.
    if isinstance(self.child, Transform) and isinstance(t.child, Transform):
        self.child.take_execution_state(t.child)
def copy(self):
    """
    Makes a copy of this transform.
    """

    rv = self()
    rv.kwargs = { }
    rv.take_state(self)
    rv.take_execution_state(self)
    rv.st, rv.at = self.st, self.at

    return rv
def _change_transform_child(self, child):
    """Returns a copy of this transform whose innermost child is `child`."""
    duplicate = self.copy()

    old_child = self.child
    if old_child is not None:
        duplicate.set_child(old_child._change_transform_child(child))

    return duplicate
def _hide(self, st, at, kind):
    """
    Handles a hide ("hide") or replace ("replaced") request.

    Returns a transform that should keep being displayed while the
    hide/replace animation runs, or None once the child reports that
    it is done hiding.
    """

    if not self.child:
        return None

    # Prevent time from ticking backwards, as can happen if we replace a
    # transform but keep its state.
    if st + self.st_offset <= self.st:
        self.st_offset = self.st - st
    if at + self.at_offset <= self.at:
        self.at_offset = self.at - at

    self.st = st = st + self.st_offset
    self.at = at = at + self.at_offset

    # Work on a copy the first time a hide/replace is requested, so the
    # live transform is not mutated; afterwards keep updating the same
    # object.
    if not (self.hide_request or self.replaced_request):
        d = self.copy()
    else:
        d = self

    d.st_offset = self.st_offset
    d.at_offset = self.at_offset

    if kind == "hide":
        d.hide_request = True
    else:
        d.replaced_request = True

    d.hide_response = True
    d.replaced_response = True

    if d.function is not None:
        d.function(d, st + d.st_offset, at + d.at_offset)

    new_child = d.child._hide(st, at, kind)

    # A non-None child means the child itself is still hiding; keep it
    # and mark ourselves not-done.
    if new_child is not None:
        d.child = new_child
        d.hide_response = False
        d.replaced_response = False

    if (not d.hide_response) or (not d.replaced_response):
        renpy.display.render.redraw(d, 0)
        return d

    return None
def set_child(self, child):
    """Replaces this transform's child and invalidates the render."""

    displayable = renpy.easy.displayable(child)

    self.child = displayable
    self.children = [ displayable ]

    self.child_st_base = self.st

    displayable.per_interact()

    renpy.display.render.invalidate(self)
def update_state(self):
    """
    This updates the state to that at self.st, self.at.
    """

    self.hide_response = True
    self.replaced_response = True

    # If we have to, call the function that updates this transform.
    if self.arguments is not None:
        self.default_function(self, self.st, self.at)

    if self.function is not None:
        fr = self.function(self, self.st, self.at)

        # Order a redraw, if necessary.
        if fr is not None:
            renpy.display.render.redraw(self, fr)

    state = self.state

    self.active = True

    # Use non-None elements of the child placement as defaults.
    child = self.child
    if child is not None and renpy.config.transform_uses_child_position:

        # get_placement() returns (xpos, ypos, xanchor, yanchor,
        # xoffset, yoffset, subpixel); inherit the position/anchor
        # fields that the child actually set.
        pos = child.get_placement()

        if pos[0] is not None:
            state.inherited_xpos = pos[0]
        if pos[2] is not None:
            state.inherited_xanchor = pos[2]
        if pos[1] is not None:
            state.inherited_ypos = pos[1]
        if pos[3] is not None:
            state.inherited_yanchor = pos[3]

        state.subpixel |= pos[6]

# The render method is now defined in accelerator.pyx.
def event(self, ev, x, y, st):
    """
    Dispatches an event to the children, last-drawn child first, after
    mapping screen coordinates into child coordinates through
    self.forward. Returns the first non-None child result, or None.
    """

    if self.hide_request:
        return None

    if not self.state.events:
        return

    children = self.children
    offsets = self.offsets

    if not offsets:
        return None

    # range() rather than the Python-2-only xrange(), for Python 3
    # compatibility (identical iteration on Python 2).
    for i in range(len(self.children) - 1, -1, -1):

        d = children[i]
        xo, yo = offsets[i]

        cx = x - xo
        cy = y - yo

        # Transform screen coordinates to child coordinates.
        cx, cy = self.forward.transform(cx, cy)

        rv = d.event(ev, cx, cy, st)
        if rv is not None:
            return rv

    return None
def __call__(self, child=None, take_state=True):
    """
    Creates a new Transform sharing this one's function, style argument
    and keyword arguments, wrapping `child` (which defaults to our own
    child, falling back to a null displayable).
    """

    if child is None:
        child = self.child

    # If we don't have a child for some reason, set it to null.
    if child is None:
        child = get_null()

    clone = Transform(child=child,
                      function=self.function,
                      style=self.style_arg,
                      **self.kwargs)

    clone.take_state(self)

    return clone
def get_placement(self):
    """
    Returns the placement tuple (xpos, ypos, xanchor, yanchor, xoffset,
    yoffset, subpixel), mapping the anchor through this transform's
    reverse matrix when transform_anchor is set.
    """

    if not self.active:
        self.update_state()

    if self.child is not None:
        _cxpos, _cypos, _cxanchor, _cyanchor, cxoffset, cyoffset, _csubpixel = self.child.get_placement()
    else:
        cxoffset = 0
        cyoffset = 0

    # Treat None offsets as zero.
    cxoffset = cxoffset or 0
    cyoffset = cyoffset or 0

    rv = self.state.get_placement(cxoffset, cyoffset)

    if self.state.transform_anchor:
        xpos, ypos, xanchor, yanchor, xoffset, yoffset, subpixel = rv
        if (xanchor is not None) and (yanchor is not None):

            cw, ch = self.child_size
            rw, rh = self.render_size

            # Float anchors are fractions of the child size.
            if xanchor.__class__ is float:
                xanchor *= cw
            if yanchor.__class__ is float:
                yanchor *= ch

            # Rotate the anchor about the child's center through the
            # reverse matrix, then re-center within the rendered size.
            xanchor -= cw / 2.0
            yanchor -= ch / 2.0

            xanchor, yanchor = self.reverse.transform(xanchor, yanchor)

            xanchor += rw / 2.0
            yanchor += rh / 2.0

            xanchor = renpy.display.core.absolute(xanchor)
            yanchor = renpy.display.core.absolute(yanchor)

            rv = (xpos, ypos, xanchor, yanchor, xoffset, yoffset, subpixel)

    return rv
def update(self):
    """
    This should be called when a transform property field is updated outside
    of the callback method, to ensure that the change takes effect.
    """

    renpy.display.render.invalidate(self)
def parameterize(self, name, parameters):
    """
    Implements the image-parameterization protocol. Transforms accept no
    parameters, so any are an error; otherwise a fresh copy is returned.
    """

    if not parameters:
        # Note the call here.
        return self()

    raise Exception("Image '%s' can't take parameters '%s'. (Perhaps you got the name wrong?)" %
                    (' '.join(name), ' '.join(parameters)))
def _show(self):
    """Called when shown; brings the transform state up to date."""
    self.update_state()
# Install the accelerator's render function as Transform.render, using the
# Python 2 three-argument unbound-method form of types.MethodType.
Transform.render = types.MethodType(renpy.display.accelerator.transform_render, None, Transform)
class ATLTransform(renpy.atl.ATLTransformBase, Transform):
    """A Transform whose per-frame state is driven by an ATL block."""

    # NOTE(review): mutable default `context={}` is shared across calls if
    # mutated downstream - confirm the ATL base treats it as read-only.
    def __init__(self, atl, child=None, context={}, parameters=None, **properties):
        renpy.atl.ATLTransformBase.__init__(self, atl, context, parameters)
        Transform.__init__(self, child=child, function=self.execute, **properties)

        self.raw_child = self.child

    def __repr__(self):
        return "<ATL Transform {:x} {!r}>".format(id(self), self.atl.loc)

    def _show(self):
        # Bring state up to date, then run the ATL once at the current times.
        super(ATLTransform, self)._show()
        self.execute(self, self.st, self.at)
class Motion(Container):
    """
    This is used to move a child displayable around the screen. It
    works by supplying a time value to a user-supplied function,
    which is in turn expected to return a pair giving the x and y
    location of the upper-left-hand corner of the child, or a
    4-tuple giving that and the xanchor and yanchor of the child.

    The time value is a floating point number that ranges from 0 to
    1. If repeat is True, then the motion repeats every period
    sections. (Otherwise, it stops.) If bounce is true, the
    time value varies from 0 to 1 to 0 again.

    The function supplied needs to be pickleable, which means it needs
    to be defined as a name in an init block. It cannot be a lambda or
    anonymous inner function. If you can get away with using Pan or
    Move, use them instead.

    Please note that floats and ints are interpreted as for xpos and
    ypos, with floats being considered fractions of the screen.
    """

    def __init__(self, function, period, child=None, new_widget=None, old_widget=None, repeat=False, bounce=False, delay=None, anim_timebase=False, tag_start=None, time_warp=None, add_sizes=False, style='motion', **properties):
        """
        @param child: The child displayable.

        @param new_widget: If child is None, it is set to new_widget,
        so that we can speak the transition protocol.

        @param old_widget: Ignored, for compatibility with the transition protocol.

        @param function: A function that takes a floating point value and returns
        an xpos, ypos tuple.

        @param period: The amount of time it takes to go through one cycle, in seconds.

        @param repeat: Should we repeat after a period is up?

        @param bounce: Should we bounce?

        @param delay: How long this motion should take. If repeat is None, defaults to period.

        @param anim_timebase: If True, use the animation timebase rather than the shown timebase.

        @param time_warp: If not None, this is a function that takes a
        fraction of the period (between 0.0 and 1.0), and returns a
        new fraction of the period. Use this to warp time, applying
        acceleration and deceleration to motions.

        This can also be used as a transition. When used as a
        transition, the motion is applied to the new_widget for delay
        seconds.
        """

        if child is None:
            child = new_widget

        # A non-repeating motion runs for exactly one period by default.
        if delay is None and not repeat:
            delay = period

        super(Motion, self).__init__(style=style, **properties)

        if child is not None:
            self.add(child)

        self.function = function
        self.period = period
        self.repeat = repeat
        self.bounce = bounce
        self.delay = delay
        self.anim_timebase = anim_timebase
        self.time_warp = time_warp
        self.add_sizes = add_sizes

        # The last position returned by the function; None until the
        # first render.
        self.position = None

    def get_placement(self):
        if self.position is None:
            return super(Motion, self).get_placement()
        else:
            return self.position + (self.style.xoffset, self.style.yoffset, self.style.subpixel)

    def render(self, width, height, st, at):

        # Pick the timebase.
        if self.anim_timebase:
            t = at
        else:
            t = st

        if renpy.game.less_updates:
            if self.delay:
                t = self.delay
                if self.repeat:
                    t = t % self.period
            else:
                t = self.period
        elif self.delay and t >= self.delay:
            t = self.delay
            if self.repeat:
                t = t % self.period
        elif self.repeat:
            t = t % self.period
            renpy.display.render.redraw(self, 0)
        else:
            if t > self.period:
                t = self.period
            else:
                renpy.display.render.redraw(self, 0)

        # Normalize t to the 0.0-1.0 range.
        if self.period > 0:
            t /= self.period
        else:
            t = 1

        if self.time_warp:
            t = self.time_warp(t)

        # Bounce maps 0..1 onto 0..1..0.
        if self.bounce:
            t = t * 2
            if t > 1.0:
                t = 2.0 - t

        child = render(self.child, width, height, st, at)
        cw, ch = child.get_size()

        if self.add_sizes:
            res = self.function(t, (width, height, cw, ch))
        else:
            res = self.function(t)

        res = tuple(res)

        # A 2-tuple is (xpos, ypos); extend it with the style anchors.
        if len(res) == 2:
            self.position = res + (self.style.xanchor, self.style.yanchor)
        else:
            self.position = res

        rv = renpy.display.render.Render(cw, ch)
        rv.blit(child, (0, 0))

        self.offsets = [ (0, 0) ]

        return rv
class Interpolate(object):
    """
    A motion function that linearly interpolates each component between
    a start and end tuple, resolving named anchors ('top', 'right', ...)
    to fractional positions.
    """

    # Named anchors mapped to fractional positions.
    anchors = {
        'top' : 0.0,
        'center' : 0.5,
        'bottom' : 1.0,
        'left' : 0.0,
        'right' : 1.0,
    }

    def __init__(self, start, end):

        if len(start) != len(end):
            raise Exception("The start and end must have the same number of arguments.")

        resolve = self.anchors.get
        self.start = [ resolve(v, v) for v in start ]
        self.end = [ resolve(v, v) for v in end ]

    def __call__(self, t, sizes=(None, None, None, None)):

        def interp(a, b, c):

            # When a size is known, floats are fractions of that size.
            if c is not None:
                if type(a) is float:
                    a = a * c
                if type(b) is float:
                    b = b * c

            rv = a + t * (b - a)

            return renpy.display.core.absolute(rv)

        return [ interp(a, b, c) for a, b, c in zip(self.start, self.end, sizes) ]
def Pan(startpos, endpos, time, child=None, repeat=False, bounce=False,
        anim_timebase=False, style='motion', time_warp=None, **properties):
    """
    Pans over a child displayable (almost always an image) by
    interpolating the placement of the screen's upper-left corner,
    relative to the image, over time. Only really suitable for images
    larger than the screen; no cropping is performed.

    @param startpos: Initial upper-left screen corner, relative to the image.

    @param endpos: Upper-left screen corner after `time` has elapsed.

    @param time: The time it takes to pan from startpos to endpos.

    @param child: The child displayable.

    @param repeat: True to repeat the pan forever.

    @param bounce: True to bounce from start to end and back.

    @param anim_timebase: True to use the animation timebase instead of
    the displayable timebase.

    @param time_warp: Optional function mapping a fraction of the period
    (0.0-1.0) to a new fraction, used to apply acceleration and
    deceleration.

    This can be used as a transition. See Motion for details.
    """

    start_x, start_y = startpos
    end_x, end_y = endpos

    # Panning the viewport right/down means moving the child left/up,
    # hence the negated coordinates.
    return Motion(Interpolate((-start_x, -start_y), (-end_x, -end_y)),
                  time,
                  child,
                  repeat=repeat,
                  bounce=bounce,
                  style=style,
                  anim_timebase=anim_timebase,
                  time_warp=time_warp,
                  add_sizes=True,
                  **properties)
def Move(startpos, endpos, time, child=None, repeat=False, bounce=False,
         anim_timebase=False, style='motion', time_warp=None, **properties):
    """
    Moves a child displayable relative to its containing area by
    interpolating the child's placement over time.

    @param startpos: Initial coordinates of the child, relative to the
    containing area.

    @param endpos: Coordinates of the child at the end of the move.

    @param time: The time it takes to move from startpos to endpos.

    @param child: The child displayable.

    @param repeat: True to repeat the move forever.

    @param bounce: True to bounce from start to end and back.

    @param anim_timebase: True to use the animation timebase instead of
    the displayable timebase.

    @param time_warp: Optional function mapping a fraction of the period
    (0.0-1.0) to a new fraction, used to apply acceleration and
    deceleration.

    This can be used as a transition. See Motion for details.
    """

    interpolator = Interpolate(startpos, endpos)

    return Motion(interpolator,
                  time,
                  child,
                  repeat=repeat,
                  bounce=bounce,
                  anim_timebase=anim_timebase,
                  style=style,
                  time_warp=time_warp,
                  add_sizes=True,
                  **properties)
class Revolver(object):
    """
    Motion function that revolves a child's position from `start` to
    `end` degrees about a point, as the time fraction t goes 0 -> 1.
    """

    def __init__(self, start, end, child, around=(0.5, 0.5), cor=(0.5, 0.5), pos=None):
        self.start = start
        self.end = end
        self.around = around
        self.cor = cor
        self.pos = pos
        self.child = child

    def __call__(self, t, sizes):

        # Tuple parameters in function signatures were removed in
        # Python 3 (PEP 3113), so unpack the sizes tuple explicitly.
        w, h, cw, ch = sizes

        # Converts a float to an integer in the given range, passes
        # integers through unchanged.
        def fti(x, r):
            if x is None:
                x = 0

            if isinstance(x, float):
                return int(x * r)
            else:
                return x

        if self.pos is None:
            pos = self.child.get_placement()
        else:
            pos = self.pos

        xpos, ypos, xanchor, yanchor, _xoffset, _yoffset, _subpixel = pos

        xpos = fti(xpos, w)
        ypos = fti(ypos, h)
        xanchor = fti(xanchor, cw)
        yanchor = fti(yanchor, ch)

        xaround, yaround = self.around
        xaround = fti(xaround, w)
        yaround = fti(yaround, h)

        xcor, ycor = self.cor
        xcor = fti(xcor, cw)
        ycor = fti(ycor, ch)

        # Interpolate the angle and convert to radians.
        angle = self.start + (self.end - self.start) * t
        angle *= math.pi / 180

        # The center of rotation, relative to the xaround.
        x = xpos - xanchor + xcor - xaround
        y = ypos - yanchor + ycor - yaround

        # Rotate it.
        nx = x * math.cos(angle) - y * math.sin(angle)
        ny = x * math.sin(angle) + y * math.cos(angle)

        # Project it back.
        nx = nx - xcor + xaround
        ny = ny - ycor + yaround

        return (renpy.display.core.absolute(nx), renpy.display.core.absolute(ny), 0, 0)
def Revolve(start, end, time, child, around=(0.5, 0.5), cor=(0.5, 0.5), pos=None, **properties):
    """Returns a Motion that revolves `child` from `start` to `end` degrees."""

    revolver = Revolver(start, end, child, around=around, cor=cor, pos=pos)

    return Motion(revolver,
                  time,
                  child,
                  add_sizes=True,
                  **properties)
def zoom_render(crend, x, y, w, h, zw, zh, bilinear):
    """
    This creates a render that zooms its child.

    `crend` - The render of the child.
    `x`, `y`, `w`, `h` - A rectangle inside the child.
    `zw`, `zh` - The size the rectangle is rendered to.
    `bilinear` - Should we be rendering in bilinear mode?
    """

    rv = renpy.display.render.Render(zw, zh)

    # A degenerate rectangle has nothing to draw.
    if zw == 0 or zh == 0 or w == 0 or h == 0:
        return rv

    # NOTE(review): these divisions assume float dimensions; under
    # Python 2, integer arguments would truncate - confirm callers pass
    # floats.
    rv.forward = renpy.display.render.Matrix2D(w / zw, 0, 0, h / zh)
    rv.reverse = renpy.display.render.Matrix2D(zw / w, 0, 0, zh / h)

    rv.clipping = True

    rv.blit(crend, rv.reverse.transform(-x, -y))

    return rv
class ZoomCommon(renpy.display.core.Displayable):
    """
    Base class for the zooming displayables. Subclasses provide
    zoom_rectangle(); this class handles timing, the optional
    after_child swap, and rendering.
    """

    def __init__(self,
                 time, child,
                 end_identity=False,
                 after_child=None,
                 time_warp=None,
                 bilinear=True,
                 opaque=True,
                 anim_timebase=False,
                 repeat=False,
                 style='motion',
                 **properties):
        """
        @param time: The amount of time it will take to
        interpolate from the start to the end rectange.

        @param child: The child displayable.

        @param after_child: If present, a second child
        widget. This displayable will be rendered after the zoom
        completes. Use this to snap to a sharp displayable after
        the zoom is done.

        @param time_warp: If not None, this is a function that takes a
        fraction of the period (between 0.0 and 1.0), and returns a
        new fraction of the period. Use this to warp time, applying
        acceleration and deceleration to motions.
        """

        super(ZoomCommon, self).__init__(style=style, **properties)

        child = renpy.easy.displayable(child)

        self.time = time
        self.child = child
        self.repeat = repeat

        if after_child:
            self.after_child = renpy.easy.displayable(after_child)
        else:
            # A zoom ending at the identity can just show the child
            # itself once done.
            if end_identity:
                self.after_child = child
            else:
                self.after_child = None

        self.time_warp = time_warp
        self.bilinear = bilinear
        self.opaque = opaque
        self.anim_timebase = anim_timebase

    def visit(self):
        return [ self.child, self.after_child ]

    def render(self, width, height, st, at):

        # Pick the timebase.
        if self.anim_timebase:
            t = at
        else:
            t = st

        # Fraction of the zoom that has completed.
        if self.time:
            done = min(t / self.time, 1.0)
        else:
            done = 1.0

        if self.repeat:
            done = done % 1.0

        if renpy.game.less_updates:
            done = 1.0

        self.done = done

        # Once finished, swap in the after_child when we have one.
        if self.after_child and done == 1.0:
            return renpy.display.render.render(self.after_child, width, height, st, at)

        if self.time_warp:
            done = self.time_warp(done)

        rend = renpy.display.render.render(self.child, width, height, st, at)

        rx, ry, rw, rh, zw, zh = self.zoom_rectangle(done, rend.width, rend.height)

        if rx < 0 or ry < 0 or rx + rw > rend.width or ry + rh > rend.height:
            raise Exception("Zoom rectangle %r falls outside of %dx%d parent surface." % ((rx, ry, rw, rh), rend.width, rend.height))

        rv = zoom_render(rend, rx, ry, rw, rh, zw, zh, self.bilinear)

        # Keep redrawing until the zoom completes.
        if self.done < 1.0:
            renpy.display.render.redraw(self, 0)

        return rv

    def event(self, ev, x, y, st):

        if not self.time:
            done = 1.0
        else:
            done = min(st / self.time, 1.0)

        # Forward events to the after_child once the zoom has completed.
        if done == 1.0 and self.after_child:
            return self.after_child.event(ev, x, y, st)
        else:
            return None
class Zoom(ZoomCommon):
    """Zooms by interpolating between two rectangles inside the child."""

    def __init__(self, size, start, end, time, child, **properties):
        # Ending on the full-size rectangle means the zoom finishes at
        # the identity, so the plain child can be swapped in.
        identity_at_end = (end == (0.0, 0.0) + size)
        super(Zoom, self).__init__(time, child, end_identity=identity_at_end, **properties)

        self.size = size
        self.start = start
        self.end = end

    def zoom_rectangle(self, done, width, height):
        rx, ry, rw, rh = [ s + (e - s) * done for s, e in zip(self.start, self.end) ]

        return rx, ry, rw, rh, self.size[0], self.size[1]
class FactorZoom(ZoomCommon):
    """Zooms the whole child by interpolating a scale factor."""

    def __init__(self, start, end, time, child, **properties):
        # A final factor of 1.0 leaves the child unscaled.
        identity_at_end = (end == 1.0)
        super(FactorZoom, self).__init__(time, child, end_identity=identity_at_end, **properties)

        self.start = start
        self.end = end

    def zoom_rectangle(self, done, width, height):
        factor = self.start + (self.end - self.start) * done

        return 0, 0, width, height, factor * width, factor * height
class SizeZoom(ZoomCommon):
    """Zooms the whole child toward an explicit (width, height) size."""

    def __init__(self, start, end, time, child, **properties):
        super(SizeZoom, self).__init__(time, child, end_identity=False, **properties)

        self.start = start
        self.end = end

    def zoom_rectangle(self, done, width, height):
        start_w, start_h = self.start
        end_w, end_h = self.end

        zw = start_w + (end_w - start_w) * done
        zh = start_h + (end_h - start_h) * done

        return 0, 0, width, height, zw, zh
class RotoZoom(renpy.display.core.Displayable):
    """
    Simultaneously rotates and zooms its child over time, delegating the
    actual drawing to a lazily-created Transform.
    """

    # The Transform used to draw; created on first render.
    transform = None

    def __init__(self,
                 rot_start,
                 rot_end,
                 rot_delay,
                 zoom_start,
                 zoom_end,
                 zoom_delay,
                 child,
                 rot_repeat=False,
                 zoom_repeat=False,
                 rot_bounce=False,
                 zoom_bounce=False,
                 rot_anim_timebase=False,
                 zoom_anim_timebase=False,
                 rot_time_warp=None,
                 zoom_time_warp=None,
                 opaque=False,
                 style='motion',
                 **properties):

        super(RotoZoom, self).__init__(style=style, **properties)

        self.rot_start = rot_start
        self.rot_end = rot_end
        self.rot_delay = rot_delay

        self.zoom_start = zoom_start
        self.zoom_end = zoom_end
        self.zoom_delay = zoom_delay

        self.child = renpy.easy.displayable(child)

        self.rot_repeat = rot_repeat
        self.zoom_repeat = zoom_repeat

        self.rot_bounce = rot_bounce
        self.zoom_bounce = zoom_bounce

        self.rot_anim_timebase = rot_anim_timebase
        self.zoom_anim_timebase = zoom_anim_timebase

        self.rot_time_warp = rot_time_warp
        self.zoom_time_warp = zoom_time_warp

        self.opaque = opaque

    def visit(self):
        return [ self.child ]

    def render(self, width, height, st, at):

        # Pick a timebase separately for the rotation and the zoom.
        if self.rot_anim_timebase:
            rot_time = at
        else:
            rot_time = st

        if self.zoom_anim_timebase:
            zoom_time = at
        else:
            zoom_time = st

        # Normalize each time to a 0.0-1.0 fraction of its delay.
        if self.rot_delay == 0:
            rot_time = 1.0
        else:
            rot_time /= self.rot_delay

        if self.zoom_delay == 0:
            zoom_time = 1.0
        else:
            zoom_time /= self.zoom_delay

        if self.rot_repeat:
            rot_time %= 1.0

        if self.zoom_repeat:
            zoom_time %= 1.0

        # Bounce maps 0..1 onto 0..1..0.
        if self.rot_bounce:
            rot_time *= 2
            rot_time = min(rot_time, 2.0 - rot_time)

        if self.zoom_bounce:
            zoom_time *= 2
            zoom_time = min(zoom_time, 2.0 - zoom_time)

        if renpy.game.less_updates:
            rot_time = 1.0
            zoom_time = 1.0

        rot_time = min(rot_time, 1.0)
        zoom_time = min(zoom_time, 1.0)

        if self.rot_time_warp:
            rot_time = self.rot_time_warp(rot_time)

        if self.zoom_time_warp:
            zoom_time = self.zoom_time_warp(zoom_time)

        # Interpolate angle and zoom (1.0 * forces float math).
        angle = self.rot_start + (1.0 * self.rot_end - self.rot_start) * rot_time
        zoom = self.zoom_start + (1.0 * self.zoom_end - self.zoom_start) * zoom_time

        # angle = -angle * math.pi / 180

        # Avoid a degenerate zero zoom.
        zoom = max(zoom, 0.001)

        if self.transform is None:
            self.transform = Transform(self.child)

        self.transform.rotate = angle
        self.transform.zoom = zoom

        rv = renpy.display.render.render(self.transform, width, height, st, at)

        if rot_time <= 1.0 or zoom_time <= 1.0:
            renpy.display.render.redraw(self.transform, 0)

        return rv
# For compatibility with old games.
# (Old save files may reference these classes under their historical
# renpy.display.layout names, so re-export them there.)
renpy.display.layout.Transform = Transform
renpy.display.layout.RotoZoom = RotoZoom
renpy.display.layout.SizeZoom = SizeZoom
renpy.display.layout.FactorZoom = FactorZoom
renpy.display.layout.Zoom = Zoom
renpy.display.layout.Revolver = Revolver
renpy.display.layout.Motion = Motion
renpy.display.layout.Interpolate = Interpolate

# Leave these functions around - they might have been pickled somewhere.
renpy.display.layout.Revolve = Revolve # function
renpy.display.layout.Move = Move # function
renpy.display.layout.Pan = Pan # function
|
joxer/Baka-No-Voltron
|
tmp/android.dist/private/renpy/display/motion.py
|
Python
|
gpl-2.0
| 46,158
|
[
"VisIt"
] |
f70d43ce59af44ac8057de77d5cab4d9d73c39ae8783ed071e8bd17c53a7d74c
|
"""
Student Views
"""
import traceback
import datetime
import logging
import uuid
import json
import warnings
import re
from collections import defaultdict
from urlparse import urljoin
from pytz import UTC
from requests import HTTPError
from ipware.ip import get_ip
from django.conf import settings
from django.contrib.auth import logout, authenticate, login
from django.contrib.auth.models import User, AnonymousUser
from django.contrib.auth.decorators import login_required
from django.contrib.auth.views import password_reset_confirm
from django.contrib import messages
from django.core.context_processors import csrf
from django.core import mail
from django.core.urlresolvers import reverse, NoReverseMatch
from django.core.validators import validate_email, ValidationError
from django.db import IntegrityError, transaction
from django.http import (HttpResponse, HttpResponseBadRequest, HttpResponseForbidden,
HttpResponseServerError, Http404, HttpResponseRedirect)
from django.shortcuts import redirect
from django.utils.encoding import force_bytes, force_text
from django.utils.translation import ungettext
from django.utils.http import base36_to_int, urlsafe_base64_encode
from django.utils.translation import ugettext as _, get_language
from django.views.decorators.csrf import csrf_exempt, ensure_csrf_cookie
from django.views.decorators.http import require_POST, require_GET
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.template.response import TemplateResponse
from ratelimitbackend.exceptions import RateLimitException
from social.apps.django_app import utils as social_utils
from social.backends import oauth as social_oauth
from social.exceptions import AuthException, AuthAlreadyAssociated
from edxmako.shortcuts import render_to_response, render_to_string
from openedx.core.djangoapps.course_groups.cohorts import get_cohort_by_name, add_user_to_cohort
from openedx.core.djangoapps.course_groups.models import CourseUserGroup
from course_modes.models import CourseMode
from shoppingcart.api import order_history
from student.models import (
Registration, UserProfile, StudentProfile, TeacherProfile, ClassSet, School,
PendingEmailChange, CourseEnrollment, CourseEnrollmentAttribute, unique_id_for_user,
CourseEnrollmentAllowed, UserStanding, LoginFailures,
create_comments_service_user, PasswordHistory, UserSignupSource,
DashboardConfiguration, LinkedInAddToProfileConfiguration, ManualEnrollmentAudit, ALLOWEDTOENROLL_TO_ENROLLED)
from student.forms import AccountCreationForm, PasswordResetFormNoActive, get_registration_extension_form, StudentRegistrationForm, TeacherRegistrationForm
from student.helpers import is_teacher
from lms.djangoapps.verify_student.models import SoftwareSecurePhotoVerification # pylint: disable=import-error
from certificates.models import CertificateStatuses, certificate_status_for_student
from certificates.api import ( # pylint: disable=import-error
get_certificate_url,
has_html_certificates_enabled,
)
from xmodule.modulestore.django import modulestore
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from opaque_keys.edx.locator import CourseLocator
from xmodule.modulestore import ModuleStoreEnum
from collections import namedtuple
from courseware.courses import get_courses, sort_by_announcement, sort_by_start_date, get_course_by_id # pylint: disable=import-error
from courseware.access import has_access
from django_comment_common.models import Role
from external_auth.models import ExternalAuthMap
import external_auth.views
from external_auth.login_and_register import (
login as external_auth_login,
register as external_auth_register
)
from bulk_email.models import Optout, CourseAuthorization
from lang_pref import LANGUAGE_KEY
import track.views
import dogstats_wrapper as dog_stats_api
from util.db import outer_atomic
from util.json_request import JsonResponse
from util.bad_request_rate_limiter import BadRequestRateLimiter
from util.milestones_helpers import (
get_pre_requisite_courses_not_completed,
)
from microsite_configuration import microsite
from util.password_policy_validators import (
validate_password_length, validate_password_complexity,
validate_password_dictionary
)
import third_party_auth
from third_party_auth import pipeline, provider
from student.helpers import (
check_verify_status_by_course,
auth_pipeline_urls, get_next_url_for_login_page,
DISABLE_UNENROLL_CERT_STATES,
check_classcode_exists,
search_school_details,
check_school_exists,
)
from student.cookies import set_logged_in_cookies, delete_logged_in_cookies
from student.models import anonymous_id_for_user
from shoppingcart.models import DonationConfiguration, CourseRegistrationCode
from embargo import api as embargo_api
import analytics
from eventtracking import tracker
# Note that this lives in LMS, so this dependency should be refactored.
from notification_prefs.views import enable_notifications
# Note that this lives in openedx, so this dependency should be refactored.
from openedx.core.djangoapps.user_api.preferences import api as preferences_api
from openedx.core.djangoapps.programs.utils import get_programs_for_dashboard
from instructor.access import allow_access
import base64
import hmac
import hashlib
import urllib
from urlparse import parse_qs
# Module-level loggers: general student events, and the audit trail.
log = logging.getLogger("edx.student")
AUDIT_LOG = logging.getLogger("audit")

# Per-course reverification data shown on the dashboard.
ReverifyInfo = namedtuple('ReverifyInfo', 'course_id course_name course_number date status display')  # pylint: disable=invalid-name

# Name of the settings-change-initiated tracking event.
SETTING_CHANGE_INITIATED = 'edx.user.settings.change_initiated'
# Disable this warning because it doesn't make sense to completely refactor tests to appease Pylint
# pylint: disable=logging-format-interpolation
def csrf_token(context):
    """A csrf token that can be included in a form."""
    token = context.get('csrf_token', '')

    if token == 'NOTPROVIDED':
        # Django's sentinel for "no token available".
        return ''

    template = (u'<div style="display:none"><input type="hidden"'
                u' name="csrfmiddlewaretoken" value="%s" /></div>')

    return template % (token,)
# NOTE: This view is not linked to directly--it is called from
# branding/views.py:index(), which is cached for anonymous users.
# This means that it should always return the same thing for anon
# users. (in particular, no switching based on query params allowed)
def index(request, extra_context=None, user=AnonymousUser()):
    """
    Render the edX main page.

    extra_context is used to allow immediate display of certain modal windows, eg signup,
    as used by external_auth.
    """
    if extra_context is None:
        extra_context = {}

    courses = get_courses(user)

    sort_by_start = microsite.get_value(
        "ENABLE_COURSE_SORTING_BY_START_DATE",
        settings.FEATURES["ENABLE_COURSE_SORTING_BY_START_DATE"],
    )

    if sort_by_start:
        courses = sort_by_start_date(courses)
    else:
        courses = sort_by_announcement(courses)

    context = {
        'courses': courses,
        'homepage_overlay_html': microsite.get_value('homepage_overlay_html'),
        # This appears to be an unused context parameter, at least for the master templates...
        'show_partners': microsite.get_value('show_partners', True),
        # TO DISPLAY A YOUTUBE WELCOME VIDEO
        # 1) Change False to True
        'show_homepage_promo_video': microsite.get_value('show_homepage_promo_video', False),
    }

    # 2) Add your video's YouTube ID (11 chars, eg "123456789xX"), or specify via microsite config
    # Note: This value should be moved into a configuration setting and plumbed-through to the
    # context via the microsite configuration workflow, versus living here
    context['homepage_promo_video_youtube_id'] = microsite.get_value('homepage_promo_video_youtube_id', "your-youtube-id")

    # allow for microsite override of the courses list
    context['courses_list'] = microsite.get_template_path('courses_list.html')

    # Insert additional context for use in the template
    context.update(extra_context)

    return render_to_response('index.html', context)
def process_survey_link(survey_link, user):
    """
    If {UNIQUE_ID} appears in the link, replace it with a unique id for the user.
    Currently, this is sha1(user.username). Otherwise, return survey_link.
    """
    # str.format leaves the link unchanged when it contains no
    # {UNIQUE_ID} replacement field.
    return survey_link.format(UNIQUE_ID=unique_id_for_user(user))
def cert_info(user, course_overview, course_mode):
    """
    Get the certificate info needed to render the dashboard section for the given
    student and course.

    Arguments:
        user (User): A user.
        course_overview (CourseOverview): A course.
        course_mode (str): The enrollment mode (honor, verified, audit, etc.)

    Returns:
        dict: Empty dict if certificates are disabled or hidden, or a dictionary with keys:
            'status': one of 'generating', 'ready', 'notpassing', 'processing', 'restricted'
            'show_download_url': bool
            'download_url': url, only present if show_download_url is True
            'show_disabled_download_button': bool -- true if state is 'generating'
            'show_survey_button': bool
            'survey_url': url, only if show_survey_button is True
            'grade': if status is not 'processing'
            'can_unenroll': if status allows for unenrollment
    """
    if not course_overview.may_certify():
        return {}

    cert_status = certificate_status_for_student(user, course_overview.id)

    return _cert_info(user, course_overview, cert_status, course_mode)
def reverification_info(statuses):
    """
    Returns reverification-related information for *all* of user's enrollments whose
    reverification status is in statuses.

    Args:
        statuses (list): a list of reverification statuses we want information for
            example: ["must_reverify", "denied"]

    Returns:
        dictionary of lists: dictionary with one key per status, e.g.
            dict["must_reverify"] = []
            dict["must_reverify"] = [some information]
    """
    reverifications = defaultdict(list)

    # Sort the data by the reverification_end_date. Note that merely
    # reading reverifications[status] creates the key with an empty
    # list when it is absent.
    for status in statuses:
        entries = reverifications[status]
        if entries:
            entries.sort(key=lambda x: x.date)

    return reverifications
def get_course_enrollments(user, org_to_include, orgs_to_exclude):
    """
    Given a user, return a filtered set of his or her course enrollments.

    Arguments:
        user (User): the user in question.
        org_to_include (str): for use in Microsites. If not None, ONLY courses
            of this org will be returned.
        orgs_to_exclude (list[str]): If org_to_include is not None, this
            argument is ignored. Else, courses of this org will be excluded.

    Returns:
        generator[CourseEnrollment]: a sequence of enrollments to be displayed
        on the user's dashboard.
    """
    for enrollment in CourseEnrollment.enrollments_for_user(user):
        overview = enrollment.course_overview
        # A missing overview means the course is broken or gone; log and skip
        # it without deleting the enrollment record.
        if not overview:
            log.error(
                "User %s enrolled in broken or non-existent course %s",
                user.username,
                enrollment.course_id
            )
            continue
        org = overview.location.org
        # Inside a Microsite: drop anything not attributed (by ORG) to it.
        if org_to_include and org != org_to_include:
            continue
        # Outside a Microsite: drop enrollments attributed to Microsite orgs.
        if org in orgs_to_exclude:
            continue
        yield enrollment
def _cert_info(user, course_overview, cert_status, course_mode): # pylint: disable=unused-argument
    """
    Implements the logic for cert_info -- split out for testing.

    Arguments:
        user (User): A user.
        course_overview (CourseOverview): A course.
        cert_status (dict or None): Certificate status record for this user
            and course, as produced by certificate_status_for_student.
        course_mode (str): The enrollment mode (honor, verified, audit, etc.)

    Returns:
        dict: Dashboard status info (see cert_info for the key list), or an
        empty dict when the course hides certificate info for this status.
    """
    # simplify the status for the template using this lookup table
    template_state = {
        CertificateStatuses.generating: 'generating',
        CertificateStatuses.regenerating: 'generating',
        CertificateStatuses.downloadable: 'ready',
        CertificateStatuses.notpassing: 'notpassing',
        CertificateStatuses.restricted: 'restricted',
        CertificateStatuses.auditing: 'auditing',
        CertificateStatuses.audit_passing: 'auditing',
        CertificateStatuses.audit_notpassing: 'auditing',
    }
    default_status = 'processing'
    default_info = {
        'status': default_status,
        'show_disabled_download_button': False,
        'show_download_url': False,
        'show_survey_button': False,
        'can_unenroll': True,
    }
    if cert_status is None:
        return default_info
    # Courses configured as 'early_no_info' show nothing at all while the
    # certificate is in one of these not-yet-available states.
    is_hidden_status = cert_status['status'] in ('unavailable', 'processing', 'generating', 'notpassing', 'auditing')
    if course_overview.certificates_display_behavior == 'early_no_info' and is_hidden_status:
        return {}
    # Unknown raw statuses collapse to the default 'processing'.
    status = template_state.get(cert_status['status'], default_status)
    status_dict = {
        'status': status,
        'show_download_url': status == 'ready',
        'show_disabled_download_button': status == 'generating',
        'mode': cert_status.get('mode', None),
        'linked_in_url': None,
        'can_unenroll': status not in DISABLE_UNENROLL_CERT_STATES,
    }
    # Offer the end-of-course survey (when the course has one) for any
    # terminal certificate status.
    if (status in ('generating', 'ready', 'notpassing', 'restricted', 'auditing') and
        course_overview.end_of_course_survey_url is not None):
        status_dict.update({
            'show_survey_button': True,
            'survey_url': process_survey_link(course_overview.end_of_course_survey_url, user)})
    else:
        status_dict['show_survey_button'] = False
    if status == 'ready':
        # showing the certificate web view button if certificate is ready state and feature flags are enabled.
        if has_html_certificates_enabled(course_overview.id, course_overview):
            if course_overview.has_any_active_web_certificate:
                status_dict.update({
                    'show_cert_web_view': True,
                    'cert_web_view_url': get_certificate_url(course_id=course_overview.id, uuid=cert_status['uuid'])
                })
            else:
                # don't show download certificate button if we don't have an active certificate for course
                status_dict['show_download_url'] = False
        elif 'download_url' not in cert_status:
            # A 'downloadable' record without a URL is inconsistent data;
            # fall back to the default 'processing' info.
            log.warning(
                u"User %s has a downloadable cert for %s, but no download url",
                user.username,
                course_overview.id
            )
            return default_info
        else:
            status_dict['download_url'] = cert_status['download_url']
        # If enabled, show the LinkedIn "add to profile" button
        # Clicking this button sends the user to LinkedIn where they
        # can add the certificate information to their profile.
        linkedin_config = LinkedInAddToProfileConfiguration.current()
        # posting certificates to LinkedIn is not currently
        # supported in microsites/White Labels
        if linkedin_config.enabled and not microsite.is_request_in_microsite():
            status_dict['linked_in_url'] = linkedin_config.add_to_profile_url(
                course_overview.id,
                course_overview.display_name,
                cert_status.get('mode'),
                cert_status['download_url']
            )
    if status in ('generating', 'ready', 'notpassing', 'restricted', 'auditing'):
        if 'grade' not in cert_status:
            # Note: as of 11/20/2012, we know there are students in this state-- cs169.1x,
            # who need to be regraded (we weren't tracking 'notpassing' at first).
            # We can add a log.warning here once we think it shouldn't happen.
            return default_info
        else:
            status_dict['grade'] = cert_status['grade']
    return status_dict
@ensure_csrf_cookie
def signin_user(request):
    """
    Deprecated. To be replaced by :class:`student_account.views.login_and_registration_form`.

    Render the stand-alone sign-in page, or redirect when the user is already
    authenticated or handled by external auth.
    """
    external_auth_response = external_auth_login(request)
    if external_auth_response is not None:
        return external_auth_response
    # Determine the URL to redirect to following login:
    redirect_to = get_next_url_for_login_page(request)
    if request.user.is_authenticated():
        return redirect(redirect_to)
    third_party_auth_error = None
    for msg in messages.get_messages(request):
        # Guard against messages without tags: extra_tags may be None or '',
        # and ''.split() returns [], so indexing [0] would raise IndexError.
        if msg.extra_tags and msg.extra_tags.split()[0] == "social-auth":
            # msg may or may not be translated. Try translating [again] in case we are able to:
            third_party_auth_error = _(unicode(msg))  # pylint: disable=translation-of-non-string
            break
    context = {
        'login_redirect_url': redirect_to,  # This gets added to the query string of the "Sign In" button in the header
        # Bool injected into JS to submit form if we're inside a running third-
        # party auth pipeline; distinct from the actual instance of the running
        # pipeline, if any.
        'pipeline_running': 'true' if pipeline.running(request) else 'false',
        'pipeline_url': auth_pipeline_urls(pipeline.AUTH_ENTRY_LOGIN, redirect_url=redirect_to),
        'platform_name': microsite.get_value(
            'platform_name',
            settings.PLATFORM_NAME
        ),
        'third_party_auth_error': third_party_auth_error
    }
    return render_to_response('login.html', context)
@ensure_csrf_cookie
def register_user(request, extra_context=None):
    """Deprecated. To be replaced by :class:`student_account.views.login_and_registration_form`."""
    # Figure out where to send the user after they log in.
    redirect_to = get_next_url_for_login_page(request)
    if request.user.is_authenticated():
        return redirect(redirect_to)
    external_auth_response = external_auth_register(request)
    if external_auth_response is not None:
        return external_auth_response
    context = {
        # This gets added to the query string of the "Sign In" button in the header
        'login_redirect_url': redirect_to,
        'email': '',
        'name': '',
        'running_pipeline': None,
        'pipeline_urls': auth_pipeline_urls(pipeline.AUTH_ENTRY_REGISTER, redirect_url=redirect_to),
        'platform_name': microsite.get_value('platform_name', settings.PLATFORM_NAME),
        'selected_provider': '',
        'username': '',
    }
    if extra_context is not None:
        context.update(extra_context)
    # Shibboleth-driven registrations use a dedicated template.
    if context.get("extauth_domain", '').startswith(external_auth.views.SHIBBOLETH_DOMAIN_PREFIX):
        return render_to_response('register-shib.html', context)
    # If third-party auth is enabled, prepopulate the form with data from the
    # selected provider.
    if third_party_auth.is_enabled() and pipeline.running(request):
        running_pipeline = pipeline.get(request)
        current_provider = provider.Registry.get_from_pipeline(running_pipeline)
        if current_provider is not None:
            overrides = current_provider.get_register_form_data(running_pipeline.get('kwargs'))
            overrides.update({
                'running_pipeline': running_pipeline,
                'selected_provider': current_provider.name,
            })
            context.update(overrides)
    return render_to_response('register.html', context)
def complete_course_mode_info(course_id, enrollment, modes=None):
    """
    Compute extra course-mode information from the given course modes and the
    user's current enrollment.

    Returns a dict with:
        - 'show_upsell': whether to show the course upsell information
        - 'days_for_upsell': number of days until they can't upsell anymore
          (None when there is no expiration date)
    """
    if modes is None:
        modes = CourseMode.modes_for_course_dict(course_id)
    mode_info = {'show_upsell': False, 'days_for_upsell': None}
    # Upsell only applies when a verified track exists and the user's current
    # mode is one that can be upgraded to verified.
    can_upsell = (
        CourseMode.VERIFIED in modes and
        enrollment.mode in CourseMode.UPSELL_TO_VERIFIED_MODES
    )
    if can_upsell:
        mode_info['show_upsell'] = True
        expiration = modes['verified'].expiration_datetime
        # With an expiration date, report how many days remain from today.
        if expiration:
            today = datetime.datetime.now(UTC).date()
            mode_info['days_for_upsell'] = (expiration.date() - today).days
    return mode_info
def is_course_blocked(request, redeemed_registration_codes, course_key):
    """Checking either registration is blocked or not ."""
    for redeemed_registration in redeemed_registration_codes:
        # registration codes may be generated via Bulk Purchase Scenario
        # we have to check only for the invoice generated registration codes
        # that their invoice is valid or not
        invoice_item = redeemed_registration.invoice_item
        if not invoice_item:
            continue
        if invoice_item.invoice.is_valid:
            continue
        # disabling email notifications for unpaid registration courses
        Optout.objects.get_or_create(user=request.user, course_id=course_key)
        log.info(
            u"User %s (%s) opted out of receiving emails from course %s",
            request.user.username,
            request.user.email,
            course_key,
        )
        track.views.server_track(
            request,
            "change-email1-settings",
            {"receive_emails": "no", "course": course_key.to_deprecated_string()},
            page='dashboard',
        )
        return True
    return False
@login_required
@ensure_csrf_cookie
def dashboard(request):
    """
    Render the student dashboard.

    Gathers the user's course enrollments plus the per-course data the
    template needs: course modes, certificate status, verification and
    credit status, email settings, refund/blocked/paid flags, prerequisite
    checks, and order history.

    Arguments:
        request (HttpRequest): request from a logged-in user (enforced by
            the login_required decorator).

    Returns:
        HttpResponse: rendered 'dashboard.html', or a redirect to the
        sign-in page when the account is not active.
    """
    user = request.user
    platform_name = microsite.get_value("platform_name", settings.PLATFORM_NAME)
    # for microsites, we want to filter and only show enrollments for courses within
    # the microsites 'ORG'
    course_org_filter = microsite.get_value('course_org_filter')
    # Let's filter out any courses in an "org" that has been declared to be
    # in a Microsite
    org_filter_out_set = microsite.get_all_orgs()
    # remove our current Microsite from the "filter out" list, if applicable
    if course_org_filter:
        org_filter_out_set.remove(course_org_filter)
    # Build our (course, enrollment) list for the user, but ignore any courses that no
    # longer exist (because the course IDs have changed). Still, we don't delete those
    # enrollments, because it could have been a data push snafu.
    course_enrollments = list(get_course_enrollments(user, course_org_filter, org_filter_out_set))
    # sort the enrollment pairs by the enrollment date
    course_enrollments.sort(key=lambda x: x.created, reverse=True)
    # Retrieve the course modes for each course
    enrolled_course_ids = [enrollment.course_id for enrollment in course_enrollments]
    __, unexpired_course_modes = CourseMode.all_and_unexpired_modes_for_courses(enrolled_course_ids)
    course_modes_by_course = {
        course_id: {
            mode.slug: mode
            for mode in modes
        }
        for course_id, modes in unexpired_course_modes.iteritems()
    }
    # Check to see if the student has recently enrolled in a course.
    # If so, display a notification message confirming the enrollment.
    enrollment_message = _create_recent_enrollment_message(
        course_enrollments, course_modes_by_course
    )
    course_optouts = Optout.objects.filter(user=user).values_list('course_id', flat=True)
    # Accounts that are not active are bounced back to the sign-in page.
    if not user.is_active:
        response = redirect("signin_user")
        return response
    # Global staff can see what courses errored on their dashboard
    staff_access = False
    errored_courses = {}
    if has_access(user, 'staff', 'global'):
        # Show any courses that errored on load
        staff_access = True
        errored_courses = modulestore().get_errored_courses()
    show_courseware_links_for = frozenset(
        enrollment.course_id for enrollment in course_enrollments
        if has_access(request.user, 'load', enrollment.course_overview)
        and has_access(request.user, 'view_courseware_with_prerequisites', enrollment.course_overview)
    )
    # Get any programs associated with courses being displayed.
    # This is passed along in the template context to allow rendering of
    # program-related information on the dashboard.
    course_programs = _get_course_programs(user, [enrollment.course_id for enrollment in course_enrollments])
    # Construct a dictionary of course mode information
    # used to render the course list. We re-use the course modes dict
    # we loaded earlier to avoid hitting the database.
    course_mode_info = {
        enrollment.course_id: complete_course_mode_info(
            enrollment.course_id, enrollment,
            modes=course_modes_by_course[enrollment.course_id]
        )
        for enrollment in course_enrollments
    }
    # Determine the per-course verification status
    # This is a dictionary in which the keys are course locators
    # and the values are one of:
    #
    # VERIFY_STATUS_NEED_TO_VERIFY
    # VERIFY_STATUS_SUBMITTED
    # VERIFY_STATUS_APPROVED
    # VERIFY_STATUS_MISSED_DEADLINE
    #
    # Each of which correspond to a particular message to display
    # next to the course on the dashboard.
    #
    # If a course is not included in this dictionary,
    # there is no verification messaging to display.
    verify_status_by_course = check_verify_status_by_course(user, course_enrollments)
    cert_statuses = {
        enrollment.course_id: cert_info(request.user, enrollment.course_overview, enrollment.mode)
        for enrollment in course_enrollments
    }
    # only show email settings for Mongo course and when bulk email is turned on
    show_email_settings_for = frozenset(
        enrollment.course_id for enrollment in course_enrollments if (
            settings.FEATURES['ENABLE_INSTRUCTOR_EMAIL'] and
            modulestore().get_modulestore_type(enrollment.course_id) != ModuleStoreEnum.Type.xml and
            CourseAuthorization.instructor_email_enabled(enrollment.course_id)
        )
    )
    # Verification Attempts
    # Used to generate the "you must reverify for course x" banner
    verification_status, verification_msg = SoftwareSecurePhotoVerification.user_status(user)
    # Gets data for midcourse reverifications, if any are necessary or have failed
    statuses = ["approved", "denied", "pending", "must_reverify"]
    reverifications = reverification_info(statuses)
    show_refund_option_for = frozenset(
        enrollment.course_id for enrollment in course_enrollments
        if enrollment.refundable()
    )
    block_courses = frozenset(
        enrollment.course_id for enrollment in course_enrollments
        if is_course_blocked(
            request,
            CourseRegistrationCode.objects.filter(
                course_id=enrollment.course_id,
                registrationcoderedemption__redeemed_by=request.user
            ),
            enrollment.course_id
        )
    )
    enrolled_courses_either_paid = frozenset(
        enrollment.course_id for enrollment in course_enrollments
        if enrollment.is_paid_course()
    )
    # If there are *any* denied reverifications that have not been toggled off,
    # we'll display the banner
    denied_banner = any(item.display for item in reverifications["denied"])
    # Populate the Order History for the side-bar.
    order_history_list = order_history(user, course_org_filter=course_org_filter, org_filter_out_set=org_filter_out_set)
    # get list of courses having pre-requisites yet to be completed
    courses_having_prerequisites = frozenset(
        enrollment.course_id for enrollment in course_enrollments
        if enrollment.course_overview.pre_requisite_courses
    )
    courses_requirements_not_met = get_pre_requisite_courses_not_completed(user, courses_having_prerequisites)
    if 'notlive' in request.GET:
        redirect_message = _("The course you are looking for does not start until {date}.").format(
            date=request.GET['notlive']
        )
    else:
        redirect_message = ''
    context = {
        'enrollment_message': enrollment_message,
        'redirect_message': redirect_message,
        'course_enrollments': course_enrollments,
        'course_optouts': course_optouts,
        #'message': message,
        'staff_access': staff_access,
        'errored_courses': errored_courses,
        'show_courseware_links_for': show_courseware_links_for,
        'all_course_modes': course_mode_info,
        'cert_statuses': cert_statuses,
        'credit_statuses': _credit_statuses(user, course_enrollments),
        'show_email_settings_for': show_email_settings_for,
        'reverifications': reverifications,
        'verification_status': verification_status,
        'verification_status_by_course': verify_status_by_course,
        'verification_msg': verification_msg,
        'show_refund_option_for': show_refund_option_for,
        'block_courses': block_courses,
        'denied_banner': denied_banner,
        'billing_email': settings.PAYMENT_SUPPORT_EMAIL,
        'user': user,
        'logout_url': reverse(logout_user),
        'platform_name': platform_name,
        'enrolled_courses_either_paid': enrolled_courses_either_paid,
        'provider_states': [],
        'order_history_list': order_history_list,
        'courses_requirements_not_met': courses_requirements_not_met,
        'nav_hidden': True,
        'course_programs': course_programs,
        'disable_courseware_js': True,
    }
    return render_to_response('dashboard.html', context)
def _create_recent_enrollment_message(course_enrollments, course_modes):  # pylint: disable=invalid-name
    """
    Builds a recent course enrollment message.

    Constructs a new message template based on any recent course enrollments
    for the student.

    Args:
        course_enrollments (list[CourseEnrollment]): a list of course enrollments.
        course_modes (dict): Mapping of course ID's to course mode dictionaries.

    Returns:
        A string representing the HTML message output from the message template.
        None if there are no recently enrolled courses.
    """
    recent_enrollments = _get_recently_enrolled_courses(course_enrollments)
    if not recent_enrollments:
        return None
    enroll_messages = []
    for enrollment in recent_enrollments:
        overview = enrollment.course_overview
        enroll_messages.append({
            "course_id": overview.id,
            "course_name": overview.display_name,
            "allow_donation": _allow_donation(course_modes, overview.id, enrollment)
        })
    return render_to_string(
        'enrollment/course_enrollment_message.html',
        {
            'course_enrollment_messages': enroll_messages,
            'platform_name': microsite.get_value('platform_name', settings.PLATFORM_NAME),
        }
    )
def _get_recently_enrolled_courses(course_enrollments):
    """
    Given a list of enrollments, filter out all but recent enrollments.

    Args:
        course_enrollments (list[CourseEnrollment]): A list of course enrollments.

    Returns:
        list[CourseEnrollment]: A list of recent course enrollments.
    """
    window_seconds = DashboardConfiguration.current().recent_enrollment_time_delta
    cutoff = datetime.datetime.now(UTC) - datetime.timedelta(seconds=window_seconds)
    recent = []
    for enrollment in course_enrollments:
        # Enrollments without a created date are explicitly excluded from the
        # list of recent enrollments (the comparison does not succeed).
        if enrollment.is_active and enrollment.created > cutoff:
            recent.append(enrollment)
    return recent
def _allow_donation(course_modes, course_id, enrollment):
    """Determines if the dashboard will request donations for the given course.

    Check if donations are configured for the platform, and if the current course is accepting donations.

    Args:
        course_modes (dict): Mapping of course ID's to course mode dictionaries.
        course_id (str): The unique identifier for the course.
        enrollment(CourseEnrollment): The enrollment object in which the user is enrolled

    Returns:
        True if the course is allowing donations.
    """
    # Donations must be enabled platform-wide first.
    if not DonationConfiguration.current().enabled:
        return False
    modes_for_course = course_modes[course_id]
    if enrollment.mode not in modes_for_course:
        return False
    # Only free (zero minimum price) modes solicit donations.
    return modes_for_course[enrollment.mode].min_price == 0
def _update_email_opt_in(request, org):
    """Helper function used to hit the profile API if email opt-in is enabled."""
    raw_value = request.POST.get('email_opt_in')
    # An absent parameter means "leave the preference untouched".
    if raw_value is None:
        return
    preferences_api.update_email_opt_in(request.user, org, raw_value == 'true')
def _credit_statuses(user, course_enrollments):
    """
    Retrieve the status for credit courses.

    A credit course is a course for which a user can purchased
    college credit. The current flow is:

    1. User becomes eligible for credit (submits verifications, passes the course, etc.)
    2. User purchases credit from a particular credit provider.
    3. User requests credit from the provider, usually creating an account on the provider's site.
    4. The credit provider notifies us whether the user's request for credit has been accepted or rejected.

    The dashboard is responsible for communicating the user's state in this flow.

    Arguments:
        user (User): The currently logged-in user.
        course_enrollments (list[CourseEnrollment]): List of enrollments for the
            user.

    Returns: dict

    The returned dictionary has keys that are `CourseKey`s and values that
    are dictionaries with:

        * eligible (bool): True if the user is eligible for credit in this course.
        * deadline (datetime): The deadline for purchasing and requesting credit for this course.
        * purchased (bool): Whether the user has purchased credit for this course.
        * provider_name (string): The display name of the credit provider.
        * provider_status_url (string): A URL the user can visit to check on their credit request status.
        * request_status (string): Either "pending", "approved", or "rejected"
        * error (bool): If true, an unexpected error occurred when retrieving the credit status,
            so the user should contact the support team.

    Example:
    >>> _credit_statuses(user, course_enrollments)
    {
        CourseKey.from_string("edX/DemoX/Demo_Course"): {
            "course_key": "edX/DemoX/Demo_Course",
            "eligible": True,
            "deadline": 2015-11-23 00:00:00 UTC,
            "purchased": True,
            "provider_name": "Hogwarts",
            "provider_status_url": "http://example.com/status",
            "request_status": "pending",
            "error": False
        }
    }
    """
    # NOTE(review): imported locally rather than at module top -- presumably
    # to avoid an import cycle at load time; confirm before hoisting.
    from openedx.core.djangoapps.credit import api as credit_api
    # Feature flag off
    if not settings.FEATURES.get("ENABLE_CREDIT_ELIGIBILITY"):
        return {}
    # Map each course key to the status of the user's credit request there.
    request_status_by_course = {
        request["course_key"]: request["status"]
        for request in credit_api.get_credit_requests_for_user(user.username)
    }
    # Enrollments whose mode is "credit" indicate a credit purchase.
    credit_enrollments = {
        enrollment.course_id: enrollment
        for enrollment in course_enrollments
        if enrollment.mode == "credit"
    }
    # When a user purchases credit in a course, the user's enrollment
    # mode is set to "credit" and an enrollment attribute is set
    # with the ID of the credit provider. We retrieve *all* such attributes
    # here to minimize the number of database queries.
    purchased_credit_providers = {
        attribute.enrollment.course_id: attribute.value
        for attribute in CourseEnrollmentAttribute.objects.filter(
            namespace="credit",
            name="provider_id",
            enrollment__in=credit_enrollments.values()
        ).select_related("enrollment")
    }
    provider_info_by_id = {
        provider["id"]: provider
        for provider in credit_api.get_credit_providers()
    }
    statuses = {}
    for eligibility in credit_api.get_eligibilities_for_user(user.username):
        course_key = CourseKey.from_string(unicode(eligibility["course_key"]))
        status = {
            "course_key": unicode(course_key),
            "eligible": True,
            "deadline": eligibility["deadline"],
            "purchased": course_key in credit_enrollments,
            "provider_name": None,
            "provider_status_url": None,
            "provider_id": None,
            "request_status": request_status_by_course.get(course_key),
            "error": False,
        }
        # If the user has purchased credit, then include information about the credit
        # provider from which the user purchased credit.
        # We retrieve the provider's ID from the an "enrollment attribute" set on the user's
        # enrollment when the user's order for credit is fulfilled by the E-Commerce service.
        if status["purchased"]:
            provider_id = purchased_credit_providers.get(course_key)
            if provider_id is None:
                status["error"] = True
                log.error(
                    u"Could not find credit provider associated with credit enrollment "
                    u"for user %s in course %s. The user will not be able to see his or her "
                    u"credit request status on the student dashboard. This attribute should "
                    u"have been set when the user purchased credit in the course.",
                    user.id, course_key
                )
            else:
                provider_info = provider_info_by_id.get(provider_id, {})
                status["provider_name"] = provider_info.get("display_name")
                status["provider_status_url"] = provider_info.get("status_url")
                status["provider_id"] = provider_id
        statuses[course_key] = status
    return statuses
@transaction.non_atomic_requests
@require_POST
@outer_atomic(read_committed=True)
def change_enrollment(request, check_access=True):
    """
    Modify the enrollment status for the logged-in user.

    The request parameter must be a POST request (other methods return 405)
    that specifies course_id and enrollment_action parameters. If course_id or
    enrollment_action is not specified, if course_id is not valid, if
    enrollment_action is something other than "enroll" or "unenroll", if
    enrollment_action is "enroll" and enrollment is closed for the course, or
    if enrollment_action is "unenroll" and the user is not enrolled in the
    course, a 400 error will be returned. If the user is not logged in, 403
    will be returned; it is important that only this case return 403 so the
    front end can redirect the user to a registration or login page when this
    happens. This function should only be called from an AJAX request, so
    the error messages in the responses should never actually be user-visible.

    Args:
        request (`Request`): The Django request object

    Keyword Args:
        check_access (boolean): If True, we check that an accessible course actually
            exists for the given course_key before we enroll the student.
            The default is set to False to avoid breaking legacy code or
            code with non-standard flows (ex. beta tester invitations), but
            for any standard enrollment flow you probably want this to be True.

    Returns:
        Response
    """
    # Get the user
    user = request.user
    # Ensure the user is authenticated
    if not user.is_authenticated():
        return HttpResponseForbidden()
    # Ensure we received a course_id
    action = request.POST.get("enrollment_action")
    if 'course_id' not in request.POST:
        return HttpResponseBadRequest(_("Course id not specified"))
    try:
        course_id = SlashSeparatedCourseKey.from_deprecated_string(request.POST.get("course_id"))
    except InvalidKeyError:
        log.warning(
            u"User %s tried to %s with invalid course id: %s",
            user.username,
            action,
            request.POST.get("course_id"),
        )
        return HttpResponseBadRequest(_("Invalid course id"))
    # If settings designate certain courses to auto-enlist teachers by email
    # pattern, check whether this course and the user's email address match;
    # if so, the user is granted the 'teacher' role during enrollment below.
    auto_teacher_role_access = False
    if settings.TEACHER_ROLE_COURSES is not None:
        if request.POST.get("course_id") in settings.TEACHER_ROLE_COURSES:
            if settings.REGISTRATION_TEACHER_EMAIL_PATTERNS_ALLOWED is not None:
                # This Open edX instance has restrictions on what email addresses are allowed.
                allowed_patterns = settings.REGISTRATION_TEACHER_EMAIL_PATTERNS_ALLOWED
                # We append a '$' to the regexs to prevent the common mistake of using a
                # pattern like '.*@edx\\.org' which would match 'bob@edx.org.badguy.com'
                if any(re.match(pattern + "$", user.email) for pattern in allowed_patterns):
                    # Only grant access when the user actually has a teacher profile.
                    auto_teacher_role_access = is_teacher(user)
                elif is_teacher(user):
                    log.warning("Whitelist Warning: {email} has teacher profile but was not found on whitelist. Email: {email}. Course_id: {cid}".format(email=user.email, cid=request.POST.get("course_id")))
    if action == "enroll":
        # Make sure the course exists
        # We don't do this check on unenroll, or a bad course id can't be unenrolled from
        if not modulestore().has_course(course_id):
            log.warning(
                u"User %s tried to enroll in non-existent course %s",
                user.username,
                course_id
            )
            return HttpResponseBadRequest(_("Course id is invalid"))
        # Record the user's email opt-in preference
        if settings.FEATURES.get('ENABLE_MKTG_EMAIL_OPT_IN'):
            _update_email_opt_in(request, course_id.org)
        available_modes = CourseMode.modes_for_course_dict(course_id)
        # Check whether the user is blocked from enrolling in this course
        # This can occur if the user's IP is on a global blacklist
        # or if the user is enrolling in a country in which the course
        # is not available.
        redirect_url = embargo_api.redirect_if_blocked(
            course_id, user=user, ip_address=get_ip(request),
            url=request.path
        )
        if redirect_url:
            return HttpResponse(redirect_url)
        # Check that auto enrollment is allowed for this course
        # (= the course is NOT behind a paywall)
        if CourseMode.can_auto_enroll(course_id):
            # Enroll the user using the default mode (audit)
            # We're assuming that users of the course enrollment table
            # will NOT try to look up the course enrollment model
            # by its slug. If they do, it's possible (based on the state of the database)
            # for no such model to exist, even though we've set the enrollment type
            # to "audit".
            try:
                enroll_mode = CourseMode.auto_enroll_mode(course_id, available_modes)
                if enroll_mode:
                    CourseEnrollment.enroll(user, course_id, check_access=check_access, mode=enroll_mode)
                if auto_teacher_role_access:
                    rolename = 'teacher'
                    course = get_course_by_id(course_id)
                    allow_access(course, user, rolename)
                    allow_access(course, user, 'beta')
                    # If the course has a cohort named "Teachers", add the user to it.
                    try:
                        cohort = get_cohort_by_name(course_id, 'Teachers')  # raises DoesNotExist when not present
                        with transaction.atomic():
                            add_user_to_cohort(cohort, user.email)
                    except CourseUserGroup.DoesNotExist:
                        log.warning("Cohort Teachers does not exist for auto teacher role access")
                    except ValueError as error:
                        # add_user_to_cohort raises ValueError (e.g. when the
                        # user is already in the cohort); not fatal, just log.
                        log.warning(error)
            except Exception as error:  # pylint: disable=broad-except
                # Log the caught exception itself (not the Exception class)
                # before reporting the failure to the client.
                log.warning(error)
                traceback.print_exc()
                return HttpResponseBadRequest(_("Could not enroll"))
        # If we have more than one course mode or professional ed is enabled,
        # then send the user to the choose your track page.
        # (In the case of no-id-professional/professional ed, this will redirect to a page that
        # funnels users directly into the verification / payment flow)
        if CourseMode.has_verified_mode(available_modes) or CourseMode.has_professional_mode(available_modes):
            return HttpResponse(
                reverse("course_modes_choose", kwargs={'course_id': unicode(course_id)})
            )
        # Otherwise, there is only one mode available (the default)
        return HttpResponse()
    elif action == "unenroll":
        enrollment = CourseEnrollment.get_enrollment(user, course_id)
        if not enrollment:
            return HttpResponseBadRequest(_("You are not enrolled in this course"))
        certificate_info = cert_info(user, enrollment.course_overview, enrollment.mode)
        if certificate_info.get('status') in DISABLE_UNENROLL_CERT_STATES:
            return HttpResponseBadRequest(_("Your certificate prevents you from unenrolling from this course"))
        CourseEnrollment.unenroll(user, course_id)
        return HttpResponse()
    else:
        return HttpResponseBadRequest(_("Enrollment action is invalid"))
# Need different levels of logging
@ensure_csrf_cookie
def login_user(request, error=""):  # pylint: disable=too-many-statements,unused-argument
    """AJAX request to log in the user.

    Handles both first-party (email/password in the POST body) and
    third-party (running social-auth pipeline) login. Returns a
    JsonResponse with ``success``/``value`` keys for most outcomes, or a
    plain-text 403 HttpResponse when a third-party account is not yet
    linked to a platform account.
    """
    backend_name = None
    email = None
    password = None
    redirect_url = None
    response = None
    running_pipeline = None
    # Third-party auth applies only while the pipeline is running AND the
    # user did not also submit first-party credentials (those take precedence).
    third_party_auth_requested = third_party_auth.is_enabled() and pipeline.running(request)
    third_party_auth_successful = False
    trumped_by_first_party_auth = bool(request.POST.get('email')) or bool(request.POST.get('password'))
    user = None
    platform_name = microsite.get_value("platform_name", settings.PLATFORM_NAME)

    if third_party_auth_requested and not trumped_by_first_party_auth:
        # The user has already authenticated via third-party auth and has not
        # asked to do first party auth by supplying a username or password. We
        # now want to put them through the same logging and cookie calculation
        # logic as with first-party auth.
        running_pipeline = pipeline.get(request)
        username = running_pipeline['kwargs'].get('username')
        backend_name = running_pipeline['backend']
        third_party_uid = running_pipeline['kwargs']['uid']
        requested_provider = provider.Registry.get_from_pipeline(running_pipeline)

        try:
            user = pipeline.get_authenticated_user(requested_provider, username, third_party_uid)
            third_party_auth_successful = True
        except User.DoesNotExist:
            # The social account is not linked to any platform account: tell
            # the user how to link it and refuse the login with a 403.
            AUDIT_LOG.warning(
                u"Login failed - user with username {username} has no social auth "
                "with backend_name {backend_name}".format(
                    username=username, backend_name=backend_name)
            )
            message = _(
                "You've successfully logged into your {provider_name} account, "
                "but this account isn't linked with an {platform_name} account yet."
            ).format(
                platform_name=platform_name,
                provider_name=requested_provider.name,
            )
            message += "<br/><br/>"
            message += _(
                "Use your {platform_name} username and password to log into {platform_name} below, "
                "and then link your {platform_name} account with {provider_name} from your dashboard."
            ).format(
                platform_name=platform_name,
                provider_name=requested_provider.name,
            )
            message += "<br/><br/>"
            message += _(
                "If you don't have an {platform_name} account yet, "
                "click <strong>Register</strong> at the top of the page."
            ).format(
                platform_name=platform_name
            )

            return HttpResponse(message, content_type="text/plain", status=403)
    else:
        # First-party auth: both credentials must be present in the POST body.
        if 'email' not in request.POST or 'password' not in request.POST:
            return JsonResponse({
                "success": False,
                # TODO: User error message
                "value": _('There was an error receiving your login information. Please email us.'),
            })  # TODO: this should be status code 400

        email = request.POST['email']
        password = request.POST['password']
        try:
            user = User.objects.get(email=email)
        except User.DoesNotExist:
            # Unknown email: log it (respecting PII squelching) and fall
            # through with user=None so authentication below is guaranteed
            # to fail via the invalid-username trick further down.
            if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
                AUDIT_LOG.warning(u"Login failed - Unknown user email")
            else:
                AUDIT_LOG.warning(u"Login failed - Unknown user email: {0}".format(email))

    # check if the user has a linked shibboleth account, if so, redirect the user to shib-login
    # This behavior is pretty much like what gmail does for shibboleth. Try entering some @stanford.edu
    # address into the Gmail login.
    if settings.FEATURES.get('AUTH_USE_SHIB') and user:
        try:
            eamap = ExternalAuthMap.objects.get(user=user)
            if eamap.external_domain.startswith(external_auth.views.SHIBBOLETH_DOMAIN_PREFIX):
                return JsonResponse({
                    "success": False,
                    "redirect": reverse('shib-login'),
                })  # TODO: this should be status code 301 # pylint: disable=fixme
        except ExternalAuthMap.DoesNotExist:
            # This is actually the common case, logging in user without external linked login
            AUDIT_LOG.info(u"User %s w/o external auth attempting login", user)

    # see if account has been locked out due to excessive login failures
    user_found_by_email_lookup = user
    if user_found_by_email_lookup and LoginFailures.is_feature_enabled():
        if LoginFailures.is_user_locked_out(user_found_by_email_lookup):
            lockout_message = _('This account has been temporarily locked due '
                                'to excessive login failures. Try again later.')
            return JsonResponse({
                "success": False,
                "value": lockout_message,
            })  # TODO: this should be status code 429 # pylint: disable=fixme

    # see if the user must reset his/her password due to any policy settings
    if user_found_by_email_lookup and PasswordHistory.should_user_reset_password_now(user_found_by_email_lookup):
        return JsonResponse({
            "success": False,
            "value": _('Your password has expired due to password policy on this account. You must '
                       'reset your password before you can log in again. Please click the '
                       '"Forgot Password" link on this page to reset your password before logging in again.'),
        })  # TODO: this should be status code 403 # pylint: disable=fixme

    # if the user doesn't exist, we want to set the username to an invalid
    # username so that authentication is guaranteed to fail and we can take
    # advantage of the ratelimited backend
    username = user.username if user else ""

    if not third_party_auth_successful:
        try:
            user = authenticate(username=username, password=password, request=request)
        # this occurs when there are too many attempts from the same IP address
        except RateLimitException:
            return JsonResponse({
                "success": False,
                "value": _('Too many failed login attempts. Try again later.'),
            })  # TODO: this should be status code 429 # pylint: disable=fixme

    if user is None:
        # tick the failed login counters if the user exists in the database
        if user_found_by_email_lookup and LoginFailures.is_feature_enabled():
            LoginFailures.increment_lockout_counter(user_found_by_email_lookup)

        # if we didn't find this username earlier, the account for this email
        # doesn't exist, and doesn't have a corresponding password
        if username != "":
            if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
                loggable_id = user_found_by_email_lookup.id if user_found_by_email_lookup else "<unknown>"
                AUDIT_LOG.warning(u"Login failed - password for user.id: {0} is invalid".format(loggable_id))
            else:
                AUDIT_LOG.warning(u"Login failed - password for {0} is invalid".format(email))
        return JsonResponse({
            "success": False,
            "value": _('Email or password is incorrect.'),
        })  # TODO: this should be status code 400 # pylint: disable=fixme

    # successful login, clear failed login attempts counters, if applicable
    if LoginFailures.is_feature_enabled():
        LoginFailures.clear_lockout_counter(user)

    # Track the user's sign in
    if hasattr(settings, 'LMS_SEGMENT_KEY') and settings.LMS_SEGMENT_KEY:
        tracking_context = tracker.get_tracker().resolve_context()
        analytics.identify(user.id, {
            'email': email,
            'username': username
        })

        analytics.track(
            user.id,
            "edx.bi.user.account.authenticated",
            {
                'category': "conversion",
                'label': request.POST.get('course_id'),
                'provider': None
            },
            context={
                'ip': tracking_context.get('ip'),
                'Google Analytics': {
                    'clientId': tracking_context.get('client_id')
                }
            }
        )

    if user is not None and user.is_active:
        try:
            # We do not log here, because we have a handler registered
            # to perform logging on successful logins.
            login(request, user)
            if request.POST.get('remember') == 'true':
                # 604800 seconds = one week.
                # NOTE(review): the debug message below says "never expire",
                # which does not match the one-week expiry — looks stale.
                request.session.set_expiry(604800)
                log.debug("Setting user session to never expire")
            else:
                request.session.set_expiry(0)
        except Exception as exc:  # pylint: disable=broad-except
            AUDIT_LOG.critical("Login failed - Could not create session. Is memcached running?")
            log.critical("Login failed - Could not create session. Is memcached running?")
            log.exception(exc)
            raise

        redirect_url = None  # The AJAX method calling should know the default destination upon success
        if third_party_auth_successful:
            redirect_url = pipeline.get_complete_url(backend_name)

        response = JsonResponse({
            "success": True,
            "redirect_url": redirect_url,
        })

        # Ensure that the external marketing site can
        # detect that the user is logged in.
        return set_logged_in_cookies(request, response, user)

    # Valid credentials but the account is not activated yet: resend the
    # activation email and refuse the login.
    if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
        AUDIT_LOG.warning(u"Login failed - Account not active for user.id: {0}, resending activation".format(user.id))
    else:
        AUDIT_LOG.warning(u"Login failed - Account not active for user {0}, resending activation".format(username))

    reactivation_email_for_user(user)
    not_activated_msg = _("This account has not been activated. We have sent another activation "
                          "message. Please check your email for the activation instructions.")
    return JsonResponse({
        "success": False,
        "value": not_activated_msg,
    })  # TODO: this should be status code 400 # pylint: disable=fixme
@csrf_exempt
@require_POST
@social_utils.strategy("social:complete")
def login_oauth_token(request, backend):
    """
    Authenticate the client using an OAuth access token by using the token to
    retrieve information from a third party and matching that information to an
    existing user.

    Responses:
        204 on successful login,
        401 when the token does not resolve to a known user,
        400 when no ``access_token`` was posted,
        404 when the named backend is not an OAuth1/OAuth2 backend.
    """
    warnings.warn("Please use AccessTokenExchangeView instead.", DeprecationWarning)
    backend = request.backend
    # Single isinstance call with a tuple replaces the original chained
    # isinstance checks; guard clauses replace the deep nesting.
    if not isinstance(backend, (social_oauth.BaseOAuth1, social_oauth.BaseOAuth2)):
        raise Http404
    if "access_token" not in request.POST:
        return JsonResponse({"error": "invalid_request"}, status=400)

    # Tell third party auth pipeline that this is an API call
    request.session[pipeline.AUTH_ENTRY_KEY] = pipeline.AUTH_ENTRY_LOGIN_API
    user = None
    try:
        user = backend.do_auth(request.POST["access_token"])
    except (HTTPError, AuthException):
        # Best-effort: treat any provider/auth failure as an invalid token below.
        pass
    # do_auth can return a non-User object if it fails
    if user and isinstance(user, User):
        login(request, user)
        return JsonResponse(status=204)

    # Ensure user does not re-enter the pipeline
    request.social_strategy.clean_partial_pipeline()
    return JsonResponse({"error": "invalid_token"}, status=401)
@ensure_csrf_cookie
def logout_user(request):
    """
    HTTP request to log out the user. Redirects to marketing page.
    Deletes both the CSRF and sessionid cookies so the marketing
    site can determine the logged in state of the user
    """
    # We do not log here, because we have a handler registered
    # to perform logging on successful logouts.
    logout(request)

    # CAS deployments must be sent through the CAS logout endpoint;
    # everyone else goes back to the site root.
    destination = reverse('cas-logout') if settings.FEATURES.get('AUTH_USE_CAS') else '/'
    response = redirect(destination)
    delete_logged_in_cookies(response)
    return response
@require_GET
@login_required
@ensure_csrf_cookie
def manage_user_standing(request):
    """
    Renders the view used to manage user standing. Also displays a table
    of user accounts that have been disabled and who disabled them.

    Staff-only: raises Http404 for non-staff users.
    """
    if not request.user.is_staff:
        raise Http404
    all_disabled_accounts = UserStanding.objects.filter(
        account_status=UserStanding.ACCOUNT_DISABLED
    )

    all_disabled_users = [standing.user for standing in all_disabled_accounts]
    headers = ['username', 'account_changed_by']
    # One row per disabled user: [username, who last changed the standing].
    # NOTE(review): ``user.standing`` is presumably the reverse accessor to
    # UserStanding — confirm it is a one-to-one relation, not a queryset.
    rows = [[user.username, user.standing.changed_by] for user in all_disabled_users]

    context = {'headers': headers, 'rows': rows}
    return render_to_response("manage_user_standing.html", context)
@require_POST
@login_required
@ensure_csrf_cookie
def disable_account_ajax(request):
    """
    Ajax call to change user standing. Endpoint of the form
    in manage_user_standing.html
    """
    # Staff-only endpoint.
    if not request.user.is_staff:
        raise Http404

    payload = {}

    username = request.POST.get('username')
    if username is None or username.strip() == '':
        payload['message'] = _('Please enter a username')
        return JsonResponse(payload, status=400)

    account_action = request.POST.get('account_action')
    if account_action is None:
        payload['message'] = _('Please choose an option')
        return JsonResponse(payload, status=400)

    username = username.strip()
    try:
        target_user = User.objects.get(username=username)
    except User.DoesNotExist:
        payload['message'] = _("User with username {} does not exist").format(username)
        return JsonResponse(payload, status=400)

    # Fetch (or lazily create) the standing record for this user.
    standing, _created = UserStanding.objects.get_or_create(
        user=target_user, defaults={'changed_by': request.user},
    )

    if account_action == 'disable':
        standing.account_status = UserStanding.ACCOUNT_DISABLED
        payload['message'] = _("Successfully disabled {}'s account").format(username)
        log.info(u"%s disabled %s's account", request.user, username)
    elif account_action == 'reenable':
        standing.account_status = UserStanding.ACCOUNT_ENABLED
        payload['message'] = _("Successfully reenabled {}'s account").format(username)
        log.info(u"%s reenabled %s's account", request.user, username)
    else:
        payload['message'] = _("Unexpected account status")
        return JsonResponse(payload, status=400)

    # Record who made the change and when, then persist.
    standing.changed_by = request.user
    standing.standing_last_changed_at = datetime.datetime.now(UTC)
    standing.save()

    return JsonResponse(payload)
@login_required
@ensure_csrf_cookie
def change_setting(request):
    """JSON call to change a profile setting: Right now, location"""
    # TODO (vshnayder): location is no longer used
    profile = UserProfile.objects.get(user=request.user)  # request.user.profile_cache
    new_location = request.POST.get('location')
    # Absent key -> None; a posted (even empty) value is stored as-is.
    if new_location is not None:
        profile.location = new_location
        profile.save()
    return JsonResponse({
        "success": True,
        "location": profile.location,
    })
class AccountValidationError(Exception):
    """Validation failure tied to a single account form field.

    Attributes:
        field: name of the form field that failed validation, so callers
            can report the error next to the offending input.
    """

    def __init__(self, message, field):
        Exception.__init__(self, message)
        self.field = field
@receiver(post_save, sender=User)
def user_signup_handler(sender, **kwargs):  # pylint: disable=unused-argument
    """
    handler that saves the user Signup Source
    when the user is created
    """
    # Only act on the initial save of a brand-new User.
    if not kwargs.get('created'):
        return
    site = microsite.get_value('SITE_NAME')
    if not site:
        return
    # Remember which microsite the account originated from.
    UserSignupSource(user=kwargs['instance'], site=site).save()
    log.info(u'user {} originated from a white labeled "Microsite"'.format(kwargs['instance'].id))
#NEW: School Lookup
def lookup_school(request):
    """School-name autocomplete endpoint.

    Reads the ``term`` GET parameter and, when it is at least ``min_char``
    characters long, returns up to ``max_results`` matches from
    ``search_school_details`` as a JSON array. Any other case (non-GET,
    missing or too-short term) returns the literal body 'fail'.
    """
    max_results = 20
    min_char = 3
    data = 'fail'
    if request.method == "GET":
        # .get() instead of ['term']: the original raised KeyError (a 500)
        # when the querystring had no 'term' parameter.
        q = request.GET.get('term')
        if q is not None and len(q) >= min_char:
            data = json.dumps(search_school_details(q, max_results))
    return HttpResponse(data, content_type="application/json")
def _do_create_account(form, custom_form=None,student_form=None,teacher_form=None):
    """
    Given cleaned post variables, create the User and UserProfile objects, as well as the
    registration for this user.

    Returns a tuple (User, UserProfile, Registration).

    Raises ValidationError when any supplied form has errors (including an
    unknown class code on the student form) and AccountValidationError when
    the username or email already exists.

    Note: this function is also used for creating test users.
    """
    # Collect validation errors from every form that was supplied.
    errors = {}
    errors.update(form.errors)
    if custom_form:
        errors.update(custom_form.errors)
    if student_form:
        errors.update(student_form.errors)
        if errors:
            raise ValidationError(errors)
        # The class code must match an existing ClassSet; otherwise record a
        # field error (appended if the key already exists) — raised below.
        if not check_classcode_exists(student_form.cleaned_data["class_code"]):
            try:
                errors["class_code"].append("The Class Code you provided does not match with any of our classes. Check with your supervising teacher.")
            except KeyError:
                errors.update({"class_code":"The Class Code you provided does not match with any of our classes. Check with your supervising teacher."})
    elif teacher_form:
        errors.update(teacher_form.errors)
    if errors:
        raise ValidationError(errors)

    # Teachers: reuse the matching School record or create a new one.
    if teacher_form:
        school = check_school_exists(teacher_form.cleaned_data["school_id"],teacher_form.cleaned_data["school"])
        if not school:
            school = School.objects.create(school_name = teacher_form.cleaned_data["school"])

    # The account starts inactive; activation happens via the emailed key.
    user = User(
        username=form.cleaned_data["username"],
        email=form.cleaned_data["email"],
        first_name=form.cleaned_data["first_name"],
        last_name=form.cleaned_data["last_name"],
        is_active=False
    )
    user.set_password(form.cleaned_data["password"])
    registration = Registration()

    # TODO: Rearrange so that if part of the process fails, the whole process fails.
    # Right now, we can have e.g. no registration e-mail sent out and a zombie account
    try:
        with transaction.atomic():
            user.save()
            if custom_form:
                custom_model = custom_form.save(commit=False)
                custom_model.user = user
                custom_model.save()
            if student_form:
                # Link the new student to the ClassSet named by their class code.
                student_classset = ClassSet.objects.get(class_code=student_form.cleaned_data["class_code"])
                student_profile = StudentProfile(
                    user = user,
                    school_grade = student_form.cleaned_data["school_grade"],
                    indigenous = student_form.cleaned_data["indigenous"]
                )
                try:
                    student_profile.save()
                except Exception:
                    log.exception("StudentProfile creation failed for user {id}.".format(id=user.id))
                    raise
                student_profile.classSet.add(student_classset)
            if teacher_form:
                teacher_profile = TeacherProfile(
                    user = user,
                    school = school,
                    phone = teacher_form.cleaned_data["phone"],
                    hear_about_us = teacher_form.cleaned_data["hear_about_us"]
                )
                try:
                    teacher_profile.save()
                except Exception:
                    log.exception("TeacherProfile creation failed for user {id}.".format(id=user.id))
                    raise
    except IntegrityError:
        # Figure out the cause of the integrity error
        if len(User.objects.filter(username=user.username)) > 0:
            raise AccountValidationError(
                _("An account with the Public Username '{username}' already exists.").format(username=user.username),
                field="username"
            )
        elif len(User.objects.filter(email=user.email)) > 0:
            raise AccountValidationError(
                _("An account with the Email '{email}' already exists.").format(email=user.email),
                field="email"
            )
        else:
            raise

    # add this account creation to password history
    # NOTE, this will be a NOP unless the feature has been turned on in configuration
    password_history_entry = PasswordHistory()
    password_history_entry.create(user)

    registration.register(user)

    first_name = form.cleaned_data["first_name"]
    last_name = form.cleaned_data["last_name"]
    profile_fields = [
        "level_of_education", "gender", "mailing_address", "city", "country", "goals",
        "year_of_birth"
    ]
    profile = UserProfile(
        user=user,
        name=first_name+' '+last_name,
        **{key: form.cleaned_data.get(key) for key in profile_fields}
    )
    extended_profile = form.cleaned_extended_profile
    if extended_profile:
        profile.meta = json.dumps(extended_profile)
    try:
        profile.save()
    except Exception:  # pylint: disable=broad-except
        log.exception("UserProfile creation failed for user {id}.".format(id=user.id))
        raise

    return (user, profile, registration)
def create_account_with_params(request, params):
    """
    Given a request and a dict of parameters (which may or may not have come
    from the request), create an account for the requesting user, including
    creating a comments service user object and sending an activation email.
    This also takes external/third-party auth into account, updates that as
    necessary, and authenticates the user for the request's session.

    Does not return anything.

    Raises AccountValidationError if an account with the username or email
    specified by params already exists, or ValidationError if any of the given
    parameters is invalid for any other reason.

    Issues with this code:
    * It is not transactional. If there is a failure part-way, an incomplete
      account will be created and left in the database.
    * Third-party auth passwords are not verified. There is a comment that
      they are unused, but it would be helpful to have a sanity check that
      they are sane.
    * It is over 300 lines long (!) and includes disprate functionality, from
      registration e-mails to all sorts of other things. It should be broken
      up into semantically meaningful functions.
    * The user-facing text is rather unfriendly (e.g. "Username must be a
      minimum of two characters long" rather than "Please use a username of
      at least two characters").
    """
    # Copy params so we can modify it; we can't just do dict(params) because if
    # params is request.POST, that results in a dict containing lists of values
    params = dict(params.items())

    #log.debug("reg_type is {reg}".format(reg=params["reg_type"]))
    #NEW: Check which registration type
    # NOTE(review): bitwise '&' on the two bool comparisons works, but 'and'
    # would be the conventional spelling here.
    if ((params["reg_type"]!="1")&(params["reg_type"]!="2")):
        raise ValidationError({"reg_type": "You must select if you are a teacher or student"})

    # allow for microsites to define their own set of required/optional/hidden fields
    extra_fields = microsite.get_value(
        'REGISTRATION_EXTRA_FIELDS',
        getattr(settings, 'REGISTRATION_EXTRA_FIELDS', {})
    )

    # Boolean of whether a 3rd party auth provider and credentials were provided in
    # the API so the newly created account can link with the 3rd party account.
    #
    # Note: this is orthogonal to the 3rd party authentication pipeline that occurs
    # when the account is created via the browser and redirect URLs.
    should_link_with_social_auth = third_party_auth.is_enabled() and 'provider' in params

    if should_link_with_social_auth or (third_party_auth.is_enabled() and pipeline.running(request)):
        params["password"] = pipeline.make_random_password()

    # if doing signup for an external authorization, then get email, password, name from the eamap
    # don't use the ones from the form, since the user could have hacked those
    # unless originally we didn't get a valid email or name from the external auth
    # TODO: We do not check whether these values meet all necessary criteria, such as email length
    do_external_auth = 'ExternalAuthMap' in request.session
    if do_external_auth:
        eamap = request.session['ExternalAuthMap']
        try:
            validate_email(eamap.external_email)
            params["email"] = eamap.external_email
        except ValidationError:
            # Keep the form-provided email when the external one is invalid.
            pass
        if eamap.external_name.strip() != '':
            params["name"] = eamap.external_name
        params["password"] = eamap.internal_password
        log.debug(u'In create_account with external_auth: user = %s, email=%s', params["name"], params["email"])

    extended_profile_fields = microsite.get_value('extended_profile_fields', [])
    enforce_password_policy = (
        settings.FEATURES.get("ENFORCE_PASSWORD_POLICY", False) and
        not do_external_auth
    )
    # Can't have terms of service for certain SHIB users, like at Stanford
    tos_required = (
        not settings.FEATURES.get("AUTH_USE_SHIB") or
        not settings.FEATURES.get("SHIB_DISABLE_TOS") or
        not do_external_auth or
        not eamap.external_domain.startswith(
            external_auth.views.SHIBBOLETH_DOMAIN_PREFIX
        )
    )

    form = AccountCreationForm(
        data=params,
        extra_fields=extra_fields,
        extended_profile_fields=extended_profile_fields,
        enforce_username_neq_password=True,
        enforce_password_policy=enforce_password_policy,
        tos_required=tos_required,
    )
    custom_form = get_registration_extension_form(data=params)

    # NEW: reg_type "1" = student, "2" = teacher; exactly one extra form is built.
    student_form = None
    teacher_form = None
    if params["reg_type"] == "1":  # Create a student profile form
        student_form = StudentRegistrationForm(data=params)
    elif params["reg_type"]== "2":  # Create a teacher's profile form
        log.warning("Making a teacher profile form")
        teacher_form = TeacherRegistrationForm(data=params)

    # Perform operations within a transaction that are critical to account creation
    with transaction.atomic():
        # first, create the account
        (user, profile, registration) = _do_create_account(form, custom_form,student_form,teacher_form)

        # next, link the account with social auth, if provided via the API.
        # (If the user is using the normal register page, the social auth pipeline does the linking, not this code)
        if should_link_with_social_auth:
            backend_name = params['provider']
            request.social_strategy = social_utils.load_strategy(request)
            redirect_uri = reverse('social:complete', args=(backend_name, ))
            request.backend = social_utils.load_backend(request.social_strategy, backend_name, redirect_uri)
            social_access_token = params.get('access_token')
            if not social_access_token:
                raise ValidationError({
                    'access_token': [
                        _("An access_token is required when passing value ({}) for provider.").format(
                            params['provider']
                        )
                    ]
                })
            request.session[pipeline.AUTH_ENTRY_KEY] = pipeline.AUTH_ENTRY_REGISTER_API
            pipeline_user = None
            error_message = ""
            try:
                pipeline_user = request.backend.do_auth(social_access_token, user=user)
            except AuthAlreadyAssociated:
                error_message = _("The provided access_token is already associated with another user.")
            except (HTTPError, AuthException):
                error_message = _("The provided access_token is not valid.")
            if not pipeline_user or not isinstance(pipeline_user, User):
                # Ensure user does not re-enter the pipeline
                request.social_strategy.clean_partial_pipeline()
                raise ValidationError({'access_token': [error_message]})

    # Perform operations that are non-critical parts of account creation
    preferences_api.set_user_preference(user, LANGUAGE_KEY, get_language())

    if settings.FEATURES.get('ENABLE_DISCUSSION_EMAIL_DIGEST'):
        try:
            enable_notifications(user)
        except Exception:  # pylint: disable=broad-except
            # Best-effort: a notification failure must not block registration.
            log.exception("Enable discussion notifications failed for user {id}.".format(id=user.id))

    dog_stats_api.increment("common.student.account_created")

    # If the user is registering via 3rd party auth, track which provider they use
    third_party_provider = None
    running_pipeline = None
    if third_party_auth.is_enabled() and pipeline.running(request):
        running_pipeline = pipeline.get(request)
        third_party_provider = provider.Registry.get_from_pipeline(running_pipeline)

    # Track the user's registration
    if hasattr(settings, 'LMS_SEGMENT_KEY') and settings.LMS_SEGMENT_KEY:
        tracking_context = tracker.get_tracker().resolve_context()
        identity_args = [
            user.id,  # pylint: disable=no-member
            {
                'email': user.email,
                'username': user.username,
                'name': profile.name,
                'age': profile.age,
                'education': profile.level_of_education_display,
                'address': profile.mailing_address,
                'gender': profile.gender_display,
                'country': unicode(profile.country),
            }
        ]

        if hasattr(settings, 'MAILCHIMP_NEW_USER_LIST_ID'):
            identity_args.append({
                "MailChimp": {
                    "listId": settings.MAILCHIMP_NEW_USER_LIST_ID
                }
            })

        analytics.identify(*identity_args)

        analytics.track(
            user.id,
            "edx.bi.user.account.registered",
            {
                'category': 'conversion',
                'label': params.get('course_id'),
                'provider': third_party_provider.name if third_party_provider else None
            },
            context={
                'ip': tracking_context.get('ip'),
                'Google Analytics': {
                    'clientId': tracking_context.get('client_id')
                }
            }
        )

    create_comments_service_user(user)

    # Don't send email if we are:
    #
    # 1. Doing load testing.
    # 2. Random user generation for other forms of testing.
    # 3. External auth bypassing activation.
    # 4. Have the platform configured to not require e-mail activation.
    # 5. Registering a new user using a trusted third party provider (with skip_email_verification=True)
    #
    # Note that this feature is only tested as a flag set one way or
    # the other for *new* systems. we need to be careful about
    # changing settings on a running system to make sure no users are
    # left in an inconsistent state (or doing a migration if they are).
    send_email = (
        not settings.FEATURES.get('SKIP_EMAIL_VALIDATION', None) and
        not settings.FEATURES.get('AUTOMATIC_AUTH_FOR_TESTING') and
        not (do_external_auth and settings.FEATURES.get('BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH')) and
        not (
            third_party_provider and third_party_provider.skip_email_verification and
            user.email == running_pipeline['kwargs'].get('details', {}).get('email')
        )
    )
    if send_email:
        context = {
            'name': profile.name,
            'key': registration.activation_key,
        }
        # composes activation email
        subject = render_to_string('emails/activation_email_subject.txt', context)
        # Email subject *must not* contain newlines
        subject = ''.join(subject.splitlines())
        message = render_to_string('emails/activation_email.txt', context)
        from_address = microsite.get_value(
            'email_from_address',
            settings.DEFAULT_FROM_EMAIL
        )
        try:
            if settings.FEATURES.get('REROUTE_ACTIVATION_EMAIL'):
                # Dev/staging: divert all activation mail to one inbox.
                dest_addr = settings.FEATURES['REROUTE_ACTIVATION_EMAIL']
                message = ("Activation for %s (%s): %s\n" % (user, user.email, profile.name) +
                           '-' * 80 + '\n\n' + message)
                mail.send_mail(subject, message, from_address, [dest_addr], fail_silently=False)
            else:
                user.email_user(subject, message, from_address)
        except Exception:  # pylint: disable=broad-except
            log.error(u'Unable to send activation email to user from "%s"', from_address, exc_info=True)
    else:
        # No activation email required: activate the registration right away.
        registration.activate()

    # ----- CHANGED THIS: FEATURE NOW LOGS OUT AFTER DASHBOARD ------
    # Immediately after a user creates an account, we log them in. They are only
    # logged in until they close the browser. They can't log in again until they click
    # the activation link from the email.
    new_user = authenticate(username=user.username, password=params['password'])
    login(request, new_user)
    request.session.set_expiry(0)

    # TODO: there is no error checking here to see that the user actually logged in successfully,
    # and is not yet an active user.
    if new_user is not None:
        AUDIT_LOG.info(u"Login success on new account creation - {0}".format(new_user.username))

    if do_external_auth:
        eamap.user = new_user
        eamap.dtsignup = datetime.datetime.now(UTC)
        eamap.save()
        AUDIT_LOG.info(u"User registered with external_auth %s", new_user.username)
        AUDIT_LOG.info(u'Updated ExternalAuthMap for %s to be %s', new_user.username, eamap)

        if settings.FEATURES.get('BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH'):
            log.info('bypassing activation email')
            new_user.is_active = True
            new_user.save()
            AUDIT_LOG.info(u"Login activated on extauth account - {0} ({1})".format(new_user.username, new_user.email))

    return new_user
@csrf_exempt
def create_account(request, post_override=None):
    """
    JSON call to create new edX account.
    Used by form in signup_modal.html, which is included into navigation.html
    """
    warnings.warn("Please use RegistrationView instead.", DeprecationWarning)

    try:
        user = create_account_with_params(request, post_override or request.POST)
    except AccountValidationError as exc:
        # Duplicate username/email: name the offending field for the client.
        return JsonResponse({'success': False, 'value': exc.message, 'field': exc.field}, status=400)
    except ValidationError as exc:
        # Report only the first invalid field and its first error message.
        field, error_list = next(exc.message_dict.iteritems())
        return JsonResponse(
            {"success": False, "field": field, "value": error_list[0]},
            status=400,
        )

    # The AJAX caller knows its own default destination upon success; only a
    # resumed third-party-auth pipeline supplies an explicit redirect.
    destination = None
    if third_party_auth.is_enabled() and pipeline.running(request):
        destination = pipeline.get_complete_url(pipeline.get(request)['backend'])

    response = JsonResponse({
        'success': True,
        'redirect_url': destination,
    })
    set_logged_in_cookies(request, response, user)
    return response
def auto_auth(request):
    """
    Create or configure a user account, then log in as that user.

    Enabled only when
    settings.FEATURES['AUTOMATIC_AUTH_FOR_TESTING'] is true.

    Accepts the following querystring parameters:
    * `username`, `email`, and `password` for the user account
    * `full_name` for the user profile (the user's full name; defaults to the username)
    * `staff`: Set to "true" to make the user global staff.
    * `superuser`: Set to "true" to make the user a superuser.
    * `course_id`: Enroll the student in the course with `course_id`
    * `roles`: Comma-separated list of roles to grant the student in the course with `course_id`
    * `no_login`: Define this to create the user but not login
    * `redirect`: Set to "true" will redirect to course if course_id is defined, otherwise it will redirect to dashboard

    If username, email, or password are not provided, use
    randomly generated credentials.

    NOTE(review): this view is a testing backdoor — it must never be exposed
    when the AUTOMATIC_AUTH_FOR_TESTING feature flag is off.
    """
    # Generate a unique name to use if none provided
    unique_name = uuid.uuid4().hex[0:30]

    # Use the params from the request, otherwise use these defaults
    username = request.GET.get('username', unique_name)
    password = request.GET.get('password', unique_name)
    email = request.GET.get('email', unique_name + "@example.com")
    full_name = request.GET.get('full_name', username)
    is_staff = request.GET.get('staff', None)
    is_superuser = request.GET.get('superuser', None)
    course_id = request.GET.get('course_id', None)

    # mode has to be one of 'honor'/'professional'/'verified'/'audit'/'no-id-professional'/'credit'
    enrollment_mode = request.GET.get('enrollment_mode', 'honor')

    course_key = None
    if course_id:
        course_key = CourseLocator.from_string(course_id)
    role_names = [v.strip() for v in request.GET.get('roles', '').split(',') if v.strip()]
    redirect_when_done = request.GET.get('redirect', '').lower() == 'true'
    login_when_done = 'no_login' not in request.GET

    form = AccountCreationForm(
        data={
            'username': username,
            'email': email,
            'password': password,
            'name': full_name,
        },
        tos_required=False
    )

    # Attempt to create the account.
    # If successful, this will return a tuple containing
    # the new user object.
    try:
        user, profile, reg = _do_create_account(form)
    except AccountValidationError:
        # Attempt to retrieve the existing user.
        user = User.objects.get(username=username)
        user.email = email
        user.set_password(password)
        user.save()
        profile = UserProfile.objects.get(user=user)
        reg = Registration.objects.get(user=user)

    # Set the user's global staff bit
    if is_staff is not None:
        user.is_staff = (is_staff == "true")
        user.save()

    if is_superuser is not None:
        user.is_superuser = (is_superuser == "true")
        user.save()

    # Activate the user
    reg.activate()
    reg.save()

    # ensure parental consent threshold is met
    year = datetime.date.today().year
    age_limit = settings.PARENTAL_CONSENT_AGE_LIMIT
    profile.year_of_birth = (year - age_limit) - 1
    profile.save()

    # Enroll the user in a course
    if course_key is not None:
        CourseEnrollment.enroll(user, course_key, mode=enrollment_mode)

    # Apply the roles
    for role_name in role_names:
        role = Role.objects.get(name=role_name, course_id=course_key)
        user.roles.add(role)

    # Log in as the user
    if login_when_done:
        user = authenticate(username=username, password=password)
        login(request, user)

    create_comments_service_user(user)

    # Provide the user with a valid CSRF token
    # then return a 200 response unless redirect is true
    if redirect_when_done:
        # Redirect to course info page if course_id is known
        if course_id:
            try:
                # redirect to course info page in LMS
                redirect_url = reverse(
                    'info',
                    kwargs={'course_id': course_id}
                )
            except NoReverseMatch:
                # redirect to course outline page in Studio
                redirect_url = reverse(
                    'course_handler',
                    kwargs={'course_key_string': course_id}
                )
        else:
            try:
                # redirect to dashboard for LMS
                redirect_url = reverse('dashboard')
            except NoReverseMatch:
                # redirect to home for Studio
                redirect_url = reverse('home')

        return redirect(redirect_url)
    elif request.META.get('HTTP_ACCEPT') == 'application/json':
        response = JsonResponse({
            'created_status': u"Logged in" if login_when_done else "Created",
            'username': username,
            'email': email,
            'password': password,
            'user_id': user.id,  # pylint: disable=no-member
            'anonymous_id': anonymous_id_for_user(user, None),
        })
    else:
        success_msg = u"{} user {} ({}) with password {} and user_id {}".format(
            u"Logged in" if login_when_done else "Created",
            username, email, password, user.id  # pylint: disable=no-member
        )
        response = HttpResponse(success_msg)
    response.set_cookie('csrftoken', csrf(request)['csrf_token'])
    return response
@ensure_csrf_cookie
def activate_account(request, key):
    """When link in activation e-mail is clicked.

    Looks up the Registration row by its activation key, activates the
    account if needed, performs any pending auto-enrollments, and renders
    a completion / invalid-key page.
    """
    regs = Registration.objects.filter(activation_key=key)
    if len(regs) == 1:
        user_logged_in = request.user.is_authenticated()
        already_active = True
        if not regs[0].user.is_active:
            regs[0].activate()
            already_active = False
            # Enroll student in any pending courses he/she may have if auto_enroll flag is set
            student = User.objects.filter(id=regs[0].user_id)
            if student:
                ceas = CourseEnrollmentAllowed.objects.filter(email=student[0].email)
                for cea in ceas:
                    if cea.auto_enroll:
                        enrollment = CourseEnrollment.enroll(student[0], cea.course_id)
                        manual_enrollment_audit = ManualEnrollmentAudit.get_manual_enrollment_by_email(student[0].email)
                        if manual_enrollment_audit is not None:
                            # get the enrolled by user and reason from the ManualEnrollmentAudit table.
                            # then create a new ManualEnrollmentAudit table entry for the same email
                            # different transition state.
                            ManualEnrollmentAudit.create_manual_enrollment_audit(
                                manual_enrollment_audit.enrolled_by, student[0].email, ALLOWEDTOENROLL_TO_ENROLLED,
                                manual_enrollment_audit.reason, enrollment
                            )
                # MM NEW: if student is enrolled with a class code, then enrol in the class code's course
                try:
                    # for now, we'll assume there'll only be one class_code during this activation
                    sp = student[0].studentprofile
                    c = sp.classSet.all()
                    if c:  # if there is a class_code, add
                        enrollment = CourseEnrollment.enroll(student[0],c[0].course_id)
                except StudentProfile.DoesNotExist:
                    pass
        resp = render_to_response(
            "registration/activation_complete.html",
            {
                'user_logged_in': user_logged_in,
                'already_active': already_active
            }
        )
        return resp
    if len(regs) == 0:
        # No registration matches the key: show the invalid-key page.
        return render_to_response(
            "registration/activation_invalid.html",
            {'csrf': csrf(request)['csrf_token']}
        )
    # More than one match on a supposedly unique key: treat as a server error.
    return HttpResponseServerError(_("Unknown error. Please e-mail us to let us know how it happened."))
@csrf_exempt
@require_POST
def password_reset(request):
    """ Attempts to send a password reset e-mail. """
    # Add some rate limiting here by re-using the RateLimitMixin as a helper class
    rate_limiter = BadRequestRateLimiter()
    if rate_limiter.is_rate_limit_exceeded(request):
        AUDIT_LOG.warning("Rate limit exceeded in password_reset")
        return HttpResponseForbidden()

    reset_form = PasswordResetFormNoActive(request.POST)
    if not reset_form.is_valid():
        # bad user? tick the rate limiter counter
        AUDIT_LOG.info("Bad password_reset user passed in.")
        rate_limiter.tick_bad_request_counter(request)
    else:
        reset_form.save(use_https=request.is_secure(),
                        from_email=microsite.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL),
                        request=request,
                        domain_override=request.get_host())
        # When password change is complete, a "edx.user.settings.changed" event will be emitted.
        # But because changing the password is multi-step, we also emit an event here so that we can
        # track where the request was initiated.
        tracker.emit(
            SETTING_CHANGE_INITIATED,
            {
                "setting": "password",
                "old": None,
                "new": None,
                "user_id": request.user.id,
            }
        )

    # Success is reported regardless of form validity (presumably so the
    # endpoint cannot be used to probe which addresses have accounts).
    return JsonResponse({
        'success': True,
        'value': render_to_string('registration/password_reset_done.html', {}),
    })
def password_reset_confirm_wrapper(
    request,
    uidb36=None,
    token=None,
):
    """ A wrapper around django.contrib.auth.views.password_reset_confirm.
        Needed because we want to set the user as active at this step.
    """
    # cribbed from django.contrib.auth.views.password_reset_confirm
    try:
        uid_int = base36_to_int(uidb36)
        user = User.objects.get(id=uid_int)
        user.is_active = True
        user.save()
    except (ValueError, User.DoesNotExist):
        # NOTE(review): on this path `user` (and `uid_int`) stay unbound, yet
        # the POST branches below reference them — a malformed uidb36 on a
        # POST would raise NameError rather than render an error page. Verify.
        pass
    # tie in password strength enforcement as an optional level of
    # security protection
    err_msg = None
    if request.method == 'POST':
        password = request.POST['new_password1']
        if settings.FEATURES.get('ENFORCE_PASSWORD_POLICY', False):
            try:
                validate_password_length(password)
                validate_password_complexity(password)
                validate_password_dictionary(password)
            except ValidationError, err:
                err_msg = _('Password: ') + '; '.join(err.messages)
        # also, check the password reuse policy
        if not PasswordHistory.is_allowable_password_reuse(user, password):
            if user.is_staff:
                num_distinct = settings.ADVANCED_SECURITY_CONFIG['MIN_DIFFERENT_STAFF_PASSWORDS_BEFORE_REUSE']
            else:
                num_distinct = settings.ADVANCED_SECURITY_CONFIG['MIN_DIFFERENT_STUDENT_PASSWORDS_BEFORE_REUSE']
            # Because of how ngettext is, splitting the following into shorter lines would be ugly.
            # pylint: disable=line-too-long
            err_msg = ungettext(
                "You are re-using a password that you have used recently. You must have {num} distinct password before reusing a previous password.",
                "You are re-using a password that you have used recently. You must have {num} distinct passwords before reusing a previous password.",
                num_distinct
            ).format(num=num_distinct)
        # also, check to see if passwords are getting reset too frequent
        if PasswordHistory.is_password_reset_too_soon(user):
            num_days = settings.ADVANCED_SECURITY_CONFIG['MIN_TIME_IN_DAYS_BETWEEN_ALLOWED_RESETS']
            # Because of how ngettext is, splitting the following into shorter lines would be ugly.
            # pylint: disable=line-too-long
            err_msg = ungettext(
                "You are resetting passwords too frequently. Due to security policies, {num} day must elapse between password resets.",
                "You are resetting passwords too frequently. Due to security policies, {num} days must elapse between password resets.",
                num_days
            ).format(num=num_days)
    if err_msg:
        # We have an password reset attempt which violates some security policy, use the
        # existing Django template to communicate this back to the user
        context = {
            'validlink': True,
            'form': None,
            'title': _('Password reset unsuccessful'),
            'err_msg': err_msg,
            'platform_name': microsite.get_value('platform_name', settings.PLATFORM_NAME),
        }
        return TemplateResponse(request, 'registration/password_reset_confirm.html', context)
    else:
        # we also want to pass settings.PLATFORM_NAME in as extra_context
        extra_context = {"platform_name": microsite.get_value('platform_name', settings.PLATFORM_NAME)}
        # Support old password reset URLs that used base36 encoded user IDs.
        # https://github.com/django/django/commit/1184d077893ff1bc947e45b00a4d565f3df81776#diff-c571286052438b2e3190f8db8331a92bR231
        try:
            uidb64 = force_text(urlsafe_base64_encode(force_bytes(base36_to_int(uidb36))))
        except ValueError:
            uidb64 = '1'  # dummy invalid ID (incorrect padding for base64)
        if request.method == 'POST':
            # remember what the old password hash is before we call down
            old_password_hash = user.password
            result = password_reset_confirm(
                request, uidb64=uidb64, token=token, extra_context=extra_context
            )
            # get the updated user
            updated_user = User.objects.get(id=uid_int)
            # did the password hash change, if so record it in the PasswordHistory
            if updated_user.password != old_password_hash:
                entry = PasswordHistory()
                entry.create(updated_user)
            return result
        else:
            return password_reset_confirm(
                request, uidb64=uidb64, token=token, extra_context=extra_context
            )
def reactivation_email_for_user(user):
    """Re-send the account activation e-mail for ``user``.

    Returns a JsonResponse whose ``success`` key reports the outcome.
    """
    try:
        registration = Registration.objects.get(user=user)
    except Registration.DoesNotExist:
        return JsonResponse({
            "success": False,
            "error": _('No inactive user with this e-mail exists'),
        })  # TODO: this should be status code 400  # pylint: disable=fixme

    email_context = {
        'name': user.profile.name,
        'key': registration.activation_key,
    }
    # Collapse the rendered subject to a single line, as e-mail headers require.
    rendered_subject = render_to_string('emails/activation_email_subject.txt', email_context)
    subject = ''.join(rendered_subject.splitlines())
    message = render_to_string('emails/activation_email.txt', email_context)

    try:
        user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)
    except Exception:  # pylint: disable=broad-except
        log.error(u'Unable to send reactivation email from "%s"', settings.DEFAULT_FROM_EMAIL, exc_info=True)
        return JsonResponse({
            "success": False,
            "error": _('Unable to send reactivation email')
        })  # TODO: this should be status code 500  # pylint: disable=fixme

    return JsonResponse({"success": True})
def validate_new_email(user, new_email):
    """
    Given a new email for a user, does some basic verification of the new address. If any issues are encountered
    with verification a ValueError will be thrown.

    Raises:
        ValueError: if the address is malformed, identical to the user's
            current address, or already taken by another account.
    """
    try:
        validate_email(new_email)
    except ValidationError:
        raise ValueError(_('Valid e-mail address required.'))

    if new_email == user.email:
        raise ValueError(_('Old email is the same as the new email.'))

    # `.exists()` issues a cheap EXISTS query instead of counting every
    # matching row as the previous `.count() != 0` did; same truth value.
    if User.objects.filter(email=new_email).exists():
        raise ValueError(_('An account with this e-mail already exists.'))
def do_email_change_request(user, new_email, activation_key=None):
    """
    Given a new email for a user, does some basic verification of the new address and sends an activation message
    to the new address. If any issues are encountered with verification or sending the message, a ValueError will
    be thrown.
    """
    # Reuse the user's pending change record if one exists, else start fresh.
    pending_changes = PendingEmailChange.objects.filter(user=user)
    if pending_changes:
        pec = pending_changes[0]
    else:
        pec = PendingEmailChange()
        pec.user = user

    # if activation_key is not passing as an argument, generate a random key
    if not activation_key:
        activation_key = uuid.uuid4().hex
    pec.new_email = new_email
    pec.activation_key = activation_key
    pec.save()

    context = {
        'key': pec.activation_key,
        'old_email': user.email,
        'new_email': pec.new_email
    }
    # Subject must be a single line for the e-mail header.
    rendered_subject = render_to_string('emails/email_change_subject.txt', context)
    subject = ''.join(rendered_subject.splitlines())
    message = render_to_string('emails/email_change.txt', context)

    from_address = microsite.get_value(
        'email_from_address',
        settings.DEFAULT_FROM_EMAIL
    )

    try:
        mail.send_mail(subject, message, from_address, [pec.new_email])
    except Exception:  # pylint: disable=broad-except
        log.error(u'Unable to send email activation link to user from "%s"', from_address, exc_info=True)
        raise ValueError(_('Unable to send email activation link. Please try again later.'))

    # When the email address change is complete, a "edx.user.settings.changed" event will be emitted.
    # But because changing the email address is multi-step, we also emit an event here so that we can
    # track where the request was initiated.
    tracker.emit(
        SETTING_CHANGE_INITIATED,
        {
            "setting": "email",
            "old": context['old_email'],
            "new": context['new_email'],
            "user_id": user.id,
        }
    )
@ensure_csrf_cookie
def confirm_email_change(request, key):  # pylint: disable=unused-argument
    """
    User requested a new e-mail. This is called when the activation
    link is clicked. We confirm with the old e-mail, and update the
    account to the new address.
    """
    # All DB work happens in one transaction; every failure path explicitly
    # marks the transaction for rollback before returning.
    with transaction.atomic():
        try:
            pec = PendingEmailChange.objects.get(activation_key=key)
        except PendingEmailChange.DoesNotExist:
            response = render_to_response("invalid_email_key.html", {})
            transaction.set_rollback(True)
            return response

        user = pec.user
        address_context = {
            'old_email': user.email,
            'new_email': pec.new_email
        }

        # Somebody may have claimed the address since the change was requested.
        if len(User.objects.filter(email=pec.new_email)) != 0:
            response = render_to_response("email_exists.html", {})
            transaction.set_rollback(True)
            return response

        subject = render_to_string('emails/email_change_subject.txt', address_context)
        subject = ''.join(subject.splitlines())
        message = render_to_string('emails/confirm_email_change.txt', address_context)

        # Archive the outgoing address (with a timestamp) in the profile meta
        # before switching over.
        u_prof = UserProfile.objects.get(user=user)
        meta = u_prof.get_meta()
        if 'old_emails' not in meta:
            meta['old_emails'] = []
        meta['old_emails'].append([user.email, datetime.datetime.now(UTC).isoformat()])
        u_prof.set_meta(meta)
        u_prof.save()
        # Send it to the old email...
        try:
            user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)
        except Exception:    # pylint: disable=broad-except
            log.warning('Unable to send confirmation email to old address', exc_info=True)
            response = render_to_response("email_change_failed.html", {'email': user.email})
            transaction.set_rollback(True)
            return response

        user.email = pec.new_email
        user.save()
        pec.delete()
        # And send it to the new email...
        try:
            user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)
        except Exception:    # pylint: disable=broad-except
            log.warning('Unable to send confirmation email to new address', exc_info=True)
            response = render_to_response("email_change_failed.html", {'email': pec.new_email})
            # Rolling back here also undoes the address change above.
            transaction.set_rollback(True)
            return response

        response = render_to_response("email_change_successful.html", address_context)
        return response
@require_POST
@login_required
@ensure_csrf_cookie
def change_email_settings(request):
    """Modify logged-in user's setting for receiving emails from a course."""
    user = request.user

    course_id = request.POST.get("course_id")
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)

    if not request.POST.get("receive_emails"):
        # Opting out: record (or keep) an Optout marker for this user/course.
        Optout.objects.get_or_create(user=user, course_id=course_key)
        log.info(
            u"User %s (%s) opted out of receiving emails from course %s",
            user.username,
            user.email,
            course_id,
        )
        track.views.server_track(
            request,
            "change-email-settings",
            {"receive_emails": "no", "course": course_id},
            page='dashboard',
        )
    else:
        # Opting in: remove any existing opt-out marker.
        existing_optout = Optout.objects.filter(user=user, course_id=course_key)
        if existing_optout:
            existing_optout.delete()
        log.info(
            u"User %s (%s) opted in to receive emails from course %s",
            user.username,
            user.email,
            course_id,
        )
        track.views.server_track(
            request,
            "change-email-settings",
            {"receive_emails": "yes", "course": course_id},
            page='dashboard',
        )

    return JsonResponse({"success": True})
def _get_course_programs(user, user_enrolled_courses):  # pylint: disable=invalid-name
    """Build a dictionary of program data required for display on the student dashboard.

    Given a user and an iterable of course keys, find all programs relevant to the
    user and return them in a dictionary keyed by course key.

    Arguments:
        user (User): The user to authenticate as when requesting programs.
        user_enrolled_courses (list): List of course keys representing the courses in which
            the given user has active enrollments.

    Returns:
        dict, containing programs keyed by course. Empty if programs cannot be retrieved.
    """
    course_programs = get_programs_for_dashboard(user, user_enrolled_courses)
    programs_data = {}
    for course_key, program in course_programs.viewitems():
        # Only active XSeries programs appear on the dashboard.
        if program.get('status') != 'active' or program.get('category') != 'xseries':
            continue
        try:
            marketing_url = urljoin(
                settings.MKTG_URLS.get('ROOT'), 'xseries' + '/{}'
            ).format(program['marketing_slug'])
            programs_data[course_key] = {
                'course_count': len(program['course_codes']),
                'display_name': program['name'],
                'category': program.get('category'),
                'program_id': program['id'],
                'program_marketing_url': marketing_url,
                'display_category': 'XSeries'
            }
        except KeyError:
            # Missing required field: skip this program, leave no partial entry.
            log.warning('Program structure is invalid, skipping display: %r', program)
    return programs_data
@login_required
def sso(request):
    """Discourse single-sign-on endpoint: validate the signed SSO payload
    from Discourse and redirect back with a signed user payload."""
    payload = request.GET.get('sso')
    signature = request.GET.get('sig')

    if None in [payload, signature]:
        return HttpResponseBadRequest('No SSO payload or signature. Please contact support if this problem persists.')

    ## Validate the payload
    try:
        payload = urllib.unquote(payload)
        decoded = base64.decodestring(payload)
        assert 'nonce' in decoded
        # NOTE(review): this length check is effectively dead — an empty
        # payload already fails the 'nonce' assertion above.
        assert len(payload) > 0
    except AssertionError:
        return HttpResponseBadRequest('Invalid payload. Please contact support if this problem persists.')

    key = str(settings.DISCOURSE_SSO_SECRET)  # must not be unicode
    h = hmac.new(key, payload, digestmod=hashlib.sha256)
    this_signature = h.hexdigest()

    # NOTE(review): `!=` is not a constant-time comparison, leaving a timing
    # side channel on signature verification; hmac.compare_digest would be
    # preferable, but needs care with str/unicode under Python 2 — confirm.
    if this_signature != signature:
        return HttpResponseBadRequest('Invalid payload. Please contact support if this problem persists.')

    ## Build the return payload
    qs = parse_qs(decoded)
    params = {
        'nonce': qs['nonce'][0],
        'email': request.user.email,
        'external_id': request.user.id,
        'username': request.user.username,
        'moderator': is_teacher(request.user),
        'admin': request.user.is_staff or request.user.is_superuser,
    }
    return_payload = base64.encodestring(urllib.urlencode(params))
    h = hmac.new(key, return_payload, digestmod=hashlib.sha256)
    query_string = urllib.urlencode({'sso': return_payload, 'sig': h.hexdigest()})

    ## Redirect back to Discourse
    url = '%s/session/sso_login' % settings.DISCOURSE_BASE_URL
    return HttpResponseRedirect('%s?%s' % (url, query_string))
@login_required
def codeframe(request, gist_id):
    """Render the codeframe page pointing at the embed script for *gist_id*."""
    gist_url = 'https://' + settings.SITE_NAME + '/gist/' + gist_id + ".js"
    return render_to_response('codeframe.html', {'gist_url': gist_url})
@login_required
def gistembed(request, gist_id):
    """Render the gist embed page, passing through whitelisted GET options."""
    # GET parameters the embed widget understands and may override.
    passthrough_options = ["file", "line", "hide-footer", "highlight-line",
                           "hide-line-numbers", "show-loading", "show-spinner"]
    data_gist = {'id': gist_id, 'hide-footer': "true"}
    if request.method == "GET":
        for option in passthrough_options:
            value = request.GET.get(option, None)
            if value:
                data_gist[option] = value
    return render_to_response('gistembed.html', {'data_gist': data_gist})
|
MakeHer/edx-platform
|
common/djangoapps/student/views.py
|
Python
|
agpl-3.0
| 108,939
|
[
"VisIt"
] |
aaa202329aab57ca8a075a5fe97a02a4c118aa78ead396e9b3efbf254d06e733
|
"""Kernel K-means"""
# Author: Mathieu Blondel <mathieu@mblondel.org>
# License: BSD 3 clause
import numpy as np
from sklearn.base import BaseEstimator, ClusterMixin
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.utils import check_random_state
class KernelKMeans(BaseEstimator, ClusterMixin):
    """Kernel k-means clustering.

    Runs k-means in the feature space induced by ``kernel`` via the kernel
    trick, so only the Gram matrix is ever materialised.

    Reference
    ---------
    Kernel k-means, Spectral Clustering and Normalized Cuts.
    Inderjit S. Dhillon, Yuqiang Guan, Brian Kulis.
    KDD 2004.
    """

    def __init__(self, n_clusters=3, max_iter=50, tol=1e-5, random_state=None,
                 kernel="linear", gamma=None, degree=3, coef0=1,
                 kernel_params=None, verbose=0):
        self.n_clusters = n_clusters
        self.max_iter = max_iter
        self.tol = tol
        self.random_state = random_state
        self.kernel = kernel
        self.gamma = gamma
        self.degree = degree
        self.coef0 = coef0
        self.kernel_params = kernel_params
        self.verbose = verbose

    @property
    def _pairwise(self):
        # Tells scikit-learn utilities that X is a Gram matrix when the
        # kernel is precomputed.
        return self.kernel == "precomputed"

    def _get_kernel(self, X, Y=None):
        """Return the kernel (Gram) matrix between X and Y (or X and itself)."""
        if callable(self.kernel):
            params = self.kernel_params or {}
        else:
            params = {"gamma": self.gamma,
                      "degree": self.degree,
                      "coef0": self.coef0}
        return pairwise_kernels(X, Y, metric=self.kernel,
                                filter_params=True, **params)

    def fit(self, X, y=None, sample_weight=None):
        """Cluster X; sets ``labels_``, ``within_distances_`` and ``X_fit_``."""
        n_samples = X.shape[0]
        K = self._get_kernel(X)

        # BUG FIX: the previous truthiness test (`if sample_weight`) raises
        # "truth value of an array is ambiguous" for ndarray input; compare
        # against None instead.
        if sample_weight is None:
            sw = np.ones(n_samples)
        else:
            sw = np.asarray(sample_weight)
        self.sample_weight_ = sw

        # Use a dedicated RandomState instead of reseeding NumPy's *global*
        # RNG, which silently affected unrelated code. For integer seeds the
        # initial labels are identical to the previous behaviour.
        rs = check_random_state(self.random_state)
        self.labels_ = rs.randint(self.n_clusters, size=n_samples)

        dist = np.zeros((n_samples, self.n_clusters))
        self.within_distances_ = np.zeros(self.n_clusters)

        for it in range(self.max_iter):
            dist.fill(0)
            self._compute_dist(K, dist, self.within_distances_,
                               update_within=True)
            labels_old = self.labels_
            self.labels_ = dist.argmin(axis=1)

            # Fraction of samples whose cluster changed since last iteration.
            n_same = np.sum((self.labels_ - labels_old) == 0)
            if 1 - float(n_same) / n_samples < self.tol:
                if self.verbose:
                    # Parenthesised so it works as a statement on Python 2
                    # and a function call on Python 3.
                    print("Converged at iteration %d" % (it + 1))
                break

        self.X_fit_ = X
        return self

    def _compute_dist(self, K, dist, within_distances, update_within):
        """Compute a n_samples x n_clusters distance matrix using the
        kernel trick."""
        sw = self.sample_weight_

        for j in range(self.n_clusters):
            mask = self.labels_ == j

            if np.sum(mask) == 0:
                raise ValueError("Empty cluster found, try smaller n_cluster.")

            denom = sw[mask].sum()
            denomsq = denom * denom

            if update_within:
                KK = K[mask][:, mask]  # K[mask, mask] does not work.
                dist_j = np.sum(np.outer(sw[mask], sw[mask]) * KK / denomsq)
                within_distances[j] = dist_j
                dist[:, j] += dist_j
            else:
                dist[:, j] += within_distances[j]

            dist[:, j] -= 2 * np.sum(sw[mask] * K[:, mask], axis=1) / denom

    def predict(self, X):
        """Assign each sample of X to the nearest cluster learned by fit()."""
        K = self._get_kernel(X, self.X_fit_)
        n_samples = X.shape[0]
        dist = np.zeros((n_samples, self.n_clusters))
        self._compute_dist(K, dist, self.within_distances_,
                           update_within=False)
        return dist.argmin(axis=1)
if __name__ == '__main__':
    # Smoke test: two well-separated blobs should be recovered quickly.
    from sklearn.datasets import make_blobs
    X, y = make_blobs(n_samples=10, centers=2, random_state=0)
    km = KernelKMeans(n_clusters=2, max_iter=2, random_state=0, verbose=1)
    # Python 2 print statements; fit_predict is inherited from ClusterMixin.
    print km.fit_predict(X)[:10]
    print km.predict(X[:10])
|
yao-matrix/mLearning
|
ml-workshop/src/kernel_kmeans.py
|
Python
|
apache-2.0
| 4,055
|
[
"Brian"
] |
2e58b40d7d904fec8757ae17ce3b59132d9cae5334dfdaca8cd7eb4911a21d77
|
# -*- coding: utf-8 -*-
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import pytest
from hyperspy import signals
from hyperspy import components1d
from hyperspy.decorators import lazifyTestClass
@lazifyTestClass
class TestRemoveBackground1DGaussian:

    def setup_method(self, method):
        """Build a noise-free Gaussian signal on a 0.01-spaced axis."""
        component = components1d.Gaussian()
        component.A.value = 10
        component.centre.value = 10
        component.sigma.value = 1
        axis_values = np.arange(0, 20, 0.01)
        self.signal = signals.Signal1D(component.function(axis_values))
        self.signal.axes_manager[0].scale = 0.01
        self.signal.metadata.Signal.binned = False

    def test_background_remove_gaussian(self):
        # Default (fast) fit: the background-subtracted data should be ~zero.
        residual = self.signal.remove_background(
            signal_range=(None, None),
            background_type='Gaussian',
            show_progressbar=None)
        assert np.allclose(residual.data, np.zeros(len(residual.data)))

    def test_background_remove_gaussian_full_fit(self):
        # Full (non-fast) fit should remove the background just as cleanly.
        residual = self.signal.remove_background(
            signal_range=(None, None),
            background_type='Gaussian',
            fast=False)
        assert np.allclose(residual.data, np.zeros(len(residual.data)))
@lazifyTestClass
class TestRemoveBackground1DPowerLaw:

    def setup_method(self, method):
        """Build clean and noisy power-law signals on an axis offset to 100."""
        pl = components1d.PowerLaw()
        pl.A.value = 1e10
        pl.r.value = 3
        self.signal = signals.Signal1D(
            pl.function(np.arange(100, 200)))
        self.signal.axes_manager[0].offset = 100
        self.signal.metadata.Signal.binned = False
        self.signal_noisy = self.signal.deepcopy()
        self.signal_noisy.add_gaussian_noise(1)
        # Tolerances of 4% of the data maximum for the absolute comparisons.
        self.atol = 0.04 * abs(self.signal.data).max()
        self.atol_zero_fill = 0.04 * abs(self.signal.isig[10:].data).max()

    def test_background_remove_pl(self):
        s1 = self.signal.remove_background(
            signal_range=(None, None),
            background_type='PowerLaw',
            show_progressbar=None)
        # since we compare to zero, rtol can't be used (see np.allclose doc)
        assert np.allclose(s1.data, np.zeros(len(s1.data)), atol=self.atol)
        assert s1.axes_manager.navigation_dimension == 0

    def test_background_remove_pl_zero(self):
        # Restricted signal range with zero_fill: the region before the range
        # should come back as exact zeros.
        s1 = self.signal_noisy.remove_background(
            signal_range=(110.0, 190.0),
            background_type='PowerLaw',
            zero_fill=True,
            show_progressbar=None)
        # since we compare to zero, rtol can't be used (see np.allclose doc)
        assert np.allclose(s1.isig[10:], np.zeros(len(s1.data[10:])),
                           atol=self.atol_zero_fill)
        assert np.allclose(s1.data[:10], np.zeros(10))

    def test_background_remove_pl_int(self):
        # Same as test_background_remove_pl but with integer-typed data.
        self.signal.change_dtype("int")
        s1 = self.signal.remove_background(
            signal_range=(None, None),
            background_type='PowerLaw',
            show_progressbar=None)
        # since we compare to zero, rtol can't be used (see np.allclose doc)
        assert np.allclose(s1.data, np.zeros(len(s1.data)), atol=self.atol)

    def test_background_remove_pl_int_zero(self):
        # Integer data with a restricted range and zero_fill.
        self.signal_noisy.change_dtype("int")
        s1 = self.signal_noisy.remove_background(
            signal_range=(110.0, 190.0),
            background_type='PowerLaw',
            zero_fill=True,
            show_progressbar=None)
        # since we compare to zero, rtol can't be used (see np.allclose doc)
        assert np.allclose(s1.isig[10:], np.zeros(len(s1.data[10:])),
                           atol=self.atol_zero_fill)
        assert np.allclose(s1.data[:10], np.zeros(10))
def compare_axes_manager_metadata(s0, s1):
    """Assert that two signals agree on shape, axis calibration and title."""
    assert s0.data.shape == s1.data.shape
    assert s0.axes_manager.shape == s1.axes_manager.shape
    for iaxis in range(len(s0.axes_manager._axes)):
        axis0 = s0.axes_manager[iaxis]
        axis1 = s1.axes_manager[iaxis]
        # Compare the calibration attributes one by one.
        for attribute in ("name", "units", "scale", "offset"):
            assert getattr(axis0, attribute) == getattr(axis1, attribute)
    assert s0.metadata.General.title == s1.metadata.General.title
@pytest.mark.parametrize('nav_dim', [0, 1])
@pytest.mark.parametrize('fast', [True, False])
@pytest.mark.parametrize('zero_fill', [True, False])
@pytest.mark.parametrize('show_progressbar', [True, False])
@pytest.mark.parametrize('plot_remainder', [True, False])
@pytest.mark.parametrize('background_type', ['Power Law', #'Polynomial',
                                             'Offset'])
# Add Polynomial background test once
# https://github.com/hyperspy/hyperspy/pull/1989 is merged.
def test_remove_background_metadata_axes_manager_copy(nav_dim,
                                                      fast,
                                                      zero_fill,
                                                      show_progressbar,
                                                      plot_remainder,
                                                      background_type):
    """remove_background must copy axis calibration and metadata to its
    result for every combination of options."""
    if nav_dim == 0:
        s = signals.Signal1D(np.arange(10, 100)[::-1])
    else:
        s = signals.Signal1D(np.arange(10, 210)[::-1].reshape(2, 100))
    # Non-default calibration and title so the copy check is meaningful.
    s.axes_manager[0].name = 'axis0'
    s.axes_manager[0].units = 'units0'
    s.axes_manager[0].scale = 0.9
    s.axes_manager[0].offset = 1.
    s.metadata.General.title = "atitle"
    s_r = s.remove_background(signal_range=(2, 50),
                              fast=fast,
                              zero_fill=zero_fill,
                              show_progressbar=show_progressbar,
                              plot_remainder=plot_remainder,
                              background_type=background_type)
    compare_axes_manager_metadata(s, s_r)
|
francisco-dlp/hyperspy
|
hyperspy/tests/signal/test_remove_background.py
|
Python
|
gpl-3.0
| 6,366
|
[
"Gaussian"
] |
48da13e32a0603201afa9ac552ed7e51f1e619bf5ff7885a92a2315c512777d9
|
import pytest
from io import BytesIO
from thefuck.types import Command
from thefuck.rules.gulp_not_task import match, get_new_command
def output(task):
    """Return fake gulp output reporting that *task* is not in the gulpfile."""
    lines = [
        "[00:41:11] Using gulpfile gulpfile.js",
        "[00:41:11] Task '{}' is not in your gulpfile".format(task),
        "[00:41:11] Please check the documentation for proper gulpfile formatting",
        "",  # trailing newline, as in the real gulp output
    ]
    return "\n".join(lines)
def test_match():
    # The rule should fire on gulp output reporting an unknown task.
    assert match(Command('gulp srve', output('srve')))
@pytest.mark.parametrize('script, stdout', [
    ('gulp serve', ''),
    ('cat srve', output('srve'))])
def test_not_march(script, stdout):
    # No match for a successful gulp run, nor for a non-gulp command even
    # with gulp-like output.
    # NOTE(review): the name looks like a typo for "test_not_match"; kept
    # as-is since this is a documentation-only pass.
    assert not match(Command(script, stdout))
def test_get_new_command(mocker):
    # Fake the subprocess the rule spawns (presumably to list gulp tasks —
    # verify against the rule implementation) so it sees these task names.
    mock = mocker.patch('subprocess.Popen')
    mock.return_value.stdout = BytesIO(b'serve \nbuild \ndefault \n')
    command = Command('gulp srve', output('srve'))
    # 'serve' is the closest task to 'srve'; 'default' is also suggested.
    assert get_new_command(command) == ['gulp serve', 'gulp default']
|
SimenB/thefuck
|
tests/rules/test_gulp_not_task.py
|
Python
|
mit
| 875
|
[
"GULP"
] |
7dcc944f7840d803ab6da8736c3ee56551e03c5fe6b6600cc2a86d35a6baea8b
|
import os
import pysam
import unittest
import collections
import copy
import array
from TestUtils import checkFieldEqual
SAMTOOLS = "samtools"
WORKDIR = "pysam_test_work"
DATADIR = "pysam_data"
class ReadTest(unittest.TestCase):
    # Shared base class providing a canonical AlignedSegment fixture.

    def buildRead(self):
        '''build an example read.'''
        a = pysam.AlignedSegment()
        a.query_name = "read_12345"
        a.query_sequence = "ACGT" * 10
        a.flag = 0
        a.reference_id = 0
        a.reference_start = 20
        # 10M 1D 9M 1I 20M: 40 query bases aligned over reference 20..59.
        a.mapping_quality = 20
        a.cigartuples = ((0, 10), (2, 1), (0, 9), (1, 1), (0, 20))
        a.next_reference_id = 0
        a.next_reference_start = 200
        a.template_length = 167
        # Qualities are assigned after query_sequence on purpose — setting
        # the sequence clears the quality string (see testUpdate2 below).
        a.query_qualities = pysam.qualitystring_to_array("1234") * 10
        # todo: create tags
        return a
class TestAlignedSegment(ReadTest):
'''tests to check if aligned read can be constructed
and manipulated.
'''
    def testEmpty(self):
        """A freshly constructed AlignedSegment exposes null/zero defaults."""
        a = pysam.AlignedSegment()
        self.assertEqual(a.query_name, None)
        self.assertEqual(a.query_sequence, None)
        self.assertEqual(pysam.qualities_to_qualitystring(a.query_qualities), None)
        self.assertEqual(a.flag, 0)
        self.assertEqual(a.reference_id, 0)
        self.assertEqual(a.mapping_quality, 0)
        self.assertEqual(a.cigartuples, None)
        self.assertEqual(a.tags, [])
        self.assertEqual(a.next_reference_id, 0)
        self.assertEqual(a.next_reference_start, 0)
        self.assertEqual(a.template_length, 0)
    def testStrOfEmptyRead(self):
        """str() of an empty read renders tab-separated default fields."""
        a = pysam.AlignedSegment()
        s = str(a)
        self.assertEqual(
            "None\t0\t0\t0\t0\tNone\t0\t0\t0\tNone\tNone\t[]",
            s)
    def testSettingTagInEmptyRead(self):
        '''see issue 62'''
        # Setting tags (and clearing qualities) on an otherwise empty read
        # must round-trip the tag list unchanged.
        a = pysam.AlignedSegment()
        a.tags = (("NM", 1),)
        a.query_qualities = None
        self.assertEqual(a.tags, [("NM", 1), ])
    def testCompare(self):
        '''check comparison functions.'''
        a = self.buildRead()
        b = self.buildRead()

        # Identically built reads compare equal, symmetrically.
        self.assertEqual(0, a.compare(b))
        self.assertEqual(0, b.compare(a))
        self.assertTrue(a == b)
        self.assertTrue(b == a)
        self.assertFalse(a != b)
        self.assertFalse(b != a)

        # Changing one field breaks equality in both directions.
        b.tid = 2
        self.assertFalse(a == b)
        self.assertFalse(b == a)
        self.assertTrue(a != b)
        self.assertTrue(b != a)
    def testHashing(self):
        """hash() agrees with equality: equal reads hash equal, and a field
        change alters the hash."""
        a = self.buildRead()
        b = self.buildRead()
        self.assertEqual(hash(a), hash(b))
        b.tid = 2
        self.assertNotEqual(hash(a), hash(b))
    def testUpdate(self):
        '''check if updating fields affects other variable length data
        '''
        a = self.buildRead()
        b = self.buildRead()

        # check qname: shrink, grow, then restore — only query_name may differ.
        b.query_name = "read_123"
        checkFieldEqual(self, a, b, "query_name")
        b.query_name = "read_12345678"
        checkFieldEqual(self, a, b, "query_name")
        b.query_name = "read_12345"
        checkFieldEqual(self, a, b)

        # check cigar: shrink, grow, then restore — only cigartuples may differ.
        b.cigartuples = ((0, 10), )
        checkFieldEqual(self, a, b, "cigartuples")
        b.cigartuples = ((0, 10), (2, 1), (0, 10))
        checkFieldEqual(self, a, b, "cigartuples")
        b.cigartuples = ((0, 10), (2, 1), (0, 9), (1, 1), (0, 20))
        checkFieldEqual(self, a, b)

        # check seq: assigning a sequence also resets qualities and length.
        b.query_sequence = "ACGT"
        checkFieldEqual(self,
                        a, b,
                        ("query_sequence", "query_qualities", "query_length"))
        b.query_sequence = "ACGT" * 3
        checkFieldEqual(self,
                        a, b,
                        ("query_sequence", "query_qualities", "query_length"))
        b.query_sequence = "ACGT" * 10
        # Sequence restored, but qualities were cleared by the assignments.
        checkFieldEqual(self, a, b, ("query_qualities",))

        # reset qual
        b = self.buildRead()

        # check flags: each boolean property maps onto the flag field only.
        for x in (
                "is_paired", "is_proper_pair",
                "is_unmapped", "mate_is_unmapped",
                "is_reverse", "mate_is_reverse",
                "is_read1", "is_read2",
                "is_secondary", "is_qcfail",
                "is_duplicate", "is_supplementary"):
            setattr(b, x, True)
            self.assertEqual(getattr(b, x), True)
            checkFieldEqual(self, a, b, ("flag", x,))
            setattr(b, x, False)
            self.assertEqual(getattr(b, x), False)
            checkFieldEqual(self, a, b)
    def testUpdate2(self):
        '''issue 135: inplace update of sequence and quality score.

        This does not work as setting the sequence will erase
        the quality scores.
        '''
        a = self.buildRead()
        a.query_sequence = a.query_sequence[5:10]
        # Qualities are gone after the sequence assignment.
        self.assertEqual(pysam.qualities_to_qualitystring(a.query_qualities), None)

        # Workaround: save the qualities, trim both, reassign.
        a = self.buildRead()
        s = pysam.qualities_to_qualitystring(a.query_qualities)
        a.query_sequence = a.query_sequence[5:10]
        a.query_qualities = pysam.qualitystring_to_array(s[5:10])
        self.assertEqual(pysam.qualities_to_qualitystring(a.query_qualities), s[5:10])
    def testLargeRead(self):
        '''build an example read with an 800 bp query.'''
        # NOTE(review): this method only constructs and returns the read —
        # it makes no assertions, so it passes trivially; consider asserting
        # the fields round-trip.
        a = pysam.AlignedSegment()
        a.query_name = "read_12345"
        a.query_sequence = "ACGT" * 200
        a.flag = 0
        a.reference_id = 0
        a.reference_start = 20
        a.mapping_quality = 20
        a.cigartuples = ((0, 4 * 200), )
        a.next_reference_id = 0
        a.next_reference_start = 200
        a.template_length = 167
        a.query_qualities = pysam.qualitystring_to_array("1234") * 200

        return a
    def testUpdateTlen(self):
        '''check if updating tlen works'''
        a = self.buildRead()
        oldlen = a.template_length
        oldlen *= 2
        a.template_length = oldlen
        self.assertEqual(a.template_length, oldlen)
    def testPositions(self):
        """Reference positions and aligned pairs for the 10M1D9M1I20M fixture:
        position 30 is deleted (None query) and query base 19 is inserted
        (None reference)."""
        a = self.buildRead()
        self.assertEqual(a.get_reference_positions(),
                         [20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
                          31, 32, 33, 34, 35, 36, 37, 38, 39,
                          40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
                          50, 51, 52, 53, 54, 55, 56, 57, 58, 59])

        self.assertEqual(a.get_aligned_pairs(),
                         [(0, 20), (1, 21), (2, 22), (3, 23), (4, 24),
                          (5, 25), (6, 26), (7, 27), (8, 28), (9, 29),
                          (None, 30),
                          (10, 31), (11, 32), (12, 33), (13, 34), (14, 35),
                          (15, 36), (16, 37), (17, 38), (18, 39), (19, None),
                          (20, 40), (21, 41), (22, 42), (23, 43), (24, 44),
                          (25, 45), (26, 46), (27, 47), (28, 48), (29, 49),
                          (30, 50), (31, 51), (32, 52), (33, 53), (34, 54),
                          (35, 55), (36, 56), (37, 57), (38, 58), (39, 59)])

        # get_reference_positions is the matched (both-sides) subset of pairs.
        self.assertEqual(
            a.get_reference_positions(),
            [x[1] for x in a.get_aligned_pairs()
             if x[0] is not None and x[1] is not None])
        # alen is the length of the aligned read in genome
        self.assertEqual(a.reference_length,
                         a.get_aligned_pairs()[-1][0] + 1)
        # aend points to one beyond last aligned base in ref
        self.assertEqual(a.get_reference_positions()[-1],
                         a.reference_end - 1)
    def testFullReferencePositions(self):
        '''see issue 26'''
        # With full_length=True soft-clipped bases are included, so
        # 30S + 20M + 3I + 47M covers all 100 query positions.
        a = self.buildRead()
        a.cigar = [(4, 30), (0, 20), (1, 3), (0, 47)]

        self.assertEqual(100,
                         len(a.get_reference_positions(full_length=True)))
def testBlocks(self):
a = self.buildRead()
self.assertEqual(a.get_blocks(),
[(20, 30), (31, 40), (40, 60)])
def test_infer_query_length(self):
'''Test infer_query_length on M|=|X|I|D|H|S cigar ops'''
a = self.buildRead()
a.cigarstring = '15M'
self.assertEqual(a.infer_query_length(), 15)
a.cigarstring = '15='
self.assertEqual(a.infer_query_length(), 15)
a.cigarstring = '15X'
self.assertEqual(a.infer_query_length(), 15)
a.cigarstring = '5M5I5M'
self.assertEqual(a.infer_query_length(), 15)
a.cigarstring = '5M5D5M'
self.assertEqual(a.infer_query_length(), 10)
a.cigarstring = '5H10M'
self.assertEqual(a.infer_query_length(), 15)
a.cigarstring = '5S10M'
self.assertEqual(a.infer_query_length(), 15)
    def test_get_aligned_pairs_soft_clipping(self):
        '''soft-clipped query bases pair with reference position None;
        with matches_only=True they are dropped entirely.'''
        a = self.buildRead()
        a.cigartuples = ((4, 2), (0, 35), (4, 3))
        self.assertEqual(a.get_aligned_pairs(),
                         [(0, None), (1, None)] +
                         [(qpos, refpos) for (qpos, refpos) in zip(
                             range(2, 2 + 35), range(20, 20 + 35))] +
                         [(37, None), (38, None), (39, None)]
                         )
        self.assertEqual(a.get_aligned_pairs(True),
                         # [(0, None), (1, None)] +
                         [(qpos, refpos) for (qpos, refpos) in zip(
                             range(2, 2 + 35), range(20, 20 + 35))]
                         # [(37, None), (38, None), (39, None)]
                         )
    def test_get_aligned_pairs_hard_clipping(self):
        '''hard-clipped bases are absent from the stored query, so
        they never appear in the pairs at all.'''
        a = self.buildRead()
        a.cigartuples = ((5, 2), (0, 35), (5, 3))
        self.assertEqual(a.get_aligned_pairs(),
                         # No seq, no seq pos
                         [(qpos, refpos) for (qpos, refpos) in zip(
                             range(0, 0 + 35), range(20, 20 + 35))])
        # matches_only makes no difference for hard clips
        self.assertEqual(a.get_aligned_pairs(True),
                         [(qpos, refpos) for (qpos, refpos) in zip(
                             range(0, 0 + 35), range(20, 20 + 35))])
    def test_get_aligned_pairs_skip(self):
        '''deleted reference positions pair with query position None;
        with matches_only=True the deletion run is dropped.'''
        a = self.buildRead()
        a.cigarstring = "2M100D38M"
        self.assertEqual(a.get_aligned_pairs(),
                         [(0, 20), (1, 21)] +
                         [(None, refpos) for refpos in range(22, 22 + 100)] +
                         [(qpos, refpos) for (qpos, refpos) in zip(
                             range(2, 2 + 38),
                             range(20 + 2 + 100, 20 + 2 + 100 + 38))])
        self.assertEqual(a.get_aligned_pairs(True),
                         [(0, 20), (1, 21)] +
                         # [(None, refpos) for refpos in range(21, 21+100)] +
                         [(qpos, refpos) for (qpos, refpos) in zip(
                             range(2, 2 + 38),
                             range(20 + 2 + 100, 20 + 2 + 100 + 38))])
def test_get_aligned_pairs_match_mismatch(self):
a = self.buildRead()
a.cigartuples = ((7, 20), (8, 20))
self.assertEqual(a.get_aligned_pairs(),
[(qpos, refpos) for (qpos, refpos) in zip(
range(0, 0 + 40), range(20, 20 + 40))])
self.assertEqual(a.get_aligned_pairs(True),
[(qpos, refpos) for (qpos, refpos) in zip(
range(0, 0 + 40), range(20, 20 + 40))])
def test_get_aligned_pairs_padding(self):
a = self.buildRead()
a.cigartuples = ((7, 20), (6, 1), (8, 19))
def inner():
a.get_aligned_pairs()
# padding is not bein handled right now
self.assertRaises(NotImplementedError, inner)
    def test_get_aligned_pairs(self):
        '''with_seq=True adds the reference base as a third element:
        matches are upper-case, mismatches lower-case, deleted bases
        come from the MD tag, insertions have base None.'''
        a = self.buildRead()
        a.query_sequence = "A" * 9
        a.cigarstring = "9M"
        a.set_tag("MD", "9")
        self.assertEqual(
            a.get_aligned_pairs(with_seq=True),
            [(0, 20, 'A'), (1, 21, 'A'), (2, 22, 'A'),
             (3, 23, 'A'), (4, 24, 'A'), (5, 25, 'A'),
             (6, 26, 'A'), (7, 27, 'A'), (8, 28, 'A')])

        # mismatch at the fifth base is reported as lower-case 'c'
        a.set_tag("MD", "4C4")
        self.assertEqual(
            a.get_aligned_pairs(with_seq=True),
            [(0, 20, 'A'), (1, 21, 'A'), (2, 22, 'A'),
             (3, 23, 'A'), (4, 24, 'c'), (5, 25, 'A'),
             (6, 26, 'A'), (7, 27, 'A'), (8, 28, 'A')])

        # deleted reference bases TT are restored from the MD tag
        a.cigarstring = "5M2D4M"
        a.set_tag("MD", "4C^TT4")
        self.assertEqual(
            a.get_aligned_pairs(with_seq=True),
            [(0, 20, 'A'), (1, 21, 'A'), (2, 22, 'A'),
             (3, 23, 'A'), (4, 24, 'c'),
             (None, 25, 'T'), (None, 26, 'T'),
             (5, 27, 'A'), (6, 28, 'A'), (7, 29, 'A'), (8, 30, 'A')]
        )

        # inserted query bases have neither reference position nor base
        a.cigarstring = "5M2D2I2M"
        a.set_tag("MD", "4C^TT2")
        self.assertEqual(
            a.get_aligned_pairs(with_seq=True),
            [(0, 20, 'A'), (1, 21, 'A'), (2, 22, 'A'),
             (3, 23, 'A'), (4, 24, 'c'),
             (None, 25, 'T'), (None, 26, 'T'),
             (5, None, None), (6, None, None),
             (7, 27, 'A'), (8, 28, 'A')]
        )
    def test_get_aligned_pairs_skip_reference(self):
        '''reference skips (N) pair with query position None and, for
        with_seq=True, reference base None; matches_only drops them.'''
        a = self.buildRead()
        a.query_sequence = "A" * 10
        a.cigarstring = "5M1N5M"
        a.set_tag("MD", "10")
        self.assertEqual(
            a.get_aligned_pairs(with_seq=True),
            [(0, 20, 'A'), (1, 21, 'A'), (2, 22, 'A'),
             (3, 23, 'A'), (4, 24, 'A'), (None, 25, None),
             (5, 26, 'A'), (6, 27, 'A'), (7, 28, 'A'),
             (8, 29, 'A'), (9, 30, 'A')])
        self.assertEqual(
            a.get_aligned_pairs(with_seq=False),
            [(0, 20), (1, 21), (2, 22),
             (3, 23), (4, 24), (None, 25),
             (5, 26), (6, 27), (7, 28),
             (8, 29), (9, 30)])
        # the skipped position disappears with matches_only=True
        self.assertEqual(
            a.get_aligned_pairs(matches_only=True, with_seq=False),
            [(0, 20), (1, 21),
             (2, 22), (3, 23),
             (4, 24), (5, 26),
             (6, 27), (7, 28),
             (8, 29), (9, 30)])
def testNoSequence(self):
'''issue 176: retrieving length without query sequence
with soft-clipping.
'''
a = self.buildRead()
a.query_sequence = None
a.cigarstring = "20M"
self.assertEqual(a.query_alignment_length, 20)
a.cigarstring = "20M1S"
self.assertEqual(a.query_alignment_length, 20)
a.cigarstring = "1S20M"
self.assertEqual(a.query_alignment_length, 20)
a.cigarstring = "1S20M1S"
self.assertEqual(a.query_alignment_length, 20)
class TestCigarStats(ReadTest):
    '''tests for AlignedSegment.get_cigar_stats().'''

    def testStats(self):
        '''get_cigar_stats returns two 11-field vectors: per-operation
        base counts and block counts for M I D N S H P = X, plus two
        extra slots, the last of which is taken from the NM tag.'''
        a = self.buildRead()

        # no cigar: everything is zero
        a.cigarstring = None
        self.assertEqual(
            [list(x) for x in a.get_cigar_stats()],
            [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])

        a.cigarstring = "10M"
        self.assertEqual(
            [list(x) for x in a.get_cigar_stats()],
            [[10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
             [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])

        a.cigarstring = "10M2I2M"
        self.assertEqual(
            [list(x) for x in a.get_cigar_stats()],
            [[12, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0],
             [2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]])

        # each cigar operation lands in its own slot
        for i, x in enumerate("MIDNSHP=X"):
            a.cigarstring = "2{}".format(x)
            expected = [[0] * 11, [0] * 11]
            expected[0][i] = 2
            expected[1][i] = 1
            self.assertEqual(
                [list(x) for x in a.get_cigar_stats()],
                expected)

        # the NM tag fills the last slot of the base-count vector
        a.cigarstring = "10M"
        a.set_tag("NM", 5)
        self.assertEqual(
            [list(x) for x in a.get_cigar_stats()],
            [[10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5],
             [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])

        # NM survives clearing the cigar
        a.cigarstring = None
        self.assertEqual(
            [list(x) for x in a.get_cigar_stats()],
            [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5],
             [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
class TestAlignedPairs(unittest.TestCase):
    '''consistency checks of get_aligned_pairs over a real BAM file.'''

    # test data shipped with the repository
    filename = os.path.join(DATADIR, "example_aligned_pairs.bam")

    def testReferenceBases(self):
        """reference bases should always be the same nucleotide
        """
        reference_bases = collections.defaultdict(list)
        with pysam.AlignmentFile(self.filename) as inf:
            for c in inf.pileup():
                for r in c.pileups:
                    for read, ref, base in r.alignment.get_aligned_pairs(
                            with_seq=True):
                        if ref is None:
                            continue
                        reference_bases[ref].append(base.upper())

        # across all reads, each reference position must have yielded
        # exactly one distinct base
        for x, y in reference_bases.items():
            self.assertEqual(len(set(y)), 1)
class TestTags(ReadTest):
    '''tests for reading, writing and deleting optional alignment tags.'''

    def testMissingTag(self):
        '''get_tag on an absent tag raises KeyError.'''
        a = self.buildRead()
        self.assertRaises(KeyError, a.get_tag, "XP")

    def testEmptyTag(self):
        '''an unset tag behaves like a missing one.'''
        a = self.buildRead()
        self.assertRaises(KeyError, a.get_tag, "XT")

    def testSetTag(self):
        '''set_tag adds, overwrites and (with value None) deletes a tag.'''
        a = self.buildRead()
        self.assertEqual(False, a.has_tag("NM"))
        a.set_tag("NM", 2)
        self.assertEqual(True, a.has_tag("NM"))
        self.assertEqual(a.get_tag("NM"), 2)
        a.set_tag("NM", 3)
        self.assertEqual(a.get_tag("NM"), 3)
        a.set_tag("NM", None)
        self.assertEqual(False, a.has_tag("NM"))
        # check if deleting a non-existing tag is fine
        a.set_tag("NM", None)
        a.set_tag("NM", None)

    def testArrayTags(self):
        '''array tags accept b/h/B/H/f dtypes and reject l/L/d.'''
        read = self.buildRead()
        supported_dtypes = "bhBHf"
        unsupported_dtypes = "lLd"

        for dtype in supported_dtypes:
            key = "F" + dtype
            read.set_tag(key, array.array(dtype, range(10)))
            # NOTE(review): the retrieved value is not compared against
            # the input; this only checks that set/get do not raise.
            ary = read.get_tag(key)

        for dtype in unsupported_dtypes:
            key = "F" + dtype
            self.assertRaises(ValueError,
                              read.set_tag,
                              key,
                              array.array(dtype, range(10)))

    def testAddTagsType(self):
        '''setTag honours explicit value_type codes and rejects bad ones.'''
        a = self.buildRead()
        a.tags = None
        self.assertEqual(a.tags, [])

        a.setTag('X1', 5.0)
        a.setTag('X2', "5.0")
        a.setTag('X3', 5)

        self.assertEqual(sorted(a.tags),
                         sorted([('X1', 5.0),
                                 ('X2', "5.0"),
                                 ('X3', 5)]))

        # test setting float for int value
        a.setTag('X4', 5, value_type='d')
        self.assertEqual(sorted(a.tags),
                         sorted([('X1', 5.0),
                                 ('X2', "5.0"),
                                 ('X3', 5),
                                 ('X4', 5.0)]))

        # test setting int for float value - the
        # value will be rounded.
        a.setTag('X5', 5.2, value_type='i')
        self.assertEqual(sorted(a.tags),
                         sorted([('X1', 5.0),
                                 ('X2', "5.0"),
                                 ('X3', 5),
                                 ('X4', 5.0),
                                 ('X5', 5)]))

        # test setting invalid type code
        self.assertRaises(ValueError, a.setTag, 'X6', 5.2, 'g')

    def testTagsUpdatingFloat(self):
        '''the tags property supports in-place appending.'''
        a = self.buildRead()
        a.tags = [('NM', 1), ('RG', 'L1'),
                  ('PG', 'P1'), ('XT', 'U')]

        self.assertEqual(a.tags,
                         [('NM', 1), ('RG', 'L1'),
                          ('PG', 'P1'), ('XT', 'U')])
        a.tags += [('XC', 5.0)]
        self.assertEqual(a.tags,
                         [('NM', 1), ('RG', 'L1'),
                          ('PG', 'P1'), ('XT', 'U'), ('XC', 5.0)])

    def testAddTags(self):
        '''setTag appends, replaces or duplicates tags as requested.'''
        a = self.buildRead()
        a.tags = [('NM', 1), ('RG', 'L1'),
                  ('PG', 'P1'), ('XT', 'U')]

        self.assertEqual(sorted(a.tags),
                         sorted([('NM', 1), ('RG', 'L1'),
                                 ('PG', 'P1'), ('XT', 'U')]))

        a.setTag('X1', 'C')
        self.assertEqual(sorted(a.tags),
                         sorted([('X1', 'C'), ('NM', 1), ('RG', 'L1'),
                                 ('PG', 'P1'), ('XT', 'U'), ]))
        a.setTag('X2', 5)
        self.assertEqual(sorted(a.tags),
                         sorted([('X2', 5), ('X1', 'C'),
                                 ('NM', 1), ('RG', 'L1'),
                                 ('PG', 'P1'), ('XT', 'U'), ]))
        # add with replacement
        a.setTag('X2', 10)
        self.assertEqual(sorted(a.tags),
                         sorted([('X2', 10), ('X1', 'C'),
                                 ('NM', 1), ('RG', 'L1'),
                                 ('PG', 'P1'), ('XT', 'U'), ]))

        # add without replacement
        a.setTag('X2', 5, replace=False)
        self.assertEqual(sorted(a.tags),
                         sorted([('X2', 10), ('X1', 'C'),
                                 ('X2', 5),
                                 ('NM', 1), ('RG', 'L1'),
                                 ('PG', 'P1'), ('XT', 'U'), ]))

    def testTagParsing(self):
        '''test for tag parsing

        see http://groups.google.com/group/pysam-user-group/browse_thread/thread/67ca204059ea465a
        '''
        # use a context manager so the file is closed even on failure
        # (the original version leaked the open AlignmentFile)
        with pysam.AlignmentFile(
                os.path.join(DATADIR, "ex8.bam"),
                "rb") as samfile:
            for entry in samfile:
                before = entry.get_tags()
                entry.set_tags(before)
                after = entry.get_tags()
                self.assertEqual(after, before)

    def testMDTagMatchOnly(self):
        '''MD tags with substitutions reconstruct the reference,
        mismatching bases reported in lower case.'''
        a = self.buildRead()

        # Substitutions only
        a.cigarstring = "21M"
        a.query_sequence = "A" * 21
        a.set_tag('MD', "5C0T0G05C0G0T5")
        self.assertEqual(
            "AAAAActgAAAAAcgtAAAAA",
            a.get_reference_sequence())

        a.cigarstring = "21M"
        a.query_sequence = "A" * 21
        a.set_tag('MD', "5CTG5CGT5")
        self.assertEqual(
            "AAAAActgAAAAAcgtAAAAA",
            a.get_reference_sequence())

        a.cigarstring = "11M"
        a.query_sequence = "A" * 11
        a.set_tag('MD', "CTG5CGT")
        self.assertEqual(
            "ctgAAAAAcgt",
            a.get_reference_sequence())

    def testMDTagInsertions(self):
        '''insertions (I ops) contribute no reference bases.'''
        a = self.buildRead()

        # insertions are silent in the reference sequence
        a.cigarstring = "5M1I5M"
        a.query_sequence = "A" * 5 + "C" + "A" * 5
        a.set_tag('MD', "10")
        self.assertEqual(
            a.get_reference_sequence(),
            "A" * 10)

        a.cigarstring = "1I10M"
        a.query_sequence = "C" * 1 + "A" * 10
        self.assertEqual(
            a.get_reference_sequence(),
            "A" * 10)

        a.cigarstring = "10M1I"
        a.query_sequence = "A" * 10 + "C" * 1
        self.assertEqual(
            a.get_reference_sequence(),
            "A" * 10)

    def testMDTagDeletions(self):
        '''deleted reference bases (after ^ in MD) are restored.'''
        a = self.buildRead()

        a.cigarstring = "5M1D5M"
        a.query_sequence = "A" * 10
        a.set_tag('MD', "5^C5")
        self.assertEqual(
            "A" * 5 + "C" + "A" * 5,
            a.get_reference_sequence())

        a.cigarstring = "5M3D5M"
        a.query_sequence = "A" * 10
        a.set_tag('MD', "5^CCC5")
        self.assertEqual(
            "A" * 5 + "C" * 3 + "A" * 5,
            a.get_reference_sequence())

    def testMDTagRefSkipping(self):
        '''reference skips (N ops) add no bases to the reference.'''
        a = self.buildRead()

        a.cigarstring = "5M1N5M"
        a.query_sequence = "A" * 10
        a.set_tag('MD', "10")
        self.assertEqual(
            "A" * 10,
            a.get_reference_sequence())

        a.cigarstring = "5M3N5M"
        a.query_sequence = "A" * 10
        a.set_tag('MD', "10")
        self.assertEqual(
            "A" * 10,
            a.get_reference_sequence())

    def testMDTagSoftClipping(self):
        '''soft-clipped query bases are ignored when applying MD.'''
        a = self.buildRead()

        # softclipping
        a.cigarstring = "5S5M1D5M5S"
        a.query_sequence = "G" * 5 + "A" * 10 + "G" * 5
        a.set_tag('MD', "5^C5")
        self.assertEqual(
            "A" * 5 + "C" + "A" * 5,
            a.get_reference_sequence())

        # all together
        a.cigarstring = "5S5M1D5M1I5M5S"
        a.query_sequence = "G" * 5 + "A" * 16 + "G" * 5
        a.set_tag('MD', "2C2^T10")
        self.assertEqual(
            "AAcAATAAAAAAAAAA",
            a.get_reference_sequence())

    def testMDTagComplex(self):
        '''combinations of clips, insertions and deletions.'''
        a = self.buildRead()

        a.cigarstring = "5S5M1I2D5M5S"
        a.query_sequence = "G" * 5 + "A" * 11 + "G" * 5
        a.set_tag('MD', "2C2^TC5")
        self.assertEqual(
            "AAcAATCAAAAA",
            a.get_reference_sequence())

        a.cigarstring = "5S5M2D1I5M5S"
        a.query_sequence = "G" * 5 + "A" * 11 + "G" * 5
        a.set_tag('MD', "2C2^TC5")
        self.assertEqual(
            "AAcAATCAAAAA",
            a.get_reference_sequence())

        # insertion in reference overlapping deletion in reference
        # read: AACCCCA---AAA
        # ref:  AA----AGGGAAA
        a.cigarstring = "2M4I1M3D3M"
        a.set_tag("MD", "3^GGG3")
        a.query_sequence = "AACCCCAAAA"
        self.assertEqual(
            "AAAGGGAAA",
            a.get_reference_sequence())

        a.cigarstring = "5M2D2I2M"
        a.set_tag("MD", "4C^TT2")
        a.query_sequence = "A" * 9
        self.assertEqual(
            "AAAAcTTAA",
            a.get_reference_sequence())
class TestCopy(ReadTest):
    '''copies of a read compare equal but are independent objects.'''

    def _check_independent_copy(self, copier):
        # build, copy, verify equality, then verify independence
        original = self.buildRead()
        duplicate = copier(original)
        self.assertEqual(original, duplicate)
        # mutating one must not affect the other
        original.query_name = 'ReadA'
        duplicate.query_name = 'ReadB'
        self.assertEqual(original.query_name, 'ReadA')
        self.assertEqual(duplicate.query_name, 'ReadB')

    def testCopy(self):
        self._check_independent_copy(copy.copy)

    def testDeepCopy(self):
        self._check_independent_copy(copy.deepcopy)
class TestAsString(unittest.TestCase):
    '''tostring() must reproduce the SAM text of each alignment.'''

    def testAsString(self):
        # reference: SAM body lines with header stripped and the
        # trailing newline removed
        with open(os.path.join(DATADIR, "ex2.sam")) as samf:
            reference = [x[:-1] for x in samf if not x.startswith("@")]

        with pysam.AlignmentFile(
                os.path.join(DATADIR, "ex2.bam"), "r") as pysamf:
            for s, p in zip(reference, pysamf):
                # NOTE(review): tostring is called with the file handle;
                # in newer pysam this argument is optional - confirm
                # against the pinned pysam version.
                self.assertEqual(s, p.tostring(pysamf))
if __name__ == "__main__":
unittest.main()
|
bioinformed/pysam
|
tests/AlignedSegment_test.py
|
Python
|
mit
| 26,632
|
[
"pysam"
] |
cfc9736e24cc11d157dbbf5acbba413959dc4913a915b457fd056d88f9463881
|
# Copyright (c) Materials Virtual Lab.
# Distributed under the terms of the BSD License.
"""
Implementation for `pmg query` CLI.
"""
import json
import re
from monty.serialization import dumpfn
from tabulate import tabulate
from pymatgen.ext.matproj import MPRester
def do_query(args):
    """
    Perform query to the Materials Project

    Args:
        args (dict): Args from argparse.
    """
    rester = MPRester()
    try:
        criteria = json.loads(args.criteria)
    except json.decoder.JSONDecodeError:
        # not valid JSON - pass the raw criteria string through
        criteria = args.criteria

    if args.structure:
        # write one structure file per matching material
        count = 0
        for doc in rester.query(criteria, properties=["structure", "task_id"]):
            structure = doc["structure"]
            formula = re.sub(r"\s+", "", structure.formula)
            if args.structure == "poscar":
                fname = f"POSCAR.{doc['task_id']}_{formula}"
            else:
                fname = f"{doc['task_id']}-{formula}.{args.structure}"
            structure.to(filename=fname)
            count += 1
        print(f"{count} structures written!")
    elif args.entries:
        # dump the computed entries to the requested file
        entries = rester.get_entries(criteria)
        dumpfn(entries, args.entries)
        print(f"{len(entries)} entries written to {args.entries}!")
    else:
        # tabulate summary data, sorted by energy above hull
        props = ["e_above_hull", "spacegroup"] + args.data
        entries = rester.get_entries(criteria, property_data=props)
        headers = [
            "mp-id",
            "Formula",
            "Spacegroup",
            "E/atom (eV)",
            "E above hull (eV)",
        ] + args.data
        rows = []
        for entry in entries:
            row = [
                entry.entry_id,
                entry.composition.reduced_formula,
                entry.data["spacegroup"]["symbol"],
                entry.energy_per_atom,
                entry.data["e_above_hull"],
            ]
            row += [entry.data[s] for s in args.data]
            rows.append(row)
        rows.sort(key=lambda row: row[headers.index("E above hull (eV)")])
        print(tabulate(rows, headers=headers, tablefmt="pipe", floatfmt=".3f"))
|
materialsproject/pymatgen
|
pymatgen/cli/pmg_query.py
|
Python
|
mit
| 2,031
|
[
"pymatgen"
] |
873563a6cc22207e00118e685113fa50dd3a0eb58f5200978af3e77232e804a4
|
""" Testing the API and a bit more.
It will submit a number of test jobs locally (via runLocal), using the python unittest to assess the results.
Can be automatized.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=protected-access, wrong-import-position, invalid-name, missing-docstring
import os
import sys
import unittest
import multiprocessing
from DIRAC.Core.Base.Script import parseCommandLine
parseCommandLine()
from DIRAC import gLogger, rootPath
from DIRAC.tests.Utilities.IntegrationTest import IntegrationTest
from DIRAC.tests.Utilities.utils import find_all
from DIRAC.Interfaces.API.Job import Job
from DIRAC.Interfaces.API.Dirac import Dirac
class UserJobTestCase(IntegrationTest):
    """ Base class for the UserJob test cases
    """

    def setUp(self):
        """Locate the test scripts and enable debug logging.

        Scripts are looked up under the DIRAC installation (rootPath)
        first; when that fails we are running inside Jenkins and fall
        back to $WORKSPACE.
        """
        super(UserJobTestCase, self).setUp()

        # Dirac API entry point shared by all test cases
        self.d = Dirac()
        integration_test_dir = '/DIRAC/tests/Workflow/Integration'
        try:
            self.exeScriptLocation = find_all('exe-script.py', rootPath, integration_test_dir)[0]
            self.helloWorld = find_all("helloWorld.py", rootPath, integration_test_dir)[0]
            self.mpExe = find_all('mpTest.py', rootPath, '/DIRAC/tests/Utilities')[0]
            self.mpExeFlex = find_all('mpTest-flexible.py', rootPath, '/DIRAC/tests/Utilities')[0]
        except IndexError:  # we are in Jenkins
            self.exeScriptLocation = find_all('exe-script.py', os.environ['WORKSPACE'], integration_test_dir)[0]
            self.helloWorld = find_all("helloWorld.py", os.environ['WORKSPACE'], integration_test_dir)[0]
            self.mpExe = find_all('mpTest.py', os.environ['WORKSPACE'], '/DIRAC/tests/Utilities')[0]
            self.mpExeFlex = find_all('mpTest-flexible.py', os.environ['WORKSPACE'], '/DIRAC/tests/Utilities')[0]

        gLogger.setLevel('DEBUG')
class HelloWorldSuccess(UserJobTestCase):
    '''run the plain exe-script job locally and expect success.'''

    def test_execute(self):
        job = Job()
        job.setName("helloWorld-test")
        job.setExecutable(self.exeScriptLocation)
        job.setLogLevel('DEBUG')
        try:
            # This is the standard location in Jenkins
            job.setInputSandbox(find_all('pilot.cfg', os.environ['WORKSPACE'] + '/PilotInstallDIR')[0])
        except (IndexError, KeyError):
            job.setInputSandbox(find_all('pilot.cfg', rootPath)[0])
        job.setConfigArgs('pilot.cfg')
        result = job.runLocal(self.d)
        self.assertTrue(result['OK'])
class HelloWorldPlusSuccess(UserJobTestCase):
    """ Adding quite a lot of calls from the API, for pure test purpose
    """

    def test_execute(self):
        """helloWorld job with explicit arguments plus many API setters."""
        job = Job()
        # restrict the known sites so the local run accepts the destination
        job._siteSet = {'DIRAC.someSite.ch'}
        job.setName("helloWorld-test")
        job.setExecutable(self.helloWorld,
                          arguments="This is an argument",
                          logFile="aLogFileForTest.txt",
                          parameters=[('executable', 'string', '', "Executable Script"),
                                      ('arguments', 'string', '', 'Arguments for executable Script'),
                                      ('applicationLog', 'string', '', "Log file name"),
                                      ('someCustomOne', 'string', '', "boh")],
                          paramValues=[('someCustomOne', 'aCustomValue')])
        job.setBannedSites(['LCG.SiteA.com', 'DIRAC.SiteB.org'])
        job.setOwner('ownerName')
        job.setOwnerGroup('ownerGroup')
        # note: overrides the name set above
        job.setName('jobName')
        job.setJobGroup('jobGroup')
        job.setType('jobType')
        job.setDestination('DIRAC.someSite.ch')
        job.setCPUTime(12345)
        job.setLogLevel('DEBUG')
        try:
            # This is the standard location in Jenkins
            job.setInputSandbox(find_all('pilot.cfg', os.environ['WORKSPACE'] + '/PilotInstallDIR')[0])
        except (IndexError, KeyError):
            job.setInputSandbox(find_all('pilot.cfg', rootPath)[0])
        job.setConfigArgs('pilot.cfg')

        res = job.runLocal(self.d)
        self.assertTrue(res['OK'])

    def test_execute_success(self):
        """same as test_execute but without the explicit arguments kwarg."""
        job = Job()
        # restrict the known sites so the local run accepts the destination
        job._siteSet = {'DIRAC.someSite.ch'}
        job.setName("helloWorld-test")
        job.setExecutable(self.helloWorld,
                          logFile="aLogFileForTest.txt",
                          parameters=[('executable', 'string', '', "Executable Script"),
                                      ('arguments', 'string', '', 'Arguments for executable Script'),
                                      ('applicationLog', 'string', '', "Log file name"),
                                      ('someCustomOne', 'string', '', "boh")],
                          paramValues=[('someCustomOne', 'aCustomValue')])
        job.setBannedSites(['LCG.SiteA.com', 'DIRAC.SiteB.org'])
        job.setOwner('ownerName')
        job.setOwnerGroup('ownerGroup')
        # note: overrides the name set above
        job.setName('jobName')
        job.setJobGroup('jobGroup')
        job.setType('jobType')
        job.setDestination('DIRAC.someSite.ch')
        job.setCPUTime(12345)
        job.setLogLevel('DEBUG')
        try:
            # This is the standard location in Jenkins
            job.setInputSandbox(find_all('pilot.cfg', os.environ['WORKSPACE'] + '/PilotInstallDIR')[0])
        except (IndexError, KeyError):
            job.setInputSandbox(find_all('pilot.cfg', rootPath)[0])
        job.setConfigArgs('pilot.cfg')

        res = job.runLocal(self.d)
        self.assertTrue(res['OK'])
class LSSuccess(UserJobTestCase):
    '''run a plain unix "ls -l" as the job executable.'''

    def test_execute(self):
        """ just testing unix "ls"
        """
        ls_job = Job()
        ls_job.setName("ls-test")
        ls_job.setExecutable("/bin/ls", '-l')
        ls_job.setLogLevel('DEBUG')
        try:
            # standard pilot.cfg location when running inside Jenkins
            ls_job.setInputSandbox(find_all('pilot.cfg', os.environ['WORKSPACE'] + '/PilotInstallDIR')[0])
        except (IndexError, KeyError):
            ls_job.setInputSandbox(find_all('pilot.cfg', rootPath)[0])
        ls_job.setConfigArgs('pilot.cfg')
        outcome = ls_job.runLocal(self.d)
        self.assertTrue(outcome['OK'])
class MPSuccess(UserJobTestCase):
    '''job requesting a fixed number of processors.'''

    def test_fixed(self):
        """ this tests executes a job that requires exactly 4 processors
        """
        j = Job()
        j.setName("MP-test")
        j.setExecutable(self.mpExe)
        j.setInputSandbox(find_all('mpTest.py', rootPath, 'DIRAC/tests/Utilities')[0])
        j.setNumberOfProcessors(4)  # This requires a fixed number of processors
        j.setLogLevel('DEBUG')
        try:
            # This is the standard location in Jenkins
            j.setInputSandbox(find_all('pilot.cfg', os.environ['WORKSPACE'] + '/PilotInstallDIR')[0])
        except (IndexError, KeyError):
            j.setInputSandbox(find_all('pilot.cfg', rootPath)[0])
        j.setConfigArgs('pilot.cfg')
        res = j.runLocal(self.d)
        # the fixed 4-processor request can only be satisfied on a
        # multi-core machine; on a single core the job must fail
        if multiprocessing.cpu_count() > 1:
            self.assertTrue(res['OK'])
        else:
            self.assertFalse(res['OK'])
class MPSuccessMinMax(UserJobTestCase):
    '''jobs declaring a flexible (min/max) number of processors.'''

    def _run_flexible_mp_job(self, name, **processor_kwargs):
        """Build, configure and locally run an mpTest-flexible job;
        returns the runLocal result dict."""
        job = Job()
        job.setName(name)
        # FIXME: the number of processors should be discovered at runtime using JobParameters.getNumberOfJobProcessors()
        # here, and later
        job.setExecutable(self.mpExeFlex, arguments='2')
        job.setInputSandbox(find_all('mpTest-flexible.py', rootPath, 'DIRAC/tests/Utilities')[0])
        job.setNumberOfProcessors(**processor_kwargs)
        job.setLogLevel('DEBUG')
        try:
            # This is the standard location in Jenkins
            job.setInputSandbox(find_all('pilot.cfg', os.environ['WORKSPACE'] + '/PilotInstallDIR')[0])
        except (IndexError, KeyError):
            job.setInputSandbox(find_all('pilot.cfg', rootPath)[0])
        job.setConfigArgs('pilot.cfg')
        return job.runLocal(self.d)

    def test_min2(self):
        """ this tests executes a job that requires at least 2 processors
        """
        res = self._run_flexible_mp_job("MP-test-min2", minNumberOfProcessors=2)
        self.assertTrue(res['OK'])

    def test_min2max4(self):
        """ this tests executes a job that requires 2 to 4 processors
        """
        res = self._run_flexible_mp_job("MP-test-min2max4",
                                        minNumberOfProcessors=2,
                                        maxNumberOfProcessors=4)
        self.assertTrue(res['OK'])

    def test_min1(self):
        """ this tests executes a job that requires at least 1 processor
        """
        res = self._run_flexible_mp_job("MP-test-min1", minNumberOfProcessors=1)
        self.assertTrue(res['OK'])
if __name__ == '__main__':
    # assemble the suites in a fixed order; exit non-zero on any failure
    loader = unittest.defaultTestLoader
    suite = loader.loadTestsFromTestCase(UserJobTestCase)
    for case in (HelloWorldSuccess,
                 HelloWorldPlusSuccess,
                 LSSuccess,
                 MPSuccess,
                 MPSuccessMinMax):
        suite.addTest(loader.loadTestsFromTestCase(case))
    testResult = unittest.TextTestRunner(verbosity=2).run(suite)
    sys.exit(not testResult.wasSuccessful())
|
yujikato/DIRAC
|
tests/Workflow/Integration/Test_UserJobs.py
|
Python
|
gpl-3.0
| 9,610
|
[
"DIRAC"
] |
09e20f36df2734003aa50572993a5070860d33e0d0c7165e586ddef0c47523d8
|
## @package shesha.init.rtc_init
## @brief Initialization of a Rtc object
## @author COMPASS Team <https://github.com/ANR-COMPASS>
## @version 5.2.1
## @date 2022/01/24
## @copyright GNU Lesser General Public License
#
# This file is part of COMPASS <https://anr-compass.github.io/compass/>
#
# Copyright (C) 2011-2022 COMPASS Team <https://github.com/ANR-COMPASS>
# All rights reserved.
# Distributed under GNU - LGPL
#
# COMPASS is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser
# General Public License as published by the Free Software Foundation, either version 3 of the License,
# or any later version.
#
# COMPASS: End-to-end AO simulation tool using GPU acceleration
# The COMPASS platform was designed to meet the need of high-performance for the simulation of AO systems.
#
# The final product includes a software package for simulating all the critical subcomponents of AO,
# particularly in the context of the ELT and a real-time core based on several control approaches,
# with performances consistent with its integration into an instrument. Taking advantage of the specific
# hardware architecture of the GPU, the COMPASS tool allows to achieve adequate execution speeds to
# conduct large simulation campaigns called to the ELT.
#
# The COMPASS platform can be used to carry a wide variety of simulations to both testspecific components
# of AO of the E-ELT (such as wavefront analysis device with a pyramid or elongated Laser star), and
# various systems configurations such as multi-conjugate AO.
#
# COMPASS is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along with COMPASS.
# If not, see <https://www.gnu.org/licenses/lgpl-3.0.txt>.
import shesha.config as conf
import shesha.constants as scons
from shesha.constants import CONST
from shesha.ao import imats, cmats, tomo, basis, modopti
from shesha.util import utilities, rtc_util
from shesha.init import dm_init
from typing import List
import numpy as np
from shesha.sutra_wrap import (carmaWrap_context, Sensors, Dms, Target, Rtc_brahma,
Rtc_cacao_FFF, Atmos, Telescope)
from shesha.sutra_wrap import Rtc_FFF as Rtc
def rtc_init(context: carmaWrap_context, tel: Telescope, wfs: Sensors, dms: Dms,
             atmos: Atmos, p_wfss: list, p_tel: conf.Param_tel, p_geom: conf.Param_geom,
             p_atmos: conf.Param_atmos, ittime: float, p_centroiders=None,
             p_controllers=None, p_dms=None, do_refslp=False, brahma=False, cacao=False,
             tar=None, dataBase=None, use_DB=False):
    """Initialize all the SutraRtc objects : centroiders and controllers

    Args:
        context: (carmaWrap_context): context
        tel: (Telescope) : Telescope object
        wfs: (Sensors) : Sensors object
        dms: (Dms) : Dms object
        atmos: (Atmos) : Atmos object
        p_wfss: (list of Param_wfs) : wfs settings
        p_tel: (Param_tel) : telescope settings
        p_geom: (Param_geom) : geom settings
        p_atmos: (Param_atmos) : atmos settings
        ittime: (float) : iteration time [s]

    Kwargs:
        p_centroiders : (list of Param_centroider): centroiders settings
        p_controllers : (list of Param_controller): controllers settings
        p_dms: (list of Param_dms) : dms settings
        do_refslp : (bool): do ref slopes flag, default=False
        brahma: (bool) : brahma flag
        cacao: (bool) : cacao flag
        tar: (Target) : Target object
        dataBase: (dict): dict containig paths to files to load
            (fixed: was a mutable default argument ``{}`` shared
            between calls; now defaults to None and is created fresh)
        use_DB: (bool): use dataBase flag

    Returns:
        Rtc : (Rtc) : Rtc object
    """
    # guard against the shared-mutable-default pitfall
    if dataBase is None:
        dataBase = {}

    # initialisation var
    # ________________________________________________

    # pick the Rtc flavour requested by the middleware flags
    if brahma:
        rtc = Rtc_brahma(context, wfs, tar, "rtc_brahma")
    elif cacao:
        rtc = Rtc_cacao_FFF("compass_calPix", "compass_loopData")
    else:
        rtc = Rtc()

    if p_wfss is None:
        return rtc

    if p_centroiders:
        ncentro = len(p_centroiders)
    else:
        ncentro = 0

    if p_controllers:
        ncontrol = len(p_controllers)
    else:
        ncontrol = 0

    # one centroider per configured Param_centroider
    if p_centroiders is not None:
        for i in range(ncentro):
            nwfs = p_centroiders[i].nwfs
            init_centroider(context, nwfs, p_wfss[nwfs], p_centroiders[i], p_tel,
                            p_atmos, wfs, rtc)

    if p_controllers is not None:
        if (p_wfss is not None and p_dms is not None):
            for i in range(ncontrol):
                # recompute the geometric interaction matrix unless the
                # database already provides the DM data
                if not "dm" in dataBase:
                    imat = imats.imat_geom(wfs, dms, p_wfss, p_dms, p_controllers[i],
                                           meth=0)
                else:
                    imat = None

                if p_dms[0].type == scons.DmType.PZT:
                    dm_init.correct_dm(context, dms, p_dms, p_controllers[i], p_geom,
                                       imat, dataBase=dataBase, use_DB=use_DB)

                init_controller(context, i, p_controllers[i], p_wfss, p_geom, p_dms,
                                p_atmos, ittime, p_tel, rtc, dms, wfs, tel, atmos,
                                p_centroiders, do_refslp, dataBase=dataBase,
                                use_DB=use_DB)

            # add a geometric controller for processing error breakdown
            roket_flag = True in [w.roket for w in p_wfss]
            if (roket_flag):
                p_controller = p_controllers[0]
                # number of pupil points driving the GEO controller
                Nphi = np.where(p_geom._spupil)[0].size

                nactu = np.sum([p_dms[j]._ntotact for j in p_controller.ndm])

                nmodes = 0
                if(p_controller.nmodes is not None):
                    nmodes = p_controller.nmodes

                # the GEO controller is appended after the ncontrol
                # regular controllers, hence index ncontrol below
                rtc.add_controller(context, scons.ControllerType.GEO, context.active_device,
                                   0, p_controller.nslope, p_controller.nactu,
                                   p_controller.nslope_buffer, p_controller.nstates, p_controller.nstate_buffer,
                                   nmodes, p_controller.n_iir_in, p_controller.n_iir_out,
                                   p_controller.polc, p_controller.modal, dms, p_controller.ndm,
                                   p_controller.ndm.size, p_controller.nwfs, p_controller.nwfs.size, Nphi, True)

                init_controller_geo(ncontrol, rtc, dms, p_geom, p_controller, p_dms,
                                    roket=True)

    return rtc
def rtc_standalone(context: carmaWrap_context, nwfs: int, nvalid: list, nactu: int,
                   centroider_type: list, delay: list, offset: list, scale: list,
                   brahma: bool = False, fp16: bool = False, cacao: bool = False) -> Rtc:
    """Initialize all the SutraRtc objects : centroiders and controllers

    Args:
        context: (carmaWrap_context): context
        nwfs: (int): number of wavefront sensors
        nvalid: (int): number of valid measures as input
        nactu: (int): number of actuators as output
        centroider_type: (list): type of centroiders
        delay: (list): delay of each controller
        offset: (list): offset added in the cog computation of each WFS
        scale: (list): scale factor used in the cog computation of each WFS

    Kwargs:
        brahma: (bool) : brahma flag (default=False)
        fp16: (bool) : fp16 flag (default=False)
        cacao: (bool) : cacao flag (default=False)

    Returns:
        Rtc : (Rtc) : Rtc object
    """
    print("start rtc_standalone")
    # choose the Rtc flavour; fp16 variants are only imported on demand
    # because they may be absent from some builds
    if brahma:
        rtc = Rtc_brahma(context, None, None, "rtc_brahma")
    elif cacao:
        if fp16:
            from shesha.sutra_wrap import Rtc_cacao_FHF
            rtc = Rtc_cacao_FHF("compass_calPix", "compass_loopData")
        else:
            rtc = Rtc_cacao_FFF("compass_calPix", "compass_loopData")
    else:
        if fp16:
            from shesha.sutra_wrap import Rtc_FHF
            rtc = Rtc_FHF()
        else:
            rtc = Rtc()
    # one centroider per WFS
    for k in range(nwfs):
        # print(context, nvalid[k], offset[k], scale[k], False,
        #       context.active_device, centroider_type[k])
        rtc.add_centroider(context, nvalid[k], offset[k], scale[k], False,
                           context.active_device, centroider_type[k])

    # single generic controller fed by all centroiders
    # NOTE(review): only delay[0] is used although delay is a list -
    # presumably one controller is intended; confirm
    nslopes = sum([c.nslopes for c in rtc.d_centro])
    rtc.add_controller(context, "generic", context.active_device,delay[0],
                       nslopes, nactu, idx_centro=np.arange(nwfs),
                       ncentro=nwfs)

    print("rtc_standalone set")
    return rtc
def init_centroider(context, nwfs: int, p_wfs: conf.Param_wfs,
                    p_centroider: conf.Param_centroider, p_tel: conf.Param_tel,
                    p_atmos: conf.Param_atmos, wfs: Sensors, rtc: Rtc):
    """ Initialize a centroider object in Rtc

    Args:
        context: (carmaWrap_context): context

        nwfs : (int) : index of wfs

        p_wfs : (Param_wfs): wfs settings

        p_centroider : (Param_centroider) : centroider settings

        p_tel : (Param_tel) : telescope settings (diameter used for pyramid scaling)

        p_atmos : (Param_atmos) : atmos settings (r0 used to size WCOG/CORR weights)

        wfs: (Sensors): Sensor object

        rtc : (Rtc) : Rtc object
    """
    if (p_wfs.type == scons.WFSType.SH):
        # Shack-Hartmann: offset/scale are expressed in pixels within a subaperture.
        if (p_centroider.type != scons.CentroiderType.CORR):
            s_offset = p_wfs.npix // 2. - 0.5
        else:
            if (p_centroider.type_fct == scons.CentroiderFctType.MODEL):
                # For CORR with a MODEL weighting function, the centering
                # depends on the parity of the subaperture size.
                if (p_wfs.npix % 2 == 0):
                    s_offset = p_wfs.npix // 2 - 0.5
                else:
                    s_offset = p_wfs.npix // 2
            else:
                s_offset = p_wfs.npix // 2 - 0.5
        s_scale = p_wfs.pixsize
    elif (p_wfs.type == scons.WFSType.PYRHR or p_wfs.type == scons.WFSType.PYRLR):
        # Pyramid WFS: no pixel offset; scale converts modulation amplitude
        # (lambda/D units) to arcsec.
        s_offset = 0.
        s_scale = (p_wfs.Lambda * 1e-6 / p_tel.diam) * \
            p_wfs.pyr_ampl * CONST.RAD2ARCSEC

    rtc.add_centroider(context, p_wfs._nvalid, s_offset, s_scale, p_centroider.filter_TT,
                       context.active_device, p_centroider.type, wfs.d_wfs[nwfs])
    rtc.d_centro[-1].load_validpos(p_wfs._validsubsx, p_wfs._validsubsy,
                                   p_wfs._nvalid * p_wfs.nPupils)
    rtc.d_centro[-1].set_npix(p_wfs.npix)

    # Number of slopes: two per valid subaperture (x and y), except MASKEDPIX
    # which produces one measurement per valid pixel.
    if (p_centroider.type != scons.CentroiderType.MASKEDPIX):
        p_centroider._nslope = 2 * p_wfs._nvalid
    else:
        p_centroider._nslope = p_wfs._validsubsx.size

    if (p_centroider.type == scons.CentroiderType.PYR):
        # FIXME SIGNATURE CHANGES
        rtc.d_centro[nwfs].set_pyr_method(p_centroider.method)
        rtc.d_centro[nwfs].set_pyr_thresh(p_centroider.thresh)
    elif (p_wfs.type == scons.WFSType.SH):
        if (p_centroider.type == scons.CentroiderType.TCOG):
            rtc.d_centro[nwfs].set_threshold(p_centroider.thresh)
        elif (p_centroider.type == scons.CentroiderType.BPCOG):
            rtc.d_centro[nwfs].set_nmax(p_centroider.nmax)
        elif (p_centroider.type == scons.CentroiderType.WCOG or
              p_centroider.type == scons.CentroiderType.CORR):
            # Size the weighting function from the expected seeing-limited
            # spot size at the WFS wavelength.
            r0 = p_atmos.r0 * (p_wfs.Lambda / 0.5)**(6 / 5.)
            seeing = CONST.RAD2ARCSEC * (p_wfs.Lambda * 1.e-6) / r0
            npix = seeing // p_wfs.pixsize
            comp_weights(p_centroider, p_wfs, npix)
            if p_centroider.type == scons.CentroiderType.WCOG:
                rtc.d_centro[nwfs].init_weights()
                rtc.d_centro[nwfs].load_weights(p_centroider.weights,
                                                p_centroider.weights.ndim)
            else:
                # CORR: build a 3x3 interpolation matrix and load the
                # correlation reference.
                corrnorm = np.ones((2 * p_wfs.npix, 2 * p_wfs.npix), dtype=np.float32)
                p_centroider.sizex = 3
                p_centroider.sizey = 3
                p_centroider.interpmat = rtc_util.create_interp_mat(
                        p_centroider.sizex, p_centroider.sizey).astype(np.float32)
                if (p_centroider.weights is None):
                    raise ValueError("p_centroider.weights is None")
                rtc.d_centro[nwfs].init_corr(p_centroider.sizex, p_centroider.sizey,
                                             p_centroider.interpmat)
                rtc.d_centro[nwfs].load_corr(p_centroider.weights, corrnorm,
                                             p_centroider.weights.ndim)
def comp_weights(p_centroider: conf.Param_centroider, p_wfs: conf.Param_wfs, npix: int):
    """ Compute the weights used by centroider wcog and corr

    Args:
        p_centroider : (Param_centroider) : centroider settings (weights are
                       stored in p_centroider.weights)

        p_wfs : (Param_wfs) : wfs settings

        npix: (int): expected spot size in pixels, used to size the weighting
              function
    """
    if (p_centroider.type_fct == scons.CentroiderFctType.MODEL):

        if (p_wfs.gsalt > 0):
            # LGS case: convolve the per-subaperture LGS elongation kernel
            # with a gaussian of the expected spot size.
            tmp = p_wfs._lgskern
            tmp2 = utilities.makegaussian(tmp.shape[1],
                                          npix * p_wfs._nrebin).astype(np.float32)
            tmp3 = np.zeros((tmp.shape[1], tmp.shape[1], p_wfs._nvalid),
                            dtype=np.float32)

            for j in range(p_wfs._nvalid):
                # Convolution through an FFT product; the scaling compensates
                # the ifft2 normalization.
                tmp3[:, :, j] = np.fft.ifft2(
                        np.fft.fft2(tmp[:, :, j]) * np.fft.fft2(tmp2.T)).real
                tmp3[:, :, j] *= tmp3.shape[0] * tmp3.shape[1]
                tmp3[:, :, j] = np.fft.fftshift(tmp3[:, :, j])

            # Rebin the convolved kernels down to the subaperture sampling
            # using a 2D cumulative-sum / diff scheme.
            offset = (p_wfs._Ntot - p_wfs._nrebin * p_wfs.npix) // 2
            j = offset + p_wfs._nrebin * p_wfs.npix
            tmp = np.zeros((j - offset + 1, j - offset + 1, tmp3.shape[2]),
                           dtype=np.float32)
            tmp3 = np.cumsum(tmp3[offset:j, offset:j, :], axis=0)
            tmp[1:, 1:, :] = np.cumsum(tmp3, axis=1)
            tmp = np.diff(tmp[::p_wfs._nrebin, ::p_wfs._nrebin, :], axis=0)
            tmp = np.diff(tmp, axis=1)

            p_centroider.weights = tmp
        else:
            # No LGS: fall back to a gaussian weighting function.
            p_centroider.type_fct = scons.CentroiderFctType.GAUSS
            print("No LGS found, centroider weighting function becomes gaussian")

    if (p_centroider.type_fct == scons.CentroiderFctType.GAUSS):
        if p_centroider.width is None:
            p_centroider.width = npix
        # Center the gaussian on the subaperture: on the exact center pixel
        # for odd sizes (and for CORR), between pixels (-0.5) otherwise.
        if (p_wfs.npix % 2 == 1):
            p_centroider.weights = utilities.makegaussian(
                    p_wfs.npix, p_centroider.width, p_wfs.npix // 2,
                    p_wfs.npix // 2).astype(np.float32)
        elif (p_centroider.type == scons.CentroiderType.CORR):
            p_centroider.weights = utilities.makegaussian(
                    p_wfs.npix, p_centroider.width, p_wfs.npix // 2,
                    p_wfs.npix // 2).astype(np.float32)
        else:
            p_centroider.weights = utilities.makegaussian(
                    p_wfs.npix, p_centroider.width, p_wfs.npix // 2 - 0.5,
                    p_wfs.npix // 2 - 0.5).astype(np.float32)
def init_controller(context, i: int, p_controller: conf.Param_controller, p_wfss: list,
                    p_geom: conf.Param_geom, p_dms: list, p_atmos: conf.Param_atmos,
                    ittime: float, p_tel: conf.Param_tel, rtc: Rtc, dms: Dms,
                    wfs: Sensors, tel: Telescope, atmos: Atmos,
                    p_centroiders: List[conf.Param_centroider], do_refslp=False,
                    dataBase={}, use_DB=False):
    """ Initialize the controller part of rtc

    Args:
        context: (carmaWrap_context): context

        i : (int) : controller index

        p_controller: (Param_controller) : controller settings

        p_wfss: (list of Param_wfs) : wfs settings

        p_geom: (Param_geom) : geom settings

        p_dms: (list of Param_dms) : dms settings

        p_atmos: (Param_atmos) : atmos settings

        ittime: (float) : iteration time [s]

        p_tel: (Param_tel) : telescope settings

        rtc: (Rtc) : Rtc objet

        dms: (Dms) : Dms object

        wfs: (Sensors) : Sensors object

        tel: (Telescope) : Telescope object

        atmos: (Atmos) : Atmos object

        p_centroiders: (list of Param_centroider): centroiders settings

    Kwargs:
        do_refslp: (bool): do the reference slopes at startup,

        dataBase: (dict): database used
            (NOTE(review): mutable default argument — shared across calls)

        use_DB: (bool): use database or not
    """
    if (p_controller.type != scons.ControllerType.GEO):
        nwfs = p_controller.nwfs
        if (len(p_wfss) == 1):
            nwfs = p_controller.nwfs
            # TODO fixing a bug ... still not understood
        # Total number of valid subapertures seen by this controller.
        p_controller.set_nvalid(int(np.sum([p_wfss[k]._nvalid for k in nwfs])))
        # Sum the slopes of the centroiders attached to this controller's WFSs.
        tmp = 0
        for c in p_centroiders:
            if (c.nwfs in nwfs):
                tmp = tmp + c._nslope
        p_controller.set_nslope(int(tmp))
    else:
        # GEO controller: every centroider contributes.
        nslope = np.sum([c._nslope for c in p_centroiders])
        p_controller.set_nslope(int(nslope))

    # parameter for add_controller(_geo)
    ndms = p_controller.ndm.tolist()
    nactu = np.sum([p_dms[j]._ntotact for j in ndms])
    p_controller.set_nactu(int(nactu))

    alt = np.array([p_dms[j].alt for j in p_controller.ndm], dtype=np.float32)  # NOTE(review): computed but unused below
    list_dmseen = [p_dms[j].type for j in p_controller.ndm]  # NOTE(review): unused as well
    if (p_controller.type == scons.ControllerType.GEO):
        # Number of pupil points needed by the geometric controller.
        Nphi = np.where(p_geom._spupil)[0].size
    else:
        Nphi = -1

    #nslope = np.sum([c._nslope for c in p_centroiders])
    #p_controller.set_nslope(int(nslope))

    nmodes = 0
    if(p_controller.nmodes is not None):
        nmodes = p_controller.nmodes
    if (p_controller.type == scons.ControllerType.GENERIC_LINEAR):
        configure_generic_linear(p_controller)
        nmodes = p_controller.nmodes

    #TODO : find a proper way to set the number of slope (other than 2 times nvalid)
    rtc.add_controller(context, p_controller.type, context.active_device,p_controller.delay,
                       p_controller.nslope, p_controller.nactu, p_controller.nslope_buffer,
                       p_controller.nstates, p_controller.nstate_buffer, nmodes,
                       p_controller.n_iir_in, p_controller.n_iir_out,
                       p_controller.polc, p_controller.modal, dms, p_controller.ndm,
                       p_controller.ndm.size, p_controller.nwfs, p_controller.nwfs.size, Nphi, False)
    print("CONTROLLER ADDED")
    if (p_wfss is not None and do_refslp):
        rtc.do_centroids_ref(i)

    # Controller-type specific initialization.
    if (p_controller.type == scons.ControllerType.GEO):
        init_controller_geo(i, rtc, dms, p_geom, p_controller, p_dms)

    if (p_controller.type == scons.ControllerType.LS):
        init_controller_ls(i, p_controller, p_wfss, p_geom, p_dms, p_atmos, ittime,
                           p_tel, rtc, dms, wfs, tel, atmos, dataBase=dataBase,
                           use_DB=use_DB)

    if (p_controller.type == scons.ControllerType.CURED):
        init_controller_cured(i, rtc, p_controller, p_dms, p_wfss)

    if (p_controller.type == scons.ControllerType.MV):
        init_controller_mv(i, p_controller, p_wfss, p_geom, p_dms, p_atmos, p_tel, rtc,
                           dms, wfs, atmos)

    elif (p_controller.type == scons.ControllerType.GENERIC):
        init_controller_generic(i, p_controller, p_dms, rtc)

    # Best-effort: some configurations cannot compute the geometric
    # interaction matrix.
    # NOTE(review): bare except hides the real failure cause — consider
    # narrowing to the expected exception type.
    try:
        p_controller._imat = imats.imat_geom(wfs, dms, p_wfss, p_dms, p_controller,
                                             meth=0)
    except:
        print("p_controller._imat not set")
def init_controller_geo(i: int, rtc: Rtc, dms: Dms, p_geom: conf.Param_geom,
                        p_controller: conf.Param_controller, p_dms: list, roket=False):
    """ Initialize geometric controller

    Args:
        i: (int): controller index

        rtc: (Rtc): rtc object

        dms: (Dms): Dms object

        p_geom: (Param_geom): geometry settings

        p_controller: (Param_controller): controller settings

        p_dms: (list of Param_dms): dms settings

    Kwargs
        roket: (bool): Flag to initialize ROKET
    """
    # Indices of the illuminated points in the small and medium pupils,
    # flattened column-major ('F').
    indx_pup = np.where(p_geom._spupil.flatten('F'))[0].astype(np.int32)
    indx_mpup = np.where(p_geom._mpupil.flatten('F'))[0].astype(np.int32)
    cpt = 0
    indx_dm = np.zeros((p_controller.ndm.size * indx_pup.size), dtype=np.int32)
    # NOTE(review): dmn is used to index p_dms directly rather than via
    # p_controller.ndm[dmn]; presumably this assumes ndm == [0, 1, ...] —
    # confirm against callers.
    for dmn in range(p_controller.ndm.size):
        # Crop the full pupil to the support of this DM (_n1.._n2), centered.
        tmp_s = (p_geom._ipupil.shape[0] - (p_dms[dmn]._n2 - p_dms[dmn]._n1 + 1)) // 2
        tmp_e0 = p_geom._ipupil.shape[0] - tmp_s
        tmp_e1 = p_geom._ipupil.shape[1] - tmp_s
        pup_dm = p_geom._ipupil[tmp_s:tmp_e0, tmp_s:tmp_e1]
        indx_dm[cpt:cpt + np.where(pup_dm)[0].size] = np.where(pup_dm.flatten('F'))[0]
        cpt += np.where(pup_dm)[0].size
    # convert unitpervolt list to a np.ndarray
    unitpervolt = np.array([p_dms[j].unitpervolt
                            for j in range(len(p_dms))], dtype=np.float32)

    rtc.d_control[i].init_proj_sparse(dms, indx_dm, unitpervolt, indx_pup, indx_mpup,
                                      roket=roket)
def init_controller_ls(i: int, p_controller: conf.Param_controller, p_wfss: list,
                       p_geom: conf.Param_geom, p_dms: list, p_atmos: conf.Param_atmos,
                       ittime: float, p_tel: conf.Param_tel, rtc: Rtc, dms: Dms,
                       wfs: Sensors, tel: Telescope, atmos: Atmos, dataBase: dict = {},
                       use_DB: bool = False):
    """ Initialize the least square controller

    Args:
        i : (int) : controller index

        p_controller: (Param_controller) : controller settings

        p_wfss: (list of Param_wfs) : wfs settings

        p_geom: (Param_geom) : geom settings

        p_dms: (list of Param_dms) : dms settings

        p_atmos: (Param_atmos) : atmos settings

        ittime: (float) : iteration time [s]

        p_tel: (Param_tel) : telescope settings

        rtc: (Rtc) : Rtc objet

        dms: (Dms) : Dms object

        wfs: (Sensors) : Sensors object

        tel: (Telescope) : Telescope object

        atmos: (Atmos) : Atmos object

    Kwargs:
        dataBase: (dict): database used
            (NOTE(review): mutable default argument — shared across calls)

        use_DB: (bool): use database or not
    """
    M2V = None
    if p_controller.do_kl_imat:
        # Build a modal (Btt-like) basis from the DM influence functions;
        # the last two columns hold tip/tilt.
        IF = basis.compute_IFsparse(dms, p_dms, p_geom).T
        M2V, _ = basis.compute_btt(IF[:, :-2], IF[:, -2:].toarray())
        print("Filtering ", p_controller.nModesFilt, " modes based on mode ordering")
        # Keep the first modes plus tip/tilt, dropping nModesFilt high-order ones.
        M2V = M2V[:, list(range(M2V.shape[1] - 2 - p_controller.nModesFilt)) + [-2, -1]]

        if len(p_controller.klpush) == 1:  # Scalar allowed, now we expand
            p_controller.klpush = p_controller.klpush[0] * np.ones(M2V.shape[1])
    imats.imat_init(i, rtc, dms, p_dms, wfs, p_wfss, p_tel, p_controller, M2V,
                    dataBase=dataBase, use_DB=use_DB)

    if p_controller.modopti:
        print("Initializing Modal Optimization : ")
        # Round the open-loop record length up to a power of two.
        p_controller.nrec = int(2**np.ceil(np.log2(p_controller.nrec)))
        if p_controller.nmodes is None:
            p_controller.nmodes = sum([p_dms[j]._ntotact for j in range(len(p_dms))])
        IF = basis.compute_IFsparse(dms, p_dms, p_geom).T
        M2V, _ = basis.compute_btt(IF[:, :-2], IF[:, -2:].toarray())
        M2V = M2V[:, list(range(p_controller.nmodes - 2)) + [-2, -1]]
        rtc.d_control[i].init_modalOpti(p_controller.nmodes, p_controller.nrec, M2V,
                                        p_controller.gmin, p_controller.gmax,
                                        p_controller.ngain, 1. / ittime)
        # Record open-loop slopes, then optimize the modal gains from them.
        ol_slopes = modopti.open_loopSlp(tel, atmos, wfs, rtc, p_controller.nrec, i,
                                         p_wfss)
        rtc.d_control[i].loadopen_loopSlp(ol_slopes)
        rtc.d_control[i].modalControlOptimization()
    else:
        cmats.cmat_init(i, rtc, p_controller, p_wfss, p_atmos, p_tel, p_dms,
                        nmodes=p_controller.nmodes)

        rtc.d_control[i].set_gain(p_controller.gain)
        # Per-actuator modal gains, taken from each DM's own gain setting.
        mgain = np.ones(
                sum([p_dms[j]._ntotact for j in range(len(p_dms))]), dtype=np.float32)
        cc = 0
        for ndm in p_dms:
            mgain[cc:cc + ndm._ntotact] = ndm.gain
            cc += ndm._ntotact

        rtc.d_control[i].set_modal_gains(mgain)
def init_controller_cured(i: int, rtc: Rtc, p_controller: conf.Param_controller,
                          p_dms: list, p_wfss: list):
    """ Initialize the CURED controller

    Args:
        i : (int) : controller index

        rtc: (Rtc) : Rtc objet

        p_controller: (Param_controller) : controller settings

        p_dms: (list of Param_dms) : dms settings

        p_wfss: (list of Param_wfs) : wfs settings
    """
    print("initializing cured controller")

    # CURED needs to know whether a tip-tilt mirror is among the DMs.
    tt_flag = any(p_dm.type == scons.DmType.TT for p_dm in p_dms)

    rtc.d_control[i].init_cured(p_wfss[0].nxsub, p_wfss[0]._isvalid,
                                p_controller.cured_ndivs, tt_flag)
    rtc.d_control[i].set_gain(p_controller.gain)
def init_controller_mv(i: int, p_controller: conf.Param_controller, p_wfss: list,
                       p_geom: conf.Param_geom, p_dms: list, p_atmos: conf.Param_atmos,
                       p_tel: conf.Param_tel, rtc: Rtc, dms: Dms, wfs: Sensors,
                       atmos: Atmos):
    """ Initialize the MV controller

    Args:
        i : (int) : controller index

        p_controller: (Param_controller) : controller settings

        p_wfss: (list of Param_wfs) : wfs settings

        p_geom: (Param_geom) : geom settings

        p_dms: (list of Param_dms) : dms settings

        p_atmos: (Param_atmos) : atmos settings

        p_tel: (Param_tel) : telescope settings

        rtc: (Rtc) : Rtc objet

        dms: (Dms) : Dms object

        wfs: (Sensors) : Sensors object

        atmos: (Atmos) : Atmos object
    """
    # Geometric interaction matrix for this controller.
    p_controller._imat = imats.imat_geom(wfs, dms, p_wfss, p_dms, p_controller)

    controller = rtc.d_control[i]
    controller.set_imat(p_controller._imat)
    controller.set_gain(p_controller.gain)

    # Uniform modal gains over every DM actuator.
    total_actu = sum(p_dm._ntotact for p_dm in p_dms)
    controller.set_modal_gains(np.ones(total_actu, dtype=np.float32))

    # Turbulence covariance matrices, then the command matrix.
    tomo.do_tomo_matrices(i, rtc, p_wfss, dms, atmos, wfs, p_controller, p_geom, p_dms,
                          p_tel, p_atmos)
    cmats.cmat_init(i, rtc, p_controller, p_wfss, p_atmos, p_tel, p_dms)
def init_controller_generic(i: int, p_controller: conf.Param_controller, p_dms: list,
                            rtc: Rtc):
    """ Initialize the generic controller

    Args:
        i: (int): controller index

        p_controller: (Param_controller): controller settings

        p_dms: (list of Param_dm): dms settings

        rtc: (Rtc): Rtc object
    """
    # Total number of actuators over every DM.
    total_actu = sum(p_dm._ntotact for p_dm in p_dms)

    # Neutral defaults: unit decay, uniform modal gains scaled by the loop
    # gain, identity state matrix and an all-zero command matrix.
    decay = np.ones(total_actu, dtype=np.float32)
    modal_gains = np.ones(total_actu, dtype=np.float32) * p_controller.gain
    state_matrix = np.identity(total_actu, dtype=np.float32)
    empty_cmat = np.zeros((total_actu, p_controller.nslope), dtype=np.float32)

    controller = rtc.d_control[i]
    if p_controller.command_law is not None:
        controller.set_commandlaw(p_controller.command_law)
    controller.set_decayFactor(decay)
    controller.set_modal_gains(modal_gains)
    controller.set_cmat(empty_cmat)
    controller.set_matE(state_matrix)
def configure_generic_linear(p_controller: conf.Param_controller):
    """ Configure a GENERIC_LINEAR controller from its set parameters.

    Fills in defaults in place: a non-modal controller (or one without an
    explicit number of modes) controls one "mode" per actuator, and without
    a state buffer the state vector simply holds the modes.

    Args:
        p_controller: (Param_controller): controller settings (updated in place)
    """
    needs_default_modes = (not p_controller.get_modal()
                           or p_controller.get_nmodes() is None)
    if needs_default_modes:
        p_controller.set_nmodes(p_controller.get_nactu())

    if p_controller.get_nstate_buffer() == 0:
        p_controller.set_nstates(p_controller.get_nmodes())
|
ANR-COMPASS/shesha
|
shesha/init/rtc_init.py
|
Python
|
gpl-3.0
| 27,942
|
[
"Gaussian"
] |
34617da23210c4e0c12964ec19645fed5ae0ebb5539261547d5d04ecd4d5e2ca
|
# ============================================================================
#
# Copyright (C) 2007-2012 Conceptive Engineering bvba. All rights reserved.
# www.conceptive.be / project-camelot@conceptive.be
#
# This file is part of the Camelot Library.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file license.txt included in the packaging of
# this file. Please review this information to ensure GNU
# General Public Licensing requirements will be met.
#
# If you are unsure which license is appropriate for your use, please
# visit www.python-camelot.com or contact project-camelot@conceptive.be
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# For use of this library in commercial applications, please contact
# project-camelot@conceptive.be
#
# ============================================================================
"""Camelot is a python GUI framework on top of Elixir / Sqlalchemy inspired by
the Django admin interface. Start building applications at warp speed, simply
by adding some additional information to you Elixir model."""
__version__ = '12.06.29'
|
jeroendierckx/Camelot
|
camelot/__init__.py
|
Python
|
gpl-2.0
| 1,317
|
[
"VisIt"
] |
6b08c533955082cae8fa7eef4bb5dc2f4f0449eb69d89179b8d2f20acae3c92d
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import mock
import time
import unittest
import logging
import functools
from nose.tools import * # noqa: F403
import pytest
from framework.auth.core import Auth
from website import settings
import website.search.search as search
from website.search import elastic_search
from website.search.util import build_query
from website.search_migration.migrate import migrate
from osf.models import (
Retraction,
NodeLicense,
OSFGroup,
Tag,
Preprint,
QuickFilesNode,
)
from addons.wiki.models import WikiPage
from addons.osfstorage.models import OsfStorageFile
from scripts.populate_institutions import main as populate_institutions
from osf_tests import factories
from tests.base import OsfTestCase
from tests.test_features import requires_search
from tests.utils import run_celery_tasks
TEST_INDEX = 'test'
def query(term, raw=False):
    """Search *term* against the test elasticsearch index and return results."""
    return search.search(build_query(term), index=elastic_search.INDEX, raw=raw)
def query_collections(name):
    """Raw search for collection submissions matching *name*."""
    return query('category:collectionSubmission AND "{}"'.format(name), raw=True)
def query_user(name):
    """Search for user documents matching *name*."""
    return query('category:user AND "{}"'.format(name))
def query_file(name):
    """Search for file documents matching *name*."""
    return query('category:file AND "{}"'.format(name))
def query_tag_file(name):
    """Search for file documents carrying tag *name*."""
    return query('category:file AND (tags:u"{}")'.format(name))
def retry_assertion(interval=0.3, retries=3):
    """Decorator (factory) that retries a test on AssertionError.

    Useful for assertions against an eventually-consistent search index:
    on failure the decorated callable is re-run up to *retries* more times,
    sleeping *interval* seconds between attempts, before the final
    AssertionError propagates.

    Supports both parameterized and bare usage::

        @retry_assertion(interval=0.1, retries=5)
        def test_x(): ...

        @retry_assertion
        def test_y(): ...

    Args:
        interval (float): seconds to sleep between attempts (or, in bare
            usage, the function being decorated).
        retries (int): number of additional attempts after the first.
    """
    # Bare usage (@retry_assertion without parentheses) passes the function
    # itself as `interval`. Previously this silently returned a wrapper
    # expecting the function as argument, so the test never actually ran.
    if callable(interval):
        return retry_assertion()(interval)

    def test_wrapper(func):
        t_interval = interval
        t_retries = retries

        @functools.wraps(func)
        def wrapped(*args, **kwargs):
            try:
                func(*args, **kwargs)
            except AssertionError:
                if t_retries:
                    time.sleep(t_interval)
                    # Re-decorate with one fewer retry and run again.
                    retry_assertion(interval=t_interval,
                                    retries=t_retries - 1)(func)(*args, **kwargs)
                else:
                    raise
        return wrapped
    return test_wrapper
@pytest.mark.enable_search
@pytest.mark.enable_enqueue_task
class TestCollectionsSearch(OsfTestCase):
    """Indexing rules for collection submissions.

    A submission appears in search only while the collected object AND its
    collection are public; deleting either side, or removing the object from
    the collection, takes it back out of the index.
    """

    def setUp(self):
        super(TestCollectionsSearch, self).setUp()
        # Start every test from an empty search index.
        search.delete_index(elastic_search.INDEX)
        search.create_index(elastic_search.INDEX)
        self.user = factories.UserFactory(fullname='Salif Keita')
        # Nodes and registrations in both public and private variants.
        self.node_private = factories.NodeFactory(creator=self.user, title='Salif Keita: Madan', is_public=False)
        self.node_public = factories.NodeFactory(creator=self.user, title='Salif Keita: Yamore', is_public=True)
        self.node_one = factories.NodeFactory(creator=self.user, title='Salif Keita: Mandjou', is_public=True)
        self.node_two = factories.NodeFactory(creator=self.user, title='Salif Keita: Tekere', is_public=True)
        self.reg_private = factories.RegistrationFactory(title='Salif Keita: Madan', creator=self.user, is_public=False, archive=True)
        self.reg_public = factories.RegistrationFactory(title='Salif Keita: Madan', creator=self.user, is_public=True, archive=True)
        self.reg_one = factories.RegistrationFactory(title='Salif Keita: Madan', creator=self.user, is_public=True, archive=True)
        # Collections (and a registration provider) in public/private variants.
        self.provider = factories.CollectionProviderFactory()
        self.reg_provider = factories.RegistrationProviderFactory()
        self.collection_one = factories.CollectionFactory(creator=self.user, is_public=True, provider=self.provider)
        self.collection_public = factories.CollectionFactory(creator=self.user, is_public=True, provider=self.provider)
        self.collection_private = factories.CollectionFactory(creator=self.user, is_public=False, provider=self.provider)
        self.reg_collection = factories.CollectionFactory(creator=self.user, provider=self.reg_provider, is_public=True)
        self.reg_collection_private = factories.CollectionFactory(creator=self.user, provider=self.reg_provider, is_public=False)

    def test_only_public_collections_submissions_are_searchable(self):
        docs = query_collections('Salif Keita')['results']
        assert_equal(len(docs), 0)
        # Private objects collected into public collections: not indexed.
        self.collection_public.collect_object(self.node_private, self.user)
        self.reg_collection.collect_object(self.reg_private, self.user)
        docs = query_collections('Salif Keita')['results']
        assert_equal(len(docs), 0)
        assert_false(self.node_one.is_collected)
        assert_false(self.node_public.is_collected)
        # Public objects in public collections: indexed.
        self.collection_one.collect_object(self.node_one, self.user)
        self.collection_public.collect_object(self.node_public, self.user)
        self.reg_collection.collect_object(self.reg_public, self.user)
        assert_true(self.node_one.is_collected)
        assert_true(self.node_public.is_collected)
        assert_true(self.reg_public.is_collected)
        docs = query_collections('Salif Keita')['results']
        assert_equal(len(docs), 3)
        # Public objects in private collections: still not indexed.
        self.collection_private.collect_object(self.node_two, self.user)
        self.reg_collection_private.collect_object(self.reg_one, self.user)
        docs = query_collections('Salif Keita')['results']
        assert_equal(len(docs), 3)

    def test_index_on_submission_privacy_changes(self):
        # test_submissions_turned_private_are_deleted_from_index
        docs = query_collections('Salif Keita')['results']
        assert_equal(len(docs), 0)
        self.collection_public.collect_object(self.node_one, self.user)
        self.collection_one.collect_object(self.node_one, self.user)
        docs = query_collections('Salif Keita')['results']
        assert_equal(len(docs), 2)
        with run_celery_tasks():
            self.node_one.is_public = False
            self.node_one.save()
        docs = query_collections('Salif Keita')['results']
        assert_equal(len(docs), 0)
        # test_submissions_turned_public_are_added_to_index
        self.collection_public.collect_object(self.node_private, self.user)
        docs = query_collections('Salif Keita')['results']
        assert_equal(len(docs), 0)
        with run_celery_tasks():
            self.node_private.is_public = True
            self.node_private.save()
        docs = query_collections('Salif Keita')['results']
        assert_equal(len(docs), 1)

    def test_index_on_collection_privacy_changes(self):
        # test_submissions_of_collection_turned_private_are_removed_from_index
        docs = query_collections('Salif Keita')['results']
        assert_equal(len(docs), 0)
        self.collection_public.collect_object(self.node_one, self.user)
        self.collection_public.collect_object(self.node_two, self.user)
        self.collection_public.collect_object(self.node_public, self.user)
        self.reg_collection.collect_object(self.reg_public, self.user)
        docs = query_collections('Salif Keita')['results']
        assert_equal(len(docs), 4)
        with run_celery_tasks():
            self.collection_public.is_public = False
            self.collection_public.save()
            self.reg_collection.is_public = False
            self.reg_collection.save()
        docs = query_collections('Salif Keita')['results']
        assert_equal(len(docs), 0)
        # test_submissions_of_collection_turned_public_are_added_to_index
        self.collection_private.collect_object(self.node_one, self.user)
        self.collection_private.collect_object(self.node_two, self.user)
        self.collection_private.collect_object(self.node_public, self.user)
        self.reg_collection_private.collect_object(self.reg_public, self.user)
        assert_true(self.node_one.is_collected)
        assert_true(self.node_two.is_collected)
        assert_true(self.node_public.is_collected)
        assert_true(self.reg_public.is_collected)
        docs = query_collections('Salif Keita')['results']
        assert_equal(len(docs), 0)
        with run_celery_tasks():
            self.collection_private.is_public = True
            self.collection_private.save()
            self.reg_collection.is_public = True
            self.reg_collection.save()
        docs = query_collections('Salif Keita')['results']
        assert_equal(len(docs), 4)

    def test_collection_submissions_are_removed_from_index_on_delete(self):
        docs = query_collections('Salif Keita')['results']
        assert_equal(len(docs), 0)
        self.collection_public.collect_object(self.node_one, self.user)
        self.collection_public.collect_object(self.node_two, self.user)
        self.collection_public.collect_object(self.node_public, self.user)
        self.reg_collection.collect_object(self.reg_public, self.user)
        docs = query_collections('Salif Keita')['results']
        assert_equal(len(docs), 4)
        # Deleting the collections must clear their submissions from search.
        self.collection_public.delete()
        self.reg_collection.delete()
        assert_true(self.collection_public.deleted)
        assert_true(self.reg_collection.deleted)
        docs = query_collections('Salif Keita')['results']
        assert_equal(len(docs), 0)

    def test_removed_submission_are_removed_from_index(self):
        self.collection_public.collect_object(self.node_one, self.user)
        self.reg_collection.collect_object(self.reg_public, self.user)
        assert_true(self.node_one.is_collected)
        assert_true(self.reg_public.is_collected)
        docs = query_collections('Salif Keita')['results']
        assert_equal(len(docs), 2)
        # Removing the objects from the collections must deindex them.
        self.collection_public.remove_object(self.node_one)
        self.reg_collection.remove_object(self.reg_public)
        assert_false(self.node_one.is_collected)
        assert_false(self.reg_public.is_collected)
        docs = query_collections('Salif Keita')['results']
        assert_equal(len(docs), 0)

    def test_collection_submission_doc_structure(self):
        # The indexed document mirrors the node's metadata and is refreshed
        # when the node changes.
        self.collection_public.collect_object(self.node_one, self.user)
        docs = query_collections('Keita')['results']
        assert_equal(docs[0]['_source']['title'], self.node_one.title)
        with run_celery_tasks():
            self.node_one.title = 'Keita Royal Family of Mali'
            self.node_one.save()
        docs = query_collections('Keita')['results']
        assert_equal(docs[0]['_source']['title'], self.node_one.title)
        assert_equal(docs[0]['_source']['abstract'], self.node_one.description)
        assert_equal(docs[0]['_source']['contributors'][0]['url'], self.user.url)
        assert_equal(docs[0]['_source']['contributors'][0]['fullname'], self.user.fullname)
        assert_equal(docs[0]['_source']['url'], self.node_one.url)
        # Doc id is "<node id>-<collection id>".
        assert_equal(docs[0]['_source']['id'], '{}-{}'.format(self.node_one._id,
                     self.node_one.collecting_metadata_list[0].collection._id))
        assert_equal(docs[0]['_source']['category'], 'collectionSubmission')

    def test_search_updated_after_id_change(self):
        self.provider.primary_collection.collect_object(self.node_one, self.node_one.creator)
        with run_celery_tasks():
            self.node_one.save()
        term = f'provider:{self.provider._id}'
        docs = search.search(build_query(term), index=elastic_search.INDEX, raw=True)
        assert_equal(len(docs['results']), 1)
        # Changing the provider id must reindex the submission under the new id.
        self.provider._id = 'new_id'
        self.provider.save()
        docs = query(f'provider:new_id', raw=True)['results']
        assert_equal(len(docs), 1)
@pytest.mark.enable_search
@pytest.mark.enable_enqueue_task
class TestUserUpdate(OsfTestCase):
    """Indexing behaviour of user documents: creation, confirmation, renames,
    disabling, merging, and the profile fields that feed the index."""

    def setUp(self):
        super(TestUserUpdate, self).setUp()
        # Start every test from an empty search index.
        search.delete_index(elastic_search.INDEX)
        search.create_index(elastic_search.INDEX)
        self.user = factories.UserFactory(fullname='David Bowie')

    def test_new_user(self):
        # Verify that user has been added to Elastic Search
        docs = query_user(self.user.fullname)['results']
        assert_equal(len(docs), 1)

    def test_new_user_unconfirmed(self):
        # Unconfirmed users stay out of the index until email confirmation.
        user = factories.UnconfirmedUserFactory()
        docs = query_user(user.fullname)['results']
        assert_equal(len(docs), 0)
        token = user.get_confirmation_token(user.username)
        user.confirm_email(token)
        user.save()
        docs = query_user(user.fullname)['results']
        assert_equal(len(docs), 1)

    # NOTE(review): @retry_assertion is applied without parentheses here —
    # verify the decorator supports bare usage.
    @retry_assertion
    def test_change_name(self):
        # Add a user, change her name, and verify that only the new name is
        # found in search.
        user = factories.UserFactory(fullname='Barry Mitchell')
        fullname_original = user.fullname
        user.fullname = user.fullname[::-1]
        user.save()
        docs_original = query_user(fullname_original)['results']
        assert_equal(len(docs_original), 0)
        docs_current = query_user(user.fullname)['results']
        assert_equal(len(docs_current), 1)

    def test_disabled_user(self):
        # Test that disabled users are not in search index
        user = factories.UserFactory(fullname='Bettie Page')
        user.save()
        # Ensure user is in search index
        assert_equal(len(query_user(user.fullname)['results']), 1)
        # Disable the user
        user.is_disabled = True
        user.save()
        # Ensure user is not in search index
        assert_equal(len(query_user(user.fullname)['results']), 0)

    @pytest.mark.enable_quickfiles_creation
    def test_merged_user(self):
        # Merging removes the merged-in account from the index.
        user = factories.UserFactory(fullname='Annie Lennox')
        merged_user = factories.UserFactory(fullname='Lisa Stansfield')
        user.save()
        merged_user.save()
        assert_equal(len(query_user(user.fullname)['results']), 1)
        assert_equal(len(query_user(merged_user.fullname)['results']), 1)
        user.merge_user(merged_user)
        assert_equal(len(query_user(user.fullname)['results']), 1)
        assert_equal(len(query_user(merged_user.fullname)['results']), 0)

    def test_employment(self):
        # Job institutions become searchable once added to the profile.
        user = factories.UserFactory(fullname='Helga Finn')
        user.save()
        institution = 'Finn\'s Fine Filers'
        docs = query_user(institution)['results']
        assert_equal(len(docs), 0)
        user.jobs.append({
            'institution': institution,
            'title': 'The Big Finn',
        })
        user.save()
        docs = query_user(institution)['results']
        assert_equal(len(docs), 1)

    def test_education(self):
        # School institutions become searchable once added to the profile.
        user = factories.UserFactory(fullname='Henry Johnson')
        user.save()
        institution = 'Henry\'s Amazing School!!!'
        docs = query_user(institution)['results']
        assert_equal(len(docs), 0)
        user.schools.append({
            'institution': institution,
            'degree': 'failed all classes',
        })
        user.save()
        docs = query_user(institution)['results']
        assert_equal(len(docs), 1)

    def test_name_fields(self):
        # Each name component (given, middle, family, suffix) is indexed
        # and resolves back to the same user.
        names = ['Bill Nye', 'William', 'the science guy', 'Sanford', 'the Great']
        user = factories.UserFactory(fullname=names[0])
        user.given_name = names[1]
        user.middle_names = names[2]
        user.family_name = names[3]
        user.suffix = names[4]
        user.save()
        docs = [query_user(name)['results'] for name in names]
        assert_equal(sum(map(len, docs)), len(docs))  # 1 result each
        assert_true(all([user._id == doc[0]['id'] for doc in docs]))
@pytest.mark.enable_search
@pytest.mark.enable_enqueue_task
class TestProject(OsfTestCase):
    """Project indexing: private projects stay hidden, public ones show up."""

    def setUp(self):
        super(TestProject, self).setUp()
        # Reset the search index before each test.
        search.delete_index(elastic_search.INDEX)
        search.create_index(elastic_search.INDEX)
        self.user = factories.UserFactory(fullname='John Deacon')
        self.project = factories.ProjectFactory(title='Red Special', creator=self.user)

    def test_new_project_private(self):
        # A freshly created private project must not appear in search.
        results = query(self.project.title)['results']
        assert_equal(len(results), 0)

    def test_make_public(self):
        # Switching the project to public must add it to the index.
        with run_celery_tasks():
            self.project.set_privacy('public')
        results = query(self.project.title)['results']
        assert_equal(len(results), 1)
@pytest.mark.enable_search
@pytest.mark.enable_enqueue_task
class TestOSFGroup(OsfTestCase):
    """Indexing behaviour of OSF groups: creation, renaming, membership, and
    their association with (and removal from) projects."""

    def setUp(self):
        with run_celery_tasks():
            super(TestOSFGroup, self).setUp()
            # Start every test from an empty search index.
            search.delete_index(elastic_search.INDEX)
            search.create_index(elastic_search.INDEX)
            self.user = factories.UserFactory(fullname='John Deacon')
            self.user_two = factories.UserFactory(fullname='Grapes McGee')
            self.group = OSFGroup(
                name='Cornbread',
                creator=self.user,
            )
            self.group.save()
            self.project = factories.ProjectFactory(is_public=True, creator=self.user, title='Biscuits')
            self.project.save()

    def test_create_osf_group(self):
        # A newly saved group is searchable by its name.
        title = 'Butter'
        group = OSFGroup(name=title, creator=self.user)
        group.save()
        docs = query(title)['results']
        assert_equal(len(docs), 1)

    def test_set_group_name(self):
        # Renaming reindexes under the new name and drops the old one.
        title = 'Eggs'
        self.group.set_group_name(title)
        self.group.save()
        docs = query(title)['results']
        assert_equal(len(docs), 1)
        docs = query('Cornbread')['results']
        assert_equal(len(docs), 0)

    def test_add_member(self):
        # Members (and managers) are searchable on the group document;
        # removed members are not.
        self.group.make_member(self.user_two)
        docs = query('category:group AND "{}"'.format(self.user_two.fullname))['results']
        assert_equal(len(docs), 1)
        self.group.make_manager(self.user_two)
        docs = query('category:group AND "{}"'.format(self.user_two.fullname))['results']
        assert_equal(len(docs), 1)
        self.group.remove_member(self.user_two)
        docs = query('category:group AND "{}"'.format(self.user_two.fullname))['results']
        assert_equal(len(docs), 0)

    def test_connect_to_node(self):
        # Attaching a group to a project exposes the group name on the
        # project document; detaching removes it.
        self.project.add_osf_group(self.group)
        docs = query('category:project AND "{}"'.format(self.group.name))['results']
        assert_equal(len(docs), 1)
        self.project.remove_osf_group(self.group)
        docs = query('category:project AND "{}"'.format(self.group.name))['results']
        assert_equal(len(docs), 0)

    def test_remove_group(self):
        # Deleting the group removes it from both project docs and search.
        group_name = self.group.name
        self.project.add_osf_group(self.group)
        docs = query('category:project AND "{}"'.format(group_name))['results']
        assert_equal(len(docs), 1)
        self.group.remove_group()
        docs = query('category:project AND "{}"'.format(group_name))['results']
        assert_equal(len(docs), 0)
        docs = query(group_name)['results']
        assert_equal(len(docs), 0)
@pytest.mark.enable_search
@pytest.mark.enable_enqueue_task
class TestPreprint(OsfTestCase):
    """Verify that preprint lifecycle events keep the Elasticsearch index in sync.

    Fixtures: ``self.preprint`` is an unpublished preprint with an
    unattached file ``self.file``; ``self.published_preprint`` is fully
    published, so its document (plus its primary file's) is indexed.
    """
    def setUp(self):
        with run_celery_tasks():
            super(TestPreprint, self).setUp()
            # Start from an empty index for deterministic result counts.
            search.delete_index(elastic_search.INDEX)
            search.create_index(elastic_search.INDEX)
            self.user = factories.UserFactory(fullname='John Deacon')
            self.preprint = Preprint(
                title='Red Special',
                description='We are the champions',
                creator=self.user,
                provider=factories.PreprintProviderFactory()
            )
            self.preprint.save()
            self.file = OsfStorageFile.create(
                target=self.preprint,
                path='/panda.txt',
                name='panda.txt',
                materialized_path='/panda.txt')
            self.file.save()
            self.published_preprint = factories.PreprintFactory(
                creator=self.user,
                title='My Fairy King',
                description='Under pressure',
            )
    def test_new_preprint_unsubmitted(self):
        # Verify that an unsubmitted preprint is not present in Elastic Search.
        title = 'Apple'
        self.preprint.title = title
        self.preprint.save()
        docs = query(title)['results']
        assert_equal(len(docs), 0)
    def test_new_preprint_unpublished(self):
        # Verify that an unpublished preprint is not present in Elastic Search.
        title = 'Banana'
        self.preprint = factories.PreprintFactory(creator=self.user, is_published=False, title=title)
        assert self.preprint.title == title
        docs = query(title)['results']
        assert_equal(len(docs), 0)
    def test_unsubmitted_preprint_primary_file(self):
        # Unpublished preprint's primary_file not showing up in Elastic Search
        title = 'Cantaloupe'
        self.preprint.title = title
        self.preprint.set_primary_file(self.file, auth=Auth(self.user), save=True)
        assert self.preprint.title == title
        docs = query(title)['results']
        assert_equal(len(docs), 0)
    def test_publish_preprint(self):
        title = 'Date'
        self.preprint = factories.PreprintFactory(creator=self.user, is_published=False, title=title)
        self.preprint.set_published(True, auth=Auth(self.preprint.creator), save=True)
        assert self.preprint.title == title
        docs = query(title)['results']
        # Both preprint and primary_file showing up in Elastic
        assert_equal(len(docs), 2)
    def test_preprint_title_change(self):
        # Re-titling re-indexes under the new title and drops the old one.
        title_original = self.published_preprint.title
        new_title = 'New preprint title'
        self.published_preprint.set_title(new_title, auth=Auth(self.user), save=True)
        docs = query('category:preprint AND ' + title_original)['results']
        assert_equal(len(docs), 0)
        docs = query('category:preprint AND ' + new_title)['results']
        assert_equal(len(docs), 1)
    def test_preprint_description_change(self):
        # Changing the abstract re-indexes under the new description.
        # FIX: removed a dead `query(self.published_preprint.title)` call
        # whose result was immediately overwritten by the next line.
        description_original = self.published_preprint.description
        new_abstract = 'My preprint abstract'
        self.published_preprint.set_description(new_abstract, auth=Auth(self.user), save=True)
        docs = query('category:preprint AND ' + description_original)['results']
        assert_equal(len(docs), 0)
        docs = query('category:preprint AND ' + new_abstract)['results']
        assert_equal(len(docs), 1)
    def test_set_preprint_private(self):
        # Not currently an option for users, but can be used for spam
        self.published_preprint.set_privacy('private', auth=Auth(self.user), save=True)
        docs = query(self.published_preprint.title)['results']
        # FIX: the old comment claimed "Both preprint and primary_file showing
        # up in Elastic" — a private preprint must be absent from the index.
        assert_equal(len(docs), 0)
    def test_set_primary_file(self):
        # Only primary_file should be in index, if primary_file is changed, other files are removed from index.
        self.file = OsfStorageFile.create(
            target=self.published_preprint,
            path='/panda.txt',
            name='panda.txt',
            materialized_path='/panda.txt')
        self.file.save()
        self.published_preprint.set_primary_file(self.file, auth=Auth(self.user), save=True)
        docs = query(self.published_preprint.title)['results']
        assert_equal(len(docs), 2)
        assert_equal(docs[1]['name'], self.file.name)
    def test_set_license(self):
        # License metadata is carried on the indexed document.
        license_details = {
            'id': 'NONE',
            'year': '2015',
            'copyrightHolders': ['Iron Man']
        }
        title = 'Elderberry'
        self.published_preprint.title = title
        self.published_preprint.set_preprint_license(license_details, Auth(self.user), save=True)
        assert self.published_preprint.title == title
        docs = query(title)['results']
        assert_equal(len(docs), 2)
        assert_equal(docs[0]['license']['copyright_holders'][0], 'Iron Man')
        assert_equal(docs[0]['license']['name'], 'No license')
    def test_add_tags(self):
        tags = ['stonecoldcrazy', 'just a poor boy', 'from-a-poor-family']
        for tag in tags:
            docs = query('tags:"{}"'.format(tag))['results']
            assert_equal(len(docs), 0)
            self.published_preprint.add_tag(tag, Auth(self.user), save=True)
        for tag in tags:
            docs = query('tags:"{}"'.format(tag))['results']
            assert_equal(len(docs), 1)
    def test_remove_tag(self):
        tags = ['stonecoldcrazy', 'just a poor boy', 'from-a-poor-family']
        for tag in tags:
            self.published_preprint.add_tag(tag, Auth(self.user), save=True)
            self.published_preprint.remove_tag(tag, Auth(self.user), save=True)
            docs = query('tags:"{}"'.format(tag))['results']
            assert_equal(len(docs), 0)
    def test_add_contributor(self):
        # Add a contributor, then verify that project is found when searching
        # for contributor.
        user2 = factories.UserFactory(fullname='Adam Lambert')
        docs = query('category:preprint AND "{}"'.format(user2.fullname))['results']
        assert_equal(len(docs), 0)
        self.published_preprint.add_contributor(user2, save=True)
        docs = query('category:preprint AND "{}"'.format(user2.fullname))['results']
        assert_equal(len(docs), 1)
    def test_remove_contributor(self):
        # Add and remove a contributor, then verify that project is not found
        # when searching for contributor.
        user2 = factories.UserFactory(fullname='Brian May')
        self.published_preprint.add_contributor(user2, save=True)
        self.published_preprint.remove_contributor(user2, Auth(self.user))
        docs = query('category:preprint AND "{}"'.format(user2.fullname))['results']
        assert_equal(len(docs), 0)
    def test_hide_contributor(self):
        # Non-visible contributors must not be searchable on the document.
        user2 = factories.UserFactory(fullname='Brian May')
        self.published_preprint.add_contributor(user2)
        self.published_preprint.set_visible(user2, False, save=True)
        docs = query('category:preprint AND "{}"'.format(user2.fullname))['results']
        assert_equal(len(docs), 0)
        self.published_preprint.set_visible(user2, True, save=True)
        docs = query('category:preprint AND "{}"'.format(user2.fullname))['results']
        assert_equal(len(docs), 1)
    def test_move_contributor(self):
        # Reordering contributors must be reflected in the indexed list.
        user2 = factories.UserFactory(fullname='Brian May')
        self.published_preprint.add_contributor(user2, save=True)
        docs = query('category:preprint AND "{}"'.format(user2.fullname))['results']
        assert_equal(len(docs), 1)
        # FIX: these four comparisons were bare `==` expressions whose results
        # were discarded, so the ordering was never actually verified.
        assert_equal(docs[0]['contributors'][0]['fullname'], self.user.fullname)
        assert_equal(docs[0]['contributors'][1]['fullname'], user2.fullname)
        self.published_preprint.move_contributor(user2, Auth(self.user), 0)
        docs = query('category:preprint AND "{}"'.format(user2.fullname))['results']
        assert_equal(len(docs), 1)
        assert_equal(docs[0]['contributors'][0]['fullname'], user2.fullname)
        assert_equal(docs[0]['contributors'][1]['fullname'], self.user.fullname)
    def test_tag_aggregation(self):
        # Tag facet buckets must contain exactly the tags added.
        tags = ['stonecoldcrazy', 'just a poor boy', 'from-a-poor-family']
        for tag in tags:
            self.published_preprint.add_tag(tag, Auth(self.user), save=True)
        docs = query(self.published_preprint.title)['tags']
        assert len(docs) == 3
        for doc in docs:
            assert doc['key'] in tags
@pytest.mark.enable_search
@pytest.mark.enable_enqueue_task
class TestNodeSearch(OsfTestCase):
    """Verify that node licenses are indexed and propagated to child components."""
    def setUp(self):
        super(TestNodeSearch, self).setUp()
        with run_celery_tasks():
            self.node = factories.ProjectFactory(is_public=True, title='node')
            self.public_child = factories.ProjectFactory(parent=self.node, is_public=True, title='public_child')
            self.private_child = factories.ProjectFactory(parent=self.node, title='private_child')
            self.public_subchild = factories.ProjectFactory(parent=self.private_child, is_public=True)
            self.node.node_license = factories.NodeLicenseRecordFactory()
            self.node.save()
        # NOTE(review): '&' appears to be a literal token of the query string
        # rather than a boolean operator — confirm intended query syntax.
        self.query = 'category:project & category:component'
    @retry_assertion()
    def test_node_license_added_to_search(self):
        # The root node's document should carry its license id.
        docs = query(self.query)['results']
        node = [d for d in docs if d['title'] == self.node.title][0]
        assert_in('license', node)
        assert_equal(node['license']['id'], self.node.node_license.license_id)
    @unittest.skip('Elasticsearch latency seems to be causing theses tests to fail randomly.')
    @retry_assertion(retries=10)
    def test_node_license_propogates_to_children(self):
        # Children without their own license inherit the parent's license id
        # in their indexed documents.
        docs = query(self.query)['results']
        child = [d for d in docs if d['title'] == self.public_child.title][0]
        assert_in('license', child)
        assert_equal(child['license'].get('id'), self.node.node_license.license_id)
        child = [d for d in docs if d['title'] == self.public_subchild.title][0]
        assert_in('license', child)
        assert_equal(child['license'].get('id'), self.node.node_license.license_id)
    @unittest.skip('Elasticsearch latency seems to be causing theses tests to fail randomly.')
    @retry_assertion(retries=10)
    def test_node_license_updates_correctly(self):
        # Swapping the license re-indexes every matching document.
        other_license = NodeLicense.objects.get(name='MIT License')
        new_license = factories.NodeLicenseRecordFactory(node_license=other_license)
        self.node.node_license = new_license
        self.node.save()
        docs = query(self.query)['results']
        for doc in docs:
            assert_equal(doc['license'].get('id'), new_license.license_id)
@pytest.mark.enable_search
@pytest.mark.enable_enqueue_task
class TestRegistrationRetractions(OsfTestCase):
    """Verify how pending vs. approved retractions affect registration search visibility."""
    def setUp(self):
        super(TestRegistrationRetractions, self).setUp()
        self.user = factories.UserFactory(fullname='Doug Bogie')
        self.title = 'Red Special'
        self.consolidate_auth = Auth(user=self.user)
        self.project = factories.ProjectFactory(
            title=self.title,
            description='',
            creator=self.user,
            is_public=True,
        )
        self.registration = factories.RegistrationFactory(project=self.project, is_public=True)
    @mock.patch('osf.models.registrations.Registration.archiving', mock.PropertyMock(return_value=False))
    def test_retraction_is_searchable(self):
        # An approved, completed retraction keeps the registration itself
        # findable (only its content visibility changes).
        self.registration.retract_registration(self.user)
        self.registration.retraction.state = Retraction.APPROVED
        self.registration.retraction.save()
        self.registration.save()
        self.registration.retraction._on_complete(self.user)
        docs = query('category:registration AND ' + self.title)['results']
        assert_equal(len(docs), 1)
    @mock.patch('osf.models.registrations.Registration.archiving', mock.PropertyMock(return_value=False))
    def test_pending_retraction_wiki_content_is_searchable(self):
        # Add unique string to wiki
        wiki_content = {'home': 'public retraction test'}
        for key, value in wiki_content.items():
            docs = query(value)['results']
            assert_equal(len(docs), 0)
            with run_celery_tasks():
                WikiPage.objects.create_for_node(self.registration, key, value, self.consolidate_auth)
            # Query and ensure unique string shows up
            docs = query(value)['results']
            assert_equal(len(docs), 1)
            # Query and ensure registration does show up
            docs = query('category:registration AND ' + self.title)['results']
            assert_equal(len(docs), 1)
        # Retract registration
        self.registration.retract_registration(self.user, '')
        with run_celery_tasks():
            self.registration.save()
            self.registration.reload()
        # While the retraction is only *pending* (not approved), the wiki
        # content remains searchable.  (The old comment, copied from the
        # approved-retraction test below, wrongly said it "doesn't show up".)
        docs = query('category:registration AND "{}"'.format(wiki_content['home']))['results']
        assert_equal(len(docs), 1)
        # Query and ensure registration does show up
        docs = query('category:registration AND ' + self.title)['results']
        assert_equal(len(docs), 1)
    @mock.patch('osf.models.registrations.Registration.archiving', mock.PropertyMock(return_value=False))
    def test_retraction_wiki_content_is_not_searchable(self):
        # Add unique string to wiki
        wiki_content = {'home': 'public retraction test'}
        for key, value in wiki_content.items():
            docs = query(value)['results']
            assert_equal(len(docs), 0)
            with run_celery_tasks():
                WikiPage.objects.create_for_node(self.registration, key, value, self.consolidate_auth)
            # Query and ensure unique string shows up
            docs = query(value)['results']
            assert_equal(len(docs), 1)
            # Query and ensure registration does show up
            docs = query('category:registration AND ' + self.title)['results']
            assert_equal(len(docs), 1)
        # Retract registration
        self.registration.retract_registration(self.user, '')
        self.registration.retraction.state = Retraction.APPROVED
        with run_celery_tasks():
            self.registration.retraction.save()
            self.registration.save()
            self.registration.update_search()
        # Query and ensure unique string in wiki doesn't show up
        docs = query('category:registration AND "{}"'.format(wiki_content['home']))['results']
        assert_equal(len(docs), 0)
        # Query and ensure registration does show up
        docs = query('category:registration AND ' + self.title)['results']
        assert_equal(len(docs), 1)
@pytest.mark.enable_search
@pytest.mark.enable_enqueue_task
class TestPublicNodes(OsfTestCase):
    """Verify project/component/registration visibility, metadata, wiki, tag
    and contributor changes are reflected in Elasticsearch.

    All three fixture nodes share the same title ('Red Special') so that
    category-qualified queries can distinguish them.
    """
    def setUp(self):
        with run_celery_tasks():
            super(TestPublicNodes, self).setUp()
            self.user = factories.UserFactory(fullname='Doug Bogie')
            self.title = 'Red Special'
            self.consolidate_auth = Auth(user=self.user)
            self.project = factories.ProjectFactory(
                title=self.title,
                description='',
                creator=self.user,
                is_public=True,
            )
            self.component = factories.NodeFactory(
                parent=self.project,
                description='',
                title=self.title,
                creator=self.user,
                is_public=True
            )
            self.registration = factories.RegistrationFactory(
                title=self.title,
                description='',
                creator=self.user,
                is_public=True,
            )
            # Mark the archive job finished so the registration is indexable.
            self.registration.archive_job.target_addons.clear()
            self.registration.archive_job.status = 'SUCCESS'
            self.registration.archive_job.save()
    def test_make_private(self):
        # Make project public, then private, and verify that it is not present
        # in search.
        with run_celery_tasks():
            self.project.set_privacy('private')
        docs = query('category:project AND ' + self.title)['results']
        assert_equal(len(docs), 0)
        with run_celery_tasks():
            self.component.set_privacy('private')
        docs = query('category:component AND ' + self.title)['results']
        assert_equal(len(docs), 0)
    def test_search_node_partial(self):
        # A single word from a hyphenated title matches.
        self.project.set_title('Blue Rider-Express', self.consolidate_auth)
        with run_celery_tasks():
            self.project.save()
        find = query('Blue')['results']
        assert_equal(len(find), 1)
    def test_search_node_partial_with_sep(self):
        # The word after the hyphen separator also matches.
        self.project.set_title('Blue Rider-Express', self.consolidate_auth)
        with run_celery_tasks():
            self.project.save()
        find = query('Express')['results']
        assert_equal(len(find), 1)
    def test_search_node_not_name(self):
        # Unrelated words return nothing.
        self.project.set_title('Blue Rider-Express', self.consolidate_auth)
        with run_celery_tasks():
            self.project.save()
        find = query('Green Flyer-Slow')['results']
        assert_equal(len(find), 0)
    def test_public_parent_title(self):
        # Component documents embed their (public) parent's title and URL.
        self.project.set_title('hello & world', self.consolidate_auth)
        with run_celery_tasks():
            self.project.save()
        docs = query('category:component AND ' + self.title)['results']
        assert_equal(len(docs), 1)
        assert_equal(docs[0]['parent_title'], 'hello & world')
        assert_true(docs[0]['parent_url'])
    def test_make_parent_private(self):
        # Make parent of component, public, then private, and verify that the
        # component still appears but doesn't link to the parent in search.
        with run_celery_tasks():
            self.project.set_privacy('private')
        docs = query('category:component AND ' + self.title)['results']
        assert_equal(len(docs), 1)
        assert_false(docs[0]['parent_title'])
        assert_false(docs[0]['parent_url'])
    def test_delete_project(self):
        # Removed nodes disappear from the index.
        with run_celery_tasks():
            self.component.remove_node(self.consolidate_auth)
        docs = query('category:component AND ' + self.title)['results']
        assert_equal(len(docs), 0)
        with run_celery_tasks():
            self.project.remove_node(self.consolidate_auth)
        docs = query('category:project AND ' + self.title)['results']
        assert_equal(len(docs), 0)
    def test_change_title(self):
        # Re-titling re-indexes under the new title only.
        title_original = self.project.title
        with run_celery_tasks():
            self.project.set_title(
                'Blue Ordinary', self.consolidate_auth, save=True
            )
        docs = query('category:project AND ' + title_original)['results']
        assert_equal(len(docs), 0)
        docs = query('category:project AND ' + self.project.title)['results']
        assert_equal(len(docs), 1)
    def test_add_tags(self):
        tags = ['stonecoldcrazy', 'just a poor boy', 'from-a-poor-family']
        with run_celery_tasks():
            for tag in tags:
                docs = query('tags:"{}"'.format(tag))['results']
                assert_equal(len(docs), 0)
                self.project.add_tag(tag, self.consolidate_auth, save=True)
        for tag in tags:
            docs = query('tags:"{}"'.format(tag))['results']
            assert_equal(len(docs), 1)
    def test_remove_tag(self):
        tags = ['stonecoldcrazy', 'just a poor boy', 'from-a-poor-family']
        for tag in tags:
            self.project.add_tag(tag, self.consolidate_auth, save=True)
            self.project.remove_tag(tag, self.consolidate_auth, save=True)
            docs = query('tags:"{}"'.format(tag))['results']
            assert_equal(len(docs), 0)
    def test_update_wiki(self):
        """Add text to a wiki page, then verify that project is found when
        searching for wiki text.
        """
        wiki_content = {
            'home': 'Hammer to fall',
            'swag': '#YOLO'
        }
        for key, value in wiki_content.items():
            docs = query(value)['results']
            assert_equal(len(docs), 0)
            with run_celery_tasks():
                WikiPage.objects.create_for_node(self.project, key, value, self.consolidate_auth)
            docs = query(value)['results']
            assert_equal(len(docs), 1)
    def test_clear_wiki(self):
        # Add wiki text to page, then delete, then verify that project is not
        # found when searching for wiki text.
        wiki_content = 'Hammer to fall'
        wp = WikiPage.objects.create_for_node(self.project, 'home', wiki_content, self.consolidate_auth)
        with run_celery_tasks():
            wp.update(self.user, '')
        docs = query(wiki_content)['results']
        assert_equal(len(docs), 0)
    def test_add_contributor(self):
        # Add a contributor, then verify that project is found when searching
        # for contributor.
        user2 = factories.UserFactory(fullname='Adam Lambert')
        docs = query('category:project AND "{}"'.format(user2.fullname))['results']
        assert_equal(len(docs), 0)
        with run_celery_tasks():
            self.project.add_contributor(user2, save=True)
        docs = query('category:project AND "{}"'.format(user2.fullname))['results']
        assert_equal(len(docs), 1)
    def test_remove_contributor(self):
        # Add and remove a contributor, then verify that project is not found
        # when searching for contributor.
        user2 = factories.UserFactory(fullname='Brian May')
        self.project.add_contributor(user2, save=True)
        self.project.remove_contributor(user2, self.consolidate_auth)
        docs = query('category:project AND "{}"'.format(user2.fullname))['results']
        assert_equal(len(docs), 0)
    def test_hide_contributor(self):
        # Non-visible contributors must not be searchable on the project.
        user2 = factories.UserFactory(fullname='Brian May')
        self.project.add_contributor(user2)
        with run_celery_tasks():
            self.project.set_visible(user2, False, save=True)
        docs = query('category:project AND "{}"'.format(user2.fullname))['results']
        assert_equal(len(docs), 0)
        with run_celery_tasks():
            self.project.set_visible(user2, True, save=True)
        docs = query('category:project AND "{}"'.format(user2.fullname))['results']
        assert_equal(len(docs), 1)
    def test_wrong_order_search(self):
        # Word order must not matter: all three fixture nodes (project,
        # component, registration) share the title and should match.
        title_parts = self.title.split(' ')
        title_parts.reverse()
        title_search = ' '.join(title_parts)
        docs = query(title_search)['results']
        assert_equal(len(docs), 3)
    def test_tag_aggregation(self):
        # Tag facet buckets must contain exactly the tags added.
        tags = ['stonecoldcrazy', 'just a poor boy', 'from-a-poor-family']
        with run_celery_tasks():
            for tag in tags:
                self.project.add_tag(tag, self.consolidate_auth, save=True)
        docs = query(self.title)['tags']
        assert len(docs) == 3
        for doc in docs:
            assert doc['key'] in tags
@pytest.mark.enable_search
@pytest.mark.enable_enqueue_task
class TestAddContributor(OsfTestCase):
    # Tests of the search.search_contributor method
    def setUp(self):
        # name3/name4 contain non-ASCII bytes to exercise special-character
        # matching.  Only name1 and name3 are registered as users; name2 and
        # name4 serve as "no match" controls throughout.
        self.name1 = 'Roger1 Taylor1'
        self.name2 = 'John2 Deacon2'
        self.name3 = u'j\xc3\xb3ebert3 Smith3'
        self.name4 = u'B\xc3\xb3bbert4 Jones4'
        with run_celery_tasks():
            super(TestAddContributor, self).setUp()
            self.user = factories.UserFactory(fullname=self.name1)
            self.user3 = factories.UserFactory(fullname=self.name3)
    def test_unreg_users_dont_show_in_search(self):
        # Unregistered users must not be returned as contributor candidates.
        unreg = factories.UnregUserFactory()
        contribs = search.search_contributor(unreg.fullname)
        assert_equal(len(contribs['users']), 0)
    def test_unreg_users_do_show_on_projects(self):
        # ...but a project created by an unregistered user is still indexed.
        with run_celery_tasks():
            unreg = factories.UnregUserFactory(fullname='Robert Paulson')
            self.project = factories.ProjectFactory(
                title='Glamour Rock',
                creator=unreg,
                is_public=True,
            )
        results = query(unreg.fullname)['results']
        assert_equal(len(results), 1)
    def test_search_fullname(self):
        # Searching for full name yields exactly one result.
        contribs = search.search_contributor(self.name1)
        assert_equal(len(contribs['users']), 1)
        contribs = search.search_contributor(self.name2)
        assert_equal(len(contribs['users']), 0)
    def test_search_firstname(self):
        # Searching for first name yields exactly one result.
        contribs = search.search_contributor(self.name1.split(' ')[0])
        assert_equal(len(contribs['users']), 1)
        contribs = search.search_contributor(self.name2.split(' ')[0])
        assert_equal(len(contribs['users']), 0)
    def test_search_partial(self):
        # Searching for part of first name yields exactly one
        # result.
        contribs = search.search_contributor(self.name1.split(' ')[0][:-1])
        assert_equal(len(contribs['users']), 1)
        contribs = search.search_contributor(self.name2.split(' ')[0][:-1])
        assert_equal(len(contribs['users']), 0)
    def test_search_fullname_special_character(self):
        # Searching for a fullname with a special character yields
        # exactly one result.
        contribs = search.search_contributor(self.name3)
        assert_equal(len(contribs['users']), 1)
        contribs = search.search_contributor(self.name4)
        assert_equal(len(contribs['users']), 0)
    def test_search_firstname_special_charcter(self):
        # Searching for a first name with a special character yields
        # exactly one result.
        contribs = search.search_contributor(self.name3.split(' ')[0])
        assert_equal(len(contribs['users']), 1)
        contribs = search.search_contributor(self.name4.split(' ')[0])
        assert_equal(len(contribs['users']), 0)
    def test_search_partial_special_character(self):
        # Searching for a partial name with a special character yields
        # exctly one result.
        contribs = search.search_contributor(self.name3.split(' ')[0][:-1])
        assert_equal(len(contribs['users']), 1)
        contribs = search.search_contributor(self.name4.split(' ')[0][:-1])
        assert_equal(len(contribs['users']), 0)
    def test_search_profile(self):
        # Social-profile identifiers (e.g. ORCID) are searchable and are
        # returned as links on the matching user.
        orcid = '123456'
        user = factories.UserFactory()
        user.social['orcid'] = orcid
        user.save()
        contribs = search.search_contributor(orcid)
        assert_equal(len(contribs['users']), 1)
        assert_equal(len(contribs['users'][0]['social']), 1)
        assert_equal(contribs['users'][0]['social']['orcid'], user.social_links['orcid'])
@pytest.mark.enable_search
@pytest.mark.enable_enqueue_task
class TestProjectSearchResults(OsfTestCase):
    """Verify stemming: singular, plural and possessive title forms all match each other."""
    def setUp(self):
        self.singular = 'Spanish Inquisition'
        self.plural = 'Spanish Inquisitions'
        self.possessive = 'Spanish\'s Inquisition'
        with run_celery_tasks():
            super(TestProjectSearchResults, self).setUp()
            self.user = factories.UserFactory(fullname='Doug Bogie')
            self.project_singular = factories.ProjectFactory(
                title=self.singular,
                creator=self.user,
                is_public=True,
            )
            self.project_plural = factories.ProjectFactory(
                title=self.plural,
                creator=self.user,
                is_public=True,
            )
            self.project_possessive = factories.ProjectFactory(
                title=self.possessive,
                creator=self.user,
                is_public=True,
            )
            # Control project that must not match any of the queries below.
            self.project_unrelated = factories.ProjectFactory(
                title='Cardinal Richelieu',
                creator=self.user,
                is_public=True,
            )
    def test_singular_query(self):
        # Verify searching for singular term includes singular,
        # possessive and plural versions in results.
        # NOTE(review): sleep presumably waits out Elasticsearch indexing
        # latency — consider a retry helper instead of a fixed delay.
        time.sleep(1)
        results = query(self.singular)['results']
        assert_equal(len(results), 3)
    def test_plural_query(self):
        # Verify searching for singular term includes singular,
        # possessive and plural versions in results.
        results = query(self.plural)['results']
        assert_equal(len(results), 3)
    def test_possessive_query(self):
        # Verify searching for possessive term includes singular,
        # possessive and plural versions in results.
        results = query(self.possessive)['results']
        assert_equal(len(results), 3)
def job(**kwargs):
    """Build a job/employment dict for ``UserFactory`` fixtures.

    Every known field gets a default — ``'December'`` for ``*Month`` fields,
    ``'2000'`` for ``*Year`` fields, and ``'test_<field>'`` for everything
    else — and any keyword argument overrides its field's default.
    """
    fields = (
        'title',
        'institution',
        'department',
        'location',
        'startMonth',
        'startYear',
        'endMonth',
        'endYear',
        'ongoing',
    )
    def _default(name):
        # Default value for a field not supplied by the caller.
        if name.endswith('Month'):
            return 'December'
        if name.endswith('Year'):
            return '2000'
        return 'test_{}'.format(name)
    return {name: kwargs.get(name, _default(name)) for name in fields}
@pytest.mark.enable_search
@pytest.mark.enable_enqueue_task
class TestUserSearchResults(OsfTestCase):
    """Verify recall (and, when enabled, ranking) of user search over job-history institutions."""
    def setUp(self):
        with run_celery_tasks():
            super(TestUserSearchResults, self).setUp()
            self.user_one = factories.UserFactory(jobs=[job(institution='Oxford'),
                                                        job(institution='Star Fleet')],
                                                  fullname='Date Soong')
            self.user_two = factories.UserFactory(jobs=[job(institution='Grapes la Picard'),
                                                        job(institution='Star Fleet')],
                                                  fullname='Jean-Luc Picard')
            self.user_three = factories.UserFactory(jobs=[job(institution='Star Fleet'),
                                                          job(institution='Federation Medical')],
                                                    fullname='Beverly Crusher')
            self.user_four = factories.UserFactory(jobs=[job(institution='Star Fleet')],
                                                   fullname='William Riker')
            self.user_five = factories.UserFactory(jobs=[job(institution='Traveler intern'),
                                                         job(institution='Star Fleet Academy'),
                                                         job(institution='Star Fleet Intern')],
                                                   fullname='Wesley Crusher')
            # 25 noise users with unrelated default jobs.
            for i in range(25):
                factories.UserFactory(jobs=[job()])
        # Users whose first-listed job is 'Star Fleet' — presumably the
        # first entry is treated as the current job (TODO: confirm).
        self.current_starfleet = [
            self.user_three,
            self.user_four,
        ]
        # All users with 'Star Fleet' anywhere in their job history.
        self.were_starfleet = [
            self.user_one,
            self.user_two,
            self.user_three,
            self.user_four,
            self.user_five
        ]
    @unittest.skip('Cannot guarentee always passes')
    def test_current_job_first_in_results(self):
        # Current Star Fleet employees should rank above past ones.
        results = query_user('Star Fleet')['results']
        result_names = [r['names']['fullname'] for r in results]
        current_starfleet_names = [u.fullname for u in self.current_starfleet]
        for name in result_names[:2]:
            assert_in(name, current_starfleet_names)
    def test_had_job_in_results(self):
        # Every returned user must have Star Fleet in their job history.
        results = query_user('Star Fleet')['results']
        result_names = [r['names']['fullname'] for r in results]
        were_starfleet_names = [u.fullname for u in self.were_starfleet]
        for name in result_names:
            assert_in(name, were_starfleet_names)
@pytest.mark.enable_search
@pytest.mark.enable_enqueue_task
class TestSearchExceptions(OsfTestCase):
    # Verify that the correct exception is thrown when the connection is lost
    @classmethod
    def setUpClass(cls):
        # Silence expected error logging, then stash and null out the
        # Elasticsearch client so every search call behaves as disconnected.
        logging.getLogger('website.project.model').setLevel(logging.CRITICAL)
        super(TestSearchExceptions, cls).setUpClass()
        if settings.SEARCH_ENGINE == 'elastic':
            cls._client = search.search_engine.CLIENT
            search.search_engine.CLIENT = None
    @classmethod
    def tearDownClass(cls):
        # Restore the stashed client so later test classes can search again.
        super(TestSearchExceptions, cls).tearDownClass()
        if settings.SEARCH_ENGINE == 'elastic':
            search.search_engine.CLIENT = cls._client
    @requires_search
    def test_connection_error(self):
        # Ensures that saving projects/users doesn't break as a result of connection errors
        self.user = factories.UserFactory(fullname='Doug Bogie')
        self.project = factories.ProjectFactory(
            title='Tom Sawyer',
            creator=self.user,
            is_public=True,
        )
        self.user.save()
        self.project.save()
@pytest.mark.enable_search
@pytest.mark.enable_enqueue_task
class TestSearchMigration(OsfTestCase):
    # Verify that the correct indices are created/deleted during migration
    @classmethod
    def tearDownClass(cls):
        # Recreate the default index so subsequent test classes find it.
        super(TestSearchMigration, cls).tearDownClass()
        search.create_index(settings.ELASTIC_INDEX)
    def setUp(self):
        # Start from a freshly created index populated with institutions,
        # one public project, and one published preprint.
        super(TestSearchMigration, self).setUp()
        populate_institutions(default_args=True)
        self.es = search.search_engine.CLIENT
        search.delete_index(settings.ELASTIC_INDEX)
        search.create_index(settings.ELASTIC_INDEX)
        self.user = factories.UserFactory(fullname='David Bowie')
        self.project = factories.ProjectFactory(
            title=settings.ELASTIC_INDEX,
            creator=self.user,
            is_public=True
        )
        self.preprint = factories.PreprintFactory(
            creator=self.user
        )
    def test_first_migration_no_remove(self):
        # After one migration the index alias should point at '_v1'.
        migrate(delete=False, remove=False, index=settings.ELASTIC_INDEX, app=self.app.app)
        var = self.es.indices.get_aliases()
        assert_equal(list(var[settings.ELASTIC_INDEX + '_v1']['aliases'].keys())[0], settings.ELASTIC_INDEX)
    def test_multiple_migrations_no_remove(self):
        # With remove=False each migration creates '_v{n}' and the alias
        # follows the newest versioned index.
        for n in range(1, 21):
            migrate(delete=False, remove=False, index=settings.ELASTIC_INDEX, app=self.app.app)
            var = self.es.indices.get_aliases()
            assert_equal(list(var[settings.ELASTIC_INDEX + '_v{}'.format(n)]['aliases'].keys())[0], settings.ELASTIC_INDEX)
    def test_first_migration_with_remove(self):
        migrate(delete=False, remove=True, index=settings.ELASTIC_INDEX, app=self.app.app)
        var = self.es.indices.get_aliases()
        assert_equal(list(var[settings.ELASTIC_INDEX + '_v1']['aliases'].keys())[0], settings.ELASTIC_INDEX)
    def test_multiple_migrations_with_remove(self):
        # With remove=True the previous versioned index is deleted after the
        # next migration, so after migrating to '_v{n+1}' the '_v{n}' index
        # must be gone.  The step-2 loop runs two migrations per iteration.
        for n in range(1, 21, 2):
            migrate(delete=False, remove=True, index=settings.ELASTIC_INDEX, app=self.app.app)
            var = self.es.indices.get_aliases()
            assert_equal(list(var[settings.ELASTIC_INDEX + '_v{}'.format(n)]['aliases'].keys())[0], settings.ELASTIC_INDEX)
            migrate(delete=False, remove=True, index=settings.ELASTIC_INDEX, app=self.app.app)
            var = self.es.indices.get_aliases()
            assert_equal(list(var[settings.ELASTIC_INDEX + '_v{}'.format(n + 1)]['aliases'].keys())[0], settings.ELASTIC_INDEX)
            assert not var.get(settings.ELASTIC_INDEX + '_v{}'.format(n))
    def test_migration_institutions(self):
        # A full migrate(delete=True) must preserve institution documents:
        # aggregate document counts by type and look for an 'institution'
        # bucket.
        migrate(delete=True, index=settings.ELASTIC_INDEX, app=self.app.app)
        count_query = {}
        count_query['aggregations'] = {
            'counts': {
                'terms': {
                    'field': '_type',
                }
            }
        }
        institution_bucket_found = False
        res = self.es.search(index=settings.ELASTIC_INDEX, doc_type=None, search_type='count', body=count_query)
        for bucket in res['aggregations']['counts']['buckets']:
            if bucket['key'] == u'institution':
                institution_bucket_found = True
        assert_equal(institution_bucket_found, True)
    def test_migration_collections(self):
        # Collection-submission documents (one per collection the node is in)
        # must survive a full migrate(delete=True).
        provider = factories.CollectionProviderFactory()
        collection_one = factories.CollectionFactory(is_public=True, provider=provider)
        collection_two = factories.CollectionFactory(is_public=True, provider=provider)
        node = factories.NodeFactory(creator=self.user, title='Ali Bomaye', is_public=True)
        collection_one.collect_object(node, self.user)
        collection_two.collect_object(node, self.user)
        assert node.is_collected
        docs = query_collections('*')['results']
        assert len(docs) == 2
        docs = query_collections('Bomaye')['results']
        assert len(docs) == 2
        count_query = {}
        count_query['aggregations'] = {
            'counts': {
                'terms': {
                    'field': '_type',
                }
            }
        }
        migrate(delete=True, index=settings.ELASTIC_INDEX, app=self.app.app)
        docs = query_collections('*')['results']
        assert len(docs) == 2
        docs = query_collections('Bomaye')['results']
        assert len(docs) == 2
        res = self.es.search(index=settings.ELASTIC_INDEX, doc_type='collectionSubmission', search_type='count', body=count_query)
        assert res['hits']['total'] == 2
@pytest.mark.enable_search
@pytest.mark.enable_enqueue_task
class TestSearchFiles(OsfTestCase):
    def setUp(self):
        # Create a public project and keep a handle on its osfstorage root
        # folder, to which the tests append files.
        super(TestSearchFiles, self).setUp()
        self.node = factories.ProjectFactory(is_public=True, title='Otis')
        self.osf_storage = self.node.get_addon('osfstorage')
        self.root = self.osf_storage.get_root()
def test_search_file(self):
self.root.append_file('Shake.wav')
find = query_file('Shake.wav')['results']
assert_equal(len(find), 1)
def test_search_file_name_without_separator(self):
self.root.append_file('Shake.wav')
find = query_file('Shake')['results']
assert_equal(len(find), 1)
def test_delete_file(self):
file_ = self.root.append_file('I\'ve Got Dreams To Remember.wav')
find = query_file('I\'ve Got Dreams To Remember.wav')['results']
assert_equal(len(find), 1)
file_.delete()
find = query_file('I\'ve Got Dreams To Remember.wav')['results']
assert_equal(len(find), 0)
def test_add_tag(self):
file_ = self.root.append_file('That\'s How Strong My Love Is.mp3')
tag = Tag(name='Redding')
tag.save()
file_.tags.add(tag)
file_.save()
find = query_tag_file('Redding')['results']
assert_equal(len(find), 1)
def test_remove_tag(self):
file_ = self.root.append_file('I\'ve Been Loving You Too Long.mp3')
tag = Tag(name='Blue')
tag.save()
file_.tags.add(tag)
file_.save()
find = query_tag_file('Blue')['results']
assert_equal(len(find), 1)
file_.tags.remove(tag)
file_.save()
find = query_tag_file('Blue')['results']
assert_equal(len(find), 0)
def test_make_node_private(self):
self.root.append_file('Change_Gonna_Come.wav')
find = query_file('Change_Gonna_Come.wav')['results']
assert_equal(len(find), 1)
self.node.is_public = False
with run_celery_tasks():
self.node.save()
find = query_file('Change_Gonna_Come.wav')['results']
assert_equal(len(find), 0)
def test_make_private_node_public(self):
self.node.is_public = False
self.node.save()
self.root.append_file('Try a Little Tenderness.flac')
find = query_file('Try a Little Tenderness.flac')['results']
assert_equal(len(find), 0)
self.node.is_public = True
with run_celery_tasks():
self.node.save()
find = query_file('Try a Little Tenderness.flac')['results']
assert_equal(len(find), 1)
def test_delete_node(self):
node = factories.ProjectFactory(is_public=True, title='The Soul Album')
osf_storage = node.get_addon('osfstorage')
root = osf_storage.get_root()
root.append_file('The Dock of the Bay.mp3')
find = query_file('The Dock of the Bay.mp3')['results']
assert_equal(len(find), 1)
node.is_deleted = True
with run_celery_tasks():
node.save()
find = query_file('The Dock of the Bay.mp3')['results']
assert_equal(len(find), 0)
def test_file_download_url_guid(self):
file_ = self.root.append_file('Timber.mp3')
file_guid = file_.get_guid(create=True)
file_.save()
find = query_file('Timber.mp3')['results']
assert_equal(find[0]['guid_url'], '/' + file_guid._id + '/')
def test_file_download_url_no_guid(self):
file_ = self.root.append_file('Timber.mp3')
path = file_.path
deep_url = '/' + file_.target._id + '/files/osfstorage' + path + '/'
find = query_file('Timber.mp3')['results']
assert_not_equal(file_.path, '')
assert_equal(file_.path, path)
assert_equal(find[0]['guid_url'], None)
assert_equal(find[0]['deep_url'], deep_url)
@pytest.mark.enable_quickfiles_creation
def test_quickfiles_files_appear_in_search(self):
quickfiles = QuickFilesNode.objects.get(creator=self.node.creator)
quickfiles_osf_storage = quickfiles.get_addon('osfstorage')
quickfiles_root = quickfiles_osf_storage.get_root()
quickfiles_root.append_file('GreenLight.mp3')
find = query_file('GreenLight.mp3')['results']
assert_equal(len(find), 1)
assert find[0]['node_url'] == '/{}/quickfiles/'.format(quickfiles.creator._id)
@pytest.mark.enable_quickfiles_creation
def test_qatest_quickfiles_files_not_appear_in_search(self):
quickfiles = QuickFilesNode.objects.get(creator=self.node.creator)
quickfiles_osf_storage = quickfiles.get_addon('osfstorage')
quickfiles_root = quickfiles_osf_storage.get_root()
file = quickfiles_root.append_file('GreenLight.mp3')
tag = Tag(name='qatest')
tag.save()
file.tags.add(tag)
file.save()
find = query_file('GreenLight.mp3')['results']
assert_equal(len(find), 0)
@pytest.mark.enable_quickfiles_creation
def test_quickfiles_spam_user_files_do_not_appear_in_search(self):
quickfiles = QuickFilesNode.objects.get(creator=self.node.creator)
quickfiles_osf_storage = quickfiles.get_addon('osfstorage')
quickfiles_root = quickfiles_osf_storage.get_root()
quickfiles_root.append_file('GreenLight.mp3')
self.node.creator.disable_account()
self.node.creator.confirm_spam()
self.node.creator.save()
find = query_file('GreenLight.mp3')['results']
assert_equal(len(find), 0)
|
mfraezz/osf.io
|
osf_tests/test_elastic_search.py
|
Python
|
apache-2.0
| 62,142
|
[
"Brian"
] |
c0993646c7fa74d40354c066b4355558695bd98d77fd9b40f2b23ffc4d65dd74
|
from ase import *
from ase.dft.bee import BEEF_Ensemble
from gpaw import GPAW
from gpaw.test import equal
import numpy as np

# Regression test: BEEF-vdW total energies, forces, and the BEEF
# ensemble error estimate for the H2 atomization energy.
xc = 'BEEF-vdW'
d = 0.75       # H-H bond length in Angstrom
tol1 = 1.e-10  # tolerance for force antisymmetry
tol2 = 1.e-2   # tolerance for reference energy/force values
tol3 = 1.e-1   # tolerance for the ensemble spread

# H2 molecule
h2 = Atoms('H2',[[0.,0.,0.],[0.,0.,d]])
h2.center(vacuum=2.)
cell = h2.get_cell()
calc = GPAW(xc=xc)
h2.set_calculator(calc)
e_h2 = h2.get_potential_energy()
f = h2.get_forces()
ens = BEEF_Ensemble(calc)
de_h2 = ens.get_ensemble_energies()
del h2, calc, ens

# H atom (same cell so grid/basis errors largely cancel in the difference)
h = Atoms('H')
h.set_cell(cell)
h.center()
calc = GPAW(xc=xc, spinpol=True)
h.set_calculator(calc)
e_h = h.get_potential_energy()
ens = BEEF_Ensemble(calc)
de_h = ens.get_ensemble_energies()

# forces: must be equal and opposite along the bond
f0 = f[0].sum()
f1 = f[1].sum()
equal(f0, -f1, tol1)
equal(f0, 1.044, tol2)

# binding energy and its BEEF ensemble standard deviation
E_bind = 2*e_h - e_h2
dE_bind = 2*de_h[:] - de_h2[:]
dE_bind = np.std(dE_bind)
equal(E_bind, 5.126, tol2)
equal(dE_bind, 0.2, tol3)
|
robwarm/gpaw-symm
|
gpaw/test/beefvdw.py
|
Python
|
gpl-3.0
| 901
|
[
"ASE",
"GPAW"
] |
85d584095e5a05cc0e2ff602f636e19151dfcf00e3225cb3eafa1977b8020b1c
|
import cairo
import pango
import gtk
from core.world import TheWorld
from ontology.thing import Thing
from widgets.primitives import Primitives
class NodeTree(Thing):
    """A drawable Thing that renders a tree of nodes plus the connector
    lines between parents and children, optionally caching the whole
    rendering into an offscreen surface for faster repaints."""

    def __init__(self, root_node):
        Thing.__init__(self)
        self.root_node = root_node
        # Layout cursors advanced/retreated while recursively placing nodes.
        self.x_offset = 0.0
        self.y_offset = 0.0
        self.width = 1.5
        self.height = 1.5
        # When True, render once into an offscreen surface and blit it.
        self.cache_drawing_operations = False
        self.cached_render = None
        self.cached_scale = TheWorld.scale
        # TODO - fix rotation for cached bitmaps

    def draw(self, context):
        """Draw the tree, either from the cached surface or directly."""
        Thing.draw(self, context)

        # draw a bounding box
        #Primitives.bounding_box(context, -0.5, -0.5, 1.0, 1.0)

        # set origin to top left
        #context.translate(-0.5 + (self.root_node.width / 2.0), -0.5 + (self.root_node.height / 2.0))
        context.translate(-0.5, -0.5)

        if self.cache_drawing_operations:
            # check if rendering cache needs to be refreshed
            # NOTE(review): `is not` compares float identity here, not value;
            # presumably `!=` was intended — confirm.
            if (self.cached_render is None) or (self.cached_scale is not TheWorld.scale):
                self.cached_render = self.__render_cache(context)
                self.cached_scale = TheWorld.scale
            # blast cached rendering to screen
            context.save()
            pixel_width, pixel_height = context.user_to_device_distance(self.width, self.height)
            context.scale(1.0 / pixel_width, 1.0 / pixel_height)
            context.set_source_surface(self.cached_render)
            context.paint()
            context.restore()
        else:
            # draw connectors first for z-order
            context.save()
            self.x_offset = 0
            self.y_offset = 0
            self.draw_connectors(context, self.root_node)
            self.x_offset = 0
            self.y_offset = 0
            self.draw_nodes(context, self.root_node)
            context.restore()

    def __render_cache(self, context):
        """Render the full tree into a new offscreen surface and return it."""
        pixel_width, pixel_height = context.user_to_device_distance(self.width, self.height)
        print (pixel_width, pixel_height)
        surface = context.get_target().create_similar(cairo.CONTENT_COLOR_ALPHA, int(pixel_width), int(pixel_height))
        #ctx = cairo.Context(surface)
        ctx = gtk.gdk.CairoContext(cairo.Context(surface)) # NB! pango only works with gtk.gdk.CairoContext NOT cairo.Context
        ctx.scale(pixel_width, pixel_height)
        # draw connectors first for z-order
        self.x_offset = 0
        self.y_offset = 0
        self.draw_connectors(ctx, self.root_node)
        self.x_offset = 0
        self.y_offset = 0
        self.draw_nodes(ctx, self.root_node)
        del ctx
        return surface

    def draw_connectors(self, context, node):
        """Recursively lay out node positions and draw parent-child lines.
        Also assigns node.x/node.y as a side effect of the traversal."""
        node.x = self.x_offset * 0.08
        node.y = self.y_offset * 0.04
        # draw connectors
        parents = node.parents()
        if len(parents):
            # Only the first parent is connected — presumably trees, not DAGs.
            parent_node = parents[0]
            (x1, y1) = (parent_node.center_x, parent_node.center_y)
            (x2, y2) = (node.center_x, node.center_y)
            context.set_line_width(0.001)
            context.set_source_rgb(0.0, 0.0, 1.0)
            context.move_to(x1, y1)
            context.line_to(x2, y2)
            context.stroke()
        self.x_offset += 1
        if not node.children():
            self.y_offset += 1
        for child in node.children():
            self.draw_connectors(context, child)
        self.x_offset -= 1

    def draw_nodes(self, context, node):
        """Recursively draw each node at the position chosen by
        draw_connectors (which must run first)."""
        context.save()
        # don't need to recalc - already layed out in draw_connectors
        #node.x = self.x_offset * 0.08
        #node.y = self.y_offset * 0.02
        ## draw connectors
        #parents = node.parents()
        #if len(parents):
        #parent_node = parents[0]
        #(x1, y1) = (parent_node.center_x, parent_node.center_y)
        #(x2, y2) = (node.center_x, node.center_y)
        #context.set_line_width(0.002)
        #context.set_source_rgb(0.0, 0.0, 0.5)
        #context.move_to(x1, y1)
        #context.line_to(x2, y2)
        #context.stroke()
        context.translate(node.center_x, node.center_y)
        context.scale(node.width, node.height)
        node.draw(context)
        context.restore()
        #self.x_offset += 1
        #if not node.children():
        #  self.y_offset += 1
        for child in node.children():
            self.draw_nodes(context, child)
        #self.x_offset -= 1
|
antoinevg/survival
|
widgets/asteditor/nodetree.py
|
Python
|
gpl-2.0
| 4,118
|
[
"BLAST"
] |
933b125acf6f3a03b35f209d74ab18d2896f38274dd27d2a1811f459e082aea9
|
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Peter Eastman, Robert McGibbon
# Contributors: Carlos Hernandez
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
# Portions copyright (c) 2012 Stanford University and the Authors.
#
# Portions of this code originate from the OpenMM molecular simulation
# toolkit. Those portions are Copyright 2008-2012 Stanford University
# and Peter Eastman, and distributed under the following license:
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS, CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
##############################################################################
from __future__ import print_function, division
import os
import numpy as np
import xml.etree.ElementTree as etree
from copy import copy
from mdtraj.formats.pdb.pdbstructure import PdbStructure
from mdtraj.core.topology import Topology
from mdtraj.utils import ilen, cast_indices, in_units_of
from mdtraj.formats.registry import _FormatRegistry
from mdtraj.core import element as elem
from mdtraj.utils import six
if six.PY3:
from urllib.request import urlopen
from urllib.parse import urlparse
from urllib.parse import (uses_relative, uses_netloc, uses_params)
else:
from urllib2 import urlopen
from urlparse import urlparse
from urlparse import uses_relative, uses_netloc, uses_params
_VALID_URLS = set(uses_relative + uses_netloc + uses_params)
_VALID_URLS.discard('')
__all__ = ['load_pdb', 'PDBTrajectoryFile']
##############################################################################
# Code
##############################################################################
def _is_url(url):
"""Check to see if a URL has a valid protocol.
from pandas/io.common.py Copyright 2014 Pandas Developers
Used under the BSD licence
"""
try:
return urlparse(url).scheme in _VALID_URLS
except:
return False
@_FormatRegistry.register_loader('.pdb')
def load_pdb(filename, stride=None, atom_indices=None, frame=None):
    """Load a RCSB Protein Data Bank file from disk.

    Parameters
    ----------
    filename : str
        Path to the PDB file on disk. The string could be a URL. Valid URL
        schemes include http and ftp.
    stride : int, default=None
        Only read every stride-th model from the file
    atom_indices : array_like, optional
        If not none, then read only a subset of the atoms coordinates from the
        file. These indices are zero-based (not 1 based, as used by the PDB
        format). So if you want to load only the first atom in the file, you
        would supply ``atom_indices = np.array([0])``.
    frame : int, optional
        Use this option to load only a single frame from a trajectory on disk.
        If frame is None, the default, the entire trajectory will be loaded.
        If supplied, ``stride`` will be ignored.

    Returns
    -------
    trajectory : md.Trajectory
        The resulting trajectory, as an md.Trajectory object.

    Examples
    --------
    >>> import mdtraj as md
    >>> pdb = md.load_pdb('2EQQ.pdb')
    >>> print pdb
    <mdtraj.Trajectory with 20 frames, 423 atoms at 0x110740a90>

    See Also
    --------
    mdtraj.PDBTrajectoryFile : Low level interface to PDB files
    """
    from mdtraj import Trajectory
    if not isinstance(filename, six.string_types):
        raise TypeError('filename must be of type string for load_pdb. '
                        'you supplied %s' % type(filename))

    atom_indices = cast_indices(atom_indices)
    filename = str(filename)
    with PDBTrajectoryFile(filename) as f:
        atom_slice = slice(None) if atom_indices is None else atom_indices
        if frame is not None:
            # Fancy-index with a list so coords keeps a length-1 frame axis.
            coords = f.positions[[frame], atom_slice, :]
        else:
            coords = f.positions[::stride, atom_slice, :]
        assert coords.ndim == 3, 'internal shape error'
        n_frames = len(coords)

        topology = f.topology
        if atom_indices is not None:
            topology = topology.subset(atom_indices)

        # Replicate the (single) unit cell for every frame loaded.
        if f.unitcell_angles is not None and f.unitcell_lengths is not None:
            unitcell_lengths = np.array([f.unitcell_lengths] * n_frames)
            unitcell_angles = np.array([f.unitcell_angles] * n_frames)
        else:
            unitcell_lengths = None
            unitcell_angles = None

        in_units_of(coords, f.distance_unit, Trajectory._distance_unit, inplace=True)
        in_units_of(unitcell_lengths, f.distance_unit, Trajectory._distance_unit, inplace=True)

    # Synthetic time stamps: one time unit per model in the file.
    time = np.arange(len(coords))
    if frame is not None:
        # NOTE(review): len(coords) == 1 here, so time is [0] and the
        # multiplication leaves it [0]; presumably the intent was to stamp
        # the selected frame index (e.g. np.array([frame])) — confirm.
        time *= frame
    elif stride is not None:
        time *= stride

    return Trajectory(xyz=coords, time=time, topology=topology,
                      unitcell_lengths=unitcell_lengths,
                      unitcell_angles=unitcell_angles)
@_FormatRegistry.register_fileobject('.pdb')
class PDBTrajectoryFile(object):
"""Interface for reading and writing Protein Data Bank (PDB) files
Parameters
----------
filename : str
The filename to open. A path to a file on disk.
mode : {'r', 'w'}
The mode in which to open the file, either 'r' for read or 'w' for write.
force_overwrite : bool
If opened in write mode, and a file by the name of `filename` already
exists on disk, should we overwrite it?
Attributes
----------
positions : np.ndarray, shape=(n_frames, n_atoms, 3)
topology : mdtraj.Topology
closed : bool
Notes
-----
When writing pdb files, mdtraj follows the PDB3.0 standard as closely as
possible. During *reading* however, we try to be more lenient. For instance,
we will parse common nonstandard atom names during reading, and convert them
into the standard names. The replacement table used by mdtraj is at
{mdtraj_source}/formats/pdb/data/pdbNames.xml.
See Also
--------
mdtraj.load_pdb : High-level wrapper that returns a ``md.Trajectory``
"""
distance_unit = 'angstroms'
_residueNameReplacements = {}
_atomNameReplacements = {}
_chain_names = [chr(ord('A') + i) for i in range(26)]
def __init__(self, filename, mode='r', force_overwrite=True):
    """Open a PDB file.

    In 'r' mode the whole file (all models) is parsed eagerly by
    ``_read_models``; in 'w' mode the file is created and the header and
    footer are written lazily by ``write``/``close``.
    """
    self._open = False
    self._file = None
    self._topology = None
    self._positions = None
    self._mode = mode
    # Topology of the last frame written; consumed by _write_footer()
    # to emit CONECT records.
    self._last_topology = None

    if mode == 'r':
        PDBTrajectoryFile._loadNameReplacementTables()

        if _is_url(filename):
            self._file = urlopen(filename)
            # NOTE(review): only URL sources are gunzipped here; a local
            # '.gz' path falls through to plain open() below — confirm
            # whether local gzip support was intended.
            if filename.lower().endswith('.gz'):
                import gzip
                if six.PY3:
                    self._file = gzip.GzipFile(fileobj=self._file)
                else:
                    self._file = gzip.GzipFile(fileobj=six.StringIO(
                        self._file.read()))
            if six.PY3:
                # urlopen yields bytes on py3; wrap as decoded text.
                self._file = six.StringIO(self._file.read().decode('utf-8'))
        else:
            self._file = open(filename, 'r')

        self._read_models()
    elif mode == 'w':
        self._header_written = False
        self._footer_written = False
        if os.path.exists(filename) and not force_overwrite:
            raise IOError('"%s" already exists' % filename)
        self._file = open(filename, 'w')
    else:
        raise ValueError("invalid mode: %s" % mode)

    self._open = True
def write(self, positions, topology, modelIndex=None, unitcell_lengths=None, unitcell_angles=None):
    """Write a PDB file to disk

    Parameters
    ----------
    positions : array_like
        The list of atomic positions to write.
    topology : mdtraj.Topology
        The Topology defining the model to write.
    modelIndex : {int, None}
        If not None, the model will be surrounded by MODEL/ENDMDL records
        with this index
    unitcell_lengths : {tuple, None}
        Lengths of the three unit cell vectors, or None for a non-periodic system
    unitcell_angles : {tuple, None}
        Angles between the three unit cell vectors, or None for a non-periodic system

    Raises
    ------
    ValueError
        If the file was not opened for writing, the position count does not
        match the topology, or any coordinate is NaN/infinite.
    """
    if not self._mode == 'w':
        raise ValueError('file not opened for writing')
    if not self._header_written:
        # The CRYST1 header is emitted once, before the first model.
        self._write_header(unitcell_lengths, unitcell_angles)
        self._header_written = True

    if ilen(topology.atoms) != len(positions):
        raise ValueError('The number of positions must match the number of atoms')
    if np.any(np.isnan(positions)):
        raise ValueError('Particle position is NaN')
    if np.any(np.isinf(positions)):
        raise ValueError('Particle position is infinite')

    self._last_topology = topology # Hack to save the topology of the last frame written, allows us to output CONECT entries in write_footer()

    atomIndex = 1
    posIndex = 0
    if modelIndex is not None:
        print("MODEL %4d" % modelIndex, file=self._file)
    for (chainIndex, chain) in enumerate(topology.chains):
        # Chain letters cycle through the configured set (A-Z by default).
        chainName = self._chain_names[chainIndex % len(self._chain_names)]
        residues = list(chain.residues)
        for (resIndex, res) in enumerate(residues):
            # PDB fixed columns allow at most 3 characters for residue names.
            if len(res.name) > 3:
                resName = res.name[:3]
            else:
                resName = res.name
            for atom in res.atoms:
                # Atom-name column alignment: short names of single-letter
                # elements are shifted right by one space per the PDB format.
                if len(atom.name) < 4 and atom.name[:1].isalpha() and (atom.element is None or len(atom.element.symbol) < 2):
                    atomName = ' '+atom.name
                elif len(atom.name) > 4:
                    atomName = atom.name[:4]
                else:
                    atomName = atom.name
                coords = positions[posIndex]
                if atom.element is not None:
                    symbol = atom.element.symbol
                else:
                    symbol = ' '
                # Serial numbers and resSeq wrap to fit their fixed-width fields.
                line = "ATOM %5d %-4s %3s %s%4d %s%s%s 1.00 0.00 %2s " % (
                    atomIndex % 100000, atomName, resName, chainName,
                    (res.resSeq) % 10000, _format_83(coords[0]),
                    _format_83(coords[1]), _format_83(coords[2]), symbol)
                assert len(line) == 80, 'Fixed width overflow detected'
                print(line, file=self._file)
                posIndex += 1
                atomIndex += 1
            if resIndex == len(residues)-1:
                # Chain terminator after the last residue of each chain.
                print("TER %5d %3s %s%4d" % (atomIndex, resName, chainName, resIndex+1), file=self._file)
                atomIndex += 1
    if modelIndex is not None:
        print("ENDMDL", file=self._file)
def _write_header(self, unitcell_lengths, unitcell_angles):
"""Write out the header for a PDB file.
Parameters
----------
unitcell_lengths : {tuple, None}
The lengths of the three unitcell vectors, ``a``, ``b``, ``c``
unitcell_angles : {tuple, None}
The angles between the three unitcell vectors, ``alpha``,
``beta``, ``gamma``
"""
if not self._mode == 'w':
raise ValueError('file not opened for writing')
if unitcell_lengths is None and unitcell_angles is None:
return
if unitcell_lengths is not None and unitcell_angles is not None:
if not len(unitcell_lengths) == 3:
raise ValueError('unitcell_lengths must be length 3')
if not len(unitcell_angles) == 3:
raise ValueError('unitcell_angles must be length 3')
else:
raise ValueError('either unitcell_lengths and unitcell_angles'
'should both be spefied, or neither')
box = list(unitcell_lengths) + list(unitcell_angles)
assert len(box) == 6
print("CRYST1%9.3f%9.3f%9.3f%7.2f%7.2f%7.2f P 1 1 " % tuple(box), file=self._file)
def _write_footer(self):
if not self._mode == 'w':
raise ValueError('file not opened for writing')
# Identify bonds that should be listed as CONECT records.
standardResidues = ['ALA', 'ASN', 'CYS', 'GLU', 'HIS', 'LEU', 'MET', 'PRO', 'THR', 'TYR',
'ARG', 'ASP', 'GLN', 'GLY', 'ILE', 'LYS', 'PHE', 'SER', 'TRP', 'VAL',
'A', 'G', 'C', 'U', 'I', 'DA', 'DG', 'DC', 'DT', 'DI', 'HOH']
conectBonds = []
if self._last_topology is not None:
for atom1, atom2 in self._last_topology.bonds:
if atom1.residue.name not in standardResidues or atom2.residue.name not in standardResidues:
conectBonds.append((atom1, atom2))
elif atom1.name == 'SG' and atom2.name == 'SG' and atom1.residue.name == 'CYS' and atom2.residue.name == 'CYS':
conectBonds.append((atom1, atom2))
if len(conectBonds) > 0:
# Work out the index used in the PDB file for each atom.
atomIndex = {}
nextAtomIndex = 0
prevChain = None
for chain in self._last_topology.chains:
for atom in chain.atoms:
if atom.residue.chain != prevChain:
nextAtomIndex += 1
prevChain = atom.residue.chain
atomIndex[atom] = nextAtomIndex
nextAtomIndex += 1
# Record which other atoms each atom is bonded to.
atomBonds = {}
for atom1, atom2 in conectBonds:
index1 = atomIndex[atom1]
index2 = atomIndex[atom2]
if index1 not in atomBonds:
atomBonds[index1] = []
if index2 not in atomBonds:
atomBonds[index2] = []
atomBonds[index1].append(index2)
atomBonds[index2].append(index1)
# Write the CONECT records.
for index1 in sorted(atomBonds):
bonded = atomBonds[index1]
while len(bonded) > 4:
print("CONECT%5d%5d%5d%5d" % (index1, bonded[0], bonded[1], bonded[2]), file=self._file)
del bonded[:4]
line = "CONECT%5d" % index1
for index2 in bonded:
line = "%s%5d" % (line, index2)
print(line, file=self._file)
print("END", file=self._file)
self._footer_written = True
@classmethod
def set_chain_names(cls, values):
    """Set the cycle of chain names used when writing PDB files

    When writing PDB files, PDBTrajectoryFile translates each chain's
    index into a name -- the name is what's written in the file. By
    default, chains are named with the letters A-Z.

    Parameters
    ----------
    values : list
        A list of characters (strings of length 1) that the PDB writer will
        cycle through to choose chain names.

    Raises
    ------
    TypeError
        If any item is not a single-character string.
    """
    for item in values:
        # Fix: the original condition (`not isinstance(...) and len(item) == 1`)
        # silently accepted multi-character strings and crashed with an
        # unrelated TypeError on unsized items; reject everything that is
        # not a length-1 string.
        if not (isinstance(item, six.string_types) and len(item) == 1):
            raise TypeError('Names must be a single character string')
    cls._chain_names = values
@property
def positions(self):
    """The cartesian coordinates of all of the atoms in each frame. Available when a file is opened in mode='r'
    """
    # Populated by _read_models(); shape (n_frames, n_atoms, 3).
    return self._positions

@property
def topology(self):
    """The topology from this PDB file. Available when a file is opened in mode='r'
    """
    return self._topology

@property
def unitcell_lengths(self):
    "The unitcell lengths (3-tuple) in this PDB file. May be None"
    return self._unitcell_lengths

@property
def unitcell_angles(self):
    "The unitcell angles (3-tuple) in this PDB file. May be None"
    return self._unitcell_angles

@property
def closed(self):
    "Whether the file is closed"
    return not self._open
def close(self):
    "Close the PDB file"
    if self._mode == 'w' and not self._footer_written:
        # Flush CONECT/END records exactly once before closing.
        self._write_footer()
    if self._open:
        self._file.close()
    self._open = False
def _read_models(self):
    """Parse every MODEL from the open file, building ``self._topology``
    (from the residue/atom structure) and ``self._positions`` with shape
    (n_models, n_atoms, 3), then infer bonds (standard, disulfide, CONECT).
    """
    if not self._mode == 'r':
        raise ValueError('file not opened for reading')

    self._topology = Topology()

    pdb = PdbStructure(self._file, load_all_models=True)

    atomByNumber = {}
    for chain in pdb.iter_chains():
        c = self._topology.add_chain()
        for residue in chain.iter_residues():
            resName = residue.get_name()
            # Translate nonstandard residue/atom names to canonical ones
            # using the tables loaded from pdbNames.xml.
            if resName in PDBTrajectoryFile._residueNameReplacements:
                resName = PDBTrajectoryFile._residueNameReplacements[resName]
            r = self._topology.add_residue(resName, c, residue.number)
            if resName in PDBTrajectoryFile._atomNameReplacements:
                atomReplacements = PDBTrajectoryFile._atomNameReplacements[resName]
            else:
                atomReplacements = {}
            for atom in residue.atoms:
                atomName = atom.get_name()
                if atomName in atomReplacements:
                    atomName = atomReplacements[atomName]
                atomName = atomName.strip()
                element = atom.element
                if element is None:
                    element = self._guess_element(atomName, residue)

                newAtom = self._topology.add_atom(atomName, element, r)
                # Map PDB serial numbers to topology atoms for CONECT below.
                atomByNumber[atom.serial_number] = newAtom

    # load all of the positions (from every model)
    _positions = []
    for model in pdb.iter_models(use_all_models=True):
        coords = []
        for chain in model.iter_chains():
            for residue in chain.iter_residues():
                for atom in residue.atoms:
                    coords.append(atom.get_position())
        _positions.append(coords)
    self._positions = np.array(_positions)
    ## The atom positions read from the PDB file
    self._unitcell_lengths = pdb.get_unit_cell_lengths()
    self._unitcell_angles = pdb.get_unit_cell_angles()
    self._topology.create_standard_bonds()
    self._topology.create_disulfide_bonds(self.positions[0])

    # Add bonds based on CONECT records.
    # NOTE(review): only the first model's CONECT records are consulted.
    connectBonds = []
    for connect in pdb.models[0].connects:
        i = connect[0]
        for j in connect[1:]:
            connectBonds.append((atomByNumber[i], atomByNumber[j]))
    if len(connectBonds) > 0:
        # Only add bonds that don't already exist.
        existingBonds = set(self._topology.bonds)
        for bond in connectBonds:
            if bond not in existingBonds and (bond[1], bond[0]) not in existingBonds:
                self._topology.add_bond(bond[0], bond[1])
                existingBonds.add(bond)
@staticmethod
def _loadNameReplacementTables():
    """Load the list of atom and residue name replacements.

    Populates the class-level ``_residueNameReplacements`` (alternate
    residue spelling -> canonical name) and ``_atomNameReplacements``
    (canonical residue name -> {alternate atom name -> canonical atom
    name}) from data/pdbNames.xml. Idempotent: only parses once.
    """
    if len(PDBTrajectoryFile._residueNameReplacements) == 0:
        tree = etree.parse(os.path.join(os.path.dirname(__file__), 'data', 'pdbNames.xml'))
        allResidues = {}
        proteinResidues = {}
        nucleicAcidResidues = {}
        # First pass: collect the atom tables shared by residue classes.
        for residue in tree.getroot().findall('Residue'):
            name = residue.attrib['name']
            if name == 'All':
                PDBTrajectoryFile._parseResidueAtoms(residue, allResidues)
            elif name == 'Protein':
                PDBTrajectoryFile._parseResidueAtoms(residue, proteinResidues)
            elif name == 'Nucleic':
                PDBTrajectoryFile._parseResidueAtoms(residue, nucleicAcidResidues)
        # 'All' entries apply to both protein and nucleic-acid residues.
        for atom in allResidues:
            proteinResidues[atom] = allResidues[atom]
            nucleicAcidResidues[atom] = allResidues[atom]
        # Second pass: per-residue tables, seeded from the class defaults.
        for residue in tree.getroot().findall('Residue'):
            name = residue.attrib['name']
            for id in residue.attrib:
                if id == 'name' or id.startswith('alt'):
                    PDBTrajectoryFile._residueNameReplacements[residue.attrib[id]] = name
            if 'type' not in residue.attrib:
                atoms = copy(allResidues)
            elif residue.attrib['type'] == 'Protein':
                atoms = copy(proteinResidues)
            elif residue.attrib['type'] == 'Nucleic':
                atoms = copy(nucleicAcidResidues)
            else:
                atoms = copy(allResidues)
            PDBTrajectoryFile._parseResidueAtoms(residue, atoms)
            PDBTrajectoryFile._atomNameReplacements[name] = atoms
def _guess_element(self, atom_name, residue):
    """Try to guess the element name.

    Heuristic fallback used when the PDB record carries no element field:
    special-cases common ions and ambiguous two-letter prefixes, then
    falls back to symbol lookup on the leading character(s). Returns an
    ``elem`` object or None if nothing matches.
    """
    upper = atom_name.upper()
    if upper.startswith('CL'):
        element = elem.chlorine
    elif upper.startswith('NA'):
        element = elem.sodium
    elif upper.startswith('MG'):
        element = elem.magnesium
    elif upper.startswith('BE'):
        element = elem.beryllium
    elif upper.startswith('LI'):
        element = elem.lithium
    elif upper.startswith('K'):
        element = elem.potassium
    elif upper.startswith('ZN'):
        element = elem.zinc
    elif len(residue) == 1 and upper.startswith('CA'):
        # A lone 'CA' atom in a single-atom residue is a calcium ion,
        # not an alpha carbon.
        element = elem.calcium

    # TJL has edited this. There are a few issues here. First,
    # parsing for the element is non-trivial, so I do my best
    # below. Second, there is additional parsing code in
    # pdbstructure.py, and I am unsure why it doesn't get used
    # here...
    elif len(residue) > 1 and upper.startswith('CE'):
        element = elem.carbon  # (probably) not Celenium...
    elif len(residue) > 1 and upper.startswith('CD'):
        element = elem.carbon  # (probably) not Cadmium...
    elif residue.name in ['TRP', 'ARG', 'GLN', 'HIS'] and upper.startswith('NE'):
        element = elem.nitrogen  # (probably) not Neon...
    elif residue.name in ['ASN'] and upper.startswith('ND'):
        element = elem.nitrogen  # (probably) not ND...
    elif residue.name == 'CYS' and upper.startswith('SG'):
        element = elem.sulfur  # (probably) not SG...
    else:
        try:
            # Single leading character as the element symbol.
            element = elem.get_by_symbol(atom_name[0])
        except KeyError:
            try:
                # Two-character symbol; strip digits and A/B remoteness tags.
                symbol = atom_name[0:2].strip().rstrip("AB0123456789").lstrip("0123456789")
                element = elem.get_by_symbol(symbol)
            except KeyError:
                element = None

    return element
@staticmethod
def _parseResidueAtoms(residue, map):
for atom in residue.findall('Atom'):
name = atom.attrib['name']
for id in atom.attrib:
map[atom.attrib[id]] = name
def __del__(self):
    # Ensure the footer is flushed even if the caller forgets close().
    self.close()

def __enter__(self):
    return self

def __exit__(self, *exc_info):
    self.close()

def __len__(self):
    "Number of frames in the file"
    if str(self._mode) != 'r':
        raise NotImplementedError('len() only available in mode="r" currently')
    if not self._open:
        raise ValueError('I/O operation on closed file')
    return len(self._positions)
def _format_83(f):
"""Format a single float into a string of width 8, with ideally 3 decimal
places of precision. If the number is a little too large, we can
gracefully degrade the precision by lopping off some of the decimal
places. If it's much too large, we throw a ValueError"""
if -999.999 < f < 9999.999:
return '%8.3f' % f
if -9999999 < f < 99999999:
return ('%8.3f' % f)[:8]
raise ValueError('coordinate "%s" could not be represnted '
'in a width-8 field' % f)
|
marscher/mdtraj
|
MDTraj/formats/pdb/pdbfile.py
|
Python
|
lgpl-2.1
| 26,143
|
[
"MDTraj",
"OpenMM"
] |
dd8f702494b3c67fecaab8297b4e76e9ca2e4e0c6856c794f6a2607f07c766de
|
from stencil_kernel import *
import sys
import numpy
import math
# Dimensions of the raw 8-bit grayscale input image.
width = 50
height = 50
image_in = open('mallard_tiny.raw', 'rb')
stdev_d = 1   # std-dev of the spatial (domain) gaussian
stdev_s = 70  # std-dev of the intensity (range) gaussian
radius = 1    # neighbourhood radius of the filter
class Kernel(object):
    def kernel(self, in_img, filter_d, filter_s, out_img):
        """Bilateral filter kernel for the stencil DSL: each output pixel
        accumulates neighbours weighted by spatial distance (filter_d)
        and by intensity difference (filter_s).

        NOTE(review): `distance` is not defined in this module; presumably
        it is a builtin of the stencil_kernel DSL — confirm.
        """
        for x in out_img.interior_points():
            for y in in_img.neighbors(x, 1):
                out_img[x] += in_img[y] * filter_d[int(distance(x, y))] * filter_s[abs(int(in_img[x]-in_img[y]))]
def gaussian(stdev, length):
    """Return a 1-D StencilGrid of `length` gaussian weights with standard
    deviation `stdev`, sampled at x = 0..length-1 (unnormalised over the
    discrete support)."""
    result = StencilGrid([length])
    scale = 1.0/(stdev*math.sqrt(2.0*math.pi))
    divisor = -1.0 / (2.0 * stdev * stdev)
    for x in xrange(length):
        result[x] = scale * math.exp(float(x) * float(x) * divisor)
    return result
pixels = map(ord, list(image_in.read(width * height))) # Read in grayscale values
intensity = float(sum(pixels))/len(pixels)

kernel = Kernel()
kernel.should_unroll = False
out_grid = StencilGrid([width,height])
out_grid.ghost_depth = radius
in_grid = StencilGrid([width,height])
in_grid.ghost_depth = radius

# Neighbourhood definition 1: the full (2*radius+1)^2 square around a point.
for x in range(-radius,radius+1):
    for y in range(-radius,radius+1):
        in_grid.neighbor_definition[1].append( (x,y) )

# Copy pixel data (row-major) into the input grid.
for x in range(0,width):
    for y in range(0,height):
        in_grid.data[(x, y)] = pixels[y * width + x]

kernel.kernel(in_grid, gaussian(stdev_d, radius*2), gaussian(stdev_s, 256), out_grid)

# Copy the result back and renormalise brightness to the input's mean,
# clamping to the valid 8-bit range.
for x in range(0,width):
    for y in range(0,height):
        pixels[y * width + x] = out_grid.data[(x, y)]
out_intensity = float(sum(pixels))/len(pixels)
for i in range(0, len(pixels)):
    pixels[i] = min(255, max(0, int(pixels[i] * (intensity/out_intensity))))

image_out = open('out.raw', 'wb')
image_out.write(''.join(map(chr, pixels)))
|
shoaibkamil/asp
|
tools/debugger/bilateral_filter.2.py
|
Python
|
bsd-3-clause
| 1,694
|
[
"Gaussian"
] |
424eafc92fcb224709a5dbba0542d8b4d1d0e0e03d57dda3af63c90341a3e312
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import sys
import gzip
import pysam
import BioClasses
import cPickle
def main( gtf_fn, genes_fn ):
    """Print genes whose overall equal-cds UTR region overlaps records
    in the tabix-indexed gene GTF (one tab-separated line per overlap)."""
    # genes
    # Pass 1: build gene_id -> Gene from the full GTF.  "gene" rows
    # create a Gene; every other feature row is folded into its Gene.
    genes = dict()
    with open( gtf_fn ) as f:
        c = 0
        for row in f:
            if c > 10000:
                break
            l = row.strip( "\n" ).split( "\t" )
            record = BioClasses.GTFRecord( *l )
            if record.feature == "gene":
                genes[record.group_dict['gene_id']] = BioClasses.Gene( record )
            else:
                genes[record.group_dict['gene_id']].process_record( record )
            # NOTE(review): "c += 0" never advances the counter, so the
            # "c > 10000" cap above is inert -- either a deliberately
            # disabled debug limit or a typo for "c += 1"; confirm
            # intent before changing.
            c += 0
    # for transcript_id,T in genes['ENSG00000211769'].transcripts.iteritems():
    # print transcript_id,T.is_complete()
    #
    # sys.exit( 0 )
    # get the tabix file handler
    tabixfile = pysam.Tabixfile( genes_fn, "r" )
    # iterate over the genes
    d = 0
    for gene_id,G in genes.iteritems():
        if d > 1000:
            break
        # get the overall UTR for equal_cds transcripts for this gene
        utr_region = G.get_overall_UTR_region( "equal_cds" )
        if utr_region is None:
            print >> sys.stderr, "No region for %s" % gene_id
            continue
        # get overlaps
        tabix_result = BioClasses.TabixResult( utr_region, tabixfile, filetype="gtf" )
        # if there are overlaps...
        # record_count > 1 because the query region always hits the
        # gene itself; anything beyond that is a genuine overlap.
        if tabix_result.record_count > 1:
            for record in tabix_result.records:
                print "\t".join([ gene_id, utr_region, record.group_dict['gene_id'], record.region_str() ]) # may result in duplicates
        # NOTE(review): "d += 0" likewise leaves the "d > 1000" cap inert.
        d += 0
    tabixfile.close()
if __name__ == "__main__":
    # Expect two positional arguments: the full GTF and the
    # bgzipped + tabix-indexed gene GTF.
    try:
        gtf_fn = sys.argv[1]
        genes_fn = sys.argv[2]
    except IndexError:
        print >> sys.stderr, "usage:./script.py <full-gtf> <bgzipped and indexed gene.gtf>"
        sys.exit( 1 )
    main( gtf_fn, genes_fn )
|
polarise/Code
|
identify_overlapping_UTR_genes.py
|
Python
|
gpl-2.0
| 1,644
|
[
"pysam"
] |
1c9ad206e5105d89454cd5334a7ffbc737248b615a840335c4d7034f521f5710
|
#!/usr/bin/env python
#
# A basic functional test of the total impact API
#
import mechanize
import urllib2
import urllib
import json
import time
import sys
import pickle
import urlparse
from pprint import pprint
from optparse import OptionParser
# (provider, (namespace, id)) pairs exercised by the live-data pass.
REQUEST_IDS = [
    ("crossref", ('doi', '10.1371/journal.pcbi.1000361')),
    ("delicious", ('url', 'http://total-impact.org/')),
    ("dryad", ('doi','10.5061/dryad.18')),
    ("github", ('github', 'egonw,cdk')),
    ("mendeley", ('doi', '10.1371/journal.pcbi.1000361')),
    ("topsy", ('url', 'http://total-impact.org')),
    ("webpage", ('url', 'http://nescent.org/')),
    ("wikipedia", ('doi', '10.1371/journal.pcbi.1000361'))
]
# Reference ("gold") responses per provider: expected alias namespaces,
# expected biblio keys, and minimum metric values.  checkItem() compares
# aliases/biblio as exact sets but treats metric values as lower bounds,
# since live counts can only grow over time.
GOLD_RESPONSES = {
    'crossref' : {
        'aliases': ['doi', "title", "url"],
        'biblio': [u'authors', u'journal', u'year', u'title'],
        'metrics' : {}
    },
    'delicious' : {
        'aliases': ["url"],
        'biblio': [],
        'metrics' : {
            'delicious:bookmarks' : 65
        }
    },
    'dryad' : {
        'aliases': ['doi', 'url', 'title'],
        'biblio': [u'authors', u'year', u'repository', u'title'],
        'metrics' : {
            'dryad:most_downloaded_file' : 63,
            'dryad:package_views' : 149,
            'dryad:total_downloads' : 169
        }
    },
    'github' : {
        'aliases': ['github', 'url', 'title'],
        'biblio': [u'last_push_date', u'create_date', u'description', u'title', u'url', u'owner'],
        'metrics' : {
            'github:forks' : 27,
            'github:watchers' : 31
        }
    },
    'mendeley' : {
        'aliases': [u'url', u'doi', u'title'],
        'biblio': [u'authors', u'journal', u'year', u'title'],
        'metrics' : {
            'mendeley:readers' : 50,
            'mendeley:groups' : 4
        }
    },
    'topsy' : {
        'aliases': ["url"],
        'biblio': [],
        'metrics' : {
            'topsy:tweets' : 282,
            'topsy:influential_tweets' : 26
        }
    },
    'webpage' : {
        'aliases': ['url'],
        'biblio': [u'title', "h1"],
        'metrics' : {}
    },
    'wikipedia' : {
        'aliases': ['doi'],
        'biblio': [],
        'metrics' : {
            'wikipedia:mentions' : 1
        }
    }
}
def request_provider_item(provider, nid, section):
base_url = 'http://localhost:5001/'
nid = urlparse.unquote(nid)
url = base_url + urllib.quote('provider/%s/%s/%s' % (provider, section, nid))
if options.debug:
print "\n", url
req = urllib2.Request(url)
try:
response = urllib2.urlopen(req)
result = json.loads(response.read())
except urllib2.HTTPError:
if options.debug:
print("HTTPError on %s %s %s, perhaps not implemented" % (provider, section, nid))
result = {}
if options.debug:
print result
return result
def checkItem(provider, id, section, api_response, options):
    """Compare one section of an API response against GOLD_RESPONSES.

    Returns True when the section matches: aliases and biblio must match
    the expected key sets exactly; each expected metric must be present
    and at least as large as the reference value.  Returns False on the
    first mismatch.
    """
    if options.debug:
        print "Checking %s result (%s)..." % (provider, id)
    # Check aliases are correct
    if section=="aliases":
        aliases = GOLD_RESPONSES[provider]['aliases']
        # only the namespaces are compared; the alias values vary per item
        alias_result = set([namespace for (namespace, nid) in api_response])
        expected_result = set(aliases)
        if (alias_result == expected_result):
            if options.debug:
                print "ALIASES CORRECT! %s" %(alias_result)
        else:
            if options.debug:
                print "ALIASES **NOT** CORRECT, have %s, want %s" %(alias_result, expected_result)
            return False
    # Check biblio are correct
    elif section=="biblio":
        biblio = GOLD_RESPONSES[provider]['biblio']
        if api_response:
            biblio_result = set(api_response.keys())
        else:
            biblio_result = set([])
        expected_result = set(biblio)
        if (biblio_result == expected_result):
            if options.debug:
                print "BIBLIO CORRECT! %s" %(biblio_result)
        else:
            if options.debug:
                print "BIBLIO **NOT** CORRECT, have %s, want %s" %(biblio_result, expected_result)
            return False
    # Check we've got some metric values
    elif section=="metrics":
        metrics = GOLD_RESPONSES[provider]['metrics']
        for metric in metrics.keys():
            try:
                metric_data = api_response[metric]
            except KeyError:
                # didn't return anything. problem!
                print "METRICS **NOT** CORRECT for %s: metric missing" % (metric)
                pprint(api_response)
                return False
            # expect the returned value to be equal or larger than reference
            if metric_data >= metrics[metric]:
                if options.debug:
                    print "METRICS CORRECT! %s" %(metric_data)
            else:
                if options.debug:
                    print "METRICS **NOT** CORRECT for %s - %s, expected at least %s" % (metric, metric_data, metrics[metric])
                pprint(api_response)
                return False
    return True
def make_call(nid, provider, options):
    """Exercise the biblio, aliases and metrics sections for one
    provider/id pair and report per-section results.

    Returns True only when every section response checks out.
    """
    all_successful = True
    for section in ["biblio", "aliases", "metrics"]:
        api_response = request_provider_item(provider, nid, section)
        is_response_correct = checkItem(provider,
            nid,
            section,
            api_response,
            options
        )
        if is_response_correct:
            if not options.quiet:
                print "happy %s" % section
        else:
            if not options.quiet:
                print "INCORRECT %s" % section
            all_successful = False
        if options.printdata:
            pprint(api_response)
        print("\n")
    return(all_successful)
if __name__ == '__main__':
    parser = OptionParser()
    parser.add_option("-s", "--simultaneous", dest="simultaneous", default=1,
        help="Number of simultaneous requests to make")
    parser.add_option("-q", "--quiet", dest="quiet", default=False, action="store_true", help="Print less output")
    parser.add_option("-v", "--debug", dest="debug", default=False, action="store_true", help="Print debug output")
    parser.add_option("-p", "--printdata", dest="printdata", default=False,
        action="store_true", help="Display item data")
    (options, args) = parser.parse_args()
    all_successful = True
    # Each provider is checked twice: once against the canned "example"
    # item served by the test instance, once against a live identifier.
    for (provider, alias) in REQUEST_IDS:
        print "\n**** %s *****" %(provider.upper())
        (namespace, nid) = alias
        print "\nCANNED DATA"
        canned_success = make_call("example", provider, options)
        print "\nLIVE DATA with item (%s, %s)" %(namespace, nid)
        live_success = make_call(nid, provider, options)
        all_successful = all_successful and canned_success and live_success
        # brief pause so the external providers aren't hammered
        time.sleep(0.5)
    if all_successful:
        print "\nAll provider responses were HAPPY."
    else:
        print "\nSome provider responses had errors"
|
total-impact/total-impact-core
|
extras/functional_tests/alt_functional_test.py
|
Python
|
mit
| 7,133
|
[
"CDK"
] |
dcd40ebce7dd9107f41661d2a4d53099d2d2c88334f49820b72856d9a9e7e2bc
|
# Resolved an unresolved git merge conflict: the incoming branch added
# the `warnings` import used by test_properties' voigt check; keep it.
from __future__ import absolute_import

import unittest
import warnings

import numpy as np

from pymatgen.analysis.elasticity.stress import Stress
from pymatgen.analysis.elasticity.strain import Deformation
from pymatgen.util.testing import PymatgenTest
class StressTest(PymatgenTest):
    """Unit tests for pymatgen's Stress tensor class.

    An unresolved git merge conflict in test_properties was resolved in
    favor of the incoming branch, where `voigt` on a non-symmetric
    tensor emits a warning instead of raising ValueError (this matches
    the branch that added the `warnings` import).
    """

    def setUp(self):
        self.rand_stress = Stress(np.random.randn(3, 3))
        self.symm_stress = Stress([[0.51, 2.29, 2.42],
                                   [2.29, 5.14, 5.07],
                                   [2.42, 5.07, 5.33]])
        self.non_symm = Stress([[0.1, 0.2, 0.3],
                                [0.4, 0.5, 0.6],
                                [0.2, 0.5, 0.5]])

    def test_properties(self):
        # mean_stress
        self.assertEqual(self.rand_stress.mean_stress,
                         1. / 3. * (self.rand_stress[0, 0] +
                                    self.rand_stress[1, 1] +
                                    self.rand_stress[2, 2]))
        self.assertAlmostEqual(self.symm_stress.mean_stress, 3.66)
        # deviator_stress
        self.assertArrayAlmostEqual(self.symm_stress.deviator_stress,
                                    Stress([[-3.15, 2.29, 2.42],
                                            [2.29, 1.48, 5.07],
                                            [2.42, 5.07, 1.67]]))
        self.assertArrayAlmostEqual(self.non_symm.deviator_stress,
                                    [[-0.2666666667, 0.2, 0.3],
                                     [0.4, 0.133333333, 0.6],
                                     [0.2, 0.5, 0.133333333]])
        # deviator_principal_invariants
        self.assertArrayAlmostEqual(self.symm_stress.dev_principal_invariants,
                                    [0, 44.2563, 111.953628])
        # von_mises
        self.assertAlmostEqual(self.symm_stress.von_mises,
                               11.52253878275)
        # piola_kirchoff 1, 2
        f = Deformation.from_index_amount((0, 1), 0.03)
        self.assertArrayAlmostEqual(self.symm_stress.piola_kirchoff_1(f),
                                    [[0.4413, 2.29, 2.42],
                                     [2.1358, 5.14, 5.07],
                                     [2.2679, 5.07, 5.33]])
        self.assertArrayAlmostEqual(self.symm_stress.piola_kirchoff_2(f),
                                    [[0.377226, 2.1358, 2.2679],
                                     [2.1358, 5.14, 5.07],
                                     [2.2679, 5.07, 5.33]])
        # voigt: a non-symmetric stress should warn rather than raise
        with warnings.catch_warnings(record=True) as w:
            self.non_symm.voigt
            self.assertEqual(len(w), 1)
        self.assertArrayEqual(self.symm_stress.voigt,
                              [0.51, 5.14, 5.33, 5.07, 2.42, 2.29])


if __name__ == '__main__':
    unittest.main()
|
Bismarrck/pymatgen
|
pymatgen/analysis/elasticity/tests/test_stress.py
|
Python
|
mit
| 3,021
|
[
"pymatgen"
] |
9dac3cf80cbbcccfebe99f0281158896821be46f9ac0012567f707ed866a76c4
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for Chromium.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
import re
# Paths never scanned by the checks below (third-party, generated, or
# externally maintained code).
_EXCLUDED_PATHS = (
    r"^breakpad[\\\/].*",
    r"^native_client_sdk[\\\/].*",
    r"^net[\\\/]tools[\\\/]spdyshark[\\\/].*",
    r"^skia[\\\/].*",
    r"^v8[\\\/].*",
    r".*MakeFile$",
    r".+_autogen\.h$",
)
# Warning text emitted when production code appears to call
# test-only helpers (see _CheckNoProductionCodeUsingTestOnlyFunctions).
_TEST_ONLY_WARNING = (
    'You might be calling functions intended only for testing from\n'
    'production code. It is OK to ignore this warning if you know what\n'
    'you are doing, as the heuristics used to detect the situation are\n'
    'not perfect. The commit queue will not block on this warning.\n'
    'Email joi@chromium.org if you have questions.')
def _CheckNoInterfacesInBase(input_api, output_api):
    """Checks to make sure no files in libbase.a have |@interface|."""
    pattern = input_api.re.compile(r'^\s*@interface', input_api.re.MULTILINE)
    offenders = []
    for f in input_api.AffectedSourceFiles(input_api.FilterSourceFile):
        path = f.LocalPath()
        # Only base/ sources matter, and ObjC unit tests are allowed.
        if not path.startswith('base/') or path.endswith('_unittest.mm'):
            continue
        if pattern.search(input_api.ReadFile(f)):
            offenders.append(f)
    if offenders:
        return [ output_api.PresubmitError(
            'Objective-C interfaces or categories are forbidden in libbase. ' +
            'See http://groups.google.com/a/chromium.org/group/chromium-dev/' +
            'browse_thread/thread/efb28c10435987fd',
            offenders) ]
    return []
def _CheckNoProductionCodeUsingTestOnlyFunctions(input_api, output_api):
    """Attempts to prevent use of functions intended only for testing in
    non-testing code. For now this is just a best-effort implementation
    that ignores header files and may have some false positives. A
    better implementation would probably need a proper C++ parser.
    """
    # We only scan .cc files and the like, as the declaration of
    # for-testing functions in header files are hard to distinguish from
    # calls to such functions without a proper C++ parser.
    source_extensions = r'\.(cc|cpp|cxx|mm)$'
    file_inclusion_pattern = r'.+%s' % source_extensions
    file_exclusion_patterns = (
        r'.*[/\\](test_|mock_).+%s' % source_extensions,
        r'.+_test_(base|support|util)%s' % source_extensions,
        r'.+_(api|browser|perf|unit|ui)?test%s' % source_extensions,
        r'.+profile_sync_service_harness%s' % source_extensions,
    )
    path_exclusion_patterns = (
        r'.*[/\\](test|tool(s)?)[/\\].*',
        # At request of folks maintaining this folder.
        r'chrome[/\\]browser[/\\]automation[/\\].*',
    )
    base_function_pattern = r'ForTest(ing)?|for_test(ing)?'
    inclusion_pattern = input_api.re.compile(r'(%s)\s*\(' % base_function_pattern)
    # Definitions and qualified calls of the for-testing function itself
    # are exempt (the declaration site, not a production call site).
    exclusion_pattern = input_api.re.compile(
        r'::[A-Za-z0-9_]+(%s)|(%s)[^;]+\{' % (
            base_function_pattern, base_function_pattern))
    def FilterFile(affected_file):
        black_list = (file_exclusion_patterns + path_exclusion_patterns +
                      _EXCLUDED_PATHS + input_api.DEFAULT_BLACK_LIST)
        return input_api.FilterSourceFile(
            affected_file,
            white_list=(file_inclusion_pattern, ),
            black_list=black_list)
    problems = []
    for f in input_api.AffectedSourceFiles(FilterFile):
        local_path = f.LocalPath()
        # Report 1-based positions: the previous counter started at 0 and
        # was only incremented *after* appending, so every reported line
        # number was off by one relative to editor line numbers.
        for line_number, line in enumerate(
                input_api.ReadFile(f).splitlines(), start=1):
            if (inclusion_pattern.search(line) and
                    not exclusion_pattern.search(line)):
                problems.append(
                    '%s:%d\n %s' % (local_path, line_number, line.strip()))
    if problems:
        if not input_api.is_committing:
            return [output_api.PresubmitPromptWarning(_TEST_ONLY_WARNING, problems)]
        else:
            # We don't warn on commit, to avoid stopping commits going through CQ.
            return [output_api.PresubmitNotifyResult(_TEST_ONLY_WARNING, problems)]
    else:
        return []
def _CheckNoIOStreamInHeaders(input_api, output_api):
    """Checks to make sure no .h files include <iostream>."""
    pattern = input_api.re.compile(r'^#include\s*<iostream>',
                                   input_api.re.MULTILINE)
    offenders = [
        f for f in input_api.AffectedSourceFiles(input_api.FilterSourceFile)
        if f.LocalPath().endswith('.h')
        and pattern.search(input_api.ReadFile(f))
    ]
    if offenders:
        return [ output_api.PresubmitError(
            'Do not #include <iostream> in header files, since it inserts static ' +
            'initialization into every file including the header. Instead, ' +
            '#include <ostream>. See http://crbug.com/94794',
            offenders) ]
    return []
def _CheckNoNewWStrings(input_api, output_api):
    """Checks to make sure we don't introduce use of wstrings."""
    problems = []
    for f in input_api.AffectedFiles():
        path = f.LocalPath()
        # C++ sources and headers only; test files are exempt.
        if not path.endswith(('.cc', '.h')) or path.endswith('test.cc'):
            continue
        problems.extend(' %s:%d' % (path, line_num)
                        for line_num, line in f.ChangedContents()
                        if 'wstring' in line)
    if problems:
        return [output_api.PresubmitPromptWarning('New code should not use wstrings.'
            ' If you are calling an API that accepts a wstring, fix the API.\n' +
            '\n'.join(problems))]
    return []
def _CheckNoDEPSGIT(input_api, output_api):
    """Make sure .DEPS.git is never modified manually."""
    touched = any(f.LocalPath().endswith('.DEPS.git')
                  for f in input_api.AffectedFiles())
    if not touched:
        return []
    return [output_api.PresubmitError(
        'Never commit changes to .DEPS.git. This file is maintained by an\n'
        'automated system based on what\'s in DEPS and your changes will be\n'
        'overwritten.\n'
        'See http://code.google.com/p/chromium/wiki/UsingNewGit#Rolling_DEPS\n'
        'for more information')]
def _CheckNoFRIEND_TEST(input_api, output_api):
    """Make sure that gtest's FRIEND_TEST() macro is not used, the
    FRIEND_TEST_ALL_PREFIXES() macro from base/gtest_prod_util.h should be used
    instead since that allows for FLAKY_, FAILS_ and DISABLED_ prefixes."""
    source_filter = lambda f: f.LocalPath().endswith(('.cc', '.h'))
    problems = []
    for f in input_api.AffectedFiles(file_filter=source_filter):
        problems.extend(' %s:%d' % (f.LocalPath(), line_num)
                        for line_num, line in f.ChangedContents()
                        if 'FRIEND_TEST(' in line)
    if not problems:
        return []
    return [output_api.PresubmitPromptWarning('Chromium code should not use '
        'gtest\'s FRIEND_TEST() macro. Include base/gtest_prod_util.h and use '
        'FRIEND_TEST_ALL_PREFIXES() instead.\n' + '\n'.join(problems))]
def _CheckNoScopedAllowIO(input_api, output_api):
    """Make sure that ScopedAllowIO is not used."""
    source_filter = lambda f: f.LocalPath().endswith(('.cc', '.h'))
    problems = []
    for f in input_api.AffectedFiles(file_filter=source_filter):
        problems.extend(' %s:%d' % (f.LocalPath(), line_num)
                        for line_num, line in f.ChangedContents()
                        if 'ScopedAllowIO' in line)
    if not problems:
        return []
    return [output_api.PresubmitPromptWarning('New code should not use '
        'ScopedAllowIO. Post a task to the blocking pool or the FILE thread '
        'instead.\n' + '\n'.join(problems))]
def _CommonChecks(input_api, output_api):
    """Checks common to both upload and commit."""
    results = []
    results.extend(input_api.canned_checks.PanProjectChecks(
        input_api, output_api, excluded_paths=_EXCLUDED_PATHS))
    # Local checks, run in the same order as before.
    for check in (_CheckNoInterfacesInBase,
                  _CheckAuthorizedAuthor,
                  _CheckNoProductionCodeUsingTestOnlyFunctions,
                  _CheckNoIOStreamInHeaders,
                  _CheckNoNewWStrings,
                  _CheckNoDEPSGIT,
                  _CheckNoFRIEND_TEST,
                  _CheckNoScopedAllowIO):
        results.extend(check(input_api, output_api))
    return results
def _CheckSubversionConfig(input_api, output_api):
    """Verifies the subversion config file is correctly setup.

    Checks that autoprops are enabled, returns an error otherwise.
    """
    join = input_api.os_path.join
    if input_api.platform == 'win32':
        appdata = input_api.environ.get('APPDATA', '')
        if not appdata:
            return [output_api.PresubmitError('%APPDATA% is not configured.')]
        path = join(appdata, 'Subversion', 'config')
    else:
        home = input_api.environ.get('HOME', '')
        if not home:
            return [output_api.PresubmitError('$HOME is not configured.')]
        path = join(home, '.subversion', 'config')
    error_msg = (
        'Please look at http://dev.chromium.org/developers/coding-style to\n'
        'configure your subversion configuration file. This enables automatic\n'
        'properties to simplify the project maintenance.\n'
        'Pro-tip: just download and install\n'
        'http://src.chromium.org/viewvc/chrome/trunk/tools/build/slave/config\n')
    try:
        # Use a context manager so the config file handle is always
        # closed (the previous open(path).read() leaked the handle).
        with open(path, 'r') as config_file:
            lines = config_file.read().splitlines()
        # Make sure auto-props is enabled and check for 2 Chromium standard
        # auto-prop.
        if (not '*.cc = svn:eol-style=LF' in lines or
                not '*.pdf = svn:mime-type=application/pdf' in lines or
                not 'enable-auto-props = yes' in lines):
            return [
                output_api.PresubmitNotifyResult(
                    'It looks like you have not configured your subversion config '
                    'file or it is not up-to-date.\n' + error_msg)
            ]
    except (OSError, IOError):
        return [
            output_api.PresubmitNotifyResult(
                'Can\'t find your subversion config file.\n' + error_msg)
        ]
    return []
def _CheckAuthorizedAuthor(input_api, output_api):
"""For non-googler/chromites committers, verify the author's email address is
in AUTHORS.
"""
# TODO(maruel): Add it to input_api?
import fnmatch
author = input_api.change.author_email
if not author:
input_api.logging.info('No author, skipping AUTHOR check')
return []
authors_path = input_api.os_path.join(
input_api.PresubmitLocalPath(), 'AUTHORS')
valid_authors = (
input_api.re.match(r'[^#]+\s+\<(.+?)\>\s*$', line)
for line in open(authors_path))
valid_authors = [item.group(1).lower() for item in valid_authors if item]
if input_api.verbose:
print 'Valid authors are %s' % ', '.join(valid_authors)
if not any(fnmatch.fnmatch(author.lower(), valid) for valid in valid_authors):
return [output_api.PresubmitPromptWarning(
('%s is not in AUTHORS file. If you are a new contributor, please visit'
'\n'
'http://www.chromium.org/developers/contributing-code and read the '
'"Legal" section\n'
'If you are a chromite, verify the contributor signed the CLA.') %
author)]
return []
def CheckChangeOnUpload(input_api, output_api):
    """Presubmit entry point run at upload time."""
    return list(_CommonChecks(input_api, output_api))
def CheckChangeOnCommit(input_api, output_api):
    """Presubmit entry point run at commit time: common checks plus
    tree-status, try-job, and change-description requirements."""
    results = []
    results.extend(_CommonChecks(input_api, output_api))
    # TODO(thestig) temporarily disabled, doesn't work in third_party/
    #results.extend(input_api.canned_checks.CheckSvnModifiedDirectories(
    # input_api, output_api, sources))
    # Make sure the tree is 'open'.
    results.extend(input_api.canned_checks.CheckTreeIsOpen(
        input_api,
        output_api,
        json_url='http://chromium-status.appspot.com/current?format=json'))
    # Require try jobs on the main platform builders before committing.
    results.extend(input_api.canned_checks.CheckRietveldTryJobExecution(input_api,
        output_api, 'http://codereview.chromium.org',
        ('win_rel', 'linux_rel', 'mac_rel, win:compile'),
        'tryserver@chromium.org'))
    results.extend(input_api.canned_checks.CheckChangeHasBugField(
        input_api, output_api))
    results.extend(input_api.canned_checks.CheckChangeHasTestField(
        input_api, output_api))
    results.extend(input_api.canned_checks.CheckChangeHasDescription(
        input_api, output_api))
    results.extend(_CheckSubversionConfig(input_api, output_api))
    return results
def GetPreferredTrySlaves(project, change):
    """Returns the try bot names to run for the given change.

    Pure Objective-C changes only need the Mac builder; everything else
    gets the three main platform builders plus linux_clang, with
    linux_chromeos added for Aura-looking files and android for changes
    touching Android-relevant directories.
    """
    affected_files = change.LocalPaths()
    only_objc_files = all(f.endswith(('.mm', '.m')) for f in affected_files)
    if only_objc_files:
        return ['mac_rel']
    # linux_clang is always included.  The previous code had a dead
    # assignment without it that was immediately overwritten, and then
    # conditionally appended 'linux_clang' a second time for C/C++
    # changes, producing a duplicate entry.
    preferred = ['win_rel', 'linux_rel', 'mac_rel', 'linux_clang']
    aura_re = '_aura[^/]*[.][^/]*'
    if any(re.search(aura_re, f) for f in affected_files):
        preferred.append('linux_chromeos')
    # Nothing in chrome/
    android_re_list = ('^base/',
                       '^build/common.gypi$',
                       '^content/',
                       '^ipc/',
                       '^jingle/',
                       '^media/',
                       '^net/',
                       '^sql/')
    # Nothing that looks like win-only or aura-only
    win_re = '_win\.(cc|h)$'
    possibly_android = True
    for non_android_re in (aura_re, win_re):
        if all(re.search(non_android_re, f) for f in affected_files):
            possibly_android = False
            break
    if possibly_android:
        for f in change.AffectedFiles():
            if any(re.search(r, f.LocalPath()) for r in android_re_list):
                preferred.append('android')
                break
    return preferred
|
ropik/chromium
|
PRESUBMIT.py
|
Python
|
bsd-3-clause
| 13,604
|
[
"VisIt"
] |
b7bba8a608e9064da2cc7cb86c772e2c121a09c6c441d2975ee39b11a831b574
|
#!/usr/bin/env python
"""
Copyright (c) 2006-2016 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import cookielib
import glob
import inspect
import logging
import httplib
import os
import random
import re
import socket
import string
import sys
import tempfile
import threading
import time
import urllib2
import urlparse
import lib.controller.checks
import lib.core.common
import lib.core.threads
import lib.core.convert
import lib.request.connect
import lib.utils.search
from lib.controller.checks import checkConnection
from lib.core.common import Backend
from lib.core.common import boldifyMessage
from lib.core.common import checkFile
from lib.core.common import dataToStdout
from lib.core.common import getPublicTypeMembers
from lib.core.common import getSafeExString
from lib.core.common import extractRegexResult
from lib.core.common import filterStringValue
from lib.core.common import findPageForms
from lib.core.common import getConsoleWidth
from lib.core.common import getFileItems
from lib.core.common import getFileType
from lib.core.common import getUnicode
from lib.core.common import isListLike
from lib.core.common import normalizePath
from lib.core.common import ntToPosixSlashes
from lib.core.common import openFile
from lib.core.common import parseTargetDirect
from lib.core.common import parseTargetUrl
from lib.core.common import paths
from lib.core.common import randomStr
from lib.core.common import readCachedFileContent
from lib.core.common import readInput
from lib.core.common import resetCookieJar
from lib.core.common import runningAsAdmin
from lib.core.common import safeExpandUser
from lib.core.common import setOptimize
from lib.core.common import setPaths
from lib.core.common import singleTimeWarnMessage
from lib.core.common import UnicodeRawConfigParser
from lib.core.common import urldecode
from lib.core.convert import base64unpickle
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.data import mergedOptions
from lib.core.data import queries
from lib.core.datatype import AttribDict
from lib.core.datatype import InjectionDict
from lib.core.defaults import defaults
from lib.core.dicts import DBMS_DICT
from lib.core.dicts import DUMP_REPLACEMENTS
from lib.core.enums import ADJUST_TIME_DELAY
from lib.core.enums import AUTH_TYPE
from lib.core.enums import CUSTOM_LOGGING
from lib.core.enums import DUMP_FORMAT
from lib.core.enums import HTTP_HEADER
from lib.core.enums import HTTPMETHOD
from lib.core.enums import MOBILES
from lib.core.enums import OPTION_TYPE
from lib.core.enums import PAYLOAD
from lib.core.enums import PRIORITY
from lib.core.enums import PROXY_TYPE
from lib.core.enums import REFLECTIVE_COUNTER
from lib.core.enums import WIZARD
from lib.core.exception import SqlmapConnectionException
from lib.core.exception import SqlmapFilePathException
from lib.core.exception import SqlmapGenericException
from lib.core.exception import SqlmapInstallationException
from lib.core.exception import SqlmapMissingDependence
from lib.core.exception import SqlmapMissingMandatoryOptionException
from lib.core.exception import SqlmapMissingPrivileges
from lib.core.exception import SqlmapNoneDataException
from lib.core.exception import SqlmapSilentQuitException
from lib.core.exception import SqlmapSyntaxException
from lib.core.exception import SqlmapSystemException
from lib.core.exception import SqlmapUnsupportedDBMSException
from lib.core.exception import SqlmapUserQuitException
from lib.core.log import FORMATTER
from lib.core.optiondict import optDict
from lib.core.settings import BURP_REQUEST_REGEX
from lib.core.settings import BURP_XML_HISTORY_REGEX
from lib.core.settings import CODECS_LIST_PAGE
from lib.core.settings import CRAWL_EXCLUDE_EXTENSIONS
from lib.core.settings import CUSTOM_INJECTION_MARK_CHAR
from lib.core.settings import DBMS_ALIASES
from lib.core.settings import DEFAULT_PAGE_ENCODING
from lib.core.settings import DEFAULT_TOR_HTTP_PORTS
from lib.core.settings import DEFAULT_TOR_SOCKS_PORT
from lib.core.settings import DUMMY_URL
from lib.core.settings import IGNORE_SAVE_OPTIONS
from lib.core.settings import INJECT_HERE_MARK
from lib.core.settings import IS_WIN
from lib.core.settings import KB_CHARS_BOUNDARY_CHAR
from lib.core.settings import KB_CHARS_LOW_FREQUENCY_ALPHABET
from lib.core.settings import LOCALHOST
from lib.core.settings import MAX_CONNECT_RETRIES
from lib.core.settings import MAX_NUMBER_OF_THREADS
from lib.core.settings import NULL
from lib.core.settings import PARAMETER_SPLITTING_REGEX
from lib.core.settings import PROBLEMATIC_CUSTOM_INJECTION_PATTERNS
from lib.core.settings import SITE
from lib.core.settings import SOCKET_PRE_CONNECT_QUEUE_SIZE
from lib.core.settings import SQLMAP_ENVIRONMENT_PREFIX
from lib.core.settings import SUPPORTED_DBMS
from lib.core.settings import SUPPORTED_OS
from lib.core.settings import TIME_DELAY_CANDIDATES
from lib.core.settings import UNION_CHAR_REGEX
from lib.core.settings import UNKNOWN_DBMS_VERSION
from lib.core.settings import URI_INJECTABLE_REGEX
from lib.core.settings import VERSION_STRING
from lib.core.settings import WEBSCARAB_SPLITTER
from lib.core.threads import getCurrentThreadData
from lib.core.threads import setDaemon
from lib.core.update import update
from lib.parse.configfile import configFileParser
from lib.parse.payloads import loadBoundaries
from lib.parse.payloads import loadPayloads
from lib.parse.sitemap import parseSitemap
from lib.request.basic import checkCharEncoding
from lib.request.connect import Connect as Request
from lib.request.dns import DNSServer
from lib.request.basicauthhandler import SmartHTTPBasicAuthHandler
from lib.request.httpshandler import HTTPSHandler
from lib.request.pkihandler import HTTPSPKIAuthHandler
from lib.request.rangehandler import HTTPRangeHandler
from lib.request.redirecthandler import SmartRedirectHandler
from lib.request.templates import getPageTemplate
from lib.utils.crawler import crawl
from lib.utils.deps import checkDependencies
from lib.utils.search import search
from lib.utils.purge import purge
from thirdparty.keepalive import keepalive
from thirdparty.oset.pyoset import oset
from thirdparty.socks import socks
from xml.etree.ElementTree import ElementTree
# Module-level urllib2 handler singletons; registered into the global
# opener elsewhere during option initialization.
authHandler = urllib2.BaseHandler()
httpsHandler = HTTPSHandler()
keepAliveHandler = keepalive.HTTPHandler()
proxyHandler = urllib2.ProxyHandler()
redirectHandler = SmartRedirectHandler()
rangeHandler = HTTPRangeHandler()
def _feedTargetsDict(reqFile, addedTargetUrls):
    """
    Parses web scarab and burp logs and adds results to the target URL list

    Each discovered target is added to kb.targets as a
    (url, method, data, cookie, headers) tuple; addedTargetUrls tracks
    which URLs this call contributed.
    """
    def _parseWebScarabLog(content):
        """
        Parses web scarab logs (POST method not supported)
        """
        reqResList = content.split(WEBSCARAB_SPLITTER)
        for request in reqResList:
            url = extractRegexResult(r"URL: (?P<result>.+?)\n", request, re.I)
            method = extractRegexResult(r"METHOD: (?P<result>.+?)\n", request, re.I)
            cookie = extractRegexResult(r"COOKIE: (?P<result>.+?)\n", request, re.I)
            if not method or not url:
                logger.debug("not a valid WebScarab log data")
                continue
            if method.upper() == HTTPMETHOD.POST:
                warnMsg = "POST requests from WebScarab logs aren't supported "
                warnMsg += "as their body content is stored in separate files. "
                warnMsg += "Nevertheless you can use -r to load them individually."
                logger.warning(warnMsg)
                continue
            # honor --scope filtering, and skip URLs already collected
            if not(conf.scope and not re.search(conf.scope, url, re.I)):
                if not kb.targets or url not in addedTargetUrls:
                    kb.targets.add((url, method, None, cookie, None))
                    addedTargetUrls.add(url)
    def _parseBurpLog(content):
        """
        Parses burp logs
        """
        if not re.search(BURP_REQUEST_REGEX, content, re.I | re.S):
            if re.search(BURP_XML_HISTORY_REGEX, content, re.I | re.S):
                # Burp XML history: requests are base64-encoded, with the
                # port carried separately; re-attach it to the Host header.
                reqResList = []
                for match in re.finditer(BURP_XML_HISTORY_REGEX, content, re.I | re.S):
                    port, request = match.groups()
                    request = request.decode("base64")
                    _ = re.search(r"%s:.+" % re.escape(HTTP_HEADER.HOST), request)
                    if _:
                        host = _.group(0).strip()
                        if not re.search(r":\d+\Z", host):
                            request = request.replace(host, "%s:%d" % (host, int(port)))
                    reqResList.append(request)
            else:
                # not a Burp log at all: treat the whole file as one raw request
                reqResList = [content]
        else:
            reqResList = re.finditer(BURP_REQUEST_REGEX, content, re.I | re.S)
        for match in reqResList:
            request = match if isinstance(match, basestring) else match.group(0)
            request = re.sub(r"\A[^\w]+", "", request)
            # scheme/port can be inferred from Burp's "=====" separator line
            schemePort = re.search(r"(http[\w]*)\:\/\/.*?\:([\d]+).+?={10,}", request, re.I | re.S)
            if schemePort:
                scheme = schemePort.group(1)
                port = schemePort.group(2)
            else:
                scheme, port = None, None
            if not re.search(r"^[\n]*(%s).*?\sHTTP\/" % "|".join(getPublicTypeMembers(HTTPMETHOD, True)), request, re.I | re.M):
                continue
            if re.search(r"^[\n]*%s.*?\.(%s)\sHTTP\/" % (HTTPMETHOD.GET, "|".join(CRAWL_EXCLUDE_EXTENSIONS)), request, re.I | re.M):
                continue
            getPostReq = False
            url = None
            host = None
            method = None
            data = None
            cookie = None
            params = False
            newline = None
            lines = request.split('\n')
            headers = []
            # Walk the raw request line by line: request line first,
            # then headers, then (after a blank line) the POST body.
            for index in xrange(len(lines)):
                line = lines[index]
                if not line.strip() and index == len(lines) - 1:
                    break
                newline = "\r\n" if line.endswith('\r') else '\n'
                line = line.strip('\r')
                match = re.search(r"\A(%s) (.+) HTTP/[\d.]+\Z" % "|".join(getPublicTypeMembers(HTTPMETHOD, True)), line) if not method else None
                if len(line.strip()) == 0 and method and method != HTTPMETHOD.GET and data is None:
                    # blank line after the headers marks the body start
                    data = ""
                    params = True
                elif match:
                    method = match.group(1)
                    url = match.group(2)
                    if any(_ in line for _ in ('?', '=', CUSTOM_INJECTION_MARK_CHAR)):
                        params = True
                    getPostReq = True
                # POST parameters
                elif data is not None and params:
                    data += "%s%s" % (line, newline)
                # GET parameters
                elif "?" in line and "=" in line and ": " not in line:
                    params = True
                # Headers
                elif re.search(r"\A\S+:", line):
                    key, value = line.split(":", 1)
                    value = value.strip().replace("\r", "").replace("\n", "")
                    # Cookie and Host headers
                    if key.upper() == HTTP_HEADER.COOKIE.upper():
                        cookie = value
                    elif key.upper() == HTTP_HEADER.HOST.upper():
                        if '://' in value:
                            scheme, value = value.split('://')[:2]
                        splitValue = value.split(":")
                        host = splitValue[0]
                        if len(splitValue) > 1:
                            port = filterStringValue(splitValue[1], "[0-9]")
                    # Avoid to add a static content length header to
                    # headers and consider the following lines as
                    # POSTed data
                    if key.upper() == HTTP_HEADER.CONTENT_LENGTH.upper():
                        params = True
                    # Avoid proxy and connection type related headers
                    elif key not in (HTTP_HEADER.PROXY_CONNECTION, HTTP_HEADER.CONNECTION):
                        headers.append((getUnicode(key), getUnicode(value)))
                    if CUSTOM_INJECTION_MARK_CHAR in re.sub(PROBLEMATIC_CUSTOM_INJECTION_PATTERNS, "", value or ""):
                        params = True
            data = data.rstrip("\r\n") if data else data
            # only keep requests with something injectable (params/cookie)
            if getPostReq and (params or cookie):
                if not port and isinstance(scheme, basestring) and scheme.lower() == "https":
                    port = "443"
                elif not scheme and port == "443":
                    scheme = "https"
                if conf.forceSSL:
                    scheme = "https"
                    port = port or "443"
                if not host:
                    errMsg = "invalid format of a request file"
                    raise SqlmapSyntaxException, errMsg
                if not url.startswith("http"):
                    url = "%s://%s:%s%s" % (scheme or "http", host, port or "80", url)
                    scheme = None
                    port = None
                if not(conf.scope and not re.search(conf.scope, url, re.I)):
                    if not kb.targets or url not in addedTargetUrls:
                        kb.targets.add((url, conf.method or method, data, cookie, tuple(headers)))
                        addedTargetUrls.add(url)
    checkFile(reqFile)
    try:
        with openFile(reqFile, "rb") as f:
            content = f.read()
    except (IOError, OSError, MemoryError), ex:
        errMsg = "something went wrong while trying "
        errMsg += "to read the content of file '%s' ('%s')" % (reqFile, getSafeExString(ex))
        raise SqlmapSystemException(errMsg)
    if conf.scope:
        logger.info("using regular expression '%s' for filtering targets" % conf.scope)
    _parseBurpLog(content)
    _parseWebScarabLog(content)
    if not addedTargetUrls:
        errMsg = "unable to find usable request(s) "
        errMsg += "in provided file ('%s')" % reqFile
        raise SqlmapGenericException(errMsg)
def _loadQueries():
    """
    Loads queries from 'xml/queries.xml' file.

    Populates the module-level 'queries' dictionary, keyed by the DBMS
    'value' attribute of each top-level XML node, with attribute-style
    accessible objects mirroring the XML structure.
    """
    def iterate(node, retVal=None):
        # Lightweight object whose attributes mirror XML child tags;
        # supports 'in' checks against the attribute names
        class DictObject(object):
            def __init__(self):
                self.__dict__ = {}
            def __contains__(self, name):
                return name in self.__dict__
        if retVal is None:
            retVal = DictObject()
        for child in node.findall("*"):
            instance = DictObject()
            retVal.__dict__[child.tag] = instance
            if child.attrib:
                # Leaf node - copy its XML attributes as object attributes
                instance.__dict__.update(child.attrib)
            else:
                # Container node - recurse into its children
                iterate(child, instance)
        return retVal
    tree = ElementTree()
    try:
        tree.parse(paths.QUERIES_XML)
    except Exception, ex:
        # Most likely a user-modified/corrupted queries.xml
        errMsg = "something seems to be wrong with "
        errMsg += "the file '%s' ('%s'). Please make " % (paths.QUERIES_XML, getSafeExString(ex))
        errMsg += "sure that you haven't made any changes to it"
        raise SqlmapInstallationException, errMsg
    for node in tree.findall("*"):
        queries[node.attrib['value']] = iterate(node)
def _setMultipleTargets():
    """
    Define a configuration parameter if we are running in multiple target
    mode.

    Parses conf.logFile (either a single log file or a directory of
    '<number>-request' files) and feeds found targets into kb.targets.
    """
    # Remember the number of already known targets so the count of newly
    # parsed ones can be reported at the end
    countBefore = len(kb.targets)
    parsedUrls = set()
    if not conf.logFile:
        return
    logger.debug("parsing targets list from '%s'" % conf.logFile)
    if not os.path.exists(conf.logFile):
        raise SqlmapFilePathException("the specified list of targets does not exist")
    if os.path.isfile(conf.logFile):
        _feedTargetsDict(conf.logFile, parsedUrls)
    elif os.path.isdir(conf.logFile):
        # Process request files in deterministic (sorted) order
        for entry in sorted(os.listdir(conf.logFile)):
            if re.search("([\d]+)\-request", entry):
                _feedTargetsDict(os.path.join(conf.logFile, entry), parsedUrls)
    else:
        errMsg = "the specified list of targets is not a file "
        errMsg += "nor a directory"
        raise SqlmapFilePathException(errMsg)
    newOnes = len(kb.targets) - countBefore
    if newOnes > 0:
        infoMsg = "sqlmap parsed %d " % newOnes
        infoMsg += "(parameter unique) requests from the "
        infoMsg += "targets list ready to be tested"
        logger.info(infoMsg)
def _adjustLoggingFormatter():
    """
    Solves problem of line deletition caused by overlapping logging messages
    and retrieved data info in inference mode
    """
    # Already patched on a previous call - nothing to do
    if hasattr(FORMATTER, '_format'):
        return
    def format(record):
        # Delegate to the original formatter, then post-process its output
        retVal = boldifyMessage(FORMATTER._format(record))
        if kb.get("prependFlag"):
            kb.prependFlag = False
            retVal = "\n%s" % retVal
        return retVal
    # Keep the original method reachable as '_format' and install the wrapper
    FORMATTER._format = FORMATTER.format
    FORMATTER.format = format
def _setRequestFromFile():
    """
    This function checks if the way to make a HTTP request is through supplied
    textual file, parses it and saves the information into the knowledge base.
    """
    if not conf.requestFile:
        return
    seenTargetUrls = set()
    conf.requestFile = safeExpandUser(conf.requestFile)
    logger.info("parsing HTTP request from '%s'" % conf.requestFile)
    if not os.path.isfile(conf.requestFile):
        errMsg = "the specified HTTP request file "
        errMsg += "does not exist"
        raise SqlmapFilePathException(errMsg)
    _feedTargetsDict(conf.requestFile, seenTargetUrls)
def _setCrawler():
    """
    Starts crawling of the target(s) when a crawling depth is requested.
    """
    if not conf.crawlDepth:
        return
    if not any((conf.bulkFile, conf.sitemapUrl)):
        # Single target - crawl it directly
        crawl(conf.url)
        return
    # Multiple targets - crawl each entry coming from bulk file or sitemap
    targets = getFileItems(conf.bulkFile) if conf.bulkFile else parseSitemap(conf.sitemapUrl)
    total = len(targets)
    for index, target in enumerate(targets):
        try:
            crawl(target)
            if conf.verbose in (1, 2):
                status = "%d/%d links visited (%d%%)" % (index + 1, total, round(100.0 * (index + 1) / total))
                dataToStdout("\r[%s] [INFO] %s" % (time.strftime("%X"), status), True)
        except Exception as ex:
            # Best-effort: log and continue with the remaining targets
            errMsg = "problem occurred while crawling at '%s' ('%s')" % (target, getSafeExString(ex))
            logger.error(errMsg)
def _doSearch():
    """
    This function performs search dorking, parses results
    and saves the testable hosts into the knowledge base.
    """
    if not conf.googleDork:
        return
    # Tri-state flag: None means the user has not yet been asked whether
    # to keep only results carrying GET parameters
    kb.data.onlyGETs = None
    def retrieve():
        # Fetch one page of search-engine results for the dork expression
        links = search(conf.googleDork)
        if not links:
            errMsg = "unable to find results for your "
            errMsg += "search dork expression"
            raise SqlmapGenericException(errMsg)
        for link in links:
            link = urldecode(link)
            if re.search(r"(.*?)\?(.+)", link):
                # Link carries GET parameters - directly testable
                kb.targets.add((link, conf.method, conf.data, conf.cookie, None))
            elif re.search(URI_INJECTABLE_REGEX, link, re.I):
                # URI itself looks injectable; ask the user once (first hit only)
                if kb.data.onlyGETs is None and conf.data is None and not conf.googleDork:
                    message = "do you want to scan only results containing GET parameters? [Y/n] "
                    test = readInput(message, default="Y")
                    kb.data.onlyGETs = test.lower() != 'n'
                if not kb.data.onlyGETs or conf.googleDork:
                    kb.targets.add((link, conf.method, conf.data, conf.cookie, None))
        return links
    while True:
        links = retrieve()
        if kb.targets:
            infoMsg = "sqlmap got %d results for your " % len(links)
            infoMsg += "search dork expression, "
            if len(links) == len(kb.targets):
                infoMsg += "all "
            else:
                infoMsg += "%d " % len(kb.targets)
            infoMsg += "of them are testable targets"
            logger.info(infoMsg)
            break
        else:
            # Nothing testable on this page - offer to continue with the next one
            message = "sqlmap got %d results " % len(links)
            message += "for your search dork expression, but none of them "
            message += "have GET parameters to test for SQL injection. "
            message += "Do you want to skip to the next result page? [Y/n]"
            test = readInput(message, default="Y")
            if test[0] in ("n", "N"):
                raise SqlmapSilentQuitException
            else:
                conf.googlePage += 1
def _setBulkMultipleTargets():
    """
    Parses the user supplied bulk file and registers testable targets.
    """
    if not conf.bulkFile:
        return
    conf.bulkFile = safeExpandUser(conf.bulkFile)
    logger.info("parsing multiple targets list from '%s'" % conf.bulkFile)
    if not os.path.isfile(conf.bulkFile):
        errMsg = "the specified bulk file "
        errMsg += "does not exist"
        raise SqlmapFilePathException(errMsg)
    usable = False
    for line in getFileItems(conf.bulkFile):
        # A line is usable when it has GET parameters or an explicit injection mark
        if re.match(r"[^ ]+\?(.+)", line, re.I) or CUSTOM_INJECTION_MARK_CHAR in line:
            usable = True
            kb.targets.add((line.strip(), conf.method, conf.data, conf.cookie, None))
    if not usable and not conf.forms and not conf.crawlDepth:
        warnMsg = "no usable links found (with GET parameters)"
        logger.warn(warnMsg)
def _setSitemapTargets():
    """
    Parses the provided sitemap and registers testable targets.
    """
    if not conf.sitemapUrl:
        return
    logger.info("parsing sitemap '%s'" % conf.sitemapUrl)
    usable = False
    for entry in parseSitemap(conf.sitemapUrl):
        # Only entries with GET parameters are directly testable
        if re.match(r"[^ ]+\?(.+)", entry, re.I):
            usable = True
            kb.targets.add((entry.strip(), None, None, None, None))
    if not usable and not conf.forms and not conf.crawlDepth:
        warnMsg = "no usable links found (with GET parameters)"
        logger.warn(warnMsg)
def _findPageForms():
    """
    Parses forms out of the target page(s) and registers them as testable
    targets.
    """
    # Form searching is handled by the crawler itself when crawling is enabled
    if not conf.forms or conf.crawlDepth:
        return
    if conf.url and not checkConnection():
        return
    infoMsg = "searching for forms"
    logger.info(infoMsg)
    if not any((conf.bulkFile, conf.googleDork, conf.sitemapUrl)):
        # Single target - parse forms out of its response page
        page, _ = Request.queryPage(content=True)
        findPageForms(page, conf.url, True, True)
    else:
        # Multiple targets from bulk file, sitemap or search dork
        if conf.bulkFile:
            targets = getFileItems(conf.bulkFile)
        elif conf.sitemapUrl:
            targets = parseSitemap(conf.sitemapUrl)
        elif conf.googleDork:
            # Dork results were already stored into kb.targets; reprocess
            # them through form parsing instead
            targets = [_[0] for _ in kb.targets]
            kb.targets.clear()
        for i in xrange(len(targets)):
            try:
                target = targets[i]
                page, _, _ = Request.getPage(url=target.strip(), crawling=True, raise404=False)
                findPageForms(page, target, False, True)
                if conf.verbose in (1, 2):
                    status = '%d/%d links visited (%d%%)' % (i + 1, len(targets), round(100.0 * (i + 1) / len(targets)))
                    dataToStdout("\r[%s] [INFO] %s" % (time.strftime("%X"), status), True)
            except KeyboardInterrupt:
                break
            except Exception, ex:
                # Best-effort: log and continue with the remaining targets
                errMsg = "problem occurred while searching for forms at '%s' ('%s')" % (target, getSafeExString(ex))
                logger.error(errMsg)
def _setDBMSAuthentication():
    """
    Check and set the DBMS authentication credentials to run statements as
    another user, not the session user
    """
    if not conf.dbmsCred:
        return
    logger.debug("setting the DBMS authentication credentials")
    match = re.search("^(.+?):(.*?)$", conf.dbmsCred)
    if match is None:
        errMsg = "DBMS authentication credentials value must be in format "
        errMsg += "username:password"
        raise SqlmapSyntaxException(errMsg)
    conf.dbmsUsername, conf.dbmsPassword = match.group(1), match.group(2)
def _setMetasploit():
    """
    Locates and validates a local Metasploit Framework installation used
    for the takeover (out-of-band) functionality.

    Sets conf.msfPath and kb.oldMsf; raises when required privileges or
    dependencies are missing or no installation can be found.
    """
    if not conf.osPwn and not conf.osSmb and not conf.osBof:
        return
    debugMsg = "setting the takeover out-of-band functionality"
    logger.debug(debugMsg)
    msfEnvPathExists = False
    if IS_WIN:
        try:
            import win32file
        except ImportError:
            errMsg = "sqlmap requires third-party module 'pywin32' "
            errMsg += "in order to use Metasploit functionalities on "
            errMsg += "Windows. You can download it from "
            errMsg += "'http://sourceforge.net/projects/pywin32/files/pywin32/'"
            raise SqlmapMissingDependence(errMsg)
        if not conf.msfPath:
            # Try to discover the install location via the Windows registry
            def _(key, value):
                retVal = None
                try:
                    from _winreg import ConnectRegistry, OpenKey, QueryValueEx, HKEY_LOCAL_MACHINE
                    _ = ConnectRegistry(None, HKEY_LOCAL_MACHINE)
                    _ = OpenKey(_, key)
                    retVal = QueryValueEx(_, value)[0]
                except:
                    logger.debug("unable to identify Metasploit installation path via registry key")
                return retVal
            conf.msfPath = _(r"SOFTWARE\Rapid7\Metasploit", "Location")
            if conf.msfPath:
                conf.msfPath = os.path.join(conf.msfPath, "msf3")
    if conf.osSmb:
        # SMB relay attack needs to bind a listening port - admin required
        isAdmin = runningAsAdmin()
        if not isAdmin:
            errMsg = "you need to run sqlmap as an administrator "
            errMsg += "if you want to perform a SMB relay attack because "
            errMsg += "it will need to listen on a user-specified SMB "
            errMsg += "TCP port for incoming connection attempts"
            raise SqlmapMissingPrivileges(errMsg)
    if conf.msfPath:
        # Accept either the given path or its 'bin' subdirectory; detect
        # old (msfencode/msfpayload) vs new (msfvenom) framework layout
        for path in (conf.msfPath, os.path.join(conf.msfPath, "bin")):
            if any(os.path.exists(normalizePath(os.path.join(path, _))) for _ in ("msfcli", "msfconsole")):
                msfEnvPathExists = True
                if all(os.path.exists(normalizePath(os.path.join(path, _))) for _ in ("msfvenom",)):
                    kb.oldMsf = False
                elif all(os.path.exists(normalizePath(os.path.join(path, _))) for _ in ("msfencode", "msfpayload")):
                    kb.oldMsf = True
                else:
                    msfEnvPathExists = False
                conf.msfPath = path
                break
        if msfEnvPathExists:
            debugMsg = "provided Metasploit Framework path "
            debugMsg += "'%s' is valid" % conf.msfPath
            logger.debug(debugMsg)
        else:
            warnMsg = "the provided Metasploit Framework path "
            warnMsg += "'%s' is not valid. The cause could " % conf.msfPath
            warnMsg += "be that the path does not exists or that one "
            warnMsg += "or more of the needed Metasploit executables "
            warnMsg += "within msfcli, msfconsole, msfencode and "
            warnMsg += "msfpayload do not exist"
            logger.warn(warnMsg)
    else:
        warnMsg = "you did not provide the local path where Metasploit "
        warnMsg += "Framework is installed"
        logger.warn(warnMsg)
    if not msfEnvPathExists:
        # Fall back to searching the PATH environment variable
        warnMsg = "sqlmap is going to look for Metasploit Framework "
        warnMsg += "installation inside the environment path(s)"
        logger.warn(warnMsg)
        envPaths = os.environ.get("PATH", "").split(";" if IS_WIN else ":")
        for envPath in envPaths:
            envPath = envPath.replace(";", "")
            if any(os.path.exists(normalizePath(os.path.join(envPath, _))) for _ in ("msfcli", "msfconsole")):
                msfEnvPathExists = True
                if all(os.path.exists(normalizePath(os.path.join(envPath, _))) for _ in ("msfvenom",)):
                    kb.oldMsf = False
                elif all(os.path.exists(normalizePath(os.path.join(envPath, _))) for _ in ("msfencode", "msfpayload")):
                    kb.oldMsf = True
                else:
                    msfEnvPathExists = False
                if msfEnvPathExists:
                    infoMsg = "Metasploit Framework has been found "
                    infoMsg += "installed in the '%s' path" % envPath
                    logger.info(infoMsg)
                    conf.msfPath = envPath
                    break
    if not msfEnvPathExists:
        errMsg = "unable to locate Metasploit Framework installation. "
        errMsg += "You can get it at 'http://www.metasploit.com/download/'"
        raise SqlmapFilePathException(errMsg)
def _setWriteFile():
    """
    Checks the options used for writing a local file onto the back-end
    DBMS file system.
    """
    if not conf.wFile:
        return
    logger.debug("setting the write file functionality")
    if not os.path.exists(conf.wFile):
        raise SqlmapFilePathException("the provided local file '%s' does not exist" % conf.wFile)
    if not conf.dFile:
        errMsg = "you did not provide the back-end DBMS absolute path "
        errMsg += "where you want to write the local file '%s'" % conf.wFile
        raise SqlmapMissingMandatoryOptionException(errMsg)
    conf.wFileType = getFileType(conf.wFile)
def _setOS():
    """
    Force the back-end DBMS operating system option.
    """
    if not conf.os:
        return
    if conf.os.lower() not in SUPPORTED_OS:
        supported = ', '.join([o.capitalize() for o in SUPPORTED_OS])
        raise SqlmapUnsupportedDBMSException(
            "you provided an unsupported back-end DBMS operating "
            "system. The supported DBMS operating systems for OS "
            "and file system access are %s. " % supported +
            "If you do not know the back-end DBMS underlying OS, "
            "do not provide it and sqlmap will fingerprint it for "
            "you."
        )
    logger.debug("forcing back-end DBMS operating system to user defined "
                 "value '%s'" % conf.os)
    Backend.setOs(conf.os)
def _setTechnique():
    """
    Translates the user supplied --technique letters (e.g. "BEUST") into
    a list of internal technique identifiers stored back in conf.tech.
    """
    techniques = sorted(getPublicTypeMembers(PAYLOAD.TECHNIQUE), key=lambda pair: pair[1])
    letters = [name[0].upper() for name, _ in techniques]
    if not (conf.tech and isinstance(conf.tech, basestring)):
        return
    selected = []
    for letter in conf.tech.upper():
        if letter not in letters:
            errMsg = "value for --technique must be a string composed "
            errMsg += "by the letters %s. Refer to the " % ", ".join(letters)
            errMsg += "user's manual for details"
            raise SqlmapSyntaxException(errMsg)
        # Map the letter back to its numeric technique identifier
        for name, ident in techniques:
            if letter == name[0]:
                selected.append(ident)
                break
    conf.tech = selected
def _setDBMS():
    """
    Force the back-end DBMS option.
    """
    if not conf.dbms:
        return
    logger.debug("forcing back-end DBMS to user defined value")
    conf.dbms = conf.dbms.lower()
    # An optional trailing version (e.g. "mysql 5.0") is split off and
    # registered with the backend
    aliasPattern = "(%s)" % "|".join(alias for alias in SUPPORTED_DBMS)
    match = re.search("%s ([\d\.]+)" % aliasPattern, conf.dbms, re.I)
    if match:
        conf.dbms = match.group(1)
        Backend.setVersion(match.group(2))
    if conf.dbms not in SUPPORTED_DBMS:
        errMsg = "you provided an unsupported back-end database management "
        errMsg += "system. Supported DBMSes are as follows: %s. " % ', '.join(sorted(_ for _ in DBMS_DICT))
        errMsg += "If you do not know the back-end DBMS, do not provide "
        errMsg += "it and sqlmap will fingerprint it for you."
        raise SqlmapUnsupportedDBMSException(errMsg)
    # Normalize any recognized alias to its canonical DBMS name
    for dbms, aliases in DBMS_ALIASES:
        if conf.dbms in aliases:
            conf.dbms = dbms
            break
def _setTamperingFunctions():
    """
    Loads tampering functions from given script(s)

    Each script must expose a function 'tamper(payload, **kwargs)'; found
    functions are appended to kb.tamperFunctions, optionally re-ordered by
    each module's __priority__ if the user agrees to auto-resolution.
    """
    if conf.tamper:
        last_priority = PRIORITY.HIGHEST
        check_priority = True
        resolve_priorities = False
        priorities = []
        for tfile in re.split(PARAMETER_SPLITTING_REGEX, conf.tamper):
            found = False
            tfile = tfile.strip()
            if not tfile:
                continue
            # Bare names are resolved against the bundled tamper directory
            elif os.path.exists(os.path.join(paths.SQLMAP_TAMPER_PATH, tfile if tfile.endswith('.py') else "%s.py" % tfile)):
                tfile = os.path.join(paths.SQLMAP_TAMPER_PATH, tfile if tfile.endswith('.py') else "%s.py" % tfile)
            elif not os.path.exists(tfile):
                errMsg = "tamper script '%s' does not exist" % tfile
                raise SqlmapFilePathException(errMsg)
            elif not tfile.endswith('.py'):
                errMsg = "tamper script '%s' should have an extension '.py'" % tfile
                raise SqlmapSyntaxException(errMsg)
            dirname, filename = os.path.split(tfile)
            dirname = os.path.abspath(dirname)
            infoMsg = "loading tamper script '%s'" % filename[:-3]
            logger.info(infoMsg)
            # A package marker is required for the import below to work
            if not os.path.exists(os.path.join(dirname, '__init__.py')):
                errMsg = "make sure that there is an empty file '__init__.py' "
                errMsg += "inside of tamper scripts directory '%s'" % dirname
                raise SqlmapGenericException(errMsg)
            if dirname not in sys.path:
                sys.path.insert(0, dirname)
            try:
                module = __import__(filename[:-3].encode(sys.getfilesystemencoding()))
            except (ImportError, SyntaxError), msg:
                raise SqlmapSyntaxException("cannot import tamper script '%s' (%s)" % (filename[:-3], msg))
            priority = PRIORITY.NORMAL if not hasattr(module, '__priority__') else module.__priority__
            for name, function in inspect.getmembers(module, inspect.isfunction):
                # Accept only a 'tamper' function taking positional args plus **kwargs
                if name == "tamper" and inspect.getargspec(function).args and inspect.getargspec(function).keywords == "kwargs":
                    found = True
                    kb.tamperFunctions.append(function)
                    function.func_name = module.__name__
                    # Warn (once) when scripts appear in non-descending priority order
                    if check_priority and priority > last_priority:
                        message = "it seems that you might have mixed "
                        message += "the order of tamper scripts. "
                        message += "Do you want to auto resolve this? [Y/n/q] "
                        test = readInput(message, default="Y")
                        if not test or test[0] in ("y", "Y"):
                            resolve_priorities = True
                        elif test[0] in ("n", "N"):
                            resolve_priorities = False
                        elif test[0] in ("q", "Q"):
                            raise SqlmapUserQuitException
                        check_priority = False
                    priorities.append((priority, function))
                    last_priority = priority
                    break
                elif name == "dependencies":
                    # Module-level dependency check hook - run it immediately
                    function()
            if not found:
                errMsg = "missing function 'tamper(payload, **kwargs)' "
                errMsg += "in tamper script '%s'" % tfile
                raise SqlmapGenericException(errMsg)
        if kb.tamperFunctions and len(kb.tamperFunctions) > 3:
            warnMsg = "using too many tamper scripts is usually not "
            warnMsg += "a good idea"
            logger.warning(warnMsg)
        # Re-order registered functions by descending priority if requested
        if resolve_priorities and priorities:
            priorities.sort(reverse=True)
            kb.tamperFunctions = []
            for _, function in priorities:
                kb.tamperFunctions.append(function)
def _setWafFunctions():
    """
    Loads WAF/IDS/IPS detecting functions from script(s)
    """
    if not conf.identifyWaf:
        return
    for script in glob.glob(os.path.join(paths.SQLMAP_WAF_PATH, "*.py")):
        dirname, filename = os.path.split(script)
        dirname = os.path.abspath(dirname)
        moduleName = filename[:-3]
        if filename == "__init__.py":
            continue
        logger.debug("loading WAF script '%s'" % moduleName)
        if dirname not in sys.path:
            sys.path.insert(0, dirname)
        try:
            # Force a fresh import in case a same-named module was loaded before
            if moduleName in sys.modules:
                del sys.modules[moduleName]
            module = __import__(moduleName)
        except ImportError as msg:
            raise SqlmapSyntaxException("cannot import WAF script '%s' (%s)" % (moduleName, msg))
        members = dict(inspect.getmembers(module))
        if "detect" not in members:
            errMsg = "missing function 'detect(get_page)' "
            errMsg += "in WAF script '%s'" % script
            raise SqlmapGenericException(errMsg)
        else:
            kb.wafFunctions.append((members["detect"], members.get("__product__", moduleName)))
def _setThreads():
    # Fall back to a single thread on any non-integer or non-positive value
    if not (isinstance(conf.threads, int) and conf.threads > 0):
        conf.threads = 1
def _setDNSCache():
    """
    Makes a cached version of socket._getaddrinfo to avoid subsequent DNS requests.
    """
    def _getaddrinfo(*args, **kwargs):
        # Resolve (and memoize keyed on positional args) only on a cache miss
        if args not in kb.cache:
            kb.cache[args] = socket._getaddrinfo(*args, **kwargs)
        return kb.cache[args]
    # Patch only once - '_getaddrinfo' keeps a reference to the original
    if not hasattr(socket, "_getaddrinfo"):
        socket._getaddrinfo = socket.getaddrinfo
        socket.getaddrinfo = _getaddrinfo
def _setSocketPreConnect():
    """
    Makes a pre-connect version of socket.connect

    Patches socket.socket.connect so connections are taken from a pool
    (socket._ready) filled by a background daemon thread, reducing the
    per-request connection latency.
    """
    if conf.disablePrecon:
        return
    def _():
        # Background filler: keeps up to SOCKET_PRE_CONNECT_QUEUE_SIZE
        # pre-connected sockets per (family, type, proto, address) key
        while kb.threadContinue and not conf.disablePrecon:
            try:
                for key in socket._ready:
                    if len(socket._ready[key]) < SOCKET_PRE_CONNECT_QUEUE_SIZE:
                        family, type, proto, address = key
                        s = socket.socket(family, type, proto)
                        # '_connect' is the original (unpatched) connect
                        s._connect(address)
                        with kb.locks.socket:
                            socket._ready[key].append(s._sock)
            except KeyboardInterrupt:
                break
            except:
                # Best-effort warm-up - any connection failure is ignored
                pass
            finally:
                time.sleep(0.01)
    def connect(self, address):
        # Replacement for socket.socket.connect: reuse a pre-connected
        # socket if one is available, otherwise connect normally
        found = False
        key = (self.family, self.type, self.proto, address)
        with kb.locks.socket:
            if key not in socket._ready:
                socket._ready[key] = []
            if len(socket._ready[key]) > 0:
                self._sock = socket._ready[key].pop(0)
                found = True
        if not found:
            self._connect(address)
    # Patch only once; keep the original connect reachable as '_connect'
    if not hasattr(socket.socket, "_connect"):
        socket._ready = {}
        socket.socket._connect = socket.socket.connect
        socket.socket.connect = connect
        thread = threading.Thread(target=_)
        setDaemon(thread)
        thread.start()
def _setHTTPHandlers():
    """
    Check and set the HTTP/SOCKS proxy for all HTTP requests.

    Also builds and installs the global urllib2 opener with proxy, auth,
    redirect, range, HTTPS, cookie and (optionally) keep-alive handlers.
    """
    global proxyHandler
    # Drop cached open methods so a changed proxy configuration takes effect
    for _ in ("http", "https"):
        if hasattr(proxyHandler, "%s_open" % _):
            delattr(proxyHandler, "%s_open" % _)
    if conf.proxyList is not None:
        # Rotate through a user supplied proxy list (consume the head entry)
        if not conf.proxyList:
            errMsg = "list of usable proxies is exhausted"
            raise SqlmapNoneDataException(errMsg)
        conf.proxy = conf.proxyList[0]
        conf.proxyList = conf.proxyList[1:]
        infoMsg = "loading proxy '%s' from a supplied proxy list file" % conf.proxy
        logger.info(infoMsg)
    elif not conf.proxy:
        # No proxy wanted (or local target) - make sure none is used
        if conf.hostname in ("localhost", "127.0.0.1") or conf.ignoreProxy:
            proxyHandler.proxies = {}
    if conf.proxy:
        debugMsg = "setting the HTTP/SOCKS proxy for all HTTP requests"
        logger.debug(debugMsg)
        try:
            _ = urlparse.urlsplit(conf.proxy)
        except Exception, ex:
            errMsg = "invalid proxy address '%s' ('%s')" % (conf.proxy, getSafeExString(ex))
            raise SqlmapSyntaxException, errMsg
        hostnamePort = _.netloc.split(":")
        scheme = _.scheme.upper()
        hostname = hostnamePort[0]
        port = None
        username = None
        password = None
        if len(hostnamePort) == 2:
            try:
                port = int(hostnamePort[1])
            except:
                pass  # drops into the next check block
        # Scheme must be a known PROXY_TYPE and both hostname and port valid
        if not all((scheme, hasattr(PROXY_TYPE, scheme), hostname, port)):
            errMsg = "proxy value must be in format '(%s)://address:port'" % "|".join(_[0].lower() for _ in getPublicTypeMembers(PROXY_TYPE))
            raise SqlmapSyntaxException(errMsg)
        if conf.proxyCred:
            _ = re.search("^(.*?):(.*?)$", conf.proxyCred)
            if not _:
                errMsg = "proxy authentication credentials "
                errMsg += "value must be in format username:password"
                raise SqlmapSyntaxException(errMsg)
            else:
                username = _.group(1)
                password = _.group(2)
        if scheme in (PROXY_TYPE.SOCKS4, PROXY_TYPE.SOCKS5):
            # SOCKS proxying is done by wrapping urllib2's sockets, not by
            # the urllib2 proxy handler
            proxyHandler.proxies = {}
            socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5 if scheme == PROXY_TYPE.SOCKS5 else socks.PROXY_TYPE_SOCKS4, hostname, port, username=username, password=password)
            socks.wrapmodule(urllib2)
        else:
            socks.unwrapmodule(urllib2)
            if conf.proxyCred:
                # Reference: http://stackoverflow.com/questions/34079/how-to-specify-an-authenticated-proxy-for-a-python-http-connection
                proxyString = "%s@" % conf.proxyCred
            else:
                proxyString = ""
            proxyString += "%s:%d" % (hostname, port)
            proxyHandler.proxies = {"http": proxyString, "https": proxyString}
            proxyHandler.__init__(proxyHandler.proxies)
    debugMsg = "creating HTTP requests opener object"
    logger.debug(debugMsg)
    handlers = filter(None, [proxyHandler if proxyHandler.proxies else None, authHandler, redirectHandler, rangeHandler, httpsHandler])
    if not conf.dropSetCookie:
        if not conf.loadCookies:
            conf.cj = cookielib.CookieJar()
        else:
            conf.cj = cookielib.MozillaCookieJar()
            resetCookieJar(conf.cj)
        handlers.append(urllib2.HTTPCookieProcessor(conf.cj))
    # Reference: http://www.w3.org/Protocols/rfc2616/rfc2616-sec8.html
    if conf.keepAlive:
        warnMsg = "persistent HTTP(s) connections, Keep-Alive, has "
        warnMsg += "been disabled because of its incompatibility "
        if conf.proxy:
            warnMsg += "with HTTP(s) proxy"
            logger.warn(warnMsg)
        elif conf.authType:
            warnMsg += "with authentication methods"
            logger.warn(warnMsg)
        else:
            handlers.append(keepAliveHandler)
    opener = urllib2.build_opener(*handlers)
    urllib2.install_opener(opener)
def _setSafeVisit():
    """
    Check and set the safe visit options.

    Parses either a raw safe-request file (request line, headers, optional
    POST body) into kb.safeReq, or normalizes conf.safeUrl; validates
    conf.safeFreq.
    """
    if not any ((conf.safeUrl, conf.safeReqFile)):
        return
    if conf.safeReqFile:
        checkFile(conf.safeReqFile)
        raw = readCachedFileContent(conf.safeReqFile)
        # First line must be a request line, e.g. "GET /path HTTP/1.1"
        match = re.search(r"\A([A-Z]+) ([^ ]+) HTTP/[0-9.]+\Z", raw[:raw.find('\n')])
        if match:
            kb.safeReq.method = match.group(1)
            kb.safeReq.url = match.group(2)
            kb.safeReq.headers = {}
            # Header section ends at the first non-header line
            for line in raw[raw.find('\n') + 1:].split('\n'):
                line = line.strip()
                if line and ':' in line:
                    key, value = line.split(':', 1)
                    value = value.strip()
                    kb.safeReq.headers[key] = value
                    if key == HTTP_HEADER.HOST:
                        # Turn the Host header into an absolute URL base
                        if not value.startswith("http"):
                            scheme = "http"
                            if value.endswith(":443"):
                                scheme = "https"
                            value = "%s://%s" % (scheme, value)
                        kb.safeReq.url = urlparse.urljoin(value, kb.safeReq.url)
                else:
                    break
            # Anything after the blank separator line is the POST body
            post = None
            if '\r\n\r\n' in raw:
                post = raw[raw.find('\r\n\r\n') + 4:]
            elif '\n\n' in raw:
                post = raw[raw.find('\n\n') + 2:]
            if post and post.strip():
                kb.safeReq.post = post
            else:
                kb.safeReq.post = None
        else:
            errMsg = "invalid format of a safe request file"
            raise SqlmapSyntaxException, errMsg
    else:
        # Prepend a scheme when missing, guessing https from port 443
        if not re.search("^http[s]*://", conf.safeUrl):
            if ":443/" in conf.safeUrl:
                conf.safeUrl = "https://" + conf.safeUrl
            else:
                conf.safeUrl = "http://" + conf.safeUrl
    if conf.safeFreq <= 0:
        errMsg = "please provide a valid value (>0) for safe frequency (--safe-freq) while using safe visit features"
        raise SqlmapSyntaxException(errMsg)
def _setPrefixSuffix():
    """
    Builds a single custom boundary object out of the user supplied
    --prefix and --suffix values.
    """
    # Nothing to do unless BOTH prefix and suffix were supplied
    if conf.prefix is None or conf.suffix is None:
        return
    # Create a custom boundary object for user's supplied prefix
    # and suffix
    boundary = AttribDict()
    boundary.level = 1
    boundary.clause = [0]
    boundary.where = [1, 2, 3]
    boundary.prefix = conf.prefix
    boundary.suffix = conf.suffix
    lowered = boundary.suffix.lower()
    # Derive the parameter type from quote characters (and LIKE usage)
    # found in the suffix. NOTE: inside the LIKE branch ptype is left
    # unset when no quote character is present (original behavior)
    if " like" in lowered:
        if "'" in lowered:
            boundary.ptype = 3
        elif '"' in lowered:
            boundary.ptype = 5
    elif "'" in boundary.suffix:
        boundary.ptype = 2
    elif '"' in boundary.suffix:
        boundary.ptype = 4
    else:
        boundary.ptype = 1
    # user who provides --prefix/--suffix does not want other boundaries
    # to be tested for
    conf.boundaries = [boundary]
def _setAuthCred():
    """
    Adds authentication credentials (if any) for current target to the password manager
    (used by connection handler)
    """
    required = (conf.scheme, conf.hostname, conf.port, conf.authUsername, conf.authPassword)
    if kb.passwordMgr and all(_ is not None for _ in required):
        url = "%s://%s:%d" % (conf.scheme, conf.hostname, conf.port)
        kb.passwordMgr.add_password(None, url, conf.authUsername, conf.authPassword)
def _setHTTPAuthentication():
    """
    Check and set the HTTP(s) authentication method (Basic, Digest, NTLM or PKI),
    username and password for first three methods, or PEM private key file for
    PKI authentication
    """
    global authHandler
    if not conf.authType and not conf.authCred and not conf.authFile:
        return
    # A key file alone implies PKI authentication
    if conf.authFile and not conf.authType:
        conf.authType = AUTH_TYPE.PKI
    elif conf.authType and not conf.authCred and not conf.authFile:
        errMsg = "you specified the HTTP authentication type, but "
        errMsg += "did not provide the credentials"
        raise SqlmapSyntaxException(errMsg)
    elif not conf.authType and conf.authCred:
        errMsg = "you specified the HTTP authentication credentials, "
        errMsg += "but did not provide the type"
        raise SqlmapSyntaxException(errMsg)
    elif (conf.authType or "").lower() not in (AUTH_TYPE.BASIC, AUTH_TYPE.DIGEST, AUTH_TYPE.NTLM, AUTH_TYPE.PKI):
        errMsg = "HTTP authentication type value must be "
        errMsg += "Basic, Digest, NTLM or PKI"
        raise SqlmapSyntaxException(errMsg)
    if not conf.authFile:
        # Credential based authentication (Basic/Digest/NTLM)
        debugMsg = "setting the HTTP authentication type and credentials"
        logger.debug(debugMsg)
        aTypeLower = conf.authType.lower()
        if aTypeLower in (AUTH_TYPE.BASIC, AUTH_TYPE.DIGEST):
            regExp = "^(.*?):(.*?)$"
            errMsg = "HTTP %s authentication credentials " % aTypeLower
            errMsg += "value must be in format 'username:password'"
        elif aTypeLower == AUTH_TYPE.NTLM:
            # NTLM requires a 'DOMAIN\username' left-hand side
            regExp = "^(.*\\\\.*):(.*?)$"
            errMsg = "HTTP NTLM authentication credentials value must "
            errMsg += "be in format 'DOMAIN\username:password'"
        elif aTypeLower == AUTH_TYPE.PKI:
            errMsg = "HTTP PKI authentication require "
            errMsg += "usage of option `--auth-pki`"
            raise SqlmapSyntaxException(errMsg)
        aCredRegExp = re.search(regExp, conf.authCred)
        if not aCredRegExp:
            raise SqlmapSyntaxException(errMsg)
        conf.authUsername = aCredRegExp.group(1)
        conf.authPassword = aCredRegExp.group(2)
        kb.passwordMgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
        _setAuthCred()
        if aTypeLower == AUTH_TYPE.BASIC:
            authHandler = SmartHTTPBasicAuthHandler(kb.passwordMgr)
        elif aTypeLower == AUTH_TYPE.DIGEST:
            authHandler = urllib2.HTTPDigestAuthHandler(kb.passwordMgr)
        elif aTypeLower == AUTH_TYPE.NTLM:
            try:
                from ntlm import HTTPNtlmAuthHandler
            except ImportError:
                errMsg = "sqlmap requires Python NTLM third-party library "
                errMsg += "in order to authenticate via NTLM, "
                errMsg += "http://code.google.com/p/python-ntlm/"
                raise SqlmapMissingDependence(errMsg)
            authHandler = HTTPNtlmAuthHandler.HTTPNtlmAuthHandler(kb.passwordMgr)
    else:
        # Certificate (PEM private key) based PKI authentication
        debugMsg = "setting the HTTP(s) authentication PEM private key"
        logger.debug(debugMsg)
        _ = safeExpandUser(conf.authFile)
        checkFile(_)
        authHandler = HTTPSPKIAuthHandler(_)
def _setHTTPExtraHeaders():
    """
    Registers extra HTTP headers supplied by the user, or a default set
    of browser-like headers when nothing relevant was provided.
    """
    if conf.headers:
        logger.debug("setting extra HTTP headers")
        # Headers can be separated by real or escaped newlines
        delimiter = "\n" if "\n" in conf.headers else "\\n"
        conf.headers = conf.headers.split(delimiter)
        for item in conf.headers:
            if not item.strip():
                continue
            if ':' not in item:
                errMsg = "invalid header value: %s. Valid header format is 'name:value'" % repr(item).lstrip('u')
                raise SqlmapSyntaxException(errMsg)
            name, value = (part.lstrip() for part in item.split(":", 1))
            if name and value:
                conf.httpHeaders.append((name, value))
    elif not conf.requestFile and len(conf.httpHeaders or []) < 2:
        conf.httpHeaders.append((HTTP_HEADER.ACCEPT_LANGUAGE, "en-us,en;q=0.5"))
        if not conf.charset:
            conf.httpHeaders.append((HTTP_HEADER.ACCEPT_CHARSET, "ISO-8859-15,utf-8;q=0.7,*;q=0.7"))
        else:
            conf.httpHeaders.append((HTTP_HEADER.ACCEPT_CHARSET, "%s;q=0.7,*;q=0.1" % conf.charset))
        # Invalidating any caching mechanism in between
        # Reference: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
        conf.httpHeaders.append((HTTP_HEADER.CACHE_CONTROL, "no-cache,no-store"))
        conf.httpHeaders.append((HTTP_HEADER.PRAGMA, "no-cache"))
def _defaultHTTPUserAgent():
    """
    @return: default sqlmap HTTP User-Agent header
    @rtype: C{str}
    """
    # Alternative browser-like values kept for reference:
    # Firefox 3 running on Ubuntu 9.04 updated at April 2009
    #return "Mozilla/5.0 (X11; U; Linux i686; en-GB; rv:1.9.0.9) Gecko/2009042113 Ubuntu/9.04 (jaunty) Firefox/3.0.9"
    # Internet Explorer 7.0 running on Windows 2003 Service Pack 2 english
    # updated at March 2009
    #return "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)"
    return "%s (%s)" % (VERSION_STRING, SITE)
def _setHTTPUserAgent():
    """
    Set the HTTP User-Agent header.
    Depending on the user options it can be:

        * The default sqlmap string
        * A default value read as user option
        * A random value read from a list of User-Agent headers from a
          file choosed as user option
    """
    if conf.mobile:
        # Interactive choice of a smartphone User-Agent to imitate
        message = "which smartphone do you want sqlmap to imitate "
        message += "through HTTP User-Agent header?\n"
        items = sorted(getPublicTypeMembers(MOBILES, True))
        for count in xrange(len(items)):
            item = items[count]
            message += "[%d] %s%s\n" % (count + 1, item[0], " (default)" if item == MOBILES.IPHONE else "")
        test = readInput(message.rstrip('\n'), default=items.index(MOBILES.IPHONE) + 1)
        try:
            item = items[int(test) - 1]
        except:
            # Any invalid input falls back to the default (iPhone)
            item = MOBILES.IPHONE
        conf.httpHeaders.append((HTTP_HEADER.USER_AGENT, item[1]))
    elif conf.agent:
        debugMsg = "setting the HTTP User-Agent header"
        logger.debug(debugMsg)
        conf.httpHeaders.append((HTTP_HEADER.USER_AGENT, conf.agent))
    elif not conf.randomAgent:
        # Add the default sqlmap agent only when none is already present
        _ = True
        for header, _ in conf.httpHeaders:
            if header == HTTP_HEADER.USER_AGENT:
                _ = False
                break
        if _:
            conf.httpHeaders.append((HTTP_HEADER.USER_AGENT, _defaultHTTPUserAgent()))
    else:
        # Random agent - lazily load the bundled User-Agent list
        if not kb.userAgents:
            debugMsg = "loading random HTTP User-Agent header(s) from "
            debugMsg += "file '%s'" % paths.USER_AGENTS
            logger.debug(debugMsg)
            try:
                kb.userAgents = getFileItems(paths.USER_AGENTS)
            except IOError:
                warnMsg = "unable to read HTTP User-Agent header "
                warnMsg += "file '%s'" % paths.USER_AGENTS
                logger.warn(warnMsg)
                conf.httpHeaders.append((HTTP_HEADER.USER_AGENT, _defaultHTTPUserAgent()))
                return
        userAgent = random.sample(kb.userAgents or [_defaultHTTPUserAgent()], 1)[0]
        infoMsg = "fetched random HTTP User-Agent header from "
        infoMsg += "file '%s': '%s'" % (paths.USER_AGENTS, userAgent)
        logger.info(infoMsg)
        conf.httpHeaders.append((HTTP_HEADER.USER_AGENT, userAgent))
def _setHTTPReferer():
    """
    Set the HTTP Referer
    """
    if not conf.referer:
        return
    logger.debug("setting the HTTP Referer header")
    conf.httpHeaders.append((HTTP_HEADER.REFERER, conf.referer))
def _setHTTPHost():
    """
    Set the HTTP Host
    """
    if not conf.host:
        return
    logger.debug("setting the HTTP Host header")
    conf.httpHeaders.append((HTTP_HEADER.HOST, conf.host))
def _setHTTPCookies():
    """
    Set the HTTP Cookie header
    """
    if not conf.cookie:
        return
    logger.debug("setting the HTTP Cookie header")
    conf.httpHeaders.append((HTTP_HEADER.COOKIE, conf.cookie))
def _setHTTPTimeout():
    """
    Set the HTTP timeout

    Uses the user supplied value (clamped to a minimum of 3 seconds) or a
    30 second default, and applies it process-wide via
    socket.setdefaulttimeout().
    """
    if conf.timeout:
        debugMsg = "setting the HTTP timeout"
        logger.debug(debugMsg)
        conf.timeout = float(conf.timeout)
        # Enforce a sane lower bound to avoid premature connection drops
        if conf.timeout < 3.0:
            warnMsg = "the minimum HTTP timeout is 3 seconds, sqlmap "
            warnMsg += "is going to reset it"  # fixed grammar ("will going to")
            logger.warn(warnMsg)
            conf.timeout = 3.0
    else:
        conf.timeout = 30.0
    # Applies to all sockets created afterwards
    socket.setdefaulttimeout(conf.timeout)
def _checkDependencies():
    """
    Check for missing third-party dependencies (switch '--dependencies').
    """
    if not conf.dependencies:
        return
    checkDependencies()
def _createTemporaryDirectory():
    """
    Creates temporary directory for this run.

    Ensures the system temporary directory exists, then creates (once per
    process) a dedicated "sqlmap<pid>" subdirectory used for run-scoped
    temporary files, storing its path in kb.tempDir.

    Raises:
        SqlmapSystemException: if the system temporary location is not
            accessible/writable.
    """
    try:
        # the system temp dir can be missing (e.g. broken TEMP/TMP env vars)
        if not os.path.isdir(tempfile.gettempdir()):
            os.makedirs(tempfile.gettempdir())
    except IOError, ex:
        errMsg = "there has been a problem while accessing "
        errMsg += "system's temporary directory location(s) ('%s'). Please " % getSafeExString(ex)
        errMsg += "make sure that there is enough disk space left. If problem persists, "
        errMsg += "try to set environment variable 'TEMP' to a location "
        errMsg += "writeable by the current user"
        raise SqlmapSystemException, errMsg
    # only create the dedicated subdirectory once (guard against re-entry)
    if "sqlmap" not in (tempfile.tempdir or ""):
        tempfile.tempdir = tempfile.mkdtemp(prefix="sqlmap", suffix=str(os.getpid()))
    kb.tempDir = tempfile.tempdir
    if not os.path.isdir(tempfile.tempdir):
        os.makedirs(tempfile.tempdir)
def _cleanupOptions():
    """
    Cleanup configuration attributes.

    Normalizes raw command-line/config values in the conf singleton:
    expands user paths, splits comma-separated lists, casts numeric
    options, uppercases/capitalizes enumerated values and prepares
    regular-expression based options for later use.
    """
    debugMsg = "cleaning up configuration parameters"
    logger.debug(debugMsg)
    width = getConsoleWidth()
    # progress bar is wider when ETA is shown
    if conf.eta:
        conf.progressWidth = width - 26
    else:
        conf.progressWidth = width - 46
    # expand '~' (and similar) in every path-like option
    for key, value in conf.items():
        if value and any(key.endswith(_) for _ in ("Path", "File", "Dir")):
            conf[key] = safeExpandUser(value)
    if conf.testParameter:
        conf.testParameter = urldecode(conf.testParameter)
        conf.testParameter = conf.testParameter.replace(" ", "")
        conf.testParameter = re.split(PARAMETER_SPLITTING_REGEX, conf.testParameter)
    else:
        conf.testParameter = []
    if conf.user:
        conf.user = conf.user.replace(" ", "")
    if conf.rParam:
        conf.rParam = conf.rParam.replace(" ", "")
        conf.rParam = re.split(PARAMETER_SPLITTING_REGEX, conf.rParam)
    else:
        conf.rParam = []
    if conf.paramDel and '\\' in conf.paramDel:
        # e.g. '\\t' -> '\t'
        conf.paramDel = conf.paramDel.decode("string_escape")
    if conf.skip:
        conf.skip = conf.skip.replace(" ", "")
        conf.skip = re.split(PARAMETER_SPLITTING_REGEX, conf.skip)
    else:
        conf.skip = []
    if conf.cookie:
        conf.cookie = re.sub(r"[\r\n]", "", conf.cookie)
    if conf.delay:
        conf.delay = float(conf.delay)
    if conf.rFile:
        conf.rFile = ntToPosixSlashes(normalizePath(conf.rFile))
    if conf.wFile:
        conf.wFile = ntToPosixSlashes(normalizePath(conf.wFile))
    if conf.dFile:
        conf.dFile = ntToPosixSlashes(normalizePath(conf.dFile))
    if conf.sitemapUrl and not conf.sitemapUrl.lower().startswith("http"):
        conf.sitemapUrl = "http%s://%s" % ('s' if conf.forceSSL else '', conf.sitemapUrl)
    if conf.msfPath:
        conf.msfPath = ntToPosixSlashes(normalizePath(conf.msfPath))
    if conf.tmpPath:
        conf.tmpPath = ntToPosixSlashes(normalizePath(conf.tmpPath))
    if any((conf.googleDork, conf.logFile, conf.bulkFile, conf.sitemapUrl, conf.forms, conf.crawlDepth)):
        conf.multipleTargets = True
    if conf.optimize:
        setOptimize()
    # BUGFIX: the fourth positional argument of re.sub() is 'count', not
    # 'flags', so the original 're.sub(..., conf.data, re.I)' silently limited
    # the number of substitutions to the value of re.I instead of matching
    # case-insensitively. The flag is now embedded in the pattern itself.
    if conf.data:
        conf.data = re.sub(r"(?i)%s" % INJECT_HERE_MARK.replace(" ", r"[^A-Za-z]*"), CUSTOM_INJECTION_MARK_CHAR, conf.data)
    if conf.url:
        conf.url = re.sub(r"(?i)%s" % INJECT_HERE_MARK.replace(" ", r"[^A-Za-z]*"), CUSTOM_INJECTION_MARK_CHAR, conf.url)
    if conf.os:
        conf.os = conf.os.capitalize()
    if conf.dbms:
        conf.dbms = conf.dbms.capitalize()
    if conf.testFilter:
        conf.testFilter = conf.testFilter.strip('*+')
        # turn bare '*'/'+' into valid regex quantifier usage ('.*'/'.+')
        conf.testFilter = re.sub(r"([^.])([*+])", r"\g<1>.\g<2>", conf.testFilter)
        try:
            re.compile(conf.testFilter)
        except re.error:
            conf.testFilter = re.escape(conf.testFilter)
    if conf.testSkip:
        conf.testSkip = conf.testSkip.strip('*+')
        conf.testSkip = re.sub(r"([^.])([*+])", r"\g<1>.\g<2>", conf.testSkip)
        try:
            re.compile(conf.testSkip)
        except re.error:
            conf.testSkip = re.escape(conf.testSkip)
    if "timeSec" not in kb.explicitSettings:
        if conf.tor:
            # Tor latency makes time-based detection unreliable at the default
            conf.timeSec = 2 * conf.timeSec
            kb.adjustTimeDelay = ADJUST_TIME_DELAY.DISABLE
            warnMsg = "increasing default value for "
            warnMsg += "option '--time-sec' to %d because " % conf.timeSec
            warnMsg += "switch '--tor' was provided"
            logger.warn(warnMsg)
    else:
        kb.adjustTimeDelay = ADJUST_TIME_DELAY.DISABLE
    if conf.retries:
        conf.retries = min(conf.retries, MAX_CONNECT_RETRIES)
    if conf.code:
        conf.code = int(conf.code)
    if conf.csvDel:
        conf.csvDel = conf.csvDel.decode("string_escape")  # e.g. '\\t' -> '\t'
    if conf.torPort and isinstance(conf.torPort, basestring) and conf.torPort.isdigit():
        conf.torPort = int(conf.torPort)
    if conf.torType:
        conf.torType = conf.torType.upper()
    if conf.outputDir:
        paths.SQLMAP_OUTPUT_PATH = os.path.realpath(os.path.expanduser(conf.outputDir))
        setPaths()
    if conf.string:
        try:
            conf.string = conf.string.decode("unicode_escape")
        except:
            # fall back to manual unescaping of whitespace escape sequences
            charset = string.whitespace.replace(" ", "")
            for _ in charset:
                conf.string = conf.string.replace(_.encode("string_escape"), _)
    if conf.getAll:
        map(lambda x: conf.__setitem__(x, True), WIZARD.ALL)
    if conf.noCast:
        for _ in DUMP_REPLACEMENTS.keys():
            del DUMP_REPLACEMENTS[_]
    if conf.dumpFormat:
        conf.dumpFormat = conf.dumpFormat.upper()
    # NOTE: a duplicated 'conf.torType = conf.torType.upper()' normalization
    # present in the original has been removed (it was a no-op second pass)
    if conf.col:
        conf.col = re.sub(r"\s*,\s*", ",", conf.col)
    if conf.excludeCol:
        conf.excludeCol = re.sub(r"\s*,\s*", ",", conf.excludeCol)
    if conf.binaryFields:
        conf.binaryFields = re.sub(r"\s*,\s*", ",", conf.binaryFields)
    threadData = getCurrentThreadData()
    threadData.reset()
def _dirtyPatches():
    """
    Place for "dirty" Python related patches
    """
    # raise httplib's maximum accepted line length so overly long result
    # lines are accepted (e.g. SQLi results in HTTP header responses)
    setattr(httplib, "_MAXLINE", 1 * 1024 * 1024)
def _purgeOutput():
    """
    Safely removes (purges) the output directory (switch '--purge-output').
    """
    if not conf.purgeOutput:
        return
    purge(paths.SQLMAP_OUTPUT_PATH)
def _setConfAttributes():
    """
    Seed the configuration singleton with the runtime attributes that are
    not driven by command-line options, giving each a neutral initial value.
    """
    logger.debug("initializing the configuration")
    # table of runtime attributes and their initial values; the literal is
    # rebuilt on each call so the mutable containers are always fresh
    initial = {
        "authUsername": None,
        "authPassword": None,
        "boundaries": [],
        "cj": None,
        "dbmsConnector": None,
        "dbmsHandler": None,
        "dnsServer": None,
        "dumpPath": None,
        "hashDB": None,
        "hashDBFile": None,
        "httpHeaders": [],
        "hostname": None,
        "ipv6": False,
        "multipleTargets": False,
        "outputPath": None,
        "paramDict": {},
        "parameters": {},
        "path": None,
        "port": None,
        "proxyList": None,
        "resultsFilename": None,
        "resultsFP": None,
        "scheme": None,
        "tests": [],
        "trafficFP": None,
        "wFileType": None,
    }
    for key, value in initial.items():
        conf[key] = value
def _setKnowledgeBaseAttributes(flushAll=True):
    """
    This function set some needed attributes into the knowledge base
    singleton.

    :param flushAll: when True also resets the attributes that are meant to
        survive option re-initialization (loaded wordlists, tamper/WAF
        functions, gathered targets, etc.)
    """
    debugMsg = "initializing the knowledge base"
    logger.debug(debugMsg)
    kb.absFilePaths = set()
    kb.adjustTimeDelay = None
    kb.alerted = False
    kb.alwaysRefresh = None
    kb.arch = None
    kb.authHeader = None
    kb.bannerFp = AttribDict()
    kb.binaryField = False
    kb.brute = AttribDict({"tables": [], "columns": []})
    kb.bruteMode = False
    kb.cache = AttribDict()
    kb.cache.content = {}
    kb.cache.regex = {}
    kb.cache.stdev = {}
    # randomized marker characters used to delimit sqlmap's own payload output
    kb.chars = AttribDict()
    kb.chars.delimiter = randomStr(length=6, lowercase=True)
    kb.chars.start = "%s%s%s" % (KB_CHARS_BOUNDARY_CHAR, randomStr(length=3, alphabet=KB_CHARS_LOW_FREQUENCY_ALPHABET), KB_CHARS_BOUNDARY_CHAR)
    kb.chars.stop = "%s%s%s" % (KB_CHARS_BOUNDARY_CHAR, randomStr(length=3, alphabet=KB_CHARS_LOW_FREQUENCY_ALPHABET), KB_CHARS_BOUNDARY_CHAR)
    kb.chars.at, kb.chars.space, kb.chars.dollar, kb.chars.hash_ = ("%s%s%s" % (KB_CHARS_BOUNDARY_CHAR, _, KB_CHARS_BOUNDARY_CHAR) for _ in randomStr(length=4, lowercase=True))
    kb.columnExistsChoice = None
    kb.commonOutputs = None
    kb.counters = {}
    kb.data = AttribDict()
    kb.dataOutputFlag = False
    # Active back-end DBMS fingerprint
    kb.dbms = None
    kb.dbmsVersion = [UNKNOWN_DBMS_VERSION]
    kb.delayCandidates = TIME_DELAY_CANDIDATES * [0]
    kb.dep = None
    kb.dnsMode = False
    kb.dnsTest = None
    kb.docRoot = None
    kb.dumpTable = None
    kb.dumpKeyboardInterrupt = False
    kb.dynamicMarkings = []
    kb.dynamicParameter = False
    kb.endDetection = False
    kb.explicitSettings = set()
    kb.extendTests = None
    kb.errorChunkLength = None
    kb.errorIsNone = True
    kb.falsePositives = []
    kb.fileReadMode = False
    kb.followSitemapRecursion = None
    kb.forcedDbms = None
    kb.forcePartialUnion = False
    kb.forceWhere = None
    kb.futileUnion = None
    kb.headersFp = {}
    kb.heuristicDbms = None
    kb.heuristicMode = False
    kb.heuristicTest = None
    kb.hintValue = None
    kb.htmlFp = []
    kb.httpErrorCodes = {}
    kb.inferenceMode = False
    kb.ignoreCasted = None
    kb.ignoreNotFound = False
    kb.ignoreTimeout = False
    kb.injection = InjectionDict()
    kb.injections = []
    kb.laggingChecked = False
    kb.lastParserStatus = None
    # one lock per shared resource used by worker threads
    kb.locks = AttribDict()
    for _ in ("cache", "count", "index", "io", "limit", "log", "socket", "redirect", "request", "value"):
        kb.locks[_] = threading.Lock()
    kb.matchRatio = None
    kb.maxConnectionsFlag = False
    kb.mergeCookies = None
    kb.multiThreadMode = False
    kb.negativeLogic = False
    kb.nullConnection = None
    kb.oldMsf = None
    kb.orderByColumns = None
    kb.originalCode = None
    kb.originalPage = None
    kb.originalPageTime = None
    kb.originalTimeDelay = None
    kb.originalUrls = dict()
    # Back-end DBMS underlying operating system fingerprint via banner (-b)
    # parsing
    kb.os = None
    kb.osVersion = None
    kb.osSP = None
    kb.pageCompress = True
    kb.pageTemplate = None
    kb.pageTemplates = dict()
    kb.pageEncoding = DEFAULT_PAGE_ENCODING
    kb.pageStable = None
    kb.partRun = None
    kb.permissionFlag = False
    kb.postHint = None
    kb.postSpaceToPlus = False
    kb.postUrlEncode = True
    kb.prependFlag = False
    kb.processResponseCounter = 0
    kb.previousMethod = None
    kb.processUserMarks = None
    kb.proxyAuthHeader = None
    kb.queryCounter = 0
    kb.redirectChoice = None
    kb.reflectiveMechanism = True
    kb.reflectiveCounters = {REFLECTIVE_COUNTER.MISS: 0, REFLECTIVE_COUNTER.HIT: 0}
    kb.requestCounter = 0
    kb.resendPostOnRedirect = None
    kb.responseTimes = {}
    kb.responseTimeMode = None
    kb.responseTimePayload = None
    kb.resumeValues = True
    kb.safeCharEncode = False
    kb.safeReq = AttribDict()
    kb.singleLogFlags = set()
    kb.reduceTests = None
    kb.tlsSNI = {}
    kb.stickyDBMS = False
    kb.stickyLevel = None
    kb.storeCrawlingChoice = None
    kb.storeHashesChoice = None
    kb.suppressResumeInfo = False
    kb.technique = None
    kb.tempDir = None
    kb.testMode = False
    kb.testOnlyCustom = False
    kb.testQueryCount = 0
    kb.testType = None
    kb.threadContinue = True
    kb.threadException = False
    kb.tableExistsChoice = None
    kb.timeValidCharsRun = 0
    kb.uChar = NULL
    kb.unionDuplicates = False
    kb.xpCmdshellAvailable = False
    # attributes below persist across re-initializations unless flushAll is set
    if flushAll:
        kb.headerPaths = {}
        kb.keywords = set(getFileItems(paths.SQL_KEYWORDS))
        kb.passwordMgr = None
        kb.skipVulnHost = None
        kb.tamperFunctions = []
        kb.targets = oset()
        kb.testedParams = set()
        kb.userAgents = None
        kb.vainRun = True
        kb.vulnHosts = set()
        kb.wafFunctions = []
        kb.wordlists = None
def _useWizardInterface():
    """
    Presents simple wizard interface for beginner users

    Interactively asks for target URL, POST data, difficulty
    (level/risk) and enumeration depth, then silences output and enables
    batch mode before the run starts. No-op unless switch '--wizard' is used.
    """
    if not conf.wizard:
        return
    logger.info("starting wizard interface")
    # insist on a target URL before anything else
    while not conf.url:
        message = "Please enter full target URL (-u): "
        conf.url = readInput(message, default=None)
    message = "%s data (--data) [Enter for None]: " % ((conf.method if conf.method != HTTPMETHOD.GET else conf.method) or HTTPMETHOD.POST)
    conf.data = readInput(message, default=None)
    # no parameters ('=') or injection marker ('*') found -> fall back to form search
    if not (filter(lambda _: '=' in unicode(_), (conf.url, conf.data)) or '*' in conf.url):
        warnMsg = "no GET and/or %s parameter(s) found for testing " % ((conf.method if conf.method != HTTPMETHOD.GET else conf.method) or HTTPMETHOD.POST)
        warnMsg += "(e.g. GET parameter 'id' in 'http://www.site.com/vuln.php?id=1'). "
        if not conf.crawlDepth and not conf.forms:
            warnMsg += "Will search for forms"
            conf.forms = True
        logger.warn(warnMsg)
    choice = None
    while choice is None or choice not in ("", "1", "2", "3"):
        message = "Injection difficulty (--level/--risk). Please choose:\n"
        message += "[1] Normal (default)\n[2] Medium\n[3] Hard"
        choice = readInput(message, default='1')
    if choice == '2':
        conf.risk = 2
        conf.level = 3
    elif choice == '3':
        conf.risk = 3
        conf.level = 5
    else:
        conf.risk = 1
        conf.level = 1
    # skip the enumeration question when '--all' was already provided
    if not conf.getAll:
        choice = None
        while choice is None or choice not in ("", "1", "2", "3"):
            message = "Enumeration (--banner/--current-user/etc). Please choose:\n"
            message += "[1] Basic (default)\n[2] Intermediate\n[3] All"
            choice = readInput(message, default='1')
        if choice == '2':
            map(lambda x: conf.__setitem__(x, True), WIZARD.INTERMEDIATE)
        elif choice == '3':
            map(lambda x: conf.__setitem__(x, True), WIZARD.ALL)
        else:
            map(lambda x: conf.__setitem__(x, True), WIZARD.BASIC)
    logger.debug("muting sqlmap.. it will do the magic for you")
    conf.verbose = 0
    conf.batch = True
    conf.threads = 4
    dataToStdout("\nsqlmap is running, please wait..\n\n")
def _saveConfig():
    """
    Saves the command line options to a sqlmap configuration INI file
    Format.

    Walks optDict to map every conf entry to its option family/section,
    fills in type-appropriate defaults for unset options and writes the
    result to the file given via '--save'.

    Raises:
        SqlmapSystemException: if the configuration file can not be written.
    """
    if not conf.saveConfig:
        return
    debugMsg = "saving command line options to a sqlmap configuration INI file"
    logger.debug(debugMsg)
    config = UnicodeRawConfigParser()
    userOpts = {}
    for family in optDict.keys():
        userOpts[family] = []
    # group every known conf option under its option family (INI section)
    for option, value in conf.items():
        for family, optionData in optDict.items():
            if option in optionData:
                userOpts[family].append((option, value, optionData[option]))
    for family, optionData in userOpts.items():
        config.add_section(family)
        optionData.sort()
        for option, value, datatype in optionData:
            if datatype and isListLike(datatype):
                datatype = datatype[0]
            if option in IGNORE_SAVE_OPTIONS:
                continue
            # unset options get a type-appropriate neutral value so the
            # produced INI file is complete and re-loadable
            if value is None:
                if datatype == OPTION_TYPE.BOOLEAN:
                    value = "False"
                elif datatype in (OPTION_TYPE.INTEGER, OPTION_TYPE.FLOAT):
                    if option in defaults:
                        value = str(defaults[option])
                    else:
                        value = "0"
                elif datatype == OPTION_TYPE.STRING:
                    value = ""
            # keep multi-line values parseable by indenting continuations
            if isinstance(value, basestring):
                value = value.replace("\n", "\n ")
            config.set(family, option, value)
    confFP = openFile(conf.saveConfig, "wb")
    try:
        config.write(confFP)
    except IOError, ex:
        errMsg = "something went wrong while trying "
        errMsg += "to write to the configuration file '%s' ('%s')" % (conf.saveConfig, getSafeExString(ex))
        raise SqlmapSystemException(errMsg)
    infoMsg = "saved command line options to the configuration file '%s'" % conf.saveConfig
    logger.info(infoMsg)
def setVerbosity():
    """
    Set the verbosity of sqlmap output messages (option '-v').
    """
    if conf.verbose is None:
        conf.verbose = 1
    conf.verbose = int(conf.verbose)
    # ETA progress bar is incompatible with verbosity levels above 2
    if conf.verbose > 2 and conf.eta:
        conf.verbose = 2
        logger.setLevel(logging.DEBUG)
        return
    if conf.verbose >= 5:
        logger.setLevel(CUSTOM_LOGGING.TRAFFIC_IN)
        return
    levels = {
        0: logging.ERROR,
        1: logging.INFO,
        2: logging.DEBUG,
        3: CUSTOM_LOGGING.PAYLOAD,
        4: CUSTOM_LOGGING.TRAFFIC_OUT,
    }
    if conf.verbose in levels:
        logger.setLevel(levels[conf.verbose])
def _normalizeOptions(inputOptions):
    """
    Coerce values inside inputOptions to their declared option types,
    falling back to a neutral value when the cast fails.
    """
    declared = {}
    for group in optDict.keys():
        declared.update(optDict[group])

    def _coerce(value, caster, fallback):
        # best-effort cast; invalid values degrade to the neutral fallback
        try:
            return caster(value)
        except (TypeError, ValueError):
            return fallback

    for key in inputOptions:
        if key not in declared:
            continue
        value = inputOptions[key]
        if value is None:
            continue
        type_ = declared[key]
        if type_ and isinstance(type_, tuple):
            type_ = type_[0]
        if type_ == OPTION_TYPE.BOOLEAN:
            value = _coerce(value, bool, False)
        elif type_ == OPTION_TYPE.INTEGER:
            value = _coerce(value, int, 0)
        elif type_ == OPTION_TYPE.FLOAT:
            value = _coerce(value, float, 0.0)
        inputOptions[key] = value
def _mergeOptions(inputOptions, overrideOptions):
    """
    Merge command line options with configuration file and default options.
    @param inputOptions: optparse object with command line options.
    @type inputOptions: C{instance}
    """
    # '--pickled-options' carries a whole option set as a base64-encoded pickle
    if inputOptions.pickledOptions:
        try:
            inputOptions = base64unpickle(inputOptions.pickledOptions)
            _normalizeOptions(inputOptions)
        except Exception, ex:
            errMsg = "provided invalid value '%s' for option '--pickled-options'" % inputOptions.pickledOptions
            errMsg += " ('%s')" % ex if ex.message else ""
            raise SqlmapSyntaxException(errMsg)
    if inputOptions.configFile:
        configFileParser(inputOptions.configFile)
    # optparse objects expose options via __dict__, dict-likes via items()
    if hasattr(inputOptions, "items"):
        inputOptionsItems = inputOptions.items()
    else:
        inputOptionsItems = inputOptions.__dict__.items()
    # command line values win over config file values unless unset
    for key, value in inputOptionsItems:
        if key not in conf or value not in (None, False) or overrideOptions:
            conf[key] = value
    # remember which options the user set explicitly (vs. defaults below)
    for key, value in conf.items():
        if value is not None:
            kb.explicitSettings.add(key)
    for key, value in defaults.items():
        if hasattr(conf, key) and conf[key] is None:
            conf[key] = value
    # allow overriding options through SQLMAP_-prefixed environment variables
    lut = {}
    for group in optDict.keys():
        lut.update((_.upper(), _) for _ in optDict[group])
    envOptions = {}
    for key, value in os.environ.items():
        if key.upper().startswith(SQLMAP_ENVIRONMENT_PREFIX):
            _ = key[len(SQLMAP_ENVIRONMENT_PREFIX):].upper()
            if _ in lut:
                envOptions[lut[_]] = value
    if envOptions:
        _normalizeOptions(envOptions)
        for key, value in envOptions.items():
            conf[key] = value
    mergedOptions.update(conf)
def _setTrafficOutputFP():
    """
    Open the file used for logging all HTTP traffic (option '-t').
    """
    if not conf.trafficFile:
        return
    logger.info("setting file for logging HTTP traffic")
    conf.trafficFP = openFile(conf.trafficFile, "w+")
def _setDNSServer():
    """
    Set up the fake DNS server instance used for DNS data exfiltration
    (option '--dns-domain'). Requires administrative privileges because
    it listens on privileged UDP port 53.

    Raises:
        SqlmapGenericException: if the DNS server can not be started.
        SqlmapMissingPrivileges: when not running as administrator/root.
    """
    if not conf.dnsName:
        return
    infoMsg = "setting up DNS server instance"
    logger.info(infoMsg)
    isAdmin = runningAsAdmin()
    if isAdmin:
        try:
            conf.dnsServer = DNSServer()
            conf.dnsServer.run()
        except socket.error, msg:
            errMsg = "there was an error while setting up "
            errMsg += "DNS server instance ('%s')" % msg
            raise SqlmapGenericException(errMsg)
    else:
        errMsg = "you need to run sqlmap as an administrator "
        errMsg += "if you want to perform a DNS data exfiltration attack "
        errMsg += "as it will need to listen on privileged UDP port 53 "
        errMsg += "for incoming address resolution attempts"
        raise SqlmapMissingPrivileges(errMsg)
def _setProxyList():
    """
    Load the list of proxies from the file given via option '--proxy-file'.

    Each entry looks like '[scheme://]address:port'; a missing scheme
    defaults to 'http'.
    """
    if not conf.proxyFile:
        return
    conf.proxyList = []
    content = readCachedFileContent(conf.proxyFile)
    for match in re.finditer(r"(?i)((http[^:]*|socks[^:]*)://)?([\w.]+):(\d+)", content):
        _, scheme, address, port = match.groups()
        entry = "%s://%s:%s" % (scheme or "http", address, port)
        conf.proxyList.append(entry)
def _setTorProxySettings():
    """
    Dispatch Tor proxy setup according to the configured Tor proxy type.
    """
    if not conf.tor:
        return
    setup = _setTorHttpProxySettings if conf.torType == PROXY_TYPE.HTTP else _setTorSocksProxySettings
    setup()
def _setTorHttpProxySettings():
    """
    Locate a locally listening Tor HTTP proxy and point conf.proxy at it.

    Probes the default Tor HTTP ports (or the user supplied '--tor-port')
    on localhost and uses the first one that accepts a TCP connection.

    Raises:
        SqlmapConnectionException: if no candidate port accepts a connection.
    """
    infoMsg = "setting Tor HTTP proxy settings"
    logger.info(infoMsg)
    found = None
    for port in (DEFAULT_TOR_HTTP_PORTS if not conf.torPort else (conf.torPort,)):
        s = None
        try:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.connect((LOCALHOST, port))
            found = port
        except socket.error:
            pass
        finally:
            # BUGFIX: the original closed the probe socket only on the failure
            # path (leaking the successfully connected one, and raising
            # NameError if socket creation itself failed); close it always
            if s is not None:
                s.close()
        if found:
            break
    if found:
        conf.proxy = "http://%s:%d" % (LOCALHOST, found)
    else:
        errMsg = "can't establish connection with the Tor proxy. "
        errMsg += "Please make sure that you have Vidalia, Privoxy or "
        errMsg += "Polipo bundle installed for you to be able to "
        errMsg += "successfully use switch '--tor' "
        raise SqlmapConnectionException(errMsg)
    if not conf.checkTor:
        warnMsg = "use switch '--check-tor' at "
        warnMsg += "your own convenience when accessing "
        warnMsg += "Tor anonymizing network because of "
        warnMsg += "known issues with default settings of various 'bundles' "
        warnMsg += "(e.g. Vidalia)"
        logger.warn(warnMsg)
def _setTorSocksProxySettings():
    """
    Route all urllib2 traffic through the local Tor SOCKS proxy by
    monkey-patching the module with the 'socks' wrapper.
    """
    infoMsg = "setting Tor SOCKS proxy settings"
    logger.info(infoMsg)
    # Has to be SOCKS5 to prevent DNS leaks (http://en.wikipedia.org/wiki/Tor_%28anonymity_network%29)
    socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5 if conf.torType == PROXY_TYPE.SOCKS5 else socks.PROXY_TYPE_SOCKS4, LOCALHOST, conf.torPort or DEFAULT_TOR_SOCKS_PORT)
    socks.wrapmodule(urllib2)
def _checkWebSocket():
    """
    Ensure the third-party 'websocket-client' module is importable when the
    target URL uses the WebSocket scheme (ws:// or wss://).

    Raises:
        SqlmapMissingDependence: if the module can not be imported.
    """
    if conf.url and conf.url.startswith(("ws:/", "wss:/")):
        try:
            from websocket import ABNF
        except ImportError:
            errMsg = "sqlmap requires third-party module 'websocket-client' "
            # BUGFIX: corrected typo in user-facing message ("funcionality")
            errMsg += "in order to use WebSocket functionality"
            raise SqlmapMissingDependence(errMsg)
def _checkTor():
    """
    Verify that traffic really goes through the Tor network by fetching
    the official check page (switch '--check-tor').

    Raises:
        SqlmapConnectionException: when the check page does not confirm Tor usage.
    """
    if not conf.checkTor:
        return
    infoMsg = "checking Tor connection"
    logger.info(infoMsg)
    try:
        page, _, _ = Request.getPage(url="https://check.torproject.org/", raise404=False)
    except SqlmapConnectionException:
        page = None
    # the check page contains 'Congratulations' only when Tor is in use
    if page and 'Congratulations' in page:
        infoMsg = "Tor is properly being used"
        logger.info(infoMsg)
    else:
        errMsg = "it seems that Tor is not properly set. Please try using options '--tor-type' and/or '--tor-port'"
        raise SqlmapConnectionException(errMsg)
def _basicOptionValidation():
    """
    Validate ranges and mutual compatibility of the parsed command line
    options, raising SqlmapSyntaxException (or SqlmapFilePathException)
    on the first violation found.
    """
    # numeric range checks
    if conf.limitStart is not None and not (isinstance(conf.limitStart, int) and conf.limitStart > 0):
        errMsg = "value for option '--start' (limitStart) must be an integer value greater than zero (>0)"
        raise SqlmapSyntaxException(errMsg)
    if conf.limitStop is not None and not (isinstance(conf.limitStop, int) and conf.limitStop > 0):
        errMsg = "value for option '--stop' (limitStop) must be an integer value greater than zero (>0)"
        raise SqlmapSyntaxException(errMsg)
    if conf.level is not None and not (isinstance(conf.level, int) and conf.level >= 1 and conf.level <= 5):
        errMsg = "value for option '--level' must be an integer value from range [1, 5]"
        raise SqlmapSyntaxException(errMsg)
    if conf.risk is not None and not (isinstance(conf.risk, int) and conf.risk >= 1 and conf.risk <= 3):
        errMsg = "value for option '--risk' must be an integer value from range [1, 3]"
        raise SqlmapSyntaxException(errMsg)
    if isinstance(conf.limitStart, int) and conf.limitStart > 0 and \
       isinstance(conf.limitStop, int) and conf.limitStop < conf.limitStart:
        errMsg = "value for option '--start' (limitStart) must be smaller or equal than value for --stop (limitStop) option"
        raise SqlmapSyntaxException(errMsg)
    if isinstance(conf.firstChar, int) and conf.firstChar > 0 and \
       isinstance(conf.lastChar, int) and conf.lastChar < conf.firstChar:
        errMsg = "value for option '--first' (firstChar) must be smaller than or equal to value for --last (lastChar) option"
        raise SqlmapSyntaxException(errMsg)
    # mutually exclusive / dependent option combinations
    if conf.textOnly and conf.nullConnection:
        errMsg = "switch '--text-only' is incompatible with switch '--null-connection'"
        raise SqlmapSyntaxException(errMsg)
    if conf.direct and conf.url:
        errMsg = "option '-d' is incompatible with option '-u' ('--url')"
        raise SqlmapSyntaxException(errMsg)
    if conf.identifyWaf and conf.skipWaf:
        errMsg = "switch '--identify-waf' is incompatible with switch '--skip-waf'"
        raise SqlmapSyntaxException(errMsg)
    if conf.titles and conf.nullConnection:
        errMsg = "switch '--titles' is incompatible with switch '--null-connection'"
        raise SqlmapSyntaxException(errMsg)
    if conf.dumpTable and conf.search:
        errMsg = "switch '--dump' is incompatible with switch '--search'"
        raise SqlmapSyntaxException(errMsg)
    if conf.data and conf.nullConnection:
        errMsg = "option '--data' is incompatible with switch '--null-connection'"
        raise SqlmapSyntaxException(errMsg)
    if conf.string and conf.nullConnection:
        errMsg = "option '--string' is incompatible with switch '--null-connection'"
        raise SqlmapSyntaxException(errMsg)
    if conf.notString and conf.nullConnection:
        errMsg = "option '--not-string' is incompatible with switch '--null-connection'"
        raise SqlmapSyntaxException(errMsg)
    if conf.noCast and conf.hexConvert:
        errMsg = "switch '--no-cast' is incompatible with switch '--hex'"
        raise SqlmapSyntaxException(errMsg)
    if conf.dumpAll and conf.search:
        errMsg = "switch '--dump-all' is incompatible with switch '--search'"
        raise SqlmapSyntaxException(errMsg)
    if conf.string and conf.notString:
        errMsg = "option '--string' is incompatible with switch '--not-string'"
        raise SqlmapSyntaxException(errMsg)
    if conf.regexp and conf.nullConnection:
        errMsg = "option '--regexp' is incompatible with switch '--null-connection'"
        raise SqlmapSyntaxException(errMsg)
    # user supplied regular expressions have to compile
    if conf.regexp:
        try:
            re.compile(conf.regexp)
        except re.error, ex:
            errMsg = "invalid regular expression '%s' ('%s')" % (conf.regexp, getSafeExString(ex))
            raise SqlmapSyntaxException(errMsg)
    if conf.crawlExclude:
        try:
            re.compile(conf.crawlExclude)
        except re.error, ex:
            errMsg = "invalid regular expression '%s' ('%s')" % (conf.crawlExclude, getSafeExString(ex))
            raise SqlmapSyntaxException(errMsg)
    if conf.dumpTable and conf.dumpAll:
        errMsg = "switch '--dump' is incompatible with switch '--dump-all'"
        raise SqlmapSyntaxException(errMsg)
    if conf.predictOutput and (conf.threads > 1 or conf.optimize):
        errMsg = "switch '--predict-output' is incompatible with option '--threads' and switch '-o'"
        raise SqlmapSyntaxException(errMsg)
    if conf.threads > MAX_NUMBER_OF_THREADS and not conf.get("skipThreadCheck"):
        errMsg = "maximum number of used threads is %d avoiding potential connection issues" % MAX_NUMBER_OF_THREADS
        raise SqlmapSyntaxException(errMsg)
    if conf.forms and not any((conf.url, conf.googleDork, conf.bulkFile, conf.sitemapUrl)):
        errMsg = "switch '--forms' requires usage of option '-u' ('--url'), '-g', '-m' or '-x'"
        raise SqlmapSyntaxException(errMsg)
    if conf.crawlExclude and not conf.crawlDepth:
        errMsg = "option '--crawl-exclude' requires usage of switch '--crawl'"
        raise SqlmapSyntaxException(errMsg)
    if conf.safePost and not conf.safeUrl:
        errMsg = "option '--safe-post' requires usage of option '--safe-url'"
        raise SqlmapSyntaxException(errMsg)
    if conf.safeFreq and not any((conf.safeUrl, conf.safeReqFile)):
        errMsg = "option '--safe-freq' requires usage of option '--safe-url' or '--safe-req'"
        raise SqlmapSyntaxException(errMsg)
    if conf.safeReqFile and any((conf.safeUrl, conf.safePost)):
        errMsg = "option '--safe-req' is incompatible with option '--safe-url' and option '--safe-post'"
        raise SqlmapSyntaxException(errMsg)
    if conf.csrfUrl and not conf.csrfToken:
        errMsg = "option '--csrf-url' requires usage of option '--csrf-token'"
        raise SqlmapSyntaxException(errMsg)
    if conf.csrfToken and conf.threads > 1:
        errMsg = "option '--csrf-url' is incompatible with option '--threads'"
        raise SqlmapSyntaxException(errMsg)
    if conf.requestFile and conf.url and conf.url != DUMMY_URL:
        errMsg = "option '-r' is incompatible with option '-u' ('--url')"
        raise SqlmapSyntaxException(errMsg)
    if conf.direct and conf.proxy:
        errMsg = "option '-d' is incompatible with option '--proxy'"
        raise SqlmapSyntaxException(errMsg)
    if conf.direct and conf.tor:
        errMsg = "option '-d' is incompatible with switch '--tor'"
        raise SqlmapSyntaxException(errMsg)
    if not conf.tech:
        errMsg = "option '--technique' can't be empty"
        raise SqlmapSyntaxException(errMsg)
    if conf.tor and conf.ignoreProxy:
        errMsg = "switch '--tor' is incompatible with switch '--ignore-proxy'"
        raise SqlmapSyntaxException(errMsg)
    if conf.tor and conf.proxy:
        errMsg = "switch '--tor' is incompatible with option '--proxy'"
        raise SqlmapSyntaxException(errMsg)
    if conf.proxy and conf.proxyFile:
        errMsg = "switch '--proxy' is incompatible with option '--proxy-file'"
        raise SqlmapSyntaxException(errMsg)
    if conf.checkTor and not any((conf.tor, conf.proxy)):
        errMsg = "switch '--check-tor' requires usage of switch '--tor' (or option '--proxy' with HTTP proxy address using Tor)"
        raise SqlmapSyntaxException(errMsg)
    if conf.torPort is not None and not (isinstance(conf.torPort, int) and conf.torPort >= 0 and conf.torPort <= 65535):
        errMsg = "value for option '--tor-port' must be in range 0-65535"
        raise SqlmapSyntaxException(errMsg)
    if conf.torType not in getPublicTypeMembers(PROXY_TYPE, True):
        errMsg = "option '--tor-type' accepts one of following values: %s" % ", ".join(getPublicTypeMembers(PROXY_TYPE, True))
        raise SqlmapSyntaxException(errMsg)
    if conf.dumpFormat not in getPublicTypeMembers(DUMP_FORMAT, True):
        errMsg = "option '--dump-format' accepts one of following values: %s" % ", ".join(getPublicTypeMembers(DUMP_FORMAT, True))
        raise SqlmapSyntaxException(errMsg)
    if conf.skip and conf.testParameter:
        errMsg = "option '--skip' is incompatible with option '-p'"
        raise SqlmapSyntaxException(errMsg)
    if conf.mobile and conf.agent:
        errMsg = "switch '--mobile' is incompatible with option '--user-agent'"
        raise SqlmapSyntaxException(errMsg)
    if conf.proxy and conf.ignoreProxy:
        errMsg = "option '--proxy' is incompatible with switch '--ignore-proxy'"
        raise SqlmapSyntaxException(errMsg)
    if conf.timeSec < 1:
        errMsg = "value for option '--time-sec' must be a positive integer"
        raise SqlmapSyntaxException(errMsg)
    if conf.uChar and not re.match(UNION_CHAR_REGEX, conf.uChar):
        errMsg = "value for option '--union-char' must be an alpha-numeric value (e.g. 1)"
        raise SqlmapSyntaxException(errMsg)
    # '--union-cols' accepts either a single number or a 'low-high' range
    if isinstance(conf.uCols, basestring):
        if not conf.uCols.isdigit() and ("-" not in conf.uCols or len(conf.uCols.split("-")) != 2):
            errMsg = "value for option '--union-cols' must be a range with hyphon "
            errMsg += "(e.g. 1-10) or integer value (e.g. 5)"
            raise SqlmapSyntaxException(errMsg)
    if conf.dbmsCred and ':' not in conf.dbmsCred:
        errMsg = "value for option '--dbms-cred' must be in "
        errMsg += "format <username>:<password> (e.g. \"root:pass\")"
        raise SqlmapSyntaxException(errMsg)
    # normalize user supplied charset name into its canonical codec name
    if conf.charset:
        _ = checkCharEncoding(conf.charset, False)
        if _ is None:
            errMsg = "unknown charset '%s'. Please visit " % conf.charset
            errMsg += "'%s' to get the full list of " % CODECS_LIST_PAGE
            errMsg += "supported charsets"
            raise SqlmapSyntaxException(errMsg)
        else:
            conf.charset = _
    if conf.loadCookies:
        if not os.path.exists(conf.loadCookies):
            errMsg = "cookies file '%s' does not exist" % conf.loadCookies
            raise SqlmapFilePathException(errMsg)
def _resolveCrossReferences():
    """
    Deferred wiring of function references across modules that would
    otherwise create circular imports: the target modules declare the
    attribute and this function fills it in at initialization time.
    """
    lib.core.threads.readInput = readInput
    lib.core.common.getPageTemplate = getPageTemplate
    lib.core.convert.singleTimeWarnMessage = singleTimeWarnMessage
    lib.request.connect.setHTTPHandlers = _setHTTPHandlers
    lib.utils.search.setHTTPHandlers = _setHTTPHandlers
    lib.controller.checks.setVerbosity = setVerbosity
def initOptions(inputOptions=None, overrideOptions=False):
    """
    Initialize the configuration and knowledge base singletons and merge
    in the provided input options.

    :param inputOptions: optparse-like or dict-like object with option
        values; a fresh empty AttribDict is used when omitted.
    :param overrideOptions: when True input values unconditionally replace
        existing conf entries.
    """
    # BUGFIX: the original used a mutable default argument (AttribDict()),
    # which is created once and shared across all calls; create it per call
    if inputOptions is None:
        inputOptions = AttribDict()
    _setConfAttributes()
    _setKnowledgeBaseAttributes()
    _mergeOptions(inputOptions, overrideOptions)
def init():
    """
    Set attributes into both configuration and knowledge base singletons
    based upon command line and configuration file options.

    NOTE: the call order below is significant — option cleanup must precede
    validation, and HTTP/header/proxy setup only happens once a target
    source (URL, log, bulk file, etc.) is known.
    """
    _useWizardInterface()
    setVerbosity()
    _saveConfig()
    _setRequestFromFile()
    _cleanupOptions()
    _dirtyPatches()
    _purgeOutput()
    _checkDependencies()
    _createTemporaryDirectory()
    _basicOptionValidation()
    _setProxyList()
    _setTorProxySettings()
    _setDNSServer()
    _adjustLoggingFormatter()
    _setMultipleTargets()
    _setTamperingFunctions()
    _setWafFunctions()
    _setTrafficOutputFP()
    _resolveCrossReferences()
    _checkWebSocket()
    parseTargetUrl()
    parseTargetDirect()
    # network related setup only makes sense when there is a remote target
    if any((conf.url, conf.logFile, conf.bulkFile, conf.sitemapUrl, conf.requestFile, conf.googleDork, conf.liveTest)):
        _setHTTPTimeout()
        _setHTTPExtraHeaders()
        _setHTTPCookies()
        _setHTTPReferer()
        _setHTTPHost()
        _setHTTPUserAgent()
        _setHTTPAuthentication()
        _setHTTPHandlers()
        _setDNSCache()
        _setSocketPreConnect()
        _setSafeVisit()
        _doSearch()
        _setBulkMultipleTargets()
        _setSitemapTargets()
        _checkTor()
        _setCrawler()
        _findPageForms()
        _setDBMS()
        _setTechnique()
    _setThreads()
    _setOS()
    _setWriteFile()
    _setMetasploit()
    _setDBMSAuthentication()
    loadBoundaries()
    loadPayloads()
    _setPrefixSuffix()
    update()
    _loadQueries()
|
glaudsonml/kurgan-ai
|
tools/sqlmap/lib/core/option.py
|
Python
|
apache-2.0
| 91,797
|
[
"VisIt"
] |
4b6db88dd85f9a9d425bd3cc93e3ac549119d42f04b9260b03ee41f60a4d457a
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
'''
Miscellaneous algorithms for handling 2D contours and 3D triangulated meshes
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../testing/data'))
>>> os.chdir(datadir)
'''
import numpy as np
from numpy import linalg as nla
import os.path as op
from ..external import six
from .. import logging
from ..interfaces.base import (BaseInterface, traits, TraitedSpec, File,
BaseInterfaceInputSpec)
from warnings import warn
iflogger = logging.getLogger('interface')
class ComputeMeshWarpInputSpec(BaseInterfaceInputSpec):
    # reference surface the distance is computed against
    surface1 = File(exists=True, mandatory=True,
                    desc=('Reference surface (vtk format) to which compute '
                          'distance.'))
    # test surface to be compared with the reference
    surface2 = File(exists=True, mandatory=True,
                    desc=('Test surface (vtk format) from which compute '
                          'distance.'))
    metric = traits.Enum('euclidean', 'sqeuclidean', usedefault=True,
                         desc=('norm used to report distance'))
    # BUGFIX: the original desc string read '... performed, surface": ...'
    # (the opening quote of "surface" was missing)
    weighting = traits.Enum(
        'none', 'surface', usedefault=True,
        desc=('"none": no weighting is performed, "surface": edge distance is '
              'weighted by the corresponding surface area'))
    out_warp = File('surfwarp.vtk', usedefault=True,
                    desc='vtk file based on surface1 and warpings mapping it '
                         'to surface2')
    out_file = File('distance.npy', usedefault=True,
                    desc='numpy file keeping computed distances and weights')
class ComputeMeshWarpOutputSpec(TraitedSpec):
    """Output specification for :class:`ComputeMeshWarp`."""
    distance = traits.Float(desc="computed distance")
    out_warp = File(exists=True, desc=('vtk file with the vertex-wise '
                                       'mapping of surface1 to surface2'))
    out_file = File(exists=True,
                    desc='numpy file keeping computed distances and weights')
class ComputeMeshWarp(BaseInterface):

    """
    Calculates the vertex-wise warping to get surface2 from surface1.
    It also reports the average distance of vertices, using the norm specified
    as input.

    .. warning:

      A point-to-point correspondence between surfaces is required

    Example
    -------

    >>> import nipype.algorithms.mesh as m
    >>> dist = m.ComputeMeshWarp()
    >>> dist.inputs.surface1 = 'surf1.vtk'
    >>> dist.inputs.surface2 = 'surf2.vtk'
    >>> res = dist.run() # doctest: +SKIP

    """

    input_spec = ComputeMeshWarpInputSpec
    output_spec = ComputeMeshWarpOutputSpec
    _redirect_x = True

    def _triangle_area(self, A, B, C):
        """Return the area of the triangle with vertices A, B, C.

        Computed as ``0.5 * |AB x AC|``.  This is mathematically equivalent
        to the previous ``0.5*|AB|*|AC|*sin(arccos(cos))`` formulation but
        avoids the arccos, so it is robust to floating-point round-off
        (which can push the cosine slightly outside [-1, 1] and yield NaN)
        and to degenerate triangles with a zero-length edge (which caused
        a division by zero).
        """
        A = np.asarray(A, dtype=float)
        B = np.asarray(B, dtype=float)
        C = np.asarray(C, dtype=float)
        return 0.5 * nla.norm(np.cross(B - A, C - A))

    def _run_interface(self, runtime):
        # tvtk (and, optionally, the ETS configuration) are imported at run
        # time so the module remains importable without a VTK installation.
        try:
            from tvtk.api import tvtk
        except ImportError:
            raise ImportError('Interface ComputeMeshWarp requires tvtk')

        try:
            from enthought.etsconfig.api import ETSConfig
            ETSConfig.toolkit = 'null'
        except ImportError:
            iflogger.warn(('ETS toolkit could not be imported'))
        except ValueError:
            iflogger.warn(('ETS toolkit is already set'))

        r1 = tvtk.PolyDataReader(file_name=self.inputs.surface1)
        r2 = tvtk.PolyDataReader(file_name=self.inputs.surface2)
        vtk1 = r1.output
        vtk2 = r2.output
        r1.update()
        r2.update()
        # point-to-point correspondence is a documented precondition
        assert(len(vtk1.points) == len(vtk2.points))

        points1 = np.array(vtk1.points)
        points2 = np.array(vtk2.points)

        # vertex-wise warping field and per-vertex error magnitude
        diff = points2 - points1
        weights = np.ones(len(diff))

        try:
            errvector = nla.norm(diff, axis=1)
        except TypeError:  # numpy < 1.9 has no axis= in norm()
            errvector = np.apply_along_axis(nla.norm, 1, diff)

        if self.inputs.metric == 'sqeuclidean':
            errvector = errvector ** 2

        if self.inputs.weighting == 'area':
            # weight each vertex error by the total area of the triangles
            # (faces of surface1) it belongs to
            faces = vtk1.polys.to_array().reshape(-1, 4).astype(int)[:, 1:]

            for i, p1 in enumerate(points2):
                w = 0.0
                point_faces = faces[(faces[:, :] == i).any(axis=1)]

                for idset in point_faces:
                    fp1 = points1[int(idset[0])]
                    fp2 = points1[int(idset[1])]
                    fp3 = points1[int(idset[2])]
                    w += self._triangle_area(fp1, fp2, fp3)
                weights[i] = w

        # persist (error, weight) pairs for downstream analysis
        result = np.vstack([errvector, weights])
        np.save(op.abspath(self.inputs.out_file), result.transpose())

        # write the warping field as point-data vectors on surface1
        out_mesh = tvtk.PolyData()
        out_mesh.points = vtk1.points
        out_mesh.polys = vtk1.polys
        out_mesh.point_data.vectors = diff
        out_mesh.point_data.vectors.name = 'warpings'
        writer = tvtk.PolyDataWriter(
            file_name=op.abspath(self.inputs.out_warp))
        writer.set_input_data(out_mesh)
        writer.write()

        self._distance = np.average(errvector, weights=weights)
        return runtime

    def _list_outputs(self):
        outputs = self._outputs().get()
        outputs['out_file'] = op.abspath(self.inputs.out_file)
        outputs['out_warp'] = op.abspath(self.inputs.out_warp)
        outputs['distance'] = self._distance
        return outputs
class MeshWarpMathsInputSpec(BaseInterfaceInputSpec):
    """Input specification for :class:`MeshWarpMaths`."""
    in_surf = File(exists=True, mandatory=True,
                   desc=('Input surface in vtk format, with associated warp '
                         'field as point data (ie. from ComputeMeshWarp'))
    # helper trait: a float, or an (x, y, z) tuple of floats
    float_trait = traits.Either(traits.Float(1.0), traits.Tuple(
        traits.Float(1.0), traits.Float(1.0), traits.Float(1.0)))

    operator = traits.Either(
        float_trait, File(exists=True), default=1.0, mandatory=True,
        desc=('image, float or tuple of floats to act as operator'))

    operation = traits.Enum('sum', 'sub', 'mul', 'div', usedefault=True,
                            desc=('operation to be performed'))

    out_warp = File('warp_maths.vtk', usedefault=True,
                    desc='vtk file based on in_surf and warpings mapping it '
                         'to out_file')
    out_file = File('warped_surf.vtk', usedefault=True,
                    desc='vtk with surface warped')
class MeshWarpMathsOutputSpec(TraitedSpec):
    """Output specification for :class:`MeshWarpMaths`."""
    out_warp = File(exists=True, desc=('vtk file with the vertex-wise '
                                       'mapping of surface1 to surface2'))
    out_file = File(exists=True,
                    desc='vtk with surface warped')
class MeshWarpMaths(BaseInterface):

    """
    Performs the most basic mathematical operations on the warping field
    defined at each vertex of the input surface. A surface with scalar
    or vector data can be used as operator for non-uniform operations.

    .. warning:

      A point-to-point correspondence between surfaces is required

    Example
    -------

    >>> import nipype.algorithms.mesh as m
    >>> mmath = m.MeshWarpMaths()
    >>> mmath.inputs.in_surf = 'surf1.vtk'
    >>> mmath.inputs.operator = 'surf2.vtk'
    >>> mmath.inputs.operation = 'mul'
    >>> res = mmath.run() # doctest: +SKIP

    """

    input_spec = MeshWarpMathsInputSpec
    output_spec = MeshWarpMathsOutputSpec
    _redirect_x = True

    def _run_interface(self, runtime):
        try:
            from tvtk.api import tvtk
        except ImportError:
            # BUG FIX: this message used to name ComputeMeshWarp
            raise ImportError('Interface MeshWarpMaths requires tvtk')

        try:
            from enthought.etsconfig.api import ETSConfig
            ETSConfig.toolkit = 'null'
        except ImportError:
            iflogger.warn(('ETS toolkit could not be imported'))
        except ValueError:
            iflogger.warn(('ETS toolkit is already set'))

        r1 = tvtk.PolyDataReader(file_name=self.inputs.in_surf)
        vtk1 = r1.output
        r1.update()
        points1 = np.array(vtk1.points)

        if vtk1.point_data.vectors is None:
            raise RuntimeError(('No warping field was found in in_surf'))

        operator = self.inputs.operator
        opfield = np.ones_like(points1)

        if isinstance(operator, six.string_types):
            # The operator is a vtk file: take its point data as operand.
            # BUG FIX: this used to read ``self.inputs.surface2``, an input
            # that does not exist in MeshWarpMathsInputSpec and therefore
            # raised an error whenever a file operator was used.
            r2 = tvtk.PolyDataReader(file_name=operator)
            vtk2 = r2.output
            r2.update()
            assert(len(points1) == len(vtk2.points))

            opfield = vtk2.point_data.vectors

            if opfield is None:
                opfield = vtk2.point_data.scalars

            if opfield is None:
                raise RuntimeError(
                    ('No operator values found in operator file'))

            opfield = np.array(opfield)

            # BUG FIX: per-vertex scalar data comes back as a 1-D array,
            # for which ``opfield.shape[1]`` raised IndexError; replicate
            # scalars across the x, y, z columns instead.
            if opfield.ndim == 1 or opfield.shape[1] < points1.shape[1]:
                opfield = np.tile(np.ravel(opfield), (points1.shape[1], 1)).T
        else:
            # constant operator (float or tuple of floats)
            operator = np.atleast_1d(operator)
            opfield *= operator

        warping = np.array(vtk1.point_data.vectors)

        if self.inputs.operation == 'sum':
            warping += opfield
        elif self.inputs.operation == 'sub':
            warping -= opfield
        elif self.inputs.operation == 'mul':
            warping *= opfield
        elif self.inputs.operation == 'div':
            warping /= opfield

        # write the modified warping field...
        vtk1.point_data.vectors = warping
        writer = tvtk.PolyDataWriter(
            file_name=op.abspath(self.inputs.out_warp))
        writer.set_input_data(vtk1)
        writer.write()

        # ...and the surface with the warping applied to its points
        vtk1.point_data.vectors = None
        vtk1.points = points1 + warping
        writer = tvtk.PolyDataWriter(
            file_name=op.abspath(self.inputs.out_file))
        writer.set_input_data(vtk1)
        writer.write()
        return runtime

    def _list_outputs(self):
        outputs = self._outputs().get()
        outputs['out_file'] = op.abspath(self.inputs.out_file)
        outputs['out_warp'] = op.abspath(self.inputs.out_warp)
        return outputs
class P2PDistance(ComputeMeshWarp):

    """Deprecated alias of :py:class:`ComputeMeshWarp`.

    Computes the point-to-point (p2p) distance between two corresponding
    VTK-readable meshes or contours; a point-to-point correspondence
    between nodes is required.

    .. deprecated:: 1.0-dev
       Use :py:class:`ComputeMeshWarp` instead.
    """

    def __init__(self, **inputs):
        """Initialize the interface and emit a DeprecationWarning."""
        super(P2PDistance, self).__init__(**inputs)
        warn('This interface has been deprecated since 1.0, please use '
             'ComputeMeshWarp', DeprecationWarning)
|
wanderine/nipype
|
nipype/algorithms/mesh.py
|
Python
|
bsd-3-clause
| 10,921
|
[
"VTK"
] |
45e8fc0a7da6898974667ae50f8ed1c24f19374c8f17afa904ffcd968a979f00
|
# spector.py
# ALS 2017/06/01
import sys
import numpy as np
import astropy.table as at
import matplotlib.pyplot as plt
import astropy.units as u
from astropy.io import fits
import astropy.constants as const
import modelBC03
import os
from ..obsobj import Operator
from .. import filters
from . import getconti
from . import extrap
from . import linelist
from . import lineflux
class Spector(Operator):
    def __init__(self, **kwargs):
        """
        Spector, an operator on spectrum

        Params
        ----------
        Operator params:
            /either
                obj (object of class obsobj): with attributes ra, dec, dir_obj
            /or
                ra (float)
                dec (float)
                /either
                    dir_obj (string)
                /or
                    dir_parent (string): attr dir_obj is set to dir_parent+'SDSSJXXXX+XXXX/'
        survey (str):
            survey of the photometric system
            if not provided, use self.obj.survey. Raise exception if self.obj.survey does not exist.
        z (float):
            redshift, if not provided, use obj.z or obj.sdss.z
        survey_spec (str):
            survey of the spectrum
            if not provided, use the instrument of sdss_xid.csv
        decompose_method = 'modelBC03' (str)
            'modelBC03' or 'running_median'
            the method for decomposing spectrum into continuum and lines, and extrapolate the continum.

        Attributes
        ----------
        Operator Attributes:
            obj (instance of objObj)
            ra (float)
            dec (float)
            dir_obj (string)
        survey (str): e.g., 'hsc'
            survey of the photometric system
        survey_spec (str): e.g., 'sdss' or 'boss'
            survey of the spectrum
        bands (list):
            e.g., ['g', 'r', 'i', 'z', 'y'] for survey = 'hsc'
        z (float):
        decompose_method (str)
            'modelBC03' or 'running_median'
        conti_model
            if decompose_method is modelBC03 then this is an instance of modelBC03
        """
        super(Spector, self).__init__(**kwargs)

        # set survey: explicit kwarg wins, otherwise inherit from self.obj
        if hasattr(self.obj, 'survey'):
            default_survey = self.obj.survey
            self.survey = kwargs.pop('survey', default_survey)
        else:
            self.survey = kwargs.pop('survey')

        # set survey_spec ('auto' resolves to the instrument in sdss_xid.csv)
        self.survey_spec = kwargs.pop('survey_spec', 'auto')

        if self.survey_spec in ['sdss', 'boss', 'eboss', 'auto']:
            if self.survey_spec == 'auto':
                self.obj.add_sdss(toload_photoobj=False)
                self.survey_spec = self.obj.sdss.instrument.lower()

            # # sanity check - spec name consistent
            # if (self.survey_spec != self.obj.sdss.instrument.lower()):
            # 	raise Exception("[spector] survey_spec inconsistent with sdss_xid.csv:instrument")

        # set z: kwarg > obj.z > obj.sdss.z
        if 'z' in kwargs:
            self.z = kwargs.pop('z')
        elif hasattr(self.obj, 'z'):
            self.z = self.obj.z
        elif self.survey_spec in ['sdss', 'boss', 'eboss', 'auto']:
            self.obj.add_sdss(toload_photoobj=False)
            self.z = kwargs.pop('z', self.obj.sdss.z)

        # set self.decompose_method
        self.decompose_method = kwargs.pop('decompose_method', 'modelBC03')
        self.conti_model = None

        # set others
        self.bands = filters.filtertools.surveybands[self.survey]
        self.waverange = filters.filtertools.waverange[self.survey]

        # define paths of all data products under dir_obj
        self.fp_spec = self.dir_obj+'spec.fits'
        self.fp_spec_decomposed = self.dir_obj+'spec_decomposed.ecsv'
        self.fp_spec_contextrp = self.dir_obj+'spec_contextrp.ecsv'
        self.fp_spec_mag = self.dir_obj+'spec_mag.csv'
        self.fp_spec_lineflux = self.dir_obj+'spec_lineflux.csv'
        self.fp_spec_linefrac = self.dir_obj+'spec_linefrac.csv'

        self.spec, self.ws = self.get_spec_ws()

        self.u_spec = 1.e-17*u.Unit('erg / (Angstrom cm2 s)')  # default unit of spec
        self.u_ws = u.AA  # default unit of ws
def get_spec_ws(self, forceload_from_fits=False):
"""
read spec and ws, either from spec_decomposed.ecsv or spec.fits (if forced or spec_decomposed.ecsv does not exist)
Param
-----
forceload_from_fits=False:
if true, then load from fits.
Return
------
spec (astropy table column with unit)
ws (astropy table column with unit)
Default units
-------------
u_spec = 1.e-17*u.Unit('erg / (Angstrom cm2 s)')
u_ws = u.AA
"""
fn = self.fp_spec_decomposed
if not os.path.isfile(fn) or forceload_from_fits:
spec, ws, __ = self.__read_spec_ws_ivar_from_fits()
return spec, ws
else:
tab = at.Table.read(fn, format='ascii.ecsv')
return tab['spec'], tab['ws']
def get_spec_ws_from_spectab(self, component, fn=None):
"""
read certain spec table.ecsv to get spec of a given component
if fn not specified then use the default file that would contain the specified component,
e.g.:
'spec_decomposed.ecsv' for component = "all", "cont", "line"
'spec_contextrp.ecsv' for component = "contextrp"
Param
-----
component:
either ['all', 'line', 'cont', 'contextrp']
fn='spec_decomposed.ecsv'
Return
------
spec (astropy table column with unit)
ws (astropy table column with unit)
Default units
-------------
u_spec = 1.e-17*u.Unit('erg / (Angstrom cm2 s)')
u_ws = u.AA
"""
if fn == None:
if component in ['all', 'cont', 'line']:
fn = self.fp_spec_decomposed
self.make_spec_decomposed_ecsv(overwrite=False)
elif component in ['contextrp']:
fn = self.fp_spec_contextrp
self.make_spec_contextrp_ecsv(overwrite=False)
else:
raise Exception("[spector] component not recognized")
tab = at.Table.read(fn, format='ascii.ecsv')
col = self.__get_spectab_colname(component)
return tab[col], tab['ws']
    def make_spec_decomposed_ecsv(self, overwrite=False):
        """
        saving separated continuum and line spectrum in csv file

        Params
        ------
        self
        overwrite=False

        Return
        ------
        status

        note: this part of the code can be refactorized better.
        """
        fn = self.fp_spec_decomposed

        if (not os.path.isfile(fn)) or overwrite:
            spec, ws = self.get_spec_ws()
            # strip units: the decomposition routine works on plain arrays
            # expressed in the default units (u_spec, u_ws)
            ws_uless = np.array((ws/self.u_ws).to(u.dimensionless_unscaled))
            spec_uless = np.array((spec/self.u_spec).to(u.dimensionless_unscaled))

            iscon, speccont, specline, __, model = getconti.decompose_cont_line_t2AGN(spec_uless, ws_uless, self.z, method=self.decompose_method)
            self.conti_model = model  # modelBC03 instance if set method='modelBC03', otherwise None.

            tab = at.Table([ws_uless, spec_uless, speccont, specline, iscon], names=['ws', 'spec', 'speccont', 'specline', 'iscon'])
            # restore the units on the written columns
            tab['ws'].unit = self.u_ws
            tab['spec'].unit = self.u_spec
            tab['speccont'].unit = self.u_spec
            tab['specline'].unit = self.u_spec

            tab.write(fn, format='ascii.ecsv', overwrite=overwrite)

            # sanity check: units are identical
            units = [tab[col].unit for col in ['spec', 'speccont', 'specline']]
            if len(set(units)) > 1:
                raise Exception("[spector] units in table spec_decomposed are not identical")

        status = os.path.isfile(fn)
        return status
    def make_spec_contextrp_ecsv(self, overwrite=False, refit=False):
        """
        extrapolate continuum to cover all of the wavelength range of filters

        there are two methods:
            for self.conti_model modelBC03: use the bestfit
            for running_median: polynomial fit

        Params
        ------
        overwrite=False
        refit=False
            if True, refit the modelBC03 continuum model even when one is cached

        Return
        ------
        status (bool)
        """
        fn = self.fp_spec_contextrp

        if (not os.path.isfile(fn)) or overwrite:
            speccont, ws = self.get_spec_ws_from_spectab(component='cont')

            # work on unitless arrays in the default units
            ws_uless = np.array((ws/self.u_ws).to(u.dimensionless_unscaled))
            speccont_uless = np.array((speccont/self.u_spec).to(u.dimensionless_unscaled))

            # target wavelength range to cover (from the photometric survey)
            l0, l1 = self.waverange

            if self.decompose_method == 'modelBC03':
                if self.conti_model is None or refit:
                    m = modelBC03.modelBC03(extinction_law='none')
                    m.fit(ws=ws_uless, spec=speccont_uless, z=self.z)
                else:
                    m = self.conti_model  # reuse
                speccon_ext = m.bestfit
                ws_ext = m.ws_bestfit
            elif self.decompose_method == 'running_median':
                speccon_ext, ws_ext = extrap.extrap_to_ends(ys=speccont_uless, xs=ws_uless, x_end0=l0, x_end1=l1, polydeg=1, extbase_length=2000.)
            else:
                raise Exception("method is not recognized")

            # the extrapolated continuum must span the full filter range
            assert (np.min(ws_ext*self.u_ws) < l0) & (np.max(ws_ext*self.u_ws) > l1)

            col_contextrp = self.__get_spectab_colname('contextrp')
            tab = at.Table([ws_ext, speccon_ext], names=['ws', col_contextrp])
            tab['ws'].unit = self.u_ws
            tab[col_contextrp].unit = self.u_spec

            tab.write(fn, format='ascii.ecsv', overwrite=overwrite)

        status = os.path.isfile(fn)
        return status
def make_spec_mag(self, overwrite=False):
"""
make table spec_mag.csv that contains the convolved spectral magnitude and fnu in each band
Params
------
self
overwrite=False
Return
------
status
"""
#==========================================================================
fn = self.fp_spec_mag
self.make_spec_decomposed_ecsv(overwrite=False)
if not os.path.isfile(fn) or overwrite:
print("[spector] making spec_mag")
tabmag = at.Table()
tabfnu = at.Table()
for component in ['all', 'cont', 'line', 'contextrp']:
for band in self.bands:
colfnu = self.__get_specmag_colname(band, component=component, fluxquantity='fnu')
colmag = self.__get_specmag_colname(band, component=component, fluxquantity='mag')
try:
fnu = self._calc_Fnu_in_band(band=band, component=component)
except KeyboardInterrupt:
sys.exit(0)
except:
print(("[spector] skip calculating fnu of {} in band {}".format(component, band)))
else:
mag = fnu.to(u.ABmag)
fnu_nm = fnu.to(u.nanomaggy)
tabmag[colmag] = [mag.value]
tabfnu[colfnu] = [fnu_nm.value]
tab = at.hstack([tabmag, tabfnu])
tab.meta['comments'] = [
"survey_photo: {}".format(self.survey),
"survey_spec: {}".format(self.survey_spec),
"unit_mag: ABmag",
"unit_fnu: nanomaggy",
]
tab.write(fn, comment='#', format='ascii.csv', overwrite=overwrite)
else:
print("[spector] skip making spec_mag as file exists")
status = os.path.isfile(fn)
return status
def get_spec_mag_tab(self):
""" return spec_mag table"""
self.make_spec_mag(overwrite=False)
return at.Table.read(self.fp_spec_mag, comment='#', format='ascii.csv')
def get_spec_mag_value(self, band, component='all', fluxquantity='mag'):
"""
return requested value, reading from spec_mag.csv
Params
------
component='line':
or 'all', 'cont', 'contextrp'
fluxquantity='mag'
band='i'
Return
------
x (float): the value
"""
tab = self.get_spec_mag_tab()
col = self.__get_specmag_colname(band=band, component=component, fluxquantity=fluxquantity)
return tab[col][0]
def get_fnu_ratio_band1_over_band2(self, band1, band2, component='all'):
"""
return fnu_band1/fnu_band2
Params
------
band1, band2, component='all'
Return
------
x (float): the ratio
"""
fluxquantity = 'fnu'
tab = self.get_spec_mag_tab()
col1 = self.__get_specmag_colname(band=band1, component=component, fluxquantity=fluxquantity)
col2 = self.__get_specmag_colname(band=band2, component=component, fluxquantity=fluxquantity)
ratio = tab[col1][0]/tab[col2][0]
return ratio
def plot_spec(self, wfilters=True, wconti=True, wcontextrp=True, wline=True, wspec=True, overwrite=True):
"""
Params
------
self
wfilters=True
wconti=False
Return
------
status (bool)
"""
fn = self.dir_obj+'spec.pdf'
if not os.path.isfile(fn) or overwrite:
plt.close('all')
plt.figure(1, figsize=(12, 6))
plt.clf()
plt.plot(0., 0., ls='', color='black', label='z='+'%.3f'%self.z) # show z in legend
if wfilters:
for band in self.bands:
trans, ws_trans = self._get_norm_trans_func(band=band)
plt.plot(ws_trans, trans/max(trans), label=band)
if wspec:
spec, ws = self.get_spec_ws()
norm = max(spec)
plt.plot(ws, spec/norm, color='0.3', lw=1.5, label='__nolabel__')
if wline:
speccont, ws = self.get_spec_ws_from_spectab(component='cont')
specline, ws = self.get_spec_ws_from_spectab(component='line')
specplot = specline+speccont
specplot[specline==0] = np.nan
plt.plot(ws, specplot/norm, color='black', lw=1.5, label='line')
if wconti:
speccont, ws = self.get_spec_ws_from_spectab(component='cont')
plt.plot(ws, speccont/norm, color='cyan', lw=2, label='continuum')
if wcontextrp:
speccextrp, ws = self.get_spec_ws_from_spectab(component='contextrp')
plt.plot(ws, speccextrp/norm, color='grey', lw=0.5, label='extrapolated conti')
plt.legend(loc='upper right')
if self.survey_spec == 'sdss':
plt.xlim(2980.0, 11230.0)
elif self.survey_spec == 'boss':
plt.xlim(3500.0, 12000.0)
plt.ylim(0., 1.)
plt.savefig(fn, overwrite=overwrite)
status = os.path.isfile(fn)
return status
def calc_fline_over_fnuband(self, band, line):
"""
calculate the conversion ratio r = (flux_line / fnu_band), to convert continuum subtracted image to line intensity map
by multiplying this ratio to the band image (in fnu [erg/s/cm^2/Hz]) one can obtain the observed flux of the line (in f [erg/s/cm^2]). Notice that this is flux in observed frame, for measurements one still needs to transform it to the rest frame (depending on z).
r = (c / (T(w_k) * w_k)) * frac_k := dnu * frac_k
where
dnu = (c / (T(w_k) * w_k))
frac_k = (f_k * T(w_k) * w_k) / sum(f_i * T(w_i) * w_i)
Params
------
self
band (str)
line (str)
Return
------
ratio (quantity in unit of Hz)
"""
f, __ = self._get_line_flux(line=line, wunit=False)
w = self._get_line_obs_wave(line=line, wunit=False)
T = self._get_norm_trans(wavelength=w, band=band, bounds_error=True)
dnu = const.c / (T * w * u.AA)
frac = self._get_line_frac(band=band, line=line)
# sanity check: all strong lines are considered
tab_linefrac = at.Table.read(self.fp_spec_linefrac, format='ascii.csv', comment='#')
stronglines = self._list_stronglines_in_band(band=band)
for line in stronglines:
if 'frac_{}'.format(line) not in tab_linefrac.colnames:
raise Exception("[spector] strong line {} in band is not contained in spec_linefrac.csv".format(line))
r = (dnu * frac).to(u.Hz)
return r
def _get_line_frac(self, band, line='OIII5008'):
"""
read line flux from file spec_linefrac.csv.
Params
------
band, line='OIII5008', wunit=False
Return
------
frac (float)
"""
fn = self.fp_spec_linefrac
if not os.path.isfile(fn):
self.make_linefrac(band=band, overwrite=False)
tab = at.Table.read(fn, format='ascii.csv', comment='#')
if tab['lineband'][0] != band:
raise Exception("[spector] _get_line_frac the band required is not provided by the current spec_linefrac.csv")
linetag = 'frac_{}'.format(line)
frac = tab[linetag][0]
return frac
    def make_linefrac(self, band, lines=['NeIII3870', 'NeIII3969', 'Hg', 'Hb', 'OIII4960', 'OIII5008', 'OI6302', 'OI6366'], tofixOIIIratio=True, overwrite=False):
        """
        make file spec_linefrac.csv that contains the fraction each of the strong lines have in a specific band.

        Columns: f_{band}_{line}, T_{band}_{line}, w_{band}_{line}, frac_{band}_{line}

        The fraction is based on the f*T*w of the line. Only the strong lines are listed.

        If tofixOIIIratio = True, then the ratio between OIII5008 and OIII4960 is fixed to the theoretical ratio of 2.98, see
        Storey + 2000. http://adsabs.harvard.edu/abs/2000MNRAS.312..813S.

        Params
        ------
        band
        lines=['NeIII3870', 'NeIII3969', 'Hg', 'Hb', 'OIII4960', 'OIII5008', 'OI6302', 'OI6366']
        tofixOIIIratio=True
        overwrite=False

        Return
        ------
        status
        """
        fn = self.fp_spec_linefrac

        # line fluxes are needed to compute the weights
        self.make_lineflux(overwrite=overwrite)

        if not os.path.isfile(fn) or overwrite:
            print("[spector] making spec_linefrac")
            tab = at.Table([[band]], names=['lineband'])

            # accumulate the f*w*T weight of each line and their sum
            fwt_sum = 0.
            for line in lines:
                f, __ = self._get_line_flux(line=line, wunit=False)
                w = self._get_line_obs_wave(line=line, wunit=False)
                T = self._get_norm_trans(wavelength=w, band=band, bounds_error=False)
                # negative fluxes are clipped to zero so fractions stay in [0, 1]
                fwt = max(f*w*T, 0)
                fwt_sum = fwt_sum + fwt
                col_new = at.Table([[f], [w], [T], [fwt]], names=['f_{}'.format(line), 'w_{}'.format(line), 't_{}'.format(line), 'fwt_{}'.format(line)])
                tab = at.hstack([tab, col_new])

            for line in lines:
                frac = tab['fwt_{}'.format(line)][0] / fwt_sum
                col_new = at.Table([[frac]], names=['frac_{}'.format(line)])
                tab = at.hstack([tab, col_new])

            if tofixOIIIratio:
                # redistribute the total OIII fraction at the theoretical
                # 5008/4960 ratio of 2.98 (Storey+2000)
                r = 2.98
                frac_OIIItotal = tab['frac_OIII4960'] + tab['frac_OIII5008']

                tab['frac_OIII5008'] = frac_OIIItotal * r / (1.+r)
                tab['frac_OIII4960'] = frac_OIIItotal * 1. / (1.+r)

            tab.write(fn, format='ascii.csv', overwrite=overwrite)
        else:
            print("[spector] skip making spec_linefrac as file exists")

        status = os.path.isfile(fn)
        return status
    def make_lineflux(self, lines=['NeIII3870', 'NeIII3969', 'Hg', 'Hb', 'OIII4960', 'OIII5008', 'OI6302', 'OI6366'], u_flux=u.Unit("1E-17 erg cm-2 s-1"), overwrite=False):
        """
        make file spec_lineflux.csv that contains the flux of the specified lines. The fluxes are calculated by integrating the line component of the spectrum over a window of +/- 1400 km/s.

        WARNING: currently only Hb, and OIII lines are supported. For lines that are overlapped, e.g., Ha and NII, the current implemenation will double count the flux.

        Params
        ------
        lines=['NeIII3870', 'NeIII3969', 'Hg', 'Hb', 'OIII4960', 'OIII5008', 'OI6302', 'OI6366']
        u_flux=u.Unit("1E-17 erg cm-2 s-1")
            the unit of the output
        overwrite=False

        Return
        ------
        status (bool)
        """
        fn = self.fp_spec_lineflux

        # the line component of the spectrum must exist first
        self.make_spec_decomposed_ecsv(overwrite=False)

        if not os.path.isfile(fn) or overwrite:
            print("[spector] making spec_lineflux")
            tab = at.Table()
            # one (f_<line>, ferr_<line>) column pair per requested line
            for line in lines:
                f, ferr = self._calc_line_flux(line=line, u_flux=u_flux, wunit=False)
                col_new = at.Table([[f], [ferr]], names=['f_{}'.format(line), 'ferr_{}'.format(line)])
                tab = at.hstack([tab, col_new])

            tab.meta['comments'] = ["unit_flux: {}".format(u_flux.to_string()),]

            tab.write(fn, comment='#', format='ascii.csv', overwrite=overwrite)
        else:
            print("[spector] skip making spec_lineflux as file exists")

        status = os.path.isfile(fn)
        return status
    def _calc_line_flux(self, line='OIII5008', dv=1400*u.km/u.s, u_flux=u.Unit("1E-17 erg cm-2 s-1"), wunit=False):
        """
        calculate the flux of the line by trpz integrating the decomposed emission line component of the spectrum over a range of +/- dv

        WARNING: currently only Hb, and OIII lines are supported. For lines that are overlapped, e.g., Ha and NII, the current implemenation will double count the flux.

        WARNING: the errors of the flux is propogated from the variance ('ivar' column) of the sdss spectrum. However, sdss's noise could be correlated between neighbors but such a covariance is not quantified, which will result in 10-20% in the error estimates. We here artifically boost the flux error by 20% to incoporate such uncertainty on the error, see:
            http://www.sdss.org/dr12/spectro/caveats/

        Params
        ------
        line='OIII5008' (str)
        dv=1400*u.km/u.s (quantity)
            half of the width over which the spectrum is integrated to calculate flux
        u_flux=u.Unit("1E-17 erg cm-2 s-1")
            the unit of the output
        wunit=False
            whether the output to come with unit or not

        Return
        ------
        f (float or quantity)
            unit is in the dimension of erg cm-2 s-1
        ferr (float or quantity)
            same unit as f
        """
        # sanity check
        if line not in ['NeIII3870', 'NeIII3969', 'Hg', 'Hb', 'OIII4960', 'OIII5008', 'OI6302', 'OI6366']:
            raise Exception("[spector] _calc_line_flux does not support lines other than Hb and OIII as those are not tested. ")

        # get w range: observed wavelength +/- the Doppler width set by dv
        beta = (dv/const.c).to_value(u.dimensionless_unscaled)
        w = self._get_line_obs_wave(line=line, wunit=False)
        w0 = w*(1-beta)
        w1 = w*(1+beta)

        # get spectrum (line component) and the inverse variance from fits
        spec, ws = self.get_spec_ws_from_spectab(component='line')
        __, __, ivar = self.__read_spec_ws_ivar_from_fits()

        f, ferr = lineflux.calc_line_flux(spec, ws, ivar, w0, w1, u_flux)

        # artificially boost the error to account for pixel covariance
        ferr = ferr * 1.2

        if wunit:
            return f, ferr
        else:
            return f.to_value(u_flux), ferr.to_value(u_flux)
def _get_line_flux(self, line='OIII5008', wunit=False):
"""
read line flux from file spec_lineflux.csv. For details, see make_spec_lineflux().
Params
------
line='OIII5008', wunit=False
Return
------
f, ferr
"""
fn = self.fp_spec_lineflux
if not os.path.isfile(fn):
self.make_lineflux(overwrite=False)
tab = at.Table.read(fn, format='ascii.csv', comment='#')
f = tab['f_{}'.format(line)][0]
ferr = tab['ferr_{}'.format(line)][0]
if not wunit:
return f, ferr
else:
u_flux = u.Quantity(tab.meta['comments'][0].split(': ')[1])
return f*u_flux, ferr*u_flux
def _get_line_flux_sdss(self, line='OIII5008', wunit=False):
"""
Flux of line from SDSS spec.fits higher extensions
It is measured by Gaussian fit, for details, see:
http://classic.sdss.org/dr7/dm/flatFiles/spZline.html
PARAMS
------
line = 'OIII5008' (str)
wunit = False
Return
------
f (float):
flux
or astropy quantify with units u.Unit("1E-17 erg cm-2 s-1")
ferr (float)
error on flux
or astropy quantify with units u.Unit("1E-17 erg cm-2 s-1")
"""
# get sdss linename
linename = linelist.sdssLINENAME[line]
# read flux
table = self.obj.sdss.get_spec()[3].data
i = [table['LINENAME']==linename]
f = table[i]['LINEAREA'][0]
ferr = table[i]['LINEAREA_ERR'][0]
if not wunit:
return f, ferr
else:
return f*u.Unit("1E-17 erg cm-2 s-1"), ferr*u.Unit("1E-17 erg cm-2 s-1")
def _get_line_obs_wave(self, line='OIII5008', wunit=False):
"""
PARAMS
------
line = 'OIII5008' (str)
wunit = False
Return
------
w (float):
redshifted wavelength of line
or astropy quantify with units u.Unit("AA")
"""
w = filters.getllambda(ion=line, vacuum=True) * (1. + self.z)
try: len(w)
except:
pass
else:
if len(w) == 1:
w = w[0]
else:
raise Exception('got more than one wavelength')
if not wunit:
return w
else:
return w*u.Unit("AA")
def _list_stronglines_in_band(self, band, threshold=0.01):
"""
return a list of strong lines in band where the filter transmission function is higher than threshold (in fraction)
Params
------
self
band (str)
threshold (float):
the fractional threshold (relative to the filter peak) that defines the wavelength boundary of a band
wunit (bool)
Return
------
llist (array of str)
e.g., ['OIII5008', 'OIII4960', 'Hb', ...]
"""
w1, w2 = filters.filtertools.getFilterBoundaries(threshold=threshold, band=band, survey=self.survey, withunit=False)
llist = []
for line in linelist.strongline:
w = self._get_line_obs_wave(line=line, wunit=False)
if (w > w1) & (w < w2):
llist += [line]
return llist
def __read_spec_ws_ivar_from_fits(self, u_spec=1.e-17*u.Unit('erg / (Angstrom cm2 s)'), u_ws=u.AA, wunit=True):
"""
Params
------
self
u_spec=1.e-17*u.Unit('erg / (Angstrom cm2 s)')
u_ws=u.AA
wunit=True
Return
------
spec (nparray)
ws (nparray)
ivar (nparray)
Default units
-------------
u_spec = 1.e-17*u.Unit('erg / (Angstrom cm2 s)')
u_ws = u.AA
"""
fn = self.fp_spec
if self.survey_spec in ['sdss', 'boss', 'boss']:
if os.path.isfile(fn):
hdus = fits.open(fn)
else:
raise IOError("[Spector] spec fits file does not exist")
hdus[0].header
spectable = hdus[1].data
spec, ws, ivar = spectable['flux'], 10.**spectable['loglam'], spectable['ivar']
if wunit:
spec = spec*u_spec
ws = ws*u_ws
ivar = ivar / (u_spec**2)
# instrument_header = at.Table(hdus[2].data)['INSTRUMENT'][0].lower()
# if self.survey_spec != instrument_header:
# self.survey_spec = instrument_header
# print("[Spector] updating survey_spec to reflect instrument in spec.fits header -- {}".format(instrument_header))
return at.Column(spec, name=['spec']), at.Column(ws, name=['ws']), at.Column(ivar, name=['ivar'])
else:
raise NameError("[Spector] survey_spec not recognized")
def _get_norm_trans_func(self, band='i'):
"""
return normalized transmission function and its wavelength coordinate
the normalization is such that int{ trans{l} * dlnl} = 1.
Params
------
self
band='i'
Return
------
trans (array)
ws_trans (array)
"""
trans, ws_trans = filters.filtertools.getNormTransFunc(band=band, survey=self.survey)
return trans, ws_trans*u.AA
def _get_norm_trans(self, wavelength, band='i', bounds_error=False):
"""
return normalized transmission function at a specific wavelenth, see filters.getNormTrans()
the normalization is such that int{ trans{l} * dlnl} = 1.
Params
------
self
wave (float): wavelength to evaluate the function at
band='i'
bounds_error (bool):
whether to raise error when interpolation outside of array is attempted
Return
------
trans (float)
"""
trans = filters.filtertools.getNormTrans(wavelength, band=band, survey=self.survey, bounds_error=bounds_error)
return trans
def _calc_Fnu_in_band(self, band, component='all'):
"""
Params
------
band=band
component='all': from ['all', 'cont', 'line']
which spectral component to operate on
Return
------
Fnu (quantity in units "erg s-1 cm-2 Hz-1")
"""
spec, ws = self.get_spec_ws_from_spectab(component=component)
trans, ws_trans = self._get_norm_trans_func(band=band)
Fnu = filters.inttools.calc_Fnu_in_band_from_fl(fl=spec, ws=ws, trans=trans, ws_trans=ws_trans, isnormed=True)
return Fnu
def _calc_mAB_in_band(self, band, component='all'):
Fnu = self._calc_Fnu_in_band(band=band, component='all')
return Fnu.to(u.ABmag)
def __get_specmag_colname(self, band, component, fluxquantity):
"""
band : e.g. 'i'
component: from ['all', 'cont', 'line']
fluxquantity: from ['fnu', 'mag']
"""
tag_component = {'all': '', 'cont': 'cont', 'line': 'line', 'contextrp': 'contextrp'}
tag_quantity = {'fnu': 'Fnu', 'mag': 'Mag'}
return 'spec{0}{1}_{2}'.format(tag_component[component], tag_quantity[fluxquantity], band)
def __get_spectab_colname(self, component):
"""
band : e.g. 'i'
component: from ['all', 'cont', 'line']
fluxquantity: from ['fnu', 'mag']
"""
if component == 'ws':
return 'ws'
else:
tag_component = {'all': '', 'cont': 'cont', 'line': 'line', 'contextrp': 'contextrp'}
return 'spec{0}'.format(tag_component[component])
|
aileisun/bubbleimg
|
bubbleimg/spector/spector.py
|
Python
|
mit
| 26,316
|
[
"Gaussian"
] |
c19b16089d00a1b605b6d870a8f6a4570b1480656425cd3c9f6e7dd318570788
|
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Layers common to multiple models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import defaultdict
import contextlib
import functools
from functools import partial
import math
import numpy as np
from six.moves import range # pylint: disable=redefined-builtin
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import inplace_ops
# NOTE: Defun registration is sensitive to the exact decorator arguments;
# python_grad_func replaces the gradient with a plain tensor conversion and
# shape_func preserves the input's static shape.
@function.Defun(
    python_grad_func=lambda x, dy: tf.convert_to_tensor(dy),
    shape_func=lambda op: [op.inputs[0].get_shape()])
def convert_gradient_to_tensor(x):
  """Identity operation whose gradient is converted to a `Tensor`.
  Currently, the gradient to `tf.concat` is particularly expensive to
  compute if dy is an `IndexedSlices` (a lack of GPU implementation
  forces the gradient operation onto CPU). This situation occurs when
  the output of the `tf.concat` is eventually passed to `tf.gather`.
  It is sometimes faster to convert the gradient to a `Tensor`, so as
  to get the cheaper gradient for `tf.concat`. To do this, replace
  `tf.concat(x)` with `convert_gradient_to_tensor(tf.concat(x))`.
  Args:
    x: A `Tensor`.
  Returns:
    The input `Tensor`.
  """
  return x
def is_xla_compiled():
  """Whether we are building graph that will be compiled by XLA.
  This checks whether the code is executing within an XLA context.
  If True, model authors should ensure the graph they build is compilable by
  XLA. Specifically, they should ensure that all ops have XLA implementations
  and that all shapes are statically known.
  Returns:
    bool, whether the current graph will be compiled for XLA.
  """
  graph = tf.get_default_graph()
  context = graph._get_control_flow_context()  # pylint: disable=protected-access
  return control_flow_util.GetContainingXLAContext(context) is not None
def dropout_with_broadcast_dims(x, keep_prob, broadcast_dims=None, **kwargs):
  """Like tf.nn.dropout but takes broadcast_dims instead of noise_shape.
  Instead of specifying noise_shape, this function takes broadcast_dims -
  a list of dimension numbers in which noise_shape should be 1. The random
  keep/drop tensor has dimensionality 1 along these dimensions.
  Args:
    x: a floating point tensor.
    keep_prob: A scalar Tensor with the same type as x.
      The probability that each element is kept.
    broadcast_dims: an optional list of integers
      the dimensions along which to broadcast the keep/drop flags.
    **kwargs: keyword arguments to tf.nn.dropout other than "noise_shape".
  Returns:
    Tensor of the same shape as x.
  """
  assert "noise_shape" not in kwargs
  if broadcast_dims:
    ndims = len(x.get_shape())
    dyn_shape = tf.shape(x)
    # Normalize negative dimension indices ("-1" etc.) into [0, ndims).
    broadcast_set = {d + ndims if d < 0 else d for d in broadcast_dims}
    kwargs["noise_shape"] = [
        1 if axis in broadcast_set else dyn_shape[axis]
        for axis in range(ndims)
    ]
  return tf.nn.dropout(x, keep_prob, **kwargs)
def comma_separated_string_to_integer_list(s):
  """Parse a comma-separated string into a list of ints, skipping empties."""
  return list(map(int, filter(None, s.split(","))))
def saturating_sigmoid(x):
  """Saturating sigmoid: 1.2 * sigmoid(x) - 0.1 cut to [0, 1]."""
  with tf.name_scope("saturating_sigmoid", values=[x]):
    stretched = 1.2 * tf.sigmoid(x) - 0.1
    return tf.minimum(1.0, tf.maximum(0.0, stretched))
def hard_sigmoid(x, saturation_limit=0.9):
  """Hard sigmoid: relu(0.5*x + 0.5) clipped to at most 1.

  Returns a pair (value, saturation_cost) where saturation_cost is the mean
  excess of |x| over saturation_limit — a penalty that can be added to the
  loss to discourage operating in the saturated region.
  """
  saturation_cost = tf.reduce_mean(tf.nn.relu(tf.abs(x) - saturation_limit))
  x_shifted = 0.5 * x + 0.5
  return tf.minimum(1.0, tf.nn.relu(x_shifted)), saturation_cost
def hard_tanh(x, saturation_limit=0.9):
  """Hard tanh: x clipped to [-1, 1], plus a mean saturation penalty.

  Returns a pair (value, saturation_cost); see hard_sigmoid for the cost term.
  """
  saturation_cost = tf.reduce_mean(tf.nn.relu(tf.abs(x) - saturation_limit))
  return tf.minimum(1.0, tf.maximum(x, -1.0)), saturation_cost
def inverse_exp_decay(max_step, min_value=0.01, step=None):
  """Inverse-decay exponentially from min_value to 1.0 reached at max_step."""
  # Base chosen so that base**max_step == min_value.
  base = tf.exp(tf.log(min_value) / float(max_step))
  if step is None:
    step = tf.train.get_global_step()
  if step is None:
    # No global step available: behave as fully decayed.
    return 1.0
  steps_remaining = tf.maximum(float(max_step) - tf.to_float(step), 0.0)
  return base**steps_remaining
def inverse_lin_decay(max_step, min_value=0.01, step=None):
  """Inverse-decay linearly from min_value to 1.0 reached at max_step."""
  if step is None:
    step = tf.train.get_global_step()
  if step is None:
    # No global step available: behave as fully decayed.
    return 1.0
  fraction = tf.minimum(tf.to_float(step) / float(max_step), 1.0)
  return fraction * (1.0 - min_value) + min_value
def shakeshake2_py(x, y, equal=False, individual=False):
  """The shake-shake sum of 2 tensors, python version.

  Args:
    x: a Tensor.
    y: a Tensor broadcastable with x.
    equal: if True, use a fixed alpha of 0.5 (plain average).
    individual: if True, sample one alpha per leading (batch) element.

  Returns:
    alpha * x + (1 - alpha) * y with alpha drawn uniformly from [0, 1).
  """
  if equal:
    alpha = 0.5
  elif individual:
    # Bug fix: `tf.get_shape(x)` does not exist (get_shape is a Tensor
    # method); use tf.shape to obtain the batch dimension dynamically.
    alpha = tf.random_uniform(tf.shape(x)[:1])
  else:
    alpha = tf.random_uniform([])
  return alpha * x + (1.0 - alpha) * y
@function.Defun()
def shakeshake2_grad(x1, x2, dy):
  """Overriding gradient for shake-shake of 2 tensors."""
  # Re-run the forward pass with a *fresh* random alpha so the backward
  # pass uses a different mixing coefficient than the forward pass.
  y = shakeshake2_py(x1, x2)
  dx = tf.gradients(ys=[y], xs=[x1, x2], grad_ys=[dy])
  return dx
@function.Defun()
def shakeshake2_indiv_grad(x1, x2, dy):
  """Overriding gradient for shake-shake of 2 tensors."""
  # Fresh per-example alpha on the backward pass (individual=True).
  y = shakeshake2_py(x1, x2, individual=True)
  dx = tf.gradients(ys=[y], xs=[x1, x2], grad_ys=[dy])
  return dx
@function.Defun()
def shakeshake2_equal_grad(x1, x2, dy):
  """Overriding gradient for shake-shake of 2 tensors."""
  # Backward pass uses the fixed alpha = 0.5 (equal=True), i.e. the
  # gradient of a plain average.
  y = shakeshake2_py(x1, x2, equal=True)
  dx = tf.gradients(ys=[y], xs=[x1, x2], grad_ys=[dy])
  return dx
@function.Defun(grad_func=shakeshake2_grad)
def shakeshake2(x1, x2):
  """The shake-shake function with a different alpha for forward/backward."""
  return shakeshake2_py(x1, x2)
@function.Defun(grad_func=shakeshake2_indiv_grad)
def shakeshake2_indiv(x1, x2):
  """Shake-shake with per-example alphas, different for forward/backward."""
  return shakeshake2_py(x1, x2, individual=True)
@function.Defun(grad_func=shakeshake2_equal_grad)
def shakeshake2_eqgrad(x1, x2):
  """Shake-shake with random forward alpha but equal (0.5) backward alpha."""
  return shakeshake2_py(x1, x2)
def shakeshake(xs, equal_grad=False):
  """Multi-argument shake-shake, currently approximated by sums of 2."""
  if len(xs) == 1:
    return xs[0]
  # Split roughly in half and recursively combine each side pairwise.
  half = (len(xs) + 1) // 2
  left = shakeshake(xs[:half], equal_grad=equal_grad)
  right = shakeshake(xs[half:], equal_grad=equal_grad)
  combine = shakeshake2_eqgrad if equal_grad else shakeshake2
  return combine(left, right)
def convert_rgb_to_real(x):
  """Rescale integer pixel values in [0, 255] to real numbers in [0, 1]."""
  with tf.name_scope("rgb_to_real", values=[x]):
    return tf.to_float(x) / 255.0
def convert_rgb_to_symmetric_real(x):
  """Rescale integer pixel values in [0, 255] to real numbers in [-1, 1]."""
  with tf.name_scope("rgb_to_real", values=[x]):
    # v -> v / 127.5 - 1 maps 0 to -1 and 255 to 1.
    return (tf.to_float(x) / 127.5) - 1
def convert_real_to_rgb(x):
  """Rescale real numbers in [0, 1] back to pixel values in [0, 255]."""
  with tf.name_scope("real_to_rgb", values=[x]):
    return x * 255.0
def expand_squeeze_to_nd(x, n, squeeze_dim=2, expand_dim=-1):
  """Make x n-d with squeeze and expand_dims."""
  # Squeeze away extra dims, then (if still too small) pad with new ones;
  # exactly one of the two loops runs.
  while len(x.shape) > n:
    x = tf.squeeze(x, [squeeze_dim])
  while len(x.shape) < n:
    x = tf.expand_dims(x, expand_dim)
  return x
def standardize_images(x):
  """Image standardization on batches and videos."""
  with tf.name_scope("standardize_images", [x]):
    x_shape = shape_list(x)
    # Collapse any leading batch/time dims so we normalize per-image.
    x = tf.to_float(tf.reshape(x, [-1] + x_shape[-3:]))
    x_mean = tf.reduce_mean(x, axis=[1, 2], keepdims=True)
    x_variance = tf.reduce_mean(
        tf.square(x - x_mean), axis=[1, 2], keepdims=True)
    # Floor the stddev at 1/sqrt(num_pixels), like
    # tf.image.per_image_standardization, to avoid division blow-up on
    # near-constant images.
    num_pixels = tf.to_float(x_shape[-2] * x_shape[-3])
    x = (x - x_mean) / tf.maximum(tf.sqrt(x_variance), tf.rsqrt(num_pixels))
    return tf.reshape(x, x_shape)
def flatten4d3d(x):
  """Flatten a 4d-tensor into a 3d-tensor by joining width and height."""
  batch, height, width, depth = shape_list(x)
  return tf.reshape(x, [batch, height * width, depth])
# TODO(noam): remove this function after TPUs do gather faster.
def gather(params, indices, dtype=tf.float32):
  """Version of tf.gather that works faster on tpu."""
  if not is_xla_compiled():
    return tf.gather(params, indices)
  # On TPU, emulate gather as one-hot matmul, which compiles better under
  # XLA. Requires a statically known vocab size.
  vocab_size = params.get_shape().as_list()[0]
  indices_flat = tf.reshape(indices, [-1])
  out = tf.matmul(tf.one_hot(indices_flat, vocab_size, dtype=dtype), params)
  out = reshape_like(out, tf.expand_dims(indices, -1))
  return out
# TODO(noam): remove this function after TPUs do cumsum faster.
def cumsum(x, axis=0, exclusive=False):
  """TPU hack for tf.cumsum.
  This is equivalent to tf.cumsum and is faster on TPU as of 04/2018 unless
  the axis dimension is very large.
  Args:
    x: a Tensor
    axis: an integer
    exclusive: a boolean
  Returns:
    Tensor of the same shape as x.
  """
  if not is_xla_compiled():
    return tf.cumsum(x, axis=axis, exclusive=exclusive)
  x_shape = shape_list(x)
  rank = len(x_shape)
  length = x_shape[axis]
  my_range = tf.range(length)
  # Lower-triangular (or strictly-lower for exclusive) 0/1 matrix; the
  # tensordot with it computes the running sum along `axis`.
  comparator = tf.less if exclusive else tf.less_equal
  mask = tf.cast(
      comparator(tf.expand_dims(my_range, 1), tf.expand_dims(my_range, 0)),
      x.dtype)
  ret = tf.tensordot(x, mask, axes=[[axis], [0]])
  if axis != rank - 1:
    # tensordot moved the summed axis to the end; permute it back into place.
    ret = tf.transpose(
        ret,
        list(range(axis)) + [rank - 1] + list(range(axis, rank - 1)))
  return ret
def dropout_no_scaling(x, keep_prob):
  """Like tf.nn.dropout, but does not scale up. Works on integers also.
  Args:
    x: a Tensor
    keep_prob: a floating point number
  Returns:
    Tensor of the same shape as x.
  """
  if keep_prob == 1.0:
    return x
  # Boolean keep-mask, cast to x's dtype so integer inputs also work.
  keep_mask = tf.less(tf.random_uniform(tf.shape(x)), keep_prob)
  return x * cast_like(keep_mask, x)
def embedding(x,
              vocab_size,
              dense_size,
              name=None,
              reuse=None,
              multiplier=1.0,
              symbol_dropout_rate=0.0,
              embedding_var=None,
              dtype=tf.float32):
  """Embed x of type int64 into dense vectors, reducing to max 4 dimensions."""
  with tf.variable_scope(
      name, default_name="embedding", values=[x], reuse=reuse, dtype=dtype):
    if embedding_var is None:
      embedding_var = tf.get_variable("kernel", [vocab_size, dense_size])
    # On the backwards pass, we want to convert the gradient from
    # an indexed-slices to a regular tensor before sending it back to the
    # parameter server. This avoids excess computation on the parameter server.
    if not tf.contrib.eager.in_eager_mode():
      embedding_var = convert_gradient_to_tensor(embedding_var)
    # Symbol dropout zeroes whole symbol ids (no rescaling), which maps
    # dropped symbols to embedding row 0.
    x = dropout_no_scaling(x, 1.0 - symbol_dropout_rate)
    emb_x = gather(embedding_var, x, dtype)
    if multiplier != 1.0:
      emb_x *= multiplier
    static_shape = emb_x.shape.as_list()
    if len(static_shape) < 5:
      return emb_x
    assert len(static_shape) == 5
    # If we had an extra channel dimension, assume it's 1, i.e. shape[3] == 1.
    return tf.squeeze(emb_x, 3)
def shift_right(x, pad_value=None):
  """Shift the second dimension of x right by one."""
  if pad_value is None:
    padded = tf.pad(x, [[0, 0], [1, 0], [0, 0], [0, 0]])
  else:
    padded = tf.concat([pad_value, x], axis=1)
  # Drop the last position so the shape is preserved.
  return padded[:, :-1, :, :]
def shift_right_3d(x, pad_value=None):
  """Shift the second dimension of x right by one."""
  if pad_value is None:
    padded = tf.pad(x, [[0, 0], [1, 0], [0, 0]])
  else:
    padded = tf.concat([pad_value, x], axis=1)
  # Drop the last position so the shape is preserved.
  return padded[:, :-1, :]
def shift_right_2d(x, pad_value=None):
  """Shift the second dimension of x right by one."""
  if pad_value is None:
    padded = tf.pad(x, [[0, 0], [1, 0]])
  else:
    padded = tf.concat([pad_value, x], axis=1)
  # Drop the last position so the shape is preserved.
  return padded[:, :-1]
def conv_stride2_multistep(x, nbr_steps, output_filters, name=None, reuse=None):
  """Use a strided convolution to downsample x by 2, `nbr_steps` times.
  We use stride and filter size 2 to avoid the checkerboard problem of deconvs.
  As detailed in http://distill.pub/2016/deconv-checkerboard/.
  Args:
    x: a `Tensor` with shape `[batch, spatial, depth]` or
      `[batch, spatial_1, spatial_2, depth]`
    nbr_steps: number of halving downsample rounds to apply
    output_filters: an int specifying the filter count for the convolutions
    name: a string
    reuse: a boolean
  Returns:
    a `Tensor` with shape `[batch, spatial / (2**nbr_steps), output_filters]` or
     `[batch, spatial_1 / (2**nbr_steps), spatial_2 / (2**nbr_steps),
       output_filters]`
  """
  with tf.variable_scope(
      name, default_name="conv_stride2_multistep", values=[x], reuse=reuse):
    if nbr_steps == 0:
      # Degenerate case: just a 1x1 projection; still return the
      # (result, hidden_layers) pair for interface consistency.
      out = conv(x, output_filters, (1, 1))
      return out, [out]
    hidden_layers = [x]
    for i in range(nbr_steps):
      hidden_layers.append(
          conv(
              hidden_layers[-1],
              output_filters, (2, 2),
              strides=2,
              activation=tf.nn.relu,
              name="conv" + str(i)))
    return hidden_layers[-1], hidden_layers
def deconv_stride2_multistep(x,
                             nbr_steps,
                             output_filters,
                             name=None,
                             reuse=None):
  """Use a deconvolution to upsample x by 2**`nbr_steps`.
  Args:
    x: a `Tensor` with shape `[batch, spatial, depth]` or
      `[batch, spatial_1, spatial_2, depth]`
    nbr_steps: an int specifying the number of doubling upsample rounds to
      apply.
    output_filters: an int specifying the filter count for the deconvolutions
    name: a string
    reuse: a boolean
  Returns:
    a `Tensor` with shape `[batch, spatial * (2**nbr_steps), output_filters]` or
     `[batch, spatial_1 * (2**nbr_steps), spatial_2 * (2**nbr_steps),
       output_filters]`
  """
  with tf.variable_scope(
      name, default_name="deconv_stride2_multistep", values=[x], reuse=reuse):

    def deconv1d(cur, i):
      # Upsample along dim 1 by doubling the channels with a 1x1 conv and
      # reshaping the extra channels into new spatial positions.
      cur_shape = shape_list(cur)
      thicker = conv(
          cur,
          output_filters * 2, (1, 1),
          padding="SAME",
          activation=tf.nn.relu,
          name="deconv1d" + str(i))
      return tf.reshape(thicker,
                        [cur_shape[0], cur_shape[1] * 2, 1, output_filters])

    def deconv2d(cur, i):
      # 2-D analogue: 4x channels, then depth_to_space doubles both spatial
      # dims (avoids checkerboard artifacts of transposed convs).
      thicker = conv(
          cur,
          output_filters * 4, (1, 1),
          padding="SAME",
          activation=tf.nn.relu,
          name="deconv2d" + str(i))
      return tf.depth_to_space(thicker, 2)

    cur = x
    for i in range(nbr_steps):
      if cur.get_shape()[2] == 1:
        cur = deconv1d(cur, i)
      else:
        cur_dim = shape_list(cur)[2]
        if isinstance(cur_dim, int):
          if cur_dim == 1:
            cur = deconv1d(cur, i)
          else:
            cur = deconv2d(cur, i)
        else:
          # Dynamic width: decide 1-D vs 2-D at run time. The lambdas bind
          # the loop index via a default arg to avoid late-binding bugs.
          cur = tf.cond(
              tf.equal(cur_dim, 1),
              lambda idx=i: deconv1d(cur, idx),
              lambda idx=i: deconv2d(cur, idx))
    return cur
def conv_internal(conv_fn, inputs, filters, kernel_size, **kwargs):
  """Conditional conv_fn making kernel 1d or 2d depending on inputs shape."""
  static_shape = inputs.get_shape()
  if not static_shape or len(static_shape) != 4:
    raise ValueError("Inputs to conv must have statically known rank 4. "
                     "Shape: " + str(static_shape))
  # Add support for left padding.
  if kwargs.get("padding") == "LEFT":
    dilation_rate = (1, 1)
    if "dilation_rate" in kwargs:
      dilation_rate = kwargs["dilation_rate"]
    assert kernel_size[0] % 2 == 1 and kernel_size[1] % 2 == 1
    height_padding = 2 * (kernel_size[0] // 2) * dilation_rate[0]
    # Only pad the width when it is > 1; resolved at run time if the
    # static width is unknown.
    cond_padding = tf.cond(
        tf.equal(shape_list(inputs)[2], 1), lambda: tf.constant(0),
        lambda: tf.constant(2 * (kernel_size[1] // 2) * dilation_rate[1]))
    width_padding = 0 if static_shape[2] == 1 else cond_padding
    padding = [[0, 0], [height_padding, 0], [width_padding, 0], [0, 0]]
    inputs = tf.pad(inputs, padding)
    # Set middle two dimensions to None to prevent convolution from complaining
    inputs.set_shape([static_shape[0], None, None, static_shape[3]])
    kwargs["padding"] = "VALID"

  def conv2d_kernel(kernel_size_arg, name_suffix):
    """Call conv2d but add suffix to name."""
    # name/force2d are popped so conv_fn doesn't receive them, then
    # restored because kwargs is shared with the caller.
    name = "{}_{}".format(kwargs.get("name", "conv"), name_suffix)
    original_name = kwargs.pop("name", None)
    original_force2d = kwargs.pop("force2d", None)
    result = conv_fn(inputs, filters, kernel_size_arg, name=name, **kwargs)
    if original_name is not None:
      kwargs["name"] = original_name  # Restore for other calls.
    if original_force2d is not None:
      kwargs["force2d"] = original_force2d
    return result

  return conv2d_kernel(kernel_size, "single")
def conv(inputs, filters, kernel_size, dilation_rate=(1, 1), **kwargs):
  """2-D convolution with support for "LEFT" padding (see conv_internal)."""
  return conv_internal(
      tf.layers.conv2d,
      inputs,
      filters,
      kernel_size,
      dilation_rate=dilation_rate,
      **kwargs)
def conv1d(inputs, filters, kernel_size, dilation_rate=1, **kwargs):
  """1-D convolution implemented as a width-1 2-D convolution."""
  return tf.squeeze(
      conv(
          tf.expand_dims(inputs, 2),
          filters, (kernel_size, 1),
          dilation_rate=(dilation_rate, 1),
          **kwargs), 2)
def separable_conv(inputs, filters, kernel_size, **kwargs):
  """Depthwise-separable 2-D convolution with "LEFT" padding support."""
  return conv_internal(tf.layers.separable_conv2d, inputs, filters, kernel_size,
                       **kwargs)
def subseparable_conv(inputs, filters, kernel_size, **kwargs):
  """Sub-separable convolution. If separability == 0 it's a separable_conv."""

  def conv_fn(inputs, filters, kernel_size, **kwargs):
    """Sub-separable convolution, splits into separability-many blocks."""
    separability = None
    if "separability" in kwargs:
      separability = kwargs.pop("separability")
    if separability:
      parts = []
      # separability > 0: plain conv per channel block;
      # separability < 0: separable conv per block (|separability| blocks).
      abs_sep = separability if separability > 0 else -1 * separability
      for split_idx, split in enumerate(tf.split(inputs, abs_sep, axis=3)):
        with tf.variable_scope("part_%d" % split_idx):
          if separability > 0:
            parts.append(
                tf.layers.conv2d(split, filters // separability, kernel_size,
                                 **kwargs))
          else:
            parts.append(
                tf.layers.separable_conv2d(split, filters // abs_sep,
                                           kernel_size, **kwargs))
      if separability > 1:
        # Mix the per-block outputs back together with a 1x1 conv.
        result = tf.layers.conv2d(tf.concat(parts, axis=3), filters, (1, 1))
      elif abs_sep == 1:  # If we have just one block, return it.
        assert len(parts) == 1
        result = parts[0]
      else:
        result = tf.concat(parts, axis=3)
    else:
      result = tf.layers.separable_conv2d(inputs, filters, kernel_size,
                                          **kwargs)
    if separability is not None:
      # Restore the popped key: kwargs is shared with the caller.
      kwargs["separability"] = separability
    return result

  return conv_internal(conv_fn, inputs, filters, kernel_size, **kwargs)
def tpu_conv1d(inputs, filters, kernel_size, padding="SAME", name="tpu_conv1d"):
  """Version of conv1d that works on TPU (as of 11/2017).
  Args:
    inputs: a Tensor with shape [batch, length, input_depth].
    filters: an integer.
    kernel_size: an integer.
    padding: a string - "SAME" or "LEFT".
    name: a string.
  Returns:
    a Tensor with shape [batch, length, filters].
  """
  if kernel_size == 1:
    return dense(inputs, filters, name=name, use_bias=True)
  if padding == "SAME":
    assert kernel_size % 2 == 1
    first_offset = -((kernel_size - 1) // 2)
  else:
    assert padding == "LEFT"
    first_offset = -(kernel_size - 1)
  last_offset = first_offset + kernel_size - 1
  results = []
  padded = tf.pad(inputs, [[0, 0], [-first_offset, last_offset], [0, 0]])
  # Express the convolution as a sum of kernel_size shifted dense layers,
  # which compiles well on TPU. Only the first term carries the bias.
  for i in range(kernel_size):
    shifted = tf.slice(padded, [0, i, 0], tf.shape(inputs)) if i else inputs
    shifted.set_shape(inputs.get_shape())
    results.append(
        dense(shifted, filters, use_bias=(i == 0), name=name + "_%d" % i))
  ret = tf.add_n(results)
  # Scale to keep the output variance comparable to a single dense layer.
  ret *= kernel_size**-0.5
  return ret
def layer_norm_vars(filters):
  """Create the trainable scale and bias Variables used by layer norm."""
  var_shape = [filters]
  scale = tf.get_variable(
      "layer_norm_scale", var_shape, initializer=tf.ones_initializer())
  bias = tf.get_variable(
      "layer_norm_bias", var_shape, initializer=tf.zeros_initializer())
  return scale, bias
def layer_norm_compute(x, epsilon, scale, bias):
  """Layer norm raw computation: normalize x over its last dimension."""
  # Cast the parameters to x's dtype (e.g. for mixed-precision training).
  epsilon = cast_like(epsilon, x)
  scale = cast_like(scale, x)
  bias = cast_like(bias, x)
  mean = tf.reduce_mean(x, axis=[-1], keepdims=True)
  variance = tf.reduce_mean(tf.square(x - mean), axis=[-1], keepdims=True)
  normalized = (x - mean) * tf.rsqrt(variance + epsilon)
  return normalized * scale + bias
def layer_norm(x, filters=None, epsilon=1e-6, name=None, reuse=None):
  """Layer normalize the tensor x, averaging over the last dimension."""
  num_filters = shape_list(x)[-1] if filters is None else filters
  with tf.variable_scope(
      name, default_name="layer_norm", values=[x], reuse=reuse):
    scale, bias = layer_norm_vars(num_filters)
    return layer_norm_compute(x, epsilon, scale, bias)
def group_norm(x, filters=None, num_groups=8, epsilon=1e-5):
  """Group normalization as in https://arxiv.org/abs/1803.08494."""
  x_shape = shape_list(x)
  if filters is None:
    filters = x_shape[-1]
  # Input must be rank-4 [batch, height, width, channels] with channels
  # evenly divisible into num_groups.
  assert len(x_shape) == 4
  assert filters % num_groups == 0
  # Prepare variables.
  scale = tf.get_variable(
      "group_norm_scale", [filters], initializer=tf.ones_initializer())
  bias = tf.get_variable(
      "group_norm_bias", [filters], initializer=tf.zeros_initializer())
  epsilon, scale, bias = [cast_like(t, x) for t in [epsilon, scale, bias]]
  # Reshape and compute group norm.
  x = tf.reshape(x, x_shape[:-1] + [num_groups, filters // num_groups])
  # Calculate mean and variance on heights, width, channels (not groups).
  mean, variance = tf.nn.moments(x, [1, 2, 4], keep_dims=True)
  norm_x = (x - mean) * tf.rsqrt(variance + epsilon)
  # scale/bias broadcast over the restored [..., filters] channel dim.
  return tf.reshape(norm_x, x_shape) * scale + bias
def noam_norm(x, epsilon=1.0, name=None):
  """One version of layer normalization: l2-normalize, rescale by sqrt(depth)."""
  with tf.name_scope(name, default_name="noam_norm", values=[x]):
    static_shape = x.get_shape()
    ndims = len(static_shape)
    normalized = tf.nn.l2_normalize(x, ndims - 1, epsilon=epsilon)
    return normalized * tf.sqrt(tf.to_float(static_shape[-1]))
def l2_norm(x, filters=None, epsilon=1e-6, name=None, reuse=None):
  """Layer normalization with l2 norm."""
  if filters is None:
    filters = shape_list(x)[-1]
  with tf.variable_scope(name, default_name="l2_norm", values=[x], reuse=reuse):
    scale = tf.get_variable(
        "l2_norm_scale", [filters], initializer=tf.ones_initializer())
    bias = tf.get_variable(
        "l2_norm_bias", [filters], initializer=tf.zeros_initializer())
    epsilon, scale, bias = [cast_like(t, x) for t in [epsilon, scale, bias]]
    mean = tf.reduce_mean(x, axis=[-1], keepdims=True)
    # Unlike layer_norm, divides by the l2 norm of the centered activations
    # (sum of squares, not mean of squares).
    l2norm = tf.reduce_sum(tf.square(x - mean), axis=[-1], keepdims=True)
    norm_x = (x - mean) * tf.rsqrt(l2norm + epsilon)
    return norm_x * scale + bias
def apply_spectral_norm(x):
  """Normalizes x using the spectral norm.
  The implementation follows Algorithm 1 of
  https://arxiv.org/abs/1802.05957. If x is not a 2-D Tensor, then it is
  reshaped such that the number of channels (last-dimension) is the same.
  Args:
    x: Tensor with the last dimension equal to the number of filters.
  Returns:
    x: Tensor with the same shape as x normalized by the spectral norm.
    assign_op: Op to be run after every step to update the vector "u".
  """
  weights_shape = shape_list(x)
  other, num_filters = tf.reduce_prod(weights_shape[:-1]), weights_shape[-1]

  # Reshape into a 2-D matrix with outer size num_filters.
  weights_2d = tf.reshape(x, (other, num_filters))

  # One step of power iteration: u persists across steps (trainable=False)
  # so the estimate of the top singular vector improves over training.
  # v = Wu / ||W u||
  with tf.variable_scope("u", reuse=tf.AUTO_REUSE):
    u = tf.get_variable(
        "u", [num_filters, 1],
        initializer=tf.truncated_normal_initializer(),
        trainable=False)

  v = tf.nn.l2_normalize(tf.matmul(weights_2d, u))

  # u_new = vW / ||v W||
  u_new = tf.nn.l2_normalize(tf.matmul(tf.transpose(v), weights_2d))

  # s = v*W*u
  spectral_norm = tf.squeeze(
      tf.matmul(tf.transpose(v), tf.matmul(weights_2d, tf.transpose(u_new))))

  # set u equal to u_new in the next iteration.
  assign_op = tf.assign(u, tf.transpose(u_new))
  return tf.divide(x, spectral_norm), assign_op
def apply_norm(x, norm_type, depth, epsilon):
  """Apply the normalization selected by `norm_type`.

  Args:
    x: a Tensor.
    norm_type: one of "layer", "group", "batch", "noam", "l2", "none".
    depth: an integer (size of the last dimension of x), forwarded to the
      norms that create per-channel variables.
    epsilon: a float, numerical-stability constant.

  Returns:
    the normalized Tensor (x unchanged for "none").

  Raises:
    ValueError: if norm_type is not one of the supported strings.
  """
  if norm_type == "layer":
    return layer_norm(x, filters=depth, epsilon=epsilon)
  if norm_type == "group":
    return group_norm(x, filters=depth, epsilon=epsilon)
  if norm_type == "batch":
    return tf.layers.batch_normalization(x, epsilon=epsilon)
  if norm_type == "noam":
    return noam_norm(x, epsilon)
  if norm_type == "l2":
    return l2_norm(x, filters=depth, epsilon=epsilon)
  if norm_type == "none":
    return x
  # Bug fix: the old message listed a nonexistent 'lr' option and omitted
  # 'group', 'l2' and 'none'.
  raise ValueError("Parameter normalizer_fn must be one of: 'layer', 'group',"
                   " 'batch', 'noam', 'l2', 'none'.")
def zero_add(previous_value, x, name=None, reuse=None):
  """Resnet connection with zero initialization.
  Another type of resnet connection which returns previous_value + gamma * x.
  gamma is a trainable scalar and initialized with zero. It is useful when a
  module is plugged into a trained model and we want to make sure it matches the
  original model's performance.
  Args:
    previous_value:  A tensor.
    x: A tensor.
    name: name of variable scope; defaults to zero_add.
    reuse: reuse scope.
  Returns:
    previous_value + gamma * x.
  """
  with tf.variable_scope(name, default_name="zero_add", reuse=reuse):
    # Starts at zero so the block is initially an identity mapping.
    gamma = tf.get_variable(
        "gamma", shape=(), initializer=tf.zeros_initializer())
    scaled_x = gamma * x
    return previous_value + scaled_x
def layer_prepostprocess(previous_value,
                         x,
                         sequence,
                         dropout_rate,
                         norm_type,
                         depth,
                         epsilon,
                         default_name,
                         name=None,
                         dropout_broadcast_dims=None):
  """Apply a sequence of functions to the input or output of a layer.
  The sequence is specified as a string which may contain the following
  characters:
    a: add previous_value
    n: apply normalization
    d: apply dropout
    z: zero add
  For example, if sequence=="dna", then the output is
    previous_value + normalize(dropout(x))
  Args:
    previous_value: A Tensor, to be added as a residual connection ('a')
    x: A Tensor to be transformed.
    sequence: a string.
    dropout_rate: a float
    norm_type: a string (see apply_norm())
    depth: an integer (size of last dimension of x).
    epsilon: a float (parameter for normalization)
    default_name: a string
    name: a string
    dropout_broadcast_dims:  an optional list of integers less than 3
      specifying in which dimensions to broadcast the dropout decisions.
      saves memory.
  Returns:
    a Tensor
  """
  with tf.variable_scope(name, default_name=default_name):
    if sequence == "none":
      return x
    # The steps are applied left-to-right; 'a' and 'z' both consume
    # previous_value, so they are only valid when it is not None.
    for c in sequence:
      if c == "a":
        x += previous_value
      elif c == "z":
        x = zero_add(previous_value, x)
      elif c == "n":
        x = apply_norm(x, norm_type, depth, epsilon)
      else:
        assert c == "d", ("Unknown sequence step %s" % c)
        x = dropout_with_broadcast_dims(
            x, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims)
    return x
def layer_preprocess(layer_input, hparams):
  """Apply layer preprocessing.
  See layer_prepostprocess() for details.
  A hyperparameters object is passed for convenience. The hyperparameters
  that may be used are:
    layer_preprocess_sequence
    layer_prepostprocess_dropout
    norm_type
    hidden_size
    norm_epsilon
  Args:
    layer_input: a Tensor
    hparams: a hyperparameters object.
  Returns:
    a Tensor
  """
  assert "a" not in hparams.layer_preprocess_sequence, (
      "No residual connections allowed in hparams.layer_preprocess_sequence")
  # Bug fix: this assertion guards the zero-add ('z') step, but its message
  # was copy-pasted from the residual ('a') check above.
  assert "z" not in hparams.layer_preprocess_sequence, (
      "No zero-add ('z') connections allowed in "
      "hparams.layer_preprocess_sequence")
  return layer_prepostprocess(
      None,
      layer_input,
      sequence=hparams.layer_preprocess_sequence,
      dropout_rate=hparams.layer_prepostprocess_dropout,
      norm_type=hparams.norm_type,
      depth=None,
      epsilon=hparams.norm_epsilon,
      dropout_broadcast_dims=comma_separated_string_to_integer_list(
          getattr(hparams, "layer_prepostprocess_dropout_broadcast_dims", "")),
      default_name="layer_prepostprocess")
def layer_postprocess(layer_input, layer_output, hparams):
  """Apply layer postprocessing.
  See layer_prepostprocess() for details.
  A hyperparameters object is passed for convenience. The hyperparameters
  that may be used are:
    layer_postprocess_sequence
    layer_prepostprocess_dropout
    norm_type
    hidden_size
    norm_epsilon
  Args:
    layer_input: a Tensor
    layer_output: a Tensor
    hparams: a hyperparameters object.
  Returns:
    a Tensor
  """
  # Unlike layer_preprocess, residual ('a') and zero-add ('z') steps are
  # allowed here since layer_input provides the previous_value.
  return layer_prepostprocess(
      layer_input,
      layer_output,
      sequence=hparams.layer_postprocess_sequence,
      dropout_rate=hparams.layer_prepostprocess_dropout,
      norm_type=hparams.norm_type,
      depth=None,
      epsilon=hparams.norm_epsilon,
      dropout_broadcast_dims=comma_separated_string_to_integer_list(
          getattr(hparams, "layer_prepostprocess_dropout_broadcast_dims", "")),
      default_name="layer_postprocess")
def conv_block_internal(conv_fn,
                        inputs,
                        filters,
                        dilation_rates_and_kernel_sizes,
                        first_relu=True,
                        use_elu=False,
                        separabilities=None,
                        **kwargs):
  """A block of convolutions.
  Args:
    conv_fn: convolution function, e.g. conv or separable_conv.
    inputs: a Tensor
    filters: an Integer
    dilation_rates_and_kernel_sizes: a list of tuples (dilation, (k_w, k_h))
    first_relu: whether to do a relu at start (defaults to True)
    use_elu: whether to use ELUs instead of ReLUs (defaults to False)
    separabilities: list of separability factors (per-layer).
    **kwargs: additional arguments (e.g., pooling)
  Returns:
     a Tensor.
  """
  name = kwargs.pop("name") if "name" in kwargs else None
  mask = kwargs.pop("mask") if "mask" in kwargs else None

  # Usage for normalize_fn kwarg:
  # if not specified, use layer norm
  # if given normalize_fn=None, don't use any normalization
  # if given normalize_fn=norm, use the specified norm function
  use_layer_norm = "normalizer_fn" not in kwargs
  norm = kwargs.pop("normalizer_fn", None)
  use_normalizer_fn = use_layer_norm or norm

  if use_layer_norm:
    norm = lambda x, name: layer_norm(x, filters, name=name)

  with tf.variable_scope(name, "conv_block", [inputs]):
    cur, counter = inputs, -1
    for dilation_rate, kernel_size in dilation_rates_and_kernel_sizes:
      counter += 1
      if first_relu or counter > 0:
        # Activation precedes each conv (pre-activation style), except
        # optionally before the very first one.
        cur = tf.nn.elu(cur) if use_elu else tf.nn.relu(cur)
      if mask is not None:
        cur *= mask
      if separabilities:
        cur = conv_fn(
            cur,
            filters,
            kernel_size,
            dilation_rate=dilation_rate,
            name="conv_block_%d" % counter,
            use_bias=norm is None,
            separability=separabilities[counter],
            **kwargs)
      else:
        cur = conv_fn(
            cur,
            filters,
            kernel_size,
            dilation_rate=dilation_rate,
            name="conv_block_%d" % counter,
            use_bias=norm is None,
            **kwargs)
      if use_normalizer_fn:
        cur = norm(cur, name="conv_block_norm_%d" % counter)
    return cur
def conv_block(inputs, filters, dilation_rates_and_kernel_sizes, **kwargs):
  """A block of standard 2d convolutions."""
  return conv_block_internal(conv, inputs, filters,
                             dilation_rates_and_kernel_sizes, **kwargs)
def conv1d_block(inputs, filters, dilation_rates_and_kernel_sizes, **kwargs):
  """A block of standard 1d convolutions."""
  return conv_block_internal(conv1d, inputs, filters,
                             dilation_rates_and_kernel_sizes, **kwargs)
def separable_conv_block(inputs, filters, dilation_rates_and_kernel_sizes,
                         **kwargs):
  """A block of separable convolutions."""
  return conv_block_internal(separable_conv, inputs, filters,
                             dilation_rates_and_kernel_sizes, **kwargs)
def subseparable_conv_block(inputs, filters, dilation_rates_and_kernel_sizes,
                            **kwargs):
  """A block of sub-separable convolutions (see subseparable_conv)."""
  return conv_block_internal(subseparable_conv, inputs, filters,
                             dilation_rates_and_kernel_sizes, **kwargs)
def pool(inputs, window_size, pooling_type, padding, strides=(1, 1)):
  """Pooling (supports "LEFT")."""
  with tf.name_scope("pool", values=[inputs]):
    static_shape = inputs.get_shape()
    if not static_shape or len(static_shape) != 4:
      raise ValueError("Inputs to conv must have statically known rank 4.")
    # Add support for left padding.
    if padding == "LEFT":
      assert window_size[0] % 2 == 1 and window_size[1] % 2 == 1
      # NOTE(review): this rank-3 branch appears unreachable — rank != 4
      # raised above. Kept as-is; confirm before removing.
      if len(static_shape) == 3:
        width_padding = 2 * (window_size[1] // 2)
        padding_ = [[0, 0], [width_padding, 0], [0, 0]]
      else:
        height_padding = 2 * (window_size[0] // 2)
        # Only pad the width when it is > 1; resolved at run time when the
        # static width is unknown.
        cond_padding = tf.cond(
            tf.equal(shape_list(inputs)[2], 1), lambda: tf.constant(0),
            lambda: tf.constant(2 * (window_size[1] // 2)))
        width_padding = 0 if static_shape[2] == 1 else cond_padding
        padding_ = [[0, 0], [height_padding, 0], [width_padding, 0], [0, 0]]
      inputs = tf.pad(inputs, padding_)
      inputs.set_shape([static_shape[0], None, None, static_shape[3]])
      padding = "VALID"

  return tf.nn.pool(inputs, window_size, pooling_type, padding, strides=strides)
def conv_block_downsample(x,
                          kernel,
                          strides,
                          padding,
                          separability=0,
                          name=None,
                          reuse=None):
  """Implements a downwards-striding conv block, like Xception exit flow."""
  with tf.variable_scope(
      name, default_name="conv_block_downsample", values=[x], reuse=reuse):
    hidden_size = int(x.get_shape()[-1])
    # Strided residual branch so the skip connection matches the
    # downsampled main path.
    res = conv_block(
        x,
        int(1.25 * hidden_size), [((1, 1), kernel)],
        padding=padding,
        strides=strides,
        name="res_conv")

    x = subseparable_conv_block(
        x,
        hidden_size, [((1, 1), kernel)],
        padding=padding,
        separability=separability,
        name="conv0")
    x = subseparable_conv_block(
        x,
        int(1.25 * hidden_size), [((1, 1), kernel)],
        padding=padding,
        separability=separability,
        name="conv1")
    # Downsample the main path with max-pooling, then add the residual.
    x = pool(x, kernel, "MAX", padding, strides=strides)

    x += res

    # Post-residual expansion: 2x then 2.5x the hidden size.
    x = subseparable_conv_block(
        x,
        2 * hidden_size, [((1, 1), kernel)],
        first_relu=False,
        padding=padding,
        separability=separability,
        name="conv2")
    x = subseparable_conv_block(
        x,
        int(2.5 * hidden_size), [((1, 1), kernel)],
        padding=padding,
        separability=separability,
        name="conv3")
    return x
def get_timing_signal(length,
                      min_timescale=1,
                      max_timescale=1e4,
                      num_timescales=16):
  """Create Tensor of sinusoids of different frequencies.

  Args:
    length: Length of the Tensor to create, i.e. Number of steps.
    min_timescale: a float
    max_timescale: a float
    num_timescales: an int

  Returns:
    Tensor of shape (length, 2*num_timescales)
  """
  positions = tf.to_float(tf.range(length))
  # Timescales form a geometric sequence from min_timescale to max_timescale.
  # Bug fix: guard against division by zero when num_timescales == 1.
  log_timescale_increment = (
      math.log(max_timescale / min_timescale) / max(num_timescales - 1, 1))
  inv_timescales = min_timescale * tf.exp(
      tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)
  scaled_time = tf.expand_dims(positions, 1) * tf.expand_dims(inv_timescales, 0)
  return tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)
def add_timing_signal(x, min_timescale=1, max_timescale=1e4, num_timescales=16):
  """Add sinusoidal position information to a Tensor.

  Lets attention use absolute and relative positions: sin(x+y) and cos(x+y)
  decompose in terms of y, sin(x) and cos(x). The timing signal from
  get_timing_signal is zero-padded up to the input depth and added in.

  Args:
    x: a Tensor with shape [?, length, ?, depth]
    min_timescale: a float
    max_timescale: a float
    num_timescales: an int <= depth/2

  Returns:
    a Tensor the same shape as x.
  """
  x_shape = shape_list(x)
  length, depth = x_shape[1], x_shape[3]
  signal = get_timing_signal(length, min_timescale, max_timescale,
                             num_timescales)
  # Zero-pad the 2*num_timescales channels up to the full input depth.
  pad_amount = depth - 2 * num_timescales
  signal = tf.pad(signal, [[0, 0], [0, pad_amount]])
  return x + tf.reshape(signal, [1, length, 1, depth])
def mask_from_embedding(emb):
  """Compute a padding mask from input embeddings.

  symbol_modality is hacked to emit all-zero embeddings for padding, so a
  position is padding exactly when its embedding has zero absolute sum.

  Args:
    emb: a Tensor with shape [batch, width, height, depth].

  Returns:
    a 0.0/1.0 Tensor with shape [batch, width, height, 1].
  """
  absolute_sum = tf.reduce_sum(tf.abs(emb), axis=3, keepdims=True)
  return weights_nonzero(absolute_sum)
def length_from_embedding(emb):
  """Count the non-padding positions of each sequence in the batch.

  Args:
    emb: a sequence embedding Tensor with shape [batch, max_time, 1, depth].

  Returns:
    an int32 Tensor with shape [batch].
  """
  nonpadding = mask_from_embedding(emb)
  lengths = tf.reduce_sum(nonpadding, [1, 2, 3])
  return tf.cast(lengths, tf.int32)
def mask_leq(target_length, source_length):
  """Build a mask that is 1.0 where source_pos <= target_pos, 0.0 elsewhere.

  Args:
    target_length: an integer
    source_length: an integer

  Returns:
    a Tensor with shape [1, target_length, source_length]
  """
  out_shape = [1, target_length, source_length]
  return ones_matrix_band_part(
      target_length, source_length, -1, 0, out_shape=out_shape)
def relu_density_logit(x, reduce_dims):
  """logit of the fraction of positive entries, i.e. logit(density(x)).

  Useful for histograms of relu outputs.

  Args:
    x: a Tensor, typically the output of tf.relu
    reduce_dims: a list of dimensions

  Returns:
    a Tensor
  """
  eps = math.exp(-10)  # keeps the logs finite at density 0 or 1
  frac_positive = tf.reduce_mean(tf.to_float(x > 0.0), reduce_dims)
  return tf.log(frac_positive + eps) - tf.log((1.0 - frac_positive) + eps)
def maybe_zero_out_padding(inputs, kernel_size, nonpadding_mask):
  """Zero out conv inputs at padding positions when a mask is given.

  With a pointwise (size-1) kernel padding cannot leak into real positions,
  so inputs are returned untouched in that case.

  Args:
    inputs: a Tensor with shape [batch, length, ...]
    kernel_size: an integer or pair of integers
    nonpadding_mask: a Tensor with shape [batch, length]

  Returns:
    Tensor of the same shape as inputs.
  """
  if kernel_size == 1 or kernel_size == (1, 1) or nonpadding_mask is None:
    return inputs
  mask = nonpadding_mask
  # Broadcast the mask across any trailing feature dimensions.
  while mask.get_shape().ndims < inputs.get_shape().ndims:
    mask = tf.expand_dims(mask, -1)
  return inputs * mask
def dense_relu_dense(inputs,
                     filter_size,
                     output_size,
                     output_activation=None,
                     dropout=0.0,
                     dropout_broadcast_dims=None,
                     name=None):
  """Hidden layer with RELU activation followed by linear projection."""
  # Scope the two layers as "<name>_conv1" / "<name>_conv2" when named.
  if name:
    layer_name = "%s_{}" % name
  else:
    layer_name = "{}"
  hidden = dense(
      inputs,
      filter_size,
      use_bias=True,
      activation=tf.nn.relu,
      name=layer_name.format("conv1"))
  if dropout != 0.0:
    hidden = dropout_with_broadcast_dims(
        hidden, 1.0 - dropout, broadcast_dims=dropout_broadcast_dims)
  return dense(
      hidden,
      output_size,
      activation=output_activation,
      use_bias=True,
      name=layer_name.format("conv2"))
def dense_dropconnect(inputs,
                      output_size,
                      dropconnect_dropout=0.0,
                      name="dense_dropconnect",
                      **kwargs):
  """Dense layer whose kernel is regularized with dropconnect."""
  if dropconnect_dropout != 0.0:
    tf.logging.info("Applying dropconnect as the kernel regularization.")
    keep_prob = 1.0 - dropconnect_dropout
    # Dropout applied to the kernel itself acts as dropconnect.
    kwargs["kernel_regularizer"] = partial(tf.nn.dropout, keep_prob=keep_prob)
  return dense(inputs, output_size, use_bias=True, name=name, **kwargs)
def conv_relu_conv(inputs,
                   filter_size,
                   output_size,
                   first_kernel_size=3,
                   second_kernel_size=3,
                   padding="SAME",
                   nonpadding_mask=None,
                   dropout=0.0,
                   name=None,
                   cache=None,
                   decode_loop_step=None):
  """Hidden layer with RELU activation followed by linear projection.

  Args:
    inputs: A tensor.
    filter_size: An integer.
    output_size: An integer.
    first_kernel_size: An integer.
    second_kernel_size: An integer.
    padding: A string.
    nonpadding_mask: A tensor.
    dropout: A float.
    name: A string.
    cache: A dict, containing Tensors which are the results of previous
        attentions, used for fast decoding.
    decode_loop_step: An integer, step number of the decoding loop.
        Only used for inference on TPU. If it is not None, the function
        will do inplace update for the cache instead of concatenating the
        current result to the cache.

  Returns:
    A Tensor.
  """
  with tf.variable_scope(name, "conv_relu_conv", [inputs]):
    inputs = maybe_zero_out_padding(inputs, first_kernel_size, nonpadding_mask)

    if cache:
      if decode_loop_step is None:
        # Append the current step to the cached history along the time axis.
        inputs = cache["f"] = tf.concat([cache["f"], inputs], axis=1)
      else:
        # Inplace update is required for inference on TPU.
        # Inplace_ops only supports inplace_update on the first dimension.
        # The performance of current implementation is better than updating
        # the tensor by adding the result of matmul(one_hot,
        # update_in_current_step)
        tmp_f = tf.transpose(cache["f"], perm=[1, 0, 2])
        tmp_f = inplace_ops.alias_inplace_update(
            tmp_f,
            decode_loop_step * tf.shape(inputs)[1],
            tf.transpose(inputs, perm=[1, 0, 2]))
        inputs = cache["f"] = tf.transpose(tmp_f, perm=[1, 0, 2])
      # Only the last first_kernel_size steps influence the current output.
      inputs = cache["f"] = inputs[:, -first_kernel_size:, :]

    h = tpu_conv1d(
        inputs, filter_size, first_kernel_size, padding=padding, name="conv1")

    if cache:
      # During incremental decoding only the newest position is needed.
      h = h[:, -1:, :]

    h = tf.nn.relu(h)
    if dropout != 0.0:
      h = tf.nn.dropout(h, 1.0 - dropout)
    h = maybe_zero_out_padding(h, second_kernel_size, nonpadding_mask)
    return tpu_conv1d(
        h, output_size, second_kernel_size, padding=padding, name="conv2")
def sepconv_relu_sepconv(inputs,
                         filter_size,
                         output_size,
                         first_kernel_size=(1, 1),
                         second_kernel_size=(1, 1),
                         padding="LEFT",
                         nonpadding_mask=None,
                         dropout=0.0,
                         name=None):
  """Hidden layer with RELU activation followed by linear projection."""
  with tf.variable_scope(name, "sepconv_relu_sepconv", [inputs]):
    inputs = maybe_zero_out_padding(inputs, first_kernel_size, nonpadding_mask)
    # separable_conv expects 4-D input; temporarily add a unit spatial axis.
    is_3d = inputs.get_shape().ndims == 3
    if is_3d:
      inputs = tf.expand_dims(inputs, 2)
    hidden = separable_conv(
        inputs,
        filter_size,
        first_kernel_size,
        activation=tf.nn.relu,
        padding=padding,
        name="conv1")
    if dropout != 0.0:
      hidden = tf.nn.dropout(hidden, 1.0 - dropout)
    hidden = maybe_zero_out_padding(hidden, second_kernel_size, nonpadding_mask)
    result = separable_conv(
        hidden, output_size, second_kernel_size, padding=padding, name="conv2")
    return tf.squeeze(result, 2) if is_3d else result
# DEPRECATED - use dense_relu_dense, conv_relu_conv, sepconv_relu_sepconv
def conv_hidden_relu(inputs,
                     hidden_size,
                     output_size,
                     kernel_size=(1, 1),
                     second_kernel_size=(1, 1),
                     dropout=0.0,
                     **kwargs):
  """Hidden layer with RELU activation followed by linear projection."""
  name = kwargs.pop("name", None)
  with tf.variable_scope(name, "conv_hidden_relu", [inputs]):
    # conv expects 4-D input; temporarily add a unit spatial axis.
    is_3d = inputs.get_shape().ndims == 3
    if is_3d:
      inputs = tf.expand_dims(inputs, 2)
    # Pointwise kernels use a plain conv; larger kernels go separable.
    first_conv = separable_conv if kernel_size != (1, 1) else conv
    hidden = first_conv(
        inputs,
        hidden_size,
        kernel_size,
        activation=tf.nn.relu,
        name="conv1",
        **kwargs)
    if dropout != 0.0:
      hidden = tf.nn.dropout(hidden, 1.0 - dropout)
    second_conv = separable_conv if second_kernel_size != (1, 1) else conv
    result = second_conv(
        hidden, output_size, second_kernel_size, name="conv2", **kwargs)
    return tf.squeeze(result, 2) if is_3d else result
def conv_gru(x,
             kernel_size,
             filters,
             padding="SAME",
             dilation_rate=(1, 1),
             name=None,
             reuse=None):
  """Convolutional GRU in 1 dimension."""

  def gate_conv(tensor, scope_name, bias_start, conv_padding):
    # All three projections share the same conv configuration.
    return conv(
        tensor,
        filters,
        kernel_size,
        padding=conv_padding,
        dilation_rate=dilation_rate,
        bias_initializer=tf.constant_initializer(bias_start),
        name=scope_name)

  with tf.variable_scope(
      name, default_name="conv_gru", values=[x], reuse=reuse):
    # Standard GRU: reset gates the candidate input, gate mixes old and new.
    reset_gate = saturating_sigmoid(gate_conv(x, "reset", 1.0, padding))
    update_gate = saturating_sigmoid(gate_conv(x, "gate", 1.0, padding))
    candidate = tf.tanh(gate_conv(reset_gate * x, "candidate", 0.0, padding))
    return update_gate * x + (1 - update_gate) * candidate
def gru_feedfwd(a_t, h_prev, filters, name=None):
  """position-wise Feed-fwd GRU gates following the MPNN.

  Args:
    a_t: Tensor of shape [batch, length, depth] of current input
    h_prev: Tensor of shape [batch, length, depth] of prev input
    filters: an integer specifying number of dimensions of the filters
    name: A string

  Returns:
    h_t: [batch, length, filters] hidden state
  """
  with tf.variable_scope(name, default_name="GRU", values=[a_t, h_prev]):

    def project(tensor, scope_name):
      # Right matrix multiplication via a 1x1 conv handles batching.
      return tpu_conv1d(tensor, filters, 1, padding="SAME", name=scope_name)

    # W_z and W_r have shape 2d, d. U_z U_r have shape d,d
    z_t = tf.sigmoid(project(a_t, "W_z") + project(h_prev, "U_z"))
    r_t = tf.sigmoid(project(a_t, "W_r") + project(h_prev, "U_r"))
    h_tilde = tf.tanh(project(a_t, "W") + project(r_t * h_prev, "U"))
    return (1. - z_t) * h_prev + z_t * h_tilde
def conv_lstm(x,
              kernel_size,
              filters,
              padding="SAME",
              dilation_rate=(1, 1),
              name=None,
              reuse=None):
  """Convolutional LSTM in 1 dimension."""
  with tf.variable_scope(
      name, default_name="conv_lstm", values=[x], reuse=reuse):
    # One conv produces all four gate pre-activations at once.
    gates = conv(
        x,
        4 * filters,
        kernel_size,
        padding=padding,
        dilation_rate=dilation_rate)
    in_gate, forget_gate, out_gate, cell_input = tf.split(
        layer_norm(gates, 4 * filters), 4, axis=3)
    new_cell = (
        tf.sigmoid(in_gate) * x + tf.sigmoid(forget_gate) * tf.tanh(cell_input))
    return tf.sigmoid(out_gate) * tf.tanh(new_cell)
def diagonal_conv_gru(x,
                      kernel_size,
                      filters,
                      dropout=0.0,
                      name=None,
                      reuse=None):
  """Diagonal Convolutional GRU as in https://arxiv.org/abs/1702.08727."""

  # Let's make a shorthand for conv call first.
  def do_conv(args, name, bias_start):
    return conv(
        args,
        filters,
        kernel_size,
        padding="SAME",
        bias_initializer=tf.constant_initializer(bias_start),
        name=name)

  # Here comes the GRU gate.
  with tf.variable_scope(
      name, default_name="diagonal_conv_gru", values=[x], reuse=reuse):
    # hard_sigmoid returns both the saturated gate value and a saturation
    # cost used to discourage values in the saturated regime.
    reset, reset_cost = hard_sigmoid(do_conv(x, "reset", 0.5))
    gate, gate_cost = hard_sigmoid(do_conv(x, "gate", 0.7))
    candidate = tf.tanh(do_conv(reset * x, "candidate", 0.0))

    if dropout > 0.0:
      candidate = tf.nn.dropout(candidate, 1.0 - dropout)

    # Diagonal shift: a fixed depthwise filter that shifts one third of the
    # channels left, one third right, and leaves the rest in place.
    shift_filters = filters // 3
    base_filter = ([[0, 1, 0]] * (filters - 2 * shift_filters) +
                   [[1, 0, 0]] * shift_filters + [[0, 0, 1]] * shift_filters)
    shift_filter = tf.constant(np.transpose(base_filter), dtype=tf.float32)
    shift_filter = tf.expand_dims(tf.expand_dims(shift_filter, 0), 3)
    x_shifted = tf.nn.depthwise_conv2d(
        x, shift_filter, [1, 1, 1, 1], padding="SAME")

    # Return the gated result and cost.
    total_cost_avg = 0.5 * (reset_cost + gate_cost)
    return gate * x_shifted + (1 - gate) * candidate, total_cost_avg
def pad_to_same_length(x, y, final_length_divisible_by=1, axis=1):
  """Pad tensors x and y on axis 1 so that they have the same length.

  Args:
    x: a Tensor.
    y: a Tensor.
    final_length_divisible_by: an int; when > 1, the common padded length is
      rounded up to the nearest multiple of this value.
    axis: which axis to pad along; only 1 and 2 are supported.

  Returns:
    A pair (x, y) zero-padded along `axis` to the same length.

  Raises:
    ValueError: if axis is not 1 or 2.
  """
  if axis not in [1, 2]:
    raise ValueError("Only axis=1 and axis=2 supported for now.")
  with tf.name_scope("pad_to_same_length", values=[x, y]):
    x_length = shape_list(x)[axis]
    y_length = shape_list(y)[axis]
    # Fast path: statically-known equal lengths need no padding at all.
    if (isinstance(x_length, int) and isinstance(y_length, int) and
        x_length == y_length and final_length_divisible_by == 1):
      return x, y
    max_length = tf.maximum(x_length, y_length)
    if final_length_divisible_by > 1:
      # Find the nearest larger-or-equal integer divisible by given number.
      max_length += final_length_divisible_by - 1
      max_length //= final_length_divisible_by
      max_length *= final_length_divisible_by
    length_diff1 = max_length - x_length
    length_diff2 = max_length - y_length

    def padding_list(length_diff, arg):
      # Pads only along `axis`; remaining dims get dynamic-rank zero padding.
      if axis == 1:
        return [[[0, 0], [0, length_diff]],
                tf.zeros([tf.rank(arg) - 2, 2], dtype=tf.int32)]
      return [[[0, 0], [0, 0], [0, length_diff]],
              tf.zeros([tf.rank(arg) - 3, 2], dtype=tf.int32)]

    paddings1 = tf.concat(padding_list(length_diff1, x), axis=0)
    paddings2 = tf.concat(padding_list(length_diff2, y), axis=0)
    res_x = tf.pad(x, paddings1)
    res_y = tf.pad(y, paddings2)
    # Static shapes are the same except for axis=1.
    x_shape = x.shape.as_list()
    x_shape[axis] = None
    res_x.set_shape(x_shape)
    y_shape = y.shape.as_list()
    y_shape[axis] = None
    res_y.set_shape(y_shape)
    return res_x, res_y
def pad_with_zeros(logits, labels):
  """Pad labels on the length dimension to match logits length."""
  with tf.name_scope("pad_with_zeros", values=[logits, labels]):
    logits, labels = pad_to_same_length(logits, labels)
    if len(labels.shape) == 3:
      # 2-d labels carry an extra spatial axis that must be matched too.
      logits, labels = pad_to_same_length(logits, labels, axis=2)
    return logits, labels
def weights_nonzero(labels):
  """Assign weight 1.0 to all labels except for padding (id=0)."""
  is_not_padding = tf.not_equal(labels, 0)
  return tf.to_float(is_not_padding)
def weights_prepend_inputs_to_targets(labels):
  """Assign weight 1.0 to only the "targets" portion of the labels.

  Weight 1.0 is assigned to all nonzero labels past the first zero.
  See prepend_mode in common_hparams.py

  Args:
    labels: A Tensor of int32s.

  Returns:
    A Tensor of floats.
  """
  # Positions at or after the first zero get a nonzero cumulative count.
  zero_indicator = tf.to_float(tf.equal(labels, 0))
  past_first_zero = tf.cumsum(zero_indicator, axis=1)
  # Multiplying by the labels zeroes out padding positions themselves.
  target_marker = past_first_zero * tf.to_float(labels)
  return tf.to_float(tf.not_equal(target_marker, 0))
def weights_multi_problem(labels, taskid=-1):
  """Assign weight 1.0 to only the "targets" portion of the labels.

  Weight 1.0 is assigned to all labels past the taskid.

  Args:
    labels: A Tensor of int32s.
    taskid: an int32 representing the task id for a problem.

  Returns:
    A Tensor of floats.

  Raises:
    ValueError: The Task ID must be valid.
  """
  # The docstring promises a ValueError for invalid task ids but the check
  # was missing; enforce it, consistent with weights_multi_problem_all.
  if taskid < 0:
    raise ValueError("Task ID must be non-negative.")
  # Positions strictly after the task-id token get a nonzero cumulative sum.
  past_taskid = tf.cumsum(tf.to_float(tf.equal(labels, taskid)), axis=1)
  # Additionally zero out the task id location
  past_taskid *= tf.to_float(tf.not_equal(labels, taskid))
  non_taskid = tf.to_float(labels)
  return tf.to_float(tf.not_equal(past_taskid * non_taskid, 0))
def weights_multi_problem_all(labels, taskid=-1):
  """Assign weight 1.0 to only examples from the given task."""
  weights = tf.to_float(tf.not_equal(labels, 0))
  if taskid < 0:
    raise ValueError("Task ID must be non-negative.")
  # Positions strictly after the task-id token get a nonzero cumulative sum.
  past_taskid = tf.cumsum(tf.to_float(tf.equal(labels, taskid)), axis=1)
  # Additionally zero out the task id location
  past_taskid *= tf.to_float(tf.not_equal(labels, taskid))
  target_tokens = tf.to_float(tf.not_equal(past_taskid * tf.to_float(labels), 0))
  # An example belongs to the task iff it has at least one target token.
  tokens_per_example = tf.reduce_sum(target_tokens, axis=1)
  has_targets = tf.to_float(
      tf.greater(tokens_per_example, tf.zeros_like(tokens_per_example)))
  return weights * tf.expand_dims(has_targets, axis=-1)
def weights_multi_problem_input(labels, taskid=-1):
  """Assign weight 1.0 to only the inputs for the given task."""
  # Inputs are all of the task's tokens minus its target tokens.
  all_token_weights = weights_multi_problem_all(labels, taskid)
  target_weights = weights_multi_problem(labels, taskid)
  return all_token_weights - target_weights
def weights_all(labels):
  """Give every label position weight 1.0."""
  return tf.ones_like(labels, dtype=tf.float32)
def weights_concatenated(labels):
  """Assign weight 1.0 to the "target" part of the concatenated labels.

  The labels look like:
    source English I love you . ID1 target French Je t'aime . ID1 source
      English the cat ID1 target French le chat ID1 source English ...

  We want to assign weight 1.0 to all words in the target text (including the
  ID1 end symbol), but not to the source text or the boilerplate. In the
  above example, the target words that get positive weight are:
    Je t'aime . ID1 le chat ID1

  Args:
    labels: a Tensor

  Returns:
    a Tensor
  """
  # Count end-of-sentence markers (id=1) before each position; odd counts
  # mean the position lies inside a target segment.
  eos_mask = tf.to_int32(tf.equal(labels, 1))
  sentence_num = tf.cumsum(eos_mask, axis=1, exclusive=True)
  in_target = tf.equal(tf.mod(sentence_num, 2), 1)
  # first two tokens of each sentence are boilerplate.
  sentence_num_plus_one = sentence_num + 1
  # Shifting by two along the length axis makes the first two positions of
  # each segment disagree with the unshifted copy, flagging them boilerplate.
  # NOTE(review): the 4-entry padding implies labels is a 4-D Tensor
  # [batch, length, 1, 1] — confirm against callers.
  shifted = tf.pad(sentence_num_plus_one,
                   [[0, 0], [2, 0], [0, 0], [0, 0]])[:, :-2, :, :]
  nonboilerplate = tf.equal(sentence_num_plus_one, shifted)
  ret = tf.to_float(tf.logical_and(nonboilerplate, in_target))
  return ret
def padded_cross_entropy(logits,
                         labels,
                         label_smoothing,
                         weights_fn=weights_nonzero,
                         reduce_sum=True,
                         cutoff=0.0,
                         gaussian=False):
  """Compute cross-entropy assuming 0s are padding.

  Computes a loss numerator (the sum of losses), and loss denominator
  (the number of non-padding tokens).

  Args:
    logits: a `Tensor` with shape `[batch, timesteps, vocab_size]`.
      optionally a FactoredTensor.
    labels: an integer `Tensor` with shape `[batch, timesteps]`.
    label_smoothing: a floating point `Scalar`.
    weights_fn: A function from labels to weights.
    reduce_sum: a Boolean, whether to sum at the end or not.
    cutoff: a float, at which point to have no loss.
    gaussian: If true, use a Gaussian distribution for label smoothing

  Returns:
    loss_numerator: a `Scalar`.  Sum of losses.
    loss_denominator: a `Scalar.  The number of non-padding target tokens.

  Raises:
    ValueError: in case of unsupported argument types.
  """
  # FactoredTensor logits are delegated to the factored implementation.
  if isinstance(logits, FactoredTensor):
    if gaussian:
      raise ValueError("Factored padded cross entropy with Gaussian smoothing "
                       "is not implemented yet.")
    return padded_cross_entropy_factored(
        logits,
        labels,
        label_smoothing,
        weights_fn=weights_fn,
        reduce_sum=reduce_sum)
  confidence = 1.0 - label_smoothing
  logits_shape = shape_list(logits)
  vocab_size = logits_shape[-1]
  with tf.name_scope("padded_cross_entropy", values=[logits, labels]):
    if len(logits_shape) == 2:
      # Deal with the case where we did not insert extra dimensions due to
      # TPU issues.  No pad-to-same-length happens in this case.
      # TODO(noam): remove this logic once TPU can handle extra dimensions.
      labels = tf.reshape(labels, [-1])
    else:
      logits, labels = pad_with_zeros(logits, labels)
    # Reshape acts as a runtime check that logits and labels shapes agree.
    logits = tf.reshape(
        logits,
        shape_list(labels) + [vocab_size],
        name="padded_cross_entropy_size_check")
    logits = tf.cast(logits, tf.float32)
    xent = smoothing_cross_entropy(
        logits, labels, vocab_size, confidence, gaussian=gaussian)
    weights = weights_fn(labels)
    if cutoff > 0.0:
      # Losses below the cutoff contribute nothing.
      xent = tf.nn.relu(xent - cutoff)
    if not reduce_sum:
      return xent * weights, weights
    return tf.reduce_sum(xent * weights), tf.reduce_sum(weights)
def _weights_one_third(labels):
  """Returns Tensor of shape [batch, height, width]. Each element is 1/3."""
  spatial_shape = tf.shape(labels)[:-1]
  return tf.ones(spatial_shape) / 3.
def dml_loss(pred, labels, weights_fn=_weights_one_third, reduce_sum=True):
  """Discretized mixture of logistics loss.

  Args:
    pred: A [batch, height, width, num_mixtures*10] tensor of floats
      comprising one unconstrained mixture probability, three means
      (one per channel), three standard deviations (one per channel),
      and three coefficients which linearly parameterize dependence across
      channels.
    labels: A [batch, height, width, channels] tensor of 8-bit pixel
      intensities. The computation assumes channels is 3.
    weights_fn: A function of labels, returning a Tensor of shape
      [batch, height, width] which weights each loss term. Default is to scale
      each loss term by 1/3 so that they capture the average across channels.
    reduce_sum: A boolean, to return scalar loss instead of per position.

  Returns:
    Tuple of loss tensors for numerator and denominator, each a scalar if
    reduce_sum else of shape [batch, height, width]. The sum of their
    divisions is the number of nats for each pixel in labels.
  """
  # The logistic loss operates on pixel values rescaled to [-1, 1].
  real_labels = convert_rgb_to_symmetric_real(labels)
  per_position_loss = discretized_mix_logistic_loss(
      pred=pred, labels=real_labels)
  weights = weights_fn(labels)
  numerator = weights * per_position_loss
  denominator = weights_nonzero(weights)
  if not reduce_sum:
    return numerator, denominator
  return tf.reduce_sum(numerator), tf.reduce_sum(denominator)
def split_to_discretized_mix_logistic_params(inputs):
  """Splits input tensor into parameters of discretized mixture logistic.

  Args:
    inputs: A [batch, height, width, num_mixtures*10] tensor of floats
      comprising one unconstrained mixture probability, three means
      (one per channel), three standard deviations (one per channel),
      and three coefficients which linearly parameterize dependence across
      channels.

  Returns:
    Tuple of unconstrained mixture probabilities, locations, scales, and
    coefficient parameters of the distribution. The mixture probability has
    shape [batch, height, width, num_mixtures]. Other parameters have shape
    [batch, height, width, num_mixtures, 3].
  """
  batch, height, width, output_dim = shape_list(inputs)
  # 10 parameters per mixture component: 1 logit + 3 means + 3 scales
  # + 3 cross-channel coefficients.
  num_mixtures = output_dim // 10
  splits = [num_mixtures] + [num_mixtures * 3] * 3
  logits, locs, log_scales, coeffs = tf.split(
      inputs, num_or_size_splits=splits, axis=-1)
  per_channel_shape = [batch, height, width, num_mixtures, 3]
  locs = tf.reshape(locs, per_channel_shape)
  # Clamp log-scales from below for numerical stability.
  log_scales = tf.maximum(tf.reshape(log_scales, per_channel_shape), -7.)
  coeffs = tf.tanh(tf.reshape(coeffs, per_channel_shape))
  return logits, locs, log_scales, coeffs
def discretized_mix_logistic_loss(pred, labels):
  """Computes negative log probability for the discretized mixture of logistics.

  The distribution of a whole pixel is a mixture of 3-dimensional discretized
  logistic distributions. The 3-D discretized logistic factorizes as 3 1-D
  discretized logistic distributions, one for each channel. It defines

  ```none
  P(X = x)
  = sum_{k=1}^K probs[k] * P(X = x | locs[k], scales[k])
  = sum_{k=1}^K probs[k] * [
      prod_{c=1}^3 DiscretizedLogistic(X[c] = x[c] | means[k][c], scales[k]) ]
  ```

  The means tensor is a linear combination of location parameters and previous
  channels. The discretized logistic distribution assigns probability mass to
  an event P(X=x) via logistic CDFs: P(X <= x + 0.5) - P(X > x - 0.5) for 1 <
  x < 254; P(X <= 0.5) for x = 0; and 1 - P(X > 245.5) for x = 255. Instead of
  8-bit inputs, this implementation assumes the events are rescaled to [-1,
  1].

  Args:
    pred: A [batch, height, width, num_mixtures*10] tensor of floats
      comprising one unconstrained mixture probability, three means
      (one per channel), three standard deviations (one per channel),
      and three coefficients which linearly parameterize dependence across
      channels.
    labels: A [batch, height, width, channels] tensor of true pixel intensities
      rescaled to [-1, 1]. The computation assumes channels is 3.

  Returns:
    A [batch, height, width] tensor of the negative log conditional probability
    of each pixel given all previous pixels.
  """

  logits, locs, log_scales, coeffs = split_to_discretized_mix_logistic_params(
      pred)

  # Tile labels to broadcast compute across the mixture dimension.
  batch, height, width, num_mixtures = shape_list(logits)
  labels = tf.tile(
      tf.reshape(labels, [batch, height, width, 1, 3]),
      [1, 1, 1, num_mixtures, 1])

  # p(x) = sigmoid((x - means_i + 1/255.)/scale_i) -
  #        sigmoid((x - means_i - 1/255.)/scale_i)
  # for each channel i. The means are linearly parameterized.
  # Channels 1 and 2 condition on the true values of the earlier channels.
  means_0 = locs[..., 0]
  means_1 = locs[..., 1] + coeffs[..., 0] * labels[..., 0]
  means_2 = (
      locs[..., 2] + coeffs[..., 1] * labels[..., 0] +
      coeffs[..., 2] * labels[..., 1])
  means = tf.stack([means_0, means_1, means_2], axis=-1)
  centered_labels = labels - means
  inv_stdv = tf.exp(-log_scales)
  # 1/255 is half a discretization bin width after rescaling to [-1, 1].
  plus_in = inv_stdv * (centered_labels + 1. / 255.)
  min_in = inv_stdv * (centered_labels - 1. / 255.)
  cdf_plus = tf.nn.sigmoid(plus_in)
  cdf_min = tf.nn.sigmoid(min_in)

  # Compute log probability for edge case of 0 (before scaling), 255 (before
  # scaling), and all other cases respectively.
  log_prob_0 = plus_in - tf.nn.softplus(plus_in)
  log_prob_255 = -tf.nn.softplus(min_in)
  prob_event = tf.maximum(cdf_plus - cdf_min, 1e-12)
  log_prob_event = tf.log(prob_event)

  # Robustly select log-prob based on numerical edge-cases: (a) [-1, -1+eps);
  # (b) (1-eps, 1]; (c) NaNs during `tf.gradients` of `tf.select`, which may
  # cause `tf.log(0.)`; (d) p(x) < 1e-5.
  mid_in = inv_stdv * centered_labels
  log_prob_event_approx = (
      mid_in - log_scales - 2. * tf.nn.softplus(mid_in) - np.log(127.5))
  log_probs = tf.where(
      labels < -0.999, log_prob_0,
      tf.where(
          labels > 0.999, log_prob_255,
          tf.where(prob_event > 1e-5, log_prob_event, log_prob_event_approx)))

  # Sum over channels and compute log-probability of each mixture.
  log_probs = tf.reduce_sum(log_probs, -1) + tf.nn.log_softmax(logits, axis=-1)
  # Log-sum-exp over mixtures yields the total log-likelihood; negate for NLL.
  output = -tf.reduce_logsumexp(log_probs, axis=-1)
  return output
def sample_from_discretized_mix_logistic(pred, seed=None):
  """Sampling from a discretized mixture of logistics.

  Args:
    pred: A [batch, height, width, num_mixtures*10] tensor of floats
      comprising one unconstrained mixture probability, three means
      (one per channel), three standard deviations (one per channel),
      and three coefficients which linearly parameterize dependence across
      channels.
    seed: Random seed.

  Returns:
    A tensor of shape [batch, height, width, 3] with real intensities scaled
    between -1 and 1.
  """

  logits, locs, log_scales, coeffs = split_to_discretized_mix_logistic_params(
      pred)

  # Sample mixture indicator given logits using the gumbel max trick.
  num_mixtures = shape_list(logits)[-1]
  gumbel_noise = -tf.log(-tf.log(
      tf.random_uniform(
          tf.shape(logits), minval=1e-5, maxval=1. - 1e-5, seed=seed)))
  sel = tf.one_hot(
      tf.argmax(logits + gumbel_noise, -1),
      depth=num_mixtures,
      dtype=tf.float32)

  # Select mixture component's parameters.
  sel = tf.expand_dims(sel, -1)
  locs = tf.reduce_sum(locs * sel, 3)
  log_scales = tf.reduce_sum(log_scales * sel, 3)
  coeffs = tf.reduce_sum(coeffs * sel, 3)

  # Sample from 3-D logistic & clip to interval. Note we don't round to the
  # nearest 8-bit value when sampling.
  # Inverse-CDF sampling: log(u) - log(1-u) is standard-logistic noise.
  uniform_noise = tf.random_uniform(
      tf.shape(locs), minval=1e-5, maxval=1. - 1e-5, seed=seed)
  logistic_noise = tf.log(uniform_noise) - tf.log(1. - uniform_noise)
  x = locs + tf.exp(log_scales) * logistic_noise
  # Channels are sampled autoregressively: each later channel's mean is
  # shifted by a linear function of the earlier sampled channels.
  x0 = x[..., 0]
  x1 = x[..., 1] + coeffs[..., 0] * x0
  x2 = x[..., 2] + coeffs[..., 1] * x0 + coeffs[..., 2] * x1
  x = tf.stack([x0, x1, x2], axis=-1)
  x = tf.clip_by_value(x, -1., 1.)
  return x
def smoothing_cross_entropy(logits,
                            labels,
                            vocab_size,
                            confidence,
                            gaussian=False):
  """Cross entropy with label smoothing to limit over-confidence.

  Args:
    logits: Tensor of shape [batch_size, ?, ?, ?, vocab_size].
    labels: Tensor of shape [batch_size, ?, ?, ?].
    vocab_size: Tensor representing the size of the vocabulary.
    confidence: Used to determine on and off values for label smoothing.
      If `gaussian` is true, `confidence` is the variance to the Gaussian
      distribution.
    gaussian: Uses a Gaussian distribution for label smoothing

  Returns:
    Tensor of shape [batch_size, ?, ?, ?].
  """
  with tf.name_scope("smoothing_cross_entropy", values=[logits, labels]):
    # Low confidence is given to all non-true labels, uniformly.
    low_confidence = (1.0 - confidence) / tf.to_float(vocab_size - 1)
    # Normalizing constant is the best cross-entropy value with soft targets.
    # We subtract it just for readability, makes no difference on learning.
    normalizing = -(
        confidence * tf.log(confidence) + tf.to_float(vocab_size - 1) *
        low_confidence * tf.log(low_confidence + 1e-20))

    if gaussian and confidence > 0.0:
      labels = tf.cast(labels, tf.float32)
      normal_dist = tfp.distributions.Normal(loc=labels, scale=confidence)
      # Locations to evaluate the probability distributions.
      soft_targets = normal_dist.prob(
          tf.cast(tf.range(vocab_size), tf.float32)[:, None, None, None, None])
      # Reordering soft_targets from [vocab_size, batch_size, ?, ?, ?] to match
      # logits: [batch_size, ?, ?, ?, vocab_size]
      soft_targets = tf.transpose(soft_targets, perm=[1, 2, 3, 4, 0])
    else:
      # One-hot smoothing: `confidence` on the true label, `low_confidence`
      # spread over the remaining vocabulary entries.
      soft_targets = tf.one_hot(
          tf.cast(labels, tf.int32),
          depth=vocab_size,
          on_value=confidence,
          off_value=low_confidence)
    xentropy = tf.nn.softmax_cross_entropy_with_logits_v2(
        logits=logits, labels=soft_targets)
    return xentropy - normalizing
def global_pool_1d(inputs, pooling_type="MAX", mask=None):
  """Collapse the time axis of a batch of sequences into one vector each.

  Useful to convert a list of vectors into a single vector so as
  to get a representation of a set.

  Args:
    inputs: A tensor of shape [batch_size, sequence_length, input_dims]
      containing the sequences of input vectors.
    pooling_type: the pooling type to use, MAX or AVR
    mask: A tensor of shape [batch_size, sequence_length] containing a
      mask for the inputs with 1's for existing elements, and 0's elsewhere.

  Returns:
    A tensor of shape [batch_size, input_dims] containing the sequences of
    transformed vectors.
  """
  with tf.name_scope("global_pool", values=[inputs]):
    if mask is not None:
      # Zero out padded positions before pooling.
      mask = tf.expand_dims(mask, axis=2)
      inputs = tf.multiply(inputs, mask)

    if pooling_type == "MAX":
      # A tf.pool would also work here, but reduce_max is cleaner.
      pooled = tf.reduce_max(inputs, axis=1)
    elif pooling_type == "AVR":
      if mask is None:
        pooled = tf.reduce_mean(inputs, axis=1)
      else:
        # Average over real elements only, not over the dummy padding.
        totals = tf.reduce_sum(inputs, axis=1)
        num_real_elems = tf.reduce_sum(mask, axis=1, keepdims=True)
        pooled = tf.div(totals, tf.maximum(num_real_elems, 1))
    return pooled
def running_global_pool_1d(inputs, pooling_type="MAX"):
  """Same global pool, but only for the elements up to the current element.

  Useful for outputs where the state of future elements is not known.
  Takes no mask as all elements up to the current element are assumed to exist.
  Currently only supports maximum. Equivalent to using a lower triangle bias.

  Args:
    inputs: A tensor of shape [batch_size, sequence_length, input_dims]
      containing the sequences of input vectors.
    pooling_type: Pooling type to use. Currently only supports 'MAX'.

  Returns:
    A tensor of shape [batch_size, sequence_length, input_dims] containing the
    running 'totals'.
  """
  del pooling_type  # Only MAX is implemented.
  with tf.name_scope("running_global_pool", values=[inputs]):
    # Make time the leading axis so tf.scan walks over sequence steps.
    time_major = tf.transpose(inputs, [1, 0, 2])
    running_max = tf.scan(tf.maximum, time_major, swap_memory=True)
    # Restore the original batch-major layout.
    return tf.transpose(running_max, [1, 0, 2])
def gated_linear_unit_layer(x, name=None):
  """Gated linear unit layer.

  Paper: Language Modeling with Gated Convolutional Networks.
  Link: https://arxiv.org/abs/1612.08083
  x = Wx * sigmoid(W'x).

  Args:
    x: A tensor
    name: A string

  Returns:
    A tensor of the same shape as x.
  """
  with tf.variable_scope(name, default_name="glu_layer", values=[x]):
    depth = shape_list(x)[-1]
    # One projection produces both the values and their gates.
    projected = tf.layers.dense(x, depth * 2, activation=None)
    values, gates = tf.split(projected, 2, axis=-1)
    return values * tf.nn.sigmoid(gates)
def sru_with_scan(x,
                  num_layers=2,
                  activation=None,
                  initial_state=None,
                  name=None,
                  reuse=None):
  """SRU cell as in https://arxiv.org/abs/1709.02755.

  This implementation uses tf.scan and can incur overhead, see the full SRU
  function doc for details and an implementation that is sometimes faster.

  Args:
    x: A tensor of shape [batch, ..., channels] ; ... is treated as time.
    num_layers: How many SRU layers; default is 2 as results for 1 disappoint.
    activation: Optional activation function, try tf.nn.tanh or tf.nn.relu.
    initial_state: Optional initial c-state, set to zeros if None.
    name: Optional name, "sru" by default.
    reuse: Optional reuse.

  Returns:
    A tensor of the same shape as x.

  Raises:
    ValueError: if num_layers is not positive.
  """
  if num_layers < 1:
    raise ValueError("Number of layers must be positive: %d" % num_layers)
  with tf.variable_scope(name, default_name="sru", values=[x], reuse=reuse):
    # We assume x is [batch, ..., channels] and treat all ... as time.
    x_shape = shape_list(x)
    x = tf.reshape(x, [x_shape[0], -1, x_shape[-1]])
    x = tf.transpose(x, [1, 0, 2])  # Scan assumes time on axis 0.
    initial_state = initial_state or tf.zeros([x_shape[0], x_shape[-1]])

    # SRU state manipulation function.
    def next_state(cur_state, args_tup):
      # Recurrence (4) from the paper: c_t = f_t*c_{t-1} + (1-f_t)*x'_t,
      # with the (1-f)*x' product precomputed outside the scan.
      cur_x_times_one_minus_f, cur_f = args_tup
      return cur_f * cur_state + cur_x_times_one_minus_f

    # Calculate SRU on each layer.
    for i in range(num_layers):
      # The parallel part of the SRU.
      x_orig = x
      # One dense op yields the transformed input x', forget gate f and
      # highway gate r for the whole sequence at once.
      x, f, r = tf.split(
          tf.layers.dense(x, 3 * x_shape[-1], name="kernel_%d" % i), 3, axis=-1)
      f, r = tf.sigmoid(f), tf.sigmoid(r)
      x_times_one_minus_f = x * (1.0 - f)  # Compute in parallel for speed.
      # Calculate states.
      c_states = tf.scan(
          next_state, (x_times_one_minus_f, f),
          initializer=initial_state,
          parallel_iterations=2,
          name="scan_%d" % i)
      # Final output.
      if activation is not None:
        c_states = activation(c_states)
      # Highway connection (5): r gates the state, (1-r) passes the input.
      h = c_states * r + (1.0 - r) * x_orig
      x = h  # Next layer.
    # Transpose back to batch-major.
    x = tf.transpose(x, [1, 0, 2])
    return tf.reshape(x, x_shape)
class CumsumprodCell(object):
  """Cumulative sum and product object for use with functional_rnn API."""

  def __init__(self, initializer):
    # The initializer's last dimension determines the cell's output size.
    self._initializer = initializer

  @property
  def output_size(self):
    return int(shape_list(self._initializer)[-1])

  def zero_state(self, batch_size, dtype):
    dtype = dtype or tf.float32
    return tf.zeros([batch_size, self.output_size], dtype=dtype)

  def __call__(self, inputs_t, state_t):
    # Inputs pack (x * (1 - f), f); the recurrence is
    # state' = f * state + x * (1 - f), emitted as both output and new state.
    x_times_one_minus_f, f = tf.split(inputs_t, 2, axis=-1)
    next_state = f * state_t + x_times_one_minus_f
    return next_state, next_state
def sru(x,
        num_layers=2,
        activation=None,
        initial_state=None,
        name=None,
        reuse=None):
  """SRU cell as in https://arxiv.org/abs/1709.02755.

  As defined in the paper:
  (1) x'_t = W x_t
  (2) f_t = sigmoid(Wf x_t + bf)
  (3) r_t = sigmoid(Wr x_t + br)
  (4) c_t = f_t * c_{t-1} + (1 - f_t) * x'_t
  (5) h_t = r_t * activation(c_t) + (1 - r_t) * x_t

  This version uses functional ops to be faster on GPUs with TF-1.9+.

  Args:
    x: A tensor of shape [batch, ..., channels] ; ... is treated as time.
    num_layers: How many SRU layers; default is 2 as results for 1 disappoint.
    activation: Optional activation function, try tf.nn.tanh or tf.nn.relu.
    initial_state: Optional initial c-state, set to zeros if None.
    name: Optional name, "sru" by default.
    reuse: Optional reuse.

  Returns:
    A tensor of the same shape as x.

  Raises:
    ValueError: if num_layers is not positive.
  """
  if num_layers < 1:
    raise ValueError("Number of layers must be positive: %d" % num_layers)
  if is_xla_compiled():  # On TPU the XLA does a good job with while.
    return sru_with_scan(x, num_layers, activation, initial_state, name, reuse)
  try:
    from tensorflow.contrib.recurrent.python.ops import functional_rnn  # pylint: disable=g-import-not-at-top
  except ImportError:
    # Older TF without contrib.recurrent: fall back to the tf.scan version.
    tf.logging.info("functional_rnn not found, using sru_with_scan instead")
    return sru_with_scan(x, num_layers, activation, initial_state, name, reuse)

  with tf.variable_scope(name, default_name="sru", values=[x], reuse=reuse):
    # We assume x is [batch, ..., channels] and treat all ... as time.
    x_shape = shape_list(x)
    x = tf.reshape(x, [x_shape[0], -1, x_shape[-1]])
    initial_state = initial_state or tf.zeros([x_shape[0], x_shape[-1]])
    cell = CumsumprodCell(initial_state)
    # Calculate SRU on each layer.
    for i in range(num_layers):
      # The parallel part of the SRU: one dense call yields x', f, r (eqs 1-3).
      x_orig = x
      x, f, r = tf.split(
          tf.layers.dense(x, 3 * x_shape[-1], name="kernel_%d" % i), 3, axis=-1)
      f, r = tf.sigmoid(f), tf.sigmoid(r)
      x_times_one_minus_f = x * (1.0 - f)  # Compute in parallel for speed.
      # Calculate states (eq. 4) with the sequential functional_rnn cell.
      concat = tf.concat([x_times_one_minus_f, f], axis=-1)
      c_states, _ = functional_rnn.functional_rnn(
          cell, concat, time_major=False)
      # Final output (eq. 5): highway mix of the state and the layer input.
      if activation is not None:
        c_states = activation(c_states)
      h = c_states * r + (1.0 - r) * x_orig
      x = h  # Next layer.
    return tf.reshape(x, x_shape)
def linear_set_layer(layer_size,
                     inputs,
                     context=None,
                     activation_fn=tf.nn.relu,
                     dropout=0.0,
                     name=None):
  """Basic layer type for doing funky things with sets.

  Applies the same linear transformation to every element of the input set.
  If a context is supplied, its transform is added to each element (this is
  equivalent to concatenation followed by one linear layer). For example,
  global_pool_1d output can serve as the context for the next layer.

  TODO: Add bias add (or control the biases used).

  Args:
    layer_size: Dimension to transform the input vectors to.
    inputs: A tensor of shape [batch_size, sequence_length, input_dims]
      containing the sequences of input vectors.
    context: A tensor of shape [batch_size, context_dims] containing a global
      statistic about the set.
    activation_fn: The activation function to use.
    dropout: Dropout probability.
    name: name.

  Returns:
    Tensor of shape [batch_size, sequence_length, output_dims] containing the
    sequences of transformed vectors.
  """
  with tf.variable_scope(
      name, default_name="linear_set_layer", values=[inputs]):
    # Kernel-size-1 conv == per-element linear transform.
    outputs = conv1d(inputs, layer_size, 1, activation=None, name="set_conv")

    if context is not None:
      # tf doesn't broadcast through concat, but adding the transformed
      # context achieves the same effect as concat + linear.
      if len(context.get_shape().as_list()) == 2:
        context = tf.expand_dims(context, axis=1)
      outputs += conv1d(context, layer_size, 1, activation=None,
                        name="cont_conv")

    if activation_fn is not None:
      outputs = activation_fn(outputs)
    if dropout != 0.0:
      outputs = tf.nn.dropout(outputs, 1.0 - dropout)
    return outputs
def ravanbakhsh_set_layer(layer_size,
                          inputs,
                          mask=None,
                          sequential=False,
                          activation_fn=tf.nn.tanh,
                          dropout=0.0,
                          name=None):
  """Layer from Deep Sets paper: https://arxiv.org/abs/1611.04500 .

  More parameter-efficient version of a linear-set-layer with context:
  each element is transformed after subtracting the (running) global pool.

  Args:
    layer_size: Dimension to transform the input vectors to.
    inputs: A tensor of shape [batch_size, sequence_length, vector]
      containing the sequences of input vectors.
    mask: A tensor of shape [batch_size, sequence_length] containing a
      mask for the inputs with 1's for existing elements, and 0's elsewhere.
    sequential: If true, will use a running global pool so each element will
      only depend on those before it. Set true if this layer is being used in
      an output sequence.
    activation_fn: The activation function to use.
    dropout: dropout.
    name: name.

  Returns:
    Tensor of shape [batch_size, sequence_length, vector] containing the
    sequences of transformed vectors.
  """
  del dropout
  with tf.variable_scope(name, "ravanbakhsh_set_layer", [inputs]):
    if sequential:
      # Causal variant: pool only over preceding elements.
      pooled = running_global_pool_1d(inputs)
    else:
      pooled = tf.expand_dims(global_pool_1d(inputs, mask=mask), axis=1)
    return linear_set_layer(
        layer_size,
        inputs - pooled,
        activation_fn=activation_fn,
        name=name)
def fn_device_dependency_dict():
  """State container for fn_device_dependency, stored on the default graph."""
  graph = tf.get_default_graph()
  if not hasattr(graph, "dependency_dict"):
    graph.dependency_dict = defaultdict(list)
  return graph.dependency_dict
@contextlib.contextmanager
def fn_device_dependency(name, device=""):
  """Add control deps for name and device."""
  key = name + "_" + device
  outs = []

  def body():
    # Make this call's ops depend on the outputs recorded by the previous
    # call with the same (name, device) key.
    with tf.control_dependencies(fn_device_dependency_dict()[key]):
      yield outs
      # The caller is expected to append its output tensors to `outs`
      # before the with-block exits.
      assert outs

      deps = outs
      if isinstance(outs[0], (list, tuple)):
        assert len(outs) == 1
        deps = outs[0]
      # Record outputs so the next call with this key serializes after them.
      fn_device_dependency_dict()[key] = deps

  # NOTE(review): contextmanager calls this function and then iterates the
  # returned generator, so returning body() here works — the yield above runs
  # inside that generator. The tf.device scope only wraps the body() call
  # itself, not the yielded region; presumably the graph device is intended
  # to apply via the dependency ops — confirm before changing.
  if device:
    with tf.device(device):
      return body()
  else:
    return body()
def underlying_variable_ref(t):
  """Find the underlying variable ref.

  Traverses through Identity, ReadVariableOp, and Enter ops.
  Stops when op type has Variable or VarHandle in name.

  Args:
    t: a Tensor

  Returns:
    a Tensor that is a variable ref, or None on error.
  """
  # Walk up through pass-through ops to their single input.
  passthrough_types = ("Identity", "ReadVariableOp", "Enter")
  while t.op.type in passthrough_types:
    t = t.op.inputs[0]

  final_type = t.op.type
  is_variable = "Variable" in final_type or "VarHandle" in final_type
  return t if is_variable else None
def underlying_variable(t):
  """Find the underlying tf.Variable object.

  Args:
    t: a Tensor

  Returns:
    tf.Variable.
  """
  ref = underlying_variable_ref(t)
  assert ref is not None
  # Keep a name -> Variable index on the graph, extending it lazily with
  # any variables created since the previous call.
  graph = tf.get_default_graph()
  if not hasattr(graph, "var_index"):
    graph.var_index = {}
  var_index = graph.var_index
  for v in tf.global_variables()[len(var_index):]:
    var_index[v.name] = v
  return var_index[ref.name]
def approximate_split(x, num_splits, axis=0):
  """Split approximately equally into num_splits parts.

  Args:
    x: a Tensor
    num_splits: an integer
    axis: an integer.

  Returns:
    a list of num_splits Tensors.
  """
  total = shape_list(x)[axis]
  # (total + i) // num_splits summed over i in [0, num_splits) equals total,
  # so the parts differ in size by at most one.
  part_sizes = [tf.div(total + i, num_splits) for i in range(num_splits)]
  return tf.split(x, part_sizes, axis=axis)
class FactoredTensor(object):
  """A concise factored representation of Tensor as two tensors.

  This class represents the tensor tf.matmul(a, b, transpose_b=True)
  by storing the values of Tensors a and b.

  The reason for this is that the product may be too big to fully realize at
  once, so it can be realized a part at a time.

  "a" may have extra leading dimensions, in which case they are flattened out
  before computing the matrix product, then re-expanded afterwards.
  """

  def __init__(self, a, b):
    self._a = a
    self._b = b

  @property
  def a(self):
    return self._a

  @property
  def b(self):
    return self._b

  def to_tensor(self):
    """Convert to Tensor by materializing the full product."""
    a_shape = shape_list(self.a)
    b_shape = shape_list(self.b)
    inner_dim = b_shape[1]
    result_dim = b_shape[0]
    # Flatten a's leading dims, multiply, then restore them.
    a_flattened = tf.reshape(self.a, [-1, inner_dim])
    product = tf.matmul(a_flattened, self.b, transpose_b=True)
    product = tf.reshape(product, a_shape[:-1] + [result_dim])
    product.set_shape(self.a.get_shape().as_list()[:-1] +
                      [self.b.get_shape()[0]])
    return product
def _convert_factored_tensor_to_tensor(value, *args, **kwargs):
  """Materializes a FactoredTensor for TF's tensor-conversion machinery."""
  # call ops.convert_to_tensor to handle optional arguments appropriately
  return ops.internal_convert_to_tensor(value.to_tensor(), *args, **kwargs)


# Lets a FactoredTensor be passed wherever a tf.Tensor is accepted; the
# conversion realizes the full product via to_tensor().
tf.register_tensor_conversion_function(FactoredTensor,
                                       _convert_factored_tensor_to_tensor)
def smoothing_cross_entropy_factored_grad(op, dy):
  """Gradient function for smoothing_cross_entropy_factored.

  Recomputes the loss in num_splits chunks so the full [batch, vocab_size]
  logits matrix is never materialized at once; gradients are accumulated
  chunk by chunk.

  Args:
    op: the forward op, whose inputs are (a, b, labels, confidence).
    dy: gradient with respect to the op's output.

  Returns:
    Gradients for (a, b, labels, confidence); the last two are None because
    labels and confidence get no gradient.
  """
  a = op.inputs[0]
  b = op.inputs[1]
  labels = op.inputs[2]
  confidence = op.inputs[3]
  num_splits = 16
  vocab_size = shape_list(b)[0]
  labels = approximate_split(labels, num_splits)
  a = approximate_split(a, num_splits)
  dy = approximate_split(dy, num_splits)
  b_grad = None
  a_grad_parts = []
  deps = []
  for part in range(num_splits):
    # Serialize the chunks with control deps to bound peak memory.
    with tf.control_dependencies(deps):
      logits = tf.matmul(a[part], b, transpose_b=True)
      output_part = smoothing_cross_entropy(logits, labels[part], vocab_size,
                                            confidence)
      a_grad_part, b_grad_part = tf.gradients(
          ys=[output_part], xs=[a[part], b], grad_ys=[dy[part]])
      a_grad_parts.append(a_grad_part)
      # b's gradient sums over chunks; a's gradient is concatenated.
      if part > 0:
        b_grad += b_grad_part
      else:
        b_grad = b_grad_part
      deps = [b_grad, a_grad_part]
  a_grad = tf.concat(a_grad_parts, 0)
  return a_grad, b_grad, None, None
@function.Defun(
    noinline=True,
    python_grad_func=smoothing_cross_entropy_factored_grad,
    compiled=True,
    separate_compiled_gradients=True)
def smoothing_cross_entropy_factored(a, b, labels, confidence):
  """Memory-efficient computation of smoothing cross-entropy.

  Avoids realizing the entire logits matrix at once by computing it in
  num_splits batch chunks; the matching chunked gradient is supplied via
  python_grad_func above.

  Args:
    a: a Tensor with shape [batch, inner_dim]
    b: a Tensor with shape [vocab_size, inner_dim]
    labels: an integer Tensor with shape [batch]
    confidence: a float

  Returns:
    A Tensor with shape [batch]
  """
  num_splits = 16
  vocab_size = shape_list(b)[0]
  labels = approximate_split(labels, num_splits)
  a = approximate_split(a, num_splits)
  parts = []
  for part in range(num_splits):
    # Depend on the previous chunk so chunks execute one at a time,
    # keeping only one partial logits matrix alive.
    with tf.control_dependencies(parts[-1:]):
      logits = tf.matmul(a[part], b, transpose_b=True)
      parts.append(
          smoothing_cross_entropy(logits, labels[part], vocab_size, confidence))
  return tf.concat(parts, 0)
def padded_cross_entropy_factored(factored_logits,
                                  labels,
                                  label_smoothing,
                                  weights_fn=weights_nonzero,
                                  reduce_sum=True):
  """Memory-efficient computation of smoothing cross-entropy.

  Avoids realizing the entire logits matrix at once.

  Args:
    factored_logits: a `FactoredTensor` representing a Tensor
      with shape `[batch, timesteps, vocab_size]`.
    labels: an integer `Tensor` with shape `[batch, timesteps]`.
    label_smoothing: a floating point `Scalar`.
    weights_fn: A function from labels to weights.
    reduce_sum: a Boolean, whether to sum at the end or not.

  Returns:
    loss_numerator: a `Scalar`.  Sum of losses.
    loss_denominator: a `Scalar.  The number of non-padding target tokens.
  """
  a = factored_logits.a
  b = factored_logits.b
  confidence = 1.0 - label_smoothing
  with tf.name_scope("padded_cross_entropy_factored", values=[a, b, labels]):
    flat_labels = tf.reshape(labels, [-1])
    flat_a = tf.reshape(a, [-1, shape_list(b)[1]])
    token_xent = smoothing_cross_entropy_factored(
        flat_a, b, flat_labels, tf.convert_to_tensor(confidence))
    token_xent = tf.reshape(token_xent, shape_list(labels))
    weights = weights_fn(labels)
    weighted_xent = token_xent * weights
    if not reduce_sum:
      return weighted_xent, weights
    return tf.reduce_sum(weighted_xent), tf.reduce_sum(weights)
def fn_with_custom_grad(grad_fn, use_global_vars=False):
  """Decorator to create a subgraph with a custom gradient function.

  The subgraph created by the decorated function is NOT put in a Defun and so
  does not suffer from the limitations of the Defun (all subgraph ops on the
  same device, no summaries).

  Args:
    grad_fn: function with signature
      (inputs, variables, outputs, output_grads) -> (grad_inputs, grad_vars),
      all of which are lists of Tensors.
    use_global_vars: if True, variables will be the global variables created.
      If False, will be the trainable variables.

  Returns:
    Decorator for function such that the gradient is defined by grad_fn.
  """
  def decorator(fn):
    @functools.wraps(fn)
    def inner(*args):
      return _fn_with_custom_grad(
          fn, args, grad_fn, use_global_vars=use_global_vars)
    return inner

  return decorator
def _fn_with_custom_grad(fn, inputs, grad_fn, use_global_vars=False):
  """Create a subgraph with a custom gradient.

  Args:
    fn: function that takes inputs as arguments and produces 1 or more Tensors.
    inputs: list<Tensor>, will be passed as fn(*inputs).
    grad_fn: function with signature
      (inputs, vars, outputs, output_grads) -> (grad_inputs, grad_vars),
      all of which are lists of Tensors.
    use_global_vars: if True, variables will be the global variables created.
      If False, will be the trainable variables.

  Returns:
    fn(*inputs)
  """
  vs = tf.get_variable_scope()
  get_vars_fn = (
      vs.global_variables if use_global_vars else vs.trainable_variables)
  # Snapshot the variable count so variables created inside fn can be found.
  len_before_vars = len(get_vars_fn())
  inputs = list(inputs)
  outputs = fn(*inputs)
  train_vars = get_vars_fn()[len_before_vars:]

  if grad_fn is None:
    return outputs

  if not isinstance(outputs, (tuple, list)):
    outputs = [outputs]
  outputs = list(outputs)

  defun_inputs = [inputs, train_vars, outputs]

  def custom_grad_fn(op, *dys):
    """Custom grad fn applying grad_fn for identity Defun."""
    # Recover the (inputs, vars, outputs) structure from the Defun's flat
    # input list, then delegate the actual gradient math to grad_fn.
    fn_inputs, fn_vars, fn_outputs = tf.contrib.framework.nest.pack_sequence_as(
        defun_inputs, list(op.inputs))
    dys = list(dys)
    assert len(fn_outputs) == len(outputs)
    assert len(fn_outputs) == len(dys)

    grad_inputs, grad_vars = grad_fn(fn_inputs, fn_vars, fn_outputs, dys)
    # The outputs fed into the identity Defun receive no gradient.
    grad_outputs = [None] * len(fn_outputs)
    return tuple(grad_inputs + grad_vars + grad_outputs)

  # The Defun takes as input the original inputs, the trainable variables
  # created in fn, and the outputs. In the forward it passes through the
  # outputs. In the backwards, it produces gradients for the original inputs
  # and the trainable variables.
  in_types = [t.dtype for t in inputs]
  out_types = [t.dtype for t in outputs]
  var_types = [t.dtype for t in train_vars]

  @function.Defun(
      *(in_types + var_types + out_types),
      func_name="identity_custom_grad%d" % ops.uid(),
      python_grad_func=custom_grad_fn,
      shape_func=lambda _: [t.get_shape() for t in outputs])
  def identity(*args):
    # Pass through only the outputs; inputs and vars ride along so the
    # gradient function can see them via op.inputs.
    _, _, outs = tf.contrib.framework.nest.pack_sequence_as(defun_inputs, args)
    return tuple([tf.identity(t) for t in outs])

  flat_inputs = tf.contrib.framework.nest.flatten(defun_inputs)

  id_out = identity(*flat_inputs)
  return id_out
# Cache of compiled Defun forward functions used by
# conv_hidden_relu_memory_efficient, keyed by a string containing epsilon.
_function_cache = {}
def conv_hidden_relu_memory_efficient(x,
                                      filter_size,
                                      epsilon=1e-6,
                                      forget=True,
                                      test_vars=None,
                                      name=None):
  """LayerNorm, Conv, ReLU, Conv.

  All convolutions have kernel size 1.

  returns conv(relu(conv(layer_norm(x))))

  Args:
    x: input Tensor with shape [batch, length, io_size]
    filter_size: an integer - size of the hidden layer.
    epsilon: a float (for layer norm)
    forget: a boolean - forget forwards activations and recompute on backprop
    test_vars: optional tuple of variables for testing purposes
    name: an optional string

  Returns:
    a Tensor with shape [batch, length, io_size]
  """
  io_size = x.get_shape().as_list()[-1]

  def forward_internal(x, f1, f2, scale, bias):
    """Forward function."""
    # Split batch-wise to avoid exhausting memory in case the batch is large
    # and the hidden layer is large.
    num_splits = 4
    x_flat = tf.reshape(x, [-1, 1, shape_list(x)[2]])
    xs = approximate_split(x_flat, num_splits)
    ys = []
    for i in range(num_splits):
      # Chain the chunks with control deps so only one partial hidden
      # activation is alive at a time.
      with tf.control_dependencies(ys[-1:]):
        n = layer_norm_compute(xs[i], epsilon, scale, bias)
        y = tf.nn.conv1d(n, f1, 1, "SAME")
        y = tf.nn.relu(y)
        y = tf.nn.conv1d(y, f2, 1, "SAME")
        ys.append(y)
    y = tf.concat(ys, 0)
    y = tf.reshape(y, shape_list(x))
    return y

  key = ("conv_hidden_relu_memory_efficient %s" % epsilon)
  if not forget:
    forward_fn = forward_internal
  elif key in _function_cache:
    forward_fn = _function_cache[key]
  else:

    @function.Defun(compiled=True)
    def grad_fn(x, f1, f2, scale, bias, dy):
      """Gradient for efficiency."""
      with tf.control_dependencies([dy]):
        num_splits = 4
        x_shape = shape_list(x)
        flat_shape = [-1, 1, x_shape[2]]
        x = tf.reshape(x, flat_shape)
        dy = tf.reshape(dy, flat_shape)
        xs = approximate_split(x, num_splits)
        dys = approximate_split(dy, num_splits)
        dxs = []
        df1 = 0
        df2 = 0
        dscale = 0
        dbias = 0
        deps = []
        for i in range(num_splits):
          with tf.control_dependencies(deps):
            # Recompute the forward pass for this chunk, then differentiate it.
            n = layer_norm_compute(xs[i], epsilon, scale, bias)
            y = tf.nn.conv1d(n, f1, 1, "SAME")
            y = tf.nn.relu(y)
            y = tf.nn.conv1d(y, f2, 1, "SAME")
            dxi, pdf1, pdf2, pdscale, pdbias = tf.gradients(
                ys=[y], xs=[xs[i], f1, f2, scale, bias], grad_ys=[dys[i]])
            # Parameter grads accumulate over chunks; input grads concatenate.
            df1 += pdf1
            df2 += pdf2
            dscale += pdscale
            dbias += pdbias
            dxs.append(dxi)
            deps = [dxi, df1, df2, dscale, dbias]
        with tf.control_dependencies(deps):
          dx = tf.concat(dxs, 0)
          dx = tf.reshape(dx, x_shape)
          return dx, df1, df2, dscale, dbias

    @function.Defun(
        grad_func=grad_fn, compiled=True, separate_compiled_gradients=True)
    def forward_fn(x, f1, f2, scale, bias):
      return forward_internal(x, f1, f2, scale, bias)

    # Bug fix: store the compiled function so the `key in _function_cache`
    # branch above can actually hit; previously the cache was never populated
    # and every call rebuilt the Defuns.
    _function_cache[key] = forward_fn

  with tf.variable_scope(name, default_name="ffn2", values=[x]):
    # TODO(noam): it would be nice to save memory by casting x to float16
    # here, but this causes problems with the gradients. Figure out if there
    # is a way to leave the gradients as float32.
    if test_vars is not None:
      f1, f2, scale, bias = list(test_vars)
    else:
      f1 = tf.get_variable("f1", [1, io_size, filter_size])
      f2 = tf.get_variable("f2", [1, filter_size, io_size])
      scale, bias = layer_norm_vars(io_size)
    if forget:
      y = forward_fn(x, f1, f2, scale, bias)
    else:
      y = forward_internal(x, f1, f2, scale, bias)
    y.set_shape(x.get_shape())
    return y
def shape_list(x):
  """Return list of dims, statically where possible."""
  x = tf.convert_to_tensor(x)

  # If unknown rank, return dynamic shape
  if x.get_shape().dims is None:
    return tf.shape(x)

  static = x.get_shape().as_list()
  dynamic = tf.shape(x)
  # Prefer the static value per dimension; fall back to the dynamic op.
  return [dynamic[i] if dim is None else dim for i, dim in enumerate(static)]
def list_product(els):
  """Multiply all elements of a sequence.

  Elements may be numbers or Tensors (anything supporting `*`).

  Args:
    els: a sequence of multiplicands.

  Returns:
    The product of all elements; 1 for an empty sequence (empty-product
    convention — previously this raised IndexError on empty input).
  """
  if not els:
    return 1
  prod = els[0]
  for el in els[1:]:
    prod *= el
  return prod
def sample_with_temperature(logits, temperature):
  """Either argmax or random sampling.

  Args:
    logits: a Tensor.
    temperature: a float  0.0=argmax 1.0=random

  Returns:
    a Tensor with one fewer dimension than logits.
  """
  if temperature == 0.0:
    # TF argmax doesn't handle >5 dimensions, so we reshape here.
    logits_shape = shape_list(logits)
    flat = tf.reshape(logits, [-1, logits_shape[-1]])
    return tf.reshape(tf.argmax(flat, axis=1), logits_shape[:-1])

  assert temperature > 0.0
  scaled = tf.reshape(logits, [-1, shape_list(logits)[-1]]) / temperature
  samples = tf.multinomial(scaled, 1)
  return tf.reshape(samples,
                    shape_list(logits)[:logits.get_shape().ndims - 1])
def ones_matrix_band_part(rows, cols, num_lower, num_upper, out_shape=None):
  """Matrix band part of ones.

  Args:
    rows: int determining number of rows in output
    cols: int
    num_lower: int, maximum distance backward. Negative values indicate
      unlimited.
    num_upper: int, maximum distance forward. Negative values indicate
      unlimited.
    out_shape: shape to reshape output by.

  Returns:
    Tensor of size rows * cols reshaped into shape out_shape.
  """
  if all([isinstance(el, int) for el in [rows, cols, num_lower, num_upper]]):
    # Needed info is constant, so we construct in numpy
    if num_lower < 0:
      num_lower = rows - 1
    if num_upper < 0:
      num_upper = cols - 1
    # np.tri(N, M, k) has ones at and below the k-th diagonal. Transposing
    # tri(cols, rows, num_lower) keeps entries at most num_lower below the
    # main diagonal; tri(rows, cols, num_upper) keeps those at most
    # num_upper above. Their product is the band.
    lower_mask = np.tri(cols, rows, num_lower).T
    upper_mask = np.tri(rows, cols, num_upper)
    band = np.ones((rows, cols)) * lower_mask * upper_mask
    if out_shape:
      band = band.reshape(out_shape)
    band = tf.constant(band, tf.float32)
  else:
    # Some argument is a Tensor: build the band with the TF op instead.
    band = tf.matrix_band_part(
        tf.ones([rows, cols]), tf.cast(num_lower, tf.int64),
        tf.cast(num_upper, tf.int64))
    if out_shape:
      band = tf.reshape(band, out_shape)

  return band
def reshape_like_all_dims(a, b):
  """Reshapes a to match the shape of b."""
  reshaped = tf.reshape(a, tf.shape(b))
  if not tf.contrib.eager.in_eager_mode():
    # Propagate b's static shape info in graph mode.
    reshaped.set_shape(b.get_shape())
  return reshaped
def recompute_grad(fn):
  """Decorator that recomputes the function on the backwards pass.

  Args:
    fn: a function that takes Tensors (all as positional arguments) and returns
      a tuple of Tensors.

  Returns:
    A wrapped fn that is identical to fn when called, but its activations will
    be discarded and recomputed on the backwards pass (i.e. on a call to
    tf.gradients).
  """

  @functools.wraps(fn)
  def inner(*args):
    return _recompute_grad(fn, args)

  return inner
def _recompute_grad(fn, args):
  """See recompute_grad.

  Wraps fn with fn_with_custom_grad; the custom gradient recomputes fn's
  forward pass (instead of keeping its activations) before differentiating.
  """
  # One-element lists used as mutable cells: filled at forward time, read at
  # backward time.
  cached_vs = []
  cached_arg_scope = []

  def grad_fn(inputs, variables, outputs, output_grads):
    """Recompute outputs for gradient computation."""
    del outputs
    variables = [underlying_variable_ref(v) for v in variables]
    # Recompute outputs
    with tf.control_dependencies(output_grads):
      with tf.contrib.framework.arg_scope(cached_arg_scope[0]):
        with tf.variable_scope(cached_vs[0], reuse=True):
          outputs = fn(*inputs)

    if not isinstance(outputs, (list, tuple)):
      outputs = [outputs]
    outputs = list(outputs)
    grads = tf.gradients(outputs, inputs + variables, output_grads)
    grad_inputs = grads[:len(inputs)]
    grad_vars = grads[len(inputs):]
    # TODO(rsepassi): Make fn_with_custom_grad work with bfloat16.
    # If the input gradients are bfloat16, it's assumed the variables are
    # bfloat16. This is a hack to ensure that grad_vars are the right type.
    if grad_inputs[0].dtype == tf.bfloat16:
      grad_vars = [tf.cast(grad_var, tf.bfloat16) for grad_var in grad_vars]
    return grad_inputs, grad_vars

  @fn_with_custom_grad(grad_fn)
  def fn_with_recompute(*args):
    # Capture the scopes active at forward time so the recomputation above
    # can reuse the exact same variables and arg scope.
    cached_vs.append(tf.get_variable_scope())
    cached_arg_scope.append(tf.contrib.framework.current_arg_scope())
    return fn(*args)

  return fn_with_recompute(*args)
def dense(x, units, **kwargs):
  """Identical to tf.layers.dense."""
  # Thin pass-through wrapper; all keyword arguments are forwarded unchanged.
  return tf.layers.dense(x, units, **kwargs)
def batch_dense(inputs,
                units,
                activation=None,
                kernel_initializer=None,
                reuse=None,
                name=None):
  """Multiply a batch of input matrices by a batch of parameter matrices.

  Each input matrix is multiplied by the corresponding parameter matrix.
  This is useful in a mixture-of-experts where the batch represents different
  experts with different inputs.

  Args:
    inputs: a Tensor with shape [batch, length, input_units]
    units: an integer
    activation: an optional activation function to apply to the output
    kernel_initializer: an optional initializer
    reuse: whether to reuse the variable scope
    name: an optional string

  Returns:
    a Tensor with shape [batch, length, units]

  Raises:
    ValueError: if the "batch" or "input_units" dimensions of inputs are not
      statically known.
  """
  inputs_shape = shape_list(inputs)
  if len(inputs_shape) != 3:
    raise ValueError("inputs must have 3 dimensions")
  batch, _, input_units = inputs_shape
  if not (isinstance(batch, int) and isinstance(input_units, int)):
    raise ValueError("inputs must have static dimensions 0 and 2")

  with tf.variable_scope(
      name,
      default_name="batch_dense",
      values=[inputs],
      reuse=reuse,
      dtype=inputs.dtype):
    if kernel_initializer is None:
      kernel_initializer = tf.random_normal_initializer(
          stddev=input_units**-0.5)
    # One [input_units, units] kernel per batch entry.
    kernels = tf.get_variable(
        "w", [batch, input_units, units],
        initializer=kernel_initializer,
        dtype=inputs.dtype)
    outputs = tf.matmul(inputs, kernels)
    if activation is not None:
      outputs = activation(outputs)
    return outputs
def mix(x1,
        x2,
        steps,
        is_training,
        min_prob=0.0,
        max_prob=1.0,
        mode="lin",
        simple=False,
        broadcast_last=False):
  """Mix starting with x2, gradually moving towards x1 over `steps` steps.

  Args:
    x1: the target tensor, chosen with increasing probability.
    x2: the starting tensor.
    steps: number of global steps over which the mixing probability anneals
      from min_prob to max_prob.
    is_training: python boolean; when False, mostly returns x1 (see below).
    min_prob: minimum probability of choosing x1.
    max_prob: maximum probability of choosing x1.
    mode: "lin" uses inverse_lin_decay; anything else uses inverse_exp_decay.
    simple: if True, blend deterministically with weight alpha_p instead of
      sampling a Bernoulli mask.
    broadcast_last: if True, sample one mask value per position shared across
      the last dimension.

  Returns:
    A tensor shaped like x1 / x2.
  """
  with tf.name_scope("mix"):
    if not is_training:
      if max_prob >= 1.0:
        return x1
      # Eval with max_prob < 1: sample a fixed-probability Bernoulli mask.
      alpha_shape = shape_list(x1)
      if broadcast_last:
        alpha_shape = alpha_shape[:-1] + [1]
      alpha = tf.random_uniform(alpha_shape)
      alpha = tf.to_float(tf.less(alpha, max_prob))
      return alpha * x1 + (1.0 - alpha) * x2

    def get_res():
      """Create the result.

      Separate function to speed it up later (see below).

      Returns:
        Tensor of mixed inputs.
      """
      if mode == "lin":
        alpha_p = inverse_lin_decay(steps)
      else:
        alpha_p = inverse_exp_decay(steps)
      # Rescale the decay curve into [min_prob, max_prob].
      alpha_p = alpha_p * (max_prob - min_prob) + min_prob
      if simple:
        return alpha_p * x1 + (1.0 - alpha_p) * x2
      alpha_shape = shape_list(x1)
      if broadcast_last:
        alpha_shape = alpha_shape[:-1] + [1]
      alpha = tf.random_uniform(alpha_shape)
      alpha = tf.to_float(tf.less(alpha, alpha_p))
      return alpha * x1 + (1.0 - alpha) * x2

    if max_prob < 1.0:
      return get_res()

    # Prevent sampling after steps is passed to speed it up.
    if is_xla_compiled():
      return get_res()
    else:
      cur_step = tf.train.get_global_step()
      if cur_step is None:
        return x1  # Step not available, probably eval mode, don't mix.
      return tf.cond(tf.less(cur_step, steps), get_res, lambda: x1)
def brelu(x):
  """Bipolar ReLU as in https://arxiv.org/abs/1709.04054."""
  orig_shape = shape_list(x)
  # Pair up channels: the first of each pair gets ReLU, the second -ReLU(-x).
  paired = tf.reshape(x, orig_shape[:-1] + [-1, 2])
  first, second = tf.split(paired, 2, axis=-1)
  activated = tf.concat([tf.nn.relu(first), -tf.nn.relu(-second)], axis=-1)
  return tf.reshape(activated, orig_shape)
def belu(x):
  """Bipolar ELU as in https://arxiv.org/abs/1709.04054."""
  orig_shape = shape_list(x)
  # Pair up channels: the first of each pair gets ELU, the second -ELU(-x).
  paired = tf.reshape(x, orig_shape[:-1] + [-1, 2])
  first, second = tf.split(paired, 2, axis=-1)
  activated = tf.concat([tf.nn.elu(first), -tf.nn.elu(-second)], axis=-1)
  return tf.reshape(activated, orig_shape)
def nac(x, depth, name=None, reuse=None):
  """NAC as in https://arxiv.org/abs/1808.00508."""
  with tf.variable_scope(name, default_name="nac", values=[x], reuse=reuse):
    x_shape = shape_list(x)
    w_hat = tf.get_variable("w", [x_shape[-1], depth])
    m_hat = tf.get_variable("m", [x_shape[-1], depth])
    # Effective weights are pushed towards {-1, 0, 1} by tanh * sigmoid.
    weights = tf.tanh(w_hat) * tf.nn.sigmoid(m_hat)
    flat_x = tf.reshape(x, [-1, x_shape[-1]])
    flat_out = tf.matmul(flat_x, weights)
    return tf.reshape(flat_out, x_shape[:-1] + [depth])
def nalu(x, depth, epsilon=1e-30, name=None, reuse=None):
  """NALU as in https://arxiv.org/abs/1808.00508."""
  with tf.variable_scope(name, default_name="nalu", values=[x], reuse=reuse):
    x_shape = shape_list(x)
    flat_x = tf.reshape(x, [-1, x_shape[-1]])
    # Gate chooses per-element between the additive and multiplicative paths.
    gate_w = tf.get_variable("w", [x_shape[-1], depth])
    gate = tf.nn.sigmoid(tf.matmul(flat_x, gate_w))
    gate = tf.reshape(gate, x_shape[:-1] + [depth])
    add_branch = nac(x, depth, name="nac_lin")
    # Multiplicative path: NAC in log space, then exponentiate.
    log_x = tf.log(tf.abs(x) + epsilon)
    mul_branch = tf.exp(nac(log_x, depth, name="nac_log"))
    return gate * add_branch + (1 - gate) * mul_branch
def argmax_with_score(logits, axis=None):
  """Argmax along with the value."""
  axis = axis or len(logits.get_shape()) - 1
  predictions = tf.argmax(logits, axis=axis)

  logits_shape = shape_list(logits)
  prefix_shape, vocab_size = logits_shape[:-1], logits_shape[-1]
  prefix_size = 1
  for dim in prefix_shape:
    prefix_size *= dim

  # Flatten to extract scores
  flat_logits = tf.reshape(logits, [prefix_size, vocab_size])
  flat_predictions = tf.reshape(predictions, [prefix_size])
  gather_idx = tf.stack(
      [tf.range(tf.to_int64(prefix_size)),
       tf.to_int64(flat_predictions)], axis=1)
  flat_scores = tf.gather_nd(flat_logits, gather_idx)

  # Unflatten
  scores = tf.reshape(flat_scores, prefix_shape)
  return predictions, scores
def log_prob_from_logits(logits, reduce_axis=-1):
  """Normalize logits into log-probabilities by subtracting log-sum-exp."""
  log_z = tf.reduce_logsumexp(logits, axis=reduce_axis, keepdims=True)
  return logits - log_z
def top_1_tpu(inputs):
  """find max and argmax over the last dimension.

  Works well on TPU

  Args:
    inputs: A tensor with shape [..., depth]

  Returns:
    values: a Tensor with shape [...]
    indices: a Tensor with shape [...]
  """
  max_vals = tf.reduce_max(inputs, axis=-1, keepdims=True)
  # Zero out indices of non-maximal entries; the largest surviving index
  # is the argmax (ties resolve to the highest index).
  is_max = tf.to_int32(tf.equal(max_vals, inputs))
  masked_index = tf.range(tf.shape(inputs)[-1]) * is_max
  return tf.squeeze(max_vals, -1), tf.reduce_max(masked_index, axis=-1)
def index_last_dim_with_indices(x, indices):
  """Use indices to index into the last axis of x.

  This can be useful for recovering the actual probabilities of a sample from a
  probability distribution.

  Args:
    x: Tensor, n-d.
    indices: Tensor, (n-1)-d, where the dimension sizes match the first (n-1)
      dimensions of x. The values of indices will be used to index into the last
      axis of x.

  Returns:
    Tensor, (n-1)-d.
  """
  assert len(x.shape) == len(indices.shape) + 1

  x_shape = shape_list(x)
  vocab_size = x_shape[-1]
  outer_size = list_product(x_shape[:-1])

  flat_x = tf.reshape(x, [outer_size, vocab_size])
  flat_indices = tf.reshape(indices, [outer_size])
  gather_idx = tf.stack(
      [
          tf.range(tf.to_int64(shape_list(flat_indices)[0])),
          tf.to_int64(flat_indices)
      ],
      axis=1)
  flat_result = tf.gather_nd(flat_x, gather_idx)

  return tf.reshape(flat_result, x_shape[:-1])
def should_generate_summaries():
  """Is this an appropriate context to generate summaries.

  Returns:
    a boolean
  """
  current_scope = tf.contrib.framework.get_name_scope()
  if current_scope and "while/" in current_scope:
    return False  # Summaries don't work well within tf.while_loop()
  if tf.get_variable_scope().reuse:
    return False  # Avoid separate summaries for different data shards
  return True
def reshape_like(a, b):
  """Reshapes a to match the shape of b in all but the last dimension."""
  target_shape = tf.concat([tf.shape(b)[:-1], tf.shape(a)[-1:]], 0)
  out = tf.reshape(a, target_shape)
  if not tf.contrib.eager.in_eager_mode():
    out.set_shape(b.get_shape().as_list()[:-1] + a.get_shape().as_list()[-1:])
  return out
def summarize_video(video, prefix, max_outputs=1):
  """Summarize the video using image summaries starting with prefix."""
  video_shape = shape_list(video)
  if len(video_shape) != 5:
    raise ValueError("Assuming videos given as tensors in the format "
                     "[batch, time, height, width, channels] but got one "
                     "of shape: %s" % str(video_shape))
  if tf.contrib.eager.in_eager_mode():
    return
  if video.get_shape().as_list()[1] is None:
    # Unknown number of frames: summarize only the last one.
    tf.summary.image(
        "%s_last_frame" % prefix,
        tf.cast(video[:, -1, :, :, :], tf.uint8),
        max_outputs=max_outputs)
    return
  # Known frame count: one image summary per frame.
  for frame_idx in range(video_shape[1]):
    tf.summary.image(
        "%s_frame_%d" % (prefix, frame_idx),
        tf.cast(video[:, frame_idx, :, :, :], tf.uint8),
        max_outputs=max_outputs)
def cast_like(x, y):
  """Cast x to y's dtype, if necessary."""
  x = tf.convert_to_tensor(x)
  y = tf.convert_to_tensor(y)

  if x.dtype.base_dtype == y.dtype.base_dtype:
    return x

  converted = tf.cast(x, y.dtype)
  if converted.device != x.device:
    # Casting across devices implies a transfer; surface that to the user.
    tf.logging.warning("Cast for %s may induce copy from '%s' to '%s'", x.name,
                       x.device, converted.device)
  return converted
def make_even_size(x):
  """Pad x to be even-sized on axis 1 and 2, but only if necessary."""
  x_shape = x.get_shape().as_list()
  assert len(x_shape) > 2, "Only 3+-dimensional tensors supported."
  # Unknown dims become -1; note (-1) % 2 == 1 in Python, so dynamic axes
  # are always treated as odd and get padded below.
  shape = [dim if dim is not None else -1 for dim in x_shape]
  # NOTE(review): new_shape aliases x_shape (no copy); the assignments below
  # mutate both. Harmless here because `shape` was captured above, but be
  # careful if reordering.
  new_shape = x_shape  # To make sure constant shapes remain constant.
  if x_shape[1] is not None:
    # Round axis 1 up to the next even number.
    new_shape[1] = 2 * int(math.ceil(x_shape[1] * 0.5))
  if x_shape[2] is not None:
    # Round axis 2 up to the next even number.
    new_shape[2] = 2 * int(math.ceil(x_shape[2] * 0.5))
  if shape[1] % 2 == 0 and shape[2] % 2 == 0:
    return x
  if shape[1] % 2 == 0:
    x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=2)
    x.set_shape(new_shape)
    return x
  if shape[2] % 2 == 0:
    x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=1)
    x.set_shape(new_shape)
    return x
  x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=1)
  x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=2)
  x.set_shape(new_shape)
  return x
def sliced_gan_loss(input1,
                    input2,
                    discriminator,
                    num_vecs,
                    do_random_vecs=True,
                    do_tanh=True,
                    return_logits=False):
  """Loss inspired by the sliced WGAN paper: https://arxiv.org/abs/1804.01947.

  Puts input1 and input2 through the provided discriminator to get logits.
  Then, computes num_vecs random projections of the logits, sorts them on
  the batch dimension and returns the L2 loss between the sorted vectors.
  See the above-mentioned paper for the reasoning behind it.

  Args:
    input1: first discriminator inputs.
    input2: second discriminator inputs.
    discriminator: inputs -> logits function.
    num_vecs: how many random vectors to use for projections.
    do_random_vecs: whether to use random vectors or just tanh of the logits.
    do_tanh: if true (default) we'll also just use tanh of the logits.
    return_logits: Whether or not to return the logits.

  Returns:
    The generator loss, i.e., the sliced approximation of the distance between
    the projected distributions (warning: discriminator should maximize it).
  """
  with tf.variable_scope("sliced_gan"):
    with tf.variable_scope("discriminator"):
      logits1 = discriminator(input1)
    # Second pass reuses the same discriminator weights.
    with tf.variable_scope("discriminator", reuse=True):
      logits2 = discriminator(input2)

    if do_random_vecs:
      # Unit-norm projection directions, one column per projection.
      random_vecs = tf.nn.l2_normalize(
          tf.random_uniform([shape_list(logits1)[-1], num_vecs]), axis=0)

    def get_sorted_projections(x):
      """Make projections of x and sort them on the batch dimension."""
      x = tf.reshape(x, [-1, shape_list(x)[-1]])
      batch_size = shape_list(x)[0]
      if do_random_vecs and do_tanh:
        n = tf.nn.l2_normalize(x, axis=1)
        proj = tf.concat([tf.matmul(n, random_vecs), tf.tanh(n)], axis=1)
      elif do_random_vecs:
        n = tf.nn.l2_normalize(x, axis=1)
        proj = tf.matmul(n, random_vecs)
      else:  # Only tanh of the raw logits.
        proj = tf.tanh(x)
      proj = tf.transpose(proj, [1, 0])  # [num_vecs, batch] after this.

      if is_xla_compiled():
        proj_dtype = proj.dtype
        proj = tf.cast(proj, tf.bfloat16)

        # Currently TPU only supports 1-D top_k calls.
        map_fn = lambda x: tf.nn.top_k(x, k=batch_size, sorted=True)[0]
        values = tf.map_fn(map_fn, proj)

        values = tf.cast(values, proj_dtype)
      else:
        # Full-width top_k is a descending sort along the batch dimension.
        values, _ = tf.nn.top_k(proj, k=batch_size, sorted=True)

      return values

    proj1 = get_sorted_projections(logits1)
    proj2 = get_sorted_projections(logits2)
    # Mean squared distance between the sorted projections.
    dist = tf.reduce_mean(tf.square(proj1 - proj2))
    if return_logits:
      return dist, logits1, logits2
    return dist
def lrelu(input_, leak=0.2, name="lrelu"):
  """Leaky ReLU: identity for positive values, slope `leak` for negatives."""
  return tf.maximum(input_, input_ * leak, name=name)
def deep_discriminator(x,
                       batch_norm,
                       is_training,
                       filters=64,
                       filter_size=4,
                       stride=2,
                       output_size=1024):
  """Discriminator architecture based on InfoGAN.

  Args:
    x: input images, [batch, height, width, channels].
    batch_norm: bool, whether to apply batch normalization after the convs.
    is_training: bool, forwarded to batch normalization.
    filters: int, filter count of the first conv (doubled in the second).
    filter_size: int, conv kernel size.
    stride: int, conv stride (each conv downsamples by this factor).
    output_size: int, width of the final dense layer.

  Returns:
    Discriminator features of shape [batch, output_size].
  """
  with tf.variable_scope(
      "discriminator", initializer=tf.random_normal_initializer(stddev=0.02)):
    batch_size, height, width = shape_list(x)[:3]
    net = tf.layers.conv2d(
        x, filters, filter_size, strides=stride, padding="SAME", name="conv1")
    net = lrelu(net)
    net = tf.layers.conv2d(
        net,
        2 * filters,
        filter_size,
        strides=stride,
        padding="SAME",
        name="conv2")
    # [bs, h/4, w/4, 128]
    if batch_norm:
      net = tf.layers.batch_normalization(
          net, training=is_training, momentum=0.999, name="d_bn2")
    net = lrelu(net)
    size = height * width
    x_shape = x.get_shape().as_list()
    if x_shape[1] is None or x_shape[2] is None:
      # Dynamic spatial dims: fall back to global mean pooling.
      net = tf.reduce_mean(net, axis=[1, 2])
    else:
      # (h/4) * (w/4) * (2*filters) == size * 8 only for the defaults
      # filters=64, stride=2. NOTE(review): the factor 8 hard-codes those
      # defaults -- confirm before changing them.
      net = tf.reshape(net, [batch_size, size * 8])
    net = tf.layers.dense(net, output_size, name="d_fc3")
    if batch_norm:
      net = tf.layers.batch_normalization(
          net, training=is_training, momentum=0.999, name="d_bn3")
    net = lrelu(net)
    return net
def instance_norm(x):
  """Instance normalization layer.

  Normalizes each example independently over its spatial axes (1, 2), then
  applies a learned per-channel scale and offset.
  """
  with tf.variable_scope("instance_norm"):
    epsilon = 1e-5
    # Per-example, per-channel spatial statistics.
    mean, var = tf.nn.moments(x, [1, 2], keep_dims=True)
    scale = tf.get_variable(
        "scale", [x.get_shape()[-1]],
        initializer=tf.truncated_normal_initializer(mean=1.0, stddev=0.02))
    offset = tf.get_variable(
        "offset", [x.get_shape()[-1]], initializer=tf.constant_initializer(0.0))
    out = scale * tf.div(x - mean, tf.sqrt(var + epsilon)) + offset

    return out
def general_conv(x,
                 num_filters=64,
                 filter_size=7,
                 stride=1,
                 stddev=0.02,
                 padding="VALID",
                 name="conv",
                 do_norm="instance",
                 do_relu=True,
                 relufactor=0):
  """Generalized convolution layer.

  Args:
    x: input tensor [batch, height, width, channels].
    num_filters: int, number of output filters.
    filter_size: int, conv kernel size.
    stride: int, conv stride.
    stddev: float, stddev of the truncated-normal kernel initializer.
    padding: str, padding mode passed to tf.layers.conv2d.
    name: str, variable scope name.
    do_norm: "instance", "layer", or any other value for no normalization.
    do_relu: bool, whether to apply a (possibly leaky) ReLU at the end.
    relufactor: float, leak factor; 0 means plain ReLU.

  Returns:
    The convolved (and normalized/activated) tensor.
  """
  with tf.variable_scope(name):
    x = tf.layers.conv2d(
        x,
        num_filters,
        filter_size,
        stride,
        padding,
        activation=None,
        kernel_initializer=tf.truncated_normal_initializer(stddev=stddev),
        bias_initializer=tf.constant_initializer(0.0))
    if do_norm == "layer":
      x = tf.contrib.layers.layer_norm(x)
    elif do_norm == "instance":
      x = instance_norm(x)

    if do_relu:
      if relufactor == 0:
        x = tf.nn.relu(x, "relu")
      else:
        x = lrelu(x, leak=relufactor)
    return x
def patch_discriminator(x, filters=64, filter_size=5, n=4,
                        name="patch_discrim"):
  """Patch discriminator: n conv layers applied to a random quarter-crop.

  Args:
    x: input images [batch, height, width, channels].
    filters: int, base filter count (doubled at every layer).
    filter_size: int, conv kernel size.
    n: int, number of conv layers.
    name: str, variable scope name.

  Returns:
    Spatially mean-pooled features, [batch, filters * 2**(n-1)].
  """
  with tf.variable_scope(name):
    x_shape = shape_list(x)
    spatial_dims = [x_shape[1] // 4, x_shape[2] // 4]
    # Discriminate a randomly chosen patch of 1/4 height and 1/4 width.
    x = tf.random_crop(x, [x_shape[0]] + spatial_dims + [x_shape[3]])
    for i in range(n):
      # First layer: no normalization; last layer: stride 1 and no relu.
      x = general_conv(
          x=x,
          num_filters=filters * 2**i,
          filter_size=filter_size,
          stride=2 if i != n - 1 else 1,
          stddev=0.02,
          padding="SAME",
          name="c%d" % i,
          do_norm="instance" if i != 0 else False,
          do_relu=i != n - 1,
          relufactor=0.2)
    x = tf.reduce_mean(x, [1, 2])
    return x
def mean_with_attention(x, name, num_heads=4):
  """Mean and attention to reduce spatial dimensions.

  Combines `num_heads` attention-weighted spatial means with the plain
  spatial mean of `x`, then mixes them through a dense layer.

  Args:
    x: tensor [batch, height, width, channels].
    name: str, variable scope name.
    num_heads: int, number of attention heads.

  Returns:
    Reduced features of shape [batch, 2 * channels].
  """
  with tf.variable_scope(name):
    shape = shape_list(x)
    m = tf.reduce_mean(x, [1, 2])
    a = tf.layers.dense(x, num_heads, name="mean_attn")
    s = tf.reshape(a, [shape[0], -1, num_heads])
    # Softmax over all (flattened) spatial positions, separately per head.
    s = tf.nn.softmax(s, axis=1)
    s = tf.reshape(s, shape[:-1] + [1, num_heads])
    # Attention-weighted spatial mean, one per head.
    am = tf.reduce_mean(tf.expand_dims(x, axis=-1) * s, [1, 2])
    l = tf.concat([am, tf.expand_dims(m, axis=-1)], axis=-1)
    return tf.layers.dense(tf.reshape(l, [shape[0], (num_heads+1) * shape[-1]]),
                           2 * shape[-1], name="mean_attn_final")
def single_discriminator(x, filters=128, kernel_size=8,
                         strides=4, pure_mean=False):
  """A simple single-layer convolutional discriminator.

  Args:
    x: input images [batch, height, width, channels].
    filters: int, number of convolution filters.
    kernel_size: int, convolution kernel size.
    strides: int, convolution stride.
    pure_mean: bool, if True reduce spatially with a plain mean rather than
      mean_with_attention.

  Returns:
    Pooled discriminator features of shape [batch, d].
  """
  with tf.variable_scope("discriminator"):
    net = tf.layers.conv2d(
        x, filters, kernel_size, strides=strides, padding="SAME", name="conv1")
    if pure_mean:
      net = tf.reduce_mean(net, [1, 2])
    else:
      net = mean_with_attention(net, "mean_with_attention")
    return net
def double_discriminator(x, filters1=128, filters2=None,
                         kernel_size=8, strides=4, pure_mean=False):
  """A convolutional discriminator with 2 layers and concatenated output.

  Args:
    x: input images [batch, height, width, channels].
    filters1: int, number of filters in the first convolution.
    filters2: int or None, number of filters in the second convolution;
      defaults to 4 * filters1.
    kernel_size: int, convolution kernel size for both layers.
    strides: int, convolution stride for both layers.
    pure_mean: bool, if True reduce spatially with a plain mean rather than
      mean_with_attention.

  Returns:
    Concatenation of both layers' pooled features, [batch, d1 + d2].
  """
  if filters2 is None:
    filters2 = 4 * filters1
  with tf.variable_scope("discriminator"):
    net = tf.layers.conv2d(
        x, filters1, kernel_size, strides=strides, padding="SAME", name="conv1")
    if pure_mean:
      net1 = tf.reduce_mean(net, [1, 2])
    else:
      net1 = mean_with_attention(net, "mean_with_attention1")
    net = tf.nn.relu(net)
    # Chain the second convolution on the first layer's activations, as the
    # "2 layers" contract implies. Previously conv2 consumed the raw input
    # `x`, which made the relu above dead computation; a dangling,
    # un-assigned tf.reshape was also removed.
    net = tf.layers.conv2d(
        net, filters2, kernel_size, strides=strides, padding="SAME",
        name="conv2")
    if pure_mean:
      net2 = tf.reduce_mean(net, [1, 2])
    else:
      net2 = mean_with_attention(net, "mean_with_attention2")
    return tf.concat([net1, net2], axis=-1)
def upscale(inputs, f, method=tf.image.ResizeMethod.NEAREST_NEIGHBOR):
  """Upscaling the image by a factor of f."""
  shape = shape_list(inputs)
  target_size = (shape[1] * f, shape[2] * f)
  return tf.image.resize_images(inputs, target_size, method)
def tpu_safe_image_summary(image):
  """Cast `image` for summaries: float32 under XLA/TPU, uint8 elsewhere."""
  if is_xla_compiled():
    # We only support float32 images at the moment due to casting complications.
    if image.dtype != tf.float32:
      image = tf.to_float(image)
    return image
  return tf.cast(image, tf.uint8)
# This has been (shamefully) copied from
# GitHub tensorflow/models/blob/master/research/slim/nets/cyclegan.py
#
# tensorflow/models cannot be pip installed, and even if it were we don't want
# to depend on all the models in it.
#
# Therefore copying and forgoing any more bugfixes into it is the most
# expedient way to use this function.
def cyclegan_upsample(net, num_outputs, stride, method="conv2d_transpose"):
  """Upsamples the given inputs.

  Args:
    net: A Tensor of size [batch_size, height, width, filters].
    num_outputs: The number of output filters.
    stride: A list of 2 scalars or a 1x2 Tensor indicating the scale,
      relative to the inputs, of the output dimensions. For example, if stride
      is [2, 3], then the output height and width will be twice and three
      times the input size.
    method: The upsampling method: 'nn_upsample_conv',
      'bilinear_upsample_conv', or 'conv2d_transpose'.

  Returns:
    A Tensor which was upsampled using the specified method.

  Raises:
    ValueError: if `method` is not recognized.
  """
  with tf.variable_scope("upconv"):
    net_shape = tf.shape(net)
    height = net_shape[1]
    width = net_shape[2]

    # Reflection pad by 1 in spatial dimensions (axes 1, 2 = h, w) to make a
    # 3x3 "valid" convolution produce an output with the same dimension as the
    # input.
    spatial_pad_1 = np.array([[0, 0], [1, 1], [1, 1], [0, 0]])

    if method == "nn_upsample_conv":
      # Nearest-neighbor resize, then a same-size 3x3 conv.
      net = tf.image.resize_nearest_neighbor(
          net, [stride[0] * height, stride[1] * width])
      net = tf.pad(net, spatial_pad_1, "REFLECT")
      net = tf.contrib.layers.conv2d(
          net, num_outputs, kernel_size=[3, 3], padding="valid")
    elif method == "bilinear_upsample_conv":
      # Bilinear resize, then a same-size 3x3 conv.
      net = tf.image.resize_bilinear(net,
                                     [stride[0] * height, stride[1] * width])
      net = tf.pad(net, spatial_pad_1, "REFLECT")
      net = tf.contrib.layers.conv2d(
          net, num_outputs, kernel_size=[3, 3], padding="valid")
    elif method == "conv2d_transpose":
      # This corrects 1 pixel offset for images with even width and height.
      # conv2d is left aligned and conv2d_transpose is right aligned for even
      # sized images (while doing "SAME" padding).
      # Note: This doesn"t reflect actual model in paper.
      net = tf.contrib.layers.conv2d_transpose(
          net, num_outputs, kernel_size=[3, 3], stride=stride, padding="valid")
      net = net[:, 1:, 1:, :]
    else:
      raise ValueError("Unknown method: [%s]" % method)

    return net
def weight_targeting(w, k):
  """Weight-level magnitude pruning.

  Builds a mask over `w` that is 1.0 for the `k` smallest-magnitude weights
  of each output unit (last axis) and 0.0 elsewhere, for use as a targeted
  dropout / pruning mask.

  Args:
    w: weight tensor; the last axis indexes output units.
    k: int or scalar int tensor, number of weights to target per unit.

  Returns:
    A float mask with the same shape as `w`.
  """
  k = tf.to_int32(k)
  w_shape = shape_list(w)
  size = tf.to_int32(tf.reduce_prod(w_shape[:-1]))
  w = tf.reshape(w, [size, w_shape[-1]])

  transpose_w = tf.transpose(w)
  # k-th smallest |w| per output unit; weights at or below it are targeted.
  thres = tf.contrib.framework.sort(tf.abs(transpose_w), axis=1)[:, k]
  mask = tf.to_float(thres[None, :] >= tf.abs(w))

  return tf.reshape(mask, w_shape)
def unit_targeting(w, k):
  """Unit-level magnitude pruning.

  Builds a mask over `w` that is 1.0 for every weight of the `k` output
  units (columns) with the smallest L2 norm and 0.0 elsewhere.

  Args:
    w: weight tensor; the last axis indexes output units.
    k: int or scalar int tensor, number of units to target.

  Returns:
    A float mask with the same shape as `w`.
  """
  k = tf.to_int32(k)
  w_shape = shape_list(w)
  size = tf.to_int32(tf.reduce_prod(w_shape[:-1]))
  w = tf.reshape(w, [size, w_shape[-1]])

  norm = tf.norm(w, axis=0)
  # k-th smallest column norm; whole columns at or below it are targeted.
  thres = tf.contrib.framework.sort(norm, axis=0)[k]
  mask = tf.to_float(thres >= norm)[None, :]
  mask = tf.tile(mask, [size, 1])

  return tf.reshape(mask, w_shape)
def td_conv(inputs,
            filters,
            kernel_size,
            targeting_count,
            targeting_fn,
            keep_prob,
            is_training,
            do_prune=True,
            strides=(1, 1),
            padding="valid",
            data_format="channels_last",
            dilation_rate=(1, 1),
            activation=None,
            use_bias=True,
            kernel_initializer=None,
            bias_initializer=tf.zeros_initializer(),
            name=None,
            reuse=None):
  """Apply targeted dropout to the weights of a convolution.

  Args:
    inputs: 4-D input tensor (NHWC when `data_format` is "channels_last",
      NCHW otherwise).
    filters: int, number of output filters.
    kernel_size: int, square kernel size.
    targeting_count: count of weights/units targeted by `targeting_fn`.
    targeting_fn: callable `fn(weights, k) -> bool mask`, see
      targeted_dropout.
    keep_prob: float, dropout keep probability for the targeted weights.
    is_training: bool, forwarded to targeted_dropout.
    do_prune: bool, forwarded to targeted_dropout.
    strides: int or pair of ints, spatial convolution strides.
    padding: str, "same"/"valid" in either case (normalized below).
    data_format: "channels_last" or "channels_first".
    dilation_rate: int or pair of ints, spatial dilation.
    activation: optional callable applied to the output.
    use_bias: bool, whether to add a per-filter bias.
    kernel_initializer: initializer for the kernel variable.
    bias_initializer: initializer for the bias variable.
    name: str, variable scope name.
    reuse: bool, variable scope reuse flag.

  Returns:
    The convolution output (with bias/activation applied if requested).
  """
  with tf.variable_scope(name, default_name="td_conv", reuse=reuse):
    nhwc = data_format == "channels_last"
    in_dim = shape_list(inputs)[-1] if nhwc else shape_list(inputs)[1]

    kernel_shape = [kernel_size, kernel_size, in_dim, filters]
    w = tf.get_variable(
        "DW", shape=kernel_shape, initializer=kernel_initializer)
    if use_bias:
      b = tf.get_variable("b", shape=[filters], initializer=bias_initializer)

    if keep_prob < 1.0:
      w = targeted_dropout(
          w,
          targeting_count,
          keep_prob,
          targeting_fn,
          is_training,
          do_prune=do_prune)

    if isinstance(strides, int):
      strides = [strides, strides]
    if isinstance(dilation_rate, int):
      dilation_rate = [dilation_rate, dilation_rate]

    if nhwc:
      strides = [1, strides[0], strides[1], 1]
      dilation_rate = [1, dilation_rate[0], dilation_rate[1], 1]
    else:
      strides = [1, 1, strides[0], strides[1]]
      dilation_rate = [1, 1, dilation_rate[0], dilation_rate[1]]

    # tf.nn.conv2d accepts only upper-case padding ("SAME"/"VALID"); the
    # tf.layers-style default "valid" would otherwise raise. Normalize here
    # (upper-case arguments pass through unchanged).
    if isinstance(padding, str):
      padding = padding.upper()

    y = tf.nn.conv2d(
        inputs,
        w,
        strides,
        padding,
        data_format="NHWC" if nhwc else "NCHW",
        dilations=dilation_rate,
        name=None)

    if use_bias:
      y += b

    if activation:
      y = activation(y)

    return y
def targeted_dropout(inputs,
                     k,
                     keep_prob,
                     targeting_fn,
                     is_training,
                     do_prune=False):
  """Applies targeted dropout.

  Applies dropout at a rate of `1 - keep_prob` to only those elements of
  `inputs` marked by `targeting_fn`. See below and paper for more detail:

  "Targeted Dropout for Posthoc Pruning" Aidan N. Gomez, Ivan Zhang,
  Kevin Swersky, Yarin Gal, and Geoffrey E. Hinton.

  Args:
    inputs: Tensor, inputs to apply targeted dropout to.
    k: Scalar Tensor or python scalar, sets the number of elements to target in
      `inputs`. Must be within `[0, tf.shape(x)[-1]]` and compatible with
      second argument of `targeting_fn`.
    keep_prob: Scalar Tensor, passed as `tf.nn.dropout`'s `keep_prob` argument.
    targeting_fn: callable `fn(inputs, k) -> Boolean Tensor`, produces a
      boolean mask the same shape as `inputs` where True indicates an element
      will be dropped, and False not.
    is_training: bool, indicates whether currently training.
    do_prune: bool, indicates whether to prune the `k * (1 - keep_prob)`
      elements of `inputs` expected to be dropped each forwards pass.

  Returns:
    Tensor, same shape and dtype as `inputs`.
  """
  # At evaluation time with pruning enabled, only the expected number of
  # dropped elements, k * (1 - keep_prob), is actually targeted.
  if not is_training and do_prune:
    k = tf.round(tf.to_float(k) * tf.to_float(1. - keep_prob))

  mask = targeting_fn(inputs, k)
  mask = tf.cast(mask, inputs.dtype)

  if is_training:
    # Untargeted elements pass through; targeted ones get regular dropout.
    return inputs * (1 - mask) + tf.nn.dropout(inputs, keep_prob) * mask
  elif do_prune:
    # Deterministically zero out the targeted elements.
    return inputs * (1 - mask)
  else:
    return inputs
def kl_divergence(mu, log_var, mu_p=0.0, log_var_p=0.0):
  """KL divergence of diagonal gaussian N(mu,exp(log_var)) and N(0,1).

  Args:
    mu: mu parameter of the distribution.
    log_var: log(var) parameter of the distribution.
    mu_p: optional mu from a learned prior distribution
    log_var_p: optional log(var) from a learned prior distribution

  Returns:
    the KL loss, summed over all non-batch dimensions and averaged over the
    batch (first) dimension.
  """
  batch_size = shape_list(mu)[0]
  # exp(0.5 * log_var) converts a log-variance into a standard deviation.
  prior_distribution = tfp.distributions.Normal(
      mu_p, tf.exp(tf.multiply(0.5, log_var_p)))
  posterior_distribution = tfp.distributions.Normal(
      mu, tf.exp(tf.multiply(0.5, log_var)))
  kld = tfp.distributions.kl_divergence(posterior_distribution,
                                        prior_distribution)
  return tf.reduce_sum(kld) / tf.to_float(batch_size)
def sparse_equals_constant(constant, tensor):
  """Elementwise `tensor.values == constant`, keeping the sparse structure."""
  equal_values = tf.equal(tensor.values, constant)
  return tf.SparseTensor(
      indices=tensor.indices,
      values=equal_values,
      dense_shape=tensor.dense_shape)
def sparse_expand_dims(tensor, current_num_dims, axis=0):
  """Sparse analogue of tf.expand_dims: insert a size-1 dim at `axis`.

  Args:
    tensor: a tf.SparseTensor of rank `current_num_dims`.
    current_num_dims: int, the static rank of `tensor`.
    axis: int, position at which to insert the new dimension (-1 appends).

  Returns:
    A tf.SparseTensor with one additional (size-1) dimension.
  """
  if axis == -1:
    axis = current_num_dims

  # Index column for the new dimension: always 0, since the dim has size 1.
  new_col = tf.zeros([tf.shape(tensor.indices)[0]], dtype=tf.int64)
  cols = tf.unstack(tensor.indices, axis=1, num=current_num_dims)
  shape = tf.unstack(tensor.dense_shape, num=current_num_dims)
  new_indices = tf.stack(cols[:axis] + [new_col] + cols[axis:], axis=1)
  return tf.SparseTensor(
      indices=new_indices,
      values=tensor.values,
      dense_shape=tf.stack(shape[:axis] + [1] + shape[axis:]))
def sparse_add_constant(constant, tensor):
  """Add a scalar `constant` to every stored value of a SparseTensor."""
  shifted_values = constant + tensor.values
  return tf.SparseTensor(
      indices=tensor.indices,
      values=shifted_values,
      dense_shape=tensor.dense_shape)
def sparse_eye(size):
  """Return a sparse `size` x `size` identity matrix.

  Args:
    size: scalar int (or int Tensor), number of rows/columns.

  Returns:
    A tf.SparseTensor with ones on the main diagonal.
  """
  diag = tf.range(size)
  # SparseTensor indices must be shaped [nnz, rank]; stack the (i, i) pairs
  # along axis 1. (The previous axis-0 stack produced a [2, size] matrix,
  # which is not a valid index list for a rank-2 tensor.)
  indices = tf.cast(tf.stack([diag, diag], axis=1), tf.int64)
  values = tf.ones(size)
  dense_shape = [tf.cast(size, tf.int64), tf.cast(size, tf.int64)]
  return tf.SparseTensor(
      indices=indices, values=values, dense_shape=dense_shape)
# modification from https://github.com/tensorflow/tensorflow/pull/21276
# without special initialization for g
class WeightNorm(tf.keras.layers.Wrapper):
  """ This wrapper reparameterizes a layer by decoupling the weight's
  magnitude and direction. This speeds up convergence by improving the
  conditioning of the optimization problem.

  Weight Normalization: A Simple Reparameterization to Accelerate
  Training of Deep Neural Networks: https://arxiv.org/abs/1602.07868
  Tim Salimans, Diederik P. Kingma (2016)

  WeightNorm wrapper works for keras and tf layers.

  ```python
    net = WeightNorm(tf.keras.layers.Conv2D(2, 2, activation='relu'),
           input_shape=(32, 32, 3), data_init=True)(x)
    net = WeightNorm(tf.keras.layers.Conv2D(16, 5, activation='relu'),
                     data_init=True)
    net = WeightNorm(tf.keras.layers.Dense(120, activation='relu'),
                     data_init=True)(net)
    net = WeightNorm(tf.keras.layers.Dense(n_classes),
                     data_init=True)(net)
  ```

  Arguments:
    layer: a layer instance.
    data_init: If `True` use data dependent variable initialization

  Raises:
    ValueError: If not initialized with a `Layer` instance.
    ValueError: If `Layer` does not contain a `kernel` of weights
    NotImplementedError: If `data_init` is True and running graph execution
  """

  def __init__(self, layer, data_init=False, **kwargs):
    # NOTE(review): `data_init` is accepted for API compatibility, but the
    # data-dependent initialization path is currently disabled (see the
    # commented-out code in `call`).
    if not isinstance(layer, tf.keras.layers.Layer):
      raise ValueError(
          "Please initialize `WeightNorm` layer with a "
          "`Layer` instance. You passed: {input}".format(input=layer))

    super(WeightNorm, self).__init__(layer, **kwargs)
    self._track_checkpointable(layer, name="layer")

  def _compute_weights(self):
    """Generate weights with normalization."""
    with tf.variable_scope("compute_weights"):
      # kernel = g * v / ||v||, normalized over all axes except the last.
      self.layer.kernel = tf.nn.l2_normalize(
          self.layer.v, axis=self.norm_axes) * self.layer.g

  def _init_norm(self, weights):
    """Set the norm of the weight vector."""
    with tf.variable_scope("init_norm"):
      flat = tf.reshape(weights, [-1, self.layer_depth])
      return tf.reshape(tf.norm(flat, axis=0), (self.layer_depth,))

  def _data_dep_init(self, inputs):
    """Data dependent initialization for eager execution."""
    with tf.variable_scope("data_dep_init"):
      # Generate data dependent init values
      activation = self.layer.activation
      self.layer.activation = None
      x_init = self.layer.call(inputs)
      # Fixed: the public moments op is `tf.nn.moments`; `tf.moments` does
      # not exist and would raise AttributeError when this path is enabled.
      m_init, v_init = tf.nn.moments(x_init, self.norm_axes)
      scale_init = 1. / tf.sqrt(v_init + 1e-10)

    # Assign data dependent init values
    self.layer.g = self.layer.g * scale_init
    self.layer.bias = (-m_init * scale_init)
    self.layer.activation = activation
    self.initialized = True

  def build(self, input_shape=None):
    """Build `Layer`."""
    input_shape = tf.TensorShape(input_shape).as_list()
    self.input_spec = tf.layers.InputSpec(shape=input_shape)

    if not self.layer.built:
      self.layer.build(input_shape)
      # Temporarily mark unbuilt while the kernel is being reparameterized.
      self.layer.built = False

      if not hasattr(self.layer, "kernel"):
        raise ValueError("`WeightNorm` must wrap a layer that"
                         " contains a `kernel` for weights")

      # The kernel's filter or unit dimension is -1
      self.layer_depth = int(self.layer.kernel.shape[-1])
      self.norm_axes = list(range(self.layer.kernel.shape.ndims - 1))

      # Direction variable `v` (the original kernel) and magnitude `g`.
      self.layer.v = self.layer.kernel
      self.layer.g = self.layer.add_variable(
          name="g",
          shape=(self.layer_depth,),
          initializer=tf.ones_initializer,
          dtype=self.layer.kernel.dtype,
          trainable=True)

      # with ops.control_dependencies([self.layer.g.assign(
      #     self._init_norm(self.layer.v))]):
      #   self._compute_weights()
      self._compute_weights()

      self.layer.built = True

    super(WeightNorm, self).build()
    self.built = True

  def call(self, inputs):
    """Call `Layer`."""
    # if context.executing_eagerly():
    #   if not self.initialized:
    #     self._data_dep_init(inputs)
    self._compute_weights()  # Recompute weights for each forward pass

    output = self.layer.call(inputs)
    return output

  def compute_output_shape(self, input_shape):
    return tf.TensorShape(
        self.layer.compute_output_shape(input_shape).as_list())
|
mlperf/training_results_v0.5
|
v0.5.0/google/research_v3.32/gnmt-tpuv3-32/code/gnmt/model/t2t/tensor2tensor/layers/common_layers.py
|
Python
|
apache-2.0
| 132,971
|
[
"Gaussian"
] |
68ff514ffcdb5d969400bc78b612ad25e991031b5bd80f0eab7b68663f32e37e
|
#!/usr/bin/env python
# encoding: utf-8
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2009-2011 by the RMG Team (rmg_dev@mit.edu)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
"""
This module contains classes and functions for working with chemical reactions.
From the `IUPAC Compendium of Chemical Terminology
<http://dx.doi.org/10.1351/goldbook>`_, a chemical reaction is "a process that
results in the interconversion of chemical species".
In RMG Py, a chemical reaction is represented in memory as a :class:`Reaction`
object. This module also provides the :class:`ReactionModel` class for
representing a set of chemical reactions and the species involved.
"""
import cython
import math
import numpy
import logging
import re
import os.path
from copy import copy, deepcopy
import urllib
import rmgpy.constants as constants
from rmgpy.molecule.molecule import Molecule, Atom
from rmgpy.molecule.element import Element
from rmgpy.species import Species
from rmgpy.kinetics.arrhenius import Arrhenius #PyDev: @UnresolvedImport
from rmgpy.kinetics import KineticsData, ArrheniusEP, ThirdBody, Lindemann, Troe, Chebyshev, PDepArrhenius, MultiArrhenius, MultiPDepArrhenius, getRateCoefficientUnitsFromReactionOrder #PyDev: @UnresolvedImport
from rmgpy.pdep.reaction import calculateMicrocanonicalRateCoefficient
from rmgpy.kinetics.diffusionLimited import diffusionLimiter
################################################################################
class ReactionError(Exception):
    """
    An exception class for exceptional behavior involving :class:`Reaction`
    objects. Pass a string describing the circumstances that caused the
    exceptional behavior.
    """
    # No extra behavior beyond the standard Exception; the class exists so
    # reaction-related failures can be caught specifically.
    pass
################################################################################
class Reaction:
"""
A chemical reaction. The attributes are:
=================== =========================== ============================
Attribute Type Description
=================== =========================== ============================
`index` :class:`int` A unique nonnegative integer index
`label` ``str`` A descriptive string label
`reactants` :class:`list` The reactant species (as :class:`Species` objects)
`products` :class:`list` The product species (as :class:`Species` objects)
`kinetics` :class:`KineticsModel` The kinetics model to use for the reaction
`reversible` ``bool`` ``True`` if the reaction is reversible, ``False`` if not
`transitionState` :class:`TransitionState` The transition state
`duplicate` ``bool`` ``True`` if the reaction is known to be a duplicate, ``False`` if not
`degeneracy` :class:`double` The reaction path degeneracy for the reaction
`pairs` ``list`` Reactant-product pairings to use in converting reaction flux to species flux
=================== =========================== ============================
"""
    def __init__(self,
                 index=-1,
                 label='',
                 reactants=None,
                 products=None,
                 kinetics=None,
                 reversible=True,
                 transitionState=None,
                 duplicate=False,
                 degeneracy=1,
                 pairs=None
                 ):
        """
        Create a new reaction. All arguments are optional; see the class
        docstring table for the meaning of each attribute.
        """
        self.index = index                        # unique nonnegative index, -1 if unassigned
        self.label = label                        # descriptive string label
        self.reactants = reactants                # list of reactant Species
        self.products = products                  # list of product Species
        self.kinetics = kinetics                  # kinetics model for the reaction
        self.reversible = reversible              # True if the reaction is reversible
        self.transitionState = transitionState    # TransitionState object, if known
        self.duplicate = duplicate                # True if a known duplicate reaction
        self.degeneracy = degeneracy              # reaction path degeneracy
        self.pairs = pairs                        # reactant-product pairings for flux conversion
        # Cache of effective (diffusion-limited) rate coefficients; only
        # created when the diffusion limiter is active.
        if diffusionLimiter.enabled:
            self.k_effective_cache = {}
def __repr__(self):
"""
Return a string representation that can be used to reconstruct the
object.
"""
string = 'Reaction('
if self.index != -1: string += 'index={0:d}, '.format(self.index)
if self.label != '': string += 'label={0!r}, '.format(self.label)
if self.reactants is not None: string += 'reactants={0!r}, '.format(self.reactants)
if self.products is not None: string += 'products={0!r}, '.format(self.products)
if self.kinetics is not None: string += 'kinetics={0!r}, '.format(self.kinetics)
if not self.reversible: string += 'reversible={0}, '.format(self.reversible)
if self.transitionState is not None: string += 'transitionState={0!r}, '.format(self.transitionState)
if self.duplicate: string += 'duplicate={0}, '.format(self.duplicate)
if self.degeneracy != 1: string += 'degeneracy={0:d}, '.format(self.degeneracy)
if self.pairs is not None: string += 'pairs={0}, '.format(self.pairs)
string = string[:-2] + ')'
return string
def __str__(self):
"""
Return a string representation of the reaction, in the form 'A + B <=> C + D'.
"""
arrow = ' <=> '
if not self.reversible: arrow = ' => '
return arrow.join([' + '.join([str(s) for s in self.reactants]), ' + '.join([str(s) for s in self.products])])
def __reduce__(self):
"""
A helper function used when pickling an object.
"""
return (Reaction, (self.index,
self.label,
self.reactants,
self.products,
self.kinetics,
self.reversible,
self.transitionState,
self.duplicate,
self.degeneracy,
self.pairs
))
    def toChemkin(self, speciesList=None, kinetics=True):
        """
        Return the chemkin-formatted string for this reaction.

        If `kinetics` is set to True, the chemkin format kinetics will also
        be returned (requires the `speciesList` to figure out third body colliders.)
        Otherwise, only the reaction string will be returned.
        """
        # Local import; presumably deferred to avoid a circular dependency
        # with rmgpy.chemkin at module load time -- TODO confirm.
        import rmgpy.chemkin
        if kinetics:
            return rmgpy.chemkin.writeKineticsEntry(self, speciesList)
        else:
            return rmgpy.chemkin.writeReactionString(self)
    def toCantera(self, speciesList=None):
        """
        Converts the RMG Reaction object to a Cantera Reaction object
        with the appropriate reaction class.

        Returns a single Cantera reaction, or a list of duplicate elementary
        reactions when the kinetics are MultiArrhenius / MultiPDepArrhenius.

        Raises an exception if this reaction carries no kinetics, and
        NotImplementedError for unsupported kinetics types.
        """
        from rmgpy.kinetics import Arrhenius, ArrheniusEP, MultiArrhenius, PDepArrhenius, MultiPDepArrhenius, Chebyshev, ThirdBody, Lindemann, Troe
        import cantera as ct
        if speciesList is None:
            speciesList = []
        # Create the dictionaries containing species strings and their stoichiometries
        # for initializing the cantera reaction object
        ctReactants = {}
        for reactant in self.reactants:
            reactantName = reactant.toChemkin()   # Use the chemkin name for the species
            if reactantName in ctReactants:
                ctReactants[reactantName] += 1
            else:
                ctReactants[reactantName] = 1
        ctProducts = {}
        for product in self.products:
            productName = product.toChemkin()     # Use the chemkin name for the species
            if productName in ctProducts:
                ctProducts[productName] += 1
            else:
                ctProducts[productName] = 1
        if self.kinetics:
            # Map each RMG kinetics type to the matching Cantera reaction
            # class; Multi* kinetics expand into one reaction per entry.
            if isinstance(self.kinetics, Arrhenius):
                # Create an Elementary Reaction
                ctReaction = ct.ElementaryReaction(reactants=ctReactants, products=ctProducts)
            elif isinstance(self.kinetics, MultiArrhenius):
                # Return a list of elementary reactions which are duplicates
                ctReaction = [ct.ElementaryReaction(reactants=ctReactants, products=ctProducts) for arr in self.kinetics.arrhenius]
            elif isinstance(self.kinetics, PDepArrhenius):
                ctReaction = ct.PlogReaction(reactants=ctReactants, products=ctProducts)
            elif isinstance(self.kinetics, MultiPDepArrhenius):
                ctReaction = [ct.PlogReaction(reactants=ctReactants, products=ctProducts) for arr in self.kinetics.arrhenius]
            elif isinstance(self.kinetics, Chebyshev):
                ctReaction = ct.ChebyshevReaction(reactants=ctReactants, products=ctProducts)
            elif isinstance(self.kinetics, ThirdBody):
                ctReaction = ct.ThreeBodyReaction(reactants=ctReactants, products=ctProducts)
            elif isinstance(self.kinetics, Lindemann) or isinstance(self.kinetics, Troe):
                ctReaction = ct.FalloffReaction(reactants=ctReactants, products=ctProducts)
            else:
                raise NotImplementedError('Not able to set cantera kinetics for {0}'.format(self.kinetics))
            # Set reversibility, duplicate, and ID attributes
            if isinstance(ctReaction,list):
                for rxn in ctReaction:
                    rxn.reversible = self.reversible
                    # Set the duplicate flag to true since this reaction comes from multiarrhenius or multipdeparrhenius
                    rxn.duplicate = True
                    # Set the ID flag to the original rmg index
                    rxn.ID = str(self.index)
            else:
                ctReaction.reversible = self.reversible
                ctReaction.duplicate = self.duplicate
                ctReaction.ID = str(self.index)
            # Delegate filling in the rate parameters to the kinetics object.
            self.kinetics.setCanteraKinetics(ctReaction, speciesList)
            return ctReaction
        else:
            raise Exception('Cantera reaction cannot be created because there was no kinetics.')
def getURL(self):
"""
Get a URL to search for this reaction in the rmg website.
"""
# eg. http://dev.rmg.mit.edu/database/kinetics/reaction/reactant1=1%20C%200%20%7B2,S%7D;2%20O%200%20%7B1,S%7D;__reactant2=1%20C%202T;__product1=1%20C%201;__product2=1%20C%200%20%7B2,S%7D;2%20O%201%20%7B1,S%7D;
base_url = "http://rmg.mit.edu/database/kinetics/reaction/"
rxn_string = ''
for i,species in enumerate(self.reactants):
adjlist = species.molecule[0].toAdjacencyList(removeH=False)
rxn_string += "reactant{0}={1}__".format(i+1, adjlist)
for i,species in enumerate(self.products):
adjlist = species.molecule[0].toAdjacencyList(removeH=False)
rxn_string += "product{0}={1}__".format(i+1, adjlist)
url = base_url + urllib.quote(rxn_string)
return url.strip('_')
def isIsomerization(self):
"""
Return ``True`` if the reaction represents an isomerization reaction
:math:`\\ce{A <=> B}` or ``False`` if not.
"""
return len(self.reactants) == 1 and len(self.products) == 1
def isAssociation(self):
"""
Return ``True`` if the reaction represents an association reaction
:math:`\\ce{A + B <=> C}` or ``False`` if not.
"""
return len(self.reactants) > 1 and len(self.products) == 1
def isDissociation(self):
"""
Return ``True`` if the reaction represents a dissociation reaction
:math:`\\ce{A <=> B + C}` or ``False`` if not.
"""
return len(self.reactants) == 1 and len(self.products) > 1
def isUnimolecular(self):
"""
Return ``True`` if the reaction has a single molecule as either reactant or product (or both)
:math:`\\ce{A <=> B + C}` or :math:`\\ce{A + B <=> C}` or :math:`\\ce{A <=> B}`,
or ``False`` if not.
"""
return len(self.reactants) == 1 or len(self.products) == 1
def hasTemplate(self, reactants, products):
"""
Return ``True`` if the reaction matches the template of `reactants`
and `products`, which are both lists of :class:`Species` objects, or
``False`` if not.
"""
return ((all([spec in self.reactants for spec in reactants]) and
all([spec in self.products for spec in products])) or
(all([spec in self.products for spec in reactants]) and
all([spec in self.reactants for spec in products])))
    def matchesMolecules(self, reactants):
        """
        Return ``True`` if the given ``reactants`` represent the total set of
        reactants or products for the current ``reaction``, or ``False`` if not.
        The reactants should be :class:`Molecule` objects.

        Both orderings of a two-molecule list are tried; only one- and
        two-molecule lists are supported.
        """
        assert all([isinstance(reactant, Molecule) for reactant in reactants])
        # Check forward direction
        if len(reactants) == len(self.reactants) == 1:
            if self.reactants[0].isIsomorphic(reactants[0]):
                return True
        elif len(reactants) == len(self.reactants) == 2:
            # Try both pairings of the two molecules.
            if self.reactants[0].isIsomorphic(reactants[0]) and self.reactants[1].isIsomorphic(reactants[1]):
                return True
            elif self.reactants[0].isIsomorphic(reactants[1]) and self.reactants[1].isIsomorphic(reactants[0]):
                return True
        # Check reverse direction
        if len(reactants) == len(self.products) == 1:
            if self.products[0].isIsomorphic(reactants[0]):
                return True
        elif len(reactants) == len(self.products) == 2:
            # Try both pairings of the two molecules.
            if self.products[0].isIsomorphic(reactants[0]) and self.products[1].isIsomorphic(reactants[1]):
                return True
            elif self.products[0].isIsomorphic(reactants[1]) and self.products[1].isIsomorphic(reactants[0]):
                return True
        if len(reactants) > 2:
            raise NotImplementedError("Can't check isomorphism of reactions with {0} reactants".format(len(reactants)))
        # If we're here then neither direction matched, so return false
        return False
def isIsomorphic(self, other, eitherDirection=True):
    """
    Return ``True`` if this reaction is the same as the `other` reaction,
    or ``False`` if they are different.
    If `eitherDirection=False` then the directions must match.

    Two species lists match if some ordering of one is pairwise
    isomorphic with the other.  Unlike the previous hand-written
    checks for 1, 2 and 3 species (which raised ``NotImplementedError``
    beyond three), this works for any molecularity.
    """
    from itertools import permutations

    def speciesListsMatch(list1, list2):
        # True if some permutation of list2 is pairwise isomorphic to list1.
        if len(list1) != len(list2):
            return False
        for ordering in permutations(list2):
            if all(spec1.isIsomorphic(spec2) for spec1, spec2 in zip(list1, ordering)):
                return True
        return False

    # Forward direction: reactants to reactants, products to products
    if (speciesListsMatch(self.reactants, other.reactants) and
            speciesListsMatch(self.products, other.products)):
        return True
    if not eitherDirection:
        return False
    # Reverse direction: reactants to products, products to reactants
    return (speciesListsMatch(self.reactants, other.products) and
            speciesListsMatch(self.products, other.reactants))
def getEnthalpyOfReaction(self, T):
    """
    Return the enthalpy of reaction in J/mol evaluated at temperature
    `T` in K.

    Computed as the sum of product enthalpies minus the sum of reactant
    enthalpies at `T`.
    """
    # Typed locals for compilation with Cython's pure-Python mode.
    cython.declare(dHrxn=cython.double, reactant=Species, product=Species)
    dHrxn = 0.0
    for reactant in self.reactants:
        dHrxn -= reactant.getEnthalpy(T)  # reactants contribute negatively
    for product in self.products:
        dHrxn += product.getEnthalpy(T)   # products contribute positively
    return dHrxn
def getEntropyOfReaction(self, T):
    """
    Return the entropy of reaction in J/mol*K evaluated at temperature `T`
    in K.

    Computed as the sum of product entropies minus the sum of reactant
    entropies at `T`.
    """
    # Typed locals for compilation with Cython's pure-Python mode.
    cython.declare(dSrxn=cython.double, reactant=Species, product=Species)
    dSrxn = 0.0
    for reactant in self.reactants:
        dSrxn -= reactant.getEntropy(T)  # reactants contribute negatively
    for product in self.products:
        dSrxn += product.getEntropy(T)   # products contribute positively
    return dSrxn
def getFreeEnergyOfReaction(self, T):
    """
    Return the Gibbs free energy of reaction in J/mol evaluated at
    temperature `T` in K.

    Computed as the sum of product free energies minus the sum of
    reactant free energies at `T`.
    """
    # Typed locals for compilation with Cython's pure-Python mode.
    cython.declare(dGrxn=cython.double, reactant=Species, product=Species)
    dGrxn = 0.0
    for reactant in self.reactants:
        dGrxn -= reactant.getFreeEnergy(T)  # reactants contribute negatively
    for product in self.products:
        dGrxn += product.getFreeEnergy(T)   # products contribute positively
    return dGrxn
def getEquilibriumConstant(self, T, type='Kc'):
    """
    Return the equilibrium constant for the reaction at the specified
    temperature `T` in K. The `type` parameter lets you specify the
    quantities used in the equilibrium constant: ``Ka`` for activities,
    ``Kc`` for concentrations (default), or ``Kp`` for pressures. Note that
    this function currently assumes an ideal gas mixture.

    Raises :class:`ReactionError` if `type` is not recognized or the
    computed equilibrium constant is zero.
    """
    cython.declare(dGrxn=cython.double, K=cython.double, C0=cython.double, P0=cython.double)
    # Use free energy of reaction to calculate Ka
    dGrxn = self.getFreeEnergyOfReaction(T)
    K = numpy.exp(-dGrxn / constants.R / T)
    # Convert Ka to Kc or Kp if specified
    P0 = 1e5  # reference pressure in Pa (1e5 Pa = 1 bar)
    if type == 'Kc':
        # Convert from Ka to Kc; C0 is the reference concentration
        C0 = P0 / constants.R / T
        K *= C0 ** (len(self.products) - len(self.reactants))
    elif type == 'Kp':
        # Convert from Ka to Kp; P0 is the reference pressure
        K *= P0 ** (len(self.products) - len(self.reactants))
    elif type != 'Ka' and type != '':
        # BUG FIX: the original message never interpolated the bad value
        # (the '%s' placeholder had no '% type' applied).
        raise ReactionError('Invalid type "%s" passed to Reaction.getEquilibriumConstant(); should be "Ka", "Kc", or "Kp".' % type)
    if K == 0:
        raise ReactionError('Got equilibrium constant of 0')
    return K
def getEnthalpiesOfReaction(self, Tlist):
    """
    Return the enthalpies of reaction in J/mol evaluated at temperatures
    `Tlist` in K.
    """
    # Evaluate the scalar enthalpy of reaction at each temperature.
    dHlist = [self.getEnthalpyOfReaction(T) for T in Tlist]
    return numpy.array(dHlist, numpy.float64)
def getEntropiesOfReaction(self, Tlist):
    """
    Return the entropies of reaction in J/mol*K evaluated at temperatures
    `Tlist` in K.
    """
    # Evaluate the scalar entropy of reaction at each temperature in turn.
    dSlist = []
    for T in Tlist:
        dSlist.append(self.getEntropyOfReaction(T))
    return numpy.array(dSlist, numpy.float64)
def getFreeEnergiesOfReaction(self, Tlist):
    """
    Return the Gibbs free energies of reaction in J/mol evaluated at
    temperatures `Tlist` in K.
    """
    # Delegate to the scalar evaluation for each temperature.
    dGlist = [self.getFreeEnergyOfReaction(T) for T in Tlist]
    return numpy.array(dGlist, numpy.float64)
def getEquilibriumConstants(self, Tlist, type='Kc'):
    """
    Return the equilibrium constants for the reaction at the specified
    temperatures `Tlist` in K. The `type` parameter lets you specify the
    quantities used in the equilibrium constant: ``Ka`` for activities,
    ``Kc`` for concentrations (default), or ``Kp`` for pressures. Note that
    this function currently assumes an ideal gas mixture.
    """
    # Delegate to the scalar evaluation at each temperature.
    Klist = [self.getEquilibriumConstant(T, type) for T in Tlist]
    return numpy.array(Klist, numpy.float64)
def getStoichiometricCoefficient(self, spec):
    """
    Return the stoichiometric coefficient of species `spec` in the reaction.
    The stoichiometric coefficient is increased by one for each time `spec`
    appears as a product and decreased by one for each time `spec` appears
    as a reactant.

    Note that matching uses object identity (``is``), not equality or
    isomorphism, so `spec` must be the very object stored in
    ``self.reactants``/``self.products``.
    """
    # Typed locals for compilation with Cython's pure-Python mode.
    cython.declare(stoich=cython.int, reactant=Species, product=Species)
    stoich = 0
    for reactant in self.reactants:
        if reactant is spec: stoich -= 1  # identity comparison, see docstring
    for product in self.products:
        if product is spec: stoich += 1
    return stoich
def getRateCoefficient(self, T, P=0):
    """
    Return the overall rate coefficient for the forward reaction at
    temperature `T` in K and pressure `P` in Pa, including any reaction
    path degeneracies.
    If diffusionLimiter is enabled, the reaction is in the liquid phase and we use
    a diffusion limitation to correct the rate. If not, then use the intrinsic rate
    coefficient.
    """
    if not diffusionLimiter.enabled:
        # Gas phase: the intrinsic rate coefficient applies directly.
        return self.kinetics.getRateCoefficient(T, P)
    # Liquid phase: cache the diffusion-corrected rate per temperature.
    if T not in self.k_effective_cache:
        self.k_effective_cache[T] = diffusionLimiter.getEffectiveRate(self, T)
    return self.k_effective_cache[T]
def fixDiffusionLimitedA(self, T):
    """
    Decrease the pre-exponential factor (A) by the diffusion factor
    to account for the diffusion limit at the specified temperature.

    No-op unless the global `diffusionLimiter` is enabled.
    """
    if not diffusionLimiter.enabled:
        return
    # Obtain effective (diffusion-limited) rate, caching per temperature
    try:
        k = self.k_effective_cache[T]
    except KeyError:
        k = diffusionLimiter.getEffectiveRate(self, T)
        self.k_effective_cache[T] = k
    # calculate diffusion factor
    diffusionFactor = k / self.kinetics.getRateCoefficient(T, P=0)
    # update preexponential factor
    self.kinetics.A = self.kinetics.A * diffusionFactor
    # Record what was done in the kinetics comment.
    # BUG FIX: `kinetics.comment` is treated as a string elsewhere in this
    # class (fixBarrierHeight uses `+=`), so `.append` would fail; and the
    # original format spec `{0.2g}` is attribute access, not a precision
    # spec, and raises at runtime -- use `{0:.2g}`.
    self.kinetics.comment += (
        "\nPre-exponential factor A has been decreased by the "
        "diffusion factor {0:.2g} evaluated at {1} K.").format(
        diffusionFactor, T)
def fixBarrierHeight(self, forcePositive=False):
    """
    Turns the kinetics into Arrhenius (if they were ArrheniusEP)
    and ensures the activation energy is at least the endothermicity
    for endothermic reactions, and is not negative only as a result
    of using Evans Polanyi with an exothermic reaction.
    If `forcePositive` is True, then all reactions
    are forced to have a non-negative barrier.
    """
    # Typed locals for compilation with Cython's pure-Python mode.
    cython.declare(H0=cython.double, H298=cython.double, Ea=cython.double)
    # Enthalpy of reaction at 298 K, and ground-state (0 K) energy difference
    H298 = self.getEnthalpyOfReaction(298)
    H0 = sum([spec.thermo.E0.value_si for spec in self.products]) - sum([spec.thermo.E0.value_si for spec in self.reactants])
    if isinstance(self.kinetics, ArrheniusEP):
        Ea = self.kinetics.E0.value_si # temporarily using Ea to store the intrinsic barrier height E0
        self.kinetics = self.kinetics.toArrhenius(H298)
        # Only clamp to zero when the intrinsic barrier E0 was positive but
        # the Evans-Polanyi conversion produced a negative Ea.
        if Ea > 0 and self.kinetics.Ea.value_si < 0:
            self.kinetics.comment += "\nEa raised from {0:.1f} to 0 kJ/mol.".format(self.kinetics.Ea.value_si/1000)
            logging.info("For reaction {1!s} Ea raised from {0:.1f} to 0 kJ/mol.".format(self.kinetics.Ea.value_si/1000, self))
            self.kinetics.Ea.value_si = 0
    if isinstance(self.kinetics, Arrhenius):
        Ea = self.kinetics.Ea.value_si
        # Endothermic reactions must have Ea at least the endothermicity H0
        if H0 > 0 and Ea < H0:
            self.kinetics.Ea.value_si = H0
            self.kinetics.comment += "\nEa raised from {0:.1f} to {1:.1f} kJ/mol to match endothermicity of reaction.".format(Ea/1000,H0/1000)
            logging.info("For reaction {2!s}, Ea raised from {0:.1f} to {1:.1f} kJ/mol to match endothermicity of reaction.".format(Ea/1000, H0/1000, self))
    # Optionally force any remaining negative barrier up to zero
    if forcePositive and isinstance(self.kinetics, Arrhenius) and self.kinetics.Ea.value_si < 0:
        self.kinetics.comment += "\nEa raised from {0:.1f} to 0 kJ/mol.".format(self.kinetics.Ea.value_si/1000)
        logging.info("For reaction {1!s} Ea raised from {0:.1f} to 0 kJ/mol.".format(self.kinetics.Ea.value_si/1000, self))
        self.kinetics.Ea.value_si = 0
def reverseThisArrheniusRate(self, kForward, reverseUnits):
    """
    Reverses the given kForward, which must be an Arrhenius type.
    You must supply the correct units for the reverse rate.
    The equilibrium constant is evaluated from the current reaction instance (self).

    Returns an :class:`Arrhenius` fitted to k_r(T) = k_f(T) / K_eq(T).
    """
    # Typed locals for compilation with Cython's pure-Python mode.
    cython.declare(kf=Arrhenius, kr=Arrhenius)
    cython.declare(Tlist=numpy.ndarray, klist=numpy.ndarray, i=cython.int)
    kf = kForward
    assert isinstance(kf, Arrhenius), "Only reverses Arrhenius rates"
    # Fit over the forward rate's valid temperature range if known,
    # otherwise over a default grid evenly spaced in 1/T.
    if kf.Tmin is not None and kf.Tmax is not None:
        Tlist = 1.0/numpy.linspace(1.0/kf.Tmax.value_si, 1.0/kf.Tmin.value_si, 50)
    else:
        Tlist = 1.0 / numpy.arange(0.0005, 0.0034, 0.0001)  # 294 K to 2000 K
    # Determine the values of the reverse rate coefficient k_r(T) at each temperature
    klist = numpy.zeros_like(Tlist)
    for i in range(len(Tlist)):
        klist[i] = kf.getRateCoefficient(Tlist[i]) / self.getEquilibriumConstant(Tlist[i])
    kr = Arrhenius()
    kr.fitToData(Tlist, klist, reverseUnits, kf.T0.value_si)
    return kr
def generateReverseRateCoefficient(self):
    """
    Generate and return a rate coefficient model for the reverse reaction.
    Currently this only works if the `kinetics` attribute is one of several
    (but not necessarily all) kinetics types.

    Raises :class:`ReactionError` if the forward kinetics type is not
    one of the supported types listed below.
    """
    # Typed locals for compilation with Cython's pure-Python mode.
    cython.declare(Tlist=numpy.ndarray, klist=numpy.ndarray, i=cython.int)
    # Kinetics types this method can reverse (used in the error message).
    supported_types = (
        KineticsData.__name__,
        Arrhenius.__name__,
        MultiArrhenius.__name__,
        PDepArrhenius.__name__,
        MultiPDepArrhenius.__name__,
        Chebyshev.__name__,
        ThirdBody.__name__,
        Lindemann.__name__,
        Troe.__name__,
    )
    # Get the units for the reverse rate coefficient
    kunits = getRateCoefficientUnitsFromReactionOrder(len(self.products))
    kf = self.kinetics
    if isinstance(kf, KineticsData):
        # Reverse pointwise via detailed balance: k_r(T) = k_f(T) / K_eq(T)
        Tlist = kf.Tdata.value_si
        klist = numpy.zeros_like(Tlist)
        for i in range(len(Tlist)):
            klist[i] = kf.getRateCoefficient(Tlist[i]) / self.getEquilibriumConstant(Tlist[i])
        kr = KineticsData(Tdata=(Tlist,"K"), kdata=(klist,kunits), Tmin=(numpy.min(Tlist),"K"), Tmax=(numpy.max(Tlist),"K"))
        return kr
    elif isinstance(kf, Arrhenius):
        return self.reverseThisArrheniusRate(kf, kunits)
    elif isinstance (kf, Chebyshev):
        # Evaluate k_r on a (T, P) grid, then refit a Chebyshev polynomial
        # of the same degrees over the same T and P ranges.
        Tlist = 1.0/numpy.linspace(1.0/kf.Tmax.value, 1.0/kf.Tmin.value, 50)
        Plist = numpy.linspace(kf.Pmin.value, kf.Pmax.value, 20)
        K = numpy.zeros((len(Tlist), len(Plist)), numpy.float64)
        for Tindex, T in enumerate(Tlist):
            for Pindex, P in enumerate(Plist):
                K[Tindex, Pindex] = kf.getRateCoefficient(T, P) / self.getEquilibriumConstant(T)
        kr = Chebyshev()
        kr.fitToData(Tlist, Plist, K, kunits, kf.degreeT, kf.degreeP, kf.Tmin.value, kf.Tmax.value, kf.Pmin.value, kf.Pmax.value)
        return kr
    elif isinstance(kf, PDepArrhenius):
        # Evaluate k_r at the forward model's pressures over a grid
        # evenly spaced in 1/T, then refit.
        if kf.Tmin is not None and kf.Tmax is not None:
            Tlist = 1.0/numpy.linspace(1.0/kf.Tmax.value, 1.0/kf.Tmin.value, 50)
        else:
            Tlist = 1.0/numpy.arange(0.0005, 0.0035, 0.0001)
        Plist = kf.pressures.value_si
        K = numpy.zeros((len(Tlist), len(Plist)), numpy.float64)
        for Tindex, T in enumerate(Tlist):
            for Pindex, P in enumerate(Plist):
                K[Tindex, Pindex] = kf.getRateCoefficient(T, P) / self.getEquilibriumConstant(T)
        kr = PDepArrhenius()
        kr.fitToData(Tlist, Plist, K, kunits, kf.arrhenius[0].T0.value)
        return kr
    elif isinstance(kf, MultiArrhenius):
        # Reverse each Arrhenius term separately using a temporary reaction
        # carrying the same reactants/products.
        kr = MultiArrhenius()
        kr.arrhenius = []
        rxn = Reaction(reactants = self.reactants, products = self.products)
        for kinetics in kf.arrhenius:
            rxn.kinetics = kinetics
            kr.arrhenius.append(rxn.generateReverseRateCoefficient())
        return kr
    elif isinstance(kf, MultiPDepArrhenius):
        # Same strategy as MultiArrhenius, term by term.
        kr = MultiPDepArrhenius()
        kr.arrhenius = []
        rxn = Reaction(reactants = self.reactants, products = self.products)
        for kinetics in kf.arrhenius:
            rxn.kinetics = kinetics
            kr.arrhenius.append(rxn.generateReverseRateCoefficient())
        return kr
    elif isinstance(kf, ThirdBody):
        # The low-pressure limit has one extra reaction order (the bath gas)
        lowPkunits = getRateCoefficientUnitsFromReactionOrder(len(self.products) + 1)
        krLow = self.reverseThisArrheniusRate(kf.arrheniusLow, lowPkunits)
        parameters = kf.__reduce__()[1] # use the pickle helper to get all the other things needed
        kr = ThirdBody(krLow, *parameters[1:])
        return kr
    elif isinstance(kf, Lindemann):
        # Reverse both the high- and low-pressure limits independently.
        krHigh = self.reverseThisArrheniusRate(kf.arrheniusHigh, kunits)
        lowPkunits = getRateCoefficientUnitsFromReactionOrder(len(self.products) + 1)
        krLow = self.reverseThisArrheniusRate(kf.arrheniusLow, lowPkunits)
        parameters = kf.__reduce__()[1] # use the pickle helper to get all the other things needed
        kr = Lindemann(krHigh, krLow, *parameters[2:])
        return kr
    elif isinstance(kf, Troe):
        # Reverse both limits; the broadening parameters carry over.
        krHigh = self.reverseThisArrheniusRate(kf.arrheniusHigh, kunits)
        lowPkunits = getRateCoefficientUnitsFromReactionOrder(len(self.products) + 1)
        krLow = self.reverseThisArrheniusRate(kf.arrheniusLow, lowPkunits)
        parameters = kf.__reduce__()[1] # use the pickle helper to get all the other things needed
        kr = Troe(krHigh, krLow, *parameters[2:])
        return kr
    else:
        raise ReactionError(("Unexpected kinetics type {0}; should be one of {1}").format(self.kinetics.__class__, supported_types))
def calculateTSTRateCoefficients(self, Tlist):
    """Return the TST rate coefficients k(T) for each temperature in `Tlist`."""
    klist = [self.calculateTSTRateCoefficient(T) for T in Tlist]
    return numpy.array(klist, numpy.float64)
def calculateTSTRateCoefficient(self, T):
    """
    Evaluate the forward rate coefficient for the reaction with
    corresponding transition state `TS` at temperature `T` in K using
    (canonical) transition state theory. The TST equation is

    .. math:: k(T) = \\kappa(T) \\frac{k_\\mathrm{B} T}{h} \\frac{Q^\\ddagger(T)}{Q^\\mathrm{A}(T) Q^\\mathrm{B}(T)} \\exp \\left( -\\frac{E_0}{k_\\mathrm{B} T} \\right)

    where :math:`Q^\\ddagger` is the partition function of the transition state,
    :math:`Q^\\mathrm{A}` and :math:`Q^\\mathrm{B}` are the partition function
    of the reactants, :math:`E_0` is the ground-state energy difference from
    the transition state to the reactants, :math:`T` is the absolute
    temperature, :math:`k_\\mathrm{B}` is the Boltzmann constant, and :math:`h`
    is the Planck constant. :math:`\\kappa(T)` is an optional tunneling
    correction.
    """
    # Normalization factor applied to each partition function
    # (R*T/101325 -- presumably the molar volume at 1 atm; confirm).
    referenceFactor = constants.R * T / 101325.
    # Accumulate the reactant partition functions and the ground-state
    # energy difference between the transition state and the reactants.
    Qreac = 1.0
    E0 = 0.0
    for spec in self.reactants:
        logging.debug(' Calculating Partition function for ' + spec.label)
        Qreac *= spec.getPartitionFunction(T) / referenceFactor
        E0 -= spec.conformer.E0.value_si
    logging.debug(' Calculating Partition function for ' + self.transitionState.label)
    Qts = self.transitionState.getPartitionFunction(T) / referenceFactor
    E0 += self.transitionState.conformer.E0.value_si
    # Canonical TST expression
    k = (constants.kB * T / constants.h * Qts / Qreac) * math.exp(-E0 / constants.R / T)
    # Apply tunneling correction
    k *= self.transitionState.calculateTunnelingFactor(T)
    return k
def canTST(self):
    """
    Return ``True`` if the necessary parameters are available for using
    transition state theory -- or the microcanonical equivalent, RRKM
    theory -- to compute the rate coefficient for this reaction, or
    ``False`` otherwise.
    """
    # TST requires at least one molecular degree of freedom (mode)
    # on the transition state's conformer.
    modes = self.transitionState.conformer.modes
    return len(modes) > 0
def calculateMicrocanonicalRateCoefficient(self, Elist, Jlist, reacDensStates, prodDensStates=None, T=0.0):
    """
    Calculate the microcanonical rate coefficient :math:`k(E)` for the reaction
    `reaction` at the energies `Elist` in J/mol. `reacDensStates` and
    `prodDensStates` are the densities of states of the reactant and product
    configurations for this reaction. If the reaction is irreversible, only the
    reactant density of states is required; if the reaction is reversible, then
    both are required. This function will try to use the best method that it
    can based on the input data available:

    * If detailed information has been provided for the transition state (i.e.
      the molecular degrees of freedom), then RRKM theory will be used.
    * If the above is not possible but high-pressure limit kinetics
      :math:`k_\\infty(T)` have been provided, then the inverse Laplace
      transform method will be used.

    The density of states for the product `prodDensStates` and the temperature
    of interest `T` in K can also be provided. For isomerization and association
    reactions `prodDensStates` is required; for dissociation reactions it is
    optional. The temperature is used if provided in the detailed balance
    expression to determine the reverse kinetics, and in certain cases in the
    inverse Laplace transform method.
    """
    # Thin wrapper: delegates to the module-level function of the same
    # name (imported elsewhere in this file), passing this reaction along.
    return calculateMicrocanonicalRateCoefficient(self, Elist, Jlist, reacDensStates, prodDensStates, T)
def isBalanced(self):
    """
    Return ``True`` if the reaction has the same number of each atom on
    each side of the reaction equation, or ``False`` if not.
    """
    from rmgpy.molecule.element import elementList
    # Typed locals for compilation with Cython's pure-Python mode.
    cython.declare(reactantElements=dict, productElements=dict, molecule=Molecule, atom=Atom, element=Element)
    # Tally the atoms of each element on each side of the reaction.
    reactantElements = {}; productElements = {}
    for element in elementList:
        reactantElements[element] = 0
        productElements[element] = 0
    for reactant in self.reactants:
        if isinstance(reactant, Species):
            # Use the first structure in the species' molecule list
            molecule = reactant.molecule[0]
        elif isinstance(reactant, Molecule):
            molecule = reactant
        # NOTE(review): if `reactant` is neither Species nor Molecule,
        # `molecule` is stale or unbound here -- confirm callers always
        # pass one of those two types.
        for atom in molecule.atoms:
            reactantElements[atom.element] += 1
    for product in self.products:
        if isinstance(product, Species):
            molecule = product.molecule[0]
        elif isinstance(product, Molecule):
            molecule = product
        for atom in molecule.atoms:
            productElements[atom.element] += 1
    # Balanced iff every element's count matches on both sides
    for element in elementList:
        if reactantElements[element] != productElements[element]:
            return False
    return True
def generatePairs(self):
    """
    Generate the reactant-product pairs to use for this reaction when
    performing flux analysis. The exact procedure for doing so depends on
    the reaction type:

    =================== =============== ========================================
    Reaction type       Template        Resulting pairs
    =================== =============== ========================================
    Isomerization       A     -> C      (A,C)
    Dissociation        A     -> C + D  (A,C), (A,D)
    Association         A + B -> C      (A,C), (B,C)
    Bimolecular         A + B -> C + D  (A,C), (B,D) *or* (A,D), (B,C)
    =================== =============== ========================================

    There are a number of ways of determining the correct pairing for
    bimolecular reactions. Here we try a simple similarity analysis by comparing
    the number of heavy atoms (carbons and oxygens at the moment). This should
    work most of the time, but a more rigorous algorithm may be needed for
    some cases.
    """
    self.pairs = []
    if len(self.reactants) == 1 or len(self.products) == 1:
        # A single species on either side pairs with everything opposite.
        for reactant in self.reactants:
            for product in self.products:
                self.pairs.append((reactant, product))
        return
    # Bimolecular (or larger): rank each side by (carbon count, oxygen
    # count) and greedily pair the heaviest with the heaviest.
    rankedReactants = [(sum([1 for atom in r.molecule[0].atoms if atom.isCarbon()]),
                        sum([1 for atom in r.molecule[0].atoms if atom.isOxygen()]),
                        r) for r in self.reactants]
    rankedReactants.sort()
    rankedProducts = [(sum([1 for atom in p.molecule[0].atoms if atom.isCarbon()]),
                       sum([1 for atom in p.molecule[0].atoms if atom.isOxygen()]),
                       p) for p in self.products]
    rankedProducts.sort()
    while len(rankedReactants) > 1 and len(rankedProducts) > 1:
        self.pairs.append((rankedReactants.pop()[2], rankedProducts.pop()[2]))
    # Pair whatever remains on the shorter side with all leftovers.
    for _, _, reactant in rankedReactants:
        for _, _, product in rankedProducts:
            self.pairs.append((reactant, product))
def draw(self, path):
    """
    Generate a pictorial representation of the chemical reaction using the
    :mod:`draw` module. Use `path` to specify the file to save
    the generated image to; the image type is automatically determined by
    extension. Valid extensions are ``.png``, ``.svg``, ``.pdf``, and
    ``.ps``; of these, the first is a raster format and the remainder are
    vector formats.
    """
    from rmgpy.molecule.draw import ReactionDrawer
    # Infer the output format from the file extension (without the dot).
    extension = os.path.splitext(path)[1].lower()[1:]
    ReactionDrawer().draw(self, extension, path)
def _repr_png_(self):
    """
    Return a png picture of the reaction, useful for ipython-qtconsole.
    """
    from rmgpy.molecule.draw import ReactionDrawer
    tempFileName = 'temp_reaction.png'
    ReactionDrawer().draw(self, 'png', tempFileName)
    try:
        # BUG FIX: the original `open(...).read()` leaked the file handle;
        # use a context manager so the file is closed before unlinking.
        with open(tempFileName, 'rb') as pngFile:
            png = pngFile.read()
    finally:
        # Always remove the temporary file, even if reading fails.
        os.unlink(tempFileName)
    return png
# Build the transition state geometry
def generate3dTS(self, reactants, products):
    """
    Generate the 3D structure of the transition state. Called from
    model.generateKinetics().

    `reactants` and `products` are lists of species.  For each species,
    atoms carrying radical electrons are located (assumed to be the
    reacting sites); for each such atom, a `reactionAxis` attribute is
    set on the species: the vector sum of the bond vectors from the
    radical atom to its neighbors.
    """
    import rdkit
    import rdkit.Chem
    import rdkit.Chem.AllChem
    import rdkit.Geometry
    # The same geometric analysis applies to both sides of the reaction,
    # so process reactants and products in a single pass (the original
    # duplicated ~25 lines for the second list).
    for species in list(reactants) + list(products):
        mol = species.molecule[0]
        for j in range(mol.rdMol.GetNumAtoms()):
            # Radical electrons mark the atom assumed to be reacting.
            # NOTE(review): if a more reliable way to find the reacting
            # atom exists, it would greatly improve this method.
            if mol.rdMol.GetAtomWithIdx(j).GetNumRadicalElectrons():
                conformer = mol.rdMol.GetConformer(mol.rdMolConfId)
                point = conformer.GetAtomPosition(j)
                neighbors = mol.rdMol.GetAtomWithIdx(j).GetNeighbors()
                xCoord = []
                yCoord = []
                zCoord = []
                for neighbor in neighbors:
                    newPt = conformer.GetAtomPosition(neighbor.GetIdx())
                    direction = point.DirectionVector(newPt)
                    length = point.Distance(newPt)
                    xCoord.append(direction.x * length)
                    yCoord.append(direction.y * length)
                    zCoord.append(direction.z * length)
                # Reaction axis = vector sum of the bond vectors at the
                # radical site.
                species.reactionAxis = [sum(xCoord), sum(yCoord), sum(zCoord)]
def copy(self):
    """
    Create a deep copy of the current reaction.

    Species are copied via their own ``copy(deep=True)``; kinetics,
    transition state and pairs via :func:`deepcopy`.
    """
    # Typed local for compilation with Cython's pure-Python mode.
    cython.declare(other=Reaction)
    # Bypass __init__ and copy each attribute explicitly.
    other = Reaction.__new__(Reaction)
    other.index = self.index
    other.label = self.label
    other.reactants = []
    for reactant in self.reactants:
        other.reactants.append(reactant.copy(deep=True))
    other.products = []
    for product in self.products:
        other.products.append(product.copy(deep=True))
    other.kinetics = deepcopy(self.kinetics)
    other.reversible = self.reversible
    other.transitionState = deepcopy(self.transitionState)
    other.duplicate = self.duplicate
    other.degeneracy = self.degeneracy
    other.pairs = deepcopy(self.pairs)
    return other
|
nickvandewiele/RMG-Py
|
rmgpy/reaction.py
|
Python
|
mit
| 54,551
|
[
"RDKit"
] |
0dcd544a5bb22e875f4eea76096de463caace9dfd3739cf3f6c9f3b1b879fc80
|
"""
Copyright (c) 2013-2014 Benedicte Ofstad
Distributed under the GNU Lesser General Public License v3.0.
For full terms see the file LICENSE.md.
"""
import unittest
import abavib as av
import read_input as ri
import numpy as np
import pydoc
# Select the molecule under test; this chooses the input directory
# (input_<molecule>/) and the output file (output/<molecule>).
molecule = "fluoromethane"
input_name = "input_" + molecule + "/"
output_file_name = "output/" + molecule
open(output_file_name, 'w').close() # As we are appending to the output, the old results must be deleted before each run
class abavib_test(unittest.TestCase):
    """Shared fixture for the abavib tests: loads the molecule geometry,
    hessian and cubic force field from the input_<molecule>/ directory."""
    def setUp(self):
        #The reason we use this one, is because there are any number of eigenvectors which are correct eigenvectors, for the purpose of testing
        #we use the same one that DALTON operates with
        self.molecule = "fluoromethane"
        if(self.molecule == "fluoromethane"):
            # Reference eigenvalues (from DALTON) for fluoromethane
            self.eig = np.array([0.000156325, 0.000052708, 0.000083924, 0.000087366, 0.000049731, 0.000036663, 0.000105884, 0.000086732, 0.000035581])
        self.input_name = "input_" + self.molecule + "/"
        self.mol_name = self.input_name + 'MOLECULE.INP'
        self.cff_name = self.input_name + 'cubic_force_field'
        # Geometry and composition parsed from MOLECULE.INP
        self.coordinates, self.masses, self.num_atoms_list \
            ,self.charge_list, self.n_atoms = av.read_molecule(self.mol_name)
        self.n_coordinates = self.n_atoms * 3
        # n_nm: presumably the 3N-6 normal-mode count -- TODO confirm
        self.n_nm = self.n_coordinates - 6
        hessian_name = self.input_name + 'hessian'
        self.hessian = av.read_hessian(hessian_name, self.n_atoms*3)
        # Symmetrize the hessian as A + A^T - diag(A); presumably the file
        # stores only one triangle -- confirm against read_hessian.
        hessian_t = self.hessian.transpose()
        hessian_temp = np.add(self.hessian, hessian_t)
        self.hessian = np.subtract(hessian_temp , np.diag(self.hessian.diagonal()))
        #self.eig1, self.eigvec1, self.freq, self.eigvec_full1 = \
        #    av.fundamental_freq(self.hessian, self.num_atoms_list, \
        #    self.charge_list, self.coordinates, self.n_atoms, self.masses)#Check out the 1s i made
        self.cubic_force_field = ri.read_cubic_force_field(self.cff_name,#Remember to switch to av. for h2o\
            self.n_coordinates)
        #self.cff_norm, self.cff_norm_reduced = av.to_normal_coordinates_3D(self.cubic_force_field, self.eigvec_full, self.n_atoms)
        #effective_geometry_norm = av.effective_geometry(self.cff_norm_reduced, self.freq, self.n_atoms)
        #self.effective_geometry_cart = av.to_cartessian_coordinates(effective_geometry_norm, self.n_atoms, self.eigvec)
class read_molecule_test(abavib_test):
    """Checks the values av.read_molecule() parsed from MOLECULE.INP.

    Each test only asserts for the molecules it has reference data for
    (h2o / h2o2); for any other fixture molecule it passes vacuously.
    """

    def test_coordinates(self):
        """Parsed Cartesian coordinates match the reference geometry."""
        if self.molecule == "h2o":
            correct_coordinates = np.array(
                [[-1.42157256, 2.28115327, 0.00554911],
                 [-0.13533793, 2.10700057, 0.07387382],
                 [-2.02521896, 3.34965922, -0.41371135]])
            # Exact comparison is intended: the values are read verbatim
            # from the input file, no arithmetic is performed on them.
            self.assertTrue((self.coordinates == correct_coordinates).all())
        elif self.molecule == "h2o2":
            correct_coordinates = np.array(
                [[0.00000000, 1.40784586, -0.09885600],
                 [0.00000000, -1.40784586, -0.09885600],
                 [0.69081489, 1.72614891, 1.56891868],
                 [-0.69081489, -1.72614891, 1.56891868]])
            self.assertTrue((self.coordinates == correct_coordinates).all())

    def test_masses(self):
        """Parsed atomic masses match the expected isotope masses."""
        if self.molecule == "h2o":
            self.assertSequenceEqual(self.masses, [15.9994, 1.00794, 1.00794])
        elif self.molecule == "h2o2":
            self.assertSequenceEqual(self.masses,
                                     [15.9994, 15.9994, 1.00794, 1.00794])

    def test_n_atoms(self):
        """Atom count matches the molecule."""
        # assertEqual: assertEquals is a deprecated unittest alias.
        if self.molecule == "h2o":
            self.assertEqual(self.n_atoms, 3)
        elif self.molecule == "h2o2":
            self.assertEqual(self.n_atoms, 4)
class read_hessian_test(abavib_test):
    """Checks the hessian produced by av.read_hessian() plus the
    symmetrization performed in abavib_test.setUp()."""

    def test_hessian_values(self):
        """Symmetrized hessian matches the reference matrix."""
        if self.molecule == "h2o":
            correct_hessian = np.array([[9.79479, -0.452448, 0.229158, -6.327811, -0.924135, 0.318151, -3.466978, 1.376583, -0.547309]
            ,[ -0.452448, 32.105552, -11.992317, 0.767043, -10.749399, 3.928526, -0.314595, -21.356153, 8.06379]
            ,[ 0.229158, -11.992317, 6.029794, -0.298792, 3.882729, -2.197041, 0.069634, 8.062308, -3.832753]
            ,[ -6.327811, 0.767043, -0.298792, 6.541742, -1.005224, 0.392545, -0.213931, 0.238181, -0.093753]
            ,[ -0.924135, -10.749399, 3.882729, -1.005224, 4.894784, -1.718158, 1.929359, 5.767001, -2.204113]
            ,[ 0.318151, 3.928526, -2.197041, 0.392545, -1.718158, 1.177308, -0.710696, -2.20263, 1.019733]
            ,[ -3.466978, -0.314595, 0.069634, -0.213931, 1.929359, -0.710696, 3.680909, -1.614764, 0.641062]
            ,[ 1.376583, -21.356153, 8.062308, 0.238181, 5.767001, -2.20263, -1.614764, 15.589152, -5.859678]
            ,[ -0.547309, 8.06379, -3.832753, -0.093753, -2.204113, 1.019733, 0.641062, -5.859678, 2.81302]])
            self.assertTrue((self.hessian == correct_hessian).all())
        elif self.molecule == "h2o2":
            correct_hessian = np.array([[ 7.468672, -0.481556, 0.398409, -5.663545, 0.303342, -0.709184, -3.311649, 0.519999, -0.210195, 1.50652, -0.341785, 0.52097]
            , [-0.481556, -1.703046, -1.095639, 0.312254, 4.50928, 0.950217, 0.125658, -2.762323, 0.277076, 0.043644, -0.043911, -0.131655]
            , [ 0.398409, -1.095639, 6.687239, -0.070907, -0.950218, -4.993221, -0.235783, 1.232919, -1.848179, -0.052817, 0.812937, 0.154161]
            , [-5.663545, 0.312254, -0.070907, 6.946056, -0.472644, 0.385633, 1.554707, -0.350697, 0.109883, -2.837218, 0.511087, -0.420659]
            , [ 0.303342, 4.50928, -0.950218, -0.472644, -1.703046, 1.095639, 0.050912, -0.043911, 0.131655, 0.118389, -2.762323, -0.277076]
            , [-0.709184, 0.950217, -4.993221, 0.385633, 1.095639, 6.687239, 0.657331, -0.812937, 0.154161, -0.36873, -1.232919, -1.848179]
            , [-3.311649, 0.125658, -0.235783, 1.554707, 0.050912, 0.657331, 3.054322, -0.151584, 0.096644, -1.29738, -0.024987, -0.518191]
            , [ 0.519999, -2.762323, 1.232919, -0.350697, -0.043911, -0.812937, -0.151584, 2.837786, -0.414356, -0.017718, -0.031552, -0.005626]
            , [-0.210195, 0.277076, -1.848179, 0.109883, 0.131655, 0.154161, 0.096644, -0.414356, 1.682793, 0.003668, 0.005626, 0.011225]
            , [ 1.506522, 0.043644, -0.052817, -2.837218, 0.118389, -0.36873, -1.29738, -0.017718, 0.003668, 2.628076, -0.144315, 0.41788]
            , [-0.341785, -0.043911, 0.812937, 0.511087, -2.762323, -1.232919, -0.024987, -0.031552, 0.005626, -0.144315, 2.837786, 0.414356]
            , [ 0.52097, -0.131655, 0.154161, -0.420659, -0.277076, -1.848179, -0.518191, -0.005626, 0.011225, 0.41788, 0.414356, 1.682793]])
            # BUG FIX: the original compared (a - b < eps), which is trivially
            # true for any large *negative* difference. Compare the absolute
            # difference instead.
            self.assertTrue((np.absolute(self.hessian - correct_hessian) < 0.00001).all())

    def test_hessian_dimensions(self):
        """Hessian is a square 3N x 3N matrix."""
        self.assertTrue(self.hessian.shape == (self.n_coordinates, self.n_coordinates))
class frequency_test(abavib_test):
    """Checks fundamental frequencies and normal-mode eigenvectors.

    NOTE(review): the visible setUp() in this file never assigns self.freq
    or self.eigvec (that computation is commented out), so these assertions
    can only run for the h2o/h2o2 branches — verify the fixture before
    enabling those molecules.
    """
    def test_frequencies(self):
        # Trailing zeros are the translational/rotational modes.
        if(self.molecule == "h2o2"):
            correct_frequency = np.array([0.0570, 0.0435, 0.0413, 0.0343, 0.0294, 0.0168, 0,0,0,0,0,0])
            self.assertTrue(np.allclose(correct_frequency, self.freq, rtol=0.02, atol=0.0003))
        if(self.molecule == "h2o"):
            correct_frequency = np.array([0.037739, 0.045369, 0.025234, 0,0,0,0,0,0])
            self.assertTrue(np.allclose(correct_frequency, self.freq, rtol=0.02, atol=0.0003))
    def test_eigvec(self):
        if(self.molecule == "h2o2"):
            correct_eigvec = np.array([[ -0.00131353, -0.00001741, 0.00029587, -0.00016271, 0.00000038, 0.00006501]
            ,[ 0.00007785, -0.00060863, -0.00084065, -0.00064259, -0.00032658, 0.00406074]
            ,[ -0.00018153, 0.00151054, 0.00052282, -0.000208, -0.00088988, -0.00002127]
            ,[ 0.00116367, 0.00047638, -0.00027149, -0.00042556, 0.00006439, -0.00006238]
            ,[ 0.00000617, -0.0007995, 0.00059862, -0.0006688, 0.00040987, -0.00405949]
            ,[ 0.00036161, -0.00151616, 0.00016393, -0.00002019, -0.00098291, -0.00002758]
            ,[ 0.01633121, -0.00137943, 0.00208677, 0.00294594, 0.00196957, 0.00042724]
            ,[ -0.00289218, 0.00884232, 0.01679361, 0.01035385, 0.00450935, 0.00316981]
            ,[ 0.00144209, -0.00874834, -0.00591666, 0.01308062, 0.01362057, 0.00033726]
            ,[ -0.01395275, -0.00590483, -0.00247371, 0.00639033, -0.0029974, -0.00046896]
            ,[ 0.00155876, 0.01350575, -0.01295232, 0.01045877, -0.0058313, -0.00318957]
            ,[ -0.00430002, 0.00883742, -0.0049825, -0.00945915, 0.01610197, 0.00043797]])
            # Eigenvector signs are arbitrary; compare absolute values only.
            vfunc = np.vectorize(np.absolute)
            correct_eigvec = vfunc(correct_eigvec)
            self.eigvec = vfunc(self.eigvec)
            self.assertTrue(np.allclose(correct_eigvec, self.eigvec, rtol=0.02, atol=0.0003))
        if(self.molecule == "h2o"):
            h2o_eigvec = np.array([[0.003447, -0.039874, -0.067216]
            ,[-0.072965, 0.008106, -0.017140]
            ,[0.028630, -0.003180, 0.006726]
            ,[-0.019459, 0.890699, 0.354563]
            ,[0.351730, -0.268710, 0.458153]
            ,[-0.138013, 0.105431, -0.179775]
            ,[-0.035255, -0.257865, 0.712205]
            ,[0.806271, 0.140066, -0.186130]
            ,[-0.316368, -0.054958, 0.073029]])
            # NOTE(review): unlike the h2o2 branch, signs are NOT normalized
            # here before comparison — confirm this is intended.
            self.assertTrue(np.allclose(h2o_eigvec, self.eigvec, rtol=0.02, atol=0.0003))
class cubic_force_field_test(abavib_test):
    """Placeholder: the cubic force field loaded in setUp() is not yet
    verified against reference values."""
    def test_cff(self):
        # TODO: compare self.cubic_force_field against reference data.
        self.assertTrue(True)
class optical_rotation_test(abavib_test):
    """Compares computed optical-rotation corrections/values against the
    values DALTON reports in the OPTROT input file."""
    def setUp(self):
        # Extends the base fixture: needs self.input_name, self.n_nm and
        # self.eig from abavib_test.setUp().
        super(optical_rotation_test, self).setUp()
        optrot_deriv = ri.read_optrot(self.input_name + "OPTROT", self.n_nm)
        self.uncorrected_values, self.values_correction, self.corrected_values = ri.read_DALTON_values_3d_reduced(self.input_name + "OPTROT")
        self.optrot_correction, self.optrot = av.get_3D_property("OPTROT", optrot_deriv, self.uncorrected_values, self.n_nm, self.eig)
    def test_optical_rotation_corrections(self):
        self.assertTrue(np.allclose(self.values_correction, self.optrot_correction, rtol=0.03, atol=0.0003))
    def test_optical_rotation_values(self):
        # Side effect: appends the computed values to the output file.
        ri.write_to_file(self.molecule, "Optical Rotation", self.optrot)
        self.assertTrue(np.allclose(self.corrected_values, self.optrot, rtol=0.01, atol=0))
# Run the whole suite when executed as a script.
if __name__ == '__main__':
    unittest.main()
|
Benedicte/vibrational_motion
|
chiral_tester.py
|
Python
|
lgpl-3.0
| 11,124
|
[
"Dalton"
] |
f034c35d5d8664a68385ac055796dcdc38372f025f6bf739da109a57e77f453f
|
#! /usr/bin/python
from setuptools import setup
from setuptools import find_packages
# Package metadata for the project-euler solutions; no runtime dependencies.
_metadata = dict(
    name='project-euler',
    version='0.1',
    description='Code to solve project euler problems',
    author='Brian Kinney',
    author_email='briankanokinney@gmail.com',
    url='https://github.com/briankinney/project-euler',
    packages=find_packages(),
    install_requires=[],
)

setup(**_metadata)
|
briankinney/project-euler
|
setup.py
|
Python
|
mit
| 395
|
[
"Brian"
] |
0d91820995e4183dde38d6713895fecd22011d3579a1569ff6e6668fe2860881
|
#!/usr/bin/python2.7
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create an API definition by interpreting a discovery document.
This module interprets a discovery document to create a tree of classes which
represent the API structure in a way that is useful for generating a library.
For each discovery element (e.g. schemas, resources, methods, ...) there is
a class to represent it which is directly usable in the templates. The
instances of those classes are annotated with extra variables for use
in the template which are language specific.
The current way to make use of this class is to create a programming language
specific subclass of Api, which adds annotations and template variables
appropriate for that language.
TODO(user): Refactor this so that the API can be loaded first, then annotated.
"""
__author__ = 'aiuto@google.com (Tony Aiuto)'
import json
import logging
import operator
import urlparse
from googleapis.codegen import data_types
from googleapis.codegen import template_objects
from googleapis.codegen import utilities
from googleapis.codegen.api_exception import ApiException
from googleapis.codegen.schema import Schema
from googleapis.codegen.utilities import convert_size
# Fallbacks used when a discovery document omits the service host or the
# owner information.
_DEFAULT_SERVICE_HOST = 'www.googleapis.com'
_DEFAULT_OWNER_DOMAIN = 'google.com'
_DEFAULT_OWNER_NAME = 'Google'
# Domains normalized to 'google.com' when deriving the owner domain.
_RECOGNIZED_GOOGLE_DOMAINS = (
    'google.com',
    'googleapis.com',
    'googleplex.com'
)
# Module-level logger for the codegen package.
_LOGGER = logging.getLogger('codegen')
class Api(template_objects.CodeObject):
  """An API definition.

  This class holds a discovery centric definition of an API. It contains
  members such as "resources" and "schemas" which relate directly to discovery
  concepts. It defines several properties that can be used in code generation
  templates:
    name: The API name.
    version: The API version.
    versionNoDots: The API version with all '.' characters replaced with '_'.
        This is typically used in class names.
    versionNoDash: The API version with all '-' characters replaced with '_'.
        This is typically used in file names where '-' has meaning.
    authScopes: The list of the OAuth scopes used by this API.
    dataWrapper: True if the API definition contains the 'dataWrapper' feature.
    methods: The list of top level API methods.
    models: The list of API data models, both from the schema section of
        discovery and from anonymous objects defined in method definitions.
    parameters: The list of global method parameters (applicable to all
        methods)
    resources: The list of API resources
  """

  def __init__(self, discovery_doc, language=None):
    super(Api, self).__init__(discovery_doc, self,
                              wire_name=discovery_doc['name'])
    name = self.values['name']
    self._validator.ValidateApiName(name)
    if name != 'freebase':
      self._validator.ValidateApiVersion(self.values['version'])
    canonical_name = self.values.get('canonicalName', name)
    if not self.values.get('canonicalName'):
      self.values['canonicalName'] = canonical_name
    self._class_name = self.ToClassName(canonical_name, self)
    # Guard against language implementor not taking care of spaces
    self._class_name = self._class_name.replace(' ', '')
    self._NormalizeOwnerInformation()
    self._language = language
    self._template_dir = None
    self._surface_features = {}
    self._schemas = {}
    self._methods_by_name = {}
    self._all_methods = []
    self.SetTemplateValue('className', self._class_name)
    self.SetTemplateValue('versionNoDots',
                          self.values['version'].replace('.', '_'))
    self.SetTemplateValue('versionNoDash',
                          self.values['version'].replace('-', '_'))
    self.SetTemplateValue('dataWrapper',
                          'dataWrapper' in discovery_doc.get('features', []))
    self.values.setdefault('title', name)
    self.values.setdefault('exponentialBackoffDefault', False)
    if not self.values.get('revision'):
      self.values['revision'] = 'snapshot'
    self._NormalizeUrlComponents()

    # Information for variant subtypes, a dictionary of the format:
    #
    #  { 'wireName': {'discriminant': discriminant, 'value': value,
    #                 'schema': schema},
    #    ... }
    #
    # ... where wireName is the name of variant subtypes, discriminant
    # the field name of the discriminant, value the discriminant value
    # for this variant, and schema the base schema.
    #
    # This information cannot be stored in the referred schema at
    # reading time because at the time we read it from the base
    # schema, the referenced variant schemas may not yet be loaded. So
    # we first store it here, and after all schemas have been loaded,
    # update the schema template properties.
    self._variant_info = {}

    # Build data types and methods
    self._SetupModules()
    self.void_type = data_types.Void(self)
    self._BuildSchemaDefinitions()
    self._BuildResourceDefinitions()
    self.SetTemplateValue('resources', self._resources)

    # Make data models part of the api dictionary
    self.SetTemplateValue('models', self.ModelClasses())

    # Replace methods dict with Methods
    self._top_level_methods = []
    method_dict = self.values.get('methods') or {}
    for name in sorted(method_dict):
      self._top_level_methods.append(Method(self, name, method_dict[name]))
    self.SetTemplateValue('methods', self._top_level_methods)

    # Global parameters
    self._parameters = []
    param_dict = self.values.get('parameters') or {}
    for name in sorted(param_dict):
      parameter = Parameter(self, name, param_dict[name], self)
      self._parameters.append(parameter)
      if name == 'alt':
        self.SetTemplateValue('alt', parameter)
    self.SetTemplateValue('parameters', self._parameters)

    # Auth scopes
    self._authscopes = []
    if (self.values.get('auth') and
        self.values['auth'].get('oauth2') and
        self.values['auth']['oauth2'].get('scopes')):
      for value, auth_dict in sorted(
          self.values['auth']['oauth2']['scopes'].iteritems()):
        self._authscopes.append(AuthScope(self, value, auth_dict))
      self.SetTemplateValue('authscopes', self._authscopes)

  @property
  def all_schemas(self):
    """The dictionary of all the schema objects found in the API."""
    return self._schemas

  def _SetupModules(self):
    """Compute and set the module(s) which this API belongs under."""
    # The containing module is based on the owner information.
    path = self.values.get('modulePath') or self.values.get('packagePath')
    self._containing_module = template_objects.Module(
        package_path=path,
        owner_name=self.values.get('owner'),
        owner_domain=self.values.get('ownerDomain'))
    self.SetTemplateValue('containingModule', self._containing_module)

    # The API is a child of the containing_module
    base = self.values['name']
    # TODO(user): Introduce a breaking change where we always prefer
    # canonicalName.
    if self.values.get('packagePath'):
      # Lowercase the canonical name only for non-cloud-endpoints Google APIs.
      # This is to avoid breaking changes to existing Google-owned Cloud
      # Endpoints APIs.
      # BUG FIX: rootUrl may be absent; .get() would return None and
      # None.find() raises AttributeError.
      if (self.values.get('rootUrl') or '').find('.googleapis.com') > 0:
        base = self.values.get('canonicalName').lower() or base
      else:
        base = self.values.get('canonicalName') or base
    if self.values.get('version_module'):
      base = '%s/%s' % (base, self.values['versionNoDots'])
    self._module = template_objects.Module(package_path=base,
                                           parent=self._containing_module)
    self.SetTemplateValue('module', self._module)

    # The default module for data models defined by this API.
    self._model_module = template_objects.Module(package_path=None,
                                                 parent=self._module)

  def _BuildResourceDefinitions(self):
    """Loop over the resources in the discovery doc and build definitions."""
    self._resources = []
    def_dict = self.values.get('resources') or {}
    for name in sorted(def_dict):
      resource = Resource(self, name, def_dict[name], parent=self)
      self._resources.append(resource)

  def _BuildSchemaDefinitions(self):
    """Loop over the schemas in the discovery doc and build definitions."""
    schemas = self.values.get('schemas')
    if schemas:
      for name, def_dict in schemas.iteritems():
        # Upgrade the string format schema to a dict.
        if isinstance(def_dict, unicode):
          def_dict = json.loads(def_dict)
        self._schemas[name] = self.DataTypeFromJson(def_dict, name)

      # Late bind info for variant types, and mark the discriminant
      # field and value.
      for name, info in self._variant_info.iteritems():
        if name not in self._schemas:
          # The error will be reported elsewhere
          continue
        schema = self._schemas[name]
        for prop in schema.values.get('properties'):
          if prop.values['wireName'] == info['discriminant']:
            # Filter out the discriminant property as it is already
            # contained in the base type.
            schema.SetTemplateValue(
                'properties',
                [p for p in schema.values.get('properties') if p != prop])
            break
        else:
          # logging.warning: logging.warn is a deprecated alias.
          logging.warning("Variant schema '%s' for base schema '%s' "
                          "has not the expected discriminant property '%s'.",
                          name, info['schema'].values['wireName'],
                          info['discriminant'])
        schema.SetTemplateValue('superClass', info['schema'].class_name)
        # TODO(user): baseType is for backwards compatability only. It should
        # have always been a different name. When the old Java generators roll
        # off, remove it.
        schema.SetTemplateValue('baseType', info['schema'].class_name)
        schema.SetTemplateValue('discriminantValue', info['value'])

  def _NormalizeOwnerInformation(self):
    """Ensure that owner and ownerDomain are set to sane values."""
    owner_domain = self.get('ownerDomain', '')
    if not owner_domain:
      root_url = self.get('rootUrl')
      if root_url:
        owner_domain = urlparse.urlparse(root_url).hostname
        # Normalize google domains.
        if any(owner_domain.endswith(d) for d in _RECOGNIZED_GOOGLE_DOMAINS):
          owner_domain = 'google.com'
    if owner_domain:
      owner_domain = utilities.SanitizeDomain(owner_domain)
    else:
      owner_domain = _DEFAULT_OWNER_DOMAIN

    self.SetTemplateValue('ownerDomain', owner_domain)
    if not self.get('ownerName'):
      if owner_domain == _DEFAULT_OWNER_DOMAIN:
        owner_name = _DEFAULT_OWNER_NAME
      else:
        owner_name = owner_domain.replace('.', '_')
      self.SetTemplateValue('ownerName', owner_name)
    if not self.get('owner'):
      self.SetTemplateValue('owner', self['ownerName'].lower())

  def _NormalizeUrlComponents(self):
    """Sets template values concerning the path to the service.

    Sets rootUrl and servicePath from the values given or defaults based on
    what is available. Verifies them for safeness. The hierarchy of the
    possible inputs is:
      use rootUrl + servicePath as the best choice if it exists (v1new)
        or rpcPath
      or use baseUrl (v1)
      or use basePath (v1)
      or restBasePath (v0.3)
      or default to 'api/version'

    Raises:
      ValueError: if the values available are inconsistent or disallowed.
    """
    # If both rootUrl and servicePath exist, they equal what is in baseUrl.
    root_url = self.values.get('rootUrl')
    service_path = self.values.get('servicePath')
    rpc_path = self.values.get('rpcPath')
    if root_url:
      # oauth2 has a servicePath of "". This is wierd but OK for that API, but
      # it means we must explicitly check against None.
      if service_path is not None:
        base_url = root_url + service_path
      elif rpc_path:
        base_url = rpc_path
      else:
        raise ValueError('Neither servicePath nor rpcPath is defined.')
    else:
      base_url = self.values.get('baseUrl')

    # If we have a full path ('https://superman.appspot.com/kryptonite/hurts'),
    # then go with that, otherwise just use the various things which might
    # hint at the servicePath.
    best_path = (base_url
                 or self.values.get('basePath')
                 or self.values.get('restBasePath')
                 or '/%s/%s/' % (self.values['name'], self.values['version']))
    if best_path.find('..') >= 0:
      raise ValueError('api path must not contain ".." (%s)' % best_path)
    # And let urlparse to the grunt work of normalizing and parsing.
    url_parts = urlparse.urlparse(best_path)
    scheme = url_parts.scheme or 'https'
    service_host = url_parts.netloc or _DEFAULT_SERVICE_HOST
    base_path = url_parts.path
    if not root_url:
      self._api.SetTemplateValue('rootUrl', '%s://%s/' % (scheme, service_host))
    if service_path is None:
      self._api.SetTemplateValue('servicePath', base_path[1:])
    batch_path = self.values.get('batchPath')
    if batch_path:
      batch_path = batch_path.lstrip('/')
      if batch_path:
        self._api.SetTemplateValue('batchPath', batch_path)
      else:
        self._api.SetTemplateValue('batchPath', None)
    else:
      self._api.SetTemplateValue('batchPath', None)

    # Make sure template writers do not revert
    self._api.DeleteTemplateValue('baseUrl')
    self._api.DeleteTemplateValue('basePath')
    self._api.DeleteTemplateValue('serviceHost')

  def ModelClasses(self):
    """Return all the model classes."""
    # isinstance with a type tuple replaces the original chained 'or'.
    ret = set(
        s for s in self._schemas.itervalues()
        if isinstance(s, (Schema, data_types.MapDataType)))
    return sorted(ret, key=operator.attrgetter('class_name'))

  def TopLevelModelClasses(self):
    """Return the models which are not children of another model."""
    return [m for m in self.ModelClasses() if not m.parent]

  def DataTypeFromJson(self, type_dict, default_name, parent=None,
                       wire_name=None):
    """Returns a schema object represented by a JSON Schema dictionary.

    Evaluate a JSON schema dictionary and return an appropriate schema object.
    If a data type is defined in-line, then create the schema dynamically. If
    the schema is a $ref to another, return the previously created schema or
    a lazy reference.

    If the type_dict is None, a blank schema will be created.

    Args:
      type_dict: A dict of the form expected of a request or response member
        of a method description. See the Discovery specification for more.
      default_name: The unique name to give the schema if we have to create it.
      parent: The schema where I was referenced. If we cannot determine that
        this is a top level schema, set the parent to this.
      wire_name: The name which will identify objects of this type in data on
        the wire.

    Returns:
      A Schema object.
    """
    # new or not initialized, create a fresh one
    schema = Schema.Create(self, default_name, type_dict or {}, wire_name,
                           parent)
    # Only put it in our by-name list if it is a real object
    if isinstance(schema, (Schema, data_types.MapDataType)):
      # Use the path to the schema as a key. This means that an anonymous class
      # for the 'person' property under the schema 'Activity' will have the
      # unique name 'Activity.person', rather than 'ActivityPerson'.
      path = '.'.join(
          [a.values.get('wireName', '<anon>') for a in schema.full_path])
      _LOGGER.debug('DataTypeFromJson: add %s to cache', path)
      self._schemas[path] = schema
    return schema

  def AddMethod(self, method):
    """Add a new method to the set of all methods."""
    self._all_methods.append(method)
    self._methods_by_name[method.values['rpcMethod']] = method

  def MethodByName(self, method_name):
    """Find a method by name.

    Args:
      method_name: (str) the full RPC name of a method defined by this API.

    Returns:
      Method object or None if not found.
    """
    return self._methods_by_name.get(method_name)

  def SchemaByName(self, schema_name):
    """Find a schema by name.

    Args:
      schema_name: (str) name of a schema defined by this API.

    Returns:
      Schema object or None if not found.
    """
    return self._schemas.get(schema_name, None)

  def SetVariantInfo(self, ref, discriminant, value, schema):
    """Sets variant info for the given reference."""
    if ref in self._variant_info:
      # BUG FIX: the original referenced self._base_type, which is never
      # defined anywhere in this class; use the previously recorded base
      # schema from _variant_info instead.
      logging.warning("Base type of '%s' changed from '%s' to '%s'. "
                      "This is an indication that a variant schema is used "
                      "from multiple base schemas and may result in an "
                      "inconsistent model.",
                      ref, self._variant_info[ref]['schema'].wireName,
                      schema.wireName)
    self._variant_info[ref] = {'discriminant': discriminant, 'value': value,
                               'schema': schema}

  def VisitAll(self, func):
    """Visit all nodes of an API tree and apply a function to each.

    Walks a tree and calls a function on each element of it. This should be
    called after the API is fully loaded.

    Args:
      func: (function) Method to call on each object.
    """
    _LOGGER.debug('Applying function to all nodes')
    func(self._containing_module)
    func(self._module)
    func(self._model_module)
    for resource in self.values['resources']:
      self._VisitResource(resource, func)
    # Top level methods
    for method in self.values['methods']:
      self._VisitMethod(method, func)
    for parameter in self.values['parameters']:
      func(parameter)
      func(parameter.data_type)
    for schema in self._schemas.values():
      self._VisitSchema(schema, func)
    for scope in self.GetTemplateValue('authscopes') or []:
      func(scope)

  def _VisitMethod(self, method, func):
    """Visit a method, calling a function on every child.

    Args:
      method: (Method) The Method to visit.
      func: (function) Method to call on each object.
    """
    func(method)
    for parameter in method.parameters:
      func(parameter)

  def _VisitResource(self, resource, func):
    """Visit a resource tree, calling a function on every child.

    Calls down recursively to sub resources.

    Args:
      resource: (Resource) The Resource to visit.
      func: (function) Method to call on each object.
    """
    func(resource)
    for method in resource.values['methods']:
      self._VisitMethod(method, func)
    for r in resource.values['resources']:
      self._VisitResource(r, func)

  def _VisitSchema(self, schema, func):
    """Visit a schema tree, calling a function on every child.

    Args:
      schema: (Schema) The Schema to visit.
      func: (function) Method to call on each object.
    """
    func(schema)
    func(schema.module)
    for prop in schema.values.get('properties', []):
      func(prop)
    # BUG FIX: iterate the schema's own children. The original iterated
    # self.children, which would walk the Api's children once per schema
    # instead of the schema tree being visited.
    for child in schema.children:
      func(child)

  # Do not warn about unused arguments, pylint: disable=unused-argument
  def ToClassName(self, s, element, element_type=None):
    """Convert a name to a suitable class name in the target language.

    This default implementation camel cases the string, which is appropriate
    for some languages. Subclasses are encouraged to override this.

    Args:
      s: (str) A rosy name of data element.
      element: (object) The object we are making a class name for.
      element_type: (str) Deprecated. The kind of object we are making a class
        name for. E.g. resource, method, schema.
        TODO(user): replace type in favor of class of element, but that will
        require changing the place where we call ToClassName with no element.

    Returns:
      A name suitable for use as a class in the generator's target language.
    """
    return utilities.CamelCase(s).replace(' ', '')

  def NestedClassNameForProperty(self, name, schema):
    """Returns the class name of an object nested in a property."""
    # TODO(user): This functionality belongs in the language model, but
    # because of the way the api is bootstrapped, that isn't available when we
    # need it. When language model is available from the start, this should be
    # moved.
    return '%s%s' % (schema.class_name, utilities.CamelCase(name))

  @property
  def class_name(self):
    return self.values['className']

  @property
  def model_module(self):
    return self._model_module

  @property
  def containing_module(self):
    return self._containing_module

  @property
  def all_methods(self):
    """All the methods in the entire API."""
    return self._all_methods

  @property
  def top_level_methods(self):
    """All the methods at the API top level (not in a resource)."""
    return self._top_level_methods
class Resource(template_objects.CodeObject):
  """A REST resource: a named collection of Methods and sub-Resources."""

  def __init__(self, api, name, def_dict, parent=None):
    """Creates a Resource.

    Args:
      api: (Api) The Api which owns this Resource.
      name: (string) The discovery name of the Resource.
      def_dict: (dict) The discovery dictionary for this Resource.
      parent: (CodeObject) The resource containing this method, if any. Top
        level resources have the API as a parent.
    """
    super(Resource, self).__init__(def_dict, api, parent=parent, wire_name=name)
    self.ValidateName(name)
    self.SetTemplateValue(
        'className', api.ToClassName(name, self, element_type='resource'))
    # Build Method objects from the discovery 'methods' dict, in name order.
    method_dict = self.values.get('methods') or {}
    self._methods = [Method(api, method_name, method_dict[method_name],
                            parent=self)
                     for method_name in sorted(method_dict)]
    self.SetTemplateValue('methods', self._methods)
    # Recursively build the sub-resources, in name order.
    sub_resource_dict = self.values.get('resources') or {}
    self._resources = [Resource(api, sub_name, sub_resource_dict[sub_name],
                                parent=self)
                       for sub_name in sorted(sub_resource_dict)]
    self.SetTemplateValue('resources', self._resources)

  @property
  def methods(self):
    return self._methods

  @property
  def methods_dict(self):
    # Same mapping as the original dict comprehension, keyed by wireName.
    return dict((m['wireName'], m) for m in self._methods)
class AuthScope(template_objects.CodeObject):
  """The definition of an auth scope.

  An AuthScope defines these template values
    value: The scope url
    name: a sanitized version of the value, transformed so it generally can
      be used as an identifier in code. Deprecated, use constantName
    description: the description of the scope.

  It also provides a template property which can be used after a language
  binding is set.
    constantName: A transformation of the value so it is suitable as a constant
      name in the specific language.
  """
  GOOGLE_PREFIX = 'https://www.googleapis.com/auth/'
  HTTPS_PREFIX = 'https://'

  def __init__(self, api, value, def_dict):
    """Construct an auth scope.

    Args:
      api: (Api) The Api which owns this Property
      value: (string) The unique identifier of this scope, often a URL
      def_dict: (dict) The discovery dictionary for this auth scope.
    """
    super(AuthScope, self).__init__(def_dict, api, wire_name=value)
    self._module = api.module
    self.SetTemplateValue('value', value)
    # Trailing slashes would produce an empty lastPart below, so strip them.
    while value.endswith('/'):
      value = value[:-1]
    if 'description' not in self.values:
      self.SetTemplateValue('description', value)

    # Strip the common prefix to get a unique identifying name
    if value.startswith(AuthScope.GOOGLE_PREFIX):
      scope_id = value[len(AuthScope.GOOGLE_PREFIX):]
    elif value.startswith(AuthScope.HTTPS_PREFIX):
      # some common scopes are just a URL
      scope_id = value[len(AuthScope.HTTPS_PREFIX):]
    else:
      scope_id = value

    # We preserve the value stripped of the most common prefixes so we can
    # use it for building constantName in templates.
    self.SetTemplateValue('lastPart', scope_id)

    # replace all non alphanumeric with '_' to form 'name'
    name = ''.join([(c if c.isalnum() else '_') for c in scope_id.upper()])
    self.SetTemplateValue('name', name)

  @property
  def constantName(self):  # pylint: disable=g-bad-name
    """Overrides default behavior of constantName."""
    return self._language_model.ApplyPolicy('constant', self,
                                            self.values['lastPart'])
class Method(template_objects.CodeObject):
  """The definition of a method."""

  def __init__(self, api, name, def_dict, parent=None):
    """Construct a method.

    Methods in REST discovery are inside of a resource. Note that the method
    name and id are calculable from each other. id will always be equal to
    api_name.resource_name[.sub_resource...].method_name. At least it should
    be, as that is the transformation Discovery makes from the API definition,
    which is essentially a flat list of methods, into a hierarchy of resources.

    Args:
      api: (Api) The Api which owns this Method.
      name: (string) The discovery name of the Method.
      def_dict: (dict) The discovery dictionary for this Method.
      parent: (CodeObject) The resource containing this Method, if any.

    Raises:
      ApiException: If the httpMethod type is not one we know how to
          handle.
    """
    super(Method, self).__init__(def_dict, api, parent=(parent or api))
    # TODO(user): Fix java templates to name vs. wireName correctly. Then
    # change the __init__ to have wire_name=def_dict.get('id') or name
    # then eliminate this line.
    self.SetTemplateValue('wireName', name)
    self.ValidateName(name)
    class_name = api.ToClassName(name, self, element_type='method')
    if parent and class_name == parent.values['className']:
      # Some languages complain when the collection name is the same as the
      # method name.
      class_name = '%sRequest' % class_name
    # The name is the key of the dict defining use. The id field is what you
    # have to use to call the method via RPC. That is unique, name might not be.
    self.SetTemplateValue('name', name)
    # Fix up very old discovery, which does not have an id.
    if 'id' not in self.values:
      self.values['id'] = name
    self.SetTemplateValue('className', class_name)
    http_method = def_dict.get('httpMethod', 'POST').upper()
    self.SetTemplateValue('httpMethod', http_method)
    self.SetTemplateValue('rpcMethod',
                          def_dict.get('rpcMethod') or def_dict['id'])
    rest_path = def_dict.get('path') or def_dict.get('restPath')
    # TODO(user): if rest_path is not set, raise a good error and fail fast.
    self.SetTemplateValue('restPath', rest_path)

    # Figure out the input and output types and schemas for this method.
    expected_request = self.values.get('request')
    if expected_request:
      # TODO(user): RequestBody is only used if the schema is anonymous.
      # When we go to nested models, this could be a nested class off the
      # Method, making it unique without the silly name. Same for ResponseBody.
      request_schema = api.DataTypeFromJson(expected_request,
                                            '%sRequestContent' % name,
                                            parent=self)
      self.SetTemplateValue('requestType', request_schema)

    expected_response = def_dict.get('response') or def_dict.get('returns')
    if expected_response:
      response_schema = api.DataTypeFromJson(expected_response,
                                             '%sResponse' % name,
                                             parent=self)
      if self.values['wireName'] == 'get':
        response_schema.values['associatedResource'] = parent
      self.SetTemplateValue('responseType', response_schema)
    else:
      self.SetTemplateValue('responseType', api.void_type)
    # Make sure we can handle this method type and do any fixups.
    if http_method not in ['DELETE', 'GET', 'OPTIONS', 'PATCH', 'POST', 'PUT',
                           'PROPFIND', 'PROPPATCH', 'REPORT']:
      raise ApiException('Unknown HTTP method: %s' % http_method, def_dict)
    if http_method == 'GET':
      self.SetTemplateValue('requestType', None)

    # Replace parameters dict with Parameters. We try to order them by their
    # position in the request path so that the generated code can track the
    # more human readable definition, rather than the order of the parameters
    # in the discovery doc.
    order = self.values.get('parameterOrder', [])
    req_parameters = []
    opt_parameters = []
    # NOTE: the loop variables are named differently from the name/def_dict
    # constructor arguments so those arguments are not clobbered while
    # iterating. Use items() (not the Python 2-only iteritems()).
    for param_name, param_def in self.values.get('parameters', {}).items():
      param = Parameter(api, param_name, param_def, self)

      if param_name == 'alt':
        # Treat the alt parameter differently
        self.SetTemplateValue('alt', param)
        continue

      # Standard params are part of the generic request class
      # We want to push all parameters that aren't declared inside
      # parameterOrder after those that are.
      if param.values['wireName'] in order:
        req_parameters.append(param)
      else:
        # optional parameters are appended in the order they're declared.
        opt_parameters.append(param)
    # Sort by position in parameterOrder; a key function replaces the
    # Python 2-only cmp()-based comparator and yields the same ordering.
    req_parameters.sort(key=lambda p: order.index(p.values['wireName']))
    req_parameters.extend(opt_parameters)
    self.SetTemplateValue('parameters', req_parameters)

    self._InitMediaUpload(parent)
    self._InitPageable(api)
    api.AddMethod(self)

  def _InitMediaUpload(self, parent):
    """Extract template values from the discovery 'mediaUpload' section."""
    media_upload = self.values.get('mediaUpload')
    if media_upload:
      if parent:
        parent.SetTemplateValue('isMedia', True)
      # Get which MIME Media Ranges are accepted for media uploads to this
      # method.
      accepted_mime_ranges = media_upload.get('accept')
      self.SetTemplateValue('accepted_mime_ranges', accepted_mime_ranges)
      max_size = media_upload.get('maxSize')
      self.SetTemplateValue('max_size', max_size)
      self.SetTemplateValue('max_size_bytes',
                            convert_size.ConvertSize(max_size))
      # Find which upload protocols are supported.
      upload_protocols = media_upload['protocols']
      for upload_protocol in upload_protocols:
        self._SetUploadTemplateValues(
            upload_protocol, upload_protocols[upload_protocol])

  def _InitPageable(self, api):
    """Mark this method isPageable when its response has a nextPageToken
    property and it accepts an optional pageToken parameter."""
    response_type = self.values.get('responseType')
    if (response_type != api.void_type
        and self.FindCodeObjectWithWireName(
            response_type.values.get('properties'), 'nextPageToken')
        and self.FindCodeObjectWithWireName(
            self.optional_parameters, 'pageToken')):
      self.SetTemplateValue('isPageable', True)

  def _SetUploadTemplateValues(self, upload_protocol, protocol_dict):
    """Sets upload specific template values.

    Args:
      upload_protocol: (str) The name of the upload protocol. Eg: 'simple' or
        'resumable'.
      protocol_dict: (dict) The dictionary that corresponds to this upload
        protocol. It typically contains keys like 'path', 'multipart' etc.
    """
    self.SetTemplateValue('%s_upload_supported' % upload_protocol, True)
    upload_path = protocol_dict.get('path')
    if upload_path:
      self.SetTemplateValue('%s_upload_path' % upload_protocol, upload_path)
    self.SetTemplateValue('%s_upload_multipart' % upload_protocol,
                          protocol_dict.get('multipart', False))

  @property
  def media_upload_parameters(self):
    return self.values.get('mediaUpload')

  @property
  def parameters(self):
    return self.values['parameters']

  @property
  def optional_parameters(self):
    return [p for p in self.values['parameters'] if not p.required]

  @property
  def required_parameters(self):
    return [p for p in self.values['parameters'] if p.required]

  @property
  def path_parameters(self):
    return [p for p in self.values['parameters'] if p.location == 'path']

  @property
  def query_parameters(self):
    return [p for p in self.values['parameters'] if p.location == 'query']

  @staticmethod
  def FindCodeObjectWithWireName(things, wire_name):
    """Looks for an element having the given wire_name.

    Args:
      things: (array of DataType) List of parameters or properties to search.
      wire_name: (str) The wireName we are looking to find.

    Returns:
      None or element with the given wire_name.
    """
    if not things:
      return None
    for e in things:
      if e.values['wireName'] == wire_name:
        return e
    return None

  #
  # Expose some properties with the naming convention we use in templates
  #

  def optionalParameters(self):  # pylint: disable=g-bad-name
    return self.optional_parameters

  def requiredParameters(self):  # pylint: disable=g-bad-name
    return self.required_parameters

  def pathParameters(self):  # pylint: disable=g-bad-name
    return self.path_parameters

  def queryParameters(self):  # pylint: disable=g-bad-name
    return self.query_parameters
class Parameter(template_objects.CodeObject):
  """The definition of a method parameter."""

  def __init__(self, api, name, def_dict, method):
    super(Parameter, self).__init__(def_dict, api, parent=method,
                                    wire_name=name)
    self.ValidateName(name)
    self.schema = api
    # TODO(user): Deal with dots in names better. What we should do is:
    # For x.y, x.z create a little class X, with members y and z. Then
    # have the constructor method take an X.
    self._repeated = self.values.get('repeated', False)
    self._required = self.values.get('required', False)
    self._location = (self.values.get('location')
                      or self.values.get('restParameterType')
                      or 'query')
    self._data_type = self._ResolveDataType(api, name, def_dict, method)
    # Repeated parameters are wrapped in an array of the base type.
    if self._repeated:
      self._data_type = data_types.ArrayDataType(name, self._data_type,
                                                 parent=self)

  def _ResolveDataType(self, api, name, def_dict, method):
    """Determine the underlying (non-repeated) data type of this parameter."""
    # TODO(user): Why not just use Schema.Create here?
    ref = self.values.get('$ref')
    if ref:
      # A $ref points at a named schema, which may not be defined yet.
      return (api.SchemaByName(ref)
              or data_types.SchemaReference(ref, api))
    if def_dict.get('type') == 'array':
      return Schema.Create(api, name, def_dict, name, method)
    enum_values = self.values.get('enum')
    if enum_values:
      enum_type = data_types.Enum(def_dict,
                                  api,
                                  name,
                                  enum_values,
                                  self.values.get('enumDescriptions'),
                                  parent=method)
      self.SetTemplateValue('enumType', enum_type)
      return enum_type
    return data_types.PrimitiveDataType(def_dict, api, parent=self)

  @property
  def repeated(self):
    return self._repeated

  @property
  def required(self):
    return self._required

  @property
  def location(self):
    return self._location

  @property
  def code_type(self):
    return self._data_type.code_type

  @property
  def data_type(self):
    return self._data_type
|
Duikmeester/google-api-dotnet-client
|
ClientGenerator/src/googleapis/codegen/api.py
|
Python
|
apache-2.0
| 36,022
|
[
"VisIt"
] |
b181fcd38b1eedac829ab954aaba604c8e568ba154c6aa34034c70bd18acf3ac
|
#!/usr/bin/env python
from __future__ import print_function
from Bio import SeqIO
from Bio.Seq import Seq
from collections import OrderedDict
import sys
import argparse
# TODO:
# - create some logic to 'group' mutations that will be applied to the same sequence, to
# make all switches at once
# - This will also probably break the verbose transversion output so the maths will need replacing
# - Create the ability to support INDELS (will also require pairwise alignment so that
# hamming distances remain meaningful.
def get_args():
    """Parse command line arguments.

    Prints the full help and exits with status 1 when the script is invoked
    with no arguments at all.

    Returns:
        argparse.Namespace with attributes mutation_file, sequences,
        verbose and outfile.
    """
    desc = "Mutate fasta sequences based on a file of sequence mappings."
    epi = (
        "This script takes a mapfile of the form:\n"
        " SequenceID,A123B\n"
        " SequenceID,X456Y\n"
        "And performs substitutions/mutations. At present it only does one SNP per sequence.\n"
    )
    # The previous broad try/except swallowed any configuration error and then
    # dereferenced a possibly-unbound 'parser' on the way out; let genuine
    # errors propagate instead.
    parser = argparse.ArgumentParser(
        description=desc, epilog=epi, formatter_class=argparse.RawTextHelpFormatter
    )
    parser.add_argument(
        "mutation_file",
        action="store",
        help='File of mutation mappings like so: "SeqID,X123Y"',
    )
    parser.add_argument(
        "sequences",
        action="store",
        help="File of sequences to be mutated (fasta only).",
    )
    parser.add_argument(
        "-v",
        "--verbose",
        action="store_true",
        help="Verbose behaviour, printing parameters of the script.",
    )
    parser.add_argument(
        "-o",
        "--outfile",
        action="store",
        help="Output file for mutated sequences (default STDOUT).",
    )
    if len(sys.argv) == 1:
        parser.print_help(sys.stderr)
        sys.exit(1)
    return parser.parse_args()
class Mutation(object):
    """Hashable wrapper around a sequence ID.

    Each instance is a distinct dictionary key even when two instances carry
    the same name, which allows duplicate sequence IDs in the mutation map.
    """

    def __init__(self, name):
        self.name = name

    def __repr__(self):
        return "'{}'".format(self.name)

    def __str__(self):
        return self.name
def parse_mapfile(mapfile):
    """Return a dict of mapped mutations.

    File should resemble:
     SequenceID,A123B
     SequenceID2,X234Y
    Sequence IDs should exactly match the fasta headers, as parsed by BioPython.
    (">" symbols are optional)

    Raises:
        AssertionError: if a mutation string is not of the
            <letter><digits><letter> form.
    """
    mut_dict = OrderedDict()
    with open(mapfile, "r") as handle:
        for line in handle:
            # ">" prefixes are tolerated so fasta headers can be pasted as-is.
            # (Renamed from 'id' to avoid shadowing the builtin.)
            seq_id, change = line.lstrip(">").rstrip("\n").split(",")
            mut_dict[Mutation(seq_id)] = change
    # Validate the "<orig><position><new>" shape of every mapping up front.
    for k, v in mut_dict.items():
        assert v[0].isalpha(), (
            "First character of mutation map is not a valid letter. Got: %s" % v[0]
        )
        assert v[-1].isalpha(), (
            "Last character of mutation map is not a valid letter. Got: %s" % v[-1]
        )
        assert v[1:-1].isdigit(), (
            "Location string of mutation map is not a valid number. Got: %s" % v[1:-1]
        )
    return mut_dict
def morph(orig, loc, new, mutableseq, verbose):
    """Substitute a single residue in-place (polymorphism only at present).

    Args:
        orig: residue expected at position ``loc`` before the change.
        loc: 1-based position of the substitution.
        new: replacement residue.
        mutableseq: indexable, mutable sequence (e.g. a Bio MutableSeq).
        verbose: when True, report the change on stdout.

    Returns:
        The same mutable sequence object, modified in place.
    """
    # External coordinates are 1-based; indexing is 0-based.
    loc = loc - 1
    assert mutableseq[loc] == orig, (
        "Sequence does not match the mutation file for pre-exising residue. Expected %s , got %s "
        % (orig, mutableseq[loc])
    )
    if verbose is True:
        message = "Performing change: {} -> {}, at location: {} (0 based)"
        print(message.format(orig, new, loc))
    mutableseq[loc] = new
    return mutableseq
def hamming_distance(s1, s2):
    """Return the Hamming distance between equal-length sequences.

    Comparison is case-insensitive. Raises ValueError when the two
    sequences differ in length, where the distance is undefined.
    """
    if len(s1) != len(s2):
        raise ValueError("Undefined for sequences of unequal length")
    mismatches = 0
    for a, b in zip(s1.upper(), s2.upper()):
        if a != b:
            mismatches += 1
    return mismatches
def main():
    """Apply each mapped mutation to its matching fasta record.

    Reads the mutation map and fasta file named on the command line, applies
    one substitution per (record, mutation) pairing, and writes mutated
    records to --outfile and/or stdout.
    """
    args = get_args()
    if args.outfile is not None:
        ofh = open(args.outfile, "w")
    # Parse the mutation file (get mutations by sequence)
    mutations = parse_mapfile(args.mutation_file)
    if args.verbose is True:
        print("Got mutations:")
        print(mutations)
    # Iterate all sequences and make any substitutions necessary
    for record in SeqIO.parse(args.sequences, "fasta"):
        for k, v in mutations.items():
            # Fresh mutable copy per mutation: each mapped change is applied
            # against the original record, not stacked on earlier changes.
            mutable = record.seq.upper().tomutable()
            if k.name == record.id:
                # Mutation strings look like "A123B": original residue,
                # 1-based position, new residue.
                orig = v[0]
                new = v[-1]
                loc = int(v[1:-1])
                if args.verbose:
                    print(record.id)
                newseq = morph(orig, loc, new, mutable, args.verbose)
                if args.verbose is True:
                    print("Original: " + record.seq.upper())
                    print(
                        str((" " * int(loc - 2 + 11))) + "V"
                    )  # Padded string to show where switch happened (not sure how it'll deal with line wrapping
                    print("New: " + newseq)
                    print(
                        "Distance: "
                        + str(hamming_distance(str(record.seq), str(newseq)))
                    )
                if args.outfile is not None:
                    ofh.write(">%s_%s\n%s\n" % (record.id, v, newseq))
                # Without an explicit outfile the (non-verbose) result goes
                # to stdout; verbose mode already printed the details above.
                if args.verbose is False:
                    print(">%s_%s\n%s\n" % (record.id, v, newseq))
    if args.outfile is not None:
        ofh.close()


if __name__ == "__main__":
    main()
|
jrjhealey/bioinfo-tools
|
Mutate.py
|
Python
|
gpl-3.0
| 5,669
|
[
"Biopython"
] |
917d11bcb5182c790ce585111cb01c29bd9a2c58bc94311022726e2608fcf615
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
*********************************************
espressopp.interaction.AngularUniquePotential
*********************************************
This is an abstract class, only needed to be inherited from.
.. function:: espressopp.interaction.AngularUniquePotential.computeEnergy(\*args)
:param \*args:
:type \*args:
:rtype:
.. function:: espressopp.interaction.AngularUniquePotential.computeForce(\*args)
:param \*args:
:type \*args:
:rtype:
"""
# -*- coding: iso-8859-1 -*-
from espressopp import pmi
from espressopp import toReal3DFromVector
from _espressopp import interaction_AngularUniquePotential
# Python base class for angular potentials
class AngularUniquePotentialLocal(object):
    """Worker-side mixin dispatching energy/force calls to the C++ class."""

    def computeEnergy(self, *args):
        # Only evaluate on ranks that belong to the active PMI CPU group
        # (or everywhere when no PMI communicator is active).
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            if len(args) == 1:
                arg0 = args[0]
                # A single numeric argument is passed through unchanged
                # (presumably the bend angle theta -- see computeForce).
                if isinstance(arg0, float) or isinstance(arg0, int):
                    return self.cxxclass.computeEnergy(self, arg0)
            # Otherwise the arguments are converted to a Real3D vector.
            return self.cxxclass.computeEnergy(self, toReal3DFromVector(*args))

    def computeForce(self, *args):
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            if len(args) == 1:  # in case theta is passed
                arg0 = args[0]
                if isinstance(arg0, float) or isinstance(arg0, int):
                    return self.cxxclass.computeForce(self, arg0)
            return self.cxxclass.computeForce(self, toReal3DFromVector(*args))


# On the controller, expose a PMI proxy that forwards calls to the local
# objects on the workers and mirrors the 'cutoff' property.
if pmi.isController:
    class AngularUniquePotential(object):
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            localcall = [ 'computeForce', 'computeEnergy' ],
            pmiproperty = [ 'cutoff' ]
            )
|
kkreis/espressopp
|
src/interaction/AngularUniquePotential.py
|
Python
|
gpl-3.0
| 2,594
|
[
"ESPResSo"
] |
55b8ed140ba6d64cdaceafda881f3d47aacc8e6029222905843fc740b51bca32
|
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
from peacock.utils import Testing
from PyQt5.QtCore import Qt
from PyQt5 import QtWidgets
class Tests(Testing.PeacockTester):
    """End-to-end Peacock GUI tests: run an input file and image-diff the
    rendered Exodus result against a gold image."""
    qapp = QtWidgets.QApplication([])

    def setUp(self):
        super(Tests, self).setUp()
        # Remove stale screenshots so every run writes a fresh image.
        self.transient_png = "check_transient.png"
        Testing.remove_file(self.transient_png)
        self.diffusion_png = "check_diffusion.png"
        Testing.remove_file(self.diffusion_png)
        Testing.clean_files()

    def run_and_check(self, app, filename):
        """Run the currently loaded input file, render the result in the
        Exodus viewer, write it to `filename`, and fail on any gold diff."""
        exe_plugin = app.main_widget.tab_plugin.ExecuteTabPlugin
        exe_plugin.ExecuteOptionsPlugin.csv_checkbox.setCheckState(Qt.Checked)
        result_plugin = app.main_widget.tab_plugin.ExodusViewer
        app.main_widget.setTab(exe_plugin.tabName())
        exe_plugin.ExecuteOptionsPlugin.setWorkingDir(self.starting_directory)
        exe_plugin.ExecuteRunnerPlugin.runClicked()
        app.main_widget.setTab(result_plugin.tabName())
        vtkwin = result_plugin.currentWidget().VTKWindowPlugin
        Testing.set_window_size(vtkwin)
        # make sure we are finished
        while not self.finished:
            self.qapp.processEvents()
        Testing.process_events(t=5)
        app.main_widget.setTab(result_plugin.tabName())
        Testing.set_window_size(vtkwin)
        Testing.process_events(t=1)
        vtkwin.onWrite(filename)
        self.assertFalse(Testing.gold_diff(filename))
        return app

    def checkTransient(self):
        """Launch Peacock on the transient example and verify its render."""
        args = ["../../common/transient.i", Testing.find_moose_test_exe()]
        app = self.createPeacockApp(args)
        self.run_and_check(app, self.transient_png)
        return app

    def testRunResult(self):
        self.checkTransient()

    def testChangeResultFilename(self):
        # Re-use the running app with a different input file; the second run
        # must write (and gold-check) a second, differently named image.
        app = self.checkTransient()
        app.main_widget.tab_plugin.InputFileEditorWithMesh.setInputFile("../../common/simple_diffusion.i")
        self.run_and_check(app, self.diffusion_png)


if __name__ == '__main__':
    Testing.run_tests()
|
nuclear-wizard/moose
|
python/peacock/tests/peacock_app/check_result/test_check_result.py
|
Python
|
lgpl-2.1
| 2,327
|
[
"MOOSE"
] |
317bcc6995e24973a5db9adae9ffb53b3c10d0bc3b7b395d4b2649484674e6b0
|
# Copyright (c) 2009 Leif Johnson <leif@leifjohnson.net>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
'''Basic self-organizing map implementation.
This module contains the following Kohonen map implementations :
- Map. A standard rectangular N-dimensional Kohonen map.
- Gas. A vector quantizer that does not have a fixed topology. Neurons in a
gas are sorted for updates based on their distance from the cue, with the
sort order defining a topology for each cue presentation.
- GrowingGas. A Gas-based quantizer that can add neurons dynamically to
explain high-error areas of the input space.
- Filter. A wrapper over an underlying Map instance that maintains an explicit
estimate of the likelihood of each neuron.
These are tested using the kohonen_test.py file in this source distribution.
Because they have a grid topology, Map objects have some cool visualization
options, including Map.neuron_colormap and Map.distance_heatmap. These require
the Python Image Library.
There is also a collection of distance metrics :
- cosine_metric. A callable that calculates the cosine distance between a cue
and each neuron in a Kohonen Map.
- euclidean_metric. A callable that calculates the Euclidean distance between
a cue and each neuron in a Kohonen Map.
- manhattan_metric. A callable that calculates the Manhattan distance between
a cue and each neuron in a Kohonen Map.
There are also some small utility classes for modeling time series values :
- Timeseries. A callable that takes no arguments and returns a value that
might vary over time. Each call to the function will generally return a
unique value (though this is not necessary).
- ExponentialTimeseries. A callable that takes no arguments and returns an
exponentially decreasing (or increasing) series of values, dependent on
the parameters passed in at construction time.
- etc.
These distance functions and time series objects are generally used to regulate
the learning parameters in Kohonen Map objects.
'''
import numpy
from numpy import random as rng
def cosine_metric(x, y):
    '''Returns the cosine distance between x and y.'''
    # Cosine similarity is 1 for identical directions, 0 for orthogonal and
    # -1 for opposite vectors. Subtracting it from 1 inverts that scale and
    # keeps the result nonnegative: 0 identical, 1 orthogonal, 2 opposite.
    norm_x = numpy.sqrt((x * x).sum(axis=-1))
    norm_y = numpy.sqrt((y * y).sum(axis=-1))
    similarity = (x * y).sum(axis=-1) / norm_x / norm_y
    return 1 - similarity
def euclidean_metric(x, y):
    '''Returns the euclidean distance (L-2 norm) between x and y.'''
    diff = x - y
    return numpy.sqrt((diff * diff).sum(axis=-1))
def manhattan_metric(x, y):
    '''Returns the manhattan distance (L-1 norm) between x and y.'''
    return numpy.abs(x - y).sum(axis=-1)
def weighted_euclidean_metric(weights):
    '''Implements a standard euclidean distance with weighted dimensions.'''
    def calculate(x, y):
        # Each squared component difference is scaled by its weight before
        # the sum, so dimensions contribute unequally to the distance.
        diff = x - y
        return numpy.sqrt((diff * diff * weights).sum(axis=-1))
    return calculate
class Timeseries(object):
    '''Represents some sort of value that changes over time.'''

    def __init__(self):
        '''Start the clock at tick zero.'''
        super(Timeseries, self).__init__()
        self.ticks = 0

    def __call__(self):
        '''Return the current tick count, then advance the clock.'''
        current = self.ticks
        self.ticks = current + 1
        return current

    def reset(self):
        '''Reset the time for this series.'''
        self.ticks = 0
class ConstantTimeseries(Timeseries):
    '''This timeseries just returns a constant value.'''

    def __init__(self, k=1):
        '''Set up this series with a constant value k.'''
        # Bug fix: initialize the base class so self.ticks exists even before
        # reset() is ever called, keeping the Timeseries contract intact.
        super(ConstantTimeseries, self).__init__()
        self.k = k

    def __call__(self):
        '''Return the constant.'''
        return self.k
class ExponentialTimeseries(Timeseries):
    '''Represents an exponential decay process.'''

    def __init__(self, rate=-1, initial=1, final=0):
        '''Create a new exponential timeseries object.'''
        super(ExponentialTimeseries, self).__init__()
        self.rate = rate
        self.final = final
        # Store the amplitude (span between start and asymptote); the
        # exponential term decays this span toward `final`.
        self.initial = initial - final

    def __call__(self):
        '''Return an exponentially-decreasing series of values.'''
        # Advance the base-class clock, then evaluate at the new tick.
        super(ExponentialTimeseries, self).__call__()
        decay = numpy.exp(self.rate * self.ticks)
        return self.final + self.initial * decay
class Parameters(object):
    '''We are plain old data holding self-organizing map parameters.'''

    def __init__(self,
                 dimension=None,
                 shape=None,
                 metric=None,
                 learning_rate=None,
                 neighborhood_size=None,
                 noise_variance=None):
        '''Hold standard configuration values for self-organizing maps.

        dimension: The length of a neuron vector in a Map or a Gas. Required.
        shape: The shape of the neuron topology in whatever Map or Gas we are
          building. Required.
        metric: Distance metric used to compare cues with neurons in the map.
          Defaults to euclidean_metric.
        learning_rate: Time course of the learning rate. May be a callable
          taking no arguments and returning a float; a plain number, used as
          a constant series; or None, which selects the default
          ExponentialTimeseries(-1e-3, 1, 0.2).
        neighborhood_size: Time course of the neighborhood size, with the
          same conventions as learning_rate. None selects
          ExponentialTimeseries(-1e-3, max(shape), 1).
        noise_variance: Time course of the noise variance, with the same
          conventions. None leaves noise disabled in the created Maps.
        '''
        assert dimension is not None
        assert shape is not None
        self.dimension = dimension
        self.shape = shape
        self.metric = metric or euclidean_metric

        def as_series(value, make_default):
            # Numbers become constant series, None gets the documented
            # default, and callables pass through untouched.
            if isinstance(value, (float, int)):
                return ConstantTimeseries(value)
            if value is None:
                return make_default()
            return value

        self.learning_rate = as_series(
            learning_rate, lambda: ExponentialTimeseries(-1e-3, 1, 0.2))
        self.neighborhood_size = as_series(
            neighborhood_size,
            lambda: ExponentialTimeseries(-1e-3, max(shape), 1))
        self.noise_variance = as_series(noise_variance, lambda: None)
def heatmap(raw, axes=(0, 1), lower=None, upper=None):
    '''Create a heat map image from the given raw matrix.

    raw: An array of values to use for the image pixels.
    axes: The axes in the array that we want to preserve for the final image.
      All other axes will be summed away.
    lower: If given, clip values in the matrix to this lower limit. If not
      given, raw.min() will be used.
    upper: If given, clip values in the matrix to this upper limit. If not
      given, raw.max() will be used.

    Returns an annotated Image object (as returned from _image).
    '''
    assert len(axes) == 2
    # Collapse every axis that is not displayed, iterating from the highest
    # axis downwards so lower axis numbers stay valid after each sum.
    for ax in xrange(len(raw.shape) - 1, -1, -1):
        if ax in axes:
            continue
        raw = raw.sum(axis=ax)
    # Pad the bounds slightly (direction depends on sign) so extreme values
    # do not land exactly on the edge of the pixel range.
    l = lower
    if l is None:
        l = raw.min()
    l *= l < 0 and 1.01 or 0.99
    u = upper
    if u is None:
        u = raw.max() * 1.01
    # NOTE(review): when upper is None the maximum is scaled by 1.01 here and
    # again on the next line -- possibly unintended; compare with colormap().
    u *= u > 0 and 1.01 or 0.99
    return _image(raw, l, u)
def colormap(raw, axes=(0, 1, 2), layers=(0, 1, 2)):
    '''Create an RGB image using the given layers of a 3D raw values matrix.

    raw: An array of raw values to use for the image.
    axes: The axes in the array that we want to preserve for the final image.
      All other axes will be summed away.
    layers: The indices of the third preserved axis that we should use for the
      red, green, and blue channels in the output image.

    Raw values will be scaled along each layer to lie in [lower, upper], where
    lower (upper) is the global lower (upper) bound of all values in each of the
    raw layers.

    Returns an Image object, as in the heatmap() function.
    '''
    assert len(axes) == len(layers) == 3
    # Sum away all non-preserved axes, highest axis first so the indices of
    # the remaining axes are unaffected by each reduction.
    for ax in xrange(len(raw.shape) - 1, -1, -1):
        if ax in axes:
            continue
        raw = raw.sum(axis=ax)
    # Find the global value range across the selected channel layers so all
    # three channels share a single scale.
    u = -numpy.inf
    l = numpy.inf
    for i in layers:
        v = raw[:, :, i]
        l = min(l, v.min())
        u = max(u, v.max())
    # Pad the bounds slightly (direction depends on sign) so extremes do not
    # saturate the pixel range.
    l *= l < 0 and 1.01 or 0.99
    u *= u > 0 and 1.01 or 0.99
    return _image(raw[:, :, layers], l, u, 'RGB')
def _image(values, lower, upper, format='L'):
    '''Create a PIL image using the given 2D array of values.

    Pixel values in the range [lower, upper] are scaled linearly to [0, 1]
    before being converted to 8-bit pixels.

    Returns an Image object annotated with the lower_bound and upper_bound
    that were used to scale the values into pixels.
    '''
    from PIL import Image
    scaled = (values - lower) / (upper - lower)
    pixels = numpy.array(256 * scaled, numpy.uint8)
    image = Image.fromarray(pixels, format)
    image.lower_bound = lower
    image.upper_bound = upper
    return image
def _zeros(shape, dtype='d'):
    '''Get a blank (all-zero) matrix with a certain shape.

    shape: Tuple of dimension sizes for the new array.
    dtype: numpy dtype code for the elements; defaults to 'd' (float64).
    '''
    return numpy.zeros(shape, dtype=dtype)
def itershape(shape):
    '''Given a shape tuple, iterate over all indices in that shape.

    Yields every coordinate tuple in row-major (C) order. An empty shape
    yields the single empty tuple, mirroring a 0-dimensional array.
    '''
    if not shape:
        yield ()
        return
    # range() behaves identically to the Python 2-only xrange() here and
    # keeps this helper portable to Python 3.
    for i in range(shape[0]):
        for rest in itershape(shape[1:]):
            yield (i, ) + rest
def argsample(pdf, n=1):
    '''Return n indices drawn proportionally from a discrete mass vector.'''
    assert (pdf >= 0).all(), 'cannot sample from %r!' % pdf
    # Inverse-CDF sampling: draw uniforms on [0, total mass) and locate each
    # draw's bucket in the cumulative distribution via binary search.
    cumulative = pdf.cumsum()
    draws = rng.uniform(0, cumulative[-1], n)
    return numpy.searchsorted(cumulative, draws)
def sample(pdf, n=1):
    '''Return n samples drawn proportionally from a discrete mass vector.'''
    assert len(pdf.shape) == 1
    # Draw indices by mass, then look up the corresponding values.
    indices = argsample(pdf, n)
    return pdf[indices]
class Map(object):
'''Basic implementation of a rectangular N-dimensional self-organizing map.
A Self-Organizing or Kohonen Map (henceforth just Map) is a group of
lightweight processing units called neurons, which are here implemented as
vectors of real numbers. Neurons in a Map are arranged in a specific
topology, so that a given neuron is connected to a small, specific subset of
the overall neurons in the Map. In addition, the Map uses a distance metric
(e.g., Euclidean distance) for computing similarity between neurons and cue
vectors, as described below.
The Map accepts cues---vectors of real numbers---as inputs. In standard Map
usage, cues represent some data point of interest. Normally applications of
Maps use input vectors like the activation patterns for an array of sensors,
term frequency vectors for a document, etc. Cues are stored in the Map as
follows : First, a "winner" neuron w is chosen from the Map, and, second,
the neurons in the Map topologically near w are altered so that they become
closer to the cue. Each of these steps is described briefly below.
For the first step, the Map computes the distance between the cue and each
of the Map neurons using its metric. The neuron closest to the cue under
this metric is declared the "winner" w. Alternatively, the winner can be
selected probabilistically based on the overall distance landscape.
Next, the Map alters the neurons in the neighborhood of w, normally using
some function of the difference between the cue and the neuron being
modified. The weight of the alteration decreases exponentially as the
topological distance from w increases. The learning rule for a neuron n is
n += eta * exp(-d**2 / sigma**2) * (c - n)
where eta is the learning rate, sigma is called the neighborhood size, d is
the topological distance between n and w, and c is the cue vector being
stored in the map. Eta and sigma normally decrease in value over time, to
take advantage of the empirical machine learning benefits of simulated
annealing.
The storage mechanism in a Map has the effect of grouping cues with similar
characteristics into similar areas of the Map. Because the winner---and its
neighborhood---are altered to look more like the cues that they capture, the
winner for a given cue will tend to win similar inputs in the future. This
tends to cluster similar Map inputs, and can lead to interesting data
organization patterns.
'''
    def __init__(self, params):
        '''Initialize this Map.

        params: A parameters object supplying the map shape, the neuron
        dimension, a distance metric, and the (typically time-varying)
        learning rate, neighborhood size and noise variance.
        '''
        self._shape = params.shape
        self.dimension = params.dimension
        # one weight vector of length `dimension` per neuron in the lattice.
        self.neurons = _zeros(self.shape + (self.dimension, ))
        self._metric = params.metric
        self._learning_rate = params.learning_rate
        self._neighborhood_size = params.neighborhood_size
        self._noise_variance = params.noise_variance
        # precompute a neighborhood mask for performing fast storage updates.
        # this mask is the same dimensionality as self.shape, but twice the size
        # along each axis. the maximum value in the mask is 1, occurring in the
        # center. values decrease in a gaussian fashion from the center.
        S = tuple(2 * size - 1 for size in self.shape)
        self._neighborhood_mask = _zeros(S)
        for coords in itershape(S):
            # squared distance (in lattice coordinates) from the mask center.
            z = 0
            for axis, offset in enumerate(coords):
                d = offset + 1 - self.shape[axis]
                z += d * d
            # gaussian falloff; the neighborhood-size scaling is applied
            # later (in weights()) by exponentiating this mask.
            self._neighborhood_mask[coords] = numpy.exp(-z / 2)
    @property
    def shape(self):
        '''The lattice shape of the map: a tuple of axis sizes.'''
        return self._shape
    def neuron(self, coords):
        '''Get the current state of a specific neuron.

        coords: A tuple of lattice coordinates identifying the neuron.

        Returns the neuron's current weight vector.
        '''
        return self.neurons[coords]
def reset(self, f=None):
'''Reset the neurons and timeseries in the Map.
f: A callable that takes a neuron coordinate and returns a value for
that neuron. Defaults to random values from the standard normal.
'''
self._learning_rate.reset()
self._neighborhood_size.reset()
if f is None:
self.neurons = rng.randn(*self.neurons.shape)
else:
for z in itershape(self.shape):
self.neurons[z] = f(z)
def weights(self, distances):
'''Get an array of learning weights to use for storing a cue.'''
i = self.smallest(distances)
z = []
for axis, size in enumerate(self.flat_to_coords(i)):
offset = self.shape[axis] - size - 1
z.append(slice(offset, offset + self.shape[axis]))
sigma = self._neighborhood_size()
return self._neighborhood_mask[z] ** (1.0 / sigma / sigma)
def distances(self, cue):
'''Get the distance of each neuron in the Map to a particular cue.'''
z = numpy.resize(cue, self.neurons.shape)
return self._metric(z, self.neurons)
def flat_to_coords(self, i):
'''Given a flattened index, convert it to a coordinate tuple.'''
coords = []
for limit in reversed(self.shape[1:]):
i, j = divmod(i, limit)
coords.append(j)
coords.append(i)
return tuple(reversed(coords))
    def winner(self, cue):
        '''Get the coordinates of the most similar neuron to the given cue.

        Returns a flat index; use flat_to_coords to convert this to a neuron
        index.
        '''
        return self.smallest(self.distances(cue))
def sample(self, n):
'''Get a sample of n neuron coordinates from the map.
The returned values will be flat indices ; use flat_to_coords to convert
them to neuron indices.
'''
return rng.randint(0, self.neurons.size / self.dimension - 1, n)
    def smallest(self, distances):
        '''Get the index of the smallest element in the given distances array.

        distances: An array of per-neuron values, shaped like the map.

        Returns a flat index; use flat_to_coords to convert this to a neuron
        index.
        '''
        assert distances.shape == self.shape
        return distances.argmin()
    def learn(self, cue, weights=None, distances=None):
        '''Add a new cue vector to the Map, moving neurons as needed.

        cue: The cue vector to store.
        weights: Optional precomputed learning weights (shaped like the map);
        derived from `distances` when omitted.
        distances: Optional precomputed per-neuron distances; derived from
        the cue when omitted.
        '''
        if weights is None:
            if distances is None:
                distances = self.distances(cue)
            weights = self.weights(distances)
        assert weights.shape == self.shape
        # NOTE(review): this reshape mutates a caller-supplied weights array
        # in place -- confirm callers do not reuse it afterwards.
        weights.shape += (1, )
        # move every neuron toward the cue, scaled per-neuron by the weight
        # mask and globally by the (typically decaying) learning rate.
        delta = numpy.resize(cue, self.neurons.shape) - self.neurons
        eta = self._learning_rate()
        self.neurons += eta * weights * delta
        if self._noise_variance:
            # jitter all neurons with zero-mean gaussian noise.
            self.neurons += rng.normal(
                0, self._noise_variance(), self.neurons.shape)
    def neuron_heatmap(self, axes=(0, 1), lower=None, upper=None):
        '''Return an image representation of this Map.

        axes: Which two lattice axes to render.
        lower, upper: Optional bounds passed through to heatmap().
        '''
        return heatmap(self.neurons, axes, lower, upper)
    def distance_heatmap(self, cue, axes=(0, 1), lower=None, upper=None):
        '''Return an image representation of the distance to a cue.

        cue: The cue vector to measure distances against.
        axes: Which two lattice axes to render.
        lower, upper: Optional bounds passed through to heatmap().
        '''
        return heatmap(self.distances(cue), axes, lower, upper)
class Gas(Map):
    '''A neural Gas is a topologically unordered collection of neurons.

    Instead of a fixed lattice, learning orders the neurons by their
    distance to each presented cue and updates them in that sorted order,
    with exponentially decreasing weight for neurons further down the
    ordering.
    '''
    def __init__(self, params):
        '''Initialize this Gas. A Gas must have a 1D shape.'''
        super(Gas, self).__init__(params)
        assert len(params.shape) == 1
        self.N = params.shape[0]
    def weights(self, distances):
        '''Learning weights based on the cue-distance ordering.'''
        # this is slightly different from a traditional gas, which uses a
        # linear negative exponential for update weights:
        #
        #   return numpy.exp(-distances.argsort() / sigma)
        #
        # quadratic weights more closely match the standard kohonen behavior.
        # NOTE(review): argsort() yields sorted *indices*, not ranks; confirm
        # that rank-based weighting (argsort().argsort()) was not intended.
        scaled = distances.argsort() / self._neighborhood_size()
        return numpy.exp(-scaled * scaled)
def _array_without(a, i):
'''Remove the ith row and column from 2x2 array a.'''
if i == 0:
return a[1:, 1:].copy()
if i == a.shape[0] - 1:
return a[:-1, :-1].copy()
return numpy.hstack((numpy.vstack((a[:i, :i], a[i+1:, :i])),
numpy.vstack((a[:i, i+1:], a[i+1:, i+1:]))))
def _vector_without(v, i):
'''Remove the ith element from vector v.'''
if i == 0:
return v[1:].copy()
if i == v.shape[0] - 1:
return v[:-1].copy()
return numpy.concatenate((v[:i], v[i+1:]))
class GrowingGasParameters(Parameters):
    '''Parameters for Growing Neural Gases.

    growth_interval: Insert a new neuron after this many stored cues.
    max_connection_age: Prune edges older than this many updates.
    error_decay: Multiplicative decay applied to all neuron errors per cue.
    neighbor_error_decay: Decay applied to the two neurons that a newly
    inserted neuron is placed between.
    '''
    def __init__(self,
                 growth_interval=2,
                 max_connection_age=5,
                 error_decay=0.99,
                 neighbor_error_decay=0.99,
                 **kwargs):
        '''Store growth-specific values; other kwargs go to Parameters.'''
        super(GrowingGasParameters, self).__init__(**kwargs)
        self.growth_interval = growth_interval
        self.max_connection_age = max_connection_age
        self.error_decay = error_decay
        self.neighbor_error_decay = neighbor_error_decay
class GrowingGas(Gas):
    '''A Growing Neural Gas uses a variable number of variable-topology neurons.

    In essence, a GNG is similar to a standard Gas, but there is additional
    logic in this class for adding new neurons to better explain areas of the
    sample space that currently have large error.
    '''
    def __init__(self, params):
        '''Initialize a new Growing Gas with parameters.

        params: A GrowingGasParameters object; in addition to the base Gas
        parameters it supplies the growth interval, maximum connection age,
        and the two error-decay rates.
        '''
        # start with just two neurons; the gas grows from there.
        self._size = 2
        super(GrowingGas, self).__init__(params)
        self._growth_interval = params.growth_interval
        self._max_connection_age = params.max_connection_age
        self._error_decay = params.error_decay
        self._neighbor_error_decay = params.neighbor_error_decay
        # accumulated squared error per neuron; drives where to grow.
        self._errors = _zeros(self.shape)
        # symmetric edge-age matrix of 16-bit ints: -1 means "not connected",
        # values >= 0 are the age of the connection.
        self._connections = _zeros((self._size, self._size), '=i2') - 1
        self._cue_count = 0
    @property
    def shape(self):
        '''The current (1D) shape of the gas: (number of neurons, ).'''
        return (self._size, )
    def neighbors(self, i):
        '''Return the connection-age row for neuron i (-1 where unconnected).'''
        return self._connections[i]
    def _connect(self, a, b):
        # create (or refresh) the edge between a and b with age 0.
        self._set_connection(a, b, 0)
    def _age_connection(self, a, b):
        # increment the age of the edge between a and b.
        self._set_connection(a, b, self._connections[a, b] + 1)
    def _disconnect(self, a, b):
        # remove the edge between a and b.
        self._set_connection(a, b, -1)
    def _set_connection(self, a, b, age):
        # keep the age matrix symmetric.
        self._connections[a, b] = self._connections[b, a] = age
    def learn(self, cue, weights=None, distances=None):
        '''Store a cue in the gas.

        NOTE(review): the weights and distances parameters are accepted for
        interface compatibility with Map.learn but are ignored here --
        distances are always recomputed from the cue.
        '''
        distances = self.distances(cue)
        # find the two closest neurons. connect them. add error to the winner.
        w = distances.argmin()
        d = distances[w]
        self._errors[w] += d * d
        # mask out the winner so the next argmin finds the runner-up.
        distances[w] = 1 + distances.max()
        self._connect(w, distances.argmin())
        # move the winner and all of its neighbors toward the cue.
        eta = self._learning_rate()
        def adjust(i):
            self.neurons[i] += eta * (cue - self.neurons[i])
        adjust(w)
        for j, age in enumerate(self.neighbors(w)):
            if 0 <= age < 65535: # prevent 16-bit age counter overflow
                adjust(j)
                self._age_connection(w, j)
        # add noise.
        if self._noise_variance:
            self.neurons += rng.normal(
                0, self._noise_variance(), self.neurons.shape)
        # manipulate the gas topology by pruning and growing as needed.
        self._prune()
        self._cue_count += 1
        if (self._cue_count % self._growth_interval == 0 and
            self._size < self.N):
            self._grow()
        # decrease unit error.
        self._errors *= self._error_decay
    def _prune(self):
        '''Remove old connections, and prune any disconnected neurons.'''
        mask = numpy.where(self._connections > self._max_connection_age)
        # never shrink below the two initial neurons.
        if self._size == 2 or len(mask[0]) == 0:
            return
        # remove connections older than max_connection_age (set to -1).
        self._connections[mask] = -1
        # remove neurons that were disconnected after removing connections.
        indices, = numpy.where((self._connections < 0).all(axis=0))
        # iterate from the highest index down so earlier removals do not
        # shift the indices still waiting to be removed.
        for i in indices[::-1]:
            self.neurons = _vector_without(self.neurons, i)
            self._errors = _vector_without(self._errors, i)
            self._connections = _array_without(self._connections, i)
            self._size -= 1
    def _grow(self):
        '''Add a single neuron between two high-error neurons.'''
        # identify the neuron with max error, and its max error neighbor.
        q = self._errors.argmax()
        f = (self._errors * (self.neighbors(q) >= 0)).argmax()
        r = self._size
        # allocate a new neurons array.
        neurons = _zeros((r + 1, self.dimension))
        neurons[:r] = self.neurons
        self.neurons = neurons
        # place the new neuron halfway between q and f.
        self.neurons[r] = (self.neurons[q] + self.neurons[f]) / 2
        # insert new node between old two nodes.
        self._disconnect(q, f)
        conn = _zeros((r + 1, r + 1), '=i2') - 1
        conn[:r, :r] = self._connections
        self._connections = conn
        # _set_connection is symmetric, so the second call is redundant
        # but harmless.
        self._connect(q, r)
        self._connect(r, q)
        # update error for the new and old neurons.
        self._errors = numpy.concatenate((self._errors, [0]))
        self._errors[f] *= self._neighbor_error_decay
        self._errors[q] *= self._neighbor_error_decay
        self._errors[r] = (self._errors[f] + self._errors[q]) / 2
        self._size += 1
class Filter(object):
    '''A Filter is an estimate of the probability density of the inputs.

    It wraps a Map (or Gas) and maintains an activity distribution over the
    map's neurons that is updated as cues are stored.
    '''
    def __init__(self, map, history=None):
        '''Initialize this Filter with an underlying Map implementation.

        map: The underlying Map (or Gas) that stores the cues.
        history: A callable that returns values in the open interval (0, 1).
        These values determine how much new cues influence the activation
        state of the Filter.
        A 0 value would mean that no history is preserved (i.e. each new cue
        stored in the Filter completely determines the activity of the Filter)
        while a 1 value would mean that new cues have no impact on the
        activity of the Filter (i.e. the initial activity is the only activity
        that is ever used).
        '''
        self.map = map
        # start from a uniform activity distribution over the map.
        self.activity = _zeros(self.map.shape) + 1
        self.activity /= self.activity.sum()
        # conditional expression instead of the fragile `and/or` idiom.
        self._history = ConstantTimeseries(0.7) if history is None else history
    @property
    def shape(self):
        '''The lattice shape of the underlying map.'''
        return self.map.shape
    def neuron(self, coords):
        '''Get the current state of a specific neuron in the underlying map.'''
        return self.map.neuron(coords)
    def reset(self, f=None):
        '''Reset the underlying map; see Map.reset.'''
        return self.map.reset(f=f)
    def distances(self, cue):
        '''Get the distance of each neuron in the map to a particular cue.'''
        return self.map.distances(cue)
    def flat_to_coords(self, i):
        '''Convert a flat neuron index into a coordinate tuple.'''
        return self.map.flat_to_coords(i)
    def winner(self, cue):
        '''Get the flat index of the neuron closest to the cue.'''
        return self.map.winner(cue)
    def smallest(self, distances):
        '''Get the flat index of the smallest entry in distances.'''
        return self.map.smallest(distances)
    def weights(self, distances):
        '''Learning weights, damped by how active each neuron already is.'''
        return self.map.weights(distances) * (1 - self.activity)
    def sample(self, n):
        '''Draw n flat neuron indices according to the activity density.'''
        return argsample(self.activity, n)
    def learn(self, cue, **kwargs):
        '''Store a cue in the map and update the activity estimate.'''
        d = self.distances(cue)
        # reuse the distances computed above instead of recomputing them
        # (the original called self.distances(cue) a second time here).
        p = numpy.exp(-d.argsort())
        l = self._history()
        # exponential moving average toward the normalized update.
        self.activity = l * self.activity + (1 - l) * p / p.sum()
        self.map.learn(cue, **kwargs)
|
mxlian/neuror
|
dataProcessing/kohonen.py
|
Python
|
gpl-2.0
| 27,576
|
[
"Gaussian",
"NEURON"
] |
dac46cc1fe12aa3bb646b85d78dafccdc21836e550d97c16d3e2cb7d42fea918
|
from pymol.cgo import *
from pymol import cmd
from pymol.vfont import plain
# Build a CGO object drawing the coordinate axes as cylinders of length 10
# and radius 0.2: X red, Y green, Z blue, each fading from white at the
# origin to its axis colour at the tip.
obj = []
for tip, end_color in (
        ((10., 0., 0.), (1.0, 0.0, 0.)),
        ((0., 10., 0.), (0., 1.0, 0.)),
        ((0., 0., 10.), (0., 0.0, 1.0))):
    obj += [CYLINDER, 0., 0., 0.]
    obj += list(tip)
    obj += [0.2, 1.0, 1.0, 1.0]
    obj += list(end_color)
# label the origin and the tip of each axis
cyl_text(obj, plain, [-5., -5., -1], 'Origin', 0.20, axes=[[3.0, 0.0, 0.0], [0.0, 3.0, 0.0], [0.0, 0.0, 3.0]])
cyl_text(obj, plain, [10., 0., 0.], 'X', 0.20, axes=[[3.0, 0.0, 0.0], [0.0, 3.0, 0.0], [0.0, 0.0, 3.0]])
cyl_text(obj, plain, [0., 10., 0.], 'Y', 0.20, axes=[[3.0, 0.0, 0.0], [0.0, 3.0, 0.0], [0.0, 0.0, 3.0]])
cyl_text(obj, plain, [0., 0., 10.], 'Z', 0.20, axes=[[3.0, 0.0, 0.0], [0.0, 3.0, 0.0], [0.0, 0.0, 3.0]])
# hand the finished CGO object to PyMOL under the name 'axes'
cmd.load_cgo(obj, 'axes')
|
weitzner/Dotfiles
|
pymol_scripts/axes_cyl.py
|
Python
|
mit
| 853
|
[
"PyMOL"
] |
a887831096b4e17eb5e693b1ebd7c8a886f94059f644e93715b1f8292f73fdda
|
import argparse
import csv
from decimal import Decimal
import distutils.spawn
import glob
import json
import logging
import os
import shutil
from subprocess import call
import sys
import tempfile
import numpy
import pandas
import pydicom
from radiomics import featureextractor
scriptlogger = logging.getLogger('radiomics.dicom')
scriptlogger.setLevel(logging.DEBUG)
def dcmImageToNRRD(inputDICOMImageDir, tempDir):
    '''Reconstruct a DICOM image series into an NRRD volume via plastimatch.

    The result is cached in tempDir; an existing image.nrrd is reused.
    Returns the path of the NRRD file.
    '''
    outputImage = os.path.join(tempDir, "image.nrrd")
    if not os.path.isfile(outputImage):
        convertCmd = ['plastimatch', 'convert',
                      '--input', inputDICOMImageDir,
                      '--output-img', outputImage]
        call(convertCmd)
    return outputImage
def dcmImageToNIfTI(inputDICOMImageDir, tempDir):
    '''Reconstruct a DICOM image series into a NIfTI volume via dcm2niix.

    dcm2niix writes volume.nii (plus a volume.json sidecar) next to the
    DICOM files; the volume is moved into tempDir and the sidecar removed.
    An existing tempDir/volume.nii is reused. Returns the volume path.
    '''
    destScanNIfTIFile = os.path.join(tempDir, "volume.nii")
    if not os.path.isfile(destScanNIfTIFile):
        producedVolume = os.path.join(inputDICOMImageDir, "volume.nii")
        sidecarJSON = os.path.join(inputDICOMImageDir, "volume.json")
        call(['dcm2niix', "-m", "y", "-f", "volume", inputDICOMImageDir])
        shutil.move(producedVolume, destScanNIfTIFile)
        if os.path.isfile(sidecarJSON):
            os.remove(sidecarJSON)
    return destScanNIfTIFile
def dcmSEGToNRRDs(inputSEG, tempDir):
    '''Split a DICOM SEG object into per-segment NRRD files.

    Individual segments are extracted into tempDir/Segments as NRRD files
    named with consecutive numbers starting from 1. Returns the list of
    produced file paths.
    '''
    segmentsDir = os.path.join(tempDir, 'Segments')
    if not os.path.isdir(segmentsDir):
        os.mkdir(segmentsDir)
    call(['segimage2itkimage', '--inputDICOM', inputSEG,
          '--outputDirectory', segmentsDir])
    return glob.glob(os.path.join(segmentsDir, "*nrrd"))
def writeSR(inputSEG, inputJSON, inputDICOMImageDir, outputSR):
    '''Generate a DICOM TID 1500 SR object with dcmqi's tid1500writer.

    inputSEG: Path to the SEG object (its directory supplies the
    composite context).
    inputJSON: Path to the dcmqi metadata JSON.
    inputDICOMImageDir: Directory with the source image series.
    outputSR: Path of the SR file to write.
    '''
    cmd = ['tid1500writer',
           '--inputImageLibraryDirectory', inputDICOMImageDir,
           '--inputCompositeContextDirectory', os.path.split(inputSEG)[0],
           '--inputMetadata', inputJSON,
           '--outputDICOM', outputSR]
    scriptlogger.debug("Writing SR with: " + str(cmd))
    call(cmd)
def getCTSeriesUID(imageDICOMDir):
    '''Read the SeriesInstanceUID from the first file in a DICOM directory.

    NOTE(review): assumes every file in the directory is a readable DICOM
    file -- confirm for all callers.
    '''
    firstFile = os.listdir(imageDICOMDir)[0]
    dataset = pydicom.read_file(os.path.join(imageDICOMDir, firstFile))
    return dataset.SeriesInstanceUID
class DICOMMetadataAccessor:
    '''Convenience wrapper exposing common attributes of a DICOM dataset.'''
    def __init__(self, dcmFileName):
        # parse the DICOM file once and keep the dataset around.
        self.dcm = pydicom.read_file(dcmFileName)
    def getInstanceUID(self):
        '''SOP Instance UID of the wrapped object.'''
        return self.dcm.SOPInstanceUID
    def getSeriesDescription(self):
        '''Series Description of the wrapped object.'''
        return self.dcm.SeriesDescription
    def getSeriesInstanceUID(self):
        '''Series Instance UID of the wrapped object.'''
        return self.dcm.SeriesInstanceUID
class SEGMetadataAccessor(DICOMMetadataAccessor):
    '''Metadata accessor specialized for DICOM Segmentation (SEG) objects.

    Raises ValueError at construction time when the wrapped object is not
    a Segmentation. All per-segment getters return None when the requested
    attribute (or segment) is absent.
    '''
    # SOP Class UID identifying the DICOM Segmentation storage class.
    SEGMENTATION_SOP_CLASS_UID = '1.2.840.10008.5.1.4.1.1.66.4'
    def __init__(self, segFileName):
        DICOMMetadataAccessor.__init__(self, segFileName)
        if self.dcm.SOPClassUID != self.SEGMENTATION_SOP_CLASS_UID:
            raise ValueError(
                "SEGMetadataAccessor: DICOM object is not Segmentation!")
    def _segmentAttribute(self, segmentNumber, accessor):
        # shared guard for per-segment lookups: a missing attribute or an
        # out-of-range segment yields None. Narrow exceptions replace the
        # original blanket `except BaseException` blocks.
        try:
            return accessor(self.dcm.SegmentSequence[segmentNumber])
        except (AttributeError, IndexError, KeyError, TypeError):
            return None
    def getSegmentSegmentationTypeCode(self, segmentNumber):
        '''Segmented property type code item for the segment, or None.'''
        return self._segmentAttribute(
            segmentNumber, lambda s: s.SegmentedPropertyTypeCodeSequence[0])
    def getTrackingIdentifier(self, segmentNumber):
        '''Tracking identifier of the segment, or None.'''
        return self._segmentAttribute(
            segmentNumber, lambda s: s.TrackingIdentifier)
    def getTrackingUniqueIdentifier(self, segmentNumber):
        '''Tracking UID of the segment, or None.'''
        return self._segmentAttribute(
            segmentNumber, lambda s: s.TrackingUID)
    def getSegmentDescription(self, segmentNumber):
        '''Segment description (Type 3, may be absent), or None.'''
        return self._segmentAttribute(
            segmentNumber, lambda s: s.SegmentDescription)
    def getSegmentAnatomicLocationCode(self, segmentNumber):
        '''Anatomic region code item for the segment, or None.'''
        return self._segmentAttribute(
            segmentNumber, lambda s: s.AnatomicRegionSequence[0])
class CodedValue:
    '''A DICOM coded-value triplet: value, coding scheme designator, meaning.'''
    def __init__(self, value, scheme, meaning):
        self.codeValue = value
        self.codingSchemeDesignator = scheme
        self.codeMeaning = meaning
    def getDict(self):
        '''Return the triplet as a dcmqi-style metadata dictionary.'''
        return {
            "CodeValue": self.codeValue,
            "CodingSchemeDesignator": self.codingSchemeDesignator,
            "CodeMeaning": self.codeMeaning,
        }
class TID1500Metadata:
    '''Builder for DICOM TID 1500 structured-report metadata JSON.

    Accumulates measurement groups and individual measurements, mapping
    pyradiomics feature names to IBSI codes when the dictionary knows them,
    and serializes the result in the format consumed by dcmqi's
    tid1500writer.
    '''
    def __init__(
            self,
            featuresDictFile,
            seriesDescription="Radiomics features"):
        '''featuresDictFile: TSV mapping pyradiomics features to IBSI codes.
        seriesDescription: SeriesDescription for the resulting SR series.
        '''
        self.featuresDict = self.readDictionary(featuresDictFile)
        self.m = {}
        self.m["@schema"] = "https://raw.githubusercontent.com/qiicr/dcmqi/master/doc/schemas/sr-tid1500-schema.json#"
        self.m["SeriesDescription"] = seriesDescription
        self.m["Measurements"] = []
        self.measurementGroupCount = 0
    def addMeasurementGroup(self):
        '''Open a new measurement group referencing the next segment.'''
        self.measurementGroupCount = self.measurementGroupCount + 1
        measurementsGroup = {}
        measurementsGroup["measurementItems"] = []
        measurementsGroup["ReferencedSegment"] = self.measurementGroupCount
        self.m["Measurements"].append(measurementsGroup)
    @staticmethod
    def readDictionary(featuresDictFile):
        '''Load the pyradiomics-to-IBSI feature dictionary (a TSV file).'''
        return pandas.read_csv(featuresDictFile, sep='\t', low_memory=False)
    @staticmethod
    def makeHash(text, length=6):
        '''Deterministic short code for text: base64 of its SHA-1 digest.'''
        from base64 import b64encode
        from hashlib import sha1
        return b64encode(sha1(str.encode(text)).digest()).decode('ascii')[:length]
    def makePrivateCode(self, text):
        '''Build a coded value in the private 99PYRADIOMICS scheme.'''
        return CodedValue(self.makeHash(text), "99PYRADIOMICS", text).getDict()
    # returns None if prefix is not recognized, otherwise returns a tuple of
    # (measurementModifiers, derivationParameters)
    def prefix2codes(self, prefix):
        '''Translate a pyradiomics image-type prefix into SR code items.'''
        modifiers = []
        derivationParameters = []
        import re
        imageTransformationConcept = self.makePrivateCode(
            "Image transformation")
        if re.match("original", prefix):
            pass
        # "squareroot" must be checked before "square": re.match anchors
        # only at the start of the string, so "square" also matches
        # "squareroot" (previously misclassifying it).
        elif re.match("squareroot", prefix):
            modifiers.append({"modifier": imageTransformationConcept,
                              "modifierValue": self.makePrivateCode("Square root transformation")})
        elif re.match("square", prefix):
            modifiers.append({"modifier": imageTransformationConcept,
                              "modifierValue": self.makePrivateCode("Square transformation")})
        elif re.match("logarithm", prefix):
            modifiers.append({"modifier": imageTransformationConcept,
                              "modifierValue": self.makePrivateCode("Logarithm transformation")})
        elif re.match("gradient", prefix):
            modifiers.append({"modifier": imageTransformationConcept,
                              "modifierValue": self.makePrivateCode("Gradient magnitude transformation")})
        elif re.match("exponential", prefix):
            # (a duplicated, unreachable copy of this branch was removed)
            modifiers.append({"modifier": imageTransformationConcept,
                              "modifierValue": self.makePrivateCode("Exponent transformation")})
        # parameterized processing operations
        elif re.match(r"wavelet-([HL]{2,3})", prefix):
            match = re.match(r"wavelet-([HL]{2,3})", prefix)
            modifiers.append({"modifier": imageTransformationConcept,
                              "modifierValue": self.makePrivateCode("Wavelet transformation")})
            modifiers.append({"modifier": self.makePrivateCode("Wavelet sub-band"),
                              "modifierValue": self.makePrivateCode(match.group(1))})
        elif re.match(r"log-sigma-([\d]+)-([\d]+)-([a-z]+)", prefix):
            match = re.match(r"log-sigma-([\d]+)-([\d]+)-([a-z]+)", prefix)
            units = match.group(3)
            if units == "mm":
                unitsCode = CodedValue("mm", "UCUM", "millimeters").getDict()
            elif units == "cm":
                # fixed: the UCUM code for centimeters is "cm" (was "mm")
                unitsCode = CodedValue("cm", "UCUM", "centimeters").getDict()
            else:
                unitsCode = self.makePrivateCode(units)
            modifiers.append({"modifier": imageTransformationConcept,
                              "modifierValue": self.makePrivateCode("Laplacian of Gaussian")})
            derivationParameters.append({"derivationParameter": self.makePrivateCode("Kernel size"),
                                         "derivationParameterValue": str('.'.join([match.group(1), match.group(2)])),
                                         "derivationParameterUnits": unitsCode})
        else:
            # unknown prefix
            return None
        return modifiers, derivationParameters
    # adds a single measurement to the last measurement group
    def addMeasurement(
            self,
            value,
            quantityCode,
            unitsCode=CodedValue(
                "1",
                "UCUM",
                "no units"
            )):
        '''Append one measurement (value plus codes) to the last group.

        Silently returns when no group exists, the prefix is unknown, or
        the value is NaN.
        '''
        if self.measurementGroupCount < 1:
            scriptlogger.error(
                "Cannot add measurement - no measurement groups initialized!")
            return
        (preprocessing, featureClass, featureName) = quantityCode.split('_')
        mpTuple = self.prefix2codes(preprocessing)
        if mpTuple is None:
            return
        measurement = {}
        classSubset = self.featuresDict[self.featuresDict['pyradiomics_feature_class'] == featureClass]
        featureTuple = classSubset[classSubset['pyradiomics_feature_name'] == featureName]
        if featureTuple.empty:
            # unknown feature: fall back to a private hashed code.
            codeMeaning = featureClass + "_" + featureName
            code = self.makeHash(codeMeaning)
            measurement["quantity"] = CodedValue(
                code, "99PYRADIOMICS", codeMeaning).getDict()
            if len(code) > 16:
                scriptlogger.error("Sorry, the code value is too long!")
                sys.exit()
        else:
            measurement["quantity"] = CodedValue(
                featureTuple["IBSI_code"].values[0],
                "IBSI",
                featureTuple["IBSI_meaning"].values[0]).getDict()
        try:
            if numpy.isnan(value):
                scriptlogger.info(
                    "Skipping NaN value for feature %s",
                    quantityCode)
                return
        except Exception as e:
            scriptlogger.error("Exception checking for NaN: %s %s", str(e), value)
            return
        try:
            measurement["value"] = '%E' % Decimal(float(value))
        except Exception as e:
            scriptlogger.error("Exception formatting %s as Decimal: %s", value, str(e))
            scriptlogger.error("type of value: %s", type(value))
        measurement["units"] = unitsCode.getDict()
        self.m["Measurements"][-1]["measurementItems"].append(measurement)
        if len(mpTuple[0]):
            measurement["measurementModifiers"] = [m for m in mpTuple[0]]
        if len(mpTuple[1]):
            measurement["measurementDerivationParameters"] = [
                d for d in mpTuple[1]]
        return
    def saveJSONToFile(self, fileName):
        '''Serialize the accumulated metadata as pretty-printed JSON.'''
        with open(fileName, 'w') as f:
            json.dump(self.m, f, indent=2, sort_keys=True)
def main():
    '''Command-line entry point.

    Reconstructs the DICOM image series, converts each segment of the input
    DICOM SEG object to NRRD, runs pyradiomics on every segment, and stores
    the resulting features as a DICOM TID 1500 SR object in the output
    directory. Returns -1 on failure.
    '''
    parser = argparse.ArgumentParser(
        usage="%(prog)s --input-image <dir> --input-seg <name> --output-sr <name>\n\n"
        + "Warning: This is a \"pyradiomics labs\" script, which means it is an experimental feature in development!\n"
        + "The intent of this helper script is to enable pyradiomics feature extraction directly from/to DICOM data.\n"
        + "The segmentation defining the region of interest must be defined as a DICOM Segmentation image.\n"
        + "Support for DICOM Radiotherapy Structure Sets for defining region of interest may be added in the future.\n")
    parser.add_argument(
        '--input-image-dir',
        dest="inputDICOMImageDir",
        metavar="<folder>",
        help="Path to the directory with the input DICOM series."
        + " It is expected that a single series is corresponding to a single scalar volume.",
        required=True)
    parser.add_argument(
        '--input-seg-file',
        dest="inputSEG",
        metavar="<file>",
        help="Path to the input segmentation defined as a DICOM Segmentation object.",
        required=True)
    parser.add_argument(
        '--output-dir',
        dest="outputDir",
        metavar="<folder>",
        help="Path to the directory for saving the resulting DICOM file.",
        required=True)
    parser.add_argument(
        '--parameters',
        dest="parameters",
        metavar="<parameters>",
        help="Pyradiomics feature extractor positional arguments")
    parser.add_argument(
        '--temp-dir',
        dest="tempDir",
        metavar="<folder>",
        help="Path to the directory to store intermediate results")
    parser.add_argument(
        '--features-dict',
        dest="featuresDict",
        metavar="<file>",
        help="Path to the dictionary mapping pyradiomics feature names to the IBSI defined features.")
    parser.add_argument(
        '--volume-reconstructor',
        dest="volumeReconstructor",
        metavar="<plastimatch or dcm2niix>",
        help="Choose the tool to be used for reconstructing image volume from the DICOM image series."
        + " Allowed options are plastimatch or dcm2niix (should be installed on the system). plastimatch"
        + " will be used by default.",
        choices=['plastimatch', 'dcm2niix'],
        default="plastimatch")
    parser.add_argument(
        '--geometry-tolerance',
        dest="geometryTolerance",
        metavar="<number>",
        help="Decimal number setting geometry tolerance for the extractor. Defaults to 1e-6.",
        default=1e-6)
    parser.add_argument(
        '--correct-mask',
        dest="correctMask",
        help="Boolean flag argument. If present, PyRadiomics will attempt to resample the mask to the image"
        + " geometry if the mask check fails.",
        action='store_true',
        default=False)
    args = parser.parse_args()
    # with tempfile.mkdtemp() as tempDir:
    tempDir = args.tempDir
    if not tempDir:
        tempDir = tempfile.mkdtemp()
    scriptlogger.info("Temporary directory: " + tempDir)
    # convert input DICOM series into a scalar volume
    # plastimatch fails for prostate DWI Data! Need to report
    # Selection of the optimal volume reconstructor may depend
    # on the specific dataset!
    if args.volumeReconstructor == "plastimatch":
        scriptlogger.info(
            "Using Plastimatch for DICOM image volume reconstruction.")
        inputImage = dcmImageToNRRD(args.inputDICOMImageDir, tempDir)
    else:
        scriptlogger.info(
            "Using dcm2niix for DICOM image volume reconstruction.")
        inputImage = dcmImageToNIfTI(args.inputDICOMImageDir, tempDir)
    # convert segmentation into segments
    inputSegments = dcmSEGToNRRDs(args.inputSEG, tempDir)
    if len(inputSegments) == 0:
        scriptlogger.error("No segments found. Cannot compute features.")
        return -1
    featuresDir = os.path.join(tempDir, 'Features')
    if not os.path.isdir(featuresDir):
        os.mkdir(featuresDir)
    # initialize Metadata for the individual features
    # TODO: to be replaced with direct mapping in the pyradiomics feature functions
    # see https://github.com/Radiomics/pyradiomics/issues/435
    if args.featuresDict is not None:
        featuresDictPath = args.featuresDict
    else:
        # NOTE: falls back to a path relative to the current working directory.
        featuresDictPath = "featuresDict.tsv"
    if not os.path.exists(featuresDictPath):
        scriptlogger.error(
            "Features dictionary file %s is not found!",
            featuresDictPath)
        return -1
    m = TID1500Metadata(featuresDictPath)
    # find a valid DICOM file in the input image DICOM directory
    dicomImage = None
    for f in os.listdir(args.inputDICOMImageDir):
        try:
            pydicom.read_file(os.path.join(args.inputDICOMImageDir, f))
            dicomImage = os.path.join(args.inputDICOMImageDir, f)
            break
        except BaseException:
            continue
    if dicomImage is None:
        scriptlogger.error(
            "Input DICOM image directory does not seem to contain any valid DICOM files!")
        return -1
    # 'f' still names the file found by the search above (same as dicomImage).
    imageMetadataAccessor = DICOMMetadataAccessor(
        os.path.join(args.inputDICOMImageDir, f))
    segmentationMetadataAccessor = SEGMetadataAccessor(args.inputSEG)
    pyradiomicsVersion = None
    for inputSegment in inputSegments:
        scriptlogger.debug("Processing segmentation file %s", inputSegment)
        segmentNumber = os.path.split(inputSegment)[-1].split('.')[0]
        try:
            scriptlogger.debug("Initializing extractor")
            extractionSettings = {
                "geometryTolerance": float(args.geometryTolerance),
                "correctMask": bool(args.correctMask)
            }
            params = []
            if args.parameters is not None:
                params = [args.parameters]
            extractor = featureextractor.RadiomicsFeatureExtractor(*params, **extractionSettings)
        except Exception:
            scriptlogger.error(
                'Initialization of the pyradimics feature extraction failed.', exc_info=True)
            return -1
        featureVector = extractor.execute(
            inputImage, inputSegment, int(segmentNumber))
        if len(featureVector) == 0:
            scriptlogger.error("No features extracted!")
            return -1
        featuresFileName = os.path.join(featuresDir, segmentNumber + '.csv')
        scriptlogger.debug("Will save features as %s", featuresFileName)
        writer = csv.writer(open(featuresFileName, 'w'), lineterminator='\n')
        headers = list(featureVector.keys())
        writer.writerow(headers)
        row = []
        for h in headers:
            row.append(featureVector.get(h, ""))
        writer.writerow(row)
        scriptlogger.debug("Initializing TID 1500 Measurement groups.")
        m.addMeasurementGroup()
        includedFeatureVectorItems = 0
        for featureName in featureVector.keys():
            if featureName == 'diagnostics_Versions_PyRadiomics':
                pyradiomicsVersion = featureVector[featureName]
                continue
            featureValue = featureVector[featureName]
            featureNameSplit = featureName.split('_')
            if len(featureNameSplit) < 3:
                scriptlogger.warning(
                    "Skipping unrecognized feature %s",
                    featureName)
                continue
            includedFeatureVectorItems += 1
            m.addMeasurement(featureValue, featureName)
        # fixed: the log arguments were swapped -- the message reads
        # "<included> of <total>".
        scriptlogger.debug(
            "%d of %d total features included in the TID 1500 Measurement group.",
            includedFeatureVectorItems, len(featureVector))
        # initialize metadata common to all measurements
        scriptlogger.debug("Populating common metadata")
        m.m["Measurements"][-1]["SourceSeriesForImageSegmentation"] = imageMetadataAccessor.getSeriesInstanceUID()
        m.m["Measurements"][-1]["segmentationSOPInstanceUID"] = segmentationMetadataAccessor.getInstanceUID()
        # TODO: populate those from SEG SegmentationType / AnatomicLocation
        segmentationType = segmentationMetadataAccessor.getSegmentSegmentationTypeCode(
            int(segmentNumber) - 1)
        if segmentationType:
            m.m["Measurements"][-1]["Finding"] = CodedValue(segmentationType.CodeValue,
                                                            segmentationType.CodingSchemeDesignator,
                                                            segmentationType.CodeMeaning).getDict()
        segTrackingIdentifier = segmentationMetadataAccessor.getTrackingIdentifier(int(segmentNumber)-1)
        segTrackingUniqueIdentifier = segmentationMetadataAccessor.getTrackingUniqueIdentifier(int(segmentNumber)-1)
        if segTrackingIdentifier:
            m.m["Measurements"][-1]["TrackingIdentifier"] = segTrackingIdentifier
        else:
            # NOTE(review): raises AttributeError when segmentationType is
            # also absent -- confirm inputs always carry one of the two.
            m.m["Measurements"][-1]["TrackingIdentifier"] = segmentationType.CodeMeaning
        segmentDescription = segmentationMetadataAccessor.getSegmentDescription(int(segmentNumber)-1)
        # SegmentDescription is Type 3, and can be missing
        if segmentDescription is not None:
            m.m["Measurements"][-1]["TrackingIdentifier"] = segmentationType.CodeMeaning+" - "+segmentDescription
        if segTrackingUniqueIdentifier:
            m.m["Measurements"][-1]["TrackingUniqueIdentifier"] = segTrackingUniqueIdentifier
        segmentationLocation = segmentationMetadataAccessor.getSegmentAnatomicLocationCode(
            int(segmentNumber) - 1)
        if segmentationLocation:
            m.m["Measurements"][-1]["FindingSite"] = CodedValue(segmentationLocation.CodeValue,
                                                                segmentationLocation.CodingSchemeDesignator,
                                                                segmentationLocation.CodeMeaning).getDict()
        # AlgorithmIdentification
        m.m["Measurements"][-1]["measurementAlgorithmIdentification"] = {}
        m.m["Measurements"][-1]["measurementAlgorithmIdentification"]["AlgorithmName"] = "https://github.com/Radiomics/pyradiomics"
        m.m["Measurements"][-1]["measurementAlgorithmIdentification"]["AlgorithmVersion"] = pyradiomicsVersion
        m.m["Measurements"][-1]["measurementAlgorithmIdentification"]["AlgorithmParameters"] = [json.dumps(extractor.settings)]
    m.m["observerContext"] = {}
    m.m["observerContext"]["ObserverType"] = "DEVICE"
    m.m["observerContext"]["DeviceObserverName"] = "pyradiomics"
    m.m["observerContext"]["DeviceObserverModelName"] = pyradiomicsVersion
    m.m["compositeContext"] = [os.path.split(args.inputSEG)[-1]]
    m.m["imageLibrary"] = [os.path.split(f)[-1]
                           for f in os.listdir(args.inputDICOMImageDir)]
    m.m["SeriesDescription"] = segmentationMetadataAccessor.getSeriesDescription() + ' - pyradiomics features'
    scriptlogger.debug("Saving temporary files for DICOM SR writer.")
    dcmqiMetadataFile = os.path.join(featuresDir, "dcmqi_sr.json")
    outputSRTempFile = os.path.join(featuresDir, "sr.dcm")
    m.saveJSONToFile(dcmqiMetadataFile)
    scriptlogger.debug("Generating DICOM SR.")
    writeSR(
        args.inputSEG,
        dcmqiMetadataFile,
        args.inputDICOMImageDir,
        outputSRTempFile)
    # copy to the dest directory under UID as a name
    try:
        dcm = pydicom.read_file(outputSRTempFile)
        shutil.move(
            outputSRTempFile,
            os.path.join(args.outputDir, dcm.SOPInstanceUID + ".dcm"))
    except BaseException:
        scriptlogger.error("Failed to move output SR!")
if __name__ == "__main__":
    # verify the external converter tools are reachable before doing any work.
    exeFound = {}
    for exe in ['tid1500writer', 'dcm2niix', 'plastimatch', 'segimage2itkimage']:
        exeFound[exe] = distutils.spawn.find_executable(exe) is not None
    haveDcmqi = exeFound['tid1500writer'] and exeFound['segimage2itkimage']
    haveReconstructor = exeFound['plastimatch'] or exeFound['dcm2niix']
    if not haveDcmqi or not haveReconstructor:
        scriptlogger.error(
            "Dependency converter(s) not found in the path.")
        scriptlogger.error(
            "dcmqi (https://github.com/qiicr/dcmqi), and dcm2niix (https://github.com/rordenlab/dcm2niix/releases)")
        scriptlogger.error("or Plastimatch (http://plastimatch.org/)")
        scriptlogger.error(
            "need to be installed and available in the PATH for using this converter script.")
        sys.exit()
    main()
|
Radiomics/pyradiomics
|
labs/pyradiomics-dcm/pyradiomics-dcm.py
|
Python
|
bsd-3-clause
| 21,948
|
[
"Gaussian"
] |
e59853ee62ddfc2b8284d5c604808a4fc9679532961e7688f57fe012bfb6880e
|
#!/usr/bin/python
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2022 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import sys
import os
import glob
import re
# Optional first CLI argument: prefix path to the driver source tree used by
# the glob patterns below. When present, the current working directory is
# also placed on sys.path so driver modules can be imported.
DriverPath = ''
InsertPath = '/../../../'
if (len(sys.argv) == 2):
    DriverPath = sys.argv[1] + '/'
    sys.path.insert(0, os.path.abspath(os.getcwd()))
def pts(category, pyfile):
    """Announce on stdout that *pyfile* of kind *category* is being documented."""
    message = 'Auto-documenting {} file {}'.format(category, pyfile)
    print(message)
# Main driver modules in psi4/driver: emit one automodule section per file,
# excluding helper modules and members documented elsewhere.
fdriver = open('source/autodoc_driver.rst', 'w')
fdriver.write('\n.. include:: /autodoc_abbr_options_c.rst\n\n')
fdriver.write('.. _`sec:driver`:\n\n')
fdriver.write('=============\n')
fdriver.write('Python Driver\n')
fdriver.write('=============\n\n')
for pyfile in glob.glob(DriverPath + '../../psi4/driver/*.py'):
    filename = os.path.split(pyfile)[1]
    basename = os.path.splitext(filename)[0]
    div = '=' * len(basename)
    if basename not in ['inpsight', 'pep8', 'diatomic_fits', 'pyparsing', 'computation_cache']:
        pts('driver', basename)
        fdriver.write(basename + '\n')
        fdriver.write(div + '\n\n')
        fdriver.write('.. automodule:: %s\n' % (basename))
        fdriver.write(' :members:\n')
        fdriver.write(' :undoc-members:\n')
        if basename == 'driver':
            fdriver.write(' :exclude-members: energy, optimize, opt, frequency, frequencies, freq, property, prop, molden, gdma, fchk, gradient, hessian\n')
        elif basename == 'wrapper_database':
            fdriver.write(' :exclude-members: db, database\n')
        elif basename == 'driver_nbody':
            fdriver.write(' :exclude-members: nbody_gufunc\n')
        elif basename == 'driver_cbs':
            fdriver.write(' :exclude-members: cbs, complete_basis_set, xtpl_highest_1,\n')
            fdriver.write(' scf_xtpl_helgaker_3, scf_xtpl_helgaker_2, corl_xtpl_helgaker_2, n_body\n')
        # elif basename == 'physconst':
        #     fdriver.write('\n.. literalinclude:: %sdriver/%s\n' % (IncludePath, filename))
        elif basename == 'diatomic':
            fdriver.write(' :exclude-members: anharmonicity\n')
        # elif basename == 'interface_dftd3':
        #     fdriver.write(' :exclude-members: run_dftd3\n')
        # elif basename == 'interface_cfour':
        #     fdriver.write(' :exclude-members: run_cfour\n')
        elif basename == 'aliases':
            fdriver.write(' :exclude-members: sherrill_gold_standard, allen_focal_point\n')
        elif basename == 'p4util':
            fdriver.write(' :exclude-members: oeprop, cubeprop\n')
        elif basename == 'procedures':
            fdriver.write(' :exclude-members: interface_cfour\n')
        fdriver.write('\n')
# Python-only plugin modules in psi4/driver (subdirectories).
# Py3 fix: generators have no .next() method in Python 3 -- use the builtin
# next() instead of os.walk(...).next().
for basename in next(os.walk(DriverPath + '../../psi4/driver'))[1]:
    div = '=' * len(basename)
    if basename not in ['grendel']:
        pts('driver', basename)
        fdriver.write(basename + '\n')
        fdriver.write(div + '\n\n')
        fdriver.write('.. automodule:: %s\n' % (basename))
        fdriver.write(' :members:\n')
        fdriver.write(' :undoc-members:\n')
        for pyfile in glob.glob(DriverPath + '../../psi4/driver/' + basename + '/*py'):
            filename = os.path.split(pyfile)[1]
            basename2 = os.path.splitext(filename)[0]
            div = '=' * len(basename2)
            fdriver.write('.. automodule:: %s.%s\n' % (basename, basename2))
            fdriver.write(' :members:\n')
            fdriver.write(' :undoc-members:\n')
            if basename == 'qcdb' and basename2 == 'molecule':
                fdriver.write(' :exclude-members: run_dftd3\n')
        fdriver.write('\n')
fdriver.write('\n')
fdriver.close()
|
susilehtola/psi4
|
doc/sphinxman/document_driver.py
|
Python
|
lgpl-3.0
| 4,542
|
[
"Psi4"
] |
b4ab8f708050eeb56de6de8325e34d71d3796489a2ec7c4afb18e70c7b7b14bb
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from os import symlink
from spack import *
class LtrRetriever(Package):
    """LTR_retriever is a highly accurate and sensitive program for
    identification of LTR retrotransposons."""

    homepage = "https://github.com/oushujun/LTR_retriever"
    url = "https://github.com/oushujun/LTR_retriever/archive/v2.8.7.tar.gz"

    version('2.8.7', sha256='29ca6f699c57b5e964aa0ee6c7d3e1e4cd5362dadd789e5f0e8c82fe0bb29369')

    depends_on('perl', type='run')
    depends_on('blast-plus', type='run')
    depends_on('hmmer@3.1b2:', type='run')
    depends_on('cdhit', type='run')
    depends_on('repeatmasker', type='run')

    def install(self, spec, prefix):
        # Point the bundled 'paths' config file at the Spack-provided tools.
        tool_substitutions = (
            (r'BLAST\+=.*', 'BLAST+=%s' % spec['blast-plus'].prefix.bin),
            ('RepeatMasker=.*',
             'RepeatMasker=%s' % spec['repeatmasker'].prefix.bin),
            ('HMMER=.*', 'HMMER=%s' % spec['hmmer'].prefix.bin),
            ('CDHIT=.*', 'CDHIT=%s' % spec['cdhit'].prefix),
        )
        for pattern, replacement in tool_substitutions:
            filter_file(pattern, replacement, 'paths')
        # The legacy BLAST entry is unused: blank it out.
        filter_file('BLAST=.*', '', 'paths')
        mkdirp(prefix.opt)
        mkdirp(prefix.bin)
        # Install the whole tree under opt and expose the entry point in bin.
        install_tree('.', prefix.opt.ltr_retriever)
        symlink(prefix.opt.ltr_retriever.LTR_retriever,
                prefix.bin.LTR_retriever)
|
LLNL/spack
|
var/spack/repos/builtin/packages/ltr-retriever/package.py
|
Python
|
lgpl-2.1
| 1,604
|
[
"BLAST"
] |
aafb38bb65942ec216a8a3ba083ead0a6e7b47b733b72a31817ca40bc50c3e4c
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Provider related utilities
"""
from libcloud.utils.misc import get_driver as _get_provider_driver
from libcloud.utils.misc import set_driver as _set_provider_driver
from libcloud.compute.types import Provider, DEPRECATED_RACKSPACE_PROVIDERS
from libcloud.compute.types import OLD_CONSTANT_TO_NEW_MAPPING
# Public API of this module.
__all__ = [
    "Provider",
    "DRIVERS",
    "get_driver"]
# Registry mapping a Provider constant to the (module path, class name) of
# its node driver. Drivers are resolved lazily via get_driver().
DRIVERS = {
    Provider.AZURE:
    ('libcloud.compute.drivers.azure', 'AzureNodeDriver'),
    Provider.DUMMY:
    ('libcloud.compute.drivers.dummy', 'DummyNodeDriver'),
    Provider.EC2_US_EAST:
    ('libcloud.compute.drivers.ec2', 'EC2NodeDriver'),
    Provider.EC2_EU_WEST:
    ('libcloud.compute.drivers.ec2', 'EC2EUNodeDriver'),
    Provider.EC2_US_WEST:
    ('libcloud.compute.drivers.ec2', 'EC2USWestNodeDriver'),
    Provider.EC2_US_WEST_OREGON:
    ('libcloud.compute.drivers.ec2', 'EC2USWestOregonNodeDriver'),
    Provider.EC2_AP_SOUTHEAST:
    ('libcloud.compute.drivers.ec2', 'EC2APSENodeDriver'),
    Provider.EC2_AP_NORTHEAST:
    ('libcloud.compute.drivers.ec2', 'EC2APNENodeDriver'),
    Provider.EC2_SA_EAST:
    ('libcloud.compute.drivers.ec2', 'EC2SAEastNodeDriver'),
    Provider.EC2_AP_SOUTHEAST2:
    ('libcloud.compute.drivers.ec2', 'EC2APSESydneyNodeDriver'),
    Provider.ECP:
    ('libcloud.compute.drivers.ecp', 'ECPNodeDriver'),
    Provider.ELASTICHOSTS:
    ('libcloud.compute.drivers.elastichosts', 'ElasticHostsNodeDriver'),
    Provider.ELASTICHOSTS_UK1:
    ('libcloud.compute.drivers.elastichosts', 'ElasticHostsUK1NodeDriver'),
    Provider.ELASTICHOSTS_UK2:
    ('libcloud.compute.drivers.elastichosts', 'ElasticHostsUK2NodeDriver'),
    Provider.ELASTICHOSTS_US1:
    ('libcloud.compute.drivers.elastichosts', 'ElasticHostsUS1NodeDriver'),
    Provider.ELASTICHOSTS_US2:
    ('libcloud.compute.drivers.elastichosts', 'ElasticHostsUS2NodeDriver'),
    Provider.ELASTICHOSTS_US3:
    ('libcloud.compute.drivers.elastichosts', 'ElasticHostsUS3NodeDriver'),
    Provider.ELASTICHOSTS_CA1:
    ('libcloud.compute.drivers.elastichosts', 'ElasticHostsCA1NodeDriver'),
    Provider.ELASTICHOSTS_AU1:
    ('libcloud.compute.drivers.elastichosts', 'ElasticHostsAU1NodeDriver'),
    Provider.ELASTICHOSTS_CN1:
    ('libcloud.compute.drivers.elastichosts', 'ElasticHostsCN1NodeDriver'),
    Provider.SKALICLOUD:
    ('libcloud.compute.drivers.skalicloud', 'SkaliCloudNodeDriver'),
    Provider.SERVERLOVE:
    ('libcloud.compute.drivers.serverlove', 'ServerLoveNodeDriver'),
    Provider.CLOUDSIGMA:
    ('libcloud.compute.drivers.cloudsigma', 'CloudSigmaNodeDriver'),
    Provider.GCE:
    ('libcloud.compute.drivers.gce', 'GCENodeDriver'),
    Provider.GOGRID:
    ('libcloud.compute.drivers.gogrid', 'GoGridNodeDriver'),
    Provider.RACKSPACE:
    ('libcloud.compute.drivers.rackspace', 'RackspaceNodeDriver'),
    Provider.RACKSPACE_FIRST_GEN:
    ('libcloud.compute.drivers.rackspace', 'RackspaceFirstGenNodeDriver'),
    Provider.HPCLOUD:
    ('libcloud.compute.drivers.hpcloud', 'HPCloudNodeDriver'),
    Provider.KILI:
    ('libcloud.compute.drivers.kili', 'KiliCloudNodeDriver'),
    Provider.VPSNET:
    ('libcloud.compute.drivers.vpsnet', 'VPSNetNodeDriver'),
    Provider.LINODE:
    ('libcloud.compute.drivers.linode', 'LinodeNodeDriver'),
    Provider.RIMUHOSTING:
    ('libcloud.compute.drivers.rimuhosting', 'RimuHostingNodeDriver'),
    Provider.VOXEL:
    ('libcloud.compute.drivers.voxel', 'VoxelNodeDriver'),
    Provider.SOFTLAYER:
    ('libcloud.compute.drivers.softlayer', 'SoftLayerNodeDriver'),
    Provider.EUCALYPTUS:
    ('libcloud.compute.drivers.ec2', 'EucNodeDriver'),
    Provider.IBM:
    ('libcloud.compute.drivers.ibm_sce', 'IBMNodeDriver'),
    Provider.OPENNEBULA:
    ('libcloud.compute.drivers.opennebula', 'OpenNebulaNodeDriver'),
    Provider.DREAMHOST:
    ('libcloud.compute.drivers.dreamhost', 'DreamhostNodeDriver'),
    Provider.BRIGHTBOX:
    ('libcloud.compute.drivers.brightbox', 'BrightboxNodeDriver'),
    Provider.NIMBUS:
    ('libcloud.compute.drivers.ec2', 'NimbusNodeDriver'),
    Provider.BLUEBOX:
    ('libcloud.compute.drivers.bluebox', 'BlueboxNodeDriver'),
    Provider.GANDI:
    ('libcloud.compute.drivers.gandi', 'GandiNodeDriver'),
    Provider.OPSOURCE:
    ('libcloud.compute.drivers.opsource', 'OpsourceNodeDriver'),
    Provider.DIMENSIONDATA:
    ('libcloud.compute.drivers.dimensiondata', 'DimensionDataNodeDriver'),
    Provider.OPENSTACK:
    ('libcloud.compute.drivers.openstack', 'OpenStackNodeDriver'),
    Provider.NINEFOLD:
    ('libcloud.compute.drivers.ninefold', 'NinefoldNodeDriver'),
    Provider.VCLOUD:
    ('libcloud.compute.drivers.vcloud', 'VCloudNodeDriver'),
    Provider.TERREMARK:
    ('libcloud.compute.drivers.vcloud', 'TerremarkDriver'),
    Provider.CLOUDSTACK:
    ('libcloud.compute.drivers.cloudstack', 'CloudStackNodeDriver'),
    Provider.LIBVIRT:
    ('libcloud.compute.drivers.libvirt_driver', 'LibvirtNodeDriver'),
    Provider.JOYENT:
    ('libcloud.compute.drivers.joyent', 'JoyentNodeDriver'),
    Provider.VCL:
    ('libcloud.compute.drivers.vcl', 'VCLNodeDriver'),
    Provider.KTUCLOUD:
    ('libcloud.compute.drivers.ktucloud', 'KTUCloudNodeDriver'),
    Provider.HOSTVIRTUAL:
    ('libcloud.compute.drivers.hostvirtual', 'HostVirtualNodeDriver'),
    Provider.ABIQUO:
    ('libcloud.compute.drivers.abiquo', 'AbiquoNodeDriver'),
    Provider.DIGITAL_OCEAN:
    ('libcloud.compute.drivers.digitalocean', 'DigitalOceanNodeDriver'),
    Provider.NEPHOSCALE:
    ('libcloud.compute.drivers.nephoscale', 'NephoscaleNodeDriver'),
    Provider.CLOUDFRAMES:
    ('libcloud.compute.drivers.cloudframes', 'CloudFramesNodeDriver'),
    Provider.EXOSCALE:
    ('libcloud.compute.drivers.exoscale', 'ExoscaleNodeDriver'),
    Provider.IKOULA:
    ('libcloud.compute.drivers.ikoula', 'IkoulaNodeDriver'),
    Provider.OUTSCALE_SAS:
    ('libcloud.compute.drivers.ec2', 'OutscaleSASNodeDriver'),
    Provider.OUTSCALE_INC:
    ('libcloud.compute.drivers.ec2', 'OutscaleINCNodeDriver'),
    Provider.VSPHERE:
    ('libcloud.compute.drivers.vsphere', 'VSphereNodeDriver'),
    Provider.PROFIT_BRICKS:
    ('libcloud.compute.drivers.profitbricks', 'ProfitBricksNodeDriver'),
    Provider.VULTR:
    ('libcloud.compute.drivers.vultr', 'VultrNodeDriver'),
    Provider.AURORACOMPUTE:
    ('libcloud.compute.drivers.auroracompute', 'AuroraComputeNodeDriver'),
    Provider.CLOUDWATT:
    ('libcloud.compute.drivers.cloudwatt', 'CloudwattNodeDriver'),
    Provider.PACKET:
    ('libcloud.compute.drivers.packet', 'PacketNodeDriver'),
    Provider.ONAPP:
    ('libcloud.compute.drivers.onapp', 'OnAppNodeDriver'),
    Provider.RUNABOVE:
    ('libcloud.compute.drivers.runabove', 'RunAboveNodeDriver'),
    # Deprecated
    Provider.CLOUDSIGMA_US:
    ('libcloud.compute.drivers.cloudsigma', 'CloudSigmaLvsNodeDriver'),
}
def get_driver(provider):
    """Return the node driver class registered for *provider*.

    Raises an informative Exception for old, removed Rackspace provider
    constants, pointing at the migration notes.
    """
    if provider in DEPRECATED_RACKSPACE_PROVIDERS:
        # Invert Provider's attribute table so constants map back to names.
        id_to_name_map = {value: name
                          for name, value in Provider.__dict__.items()}
        old_name = id_to_name_map[provider]
        new_name = id_to_name_map[OLD_CONSTANT_TO_NEW_MAPPING[provider]]
        url = 'http://s.apache.org/lc0140un'
        msg = ('Provider constant %s has been removed. New constant '
               'is now called %s.\n'
               'For more information on this change and how to modify your '
               'code to work with it, please visit: %s' %
               (old_name, new_name, url))
        raise Exception(msg)
    return _get_provider_driver(DRIVERS, provider)
def set_driver(provider, module, klass):
    """Register driver class *klass* from *module* for *provider* in DRIVERS."""
    return _set_provider_driver(DRIVERS, provider, module, klass)
|
Verizon/libcloud
|
libcloud/compute/providers.py
|
Python
|
apache-2.0
| 8,385
|
[
"VisIt"
] |
a6b40290b8d7da2c187d8f578675faad1e7f601e4ef0b0620ab50735ad328677
|
#
# Copyright (c) 2017 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import print_function, absolute_import
import codecs
from collections import defaultdict
import logging
import os
import re
from commoncode import command
from commoncode.system import on_windows
import extractcode
from extractcode import ExtractErrorFailedToExtract
from extractcode import ExtractWarningIncorrectEntry
# Module-level logger for the extractcode package.
logger = logging.getLogger('extractcode')
# logging.basicConfig(level=logging.DEBUG)
# Directory holding the bundled 7z binaries, next to this module.
root_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'bin'))
# NOTE(review): the triple-quoted string below is a no-op statement, not the
# module docstring (it is not the first statement in the file).
"""
Low level support for p/7zip-based archive extraction.
"""
# Known 7zip stdout error markers (matched lowercased) mapped to the message
# reported for them. A None message means the condition is not an error.
sevenzip_errors = [
    ('unsupported method', 'Unsupported archive or broken archive'),
    ('wrong password', 'Password protected archive, unable to extract'),
    # not being able to open an archive is not an error condition for now
    ('can not open file as archive', None),
]


def get_7z_errors(stdout):
    """
    Return an error message extracted from a 7zip command output stdout
    string, or None if stdout is empty or contains no recognizable error.
    Known error markers take precedence over free-form 'Error:' lines.
    """
    # FIXME: we should use only one pass over stdout for errors and warnings
    if not stdout or not stdout.strip():
        return
    find_7z_errors = re.compile('^Error:(.*)$',
                                re.MULTILINE | re.DOTALL).findall
    stdlow = stdout.lower()
    for err, msg in sevenzip_errors:
        if err in stdlow:
            return msg
    file_errors = find_7z_errors(stdout)
    if file_errors:
        # findall returns a list of matched strings: strip quoting from each
        # item, then join. (The original called .strip() on the list itself,
        # raising AttributeError whenever an 'Error:' line was present.)
        return ' '.join(e.strip('"\' ') for e in file_errors).strip()
def get_7z_warnings(stdout):
    """
    Return a list of 'path: warning' message strings for 7zip warnings
    extracted from a stdout text. Returns an empty list when there are none.
    """
    # FIXME: we should use only one pass over stdout for errors and warnings
    cannot_open = 'can not open output file'
    msg_len = len(cannot_open) + 1
    warnings = defaultdict(list)
    for line in stdout.splitlines(False):
        if cannot_open in line.lower():
            # everything past the marker length is treated as the file path
            path = line[msg_len:]
            if cannot_open not in warnings[path]:
                warnings[path].append(cannot_open)
    # collect warnings
    warning_messages = []
    for pathname, messages in warnings.items():
        # strip stray quoting from each message, then join. (The original
        # called .strip() on the list itself, raising AttributeError as soon
        # as any warning was collected.)
        msg = pathname + ': ' + '\n'.join(m.strip('\' "') for m in messages)
        if msg not in warning_messages:
            warning_messages.append(msg)
    return warning_messages
def list_extracted_7z_files(stdout):
    """
    Return the list of file paths that 7zip reported as extracted, parsed
    from a stdout text.
    Based on 7zip Client7z.cpp:
        static const char *kExtractingString = "Extracting ";
    """
    # FIXME: handle Unicode paths with 7zip command line flags
    extracting_lines = re.compile(r'Extracting (.*)$', re.M)
    return extracting_lines.findall(stdout)
def extract(location, target_dir, arch_type='*'):
    """
    Extract all files from a 7zip-supported archive file at location in the
    target_dir directory. Return a list of warning messages.
    Raise exception on errors.
    `arch_type` is the type of 7zip archive passed to the -t 7zip option. Can be
    None.
    """
    assert location
    assert target_dir
    abs_location = os.path.abspath(os.path.expanduser(location))
    abs_target_dir = os.path.abspath(os.path.expanduser(target_dir))
    # note: there are some issues with the extraction of debian .deb ar files
    # see sevenzip bug http://sourceforge.net/p/sevenzip/bugs/1472/
    # 7z arguments
    extract = 'x'
    yes_to_all = '-y'
    # NB: we use t* to ensure that all archive types are honored
    if not arch_type:
        arch_type = ''
    else:
        arch_type = '-t' + arch_type
    # pass an empty password so that extraction with passwords WILL fail
    password = '-p'
    # renaming may not behave the same way on all OSes in particular Mac and Windows
    auto_rename_dupe_names = '-aou'
    # These things do not work well with p7zip for now:
    # - ensure that we treat the FS as case insensitive even if it is
    # this ensure we have consistent names across OSes
    # case_insensitive = '-ssc-'
    # - force any console output to be UTF-8 encoded
    # TODO: add this may be for a UTF output on Windows only
    # output_as_utf = '-sccUTF-8'
    # working_tmp_dir = '-w<path>'
    # NB: we force running in the GMT timezone, because 7z is unable to set
    # the TZ correctly when the archive does not contain TZ info. This does
    # not work on Windows, because 7z is not using the TZ env var there.
    # NOTE(review): dict.update() returns None, so `timezone` is always None
    # and env=None is passed below; the TZ change works only because
    # os.environ itself was mutated -- presumably command.execute falls back
    # to os.environ when env is None. Confirm before changing.
    timezone = os.environ.update({'TZ': 'GMT'})
    # Note: 7z does extract in the current directory so we cwd to the target dir first
    args = [extract, yes_to_all, auto_rename_dupe_names,
            arch_type, password, abs_location]
    rc, stdout, _stderr = command.execute(
        cmd='7z',
        args=args,
        cwd=abs_target_dir,
        env=timezone,
        root_dir=root_dir
    )
    if rc != 0:
        # map the raw 7z output to a friendlier message before raising
        error = get_7z_errors(stdout) or 'No error returned'
        raise ExtractErrorFailedToExtract(error)
    # sanitize any unsafe paths produced by the extraction
    extractcode.remove_backslashes_and_dotdots(abs_target_dir)
    return get_7z_warnings(stdout)
def list_entries(location, arch_type='*'):
    """
    List entries from a 7zip-supported archive file at location.
    Return a list of Entry objects (via parse_7z_listing).
    Use the -t* 7z cli type option or the provided arch_type 7z type (can be
    None).
    """
    assert location
    abs_location = os.path.abspath(os.path.expanduser(location))
    # 7z arguments
    listing = 'l'
    # NB: we use t* to ensure that all archive types are honored
    if not arch_type:
        arch_type = ''
    else:
        arch_type = '-t' + arch_type
    # pass an empty password so that extraction with passwords WILL fail
    password = '-p'
    tech_info = '-slt'
    output_as_utf = ''
    if on_windows:
        output_as_utf = '-sccUTF-8'
    # NB: we force running in the GMT timezone, because 7z is unable to set
    # the TZ correctly when the archive does not contain TZ info. This does
    # not work on Windows, because 7z is not using the TZ env var there.
    # NOTE(review): dict.update() returns None; see the matching note in
    # extract() -- env=None presumably falls back to the mutated os.environ.
    timezone = os.environ.update({'TZ': 'GMT'})
    args = [listing, tech_info, arch_type, output_as_utf, password, abs_location]
    # NOTE(review): with to_files=True, `stdout` is presumably a file path,
    # which matches parse_7z_listing() opening its `location` argument --
    # confirm against command.execute's contract.
    rc, stdout, _stderr = command.execute(cmd='7z',
                                          args=args,
                                          env=timezone,
                                          root_dir=root_dir,
                                          to_files=True)
    if rc != 0:
        # a non-zero rc is deliberately not fatal here: we still attempt to
        # parse whatever listing was produced
        _error = get_7z_errors(stdout) or 'No error returned'
        # still try to get the listing?
        # print(Exception(error))
        pass
    # the listing was produced as UTF on windows to avoid damaging binary
    # paths in console outputs
    utf = bool(output_as_utf)
    return parse_7z_listing(stdout, utf)
def as_entry(infos):
    """
    Return an extractcode.Entry built from `infos`, a dict of 7zip -slt
    key/value path data. Raise ExtractWarningIncorrectEntry if an entry
    claims to be both a symlink and a hardlink.
    """
    entry = extractcode.Entry()

    entry.path = infos.get('Path')
    entry.size = infos.get('Size', 0)
    entry.packed_size = infos.get('Packed Size', 0)
    entry.date = infos.get('Modified', 0)

    # 7z marks directories with a '+' in the Folder field
    folder_flag = infos.get('Folder', False) == '+'
    entry.is_dir = folder_flag
    entry.is_file = not folder_flag

    entry.is_broken_link = False
    entry.mode = infos.get('Mode', '')
    entry.user = infos.get('User')
    entry.group = infos.get('Group')
    entry.is_special = False
    entry.is_hardlink = False

    symlink_target = infos.get('Symbolic Link')
    if symlink_target:
        entry.is_symlink = True
        entry.link_target = symlink_target

    hardlink_target = infos.get('Hard Link')
    if hardlink_target:
        entry.is_hardlink = True
        entry.link_target = hardlink_target

    if symlink_target and hardlink_target:
        raise ExtractWarningIncorrectEntry('A Symlink cannot be a hardlink too')

    entry.linkcount = infos.get('Links', 0)
    entry.host = infos.get('Host OS')
    entry.comment = infos.get('Comment')
    entry.encrypted = infos.get('Encrypted')
    return entry
def parse_7z_listing(location, utf=False):
    """
    Parse a long format (-slt) 7zip listing stored in the file at `location`
    and return a list of Entry objects. Return an empty list if only a
    header is present (error condition or empty archive).
    The 7zip -slt format is:
    - copyright and version details
    - '--' line
    - archive header info, varying based on the archive types and subtype
    - lines of key=value pairs
    - Errors: followed by one or more message lines
    - Warnings: followed by one or more message lines
    - Open Warning: : followed by one or more message lines
    - sometimes a '---' line
    - blank line
    - '----------' line
    - for each archive member:
    - lines of either
    - key = value pairs
    - Errors: followed by one or more message lines
    - Warnings: followed by one or more message lines
    - Open Warning: : followed by one or more message lines
    - blank line
    - two blank lines
    - footer sometimes with lines with summary stats
    such as Warnings: 1 Errors: 1
    - a line with two or more dashes or an empty line
    """
    if utf:
        text = codecs.open(location, encoding='UTF-8').read()
        text = text.replace(u'\r\n', u'\n')
    else:
        text = open(location, 'rb').read()
    header_tail = re.split('\n----------\n', text, flags=re.MULTILINE)
    # Fix: the original raised on `len != 2`, which made the empty-archive
    # (header-only) branch below unreachable; only several separators are an
    # actual inconsistency.
    if len(header_tail) > 2:
        raise ExtractWarningIncorrectEntry('Incorrect 7zip listing with multiple headers')
    if len(header_tail) == 1:
        # we have only a header, likely an error condition or an empty archive
        return []
    _header, body = header_tail
    body_and_footer = re.split('\n\n\n', body, flags=re.MULTILINE)
    no_footer = len(body_and_footer) == 1
    multiple_footers = len(body_and_footer) > 2
    _footer = ''
    if no_footer:
        body = body_and_footer[0]
    elif multiple_footers:
        raise ExtractWarningIncorrectEntry('Incorrect 7zip listing with multiple footers')
    else:
        # Fix: the original used `==` (a no-op comparison) instead of `=`,
        # silently leaving the footer attached to the body.
        body, _footer = body_and_footer
    # FIXME: do something with header and footer?
    entries = []
    paths = re.split('\n\n', body, flags=re.MULTILINE)
    for path in paths:
        is_err = False
        # error/warning messages collected per entry; currently not reported
        errors = []
        infos = {}
        lines = path.splitlines(False)
        for line in lines:
            line = line.strip()
            if not line:
                continue
            if line.startswith(('Open Warning:', 'Errors:', 'Warnings:')):
                is_err = True
                messages = line.split(':', 1)
                errors.append(messages)
                continue
            if '=' not in line and is_err:
                # not a key = value line, an error message
                errors.append(line)
                continue
            parts = line.split('=', 1)
            if len(parts) != 2:
                raise ExtractWarningIncorrectEntry('Incorrect 7zip listing line with no key=value')
            is_err = False
            key, value = parts
            assert key not in infos, 'Duplicate keys in 7zip listing'
            infos[key.strip()] = value.strip() or ''
        if infos:
            entries.append(as_entry(infos))
    return entries
|
yasharmaster/scancode-toolkit
|
src/extractcode/sevenzip.py
|
Python
|
apache-2.0
| 12,308
|
[
"VisIt"
] |
68e3beb6edd4d432eed30a47a3359e5eec77c20659d7d2270f11b935679038af
|
from ase.structure import molecule
from ase.constraints import FixAtoms
# Regression test: repeating an Atoms object must remap its FixAtoms
# constraint onto the repeated cell, for both the `indices` and the `mask`
# constructors of FixAtoms.
N = 2
atoms = molecule('CO2')
atoms.set_cell((15.,15.,15.))
print('indices method')
atomsi = atoms.copy()
atomsi.set_constraint(FixAtoms(indices=[0,]))
atomsi = atomsi.repeat((N,1,1))
# reference: constrain the same atoms explicitly in the repeated system
atomsiref = atoms.copy().repeat((N,1,1))
atomsiref.set_constraint(FixAtoms(indices=[0, N + 1]))
lcatomsi = list(atomsi.constraints[0].index)
lcatomsiref = list(atomsiref.constraints[0].index)
assert lcatomsi == lcatomsiref
print('mask method')
atomsm = atoms.copy()
atomsm.set_constraint(FixAtoms(mask=[True, False, False]))
atomsm = atomsm.repeat((N,1,1))
atomsmref = atoms.copy().repeat((N,1,1))
atomsmref.set_constraint(FixAtoms(mask=[True, False, False] * N))
lcatomsm = list(atomsm.constraints[0].index)
lcatomsmref = list(atomsmref.constraints[0].index)
assert lcatomsm == lcatomsmref
# http://stackoverflow.com/questions/3873361/finding-multiple-occurrences-of-a-string-within-a-string-in-python
# NOTE(review): `e == True` compares elements against 1 (True == 1 in
# Python); this selects True positions only if `index` holds a boolean mask
# in this ASE version -- confirm against FixAtoms internals before changing.
lcatomsm2i = [n for (n, e) in enumerate(lcatomsm) if e == True]
assert lcatomsm2i == lcatomsi
|
grhawk/ASE
|
tools/ase/test/repeat_FixAtoms.py
|
Python
|
gpl-2.0
| 1,066
|
[
"ASE"
] |
c584d3224f68f84cadff54d5b0dc897a6bcaf5835bb538a8ce9730cd9d681e9d
|
import glob
import os
import pickle
import sys
import numpy as np
import subprocess as sp
from morphct.definitions import PROJECT_ROOT, SINGLE_RUN_MOB_KMC_FILE
from morphct.code import helper_functions as hf
def main(
    AA_morphology_dict,
    CG_morphology_dict,
    CG_to_AAID_master,
    parameter_dict,
    chromophore_list,
):
    """Fan the mobility-KMC carrier simulations out over the available CPUs.

    Builds the full job list (per lifetime, per hole/electron carrier),
    shuffles it, writes one job pickle per process, launches one child
    process per pickle via SINGLE_RUN_MOB_KMC_FILE, waits for completion and
    optionally combines the per-process result pickles into
    KMC_results.pickle. The five pipeline arguments are returned unchanged.
    """
    # Get the random seed now for all the child processes
    if parameter_dict["random_seed_override"] is not None:
        np.random.seed(parameter_dict["random_seed_override"])
    try:
        if parameter_dict["use_average_hop_rates"]:
            print(
                "".join(
                    [
                        "Be advised: use_average_hop_rates is set to ",
                        repr(parameter_dict["use_average_hop_rates"]),
                        ".",
                    ]
                )
            )
            print(
                "Orca-calculated energy levels will be ignored, and the following hop "
                "rates will be used:"
            )
            print(
                "Average Intra-molecular hop rate:",
                parameter_dict["average_intra_hop_rate"],
            )
            print(
                "Average Inter-molecular hop rate:",
                parameter_dict["average_inter_hop_rate"],
            )
    except KeyError:
        # use_average_hop_rates is an optional parameter
        pass
    # Determine the maximum simulation times based on the parameter dictionary
    simulation_times = parameter_dict["simulation_times"]
    carrier_list = []
    # Modification: Rather than being clever here with the carriers, I'm just
    # going to create the master list of jobs that need running and then
    # randomly shuffle it. This will hopefully permit a similar number of holes
    # and electrons and lifetimes to be run simultaneously providing adequate
    # statistics more quickly
    for lifetime in simulation_times:
        for carrier_no in range(parameter_dict["number_of_holes_per_simulation_time"]):
            carrier_list.append([carrier_no, lifetime, "hole"])
        for carrier_no in range(
            parameter_dict["number_of_electrons_per_simulation_time"]
        ):
            carrier_list.append([carrier_no, lifetime, "electron"])
    np.random.shuffle(carrier_list)
    proc_IDs = parameter_dict["proc_IDs"]
    output_dir = os.path.join(parameter_dict["output_morphology_directory"], "KMC")
    # Chunk the shuffled job list into one roughly equal slice per process
    jobs_list = [
        carrier_list[i : i + (int(np.ceil(len(carrier_list) / len(proc_IDs))))]
        for i in range(
            0, len(carrier_list), int(np.ceil(len(carrier_list) / float(len(proc_IDs))))
        )
    ]
    print("Writing job pickles for each CPU...")
    running_jobs = []
    for proc_ID, jobs in enumerate(jobs_list):
        pickle_name = os.path.join(output_dir, "KMC_data_{:02d}.pickle".format(proc_ID))
        with open(pickle_name, "wb+") as pickle_file:
            pickle.dump(jobs, pickle_file)
        print(
            "KMC jobs for proc_ID",
            proc_ID,
            "written to KMC_data_{:02d}.pickle".format(proc_ID),
        )
        # Open the required processes to execute the KMC jobs
        # Random seeding is a little weird here. If we don't generate a random
        # seed in the child process, it will just use the system time. So, we
        # generate a seed here to get the same random number stream each time,
        # and then feed the child process a new seed from the random number
        # stream. This way, we ensure that each child process has a different
        # random number stream to the other processes, but it's the same stream
        # every time we run the program.
        child_seed = np.random.randint(0, 2 ** 32)
        # Previous run command:
        run_command = [
            "python",
            SINGLE_RUN_MOB_KMC_FILE,
            output_dir,
            str(proc_ID),
            str(child_seed),
        ]
        print(run_command)
        running_jobs.append(sp.Popen(run_command))
    # Wait for all jobs to complete
    [p.wait() for p in running_jobs]
    # Now combine all of the pickle files into one:
    print("All KMC jobs completed!")
    if parameter_dict["combine_KMC_results"] is True:
        print("Combining outputs...")
        combined_data = {}
        for proc_ID, jobs in enumerate(jobs_list):
            file_name = os.path.join(
                output_dir, "KMC_results_{:02d}.pickle".format(proc_ID)
            )
            # The pickle was repeatedly dumped to, in order to save time.
            # Each dump stream is self-contained, so iteratively unpickle to
            # add the new data.
            # NOTE(review): despite the comment above, only a single
            # pickle.load is performed per file here -- confirm whether
            # multiple dump streams per file are expected.
            with open(file_name, "rb") as pickle_file:
                pickled_data = pickle.load(pickle_file)
                for key, val in pickled_data.items():
                    if key not in combined_data:
                        combined_data[key] = val
                    else:
                        combined_data[key] += val
        # Write out the combined data
        KMC_output_file = os.path.join(output_dir, "KMC_results.pickle")
        with open(KMC_output_file, "wb+") as pickle_file:
            pickle.dump(combined_data, pickle_file)
        print("Complete data written to", KMC_output_file)
        print("Cleaning up...")
        # Delete any unneeded files
        for file_name in glob.glob(os.path.join(output_dir, "KMC_results_*")):
            os.remove(file_name)
        for file_name in glob.glob(os.path.join(output_dir, "KMC_slot_*")):
            os.remove(file_name)
        for file_name in glob.glob(os.path.join(output_dir, "KMC_data*")):
            os.remove(file_name)
    return [
        AA_morphology_dict,
        CG_morphology_dict,
        CG_to_AAID_master,
        parameter_dict,
        chromophore_list,
    ]
if __name__ == "__main__":
    # Standalone entry point: resume the pipeline from a previously written
    # pickle file given as the single CLI argument.
    try:
        pickle_file = sys.argv[1]
    except IndexError:
        print(
            "Please specify the pickle file to load to continue the pipeline from this"
            " point."
        )
        # Without a pickle file there is nothing to do. (The original used a
        # bare except and fell through, crashing with a NameError on the next
        # line instead of exiting cleanly.)
        sys.exit(1)
    pickle_data = hf.load_pickle(pickle_file)
    AA_morphology_dict = pickle_data[0]
    CG_morphology_dict = pickle_data[1]
    CG_to_AAID_master = pickle_data[2]
    parameter_dict = pickle_data[3]
    chromophore_list = pickle_data[4]
    main(
        AA_morphology_dict,
        CG_morphology_dict,
        CG_to_AAID_master,
        parameter_dict,
        chromophore_list,
    )
|
matty-jones/MorphCT
|
morphct/code/mobility_KMC.py
|
Python
|
gpl-3.0
| 6,372
|
[
"ORCA"
] |
a65a9caa979ed51fb2ddda1da21cfaec2826ca0afcb4bb858f2a20fbee2c926e
|
#!/usr/bin/env python -E
"""
Script to set up a custom genome for bcbio-nextgen
"""
import argparse
from argparse import ArgumentParser
import os
import toolz as tz
from bcbio.utils import safe_makedir, file_exists
from bcbio.pipeline import config_utils
from bcbio.distributed.transaction import file_transaction
from bcbio.install import (REMOTES, get_cloudbiolinux, SUPPORTED_GENOMES, SUPPORTED_INDEXES,
_get_data_dir)
from bcbio.galaxy import loc
from fabric.api import *
import subprocess
import sys
import shutil
import yaml
import gffutils
from gffutils.iterators import DataIterator
import tempfile
SEQ_DIR = "seq"
RNASEQ_DIR = "rnaseq"
ERCC_BUCKET = "bcbio-data.s3.amazonaws.com/"
def gff3_to_gtf(gff3_file):
    """Convert a GFF3 file to a GTF file with gene_id/transcript_id attributes.

    Returns the path to the .gtf file next to the input, reusing it if it
    already exists. Assumes exon features carry a Parent transcript and
    transcripts carry a Parent gene, per GFF3 convention -- TODO confirm for
    unusual inputs.
    """
    # gffutils dialect describing the GTF attribute formatting to emit
    dialect = {'field separator': '; ',
               'fmt': 'gtf',
               'keyval separator': ' ',
               'leading semicolon': False,
               'multival separator': ',',
               'quoted GFF2 values': True,
               'order': ['gene_id', 'transcript_id'],
               'repeated keys': False,
               'trailing semicolon': True}
    out_file = os.path.splitext(gff3_file)[0] + ".gtf"
    if file_exists(out_file):
        return out_file
    print "Converting %s to %s." %(gff3_file, out_file)
    db = gffutils.create_db(gff3_file, ":memory:")
    with file_transaction(out_file) as tx_out_file:
        with open(tx_out_file, "w") as out_handle:
            # rewrite each exon with GTF-style gene_id/transcript_id attributes
            for feature in DataIterator(db.features_of_type("exon"), dialect=dialect):
                transcript_id = feature["Parent"][0]
                gene_id = db[transcript_id]["Parent"][0]
                attr = {"transcript_id": transcript_id, "gene_id": gene_id}
                attributes = gffutils.attributes.Attributes(attr)
                feature.attributes = attributes
                print >> out_handle, feature
    return out_file
def _index_w_command(dir_name, command, ref_file, ext=None):
    """Run the shell *command* template to index *ref_file*, placing the
    index under *dir_name* beside the sequence directory; return its path."""
    base = os.path.splitext(os.path.basename(ref_file))[0]
    if ext is not None:
        base += ext
    parent_dir = os.path.join(os.path.dirname(ref_file), os.pardir)
    out_dir = os.path.join(parent_dir, dir_name)
    index_path = os.path.join(out_dir, base)
    if not env.safe_exists(out_dir):
        env.safe_run("mkdir %s" % out_dir)
    cmd = command.format(ref_file=ref_file, index_name=index_path)
    subprocess.check_call(cmd, shell=True)
    return index_path
def setup_base_directories(genome_dir, name, build, gtf=None):
    """Create genomes/<name>/<build>/seq (and rnaseq when a GTF is given);
    return the build directory path."""
    name_dir = os.path.join(genome_dir, name)
    build_dir = os.path.join(name_dir, build)
    for directory in (name_dir, build_dir, os.path.join(build_dir, SEQ_DIR)):
        safe_makedir(directory)
    if gtf:
        safe_makedir(os.path.join(build_dir, RNASEQ_DIR))
    return build_dir
def install_fasta_file(build_dir, fasta, build):
    """Copy *fasta* into the seq directory as <build>.fa unless already there.

    Returns the installed path either way.
    """
    destination = os.path.join(build_dir, SEQ_DIR, build + ".fa")
    if not os.path.exists(destination):
        shutil.copyfile(fasta, destination)
    return destination
def install_gtf_file(build_dir, gtf, build):
    """Copy *gtf* into the rnaseq directory as ref-transcripts.gtf if missing.

    Returns the installed path either way.
    """
    destination = os.path.join(build_dir, RNASEQ_DIR, "ref-transcripts.gtf")
    if not os.path.exists(destination):
        shutil.copyfile(gtf, destination)
    return destination
def append_ercc(gtf_file, fasta_file):
    """Download the ERCC92 spike-in set and append it to the GTF and FASTA.

    Mutates both files in place by appending the decompressed ERCC
    annotation/sequences fetched from the public bcbio S3 bucket.
    Requires ``wget`` and ``gzip`` on the PATH.
    """
    ercc_fa = ERCC_BUCKET + "ERCC92.fasta.gz"
    # NOTE(review): delete=False temp files are never cleaned up afterwards.
    tmp_fa = tempfile.NamedTemporaryFile(delete=False, suffix=".gz").name
    append_fa_cmd = "wget {ercc_fa} -O {tmp_fa}; gzip -cd {tmp_fa} >> {fasta_file}"
    print append_fa_cmd.format(**locals())
    subprocess.check_call(append_fa_cmd.format(**locals()), shell=True)
    ercc_gtf = ERCC_BUCKET + "ERCC92.gtf.gz"
    tmp_gtf = tempfile.NamedTemporaryFile(delete=False, suffix=".gz").name
    append_gtf_cmd = "wget {ercc_gtf} -O {tmp_gtf}; gzip -cd {tmp_gtf} >> {gtf_file}"
    print append_gtf_cmd.format(**locals())
    subprocess.check_call(append_gtf_cmd.format(**locals()), shell=True)
if __name__ == "__main__":
description = ("Set up a custom genome for bcbio-nextgen. This will "
"place the genome under name/build in the genomes "
"directory in your bcbio-nextgen installation.")
parser = ArgumentParser(description=description)
parser.add_argument("-f", "--fasta", required=True,
help="FASTA file of the genome.")
parser.add_argument("--gff3", default=False, action='store_true',
help="File is a GFF3 file.")
parser.add_argument("-g", "--gtf", default=None,
help="GTF file of the transcriptome")
parser.add_argument("-n", "--name", required=True,
help="Name of organism, for example Hsapiens.")
parser.add_argument("-b", "--build", required=True,
help="Build of genome, for example hg19.")
parser.add_argument("-i", "--indexes", choices=SUPPORTED_INDEXES, nargs="*",
default=["seq"], help="Space separated list of indexes to make")
parser.add_argument("--ercc", action='store_true', default=False,
help="Add ERCC spike-ins.")
args = parser.parse_args()
env.hosts = ["localhost"]
cbl = get_cloudbiolinux(REMOTES)
sys.path.insert(0, cbl["dir"])
genomemod = __import__("cloudbio.biodata", fromlist=["genomes"])
# monkey patch cloudbiolinux to use this indexing command instead
genomes = getattr(genomemod, 'genomes')
genomes._index_w_command = _index_w_command
fabmod = __import__("cloudbio", fromlist=["fabutils"])
fabutils = getattr(fabmod, 'fabutils')
fabutils.configure_runsudo(env)
system_config = os.path.join(_get_data_dir(), "galaxy", "bcbio_system.yaml")
with open(system_config) as in_handle:
config = yaml.load(in_handle)
env.picard_home = config_utils.get_program("picard", config, ptype="dir")
genome_dir = os.path.abspath(os.path.join(_get_data_dir(), "genomes"))
args.fasta = os.path.abspath(args.fasta)
args.gtf = os.path.abspath(args.gtf) if args.gtf else None
if args.gff3:
args.gtf = gff3_to_gtf(args.gtf)
# always make a sequence dictionary
if "seq" not in args.indexes:
args.indexes.append("seq")
env.system_install = genome_dir
prepare_tx = os.path.join(cbl["dir"], "utils", "prepare_tx_gff.py")
print "Creating directories using %s as the base." % (genome_dir)
build_dir = setup_base_directories(genome_dir, args.name, args.build, args.gtf)
os.chdir(build_dir)
print "Genomes will be installed into %s." % (build_dir)
fasta_file = install_fasta_file(build_dir, args.fasta, args.build)
print "Installed genome as %s." % (fasta_file)
if args.gtf:
if "bowtie2" not in args.indexes:
args.indexes.append("bowtie2")
gtf_file = install_gtf_file(build_dir, args.gtf, args.build)
print "Installed GTF as %s." % (gtf_file)
if args.ercc:
print "Appending ERCC sequences to %s and %s." % (gtf_file, fasta_file)
append_ercc(gtf_file, fasta_file)
indexed = {}
for index in args.indexes:
print "Creating the %s index." % (index)
index_fn = genomes.get_index_fn(index)
if not index_fn:
print "Do not know how to make the index %s, skipping." % (index)
continue
indexed[index] = index_fn(fasta_file)
indexed["samtools"] = fasta_file
if args.gtf:
"Preparing transcriptome."
os.chdir(os.path.join(build_dir, os.pardir))
cmd = ("{sys.executable} {prepare_tx} --gtf {gtf_file} {env.picard_home} "
"{args.build}")
subprocess.check_call(cmd.format(**locals()), shell=True)
base_dir = os.path.normpath(os.path.dirname(fasta_file))
resource_file = os.path.join(base_dir, "%s-resources.yaml" % args.build)
print "Dumping genome resources to %s." % resource_file
resource_dict = {"version": 1}
if args.gtf:
transcripts = ["rnaseq", "transcripts"]
mask = ["rnaseq", "transcripts_mask"]
index = ["rnaseq", "transcriptome_index", "tophat"]
dexseq = ["rnaseq", "dexseq"]
refflat = ["rnaseq", "refflat"]
rRNA_fa = ["rnaseq", "rRNA_fa"]
resource_dict = tz.update_in(resource_dict, transcripts,
lambda x: "../rnaseq/ref-transcripts.gtf")
resource_dict = tz.update_in(resource_dict, mask,
lambda x: "../rnaseq/ref-transcripts-mask.gtf")
resource_dict = tz.update_in(resource_dict, index,
lambda x: "../rnaseq/tophat/%s_transcriptome.ver" % args.build)
resource_dict = tz.update_in(resource_dict, refflat,
lambda x: "../rnaseq/ref-transcripts.refFlat")
resource_dict = tz.update_in(resource_dict, dexseq,
lambda x: "../rnaseq/ref-transcripts.dexseq.gff3")
resource_dict = tz.update_in(resource_dict, rRNA_fa,
lambda x: "../rnaseq/rRNA.fa")
# write out resource dictionary
with file_transaction(resource_file) as tx_resource_file:
with open(tx_resource_file, "w") as out_handle:
out_handle.write(yaml.dump(resource_dict, default_flow_style=False))
print "Updating Galaxy .loc files."
galaxy_base = os.path.join(_get_data_dir(), "galaxy")
for index, index_file in indexed.items():
loc.update_loc_file(galaxy_base, index, args.build, index_file)
|
SciLifeLab/bcbio-nextgen
|
scripts/bcbio_setup_genome.py
|
Python
|
mit
| 9,589
|
[
"Galaxy"
] |
b52dfc0dea02c1909d57ec122d537a842973df583fec93cd835836375de63b8c
|
#!/usr/bin/env python
import numpy as np
import vtk
def main():
    """Render a random 32x32 terrain mesh with a red spline draped on it.

    Builds a triangulated height field from random integer z values,
    smooths it with Loop subdivision, traces a path across the surface by
    intersecting vertical lines with it via a cell locator, and renders
    both the surface and the path.
    """
    named_colors = vtk.vtkNamedColors()
    # Make a 32 x 32 grid.
    size = 32
    # Define z values for the topography.
    # Comment out the following line if you want a different random
    # distribution each time the script is run.
    np.random.seed(3)
    topography = np.random.randint(0, 5, (size, size))
    # Define points, triangles and colors
    colors = vtk.vtkUnsignedCharArray()
    colors.SetNumberOfComponents(3)
    points = vtk.vtkPoints()
    triangles = vtk.vtkCellArray()
    # Build the meshgrid manually.
    # Each quad cell becomes two triangles; points are inserted per-triangle
    # (so shared corners are duplicated here) and merged later by
    # vtkCleanPolyData.
    count = 0
    for i in range(size - 1):
        for j in range(size - 1):
            z1 = topography[i][j]
            z2 = topography[i][j + 1]
            z3 = topography[i + 1][j]
            # Triangle 1
            points.InsertNextPoint(i, j, z1)
            points.InsertNextPoint(i, (j + 1), z2)
            points.InsertNextPoint((i + 1), j, z3)
            triangle = vtk.vtkTriangle()
            triangle.GetPointIds().SetId(0, count)
            triangle.GetPointIds().SetId(1, count + 1)
            triangle.GetPointIds().SetId(2, count + 2)
            triangles.InsertNextCell(triangle)
            z1 = topography[i][j + 1]
            z2 = topography[i + 1][j + 1]
            z3 = topography[i + 1][j]
            # Triangle 2
            points.InsertNextPoint(i, (j + 1), z1)
            points.InsertNextPoint((i + 1), (j + 1), z2)
            points.InsertNextPoint((i + 1), j, z3)
            triangle = vtk.vtkTriangle()
            triangle.GetPointIds().SetId(0, count + 3)
            triangle.GetPointIds().SetId(1, count + 4)
            triangle.GetPointIds().SetId(2, count + 5)
            count += 6
            triangles.InsertNextCell(triangle)
            # Add some color.
            # One RGB tuple per inserted point (6 per quad), graded by i/j.
            r = [int(i / float(size) * 255), int(j / float(size) * 255), 0]
            colors.InsertNextTypedTuple(r)
            colors.InsertNextTypedTuple(r)
            colors.InsertNextTypedTuple(r)
            colors.InsertNextTypedTuple(r)
            colors.InsertNextTypedTuple(r)
            colors.InsertNextTypedTuple(r)
    # Create a polydata object.
    trianglePolyData = vtk.vtkPolyData()
    # Add the geometry and topology to the polydata.
    trianglePolyData.SetPoints(points)
    trianglePolyData.GetPointData().SetScalars(colors)
    trianglePolyData.SetPolys(triangles)
    # Clean the polydata so that the edges are shared!
    cleanPolyData = vtk.vtkCleanPolyData()
    cleanPolyData.SetInputData(trianglePolyData)
    # Use a filter to smooth the data (will add triangles and smooth).
    smooth_loop = vtk.vtkLoopSubdivisionFilter()
    smooth_loop.SetNumberOfSubdivisions(3)
    smooth_loop.SetInputConnection(cleanPolyData.GetOutputPort())
    # Create a mapper and actor for smoothed dataset.
    mapper = vtk.vtkPolyDataMapper()
    mapper.SetInputConnection(smooth_loop.GetOutputPort())
    actor_loop = vtk.vtkActor()
    actor_loop.SetMapper(mapper)
    actor_loop.GetProperty().SetInterpolationToFlat()
    # Update the pipeline so that vtkCellLocator finds cells!
    smooth_loop.Update()
    # Define a cellLocator to be able to compute intersections between lines.
    # and the surface
    locator = vtk.vtkCellLocator()
    locator.SetDataSet(smooth_loop.GetOutput())
    locator.BuildLocator()
    maxloop = 1000
    dist = 20.0 / maxloop
    tolerance = 0.001
    # Make a list of points. Each point is the intersection of a vertical line
    # defined by p1 and p2 and the surface.
    points = vtk.vtkPoints()
    for i in range(maxloop):
        p1 = [2 + i * dist, 16, -1]
        p2 = [2 + i * dist, 16, 6]
        # Outputs (we need only pos which is the x, y, z position
        # of the intersection)
        t = vtk.mutable(0)
        pos = [0.0, 0.0, 0.0]
        pcoords = [0.0, 0.0, 0.0]
        subId = vtk.mutable(0)
        locator.IntersectWithLine(p1, p2, tolerance, t, pos, pcoords, subId)
        # Add a slight offset in z.
        # NOTE(review): the intersection return code is not checked; a miss
        # would silently reuse pos = [0, 0, 0].
        pos[2] += 0.01
        # Add the x, y, z position of the intersection.
        points.InsertNextPoint(pos)
    # Create a spline and add the points
    spline = vtk.vtkParametricSpline()
    spline.SetPoints(points)
    functionSource = vtk.vtkParametricFunctionSource()
    functionSource.SetUResolution(maxloop)
    functionSource.SetParametricFunction(spline)
    # Map the spline
    mapper = vtk.vtkPolyDataMapper()
    mapper.SetInputConnection(functionSource.GetOutputPort())
    # Define the line actor
    actor = vtk.vtkActor()
    actor.SetMapper(mapper)
    actor.GetProperty().SetColor(named_colors.GetColor3d("Red"))
    actor.GetProperty().SetLineWidth(3)
    # Visualize
    renderer = vtk.vtkRenderer()
    renderWindow = vtk.vtkRenderWindow()
    renderWindow.AddRenderer(renderer)
    renderWindowInteractor = vtk.vtkRenderWindowInteractor()
    renderWindowInteractor.SetRenderWindow(renderWindow)
    # Add actors and render
    renderer.AddActor(actor)
    renderer.AddActor(actor_loop)
    renderer.SetBackground(named_colors.GetColor3d("Cornsilk"))
    renderWindow.SetSize(800, 800)
    renderWindow.Render()
    renderer.GetActiveCamera().SetPosition(-32.471276, 53.258788, 61.209332)
    renderer.GetActiveCamera().SetFocalPoint(15.500000, 15.500000, 2.000000)
    renderer.GetActiveCamera().SetViewUp(0.348057, -0.636740, 0.688055)
    renderer.ResetCameraClippingRange()
    renderWindow.Render()
    renderWindowInteractor.Start()
# Standard script entry point.
if __name__ == '__main__':
    main()
|
lorensen/VTKExamples
|
src/Python/DataManipulation/LineOnMesh.py
|
Python
|
apache-2.0
| 5,546
|
[
"VTK"
] |
77c42ca20c6752d1a3a13aa5f45049ae05adbfcf165c96cec9262fe317c38611
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Implementation of Neural Net (NN) functions."""
import math
from tensorflow.python.distribute import distribution_strategy_context as ds
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import candidate_sampling_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import custom_gradient
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import gen_array_ops # pylint: disable=unused-import
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gen_sparse_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.losses import util as losses_util
from tensorflow.python.platform import device_context
from tensorflow.python.util import dispatch
from tensorflow.python.util.deprecation import deprecated_args
from tensorflow.python.util.deprecation import deprecated_argument_lookup
from tensorflow.python.util.tf_export import tf_export
@tf_export("nn.log_poisson_loss")
@dispatch.add_dispatch_support
def log_poisson_loss(targets, log_input, compute_full_loss=False, name=None):
  """Computes log Poisson loss given `log_input`.

  Gives the log-likelihood loss between the prediction and the target under
  the assumption that the target has a Poisson distribution.  With
  `c = log_input` and `z = targets`, the loss computed is

      exp(c) - z * c

  which is the exact negative log-likelihood up to the constant `log(z!)`.
  With `compute_full_loss=True` that constant is approximated with
  Stirling's series, `z * log(z) - z + 0.5 * log(2 * pi * z)`, which makes
  relative loss comparisons meaningful (the approximation is skipped for
  targets in [0, 1], where it is ill-defined).

  Args:
    targets: A `Tensor` of the same type and shape as `log_input`.
    log_input: A `Tensor` of type `float32` or `float64`.
    compute_full_loss: whether to compute the full loss. If false, a constant
      term is dropped in favor of more efficient optimization.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of the same shape as `log_input` with the componentwise
    logistic losses.

  Raises:
    ValueError: If `log_input` and `targets` do not have the same shape.
  """
  with ops.name_scope(name, "log_poisson_loss", [log_input, targets]) as name:
    log_input = ops.convert_to_tensor(log_input, name="log_input")
    targets = ops.convert_to_tensor(targets, name="targets")
    try:
      targets.get_shape().assert_is_compatible_with(log_input.get_shape())
    except ValueError:
      raise ValueError(
          "`log_input` and `targets` must have the same shape, received "
          f"({log_input.get_shape()} vs {targets.get_shape()}).")

    loss = math_ops.exp(log_input) - log_input * targets
    if not compute_full_loss:
      return loss

    # Stirling's approximation of log(z!).  Constants are built with the
    # targets' dtype so mixed-precision inputs match.
    half = constant_op.constant(0.5, dtype=targets.dtype)
    two_pi = constant_op.constant(2 * math.pi, dtype=targets.dtype)
    stirling = (targets * math_ops.log(targets) - targets +
                half * math_ops.log(two_pi * targets))
    zeros = array_ops.zeros_like(targets, dtype=targets.dtype)
    ones = array_ops.ones_like(targets, dtype=targets.dtype)
    # Suppress the approximation for targets in [0, 1].
    in_unit_interval = math_ops.logical_and(targets >= zeros, targets <= ones)
    return loss + array_ops.where(in_unit_interval, zeros, stirling)
@tf_export(v1=["nn.sigmoid_cross_entropy_with_logits"])
@dispatch.add_dispatch_support
def sigmoid_cross_entropy_with_logits(  # pylint: disable=invalid-name
    _sentinel=None,
    labels=None,
    logits=None,
    name=None):
  """See sigmoid_cross_entropy_with_logits_v2."""
  # pylint: disable=protected-access
  nn_ops._ensure_xent_args("sigmoid_cross_entropy_with_logits", _sentinel,
                           labels, logits)
  # pylint: enable=protected-access
  with ops.name_scope(name, "logistic_loss", [logits, labels]) as name:
    logits = ops.convert_to_tensor(logits, name="logits")
    labels = ops.convert_to_tensor(labels, name="labels")
    try:
      labels.get_shape().assert_is_compatible_with(logits.get_shape())
    except ValueError:
      raise ValueError("`logits` and `labels` must have the same shape, "
                       f"received ({logits.get_shape()} vs "
                       f"{labels.get_shape()}).")

    # The naive logistic loss x - x * z + log(1 + exp(-x)) overflows for
    # large |x|.  The numerically stable equivalent used here is
    #   max(x, 0) - x * z + log(1 + exp(-abs(x)))
    # with max/abs expressed through `where` so gradients at zero are defined.
    zeros = array_ops.zeros_like(logits, dtype=logits.dtype)
    is_positive = (logits >= zeros)
    relu_logits = array_ops.where(is_positive, logits, zeros)
    neg_abs_logits = array_ops.where(is_positive, -logits, logits)  # pylint: disable=invalid-unary-operand-type
    softplus_term = math_ops.log1p(math_ops.exp(neg_abs_logits))
    return math_ops.add(relu_logits - logits * labels, softplus_term,
                        name=name)
# Note: intentionally calling this v2 to not allow existing code with indirect
# imports to ignore the sentinel behavior.
@tf_export("nn.sigmoid_cross_entropy_with_logits", v1=[])
@dispatch.register_binary_elementwise_api
@dispatch.add_dispatch_support
def sigmoid_cross_entropy_with_logits_v2(  # pylint: disable=invalid-name
    labels=None,
    logits=None,
    name=None):
  r"""Computes sigmoid cross entropy given `logits`.

  Measures the probability error in tasks with two outcomes in which each
  outcome is independent and need not have a fully certain label.  For
  instance, one could perform a regression where the probability of an event
  happening is known and used as a label.  This loss may also be used for
  binary classification, where labels are either zero or one.

  For brevity, let `x = logits`, `z = labels`.  The logistic loss is

      z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
        = x - x * z + log(1 + exp(-x))

  For x < 0, to avoid overflow in exp(-x), the implementation uses the
  equivalent formulation

      max(x, 0) - x * z + log(1 + exp(-abs(x)))

  `logits` and `labels` must have the same type and shape.

  >>> logits = tf.constant([1., -1., 0., 1., -1., 0., 0.])
  >>> labels = tf.constant([0., 0., 0., 1., 1., 1., 0.5])
  >>> tf.nn.sigmoid_cross_entropy_with_logits(
  ...     labels=labels, logits=logits).numpy()
  array([1.3132617, 0.3132617, 0.6931472, 0.3132617, 1.3132617, 0.6931472,
         0.6931472], dtype=float32)

  Compared to the losses which handle multiple outcomes,
  `tf.nn.softmax_cross_entropy_with_logits` for general multi-class
  classification and `tf.nn.sparse_softmax_cross_entropy_with_logits` for more
  efficient multi-class classification with hard labels,
  `sigmoid_cross_entropy_with_logits` is a slight simplification for binary
  classification:

      sigmoid(x) = softmax([x, 0])[0]

  $$\frac{1}{1 + e^{-x}} = \frac{e^x}{e^x + e^0}$$

  While `sigmoid_cross_entropy_with_logits` works for soft binary labels
  (probabilities between 0 and 1), it can also be used for binary
  classification where the labels are hard: a probability of 0 indicates the
  second class, 1 the first class.

  Args:
    labels: A `Tensor` of the same type and shape as `logits`. Between 0 and 1,
      inclusive.
    logits: A `Tensor` of type `float32` or `float64`. Any real number.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of the same shape as `logits` with the componentwise
    logistic losses.

  Raises:
    ValueError: If `logits` and `labels` do not have the same shape.
  """
  # Delegate to the v1 implementation; its sentinel argument only enforces
  # keyword usage, which this signature already guarantees.
  return sigmoid_cross_entropy_with_logits(
      labels=labels, logits=logits, name=name)
# The v1 endpoint shares its user-facing documentation with the v2 endpoint;
# only the calling convention (sentinel argument) differs.
sigmoid_cross_entropy_with_logits.__doc__ = (
    sigmoid_cross_entropy_with_logits_v2.__doc__)
@tf_export("nn.weighted_cross_entropy_with_logits", v1=[])
@dispatch.add_dispatch_support
def weighted_cross_entropy_with_logits_v2(labels, logits, pos_weight,
                                          name=None):
  """Computes a weighted cross entropy.

  This is like `sigmoid_cross_entropy_with_logits()` except that `pos_weight`
  allows one to trade off recall and precision by up- or down-weighting the
  cost of a positive error relative to a negative error: `pos_weight > 1`
  decreases the false-negative count (raising recall) while `pos_weight < 1`
  decreases the false-positive count (raising precision).

  For brevity, let `x = logits`, `z = labels`, `q = pos_weight`. The loss is

      qz * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
        = (1 - z) * x + (1 + (q - 1) * z) * log(1 + exp(-x))

  Setting `l = (1 + (q - 1) * z)`, to ensure stability and avoid overflow,
  the implementation uses

      (1 - z) * x + l * (log(1 + exp(-abs(x))) + max(-x, 0))

  `logits` and `labels` must have the same type and shape.

  >>> labels = tf.constant([1., 0.5, 0.])
  >>> logits = tf.constant([1.5, -0.1, -10.])
  >>> tf.nn.weighted_cross_entropy_with_logits(
  ...     labels=labels, logits=logits, pos_weight=tf.constant(1.5)).numpy()
  array([3.0211994e-01, 8.8049585e-01, 4.5776367e-05], dtype=float32)
  >>> tf.nn.weighted_cross_entropy_with_logits(
  ...     labels=labels, logits=logits, pos_weight=tf.constant(0.5)).numpy()
  array([1.00706644e-01, 5.08297503e-01, 4.57763672e-05], dtype=float32)

  Args:
    labels: A `Tensor` of the same type and shape as `logits`, with values
      between 0 and 1 inclusive.
    logits: A `Tensor` of type `float32` or `float64`, any real numbers.
    pos_weight: A coefficient to use on the positive examples, typically a
      scalar but otherwise broadcastable to the shape of `logits`. Its value
      should be non-negative.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of the same shape as `logits` with the componentwise
    weighted logistic losses.

  Raises:
    ValueError: If `logits` and `labels` do not have the same shape.
  """
  with ops.name_scope(name, "logistic_loss", [logits, labels]) as name:
    logits = ops.convert_to_tensor(logits, name="logits")
    labels = ops.convert_to_tensor(labels, name="labels")
    try:
      labels.get_shape().assert_is_compatible_with(logits.get_shape())
    except ValueError:
      raise ValueError("`logits` and `labels` must have the same shape, "
                       f"received ({logits.get_shape()} vs "
                       f"{labels.get_shape()}).")

    # l = 1 + (q - 1) * z, the per-element weight on the softplus term.
    log_weight = 1 + (pos_weight - 1) * labels
    softplus_neg_abs = math_ops.log1p(math_ops.exp(-math_ops.abs(logits)))
    weighted_softplus = log_weight * (softplus_neg_abs +
                                      nn_ops.relu(-logits))  # pylint: disable=invalid-unary-operand-type
    return math_ops.add((1 - labels) * logits, weighted_softplus, name=name)
@tf_export(v1=["nn.weighted_cross_entropy_with_logits"])
@dispatch.add_dispatch_support
@deprecated_args(None, "targets is deprecated, use labels instead", "targets")
def weighted_cross_entropy_with_logits(labels=None,
                                       logits=None,
                                       pos_weight=None,
                                       name=None,
                                       targets=None):
  """Computes a weighted cross entropy.

  This is like `sigmoid_cross_entropy_with_logits()` except that `pos_weight`
  allows one to trade off recall and precision by up- or down-weighting the
  cost of a positive error relative to a negative error: `pos_weight > 1`
  decreases the false-negative count (raising recall) while `pos_weight < 1`
  decreases the false-positive count (raising precision).

  For brevity, let `x = logits`, `z = labels`, `q = pos_weight`. The loss is

      qz * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
        = (1 - z) * x + (1 + (q - 1) * z) * log(1 + exp(-x))

  Setting `l = (1 + (q - 1) * z)`, to ensure stability and avoid overflow,
  the implementation uses

      (1 - z) * x + l * (log(1 + exp(-abs(x))) + max(-x, 0))

  `logits` and `labels` must have the same type and shape.

  Args:
    labels: A `Tensor` of the same type and shape as `logits`.
    logits: A `Tensor` of type `float32` or `float64`.
    pos_weight: A coefficient to use on the positive examples.
    name: A name for the operation (optional).
    targets: Deprecated alias for labels.

  Returns:
    A `Tensor` of the same shape as `logits` with the componentwise
    weighted logistic losses.

  Raises:
    ValueError: If `logits` and `labels` do not have the same shape.
  """
  # Resolve the deprecated `targets` alias, then forward to the v2 endpoint.
  labels = deprecated_argument_lookup("labels", labels, "targets", targets)
  return weighted_cross_entropy_with_logits_v2(labels, logits, pos_weight, name)
@tf_export("nn.compute_average_loss")
@dispatch.add_dispatch_support
def compute_average_loss(per_example_loss,
                         sample_weight=None,
                         global_batch_size=None):
  """Scales per-example losses with sample_weights and computes their average.

  Intended for custom training loops under a `tf.distribute` strategy: the
  sum of the (optionally weighted) per-example losses is divided by the
  GLOBAL batch size rather than the per-replica batch size, so that summing
  the per-replica results yields the correct global mean.

  ```python
  with strategy.scope():
    def compute_loss(labels, predictions, sample_weight=None):
      # If you are using a `Loss` class instead, set reduction to `NONE` so
      # that we can do the reduction afterwards and divide by global batch
      # size.
      per_example_loss = tf.keras.losses.sparse_categorical_crossentropy(
          labels, predictions)
      return tf.nn.compute_average_loss(
          per_example_loss,
          sample_weight=sample_weight,
          global_batch_size=GLOBAL_BATCH_SIZE)
  ```

  Args:
    per_example_loss: Per-example loss.
    sample_weight: Optional weighting for each example.
    global_batch_size: Optional global batch size value. Defaults to (size of
      first dimension of `losses`) * (number of replicas).

  Returns:
    Scalar loss value.
  """  # pylint: disable=g-doc-exception
  per_example_loss = ops.convert_to_tensor(per_example_loss)
  loss_dtype = per_example_loss.dtype

  with losses_util.check_per_example_loss_rank(per_example_loss):
    # Weighting may up-cast the losses, so restore the caller's dtype after.
    if sample_weight is not None:
      sample_weight = ops.convert_to_tensor(sample_weight)
      per_example_loss = losses_util.scale_losses_by_sample_weight(
          per_example_loss, sample_weight)
    per_example_loss = math_ops.cast(per_example_loss, loss_dtype)

    if global_batch_size is None:
      # Deriving the global batch size only makes sense per replica.
      if ds.has_strategy() and ds.in_cross_replica_context():
        raise RuntimeError(
            "You are calling `compute_average_loss` in cross replica context, "
            "while it was expected to be called in replica context.")
      replica_count = ds.get_strategy().num_replicas_in_sync
      local_batch = array_ops.shape_v2(per_example_loss)[0]
      global_batch_size = local_batch * replica_count

    check_ops.assert_scalar_v2(
        global_batch_size, message="global_batch_size must be scalar.")
    check_ops.assert_integer_v2(
        global_batch_size,
        message="global_batch_size must be an integer.")
    check_ops.assert_positive_v2(
        global_batch_size, message="global_batch_size must be positive.")

    global_batch_size = math_ops.cast(global_batch_size, loss_dtype)
    return math_ops.reduce_sum(per_example_loss) / global_batch_size
@tf_export("nn.scale_regularization_loss")
@dispatch.add_dispatch_support
def scale_regularization_loss(regularization_loss):
  """Scales the sum of the given regularization losses by number of replicas.

  In a custom training loop under a `tf.distribute` strategy, regularization
  terms are computed identically on every replica; dividing by the replica
  count keeps the total summed across replicas equal to a single-worker run.

  ```python
  with strategy.scope():
    def compute_loss(self, label, predictions):
      per_example_loss = tf.keras.losses.sparse_categorical_crossentropy(
          labels, predictions)
      loss = tf.nn.compute_average_loss(
          per_example_loss,
          sample_weight=sample_weight,
          global_batch_size=GLOBAL_BATCH_SIZE)
      # Add scaled regularization losses.
      loss += tf.nn.scale_regularization_loss(tf.nn.l2_loss(weights))
      return loss
  ```

  Args:
    regularization_loss: Regularization loss.

  Returns:
    Scalar loss value.
  """  # pylint: disable=g-doc-exception
  if ds.has_strategy() and ds.in_cross_replica_context():
    raise RuntimeError(
        "You are calling `scale_regularization_loss` in cross replica context, "
        "while it was expected to be called in replica context.")

  replica_count = ds.get_strategy().num_replicas_in_sync
  total = math_ops.reduce_sum(regularization_loss)
  return total / replica_count
@tf_export(v1=["nn.relu_layer"])
@dispatch.add_dispatch_support
def relu_layer(x, weights, biases, name=None):
  """Computes Relu(x * weight + biases).

  Args:
    x: a 2D tensor.  Dimensions typically: batch, in_units
    weights: a 2D tensor.  Dimensions typically: in_units, out_units
    biases: a 1D tensor.  Dimensions: out_units
    name: A name for the operation (optional).  If not specified
      "nn_relu_layer" is used.

  Returns:
    A 2-D Tensor computing relu(matmul(x, weights) + biases).
    Dimensions typically: batch, out_units.
  """
  with ops.name_scope(name, "relu_layer", [x, weights, biases]) as name:
    x = ops.convert_to_tensor(x, name="x")
    weights = ops.convert_to_tensor(weights, name="weights")
    biases = ops.convert_to_tensor(biases, name="biases")
    affine = nn_ops.bias_add(math_ops.matmul(x, weights), biases)
    return nn_ops.relu(affine, name=name)
@tf_export("nn.silu", "nn.swish")
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
def swish(features, beta=1.0):
  # pylint: disable=g-doc-args
  """Computes the SiLU or Swish activation function: `x * sigmoid(beta * x)`.

  beta : Hyperparameter for Swish activation function. Default value 1.0.

  The SiLU activation function was introduced in "Gaussian Error Linear Units
  (GELUs)" [Hendrycks et al. 2016](https://arxiv.org/abs/1606.08415) and
  "Sigmoid-Weighted Linear Units for Neural Network Function Approximation in
  Reinforcement Learning"
  [Elfwing et al. 2017](https://arxiv.org/abs/1702.03118) and was independently
  discovered (and called swish) in "Searching for Activation Functions"
  [Ramachandran et al. 2017](https://arxiv.org/abs/1710.05941)

  Args:
    features: A `Tensor` representing preactivation values.
    beta: A 'Tensor' representing value of beta hyperparameter.

  Returns:
    The activation value.
  """
  # pylint: enable=g-doc-args
  features = ops.convert_to_tensor(features, name="features")
  beta = ops.convert_to_tensor(beta, name="beta")
  # Cast once so the elementwise ops below do not re-promote dtypes.
  beta = math_ops.cast(beta, features.dtype)

  # A custom gradient is used purely as a memory optimization: the forward
  # sigmoid result is freed and recomputed during backprop (see grad below).
  @custom_gradient.custom_gradient
  def swish_impl(features):

    def grad(dy):
      """Gradient for the Swish activation function."""
      # Naively, x * tf.nn.sigmoid(x) requires keeping both x and sigmoid(x)
      # around for backprop, effectively doubling the tensor's memory
      # consumption. We use a control dependency here so that sigmoid(features)
      # is re-computed during backprop (the control dep prevents it being
      # de-duped with the forward pass) and we can free the sigmoid(features)
      # expression immediately after use during the forward pass.
      with ops.control_dependencies([dy]):
        sigmoid_features = math_ops.sigmoid(beta * features)
      # d/dx [x * s(bx)] = s(bx) * (1 + bx * (1 - s(bx)))
      activation_grad = (
          sigmoid_features * (1.0 + (beta * features) *
                              (1.0 - sigmoid_features)))
      return dy * activation_grad

    return features * math_ops.sigmoid(beta * features), grad

  return swish_impl(features)
# pylint: disable=redefined-builtin
@tf_export("linalg.normalize")
@dispatch.add_dispatch_support
def normalize(tensor, ord="euclidean", axis=None, name=None):
  """Normalizes `tensor` along dimension `axis` using specified norm.

  The norm itself is computed by `tf.linalg.norm`.

  Several different vector norms (the 1-norm, the Euclidean or 2-norm, the
  inf-norm, and in general the p-norm for p > 0) and matrix norms (Frobenius,
  1-norm, 2-norm and inf-norm) are supported.

  Args:
    tensor: `Tensor` of types `float32`, `float64`, `complex64`, `complex128`
    ord: Order of the norm. Supported values are `'fro'`, `'euclidean'`, `1`,
      `2`, `np.inf` and any positive real number yielding the corresponding
      p-norm. Default is `'euclidean'` which is equivalent to Frobenius norm if
      `tensor` is a matrix and equivalent to 2-norm for vectors.
      Some restrictions apply: a) The Frobenius norm `'fro'` is not defined for
      vectors, b) If axis is a 2-tuple (matrix norm), only `'euclidean'`,
      '`fro'`, `1`, `2`, `np.inf` are supported. See the description of `axis`
      on how to compute norms for a batch of vectors or matrices stored in a
      tensor.
    axis: If `axis` is `None` (the default), the input is considered a vector
      and a single vector norm is computed over the entire set of values in the
      tensor, i.e. `norm(tensor, ord=ord)` is equivalent to
      `norm(reshape(tensor, [-1]), ord=ord)`. If `axis` is a Python integer, the
      input is considered a batch of vectors, and `axis` determines the axis in
      `tensor` over which to compute vector norms. If `axis` is a 2-tuple of
      Python integers it is considered a batch of matrices and `axis` determines
      the axes in `tensor` over which to compute a matrix norm.
      Negative indices are supported. Example: If you are passing a tensor that
      can be either a matrix or a batch of matrices at runtime, pass
      `axis=[-2,-1]` instead of `axis=None` to make sure that matrix norms are
      computed.
    name: The name of the op.

  Returns:
    normalized: A normalized `Tensor` with the same shape as `tensor`.
    norm: The computed norms with the same shape and dtype `tensor` but the
      final axis is 1 instead. Same as running
      `tf.cast(tf.linalg.norm(tensor, ord, axis, keepdims=True), tensor.dtype)`.

  Raises:
    ValueError: If `ord` or `axis` is invalid.
  """
  with ops.name_scope(name, "normalize", [tensor]) as name:
    tensor = ops.convert_to_tensor(tensor)
    # keepdims=True keeps the reduced axis at size 1 so the norm broadcasts
    # cleanly against `tensor` in the division below.
    norm = math_ops.cast(
        linalg_ops.norm(tensor, ord, axis, keepdims=True), tensor.dtype)
    return tensor / norm, norm
@tf_export("math.l2_normalize", "linalg.l2_normalize", "nn.l2_normalize",
           v1=["math.l2_normalize", "linalg.l2_normalize", "nn.l2_normalize"])
@dispatch.add_dispatch_support
@deprecated_args(None, "dim is deprecated, use axis instead", "dim")
def l2_normalize(x, axis=None, epsilon=1e-12, name=None, dim=None):
  """Normalizes along dimension `axis` using an L2 norm.

  For a 1-D tensor with `axis = 0`, computes

      output = x / sqrt(max(sum(x**2), epsilon))

  For `x` with more dimensions, independently normalizes each 1-D slice along
  dimension `axis`.

  1-D tensor example:

  >>> x = tf.constant([3.0, 4.0])
  >>> tf.math.l2_normalize(x).numpy()
  array([0.6, 0.8], dtype=float32)

  2-D tensor example:

  >>> x = tf.constant([[3.0], [4.0]])
  >>> tf.math.l2_normalize(x, 0).numpy()
  array([[0.6],
       [0.8]], dtype=float32)

  >>> x = tf.constant([[3.0], [4.0]])
  >>> tf.math.l2_normalize(x, 1).numpy()
  array([[1.],
       [1.]], dtype=float32)

  Args:
    x: A `Tensor`.
    axis: Dimension along which to normalize.  A scalar or a vector of
      integers.
    epsilon: A lower bound value for the norm. Will use `sqrt(epsilon)` as the
      divisor if `norm < sqrt(epsilon)`.
    name: A name for this operation (optional).
    dim: Deprecated, do not use.

  Returns:
    A `Tensor` with the same shape as `x`.
  """
  axis = deprecated_argument_lookup("axis", axis, "dim", dim)
  with ops.name_scope(name, "l2_normalize", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    if x.dtype.is_complex:
      # |z|^2 = re(z)^2 + im(z)^2; scale real and imaginary parts by the same
      # real-valued inverse norm, then reassemble the complex result.
      squared_magnitude = math_ops.real(
          math_ops.reduce_sum(
              math_ops.square(math_ops.real(x)) +
              math_ops.square(math_ops.imag(x)),
              axis,
              keepdims=True))
      inv_norm = math_ops.rsqrt(math_ops.maximum(squared_magnitude, epsilon))
      return math_ops.complex(
          math_ops.multiply(math_ops.real(x), inv_norm),
          math_ops.multiply(math_ops.imag(x), inv_norm),
          name=name)
    # Real case: divide by sqrt(max(sum(x**2), epsilon)) via rsqrt.
    sum_sq = math_ops.reduce_sum(math_ops.square(x), axis, keepdims=True)
    return math_ops.multiply(
        x, math_ops.rsqrt(math_ops.maximum(sum_sq, epsilon)), name=name)
def _count_nonzero(input_tensor, dtype=dtypes.int64):
  """Same as math_ops.count_nonzero.

  The reduction is done in dtype, which can be faster for 32-bit dtypes.

  Args:
    input_tensor: numeric tensor
    dtype: reduction dtype

  Returns:
    number of nonzero values with type dtype
  """
  with ops.name_scope("count_nonzero", values=[input_tensor]):
    zero = array_ops.zeros([], dtype=input_tensor.dtype)
    # Build a 0/1 mask in the requested dtype, then sum it.
    nonzero_mask = math_ops.cast(
        math_ops.not_equal(input_tensor, zero), dtype=dtype)
    return math_ops.reduce_sum(nonzero_mask, name="nonzero_count")
@tf_export("math.zero_fraction", "nn.zero_fraction")
@dispatch.add_dispatch_support
def zero_fraction(value, name=None):
  """Returns the fraction of zeros in `value`.

  If `value` is empty, the result is `nan`.

  This is useful in summaries to measure and report sparsity.  For example,

  ```python
      z = tf.nn.relu(...)
      summ = tf.compat.v1.summary.scalar('sparsity', tf.nn.zero_fraction(z))
  ```

  Args:
    value: A tensor of numeric type.
    name: A name for the operation (optional).

  Returns:
    The fraction of zeros in `value`, with type `float32`.
  """
  with ops.name_scope(name, "zero_fraction", [value]):
    value = ops.convert_to_tensor(value, name="value")
    size = array_ops.size(value, out_type=dtypes.int64)
    # If the count is small, we can save memory/CPU with an int32 reduction.
    # pylint: disable=g-long-lambda
    num_nonzero = control_flow_ops.cond(
        size <= dtypes.int32.max,
        true_fn=lambda: math_ops.cast(
            _count_nonzero(value, dtype=dtypes.int32),
            dtype=dtypes.int64),
        false_fn=lambda: _count_nonzero(value, dtype=dtypes.int64))
    # pylint: enable=g-long-lambda
    with ops.name_scope("counts_to_fraction"):
      num_zero = size - num_nonzero
      zero_fraction_float32 = (
          math_ops.cast(num_zero, dtype=dtypes.float32) /
          math_ops.cast(size, dtype=dtypes.float32))
    return array_ops.identity(zero_fraction_float32, "fraction")
# pylint: disable=redefined-builtin
@tf_export(v1=["nn.depthwise_conv2d"])
@dispatch.add_dispatch_support
def depthwise_conv2d(input,
                     filter,
                     strides,
                     padding,
                     rate=None,
                     name=None,
                     data_format=None,
                     dilations=None):
  """Depthwise 2-D convolution.

  Given a 4D input tensor ('NHWC' or 'NCHW' data formats)
  and a filter tensor of shape
  `[filter_height, filter_width, in_channels, channel_multiplier]`
  containing `in_channels` convolutional filters of depth 1, `depthwise_conv2d`
  applies a different filter to each input channel (expanding from 1 channel
  to `channel_multiplier` channels for each), then concatenates the results
  together.  The output has `in_channels * channel_multiplier` channels.

  In detail, with the default NHWC format,

      output[b, i, j, k * channel_multiplier + q] = sum_{di, dj}
           filter[di, dj, k, q] * input[b, strides[1] * i + rate[0] * di,
                                           strides[2] * j + rate[1] * dj, k]

  Must have `strides[0] = strides[3] = 1`.  For the most common case of the
  same horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
  If any value in `rate` is greater than 1, we perform atrous depthwise
  convolution, in which case all values in the `strides` tensor must be equal
  to 1.

  Usage Example:

  >>> x = np.array([
  ...     [1., 2.],
  ...     [3., 4.],
  ...     [5., 6.]
  ... ], dtype=np.float32).reshape((1, 3, 2, 1))
  >>> kernel = np.array([
  ...     [1., 2.],
  ...     [3., 4]
  ... ], dtype=np.float32).reshape((2, 1, 1, 2))
  >>> tf.compat.v1.nn.depthwise_conv2d(x, kernel, strides=[1, 1, 1, 1],
  ...                                  padding='VALID').numpy()
    array([[[[10., 14.],
             [14., 20.]],
            [[18., 26.],
             [22., 32.]]]], dtype=float32)
  >>> tf.compat.v1.nn.depthwise_conv2d(x, kernel, strides=[1, 1, 1, 1],
  ...                                  padding=[[0, 0], [1, 0], [1, 0], [0, 0]]
  ...                                 ).numpy()
    array([[[[ 0.,  0.],
             [ 3.,  4.],
             [ 6.,  8.]],
            [[ 0.,  0.],
             [10., 14.],
             [14., 20.]],
            [[ 0.,  0.],
             [18., 26.],
             [22., 32.]]]], dtype=float32)

  Args:
    input: 4-D with shape according to `data_format`.
    filter: 4-D with shape
      `[filter_height, filter_width, in_channels, channel_multiplier]`.
    strides: 1-D of size 4.  The stride of the sliding window for each
      dimension of `input`.
    padding: Controls how to pad the image before applying the convolution. Can
      be the string `"SAME"` or `"VALID"` indicating the type of padding
      algorithm to use, or a list indicating the explicit paddings at the start
      and end of each dimension. When explicit padding is used and data_format
      is `"NHWC"`, this should be in the form `[[0, 0], [pad_top, pad_bottom],
      [pad_left, pad_right], [0, 0]]`. When explicit padding used and
      data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
      [pad_top, pad_bottom], [pad_left, pad_right]]`.
    rate: 1-D of size 2. The dilation rate in which we sample input values
      across the `height` and `width` dimensions in atrous convolution. If it is
      greater than 1, then all values of strides must be 1.
    name: A name for this operation (optional).
    data_format: The data format for input. Either "NHWC" (default) or "NCHW".
    dilations: Alias of rate.

  Returns:
    A 4-D `Tensor` with shape according to `data_format`.  E.g., for
    "NHWC" format, shape is
    `[batch, out_height, out_width, in_channels * channel_multiplier].`
  """
  # `rate` and `dilations` are aliases; exactly one (or neither) may be set.
  rate = deprecated_argument_lookup("dilations", dilations, "rate", rate)
  with ops.name_scope(name, "depthwise", [input, filter]) as name:
    input = ops.convert_to_tensor(input, name="tensor_in")
    filter = ops.convert_to_tensor(filter, name="filter_in")
    if rate is None:
      # No dilation requested: sample the input densely.
      rate = [1, 1]

    # Use depthwise_conv2d_native if executing on TPU.
    if device_context.enclosing_tpu_context() is not None:
      # The native op takes a 4-element dilation vector whose layout depends
      # on data_format; the spatial entries come from `rate`.
      if data_format == "NCHW":
        dilations = [1, 1, rate[0], rate[1]]
      else:
        dilations = [1, rate[0], rate[1], 1]
      return nn_ops.depthwise_conv2d_native(
          input=input,
          filter=filter,
          strides=strides,
          padding=padding,
          data_format=data_format,
          dilations=dilations,
          name=name)

    # Non-TPU path: dilation is realized by with_space_to_batch, which wraps
    # this non-dilated depthwise conv around space-to-batch transforms.
    # `op` receives the already-transformed input and the resolved padding.
    def op(input_converted, _, padding):
      return nn_ops.depthwise_conv2d_native(
          input=input_converted,
          filter=filter,
          strides=strides,
          padding=padding,
          data_format=data_format,
          name=name)

    return nn_ops.with_space_to_batch(
        input=input,
        filter_shape=array_ops.shape(filter),
        dilation_rate=rate,
        padding=padding,
        data_format=data_format,
        op=op)
@tf_export("nn.depthwise_conv2d", v1=[])
@dispatch.add_dispatch_support
def depthwise_conv2d_v2(input,
                        filter,
                        strides,
                        padding,
                        data_format=None,
                        dilations=None,
                        name=None):
  """Depthwise 2-D convolution.

  Given a 4D input tensor ('NHWC' or 'NCHW' data formats)
  and a filter tensor of shape
  `[filter_height, filter_width, in_channels, channel_multiplier]`
  containing `in_channels` convolutional filters of depth 1, `depthwise_conv2d`
  applies a different filter to each input channel (expanding from 1 channel
  to `channel_multiplier` channels for each), then concatenates the results
  together.  The output has `in_channels * channel_multiplier` channels.

  In detail, with the default NHWC format,

      output[b, i, j, k * channel_multiplier + q] = sum_{di, dj}
           filter[di, dj, k, q] * input[b, strides[1] * i + rate[0] * di,
                                           strides[2] * j + rate[1] * dj, k]

  Must have `strides[0] = strides[3] = 1`.  For the most common case of the
  same horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
  If any value in `rate` is greater than 1, we perform atrous depthwise
  convolution, in which case all values in the `strides` tensor must be equal
  to 1.

  Usage Example:

  >>> x = np.array([
  ...     [1., 2.],
  ...     [3., 4.],
  ...     [5., 6.]
  ... ], dtype=np.float32).reshape((1, 3, 2, 1))
  >>> kernel = np.array([
  ...     [1., 2.],
  ...     [3., 4]
  ... ], dtype=np.float32).reshape((2, 1, 1, 2))
  >>> tf.nn.depthwise_conv2d(x, kernel, strides=[1, 1, 1, 1],
  ...                        padding='VALID').numpy()
    array([[[[10., 14.],
             [14., 20.]],
            [[18., 26.],
             [22., 32.]]]], dtype=float32)
  >>> tf.nn.depthwise_conv2d(x, kernel, strides=[1, 1, 1, 1],
  ...                        padding=[[0, 0], [1, 0], [1, 0], [0, 0]]).numpy()
    array([[[[ 0.,  0.],
             [ 3.,  4.],
             [ 6.,  8.]],
            [[ 0.,  0.],
             [10., 14.],
             [14., 20.]],
            [[ 0.,  0.],
             [18., 26.],
             [22., 32.]]]], dtype=float32)

  Args:
    input: 4-D with shape according to `data_format`.
    filter: 4-D with shape
      `[filter_height, filter_width, in_channels, channel_multiplier]`.
    strides: 1-D of size 4.  The stride of the sliding window for each
      dimension of `input`.
    padding: Controls how to pad the image before applying the convolution. Can
      be the string `"SAME"` or `"VALID"` indicating the type of padding
      algorithm to use, or a list indicating the explicit paddings at the start
      and end of each dimension. When explicit padding is used and data_format
      is `"NHWC"`, this should be in the form `[[0, 0], [pad_top, pad_bottom],
      [pad_left, pad_right], [0, 0]]`. When explicit padding used and
      data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
      [pad_top, pad_bottom], [pad_left, pad_right]]`.
    data_format: The data format for input. Either "NHWC" (default) or "NCHW".
    dilations: 1-D of size 2. The dilation rate in which we sample input values
      across the `height` and `width` dimensions in atrous convolution. If it is
      greater than 1, then all values of strides must be 1.
    name: A name for this operation (optional).

  Returns:
    A 4-D `Tensor` with shape according to `data_format`.  E.g., for
    "NHWC" format, shape is
    `[batch, out_height, out_width, in_channels * channel_multiplier].`
  """
  # Pure v2 shim: `dilations` maps onto the v1 `rate` argument.
  return depthwise_conv2d(
      input,
      filter,
      strides,
      padding,
      rate=dilations,
      name=name,
      data_format=data_format)
# pylint: enable=redefined-builtin
# pylint: disable=redefined-builtin,line-too-long
@tf_export(v1=["nn.separable_conv2d"])
@dispatch.add_dispatch_support
def separable_conv2d(input,
                     depthwise_filter,
                     pointwise_filter,
                     strides,
                     padding,
                     rate=None,
                     name=None,
                     data_format=None,
                     dilations=None):
  """2-D convolution with separable filters.

  Performs a depthwise convolution that acts separately on channels followed by
  a pointwise convolution that mixes channels.  Note that this is separability
  between dimensions `[1, 2]` and `3`, not spatial separability between
  dimensions `1` and `2`.

  In detail, with the default NHWC format,

      output[b, i, j, k] = sum_{di, dj, q, r}
          input[b, strides[1] * i + di, strides[2] * j + dj, q] *
          depthwise_filter[di, dj, q, r] *
          pointwise_filter[0, 0, q * channel_multiplier + r, k]

  `strides` controls the strides for the depthwise convolution only, since
  the pointwise convolution has implicit strides of `[1, 1, 1, 1]`.  Must have
  `strides[0] = strides[3] = 1`.  For the most common case of the same
  horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
  If any value in `rate` is greater than 1, we perform atrous depthwise
  convolution, in which case all values in the `strides` tensor must be equal
  to 1.

  Args:
    input: 4-D `Tensor` with shape according to `data_format`.
    depthwise_filter: 4-D `Tensor` with shape
      `[filter_height, filter_width, in_channels, channel_multiplier]`.
      Contains `in_channels` convolutional filters of depth 1.
    pointwise_filter: 4-D `Tensor` with shape
      `[1, 1, channel_multiplier * in_channels, out_channels]`.  Pointwise
      filter to mix channels after `depthwise_filter` has convolved spatially.
    strides: 1-D of size 4.  The strides for the depthwise convolution for
      each dimension of `input`.
    padding: Controls how to pad the image before applying the depthwise
      convolution. Can be the string `"SAME"` or `"VALID"` indicating the type
      of padding algorithm to use, or a Python list indicating the explicit
      paddings at the start and end of each dimension. When explicit padding is
      used and data_format is `"NHWC"`, this should be in the form `[[0, 0],
      [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit
      padding used and data_format is `"NCHW"`, this should be in the form
      `[[0, 0], [0, 0], [pad_top, pad_bottom], [pad_left, pad_right]]`.
    rate: 1-D of size 2. The dilation rate in which we sample input values
      across the `height` and `width` dimensions in atrous convolution. If it is
      greater than 1, then all values of strides must be 1.
    name: A name for this operation (optional).
    data_format: The data format for input. Either "NHWC" (default) or "NCHW".
    dilations: Alias of rate.

  Returns:
    A 4-D `Tensor` with shape according to 'data_format'. For
      example, with data_format="NHWC", shape is [batch, out_height,
      out_width, out_channels].
  """
  # `rate` and `dilations` are aliases; exactly one (or neither) may be set.
  rate = deprecated_argument_lookup("dilations", dilations, "rate", rate)
  with ops.name_scope(name, "separable_conv2d",
                      [input, depthwise_filter, pointwise_filter]) as name:
    input = ops.convert_to_tensor(input, name="tensor_in")
    depthwise_filter = ops.convert_to_tensor(
        depthwise_filter, name="depthwise_filter")
    pointwise_filter = ops.convert_to_tensor(
        pointwise_filter, name="pointwise_filter")

    # The pointwise filter must be 1x1 spatially; validate statically where
    # the shape is known.
    pointwise_filter_shape = pointwise_filter.get_shape().with_rank(4)
    pointwise_filter_shape.dims[0].assert_is_compatible_with(1)
    pointwise_filter_shape.dims[1].assert_is_compatible_with(1)

    if rate is None:
      # No dilation requested: sample the input densely.
      rate = [1, 1]

    # The layout of the ops in the graph are expected to be as follows:
    # depthwise_conv2d  // Conv2D op corresponding to native depthwise conv.
    # separable_conv2d  // Conv2D op corresponding to the pointwise conv.

    # Dilation (atrous convolution) is realized by with_space_to_batch, which
    # wraps this non-dilated depthwise conv around space-to-batch transforms.
    def op(input_converted, _, padding):
      return nn_ops.depthwise_conv2d_native(
          input=input_converted,
          filter=depthwise_filter,
          strides=strides,
          padding=padding,
          data_format=data_format,
          name="depthwise")

    depthwise = nn_ops.with_space_to_batch(
        input=input,
        filter_shape=array_ops.shape(depthwise_filter),
        dilation_rate=rate,
        padding=padding,
        data_format=data_format,
        op=op)

    # Pointwise 1x1 conv mixes the depthwise outputs across channels.
    return nn_ops.conv2d(
        depthwise,
        pointwise_filter, [1, 1, 1, 1],
        padding="VALID",
        data_format=data_format,
        name=name)
@tf_export("nn.separable_conv2d", v1=[])
@dispatch.add_dispatch_support
def separable_conv2d_v2(
    input,
    depthwise_filter,
    pointwise_filter,
    strides,
    padding,
    data_format=None,
    dilations=None,
    name=None,
):
  """2-D convolution with separable filters.

  Performs a depthwise convolution that acts separately on channels followed by
  a pointwise convolution that mixes channels.  Note that this is separability
  between dimensions `[1, 2]` and `3`, not spatial separability between
  dimensions `1` and `2`.

  In detail, with the default NHWC format,

      output[b, i, j, k] = sum_{di, dj, q, r}
          input[b, strides[1] * i + di, strides[2] * j + dj, q] *
          depthwise_filter[di, dj, q, r] *
          pointwise_filter[0, 0, q * channel_multiplier + r, k]

  `strides` controls the strides for the depthwise convolution only, since
  the pointwise convolution has implicit strides of `[1, 1, 1, 1]`.  Must have
  `strides[0] = strides[3] = 1`.  For the most common case of the same
  horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
  If any value in `rate` is greater than 1, we perform atrous depthwise
  convolution, in which case all values in the `strides` tensor must be equal
  to 1.

  Args:
    input: 4-D `Tensor` with shape according to `data_format`.
    depthwise_filter: 4-D `Tensor` with shape `[filter_height, filter_width,
      in_channels, channel_multiplier]`. Contains `in_channels` convolutional
      filters of depth 1.
    pointwise_filter: 4-D `Tensor` with shape `[1, 1, channel_multiplier *
      in_channels, out_channels]`.  Pointwise filter to mix channels after
      `depthwise_filter` has convolved spatially.
    strides: 1-D of size 4.  The strides for the depthwise convolution for each
      dimension of `input`.
    padding: Controls how to pad the image before applying the depthwise
      convolution. Can be the string `"SAME"` or `"VALID"` indicating the type
      of padding algorithm to use, or a Python list indicating the explicit
      paddings at the start and end of each dimension. When explicit padding is
      used and data_format is `"NHWC"`, this should be in the form `[[0, 0],
      [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit
      padding used and data_format is `"NCHW"`, this should be in the form
      `[[0, 0], [0, 0], [pad_top, pad_bottom], [pad_left, pad_right]]`.
    data_format: The data format for input. Either "NHWC" (default) or "NCHW".
    dilations: 1-D of size 2. The dilation rate in which we sample input values
      across the `height` and `width` dimensions in atrous convolution. If it is
      greater than 1, then all values of strides must be 1.
    name: A name for this operation (optional).

  Returns:
    A 4-D `Tensor` with shape according to 'data_format'. For
      example, with data_format="NHWC", shape is [batch, out_height,
      out_width, out_channels].
  """
  # Pure v2 shim: `dilations` maps onto the v1 `rate` argument.
  return separable_conv2d(
      input=input,
      depthwise_filter=depthwise_filter,
      pointwise_filter=pointwise_filter,
      strides=strides,
      padding=padding,
      rate=dilations,
      data_format=data_format,
      name=name)
# pylint: enable=redefined-builtin,line-too-long
@tf_export(v1=["nn.sufficient_statistics"])
@dispatch.add_dispatch_support
def sufficient_statistics(x, axes, shift=None, keep_dims=None, name=None,
                          keepdims=None):
  """Calculate the sufficient statistics for the mean and variance of `x`.

  These sufficient statistics are computed using the one pass algorithm on
  an input that's optionally shifted. See:
  https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Computing_shifted_data

  For example:
  >>> t = [[1, 2, 3], [4, 5, 6]]
  >>> sufficient_statistics(t, [1])
  (<tf.Tensor: shape=(), dtype=int32, numpy=3>, <tf.Tensor: shape=(2,),
  dtype=int32, numpy=array([ 6, 15], dtype=int32)>, <tf.Tensor: shape=(2,),
  dtype=int32, numpy=array([14, 77], dtype=int32)>, None)
  >>> sufficient_statistics(t, [-1])
  (<tf.Tensor: shape=(), dtype=int32, numpy=3>, <tf.Tensor: shape=(2,),
  dtype=int32, numpy=array([ 6, 15], dtype=int32)>, <tf.Tensor: shape=(2,),
  dtype=int32, numpy=array([14, 77], dtype=int32)>, None)

  Args:
    x: A `Tensor`.
    axes: Array of ints. Axes along which to compute mean and variance. As in
      Python, the axes can also be negative numbers. A negative axis is
      interpreted as counting from the end of the rank, i.e., axis +
      rank(values)-th dimension.
    shift: A `Tensor` containing the value by which to shift the data for
      numerical stability, or `None` if no shift is to be performed. A shift
      close to the true mean provides the most numerically stable results.
    keep_dims: produce statistics with the same dimensionality as the input.
    name: Name used to scope the operations that compute the sufficient stats.
    keepdims: Alias for keep_dims.

  Returns:
    Four `Tensor` objects of the same type as `x`:

    * the count (number of elements to average over).
    * the (possibly shifted) sum of the elements in the array.
    * the (possibly shifted) sum of squares of the elements in the array.
    * the shift by which the mean must be corrected or None if `shift` is None.
  """
  # Duplicate axes would double-count; reduce to a unique set.
  axes = list(set(axes))
  keep_dims = deprecated_argument_lookup(
      "keepdims", keepdims, "keep_dims", keep_dims)
  if keep_dims is None:
    keep_dims = False
  with ops.name_scope(name, "sufficient_statistics", [x, shift]):
    x = ops.convert_to_tensor(x, name="x")
    x_shape = x.get_shape()
    counts_known_statically = x_shape.rank is not None and all(
        x_shape.dims[d].value is not None for d in axes)
    if counts_known_statically:
      # All reduced dimensions are static: fold the count into a constant.
      total = 1
      for d in axes:
        total *= x_shape.dims[d].value
      counts = constant_op.constant(total, dtype=x.dtype)
    else:  # shape needs to be inferred at runtime.
      # Normalize axes to be positive. Required for gather.
      rank = array_ops.rank(x)
      positive_axes = [axis + rank if axis < 0 else axis for axis in axes]
      x_dims = array_ops.gather(
          math_ops.cast(array_ops.shape(x), x.dtype), positive_axes)
      counts = math_ops.reduce_prod(x_dims, name="count")
    if shift is None:  # no shift.
      m_ss = x
      v_ss = math_ops.square(x)
    else:
      shift = ops.convert_to_tensor(shift, name="shift")
      m_ss = math_ops.subtract(x, shift)
      v_ss = math_ops.squared_difference(x, shift)
    m_ss = math_ops.reduce_sum(m_ss, axes, keepdims=keep_dims, name="mean_ss")
    v_ss = math_ops.reduce_sum(v_ss, axes, keepdims=keep_dims, name="var_ss")
  return counts, m_ss, v_ss, shift
@tf_export("nn.sufficient_statistics", v1=[])
@dispatch.add_dispatch_support
def sufficient_statistics_v2(x, axes, shift=None, keepdims=False, name=None):
  """Calculate the sufficient statistics for the mean and variance of `x`.

  These sufficient statistics are computed using the one pass algorithm on
  an input that's optionally shifted. See:
  https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Computing_shifted_data

  Args:
    x: A `Tensor`.
    axes: Array of ints. Axes along which to compute mean and variance.
    shift: A `Tensor` containing the value by which to shift the data for
      numerical stability, or `None` if no shift is to be performed. A shift
      close to the true mean provides the most numerically stable results.
    keepdims: produce statistics with the same dimensionality as the input.
    name: Name used to scope the operations that compute the sufficient stats.

  Returns:
    Four `Tensor` objects of the same type as `x`:

    * the count (number of elements to average over).
    * the (possibly shifted) sum of the elements in the array.
    * the (possibly shifted) sum of squares of the elements in the array.
    * the shift by which the mean must be corrected or None if `shift` is None.
  """
  # v2 shim: `keepdims` maps onto the v1 `keep_dims` argument.
  return sufficient_statistics(
      x, axes, shift=shift, keep_dims=keepdims, name=name)
@tf_export("nn.normalize_moments")
@dispatch.add_dispatch_support
def normalize_moments(counts, mean_ss, variance_ss, shift, name=None):
  """Calculate the mean and variance of based on the sufficient statistics.

  Args:
    counts: A `Tensor` containing the total count of the data (one value).
    mean_ss: A `Tensor` containing the mean sufficient statistics: the (possibly
      shifted) sum of the elements to average over.
    variance_ss: A `Tensor` containing the variance sufficient statistics: the
      (possibly shifted) squared sum of the data to compute the variance over.
    shift: A `Tensor` containing the value by which the data is shifted for
      numerical stability, or `None` if no shift was performed.
    name: Name used to scope the operations that compute the moments.

  Returns:
    Two `Tensor` objects: `mean` and `variance`.
  """
  with ops.name_scope(name, "normalize", [counts, mean_ss, variance_ss, shift]):
    divisor = math_ops.reciprocal(counts, name="divisor")
    if shift is None:  # no shift.
      shifted_mean = math_ops.multiply(mean_ss, divisor, name="mean")
      mean = shifted_mean
    else:
      # Undo the shift: true mean = shifted mean + shift.
      shifted_mean = math_ops.multiply(mean_ss, divisor, name="shifted_mean")
      mean = math_ops.add(shifted_mean, shift, name="mean")
    # Var[x] = E[(x - s)^2] - (E[x - s])^2, which is shift-invariant.
    variance = math_ops.subtract(
        math_ops.multiply(variance_ss, divisor),
        math_ops.square(shifted_mean),
        name="variance")
  return (mean, variance)
@tf_export(v1=["nn.moments"])
@dispatch.add_dispatch_support
def moments(
    x,
    axes,
    shift=None,  # pylint: disable=unused-argument
    name=None,
    keep_dims=None,
    keepdims=None):
  """Calculate the mean and variance of `x`.

  The mean and variance are calculated by aggregating the contents of `x`
  across `axes`.  If `x` is 1-D and `axes = [0]` this is just the mean
  and variance of a vector.

  Note: shift is currently not used; the true mean is computed and used.

  When using these moments for batch normalization (see
  `tf.nn.batch_normalization`):

   * for so-called "global normalization", used with convolutional filters with
     shape `[batch, height, width, depth]`, pass `axes=[0, 1, 2]`.
   * for simple batch normalization pass `axes=[0]` (batch only).

  Args:
    x: A `Tensor`.
    axes: Array of ints.  Axes along which to compute mean and
      variance.
    shift: Not used in the current implementation
    name: Name used to scope the operations that compute the moments.
    keep_dims: produce moments with the same dimensionality as the input.
    keepdims: Alias to keep_dims.

  Returns:
    Two `Tensor` objects: `mean` and `variance`.
  """
  keep_dims = deprecated_argument_lookup(
      "keepdims", keepdims, "keep_dims", keep_dims)
  if keep_dims is None:
    keep_dims = False
  with ops.name_scope(name, "moments", [x, axes]):
    # The dynamic range of fp16 is too limited to support the collection of
    # sufficient statistics. As a workaround we simply perform the operations
    # on 32-bit floats before converting the mean and variance back to fp16
    is_half = x.dtype == dtypes.float16
    y = math_ops.cast(x, dtypes.float32) if is_half else x
    # Compute true mean while keeping the dims for proper broadcasting.
    mean = math_ops.reduce_mean(y, axes, keepdims=True, name="mean")
    # sample variance, not unbiased variance
    # Note: stop_gradient does not change the gradient that gets
    # backpropagated to the mean from the variance calculation,
    # because that gradient is zero
    variance = math_ops.reduce_mean(
        math_ops.squared_difference(y, array_ops.stop_gradient(mean)),
        axes,
        keepdims=True,
        name="variance")
    if not keep_dims:
      mean = array_ops.squeeze(mean, axes)
      variance = array_ops.squeeze(variance, axes)
    if is_half:
      return (math_ops.cast(mean, dtypes.float16),
              math_ops.cast(variance, dtypes.float16))
    return (mean, variance)
@tf_export("nn.moments", v1=[])
@dispatch.add_dispatch_support
def moments_v2(
    x,
    axes,
    shift=None,
    keepdims=False,
    name=None):
  """Calculates the mean and variance of `x`.

  The mean and variance are calculated by aggregating the contents of `x`
  across `axes`.  If `x` is 1-D and `axes = [0]` this is just the mean
  and variance of a vector.

  Note: shift is currently not used; the true mean is computed and used.

  When using these moments for batch normalization (see
  `tf.nn.batch_normalization`):

   * for so-called "global normalization", used with convolutional filters with
     shape `[batch, height, width, depth]`, pass `axes=[0, 1, 2]`.
   * for simple batch normalization pass `axes=[0]` (batch only).

  Args:
    x: A `Tensor`.
    axes: Array of ints.  Axes along which to compute mean and
      variance.
    shift: Not used in the current implementation.
    keepdims: produce moments with the same dimensionality as the input.
    name: Name used to scope the operations that compute the moments.

  Returns:
    Two `Tensor` objects: `mean` and `variance`.
  """
  # v2 shim: `keepdims` maps onto the v1 `keep_dims` argument.
  return moments(x, axes, shift=shift, name=name, keep_dims=keepdims)
@tf_export(v1=["nn.weighted_moments"])
@dispatch.add_dispatch_support
def weighted_moments(x, axes, frequency_weights, name=None, keep_dims=None,
                     keepdims=None):
  """Returns the frequency-weighted mean and variance of `x`.

  Args:
    x: A tensor.
    axes: 1-d tensor of int32 values; these are the axes along which
      to compute mean and variance.
    frequency_weights: A tensor of positive weights which can be
      broadcast with x.
    name: Name used to scope the operation.
    keep_dims: Produce moments with the same dimensionality as the input.
    keepdims: Alias of keep_dims.

  Returns:
    Two tensors: `weighted_mean` and `weighted_variance`.
  """
  keep_dims = deprecated_argument_lookup(
      "keepdims", keepdims, "keep_dims", keep_dims)
  if keep_dims is None:
    keep_dims = False
  with ops.name_scope(name, "weighted_moments", [x, frequency_weights, axes]):
    x = ops.convert_to_tensor(x, name="x")
    frequency_weights = ops.convert_to_tensor(
        frequency_weights, name="frequency_weights")

    # Unlike moments(), this just uses a simpler two-pass method.

    # See comment in moments() WRT precision; it applies here too.
    needs_cast = x.dtype == dtypes.float16
    if needs_cast:
      x = math_ops.cast(x, dtypes.float32)

    if frequency_weights.dtype != x.dtype:
      frequency_weights = math_ops.cast(frequency_weights, x.dtype)

    # Note that we use keep_dims=True for our reductions regardless of the arg;
    # this is so that the results remain broadcast-compatible with the inputs.
    weighted_sum = math_ops.reduce_sum(
        frequency_weights * x, axes, name="weighted_input_sum", keepdims=True)

    # The shape of the weights isn't necessarily the same as x's
    # shape, just broadcast-compatible with it -- so this expression
    # performs broadcasting to give a per-item weight, with the same
    # shape as (frequency_weights * x). This avoids having to reason
    # through all the broadcast logic to compute a correct
    # sum_of_weights.
    per_item_weights = frequency_weights + array_ops.zeros_like(x)
    weight_total = math_ops.reduce_sum(
        per_item_weights, axes, name="sum_of_weights", keepdims=True)

    inv_weight_total = math_ops.reciprocal(weight_total, name="inv_weight_sum")
    weighted_mean = math_ops.multiply(weighted_sum, inv_weight_total)

    # Have the weighted mean; now on to variance:
    weighted_distsq = math_ops.reduce_sum(
        frequency_weights * math_ops.squared_difference(x, weighted_mean),
        axes,
        name="weighted_distsq",
        keepdims=True)
    weighted_variance = math_ops.multiply(weighted_distsq, inv_weight_total)

    if not keep_dims:
      weighted_mean = array_ops.squeeze(weighted_mean, axis=axes)
      weighted_variance = array_ops.squeeze(
          weighted_variance, axis=axes)

    if needs_cast:
      weighted_mean = math_ops.cast(weighted_mean, dtypes.float16)
      weighted_variance = math_ops.cast(weighted_variance, dtypes.float16)

    return weighted_mean, weighted_variance
@tf_export("nn.weighted_moments", v1=[])
@dispatch.add_dispatch_support
def weighted_moments_v2(x, axes, frequency_weights, keepdims=False, name=None):
  """Returns the frequency-weighted mean and variance of `x`.

  TF2 endpoint; identical to `weighted_moments` except that the keep-dims
  flag is spelled `keepdims` rather than `keep_dims`.

  Args:
    x: A tensor.
    axes: 1-d tensor of int32 values; these are the axes along which
      to compute mean and variance.
    frequency_weights: A tensor of positive weights which can be
      broadcast with x.
    keepdims: Produce moments with the same dimensionality as the input.
    name: Name used to scope the operation.
  Returns:
    Two tensors: `weighted_mean` and `weighted_variance`.
  """
  # Delegate to the v1 implementation, translating the argument name.
  moment_kwargs = dict(
      x=x,
      axes=axes,
      frequency_weights=frequency_weights,
      name=name,
      keep_dims=keepdims)
  return weighted_moments(**moment_kwargs)
@tf_export("nn.batch_normalization")
@dispatch.add_dispatch_support
def batch_normalization(x,
                        mean,
                        variance,
                        offset,
                        scale,
                        variance_epsilon,
                        name=None):
  r"""Batch normalization.
  Normalizes a tensor by `mean` and `variance`, and applies (optionally) a
  `scale` \\(\gamma\\) to it, as well as an `offset` \\(\beta\\):
  \\(\frac{\gamma(x-\mu)}{\sigma}+\beta\\)
  `mean`, `variance`, `offset` and `scale` are all expected to be of one of two
  shapes:
  * In all generality, they can have the same number of dimensions as the
    input `x`, with identical sizes as `x` for the dimensions that are not
    normalized over (the 'depth' dimension(s)), and dimension 1 for the
    others which are being normalized over.
    `mean` and `variance` in this case would typically be the outputs of
    `tf.nn.moments(..., keepdims=True)` during training, or running averages
    thereof during inference.
  * In the common case where the 'depth' dimension is the last dimension in
    the input tensor `x`, they may be one dimensional tensors of the same
    size as the 'depth' dimension.
    This is the case for example for the common `[batch, depth]` layout of
    fully-connected layers, and `[batch, height, width, depth]` for
    convolutions.
    `mean` and `variance` in this case would typically be the outputs of
    `tf.nn.moments(..., keepdims=False)` during training, or running averages
    thereof during inference.
  See equation 11 in Algorithm 2 of source:
  [Batch Normalization: Accelerating Deep Network Training by
  Reducing Internal Covariate Shift; S. Ioffe, C. Szegedy]
  (http://arxiv.org/abs/1502.03167).
  Args:
    x: Input `Tensor` of arbitrary dimensionality.
    mean: A mean `Tensor`.
    variance: A variance `Tensor`.
    offset: An offset `Tensor`, often denoted \\(\beta\\) in equations, or
      None. If present, will be added to the normalized tensor.
    scale: A scale `Tensor`, often denoted \\(\gamma\\) in equations, or
      `None`. If present, the scale is applied to the normalized tensor.
    variance_epsilon: A small float number to avoid dividing by 0.
    name: A name for this operation (optional).
  Returns:
    the normalized, scaled, offset tensor.
  References:
    Batch Normalization - Accelerating Deep Network Training by Reducing
    Internal Covariate Shift:
    [Ioffe et al., 2015](http://arxiv.org/abs/1502.03167)
    ([pdf](http://proceedings.mlr.press/v37/ioffe15.pdf))
  """
  with ops.name_scope(name, "batchnorm", [x, mean, variance, scale, offset]):
    # Fold gamma into the reciprocal standard deviation so the normalization
    # becomes a single multiply-add: y = x * inv + (offset - mean * inv).
    inv = math_ops.rsqrt(variance + variance_epsilon)
    if scale is not None:
      inv *= scale
    # Note: tensorflow/contrib/quantize/python/fold_batch_norms.py depends on
    # the precise order of ops that are generated by the expression below.
    # The casts defer conversion to x.dtype (e.g. float16) until the end so
    # the statistics arithmetic stays in their (typically float32) dtype.
    return x * math_ops.cast(inv, x.dtype) + math_ops.cast(
        offset - mean * inv if offset is not None else -mean * inv, x.dtype)
@tf_export(v1=["nn.fused_batch_norm"])
@dispatch.add_dispatch_support
def fused_batch_norm(
    x,
    scale,
    offset,  # pylint: disable=invalid-name
    mean=None,
    variance=None,
    epsilon=0.001,
    data_format="NHWC",
    is_training=True,
    name=None,
    exponential_avg_factor=1.0):
  r"""Batch normalization.

  See Source: [Batch Normalization: Accelerating Deep Network Training by
  Reducing Internal Covariate Shift; S. Ioffe, C. Szegedy]
  (http://arxiv.org/abs/1502.03167).

  Args:
    x: Input `Tensor` of 4 or 5 dimensions.
    scale: A `Tensor` of 1 dimension for scaling.
    offset: A `Tensor` of 1 dimension for bias.
    mean: A `Tensor` of 1 dimension for population mean. Must be None when
      is_training is True and exponential_avg_factor == 1.0; otherwise it must
      be a `Tensor` of the same shape as scale holding the estimated population
      mean (inference) or the exponential running mean (training with
      exponential_avg_factor != 1.0).
    variance: A `Tensor` of 1 dimension for population variance, with the same
      None/shape requirements as `mean` above.
    epsilon: A small float number added to the variance of x.
    data_format: The data format for x. Support "NHWC" (default) or "NCHW" for
      4D tenors and "NDHWC" or "NCDHW" for 5D tensors.
    is_training: A bool value to specify if the operation is used for
      training or inference.
    name: A name for this operation (optional).
    exponential_avg_factor: A float number (usually between 0 and 1) used
      for controlling the decay of the running population average of mean and
      variance. If set to 1.0, the current batch average is returned.

  Returns:
    y: A 4D or 5D Tensor for the normalized, scaled, offsetted x.
    running_mean: A 1D Tensor for the exponential running mean of x,
      (1 - exponential_avg_factor) * mean + exponential_avg_factor *
      batch_mean, where batch_mean is the mean of the current batch in x.
    running_var: A 1D Tensor for the exponential running variance, computed
      from `variance` and the current batch variance in the same way.

  References:
    Batch Normalization - Accelerating Deep Network Training by Reducing
    Internal Covariate Shift:
    [Ioffe et al., 2015](http://proceedings.mlr.press/v37/ioffe15.html)
    ([pdf](http://proceedings.mlr.press/v37/ioffe15.pdf))
  """
  # Population statistics are mandatory for inference and for any call that
  # blends them into a running average.
  requires_statistics = not is_training or exponential_avg_factor != 1.0
  if requires_statistics and (mean is None or variance is None):
    raise ValueError("Both `mean` and `variance` must be a 1D tensor when "
                     "`is_training` is False or `exponential_avg_factor` != "
                     f"1.0. Received: `mean` {mean!r} and `variance` "
                     f"{variance!r}")
  x = ops.convert_to_tensor(x, name="input")
  scale = ops.convert_to_tensor(scale, name="scale")
  offset = ops.convert_to_tensor(offset, name="offset")
  # The kernel expects concrete tensors; empty constants stand in for the
  # "no statistics supplied" case.
  if mean is None:
    mean = constant_op.constant([])
  if variance is None:
    variance = constant_op.constant([])
  # Clamp epsilon to 1.001e-5, the minimum CUDNN accepts without raising
  # an exception (see cudnn.h).
  min_epsilon = 1.001e-5
  epsilon = max(epsilon, min_epsilon)
  y, running_mean, running_var, _, _, _ = gen_nn_ops.fused_batch_norm_v3(
      x,
      scale,
      offset,
      mean,
      variance,
      epsilon=epsilon,
      exponential_avg_factor=exponential_avg_factor,
      data_format=data_format,
      is_training=is_training,
      name=name)
  return y, running_mean, running_var
@tf_export(v1=["nn.batch_norm_with_global_normalization"])
@dispatch.add_dispatch_support
def batch_norm_with_global_normalization(t=None,
                                         m=None,
                                         v=None,
                                         beta=None,
                                         gamma=None,
                                         variance_epsilon=None,
                                         scale_after_normalization=None,
                                         name=None,
                                         input=None,  # pylint: disable=redefined-builtin
                                         mean=None,
                                         variance=None):
  """Batch normalization.

  This op is deprecated. See `tf.nn.batch_normalization`.

  Args:
    t: A 4D input Tensor.
    m: A 1D mean Tensor with size matching the last dimension of t.
      This is the first output from tf.nn.moments, or a saved moving
      average thereof.
    v: A 1D variance Tensor with size matching the last dimension of t.
      This is the second output from tf.nn.moments, or a saved moving
      average thereof.
    beta: A 1D beta Tensor with size matching the last dimension of t.
      An offset to be added to the normalized tensor.
    gamma: A 1D gamma Tensor with size matching the last dimension of t.
      If "scale_after_normalization" is true, this tensor will be multiplied
      with the normalized tensor.
    variance_epsilon: A small float number to avoid dividing by 0.
    scale_after_normalization: A bool indicating whether the resulted tensor
      needs to be multiplied with gamma.
    name: A name for this operation (optional).
    input: Alias for t.
    mean: Alias for m.
    variance: Alias for v.

  Returns:
    A batch-normalized `t`.

  References:
    Batch Normalization - Accelerating Deep Network Training by Reducing
    Internal Covariate Shift:
    [Ioffe et al., 2015](http://proceedings.mlr.press/v37/ioffe15.html)
    ([pdf](http://proceedings.mlr.press/v37/ioffe15.pdf))
  """
  # Resolve each deprecated alias pair down to a single value.
  t = deprecated_argument_lookup("input", input, "t", t)
  m = deprecated_argument_lookup("mean", mean, "m", m)
  v = deprecated_argument_lookup("variance", variance, "v", v)
  # `scale_after_normalization` gates whether gamma participates at all.
  scale = gamma if scale_after_normalization else None
  return batch_normalization(t, m, v, beta, scale, variance_epsilon, name)
# pylint: disable=redefined-builtin,line-too-long
@tf_export("nn.batch_norm_with_global_normalization", v1=[])
@dispatch.add_dispatch_support
def batch_norm_with_global_normalization_v2(input,
                                            mean,
                                            variance,
                                            beta,
                                            gamma,
                                            variance_epsilon,
                                            scale_after_normalization,
                                            name=None):
  """Batch normalization.

  This op is deprecated. See `tf.nn.batch_normalization`.

  Args:
    input: A 4D input Tensor.
    mean: A 1D mean Tensor with size matching the last dimension of t.
      This is the first output from tf.nn.moments, or a saved moving
      average thereof.
    variance: A 1D variance Tensor with size matching the last dimension of t.
      This is the second output from tf.nn.moments, or a saved moving
      average thereof.
    beta: A 1D beta Tensor with size matching the last dimension of t.
      An offset to be added to the normalized tensor.
    gamma: A 1D gamma Tensor with size matching the last dimension of t.
      If "scale_after_normalization" is true, this tensor will be multiplied
      with the normalized tensor.
    variance_epsilon: A small float number to avoid dividing by 0.
    scale_after_normalization: A bool indicating whether the resulted tensor
      needs to be multiplied with gamma.
    name: A name for this operation (optional).

  Returns:
    A batch-normalized `t`.

  References:
    Batch Normalization - Accelerating Deep Network Training by Reducing Internal Covariate Shift:
    [Ioffe et al., 2015](http://proceedings.mlr.press/v37/ioffe15.html)
    ([pdf](http://proceedings.mlr.press/v37/ioffe15.pdf))
  """
  # Positional forwarding: the v1 signature is
  # (t, m, v, beta, gamma, variance_epsilon, scale_after_normalization, name).
  return batch_norm_with_global_normalization(input, mean, variance, beta,
                                              gamma, variance_epsilon,
                                              scale_after_normalization, name)
# pylint: enable=redefined-builtin,line-too-long
def _sum_rows(x):
  """Returns a vector summing up each row of the matrix x."""
  # Equivalent to math_ops.reduce_sum(x, 1) for a matrix, but expressed as a
  # matmul with a column of ones because that op's gradient is cheaper in the
  # current implementation; this helper sits on the nce_loss training path.
  num_cols = array_ops.shape(x)[1]
  ones_column = array_ops.ones(array_ops.stack([num_cols, 1]), x.dtype)
  row_sums = math_ops.matmul(x, ones_column)
  return array_ops.reshape(row_sums, [-1])
def _compute_sampled_logits(weights,
                            biases,
                            labels,
                            inputs,
                            num_sampled,
                            num_classes,
                            num_true=1,
                            sampled_values=None,
                            subtract_log_q=True,
                            remove_accidental_hits=False,
                            partition_strategy="mod",
                            name=None,
                            seed=None):
  """Helper function for nce_loss and sampled_softmax_loss functions.
  Computes sampled output training logits and labels suitable for implementing
  e.g. noise-contrastive estimation (see nce_loss) or sampled softmax (see
  sampled_softmax_loss).
  Note: In the case where num_true > 1, we assign to each target class
  the target probability 1 / num_true so that the target probabilities
  sum to 1 per-example.
  Args:
    weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor`
        objects whose concatenation along dimension 0 has shape
        `[num_classes, dim]`.  The (possibly-partitioned) class embeddings.
    biases: A `Tensor` of shape `[num_classes]`.  The (possibly-partitioned)
        class biases.
    labels: A `Tensor` of type `int64` and shape `[batch_size,
        num_true]`. The target classes.  Note that this format differs from
        the `labels` argument of `nn.softmax_cross_entropy_with_logits`.
    inputs: A `Tensor` of shape `[batch_size, dim]`.  The forward
        activations of the input network.
    num_sampled: An `int`.  The number of classes to randomly sample per batch.
    num_classes: An `int`. The number of possible classes.
    num_true: An `int`.  The number of target classes per training example.
    sampled_values: a tuple of (`sampled_candidates`, `true_expected_count`,
        `sampled_expected_count`) returned by a `*_candidate_sampler` function.
        (if None, we default to `log_uniform_candidate_sampler`)
    subtract_log_q: A `bool`.  whether to subtract the log expected count of
        the labels in the sample to get the logits of the true labels.
        Default is True.  Turn off for Negative Sampling.
    remove_accidental_hits:  A `bool`.  whether to remove "accidental hits"
        where a sampled class equals one of the target classes.  Default is
        False.
    partition_strategy: A string specifying the partitioning strategy, relevant
        if `len(weights) > 1`. Currently `"div"` and `"mod"` are supported.
        Default is `"mod"`. See `tf.nn.embedding_lookup` for more details.
    name: A name for the operation (optional).
    seed: random seed for candidate sampling. Default to None, which doesn't set
        the op-level random seed for candidate sampling.
  Returns:
    out_logits: `Tensor` object with shape
        `[batch_size, num_true + num_sampled]`, for passing to either
        `nn.sigmoid_cross_entropy_with_logits` (NCE) or
        `nn.softmax_cross_entropy_with_logits` (sampled softmax).
    out_labels: A Tensor object with the same shape as `out_logits`.
  """
  # Normalize `weights` to a flat list of shards so the name_scope input list
  # and embedding_lookup below can treat the partitioned and unpartitioned
  # cases uniformly.
  if isinstance(weights, variables.PartitionedVariable):
    weights = list(weights)
  if not isinstance(weights, list):
    weights = [weights]
  with ops.name_scope(name, "compute_sampled_logits",
                      weights + [biases, inputs, labels]):
    if labels.dtype != dtypes.int64:
      labels = math_ops.cast(labels, dtypes.int64)
    labels_flat = array_ops.reshape(labels, [-1])
    # Sample the negative labels.
    #   sampled shape: [num_sampled] tensor
    #   true_expected_count shape = [batch_size, 1] tensor
    #   sampled_expected_count shape = [num_sampled] tensor
    if sampled_values is None:
      sampled_values = candidate_sampling_ops.log_uniform_candidate_sampler(
          true_classes=labels,
          num_true=num_true,
          num_sampled=num_sampled,
          unique=True,
          range_max=num_classes,
          seed=seed)
    # NOTE: pylint cannot tell that 'sampled_values' is a sequence
    # pylint: disable=unpacking-non-sequence
    # stop_gradient: the sampler outputs are treated as constants; no
    # gradient should flow back through the candidate-sampling procedure.
    sampled, true_expected_count, sampled_expected_count = (
        array_ops.stop_gradient(s) for s in sampled_values)
    # pylint: enable=unpacking-non-sequence
    sampled = math_ops.cast(sampled, dtypes.int64)
    # labels_flat is a [batch_size * num_true] tensor
    # sampled is a [num_sampled] int tensor
    all_ids = array_ops.concat([labels_flat, sampled], 0)
    # Retrieve the true weights and the logits of the sampled weights.
    # weights shape is [num_classes, dim]
    # A single lookup over the concatenated ids, then split back into the
    # true part (first batch_size * num_true rows) and sampled part (rest).
    all_w = embedding_ops.embedding_lookup(
        weights, all_ids, partition_strategy=partition_strategy)
    if all_w.dtype != inputs.dtype:
      all_w = math_ops.cast(all_w, inputs.dtype)
    # true_w shape is [batch_size * num_true, dim]
    true_w = array_ops.slice(all_w, [0, 0],
                             array_ops.stack(
                                 [array_ops.shape(labels_flat)[0], -1]))
    sampled_w = array_ops.slice(
        all_w, array_ops.stack([array_ops.shape(labels_flat)[0], 0]), [-1, -1])
    # inputs has shape [batch_size, dim]
    # sampled_w has shape [num_sampled, dim]
    # Apply X*W', which yields [batch_size, num_sampled]
    sampled_logits = math_ops.matmul(inputs, sampled_w, transpose_b=True)
    # Retrieve the true and sampled biases, compute the true logits, and
    # add the biases to the true and sampled logits.
    all_b = embedding_ops.embedding_lookup(
        biases, all_ids, partition_strategy=partition_strategy)
    if all_b.dtype != inputs.dtype:
      all_b = math_ops.cast(all_b, inputs.dtype)
    # true_b is a [batch_size * num_true] tensor
    # sampled_b is a [num_sampled] float tensor
    true_b = array_ops.slice(all_b, [0], array_ops.shape(labels_flat))
    sampled_b = array_ops.slice(all_b, array_ops.shape(labels_flat), [-1])
    # inputs shape is [batch_size, dim]
    # true_w shape is [batch_size * num_true, dim]
    # row_wise_dots is [batch_size, num_true, dim]
    dim = array_ops.shape(true_w)[1:2]
    new_true_w_shape = array_ops.concat([[-1, num_true], dim], 0)
    row_wise_dots = math_ops.multiply(
        array_ops.expand_dims(inputs, 1),
        array_ops.reshape(true_w, new_true_w_shape))
    # We want the row-wise dot plus biases which yields a
    # [batch_size, num_true] tensor of true_logits.
    dots_as_matrix = array_ops.reshape(row_wise_dots,
                                       array_ops.concat([[-1], dim], 0))
    true_logits = array_ops.reshape(_sum_rows(dots_as_matrix), [-1, num_true])
    true_b = array_ops.reshape(true_b, [-1, num_true])
    true_logits += true_b
    sampled_logits += sampled_b
    if remove_accidental_hits:
      # Push "accidental hits" (sampled classes that are also target classes)
      # to effectively -inf so they cannot act as negatives.
      acc_hits = candidate_sampling_ops.compute_accidental_hits(
          labels, sampled, num_true=num_true)
      acc_indices, acc_ids, acc_weights = acc_hits
      # This is how SparseToDense expects the indices.
      acc_indices_2d = array_ops.reshape(acc_indices, [-1, 1])
      acc_ids_2d_int32 = array_ops.reshape(
          math_ops.cast(acc_ids, dtypes.int32), [-1, 1])
      sparse_indices = array_ops.concat([acc_indices_2d, acc_ids_2d_int32], 1,
                                        "sparse_indices")
      # Create sampled_logits_shape = [batch_size, num_sampled]
      sampled_logits_shape = array_ops.concat(
          [array_ops.shape(labels)[:1],
           array_ops.expand_dims(num_sampled, 0)], 0)
      if sampled_logits.dtype != acc_weights.dtype:
        acc_weights = math_ops.cast(acc_weights, sampled_logits.dtype)
      sampled_logits += gen_sparse_ops.sparse_to_dense(
          sparse_indices,
          sampled_logits_shape,
          acc_weights,
          default_value=0.0,
          validate_indices=False)
    if subtract_log_q:
      # Subtract log of Q(l), prior probability that l appears in sampled.
      true_logits -= math_ops.log(true_expected_count)
      sampled_logits -= math_ops.log(sampled_expected_count)
    # Construct output logits and labels. The true labels/logits start at col 0.
    out_logits = array_ops.concat([true_logits, sampled_logits], 1)
    # true_logits is a float tensor, ones_like(true_logits) is a float
    # tensor of ones. We then divide by num_true to ensure the per-example
    # labels sum to 1.0, i.e. form a proper probability distribution.
    out_labels = array_ops.concat([
        array_ops.ones_like(true_logits) / num_true,
        array_ops.zeros_like(sampled_logits)
    ], 1)
    return out_logits, out_labels
@tf_export("nn.nce_loss", v1=[])
@dispatch.add_dispatch_support
def nce_loss_v2(weights,
                biases,
                labels,
                inputs,
                num_sampled,
                num_classes,
                num_true=1,
                sampled_values=None,
                remove_accidental_hits=False,
                name="nce_loss"):
  """Computes and returns the noise-contrastive estimation training loss.

  See [Noise-contrastive estimation: A new estimation principle for
  unnormalized statistical
  models](http://www.jmlr.org/proceedings/papers/v9/gutmann10a/gutmann10a.pdf).
  Also see our [Candidate Sampling Algorithms
  Reference](https://www.tensorflow.org/extras/candidate_sampling.pdf)

  A common use case is to use this method for training, and calculate the full
  sigmoid loss for evaluation or inference as in the following example:

  ```python
  if mode == "train":
    loss = tf.nn.nce_loss(
        weights=weights, biases=biases, labels=labels, inputs=inputs, ...)
  elif mode == "eval":
    logits = tf.matmul(inputs, tf.transpose(weights))
    logits = tf.nn.bias_add(logits, biases)
    labels_one_hot = tf.one_hot(labels, n_classes)
    loss = tf.nn.sigmoid_cross_entropy_with_logits(
        labels=labels_one_hot, logits=logits)
    loss = tf.reduce_sum(loss, axis=1)
  ```

  Note: when doing embedding lookup on `weights` and `bias`, "div" partition
  strategy will be used. Support for other partition strategy will be added
  later.

  Note: By default this uses a log-uniform (Zipfian) distribution for sampling,
  so your labels must be sorted in order of decreasing frequency to achieve
  good results. For more details, see
  `tf.random.log_uniform_candidate_sampler`.

  Note: In the case where `num_true` > 1, we assign to each target class
  the target probability 1 / `num_true` so that the target probabilities
  sum to 1 per-example. If you have a variable number of target classes,
  you can pad them out to a constant number by either repeating them or by
  padding with an otherwise unused class.

  Args:
    weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor`
      objects whose concatenation along dimension 0 has shape [num_classes,
      dim]. The (possibly-partitioned) class embeddings.
    biases: A `Tensor` of shape `[num_classes]`. The class biases.
    labels: A `Tensor` of type `int64` and shape `[batch_size, num_true]`. The
      target classes.
    inputs: A `Tensor` of shape `[batch_size, dim]`. The forward activations of
      the input network.
    num_sampled: An `int`. The number of negative classes to randomly sample
      per batch. This single sample of negative classes is evaluated for each
      element in the batch.
    num_classes: An `int`. The number of possible classes.
    num_true: An `int`. The number of target classes per training example.
    sampled_values: a tuple of (`sampled_candidates`, `true_expected_count`,
      `sampled_expected_count`) returned by a `*_candidate_sampler` function.
      (if None, we default to `log_uniform_candidate_sampler`)
    remove_accidental_hits: A `bool`. Whether to remove "accidental hits"
      where a sampled class equals one of the target classes. If set to `True`,
      this is a "Sampled Logistic" loss instead of NCE, and we are learning to
      generate log-odds instead of log probabilities. See our [Candidate
      Sampling Algorithms Reference]
      (https://www.tensorflow.org/extras/candidate_sampling.pdf). Default is
      False.
    name: A name for the operation (optional).

  Returns:
    A `batch_size` 1-D tensor of per-example NCE losses.
  """
  # TODO(yuefengz): get partition_strategy from either variables or distribution
  # strategies.
  # TF2 entry point: forwards to the v1 implementation with the "div"
  # partition strategy hard-wired.
  return nce_loss(
      weights=weights,
      biases=biases,
      labels=labels,
      inputs=inputs,
      num_sampled=num_sampled,
      num_classes=num_classes,
      num_true=num_true,
      sampled_values=sampled_values,
      remove_accidental_hits=remove_accidental_hits,
      partition_strategy="div",
      name=name)
@tf_export(v1=["nn.nce_loss"])
@dispatch.add_dispatch_support
def nce_loss(weights,
             biases,
             labels,
             inputs,
             num_sampled,
             num_classes,
             num_true=1,
             sampled_values=None,
             remove_accidental_hits=False,
             partition_strategy="mod",
             name="nce_loss"):
  """Computes and returns the noise-contrastive estimation training loss.

  A common use case is to use this method for training, and calculate the full
  sigmoid loss for evaluation or inference. In this case, you must set
  `partition_strategy="div"` for the two losses to be consistent, as in the
  following example:

  ```python
  if mode == "train":
    loss = tf.nn.nce_loss(
        weights=weights, biases=biases, labels=labels, inputs=inputs, ...,
        partition_strategy="div")
  elif mode == "eval":
    logits = tf.matmul(inputs, tf.transpose(weights))
    logits = tf.nn.bias_add(logits, biases)
    labels_one_hot = tf.one_hot(labels, n_classes)
    loss = tf.nn.sigmoid_cross_entropy_with_logits(
        labels=labels_one_hot, logits=logits)
    loss = tf.reduce_sum(loss, axis=1)
  ```

  Note: By default this uses a log-uniform (Zipfian) distribution for sampling,
  so your labels must be sorted in order of decreasing frequency to achieve
  good results. For more details, see
  `tf.random.log_uniform_candidate_sampler`.

  Note: In the case where `num_true` > 1, we assign to each target class
  the target probability 1 / `num_true` so that the target probabilities
  sum to 1 per-example. If you have a variable number of target classes,
  you can pad them out to a constant number by either repeating them or by
  padding with an otherwise unused class.

  Args:
    weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor`
      objects whose concatenation along dimension 0 has shape
      [num_classes, dim]. The (possibly-partitioned) class embeddings.
    biases: A `Tensor` of shape `[num_classes]`. The class biases.
    labels: A `Tensor` of type `int64` and shape `[batch_size,
      num_true]`. The target classes.
    inputs: A `Tensor` of shape `[batch_size, dim]`. The forward
      activations of the input network.
    num_sampled: An `int`. The number of negative classes to randomly sample
      per batch. This single sample of negative classes is evaluated for each
      element in the batch.
    num_classes: An `int`. The number of possible classes.
    num_true: An `int`. The number of target classes per training example.
    sampled_values: a tuple of (`sampled_candidates`, `true_expected_count`,
      `sampled_expected_count`) returned by a `*_candidate_sampler` function.
      (if None, we default to `log_uniform_candidate_sampler`)
    remove_accidental_hits: A `bool`. Whether to remove "accidental hits"
      where a sampled class equals one of the target classes. If set to
      `True`, this is a "Sampled Logistic" loss instead of NCE, and we are
      learning to generate log-odds instead of log probabilities. See
      our Candidate Sampling Algorithms Reference
      ([pdf](https://www.tensorflow.org/extras/candidate_sampling.pdf)).
      Default is False.
    partition_strategy: A string specifying the partitioning strategy, relevant
      if `len(weights) > 1`. Currently `"div"` and `"mod"` are supported.
      Default is `"mod"`. See `tf.nn.embedding_lookup` for more details.
    name: A name for the operation (optional).

  Returns:
    A `batch_size` 1-D tensor of per-example NCE losses.

  References:
    Noise-contrastive estimation - A new estimation principle for unnormalized
    statistical models:
    [Gutmann et al., 2010](http://proceedings.mlr.press/v9/gutmann10a)
    ([pdf](http://proceedings.mlr.press/v9/gutmann10a/gutmann10a.pdf))
  """
  # Build [batch_size, num_true + num_sampled] logits plus matching soft
  # labels (1/num_true for targets, 0 for sampled negatives).
  out_logits, out_labels = _compute_sampled_logits(
      weights=weights,
      biases=biases,
      labels=labels,
      inputs=inputs,
      num_sampled=num_sampled,
      num_classes=num_classes,
      num_true=num_true,
      sampled_values=sampled_values,
      subtract_log_q=True,
      remove_accidental_hits=remove_accidental_hits,
      partition_strategy=partition_strategy,
      name=name)
  per_class_losses = sigmoid_cross_entropy_with_logits(
      labels=out_labels, logits=out_logits, name="sampled_losses")
  # per_class_losses is batch_size x {true_loss, sampled_losses...};
  # sum the true and sampled terms to get one loss per example.
  return _sum_rows(per_class_losses)
@tf_export("nn.sampled_softmax_loss", v1=[])
@dispatch.add_dispatch_support
def sampled_softmax_loss_v2(weights,
                            biases,
                            labels,
                            inputs,
                            num_sampled,
                            num_classes,
                            num_true=1,
                            sampled_values=None,
                            remove_accidental_hits=True,
                            seed=None,
                            name="sampled_softmax_loss"):
  """Computes and returns the sampled softmax training loss.

  This is a faster way to train a softmax classifier over a huge number of
  classes. This operation is for training only; it is generally an
  underestimate of the full softmax loss.

  A common use case is to use this method for training, and calculate the full
  softmax loss for evaluation or inference as in the following example:

  ```python
  if mode == "train":
    loss = tf.nn.sampled_softmax_loss(
        weights=weights, biases=biases, labels=labels, inputs=inputs, ...)
  elif mode == "eval":
    logits = tf.matmul(inputs, tf.transpose(weights))
    logits = tf.nn.bias_add(logits, biases)
    labels_one_hot = tf.one_hot(labels, n_classes)
    loss = tf.nn.softmax_cross_entropy_with_logits(
        labels=labels_one_hot, logits=logits)
  ```

  See our [Candidate Sampling Algorithms Reference]
  (https://www.tensorflow.org/extras/candidate_sampling.pdf)
  Also see Section 3 of [Jean et al., 2014](http://arxiv.org/abs/1412.2007)
  ([pdf](http://arxiv.org/pdf/1412.2007.pdf)) for the math.

  Note: when doing embedding lookup on `weights` and `bias`, "div" partition
  strategy will be used. Support for other partition strategy will be added
  later.

  Args:
    weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor`
      objects whose concatenation along dimension 0 has shape [num_classes,
      dim]. The (possibly-sharded) class embeddings.
    biases: A `Tensor` of shape `[num_classes]`. The class biases.
    labels: A `Tensor` of type `int64` and shape `[batch_size, num_true]`. The
      target classes. Note that this format differs from the `labels` argument
      of `nn.softmax_cross_entropy_with_logits`.
    inputs: A `Tensor` of shape `[batch_size, dim]`. The forward activations of
      the input network.
    num_sampled: An `int`. The number of classes to randomly sample per batch.
    num_classes: An `int`. The number of possible classes.
    num_true: An `int`. The number of target classes per training example.
    sampled_values: a tuple of (`sampled_candidates`, `true_expected_count`,
      `sampled_expected_count`) returned by a `*_candidate_sampler` function.
      (if None, we default to `log_uniform_candidate_sampler`)
    remove_accidental_hits: A `bool`. whether to remove "accidental hits"
      where a sampled class equals one of the target classes. Default is True.
    seed: random seed for candidate sampling. Default to None, which doesn't set
      the op-level random seed for candidate sampling.
    name: A name for the operation (optional).

  Returns:
    A `batch_size` 1-D tensor of per-example sampled softmax losses.
  """
  # TF2 entry point: forwards to the v1 implementation with the "div"
  # partition strategy hard-wired.
  return sampled_softmax_loss(
      weights=weights,
      biases=biases,
      labels=labels,
      inputs=inputs,
      num_sampled=num_sampled,
      num_classes=num_classes,
      num_true=num_true,
      sampled_values=sampled_values,
      remove_accidental_hits=remove_accidental_hits,
      partition_strategy="div",
      name=name,
      seed=seed)
@tf_export(v1=["nn.sampled_softmax_loss"])
@dispatch.add_dispatch_support
def sampled_softmax_loss(weights,
                         biases,
                         labels,
                         inputs,
                         num_sampled,
                         num_classes,
                         num_true=1,
                         sampled_values=None,
                         remove_accidental_hits=True,
                         partition_strategy="mod",
                         name="sampled_softmax_loss",
                         seed=None):
  """Computes and returns the sampled softmax training loss.

  This is a faster way to train a softmax classifier over a huge number of
  classes. This operation is for training only; it is generally an
  underestimate of the full softmax loss.

  A common use case is to use this method for training, and calculate the full
  softmax loss for evaluation or inference. In this case, you must set
  `partition_strategy="div"` for the two losses to be consistent, as in the
  following example:

  ```python
  if mode == "train":
    loss = tf.nn.sampled_softmax_loss(
        weights=weights, biases=biases, labels=labels, inputs=inputs, ...,
        partition_strategy="div")
  elif mode == "eval":
    logits = tf.matmul(inputs, tf.transpose(weights))
    logits = tf.nn.bias_add(logits, biases)
    labels_one_hot = tf.one_hot(labels, n_classes)
    loss = tf.nn.softmax_cross_entropy_with_logits(
        labels=labels_one_hot, logits=logits)
  ```

  See our Candidate Sampling Algorithms Reference
  ([pdf](https://www.tensorflow.org/extras/candidate_sampling.pdf)).
  Also see Section 3 of (Jean et al., 2014) for the math.

  Args:
    weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor`
      objects whose concatenation along dimension 0 has shape
      [num_classes, dim]. The (possibly-sharded) class embeddings.
    biases: A `Tensor` of shape `[num_classes]`. The class biases.
    labels: A `Tensor` of type `int64` and shape `[batch_size,
      num_true]`. The target classes. Note that this format differs from
      the `labels` argument of `nn.softmax_cross_entropy_with_logits`.
    inputs: A `Tensor` of shape `[batch_size, dim]`. The forward
      activations of the input network.
    num_sampled: An `int`. The number of classes to randomly sample per batch.
    num_classes: An `int`. The number of possible classes.
    num_true: An `int`. The number of target classes per training example.
    sampled_values: a tuple of (`sampled_candidates`, `true_expected_count`,
      `sampled_expected_count`) returned by a `*_candidate_sampler` function.
      (if None, we default to `log_uniform_candidate_sampler`)
    remove_accidental_hits: A `bool`. whether to remove "accidental hits"
      where a sampled class equals one of the target classes. Default is
      True.
    partition_strategy: A string specifying the partitioning strategy, relevant
      if `len(weights) > 1`. Currently `"div"` and `"mod"` are supported.
      Default is `"mod"`. See `tf.nn.embedding_lookup` for more details.
    name: A name for the operation (optional).
    seed: random seed for candidate sampling. Default to None, which doesn't set
      the op-level random seed for candidate sampling.

  Returns:
    A `batch_size` 1-D tensor of per-example sampled softmax losses.

  References:
    On Using Very Large Target Vocabulary for Neural Machine Translation:
    [Jean et al., 2014]
    (https://aclanthology.coli.uni-saarland.de/papers/P15-1001/p15-1001)
    ([pdf](http://aclweb.org/anthology/P15-1001))
  """
  # Build [batch_size, num_true + num_sampled] logits plus matching soft
  # labels (1/num_true for targets, 0 for sampled negatives).
  out_logits, out_labels = _compute_sampled_logits(
      weights=weights,
      biases=biases,
      labels=labels,
      inputs=inputs,
      num_sampled=num_sampled,
      num_classes=num_classes,
      num_true=num_true,
      sampled_values=sampled_values,
      subtract_log_q=True,
      remove_accidental_hits=remove_accidental_hits,
      partition_strategy=partition_strategy,
      name=name,
      seed=seed)
  # The soft labels are constants of the loss; block gradients through them.
  out_labels = array_ops.stop_gradient(out_labels, name="labels_stop_gradient")
  # One [batch_size] loss value per example.
  return nn_ops.softmax_cross_entropy_with_logits_v2(
      labels=out_labels, logits=out_logits)
|
tensorflow/tensorflow
|
tensorflow/python/ops/nn_impl.py
|
Python
|
apache-2.0
| 101,157
|
[
"Gaussian"
] |
8d057e51c19fa199de6ce339d7ce9895d5a6e32f2d8b0c8788687994013362f4
|
"""@namespace IMP.pmi.restraints.basic
Some miscellaneous simple restraints.
"""
from __future__ import print_function
import IMP
import IMP.core
import IMP.algebra
import IMP.atom
import IMP.container
import IMP.pmi.tools
import IMP.pmi.restraints
class ExternalBarrier(IMP.pmi.restraints.RestraintBase):
    """Restraint to keep all structures inside a sphere.

    Particles outside the sphere are penalized by a harmonic upper bound
    on their distance from the sphere center.
    """

    def __init__(self,
                 representation=None,
                 radius=10.0,
                 hierarchies=None,
                 resolution=10,
                 weight=1.0,
                 center=None,
                 label=None):
        """Setup external barrier restraint.
        @param representation DEPRECATED
        @param radius Size of external barrier
        @param hierarchies Can be one of the following inputs: IMP Hierarchy,
               PMI System/State/Molecule/TempResidue, or a list/set of them
        @param resolution Select which resolutions to act upon
        @param weight Weight of restraint
        @param center Center of the external barrier restraint
               (IMP.algebra.Vector3D object)
        @param label A unique label to be used in outputs and
               particle/restraint names.
        """
        # Resolve the model and particle list from whichever input style
        # the caller used (legacy "representation" or PMI2 hierarchies).
        if representation:
            m = representation.prot.get_model()
            particles = IMP.pmi.tools.select(
                representation,
                resolution=resolution,
                hierarchies=hierarchies)
        elif hierarchies:
            hiers = IMP.pmi.tools.input_adaptor(hierarchies, resolution,
                                                flatten=True)
            m = hiers[0].get_model()
            particles = [h.get_particle() for h in hiers]
        else:
            raise Exception("%s: must pass representation or hierarchies" % (
                self.name))
        # Base-class init needs the model; it sets up self.m and the
        # restraint set self.rs used below.
        super(ExternalBarrier, self).__init__(m, label=label, weight=weight)
        self.radius = radius
        if center is None:
            c3 = IMP.algebra.Vector3D(0, 0, 0)
        elif type(center) is IMP.algebra.Vector3D:
            c3 = center
        else:
            raise Exception(
                "%s: @param center must be an IMP.algebra.Vector3D object" % (
                    self.name))
        # Harmonic penalty (spring constant 10.0) on distance from c3
        # beyond `radius`, applied to every selected particle.
        ub3 = IMP.core.HarmonicUpperBound(radius, 10.0)
        ss3 = IMP.core.DistanceToSingletonScore(ub3, c3)
        lsc = IMP.container.ListSingletonContainer(self.m)
        lsc.add(particles)
        r3 = IMP.container.SingletonsRestraint(ss3, lsc)
        self.rs.add_restraint(r3)
class DistanceRestraint(IMP.pmi.restraints.RestraintBase):
    """A simple distance restraint between two selected particles.

    The inter-particle distance is kept within [distancemin, distancemax]
    by a harmonic lower and a harmonic upper bound of strength `kappa`.
    """

    def __init__(self,
                 representation=None,
                 tuple_selection1=None,
                 tuple_selection2=None,
                 distancemin=0,
                 distancemax=100,
                 resolution=1.0,
                 kappa=1.0,
                 root_hier=None,
                 label=None,
                 weight=1.):
        """Setup distance restraint.
        @param representation DEPRECATED
        @param tuple_selection1 (resnum, resnum, molecule name, copy
               number (=0))
        @param tuple_selection2 (resnum, resnum, molecule name, copy
               number (=0))
        @param distancemin The minimum dist
        @param distancemax The maximum dist
        @param resolution For selecting particles
        @param kappa The harmonic parameter
        @param root_hier The hierarchy to select from (use this instead of
               representation)
        @param label A unique label to be used in outputs and
               particle/restraint names
        @param weight Weight of restraint
        \note Pass the same resnum twice to each tuple_selection. Optionally
              add a copy number (PMI2 only)
        """
        if tuple_selection1 is None or tuple_selection2 is None:
            raise Exception("You must pass tuple_selection1/2")
        ts1 = IMP.core.HarmonicUpperBound(distancemax, kappa)
        ts2 = IMP.core.HarmonicLowerBound(distancemin, kappa)
        if representation and not root_hier:
            # Legacy PMI1-style input.
            m = representation.prot.get_model()
            particles1 = IMP.pmi.tools.select(representation,
                                              resolution=resolution,
                                              name=tuple_selection1[2],
                                              residue=tuple_selection1[0])
            particles2 = IMP.pmi.tools.select(representation,
                                              resolution=resolution,
                                              name=tuple_selection2[2],
                                              residue=tuple_selection2[0])
        elif root_hier and not representation:
            # PMI2 input: select by molecule/residue/copy index.
            m = root_hier.get_model()
            copy_num1 = tuple_selection1[3] if len(tuple_selection1) > 3 else 0
            copy_num2 = tuple_selection2[3] if len(tuple_selection2) > 3 else 0
            sel1 = IMP.atom.Selection(root_hier,
                                      resolution=resolution,
                                      molecule=tuple_selection1[2],
                                      residue_index=tuple_selection1[0],
                                      copy_index=copy_num1)
            particles1 = sel1.get_selected_particles()
            sel2 = IMP.atom.Selection(root_hier,
                                      resolution=resolution,
                                      molecule=tuple_selection2[2],
                                      residue_index=tuple_selection2[0],
                                      copy_index=copy_num2)
            particles2 = sel2.get_selected_particles()
        else:
            # Reached both when neither input was given and when both were;
            # the old message only mentioned the "both" case.
            raise Exception(
                "Pass exactly one of representation or root_hier")
        # Validate the selections *before* dereferencing them; previously
        # particles1[0].get_name() was printed first, raising an unhelpful
        # IndexError when a selection came back empty.
        if len(particles1) == 0 or len(particles2) == 0:
            raise ValueError("at least one selection matched no particles")
        if len(particles1) > 1 or len(particles2) > 1:
            raise ValueError("more than one particle selected")
        super(DistanceRestraint, self).__init__(m, label=label, weight=weight)
        print(self.name)
        print("Created distance restraint between "
              "%s and %s" % (particles1[0].get_name(),
                             particles2[0].get_name()))
        # One restraint per wall: upper bound (ts1) and lower bound (ts2).
        self.rs.add_restraint(
            IMP.core.DistanceRestraint(self.m, ts1,
                                       particles1[0],
                                       particles2[0]))
        self.rs.add_restraint(
            IMP.core.DistanceRestraint(self.m, ts2,
                                       particles1[0],
                                       particles2[0]))
class TorqueRestraint(IMP.Restraint):
    """Restrain particles to share a common azimuthal (x-y) angle.

    Each particle is softly penalized when its azimuth deviates by more
    than `angular_tolerance` degrees from the azimuth of the centroid of
    all restrained particles.
    """
    # Imported as a class attribute; methods access it via self.math.
    import math

    def __init__(self, m, objects, resolution, angular_tolerance,label='None'):
        """Constructor.
        @param m the IMP Model
        @param objects PMI objects to restrain (any input_adaptor input)
        @param resolution resolution at which to select particles
        @param angular_tolerance half-width of the allowed angular band
               (degrees)
        @param label label used in the output keys
        """
        IMP.Restraint.__init__(self, m, "TorqueRestraint %1%")
        self.softness_angle = 0.5  # softness of the sigmoidal angular wall
        self.plateau = 1e-10       # keeps probabilities strictly below 1
        self.weight = 1.0
        self.m=m
        hierarchies = IMP.pmi.tools.input_adaptor(objects,
                                                  resolution,
                                                  flatten=True)
        self.particles = [h.get_particle() for h in hierarchies]
        self.ds=[IMP.core.XYZ(p) for p in self.particles]
        self.label=label
        self.at=angular_tolerance

    def get_angle_probability(self,xyz,angle_center):
        """Logistic probability that xyz's azimuth lies outside
        [angle_center - at, angle_center + at] (degrees)."""
        maxtheta=angle_center+self.at
        mintheta=angle_center-self.at
        # Azimuth of the particle in degrees, in (-180, 180].
        angle=self.math.atan2(xyz.get_y(),xyz.get_x() )*180.0/self.math.pi
        # Wrapped signed difference to each bound, in [-180, 180).
        anglediff = (angle - maxtheta + 180 + 360) % 360 - 180
        argvalue1=anglediff / self.softness_angle
        anglediff = (angle - mintheta + 180 + 360) % 360 - 180
        argvalue2=-anglediff / self.softness_angle
        # Logistic wall on whichever side is violated more.
        prob = (1.0-self.plateau) / (1.0 + self.math.exp(-max(argvalue1,argvalue2)))
        return prob

    def unprotected_evaluate(self, da):
        """Score = -sum log(1 - P(violation)); the reference azimuth is
        that of the centroid of all restrained particles."""
        s=0.0
        center=IMP.core.get_centroid(self.ds)
        angle_center=self.math.atan2(center[1],center[0])*180.0/self.math.pi
        for xyz in self.ds:
            s+=-self.math.log(1.0-self.get_angle_probability(xyz,angle_center))
        return s

    def do_get_inputs(self):
        # Particles whose coordinates the score depends on.
        return self.particles

    def add_to_model(self):
        """Register this restraint with the PMI bookkeeping."""
        IMP.pmi.tools.add_restraint_to_model(self.m, self)

    def get_output(self):
        """Return a stat-file output dictionary for this restraint."""
        self.m.update()
        output = {}
        score = self.weight * self.unprotected_evaluate(None)
        output["_TotalScore"] = str(score)
        output["TorqueRestraint_" + self.label] = str(score)
        return output
class CylinderRestraint(IMP.Restraint):
    '''
    PMI2 python restraint. Restrains particles within a
    cylinder aligned along the z-axis and centered in x,y=0,0.
    Optionally, one can also restrain the cylindrical angle.
    '''
    # Imported as a class attribute; methods access it via self.math.
    import math

    def __init__(self, m, objects, resolution, radius,mintheta=None,
                 maxtheta=None,repulsive=False,label='None'):
        '''
        @param m the IMP Model
        @param objects PMI2 objects
        @param resolution the resolution you want the restraint to be applied
        @param radius the radius of the cylinder
        @param mintheta minimum cylindrical angle in degrees
        @param maxtheta maximum cylindrical angle in degrees
        @param repulsive if True, keep particles *outside* the cylinder
        @param label label used in the output keys
        '''
        IMP.Restraint.__init__(self, m, "CylinderRestraint %1%")
        self.radius=radius
        self.softness = 3.0        # softness of the radial sigmoid wall
        self.softness_angle = 0.5  # softness of the angular sigmoid wall
        self.plateau = 1e-10       # keeps probabilities strictly below 1
        self.weight = 1.0
        self.m=m
        self.mintheta=mintheta
        self.maxtheta=maxtheta
        self.repulsive=repulsive
        hierarchies = IMP.pmi.tools.input_adaptor(objects,
                                                  resolution,
                                                  flatten=True)
        self.particles = [h.get_particle() for h in hierarchies]
        self.label=label

    def get_probability(self,p):
        """Logistic probability that particle p violates the radial wall
        (distance from the z-axis beyond -- or, if repulsive, within --
        self.radius)."""
        xyz=IMP.core.XYZ(p)
        r=self.math.sqrt(xyz.get_x()**2+xyz.get_y()**2)
        argvalue=(r-self.radius) / self.softness
        if self.repulsive: argvalue=-argvalue
        prob = (1.0 - self.plateau) / (1.0 + self.math.exp(-argvalue))
        return prob

    def get_angle_probability(self,p):
        """Logistic probability that p's azimuth lies outside
        [mintheta, maxtheta] (degrees, wrapped to (-180, 180])."""
        xyz=IMP.core.XYZ(p)
        angle=self.math.atan2(xyz.get_y(),xyz.get_x() )*180.0/self.math.pi
        # Wrapped signed difference to each bound, in [-180, 180).
        anglediff = (angle - self.maxtheta + 180 + 360) % 360 - 180
        argvalue1=anglediff / self.softness_angle
        anglediff = (angle - self.mintheta + 180 + 360) % 360 - 180
        argvalue2=-anglediff / self.softness_angle
        prob = (1.0-self.plateau) / (1.0 + self.math.exp(-max(argvalue1,argvalue2)))
        return prob

    def unprotected_evaluate(self, da):
        """Score = -sum log(1 - P(violation)) over all particles; the
        angular term applies only when both bounds are set."""
        s=0.0
        for p in self.particles:
            s+=-self.math.log(1.0-self.get_probability(p))
            if self.mintheta is not None and self.maxtheta is not None:
                s+=-self.math.log(1.0-self.get_angle_probability(p))
        return s

    def do_get_inputs(self):
        # Particles whose coordinates the score depends on.
        return self.particles

    def add_to_model(self):
        """Register this restraint with the PMI bookkeeping."""
        IMP.pmi.tools.add_restraint_to_model(self.m, self)

    def get_output(self):
        """Return a stat-file output dictionary for this restraint."""
        self.m.update()
        output = {}
        score = self.weight * self.unprotected_evaluate(None)
        output["_TotalScore"] = str(score)
        output["CylinderRestraint_" + self.label] = str(score)
        return output
class BiStableDistanceRestraint(IMP.Restraint):
    '''
    A python restraint with a bistable potential: the score is the
    negative log of a two-Gaussian mixture over the inter-particle
    distance.
    Authors: G. Bouvier, R. Pellarin. Pasteur Institute.
    '''
    # Imported as class attributes; methods access them via self.np/self.math.
    import numpy as np
    import math

    def __init__(self,m,p1,p2,dist1,dist2,sigma1,sigma2,weight1,weight2):
        '''
        input two particles, the two equilibrium distances, their amplitudes, and their weights (populations)
        '''
        IMP.Restraint.__init__(self, m, "BiStableDistanceRestraint %1%")
        self.dist1=dist1
        self.dist2=dist2
        self.sigma1=sigma1
        self.sigma2=sigma2
        self.weight1=weight1
        self.weight2=weight2
        # NOTE(review): exact float equality -- weights such as 0.7 + 0.3
        # can fail this check due to rounding; math.isclose would be safer.
        if self.weight1+self.weight2 != 1:
            raise ValueError("The sum of the weights must be one")
        self.d1=IMP.core.XYZ(p1)
        self.d2=IMP.core.XYZ(p2)
        self.particle_list=[p1,p2]

    def gaussian(self,x, mu, sig, w):
        """Unnormalized Gaussian of width sig centred at mu, scaled by w."""
        return w*self.np.exp(-self.np.power(x - mu, 2.) / (2 * self.np.power(sig, 2.)))

    def unprotected_evaluate(self,da):
        """Score = -log of the two-Gaussian mixture at the current distance."""
        dist=IMP.core.get_distance(self.d1,self.d2)
        prob=self.gaussian(dist,self.dist1,self.sigma1,self.weight1)+\
            self.gaussian(dist,self.dist2,self.sigma2,self.weight2)
        return -self.math.log(prob)

    def do_get_inputs(self):
        # Particles whose coordinates the score depends on.
        return self.particle_list
class DistanceToPointRestraint(IMP.pmi.restraints.RestraintBase):
    """Restraint for anchoring a particle to a specific coordinate."""

    def __init__(self,
                 representation=None,
                 tuple_selection=None,
                 anchor_point=IMP.algebra.Vector3D(0, 0, 0),
                 radius=10.0,
                 kappa=10.0,
                 resolution=1.0,
                 weight=1.0,
                 root_hier=None,
                 label=None):
        """Setup distance restraint.
        @param representation DEPRECATED
        @param tuple_selection (resnum, resnum, molecule name,
               copy number (=0))
        @param anchor_point Point to which to restrain particle
               (IMP.algebra.Vector3D object)
        @param radius Size of the tolerance length
        @param kappa The harmonic parameter
        @param resolution For selecting a particle
        @param weight Weight of restraint
        @param root_hier The hierarchy to select from (use this instead of
               representation)
        @param label A unique label to be used in outputs and
               particle/restraint names
        \note Pass the same resnum twice to each tuple_selection. Optionally
              add a copy number (PMI2 only)
        """
        if tuple_selection is None:
            raise Exception("You must pass a tuple_selection")
        # Resolve the model and the anchored particle from either the legacy
        # representation or the PMI2 root hierarchy (exactly one of the two).
        if representation and not root_hier:
            m = representation.prot.get_model()
            ps = IMP.pmi.tools.select(representation,
                                      resolution=resolution,
                                      name=tuple_selection[2],
                                      residue=tuple_selection[0])
        elif root_hier and not representation:
            m = root_hier.get_model()
            copy_num1 = 0
            if len(tuple_selection) > 3:
                copy_num1 = tuple_selection[3]
            sel1 = IMP.atom.Selection(root_hier,
                                      resolution=resolution,
                                      molecule=tuple_selection[2],
                                      residue_index=tuple_selection[0],
                                      copy_index=copy_num1)
            ps = sel1.get_selected_particles()
        else:
            raise Exception("%s: Pass representation or root_hier, not both" %
                            self.name)
        if len(ps) > 1:
            raise ValueError("%s: more than one particle selected" %
                             self.name)
        super(DistanceToPointRestraint, self).__init__(m, label=label,
                                                       weight=weight)
        self.radius = radius
        # Harmonic wall of strength kappa beyond `radius` from the anchor.
        ub3 = IMP.core.HarmonicUpperBound(self.radius, kappa)
        if anchor_point is None:
            c3 = IMP.algebra.Vector3D(0, 0, 0)
        elif type(anchor_point) is IMP.algebra.Vector3D:
            c3 = anchor_point
        else:
            raise Exception(
                "%s: @param anchor_point must be an algebra.Vector3D object" %
                self.name)
        ss3 = IMP.core.DistanceToSingletonScore(ub3, c3)
        lsc = IMP.container.ListSingletonContainer(self.m)
        lsc.add(ps)
        r3 = IMP.container.SingletonsRestraint(ss3, lsc)
        self.rs.add_restraint(r3)
        print("\n%s: Created distance_to_point_restraint between "
              "%s and %s" % (self.name, ps[0].get_name(), c3))
|
shanot/imp
|
modules/pmi/pyext/src/restraints/basic.py
|
Python
|
gpl-3.0
| 16,155
|
[
"Gaussian"
] |
033a96b7687591c7a7dda7a2de5df248637c85ac32b086ed11e8252a7db00a43
|
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
"""
The :mod:`lib` module contains most of the components and libraries that make
OpenLP work.
"""
from distutils.version import LooseVersion
import logging
import os
from PyQt4 import QtCore, QtGui, Qt
log = logging.getLogger(__name__)
class ServiceItemContext(object):
    """
    The context in which a Service Item is being generated.
    """
    # Item is being generated for the preview panel.
    Preview = 0
    # Item is being generated for live display.
    Live = 1
    # Item is being generated for inclusion in a service.
    Service = 2
class ImageSource(object):
    """
    This enumeration class represents different image sources. An image source states where an image is used. This
    enumeration class is needed in the context of the :class:`~openlp.core.lib.imagemanager`.

    ``ImagePlugin``
        This states that an image is being used by the image plugin.

    ``Theme``
        This says that the image is used by a theme.
    """
    ImagePlugin = 1
    Theme = 2
class MediaType(object):
    """
    An enumeration class for types of media.
    """
    # Audio-only media.
    Audio = 1
    # Video media.
    Video = 2
class SlideLimits(object):
    """
    Provides an enumeration for behaviour of OpenLP at the end limits of each service item when pressing the up/down
    arrow keys.
    """
    # Stop at the first/last slide of the item.
    End = 1
    # Wrap around to the other end of the same item.
    Wrap = 2
    # Move on to the next/previous service item.
    Next = 3
class ServiceItemAction(object):
    """
    Provides an enumeration for the required action moving between service items by left/right arrow keys.
    """
    # Go to the previous service item.
    Previous = 1
    # Go to the previous service item, showing its last slide.
    PreviousLastSlide = 2
    # Go to the next service item.
    Next = 3
def translate(context, text, comment=None, encoding=QtCore.QCoreApplication.CodecForTr, n=-1,
              qt_translate=QtCore.QCoreApplication.translate):
    """
    A special shortcut method to wrap around the Qt4 translation functions. This abstracts the translation procedure so
    that we can change it if at a later date if necessary, without having to redo the whole of OpenLP.

    ``context``
        The translation context, used to give each string a context or a namespace.

    ``text``
        The text to put into the translation tables for translation.

    ``comment``
        An identifying string for when the same text is used in different roles within the same context.

    ``encoding``
        The encoding Qt uses when interpreting ``text`` (defaults to ``CodecForTr``).

    ``n``
        Number used for plural selection; -1 disables it.

    ``qt_translate``
        The underlying Qt translation function; bound once at import time as a default argument.
    """
    return qt_translate(context, text, comment, encoding, n)
def get_text_file_string(text_file):
    """
    Open a file and return its content as unicode string. If the supplied file name is not a file then the function
    returns False. If there is an error loading the file or the content can't be decoded then the function will return
    None.

    ``text_file``
        The name of the file.
    """
    if not os.path.isfile(text_file):
        return False
    file_handle = None
    content = None
    try:
        file_handle = open(text_file, 'r')
        # NOTE(review): the file is opened in text mode but the first three
        # characters are compared against the raw UTF-8 BOM byte sequence;
        # on Python 3 a UTF-8 BOM decodes to a single '\ufeff' character, so
        # this check looks Python-2 specific -- confirm against the target
        # interpreter.
        if not file_handle.read(3) == '\xEF\xBB\xBF':
            # no BOM was found
            file_handle.seek(0)
        content = file_handle.read()
    except (IOError, UnicodeError):
        log.exception('Failed to open text file %s' % text_file)
    finally:
        if file_handle:
            file_handle.close()
    return content
def str_to_bool(string_value):
    """
    Convert a string representation of a boolean into a real boolean.

    ``string_value``
        The value to examine; genuine booleans are passed through unchanged.
    """
    if isinstance(string_value, bool):
        return string_value
    truthy_words = {'true', 'yes', 'y'}
    return str(string_value).strip().lower() in truthy_words
def build_icon(icon):
    """
    Build a QIcon instance from an existing QIcon, a resource location, or a physical file location. If the icon is a
    QIcon instance, that icon is simply returned. If not, it builds a QIcon instance from the resource or file name.

    ``icon``
        The icon to build. This can be a QIcon, a resource string in the form ``:/resource/file.png``, or a file
        location like ``/path/to/file.png``. However, the **recommended** way is to specify a resource string.
    """
    button_icon = QtGui.QIcon()
    if isinstance(icon, QtGui.QIcon):
        # Already an icon: pass it through untouched.
        button_icon = icon
    elif isinstance(icon, str):
        if icon.startswith(':/'):
            # Qt resource path.
            button_icon.addPixmap(QtGui.QPixmap(icon), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        else:
            # Plain file path; load via QImage.
            button_icon.addPixmap(QtGui.QPixmap.fromImage(QtGui.QImage(icon)), QtGui.QIcon.Normal, QtGui.QIcon.Off)
    elif isinstance(icon, QtGui.QImage):
        button_icon.addPixmap(QtGui.QPixmap.fromImage(icon), QtGui.QIcon.Normal, QtGui.QIcon.Off)
    return button_icon
def image_to_byte(image):
    """
    Resize an image to fit on the current screen for the web and returns it as a base64-encoded byte stream.

    ``image``
        The image to be converted (a QImage).
    """
    log.debug('image_to_byte - start')
    byte_array = QtCore.QByteArray()
    # use buffer to store pixmap into byteArray
    buffie = QtCore.QBuffer(byte_array)
    buffie.open(QtCore.QIODevice.WriteOnly)
    image.save(buffie, "PNG")
    log.debug('image_to_byte - end')
    # convert to base64 encoding so does not get missed!
    return bytes(byte_array.toBase64()).decode('utf-8')
def create_thumb(image_path, thumb_path, return_icon=True, size=None):
    """
    Create a thumbnail from the given image path and depending on ``return_icon`` it returns an icon from this thumb.

    ``image_path``
        The image file to create the icon from.

    ``thumb_path``
        The filename to save the thumbnail to.

    ``return_icon``
        States if an icon should be build and returned from the thumb. Defaults to ``True``.

    ``size``
        Allows to state a own size (QtCore.QSize) to use. Defaults to ``None``, which means that a default height of 88
        is used.
    """
    # The save format is taken from the thumb file extension.
    ext = os.path.splitext(thumb_path)[1].lower()
    reader = QtGui.QImageReader(image_path)
    if size is None:
        # Scale to the default height of 88, preserving the aspect ratio.
        # NOTE(review): relies on true division (Python 3); a zero-height
        # image would raise ZeroDivisionError here -- confirm inputs.
        ratio = reader.size().width() / reader.size().height()
        reader.setScaledSize(QtCore.QSize(int(ratio * 88), 88))
    else:
        reader.setScaledSize(size)
    thumb = reader.read()
    thumb.save(thumb_path, ext[1:])
    if not return_icon:
        return
    if os.path.exists(thumb_path):
        return build_icon(str(thumb_path))
    # Fallback for files with animation support.
    return build_icon(str(image_path))
def validate_thumb(file_path, thumb_path):
    """
    Validate whether a file's thumbnail still exists and is up to date. **Note**, you must **not** call this function
    before checking the existence of the file.

    ``file_path``
        The path to the file. The file **must** exist!

    ``thumb_path``
        The path to the thumb.
    """
    if not os.path.exists(thumb_path):
        return False
    # The thumb is valid only if the source was not modified after the
    # thumb was created.
    return os.stat(thumb_path).st_mtime >= os.stat(file_path).st_mtime
def resize_image(image_path, width, height, background='#000000'):
    """
    Resize an image to fit on the current screen.

    ``image_path``
        The path to the image to resize.

    ``width``
        The new image width.

    ``height``
        The new image height.

    ``background``
        The background colour. Defaults to black.
        DO NOT REMOVE THE DEFAULT BACKGROUND VALUE!
    """
    log.debug('resize_image - start')
    reader = QtGui.QImageReader(image_path)
    # The image's ratio.
    image_ratio = reader.size().width() / reader.size().height()
    resize_ratio = width / height
    # Figure out the size we want to resize the image to (keep aspect ratio).
    if image_ratio == resize_ratio:
        # Same aspect ratio: scale straight to the target size.
        size = QtCore.QSize(width, height)
    elif image_ratio < resize_ratio:
        # Use the image's height as reference for the new size.
        size = QtCore.QSize(image_ratio * height, height)
    else:
        # Use the image's width as reference for the new size.
        # 1 / (image_ratio / width) is algebraically width / image_ratio.
        size = QtCore.QSize(width, 1 / (image_ratio / width))
    reader.setScaledSize(size)
    preview = reader.read()
    if image_ratio == resize_ratio:
        # We neither need to centre the image nor add "bars" to the image.
        return preview
    real_width = preview.width()
    real_height = preview.height()
    # Paint the scaled image centred on a background-coloured canvas
    # and move it to the centre of the preview space.
    new_image = QtGui.QImage(width, height, QtGui.QImage.Format_ARGB32_Premultiplied)
    painter = QtGui.QPainter(new_image)
    painter.fillRect(new_image.rect(), QtGui.QColor(background))
    painter.drawImage((width - real_width) // 2, (height - real_height) // 2, preview)
    return new_image
def check_item_selected(list_widget, message):
    """
    Check if a list item is selected so an action may be performed on it.

    ``list_widget``
        The list to check for selected items.

    ``message``
        The message to give the user if no item is selected.
    """
    if not list_widget.selectedIndexes():
        # Nothing selected: tell the user and abort the action.
        QtGui.QMessageBox.information(list_widget.parent(),
                                      translate('OpenLP.MediaManagerItem', 'No Items Selected'), message)
        return False
    return True
def clean_tags(text):
    """
    Remove formatting tags from ``text`` so it can be displayed as plain text.
    """
    text = text.replace('<br>', '\n')
    text = text.replace('{br}', '\n')
    # NOTE(review): this replacement looks like a no-op in this copy of the
    # source; it likely originally replaced a non-breaking space (U+00A0 or
    # '&nbsp;') with a plain space -- confirm against upstream.
    text = text.replace(' ', ' ')
    for tag in FormattingTags.get_html_tags():
        text = text.replace(tag['start tag'], '')
        text = text.replace(tag['end tag'], '')
    return text
def expand_tags(text):
    """
    Expand formatting tags in ``text`` into their HTML equivalents for display.
    """
    for tag in FormattingTags.get_html_tags():
        start_tag, end_tag = tag['start tag'], tag['end tag']
        text = text.replace(start_tag, tag['start html'])
        text = text.replace(end_tag, tag['end html'])
    return text
def check_directory_exists(directory, do_not_log=False):
    """
    Ensure a directory exists, creating it (and any missing parents) if not.

    ``directory``
        The directory to make sure exists.

    ``do_not_log``
        To not log anything. This is needed for the start up, when the log isn't ready.
    """
    if not do_not_log:
        log.debug('check_directory_exists %s' % directory)
    try:
        if not os.path.exists(directory):
            os.makedirs(directory)
    except OSError:
        # os.makedirs raises OSError, not IOError (the old clause only
        # caught IOError, which on Python 2 never fires for makedirs).
        # Swallowing it keeps the original best-effort behaviour, e.g. when
        # another process created the directory between the exists() check
        # and makedirs().
        pass
def create_separated_list(stringlist):
    """
    Returns a string that represents a join of a list of strings with a localized separator. This function corresponds
    to QLocale::createSeparatedList which was introduced in Qt 4.8 and implements the algorithm from
    http://www.unicode.org/reports/tr35/#ListPatterns

    ``stringlist``
        List of unicode strings
    """
    # Use Qt's native implementation when both PyQt and Qt are new enough.
    if LooseVersion(Qt.PYQT_VERSION_STR) >= LooseVersion('4.9') and \
            LooseVersion(Qt.qVersion()) >= LooseVersion('4.8'):
        return QtCore.QLocale().createSeparatedList(stringlist)
    if not stringlist:
        return ''
    elif len(stringlist) == 1:
        return stringlist[0]
    elif len(stringlist) == 2:
        return translate('OpenLP.core.lib', '%s and %s',
                         'Locale list separator: 2 items') % (stringlist[0], stringlist[1])
    else:
        # Fold from the right: "a, b, c, and d" built as start(middle*(end)).
        merged = translate('OpenLP.core.lib', '%s, and %s',
                           'Locale list separator: end') % (stringlist[-2], stringlist[-1])
        for index in reversed(list(range(1, len(stringlist) - 2))):
            merged = translate('OpenLP.core.lib', '%s, %s',
                               'Locale list separator: middle') % (stringlist[index], merged)
        return translate('OpenLP.core.lib', '%s, %s', 'Locale list separator: start') % (stringlist[0], merged)
from .registry import Registry
from .uistrings import UiStrings
from .screen import ScreenList
from .settings import Settings
from .listwidgetwithdnd import ListWidgetWithDnD
from .treewidgetwithdnd import TreeWidgetWithDnD
from .formattingtags import FormattingTags
from .spelltextedit import SpellTextEdit
from .plugin import PluginStatus, StringContent, Plugin
from .pluginmanager import PluginManager
from .settingstab import SettingsTab
from .serviceitem import ServiceItem, ServiceItemType, ItemCapabilities
from .htmlbuilder import build_html, build_lyrics_format_css, build_lyrics_outline_css
from .toolbar import OpenLPToolbar
from .dockwidget import OpenLPDockWidget
from .imagemanager import ImageManager
from .renderer import Renderer
from .mediamanageritem import MediaManagerItem
|
marmyshev/item_title
|
openlp/core/lib/__init__.py
|
Python
|
gpl-2.0
| 14,174
|
[
"Brian"
] |
d23234ceaf18620c1f269a5c49626c8fd4e4ed94257ea3bfc90dab0c06e71e78
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2012 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
import mock
from stoqlib.gui.editors.callseditor import CallsEditor
from stoqlib.gui.search.callsearch import CallsSearch
from stoqlib.gui.test.uitestutils import GUITest
class TestCallsSearch(GUITest):
    """UI tests for the calls search dialog."""

    def test_show(self):
        """Searching by date shows the calls made on that date."""
        # 2 calls, different persons
        call1 = self.create_call()
        self.create_call(attendant=call1.attendant)
        search = CallsSearch(self.store, date=call1.date)
        search.search.refresh()
        self.check_search(search, 'calls-show')

    def test_with_person(self):
        """Searching by person restricts results to that person's calls."""
        # 2 calls, different persons
        call1 = self.create_call()
        self.create_call(attendant=call1.attendant)
        search = CallsSearch(self.store, person=call1.person)
        search.search.refresh()
        self.check_search(search, 'calls-show-person')

    def test_actions(self):
        """Print/edit/new buttons enable correctly and open the editor."""
        # 2 calls, different persons
        call1 = self.create_call()
        self.create_call(attendant=call1.attendant)
        search = CallsSearch(self.store, date=call1.date,
                             reuse_store=True)
        # Printing only makes sense once results have been fetched.
        self.assertNotSensitive(search, ['print_button'])
        search.search.refresh()
        self.assertSensitive(search, ['print_button'])
        # Editing requires a selected row.
        self.assertNotSensitive(search._toolbar, ['edit_button'])
        selected = search.results[0]
        search.results.select(selected)
        self.assertSensitive(search._toolbar, ['edit_button'])
        # assertEqual throughout: assertEquals is a deprecated alias.
        with mock.patch('stoqlib.gui.search.callsearch.print_report') as print_report:
            self.click(search.print_button)
            self.assertEqual(print_report.call_count, 1)
        with mock.patch('stoqlib.gui.search.callsearch.run_dialog') as run_dialog:
            self.click(search._toolbar.edit_button)
            self.assertEqual(run_dialog.call_count, 1)
            args, kwargs = run_dialog.call_args
            editor, parent, store, model, person, person_type = args
            self.assertEqual(editor, CallsEditor)
            self.assertEqual(parent, search)
            self.assertEqual(model, selected.call)
            self.assertEqual(person, None)
        with mock.patch('stoqlib.gui.search.callsearch.run_dialog') as run_dialog:
            self.click(search._toolbar.new_button)
            self.assertEqual(run_dialog.call_count, 1)
            args, kwargs = run_dialog.call_args
            editor, parent, store, model, person, person_type = args
            self.assertEqual(editor, CallsEditor)
            self.assertEqual(parent, search)
            self.assertEqual(model, None)
            self.assertEqual(person, None)
|
andrebellafronte/stoq
|
stoqlib/gui/test/test_callsearch.py
|
Python
|
gpl-2.0
| 3,489
|
[
"VisIt"
] |
0f223e66d9e8b5026a917694f634d47f66f85b482407935d1b5d6bd6434785b1
|
#@author Caoimhe Harvey
#Python 2.7
"""
Things left to do:
- Get date to increment by interval
- Add recurisive call
"""
from google_flight import google_flight_api
import datetime
def findBestRoute(array, start):
    """Greedily pick the next cheapest flight leg through the given airports.

    Queries the Google QPX flight API for the fare from the current
    "cheapest" airport (initially array[0]) to every other airport, then
    records the lowest-fare destination in the module-level
    `cheapestRoute` dict.

    @param array list of airport codes to visit
    @param start departure date as an ISO string (YYYY-MM-DD)

    NOTE(review): mutates the module-level `cheapestRoute`; `end` is
    unused; per the author's TODO at the top of the file the recursive
    call and date increment are missing, so only one leg is computed.
    """
    g = google_flight_api.GoogleFlight('')  # NOTE(review): empty API key
    temp = {}
    end = {}  # unused
    cheapest = array[0]
    for i in range(0,len(array)):
        if(cheapest != array[i]):
            # QPX Express request: one adult, one solution, non-refundable.
            data = {
                "request": {
                    "slice": [
                        {
                            "origin": cheapest,
                            "destination": array[i],
                            "date": start
                        }
                    ],
                    "passengers": {
                        "adultCount": 1,
                        "infantInLapCount": 0,
                        "infantInSeatCount": 0,
                        "childCount": 0,
                        "seniorCount": 0
                    },
                    "solutions": 1,
                    "refundable": 'false'
                }
            }
            g.get(data)
            lowestCost = g.getCost()
            print (lowestCost)
            temp.update({array[i]: str(lowestCost)})
            print(temp)
    # Pick the destination with the minimum fare.
    # NOTE(review): fares are stored as strings, so min() compares them
    # lexicographically, not numerically -- confirm intended.
    cheapest = min(temp, key = temp.get)
    print (cheapest)
    cheapestRoute.update({cheapest : temp[cheapest]})
    temp.clear()
    #missing recursive call here
# --- script entry point (Python 2: print statements, raw_input) ---

# Airport codes the user wants to visit, in input order.
desiredAirports = []
# destination -> fare for the chosen route; filled in by findBestRoute.
cheapestRoute = {}
print "Enter the first date of travel"
year = int(input('Enter a year: '))
month = int(input('Enter a month: '))
day = int(input('Enter a day: '))
startDate = datetime.date(year, month, day)
print startDate
#startDate = datetime.date(year, month, day)
# Days spent at each stop; currently unused (see TODO at top of file).
dateInterval = input("How many days will you likely spend in each place? ")
getAirports = True
print "Below please input the airport codes you wish to visit, type \"DONE\" when finished: "
while getAirports == True:
    airCode = raw_input()
    if (airCode == "DONE"):
        getAirports = False
    else:
        desiredAirports.append(airCode)
print(desiredAirports)
findBestRoute(desiredAirports,str(startDate))
for key, value in cheapestRoute.items():
    print key, value
|
caoimheharvey/Backpacking_Solution
|
tsp.py
|
Python
|
mit
| 2,341
|
[
"VisIt"
] |
4cfe1a06797af19ada47604d49fe36d1c3d5300bb766e187ffe9e9aac9185c9f
|
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Matti Hämäläinen <msh@nmr.mgh.harvard.edu>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Mads Jensen <mje.mads@gmail.com>
#
# License: BSD (3-clause)
import contextlib
import copy
import os.path as op
from types import GeneratorType
import numpy as np
from scipy import linalg, sparse
from scipy.sparse import coo_matrix, block_diag as sparse_block_diag
from .cov import Covariance
from .evoked import _get_peak
from .filter import resample
from .io.constants import FIFF
from .surface import read_surface, _get_ico_surface, mesh_edges
from .source_space import (_ensure_src, _get_morph_src_reordering,
_ensure_src_subject, SourceSpaces, _get_src_nn,
_import_nibabel, _get_mri_info_data,
_get_atlas_values, _check_volume_labels,
read_freesurfer_lut)
from .transforms import _get_trans, apply_trans
from .utils import (get_subjects_dir, _check_subject, logger, verbose, _pl,
_time_mask, warn, copy_function_doc_to_method_doc,
fill_doc, _check_option, _validate_type, _check_src_normal,
_check_stc_units, _check_pandas_installed, deprecated,
_check_pandas_index_arguments, _convert_times, _ensure_int,
_build_data_frame, _check_time_format, _check_path_like)
from .viz import (plot_source_estimates, plot_vector_source_estimates,
plot_volume_source_estimates)
from .io.base import TimeMixin
from .io.meas_info import Info
from .externals.h5io import read_hdf5, write_hdf5
def _read_stc(filename):
"""Aux Function."""
with open(filename, 'rb') as fid:
buf = fid.read()
stc = dict()
offset = 0
num_bytes = 4
# read tmin in ms
stc['tmin'] = float(np.frombuffer(buf, dtype=">f4", count=1,
offset=offset))
stc['tmin'] /= 1000.0
offset += num_bytes
# read sampling rate in ms
stc['tstep'] = float(np.frombuffer(buf, dtype=">f4", count=1,
offset=offset))
stc['tstep'] /= 1000.0
offset += num_bytes
# read number of vertices/sources
vertices_n = int(np.frombuffer(buf, dtype=">u4", count=1, offset=offset))
offset += num_bytes
# read the source vector
stc['vertices'] = np.frombuffer(buf, dtype=">u4", count=vertices_n,
offset=offset)
offset += num_bytes * vertices_n
# read the number of timepts
data_n = int(np.frombuffer(buf, dtype=">u4", count=1, offset=offset))
offset += num_bytes
if (vertices_n and # vertices_n can be 0 (empty stc)
((len(buf) // 4 - 4 - vertices_n) % (data_n * vertices_n)) != 0):
raise ValueError('incorrect stc file size')
# read the data matrix
stc['data'] = np.frombuffer(buf, dtype=">f4", count=vertices_n * data_n,
offset=offset)
stc['data'] = stc['data'].reshape([data_n, vertices_n]).T
return stc
def _write_stc(filename, tmin, tstep, vertices, data):
"""Write an STC file.
Parameters
----------
filename : string
The name of the STC file.
tmin : float
The first time point of the data in seconds.
tstep : float
Time between frames in seconds.
vertices : array of integers
Vertex indices (0 based).
data : 2D array
The data matrix (nvert * ntime).
"""
fid = open(filename, 'wb')
# write start time in ms
fid.write(np.array(1000 * tmin, dtype='>f4').tobytes())
# write sampling rate in ms
fid.write(np.array(1000 * tstep, dtype='>f4').tobytes())
# write number of vertices
fid.write(np.array(vertices.shape[0], dtype='>u4').tobytes())
# write the vertex indices
fid.write(np.array(vertices, dtype='>u4').tobytes())
# write the number of timepts
fid.write(np.array(data.shape[1], dtype='>u4').tobytes())
#
# write the data
#
fid.write(np.array(data.T, dtype='>f4').tobytes())
# close the file
fid.close()
def _read_3(fid):
"""Read 3 byte integer from file."""
data = np.fromfile(fid, dtype=np.uint8, count=3).astype(np.int32)
out = np.left_shift(data[0], 16) + np.left_shift(data[1], 8) + data[2]
return out
def _read_w(filename):
    """Read a w file.

    w files contain activations or source reconstructions for a single time
    point.

    Parameters
    ----------
    filename : string
        The name of the w file.

    Returns
    -------
    data: dict
        The w structure. It has the following keys:
           vertices       vertex indices (0 based)
           data           The data matrix (nvert long)
    """
    # buffering=0 works around a numpy fromfile issue with buffered handles
    with open(filename, 'rb', buffering=0) as fid:
        fid.read(2)  # two unused header bytes
        # number of vertices/sources is a 3-byte big-endian integer
        n_vertices = int(_read_3(fid))
        vertices = np.zeros(n_vertices, dtype=np.int32)
        values = np.zeros(n_vertices, dtype=np.float32)
        # records are interleaved: 3-byte vertex id, then one >f4 value
        for idx in range(n_vertices):
            vertices[idx] = _read_3(fid)
            values[idx] = np.fromfile(fid, dtype='>f4', count=1)[0]
    return dict(vertices=vertices, data=values)
def _write_3(fid, val):
"""Write 3 byte integer to file."""
f_bytes = np.zeros((3), dtype=np.uint8)
f_bytes[0] = (val >> 16) & 255
f_bytes[1] = (val >> 8) & 255
f_bytes[2] = val & 255
fid.write(f_bytes.tobytes())
def _write_w(filename, vertices, data):
    """Write a w file.

    w files contain activations or source reconstructions for a single time
    point.

    Parameters
    ----------
    filename : string
        The name of the w file.
    vertices : array of int
        Vertex indices (0 based).
    data : 1D array
        The data array (nvert).
    """
    assert (len(vertices) == len(data))
    # Use a context manager so the file is closed even if a write fails
    # (the previous open()/close() pair leaked the handle on error).
    with open(filename, 'wb') as fid:
        # write 2 zero bytes (unused header)
        fid.write(np.zeros((2), dtype=np.uint8).tobytes())
        # write number of vertices/sources (3 byte integer)
        vertices_n = len(vertices)
        _write_3(fid, vertices_n)
        # write the interleaved vertex/value records
        for i in range(vertices_n):
            _write_3(fid, vertices[i])
            # XXX: without float() endianness is wrong, not sure why
            fid.write(np.array(float(data[i]), dtype='>f4').tobytes())
def read_source_estimate(fname, subject=None):
    """Read a source estimate object.

    Parameters
    ----------
    fname : str
        Path to (a) source-estimate file(s).
    subject : str | None
        Name of the subject the source estimate(s) is (are) from.
        It is good practice to set this attribute to avoid combining
        incompatible labels and SourceEstimates (e.g., ones from other
        subjects). Note that due to file specification limitations, the
        subject name isn't saved to or loaded from files written to disk.

    Returns
    -------
    stc : SourceEstimate | VectorSourceEstimate | VolSourceEstimate | MixedSourceEstimate
        The source estimate object loaded from file.

    Notes
    -----
     - for volume source estimates, ``fname`` should provide the path to a
       single file named '*-vl.stc` or '*-vol.stc'
     - for surface source estimates, ``fname`` should either provide the
       path to the file corresponding to a single hemisphere ('*-lh.stc',
       '*-rh.stc') or only specify the asterisk part in these patterns. In any
       case, the function expects files for both hemisphere with names
       following this pattern.
     - for vector surface source estimates, only HDF5 files are supported.
     - for mixed source estimates, only HDF5 files are supported.
     - for single time point .w files, ``fname`` should follow the same
       pattern as for surface estimates, except that files are named
       '*-lh.w' and '*-rh.w'.
    """  # noqa: E501
    fname_arg = fname  # keep original argument for error messages
    _validate_type(fname, 'path-like', 'fname')
    fname = str(fname)

    # Step 1: determine the file type ('volume' | 'surface' | 'w' | 'h5')
    # from the extension and strip any hemisphere tag from `fname`
    ftype = None
    if op.exists(fname):
        if fname.endswith('-vl.stc') or fname.endswith('-vol.stc') or \
                fname.endswith('-vl.w') or fname.endswith('-vol.w'):
            ftype = 'volume'
        elif fname.endswith('.stc'):
            ftype = 'surface'
            if fname.endswith(('-lh.stc', '-rh.stc')):
                fname = fname[:-7]
            else:
                err = ("Invalid .stc filename: %r; needs to end with "
                       "hemisphere tag ('...-lh.stc' or '...-rh.stc')"
                       % fname)
                raise IOError(err)
        elif fname.endswith('.w'):
            ftype = 'w'
            if fname.endswith(('-lh.w', '-rh.w')):
                fname = fname[:-5]
            else:
                err = ("Invalid .w filename: %r; needs to end with "
                       "hemisphere tag ('...-lh.w' or '...-rh.w')"
                       % fname)
                raise IOError(err)
        elif fname.endswith('.h5'):
            ftype = 'h5'
            fname = fname[:-3]
        else:
            raise RuntimeError('Unknown extension for file %s' % fname_arg)

    # Step 2: for non-volume data the caller may have given only the stem;
    # check which hemisphere companions / HDF5 variants actually exist
    if ftype != 'volume':
        stc_exist = [op.exists(f)
                     for f in [fname + '-rh.stc', fname + '-lh.stc']]
        w_exist = [op.exists(f)
                   for f in [fname + '-rh.w', fname + '-lh.w']]
        if all(stc_exist) and ftype != 'w':
            ftype = 'surface'
        elif all(w_exist):
            ftype = 'w'
        elif op.exists(fname + '.h5'):
            ftype = 'h5'
        elif op.exists(fname + '-stc.h5'):
            ftype = 'h5'
            fname += '-stc'
        elif any(stc_exist) or any(w_exist):
            # exactly one hemisphere file present -> ambiguous, refuse
            raise IOError("Hemisphere missing for %r" % fname_arg)
        else:
            raise IOError("SourceEstimate File(s) not found for: %r"
                          % fname_arg)

    # Step 3: read the file(s) into a kwargs dict for the class constructor
    if ftype == 'volume':  # volume source space
        if fname.endswith('.stc'):
            kwargs = _read_stc(fname)
        elif fname.endswith('.w'):
            kwargs = _read_w(fname)
            kwargs['data'] = kwargs['data'][:, np.newaxis]
            kwargs['tmin'] = 0.0
            kwargs['tstep'] = 0.0
        else:
            raise IOError('Volume source estimate must end with .stc or .w')
        kwargs['vertices'] = [kwargs['vertices']]
    elif ftype == 'surface':  # stc file with surface source spaces
        lh = _read_stc(fname + '-lh.stc')
        rh = _read_stc(fname + '-rh.stc')
        assert lh['tmin'] == rh['tmin']
        assert lh['tstep'] == rh['tstep']
        kwargs = lh.copy()
        kwargs['data'] = np.r_[lh['data'], rh['data']]
        kwargs['vertices'] = [lh['vertices'], rh['vertices']]
    elif ftype == 'w':  # w file with surface source spaces
        lh = _read_w(fname + '-lh.w')
        rh = _read_w(fname + '-rh.w')
        kwargs = lh.copy()
        kwargs['data'] = np.atleast_2d(np.r_[lh['data'], rh['data']]).T
        kwargs['vertices'] = [lh['vertices'], rh['vertices']]
        # w files only have a single time point
        kwargs['tmin'] = 0.0
        kwargs['tstep'] = 1.0
        ftype = 'surface'
    elif ftype == 'h5':
        kwargs = read_hdf5(fname + '.h5', title='mnepython')
        ftype = kwargs.pop('src_type', 'surface')
        if isinstance(kwargs['vertices'], np.ndarray):
            kwargs['vertices'] = [kwargs['vertices']]

    if ftype != 'volume':
        # Make sure the vertices are ordered (the constructor requires
        # strictly increasing vertex numbers); reorder data to match
        vertices = kwargs['vertices']
        if any(np.any(np.diff(v.astype(int)) <= 0) for v in vertices):
            sidx = [np.argsort(verts) for verts in vertices]
            vertices = [verts[idx] for verts, idx in zip(vertices, sidx)]
            data = kwargs['data'][np.r_[sidx[0], len(sidx[0]) + sidx[1]]]
            kwargs['vertices'] = vertices
            kwargs['data'] = data

    if 'subject' not in kwargs:
        kwargs['subject'] = subject
    if subject is not None and subject != kwargs['subject']:
        raise RuntimeError('provided subject name "%s" does not match '
                           'subject name from the file "%s'
                           % (subject, kwargs['subject']))

    # Step 4: pick the class; start from the vector variant and drop to its
    # scalar counterpart when the data is only 2-dimensional
    if ftype in ('volume', 'discrete'):
        klass = VolVectorSourceEstimate
    elif ftype == 'mixed':
        klass = MixedVectorSourceEstimate
    else:
        assert ftype == 'surface'
        klass = VectorSourceEstimate
    if kwargs['data'].ndim < 3:
        klass = klass._scalar_class
    return klass(**kwargs)
def _get_src_type(src, vertices, warn_text=None):
src_type = None
if src is None:
if warn_text is None:
warn("src should not be None for a robust guess of stc type.")
else:
warn(warn_text)
if isinstance(vertices, list) and len(vertices) == 2:
src_type = 'surface'
elif isinstance(vertices, np.ndarray) or isinstance(vertices, list) \
and len(vertices) == 1:
src_type = 'volume'
elif isinstance(vertices, list) and len(vertices) > 2:
src_type = 'mixed'
else:
src_type = src.kind
assert src_type in ('surface', 'volume', 'mixed', 'discrete')
return src_type
def _make_stc(data, vertices, src_type=None, tmin=None, tstep=None,
              subject=None, vector=False, source_nn=None, warn_text=None):
    """Generate a surface, vector-surface, volume or mixed source estimate."""
    def guess_src_type():
        # fall back to guessing from the vertices structure (emits a warning)
        return _get_src_type(src=None, vertices=vertices, warn_text=warn_text)
    src_type = guess_src_type() if src_type is None else src_type

    if vector and src_type == 'surface' and source_nn is None:
        raise RuntimeError('No source vectors supplied.')

    # infer Klass from src_type
    if src_type == 'surface':
        Klass = VectorSourceEstimate if vector else SourceEstimate
    elif src_type in ('volume', 'discrete'):
        Klass = VolVectorSourceEstimate if vector else VolSourceEstimate
    elif src_type == 'mixed':
        Klass = MixedVectorSourceEstimate if vector else MixedSourceEstimate
    else:
        raise ValueError('vertices has to be either a list with one or more '
                         'arrays or an array')

    # Rotate back for vector source estimates
    if vector:
        n_vertices = sum(len(v) for v in vertices)
        # data is either one value per vertex (normal-only) or three
        assert data.shape[0] in (n_vertices, n_vertices * 3)
        if len(data) == n_vertices:
            assert src_type == 'surface'  # should only be possible for this
            assert source_nn.shape == (n_vertices, 3)
            # expand scalar (normal) activations to 3D vectors along the
            # source normals: (n_vert, 1) * (n_vert, 3, 1) broadcasting
            data = data[:, np.newaxis] * source_nn[:, :, np.newaxis]
        else:
            data = data.reshape((-1, 3, data.shape[-1]))
            assert source_nn.shape in ((n_vertices, 3, 3),
                                       (n_vertices * 3, 3))
            # This will be an identity transform for volumes, but let's keep
            # the code simple and general and just do the matrix mult
            data = np.matmul(
                np.transpose(source_nn.reshape(n_vertices, 3, 3),
                             axes=[0, 2, 1]), data)

    return Klass(
        data=data, vertices=vertices, tmin=tmin, tstep=tstep, subject=subject
    )
def _verify_source_estimate_compat(a, b):
"""Make sure two SourceEstimates are compatible for arith. operations."""
compat = False
if type(a) != type(b):
raise ValueError('Cannot combine %s and %s.' % (type(a), type(b)))
if len(a.vertices) == len(b.vertices):
if all(np.array_equal(av, vv)
for av, vv in zip(a.vertices, b.vertices)):
compat = True
if not compat:
raise ValueError('Cannot combine source estimates that do not have '
'the same vertices. Consider using stc.expand().')
if a.subject != b.subject:
raise ValueError('source estimates do not have the same subject '
'names, %r and %r' % (a.subject, b.subject))
class _BaseSourceEstimate(TimeMixin):
    """Abstract base class for source estimates.

    Holds source-space data either directly (``self._data``) or lazily as a
    ``(kernel, sens_data)`` pair whose product is only computed on first
    access of ``.data`` (see ``_remove_kernel_sens_data_``).  Subclasses must
    define ``_data_ndim``, ``_src_type`` and ``_src_count``.
    """

    # expected dimensionality of .data (3 for vector source estimates)
    _data_ndim = 2

    @verbose
    def __init__(self, data, vertices, tmin, tstep,
                 subject=None, verbose=None):  # noqa: D102
        # subclasses must provide these class attributes
        assert hasattr(self, '_data_ndim'), self.__class__.__name__
        assert hasattr(self, '_src_type'), self.__class__.__name__
        assert hasattr(self, '_src_count'), self.__class__.__name__
        kernel, sens_data = None, None
        if isinstance(data, tuple):
            # lazy form: data = (kernel, sens_data); product computed later
            if len(data) != 2:
                raise ValueError('If data is a tuple it has to be length 2')
            kernel, sens_data = data
            data = None
            if kernel.shape[1] != sens_data.shape[0]:
                raise ValueError('kernel (%s) and sens_data (%s) have invalid '
                                 'dimensions'
                                 % (kernel.shape, sens_data.shape))
            if sens_data.ndim != 2:
                raise ValueError('The sensor data must have 2 dimensions, got '
                                 '%s' % (sens_data.ndim,))

        _validate_type(vertices, list, 'vertices')
        if self._src_count is not None:
            if len(vertices) != self._src_count:
                raise ValueError('vertices must be a list with %d entries, '
                                 'got %s' % (self._src_count, len(vertices)))
        vertices = [np.array(v, np.int64) for v in vertices]  # makes copy
        if any(np.any(np.diff(v) <= 0) for v in vertices):
            raise ValueError('Vertices must be ordered in increasing order.')

        n_src = sum([len(v) for v in vertices])

        # safeguard the user against doing something silly
        if data is not None:
            if data.ndim not in (self._data_ndim, self._data_ndim - 1):
                raise ValueError('Data (shape %s) must have %s dimensions for '
                                 '%s' % (data.shape, self._data_ndim,
                                         self.__class__.__name__))
            if data.shape[0] != n_src:
                raise ValueError('Number of vertices (%i) and stc.shape[0] '
                                 '(%i) must match' % (n_src, data.shape[0]))
            if self._data_ndim == 3:
                if data.shape[1] != 3:
                    raise ValueError(
                        'Data for VectorSourceEstimate must have '
                        'shape[1] == 3, got shape %s' % (data.shape,))
            if data.ndim == self._data_ndim - 1:  # allow upbroadcasting
                data = data[..., np.newaxis]

        self._data = data
        self._tmin = tmin
        self._tstep = tstep
        self.vertices = vertices
        self.verbose = verbose
        self._kernel = kernel
        self._sens_data = sens_data
        self._kernel_removed = False
        self._times = None
        self._update_times()
        self.subject = _check_subject(None, subject, False)

    def __repr__(self):  # noqa: D105
        s = "%d vertices" % (sum(len(v) for v in self.vertices),)
        if self.subject is not None:
            s += ", subject : %s" % self.subject
        s += ", tmin : %s (ms)" % (1e3 * self.tmin)
        s += ", tmax : %s (ms)" % (1e3 * self.times[-1])
        s += ", tstep : %s (ms)" % (1e3 * self.tstep)
        s += ", data shape : %s" % (self.shape,)
        return "<%s | %s>" % (type(self).__name__, s)

    @fill_doc
    def get_peak(self, tmin=None, tmax=None, mode='abs',
                 vert_as_index=False, time_as_index=False):
        """Get location and latency of peak amplitude.

        Parameters
        ----------
        %(get_peak_parameters)s

        Returns
        -------
        pos : int
            The vertex exhibiting the maximum response, either ID or index.
        latency : float
            The latency in seconds.
        """
        # vector estimates (_data_ndim == 3) are reduced to magnitudes first
        stc = self.magnitude() if self._data_ndim == 3 else self
        if self._n_vertices == 0:
            raise RuntimeError('Cannot find peaks with no vertices')
        vert_idx, time_idx, _ = _get_peak(
            stc.data, self.times, tmin, tmax, mode)
        if not vert_as_index:
            vert_idx = np.concatenate(self.vertices)[vert_idx]
        if not time_as_index:
            time_idx = self.times[time_idx]
        return vert_idx, time_idx

    @verbose
    def extract_label_time_course(self, labels, src, mode='auto',
                                  allow_empty=False, verbose=None):
        """Extract label time courses for lists of labels.

        This function will extract one time course for each label. The way the
        time courses are extracted depends on the mode parameter.

        Parameters
        ----------
        %(eltc_labels)s
        %(eltc_src)s
        %(eltc_mode)s
        %(eltc_allow_empty)s
        %(verbose_meth)s

        Returns
        -------
        %(eltc_returns)s

        See Also
        --------
        extract_label_time_course : Extract time courses for multiple STCs.

        Notes
        -----
        %(eltc_mode_notes)s
        """
        return extract_label_time_course(
            self, labels, src, mode=mode, return_generator=False,
            allow_empty=allow_empty, verbose=verbose)

    @verbose
    def save(self, fname, ftype='h5', verbose=None):
        """Save the full source estimate to an HDF5 file.

        Parameters
        ----------
        fname : str
            The file name to write the source estimate to, should end in
            '-stc.h5'.
        ftype : str
            File format to use. Currently, the only allowed values is "h5".
        %(verbose_meth)s
        """
        _validate_type(fname, 'path-like', 'fname')
        fname = str(fname)
        if ftype != 'h5':
            raise ValueError('%s objects can only be written as HDF5 files.'
                             % (self.__class__.__name__,))
        if not fname.endswith('.h5'):
            fname += '-stc.h5'
        write_hdf5(fname,
                   dict(vertices=self.vertices, data=self.data,
                        tmin=self.tmin, tstep=self.tstep, subject=self.subject,
                        src_type=self._src_type),
                   title='mnepython', overwrite=True)

    @property
    def sfreq(self):
        """Sample rate of the data."""
        return 1. / self.tstep

    @property
    def _n_vertices(self):
        # total number of source vertices across all hemisphere/volume blocks
        return sum(len(v) for v in self.vertices)

    def _remove_kernel_sens_data_(self):
        """Remove kernel and sensor space data and compute self._data."""
        if self._kernel is not None or self._sens_data is not None:
            self._kernel_removed = True
            self._data = np.dot(self._kernel, self._sens_data)
            self._kernel = None
            self._sens_data = None

    @fill_doc
    def crop(self, tmin=None, tmax=None, include_tmax=True):
        """Restrict SourceEstimate to a time interval.

        Parameters
        ----------
        tmin : float | None
            The first time point in seconds. If None the first present is used.
        tmax : float | None
            The last time point in seconds. If None the last present is used.
        %(include_tmax)s

        Returns
        -------
        stc : instance of SourceEstimate
            The cropped source estimate.
        """
        mask = _time_mask(self.times, tmin, tmax, sfreq=self.sfreq,
                          include_tmax=include_tmax)
        self.tmin = self.times[np.where(mask)[0][0]]
        if self._kernel is not None and self._sens_data is not None:
            # cropping the sensor data avoids expanding the kernel product
            self._sens_data = self._sens_data[..., mask]
        else:
            self.data = self.data[..., mask]

        return self  # return self for chaining methods

    @verbose
    def resample(self, sfreq, npad='auto', window='boxcar', n_jobs=1,
                 verbose=None):
        """Resample data.

        Parameters
        ----------
        sfreq : float
            New sample rate to use.
        npad : int | str
            Amount to pad the start and end of the data.
            Can also be "auto" to use a padding that will result in
            a power-of-two size (can be much faster).
        window : str | tuple
            Window to use in resampling. See :func:`scipy.signal.resample`.
        %(n_jobs)s
        %(verbose_meth)s

        Returns
        -------
        stc : instance of SourceEstimate
            The resampled source estimate.

        Notes
        -----
        For some data, it may be more accurate to use npad=0 to reduce
        artifacts. This is dataset dependent -- check your data!

        Note that the sample rate of the original data is inferred from tstep.
        """
        # resampling in sensor instead of source space gives a somewhat
        # different result, so we don't allow it
        self._remove_kernel_sens_data_()

        o_sfreq = 1.0 / self.tstep
        data = self.data
        if data.dtype == np.float32:
            data = data.astype(np.float64)
        self.data = resample(data, sfreq, o_sfreq, npad, n_jobs=n_jobs)

        # adjust indirectly affected variables
        self.tstep = 1.0 / sfreq
        return self

    @property
    def data(self):
        """Numpy array of source estimate data."""
        if self._data is None:
            # compute the solution the first time the data is accessed and
            # remove the kernel and sensor data
            self._remove_kernel_sens_data_()
        return self._data

    @data.setter
    def data(self, value):
        value = np.asarray(value)
        if self._data is not None and value.ndim != self._data.ndim:
            raise ValueError('Data array should have %d dimensions.' %
                             self._data.ndim)
        n_verts = sum(len(v) for v in self.vertices)
        if value.shape[0] != n_verts:
            raise ValueError('The first dimension of the data array must '
                             'match the number of vertices (%d != %d)' %
                             (value.shape[0], n_verts))
        self._data = value
        self._update_times()

    @property
    def shape(self):
        """Shape of the data."""
        if self._data is not None:
            return self._data.shape
        # lazy (kernel, sens_data) form: derive the shape without expanding
        return (self._kernel.shape[0], self._sens_data.shape[1])

    @property
    def tmin(self):
        """The first timestamp."""
        return self._tmin

    @tmin.setter
    def tmin(self, value):
        self._tmin = float(value)
        self._update_times()

    @property
    def tstep(self):
        """The change in time between two consecutive samples (1 / sfreq)."""
        return self._tstep

    @tstep.setter
    def tstep(self, value):
        if value <= 0:
            raise ValueError('.tstep must be greater than 0.')
        self._tstep = float(value)
        self._update_times()

    @property
    def times(self):
        """A timestamp for each sample."""
        return self._times

    @times.setter
    def times(self, value):
        raise ValueError('You cannot write to the .times attribute directly. '
                         'This property automatically updates whenever '
                         '.tmin, .tstep or .data changes.')

    def _update_times(self):
        """Update the times attribute after changing tmin, tmax, or tstep."""
        self._times = self.tmin + (self.tstep * np.arange(self.shape[-1]))
        # keep times read-only; it is recomputed via the setters instead
        self._times.flags.writeable = False

    def __add__(self, a):
        """Add source estimates."""
        stc = self.copy()
        stc += a
        return stc

    def __iadd__(self, a):  # noqa: D105
        self._remove_kernel_sens_data_()
        if isinstance(a, _BaseSourceEstimate):
            _verify_source_estimate_compat(self, a)
            self.data += a.data
        else:
            self.data += a
        return self

    def mean(self):
        """Make a summary stc file with mean over time points.

        Returns
        -------
        stc : SourceEstimate | VectorSourceEstimate
            The modified stc.
        """
        out = self.sum()
        out /= len(self.times)
        return out

    def sum(self):
        """Make a summary stc file with sum over time points.

        Returns
        -------
        stc : SourceEstimate | VectorSourceEstimate
            The modified stc.
        """
        data = self.data
        tmax = self.tmin + self.tstep * data.shape[-1]
        # the single remaining sample is centered on the original interval
        tmin = (self.tmin + tmax) / 2.
        tstep = tmax - self.tmin
        sum_stc = self.__class__(self.data.sum(axis=-1, keepdims=True),
                                 vertices=self.vertices, tmin=tmin,
                                 tstep=tstep, subject=self.subject)
        return sum_stc

    def __sub__(self, a):
        """Subtract source estimates."""
        stc = self.copy()
        stc -= a
        return stc

    def __isub__(self, a):  # noqa: D105
        self._remove_kernel_sens_data_()
        if isinstance(a, _BaseSourceEstimate):
            _verify_source_estimate_compat(self, a)
            self.data -= a.data
        else:
            self.data -= a
        return self

    def __truediv__(self, a):  # noqa: D105
        return self.__div__(a)

    def __div__(self, a):  # noqa: D105
        """Divide source estimates."""
        stc = self.copy()
        stc /= a
        return stc

    def __itruediv__(self, a):  # noqa: D105
        return self.__idiv__(a)

    def __idiv__(self, a):  # noqa: D105
        self._remove_kernel_sens_data_()
        if isinstance(a, _BaseSourceEstimate):
            _verify_source_estimate_compat(self, a)
            self.data /= a.data
        else:
            self.data /= a
        return self

    def __mul__(self, a):
        """Multiply source estimates."""
        stc = self.copy()
        stc *= a
        return stc

    def __imul__(self, a):  # noqa: D105
        self._remove_kernel_sens_data_()
        if isinstance(a, _BaseSourceEstimate):
            _verify_source_estimate_compat(self, a)
            self.data *= a.data
        else:
            self.data *= a
        return self

    def __pow__(self, a):  # noqa: D105
        stc = self.copy()
        stc **= a
        return stc

    def __ipow__(self, a):  # noqa: D105
        self._remove_kernel_sens_data_()
        self.data **= a
        return self

    def __radd__(self, a):  # noqa: D105
        return self + a

    def __rsub__(self, a):  # noqa: D105
        return self - a

    def __rmul__(self, a):  # noqa: D105
        return self * a

    def __rdiv__(self, a):  # noqa: D105
        return self / a

    def __neg__(self):  # noqa: D105
        """Negate the source estimate."""
        stc = self.copy()
        stc._remove_kernel_sens_data_()
        stc.data *= -1
        return stc

    def __pos__(self):  # noqa: D105
        return self

    def __abs__(self):
        """Compute the absolute value of the data.

        Returns
        -------
        stc : instance of _BaseSourceEstimate
            A version of the source estimate, where the data attribute is set
            to abs(self.data).
        """
        stc = self.copy()
        stc._remove_kernel_sens_data_()
        stc._data = abs(stc._data)
        return stc

    def sqrt(self):
        """Take the square root.

        Returns
        -------
        stc : instance of SourceEstimate
            A copy of the SourceEstimate with sqrt(data).
        """
        return self ** (0.5)

    def copy(self):
        """Return copy of source estimate instance.

        Returns
        -------
        stc : instance of SourceEstimate
            A copy of the source estimate.
        """
        return copy.deepcopy(self)

    def bin(self, width, tstart=None, tstop=None, func=np.mean):
        """Return a source estimate object with data summarized over time bins.

        Time bins of ``width`` seconds. This method is intended for
        visualization only. No filter is applied to the data before binning,
        making the method inappropriate as a tool for downsampling data.

        Parameters
        ----------
        width : scalar
            Width of the individual bins in seconds.
        tstart : scalar | None
            Time point where the first bin starts. The default is the first
            time point of the stc.
        tstop : scalar | None
            Last possible time point contained in a bin (if the last bin would
            be shorter than width it is dropped). The default is the last time
            point of the stc.
        func : callable
            Function that is applied to summarize the data. Needs to accept a
            numpy.array as first input and an ``axis`` keyword argument.

        Returns
        -------
        stc : SourceEstimate | VectorSourceEstimate
            The binned source estimate.
        """
        if tstart is None:
            tstart = self.tmin
        if tstop is None:
            tstop = self.times[-1]

        times = np.arange(tstart, tstop + self.tstep, width)
        nt = len(times) - 1
        data = np.empty(self.shape[:-1] + (nt,), dtype=self.data.dtype)
        for i in range(nt):
            # half-open bin [times[i], times[i + 1]) summarized by `func`
            idx = (self.times >= times[i]) & (self.times < times[i + 1])
            data[..., i] = func(self.data[..., idx], axis=-1)

        tmin = times[0] + width / 2.
        stc = self.copy()
        stc._data = data
        stc.tmin = tmin
        stc.tstep = width
        return stc

    def transform_data(self, func, idx=None, tmin_idx=None, tmax_idx=None):
        """Get data after a linear (time) transform has been applied.

        The transform is applied to each source time course independently.

        Parameters
        ----------
        func : callable
            The transform to be applied, including parameters (see, e.g.,
            :func:`functools.partial`). The first parameter of the function is
            the input data. The first return value is the transformed data,
            remaining outputs are ignored. The first dimension of the
            transformed data has to be the same as the first dimension of the
            input data.
        idx : array | None
            Indicices of source time courses for which to compute transform.
            If None, all time courses are used.
        tmin_idx : int | None
            Index of first time point to include. If None, the index of the
            first time point is used.
        tmax_idx : int | None
            Index of the first time point not to include. If None, time points
            up to (and including) the last time point are included.

        Returns
        -------
        data_t : ndarray
            The transformed data.

        Notes
        -----
        Applying transforms can be significantly faster if the
        SourceEstimate object was created using "(kernel, sens_data)", for
        the "data" parameter as the transform is applied in sensor space.
        Inverse methods, e.g., "apply_inverse_epochs", or "apply_lcmv_epochs"
        do this automatically (if possible).
        """
        if idx is None:
            # use all time courses by default
            idx = slice(None, None)

        if self._kernel is None and self._sens_data is None:
            if self._kernel_removed:
                warn('Performance can be improved by not accessing the data '
                     'attribute before calling this method.')

            # transform source space data directly
            data_t = func(self.data[idx, ..., tmin_idx:tmax_idx])

            if isinstance(data_t, tuple):
                # use only first return value
                data_t = data_t[0]
        else:
            # apply transform in sensor space
            sens_data_t = func(self._sens_data[:, tmin_idx:tmax_idx])

            if isinstance(sens_data_t, tuple):
                # use only first return value
                sens_data_t = sens_data_t[0]

            # apply inverse
            data_shape = sens_data_t.shape
            if len(data_shape) > 2:
                # flatten the last dimensions
                sens_data_t = sens_data_t.reshape(data_shape[0],
                                                  np.prod(data_shape[1:]))

            data_t = np.dot(self._kernel[idx, :], sens_data_t)

            # restore original shape if necessary
            if len(data_shape) > 2:
                data_t = data_t.reshape(data_t.shape[0], *data_shape[1:])

        return data_t

    def transform(self, func, idx=None, tmin=None, tmax=None, copy=False):
        """Apply linear transform.

        The transform is applied to each source time course independently.

        Parameters
        ----------
        func : callable
            The transform to be applied, including parameters (see, e.g.,
            :func:`functools.partial`). The first parameter of the function is
            the input data. The first two dimensions of the transformed data
            should be (i) vertices and (ii) time.  See Notes for details.
        idx : array | None
            Indices of source time courses for which to compute transform.
            If None, all time courses are used.
        tmin : float | int | None
            First time point to include (ms). If None, self.tmin is used.
        tmax : float | int | None
            Last time point to include (ms). If None, self.tmax is used.
        copy : bool
            If True, return a new instance of SourceEstimate instead of
            modifying the input inplace.

        Returns
        -------
        stcs : SourceEstimate | VectorSourceEstimate | list
            The transformed stc or, in the case of transforms which yield
            N-dimensional output (where N > 2), a list of stcs. For a list,
            copy must be True.

        Notes
        -----
        Transforms which yield 3D
        output (e.g. time-frequency transforms) are valid, so long as the
        first two dimensions are vertices and time. In this case, the
        copy parameter must be True and a list of
        SourceEstimates, rather than a single instance of SourceEstimate,
        will be returned, one for each index of the 3rd dimension of the
        transformed data.  In the case of transforms yielding 2D output
        (e.g. filtering), the user has the option of modifying the input
        inplace (copy = False) or returning a new instance of
        SourceEstimate (copy = True) with the transformed data.

        Applying transforms can be significantly faster if the
        SourceEstimate object was created using "(kernel, sens_data)", for
        the "data" parameter as the transform is applied in sensor space.
        Inverse methods, e.g., "apply_inverse_epochs", or "apply_lcmv_epochs"
        do this automatically (if possible).
        """
        # min and max data indices to include
        times = 1000. * self.times
        t_idx = np.where(_time_mask(times, tmin, tmax, sfreq=self.sfreq))[0]
        if tmin is None:
            tmin_idx = None
        else:
            tmin_idx = t_idx[0]

        if tmax is None:
            tmax_idx = None
        else:
            # +1, because upper boundary needs to include the last sample
            tmax_idx = t_idx[-1] + 1

        data_t = self.transform_data(func, idx=idx, tmin_idx=tmin_idx,
                                     tmax_idx=tmax_idx)

        # account for change in n_vertices
        if idx is not None:
            # NOTE(review): uses lh/rh vertno, so this path presumably
            # assumes a surface source estimate -- confirm against callers
            idx_lh = idx[idx < len(self.lh_vertno)]
            idx_rh = idx[idx >= len(self.lh_vertno)] - len(self.lh_vertno)
            verts_lh = self.lh_vertno[idx_lh]
            verts_rh = self.rh_vertno[idx_rh]
        else:
            verts_lh = self.lh_vertno
            verts_rh = self.rh_vertno
        verts = [verts_lh, verts_rh]

        tmin_idx = 0 if tmin_idx is None else tmin_idx
        tmin = self.times[tmin_idx]

        if data_t.ndim > 2:
            # return list of stcs if transformed data has dimensionality > 2
            if copy:
                stcs = [SourceEstimate(data_t[:, :, a], verts, tmin,
                                       self.tstep, self.subject)
                        for a in range(data_t.shape[-1])]
            else:
                raise ValueError('copy must be True if transformed data has '
                                 'more than 2 dimensions')
        else:
            # return new or overwritten stc
            stcs = self if not copy else self.copy()
            stcs.vertices = verts
            stcs.data = data_t
            stcs.tmin = tmin

        return stcs

    @fill_doc
    def to_data_frame(self, index=None, scalings=None,
                      long_format=False, time_format='ms'):
        """Export data in tabular structure as a pandas DataFrame.

        Vertices are converted to columns in the DataFrame. By default,
        an additional column "time" is added, unless ``index='time'``
        (in which case time values form the DataFrame's index).

        Parameters
        ----------
        %(df_index_evk)s
            Defaults to ``None``.
        %(df_scalings)s
        %(df_longform_stc)s
        %(df_time_format)s

            .. versionadded:: 0.20

        Returns
        -------
        %(df_return)s
        """
        # check pandas once here, instead of in each private utils function
        pd = _check_pandas_installed()  # noqa
        # arg checking
        valid_index_args = ['time', 'subject']
        valid_time_formats = ['ms', 'timedelta']
        index = _check_pandas_index_arguments(index, valid_index_args)
        time_format = _check_time_format(time_format, valid_time_formats)
        # get data
        data = self.data.T
        times = self.times
        # prepare extra columns / multiindex
        mindex = list()
        default_index = ['time']
        if self.subject is not None:
            default_index = ['subject', 'time']
            mindex.append(('subject', np.repeat(self.subject, data.shape[0])))
        times = _convert_times(self, times, time_format)
        mindex.append(('time', times))
        # triage surface vs volume source estimates
        col_names = list()
        kinds = ['VOL'] * len(self.vertices)
        if isinstance(self, (_BaseSurfaceSourceEstimate,
                             _BaseMixedSourceEstimate)):
            # first two vertex blocks are the left/right hemispheres
            kinds[:2] = ['LH', 'RH']
        for ii, (kind, vertno) in enumerate(zip(kinds, self.vertices)):
            col_names.extend(['{}_{}'.format(kind, vert) for vert in vertno])
        # build DataFrame
        df = _build_data_frame(self, data, None, long_format, mindex, index,
                               default_index=default_index,
                               col_names=col_names, col_kind='source')
        return df
def _center_of_mass(vertices, values, hemi, surf, subject, subjects_dir,
                    restrict_vertices):
    """Find the center of mass on a surface.

    Returns the vertex id (drawn from ``restrict_vertices``) whose position
    is closest to the value-weighted centroid of ``vertices``.
    """
    if (values == 0).all() or (values < 0).any():
        raise ValueError('All values must be non-negative and at least one '
                         'must be non-zero, cannot compute COM')
    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
    surf_path = op.join(subjects_dir, subject, 'surf', hemi + '.' + surf)
    surf = read_surface(surf_path)
    rr = surf[0]  # vertex positions of the full surface
    # normalize the candidate-vertex specification to an array of vertex ids
    if restrict_vertices is True:
        candidates = vertices
    elif restrict_vertices is False:
        candidates = np.arange(rr.shape[0])
    elif isinstance(restrict_vertices, SourceSpaces):
        use_rh = restrict_vertices.kind == 'surface' and hemi == 'rh'
        candidates = restrict_vertices[1 if use_rh else 0]['vertno']
    else:
        candidates = np.array(restrict_vertices, int)
    # value-weighted centroid of the active vertex positions
    c_o_m = np.sum(rr[vertices, :].T * values, axis=1) / np.sum(values)
    # pick the candidate vertex closest to the centroid
    dists = np.sqrt(np.mean((rr[candidates, :] - c_o_m) ** 2, axis=1))
    return candidates[np.argmin(dists)]
@fill_doc
class _BaseSurfaceSourceEstimate(_BaseSourceEstimate):
    """Abstract base class for surface source estimates.
    Parameters
    ----------
    data : array
        The data in source space.
    vertices : list of array, shape (2,)
        Vertex numbers corresponding to the data. The first element of the list
        contains vertices of left hemisphere and the second element contains
        vertices of right hemisphere.
    tmin : scalar
        Time point of the first sample in data.
    tstep : scalar
        Time step between successive samples in data.
    subject : str | None
        The subject name. While not necessary, it is safer to set the
        subject parameter to avoid analysis errors.
    %(verbose)s
    Attributes
    ----------
    subject : str | None
        The subject name.
    times : array of shape (n_times,)
        The time vector.
    vertices : list of array, shape (2,)
        Vertex numbers corresponding to the data. The first element of the list
        contains vertices of left hemisphere and the second element contains
        vertices of right hemisphere.
    data : array
        The data in source space.
    shape : tuple
        The shape of the data. A tuple of int (n_dipoles, n_times).
    """
    # Surface estimates always consist of exactly two source spaces: [lh, rh].
    _src_type = 'surface'
    _src_count = 2
    @property
    def lh_data(self):
        """Left hemisphere data."""
        # Left-hemisphere rows are stored first in ``data``.
        return self.data[:len(self.lh_vertno)]
    @property
    def rh_data(self):
        """Right hemisphere data."""
        return self.data[len(self.lh_vertno):]
    @property
    def lh_vertno(self):
        """Left hemisphere vertno."""
        return self.vertices[0]
    @property
    def rh_vertno(self):
        """Right hemisphere vertno."""
        return self.vertices[1]
    def _hemilabel_stc(self, label):
        # Restrict this stc to a single-hemisphere label; returns the matching
        # (vertices, data rows).
        if label.hemi == 'lh':
            stc_vertices = self.vertices[0]
        else:
            stc_vertices = self.vertices[1]
        # find index of the Label's vertices
        idx = np.nonzero(np.in1d(stc_vertices, label.vertices))[0]
        # find output vertices
        vertices = stc_vertices[idx]
        # find data
        if label.hemi == 'rh':
            # rh rows follow all lh rows, hence the offset
            values = self.data[idx + len(self.vertices[0])]
        else:
            values = self.data[idx]
        return vertices, values
    def in_label(self, label):
        """Get a source estimate object restricted to a label.
        SourceEstimate contains the time course of
        activation of all sources inside the label.
        Parameters
        ----------
        label : Label | BiHemiLabel
            The label (as created for example by mne.read_label). If the label
            does not match any sources in the SourceEstimate, a ValueError is
            raised.
        Returns
        -------
        stc : SourceEstimate | VectorSourceEstimate
            The source estimate restricted to the given label.
        """
        # make sure label and stc are compatible
        from .label import Label, BiHemiLabel
        _validate_type(label, (Label, BiHemiLabel), 'label')
        if label.subject is not None and self.subject is not None \
                and label.subject != self.subject:
            raise RuntimeError('label and stc must have same subject names, '
                               'currently "%s" and "%s"' % (label.subject,
                                                            self.subject))
        if label.hemi == 'both':
            lh_vert, lh_val = self._hemilabel_stc(label.lh)
            rh_vert, rh_val = self._hemilabel_stc(label.rh)
            vertices = [lh_vert, rh_vert]
            values = np.vstack((lh_val, rh_val))
        elif label.hemi == 'lh':
            lh_vert, values = self._hemilabel_stc(label)
            vertices = [lh_vert, np.array([], int)]
        else:
            assert label.hemi == 'rh'
            rh_vert, values = self._hemilabel_stc(label)
            vertices = [np.array([], int), rh_vert]
        if sum([len(v) for v in vertices]) == 0:
            raise ValueError('No vertices match the label in the stc file')
        label_stc = self.__class__(values, vertices=vertices, tmin=self.tmin,
                                   tstep=self.tstep, subject=self.subject)
        return label_stc
    def expand(self, vertices):
        """Expand SourceEstimate to include more vertices.
        This will add rows to stc.data (zero-filled) and modify stc.vertices
        to include all vertices in stc.vertices and the input vertices.
        Parameters
        ----------
        vertices : list of array
            New vertices to add. Can also contain old values.
        Returns
        -------
        stc : SourceEstimate | VectorSourceEstimate
            The modified stc (note: method operates inplace).
        """
        if not isinstance(vertices, list):
            raise TypeError('vertices must be a list')
        if not len(self.vertices) == len(vertices):
            raise ValueError('vertices must have the same length as '
                             'stc.vertices')
        # can no longer use kernel and sensor data
        self._remove_kernel_sens_data_()
        inserters = list()
        offsets = [0]
        for vi, (v_old, v_new) in enumerate(zip(self.vertices, vertices)):
            v_new = np.setdiff1d(v_new, v_old)
            inds = np.searchsorted(v_old, v_new)
            # newer numpy might overwrite inds after np.insert, copy here
            inserters += [inds.copy()]
            offsets += [len(v_old)]
            self.vertices[vi] = np.insert(v_old, inds, v_new)
        # translate per-hemisphere insertion points into global data-row
        # indices, then zero-fill the corresponding rows
        inds = [ii + offset for ii, offset in zip(inserters, offsets[:-1])]
        inds = np.concatenate(inds)
        new_data = np.zeros((len(inds),) + self.data.shape[1:])
        self.data = np.insert(self.data, inds, new_data, axis=0)
        return self
    @verbose
    def to_original_src(self, src_orig, subject_orig=None,
                        subjects_dir=None, verbose=None):
        """Get a source estimate from morphed source to the original subject.
        Parameters
        ----------
        src_orig : instance of SourceSpaces
            The original source spaces that were morphed to the current
            subject.
        subject_orig : str | None
            The original subject. For most source spaces this shouldn't need
            to be provided, since it is stored in the source space itself.
        %(subjects_dir)s
        %(verbose_meth)s
        Returns
        -------
        stc : SourceEstimate | VectorSourceEstimate
            The transformed source estimate.
        See Also
        --------
        morph_source_spaces
        Notes
        -----
        .. versionadded:: 0.10.0
        """
        if self.subject is None:
            raise ValueError('stc.subject must be set')
        src_orig = _ensure_src(src_orig, kind='surface')
        subject_orig = _ensure_src_subject(src_orig, subject_orig)
        data_idx, vertices = _get_morph_src_reordering(
            self.vertices, src_orig, subject_orig, self.subject, subjects_dir)
        return self.__class__(self._data[data_idx], vertices,
                              self.tmin, self.tstep, subject_orig)
    @fill_doc
    def get_peak(self, hemi=None, tmin=None, tmax=None, mode='abs',
                 vert_as_index=False, time_as_index=False):
        """Get location and latency of peak amplitude.
        Parameters
        ----------
        hemi : {'lh', 'rh', None}
            The hemi to be considered. If None, the entire source space is
            considered.
        %(get_peak_parameters)s
        Returns
        -------
        pos : int
            The vertex exhibiting the maximum response, either ID or index.
        latency : float | int
            The time point of the maximum response, either latency in seconds
            or index.
        """
        _check_option('hemi', hemi, ('lh', 'rh', None))
        vertex_offset = 0
        if hemi is not None:
            # restrict to one hemisphere by building a temporary
            # single-hemisphere stc and delegating to its get_peak
            if hemi == 'lh':
                data = self.lh_data
                vertices = [self.lh_vertno, []]
            else:
                vertex_offset = len(self.vertices[0])
                data = self.rh_data
                vertices = [[], self.rh_vertno]
            meth = self.__class__(
                data, vertices, self.tmin, self.tstep).get_peak
        else:
            meth = super().get_peak
        out = meth(tmin=tmin, tmax=tmax, mode=mode,
                   vert_as_index=vert_as_index,
                   time_as_index=time_as_index)
        if vertex_offset and vert_as_index:
            # index was computed within rh data only; shift to global index
            out = (out[0] + vertex_offset, out[1])
        return out
@fill_doc
class SourceEstimate(_BaseSurfaceSourceEstimate):
    """Container for surface source estimates.
    Parameters
    ----------
    data : array of shape (n_dipoles, n_times) | tuple, shape (2,)
        The data in source space. When it is a single array, the
        left hemisphere is stored in data[:len(vertices[0])] and the right
        hemisphere is stored in data[-len(vertices[1]):].
        When data is a tuple, it contains two arrays:
        - "kernel" shape (n_vertices, n_sensors) and
        - "sens_data" shape (n_sensors, n_times).
        In this case, the source space data corresponds to
        ``np.dot(kernel, sens_data)``.
    vertices : list of array, shape (2,)
        Vertex numbers corresponding to the data. The first element of the list
        contains vertices of left hemisphere and the second element contains
        vertices of right hemisphere.
    tmin : scalar
        Time point of the first sample in data.
    tstep : scalar
        Time step between successive samples in data.
    subject : str | None
        The subject name. While not necessary, it is safer to set the
        subject parameter to avoid analysis errors.
    %(verbose)s
    Attributes
    ----------
    subject : str | None
        The subject name.
    times : array of shape (n_times,)
        The time vector.
    vertices : list of array, shape (2,)
        The indices of the dipoles in the left and right source space.
    data : array of shape (n_dipoles, n_times)
        The data in source space.
    shape : tuple
        The shape of the data. A tuple of int (n_dipoles, n_times).
    See Also
    --------
    VectorSourceEstimate : A container for vector source estimates.
    VolSourceEstimate : A container for volume source estimates.
    MixedSourceEstimate : A container for mixed surface + volume source
        estimates.
    """
    @verbose
    def save(self, fname, ftype='stc', verbose=None):
        """Save the source estimates to a file.
        Parameters
        ----------
        fname : str
            The stem of the file name. The file names used for surface source
            spaces are obtained by adding "-lh.stc" and "-rh.stc" (or "-lh.w"
            and "-rh.w") to the stem provided, for the left and the right
            hemisphere, respectively.
        ftype : str
            File format to use. Allowed values are "stc" (default), "w",
            and "h5". The "w" format only supports a single time point.
        %(verbose_meth)s
        """
        _validate_type(fname, 'path-like', 'fname')
        fname = str(fname)
        _check_option('ftype', ftype, ['stc', 'w', 'h5'])
        # split rows into per-hemisphere data for the per-hemi file formats
        lh_data = self.data[:len(self.lh_vertno)]
        rh_data = self.data[-len(self.rh_vertno):]
        if ftype == 'stc':
            if np.iscomplexobj(self.data):
                raise ValueError("Cannot save complex-valued STC data in "
                                 "FIFF format; please set ftype='h5' to save "
                                 "in HDF5 format instead, or cast the data to "
                                 "real numbers before saving.")
            logger.info('Writing STC to disk...')
            _write_stc(fname + '-lh.stc', tmin=self.tmin, tstep=self.tstep,
                       vertices=self.lh_vertno, data=lh_data)
            _write_stc(fname + '-rh.stc', tmin=self.tmin, tstep=self.tstep,
                       vertices=self.rh_vertno, data=rh_data)
        elif ftype == 'w':
            if self.shape[1] != 1:
                raise ValueError('w files can only contain a single time '
                                 'point')
            logger.info('Writing STC to disk (w format)...')
            _write_w(fname + '-lh.w', vertices=self.lh_vertno,
                     data=lh_data[:, 0])
            _write_w(fname + '-rh.w', vertices=self.rh_vertno,
                     data=rh_data[:, 0])
        elif ftype == 'h5':
            super().save(fname)
        logger.info('[done]')
    @copy_function_doc_to_method_doc(plot_source_estimates)
    def plot(self, subject=None, surface='inflated', hemi='lh',
             colormap='auto', time_label='auto', smoothing_steps=10,
             transparent=True, alpha=1.0, time_viewer='auto',
             subjects_dir=None,
             figure=None, views='lat', colorbar=True, clim='auto',
             cortex="classic", size=800, background="black",
             foreground=None, initial_time=None, time_unit='s',
             backend='auto', spacing='oct6', title=None,
             show_traces='auto', verbose=None):
        brain = plot_source_estimates(
            self, subject, surface=surface, hemi=hemi, colormap=colormap,
            time_label=time_label, smoothing_steps=smoothing_steps,
            transparent=transparent, alpha=alpha, time_viewer=time_viewer,
            subjects_dir=subjects_dir, figure=figure, views=views,
            colorbar=colorbar, clim=clim, cortex=cortex, size=size,
            background=background, foreground=foreground,
            initial_time=initial_time, time_unit=time_unit, backend=backend,
            spacing=spacing, title=title, show_traces=show_traces,
            verbose=verbose)
        return brain
    @verbose
    def estimate_snr(self, info, fwd, cov, verbose=None):
        r"""Compute time-varying SNR in the source space.
        This function should only be used with source estimates with units
        nanoAmperes (i.e., MNE-like solutions, *not* dSPM or sLORETA).
        .. warning:: This function currently only works properly for fixed
                     orientation.
        Parameters
        ----------
        info : instance Info
            The measurement info.
        fwd : instance of Forward
            The forward solution used to create the source estimate.
        cov : instance of Covariance
            The noise covariance used to estimate the resting cortical
            activations. Should be an evoked covariance, not empty room.
        %(verbose)s
        Returns
        -------
        snr_stc : instance of SourceEstimate
            The source estimate with the SNR computed.
        Notes
        -----
        We define the SNR in decibels for each source location at each
        time point as:
        .. math::
            {\rm SNR} = 10\log_{10}[\frac{a^2}{N}\sum_k\frac{b_k^2}{s_k^2}]
        where :math:`b_k` is the signal on sensor :math:`k` provided by the
        forward model for a source with unit amplitude, :math:`a` is the
        source amplitude, :math:`N` is the number of sensors, and
        :math:`s_k^2` is the noise variance on sensor :math:`k`.
        References
        ----------
        .. [1] Goldenholz, D. M., Ahlfors, S. P., Hämäläinen, M. S., Sharon,
               D., Ishitobi, M., Vaina, L. M., & Stufflebeam, S. M. (2009).
               Mapping the Signal-To-Noise-Ratios of Cortical Sources in
               Magnetoencephalography and Electroencephalography.
               Human Brain Mapping, 30(4), 1077–1086. doi:10.1002/hbm.20571
        """
        from .forward import convert_forward_solution, Forward
        from .minimum_norm.inverse import _prepare_forward
        _validate_type(fwd, Forward, 'fwd')
        _validate_type(info, Info, 'info')
        _validate_type(cov, Covariance, 'cov')
        _check_stc_units(self)
        if (self.data >= 0).all():
            warn('This STC appears to be from free orientation, currently SNR'
                 ' function is valid only for fixed orientation')
        fwd = convert_forward_solution(fwd, surf_ori=True, force_fixed=False)
        # G is gain matrix [ch x src], cov is noise covariance [ch x ch]
        G, _, _, _, _, _, _, cov, _ = _prepare_forward(
            fwd, info, cov, fixed=True, loose=0, rank=None, pca=False,
            use_cps=True, exp=None, limit_depth_chs=False, combine_xyz='fro',
            allow_fixed_depth=False, limit=None)
        G = G['sol']['data']
        n_channels = cov['dim']  # number of sensors/channels
        # per-source squared sensor signals b_k^2 and noise variances s_k^2
        b_k2 = (G * G).T
        s_k2 = np.diag(cov['data'])
        scaling = (1 / n_channels) * np.sum(b_k2 / s_k2, axis=1, keepdims=True)
        snr_stc = self.copy()
        snr_stc._data[:] = 10 * np.log10((self.data * self.data) * scaling)
        return snr_stc
    @fill_doc
    def center_of_mass(self, subject=None, hemi=None, restrict_vertices=False,
                       subjects_dir=None, surf='sphere'):
        """Compute the center of mass of activity.
        This function computes the spatial center of mass on the surface
        as well as the temporal center of mass as in [1]_.
        .. note:: All activity must occur in a single hemisphere, otherwise
                  an error is raised. The "mass" of each point in space for
                  computing the spatial center of mass is computed by summing
                  across time, and vice-versa for each point in time in
                  computing the temporal center of mass. This is useful for
                  quantifying spatio-temporal cluster locations, especially
                  when combined with :func:`mne.vertex_to_mni`.
        Parameters
        ----------
        subject : str | None
            The subject the stc is defined for.
        hemi : int, or None
            Calculate the center of mass for the left (0) or right (1)
            hemisphere. If None, one of the hemispheres must be all zeroes,
            and the center of mass will be calculated for the other
            hemisphere (useful for getting COM for clusters).
        restrict_vertices : bool | array of int | instance of SourceSpaces
            If True, returned vertex will be one from stc. Otherwise, it could
            be any vertex from surf. If an array of int, the returned vertex
            will come from that array. If instance of SourceSpaces (as of
            0.13), the returned vertex will be from the given source space.
            For most accurate estimates, do not restrict vertices.
        %(subjects_dir)s
        surf : str
            The surface to use for Euclidean distance center of mass
            finding. The default here is "sphere", which finds the center
            of mass on the spherical surface to help avoid potential issues
            with cortical folding.
        Returns
        -------
        vertex : int
            Vertex of the spatial center of mass for the inferred hemisphere,
            with each vertex weighted by the sum of the stc across time. For a
            boolean stc, then, this would be weighted purely by the duration
            each vertex was active.
        hemi : int
            Hemisphere the vertex was taken from.
        t : float
            Time of the temporal center of mass (weighted by the sum across
            source vertices).
        See Also
        --------
        mne.Label.center_of_mass
        mne.vertex_to_mni
        References
        ----------
        .. [1] Larson and Lee, "The cortical dynamics underlying effective
               switching of auditory spatial attention", NeuroImage 2012.
        """
        if not isinstance(surf, str):
            raise TypeError('surf must be a string, got %s' % (type(surf),))
        subject = _check_subject(self.subject, subject)
        if np.any(self.data < 0):
            raise ValueError('Cannot compute COM with negative values')
        values = np.sum(self.data, axis=1)  # sum across time
        vert_inds = [np.arange(len(self.vertices[0])),
                     np.arange(len(self.vertices[1])) + len(self.vertices[0])]
        if hemi is None:
            # infer the hemisphere as the one with any nonzero activity
            hemi = np.where(np.array([np.sum(values[vi])
                                      for vi in vert_inds]))[0]
            if not len(hemi) == 1:
                raise ValueError('Could not infer hemisphere')
            hemi = hemi[0]
        _check_option('hemi', hemi, [0, 1])
        vertices = self.vertices[hemi]
        values = values[vert_inds[hemi]]  # left or right
        del vert_inds
        vertex = _center_of_mass(
            vertices, values, hemi=['lh', 'rh'][hemi], surf=surf,
            subject=subject, subjects_dir=subjects_dir,
            restrict_vertices=restrict_vertices)
        # do time center of mass by using the values across space
        masses = np.sum(self.data, axis=0).astype(float)
        t_ind = np.sum(masses * np.arange(self.shape[1])) / np.sum(masses)
        t = self.tmin + self.tstep * t_ind
        return vertex, hemi, t
class _BaseVectorSourceEstimate(_BaseSourceEstimate):
    # Vector estimates store one 3-vector per dipole: (n_dipoles, 3, n_times).
    _data_ndim = 3
    @verbose
    def __init__(self, data, vertices=None, tmin=None, tstep=None,
                 subject=None, verbose=None):  # noqa: D102
        # subclasses must define _scalar_class (the non-vector counterpart)
        assert hasattr(self, '_scalar_class')
        super().__init__(data, vertices, tmin, tstep, subject, verbose)
    def magnitude(self):
        """Compute magnitude of activity without directionality.
        Returns
        -------
        stc : instance of SourceEstimate
            The source estimate without directionality information.
        """
        # Euclidean norm over the orientation axis (axis 1).
        data_mag = np.linalg.norm(self.data, axis=1)
        return self._scalar_class(
            data_mag, self.vertices, self.tmin, self.tstep, self.subject,
            self.verbose)
    @deprecated('stc.normal(src) is deprecated and will be removed in 0.22, '
                'use stc.project("normal", src)[0] instead')
    @fill_doc
    def normal(self, src, use_cps=True):
        """Compute activity orthogonal to the cortex.
        Parameters
        ----------
        src : instance of SourceSpaces
            The source space for which this source estimate is specified.
        %(use_cps)s
            Should be the same value that was used when the forward model
            was computed (typically True).
            .. versionadded:: 0.20
        Returns
        -------
        stc : instance of SourceEstimate
            The source estimate only retaining the activity orthogonal to the
            cortex.
        """
        return self.project('normal', src, use_cps)[0]
    def _get_src_normals(self, src, use_cps):
        # stack the per-source-space normals for this stc's vertices
        normals = np.vstack([_get_src_nn(s, use_cps, v) for s, v in
                             zip(src, self.vertices)])
        return normals
    @fill_doc
    def project(self, directions, src=None, use_cps=True):
        """Project the data for each vertex in a given direction.
        Parameters
        ----------
        directions : ndarray, shape (n_vertices, 3) | str
            Can be:
            - ``'normal'``
                Project onto the source space normals.
            - ``'pca'``
                SVD will be used to project onto the direction of maximal
                power for each source.
            - :class:`~numpy.ndarray`, shape (n_vertices, 3)
                Projection directions for each source.
        src : instance of SourceSpaces | None
            The source spaces corresponding to the source estimate.
            Not used when ``directions`` is an array, optional when
            ``directions='pca'``.
        %(use_cps)s
            Should be the same value that was used when the forward model
            was computed (typically True).
        Returns
        -------
        stc : instance of SourceEstimate
            The projected source estimate.
        directions : ndarray, shape (n_vertices, 3)
            The directions that were computed (or just used).
        Notes
        -----
        When using SVD, there is a sign ambiguity for the direction of maximal
        power. When ``src is None``, the direction is chosen that makes the
        resulting time waveform sum positive (i.e., have positive amplitudes).
        When ``src`` is provided, the directions are flipped in the direction
        of the source normals, i.e., outward from cortex for surface source
        spaces and in the +Z / superior direction for volume source spaces.
        .. versionadded:: 0.21
        """
        _validate_type(directions, (str, np.ndarray), 'directions')
        _validate_type(src, (None, SourceSpaces), 'src')
        if isinstance(directions, str):
            _check_option('directions', directions, ('normal', 'pca'),
                          extra='when str')
            if directions == 'normal':
                if src is None:
                    raise ValueError(
                        'If directions="normal", src cannot be None')
                _check_src_normal('normal', src)
                directions = self._get_src_normals(src, use_cps)
            else:
                assert directions == 'pca'
                x = self.data
                if not np.isrealobj(self.data):
                    _check_option('stc.data.dtype', self.data.dtype,
                                  (np.complex64, np.complex128))
                    # view complex data as interleaved real/imag floats so
                    # the SVD below operates on real-valued data
                    dtype = \
                        np.float32 if x.dtype == np.complex64 else np.float64
                    x = x.view(dtype)
                    assert x.shape[-1] == 2 * self.data.shape[-1]
                u, _, v = np.linalg.svd(x, full_matrices=False)
                # first left-singular vector = direction of maximal power
                directions = u[:, :, 0]
                # The sign is arbitrary, so let's flip it in the direction that
                # makes the resulting time series the most positive:
                if src is None:
                    signs = np.sum(v[:, 0].real, axis=1, keepdims=True)
                else:
                    normals = self._get_src_normals(src, use_cps)
                    signs = np.sum(directions * normals, axis=1, keepdims=True)
                assert signs.shape == (self.data.shape[0], 1)
                signs = np.sign(signs)
                signs[signs == 0] = 1.
                directions *= signs
        _check_option(
            'directions.shape', directions.shape, [(self.data.shape[0], 3)])
        # per-vertex dot product of direction with the 3-vector time series
        data_norm = np.matmul(directions[:, np.newaxis], self.data)[:, 0]
        stc = self._scalar_class(
            data_norm, self.vertices, self.tmin, self.tstep, self.subject,
            self.verbose)
        return stc, directions
class _BaseVolSourceEstimate(_BaseSourceEstimate):
    # Volume estimates may have any number of source spaces (None = no check).
    _src_type = 'volume'
    _src_count = None
    @copy_function_doc_to_method_doc(plot_volume_source_estimates)
    def plot(self, src, subject=None, subjects_dir=None, mode='stat_map',
             bg_img='T1.mgz', colorbar=True, colormap='auto', clim='auto',
             transparent='auto', show=True, initial_time=None,
             initial_pos=None, verbose=None):
        # vector estimates (_data_ndim == 3) are plotted by their magnitude
        data = self.magnitude() if self._data_ndim == 3 else self
        return plot_volume_source_estimates(
            data, src=src, subject=subject, subjects_dir=subjects_dir,
            mode=mode, bg_img=bg_img, colorbar=colorbar, colormap=colormap,
            clim=clim, transparent=transparent, show=show,
            initial_time=initial_time, initial_pos=initial_pos,
            verbose=verbose)
    # Override here to provide the volume-specific options
    @verbose
    def extract_label_time_course(self, labels, src, mode='auto',
                                  allow_empty=False, trans=None,
                                  mri_resolution=True, verbose=None):
        """Extract label time courses for lists of labels.
        This function will extract one time course for each label. The way the
        time courses are extracted depends on the mode parameter.
        Parameters
        ----------
        %(eltc_labels)s
        %(eltc_src)s
        %(eltc_mode)s
        %(eltc_allow_empty)s
        %(eltc_trans)s
        %(eltc_mri_resolution)s
        %(verbose_meth)s
        Returns
        -------
        %(eltc_returns)s
        See Also
        --------
        extract_label_time_course : Extract time courses for multiple STCs.
        Notes
        -----
        %(eltc_mode_notes)s
        """
        return extract_label_time_course(
            self, labels, src, mode=mode, return_generator=False,
            allow_empty=allow_empty, trans=trans,
            mri_resolution=mri_resolution, verbose=verbose)
    @fill_doc
    def in_label(self, label, mri, src, trans=None):
        """Get a source estimate object restricted to a label.
        SourceEstimate contains the time course of
        activation of all sources inside the label.
        Parameters
        ----------
        label : str | int
            The label to use. Can be the name of a label if using a standard
            FreeSurfer atlas, or an integer value to extract from the ``mri``.
        mri : str
            Path to the atlas to use.
        src : instance of SourceSpaces
            The volumetric source space. It must be a single, whole-brain
            volume.
        %(trans_not_none)s
        Returns
        -------
        stc : VolSourceEstimate | VolVectorSourceEstimate
            The source estimate restricted to the given label.
        Notes
        -----
        .. versionadded:: 0.21.0
        """
        if len(self.vertices) != 1:
            raise RuntimeError('This method can only be used with whole-brain '
                               'volume source spaces')
        _validate_type(label, (str, 'int-like'), 'label')
        if isinstance(label, str):
            volume_label = [label]
        else:
            volume_label = {'Volume ID %s' % (label): _ensure_int(label)}
        label = _volume_labels(src, (mri, volume_label), trans,
                               mri_resolution=False)
        assert len(label) == 1
        label = label[0]
        vertices = label.vertices
        # keep only the stc vertices that fall inside the label
        keep = np.in1d(self.vertices[0], label.vertices)
        values, vertices = self.data[keep], [self.vertices[0][keep]]
        label_stc = self.__class__(values, vertices=vertices, tmin=self.tmin,
                                   tstep=self.tstep, subject=self.subject)
        return label_stc
    def save_as_volume(self, fname, src, dest='mri', mri_resolution=False,
                       format='nifti1'):
        """Save a volume source estimate in a NIfTI file.
        Parameters
        ----------
        fname : str
            The name of the generated nifti file.
        src : list
            The list of source spaces (should all be of type volume).
        dest : 'mri' | 'surf'
            If 'mri' the volume is defined in the coordinate system of
            the original T1 image. If 'surf' the coordinate system
            of the FreeSurfer surface is used (Surface RAS).
        mri_resolution : bool
            It True the image is saved in MRI resolution.
            .. warning:: If you have many time points, the file produced can be
                         huge.
        format : str
            Either 'nifti1' (default) or 'nifti2'.
            .. versionadded:: 0.17
        Returns
        -------
        img : instance Nifti1Image
            The image object.
        Notes
        -----
        .. versionadded:: 0.9.0
        """
        import nibabel as nib
        _validate_type(fname, 'path-like', 'fname')
        fname = str(fname)
        img = self.as_volume(src, dest=dest, mri_resolution=mri_resolution,
                             format=format)
        nib.save(img, fname)
    def as_volume(self, src, dest='mri', mri_resolution=False,
                  format='nifti1'):
        """Export volume source estimate as a nifti object.
        Parameters
        ----------
        src : instance of SourceSpaces
            The source spaces (should all be of type volume, or part of a
            mixed source space).
        dest : 'mri' | 'surf'
            If 'mri' the volume is defined in the coordinate system of
            the original T1 image. If 'surf' the coordinate system
            of the FreeSurfer surface is used (Surface RAS).
        mri_resolution : bool
            It True the image is saved in MRI resolution.
            .. warning:: If you have many time points, the file produced can be
                         huge.
        format : str
            Either 'nifti1' (default) or 'nifti2'.
        Returns
        -------
        img : instance of Nifti1Image
            The image object.
        Notes
        -----
        .. versionadded:: 0.9.0
        """
        from .morph import _interpolate_data
        # vector estimates are exported by their magnitude
        data = self.magnitude() if self._data_ndim == 3 else self
        return _interpolate_data(data, src, mri_resolution=mri_resolution,
                                 mri_space=True, output=format)
@fill_doc
class VolSourceEstimate(_BaseVolSourceEstimate):
    """Container for volume source estimates.
    Parameters
    ----------
    data : array of shape (n_dipoles, n_times) | tuple, shape (2,)
        The data in source space. The data can either be a single array or
        a tuple with two arrays: "kernel" shape (n_vertices, n_sensors) and
        "sens_data" shape (n_sensors, n_times). In this case, the source
        space data corresponds to ``np.dot(kernel, sens_data)``.
    vertices : array of shape (n_dipoles,)
        The indices of the dipoles in the source space.
    tmin : scalar
        Time point of the first sample in data.
    tstep : scalar
        Time step between successive samples in data.
    subject : str | None
        The subject name. While not necessary, it is safer to set the
        subject parameter to avoid analysis errors.
    %(verbose)s
    Attributes
    ----------
    subject : str | None
        The subject name.
    times : array of shape (n_times,)
        The time vector.
    vertices : array of shape (n_dipoles,)
        The indices of the dipoles in the source space.
    data : array of shape (n_dipoles, n_times)
        The data in source space.
    shape : tuple
        The shape of the data. A tuple of int (n_dipoles, n_times).
    See Also
    --------
    SourceEstimate : A container for surface source estimates.
    VolVectorSourceEstimate : A container for volume vector source estimates.
    MixedSourceEstimate : A container for mixed surface + volume source
        estimates.
    Notes
    -----
    .. versionadded:: 0.9.0
    """
    @verbose
    def save(self, fname, ftype='stc', verbose=None):
        """Save the source estimates to a file.
        Parameters
        ----------
        fname : str
            The stem of the file name. The stem is extended with "-vl.stc"
            or "-vl.w".
        ftype : str
            File format to use. Allowed values are "stc" (default), "w",
            and "h5". The "w" format only supports a single time point.
        %(verbose_meth)s
        """
        _validate_type(fname, 'path-like', 'fname')
        fname = str(fname)
        _check_option('ftype', ftype, ['stc', 'w', 'h5'])
        # .stc/.w store a single vertex array; multi-volume needs HDF5
        if ftype != 'h5' and len(self.vertices) != 1:
            raise ValueError('Can only write to .stc or .w if a single volume '
                             'source space was used, use .h5 instead')
        if ftype == 'stc':
            logger.info('Writing STC to disk...')
            # append the volume suffix unless the caller already provided one
            if not (fname.endswith('-vl.stc') or fname.endswith('-vol.stc')):
                fname += '-vl.stc'
            _write_stc(fname, tmin=self.tmin, tstep=self.tstep,
                       vertices=self.vertices[0], data=self.data)
        elif ftype == 'w':
            logger.info('Writing STC to disk (w format)...')
            if not (fname.endswith('-vl.w') or fname.endswith('-vol.w')):
                fname += '-vl.w'
            _write_w(fname, vertices=self.vertices[0], data=self.data)
        elif ftype == 'h5':
            super().save(fname, 'h5')
        logger.info('[done]')
@fill_doc
class VolVectorSourceEstimate(_BaseVectorSourceEstimate,
                              _BaseVolSourceEstimate):
    """Container for volume vector source estimates.
    Parameters
    ----------
    data : array of shape (n_dipoles, 3, n_times)
        The data in source space. Each dipole contains three vectors that
        denote the dipole strength in X, Y and Z directions over time.
    vertices : array of shape (n_dipoles,)
        The indices of the dipoles in the source space.
    tmin : scalar
        Time point of the first sample in data.
    tstep : scalar
        Time step between successive samples in data.
    subject : str | None
        The subject name. While not necessary, it is safer to set the
        subject parameter to avoid analysis errors.
    %(verbose)s
    Attributes
    ----------
    subject : str | None
        The subject name.
    times : array of shape (n_times,)
        The time vector.
    vertices : array of shape (n_dipoles,)
        The indices of the dipoles in the source space.
    data : array of shape (n_dipoles, 3, n_times)
        The data in source space.
    shape : tuple
        The shape of the data. A tuple of int (n_dipoles, 3, n_times).
    See Also
    --------
    SourceEstimate : A container for surface source estimates.
    VectorSourceEstimate : A container for vector source estimates.
    MixedSourceEstimate : A container for mixed surface + volume source
        estimates.
    Notes
    -----
    .. versionadded:: 0.9.0
    """
    # magnitude()/project() return the scalar volume variant
    _scalar_class = VolSourceEstimate
@fill_doc
class VectorSourceEstimate(_BaseVectorSourceEstimate,
                           _BaseSurfaceSourceEstimate):
    """Container for vector surface source estimates.
    For each vertex, the magnitude of the current is defined in the X, Y and Z
    directions.
    Parameters
    ----------
    data : array of shape (n_dipoles, 3, n_times)
        The data in source space. Each dipole contains three vectors that
        denote the dipole strength in X, Y and Z directions over time.
    vertices : list of array, shape (2,)
        Vertex numbers corresponding to the data. The first element of the list
        contains vertices of left hemisphere and the second element contains
        vertices of right hemisphere.
    tmin : float
        Time point of the first sample in data.
    tstep : float
        Time step between successive samples in data.
    subject : str | None
        The subject name. While not necessary, it is safer to set the
        subject parameter to avoid analysis errors.
    %(verbose)s
    Attributes
    ----------
    subject : str | None
        The subject name.
    times : array of shape (n_times,)
        The time vector.
    shape : tuple
        The shape of the data. A tuple of int (n_dipoles, 3, n_times).
    See Also
    --------
    SourceEstimate : A container for surface source estimates.
    VolSourceEstimate : A container for volume source estimates.
    MixedSourceEstimate : A container for mixed surface + volume source
        estimates.
    Notes
    -----
    .. versionadded:: 0.15
    """
    # magnitude()/project() return the scalar surface variant
    _scalar_class = SourceEstimate
    @copy_function_doc_to_method_doc(plot_vector_source_estimates)
    def plot(self, subject=None, hemi='lh', colormap='hot', time_label='auto',
             smoothing_steps=10, transparent=True, brain_alpha=0.4,
             overlay_alpha=None, vector_alpha=1.0, scale_factor=None,
             time_viewer='auto', subjects_dir=None, figure=None, views='lat',
             colorbar=True, clim='auto', cortex='classic', size=800,
             background='black', foreground=None, initial_time=None,
             time_unit='s', show_traces='auto', verbose=None):  # noqa: D102
        return plot_vector_source_estimates(
            self, subject=subject, hemi=hemi, colormap=colormap,
            time_label=time_label, smoothing_steps=smoothing_steps,
            transparent=transparent, brain_alpha=brain_alpha,
            overlay_alpha=overlay_alpha, vector_alpha=vector_alpha,
            scale_factor=scale_factor, time_viewer=time_viewer,
            subjects_dir=subjects_dir, figure=figure, views=views,
            colorbar=colorbar, clim=clim, cortex=cortex, size=size,
            background=background, foreground=foreground,
            initial_time=initial_time, time_unit=time_unit,
            show_traces=show_traces, verbose=verbose,
        )
###############################################################################
# Mixed source estimate (two cortical surfs plus other stuff)
class _BaseMixedSourceEstimate(_BaseSourceEstimate):
    """Shared behavior for mixed (two cortical surfaces + volume) estimates.

    The first two entries of ``vertices`` are the left/right cortical
    hemispheres; any remaining entries belong to volume source spaces.
    """

    _src_type = 'mixed'
    _src_count = None

    @verbose
    def __init__(self, data, vertices=None, tmin=None, tstep=None,
                 subject=None, verbose=None):  # noqa: D102
        # A mixed estimate needs at least the two hemisphere vertex arrays.
        if not isinstance(vertices, list) or len(vertices) < 2:
            raise ValueError('Vertices must be a list of numpy arrays with '
                             'one array per source space.')
        super().__init__(data, vertices=vertices, tmin=tmin,
                         tstep=tstep, subject=subject,
                         verbose=verbose)

    @property
    def _n_surf_vert(self):
        # Total number of cortical (surface) vertices; their rows come first
        # in ``self.data``.
        return len(self.vertices[0]) + len(self.vertices[1])

    def surface(self):
        """Return the cortical surface source estimate.

        Returns
        -------
        stc : instance of SourceEstimate or VectorSourceEstimate
            The surface source estimate.
        """
        klass = (VectorSourceEstimate if self._data_ndim == 3
                 else SourceEstimate)
        n_surf = self._n_surf_vert
        return klass(self.data[:n_surf], self.vertices[:2],
                     self.tmin, self.tstep, self.subject, self.verbose)

    def volume(self):
        """Return the volume source estimate.

        Returns
        -------
        stc : instance of VolSourceEstimate or VolVectorSourceEstimate
            The volume source estimate.
        """
        klass = (VolVectorSourceEstimate if self._data_ndim == 3
                 else VolSourceEstimate)
        n_surf = self._n_surf_vert
        return klass(self.data[n_surf:], self.vertices[2:],
                     self.tmin, self.tstep, self.subject, self.verbose)
@fill_doc
class MixedSourceEstimate(_BaseMixedSourceEstimate):
    """Container for mixed surface and volume source estimates.

    Parameters
    ----------
    data : array of shape (n_dipoles, n_times) | tuple, shape (2,)
        The data in source space. The data can either be a single array or
        a tuple with two arrays: "kernel" shape (n_vertices, n_sensors) and
        "sens_data" shape (n_sensors, n_times). In this case, the source
        space data corresponds to ``np.dot(kernel, sens_data)``.
    vertices : list of array
        Vertex numbers corresponding to the data. The list contains arrays
        with one array per source space.
    tmin : scalar
        Time point of the first sample in data.
    tstep : scalar
        Time step between successive samples in data.
    subject : str | None
        The subject name. While not necessary, it is safer to set the
        subject parameter to avoid analysis errors.
    %(verbose)s

    Attributes
    ----------
    subject : str | None
        The subject name.
    times : array of shape (n_times,)
        The time vector.
    vertices : list of array
        Vertex numbers corresponding to the data. The list contains arrays
        with one array per source space.
    data : array of shape (n_dipoles, n_times)
        The data in source space.
    shape : tuple
        The shape of the data. A tuple of int (n_dipoles, n_times).

    See Also
    --------
    SourceEstimate : A container for surface source estimates.
    VectorSourceEstimate : A container for vector source estimates.
    VolSourceEstimate : A container for volume source estimates.
    VolVectorSourceEstimate : A container for Volume vector source estimates.

    Notes
    -----
    .. versionadded:: 0.9.0
    """

    @fill_doc
    @deprecated('stc_mixed.plot_surface(...) is deprecated and will be removed'
                ' in 0.22, use stc_mixed.surface().plot(...)')
    def plot_surface(self, src, subject=None, surface='inflated', hemi='lh',
                     colormap='auto', time_label='time=%02.f ms',
                     smoothing_steps=10,
                     transparent=None, alpha=1.0, time_viewer='auto',
                     subjects_dir=None, figure=None,
                     views='lat', colorbar=True, clim='auto'):
        """Plot surface source estimates with PySurfer.

        Note: PySurfer currently needs the SUBJECTS_DIR environment variable,
        which will automatically be set by this function. Plotting multiple
        SourceEstimates with different values for subjects_dir will cause
        PySurfer to use the wrong FreeSurfer surfaces when using methods of
        the returned Brain object. It is therefore recommended to set the
        SUBJECTS_DIR environment variable or always use the same value for
        subjects_dir (within the same Python session).

        Parameters
        ----------
        src : SourceSpaces
            The source spaces to plot.
        subject : str | None
            The subject name corresponding to FreeSurfer environment
            variable SUBJECT. If None stc.subject will be used. If that
            is None, the environment will be used.
        surface : str
            The type of surface (inflated, white etc.).
        hemi : str, 'lh' | 'rh' | 'split' | 'both'
            The hemisphere to display. Using 'both' or 'split' requires
            PySurfer version 0.4 or above.
        colormap : str | np.ndarray of float, shape(n_colors, 3 | 4)
            Name of colormap to use. See `~mne.viz.plot_source_estimates`.
        time_label : str
            How to print info about the time instant visualized.
        smoothing_steps : int
            The amount of smoothing.
        transparent : bool | None
            If True, use a linear transparency between fmin and fmid.
            None will choose automatically based on colormap type.
        alpha : float
            Alpha value to apply globally to the overlay.
        time_viewer : bool
            Display time viewer GUI.
        %(subjects_dir)s
        figure : instance of mayavi.mlab.Figure | None
            If None, the last figure will be cleaned and a new figure will
            be created.
        views : str | list
            View to use. See `surfer.Brain`.
        colorbar : bool
            If True, display colorbar on scene.
        clim : str | dict
            Colorbar properties specification.
            See `~mne.viz.plot_source_estimates`.

        Returns
        -------
        brain : instance of surfer.Brain
            A instance of `surfer.Brain` from PySurfer.
        """
        # extract surface source spaces
        surf = _ensure_src(src, kind='surface')

        # extract surface source estimate: the cortical vertices occupy the
        # first rows of self.data, one block per hemisphere
        data = self.data[:surf[0]['nuse'] + surf[1]['nuse']]
        vertices = [s['vertno'] for s in surf]

        # wrap the surface portion as a plain SourceEstimate and delegate
        stc = SourceEstimate(data, vertices, self.tmin, self.tstep,
                             self.subject, self.verbose)

        return plot_source_estimates(stc, subject, surface=surface, hemi=hemi,
                                     colormap=colormap, time_label=time_label,
                                     smoothing_steps=smoothing_steps,
                                     transparent=transparent, alpha=alpha,
                                     time_viewer=time_viewer,
                                     subjects_dir=subjects_dir, figure=figure,
                                     views=views, colorbar=colorbar, clim=clim)
@fill_doc
class MixedVectorSourceEstimate(_BaseVectorSourceEstimate,
                                _BaseMixedSourceEstimate):
    """Container for mixed surface and volume vector source estimates.

    Parameters
    ----------
    data : array, shape (n_dipoles, 3, n_times)
        The data in source space. Each dipole contains three vectors that
        denote the dipole strength in X, Y and Z directions over time.
    vertices : list of array, shape (n_src,)
        Vertex numbers corresponding to the data.
    tmin : scalar
        Time point of the first sample in data.
    tstep : scalar
        Time step between successive samples in data.
    subject : str | None
        The subject name. While not necessary, it is safer to set the
        subject parameter to avoid analysis errors.
    %(verbose)s

    Attributes
    ----------
    subject : str | None
        The subject name.
    times : array, shape (n_times,)
        The time vector.
    vertices : list of array
        Vertex numbers corresponding to the data, one array per source
        space.
    data : array of shape (n_dipoles, 3, n_times)
        The data in source space.
    shape : tuple
        The shape of the data.

    See Also
    --------
    MixedSourceEstimate : A container for mixed surface + volume source
        estimates.

    Notes
    -----
    .. versionadded:: 0.21.0
    """

    # NOTE(review): scalar counterpart — presumably used by the vector base
    # class when collapsing the 3 orientations to magnitudes; confirm.
    _scalar_class = MixedSourceEstimate
###############################################################################
# Morphing
def _get_vol_mask(src):
"""Get the volume source space mask."""
assert len(src) == 1 # not a mixed source space
shape = src[0]['shape'][::-1]
mask = np.zeros(shape, bool)
mask.flat[src[0]['vertno']] = True
return mask
def _spatio_temporal_src_adjacency_vol(src, n_times):
    """Spatio-temporal adjacency for a single volume source space."""
    from sklearn.feature_extraction import grid_to_graph
    # neighbor graph of the used voxels on the regular volume grid
    vol_mask = _get_vol_mask(src)
    spatial_edges = grid_to_graph(*vol_mask.shape, mask=vol_mask)
    return _get_adjacency_from_edges(spatial_edges, n_times)
def _spatio_temporal_src_adjacency_surf(src, n_times):
    """Build spatio-temporal adjacency from the triangulation of a surface src.

    Requires ``use_tris`` (i.e. an ico/oct-style downsampled surface); the
    triangles of both hemispheres are renumbered into one contiguous index
    space before building the graph.
    """
    if src[0]['use_tris'] is None:
        # XXX It would be nice to support non oct source spaces too...
        raise RuntimeError("The source space does not appear to be an ico "
                           "surface. adjacency cannot be extracted from"
                           " non-ico source spaces.")
    used_verts = [np.unique(s['use_tris']) for s in src]
    # per-hemisphere offsets so the second hemisphere's indices follow the
    # first's in the concatenated numbering
    offs = np.cumsum([0] + [len(u_v) for u_v in used_verts])[:-1]
    tris = np.concatenate([np.searchsorted(u_v, s['use_tris']) + off
                           for u_v, s, off in zip(used_verts, src, offs)])
    adjacency = spatio_temporal_tris_adjacency(tris, n_times)

    # deal with source space only using a subset of vertices
    masks = [np.in1d(u, s['vertno']) for s, u in zip(src, used_verts)]
    if sum(u.size for u in used_verts) != adjacency.shape[0] / n_times:
        raise ValueError('Used vertices do not match adjacency shape')
    if [np.sum(m) for m in masks] != [len(s['vertno']) for s in src]:
        raise ValueError('Vertex mask does not match number of vertices')
    masks = np.concatenate(masks)
    missing = 100 * float(len(masks) - np.sum(masks)) / len(masks)
    if missing:
        warn('%0.1f%% of original source space vertices have been'
             ' omitted, tri-based adjacency will have holes.\n'
             'Consider using distance-based adjacency or '
             'morphing data to all source space vertices.' % missing)
        # drop rows/columns of vertices not present in 'vertno', replicated
        # across all time points
        masks = np.tile(masks, n_times)
        masks = np.where(masks)[0]
        adjacency = adjacency.tocsr()
        adjacency = adjacency[masks]
        adjacency = adjacency[:, masks]
        # return to original format
        adjacency = adjacency.tocoo()
    return adjacency
@verbose
def spatio_temporal_src_adjacency(src, n_times, dist=None, verbose=None):
    """Compute adjacency for a source space activation over time.

    Parameters
    ----------
    src : instance of SourceSpaces
        The source space. It can be a surface source space or a
        volume source space.
    n_times : int
        Number of time instants.
    dist : float, or None
        Maximal geodesic distance (in m) between vertices in the
        source space to consider neighbors. If None, immediate neighbors
        are extracted from an ico surface.
    %(verbose)s

    Returns
    -------
    adjacency : ~scipy.sparse.coo_matrix
        The adjacency matrix describing the spatio-temporal
        graph structure. If N is the number of vertices in the
        source space, the N first nodes in the graph are the
        vertices at time 1, the nodes from 2 to 2N are the vertices
        during time 2, etc.
    """
    # XXX we should compute adjacency for each source space and then
    # use scipy.sparse.block_diag to concatenate them
    if src[0]['type'] == 'vol':
        # volume grids have no geodesic distances
        if dist is not None:
            raise ValueError('dist must be None for a volume '
                             'source space. Got %s.' % dist)
        return _spatio_temporal_src_adjacency_vol(src, n_times)
    if dist is not None:
        # use distances computed and saved in the source space file
        return spatio_temporal_dist_adjacency(src, n_times, dist)
    return _spatio_temporal_src_adjacency_surf(src, n_times)
@verbose
def grade_to_tris(grade, verbose=None):
    """Get tris defined for a certain grade.

    Parameters
    ----------
    grade : int
        Grade of an icosahedral mesh.
    %(verbose)s

    Returns
    -------
    tris : list
        2-element list containing Nx3 arrays of tris, suitable for use in
        spatio_temporal_tris_adjacency.
    """
    hemi_tris = _get_ico_tris(grade, None, False)
    # duplicate the single-hemisphere triangulation, shifting the vertex
    # indices of the second copy past the first
    return np.concatenate((hemi_tris, hemi_tris + (np.max(hemi_tris) + 1)))
@verbose
def spatio_temporal_tris_adjacency(tris, n_times, remap_vertices=False,
                                   verbose=None):
    """Compute adjacency from triangles and time instants.

    Parameters
    ----------
    tris : array
        N x 3 array defining triangles.
    n_times : int
        Number of time points.
    remap_vertices : bool
        Reassign vertex indices based on unique values. Useful
        to process a subset of triangles. Defaults to False.
    %(verbose)s

    Returns
    -------
    adjacency : ~scipy.sparse.coo_matrix
        The adjacency matrix describing the spatio-temporal
        graph structure. If N is the number of vertices in the
        source space, the N first nodes in the graph are the
        vertices at time 1, the nodes from 2 to 2N are the vertices
        during time 2, etc.
    """
    if remap_vertices:
        logger.info('Reassigning vertex indices.')
        tris = np.searchsorted(np.unique(tris), tris)

    edge_mat = mesh_edges(tris)
    # add self-edges so each vertex is adjacent to itself
    edge_mat = (edge_mat + sparse.eye(edge_mat.shape[0], format='csr')).tocoo()
    return _get_adjacency_from_edges(edge_mat, n_times)
@verbose
def spatio_temporal_dist_adjacency(src, n_times, dist, verbose=None):
    """Compute adjacency from distances in a source space and time instants.

    Parameters
    ----------
    src : instance of SourceSpaces
        The source space must have distances between vertices computed, such
        that src['dist'] exists and is useful. This can be obtained
        with a call to :func:`mne.setup_source_space` with the
        ``add_dist=True`` option.
    n_times : int
        Number of time points.
    dist : float
        Maximal geodesic distance (in m) between vertices in the
        source space to consider neighbors.
    %(verbose)s

    Returns
    -------
    adjacency : ~scipy.sparse.coo_matrix
        The adjacency matrix describing the spatio-temporal
        graph structure. If N is the number of vertices in the
        source space, the N first nodes in the graph are the
        vertices at time 1, the nodes from 2 to 2N are the vertices
        during time 2, etc.
    """
    if src[0]['dist'] is None:
        raise RuntimeError('src must have distances included, consider using '
                           'setup_source_space with add_dist=True')
    blocks = [s['dist'][s['vertno'], :][:, s['vertno']] for s in src]
    # Ensure we keep explicit zeros; deal with changes in SciPy
    for block in blocks:
        if isinstance(block, np.ndarray):
            block[block == 0] = -np.inf
        else:
            # BUG FIX: this previously read ``block.data[...] == -1`` — a
            # no-op comparison whose result was discarded.  Assign the
            # negative sentinel so explicit zero distances survive
            # block_diag/thresholding below (-1 <= dist is always True).
            block.data[block.data == 0] = -1
    edges = sparse_block_diag(blocks)
    edges.data[:] = np.less_equal(edges.data, dist)
    # clean it up and put it in coo format
    edges = edges.tocsr()
    edges.eliminate_zeros()
    edges = edges.tocoo()
    return _get_adjacency_from_edges(edges, n_times)
@verbose
def spatial_src_adjacency(src, dist=None, verbose=None):
    """Compute adjacency for a source space activation.

    Parameters
    ----------
    src : instance of SourceSpaces
        The source space. It can be a surface source space or a
        volume source space.
    dist : float, or None
        Maximal geodesic distance (in m) between vertices in the
        source space to consider neighbors. If None, immediate neighbors
        are extracted from an ico surface.
    %(verbose)s

    Returns
    -------
    adjacency : ~scipy.sparse.coo_matrix
        The adjacency matrix describing the spatial graph structure.
    """
    # single time point -> purely spatial graph
    return spatio_temporal_src_adjacency(src, n_times=1, dist=dist)
@verbose
def spatial_tris_adjacency(tris, remap_vertices=False, verbose=None):
    """Compute adjacency from triangles.

    Parameters
    ----------
    tris : array
        N x 3 array defining triangles.
    remap_vertices : bool
        Reassign vertex indices based on unique values. Useful
        to process a subset of triangles. Defaults to False.
    %(verbose)s

    Returns
    -------
    adjacency : ~scipy.sparse.coo_matrix
        The adjacency matrix describing the spatial graph structure.
    """
    # single time point -> purely spatial graph
    return spatio_temporal_tris_adjacency(tris, 1,
                                          remap_vertices=remap_vertices)
@verbose
def spatial_dist_adjacency(src, dist, verbose=None):
    """Compute adjacency from distances in a source space.

    Parameters
    ----------
    src : instance of SourceSpaces
        The source space must have distances between vertices computed, such
        that src['dist'] exists and is useful. This can be obtained
        with a call to :func:`mne.setup_source_space` with the
        ``add_dist=True`` option.
    dist : float
        Maximal geodesic distance (in m) between vertices in the
        source space to consider neighbors.
    %(verbose)s

    Returns
    -------
    adjacency : ~scipy.sparse.coo_matrix
        The adjacency matrix describing the spatial graph structure.
    """
    # single time point -> purely spatial graph
    return spatio_temporal_dist_adjacency(src, 1, dist=dist)
@verbose
def spatial_inter_hemi_adjacency(src, dist, verbose=None):
    """Get vertices on each hemisphere that are close to the other hemisphere.

    Parameters
    ----------
    src : instance of SourceSpaces
        The source space. Must be surface type.
    dist : float
        Maximal Euclidean distance (in m) between vertices in one hemisphere
        compared to the other to consider neighbors.
    %(verbose)s

    Returns
    -------
    adjacency : ~scipy.sparse.coo_matrix
        The adjacency matrix describing the spatial graph structure.
        Typically this should be combined (addititively) with another
        existing intra-hemispheric adjacency matrix, e.g. computed
        using geodesic distances.
    """
    from scipy.spatial.distance import cdist
    src = _ensure_src(src, kind='surface')
    # Euclidean distances between every LH and every RH source vertex
    cross = cdist(src[0]['rr'][src[0]['vertno']],
                  src[1]['rr'][src[1]['vertno']])
    cross = sparse.csr_matrix(cross <= dist, dtype=int)
    # assemble the symmetric block matrix [[0, cross], [cross.T, 0]]
    zero_blocks = [sparse.csr_matrix((nv, nv), dtype=int)
                   for nv in cross.shape]
    return sparse.vstack([sparse.hstack([zero_blocks[0], cross]),
                          sparse.hstack([cross.T, zero_blocks[1]])])
@verbose
def _get_adjacency_from_edges(edges, n_times, verbose=None):
    """Given edges sparse matrix, create adjacency matrix.

    Replicates the spatial edge set at each of ``n_times`` time points and,
    for ``n_times > 1``, links each vertex to itself at adjacent time points.
    """
    n_vertices = edges.shape[0]
    logger.info("-- number of adjacent vertices : %d" % n_vertices)
    n_edges = edges.col.size
    # per-time-point index offsets, one row per time point
    offsets = n_vertices * np.tile(np.arange(n_times)[:, None], (1, n_edges))
    col = (edges.col[None, :] + offsets).ravel()
    row = (edges.row[None, :] + offsets).ravel()
    if n_times > 1:  # add temporal edges
        o = (n_vertices * np.arange(n_times - 1)[:, None] +
             np.arange(n_vertices)[None, :]).ravel()
        d = o + n_vertices  # same vertices one time step later
        row = np.concatenate((row, o, d))
        col = np.concatenate((col, d, o))
    data = np.ones(edges.data.size * n_times + 2 * n_vertices * (n_times - 1),
                   dtype=np.int64)
    return coo_matrix((data, (row, col)),
                      shape=(n_times * n_vertices,) * 2)
@verbose
def _get_ico_tris(grade, verbose=None, return_surf=False):
    """Get triangles (or the whole surface dict) for an ico surface."""
    ico = _get_ico_surface(grade)
    return ico if return_surf else ico['tris']
def _pca_flip(flip, data):
U, s, V = linalg.svd(data, full_matrices=False)
# determine sign-flip
sign = np.sign(np.dot(U[:, 0], flip))
# use average power in label for scaling
scale = linalg.norm(s) / np.sqrt(len(data))
return sign * scale * V[0]
# Aggregation modes for label time-course extraction.  Each value maps
# (flip, data) -> a 1D time course; ``flip`` is ignored by 'mean' and 'max'
# and used for sign alignment by the *_flip modes.
_label_funcs = {
    'mean': lambda flip, data: np.mean(data, axis=0),
    'mean_flip': lambda flip, data: np.mean(flip * data, axis=0),
    'max': lambda flip, data: np.max(np.abs(data), axis=0),
    'pca_flip': _pca_flip,
}
@contextlib.contextmanager
def _temporary_vertices(src, vertices):
orig_vertices = [s['vertno'] for s in src]
for s, v in zip(src, vertices):
s['vertno'] = v
try:
yield
finally:
for s, v in zip(src, orig_vertices):
s['vertno'] = v
def _prepare_label_extraction(stc, labels, src, mode, allow_empty, use_sparse):
    """Prepare indices and flips for extract_label_time_course.

    Returns, per label, the row indices of ``stc.data`` belonging to that
    label (or a CSR interpolation matrix in the sparse/volume case, or None
    for empty labels) and the corresponding sign-flip vectors (None when the
    mode does not need them).
    """
    # if src is a mixed src space, the first 2 src spaces are surf type and
    # the other ones are vol type. For mixed source space n_labels will be the
    # given by the number of ROIs of the cortical parcellation plus the number
    # of vol src space
    from .label import label_sign_flip, Label, BiHemiLabel

    # get vertices from source space, they have to be the same as in the stcs
    vertno = stc.vertices
    nvert = [len(vn) for vn in vertno]

    # do the initialization
    label_vertidx = list()
    label_flip = list()
    # sanity check: every stc vertex must exist in the source space
    for s, v, hemi in zip(src, stc.vertices, ('left', 'right')):
        n_missing = (~np.in1d(v, s['vertno'])).sum()
        if n_missing:
            raise ValueError('%d/%d %s hemisphere stc vertices missing from '
                             'the source space, likely mismatch'
                             % (n_missing, len(v), hemi))
    bad_labels = list()
    for li, label in enumerate(labels):
        if use_sparse:
            # volume case: "labels" are dicts carrying an interpolation CSR
            assert isinstance(label, dict)
            # This can happen if some labels aren't present in the space
            if label['csr'].shape[0] == 0:
                bad_labels.append(label['name'])
                label_vertidx.append(None)
            else:
                label_vertidx.append(label['csr'])
            label_flip.append(None)
            continue
        # standard case
        _validate_type(label, (Label, BiHemiLabel), 'labels[%d]' % (li,))

        if label.hemi == 'both':
            # handle BiHemiLabel
            sub_labels = [label.lh, label.rh]
        else:
            sub_labels = [label]
        this_vertidx = list()
        for slabel in sub_labels:
            if slabel.hemi == 'lh':
                this_vertices = np.intersect1d(vertno[0], slabel.vertices)
                vertidx = np.searchsorted(vertno[0], this_vertices)
            elif slabel.hemi == 'rh':
                this_vertices = np.intersect1d(vertno[1], slabel.vertices)
                # RH rows follow all LH rows in stc.data
                vertidx = nvert[0] + np.searchsorted(vertno[1], this_vertices)
            else:
                raise ValueError('label %s has invalid hemi' % label.name)
            this_vertidx.append(vertidx)

        # convert it to an array
        this_vertidx = np.concatenate(this_vertidx)
        this_flip = None
        if len(this_vertidx) == 0:
            bad_labels.append(label.name)
            this_vertidx = None  # to later check if label is empty
        elif mode not in ('mean', 'max'):  # mode-dependent initialization
            # label_sign_flip uses two properties:
            #
            # - src[ii]['nn']
            # - src[ii]['vertno']
            #
            # So if we override vertno with the stc vertices, it will pick
            # the correct normals.
            with _temporary_vertices(src, stc.vertices):
                this_flip = label_sign_flip(label, src[:2])[:, None]

        label_vertidx.append(this_vertidx)
        label_flip.append(this_flip)

    if len(bad_labels):
        msg = ('source space does not contain any vertices for %d label%s:\n%s'
               % (len(bad_labels), _pl(bad_labels), bad_labels))
        if not allow_empty:
            raise ValueError(msg)
        else:
            msg += '\nAssigning all-zero time series.'
            if allow_empty == 'ignore':
                logger.info(msg)
            else:
                warn(msg)

    return label_vertidx, label_flip
def _volume_labels(src, labels, trans, mri_resolution):
    """Convert a volume atlas spec into per-region labels for extraction.

    ``labels`` is either a path to an atlas MRI (region names inferred from
    the FreeSurfer LUT) or a ``(mri, labels_dict)`` pair.  Returns a list of
    dicts with interpolation CSR matrices (``mri_resolution=True``) or a
    list of Label objects using nearest-voxel atlas values.
    """
    # This will create Label objects that should do the right thing for our
    # given volumetric source space when used with extract_label_time_course
    from .label import Label
    assert src.kind == 'volume'
    extra = ' when using a volume source space'
    _import_nibabel('use volume atlas labels')
    _validate_type(labels, ('path-like', list, tuple), 'labels' + extra)
    if _check_path_like(labels):
        mri = labels
        infer_labels = True
    else:
        if len(labels) != 2:
            raise ValueError('labels, if list or tuple, must have length 2, '
                             'got %s' % (len(labels),))
        mri, labels = labels
        infer_labels = False
        _validate_type(mri, 'path-like', 'labels[0]' + extra)
    logger.info('Reading atlas %s' % (mri,))
    vol_info = _get_mri_info_data(str(mri), data=True)
    atlas_values = np.unique(vol_info['data'])
    atlas_values = atlas_values[np.isfinite(atlas_values)]
    if not (atlas_values == np.round(atlas_values)).all():
        raise RuntimeError('Non-integer values present in atlas, cannot '
                           'labelize')
    atlas_values = np.round(atlas_values).astype(np.int64)
    if infer_labels:
        # restrict the FreeSurfer LUT to regions actually present in the atlas
        labels = {
            k: v for k, v in read_freesurfer_lut()[0].items()
            if v in atlas_values}
    labels = _check_volume_labels(labels, mri, name='labels[1]')
    assert isinstance(labels, dict)
    del atlas_values

    # check for necessary transforms
    vox_mri_t = vol_info['vox_mri_t']
    want = src[0].get('vox_mri_t', None)
    if want is None:
        raise RuntimeError(
            'Cannot use volumetric atlas if no mri was supplied during '
            'source space creation')
    vox_mri_t, want = vox_mri_t['trans'], want['trans']
    if not np.allclose(vox_mri_t, want, atol=1e-6):
        raise RuntimeError(
            'atlas vox_mri_t does not match that used to create the source '
            'space')
    src_shape = tuple(src[0]['mri_' + k] for k in ('width', 'height', 'depth'))
    atlas_shape = vol_info['data'].shape
    if atlas_shape != src_shape:
        raise RuntimeError('atlas shape %s does not match source space MRI '
                           'shape %s' % (atlas_shape, src_shape))
    if mri_resolution:
        # Upsample then just index
        out_labels = list()
        nnz = 0
        interp = src[0]['interpolator']
        # should be guaranteed by size checks above and our src interp code
        assert interp.shape[0] == np.prod(src_shape)
        assert interp.shape == (vol_info['data'].size, len(src[0]['rr']))
        interp = interp[:, src[0]['vertno']]
        for k, v in labels.items():
            mask = vol_info['data'].ravel(order='F') == v
            csr = interp[mask]
            out_labels.append(dict(csr=csr, name=k))
            nnz += csr.shape[0] > 0
    else:
        # Use nearest values
        vertno = src[0]['vertno']
        rr = src[0]['rr']
        if src[0]['coord_frame'] != FIFF.FIFFV_COORD_MRI:
            trans, _ = _get_trans(trans, 'head', 'mri', allow_none=False)
            rr = apply_trans(trans, rr)
        del src
        # NOTE: previously the atlas lookup was also performed once on the
        # *untransformed* rr and immediately overwritten (a dead store);
        # that redundant computation has been removed.
        src_values = _get_atlas_values(vol_info, rr[vertno])
        vertices = [vertno[src_values == val] for val in labels.values()]
        out_labels = [Label(v, hemi='lh', name=val)
                      for v, val in zip(vertices, labels.keys())]
        nnz = sum(len(v) != 0 for v in vertices)
    logger.info('%d/%d atlas regions had at least one vertex '
                'in the source space' % (nnz, len(out_labels)))
    return out_labels
def _gen_extract_label_time_course(stcs, labels, src, mode='mean',
                                   allow_empty=False, trans=None,
                                   mri_resolution=True, verbose=None):
    """Yield one (n_labels, ...) time-course array per source estimate.

    Generator backing extract_label_time_course; label indices/flips are
    computed once from the first stc and reused for the rest.
    """
    # loop through source estimates and extract time series
    _validate_type(src, SourceSpaces)
    _check_option('mode', mode, sorted(_label_funcs.keys()) + ['auto'])
    kind = src.kind
    if kind in ('surface', 'mixed'):
        if not isinstance(labels, list):
            labels = [labels]
        use_sparse = False
    else:
        # volume source space: convert the atlas spec into labels
        labels = _volume_labels(src, labels, trans, mri_resolution)
        use_sparse = bool(mri_resolution)
    n_mode = len(labels)  # how many processed with the given mode
    n_mean = len(src[2:]) if kind == 'mixed' else 0
    n_labels = n_mode + n_mean
    vertno = func = None
    for si, stc in enumerate(stcs):
        _validate_type(stc, _BaseSourceEstimate, 'stcs[%d]' % (si,),
                       'source estimate')
        if isinstance(stc, (_BaseVolSourceEstimate,
                            _BaseVectorSourceEstimate)):
            # flips are undefined for volume/vector data
            _check_option(
                'mode', mode, ('mean', 'max', 'auto'),
                'when using a vector and/or volume source estimate')
            mode = 'mean' if mode == 'auto' else mode
        else:
            mode = 'mean_flip' if mode == 'auto' else mode
        if vertno is None:
            # first stc: set up indices/flips once
            vertno = copy.deepcopy(stc.vertices)  # avoid keeping a ref
            nvert = np.array([len(v) for v in vertno])
            label_vertidx, src_flip = _prepare_label_extraction(
                stc, labels, src, mode, allow_empty, use_sparse)
            func = _label_funcs[mode]
        # make sure the stc is compatible with the source space
        if len(vertno) != len(stc.vertices):
            raise ValueError('stc not compatible with source space')
        for vn, svn in zip(vertno, stc.vertices):
            if len(vn) != len(svn):
                raise ValueError('stc not compatible with source space. '
                                 'stc has %s time series but there are %s '
                                 'vertices in source space. Ensure you used '
                                 'src from the forward or inverse operator, '
                                 'as forward computation can exclude vertices.'
                                 % (len(svn), len(vn)))
            if not np.array_equal(svn, vn):
                raise ValueError('stc not compatible with source space')

        logger.info('Extracting time courses for %d labels (mode: %s)'
                    % (n_labels, mode))

        # do the extraction
        label_tc = np.zeros((n_labels,) + stc.data.shape[1:],
                            dtype=stc.data.dtype)
        for i, (vertidx, flip) in enumerate(zip(label_vertidx, src_flip)):
            if vertidx is not None:
                if isinstance(vertidx, sparse.csr_matrix):
                    # sparse/volume case: vertidx is an interpolation matrix
                    assert mri_resolution
                    assert vertidx.shape[1] == stc.data.shape[0]
                    this_data = np.reshape(stc.data, (stc.data.shape[0], -1))
                    this_data = vertidx * this_data
                    this_data.shape = \
                        (this_data.shape[0],) + stc.data.shape[1:]
                else:
                    this_data = stc.data[vertidx]
                label_tc[i] = func(flip, this_data)

        # extract label time series for the vol src space (only mean supported)
        offset = nvert[:-n_mean].sum()  # effectively :2 or :0
        for i, nv in enumerate(nvert[2:]):
            if nv != 0:
                v2 = offset + nv
                label_tc[n_mode + i] = np.mean(stc.data[offset:v2], axis=0)
                offset = v2

        # this is a generator!
        yield label_tc
@verbose
def extract_label_time_course(stcs, labels, src, mode='auto',
                              allow_empty=False, return_generator=False,
                              trans=None, mri_resolution=True,
                              verbose=None):
    """Extract label time course for lists of labels and source estimates.

    This function will extract one time course for each label and source
    estimate. The way the time courses are extracted depends on the mode
    parameter (see Notes).

    Parameters
    ----------
    stcs : SourceEstimate | list (or generator) of SourceEstimate
        The source estimates from which to extract the time course.
    %(eltc_labels)s
    %(eltc_src)s
    %(eltc_mode)s
    %(eltc_allow_empty)s
    return_generator : bool
        If True, a generator instead of a list is returned.
    %(trans_not_none)s
    %(eltc_mri_resolution)s
    %(verbose)s

    Returns
    -------
    %(eltc_returns)s

    Notes
    -----
    %(eltc_mode_notes)s

    If encountering a ``ValueError`` due to mismatch between number of
    source points in the subject source space and computed ``stc`` object set
    ``src`` argument to ``fwd['src']`` or ``inv['src']`` to ensure the source
    space is the one actually used by the inverse to compute the source
    time courses.
    """
    # convert inputs to lists
    if not isinstance(stcs, (list, tuple, GeneratorType)):
        # single stc: force eager evaluation and unwrap the result below
        stcs = [stcs]
        return_several = False
        return_generator = False
    else:
        return_several = True
    label_tc = _gen_extract_label_time_course(
        stcs, labels, src, mode=mode, allow_empty=allow_empty,
        trans=trans, mri_resolution=mri_resolution)
    if not return_generator:
        # do the extraction and return a list
        label_tc = list(label_tc)
    if not return_several:
        # input was a single SourceEstimate, return single array
        label_tc = label_tc[0]
    return label_tc
|
Teekuningas/mne-python
|
mne/source_estimate.py
|
Python
|
bsd-3-clause
| 119,565
|
[
"Mayavi"
] |
2e7552617bca38dd24ca89fa742eace109827df5668f60b65938618f1c7dd208
|
# lintory - keep track of computers and licenses
# Copyright (C) 2008-2009 Brian May
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
VPAC/lintory
|
lintory/backends/__init__.py
|
Python
|
gpl-3.0
| 721
|
[
"Brian"
] |
a5d3fddc7e631edd413d6b0dc907a8571503aca03426f6e062f48acc8d89dd9a
|
# -*- coding: utf-8 -*-
# Copyright (C) 2017 by Pedro Mendes, Virginia Tech Intellectual
# Properties, Inc., University of Heidelberg, and University of
# of Connecticut School of Medicine.
# All rights reserved.
# Copyright (C) 2010 - 2016 by Pedro Mendes, Virginia Tech Intellectual
# Properties, Inc., University of Heidelberg, and The University
# of Manchester.
# All rights reserved.
# Copyright (C) 2008 - 2009 by Pedro Mendes, Virginia Tech Intellectual
# Properties, Inc., EML Research, gGmbH, University of Heidelberg,
# and The University of Manchester.
# All rights reserved.
# Copyright (C) 2006 - 2007 by Pedro Mendes, Virginia Tech Intellectual
# Properties, Inc. and EML Research, gGmbH.
# All rights reserved.
import COPASI
import unittest
from types import *
class Test_CDataString(unittest.TestCase):
    """Unit tests for the COPASI.CDataString wrapper."""

    def setUp(self):
        # plain Python value and the COPASI wrapper object under test
        self.s = "this_is_a_test"
        self.string = COPASI.CDataString(self.s)

    def test_getObjectDisplayName(self):
        # display name is the wrapped value surrounded by single quotes
        st = self.string.getObjectDisplayName()
        # FIX: was ``self.assert_(type(st) == StringType)`` — ``assert_`` is a
        # deprecated alias and ``StringType`` no longer exists in Python 3.
        self.assertIsInstance(st, str)
        self.assertEqual(st, "'" + self.s + "'")

    def test_getStaticString(self):
        # static string round-trips the original value unchanged
        st = self.string.getStaticString()
        self.assertIsInstance(st, str)
        self.assertEqual(st, self.s)
def suite():
    """Assemble the CDataString test cases into a TestSuite."""
    case_names = [
        'test_getObjectDisplayName',
        'test_getStaticString',
    ]
    return unittest.TestSuite(Test_CDataString(name) for name in case_names)


if __name__ == '__main__':
    unittest.TextTestRunner(verbosity=2).run(suite())
|
jonasfoe/COPASI
|
copasi/bindings/python/unittests/Test_CCopasiStaticString.py
|
Python
|
artistic-2.0
| 1,477
|
[
"COPASI"
] |
80e7aeec44e217ee16bc5bb38888badb92fad0b2f0a23392ba451b58c8c1ea6b
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.