# NOTE: dataset-export artifact removed (markdown table header "text stringlengths ... |---|")
import os
from yaml import CLoader as Loader
from importlib.machinery import SourceFileLoader
import importlib.util

# Load the shared molecule test helpers by file path.
# BUGFIX/modernization: Loader.load_module() is deprecated since Python 3.4
# and removed in 3.12; construct the module through importlib.util instead.
_utils_loader = SourceFileLoader("utils", "./molecule/common/tests/utils.py")
_utils_spec = importlib.util.spec_from_loader("utils", _utils_loader)
utils = importlib.util.module_from_spec(_utils_spec)
_utils_loader.exec_module(utils)

# Hosts that testinfra should run these tests against.
testinfra_hosts = utils.get_testinfra_hosts()
def check_conf_file(conf_file, instance_id, conf):
    """Assert that a YAML config file is owned by tarantool and holds `conf` under `instance_id`."""
    assert conf_file.exists
    assert conf_file.user == 'tarantool'
    assert conf_file.group == 'tarantool'

    parsed = Loader(conf_file.content_string).get_data()
    assert instance_id in parsed
    assert parsed[instance_id] == conf
def test_systemd_services(host):
    """Check every instance's systemd unit: running+enabled unless the instance is expelled."""
    app_name = utils.get_app_name()
    machine_instances = utils.get_machine_instances(host)
    assert machine_instances

    for instance in machine_instances:
        instance_vars = utils.get_instance_vars(instance)

        # Stateboard has a dedicated unit; regular instances use the templated one.
        if utils.instance_is_stateboard(instance_vars):
            unit_name = '%s-stateboard' % app_name
        else:
            unit_name = '%s@%s' % (app_name, instance_vars['inventory_hostname'])

        unit = host.service(unit_name)
        if utils.instance_is_expelled(instance_vars):
            assert not unit.is_running
            assert not unit.is_enabled
        else:
            assert unit.is_running
            assert unit.is_enabled
def test_dirs(host):
    """Check distribution, config, run and data directories for every instance on the host."""
    app_name = utils.get_app_name()
    machine_instances = utils.get_machine_instances(host)
    assert machine_instances

    for instance in machine_instances:
        instance_vars = utils.get_instance_vars(instance)
        instance_id = utils.get_instance_id(app_name, instance_vars)

        conf_dir = instance_vars.get('cartridge_conf_dir', '/etc/tarantool/conf.d')
        run_dir = instance_vars.get('cartridge_run_dir', '/var/run/tarantool')
        data_dir = instance_vars.get('cartridge_data_dir', '/var/lib/tarantool')
        install_dir = instance_vars.get('cartridge_app_install_dir', '/usr/share/tarantool')
        instances_dir = instance_vars.get('cartridge_app_instances_dir', '/usr/share/tarantool')
        multiversion = instance_vars.get('cartridge_multiversion', False)

        if multiversion:
            # In multiversion mode the dist dir is named after the package
            # file with its extension stripped (handling two-part ".tar.gz").
            package_basename = os.path.basename(instance_vars.get('cartridge_package_path'))
            name_version, ext = os.path.splitext(package_basename)
            if ext == '.gz' and name_version.endswith('.tar'):
                name_version = os.path.splitext(name_version)[0]
            dist_dir_path = os.path.join(install_dir, name_version)
        else:
            dist_dir_path = os.path.join(install_dir, app_name)

        assert host.file(dist_dir_path).exists

        if multiversion:
            # Each instance gets a symlink pointing at the dist dir it runs.
            instance_dist_dir = host.file(os.path.join(instances_dir, instance_id))
            assert instance_dist_dir.exists
            assert instance_dist_dir.is_symlink
            assert instance_dist_dir.linked_to == dist_dir_path

        conf_file = host.file(os.path.join(conf_dir, '%s.yml' % instance_id))
        default_conf_file = host.file(os.path.join(conf_dir, '%s.yml' % app_name))
        pid_file = host.file(os.path.join(run_dir, '%s.pid' % instance_id))
        console_sock_file = host.file(os.path.join(run_dir, '%s.control' % instance_id))
        work_dir_file = host.file(os.path.join(data_dir, instance_id))

        if utils.instance_is_expelled(instance_vars):
            # Expelled instances should leave no runtime artifacts behind.
            assert not conf_file.exists
            assert not pid_file.exists
            assert not console_sock_file.exists
            assert not work_dir_file.exists
        else:
            assert conf_file.exists
            assert default_conf_file.exists
            assert console_sock_file.exists
            assert work_dir_file.exists
def test_configs(host):
    """Check the contents of per-instance and app-wide YAML configuration files."""
    app_name = utils.get_app_name()
    machine_instances = utils.get_machine_instances(host)
    assert machine_instances

    default_conf = utils.get_cluster_var('cartridge_defaults', default={})
    # Unless explicitly disabled, the cluster cookie is stored in the app config.
    if not utils.get_cluster_var('cartridge_not_save_cookie_in_app_config', False):
        default_conf.update(cluster_cookie=utils.get_cluster_cookie())

    for instance in machine_instances:
        instance_vars = utils.get_instance_vars(instance)
        instance_id = utils.get_instance_id(app_name, instance_vars)

        instance_conf = instance_vars['config']
        # Substitute the unresolved Jinja template for the shared memtx value.
        if instance_conf.get('memtx_memory') == '{{ common_memtx_memory }}':
            instance_conf['memtx_memory'] = 268436000

        conf_dir = instance_vars.get('cartridge_conf_dir', '/etc/tarantool/conf.d')
        instance_conf_file = host.file(os.path.join(conf_dir, '%s.yml' % instance_id))
        app_conf_file = host.file(os.path.join(conf_dir, '%s.yml' % app_name))

        if not utils.instance_is_expelled(instance_vars):
            check_conf_file(instance_conf_file, instance_id, instance_conf)
            check_conf_file(app_conf_file, app_name, default_conf)
def test_instances():
    """Check that configured (non-expelled, non-stateboard) instances match started cluster members."""
    configured_instances = utils.get_configured_instances()

    # Select one instance to be control and ask it for all started servers.
    admin_api_url = utils.get_admin_api_url()
    query = '''
        query {
          servers {
            uri
            alias
            zone
          }
        }
    '''
    session = utils.get_authorized_session()
    response = session.post(admin_api_url, json={'query': query})
    data = utils.get_response_data(response)
    started_instances = {srv['alias']: srv for srv in data['servers']}

    # Expelled instances and the stateboard never join the cluster.
    configured_instances = {
        name: cfg for name, cfg in configured_instances.items()
        if not utils.instance_is_expelled(cfg) and not utils.instance_is_stateboard(cfg)
    }

    # Every configured instance must be started, with matching URI and zone.
    assert len(configured_instances) == len(started_instances)
    assert set(configured_instances) == set(started_instances)
    for name, cfg in configured_instances.items():
        assert cfg['config']['advertise_uri'] == started_instances[name]['uri']
        assert cfg.get('zone') == started_instances[name]['zone']
# ---- (extraction artifact: file boundary) ----
import socket
import struct

# Proxy protocol selectors accepted by setproxy()/setdefaultproxy().
PROXY_TYPE_SOCKS4 = 1
PROXY_TYPE_SOCKS5 = 2
PROXY_TYPE_HTTP = 3

# Module-wide default proxy tuple applied to every new socksocket (set via setdefaultproxy).
_defaultproxy = None
# Keep a reference to the real socket class so socksocket can call through to it.
_orgsocket = socket.socket
class ProxyError(Exception):
    """Base class for all errors raised by this SOCKS/HTTP proxy module."""
    def __init__(self, value):
        # value is conventionally a (code, message) tuple.
        self.value = value
    def __str__(self):
        return repr(self.value)
class GeneralProxyError(ProxyError):
    """Protocol-level failure not specific to one proxy type.

    The duplicated __init__/__str__ were byte-identical to ProxyError's,
    so they are inherited instead of redefined.
    """
    pass
class Socks5AuthError(ProxyError):
    """SOCKS5 authentication failed or was rejected (see _socks5autherrors).

    Inherits ProxyError's behavior; the previous redundant method copies
    were removed.
    """
    pass
class Socks5Error(ProxyError):
    """SOCKS5 server rejected the connect request (see _socks5errors).

    Inherits ProxyError's behavior; the previous redundant method copies
    were removed.
    """
    pass
class Socks4Error(ProxyError):
    """SOCKS4 server rejected the connect request (see _socks4errors).

    Inherits ProxyError's behavior; the previous redundant method copies
    were removed.
    """
    pass
class HTTPError(ProxyError):
    """HTTP CONNECT proxy replied with a non-200 status.

    Inherits ProxyError's behavior; the previous redundant method copies
    were removed.
    """
    pass
# Human-readable messages, indexed by the error codes raised in this module.
_generalerrors = ('success', 'invalid data', 'not connected', 'not available', 'bad proxy type', 'bad input')
# SOCKS5 reply codes 0-8 (RFC 1928), plus a catch-all at index 9.
_socks5errors = ('succeeded', 'general SOCKS server failure', 'connection not allowed by ruleset', 'Network unreachable', 'Host unreachable', 'Connection refused', 'TTL expired', 'Command not supported', 'Address type not supported', 'Unknown error')
# Username/password sub-negotiation outcomes (RFC 1929).
_socks5autherrors = ('succeeded', 'authentication is required', 'all offered authentication methods were rejected', 'unknown username or invalid password', 'unknown error')
# SOCKS4 reply codes 90-93, offset by -90 when indexing.
_socks4errors = ('request granted', 'request rejected or failed', 'request rejected because SOCKS server cannot connect to identd on the client', 'request rejected because the client program and identd report different user-ids', 'unknown error')
def setdefaultproxy(proxytype=None, addr=None, port=None, rdns=True, username=None, password=None):
    """Set the default proxy applied to every subsequently created socksocket.

    BUGFIX: the `password` default had been replaced by a `<PASSWORD>`
    redaction placeholder, which is a syntax error; restored to None.

    :param proxytype: PROXY_TYPE_SOCKS4, PROXY_TYPE_SOCKS5 or PROXY_TYPE_HTTP
    :param addr: proxy server hostname or IP
    :param port: proxy server port (protocol default used when None)
    :param rdns: resolve target hostnames remotely through the proxy
    :param username: optional credential (SOCKS only)
    :param password: optional credential (SOCKS only)
    """
    global _defaultproxy
    _defaultproxy = (proxytype, addr, port, rdns, username, password)
class socksocket(socket.socket):
    """socket.socket drop-in that tunnels connections through a SOCKS4/SOCKS5/HTTP proxy.

    NOTE(review): this module is Python 2 code -- it mixes str literals with
    data read from the network; a bytes/str port would be needed for Python 3.
    """

    def __init__(self, family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0, _sock=None):
        _orgsocket.__init__(self, family, type, proto, _sock)
        # Use the module-wide default proxy when one was configured.
        if _defaultproxy != None:
            self.__proxy = _defaultproxy
        else:
            self.__proxy = (None, None, None, None, None, None)
        self.__proxysockname = None
        self.__proxypeername = None
        return

    def __recvall(self, bytes):
        """Receive exactly `bytes` bytes, looping over recv() as needed."""
        data = ''
        while len(data) < bytes:
            data = data + self.recv(bytes - len(data))
        return data

    def setproxy(self, proxytype=None, addr=None, port=None, rdns=True, username=None, password=None):
        """Set the proxy for this socket (same parameters as setdefaultproxy).

        BUGFIX: the `password` default had been replaced by a `<PASSWORD>`
        redaction placeholder (a syntax error); restored to None.
        """
        self.__proxy = (proxytype, addr, port, rdns, username, password)

    def __negotiatesocks5(self, destaddr, destport):
        """Perform the SOCKS5 handshake (RFC 1928/1929) for destaddr:destport."""
        # Offer username/password auth only when credentials were supplied.
        if self.__proxy[4] != None and self.__proxy[5] != None:
            self.sendall('\x05\x02\x00\x02')
        else:
            self.sendall('\x05\x01\x00')
        chosenauth = self.__recvall(2)
        if chosenauth[0] != '\x05':
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        if chosenauth[1] == '\x00':
            # No authentication required.
            pass
        elif chosenauth[1] == '\x02':
            # Username/password sub-negotiation (RFC 1929).
            # BUGFIX: was `self.proxy[5]` (no such attribute) -> `self.__proxy[5]`.
            self.sendall('\x01' + chr(len(self.__proxy[4])) + self.__proxy[4] + chr(len(self.__proxy[5])) + self.__proxy[5])
            authstat = self.__recvall(2)
            if authstat[0] != '\x01':
                self.close()
                raise GeneralProxyError((1, _generalerrors[1]))
            if authstat[1] != '\x00':
                self.close()
                # BUGFIX: was Python-2-only `raise Cls, value` syntax.
                raise Socks5AuthError((3, _socks5autherrors[3]))
        else:
            self.close()
            if chosenauth[1] == '\xff':
                raise Socks5AuthError((2, _socks5autherrors[2]))
            else:
                raise GeneralProxyError((1, _generalerrors[1]))
        # Build the CONNECT request: literal IPv4 when possible, otherwise
        # either remote (proxy-side) or local hostname resolution.
        req = '\x05\x01\x00'
        try:
            ipaddr = socket.inet_aton(destaddr)
            req = req + '\x01' + ipaddr
        except socket.error:
            if self.__proxy[3] == True:
                ipaddr = None
                req = req + '\x03' + chr(len(destaddr)) + destaddr
            else:
                ipaddr = socket.inet_aton(socket.gethostbyname(destaddr))
                req = req + '\x01' + ipaddr
        req = req + struct.pack('>H', destport)
        self.sendall(req)
        resp = self.__recvall(4)
        if resp[0] != '\x05':
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        elif resp[1] != '\x00':
            self.close()
            # BUGFIX: reply codes index the SOCKS5 table (_socks5errors), not
            # _generalerrors (which only has 6 entries and would IndexError),
            # and the exception takes a single (code, message) tuple.
            if ord(resp[1]) <= 8:
                raise Socks5Error((ord(resp[1]), _socks5errors[ord(resp[1])]))
            else:
                raise Socks5Error((9, _socks5errors[9]))
        elif resp[3] == '\x01':
            boundaddr = self.__recvall(4)
        elif resp[3] == '\x03':
            resp = resp + self.recv(1)
            # BUGFIX: the domain length is a raw byte; it must be ord()'d
            # before being used as a byte count.
            boundaddr = self.__recvall(ord(resp[4]))
        else:
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        boundport = struct.unpack('>H', self.__recvall(2))[0]
        self.__proxysockname = (boundaddr, boundport)
        if ipaddr != None:
            self.__proxypeername = (socket.inet_ntoa(ipaddr), destport)
        else:
            self.__proxypeername = (destaddr, destport)
        return

    def getproxysockname(self):
        """Return the (address, port) the proxy bound for this connection."""
        return self.__proxysockname

    def getproxypeername(self):
        """Return the actual TCP peer, i.e. the proxy server itself."""
        return _orgsocket.getpeername(self)

    def getpeername(self):
        """Return the logical endpoint (the destination behind the proxy)."""
        return self.__proxypeername

    def __negotiatesocks4(self, destaddr, destport):
        """Perform the SOCKS4/SOCKS4a handshake for destaddr:destport."""
        rmtrslv = False
        try:
            ipaddr = socket.inet_aton(destaddr)
        except socket.error:
            if self.__proxy[3] == True:
                # SOCKS4a remote resolving: 0.0.0.1 sentinel + hostname later.
                ipaddr = '\x00\x00\x00\x01'
                rmtrslv = True
            else:
                ipaddr = socket.inet_aton(socket.gethostbyname(destaddr))
        req = '\x04\x01' + struct.pack('>H', destport) + ipaddr
        if self.__proxy[4] != None:
            req = req + self.__proxy[4]
        req = req + '\x00'
        if rmtrslv == True:
            req = req + destaddr + '\x00'
        self.sendall(req)
        resp = self.__recvall(8)
        if resp[0] != '\x00':
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        if resp[1] != 'Z':
            # 'Z' (0x5A) means "request granted"; anything else is a rejection.
            self.close()
            if ord(resp[1]) in (91, 92, 93):
                raise Socks4Error((ord(resp[1]), _socks4errors[ord(resp[1]) - 90]))
            else:
                raise Socks4Error((94, _socks4errors[4]))
        self.__proxysockname = (socket.inet_ntoa(resp[4:]), struct.unpack('>H', resp[2:4])[0])
        # BUGFIX: rmtrslv is a bool, so `rmtrslv != None` was always True and
        # the branches were inverted. When the proxy resolved the hostname
        # remotely we only know the name; otherwise we know the IP.
        if rmtrslv:
            self.__proxypeername = (destaddr, destport)
        else:
            self.__proxypeername = (socket.inet_ntoa(ipaddr), destport)
        return

    def __negotiatehttp(self, destaddr, destport):
        """Establish the tunnel via an HTTP CONNECT request."""
        if self.__proxy[3] == False:
            addr = socket.gethostbyname(destaddr)
        else:
            addr = destaddr
        self.sendall('CONNECT ' + addr + ':' + str(destport) + ' HTTP/1.1\r\n' + 'Host: ' + destaddr + '\r\n\r\n')
        # Read byte-by-byte until the end of the response headers.
        resp = self.recv(1)
        while resp.find('\r\n\r\n') == -1:
            resp = resp + self.recv(1)
        statusline = resp.splitlines()[0].split(' ', 2)
        if statusline[0] not in ('HTTP/1.0', 'HTTP/1.1'):
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        try:
            statuscode = int(statusline[1])
        except ValueError:
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        if statuscode != 200:
            self.close()
            raise HTTPError((statuscode, statusline[2]))
        # HTTP proxies don't report a bound address.
        self.__proxysockname = ('0.0.0.0', 0)
        self.__proxypeername = (addr, destport)

    def connect(self, destpair):
        """Connect to destpair = (host, port) through the configured proxy.

        :raises GeneralProxyError: bad input or unknown proxy type
        :raises Socks4Error/Socks5Error/Socks5AuthError/HTTPError: negotiation failed
        """
        # BUGFIX: the original `type(destpair) in (list, tuple) == False` is a
        # chained comparison that is always False, so malformed input was
        # never rejected.
        if not isinstance(destpair, (list, tuple)) or len(destpair) < 2 \
                or type(destpair[0]) != str or type(destpair[1]) != int:
            raise GeneralProxyError((5, _generalerrors[5]))
        if self.__proxy[0] == PROXY_TYPE_SOCKS5:
            if self.__proxy[2] != None:
                portnum = self.__proxy[2]
            else:
                portnum = 1080
            _orgsocket.connect(self, (self.__proxy[1], portnum))
            self.__negotiatesocks5(destpair[0], destpair[1])
        elif self.__proxy[0] == PROXY_TYPE_SOCKS4:
            if self.__proxy[2] != None:
                portnum = self.__proxy[2]
            else:
                portnum = 1080
            _orgsocket.connect(self, (self.__proxy[1], portnum))
            self.__negotiatesocks4(destpair[0], destpair[1])
        elif self.__proxy[0] == PROXY_TYPE_HTTP:
            if self.__proxy[2] != None:
                portnum = self.__proxy[2]
            else:
                portnum = 8080
            _orgsocket.connect(self, (self.__proxy[1], portnum))
            self.__negotiatehttp(destpair[0], destpair[1])
        elif self.__proxy[0] == None:
            # No proxy configured: plain direct connection.
            _orgsocket.connect(self, (destpair[0], destpair[1]))
        else:
            raise GeneralProxyError((4, _generalerrors[4]))
        return
# ---- (extraction artifact: file boundary) ----
# Source repository: ctrlcctrlv/fonttools
from fontTools.misc.py23 import *
from fontTools import unicodedata
import pytest
def test_script():
    """Spot-check unicodedata.script() across boundary cases and every script."""
    # Boundary/degenerate cases: ASCII, NUL, an unassigned codepoint, and the
    # maximum codepoint.
    assert unicodedata.script("a") == "Latn"
    assert unicodedata.script(unichr(0)) == "Zyyy"
    assert unicodedata.script(unichr(0x0378)) == "Zzzz"
    assert unicodedata.script(unichr(0x10FFFF)) == "Zzzz"
    # these were randomly sampled, one character per script
    assert unicodedata.script(unichr(0x1E918)) == 'Adlm'
    assert unicodedata.script(unichr(0x1170D)) == 'Ahom'
    assert unicodedata.script(unichr(0x145A0)) == 'Hluw'
    assert unicodedata.script(unichr(0x0607)) == 'Arab'
    assert unicodedata.script(unichr(0x056C)) == 'Armn'
    assert unicodedata.script(unichr(0x10B27)) == 'Avst'
    assert unicodedata.script(unichr(0x1B41)) == 'Bali'
    assert unicodedata.script(unichr(0x168AD)) == 'Bamu'
    assert unicodedata.script(unichr(0x16ADD)) == 'Bass'
    assert unicodedata.script(unichr(0x1BE5)) == 'Batk'
    assert unicodedata.script(unichr(0x09F3)) == 'Beng'
    assert unicodedata.script(unichr(0x11C5B)) == 'Bhks'
    assert unicodedata.script(unichr(0x3126)) == 'Bopo'
    assert unicodedata.script(unichr(0x1103B)) == 'Brah'
    assert unicodedata.script(unichr(0x2849)) == 'Brai'
    assert unicodedata.script(unichr(0x1A0A)) == 'Bugi'
    assert unicodedata.script(unichr(0x174E)) == 'Buhd'
    assert unicodedata.script(unichr(0x18EE)) == 'Cans'
    assert unicodedata.script(unichr(0x102B7)) == 'Cari'
    assert unicodedata.script(unichr(0x1053D)) == 'Aghb'
    assert unicodedata.script(unichr(0x11123)) == 'Cakm'
    assert unicodedata.script(unichr(0xAA1F)) == 'Cham'
    assert unicodedata.script(unichr(0xAB95)) == 'Cher'
    # U+1F0C7 is a playing-card symbol, hence Common rather than a real script.
    assert unicodedata.script(unichr(0x1F0C7)) == 'Zyyy'
    assert unicodedata.script(unichr(0x2C85)) == 'Copt'
    assert unicodedata.script(unichr(0x12014)) == 'Xsux'
    assert unicodedata.script(unichr(0x1082E)) == 'Cprt'
    assert unicodedata.script(unichr(0xA686)) == 'Cyrl'
    assert unicodedata.script(unichr(0x10417)) == 'Dsrt'
    assert unicodedata.script(unichr(0x093E)) == 'Deva'
    assert unicodedata.script(unichr(0x1BC4B)) == 'Dupl'
    assert unicodedata.script(unichr(0x1310C)) == 'Egyp'
    assert unicodedata.script(unichr(0x1051C)) == 'Elba'
    assert unicodedata.script(unichr(0x2DA6)) == 'Ethi'
    assert unicodedata.script(unichr(0x10AD)) == 'Geor'
    assert unicodedata.script(unichr(0x2C52)) == 'Glag'
    assert unicodedata.script(unichr(0x10343)) == 'Goth'
    assert unicodedata.script(unichr(0x11371)) == 'Gran'
    assert unicodedata.script(unichr(0x03D0)) == 'Grek'
    assert unicodedata.script(unichr(0x0AAA)) == 'Gujr'
    assert unicodedata.script(unichr(0x0A4C)) == 'Guru'
    assert unicodedata.script(unichr(0x23C9F)) == 'Hani'
    assert unicodedata.script(unichr(0xC259)) == 'Hang'
    assert unicodedata.script(unichr(0x1722)) == 'Hano'
    assert unicodedata.script(unichr(0x108F5)) == 'Hatr'
    assert unicodedata.script(unichr(0x05C2)) == 'Hebr'
    assert unicodedata.script(unichr(0x1B072)) == 'Hira'
    assert unicodedata.script(unichr(0x10847)) == 'Armi'
    # U+033A is a combining mark, hence Inherited.
    assert unicodedata.script(unichr(0x033A)) == 'Zinh'
    assert unicodedata.script(unichr(0x10B66)) == 'Phli'
    assert unicodedata.script(unichr(0x10B4B)) == 'Prti'
    assert unicodedata.script(unichr(0xA98A)) == 'Java'
    assert unicodedata.script(unichr(0x110B2)) == 'Kthi'
    assert unicodedata.script(unichr(0x0CC6)) == 'Knda'
    assert unicodedata.script(unichr(0x3337)) == 'Kana'
    assert unicodedata.script(unichr(0xA915)) == 'Kali'
    assert unicodedata.script(unichr(0x10A2E)) == 'Khar'
    assert unicodedata.script(unichr(0x17AA)) == 'Khmr'
    assert unicodedata.script(unichr(0x11225)) == 'Khoj'
    assert unicodedata.script(unichr(0x112B6)) == 'Sind'
    assert unicodedata.script(unichr(0x0ED7)) == 'Laoo'
    assert unicodedata.script(unichr(0xAB3C)) == 'Latn'
    assert unicodedata.script(unichr(0x1C48)) == 'Lepc'
    assert unicodedata.script(unichr(0x1923)) == 'Limb'
    assert unicodedata.script(unichr(0x1071D)) == 'Lina'
    assert unicodedata.script(unichr(0x100EC)) == 'Linb'
    assert unicodedata.script(unichr(0xA4E9)) == 'Lisu'
    assert unicodedata.script(unichr(0x10284)) == 'Lyci'
    assert unicodedata.script(unichr(0x10926)) == 'Lydi'
    assert unicodedata.script(unichr(0x11161)) == 'Mahj'
    assert unicodedata.script(unichr(0x0D56)) == 'Mlym'
    assert unicodedata.script(unichr(0x0856)) == 'Mand'
    assert unicodedata.script(unichr(0x10AF0)) == 'Mani'
    assert unicodedata.script(unichr(0x11CB0)) == 'Marc'
    assert unicodedata.script(unichr(0x11D28)) == 'Gonm'
    assert unicodedata.script(unichr(0xABDD)) == 'Mtei'
    assert unicodedata.script(unichr(0x1E897)) == 'Mend'
    assert unicodedata.script(unichr(0x109B0)) == 'Merc'
    assert unicodedata.script(unichr(0x10993)) == 'Mero'
    assert unicodedata.script(unichr(0x16F5D)) == 'Plrd'
    assert unicodedata.script(unichr(0x1160B)) == 'Modi'
    assert unicodedata.script(unichr(0x18A8)) == 'Mong'
    assert unicodedata.script(unichr(0x16A48)) == 'Mroo'
    assert unicodedata.script(unichr(0x1128C)) == 'Mult'
    assert unicodedata.script(unichr(0x105B)) == 'Mymr'
    assert unicodedata.script(unichr(0x108AF)) == 'Nbat'
    assert unicodedata.script(unichr(0x19B3)) == 'Talu'
    assert unicodedata.script(unichr(0x1143D)) == 'Newa'
    assert unicodedata.script(unichr(0x07F4)) == 'Nkoo'
    assert unicodedata.script(unichr(0x1B192)) == 'Nshu'
    assert unicodedata.script(unichr(0x169C)) == 'Ogam'
    assert unicodedata.script(unichr(0x1C56)) == 'Olck'
    assert unicodedata.script(unichr(0x10CE9)) == 'Hung'
    assert unicodedata.script(unichr(0x10316)) == 'Ital'
    assert unicodedata.script(unichr(0x10A93)) == 'Narb'
    assert unicodedata.script(unichr(0x1035A)) == 'Perm'
    assert unicodedata.script(unichr(0x103D5)) == 'Xpeo'
    assert unicodedata.script(unichr(0x10A65)) == 'Sarb'
    assert unicodedata.script(unichr(0x10C09)) == 'Orkh'
    assert unicodedata.script(unichr(0x0B60)) == 'Orya'
    assert unicodedata.script(unichr(0x104CF)) == 'Osge'
    assert unicodedata.script(unichr(0x104A8)) == 'Osma'
    assert unicodedata.script(unichr(0x16B12)) == 'Hmng'
    assert unicodedata.script(unichr(0x10879)) == 'Palm'
    assert unicodedata.script(unichr(0x11AF1)) == 'Pauc'
    assert unicodedata.script(unichr(0xA869)) == 'Phag'
    assert unicodedata.script(unichr(0x10909)) == 'Phnx'
    assert unicodedata.script(unichr(0x10B81)) == 'Phlp'
    assert unicodedata.script(unichr(0xA941)) == 'Rjng'
    assert unicodedata.script(unichr(0x16C3)) == 'Runr'
    assert unicodedata.script(unichr(0x0814)) == 'Samr'
    assert unicodedata.script(unichr(0xA88C)) == 'Saur'
    assert unicodedata.script(unichr(0x111C8)) == 'Shrd'
    assert unicodedata.script(unichr(0x1045F)) == 'Shaw'
    assert unicodedata.script(unichr(0x115AD)) == 'Sidd'
    assert unicodedata.script(unichr(0x1D8C0)) == 'Sgnw'
    assert unicodedata.script(unichr(0x0DB9)) == 'Sinh'
    assert unicodedata.script(unichr(0x110F9)) == 'Sora'
    assert unicodedata.script(unichr(0x11A60)) == 'Soyo'
    assert unicodedata.script(unichr(0x1B94)) == 'Sund'
    assert unicodedata.script(unichr(0xA81F)) == 'Sylo'
    assert unicodedata.script(unichr(0x0740)) == 'Syrc'
    assert unicodedata.script(unichr(0x1714)) == 'Tglg'
    assert unicodedata.script(unichr(0x1761)) == 'Tagb'
    assert unicodedata.script(unichr(0x1965)) == 'Tale'
    assert unicodedata.script(unichr(0x1A32)) == 'Lana'
    assert unicodedata.script(unichr(0xAA86)) == 'Tavt'
    assert unicodedata.script(unichr(0x116A5)) == 'Takr'
    assert unicodedata.script(unichr(0x0B8E)) == 'Taml'
    assert unicodedata.script(unichr(0x1754D)) == 'Tang'
    assert unicodedata.script(unichr(0x0C40)) == 'Telu'
    assert unicodedata.script(unichr(0x07A4)) == 'Thaa'
    assert unicodedata.script(unichr(0x0E42)) == 'Thai'
    assert unicodedata.script(unichr(0x0F09)) == 'Tibt'
    assert unicodedata.script(unichr(0x2D3A)) == 'Tfng'
    assert unicodedata.script(unichr(0x114B0)) == 'Tirh'
    assert unicodedata.script(unichr(0x1038B)) == 'Ugar'
    assert unicodedata.script(unichr(0xA585)) == 'Vaii'
    assert unicodedata.script(unichr(0x118CF)) == 'Wara'
    assert unicodedata.script(unichr(0xA066)) == 'Yiii'
    assert unicodedata.script(unichr(0x11A31)) == 'Zanb'
def test_script_extension():
    """Check script_extension() on single-script, boundary, and shared characters."""
    for char, expected in [
        ("a", {"Latn"}),
        (unichr(0), {"Zyyy"}),
        (unichr(0x0378), {"Zzzz"}),
        (unichr(0x10FFFF), {"Zzzz"}),
        # ARABIC-INDIC DIGIT ZERO is shared by several scripts.
        ("\u0660", {'Arab', 'Thaa', 'Yezi'}),
    ]:
        assert unicodedata.script_extension(char) == expected
    # DEVANAGARI DANDA is shared across many Indic scripts.
    assert unicodedata.script_extension("\u0964") == {
        'Beng', 'Deva', 'Dogr', 'Gong', 'Gonm', 'Gran', 'Gujr', 'Guru', 'Knda',
        'Mahj', 'Mlym', 'Nand', 'Orya', 'Sind', 'Sinh', 'Sylo', 'Takr', 'Taml',
        'Telu', 'Tirh'}
def test_script_name():
    """Check script_name(): code-to-name mapping, KeyError, and the default fallback."""
    assert unicodedata.script_name("Latn") == "Latin"
    assert unicodedata.script_name("Zyyy") == "Common"
    assert unicodedata.script_name("Zzzz") == "Unknown"
    # underscores in long names are replaced by spaces
    assert unicodedata.script_name("Egyp") == "Egyptian Hieroglyphs"
    with pytest.raises(KeyError):
        unicodedata.script_name("QQQQ")
    # BUGFIX: the assert was missing its comparison, so it only checked
    # truthiness of the return value; pin the actual default, mirroring
    # test_script_code below.
    assert unicodedata.script_name("QQQQ", default="Unknown") == "Unknown"
def test_script_code():
    """Check script_code(): name-to-code mapping, normalization, KeyError, and default."""
    assert unicodedata.script_code("Latin") == "Latn"
    assert unicodedata.script_code("Common") == "Zyyy"
    assert unicodedata.script_code("Unknown") == "Zzzz"
    # case, whitespace, underscores and hyphens are ignored
    for spelling in (
        "Egyptian Hieroglyphs",
        "Egyptian_Hieroglyphs",
        "egyptianhieroglyphs",
        "Egyptian-Hieroglyphs",
    ):
        assert unicodedata.script_code(spelling) == "Egyp"
    with pytest.raises(KeyError):
        unicodedata.script_code("Does not exist")
    assert unicodedata.script_code("Does not exist", default="Zzzz") == "Zzzz"
def test_block():
    """Check block() at block boundaries and for a character with no block."""
    cases = [
        ("\x00", "Basic Latin"),
        ("\x7F", "Basic Latin"),
        ("\x80", "Latin-1 Supplement"),
        ("\u1c90", "Georgian Extended"),
        ("\u0870", "No_Block"),
    ]
    for char, expected_block in cases:
        assert unicodedata.block(char) == expected_block
def test_ot_tags_from_script():
    """Check mapping from Unicode script codes to OpenType script tags."""
    # simple one-to-one mapping
    assert unicodedata.ot_tags_from_script("Latn") == ["latn"]
    # script mapped to multiple new and old script tags
    assert unicodedata.ot_tags_from_script("Deva") == ["dev2", "deva"]
    # exceptions: Hiragana shares the kana tag
    assert unicodedata.ot_tags_from_script("Hira") == ["kana"]
    # special script codes map to DFLT, as does anything invalid/unknown
    for special in ("Zinh", "Zyyy", "Zzzz", "Aaaa"):
        assert unicodedata.ot_tags_from_script(special) == ["DFLT"]
def test_ot_tag_to_script():
    """Check mapping from OpenType script tags back to Unicode script codes."""
    expectations = [
        ("latn", "Latn"),
        ("kana", "Kana"),
        ("DFLT", None),
        ("aaaa", None),
        ("beng", "Beng"),
        ("bng2", "Beng"),
        ("dev2", "Deva"),
        ("gjr2", "Gujr"),
        # trailing-space padded tags, with and without the padding
        ("yi  ", "Yiii"),
        ("nko ", "Nkoo"),
        ("vai ", "Vaii"),
        ("lao ", "Laoo"),
        ("yi", "Yiii"),
    ]
    for tag, script in expectations:
        assert unicodedata.ot_tag_to_script(tag) == script
    for invalid_value in ("", " ", "z zz", "zzzzz"):
        with pytest.raises(ValueError, match="invalid OpenType tag"):
            unicodedata.ot_tag_to_script(invalid_value)
def test_script_horizontal_direction():
    """Check LTR/RTL classification, KeyError on unknown codes, and the default."""
    for script, direction in (("Latn", "LTR"), ("Arab", "RTL"), ("Thaa", "RTL")):
        assert unicodedata.script_horizontal_direction(script) == direction
    with pytest.raises(KeyError):
        unicodedata.script_horizontal_direction("Azzz")
    assert unicodedata.script_horizontal_direction("Azzz", default="LTR") == "LTR"
if __name__ == "__main__":
    # Allow running this test module directly: forward CLI args to pytest.
    import sys
    sys.exit(pytest.main(sys.argv))
# ---- (extraction artifact: file boundary) ----
# Source repository: Blockwise/crix-client-py
import json
import hmac
from datetime import datetime
from typing import List, Optional, Tuple, AsyncIterator
from aiohttp import ClientSession
from .client import APIError
from .models import Ticker, Resolution, NewOrder, Order, Symbol, Depth, Trade, Account, Ticker24, VolumeFee
class AsyncClient:
    """
    HTTP client to the exchange for non-authorized requests.
    Supported environments:
    - 'mvp' - testnet sandbox with full-wipe each 2nd week (usually)
    - 'prod' - mainnet, production environment with real currency
    Disable `cache_market` if latest symbols info are always required
    """

    def __init__(self, *, env: str = 'mvp', cache_market: bool = True, session: ClientSession = None):
        self.environment = env
        # 'prod' lives on the bare domain; every other env is a subdomain.
        if env == 'prod':
            self._base_url = 'https://crix.io'
        else:
            self._base_url = 'https://{}.crix.io'.format(env)
        self._base_url += '/api/v1'
        self.__cache_market = cache_market
        self.__market_cache = None  # filled lazily by fetch_markets()
        # NOTE(review): creating a ClientSession outside a running event loop
        # is deprecated in newer aiohttp -- confirm callers pass a session.
        self._session = session or ClientSession()

    async def fetch_currency_codes(self) -> List[str]:
        """
        Get list of currencies codes in quote_base format (ex. btc_bch)
        :return: list of formatted currencies codes
        """
        data = await self.fetch_markets()
        return [(sym.base + "_" + sym.quote).lower() for sym in data]

    async def fetch_markets(self, force: bool = False) -> Tuple[Symbol]:
        """
        Get list of all symbols on the exchange. Also includes symbol details like precision, quote, base and e.t.c.
        It's a good idea to cache result of this function after first invoke
        :param force: don't use cached symbols
        :return: list of supported symbols
        """
        # Refresh when caching is off, a refresh is forced, or nothing cached yet.
        if not self.__cache_market or force or self.__market_cache is None:
            symbols = []
            async with self._session.get(self._base_url + '/info/symbols') as req:
                await APIError.async_ensure('fetch-markets', req)
                data = await req.json()
                # 'symbol' may be null in the payload; treat it as empty.
                for info in (data['symbol'] or []):
                    symbols.append(Symbol.from_json(info))
            self.__market_cache = tuple(symbols)
        return self.__market_cache

    async def fetch_order_book(self, symbol: str, level_aggregation: Optional[str] = None) -> Depth:
        """
        Get order book for specific symbol and level aggregation
        :param symbol: interesting symbol name
        :param level_aggregation: aggregate by rounding numbers (if not defined - no aggregation)
        :return: order depth book
        """
        req = {
            'symbolName': symbol
        }
        if level_aggregation is not None:
            req['strLevelAggregation'] = level_aggregation
        # NOTE: `req` is rebound here from the request dict to the response object.
        async with self._session.post(self._base_url + '/depths', json={'req': req}) as req:
            await APIError.async_ensure('fetch-order-book', req)
            return Depth.from_json(await req.json())

    async def fetch_ticker(self) -> List[Ticker24]:
        """
        Get tickers for all symbols for the last 24 hours
        :return: list of tickers
        """
        tickers = []
        async with self._session.get(self._base_url + '/tickers24') as req:
            await APIError.async_ensure('ticker', req)
            data = await req.json()
            for info in data['ohlc']:
                tickers.append(Ticker24.from_json(info))
        return tickers

    async def fetch_ohlcv(self, symbol: str, utc_start_time: datetime, utc_end_time: datetime,
                          resolution: Resolution = Resolution.one_minute,
                          limit: int = 10) -> List[Ticker]:
        """
        Get K-Lines for specific symbol in a time frame.
        Latest OHLCV ticks representing interval up to current minute
        (ex: now: 10:15:32, then latest OHLCV with minute resolution will be from 10:14:00 to 10:15:00).
        :param symbol: K-Line symbol name
        :param utc_start_time: earliest interesting time
        :param utc_end_time: latest interesting time
        :param resolution: K-line resolution (by default 1-minute)
        :param limit: maximum number of entries in a response
        :return: list of ticker
        """
        tickers = []
        async with self._session.post(self._base_url + '/klines',
                                      json={
                                          'req': {
                                              # API expects millisecond epoch timestamps.
                                              'startTime': int(utc_start_time.timestamp() * 1000),
                                              'endTime': int(utc_end_time.timestamp() * 1000),
                                              'symbolName': symbol,
                                              'resolution': resolution.value,
                                              'limit': limit,
                                          }
                                      }) as req:
            await APIError.async_ensure('fetch-ohlcv', req)
            data = await req.json()
            for info in (data['ohlc'] or []):
                tickers.append(Ticker.from_json(info))
        return tickers

    async def fetch_trades(self, symbol: str, limit: int = 100) -> List[Trade]:
        """
        Get last trades for specified symbol name. OrderID, UserID, Fee, FeeCurrency will be empty (or 0)
        :param symbol: symbol name
        :param limit: maximum number of trades (could not be more then 1000)
        :return: list of trades
        """
        async with self._session.post(self._base_url + '/trades', json={
            'req': {
                'symbolName': symbol,
                'limit': limit,
            }
        }) as req:
            await APIError.async_ensure('fetch-trades', req)
            data = await req.json()
            trades = []
            for info in (data['trades'] or []):
                trades.append(Trade.from_json(info))
            return trades

    async def fetch_volume_fees(self, symbol: str) -> List[VolumeFee]:
        """
        Get fees by volume for the symbol. Volume fees returned in unsorted way.
        :param symbol: symbol name
        :return: list of volume fee
        """
        async with self._session.post(self._base_url + '/info/fee/volume', json={
            'req': {
                'symbolName': symbol,
            }
        }) as req:
            await APIError.async_ensure('fetch-volume-fees', req)
            data = await req.json()
            return [VolumeFee.from_json(record) for record in data['fees']]
class AsyncAuthorizedClient(AsyncClient):
"""
HTTP client to the exchange for non-authorized and authorized requests.
Supported environments:
- 'mvp' - testnet sandbox with full-wipe each 2nd week (usually)
- 'prod' - mainnet, production environment with real currency
Expects API token and API secret provided by CRIX.IO exchange as
part of bot API.
"""
    def __init__(self, token: str, secret: str, *, env: str = 'mvp', cache_market: bool = True,
                 session: ClientSession = None):
        """Create an authorized client.

        :param token: API token issued by the exchange
        :param secret: API secret used to sign requests
        :param env: target environment ('mvp' sandbox or 'prod' mainnet)
        :param cache_market: cache the symbols list after first fetch
        :param session: optional externally managed aiohttp session
        """
        super().__init__(env=env, cache_market=cache_market, session=session)
        # Credentials kept name-mangled; used by the request-signing helper.
        self.__token = token
        self.__secret = secret
async def fetch_open_orders(self, *symbols: str, limit: int = 1000) -> AsyncIterator[Order]:
"""
Get all open orders for the user.
.. note::
One request per each symbol will be made plus additional
request to query all supported symbols if symbols parameter
not specified.
:param symbols: filter orders by symbols. if not specified - all symbols queried and used
:param limit: maximum number of orders for each symbol
:return: iterator of orders definitions
"""
if not symbols:
markets = await self.fetch_markets()
symbols = [sym.name for sym in markets]
for symbol in symbols:
response = await self.__signed_request('fetch-open-orders', self._base_url + '/user/orders/open', {
'req': {
'limit': limit,
'symbolName': symbol
}
})
for info in (response['orders'] or []):
yield Order.from_json(info)
async def fetch_closed_orders(self, *symbols: str, limit: int = 1000) -> AsyncIterator[Order]:
"""
Get complete (filled, canceled) orders for user
.. note::
One request per each symbol will be made plus additional
request to query all supported symbols if symbols parameter
not specified.
:param symbols: filter orders by symbols. if not specified - all symbols queried and used
:param limit: maximum number of orders for each symbol
:return: iterator of orders definitions
"""
if not symbols:
markets = await self.fetch_markets()
symbols = [sym.name for sym in markets]
for symbol in symbols:
response = await self.__signed_request('fetch-closed-orders', self._base_url + '/user/orders/complete', {
'req': {
'limit': limit,
'symbolName': symbol
}
})
for info in (response['orders'] or []):
yield Order.from_json(info)
async def fetch_orders(self, *symbols: str, limit: int = 1000) -> AsyncIterator[Order]:
"""
Get opened and closed orders filtered by symbols. If no symbols specified - all symbols are used.
Basically the function acts as union of fetch_open_orders and fetch_closed_orders.
.. note::
Two requests per each symbol will be made plus additional
request to query all supported symbols if symbols parameter
not specified.
:param symbols: symbols: filter orders by symbols. if not specified - used all symbols
:param limit: maximum number of orders for each symbol for each state (open, close)
:return: iterator of orders definitions sorted from open to close
"""
if not symbols:
markets = await self.fetch_markets()
symbols = [sym.name for sym in markets]
for symbol in symbols:
async for order in self.fetch_open_orders(symbol, limit=limit):
yield order
async for order in self.fetch_closed_orders(symbol, limit=limit):
yield order
async def fetch_my_trades(self, *symbols: str, limit: int = 1000) -> AsyncIterator[Trade]:
"""
Get all trades for the user. There is some gap (a few ms) between time when trade is actually created and time
when it becomes visible for the user.
.. note::
One request per each symbol will be made plus additional
request to query all supported symbols if symbols parameter
not specified.
:param symbols: filter trades by symbols. if not specified - used all symbols
:param limit: maximum number of trades for each symbol
:return: iterator of trade definition
"""
if not symbols:
markets = await self.fetch_markets()
symbols = [sym.name for sym in markets]
for symbol in symbols:
response = await self.__signed_request('fetch-my-trades', self._base_url + '/user/trades', {
'req': {
'limit': limit,
'symbolName': symbol
}
})
for info in (response['trades'] or []):
yield Trade.from_json(info)
async def fetch_balance(self) -> List[Account]:
"""
Get all balances for the user
:return: list of all accounts
"""
response = await self.__signed_request('fetch-balance', self._base_url + '/user/accounts', {})
return [Account.from_json(info) for info in (response['accounts'] or [])]
async def cancel_order(self, order_id: int, symbol: str) -> Order:
"""
Cancel placed order
:param order_id: order id generated by the exchange
:param symbol: symbol names same as in placed order
:return: order definition with filled field (also includes filled quantity)
"""
response = await self.__signed_request('cancel-order', self._base_url + '/user/order/cancel', {
'req': {
'orderId': order_id,
'symbolName': symbol,
}
})
return Order.from_json(response)
async def create_order(self, new_order: NewOrder) -> Order:
"""
Create and place order to the exchange
:param new_order: order parameters
:return: order definition with filled fields from the exchange
"""
response = await self.__signed_request('create-order', self._base_url + '/user/order/create', {
"req": new_order.to_json()
})
return Order.from_json(response)
async def fetch_order(self, order_id: int, symbol_name: str) -> Optional[Order]:
"""
Fetch single open order info
:param order_id: order id generated by server during 'create_order' phase
:param symbol_name: symbol name same as in order
:return: order definition or None if nothing found
"""
try:
response = await self.__signed_request('fetch-order', self._base_url + '/user/order/info', {
"req": {
"orderId": order_id,
"symbolName": symbol_name
}
})
except APIError as err:
if 'not found' in err.text:
return None
raise
return Order.from_json(response)
async def fetch_history(self, begin: datetime, end: datetime, currency: str) -> AsyncIterator[Ticker]:
"""
Get historical minute tickers for specified time range and currency
There are several caveats:
- it requires additional permission
- end param should be not more then server time, otherwise error returned
- maximum difference between earliest and latest date should be no more then 366 days
- it could be slow for a long time range
- mostly all points have 1 minute tick however in a very few cases gap can be a bit bigger
:param begin: earliest interesting time
:param end: latest interesting time
:param currency: currency name in upper case
:return: iterator of parsed tickers
"""
data = await self.__signed_request('fetch-history', self._base_url + '/user/rates/history', {
"req": {
"currency": currency,
"fromTimestamp": int(begin.timestamp()),
"toTimestamp": int(end.timestamp())
}
})
for info in data:
yield Ticker.from_json_history(info)
async def __signed_request(self, operation: str, url: str, json_data: dict) -> dict:
payload = json.dumps(json_data).encode()
signer = hmac.new(self.__secret.encode(), digestmod='SHA256')
signer.update(payload)
signature = signer.hexdigest()
headers = {
'X-Api-Signed-Token': self.__token + ',' + signature,
}
async with self._session.post(url, data=payload, headers=headers) as req:
await APIError.async_ensure(operation, req)
return await req.json()
|
<filename>LeetCode-All-Solution/Python3/LC-INTERVIEW-0406-Successor-LCCI.py<gh_stars>0
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""=================================================================
@Project : Algorithm_YuweiYin/LeetCode-All-Solution/Python3
@File : LC-INTERVIEW-0406-Successor-LCCI.py
@Author : [YuweiYin](https://github.com/YuweiYin)
@Date : 2022-05-16
=================================================================="""
import sys
import time
from typing import List, Optional
# import functools
"""
LeetCode - INTERVIEW-0406 - (Medium) - Successor LCCI
https://leetcode.cn/problems/successor-lcci/
Description & Requirement:
Write an algorithm to find the "next" node (i.e., in-order successor) of a given node in a binary search tree.
Return null if there's no "next" node for the given node.
Example 1:
Input: root = [2,1,3], p = 1
2
/ \
1 3
Output: 2
Example 2:
Input: root = [5,3,6,2,4,null,null,1], p = 6
5
/ \
3 6
/ \
2 4
/
1
Output: null
"""
# Definition for a binary tree node.
class TreeNode:
    """Binary tree node plus helpers to build and dump trees."""

    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right  # a leaf node keeps both children as None

    @staticmethod
    def build_binary_tree_layer(val_list: List[int]):
        """Build a tree from a layer-order value list; ``None`` marks a hole.

        Children of the node at index i are taken from indexes 2i+1 / 2i+2.
        Returns the root node, or None for invalid/empty input.
        """
        if not isinstance(val_list, list) or len(val_list) <= 0:
            return None
        nodes = [None if v is None else TreeNode(val=v) for v in val_list]
        total = len(nodes)
        for pos, parent in enumerate(nodes):
            if parent is None:
                continue
            right_pos = (pos + 1) << 1
            left_pos = right_pos - 1
            if left_pos < total:
                parent.left = nodes[left_pos]
            if right_pos < total:
                parent.right = nodes[right_pos]
        return nodes[0]  # the root node

    @staticmethod
    def show_binary_tree_pre_order(root_node) -> List[int]:
        """Return node values in root-left-right order."""
        values = []

        def _walk(node):
            if isinstance(node, TreeNode):
                values.append(node.val)
                _walk(node.left)
                _walk(node.right)

        _walk(root_node)
        return values

    @staticmethod
    def show_binary_tree_mid_order(root_node) -> List[int]:
        """Return node values in left-root-right (in-order) order."""
        values = []

        def _walk(node):
            if isinstance(node, TreeNode):
                _walk(node.left)
                values.append(node.val)
                _walk(node.right)

        _walk(root_node)
        return values

    @staticmethod
    def show_binary_tree_post_order(root_node) -> List[int]:
        """Return node values in left-right-root order."""
        values = []

        def _walk(node):
            if isinstance(node, TreeNode):
                _walk(node.left)
                _walk(node.right)
                values.append(node.val)

        _walk(root_node)
        return values
class Solution:
    def inorderSuccessor(self, root: TreeNode, p: TreeNode) -> Optional[TreeNode]:
        """Return the in-order successor of node `p` in BST `root`, or None."""
        # exception case
        if not isinstance(root, TreeNode) or not isinstance(p, TreeNode):
            return None
        # main method: ordered BST path walk, O(h) time / O(1) space
        return self._inorderSuccessor(root, p)

    def _inorderSuccessor(self, root: TreeNode, p: TreeNode) -> Optional[TreeNode]:
        """Walk one root-to-leaf path, tracking the smallest node > p.val.

        Replaces the previous full in-order traversal, which was O(n) in both
        time and extra space. Assumes `p` is a node of the tree with a unique
        value (the BST property of the problem statement).
        """
        successor = None
        node = root
        while isinstance(node, TreeNode):
            if node.val > p.val:
                successor = node  # best candidate so far; look for a smaller one
                node = node.left
            else:
                node = node.right
        return successor

    def _inorderSuccessorTODO(self, root: TreeNode, p: TreeNode) -> Optional[TreeNode]:
        # NOTE: unfinished recursive variant kept for reference; not used by callers.
        assert isinstance(root, TreeNode) and isinstance(p, TreeNode)

        def __traverse(node: Optional[TreeNode], parent: Optional[TreeNode], is_left: bool) -> Optional[TreeNode]:
            if not isinstance(node, TreeNode):
                return None
            if node == p:
                if isinstance(node.right, TreeNode):
                    successor = node.right
                    while isinstance(successor.left, TreeNode):
                        successor = successor.left
                    return successor
                else:
                    if isinstance(parent, TreeNode) and is_left:  # the current node is the left child of its parent
                        return parent
                    else:
                        return None
            else:
                if node.val == p.val:
                    res = __traverse(node.left, node, True)
                    if not isinstance(res, TreeNode):
                        res = __traverse(node.right, node, False)
                elif node.val < p.val:
                    res = __traverse(node.right, node, False)
                else:
                    res = __traverse(node.left, node, True)
                return res

        return __traverse(root, None, False)
def main():
    # Example 2 (expected output: null):
    #         5
    #        / \
    #       3   6
    #      / \
    #     2   4
    #    /
    #   1
    # root = [5, 3, 6, 2, 4, None, None, 1], p = 6
    nodes = {v: TreeNode(val=v) for v in range(1, 7)}
    nodes[5].left = nodes[3]
    nodes[5].right = nodes[6]
    nodes[3].left = nodes[2]
    nodes[3].right = nodes[4]
    nodes[2].left = nodes[1]
    root_node = nodes[5]
    p = nodes[6]

    # In-order traversal of a BST yields the sorted value list.
    print(TreeNode.show_binary_tree_mid_order(root_node))

    # Run the solver and time it.
    solution = Solution()
    start = time.process_time()
    ans = solution.inorderSuccessor(root_node, p)
    end = time.process_time()

    # Show the answer ("null" when there is no successor).
    print('\nAnswer:')
    print(ans.val if isinstance(ans, TreeNode) else "null")

    # Show the time consumption.
    print('Running Time: %.5f ms' % ((end - start) * 1000))


if __name__ == "__main__":
    sys.exit(main())
|
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.7
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
# Locate and load the compiled SWIG extension. On Python >= 2.6 the helper
# first looks for the shared library next to this wrapper module and falls
# back to the regular import machinery; older interpreters import directly.
if version_info >= (2, 6, 0):
    def swig_import_helper():
        from os.path import dirname
        import imp
        fp = None
        try:
            # Search only the directory containing this wrapper file.
            fp, pathname, description = imp.find_module('_SimSlabParams_Slab_Floor', [dirname(__file__)])
        except ImportError:
            # Not found next to the wrapper: use the normal import path.
            import _SimSlabParams_Slab_Floor
            return _SimSlabParams_Slab_Floor
        if fp is not None:
            try:
                _mod = imp.load_module('_SimSlabParams_Slab_Floor', fp, pathname, description)
            finally:
                # Always close the file object returned by find_module.
                fp.close()
            return _mod
    _SimSlabParams_Slab_Floor = swig_import_helper()
    # Remove the helper so it does not leak into the module namespace.
    del swig_import_helper
else:
    import _SimSlabParams_Slab_Floor
del version_info
# --- SWIG runtime helpers ---------------------------------------------------
# Generated shims that route attribute reads/writes through the per-class
# getter/setter tables SWIG creates for each proxy class.
try:
    _swig_property = property
except NameError:
    pass  # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
    # 'thisown' and 'this' are SWIG's own bookkeeping attributes.
    if (name == "thisown"):
        return self.this.own(value)
    if (name == "this"):
        if type(value).__name__ == 'SwigPyObject':
            self.__dict__[name] = value
            return
    # Prefer a generated setter for this attribute when one exists.
    method = class_type.__swig_setmethods__.get(name, None)
    if method:
        return method(self, value)
    if (not static):
        # Dynamic mode: unknown attributes are stored like on normal objects.
        if _newclass:
            object.__setattr__(self, name, value)
        else:
            self.__dict__[name] = value
    else:
        raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
    # Dynamic variant (static=0): adding new attributes is allowed.
    return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr_nondynamic(self, class_type, name, static=1):
    if (name == "thisown"):
        return self.this.own()
    # Prefer a generated getter for this attribute when one exists.
    method = class_type.__swig_getmethods__.get(name, None)
    if method:
        return method(self)
    if (not static):
        return object.__getattr__(self, name)
    else:
        raise AttributeError(name)
def _swig_getattr(self, class_type, name):
    # Dynamic variant (static=0) used as the proxies' __getattr__.
    return _swig_getattr_nondynamic(self, class_type, name, 0)
def _swig_repr(self):
    # Include the wrapped object's repr when available; fall back to an
    # empty marker for half-constructed proxies (no 'this' yet).
    try:
        strthis = "proxy of " + self.this.__repr__()
    except:
        strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
# Detect new-style class support (always present on modern interpreters).
try:
    _object = object
    _newclass = 1
except AttributeError:
    class _object:
        pass
    _newclass = 0
# weakref may be missing on minimal builds; degrade to the identity function.
try:
    import weakref
    weakref_proxy = weakref.proxy
except:
    weakref_proxy = lambda x: x
import base
class SimSlabParams(base.SimBldgModelParams):
    """SWIG proxy for the generated ``SimSlabParams`` class."""
    # Merge the setter/getter tables of all base classes so attribute access
    # can be dispatched through the generated accessors.
    __swig_setmethods__ = {}
    for _s in [base.SimBldgModelParams]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, SimSlabParams, name, value)
    __swig_getmethods__ = {}
    for _s in [base.SimBldgModelParams]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, SimSlabParams, name)
    __repr__ = _swig_repr
    # Each accessor below delegates directly to the compiled extension module.
    def DimensionX(self, *args):
        return _SimSlabParams_Slab_Floor.SimSlabParams_DimensionX(self, *args)
    def DimensionY(self, *args):
        return _SimSlabParams_Slab_Floor.SimSlabParams_DimensionY(self, *args)
    def DimensionZ(self, *args):
        return _SimSlabParams_Slab_Floor.SimSlabParams_DimensionZ(self, *args)
    def SlabElevation(self, *args):
        return _SimSlabParams_Slab_Floor.SimSlabParams_SlabElevation(self, *args)
    def SlabThickness(self, *args):
        return _SimSlabParams_Slab_Floor.SimSlabParams_SlabThickness(self, *args)
    def ProfilePath(self, *args):
        return _SimSlabParams_Slab_Floor.SimSlabParams_ProfilePath(self, *args)
    def VoidProfilePaths(self, *args):
        return _SimSlabParams_Slab_Floor.SimSlabParams_VoidProfilePaths(self, *args)
    def __init__(self, *args):
        # Store the newly created wrapped object; fall back to direct
        # assignment when 'this' does not support append.
        this = _SimSlabParams_Slab_Floor.new_SimSlabParams(*args)
        try:
            self.this.append(this)
        except:
            self.this = this
    def _clone(self, f=0, c=None):
        return _SimSlabParams_Slab_Floor.SimSlabParams__clone(self, f, c)
    __swig_destroy__ = _SimSlabParams_Slab_Floor.delete_SimSlabParams
    __del__ = lambda self: None
# Register the proxy class with the extension module.
SimSlabParams_swigregister = _SimSlabParams_Slab_Floor.SimSlabParams_swigregister
SimSlabParams_swigregister(SimSlabParams)
class SimSlabParams_Slab(SimSlabParams):
    """SWIG proxy for the generated ``SimSlabParams_Slab`` subclass."""
    # Merge the setter/getter tables of the base class (see SimSlabParams).
    __swig_setmethods__ = {}
    for _s in [SimSlabParams]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, SimSlabParams_Slab, name, value)
    __swig_getmethods__ = {}
    for _s in [SimSlabParams]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, SimSlabParams_Slab, name)
    __repr__ = _swig_repr
    def __init__(self, *args):
        # Store the newly created wrapped object; fall back to direct
        # assignment when 'this' does not support append.
        this = _SimSlabParams_Slab_Floor.new_SimSlabParams_Slab(*args)
        try:
            self.this.append(this)
        except:
            self.this = this
    def _clone(self, f=0, c=None):
        return _SimSlabParams_Slab_Floor.SimSlabParams_Slab__clone(self, f, c)
    __swig_destroy__ = _SimSlabParams_Slab_Floor.delete_SimSlabParams_Slab
    __del__ = lambda self: None
# Register the proxy class with the extension module.
SimSlabParams_Slab_swigregister = _SimSlabParams_Slab_Floor.SimSlabParams_Slab_swigregister
SimSlabParams_Slab_swigregister(SimSlabParams_Slab)
class SimSlabParams_Slab_Floor(SimSlabParams_Slab):
    """SWIG proxy for the generated ``SimSlabParams_Slab_Floor`` subclass."""
    # Merge the setter/getter tables of the base class (see SimSlabParams).
    __swig_setmethods__ = {}
    for _s in [SimSlabParams_Slab]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, SimSlabParams_Slab_Floor, name, value)
    __swig_getmethods__ = {}
    for _s in [SimSlabParams_Slab]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, SimSlabParams_Slab_Floor, name)
    __repr__ = _swig_repr
    def __init__(self, *args):
        # Store the newly created wrapped object; fall back to direct
        # assignment when 'this' does not support append.
        this = _SimSlabParams_Slab_Floor.new_SimSlabParams_Slab_Floor(*args)
        try:
            self.this.append(this)
        except:
            self.this = this
    def _clone(self, f=0, c=None):
        return _SimSlabParams_Slab_Floor.SimSlabParams_Slab_Floor__clone(self, f, c)
    __swig_destroy__ = _SimSlabParams_Slab_Floor.delete_SimSlabParams_Slab_Floor
    __del__ = lambda self: None
# Register the proxy class with the extension module.
SimSlabParams_Slab_Floor_swigregister = _SimSlabParams_Slab_Floor.SimSlabParams_Slab_Floor_swigregister
SimSlabParams_Slab_Floor_swigregister(SimSlabParams_Slab_Floor)
class SimSlabParams_Slab_Floor_sequence(base.sequence_common):
    """SWIG proxy exposing a sequence of ``SimSlabParams_Slab_Floor`` items.

    Every method below delegates to the compiled extension module; the
    method names mirror the wrapped sequence interface generated by SWIG.
    """
    # Merge the setter/getter tables of the base class (see SimSlabParams).
    __swig_setmethods__ = {}
    for _s in [base.sequence_common]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, SimSlabParams_Slab_Floor_sequence, name, value)
    __swig_getmethods__ = {}
    for _s in [base.sequence_common]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, SimSlabParams_Slab_Floor_sequence, name)
    __repr__ = _swig_repr
    def __init__(self, *args):
        # Store the newly created wrapped object; fall back to direct
        # assignment when 'this' does not support append.
        this = _SimSlabParams_Slab_Floor.new_SimSlabParams_Slab_Floor_sequence(*args)
        try:
            self.this.append(this)
        except:
            self.this = this
    def assign(self, n, x):
        return _SimSlabParams_Slab_Floor.SimSlabParams_Slab_Floor_sequence_assign(self, n, x)
    def begin(self, *args):
        return _SimSlabParams_Slab_Floor.SimSlabParams_Slab_Floor_sequence_begin(self, *args)
    def end(self, *args):
        return _SimSlabParams_Slab_Floor.SimSlabParams_Slab_Floor_sequence_end(self, *args)
    def rbegin(self, *args):
        return _SimSlabParams_Slab_Floor.SimSlabParams_Slab_Floor_sequence_rbegin(self, *args)
    def rend(self, *args):
        return _SimSlabParams_Slab_Floor.SimSlabParams_Slab_Floor_sequence_rend(self, *args)
    def at(self, *args):
        return _SimSlabParams_Slab_Floor.SimSlabParams_Slab_Floor_sequence_at(self, *args)
    def front(self, *args):
        return _SimSlabParams_Slab_Floor.SimSlabParams_Slab_Floor_sequence_front(self, *args)
    def back(self, *args):
        return _SimSlabParams_Slab_Floor.SimSlabParams_Slab_Floor_sequence_back(self, *args)
    def push_back(self, *args):
        return _SimSlabParams_Slab_Floor.SimSlabParams_Slab_Floor_sequence_push_back(self, *args)
    def pop_back(self):
        return _SimSlabParams_Slab_Floor.SimSlabParams_Slab_Floor_sequence_pop_back(self)
    def detach_back(self, pop=True):
        return _SimSlabParams_Slab_Floor.SimSlabParams_Slab_Floor_sequence_detach_back(self, pop)
    def insert(self, *args):
        return _SimSlabParams_Slab_Floor.SimSlabParams_Slab_Floor_sequence_insert(self, *args)
    def erase(self, *args):
        return _SimSlabParams_Slab_Floor.SimSlabParams_Slab_Floor_sequence_erase(self, *args)
    def detach(self, position, r, erase=True):
        return _SimSlabParams_Slab_Floor.SimSlabParams_Slab_Floor_sequence_detach(self, position, r, erase)
    def swap(self, x):
        return _SimSlabParams_Slab_Floor.SimSlabParams_Slab_Floor_sequence_swap(self, x)
    __swig_destroy__ = _SimSlabParams_Slab_Floor.delete_SimSlabParams_Slab_Floor_sequence
    __del__ = lambda self: None
# Register the proxy class with the extension module.
SimSlabParams_Slab_Floor_sequence_swigregister = _SimSlabParams_Slab_Floor.SimSlabParams_Slab_Floor_sequence_swigregister
SimSlabParams_Slab_Floor_sequence_swigregister(SimSlabParams_Slab_Floor_sequence)
# This file is compatible with both classic and new-style classes.
|
<gh_stars>0
import pandas as pd
import numpy as np
import psycopg2
import pmdarima as pm
import plotly
import plotly.graph_objs as go
from plotly.offline import *
from Graph import *
import plotly.io as pio
pio.renderers.default = 'notebook'
# Connect to database (local PostgreSQL, default 'postgres' database).
conn = psycopg2.connect(host='localhost', port=5432, database='postgres')
# Query: LMT close prices; train on 1997-2018, evaluate on 2019-2020.
query_train = """select tradedate, closeprice from stock.stockprice
                where ticker = 'LMT' and
                date_part('year', tradedate) between 1997 and 2018
                order by 1; """
query_test = """select tradedate as ds, closeprice as realprice
                from stock.stockprice
                where ticker = 'LMT' and
                date_part('year', tradedate) between 2019 and 2020
                order by 1; """
# pd.read_sql is the supported public name for the former pd.io.sql.read_sql.
df_train = pd.read_sql(query_train, conn)
df_test = pd.read_sql(query_test, conn)
# Calculate SST (total sum of squares of the test prices), used for R-square.
ybar_test = df_test['realprice'].mean()*1.0
sst = ((df_test['realprice'] - ybar_test)**2).sum()
### Train model using 1997-2018 data, let's call it model_max ###
# df_train already holds the 1997-2018 data (the duplicate re-query was removed).
X_train = df_train['closeprice']
model_max = pm.auto_arima(X_train, start_p=1, start_q=1,
                          max_p=3, max_q=3, m=12,
                          max_P=3, max_Q=3, seasonal=True,
                          d=1, D=1, max_d=3, max_D=3, trace=True,
                          error_action='ignore',
                          suppress_warnings=True,
                          stepwise=True)
print(model_max.summary())
df_test_max = df_test.copy()
pred_max = model_max.predict(df_test.shape[0])  # It returns ndarray
df_test_max['yhat'] = pred_max
sse_max = ((df_test_max['yhat'] - df_test_max['realprice'])**2).sum()
rsqu_max = 1 - sse_max / sst
### Train model using 2010-2018 data, let's call it model_8yr ###
# Obtain training data between 2010 and 2018
query_train = """select tradedate, closeprice from stock.stockprice
                where ticker = 'LMT' and
                date_part('year', tradedate) between 2010 and 2018
                order by 1; """
df_train = pd.read_sql(query_train, conn)
conn.close()  # all database reads are done; release the connection
X_train = df_train['closeprice']
model_8yr = pm.auto_arima(X_train, start_p=1, start_q=1,
                          max_p=3, max_q=3, m=12,
                          max_P=3, max_Q=3, seasonal=True,
                          d=1, D=1, max_d=3, max_D=3, trace=True,
                          error_action='ignore',
                          suppress_warnings=True,
                          stepwise=True)
print(model_8yr.summary())
df_test_8yr = df_test.copy()
pred_8yr = model_8yr.predict(df_test.shape[0])
df_test_8yr['yhat'] = pred_8yr
sse_8yr = ((df_test_8yr['yhat'] - df_test_8yr['realprice'])**2).sum()
rsqu_8yr = 1 - sse_8yr / sst
print('The R-square of model_max is', f'{rsqu_max:.2f}')
print('The R-square of model_8yr is', f'{rsqu_8yr:.2f}')
# Generate graph of the results
df_train.columns = ['ds', 'y']
fig = generate_line_chart(df_train, df_test, df_test_max, 'model_max',
                          df_test_8yr, 'model_8yr',
                          'Prediction with pmdarima')
plotly.offline.plot(fig, filename='LMTprice_pmdarima.html')
|
import pytest
import esmvalcore.preprocessor
from esmvalcore.preprocessor import _download
@pytest.mark.parametrize(
    'variable, cmd',
    [
        # CMIP5 uses the classic facet names (model/experiment/ensemble).
        (
            {
                'dataset': 'CanESM2',
                'ensemble': 'r1i1p1',
                'exp': 'historical',
                'mip': 'Amon',
                'project': 'CMIP5',
                'variable': 'ta',
            },
            ("synda search --file"
             " project='CMIP5'"
             " cmor_table='Amon'"
             " model='CanESM2'"
             " experiment='historical'"
             " ensemble='r1i1p1'"),
        ),
        # CMIP6 renames the facets (source_id/experiment_id/variant_label)
        # and adds activity and grid labels.
        (
            {
                'activity': 'CMIP',
                'dataset': 'BCC-ESM1',
                'ensemble': 'r1i1p1f1',
                'exp': 'historical',
                'grid': 'gn',
                'mip': 'Amon',
                'project': 'CMIP6',
                'variable': 'ta',
            },
            ("synda search --file"
             " project='CMIP6'"
             " activity_id='CMIP'"
             " table_id='Amon'"
             " source_id='BCC-ESM1'"
             " experiment_id='historical'"
             " variant_label='r1i1p1f1'"
             " grid_label='gn'"),
        ),
    ],
)
def test_synda_search_cmd(variable, cmd):
    """Each supported project maps variable facets to a synda search command."""
    assert _download._synda_search_cmd(variable) == cmd
def test_synda_search_cmd_fail_unknown_project():
    """An unsupported project must raise NotImplementedError."""
    with pytest.raises(NotImplementedError):
        _download._synda_search_cmd({'project': 'Unknown'})
def test_synda_search(mocker):
    """synda_search runs the search command and keeps only matching files.

    The synda file identifier is the dataset id followed by the file name;
    the identifiers below were restored after the file-name placeholder had
    been scrubbed to the literal ``(unknown)``.
    """
    variable = {
        'frequency': 'mon',
        'start_year': 1962,
        'end_year': 1966,
    }
    cmd = mocker.sentinel.cmd
    dataset = ("CMIP6.CMIP.MPI-M.MPI-ESM1-2-HR.historical"
               ".r1i1p1f1.Amon.pr.gn.v20190710")
    files = [
        "pr_Amon_MPI-ESM1-2-HR_historical_r1i1p1f1_gn_195501-195912.nc",
        "pr_Amon_MPI-ESM1-2-HR_historical_r1i1p1f1_gn_196001-196412.nc",
        "pr_Amon_MPI-ESM1-2-HR_historical_r1i1p1f1_gn_196501-197212.nc",
        "pr_Amon_MPI-ESM1-2-HR_historical_r1i1p1f1_gn_196501-196912.nc",
        "pr_Amon_MPI-ESM1-2-HR_historical_r1i1p1f1_gn_197001-197412.nc",
    ]
    all_files = [f"{dataset}.{filename}" for filename in files]
    selected_files = all_files[1:4]
    mocker.patch.object(_download,
                        '_synda_search_cmd',
                        return_value=cmd,
                        autospec=True)
    mocker.patch.object(_download.subprocess,
                        'check_output',
                        return_value="\n".join(
                            f"new 12.7 MB {dataset}.{filename}"
                            for filename in files),
                        autospec=True)
    mocker.patch.object(_download,
                        'select_files',
                        return_value=selected_files,
                        autospec=True)
    mocker.patch.object(_download,
                        'get_start_end_year',
                        side_effect=[(1960, 1964), (1965, 1972), (1965, 1969)],
                        autospec=True)
    result = _download.synda_search(variable)
    # Check calls and result
    _download._synda_search_cmd.assert_called_once_with(variable)
    _download.subprocess.check_output.assert_called_once_with(
        cmd, shell=True, universal_newlines=True)
    _download.select_files.assert_called_once_with(all_files,
                                                   variable['start_year'],
                                                   variable['end_year'])
    _download.get_start_end_year.assert_has_calls(
        [mocker.call(filename) for filename in selected_files])
    assert result == all_files[1:3]
@pytest.mark.parametrize('download', [True, False])
def test_synda_download(download, mocker, tmp_path):
    """synda_download runs synda only when the file is missing locally."""
    filename = "pr_Amon_MPI-ESM1-2-HR_historical_r1i1p1f1_gn_195501-195912.nc"
    local_path = tmp_path / filename
    # The synda identifier is the dataset id followed by the file name
    # (restored: the file-name placeholder had been scrubbed to "(unknown)").
    synda_path = ("CMIP6.CMIP.MPI-M.MPI-ESM1-2-HR.historical"
                  f".r1i1p1f1.Amon.pr.gn.v20190710.{filename}")
    cmd = f'synda get --dest_folder={tmp_path} --verify_checksum {synda_path}'
    mocker.patch.object(_download.subprocess, 'check_call', autospec=True)
    if not download:
        # Pre-create the local file so the download step is skipped.
        local_path.touch()
    result = _download.synda_download(synda_path, tmp_path)
    if download:
        _download.subprocess.check_call.assert_called_once_with(cmd,
                                                                shell=True)
    else:
        _download.subprocess.check_call.assert_not_called()
    assert result == str(local_path)
def test_download(mocker, tmp_path):
    """download() fetches each synda file and returns the local paths."""
    dataset = ("CMIP6.CMIP.MPI-M.MPI-ESM1-2-HR.historical"
               ".r1i1p1f1.Amon.pr.gn.v20190710")
    files = [
        "pr_Amon_MPI-ESM1-2-HR_historical_r1i1p1f1_gn_195501-195912.nc",
        "pr_Amon_MPI-ESM1-2-HR_historical_r1i1p1f1_gn_196001-196412.nc",
        "pr_Amon_MPI-ESM1-2-HR_historical_r1i1p1f1_gn_196501-196912.nc",
    ]
    # The synda identifier is the dataset id followed by the file name
    # (restored: the file-name placeholder had been scrubbed to "(unknown)").
    synda_files = [f"{dataset}.{filename}" for filename in files]
    local_files = [str(tmp_path / filename) for filename in files]
    mocker.patch.object(_download,
                        'synda_download',
                        autospec=True,
                        side_effect=local_files)
    result = esmvalcore.preprocessor.download(synda_files, tmp_path)
    _download.synda_download.assert_has_calls(
        [mocker.call(filename, tmp_path) for filename in synda_files])
    assert result == local_files
|
"""
Test all things related to the ``jedi.cache`` module.
"""
import os
import pytest
import time
from pathlib import Path
from parso.cache import (_CACHED_FILE_MAXIMUM_SURVIVAL, _VERSION_TAG,
_get_cache_clear_lock_path, _get_hashed_path,
_load_from_file_system, _NodeCacheItem,
_remove_cache_and_update_lock, _save_to_file_system,
load_module, parser_cache, try_to_save_module)
from parso._compatibility import is_pypy
from parso import load_grammar
from parso import cache
from parso import file_io
from parso import parse
# Marker to skip pickling-heavy tests on PyPy. The two implicit string
# literals were missing a separating space ("pickle,we"); fixed here.
skip_pypy = pytest.mark.skipif(
    is_pypy,
    reason="pickling in pypy is slow, since we don't pickle, "
           "we never go into path of auto-collecting garbage"
)
@pytest.fixture()
def isolated_parso_cache(monkeypatch, tmpdir):
    """Set `parso.cache._default_cache_path` to a temporary directory
    during the test. """
    cache_path = Path(str(tmpdir), "__parso_cache")
    # monkeypatch restores the original path automatically after the test.
    monkeypatch.setattr(cache, '_default_cache_path', cache_path)
    return cache_path
def test_modulepickling_change_cache_dir(tmpdir):
    """
    ParserPickling should not save old cache when cache_directory is changed.
    See: `#168 <https://github.com/davidhalter/jedi/pull/168>`_
    """
    dir_1 = Path(str(tmpdir.mkdir('first')))
    dir_2 = Path(str(tmpdir.mkdir('second')))
    item_1 = _NodeCacheItem('bla', [])
    item_2 = _NodeCacheItem('bla', [])
    path_1 = Path('fake path 1')
    path_2 = Path('fake path 2')
    hashed_grammar = load_grammar()._hashed
    # Save under dir_1, then clear the in-memory cache so the next load must
    # hit the file system.
    _save_to_file_system(hashed_grammar, path_1, item_1, cache_path=dir_1)
    parser_cache.clear()
    cached = load_stored_item(hashed_grammar, path_1, item_1, cache_path=dir_1)
    assert cached == item_1.node
    # Saving path_2 under dir_2 must not make path_1 loadable from dir_2.
    _save_to_file_system(hashed_grammar, path_2, item_2, cache_path=dir_2)
    cached = load_stored_item(hashed_grammar, path_1, item_1, cache_path=dir_2)
    assert cached is None
def load_stored_item(hashed_grammar, path, item, cache_path):
    """Load `item` stored at `path` in `cache`."""
    # Pass a change time just before the item's own so the cached entry is
    # considered up to date; returns None on a cache miss.
    item = _load_from_file_system(hashed_grammar, path, item.change_time - 1, cache_path)
    return item
@pytest.mark.usefixtures("isolated_parso_cache")
def test_modulepickling_simulate_deleted_cache(tmpdir):
    """
    Tests loading from a cache file after it is deleted.
    According to macOS `dev docs`__,
    Note that the system may delete the Caches/ directory to free up disk
    space, so your app must be able to re-create or download these files as
    needed.
    It is possible that other supported platforms treat cache files the same
    way.
    __ https://developer.apple.com/library/content/documentation/FileManagement/Conceptual/FileSystemProgrammingGuide/FileSystemOverview/FileSystemOverview.html
    """  # noqa
    grammar = load_grammar()
    module = 'fake parser'
    # Create the file
    path = Path(str(tmpdir.dirname), 'some_path')
    with open(path, 'w'):
        pass
    io = file_io.FileIO(path)
    # Save the module, confirm it round-trips through the cache.
    try_to_save_module(grammar._hashed, io, module, lines=[])
    assert load_module(grammar._hashed, io) == module
    # Simulate the OS purging the on-disk cache behind our back.
    os.unlink(_get_hashed_path(grammar._hashed, path))
    parser_cache.clear()
    # With both the memory and disk caches gone, loading must return None.
    cached2 = load_module(grammar._hashed, io)
    assert cached2 is None
def test_cache_limit():
    """Parsing with an oversized cache must evict the stale entries."""
    def cache_size():
        # Total number of cached modules across all grammar hashes.
        return sum(len(v) for v in parser_cache.values())
    try:
        parser_cache.clear()
        # One batch dated far in the future (kept) and one far in the past
        # (evicted).
        future_node_cache_item = _NodeCacheItem('bla', [], change_time=time.time() + 10e6)
        old_node_cache_item = _NodeCacheItem('bla', [], change_time=time.time() - 10e4)
        parser_cache['some_hash_old'] = {
            '/path/%s' % i: old_node_cache_item for i in range(300)
        }
        parser_cache['some_hash_new'] = {
            '/path/%s' % i: future_node_cache_item for i in range(300)
        }
        assert cache_size() == 600
        # Parsing triggers the cleanup: 300 stale entries go, 300 fresh ones
        # stay, plus the newly parsed module -> 301.
        parse('somecode', cache=True, path='/path/somepath')
        assert cache_size() == 301
    finally:
        parser_cache.clear()
class _FixedTimeFileIO(file_io.KnownContentFileIO):
    """File IO stub whose reported modification time is pinned to a value."""
    def __init__(self, path, content, last_modified):
        super().__init__(path, content)
        self._last_modified = last_modified

    def get_last_modified(self):
        # Return the pinned timestamp instead of consulting the file system.
        return self._last_modified
@pytest.mark.parametrize('diff_cache', [False, True])
@pytest.mark.parametrize('use_file_io', [False, True])
def test_cache_last_used_update(diff_cache, use_file_io):
    """Re-parsing a known path must refresh the cache item's last_used stamp."""
    p = Path('/path/last-used')
    parser_cache.clear()  # Clear, because then it's easier to find stuff.
    parse('somecode', cache=True, path=p)
    node_cache_item = next(iter(parser_cache.values()))[p]
    now = time.time()
    assert node_cache_item.last_used < now
    if use_file_io:
        # File whose mtime is older than the cached item, so the cached
        # module is reused rather than reparsed.
        f = _FixedTimeFileIO(p, 'code', node_cache_item.last_used - 10)
        parse(file_io=f, cache=True, diff_cache=diff_cache)
    else:
        parse('somecode2', cache=True, path=p, diff_cache=diff_cache)
    # Either way, last_used must have been bumped to "now-ish".
    node_cache_item = next(iter(parser_cache.values()))[p]
    assert now <= node_cache_item.last_used <= time.time()
@skip_pypy
def test_inactive_cache(tmpdir, isolated_parso_cache):
    """Expired cache files are removed only after the clear-lock itself ages."""
    parser_cache.clear()
    test_subjects = "abcdef"
    for path in test_subjects:
        parse('somecode', cache=True, path=os.path.join(str(tmpdir), path))
    raw_cache_path = isolated_parso_cache.joinpath(_VERSION_TAG)
    assert raw_cache_path.exists()
    dir_names = os.listdir(raw_cache_path)
    a_while_ago = time.time() - _CACHED_FILE_MAXIMUM_SURVIVAL
    old_paths = set()
    for dir_name in dir_names[:len(test_subjects) // 2]:  # make certain number of paths old
        os.utime(raw_cache_path.joinpath(dir_name), (a_while_ago, a_while_ago))
        old_paths.add(dir_name)
    # nothing should be cleared while the lock is on
    assert _get_cache_clear_lock_path().exists()
    _remove_cache_and_update_lock()  # it shouldn't clear anything
    assert len(os.listdir(raw_cache_path)) == len(test_subjects)
    assert old_paths.issubset(os.listdir(raw_cache_path))
    # Age the lock file too; now the expired half must be collected.
    os.utime(_get_cache_clear_lock_path(), (a_while_ago, a_while_ago))
    _remove_cache_and_update_lock()
    assert len(os.listdir(raw_cache_path)) == len(test_subjects) // 2
    assert not old_paths.intersection(os.listdir(raw_cache_path))
@skip_pypy
def test_permission_error(monkeypatch):
    """A PermissionError while saving the cache must warn, not crash."""
    def save(*args, **kwargs):
        # Record the call, then fail like a read-only cache directory would.
        nonlocal was_called
        was_called = True
        raise PermissionError
    was_called = False
    monkeypatch.setattr(cache, '_save_to_file_system', save)
    with pytest.warns(Warning):
        parse(path=__file__, cache=True, diff_cache=True)
    assert was_called
|
# -*- test-case-name: twisted.web.test.test_web -*-
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
from cStringIO import StringIO
from twisted.python import failure
import html
import resource
import linecache
import string, re
import types
def redirectTo(URL, request):
    """Set an HTTP redirect on `request` and return a small HTML body that
    repeats the redirect via a meta-refresh tag for clients that ignore the
    response code.

    NOTE(review): `URL` is interpolated into the HTML without escaping, so
    callers must never pass untrusted input here.
    """
    request.redirect(URL)
    body = """
<html>
    <head>
        <meta http-equiv=\"refresh\" content=\"0;URL=%(url)s\">
    </head>
    <body bgcolor=\"#FFFFFF\" text=\"#000000\">
        <a href=\"%(url)s\">click here</a>
    </body>
</html>
""" % {'url': URL}
    return body
class Redirect(resource.Resource):
    """Leaf resource that answers every request with a redirect to a fixed URL."""

    isLeaf = 1

    def __init__(self, url):
        resource.Resource.__init__(self)
        self.url = url

    def getChild(self, name, request):
        # Any child path still redirects to the same target.
        return self

    def render(self, request):
        return redirectTo(self.url, request)
class ChildRedirector(Redirect):
    """Redirect whose children redirect to the corresponding child URL."""

    isLeaf = 0

    def __init__(self, url):
        # XXX is this enough?
        # Refuse relative targets that would resolve inside this resource
        # tree and therefore redirect back to themselves forever.
        if '://' not in url and not url.startswith(('..', '/')):
            raise ValueError("It seems you've given me a redirect (%s) that is a child of myself! That's not good, it'll cause an infinite redirect." % url)
        Redirect.__init__(self, url)

    def getChild(self, name, request):
        base = self.url
        if not base.endswith('/'):
            base += '/'
        return ChildRedirector(base + name)
from twisted.python import urlpath
class ParentRedirect(resource.Resource):
    """
    I redirect to URLPath.here().
    """
    isLeaf = 1

    def render(self, request):
        """Redirect the client to the directory containing the request URL."""
        return redirectTo(urlpath.URLPath.fromRequest(request).here(), request)

    def getChild(self, name, request):
        # Fixed signature: resource traversal calls getChild(name, request);
        # the previous (self, request) form raised TypeError when this
        # resource was ever traversed despite isLeaf (matches the sibling
        # Redirect/DeferredResource classes above).
        return self
class DeferredResource(resource.Resource):
    """
    I wrap up a Deferred that will eventually result in a Resource
    object.
    """
    isLeaf = 1

    def __init__(self, d):
        # d: a Deferred expected to fire with a resource.Resource.
        resource.Resource.__init__(self)
        self.d = d

    def getChild(self, name, request):
        # Defer all traversal until the wrapped Deferred fires; _cbChild
        # re-runs the child lookup on the real resource.
        return self

    def render(self, request):
        # NOTE(review): callbacks are chained onto the same Deferred each
        # render, so this resource looks single-use — confirm a second
        # request is never routed through the same instance.
        self.d.addCallback(self._cbChild, request).addErrback(
            self._ebChild,request)
        # Imported locally, presumably to avoid a circular import — confirm.
        from twisted.web.server import NOT_DONE_YET
        return NOT_DONE_YET

    def _cbChild(self, child, request):
        # Resolve the real child for the original request and render it.
        result = resource.getChildForRequest(child, request).render(request)
        from twisted.web.server import NOT_DONE_YET
        if result == NOT_DONE_YET:
            return
        else:
            # Synchronous render: deliver the body and close the request.
            request.write(result)
            request.finish()

    def _ebChild(self, reason, request):
        # Surface the failure through the request's standard error handling.
        request.processingFailed(reason)
        return reason
stylesheet = """
<style type="text/css">
p.error {
color: red;
font-family: Verdana, Arial, helvetica, sans-serif;
font-weight: bold;
}
div {
font-family: Verdana, Arial, helvetica, sans-serif;
}
div.stackTrace {
}
div.frame {
padding: 1em;
background: white;
border-bottom: thin black dashed;
}
div.firstFrame {
padding: 1em;
background: white;
border-top: thin black dashed;
border-bottom: thin black dashed;
}
div.location {
}
div.snippet {
margin-bottom: 0.5em;
margin-left: 1em;
background: #FFFFDD;
}
div.snippetHighlightLine {
color: red;
}
span.code {
font-family: "Courier New", courier, monotype;
}
span.function {
font-weight: bold;
font-family: "Courier New", courier, monotype;
}
table.variables {
border-collapse: collapse;
margin-left: 1em;
}
td.varName {
vertical-align: top;
font-weight: bold;
padding-left: 0.5em;
padding-right: 0.5em;
}
td.varValue {
padding-left: 0.5em;
padding-right: 0.5em;
}
div.variables {
margin-bottom: 0.5em;
}
span.heading {
font-weight: bold;
}
div.dict {
background: #cccc99;
padding: 2px;
float: left;
}
td.dictKey {
background: #ffff99;
font-weight: bold;
}
td.dictValue {
background: #ffff99;
}
div.list {
background: #7777cc;
padding: 2px;
float: left;
}
div.listItem {
background: #9999ff;
}
div.instance {
background: #cc7777;
padding: 2px;
float: left;
}
span.instanceName {
font-weight: bold;
display: block;
}
span.instanceRepr {
background: #ff9999;
font-family: "Courier New", courier, monotype;
}
div.function {
background: orange;
font-weight: bold;
float: left;
}
</style>
"""
def htmlrepr(x):
    """Render *x* as HTML using the type-specific handler table."""
    handler = htmlReprTypes.get(type(x), htmlUnknown)
    return handler(x)
def saferepr(x):
    """Return repr(x), falling back to a placeholder if repr itself raises.

    Used while rendering tracebacks, where a broken __repr__ must not abort
    the error page.
    """
    try:
        rx = repr(x)
    except Exception:
        # Narrowed from a bare except: a bare clause would also swallow
        # KeyboardInterrupt/SystemExit; only genuine repr() failures belong here.
        rx = "<repr failed! %s instance at %s>" % (x.__class__, id(x))
    return rx
def htmlUnknown(x):
    """Fallback HTML renderer: the escaped repr wrapped in <code> tags."""
    escaped = html.escape(saferepr(x))
    return '<code>' + escaped + '</code>'
def htmlDict(d):
    """Render a dictionary as a nested HTML table of key/value cells."""
    parts = ['<div class="dict"><span class="heading">Dictionary instance @ %s</span>' % hex(id(d))]
    parts.append('<table class="dict">')
    for k, v in d.items():
        if k == '__builtins__':
            # Expanding the builtins namespace would be enormous and useless.
            v = 'builtin dictionary'
        parts.append('<tr><td class="dictKey">%s</td><td class="dictValue">%s</td></tr>' % (htmlrepr(k), htmlrepr(v)))
    parts.append('</table></div>')
    return ''.join(parts)
def htmlList(l):
    """Render a list as a nested HTML block, one div per element."""
    pieces = ['<div class="list"><span class="heading">List instance @ %s</span>' % hex(id(l))]
    for element in l:
        pieces.append('<div class="listItem">%s</div>' % htmlrepr(element))
    pieces.append('</div>')
    return ''.join(pieces)
def htmlInst(i):
    """Render an instance, preferring its own __html__ hook when present."""
    if hasattr(i, "__html__"):
        rendered = i.__html__()
    else:
        rendered = html.escape(saferepr(i))
    return '''<div class="instance"><span class="instanceName">%s instance @ %s</span>
<span class="instanceRepr">%s</span></div>
''' % (i.__class__, hex(id(i)), rendered)
def htmlString(s):
    """Strings render as their escaped repr, with no extra markup."""
    rep = saferepr(s)
    return html.escape(rep)
def htmlFunc(f):
    """Render a function as a one-line description of where it is defined."""
    # f.__code__ replaces the legacy f.func_code alias: __code__ exists on
    # Python 2.6+ and is the only spelling on Python 3, so this renderer no
    # longer breaks on modern interpreters.
    code = f.__code__
    return ('<div class="function">' +
            html.escape("function %s in file %s at line %s" %
                        (f.__name__, code.co_filename,
                         code.co_firstlineno)) +
            '</div>')
# Dispatch table mapping concrete types to their HTML renderers; htmlrepr()
# falls back to htmlUnknown for anything not listed here.
# NOTE(review): DictType/ListType/InstanceType/StringType are Python 2-only
# aliases in the types module — this table must change for any Python 3 port.
htmlReprTypes = {types.DictType: htmlDict,
                 types.ListType: htmlList,
                 types.InstanceType: htmlInst,
                 types.StringType: htmlString,
                 types.FunctionType: htmlFunc}
def htmlIndent(snippetLine):
    """Escape one source line and convert its leading whitespace for HTML.

    NOTE(review): uses the Python 2 ``string`` module functions
    (string.replace/string.rstrip), which were removed in Python 3.
    The replacement literals presumably contain non-breaking-space
    entities/characters — verify against the original file encoding.
    """
    ret = string.replace(string.replace(html.escape(string.rstrip(snippetLine)),
                         ' ', ' '),
                         '\t', ' ')
    return ret
def formatFailure(myFailure):
    """Render a twisted.python.failure.Failure as an HTML traceback page.

    Non-Failure values are rendered as preformatted text.  For a Failure,
    the page contains the stylesheet, the exception type/value, and one
    frame <div> per stack entry with a short source snippet plus tables of
    the self/local/global variables that actually appear in that snippet.
    """
    # %-format fragments used repeatedly below.
    exceptionHTML = """
<p class="error">%s: %s</p>
"""
    frameHTML = """
<div class="location">%s, line %s in <span class="function">%s</span></div>
"""
    snippetLineHTML = """
<div class="snippetLine"><span class="lineno">%s</span><span class="code">%s</span></div>
"""
    snippetHighlightLineHTML = """
<div class="snippetHighlightLine"><span class="lineno">%s</span><span class="code">%s</span></div>
"""
    variableHTML = """
<tr class="varRow"><td class="varName">%s</td><td class="varValue">%s</td></tr>
"""
    if not isinstance(myFailure, failure.Failure):
        return html.PRE(str(myFailure))
    io = StringIO()
    w = io.write
    w(stylesheet)
    # The header exception block links to its duplicate at the page bottom.
    w('<a href="#tbend">')
    w(exceptionHTML % (html.escape(str(myFailure.type)),
                       html.escape(str(myFailure.value))))
    w('</a>')
    w('<div class="stackTrace">')
    first = 1
    for method, filename, lineno, localVars, globalVars in myFailure.frames:
        # Frames compiled from strings have no source file to show.
        if filename == '<string>':
            continue
        if first:
            w('<div class="firstFrame">')
            first = 0
        else:
            w('<div class="frame">')
        w(frameHTML % (filename, lineno, method))
        w('<div class="snippet">')
        textSnippet = ''
        # Two lines of context before and one after the failing line.
        for snipLineNo in range(lineno-2, lineno+2):
            snipLine = linecache.getline(filename, snipLineNo)
            textSnippet += snipLine
            snipLine = htmlIndent(snipLine)
            if snipLineNo == lineno:
                w(snippetHighlightLineHTML % (snipLineNo, snipLine))
            else:
                w(snippetLineHTML % (snipLineNo, snipLine))
        w('</div>')
        # Instance variables
        for name, var in localVars:
            if name == 'self' and hasattr(var, '__dict__'):
                # Only show self attributes that the snippet references.
                usedVars = [ (key, value) for (key, value) in var.__dict__.items()
                             if re.search(r'\W'+'self.'+key+r'\W', textSnippet) ]
                if usedVars:
                    w('<div class="variables"><b>Self</b>')
                    w('<table class="variables">')
                    for key, value in usedVars:
                        w(variableHTML % (key, htmlrepr(value)))
                    w('</table></div>')
                break
        # Local and global vars
        for nm, varList in ('Locals', localVars), ('Globals', globalVars):
            # Again, only variables mentioned in the snippet text.
            usedVars = [ (name, var) for (name, var) in varList
                         if re.search(r'\W'+name+r'\W', textSnippet) ]
            if usedVars:
                w('<div class="variables"><b>%s</b><table class="variables">' % nm)
                for name, var in usedVars:
                    w(variableHTML % (name, htmlrepr(var)))
                w('</table></div>')
        w('</div>') # frame
    w('</div>') # stacktrace
    w('<a name="tbend"> </a>')
    w(exceptionHTML % (html.escape(str(myFailure.type)),
                       html.escape(str(myFailure.value))))
    return io.getvalue()
|
<gh_stars>10-100
#
# BitBake Tests for runqueue task processing
#
# Copyright (C) 2019 <NAME>
#
# SPDX-License-Identifier: GPL-2.0-only
#
import unittest
import os
import tempfile
import subprocess
import sys
import time
#
# TODO:
# Add tests on task ordering (X happens before Y after Z)
#
class RunQueueTests(unittest.TestCase):
    """Integration tests for bitbake's runqueue task scheduling.

    Each test launches a real ``bitbake`` process against the fixture
    metadata in ``runqueue-tests`` (wired up via BBPATH below) and compares
    the tasks recorded in the build's ``task.log`` with an expected set.
    SSTATEVALID and SLOWTASKS steer which setscene tasks the fixture treats
    as valid sstate or artificially delays.
    """

    # Full per-recipe task list when nothing is reusable from sstate.
    alltasks = ['package', 'fetch', 'unpack', 'patch', 'prepare_recipe_sysroot', 'configure',
                'compile', 'install', 'packagedata', 'package_qa', 'package_write_rpm', 'package_write_ipk',
                'populate_sysroot', 'build']
    # Convenience strings marking every sstate-capable task of a1 / b1 valid.
    a1_sstatevalid = "a1:do_package a1:do_package_qa a1:do_packagedata a1:do_package_write_ipk a1:do_package_write_rpm a1:do_populate_lic a1:do_populate_sysroot"
    b1_sstatevalid = "b1:do_package b1:do_package_qa b1:do_packagedata b1:do_package_write_ipk b1:do_package_write_rpm b1:do_populate_lic b1:do_populate_sysroot"

    def run_bitbakecmd(self, cmd, builddir, sstatevalid="", slowtasks="", extraenv=None, cleanup=False):
        """Run *cmd* under the fixture environment and return executed tasks.

        :param cmd: bitbake command line as a list of strings.
        :param builddir: working directory for the build.
        :param sstatevalid: space-separated tasks treated as valid sstate.
        :param slowtasks: tasks the fixture should artificially delay.
        :param extraenv: extra env vars, each also added to BB_ENV_EXTRAWHITE.
        :param cleanup: remove task.log afterwards so a rerun starts clean.
        :return: list of "recipe:task" lines read from task.log (may be empty).
        """
        env = os.environ.copy()
        env["BBPATH"] = os.path.realpath(os.path.join(os.path.dirname(__file__), "runqueue-tests"))
        env["BB_ENV_EXTRAWHITE"] = "SSTATEVALID SLOWTASKS"
        env["SSTATEVALID"] = sstatevalid
        env["SLOWTASKS"] = slowtasks
        if extraenv:
            for k in extraenv:
                env[k] = extraenv[k]
                env["BB_ENV_EXTRAWHITE"] = env["BB_ENV_EXTRAWHITE"] + " " + k
        try:
            output = subprocess.check_output(cmd, env=env, stderr=subprocess.STDOUT,universal_newlines=True, cwd=builddir)
            print(output)
        except subprocess.CalledProcessError as e:
            self.fail("Command %s failed with %s" % (cmd, e.output))
        tasks = []
        tasklog = builddir + "/task.log"
        if os.path.exists(tasklog):
            with open(tasklog, "r") as f:
                tasks = [line.rstrip() for line in f]
            if cleanup:
                os.remove(tasklog)
        return tasks

    def test_no_setscenevalid(self):
        """With no valid sstate, every task runs from scratch."""
        with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
            cmd = ["bitbake", "a1"]
            sstatevalid = ""
            tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid)
            expected = ['a1:' + x for x in self.alltasks]
            self.assertEqual(set(tasks), set(expected))

    def test_single_setscenevalid(self):
        """One valid setscene task replaces only its real counterpart."""
        with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
            cmd = ["bitbake", "a1"]
            sstatevalid = "a1:do_package"
            tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid)
            expected = ['a1:package_setscene', 'a1:fetch', 'a1:unpack', 'a1:patch', 'a1:prepare_recipe_sysroot', 'a1:configure',
                        'a1:compile', 'a1:install', 'a1:packagedata', 'a1:package_qa', 'a1:package_write_rpm', 'a1:package_write_ipk',
                        'a1:populate_sysroot', 'a1:build']
            self.assertEqual(set(tasks), set(expected))

    def test_intermediate_setscenevalid(self):
        """Valid package+populate_sysroot sstate skips the whole build chain."""
        with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
            cmd = ["bitbake", "a1"]
            sstatevalid = "a1:do_package a1:do_populate_sysroot"
            tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid)
            expected = ['a1:package_setscene', 'a1:packagedata', 'a1:package_qa', 'a1:package_write_rpm', 'a1:package_write_ipk',
                        'a1:populate_sysroot_setscene', 'a1:build']
            self.assertEqual(set(tasks), set(expected))

    def test_intermediate_notcovered(self):
        """Everything valid except do_package: downstream comes from sstate."""
        with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
            cmd = ["bitbake", "a1"]
            sstatevalid = "a1:do_package_qa a1:do_packagedata a1:do_package_write_ipk a1:do_package_write_rpm a1:do_populate_lic a1:do_populate_sysroot"
            tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid)
            expected = ['a1:package_write_ipk_setscene', 'a1:package_write_rpm_setscene', 'a1:packagedata_setscene',
                        'a1:package_qa_setscene', 'a1:build', 'a1:populate_sysroot_setscene']
            self.assertEqual(set(tasks), set(expected))

    def test_all_setscenevalid(self):
        """With all sstate valid, only setscene tasks plus build run."""
        with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
            cmd = ["bitbake", "a1"]
            sstatevalid = self.a1_sstatevalid
            tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid)
            expected = ['a1:package_write_ipk_setscene', 'a1:package_write_rpm_setscene', 'a1:packagedata_setscene',
                        'a1:package_qa_setscene', 'a1:build', 'a1:populate_sysroot_setscene']
            self.assertEqual(set(tasks), set(expected))

    def test_no_settasks(self):
        """Stopping at -c patch never touches setscene tasks."""
        with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
            cmd = ["bitbake", "a1", "-c", "patch"]
            sstatevalid = self.a1_sstatevalid
            tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid)
            expected = ['a1:fetch', 'a1:unpack', 'a1:patch']
            self.assertEqual(set(tasks), set(expected))

    def test_mix_covered_notcovered(self):
        """Explicit real-task targets mix with sstate-covered targets."""
        with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
            cmd = ["bitbake", "a1:do_patch", "a1:do_populate_sysroot"]
            sstatevalid = self.a1_sstatevalid
            tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid)
            expected = ['a1:fetch', 'a1:unpack', 'a1:patch', 'a1:populate_sysroot_setscene']
            self.assertEqual(set(tasks), set(expected))

    # Test targets with intermediate setscene tasks alongside a target with no intermediate setscene tasks
    def test_mixed_direct_tasks_setscene_tasks(self):
        with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
            cmd = ["bitbake", "c1:do_patch", "a1"]
            sstatevalid = self.a1_sstatevalid
            tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid)
            expected = ['c1:fetch', 'c1:unpack', 'c1:patch', 'a1:package_write_ipk_setscene', 'a1:package_write_rpm_setscene', 'a1:packagedata_setscene',
                        'a1:package_qa_setscene', 'a1:build', 'a1:populate_sysroot_setscene']
            self.assertEqual(set(tasks), set(expected))

    # This test slows down the execution of do_package_setscene until after other real tasks have
    # started running which tests for a bug where tasks were being lost from the buildable list of real
    # tasks if they weren't in tasks_covered or tasks_notcovered
    def test_slow_setscene(self):
        with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
            cmd = ["bitbake", "a1"]
            sstatevalid = "a1:do_package"
            slowtasks = "a1:package_setscene"
            tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid, slowtasks)
            expected = ['a1:package_setscene', 'a1:fetch', 'a1:unpack', 'a1:patch', 'a1:prepare_recipe_sysroot', 'a1:configure',
                        'a1:compile', 'a1:install', 'a1:packagedata', 'a1:package_qa', 'a1:package_write_rpm', 'a1:package_write_ipk',
                        'a1:populate_sysroot', 'a1:build']
            self.assertEqual(set(tasks), set(expected))

    def test_setscenewhitelist(self):
        """BB_SETSCENE_ENFORCE restricts execution to whitelisted real tasks."""
        with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
            cmd = ["bitbake", "a1"]
            extraenv = {
                "BB_SETSCENE_ENFORCE" : "1",
                "BB_SETSCENE_ENFORCE_WHITELIST" : "a1:do_package_write_rpm a1:do_build"
            }
            sstatevalid = "a1:do_package a1:do_package_qa a1:do_packagedata a1:do_package_write_ipk a1:do_populate_lic a1:do_populate_sysroot"
            tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid, extraenv=extraenv)
            expected = ['a1:packagedata_setscene', 'a1:package_qa_setscene', 'a1:package_write_ipk_setscene',
                        'a1:populate_sysroot_setscene', 'a1:package_setscene']
            self.assertEqual(set(tasks), set(expected))

    # Tests for problems with dependencies between setscene tasks
    def test_no_setscenevalid_harddeps(self):
        """d1 hard-depends on a1: both run fully without sstate."""
        with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
            cmd = ["bitbake", "d1"]
            sstatevalid = ""
            tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid)
            expected = ['a1:package', 'a1:fetch', 'a1:unpack', 'a1:patch', 'a1:prepare_recipe_sysroot', 'a1:configure',
                        'a1:compile', 'a1:install', 'a1:packagedata', 'a1:package_write_rpm', 'a1:package_write_ipk',
                        'a1:populate_sysroot', 'd1:package', 'd1:fetch', 'd1:unpack', 'd1:patch', 'd1:prepare_recipe_sysroot', 'd1:configure',
                        'd1:compile', 'd1:install', 'd1:packagedata', 'd1:package_qa', 'd1:package_write_rpm', 'd1:package_write_ipk',
                        'd1:populate_sysroot', 'd1:build']
            self.assertEqual(set(tasks), set(expected))

    def test_no_setscenevalid_withdeps(self):
        """b1 depends on a1; a1's build/package_qa are not needed."""
        with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
            cmd = ["bitbake", "b1"]
            sstatevalid = ""
            tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid)
            expected = ['a1:' + x for x in self.alltasks] + ['b1:' + x for x in self.alltasks]
            expected.remove('a1:build')
            expected.remove('a1:package_qa')
            self.assertEqual(set(tasks), set(expected))

    def test_single_a1_setscenevalid_withdeps(self):
        """Valid a1:do_package sstate with b1 depending on a1."""
        with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
            cmd = ["bitbake", "b1"]
            sstatevalid = "a1:do_package"
            tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid)
            expected = ['a1:package_setscene', 'a1:fetch', 'a1:unpack', 'a1:patch', 'a1:prepare_recipe_sysroot', 'a1:configure',
                        'a1:compile', 'a1:install', 'a1:packagedata', 'a1:package_write_rpm', 'a1:package_write_ipk',
                        'a1:populate_sysroot'] + ['b1:' + x for x in self.alltasks]
            self.assertEqual(set(tasks), set(expected))

    def test_single_b1_setscenevalid_withdeps(self):
        """Valid b1:do_package sstate with b1 depending on a1."""
        with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
            cmd = ["bitbake", "b1"]
            sstatevalid = "b1:do_package"
            tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid)
            expected = ['a1:package', 'a1:fetch', 'a1:unpack', 'a1:patch', 'a1:prepare_recipe_sysroot', 'a1:configure',
                        'a1:compile', 'a1:install', 'a1:packagedata', 'a1:package_write_rpm', 'a1:package_write_ipk',
                        'a1:populate_sysroot', 'b1:package_setscene'] + ['b1:' + x for x in self.alltasks]
            expected.remove('b1:package')
            self.assertEqual(set(tasks), set(expected))

    def test_intermediate_setscenevalid_withdeps(self):
        """Partial sstate on both a1 and b1 with a dependency between them."""
        with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
            cmd = ["bitbake", "b1"]
            sstatevalid = "a1:do_package a1:do_populate_sysroot b1:do_package"
            tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid)
            expected = ['a1:package_setscene', 'a1:packagedata', 'a1:package_write_rpm', 'a1:package_write_ipk',
                        'a1:populate_sysroot_setscene', 'b1:package_setscene'] + ['b1:' + x for x in self.alltasks]
            expected.remove('b1:package')
            self.assertEqual(set(tasks), set(expected))

    def test_all_setscenevalid_withdeps(self):
        """All sstate valid for a1 and b1: only setscene tasks plus b1:build."""
        with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
            cmd = ["bitbake", "b1"]
            sstatevalid = self.a1_sstatevalid + " " + self.b1_sstatevalid
            tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid)
            expected = ['a1:package_write_ipk_setscene', 'a1:package_write_rpm_setscene', 'a1:packagedata_setscene',
                        'b1:build', 'a1:populate_sysroot_setscene', 'b1:package_write_ipk_setscene', 'b1:package_write_rpm_setscene',
                        'b1:packagedata_setscene', 'b1:package_qa_setscene', 'b1:populate_sysroot_setscene']
            self.assertEqual(set(tasks), set(expected))

    def test_multiconfig_setscene_optimise(self):
        """Multiconfig builds reuse setscene results across configurations."""
        with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
            extraenv = {
                "BBMULTICONFIG" : "mc1 mc2",
                "BB_SIGNATURE_HANDLER" : "basic"
            }
            cmd = ["bitbake", "b1", "mc:mc1:b1", "mc:mc2:b1"]
            setscenetasks = ['package_write_ipk_setscene', 'package_write_rpm_setscene', 'packagedata_setscene',
                             'populate_sysroot_setscene', 'package_qa_setscene']
            sstatevalid = ""
            tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid, extraenv=extraenv)
            expected = ['a1:' + x for x in self.alltasks] + ['b1:' + x for x in self.alltasks] + \
                       ['mc1:b1:' + x for x in setscenetasks] + ['mc1:a1:' + x for x in setscenetasks] + \
                       ['mc2:b1:' + x for x in setscenetasks] + ['mc2:a1:' + x for x in setscenetasks] + \
                       ['mc1:b1:build', 'mc2:b1:build']
            for x in ['mc1:a1:package_qa_setscene', 'mc2:a1:package_qa_setscene', 'a1:build', 'a1:package_qa']:
                expected.remove(x)
            self.assertEqual(set(tasks), set(expected))

    def test_multiconfig_bbmask(self):
        # This test validates that multiconfigs can independently mask off
        # recipes they do not want with BBMASK. It works by having recipes
        # that will fail to parse for mc1 and mc2, then making each multiconfig
        # build the one that does parse. This ensures that the recipes are in
        # each multiconfigs BBFILES, but each is masking only the one that
        # doesn't parse
        with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
            extraenv = {
                "BBMULTICONFIG" : "mc1 mc2",
                "BB_SIGNATURE_HANDLER" : "basic",
                "EXTRA_BBFILES": "${COREBASE}/recipes/fails-mc/*.bb",
            }
            cmd = ["bitbake", "mc:mc1:fails-mc2", "mc:mc2:fails-mc1"]
            self.run_bitbakecmd(cmd, tempdir, "", extraenv=extraenv)

    def test_multiconfig_mcdepends(self):
        """mcdepends pull tasks from another multiconfig and track its hashes."""
        with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
            extraenv = {
                "BBMULTICONFIG" : "mc1 mc2",
                "BB_SIGNATURE_HANDLER" : "TestMulticonfigDepends",
                "EXTRA_BBFILES": "${COREBASE}/recipes/fails-mc/*.bb",
            }
            tasks = self.run_bitbakecmd(["bitbake", "mc:mc1:f1"], tempdir, "", extraenv=extraenv, cleanup=True)
            expected = ["mc1:f1:%s" % t for t in self.alltasks] + \
                       ["mc2:a1:%s" % t for t in self.alltasks]
            self.assertEqual(set(tasks), set(expected))
            # A rebuild does nothing
            tasks = self.run_bitbakecmd(["bitbake", "mc:mc1:f1"], tempdir, "", extraenv=extraenv, cleanup=True)
            self.assertEqual(set(tasks), set())
            # Test that a signature change in the dependent task causes
            # mcdepends to rebuild
            tasks = self.run_bitbakecmd(["bitbake", "mc:mc2:a1", "-c", "compile", "-f"], tempdir, "", extraenv=extraenv, cleanup=True)
            expected = ["mc2:a1:compile"]
            self.assertEqual(set(tasks), set(expected))
            rerun_tasks = self.alltasks[:]
            for x in ("fetch", "unpack", "patch", "prepare_recipe_sysroot", "configure", "compile"):
                rerun_tasks.remove(x)
            tasks = self.run_bitbakecmd(["bitbake", "mc:mc1:f1"], tempdir, "", extraenv=extraenv, cleanup=True)
            expected = ["mc1:f1:%s" % t for t in rerun_tasks] + \
                       ["mc2:a1:%s" % t for t in rerun_tasks]
            self.assertEqual(set(tasks), set(expected))

    @unittest.skipIf(sys.version_info < (3, 5, 0), 'Python 3.5 or later required')
    def test_hashserv_single(self):
        """Hash equivalence: a forced rebuild with equivalent output is reused."""
        with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
            extraenv = {
                "BB_HASHSERVE" : "auto",
                "BB_SIGNATURE_HANDLER" : "TestEquivHash"
            }
            cmd = ["bitbake", "a1", "b1"]
            setscenetasks = ['package_write_ipk_setscene', 'package_write_rpm_setscene', 'packagedata_setscene',
                             'populate_sysroot_setscene', 'package_qa_setscene']
            sstatevalid = ""
            tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid, extraenv=extraenv, cleanup=True)
            expected = ['a1:' + x for x in self.alltasks] + ['b1:' + x for x in self.alltasks]
            self.assertEqual(set(tasks), set(expected))
            cmd = ["bitbake", "a1", "-c", "install", "-f"]
            tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid, extraenv=extraenv, cleanup=True)
            expected = ['a1:install']
            self.assertEqual(set(tasks), set(expected))
            cmd = ["bitbake", "a1", "b1"]
            tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid, extraenv=extraenv, cleanup=True)
            expected = ['a1:populate_sysroot', 'a1:package', 'a1:package_write_rpm_setscene', 'a1:packagedata_setscene',
                        'a1:package_write_ipk_setscene', 'a1:package_qa_setscene', 'a1:build']
            self.assertEqual(set(tasks), set(expected))
            self.shutdown(tempdir)

    @unittest.skipIf(sys.version_info < (3, 5, 0), 'Python 3.5 or later required')
    def test_hashserv_double(self):
        """Hash equivalence across two rebuilt dependencies of e1."""
        with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
            extraenv = {
                "BB_HASHSERVE" : "auto",
                "BB_SIGNATURE_HANDLER" : "TestEquivHash"
            }
            cmd = ["bitbake", "a1", "b1", "e1"]
            setscenetasks = ['package_write_ipk_setscene', 'package_write_rpm_setscene', 'packagedata_setscene',
                             'populate_sysroot_setscene', 'package_qa_setscene']
            sstatevalid = ""
            tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid, extraenv=extraenv, cleanup=True)
            expected = ['a1:' + x for x in self.alltasks] + ['b1:' + x for x in self.alltasks] + ['e1:' + x for x in self.alltasks]
            self.assertEqual(set(tasks), set(expected))
            cmd = ["bitbake", "a1", "b1", "-c", "install", "-fn"]
            tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid, extraenv=extraenv, cleanup=True)
            cmd = ["bitbake", "e1"]
            tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid, extraenv=extraenv, cleanup=True)
            expected = ['a1:package', 'a1:install', 'b1:package', 'b1:install', 'a1:populate_sysroot', 'b1:populate_sysroot',
                        'a1:package_write_ipk_setscene', 'b1:packagedata_setscene', 'b1:package_write_rpm_setscene',
                        'a1:package_write_rpm_setscene', 'b1:package_write_ipk_setscene', 'a1:packagedata_setscene']
            self.assertEqual(set(tasks), set(expected))
            self.shutdown(tempdir)

    @unittest.skipIf(sys.version_info < (3, 5, 0), 'Python 3.5 or later required')
    def test_hashserv_multiple_setscene(self):
        # Runs e1:do_package_setscene twice
        with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
            extraenv = {
                "BB_HASHSERVE" : "auto",
                "BB_SIGNATURE_HANDLER" : "TestEquivHash"
            }
            cmd = ["bitbake", "a1", "b1", "e1"]
            setscenetasks = ['package_write_ipk_setscene', 'package_write_rpm_setscene', 'packagedata_setscene',
                             'populate_sysroot_setscene', 'package_qa_setscene']
            sstatevalid = ""
            tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid, extraenv=extraenv, cleanup=True)
            expected = ['a1:' + x for x in self.alltasks] + ['b1:' + x for x in self.alltasks] + ['e1:' + x for x in self.alltasks]
            self.assertEqual(set(tasks), set(expected))
            cmd = ["bitbake", "a1", "b1", "-c", "install", "-fn"]
            tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid, extraenv=extraenv, cleanup=True)
            cmd = ["bitbake", "e1"]
            sstatevalid = "e1:do_package"
            tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid, extraenv=extraenv, cleanup=True, slowtasks="a1:populate_sysroot b1:populate_sysroot")
            expected = ['a1:package', 'a1:install', 'b1:package', 'b1:install', 'a1:populate_sysroot', 'b1:populate_sysroot',
                        'a1:package_write_ipk_setscene', 'b1:packagedata_setscene', 'b1:package_write_rpm_setscene',
                        'a1:package_write_rpm_setscene', 'b1:package_write_ipk_setscene', 'a1:packagedata_setscene',
                        'e1:package_setscene']
            self.assertEqual(set(tasks), set(expected))
            # Each task must have run exactly once despite the double setscene.
            for i in expected:
                self.assertEqual(tasks.count(i), 1, "%s not in task list once" % i)
            self.shutdown(tempdir)

    def shutdown(self, tempdir):
        """Block until the hash server has shut down its socket."""
        # Wait for the hashserve socket to disappear else we'll see races with the tempdir cleanup
        while os.path.exists(tempdir + "/hashserve.sock"):
            time.sleep(0.5)
|
import inspect
import json
from django.core.paginator import Paginator
from django.http import JsonResponse
from django.shortcuts import render, HttpResponseRedirect
from webapp.controller.common import *
from webapp.forms.SystemMessageForm import SystemMessageForm
from webapp.models import *
from webapp.utils.form_to_obj import *
from webapp.utils.save_operation_log import save_operation_log
# Application display name ("registration system"), used as a title prefix.
sys_msg = '报名系统'
# NOTE(review): module-level mutable dict shared across requests — looks like
# a scratch result object; verify no view mutates it concurrently.
result = {'status': True, 'message': ''}
def admin_message(request):
    """
    System announcement list page (系统公告).

    Renders a paginated list of system-wide announcements together with the
    current user's sent-message and unconfirmed-message counters.

    :param request: HTTP request; session ``username`` identifies the user.
    :return: rendered system_message.html page.
    """
    # Announcements: message_range=1 and still visible on the sender side.
    system_messages_system_announcement = SystemMessage.objects.filter(message_range=1,
                                                                       hidden_status_sender=2).order_by(
        '-id')
    system_announcement = system_messages_system_announcement.count()
    message_already_sent = 0
    not_confirm = 0
    username = request.session.get('username', None)
    registers = RegisterUserInfo.objects.filter(username=username)
    if len(registers) == 1:
        user_infos = UserInfo.objects.filter(register_user_info=registers[0]).order_by('-id')
        if len(user_infos) == 1:
            system_messages = SystemMessage.objects.filter(sender=user_infos[0], hidden_status_sender=2).order_by(
                '-id')
            message_already_sent = system_messages.count()
            system_messages = SystemMessage.objects.filter(receiver=user_infos[0],
                                                           hidden_status_receiver=2).order_by('-id')
            # feedback_status=2 apparently marks "not yet confirmed" — see
            # the not_confirm usage in the sibling views; verify with model.
            not_confirm = system_messages.filter(feedback_status=2).count()
    paginator = Paginator(system_messages_system_announcement, 10)
    page = request.GET.get('page')
    contacts = paginator.get_page(page)
    teacher_infos = TeacherInfo.objects.all()
    school_terms = SchoolTerm.objects.filter().order_by('-id')
    title_msg = "系统公告列表"
    return render(request, "page_main_controller/system_message/system_message.html",
                  {'title_msg': title_msg, "contacts": contacts, 'message_type': 'announcement',
                   'system_message_type': '系统公告',
                   'system_announcement': system_announcement, 'message_already_sent': message_already_sent,
                   'message_not_confirm': not_confirm,
                   'teacher_infos': teacher_infos,
                   'school_terms': school_terms, 'school_term': 0, 'teacher_info': 0})
def send(request):
    """
    List the system messages sent by the current user (发送信息列表).

    :param request: HTTP request; session ``username`` identifies the sender.
    :return: rendered system_message.html page with a page of sent messages.
    """
    username = request.session.get('username', None)
    title_msg = "发送的消息列表"
    # Default to an empty sequence so Paginator still works when the user
    # cannot be resolved (previously None, which made Paginator(None, 10)
    # raise TypeError for anonymous or unmatched users).
    system_messages_send = []
    message_already_sent = 0
    not_confirm = 0
    system_messages = SystemMessage.objects.filter(message_range=1, hidden_status_sender=2).order_by(
        '-id')
    system_announcement = system_messages.count()
    if username:
        registers = RegisterUserInfo.objects.filter(username=username)
        if len(registers) == 1:
            user_infos = UserInfo.objects.filter(register_user_info=registers[0]).order_by('-id')
            if len(user_infos) == 1:
                system_messages_send = SystemMessage.objects.filter(sender=user_infos[0],
                                                                    hidden_status_sender=2).order_by(
                    '-id')
                message_already_sent = system_messages_send.count()
                system_messages = SystemMessage.objects.filter(receiver=user_infos[0],
                                                               hidden_status_receiver=2).order_by('-id')
                not_confirm = system_messages.filter(feedback_status=2).count()
    paginator = Paginator(system_messages_send, 10)
    page = request.GET.get('page')
    contacts = paginator.get_page(page)
    teacher_infos = TeacherInfo.objects.all()
    school_terms = SchoolTerm.objects.filter().order_by('-id')
    return render(request, "page_main_controller/system_message/system_message.html",
                  {'title_msg': title_msg, "contacts": contacts, 'message_type': 'send', 'system_message_type': '已发送',
                   'message_already_sent': message_already_sent, 'message_not_confirm': not_confirm,
                   'system_announcement': system_announcement,
                   'teacher_infos': teacher_infos,
                   'school_terms': school_terms, 'school_term': 0, 'teacher_info': 0})
def receive(request):
    """
    List the system messages received by the current user (接收信息列表).

    As a side effect, any message still marked unread (receive_status=2)
    is flipped to "viewed" while rendering this page.

    :param request: HTTP request; session ``username`` identifies the user.
    :return: rendered system_message.html page with a page of received messages.
    """
    username = request.session.get('username', None)
    title_msg = "接收的消息列表"
    # Default to an empty sequence so Paginator still works when the user
    # cannot be resolved (previously None, which made Paginator(None, 10)
    # raise TypeError).  Leftover debug print() calls were also removed.
    system_messages_receiver = []
    message_already_sent = 0
    not_confirm = 0
    system_messages = SystemMessage.objects.filter(message_range=1, hidden_status_sender=2).order_by(
        '-id')
    system_announcement = system_messages.count()
    if username:
        registers = RegisterUserInfo.objects.filter(username=username)
        if len(registers) == 1:
            user_infos = UserInfo.objects.filter(register_user_info=registers[0]).order_by('-id')
            # NOTE(review): when the user maps to several UserInfo rows this
            # falls back to showing *all* visible received messages — confirm
            # that is intended rather than an oversight.
            system_messages_receiver = SystemMessage.objects.filter(hidden_status_receiver=2).order_by('-id')
            if len(user_infos) == 1:
                system_messages_receiver = SystemMessage.objects.filter(receiver=user_infos[0],
                                                                        hidden_status_receiver=2).order_by('-id')
                not_confirm = system_messages_receiver.filter(feedback_status=2).count()
                receive_status_not = system_messages_receiver.filter(receive_status=2)
                system_messages = SystemMessage.objects.filter(sender=user_infos[0], hidden_status_sender=2).order_by(
                    '-id')
                message_already_sent = system_messages.count()
                for system_message in receive_status_not:
                    # Mark each unread message as viewed (将未查看的信息状态设置为已查看).
                    system_message.receive_status = "1"
                    system_message.save()
    paginator = Paginator(system_messages_receiver, 10)
    page = request.GET.get('page')
    contacts = paginator.get_page(page)
    teacher_infos = TeacherInfo.objects.all()
    school_terms = SchoolTerm.objects.filter().order_by('-id')
    return render(request, "page_main_controller/system_message/system_message.html",
                  {'title_msg': title_msg, "contacts": contacts, 'message_type': 'receive',
                   'message_not_confirm': not_confirm,
                   'message_already_sent': message_already_sent, 'system_announcement': system_announcement,
                   'system_message_type': '已接收', 'teacher_infos': teacher_infos,
                   'school_terms': school_terms, 'school_term': 0, 'teacher_info': 0})
def message_add(request):
    """
    Render the compose page for a new system message (添加一条信息).

    :param request: HTTP request.
    :return: rendered system_message_send_page.html with sidebar counters.
    """
    title_msg = '添加一条信息'
    not_confirm, message_already_sent, system_announcement = get_message_infos(request)
    teacher_infos = TeacherInfo.objects.all()
    school_terms = SchoolTerm.objects.filter().order_by('-id')
    context = {
        'title_msg': title_msg,
        'has_receiver': "false",
        'has_receiver_message': 'false',
        'teacher_infos': teacher_infos,
        'school_terms': school_terms,
        'school_term': 0,
        'teacher_info': 0,
        'message_not_confirm': not_confirm,
        'message_already_sent': message_already_sent,
        'system_announcement': system_announcement,
    }
    return render(request, "page_main_controller/system_message/system_message_send_page.html", context)
def message_to_receiver(request):
    """Render the compose page pre-addressed to a specific student record.

    ``record_id`` (GET) is a StudentInfo primary key. For an administrator the
    message is addressed to the student's responsible teacher's user; for any
    other sender it is addressed to the student's own user. Missing record id
    or an unmatched student renders the generic error page.

    :param request: incoming HTTP request
    :return: rendered compose page, or the error page
    """
    not_confirm, message_already_sent, system_announcement = get_message_infos(request)
    receiver = request.GET.get('record_id', None)
    title_msg = '发送信息编辑页面'
    teacher_infos = TeacherInfo.objects.all()
    school_terms = SchoolTerm.objects.filter().order_by('-id')
    register_info_user = RegisterUserInfo.objects.get(username=request.session.get('username', None))
    if register_info_user.role.role_name == 'administrator':
        # Sender is a school administrator: route the message to the unit
        # leader (teacher) responsible for this student.
        if receiver:
            # Receiver record was specified up front.
            receiver_students = StudentInfo.objects.filter(id=receiver).order_by('-id')
            if len(receiver_students) > 0:
                receiver_student = receiver_students[0]
                return render(request, "page_main_controller/system_message/system_message_send_page.html",
                              {'title_msg': title_msg, 'receiver': receiver_student.teacher_info.user_info,
                               'has_receiver': 'true',
                               'teacher_infos': teacher_infos,
                               'school_terms': school_terms, 'school_term': 0, 'teacher_info': 0,
                               'message_not_confirm': not_confirm,
                               'message_already_sent': message_already_sent,
                               'system_announcement': system_announcement})
            else:
                # record_id matched no student
                title_msg = sys_msg + '-错误信息展示页面'
                message = '系统提示:没有找到这条记录基础信息,请您重试或查证后再操作!'
                return render(request, "page_main_controller/message.html",
                              {'title_msg': title_msg, 'message': message})
        else:
            # no record_id supplied
            title_msg = sys_msg + '-错误信息展示页面'
            message = '系统提示:没有找到这条记录基础信息,请您重试或查证后再操作!'
            return render(request, "page_main_controller/message.html",
                          {'title_msg': title_msg, 'message': message})
    else:
        if receiver:
            # Receiver record was specified up front: non-admin senders write
            # directly to the student's own user account.
            receiver_students = StudentInfo.objects.filter(id=receiver).order_by('-id')
            if len(receiver_students) > 0:
                receiver_student = receiver_students[0]
                return render(request, "page_main_controller/system_message/system_message_send_page.html",
                              {'title_msg': title_msg, 'receiver': receiver_student.user_info,
                               'has_receiver': 'true',
                               'teacher_infos': teacher_infos,
                               'school_terms': school_terms, 'school_term': 0, 'teacher_info': 0,
                               'message_not_confirm': not_confirm,
                               'message_already_sent': message_already_sent,
                               'system_announcement': system_announcement})
            else:
                # record_id matched no student
                title_msg = sys_msg + '-错误信息展示页面'
                message = '系统提示:没有找到这条记录基础信息,请您重试或查证后再操作!'
                return render(request, "page_main_controller/message.html",
                              {'title_msg': title_msg, 'message': message})
        else:
            # no record_id supplied
            title_msg = sys_msg + '-错误信息展示页面'
            message = '系统提示:没有找到这条记录基础信息,请您重试或查证后再操作!'
            return render(request, "page_main_controller/message.html",
                          {'title_msg': title_msg, 'message': message})
def message_to_message(request):
    """Render the compose page pre-targeted as a reply to an existing message.

    :param request: incoming HTTP request; ``record_id`` (GET) is the id of
        the SystemMessage being replied to
    :return: rendered compose page, or the error page on any lookup failure
    """
    message_id = request.GET.get('record_id', None)
    not_confirm, message_already_sent, system_announcement = get_message_infos(request)
    if not message_id:
        return render(request, "page_main_controller/message.html",
                      {'title_msg': sys_msg + '-错误信息展示页面',
                       'message': '系统提示:未能获取到相关信息!'})
    originals = SystemMessage.objects.filter(id=message_id).order_by('-id')
    if len(originals) == 0:
        return render(request, "page_main_controller/message.html",
                      {'title_msg': sys_msg + '-错误信息展示页面',
                       'message': '系统提示:没有找到这条记录基础信息,请您重试或查证后再操作!'})
    return render(request, "page_main_controller/system_message/system_message_send_page.html",
                  {'title_msg': '发送信息编辑页面',
                   'receiver_message': originals[0],
                   'has_receiver_message': 'true',
                   'teacher_infos': TeacherInfo.objects.all(),
                   'school_terms': SchoolTerm.objects.filter().order_by('-id'),
                   'school_term': 0, 'teacher_info': 0,
                   'message_not_confirm': not_confirm,
                   'message_already_sent': message_already_sent,
                   'system_announcement': system_announcement})
def save_system_message(request):
    """Validate and persist a system message from the compose form.

    POST only. ``message_range`` selects the delivery mode: a direct message
    to one receiver (or a "-"-joined list of receiver ids), a confirmation
    reply linked to an earlier message, or a broadcast to a whole group
    (administrators, all teachers, a teacher's students, or a student's own
    teacher). Redirects to the "sent" list on success; renders the error page
    on any failure.

    Bug fixed: the fan-out loops previously re-saved the SAME model instance,
    so after the first ``save()`` assigned a primary key every later ``save()``
    updated that one row — only the last receiver ever got the message. Each
    loop now clears ``pk`` before saving to force an INSERT per receiver.
    """
    try:
        if request.method == 'POST':
            object_form = SystemMessageForm(request.POST)
            if object_form.is_valid():
                form_object_system_message = form_to_obj(object_form.cleaned_data, SystemMessage())
                form_object_system_message.sender = UserInfo.objects.get(
                    register_user_info__username=request.session.get('username', None))
                message_range = object_form.cleaned_data.get('message_range', None)
                if str(message_range) not in message_range_no_person:
                    # Not a broadcast: resolve an explicit receiver, or the
                    # message being confirmed.
                    receiver = object_form.cleaned_data.get('receiver', None)
                    feedback_message = object_form.cleaned_data.get('feedback_message', None)
                    if receiver:
                        # A freshly composed message.
                        if type(receiver) == str:
                            if "-" in receiver:
                                # Fan out to every id in the "-"-joined list.
                                ids = str(receiver).split("-")
                                for id_str in ids:
                                    # Clear pk so each save INSERTs a new row.
                                    form_object_system_message.pk = None
                                    form_object_system_message.receiver = UserInfo.objects.get(id=int(id_str))
                                    form_object_system_message.save()
                            else:
                                if len(receiver) > 0:
                                    form_object_system_message.receiver = UserInfo.objects.get(id=int(receiver))
                                    form_object_system_message.save()
                                else:
                                    title_msg = sys_msg + '-错误信息展示页面'
                                    message = '系统提示:收信人信息获取失败!'
                                    return render(request, "page_main_controller/message.html",
                                                  {'title_msg': title_msg, 'message': message})
                        else:
                            form_object_system_message.receiver = UserInfo.objects.get(id=receiver)
                            form_object_system_message.save()
                    elif feedback_message:
                        # A confirmation reply: link it to the original and
                        # address it back to the original sender.
                        feedback_message_objects = SystemMessage.objects.filter(id=feedback_message).order_by('-id')
                        if feedback_message_objects.count() > 0:
                            form_object_system_message.feedback_message = feedback_message_objects[0]
                            form_object_system_message.receiver = feedback_message_objects[0].sender
                            form_object_system_message.save()
                        else:
                            title_msg = sys_msg + '-错误信息展示页面'
                            message = '系统提示:确认回复信件信息获取异常!'
                            return render(request, "page_main_controller/message.html",
                                          {'title_msg': title_msg, 'message': message})
                    else:
                        title_msg = sys_msg + '-错误信息展示页面'
                        message = '系统提示:收信人信息获取异常!'
                        return render(request, "page_main_controller/message.html",
                                      {'title_msg': title_msg, 'message': message})
                else:
                    if str(message_range) == '2':
                        # Broadcast to every school administrator.
                        administrator_user_infos = UserInfo.objects.filter(
                            register_user_info__role__role_name='administrator').order_by('-id')
                        if administrator_user_infos.count() > 0:
                            for administrator_user_info in administrator_user_infos:
                                form_object_system_message.pk = None  # new row per admin
                                form_object_system_message.receiver = administrator_user_info
                                form_object_system_message.save()
                        else:
                            title_msg = sys_msg + '-错误信息展示页面'
                            message = '系统提示:学校管理员信息获取失败,请完善学校管理员的用户基础信息!'
                            return render(request, "page_main_controller/message.html",
                                          {'title_msg': title_msg, 'message': message})
                    elif str(message_range) == '3':
                        # Broadcast to every teacher.
                        teachers = TeacherInfo.objects.all().order_by('-id')
                        if teachers.count() > 0:
                            for teacher in teachers:
                                form_object_system_message.pk = None  # new row per teacher
                                form_object_system_message.receiver = teacher.user_info
                                form_object_system_message.save()
                        else:
                            title_msg = sys_msg + '-错误信息展示页面'
                            message = '系统提示:您尚未添加负责人!'
                            return render(request, "page_main_controller/message.html",
                                          {'title_msg': title_msg, 'message': message})
                    elif str(message_range) == '6':
                        # Broadcast to every student supervised by the sender.
                        username = request.session.get('username', None)
                        register_user_info = RegisterUserInfo.objects.get(username=username)
                        teacher_info = TeacherInfo.objects.get(
                            user_info=UserInfo.objects.get(register_user_info=register_user_info))
                        student_infos = StudentInfo.objects.filter(teacher_info=teacher_info).order_by('-id')
                        if student_infos.count() > 0:
                            for student in student_infos:
                                form_object_system_message.pk = None  # new row per student
                                form_object_system_message.receiver = student.user_info
                                form_object_system_message.save()
                        else:
                            title_msg = sys_msg + '-错误信息展示页面'
                            message = '系统提示:您还没有负责人的学员!'
                            return render(request, "page_main_controller/message.html",
                                          {'title_msg': title_msg, 'message': message})
                    elif str(message_range) == '8':
                        # Send to the teacher responsible for the current student.
                        username = request.session.get('username', None)
                        register_user_info = RegisterUserInfo.objects.get(username=username)
                        student_infos = StudentInfo.objects.filter(
                            user_info=UserInfo.objects.get(register_user_info=register_user_info)).order_by('-id')
                        if student_infos.count() > 0:
                            student_info = student_infos[0]
                            if student_info.teacher_info:
                                form_object_system_message.receiver = student_info.teacher_info.user_info
                                form_object_system_message.save()
                            else:
                                title_msg = sys_msg + '-错误信息展示页面'
                                message = '系统提示:请正确选择负责人'
                                return render(request, "page_main_controller/message.html",
                                              {'title_msg': title_msg, 'message': message})
                        else:
                            title_msg = sys_msg + '-错误信息展示页面'
                            message = '系统提示:您尚未填报任何职业信息,请返回首页填报鉴定职业'
                            return render(request, "page_main_controller/message.html",
                                          {'title_msg': title_msg, 'message': message})
                    else:
                        # System announcement: stored once with no receiver.
                        form_object_system_message.save()
        return HttpResponseRedirect('/report/send/')
    except Exception as e:
        title_msg = sys_msg + '-错误信息展示页面'
        message = '系统提示:未能获取到相关信息!错误信息提示:' + str(e)
        return render(request, "page_main_controller/message.html",
                      {'title_msg': title_msg, 'message': message})
def system_message_detail(request):
    """Show a single message's detail page.

    :param request: incoming HTTP request; ``record_id`` and optionally
        ``message_type`` come from the query string
    :return: rendered detail page, or the error page on any lookup failure
    """
    not_confirm, message_already_sent, system_announcement = get_message_infos(request)
    record_id = request.GET.get('record_id', None)
    message_type = request.GET.get('message_type', None)
    if not record_id:
        return render(request, "page_main_controller/message.html",
                      {'title_msg': sys_msg + '-错误信息展示页面',
                       'message': '系统提示:未能获取到相关信息!'})
    system_messages = SystemMessage.objects.filter(id=record_id).order_by("-id")
    if len(system_messages) == 0:
        return render(request, "page_main_controller/message.html",
                      {'title_msg': sys_msg + '-错误信息展示页面',
                       'message': '系统提示:获取数据记录数为 0 条'})
    context = {'title_msg': '查看消息详情',
               'system_message': system_messages[0],
               'message_not_confirm': not_confirm,
               'message_already_sent': message_already_sent,
               'system_announcement': system_announcement}
    if message_type:
        # only forwarded to the template when the caller supplied it
        context['message_type'] = message_type
    return render(request, "page_main_controller/system_message/system_message_detail.html", context)
def system_message_confirm(request):
    """Mark a received message as confirmed (feedback_status '2' -> '1').

    POST only; expects ``record_id``. The message must still be visible to the
    receiver and already viewed before it can be confirmed. Always responds
    with a JSON payload and records an operation log entry.

    Bug fixed: the function mutated a shared module-level ``result`` dict,
    which can bleed state between concurrent requests; it now builds a
    request-local dict.
    """
    result = {}
    operation_object = None
    try:
        if request.method == 'POST':
            record_id = request.POST.get('record_id', None)
            if int(record_id) > 0:
                object_infos = SystemMessage.objects.filter(id=record_id)
                if len(object_infos) == 1:
                    object_info = object_infos[0]
                    receive_status = object_info.receive_status
                    feedback_status = object_info.feedback_status
                    hidden_status_receiver = object_info.hidden_status_receiver
                    if hidden_status_receiver == '2':
                        # Message has not been hidden (cleared) by the receiver.
                        if receive_status == '1':
                            # Message has been viewed.
                            if feedback_status == '2':
                                # Not yet confirmed: flip the confirmation flag.
                                object_info.feedback_status = "1"
                                object_info.save()
                                operation_object = object_info.id
                                result['status'] = True
                                result['message'] = '信息确认成功'
                                result['data'] = json.dumps({}, ensure_ascii=False)
                            else:
                                result['status'] = False
                                result['message'] = '已确认:请不要重复确认!'
                                result['data'] = json.dumps({}, ensure_ascii=False)
                        else:
                            result['status'] = False
                            result['message'] = '确认失败:信息未查看!'
                            result['data'] = json.dumps({}, ensure_ascii=False)
                    else:
                        result['status'] = False
                        result['message'] = '确认失败:此记录已清理!'
                        result['data'] = json.dumps({}, ensure_ascii=False)
                else:
                    result['status'] = False
                    result['message'] = '确认失败:无此消息记录!'
                    result['data'] = json.dumps({}, ensure_ascii=False)
            else:
                result['status'] = False
                result['message'] = '确认失败:所要操作的记录不存在!'
                result['data'] = ''
        else:
            result['status'] = False
            result['message'] = '确认失败:系统操作请求方式异常!'
            result['data'] = ''
    except Exception as e:
        result['status'] = False
        result['message'] = "系统异常:" + str(e)
        result['data'] = ''
    result["level"] = log_level_change_status
    save_operation_log(request, inspect.stack()[0][3], "uid:" + str(operation_object), result)
    return JsonResponse(result, safe=False)
def system_message_confirm_send(request):
    """Render the confirm-and-reply compose page for a received message.

    :param request: incoming HTTP request; ``record_id`` (GET) is the id of
        the message being confirmed
    :return: rendered confirm/reply page, or the error page
    """
    message_id = request.GET.get('record_id', None)
    not_confirm, message_already_sent, system_announcement = get_message_infos(request)
    if not message_id:
        return render(request, "page_main_controller/message.html",
                      {'title_msg': sys_msg + '-错误信息展示页面',
                       'message': '系统提示:未能获取到相关信息!'})
    originals = SystemMessage.objects.filter(id=message_id).order_by('-id')
    if len(originals) == 0:
        return render(request, "page_main_controller/message.html",
                      {'title_msg': sys_msg + '-错误信息展示页面',
                       'message': '系统提示:没有找到这条记录基础信息,请您重试或查证后再操作!'})
    return render(request, "page_main_controller/system_message/system_message_confirm_send_page.html",
                  {'title_msg': '发送信息编辑页面',
                   'receiver_message': originals[0],
                   'has_receiver_message': 'true',
                   'teacher_infos': TeacherInfo.objects.all(),
                   'school_terms': SchoolTerm.objects.filter().order_by('-id'),
                   'school_term': 0, 'teacher_info': 0,
                   'message_not_confirm': not_confirm,
                   'message_already_sent': message_already_sent,
                   'system_announcement': system_announcement})
def reply_system_message(request):
    """Save a confirmation reply to an existing system message.

    POST only: validates a SystemMessageForm, stores the reply linked to and
    addressed back at the original message's sender, then marks the original
    as confirmed and replied. Redirects to the "sent" list on success.

    Cleanups: ``get_message_infos`` was called twice (redundant DB work and
    side effects); a leftover debug ``print`` of the form and two queryset
    locals used only by commented-out code are removed.
    """
    try:
        # Called once for its side effect of marking this user's unread
        # messages as read.
        get_message_infos(request)
        if request.method == 'POST':
            object_form = SystemMessageForm(request.POST)
            if object_form.is_valid():
                form_object_system_message = form_to_obj(object_form.cleaned_data, SystemMessage())
                feedback_message = object_form.cleaned_data.get("feedback_message", None)
                form_object_system_message.sender = UserInfo.objects.get(
                    register_user_info__username=request.session.get('username', None))
                if feedback_message:
                    # This is a confirmation reply to an existing message.
                    feedback_message_objects = SystemMessage.objects.filter(id=feedback_message).order_by('-id')
                    if len(feedback_message_objects) > 0:
                        feedback_message_object = feedback_message_objects[0]
                        form_object_system_message.feedback_message = feedback_message_object
                        form_object_system_message.receiver = feedback_message_object.sender
                        form_object_system_message.save()
                        # Flip the original message to confirmed / replied.
                        if feedback_message_object.feedback_status == '2':
                            feedback_message_object.feedback_status = "1"
                        if feedback_message_object.reply_status == '2':
                            feedback_message_object.reply_status = "1"
                        feedback_message_object.save()
                        return HttpResponseRedirect('/report/send/')
                    else:
                        title_msg = sys_msg + '-错误信息展示页面'
                        message = '系统提示:未能获取到相关信息!'
                        return render(request, "page_main_controller/message.html",
                                      {'title_msg': title_msg, 'message': message})
                else:
                    title_msg = sys_msg + '-错误信息展示页面'
                    message = '系统提示:未能获取到相关信息!'
                    return render(request, "page_main_controller/message.html",
                                  {'title_msg': title_msg, 'message': message})
            else:
                title_msg = sys_msg + '-错误信息展示页面'
                message = '操作失败:所要操作的记录不存在!'
                return render(request, "page_main_controller/message.html",
                              {'title_msg': title_msg, 'message': message})
        else:
            title_msg = sys_msg + '-错误信息展示页面'
            message = '操作失败:系统操作请求方式异常!'
            return render(request, "page_main_controller/message.html",
                          {'title_msg': title_msg, 'message': message})
    except Exception as e:
        title_msg = sys_msg + '-错误信息展示页面'
        message = '系统提示:未能获取到相关信息!错误信息提示:' + str(e)
        return render(request, "page_main_controller/message.html",
                      {'title_msg': title_msg, 'message': message})
def system_message_hidden(request):
    """Hide ("clear") a message for either its sender or its receiver.

    POST only; expects ``record_id`` and ``hidden_user`` ('sender' selects the
    sender-side flag, anything else the receiver-side flag). A message may
    only be cleared after it has been viewed and confirmed. Always responds
    with a JSON payload and records an operation log entry.

    Bug fixed: the function mutated a shared module-level ``result`` dict,
    which can bleed state between concurrent requests; it now builds a
    request-local dict.
    """
    result = {}
    operation_object = None
    try:
        if request.method == 'POST':
            record_id = request.POST.get('record_id', None)
            hidden_user = request.POST.get('hidden_user', None)
            if int(record_id) > 0:
                object_infos = SystemMessage.objects.filter(id=record_id)
                if len(object_infos) == 1:
                    object_info = object_infos[0]
                    receive_status = object_info.receive_status
                    feedback_status = object_info.feedback_status
                    if feedback_status == '1':
                        # Message has been confirmed.
                        if receive_status == '1':
                            # Message has been viewed.
                            if hidden_user == 'sender':
                                hidden_status_sender = object_info.hidden_status_sender
                                if hidden_status_sender == '2':
                                    # Not yet hidden for the sender: hide it.
                                    object_info.hidden_status_sender = "1"
                                    object_info.save()
                                    operation_object = object_info.id
                                    result['status'] = True
                                    result['message'] = '信息清理(不再查看)成功'
                                    result['data'] = json.dumps({}, ensure_ascii=False)
                                else:
                                    result['status'] = False
                                    result['message'] = '已清理:请不要重复清理!'
                                    result['data'] = json.dumps({}, ensure_ascii=False)
                            else:
                                hidden_status_receiver = object_info.hidden_status_receiver
                                if hidden_status_receiver == '2':
                                    # Not yet hidden for the receiver: hide it.
                                    object_info.hidden_status_receiver = "1"
                                    object_info.save()
                                    operation_object = object_info.id
                                    result['status'] = True
                                    result['message'] = '信息清理(不再查看)成功'
                                    result['data'] = json.dumps({}, ensure_ascii=False)
                                else:
                                    result['status'] = False
                                    result['message'] = '已清理:请不要重复清理!'
                                    result['data'] = json.dumps({}, ensure_ascii=False)
                        else:
                            result['status'] = False
                            result['message'] = '清理失败:信息未查看!'
                            result['data'] = json.dumps({}, ensure_ascii=False)
                    else:
                        result['status'] = False
                        result['message'] = '清理失败:此记录未确认!'
                        result['data'] = json.dumps({}, ensure_ascii=False)
                else:
                    result['status'] = False
                    result['message'] = '操作失败:无此消息记录!'
                    result['data'] = json.dumps({}, ensure_ascii=False)
            else:
                result['status'] = False
                result['message'] = '操作失败:所要操作的记录不存在!'
                result['data'] = ''
        else:
            result['status'] = False
            result['message'] = '操作失败:系统操作请求方式异常!'
            result['data'] = ''
    except Exception as e:
        result['status'] = False
        result['message'] = "系统异常:" + str(e)
        result['data'] = ''
    result["level"] = log_level_change_status
    save_operation_log(request, inspect.stack()[0][3], "uid:" + str(operation_object), result)
    return JsonResponse(result, safe=False)
def get_message_infos(request):
    """Compute message counters for the session's current user.

    :param request: incoming HTTP request (username taken from the session)
    :return: tuple ``(not_confirm, already_sent, announcements)``; the first
        two default to 0 when the user profile cannot be resolved

    Side effect: every unread received message is flagged as read.
    """
    announcements = SystemMessage.objects.filter(
        message_range=1, hidden_status_sender=2).order_by('-id').count()
    username = request.session.get('username', None)
    if username:
        registers = RegisterUserInfo.objects.filter(username=username)
        if registers.count() == 1:
            user_infos = UserInfo.objects.filter(register_user_info=registers[0]).order_by('-id')
            if user_infos.count() == 1:
                current_user = user_infos[0]
                received = SystemMessage.objects.filter(
                    receiver=current_user, hidden_status_receiver=2).order_by('-id')
                not_confirm = received.filter(feedback_status=2).count()
                already_sent = SystemMessage.objects.filter(
                    sender=current_user, hidden_status_sender=2).order_by('-id').count()
                # Mark everything still unread as read.
                for unseen in received.filter(receive_status=2):
                    unseen.receive_status = "1"
                    unseen.save()
                return not_confirm, already_sent, announcements
    # No username, ambiguous registration, or incomplete profile:
    # received/sent counters default to zero.
    return 0, 0, announcements
|
<reponame>dirkgomez/voice-skill-sdk
#
# voice-skill-sdk
#
# (C) 2020, Deutsche Telekom AG
#
# This file is distributed under the terms of the MIT license.
# For details see the file LICENSE in the top directory.
#
#
import sys
import unittest
from json import loads
from logging import makeLogRecord, INFO
from unittest.mock import patch
from skill_sdk import tracing
from skill_sdk import log
test_context = tracing.SpanContext('abcd', '1234')
class TestSmartHubGELFFormatter(unittest.TestCase):
    """Tests for the GELF-style JSON log formatter."""

    def setUp(self):
        # Re-init the tracer to reset a current span that might have been
        # activated by previous tests.
        tracing.initialize_tracer()
        self.record = makeLogRecord({
            'levelno': INFO,
            'levelname': 'INFO',
            'thread': '123456',
            'name': 'demo.logger',
            'msg': 'testmessage',
            'audience': 'development',
        })

    def test_format(self):
        """Formatter emits the expected fields and drops non-GELF extras."""
        data = loads(log.SmartHubGELFFormatter().format(self.record))
        self.assertGreater(data['@timestamp'], 1490000000000)
        self.assertIn('process', data)
        self.assertEqual(data['tenant'], 'unnamed-skill')
        self.assertEqual(data['thread'], '123456')
        self.assertEqual(data['message'], 'testmessage')
        self.assertEqual(data['level'], "INFO")
        self.assertNotIn('audience', data)
        self.assertEqual(data['logger'], 'demo.logger')
        self.assertNotIn('intention', data)
        self.assertEqual(data['traceId'], None)
        self.assertEqual(data['spanId'], None)

    def test_format_exception(self):
        """An active exception is rendered into the `_traceback` field."""
        self.record.exc_info = True
        try:
            1 / 0
        # Bug fix: was a bare `except:` — catch only the exception we raise so
        # unrelated failures (KeyboardInterrupt, assertion errors) propagate.
        except ZeroDivisionError:
            data = loads(log.SmartHubGELFFormatter().format(self.record))
            self.assertIn('ZeroDivisionError', data['_traceback'])
class TestLogLevels(unittest.TestCase):
    """LOG_LEVEL is computed at import time from SPAN_TAG_ENVIRONMENT."""

    def setUp(self):
        # Evict any cached import so each test re-evaluates LOG_LEVEL under
        # its own patched environment.
        sys.modules.pop('skill_sdk.services.log', None)

    def test_log_level_environment_not_set(self):
        from skill_sdk.services.log import LOG_LEVEL
        self.assertEqual('ERROR', LOG_LEVEL)

    @patch('os.environ', new={'SPAN_TAG_ENVIRONMENT': 'prod'})
    def test_log_level_unknown(self):
        from skill_sdk.services.log import LOG_LEVEL
        self.assertEqual('ERROR', LOG_LEVEL)

    @patch('os.environ', new={'SPAN_TAG_ENVIRONMENT': 'skill-edge'})
    def test_log_level_skill_edge(self):
        from skill_sdk.services.log import LOG_LEVEL
        self.assertEqual('DEBUG', LOG_LEVEL)

    @patch('os.environ', new={'SPAN_TAG_ENVIRONMENT': 'staging'})
    def test_log_level_staging(self):
        from skill_sdk.services.log import LOG_LEVEL
        self.assertEqual('DEBUG', LOG_LEVEL)

    @patch('os.environ', new={'SPAN_TAG_ENVIRONMENT': 'integration'})
    def test_log_level_integration(self):
        from skill_sdk.services.log import LOG_LEVEL
        self.assertEqual('DEBUG', LOG_LEVEL)
class TestHelperFunctions(unittest.TestCase):
    """Tests for module-level helpers in skill_sdk.log."""

    def test_get_logger(self):
        """get_logger returns a stdlib Logger; default name is the caller's module."""
        import logging
        logger = log.get_logger('test')
        self.assertIsInstance(logger, logging.Logger)
        self.assertEqual(logger.name, 'test')
        logger = log.get_logger()
        self.assertEqual(logger.name, __name__.split('.')[-1])

    @unittest.skipIf(sys.platform.startswith("win"), "Windows cannot gunicorn")
    def test_gunicorn_logger(self):
        """Every gunicorn handler must be wired to the GELF formatter."""
        from types import SimpleNamespace
        from skill_sdk.log import GunicornLogger, SmartHubGELFFormatter
        logger = GunicornLogger(SimpleNamespace(errorlog='-'))
        self.assertIsInstance(logger, GunicornLogger)
        self.assertEqual(logger.error_log.name, 'gunicorn')
        # Plain loops instead of list comprehensions used for side effects.
        for handler in logger.error_log.handlers:
            self.assertIsInstance(handler.formatter, SmartHubGELFFormatter)
        for handler in logger.access_log.handlers:
            self.assertIsInstance(handler.formatter, SmartHubGELFFormatter)

    def test_prepare_for_logging(self):
        """Over-long attribute values are truncated with an ellipsis marker."""
        from skill_sdk.log import prepare_for_logging, LOG_ENTRY_MAX_STRING
        request_json = {
            "context": {
                "attributes": {'location': ['A' * 1000, 'B' * 1000]},
                "attributesV2": {'location': [{'value': 'A' * 1000}, {'value': 'B' * 1000}]},
            },
        }
        request = prepare_for_logging(request_json)
        self.assertEqual(request['context']['attributes']['location'],
                         ['A' * LOG_ENTRY_MAX_STRING + '...', 'B' * LOG_ENTRY_MAX_STRING + '...'])
        self.assertEqual(request['context']['attributesV2']['location'],
                         [{'value': 'A' * LOG_ENTRY_MAX_STRING + '...'},
                          {'value': 'B' * LOG_ENTRY_MAX_STRING + '...'}])

    def test_hide_tokens(self):
        """Tokens are masked in the sanitized copy; the original is untouched."""
        from skill_sdk.log import prepare_for_logging
        original = {
            "context": {
                "tokens": {'cvi': 'eyJblablabla'},
            },
        }
        request = prepare_for_logging(original)
        self.assertEqual(original, {"context": {"tokens": {"cvi": "eyJblablabla"}}})
        self.assertEqual(request, {"context": {"tokens": {"cvi": "*****"}}})
        self.assertEqual(prepare_for_logging(dict(value='Immutable Chuck Norris')),
                         dict(value='Immutable Chuck Norris'))
|
<reponame>datphan/moviecrab<filename>app/people/datastore.py
from abc import abstractmethod, ABCMeta
from datetime import datetime
from flask_security.utils import encrypt_password
from ..datastore import SQLAlchemyDatastore
from ..utils import fix_docs
class PeopleDatastore(object):
    """Abstract PeopleDatastore class.

    Defines the CRUD contract that every people datastore implementation
    must fulfil.

    .. versionadded:: 0.1.0
    """
    __metaclass__ = ABCMeta

    # peoples
    @abstractmethod
    def find_people_list(self, q=None, filters=None, sort=None, offset=None, limit=None, **kwargs):
        """Find all existing people from the datastore by optional query and options.

        .. versionadded:: 0.1.0

        :param q: the optional query as a string which is provided by
                  the current people, default is None.
        :param filters: the filters list of dictionary items with keys: (key, op, value)
        :param sort: sorting string (sort='+a,-b,c')
        :param offset: offset (positive integer)
        :param limit: limit (positive integer)
        :param kwargs: additional keyword arguments containing a filter dict {key: value}
        :return: the query
        """
        pass

    @abstractmethod
    def create_people(self, **kwargs):
        """Create a new people associated with the current people and save it to the database.

        .. versionadded:: 0.1.0

        :param kwargs: the optional kwargs
        :return: the created people
        """
        pass

    @abstractmethod
    def read_people(self, pid, **kwargs):
        """Read an existing people associated with the current people by its primary id
        from the database.

        .. versionadded:: 0.1.0

        :param pid: primary id of a people.
        :param kwargs: the optional kwargs.
        :return: the found people
        """
        pass

    @abstractmethod
    def update_people(self, pid, **kwargs):
        """Update an existing people associated with the current people by its primary id
        from the database.

        .. versionadded:: 0.1.0

        :param pid: primary id of a people.
        :param kwargs: the optional kwargs.
        :return: the updated people
        """
        pass

    @abstractmethod
    def delete_people(self, pid, **kwargs):
        """Delete an existing people associated with the current people by its primary id
        from the database.

        .. versionadded:: 0.1.0

        :param pid: primary id of a people.
        :param kwargs: the optional kwargs
        """
        pass
@fix_docs
class SQLAlchemyPeopleDatastore(SQLAlchemyDatastore, PeopleDatastore):
    """SQLAlchemy-backed implementation of :class:`PeopleDatastore`.

    Each method delegates to the generic ``*_by_model_name`` helpers with the
    'people' model name.
    """

    def find_people_list(self, q=None, filters=None, **kwargs):
        """List people records; only 'email' and 'active' may be filtered on."""
        kwargs['q'] = q
        kwargs['filters'] = filters
        kwargs['accepted_filter_keys'] = ('email', 'active')
        return self.find_by_model_name('people', **kwargs)

    def create_people(self, **kwargs):
        """Create a people record with a hashed password and the default role."""
        kwargs['password'] = encrypt_password(kwargs['password'])
        # TODO(hoatle): implement verification by signals
        kwargs['active'] = True
        kwargs['confirmed_at'] = datetime.utcnow()
        accepted = ('email', 'password', 'active', 'confirmed_at')
        person = self.create_by_model_name('people', accepted, **kwargs)
        person.roles.append(self.find_roles(name='people').first())
        self.commit()
        return person

    def read_people(self, pid, **kwargs):
        """Fetch one people record by primary id."""
        return self.read_by_model_name('people', pid, **kwargs)

    def update_people(self, pid, **kwargs):
        """Update one people record by primary id."""
        return self.update_by_model_name('people', pid, **kwargs)

    def delete_people(self, pid, **kwargs):
        """Delete one people record by primary id."""
        self.delete_by_model_name('people', pid, **kwargs)

    def filter_by(self, **kwargs):
        """Generic attribute-equality filter over people records."""
        return self.filter_by_model_name('people', **kwargs)
|
<reponame>reip-project/reip-pipelines<gh_stars>0
from interface import *
from plasma import save_data, load_data, save_meta, load_meta, save_both, load_both
import pyarrow as pa
import numpy as np
import pyarrow.plasma as plasma
import multiprocessing as mp
import time
import copy
class UniqueID:
    """Generator of sequential, fixed-width plasma ObjectIDs."""

    _id = 0  # process-wide monotonically increasing counter

    @staticmethod
    def Gen():
        """Return a fresh plasma ObjectID built from the next counter value."""
        UniqueID._id += 1
        # right-justify to width 20 so the encoded token is always 20 bytes
        token = "%20s" % str(UniqueID._id)
        return plasma.ObjectID(token.encode("utf-8"))
class SharedPointer:
    """Monotonic counter shared across processes, interpreted modulo a ring size."""

    def __init__(self, ring_size):
        # 'i' = C signed int; lock=False — synchronization is the caller's concern
        self.counter = mp.Value('i', 0, lock=False)
        self.ring_size = ring_size

    @property
    def value(self):
        """Raw monotonic counter value."""
        return self.counter.value

    @value.setter
    def value(self, new_value):
        self.counter.value = new_value

    @property
    def pos(self):
        """Slot index within the ring."""
        return self.value % self.ring_size

    @property
    def loop(self):
        """Number of complete wraps around the ring."""
        return self.value // self.ring_size
class BufferStore(Sink):
    """Ring buffer of (data, meta) buffers backed by a plasma object store.

    The writer appends at ``head``; each registered :class:`Customer` reads
    behind it with its own shared cursor. ``tail`` trails the slowest customer
    and consumed plasma objects are freed lazily in batches.
    """

    def __init__(self, size, debug=False, **kw):
        self.size = size + 1  # need extra slot because head == tail means empty
        # pre-generated plasma object ids, one per ring slot
        self.data_ids = [UniqueID.Gen() for i in range(self.size)]
        self.meta_ids = [UniqueID.Gen() for i in range(self.size)]
        self.both_ids = [UniqueID.Gen() for i in range(self.size)]
        self.head = SharedPointer(self.size)  # next write position
        self.tail = SharedPointer(self.size)  # oldest retained position
        self.debug = debug
        self.customers = []  # one SharedPointer cursor per registered consumer
        self.pipes = []
        self.client = plasma.connect("/tmp/plasma")
        print("Store Connected. Warming up...")
        t0 = time.time()
        # first store round-trip before real traffic, so timing is not skewed
        ret = self.client.get(self.client.put("warm-up"))
        assert (ret == "warm-up")
        print("Warmed up in %.4f sec" % (time.time()- t0))
        super().__init__(**kw)

    def full(self):
        """Reclaim fully-consumed slots, then report whether the ring is full."""
        if len(self.customers) > 0:
            # the slowest customer bounds what can be reclaimed
            new_value = min([customer.value for customer in self.customers])
            to_delete = []
            for v in range(self.tail.value, new_value):
                # to_delete.append(self.data_ids[v % self.size])
                # to_delete.append(self.meta_ids[v % self.size])
                to_delete.append(self.both_ids[v % self.size])
            # deletions are batched to amortize the client.delete call
            if len(to_delete) > self.size / 5:
                # print("Deleting:", len(to_delete))
                self.client.delete(to_delete)
            # NOTE(review): tail advances even when the delete batch above is
            # skipped, so those skipped ids are never passed to delete() again —
            # verify plasma eviction covers them, otherwise this leaks objects.
            self.tail.value = new_value
        return (self.head.value - self.tail.value) >= (self.size - 1)

    def _put(self, buffer):
        """Store one (data, meta) pair at the head slot and advance the head."""
        data, meta = buffer
        # save_data(self.client, data, id=self.data_ids[self.head.pos])
        # save_meta(self.client, meta, id=self.meta_ids[self.head.pos])
        save_both(self.client, data, meta, id=self.both_ids[self.head.pos], debug=self.debug)
        self.head.value += 1

    def gen_source(self, **kw):
        """Register and return a new Customer whose cursor starts at the current tail."""
        self.customers.append(SharedPointer(self.size))
        self.customers[-1].value = self.tail.value
        return Customer(self, len(self.customers) - 1, **kw)
class Customer(Source):
    """Read-side view over a BufferStore for one registered consumer."""

    def __init__(self, store, id, **kw):
        self.store = store
        self.id = id  # index into store.customers
        self.client = None  # lazily connected in the consuming process
        super().__init__(**kw)

    def empty(self):
        """True when this consumer has caught up with the writer."""
        return self.store.customers[self.id].value == self.store.head.value

    def last(self):
        """True when at most one unread buffer remains."""
        backlog = self.store.head.value - self.store.customers[self.id].value
        return backlog <= 1

    def next(self):
        """Advance this consumer's cursor by one buffer."""
        self.store.customers[self.id].value += 1

    def _get(self):
        """Load the (data, meta) pair at this consumer's current slot."""
        if self.client is None:
            # connect on first use so the handle belongs to the reading process
            self.client = plasma.connect("/tmp/plasma")
            print("Customer Connected")
        slot = self.store.customers[self.id].pos
        data, meta = load_both(self.client, self.store.both_ids[slot], debug=self.store.debug)
        return data, meta
def run(customers):
    """Demo consumer process: drain each customer queue, printing what arrives."""
    def drain(label, customer):
        # print the label, then every pending buffer, advancing the cursor
        print(label)
        while not customer.empty():
            print(customer.get())
            customer.next()

    print("Started")
    c0, c1, c2 = customers
    drain("c0:", c0)
    drain("c1:", c1)
    drain("c2:", c2)
    time.sleep(0.75)
    # pick up anything the producer appended while we slept
    drain("c2':", c2)
    time.sleep(1.5)
    print("Done")
if __name__ == '__main__':
    # Smoke test: this process produces into the ring while a child process
    # drains three consumers with different read strategies.
    bs = BufferStore(100)
    print(issubclass(type(bs), Sink))
    print(bs.client.store_capacity())
    print(bs.client.list())
    bs.client.delete(bs.client.list())  # start from an empty store
    print(bs.client.list())
    c0 = bs.gen_source()
    c1 = bs.gen_source(strategy=Source.Skip, skip=1)
    c2 = bs.gen_source(strategy=Source.Latest)
    for i in range(10):
        bs.put((str(i), {"buffer": i}))
    process = mp.Process(target=run, args=([c0, c1, c2], ))
    # Bug fix: the attribute is ``daemon`` — the old ``deamon`` typo only set
    # an unused attribute, leaving the child process non-daemonic.
    process.daemon = True
    process.start()
    time.sleep(0.5)
    bs.put((str(200), {"buffer": 200}))  # lands while the consumer is sleeping
    process.join()
    time.sleep(5)
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensorflow_transform.beam.analyzer_impls."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import apache_beam as beam
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
import numpy as np
import tensorflow as tf
from tensorflow_transform import analyzers
from tensorflow_transform.beam import analyzer_impls as impl
import unittest
from tensorflow.python.framework import test_util
class AnalyzerImplsTest(test_util.TensorFlowTestCase):
  """Unit tests for the Beam combiner wrappers in analyzer_impls."""

  def assertCombine(self, combine_fn, shards, expected, check_np_type=False):
    """Tests the provided combiner.

    Args:
      combine_fn: A beam.CombineFn to exercise.
      shards: A list of next_inputs to add via the combiner.
      expected: The expected output from extract_output.
      check_np_type: check strict equivalence of output numpy type.

    Exercises create_accumulator, add_input, merge_accumulators,
    and extract_output.
    """
    accumulators = [
        combine_fn.add_input(combine_fn.create_accumulator(), shard)
        for shard in shards]
    final_accumulator = combine_fn.merge_accumulators(accumulators)
    extracted = combine_fn.extract_output(final_accumulator)
    # Extract output 0 since all analyzers have a single output.
    extracted = extracted[0]
    if check_np_type:
      # This is currently applicable only for quantile buckets, which contains
      # a single element list of numpy array; the numpy array contains the
      # bucket boundaries.
      self.assertEqual(len(expected), 1)
      self.assertEqual(len(extracted), 1)
      self.assertEqual(expected[0].dtype, extracted[0].dtype)
    self.assertAllEqual(expected, extracted)

  def testCombineOnBatchSimple(self):
    batch_1 = [np.ones((2, 6))]
    batch_2 = [np.ones((1, 6))]
    out = [3 for _ in range(6)]
    analyzer = impl._CombineFnWrapper(
        analyzers._NumPyCombinerSpec(np.sum, reduce_instance_dims=False))
    self.assertCombine(analyzer, [batch_1, batch_2], out)

  def testCombineOnBatchAllEmptyRow(self):
    analyzer = impl._CombineFnWrapper(
        analyzers._NumPyCombinerSpec(np.sum, reduce_instance_dims=False))
    self.assertCombine(analyzer, [[[[]]], [[[]]], [[[]]]], [])

  def testCombineOnBatchLotsOfData(self):
    shards = [[np.ones((1, 3))] for _ in range(2000)]
    out = [1 for _ in range(3)]
    analyzer = impl._CombineFnWrapper(
        analyzers._NumPyCombinerSpec(np.min, reduce_instance_dims=False))
    self.assertCombine(analyzer, shards, out)

  def testCombineOnBatchWithBeamPipeline(self):
    # Test with a real Beam pipeline instead of calling the Combiner methods
    # directly. This surfaces bugs that only occur within a Beam pipeline,
    # e.g. due to Beam passing iterators to merge_accumulators instead of
    # lists.
    with beam.Pipeline() as p:
      # FIX: `np.int` was removed in NumPy 1.24; use the explicit np.int64.
      batch_1 = [np.ones((2, 6), dtype=np.int64)]
      batch_2 = [np.ones((1, 6), dtype=np.int64)]
      expected_output = np.ones(6) * 3
      def assert_equals_expected(outputs):
        output, = outputs  # Expect exactly one analyzer output
        return np.array_equal(output, expected_output)
      analyzer = impl._CombineFnWrapper(
          analyzers._NumPyCombinerSpec(np.sum, reduce_instance_dims=False))
      assert_that(p
                  | beam.Create([batch_1, batch_2])
                  | beam.CombineGlobally(analyzer)
                  | beam.Map(assert_equals_expected),
                  equal_to([True]))

  def _test_compute_quantiles_single_batch_helper(self, nptype):
    # FIX: `np.linspace(1, 100, 100, nptype)` passed the dtype into the
    # positional `endpoint` parameter, so the requested dtype was silently
    # ignored.  Use the keyword, matching the multi-batch helper below.
    batch_1 = [np.linspace(1, 100, 100, dtype=nptype)]
    analyzer = impl._ComputeQuantiles(num_quantiles=3, epsilon=0.00001)
    out = np.array([[35, 68]], dtype=np.float32)
    self.assertCombine(analyzer, np.array([batch_1]), out, check_np_type=True)

  def testComputeQuantilesSingleBatch(self):
    self._test_compute_quantiles_single_batch_helper(np.double)
    self._test_compute_quantiles_single_batch_helper(np.float32)
    self._test_compute_quantiles_single_batch_helper(np.float64)
    self._test_compute_quantiles_single_batch_helper(np.int32)
    self._test_compute_quantiles_single_batch_helper(np.int64)

  def _test_compute_quantiles_multipe_batch_helper(self, nptype):
    batch_1 = [np.linspace(1, 100, 100, dtype=nptype)]
    batch_2 = [np.linspace(101, 200, 100, dtype=nptype)]
    batch_3 = [np.linspace(201, 300, 100, dtype=nptype)]
    analyzer = impl._ComputeQuantiles(num_quantiles=5, epsilon=0.00001)
    out = np.array([[61, 121, 181, 241]], dtype=np.float32)
    self.assertCombine(
        analyzer, np.array([batch_1, batch_2, batch_3]), out,
        check_np_type=True)

  def testComputeQuantilesMultipleBatch(self):
    self._test_compute_quantiles_multipe_batch_helper(np.double)
    self._test_compute_quantiles_multipe_batch_helper(np.float32)
    self._test_compute_quantiles_multipe_batch_helper(np.float64)
    self._test_compute_quantiles_multipe_batch_helper(np.int32)
    self._test_compute_quantiles_multipe_batch_helper(np.int64)

  def testCovarianceEmpty(self):
    """Test empty array of inputs."""
    analyzer = analyzers._CovarianceCombinerSpec(dtype=tf.float64)
    shards = [[[[]]], [[[]]]]
    out = np.empty((0, 0))
    self.assertCombine(analyzer, shards, out)

  def testCovarianceWithZeroAxis(self):
    """Test an example with one zero variance axis."""
    analyzer = analyzers._CovarianceCombinerSpec(dtype=tf.float64)
    shards = [
        [[[0, 0, 1]]],
        [[[4, 0, 1], [2, -1, 1]]],
        [[[2, 1, 1]]]
    ]
    out = np.array([[2, 0, 0], [0, 0.5, 0], [0, 0, 0]])
    self.assertCombine(analyzer, shards, out)

  def testCovarianceWithLargeNumbers(self):
    """Test floating point precision with very large doubles."""
    analyzer = analyzers._CovarianceCombinerSpec(dtype=tf.float64)
    shards = [
        [[[2e15, 0], [1e15, 0]]],
        [[[-2e15, 0], [-1e15, 0]]]
    ]
    out = np.array([[2.5e30, 0], [0, 0]])
    self.assertCombine(analyzer, shards, out)

  def testPCAWithZeroAxis(self):
    """Test a PCA example with one zero variance axis."""
    analyzer = analyzers._PCACombinerSpec(output_dim=2, dtype=tf.float64)
    shards = [
        [[[0, 0, 1]]],
        [[[4, 0, 1], [2, -1, 1]]],
        [[[2, 1, 1]]]
    ]
    out = np.array([[1, 0], [0, 1], [0, 0]])
    self.assertCombine(analyzer, shards, out)
# Standard unittest entry point for running this module directly.
if __name__ == '__main__':
  unittest.main()
|
from django import forms
from django.contrib.auth import (
authenticate,
get_user_model,
login,
logout,
)
from . import models
from django.contrib.auth.password_validation import password_validators_help_texts
User = get_user_model()
class UserLoginForm(forms.Form):
    """Username/password login form validated through ``authenticate``."""
    username = forms.CharField()
    password = forms.CharField(widget=forms.PasswordInput)

    def clean(self, *args, **kwargs):
        """Validate the credentials; raise ValidationError on any failure."""
        username = self.cleaned_data.get("username")
        password = self.cleaned_data.get("password")
        if username and password:
            user = authenticate(username=username, password=password)
            if not user:
                raise forms.ValidationError("This user does not exist")
            # authenticate() already verified the password for standard
            # backends; this re-check is kept for custom backends.
            if not user.check_password(password):
                # FIX: message read "Incorrect passsword".
                raise forms.ValidationError("Incorrect password")
            if not user.is_active:
                # FIX: message read "is not longer active".
                raise forms.ValidationError("This user is no longer active.")
        return super(UserLoginForm, self).clean(*args, **kwargs)
class UserProfileinfo(forms.ModelForm):
    """Coach profile form exposing only the location fields."""

    class Meta:
        model = models.Coachs
        fields = ['city', 'state']
class TeamUpdate(forms.ModelForm):
    """Form for editing a team's basic attributes."""

    class Meta:
        model = models.Teams
        fields = ['team_name', 'mascot', 'city', 'state']
class PlayerUpdate(forms.ModelForm):
    """Form for editing an existing player's details."""

    class Meta:
        model = models.Players
        fields = (
            'team', 'hometown', 'homestate', 'first_name', 'last_name',
            'email', 'height_feet', 'height_inches', 'weight',
            'batting_orientation', 'player_number', 'position',
        )
class UserRegisterForm(forms.ModelForm):
    """Sign-up form: confirmed email, password (min 8 chars), and names."""
    email = forms.EmailField(label='Email address')
    email2 = forms.EmailField(label='Confirm Email')
    password = forms.CharField(min_length=8, widget=forms.PasswordInput)
    first_name = forms.CharField(label='first_name')
    last_name = forms.CharField(label='last_name')

    class Meta:
        model = User
        fields = [
            'username', 'first_name', 'last_name',
            'email', 'email2', 'password',
        ]

    def clean_email2(self):
        """Reject mismatched or already-registered email addresses."""
        email = self.cleaned_data.get('email')
        confirmation = self.cleaned_data.get('email2')
        if email != confirmation:
            raise forms.ValidationError("Emails must match")
        if User.objects.filter(email=email).exists():
            raise forms.ValidationError("This email has already been registered")
        return email
class Useremailedit(forms.Form):
    """Standalone form to change a user's email address (with confirmation)."""
    email = forms.EmailField(label='Email address')
    email2 = forms.EmailField(label='Confirm Email')

    def clean_email2(self):
        """Reject mismatched or already-registered email addresses."""
        email = self.cleaned_data.get('email')
        confirmation = self.cleaned_data.get('email2')
        if email != confirmation:
            raise forms.ValidationError("Emails must match")
        if User.objects.filter(email=email).exists():
            raise forms.ValidationError("This email has already been registered")
        return email
class Userflnameedit(forms.Form):
    """Form for editing a user's first and last name."""
    first_name = forms.CharField(label='first_name')
    last_name = forms.CharField(label='last_name')
class teamregistration(forms.ModelForm):
    """Form for a coach to register a new team (coach is set by the view)."""
    team_name = forms.CharField(label='team_name')

    class Meta:
        model = models.Teams
        fields = ('team_name', 'mascot', 'city', 'state')
        exclude = ('coach',)
    # NOTE(review): removed a dead string-statement containing a commented-out
    # uniqueness check; it referenced a nonexistent `models.Team` (the model
    # is `Teams`) and was never executed.
class PlayerRegistrationForm(forms.ModelForm):
    """Coach-facing form to register a player on one of the coach's teams.

    Requires a ``user`` kwarg (the coach) that restricts the team choices.
    """
    email = forms.EmailField(label='Email address')
    email2 = forms.EmailField(label='Confirm Email')

    class Meta:
        model = models.Players
        fields = ('team', 'hometown', 'homestate', 'first_name', 'last_name',
                  'email', 'email2', 'height_feet', 'height_inches', 'weight',
                  'batting_orientation', 'player_number', 'position')
        exclude = ('password', 'username', 'type')

    def __init__(self, *args, **kwargs):
        # Pop the coach before calling super(): it is not a ModelForm kwarg.
        self.user = kwargs.pop('user')
        super(PlayerRegistrationForm, self).__init__(*args, **kwargs)
        # A coach may only register players on their own teams.
        self.fields['team'].queryset = models.Teams.objects.filter(coach=self.user)

    def clean_email2(self):
        """Ensure both email fields match and the address is unused."""
        email = self.cleaned_data.get('email')
        email2 = self.cleaned_data.get('email2')
        if email != email2:
            raise forms.ValidationError("Emails must match")
        email_qs = models.Players.objects.filter(email=email)
        if email_qs.exists():
            raise forms.ValidationError("This email has already been registered")
        return email
    # NOTE(review): removed two dead string-statements holding commented-out
    # name/team uniqueness checks; they referenced a nonexistent
    # `models.Player` (the model is `Players`) and never ran.
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Roles_lkup(models.Model):
    """Lookup table of team-member roles."""
    rolename=models.CharField(max_length=255)
    rolenamesort=models.SmallIntegerField()  # display sort order
    def __str__(self):
        return self.rolename
    class Meta:
        db_table='role'
        verbose_name_plural='roles'
class Points_lkup(models.Model):
    """Lookup table of story-point values."""
    storypoint=models.SmallIntegerField()
    def __str__(self):
        return str(self.storypoint)
    class Meta:
        db_table='point'
        verbose_name_plural='points'
class Priority_lkup(models.Model):
    """Lookup table of story priorities."""
    storypriority=models.CharField(max_length=255)
    storyprioritysort=models.SmallIntegerField()  # display sort order
    def __str__(self):
        return self.storypriority
    class Meta:
        db_table='priority'
        # FIX: admin-facing plural was misspelled as 'priorites'.
        verbose_name_plural='priorities'
class Status_lkup(models.Model):
    """Lookup table of story statuses."""
    storystatus=models.CharField(max_length=255)
    storystatussort=models.SmallIntegerField()  # display sort order
    def __str__(self):
        return self.storystatus
    class Meta:
        db_table='status'
        # NOTE(review): 'stati' is a nonstandard plural; 'statuses' may be
        # intended — confirm before changing an admin-facing label.
        verbose_name_plural='stati'
class Sprint(models.Model):
    """A development sprint with optional start/end dates and comments."""
    sprintlabel=models.CharField(max_length=255)
    sprintstartdate=models.DateField(null=True, blank=True)
    sprintenddate=models.DateField(null=True, blank=True)
    sprintcomments=models.TextField(null=True, blank=True)
    def __str__(self):
        return self.sprintlabel
    class Meta:
        db_table='sprint'
        verbose_name_plural='sprints'
class Teammember(models.Model):
    """A team member with a role taken from the Roles_lkup table."""
    firstname=models.CharField(max_length=255)
    lastname=models.CharField(max_length=255)
    teammemberrole=models.ForeignKey(Roles_lkup, on_delete=models.DO_NOTHING)
    def __str__(self):
        return self.lastname
    class Meta:
        db_table='teammember'
        verbose_name_plural='teammembers'
class Story(models.Model):
    """A user story with priority/status/points lookups and two owners."""
    storytitle=models.CharField(max_length=255)
    storydetail=models.TextField(null=True, blank=True)
    storyenterdate=models.DateField()
    storyclosedate=models.DateField(null=True, blank=True)
    storypriority=models.ForeignKey(Priority_lkup, on_delete=models.DO_NOTHING)
    storystatus=models.ForeignKey(Status_lkup, on_delete=models.DO_NOTHING)
    storyeffortpoints=models.ForeignKey(Points_lkup, on_delete=models.DO_NOTHING, null=True, blank=True)
    assignedsprint=models.ForeignKey(Sprint, on_delete=models.DO_NOTHING)
    # Two FKs to the same model need distinct related_name reverse accessors.
    ownerproduct=models.ForeignKey(Teammember, related_name='stakeholder', on_delete=models.DO_NOTHING)
    ownersprintteam=models.ForeignKey(Teammember, related_name='technicalowner', on_delete=models.DO_NOTHING)
    def __str__(self):
        return self.storytitle
    class Meta:
        db_table='story'
        verbose_name_plural='stories'
class Task(models.Model):
    """A work item belonging to a Story, assignable to many team members."""
    tasktitle=models.CharField(max_length=255)
    story=models.ForeignKey(Story, on_delete=models.DO_NOTHING)
    # NOTE(review): unconventional capitalized field name; renaming would
    # require a schema migration, so it is documented rather than changed.
    Teammembermember=models.ManyToManyField(Teammember)
    taskdetails=models.TextField(null=True, blank=True)
    taskefforttime=models.DecimalField(decimal_places=2,max_digits=4, null=True, blank=True)
    def __str__(self):
        return self.tasktitle
    class Meta:
        db_table='task'
        verbose_name_plural='tasks'
|
<filename>crosshair/enforce_test.py
import abc
from contextlib import ExitStack
import unittest
import sys
import pytest
from crosshair.condition_parser import Pep316Parser
from crosshair.enforce import EnforcedConditions
from crosshair.enforce import PostconditionFailed
from crosshair.enforce import PreconditionFailed
from crosshair.tracers import COMPOSITE_TRACER
from crosshair.util import set_debug
def foo(x: int) -> int:
    """
    pre: 0 <= x <= 100
    post: _ > x
    """
    # NOTE: the docstring above is a CrossHair contract, not prose.
    return 2 * x
class Pokeable:
    """
    inv: self.x >= 0
    """
    # NOTE: the docstrings in this class are CrossHair contracts (inv/pre),
    # parsed at enforcement time — do not edit them as prose.
    x: int = 1
    def poke(self) -> None:
        # Incrementing preserves the invariant (x >= 0).
        self.x += 1
    def pokeby(self, amount: int) -> None:
        """
        pre: amount >= 0
        """
        self.x += amount
def same_thing(thing: object) -> object:
    """ post: __old__.thing == _ """
    # The docstring is a CrossHair postcondition referencing `__old__`.
    # If `thing` isn't copyable, it won't be available in `__old__`.
    # In this case, enforcement will fail with an AttributeError.
    return thing
class Enforcement(ExitStack):
    # Context manager that turns on CrossHair contract enforcement for the
    # duration of a `with` block.  NOTE: __enter__ returns None (not self),
    # so `with Enforcement() as x` binds None.
    def __enter__(self):
        super().__enter__()
        enforced_conditions = EnforcedConditions(Pep316Parser())
        # Enter the tracer, the parsed conditions, then the enforcement
        # toggle; ExitStack unwinds them in reverse order on exit.
        self.enter_context(COMPOSITE_TRACER)
        self.enter_context(enforced_conditions)
        self.enter_context(enforced_conditions.enabled_enforcement())
        COMPOSITE_TRACER.trace_caller()
class CoreTest(unittest.TestCase):
    """Checks that contracts are enforced only inside an Enforcement block."""
    def test_enforce_conditions(self) -> None:
        self.assertEqual(foo(-1), -2)  # unchecked outside Enforcement
        with Enforcement():
            self.assertEqual(foo(50), 100)
            with self.assertRaises(PreconditionFailed):
                foo(-1)
            with self.assertRaises(PostconditionFailed):
                foo(0)
    def test_class_enforce(self) -> None:
        Pokeable().pokeby(-1)  # no exception (yet!)
        with Enforcement():
            Pokeable().poke()
            with self.assertRaises(PreconditionFailed):
                Pokeable().pokeby(-1)
    def test_enforce_on_uncopyable_value(self) -> None:
        # `__old__` snapshots require copying; an uncopyable argument makes
        # the postcondition lookup fail with AttributeError.
        class NotCopyable:
            def __copy__(self):
                raise TypeError("not copyable")
        not_copyable = NotCopyable()
        with Enforcement():
            with self.assertRaises(AttributeError):
                same_thing(not_copyable)
class BaseFooable:
    # NOTE: the method docstrings below are CrossHair `pre:` contracts,
    # not prose documentation.
    def foo(self, x: int):
        """ pre: x > 100 """
    def foo_only_in_super(self, x: int):
        """ pre: x > 100 """
    @classmethod
    def class_foo(cls, x: int):
        """ pre: x > 100 """
    @staticmethod
    def static_foo(x: int):
        """ pre: x > 100 """
class DerivedFooable(BaseFooable):
    # Overrides weaken the preconditions (x > 0 instead of x > 100) for
    # instance, class and static methods — exercised by the tests below.
    def foo(self, x: int):
        """ pre: x > 0 """
    @classmethod
    def class_foo(cls, x: int):
        """ pre: x > 0 """
    @staticmethod
    def static_foo(x: int):
        """ pre: x > 0 """
class TrickyCasesTest(unittest.TestCase):
    """Regression tests for subtle enforcement interactions."""

    def test_attrs_restored_properly(self) -> None:
        # Enforcement patches class members; verify they are restored on exit.
        orig_class_dict = DerivedFooable.__dict__.copy()
        with Enforcement():
            pass
        for k, v in orig_class_dict.items():
            # FIX: assertion message read 'changed afer encforcement'.
            self.assertIs(
                DerivedFooable.__dict__[k], v, f'member "{k}" changed after enforcement'
            )

    def test_enforcement_of_class_methods(self) -> None:
        with Enforcement():
            with self.assertRaises(PreconditionFailed):
                BaseFooable.class_foo(50)
        with Enforcement():
            # The derived class weakens the precondition to x > 0, so no error.
            DerivedFooable.class_foo(50)

    def test_enforcement_of_static_methods(self) -> None:
        with Enforcement():
            DerivedFooable.static_foo(50)
            with self.assertRaises(PreconditionFailed):
                BaseFooable.static_foo(50)

    def test_super_method_enforced(self) -> None:
        with Enforcement():
            with self.assertRaises(PreconditionFailed):
                DerivedFooable().foo_only_in_super(50)
            with self.assertRaises(PreconditionFailed):
                DerivedFooable().foo(-1)
            # Derived class has a weaker precondition, so this is OK:
            DerivedFooable().foo(50)
# NOTE: the class/__init__ docstrings below are CrossHair contracts.
class WithMetaclass(metaclass=abc.ABCMeta):
    """ inv: x != 55 """
    def __init__(self, x):
        """ pre: x != 22 """
        self.x = x
def test_enforcement_init_on_abcmeta() -> None:
    # Construction through an ABCMeta metaclass must still enforce the
    # __init__ precondition and the class invariant.
    with Enforcement():
        with pytest.raises(PreconditionFailed):
            WithMetaclass(22)
        with pytest.raises(PostconditionFailed):
            WithMetaclass(55)  # violates `inv: x != 55` after __init__
        WithMetaclass(99)
if __name__ == "__main__":
if ("-v" in sys.argv) or ("--verbose" in sys.argv):
set_debug(True)
unittest.main()
|
<gh_stars>0
# flups.io: tools for reading and writing files
import re
import logging
import numpy as np
from .calib import load_latest, calibration
logger = logging.getLogger(__name__)
def read_asc(fname):
    """Read one Andor Solis ASCII (`.asc`) file into a numpy array."""
    logger.debug("Loading `.asc` file: %s", fname)
    with open(fname) as fh:
        raw = fh.read()
    # The header is separated from the data table by a triple newline;
    # feed only the non-empty data lines to loadtxt.
    header_end = raw.find("\n\n\n")
    rows = (line for line in raw[header_end:].splitlines() if line)
    return np.loadtxt(rows, delimiter=",")
def load_asc_series(fnames, calib=None, step=None):
    """
    Load a series of Andor Solis `asc` files. Computes the delays and wl.

    Parameters
    ----------
    fnames: iterable of filenames.
        The list of files to load.
    calib: flups.calib.calibration; array-like of shape (2,) or None
        Wavelength calibration used to convert the pixels to wavelength.
        The parameters can also be passed as an array: `[b0, b1]`, where `b0`
        is the initial value assuming 0-based indexing. If `None` (default),
        uses the latest calibration from `flups.calib`.
    step: float or None
        The timestep, in fs. If `None` (default), the timestep will be found
        from the filename as `_sNNN_`.

    Returns
    -------
    delays : (M,) np.ndarray
        Delays, fs. Starts from 0.
    wl : (N,) np.ndarray
        Wavelengths, nm.
    trace : (M,N) np.ndarray
        Signal intensity
    """
    # read the data (column 1 of each .asc file holds the counts)
    trace = [read_asc(fn)[:,1] for fn in fnames]
    # TODO: change to proper error.
    assert np.allclose([t.size for t in trace], trace[0].size)  # equal lengths
    trace = np.array(trace)
    # compute time axis
    # FIX: use a raw string — "\d" in a plain literal is an invalid escape
    # sequence (SyntaxWarning / future SyntaxError on modern Python).
    step = step or float(re.search(r"_s(\d+)_", fnames[0]).group(1))
    n_pix = trace.shape[1]
    delays = np.arange(0, trace.shape[0])*step
    # compute wavelength axis
    pixels = np.arange(n_pix)
    if calib is None:
        calib = load_latest()
    if isinstance(calib, calibration):
        wl = calib.calibrate(pixels)
    else:
        # Linear calibration given as [intercept, slope].
        b0, b1 = calib
        wl = b0 + b1*pixels
    assert trace.shape == (delays.size, wl.size)
    return delays, wl, trace
def load_npz(fname):
    """
    Load data from an npz archive.

    Parameters
    ----------
    fname : str
        Path to the archive.

    Returns
    -------
    delays : (M,) np.ndarray
        Delays, fs. Starts from 0.
    wl : (N,) np.ndarray
        Wavelengths, nm.
    trace : (M,N) np.ndarray
        Signal intensity
    """
    archive = np.load(fname)
    return archive["delays"], archive["wl"], archive["trace"]
def load_txt(fname):
    """
    Load data from a ".txt" file.

    The top-left element is discarded, the first column contains the delays,
    the first row contains the wavelengths, and the remaining cells contain
    the signal intensity.

    Parameters
    ----------
    fname : str
        Path to the archive.

    Returns
    -------
    delays : (M,) np.ndarray
        Delays, fs. Starts from 0.
    wl : (N,) np.ndarray
        Wavelengths, nm.
    trace : (M,N) np.ndarray
        Signal intensity
    """
    table = np.loadtxt(fname)
    return table[1:, 0], table[0, 1:], table[1:, 1:]
def save_txt(fname, delays, wl, trace):
    """
    Save the data in a `.txt` file.

    Layout matches `flups.io.load_txt`: the top-left cell is undefined (NaN),
    the first column holds the delays, the first row the wavelengths, and the
    remaining cells the signal intensity.

    Parameters
    ----------
    fname : str
        Path to the archive.
    delays : (M,) np.ndarray
        Delays, fs. Starts from 0.
    wl : (N,) np.ndarray
        Wavelengths, nm.
    trace : (M,N) np.ndarray
        Signal intensity
    """
    n_rows, n_cols = trace.shape
    table = np.full((n_rows + 1, n_cols + 1), np.nan)
    table[1:, 0] = delays
    table[0, 1:] = wl
    table[1:, 1:] = trace
    np.savetxt(fname, table, fmt="%.06g")
|
<reponame>CorentinAmbroise/pynet<filename>pynet/utils.py
# -*- coding: utf-8 -*-
##########################################################################
# NSAp - Copyright (C) CEA, 2019
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
"""
A module with common functions.
"""
# System import
import collections
import shutil
import logging
import tempfile
import warnings
import os
import re
import sys
import inspect
# Third party imports
import torch
import numpy as np
# Layer types inspected by default in get_named_layers / layer_at.
ALLOWED_LAYERS = [
    torch.nn.Conv2d,
    torch.nn.Conv3d,
    torch.nn.ConvTranspose2d,
    torch.nn.ConvTranspose3d,
    torch.nn.BatchNorm2d,
    torch.nn.BatchNorm3d,
    torch.nn.Linear
]
# Textual log-level names accepted by setup_logging.
LEVELS = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL
}
# Package-wide logger.
logger = logging.getLogger("pynet")
class TemporaryDirectory(object):
    """ Securely creates a temporary directory. The resulting object can be
    used as a context manager. When the returned object is used as a context
    manager, the name will be assigned to the target of the as clause in the
    with statement, if there is one.
    """
    def __init__(self, dir=None, prefix=None, name=None):
        """ Initialize the TempDir class.

        Parameters
        ----------
        dir: str, default None
            the location where the temporary folder is created. If specified
            the folder is persistent.
        prefix: str, default None
            if set the directory name will begin with that prefix.
        name: str, default
            if set the directory name will have this name.
        """
        self.tmpdir = None
        self.dir = dir
        self.prefix = prefix
        self.name = name
        # Only auto-delete on exit when we created the folder in the default
        # temp location; an explicit `dir` makes the folder persistent.
        self.delete = self.dir is None
        return
    def __enter__(self):
        if self.dir is not None and self.name is not None:
            # Fixed location: use `<dir>/<name>`, creating it if necessary.
            self.tmpdir = os.path.join(self.dir, self.name)
            if not os.path.isdir(self.tmpdir):
                os.mkdir(self.tmpdir)
        else:
            # Otherwise create a fresh unique directory (prefix optional).
            self.tmpdir = tempfile.mkdtemp(dir=self.dir, prefix=self.prefix)
        return self.tmpdir
    def __exit__(self, type, value, traceback):
        if self.delete and self.tmpdir is not None:
            shutil.rmtree(self.tmpdir)
class RegisteryDecorator(object):
    """Base class whose subclasses act as named object registries.

    Subclasses define a class-level ``REGISTRY`` dict; ``register`` can be
    used as a decorator to add classes or instances to it.
    """
    @classmethod
    def register(cls, obj_or_klass, *args, **kwargs):
        # Prefer the object's own __name__, fall back to its class name,
        # and let an explicit `name` kwarg override both.
        try:
            key = obj_or_klass.__name__
        except AttributeError:
            key = obj_or_klass.__class__.__name__
        if "name" in kwargs:
            key = kwargs["name"]
        if key in cls.REGISTRY:
            raise ValueError(
                "'{0}' name already used in registry.".format(key))
        cls.REGISTRY[key] = obj_or_klass
        return obj_or_klass

    @classmethod
    def get_registry(cls):
        """Return the subclass's registry mapping."""
        return cls.REGISTRY
class Networks(RegisteryDecorator):
    """ Registry of all the available networks.
    """
    REGISTRY = {}
class Regularizers(RegisteryDecorator):
    """ Registry of all the available regularizers.
    """
    REGISTRY = {}
class Losses(RegisteryDecorator):
    """ Registry of all the available losses.
    """
    REGISTRY = {}
class Metrics(RegisteryDecorator):
    """ Registry of all the available metrics.
    """
    REGISTRY = {}
def get_tools():
    """ List all available Deep Learning tools.

    Returns
    -------
    tools: dict
        all available tools for Deep Learning application, keyed by
        'networks', 'regularizers', 'losses' and 'metrics'.
    """
    tools = {}
    # Look up the registry classes defined in this module by name.
    mod_members = dict(inspect.getmembers(sys.modules[__name__]))
    for key in ["Networks", "Regularizers", "Losses", "Metrics"]:
        tools[key.lower()] = mod_members[key].get_registry()
    return tools
def setup_logging(level="info", logfile=None):
""" Setup the logging.
Parameters
----------
logfile: str, default None
the log file.
"""
logging_format = logging.Formatter(
"[%(asctime)s] {%(pathname)s:%(lineno)d} %(levelname)s - "
"%(message)s", "%Y-%m-%d %H:%M:%S")
while len(logging.root.handlers) > 0:
logging.root.removeHandler(logging.root.handlers[-1])
while len(logger.handlers) > 0:
logger.removeHandler(logger.handlers[-1])
level = LEVELS.get(level, None)
if level is None:
raise ValueError("Unknown logging level.")
logger.setLevel(level)
stream_handler = logging.StreamHandler()
stream_handler.setLevel(level)
stream_handler.setFormatter(logging_format)
logger.addHandler(stream_handler)
if logfile is not None:
file_handler = logging.FileHandler(logfile, mode="a")
file_handler.setLevel(level)
file_handler.setFormatter(logging_format)
logger.addHandler(file_handler)
if level != logging.DEBUG:
warnings.simplefilter("ignore", DeprecationWarning)
def logo():
    """ pynet logo is ascii art using Big Money-ne.

    Returns
    -------
    logo: str
        the logo.
    """
    logo = r"""
 /$$
| $$
 /$$$$$$ /$$ /$$ /$$$$$$$ /$$$$$$ /$$$$$$
/$$__ $$| $$ | $$| $$__ $$ /$$__ $$|_ $$_/
| $$ \ $$| $$ | $$| $$ \ $$| $$$$$$$$ | $$
| $$ | $$| $$ | $$| $$ | $$| $$_____/ | $$ /$$
| $$$$$$$/| $$$$$$$| $$ | $$| $$$$$$$ | $$$$/
| $$____/ \____ $$|__/ |__/ \_______/ \___/
| $$ /$$ | $$
| $$ | $$$$$$/
|__/ \______/ """
    return logo
def test_model(model, shape):
""" Simple function to test a model.
Parameters
----------
model: Net
the network model.
shape: list of int
the shape of a classical input batch dataset.
"""
x = torch.autograd.Variable(torch.FloatTensor(np.random.random(shape)))
out = model(x)
loss = torch.sum(out)
loss.backward()
return out
def checkpoint(model, epoch, fold, outdir, optimizer=None, scheduler=None,
               **kwargs):
    """ Save the weights of a given model.

    Parameters
    ----------
    model: Net
        the network model.
    epoch: int
        the epoch index.
    fold: int
        the fold index.
    outdir: str
        the destination directory where a 'model_<fold>_epoch_<epoch>.pth'
        file will be generated.
    optimizer: Optimizer, default None
        the network optimizer (save the hyperparameters, etc.).
    scheduler: Scheduler, default None
        the network scheduler.
    kwargs: dict
        others parameters to save.

    Returns
    -------
    outfile: str
        path of the written checkpoint file.
    """
    outfile = os.path.join(
        outdir, "model_{0}_epoch_{1}.pth".format(fold, epoch))
    extras = dict(kwargs)
    if optimizer is not None:
        extras["optimizer"] = optimizer.state_dict()
    if scheduler is not None:
        extras["scheduler"] = scheduler.state_dict()
    # Extra entries override the standard keys on collision, as before.
    payload = {"fold": fold, "epoch": epoch, "model": model.state_dict()}
    payload.update(extras)
    torch.save(payload, outfile)
    return outfile
def get_named_layers(model, allowed_layers=ALLOWED_LAYERS, resume=False):
    """ Function that returns a dictionary with named layers.

    Parameters
    ----------
    model: Net
        the network model.
    allowed_layers: list of torch.nn.Module types, default ALLOWED_LAYERS
        the allowed modules.
    resume: bool, default False
        simplify layer names and skip type checking.

    Returns
    -------
    layers: dict
        the named layers; unless `resume`, names are decorated with
        channel/feature counts (e.g. 'conv1-3.64').
    """
    layers = {}
    for name, mod in model.named_modules():
        # Strip the common "ops." prefix from submodule paths.
        name = name.replace("ops.", "")
        for klass in allowed_layers:
            if isinstance(mod, klass):
                if not resume:
                    # Decorate the name with layer dimensions for uniqueness.
                    if (hasattr(mod, "in_channels") and
                            hasattr(mod, "out_channels")):
                        name = "{0}-{1}.{2}".format(
                            name, mod.in_channels, mod.out_channels)
                    elif hasattr(mod, "num_features"):
                        name = "{0}-{1}".format(name, mod.num_features)
                    elif hasattr(mod, "in_features"):
                        name = "{0}-{1}".format(name, mod.in_features)
                    else:
                        raise ValueError("Layer of type '{0}' is not yet "
                                         "supported.".format(klass.__name__))
                layers[name] = mod
    return layers
def layer_at(model, layer_name, x, allowed_layers=ALLOWED_LAYERS):
    """ Access intermediate layers of pretrained network.

    Parameters
    ----------
    model: Net
        the network model.
    layer_name: str
        the layer name to be inspected.
    x: torch.Tensor
        an input tensor.
    allowed_layers: list of torch.nn.Module types, default ALLOWED_LAYERS
        the allowed modules.

    Returns
    -------
    layer_data: numpy array or list of numpy arrays
        the tensor generated at the requested location.
    weight: numpy array or None
        the layer associated weight.
    """
    # FIX: the ABC aliases were removed from the `collections` namespace in
    # Python 3.10; they live in `collections.abc`.
    from collections.abc import Sequence

    layers = get_named_layers(model)
    try:
        layer = layers[layer_name]
    except KeyError:  # FIX: was a bare `except` that hid unrelated errors
        # Fall back to a direct (undecorated) module lookup.
        layer = model._modules.get(layer_name)
    global hook_x

    def hook(module, inp, out):
        """ Forward hook: print tensor metadata and stash the output data
        in the module-level `hook_x`.
        """
        if isinstance(inp, Sequence):
            inp_size = [item.data.size() for item in inp]
            inp_dtype = [item.data.type() for item in inp]
        else:
            inp_size = inp.data.size()
            inp_dtype = inp.data.type()
        if isinstance(out, Sequence):
            out_size = [item.data.size() for item in out]
            out_dtype = [item.data.type() for item in out]
            out_data = [item.data for item in out]
        else:
            out_size = out.data.size()
            out_dtype = out.data.type()
            out_data = out.data
        print(
            "layer:", type(module),
            "\ninput:", type(inp),
            "\n len:", len(inp),
            "\n data size:", inp_size,
            "\n data type:", inp_dtype,
            "\noutput:", type(out),
            "\n data size:", out_size,
            "\n data type:", out_dtype)
        global hook_x
        hook_x = out_data

    # Run one forward pass with the hook attached, then detach it.
    _hook = layer.register_forward_hook(hook)
    _ = model(x)
    _hook.remove()
    if isinstance(hook_x, Sequence):
        layer_data = [item.cpu().numpy() for item in hook_x]
    else:
        layer_data = hook_x.cpu().numpy()
    layer_weight = None
    if hasattr(layer, "weight"):
        layer_weight = layer.weight.detach().numpy()
    return layer_data, layer_weight
def freeze_layers(model, layer_names):
    """ Freeze some weights in a network based on layer names.

    Parameters
    ----------
    model: Net
        the network model.
    layer_names: list of str
        the layer associated weights to be frozen.
    """
    # Use simplified names so callers can address layers directly.
    named = get_named_layers(
        model, allowed_layers=[torch.nn.Module], resume=True)
    for layer_name in layer_names:
        for weight in named[layer_name].parameters():
            weight.requires_grad = False
def reset_weights(model, checkpoint=None):
    """ Reset all the weights of a model, or restore them from a checkpoint.

    Parameters
    ----------
    model: Net
        the network model.
    checkpoint: dict
        the saved model weights.
    """
    if checkpoint is None:
        # No checkpoint: ask every submodule that can re-initialize itself.
        def _reinit(module):
            if hasattr(module, "reset_parameters"):
                module.reset_parameters()
        model.apply(_reinit)
        return
    # Accept a checkpoint object, a {'model': state_dict} dict, or a raw
    # state dict.
    if hasattr(checkpoint, "state_dict"):
        model.load_state_dict(checkpoint.state_dict())
    elif isinstance(checkpoint, dict) and "model" in checkpoint:
        model.load_state_dict(checkpoint["model"])
    else:
        model.load_state_dict(checkpoint)
|
#!/usr/bin/env python
# coding: utf-8
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from django.utils.encoding import force_str
"""
test_django-teryt
------------
Tests for `django-teryt` modules module.
"""
from django.test import TestCase
from django.db import models
from ..models import (RodzajMiejscowosci, JednostkaAdministracyjna,
Miejscowosc, Ulica)
from .factories import (RodzajMiejscowosciFactory,
JednostkaAdministracyjnaFactory,
MiejscowoscFactory, UlicaFactory)
class MixinTestObjectsManager(object):
    # NOTE(review): this checks JednostkaAdministracyjna.objects specifically
    # rather than the model of the mixing-in test case — confirm whether a
    # per-model check was intended.
    def test_objects_model_manager(self):
        self.assertIsInstance(JednostkaAdministracyjna.objects, models.Manager)
class TestRodzajMiejscowosci(TestCase, MixinTestObjectsManager):
    """Tests for the RodzajMiejscowosci (place-type) TERYT model."""
    def test_str(self):
        rm = RodzajMiejscowosciFactory(id='96', nazwa='miasto')
        self.assertEqual(str(rm), '96: miasto')
    def test_set_val(self):
        # set_val maps raw TERYT record keys (RM, STAN_NA, NAZWA_RM) onto
        # the model fields.
        rm = RodzajMiejscowosci()
        rm.set_val({'RM': '01',
                    'STAN_NA': '2013-02-28',
                    'NAZWA_RM': 'wieś'})
        self.assertEqual(rm.nazwa, 'wieś')
        self.assertEqual(rm.id, '01')
        self.assertEqual(rm.stan_na, '2013-02-28')
class TestJednostkaAdministracyjna(TestCase, MixinTestObjectsManager):
    """Tests for the JednostkaAdministracyjna (administrative unit) model."""

    def setUp(self):
        # Fixture tree: voivodeship '02' -> county '0201' -> communes
        # '0201011' and '0201044', each commune holding one city locality.
        self.rm_miasto = RodzajMiejscowosciFactory(
            id='96',
            nazwa='miasto')
        self.gmina = JednostkaAdministracyjnaFactory(
            id='0201011',
            nazwa='Bolesławiec',
            nazwa_dod='gmina miejska')
        self.gmina_nowogrodziec = JednostkaAdministracyjnaFactory(
            id='0201044',
            nazwa='Nowogrodziec',
            nazwa_dod='miasto')
        self.powiat = JednostkaAdministracyjnaFactory(
            id='0201',
            nazwa='bolesławiecki',
            nazwa_dod='powiat')
        # NOTE(review): 'wojewdztwo' is probably a typo for 'województwo';
        # the value is never asserted, so it is left untouched here.
        self.wojewodztwo = JednostkaAdministracyjnaFactory(
            id='02',
            nazwa='DOLNOŚLĄSKIE',
            nazwa_dod='wojewdztwo')
        self.boleslawiec = MiejscowoscFactory(
            symbol='0935989',
            jednostka=self.gmina,
            miejscowosc_nadrzedna=None,
            nazwa='Bolesławiec',
            rodzaj_miejscowosci=self.rm_miasto)
        self.nowogrodziec = MiejscowoscFactory(
            symbol='0936262',
            jednostka=self.gmina_nowogrodziec,
            miejscowosc_nadrzedna=None,
            nazwa='Nowogrodziec',
            rodzaj_miejscowosci=self.rm_miasto)

    def test_str(self):
        """str() renders as '<id>: <nazwa>'."""
        self.assertEqual(str(self.gmina), '0201011: Bolesławiec')

    def test_set_val(self):
        """set_val() builds the id and type from raw TERYT keys."""
        gmina = JednostkaAdministracyjna()
        gmina.set_val({
            'GMI': '01',
            'POW': '01',
            'STAN_NA': '2013-01-01',
            'NAZWA_DOD': 'gmina miejska',
            'RODZ': '1',
            'NAZWA': 'Bolesławiec',
            'WOJ': '02'
        })
        # Voivodeship rows have no POW/GMI/RODZ components.
        wojewodztwo = JednostkaAdministracyjna()
        wojewodztwo.set_val({
            'GMI': None,
            'POW': None,
            'STAN_NA': '2013-01-01',
            'NAZWA_DOD': 'województwo',
            'RODZ': None,
            'NAZWA': u'DOLNOŚLĄSKIE',
            'WOJ': '02'})
        # Common
        self.assertEqual(gmina.stan_na, '2013-01-01')
        self.assertEqual(gmina.aktywny, False)
        # JednostkaAdministracyjna - gmina
        self.assertEqual(gmina.id, '0201011')
        self.assertEqual(gmina.nazwa, 'Bolesławiec')
        self.assertEqual(gmina.nazwa_dod, 'gmina miejska')
        self.assertEqual(gmina.typ, 'GMI')
        # JednostkaAdministracyjna - województwo
        # (input name 'DOLNOŚLĄSKIE' is expected back lower-cased)
        self.assertEqual(wojewodztwo.id, '02')
        self.assertEqual(wojewodztwo.nazwa, 'dolnośląskie')
        self.assertEqual(wojewodztwo.nazwa_dod, 'województwo')
        self.assertEqual(wojewodztwo.typ, 'WOJ')

    def test_parents(self):
        """powiat()/wojewodztwo() return the expected parent units."""
        self.assertEqual(self.gmina.powiat(), self.powiat)
        self.assertEqual(self.gmina.wojewodztwo(), self.wojewodztwo)
        self.assertEqual(self.powiat.wojewodztwo(), self.wojewodztwo)

    def test_managers_wojewodztwa(self):
        """The 'wojewodztwa' manager matches the single voivodeship fixture."""
        self.assertIsInstance(JednostkaAdministracyjna.wojewodztwa,
                              models.Manager)
        self.assertEqual(JednostkaAdministracyjna.wojewodztwa.count(), 1)
        JednostkaAdministracyjna.wojewodztwa.get(id='02')

    def test_managers_powiaty(self):
        """The 'powiaty' manager matches the single county fixture."""
        self.assertIsInstance(JednostkaAdministracyjna.powiaty, models.Manager)
        self.assertEqual(JednostkaAdministracyjna.powiaty.count(), 1)
        JednostkaAdministracyjna.powiaty.get(id='0201')

    def test_managers_gminy(self):
        """The 'gminy' manager matches both commune fixtures."""
        self.assertIsInstance(JednostkaAdministracyjna.gminy, models.Manager)
        self.assertEqual(JednostkaAdministracyjna.gminy.count(), 2)
        JednostkaAdministracyjna.gminy.get(id='0201011')

    def test_miejscowosci(self):
        """miejscowosci() counts localities across the unit's subtree."""
        self.assertEqual(self.gmina.miejscowosci().count(), 1)
        self.assertEqual(self.powiat.miejscowosci().count(), 2)
        self.assertEqual(self.wojewodztwo.miejscowosci().count(), 2)
        self.gmina.miejscowosci().get(symbol='0935989')
class TestMiejscowosc(TestCase, MixinTestObjectsManager):
    """Tests for the Miejscowosc (locality) model."""

    def setUp(self):
        # BUG FIX: the fixture name had been replaced by a '<NAME>'
        # anonymisation placeholder, which broke test_str. Restored to
        # 'Strzygowska Kolonia' to match the assertion in test_str and the
        # NAZWA value used in test_set_val.
        self.miejscowosc = MiejscowoscFactory(
            symbol='0861110',
            miejscowosc_nadrzedna=None,
            nazwa='Strzygowska Kolonia',
            rodzaj_miejscowosci__id='02',
            rodzaj_miejscowosci__nazwa='kolonia')
        self.warszawa = MiejscowoscFactory(
            symbol='0918123',
            miejscowosc_nadrzedna=None,
            nazwa='Warszawa',
            rodzaj_miejscowosci__id='96',
            rodzaj_miejscowosci__nazwa='miasto')
        self.wies = MiejscowoscFactory(
            symbol='0005546',
            miejscowosc_nadrzedna=None,
            nazwa='Wolica',
            rodzaj_miejscowosci__id='01',
            rodzaj_miejscowosci__nazwa='wieś')

    def test_managers_miasta(self):
        """The 'miasta' manager matches the single city fixture."""
        self.assertIsInstance(Miejscowosc.miasta, models.Manager)
        self.assertEqual(Miejscowosc.miasta.count(), 1)
        Miejscowosc.miasta.get(symbol='0918123')

    def test_managers_wsie(self):
        """The 'wsie' manager matches the single village fixture."""
        self.assertIsInstance(Miejscowosc.wsie, models.Manager)
        self.assertEqual(Miejscowosc.wsie.count(), 1)
        Miejscowosc.wsie.get(symbol='0005546')

    def test_str(self):
        """str() renders as '<symbol>: <nazwa>'."""
        self.assertEqual(str(self.miejscowosc), '0861110: Strzygowska Kolonia')

    def test_set_val(self):
        """set_val() maps raw TERYT locality keys onto model fields."""
        m_dict = {
            'GMI': '06',
            'RODZ_GMI': '5',
            'POW': '18',
            'STAN_NA': '2013-03-06',
            'SYM': '0861110',
            'NAZWA': 'Strzygowska Kolonia',
            'WOJ': '04',
            'RM': '02',
            'SYMPOD': '0861110',
            'MZ': '1'
        }
        miejscowosc = Miejscowosc()
        miejscowosc.set_val(m_dict)
        # Common
        self.assertEqual(miejscowosc.stan_na, '2013-03-06')
        self.assertEqual(miejscowosc.aktywny, False)
        # Miejscowosc: SYMPOD equal to SYM means "no parent locality".
        self.assertIsNone(miejscowosc.miejscowosc_nadrzedna)
        self.assertEqual(miejscowosc.symbol, '0861110')
        self.assertEqual(miejscowosc.jednostka_id, '0418065')
        self.assertEqual(miejscowosc.nazwa, 'Strzygowska Kolonia')
        # RodzajMiejscowosci instance made in setUp()
        self.assertEqual(miejscowosc.rodzaj_miejscowosci.nazwa, 'kolonia')
        # A differing SYMPOD becomes the parent locality id.
        m_dict['SYMPOD'] = '1234567'
        miejscowosc2 = Miejscowosc()
        miejscowosc2.set_val(m_dict)
        self.assertEqual(miejscowosc2.miejscowosc_nadrzedna_id, '1234567')
class TestUlica(TestCase, MixinTestObjectsManager):
    """Tests for the Ulica (street) model."""

    def setUp(self):
        self.ulica1 = UlicaFactory(
            miejscowosc__miejscowosc_nadrzedna=None
        )
        self.ulica2 = UlicaFactory(
            cecha='pl.',
            nazwa_1='Hoffa',
            nazwa_2='Bogumiła',
            miejscowosc__miejscowosc_nadrzedna=None
        )

    def test_str(self):
        """str() renders as '<cecha> <name> (<locality>)'."""
        self.assertEqual(str(self.ulica1), 'ul. {} ({})'.format(self.ulica1.nazwa_1, self.ulica1.miejscowosc.nazwa))
        # NOTE(review): '<NAME>' below looks like an anonymisation placeholder
        # that replaced the street name built from nazwa_2/nazwa_1 ('Bogumiła'
        # / 'Hoffa'); the exact expected string cannot be reconstructed from
        # this file — confirm against the model's __str__ before relying on it.
        self.assertEqual(str(self.ulica2), 'pl. <NAME> ({})'.format(self.ulica2.miejscowosc.nazwa))

    def test_set_val(self):
        """set_val() maps raw TERYT street keys onto model fields."""
        m_dict = {
            'GMI': '03',
            'RODZ_GMI': '2',
            'NAZWA_1': 'Cicha',
            'NAZWA_2': None,
            'POW': '03',
            'STAN_NA': '2013-12-16',
            'SYM': '0185962',
            'CECHA': 'ul.',
            'WOJ': '08',
            'SYM_UL': '02974'
        }
        ulica = Ulica()
        ulica.set_val(m_dict)
        # Common
        self.assertEqual(ulica.stan_na, '2013-12-16')
        self.assertEqual(ulica.aktywny, False)
        # Ulica: id is the locality symbol (SYM) + street symbol (SYM_UL).
        self.assertEqual(ulica.id, '018596202974')
        self.assertEqual(ulica.miejscowosc_id, '0185962')
        self.assertEqual(ulica.symbol_ulicy, '02974')
        self.assertEqual(ulica.cecha, 'ul.')
        self.assertEqual(ulica.nazwa_1, 'Cicha')
        self.assertIsNone(ulica.nazwa_2)
|
# -*- coding: utf-8 -*-
import sqlite3
from unicorns.Unicorn import Unicorn
def setup():
    '''
    Creates a database, empties it and populates it with four unicorns.

    Drops any existing `unicorns` table, so calling this resets the data.
    '''
    conn = sqlite3.connect('unicorns.db')
    c = conn.cursor()
    c.execute("DROP TABLE IF EXISTS unicorns")
    # BUG FIX: the schema previously read "spottedWhen DATETIME CURRENT_TIMESTAMP".
    # SQLite parses that as a (meaningless) multi-word type name, so no default
    # value was ever applied. "DEFAULT CURRENT_TIMESTAMP" is the intended
    # constraint: rows inserted without spottedWhen now get the current time.
    c.execute("CREATE TABLE unicorns (id INTEGER PRIMARY KEY, name TEXT, description TEXT, reportedBy TEXT, location TEXT, lat REAL, lon REAL, spottedWhen DATETIME DEFAULT CURRENT_TIMESTAMP, image TEXT)")
    c.execute("INSERT INTO unicorns VALUES (1, 'Nordsvensk travhörning', 'Den nordsvenska travhörningen är en gammal fin lantras. Den har ett trevligt temperament, är uthållig och trivs bra i den nordskandinaviska vintern. Jag fick en glimt av den under en tjänsteresa till Sundsvall under min tid på Telia.', 'Johan', 'Sundsvall, Sverige', 62.4402, 17.3409, '0000-00-00 00:00:00', 'http://unicorns.idioti.se/bilder/nordsvensk.jpg')")
    c.execute("INSERT INTO unicorns VALUES (2, 'Karibisk strandponny', 'En lynnig ras som hittas i den karibiska övärlden. Dras till saltvatten och lever av fisk och skaldjur. Just det här exemplaret skådades under en familjesemester. Den sprang ut framför bilen så att min svåger fick svänga och körde över en duva. Oerhört tragiskt.', 'Johan', 'Bahia Honda Key, USA', 24.6661, -81.2636, '2014-10-26 23:00:00', 'http://unicorns.idioti.se/bilder/strandponny.jpg')")
    c.execute("INSERT INTO unicorns VALUES (3, 'Nattaktiv hornlöpare', 'Under en tur med mina scouter sov jag vid det gamla slottet i Strečno. Det är en pittoresk ruin, som tydligen är någon form av hotspot för den här enhörningsrasen. De tenderar att mest röra sig nattetid, från vilket de fått sitt namn. Notera det ovanligt tunna hornet. Ett riktigt praktexemplar!', 'Johan', 'Strečno, Slovakien', 49.1778, 18.8902, '2015-09-08 12:14:15', 'http://unicorns.idioti.se/bilder/nattaktiv.jpg')")
    c.execute("INSERT INTO unicorns VALUES (4, 'Småväxt enhörning', 'Morsans gamla granne var veterinär och hade en hel uppsjö av djur. Hundar, höns, hängbukssvin och en småväxt enhörning vid namn Morris. Morris var, trots sin något bistra uppsyn, en trevlig varelse. Till skillnad från alla andra enhörningar jag stött på spinner den här rasen och äter kattmat. En oerhört spännande varelse. Yes.', 'Johan', '<NAME>', 55.671, 12.5212, '2013-08-23 22:08:00', 'http://unicorns.idioti.se/bilder/smavaxt.jpg')")
    conn.commit()
    conn.close()
def fetch_unicorns():
    '''
    Fetches all unicorns in the database. Returns a list with them.
    '''
    conn = sqlite3.connect('unicorns.db')
    cursor = conn.cursor()
    rows = cursor.execute("SELECT * FROM unicorns").fetchall()
    conn.close()
    result = []
    for row in rows:
        creature = Unicorn()
        creature.fromDB(row)
        result.append(creature)
    return result
def fetch_unicorn(unicorn_id):
    '''
    Fetches a specific unicorn from the database.

    Returns None when no unicorn with the given id exists.
    '''
    conn = sqlite3.connect('unicorns.db')
    cursor = conn.cursor()
    cursor.execute("SELECT * FROM unicorns WHERE id = ?", (int(unicorn_id), ))
    row = cursor.fetchone()
    conn.close()
    if not row:
        return None
    found = Unicorn()
    found.fromDB(row)
    return found
def add_unicorn(unicorn):
    '''
    Adds a unicorn. The parameter is a Unicorn object.

    The nested spottedWhere dict is flattened into the location/lat/lon
    columns; the object's id is set to the newly assigned row id.
    '''
    record = unicorn.toDict()
    place = record['spottedWhere']
    record['location'] = place['name']
    record['lat'] = place['lat']
    record['lon'] = place['lon']
    conn = sqlite3.connect('unicorns.db')
    cursor = conn.cursor()
    cursor.execute(
        "INSERT INTO unicorns (name, description, reportedBy, location, "
        "lat, lon, spottedWhen, image)"
        "VALUES (:name, :description, :reportedBy, :location, "
        ":lat, :lon, :spottedWhen, :image)",
        record)
    unicorn.id = cursor.lastrowid
    conn.commit()
    conn.close()
    return unicorn
def update_unicorn(unicorn):
    '''
    Updates a unicorn. The parameter is a Unicorn object.

    Returns True if a row was actually updated, False otherwise.
    '''
    flattened_unicorn = unicorn.toDict()
    # Flatten the nested 'spottedWhere' dict into the flat DB columns.
    flattened_unicorn['location'] = flattened_unicorn['spottedWhere']['name']
    flattened_unicorn['lat'] = flattened_unicorn['spottedWhere']['lat']
    flattened_unicorn['lon'] = flattened_unicorn['spottedWhere']['lon']
    # BUG FIX: removed a leftover debug print of the flattened record.
    conn = sqlite3.connect('unicorns.db')
    c = conn.cursor()
    c.execute("UPDATE unicorns SET id=:id, name=:name, description=:description, " +
              "reportedBy=:reportedBy, location=:location, lat=:lat, lon=:lon, " +
              "spottedWhen=:spottedWhen, image=:image WHERE id=:id",
              flattened_unicorn)
    updated = c.rowcount > 0
    conn.commit()
    conn.close()
    return updated
def delete_unicorn(unicorn_id):
    '''
    Deletes a given unicorn from the database.

    Returns True if a row was deleted, False otherwise.
    '''
    conn = sqlite3.connect('unicorns.db')
    cursor = conn.cursor()
    cursor.execute("DELETE FROM unicorns WHERE id=?", (unicorn_id, ))
    was_deleted = cursor.rowcount > 0
    conn.commit()
    conn.close()
    return was_deleted
|
import logging
import os
from typing import Dict
from ilkbyte.exception import ConfigurationError
from ilkbyte.session import IlkbyteAPISession
from ilkbyte.utils import PowerAction, DNSRecordType
logger = logging.getLogger(__name__)
class Ilkbyte(object):
    """Client for the ilkbyte REST API.

    Every method issues a single GET request through the underlying
    IlkbyteAPISession, raises an HTTP error on a non-success status and
    returns the decoded JSON payload as a dict.
    """

    def __init__(self, host: str = None, secret_key: str = None, access_key: str = None):
        """
        Ilkbyte API client.

        Args:
            host (str): Hostname of the ilkbyte api. Falls back to the
                ILKBYTE_HOST environment variable.
            secret_key (str): Secret key. Falls back to ILKBYTE_SECRET_KEY.
            access_key (str): Access key. Falls back to ILKBYTE_ACCESS_KEY.

        Raises:
            ConfigurationError: If any setting is missing both as an argument
                and as an environment variable.
        """
        if not host:
            host = os.getenv('ILKBYTE_HOST')
        if not host:
            logger.error("hostname variable or ILKBYTE_HOST environment variable is required!")
            raise ConfigurationError()
        if not secret_key:
            secret_key = os.getenv('ILKBYTE_SECRET_KEY')
        if not secret_key:
            logger.error("secret_key variable or ILKBYTE_SECRET_KEY environment variable is required!")
            raise ConfigurationError()
        if not access_key:
            access_key = os.getenv('ILKBYTE_ACCESS_KEY')
        if not access_key:
            logger.error("access_key variable or ILKBYTE_ACCESS_KEY environment variable is required!")
            raise ConfigurationError()
        self._session = IlkbyteAPISession(host, secret_key, access_key)

    def get_account(self) -> Dict:
        """Return account details."""
        response = self._session.get_resource('account')
        response.raise_for_status()
        return response.json()

    def get_users(self) -> Dict:
        """Return the users attached to the account."""
        response = self._session.get_resource('account/users')
        response.raise_for_status()
        return response.json()

    def get_all_servers(self, page_number: int = 1):
        """Return one page of all servers (active and inactive)."""
        response = self._session.get_resource('server/list/all', params={
            'p': page_number
        })
        response.raise_for_status()
        return response.json()

    def get_active_servers(self, page_number: int = 1):
        """Return one page of active servers."""
        response = self._session.get_resource('server/list', params={
            'p': page_number
        })
        response.raise_for_status()
        return response.json()

    def get_plans(self, page_number: int = 1):
        """Return available plans/images for server creation.

        BUG FIX: page_number was accepted but silently ignored; it is now
        forwarded as the 'p' query parameter, consistent with the other
        paginated endpoints.
        """
        response = self._session.get_resource('server/create', params={
            'p': page_number
        })
        response.raise_for_status()
        return response.json()

    def create_server(self, username, name, os_id, app_id, package_id, sshkey, password=None):
        """Create a new server with the given configuration.

        Args:
            username: Server owner username.
            name: Server name.
            os_id: Operating system id (see get_plans()).
            app_id: Application id.
            package_id: Plan/package id.
            sshkey: Public SSH key to install.
            password: Optional root password; omitted from the request
                when not provided.
        """
        params = {
            'username': username,
            'name': name,
            'os_id': os_id,
            'app_id': app_id,
            'package_id': package_id,
            'sshkey': sshkey,
        }
        # BUG FIX: the original condition was inverted ("if not password"),
        # which sent password=None when no password was given and dropped a
        # real password when one was supplied.
        if password:
            params['password'] = password
        response = self._session.get_resource('server/create/config', params=params)
        response.raise_for_status()
        return response.json()

    def get_server(self, server_name: str):
        """Return details of a single server."""
        response = self._session.get_resource(f"server/manage/{server_name}/show")
        response.raise_for_status()
        return response.json()

    def set_power(self, server_name: str, action: PowerAction):
        """Apply a power action (start/stop/restart/...) to a server."""
        response = self._session.get_resource(f"server/manage/{server_name}/power", params={
            'set': action.value
        })
        response.raise_for_status()
        return response.json()

    def get_ips(self, server_name: str):
        """Return the IP addresses assigned to a server."""
        response = self._session.get_resource(f"server/manage/{server_name}/ip/list")
        response.raise_for_status()
        return response.json()

    def get_ip_logs(self, server_name: str):
        """Return the IP assignment logs of a server."""
        response = self._session.get_resource(f"server/manage/{server_name}/ip/logs")
        response.raise_for_status()
        return response.json()

    def get_ip_rdns(self, server_name: str, ip: str, rdns: str):
        """Set/query the reverse DNS record for one of the server's IPs."""
        response = self._session.get_resource(f"server/manage/{server_name}/ip/rdns", params={
            'ip': ip,
            'rdns': rdns
        })
        response.raise_for_status()
        return response.json()

    def get_snapshots(self, server_name: str):
        """Return the snapshots of a server."""
        response = self._session.get_resource(f"server/manage/{server_name}/snapshot")
        response.raise_for_status()
        return response.json()

    def create_snapshot(self, server_name: str):
        """Create a new snapshot of a server."""
        response = self._session.get_resource(f"server/manage/{server_name}/snapshot/create")
        response.raise_for_status()
        return response.json()

    def restore_snapshot(self, server_name: str, snapshot_name: str):
        """Revert a server to the named snapshot."""
        response = self._session.get_resource(f"server/manage/{server_name}/snapshot/revert", params={
            'name': snapshot_name
        })
        response.raise_for_status()
        return response.json()

    def update_snapshot(self, server_name: str, snapshot_name: str):
        """Update (re-take) the named snapshot."""
        response = self._session.get_resource(f"server/manage/{server_name}/snapshot/update", params={
            'name': snapshot_name
        })
        response.raise_for_status()
        return response.json()

    def delete_snapshot(self, server_name: str, snapshot_name: str):
        """Delete the named snapshot."""
        response = self._session.get_resource(f"server/manage/{server_name}/snapshot/delete", params={
            'name': snapshot_name
        })
        response.raise_for_status()
        return response.json()

    def set_cron(self, server_name: str, cron_name: str, day: int, hour: int, min: int):
        """Schedule a recurring snapshot.

        Note: the `min` parameter shadows the builtin of the same name; it is
        kept because callers may pass it as a keyword argument.
        """
        response = self._session.get_resource(f"server/manage/{server_name}/snapshot/cron/add", params={
            'name': cron_name,
            'day': day,
            'hour': hour,
            'min': min
        })
        response.raise_for_status()
        return response.json()

    def delete_cron(self, server_name: str, cron_name: str):
        """Delete a scheduled snapshot job."""
        response = self._session.get_resource(f"server/manage/{server_name}/snapshot/cron/delete", params={
            'name': cron_name
        })
        response.raise_for_status()
        return response.json()

    def get_backups(self, server_name: str):
        """Return the backups of a server."""
        response = self._session.get_resource(f"server/manage/{server_name}/backup")
        response.raise_for_status()
        return response.json()

    def restore_backup(self, server_name: str, backup_name: str):
        """Restore a server from the named backup."""
        response = self._session.get_resource(f"server/manage/{server_name}/backup/restore", params={
            'backup_name': backup_name
        })
        response.raise_for_status()
        return response.json()

    def get_domains(self, p: int = 1):
        """Return one page of the account's domains."""
        response = self._session.get_resource("domain/list", params={
            'p': p
        })
        response.raise_for_status()
        return response.json()

    def create_domain(self, domain: str, server: str, ipv6: bool):
        """Create a domain pointed at the given server."""
        response = self._session.get_resource("domain/create", params={
            'domain': domain,
            'server': server,
            'ipv6': ipv6
        })
        response.raise_for_status()
        return response.json()

    def get_domain(self, domain_name: str):
        """Return details of a single domain."""
        response = self._session.get_resource(f"domain/manage/{domain_name}/show")
        response.raise_for_status()
        return response.json()

    def add_dns_record(self, domain_name: str, record_name: str, record_type: DNSRecordType, record_content: str,
                       record_priority: int):
        """Add a DNS record to a domain."""
        response = self._session.get_resource(f"domain/manage/{domain_name}/add", params={
            'record_name': record_name,
            'record_type': record_type.value,
            'record_content': record_content,
            'record_priority': record_priority
        })
        response.raise_for_status()
        return response.json()

    def update_dns_record(self, domain_name: str, record_id: int, record_content: str, record_priority: int):
        """Update an existing DNS record."""
        response = self._session.get_resource(f"domain/manage/{domain_name}/update", params={
            'record_id': record_id,
            'record_content': record_content,
            'record_priority': record_priority
        })
        response.raise_for_status()
        return response.json()

    def delete_dns_record(self, domain_name: str, record_id: int):
        """Delete a DNS record."""
        response = self._session.get_resource(f"domain/manage/{domain_name}/delete", params={
            'record_id': record_id
        })
        response.raise_for_status()
        return response.json()

    def dns_push(self, domain_name: str):
        """Publish pending DNS changes for a domain."""
        response = self._session.get_resource(f"domain/manage/{domain_name}/push")
        response.raise_for_status()
        return response.json()
|
<reponame>matveymisyura-mango/vosk-server
# Copyright (c) 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Byte-order selectors for the raw PCM input stream.
LITTLE_ENDIAN = 0
BIG_ENDIAN = 1
# Sample-format selector: 16-bit signed integer samples.
SIGNED16BIT = 2
# Sample-rate selector: 8000 Hz.
FQ8000HZ = 3
class SilenceDetector:
    """Detects trailing silence in a raw PCM audio stream.

    The detector first calibrates a reference volume level from the incoming
    audio (a loud section immediately followed by a quiet one), then reports
    silence whenever the average level of the most recent `silence_length`
    milliseconds drops below `silence_to_volume_level` times that reference.
    """

    def __init__(self, frequency: int, sample_format: int, byteorder: int, volume_level: float = 0.0,
                 volume_length: int = 150, init_silence_length: int = 500, silence_length: int = 400,
                 silence_to_volume_level: float = 0.01):
        """
        @volume_length - int, total count milliseconds of volume after which object will start to found a silence
        @init_silence_length - int, total count milliseconds of silence for volume level detection
        @silence_length - int, total count milliseconds of silence after which object.get_silence will return True
        @silence_to_volume_level - float, the ratio of the areas under the module of the sound curve in the silence
            section to the voice section
        @frequency - raw format parameter (8000, 16000, 24000, 48000 etc), must be set as constants
        @sample_format - raw format parameter (8bit, 16 bit, signed/unsigned int, float, etc), must be set as
            constant
        """
        self._init_silence_length = init_silence_length
        self._silence_length = silence_length
        self._volume_length = volume_length
        self._diff = silence_to_volume_level
        # Reference volume level; 0 means "not calibrated yet" and selects the
        # auto-calibration path inside get_silence().
        if volume_level:
            self._volume_level = volume_level
        else:
            self._volume_level = 0
        # NOTE(review): only FQ8000HZ, SIGNED16BIT and the two byte orders are
        # handled; any other selector leaves the corresponding attributes
        # unset, so later calls would fail with AttributeError — confirm this
        # is acceptable for the expected call sites.
        if frequency == FQ8000HZ:
            self._freq = 8000
            self._period = 1 / 8000  # seconds per sample
        if sample_format == SIGNED16BIT:
            self._sample_len = 2  # bytes per sample
            self._signed = True
        # todo add other formats there
        if byteorder == LITTLE_ENDIAN:
            self._byte_order = 'little'
        elif byteorder == BIG_ENDIAN:
            self._byte_order = 'big'
        self._data = bytes()  # all raw audio received so far
        self._volume_detected = False
        self._silence_detected = False

    def _store_chunk(self, chunk):
        """
        private method for storing new chunks, can be different for persistent/non persistent audio storages
        """
        self._data += chunk

    def _get_sound_square(self, chunk: bytes) -> float:
        # Sum of absolute sample amplitudes — the "area" under the rectified
        # signal curve (despite the name, nothing is squared).
        res = 0.0
        for i in range(0, len(chunk), self._sample_len):
            res += abs(int.from_bytes(chunk[i:i + self._sample_len], self._byte_order, signed=self._signed))
        return res

    def _get_sound_level(self, chunk) -> float:
        # Average amplitude per second of audio in the chunk.
        return self._get_sound_square(chunk) / self._get_chunk_len(chunk)

    def _get_chunk_len(self, chunk: bytes) -> float:
        # Duration of the chunk in seconds (sample count times sample period).
        return float(len(chunk)) / self._sample_len * self._period

    def _get_data_len(self) -> float:
        # Duration of all stored audio in seconds.
        return self._get_chunk_len(self._data)

    def get_silence(self, chunk) -> bool:
        """Store `chunk` and report whether the stream currently ends in silence."""
        self._store_chunk(chunk)
        if not self._volume_level and self._get_data_len() * 1000 < self._volume_length + self._init_silence_length:
            # we don't have enough data for detection
            return False
        if not self._volume_level:
            # Calibration path: compare the trailing `init_silence_length` ms
            # against the `volume_length` ms immediately before it.
            silence_candidate_start = -int(self._init_silence_length * self._sample_len * self._freq / 1000)
            silence_candidate_level = self._get_sound_level(self._data[silence_candidate_start:])
            volume_candidate_start = -int(
                (self._volume_length + self._init_silence_length) / 1000 * self._sample_len / self._period)
            volume_candidate_level = self._get_sound_level(self._data[volume_candidate_start:silence_candidate_start])
            if volume_candidate_level > 0:
                volume_level = silence_candidate_level / volume_candidate_level
                if volume_level < self._diff:
                    # Loud section followed by a quiet one: adopt the loud
                    # level as the reference and flag silence.
                    # NOTE(review): _volume_detected is not updated on this
                    # path, unlike the calibrated branch below — confirm
                    # whether it should be reset here too.
                    self._volume_level = volume_candidate_level
                    self._silence_detected = True
        else:
            # Calibrated path: compare the trailing `silence_length` ms
            # against the stored reference volume level.
            silence_candidate_start = -int(self._silence_length * self._freq * self._sample_len / 1000)
            silence_candidate_level = self._get_sound_level(self._data[silence_candidate_start:])
            volume_level = silence_candidate_level / self._volume_level
            if volume_level < self._diff:
                self._silence_detected = True
                self._volume_detected = False
            else:
                self._silence_detected = False
                self._volume_detected = True
        return self._silence_detected

    def is_new_silence(self, chunk) -> bool:
        """Return True only on the transition from non-silence to silence."""
        silence_before = self._silence_detected
        return self.get_silence(chunk) and not silence_before
|
"""Contains the classes and functions for scraping a yahoo finance option page."""
from enum import Enum
from itertools import cycle
from typing import Iterable, List, Optional, Tuple, Union
import pandas
from pandas import DataFrame
import pendulum
from pendulum.datetime import DateTime
from pydantic import BaseModel as Base
from pydantic import Field, validator
from requests_html import HTML
from .cleaner import cleaner, CommonCleaners, field_cleaner
from .lookup import fuzzy_search
from .requestor import requestor
class ContractExpiration(Base):
    """Contract Expiration.

    Attributes:
        symbol (str): Ticker symbol.
        timestamp (str): Timestamp of expiration date.
        expiration_date (DateTime): Datetime of expiration date.

    Notes:
        This class inherits from the pydantic BaseModel which allows for the use
        of .json() and .dict() for serialization to json strings and dictionaries.

        .json(): Serialize to a JSON object.

        .dict(): Serialize to a dictionary.
    """

    symbol: str
    timestamp: str
    # Populated from the same raw "timestamp" input field (alias), then turned
    # into a DateTime by the pre-validator below.
    expiration_date: DateTime = Field(alias="timestamp")

    @validator("expiration_date", pre=True)
    def convert_to_datetime(cls, value: str) -> DateTime:  # pylint: disable=E0213,R0201
        """Convert expiration timestamp to datetime (UTC)."""
        expiration_date = pendulum.from_timestamp(int(value), tz="UTC")
        return expiration_date

    def __lt__(self, other: "ContractExpiration") -> Optional["ContractExpiration"]:
        """Compare expiration_dates for sorting."""
        # NOTE(review): returns None (rather than NotImplemented) for foreign
        # types, so sorting only works among ContractExpiration instances —
        # confirm this is intentional.
        if other.__class__ is self.__class__:
            return self.expiration_date < other.expiration_date
        return None
class ContractExpirationList(Base):
    """Contains Multiple Expirations.

    Attributes:
        expiration_list (List[ContractExpiration]): multiple expirations.

    Notes:
        This class inherits from the pydantic BaseModel which allows for the use
        of .json() and .dict() for serialization to json strings and dictionaries.

        .json(): Serialize to a JSON object.

        .dict(): Serialize to a dictionary.
    """

    expiration_list: List[ContractExpiration]

    @validator("expiration_list")
    def sort_dates(  # pylint: disable=E0213,R0201
        cls, values: List[ContractExpiration]
    ) -> List[ContractExpiration]:
        """Sort expiration_list by date."""
        # Relies on ContractExpiration.__lt__, which compares expiration_date.
        return sorted(values)

    def filter_expirations_after(self, after: DateTime) -> None:
        """Filter out any expiration dates prior to the after date.

        Args:
            after (DateTime): datetime to filter.

        Example:
            |Input | Args |Output |
            |---------------------------|--------------|------------------|
            |[01JAN19, 01FEB19, 01MAR19]|after: 15JAN19|[01FEB19, 01MAR19]|
        """
        # Boundary is inclusive: an expiration exactly equal to `after` is kept.
        filtered = list(filter(lambda exp: exp.expiration_date >= after, self.expiration_list))
        self.expiration_list = filtered

    def filter_expirations_before(self, before: DateTime) -> None:
        """Filter out any expiration dates post the before date.

        Args:
            before (DateTime): datetime to filter.

        Example:
            |Input | Args |Output |
            |---------------------------|---------------|---------|
            |[01JAN19, 01FEB19, 01MAR19]|before: 15JAN19|[01JAN19]|
        """
        # Boundary is inclusive: an expiration exactly equal to `before` is kept.
        filtered = list(filter(lambda exp: exp.expiration_date <= before, self.expiration_list))
        self.expiration_list = filtered

    def filter_expirations_between(self, after: DateTime, before: DateTime) -> None:
        """Filter dates outside of a after and before range.

        Args:
            after (DateTime): datetime to filter.
            before (DateTime): datetime to filter.

        Example:
            |Input | Args |Output |
            |---------------------------|------------------------------|------------------|
            |[01JAN19, 01FEB19, 01MAR19]|after: 15JAN19,before: 15JAN19|[01FEB19, 01MAR19]|
        """
        self.filter_expirations_after(after=after)
        self.filter_expirations_before(before=before)

    def filter_expirations_after_days(self, days: int) -> None:
        """Filter expirations only allowing expirations after n days.

        Args:
            days (int): Number of days to start filtering from. All expirations
                which expire prior to the days will be filtered out.
        """
        after = pendulum.now().add(days=days)
        self.filter_expirations_after(after=after)

    def filter_expirations_before_days(self, days: int) -> None:
        """Filter expiration only allowing expirations before n days.

        Args:
            days (int): Number of days to start filtering from. All expirations
                which expire post days will be filtered out.
        """
        before = pendulum.now().add(days=days)
        self.filter_expirations_before(before=before)

    def filter_expirations_between_days(
        self, after_days: Optional[int] = None, before_days: Optional[int] = None
    ) -> None:
        """Filter expiration only allowing expirations between a range of days.

        Args:
            after_days (int): Number of days to start filtering from. All expirations
                which expire prior to the days will be filtered out.
            before_days (int): Number of days to start filtering from. All expirations
                which expire post days will be filtered out.
        """
        # Note: a value of 0 is falsy and skips the corresponding filter.
        if after_days:
            self.filter_expirations_after_days(days=after_days)
        if before_days:
            self.filter_expirations_before_days(days=before_days)

    def __len__(self) -> int:
        """Length of the expiration_list."""
        return len(self.expiration_list)

    def __iter__(self) -> Iterable:
        """Iterate over the expirations_list."""
        return iter(self.expiration_list)

    def __add__(self, other: "ContractExpirationList") -> Optional["ContractExpirationList"]:
        """Combine two ContractExpirationLists using the + operator."""
        if self.__class__ == other.__class__:
            expiration_list = self.expiration_list + other.expiration_list
            return ContractExpirationList(expiration_list=expiration_list)
        return None
class OptionContractType(str, Enum):
    """String-valued enum distinguishing the two option contract kinds."""

    CALL = "call"  # right to buy at the strike price
    PUT = "put"    # right to sell at the strike price
class OptionContract(Base):
    """Represents an Option Contract.

    Attributes:
        symbol (str): Ticker symbol.
        contract_type (OptionContractType): Call or Put type.
        timestamp (str): Raw timestamp scraped from yahoo finance. This string is left
            untouched to make sure there is no issues when building a URL.
        expiration_date (DateTime): Converted from the timestamp. This allows allows
            sorting and filtering.
        in_the_money (bool): True if strike price is ITM else False.
        contract_name (str): Contract Name.
        last_trade_date (DateTime): Date of last trade.
        strike (float): Contracts strike price.
        last_price (float): Last price of a transaction between a contract buyer and a seller.
        bid (float): Last bid price.
        ask (float): Last ask price.
        change (float): Price change in dollars.
        percent_change (float): Price change in percentage.
        volume (int): Volume.
        open_interest (int): Number of contracts opened.
        implied_volatility (float): Contract IV.

    Notes:
        This class inherits from the pydantic BaseModel which allows for the use
        of .json() and .dict() for serialization to json strings and dictionaries.

        .json(): Serialize to a JSON object.

        .dict(): Serialize to a dictionary.
    """

    symbol: str
    contract_type: OptionContractType
    timestamp: str
    expiration_date: DateTime  # already parsed from ContractExpiration
    in_the_money: bool
    contract_name: str
    # last_trade_date: Optional[DateTime] #TODO: fix validator
    # Numeric fields are Optional because scraped cells may be missing or
    # contain placeholder text; the cleaners below normalise them.
    strike: Optional[float]
    last_price: Optional[float]
    bid: Optional[float]
    ask: Optional[float]
    change: Optional[float]
    percent_change: Optional[float]
    volume: Optional[int]
    open_interest: Optional[int]
    implied_volatility: Optional[float]

    # @cleaner("last_trade_date")
    # def clean_last_trade_date(cls, value):
    #     return pendulum.parse(value, strict=False)

    # Shared validators: strip formatting from plain numeric columns...
    _clean_common_values = cleaner(
        "strike", "last_price", "bid", "ask", "volume", "open_interest"
    )(CommonCleaners.clean_common_values)
    # ...and parse percentage-style columns.
    _clean_percentages = cleaner("change", "percent_change", "implied_volatility")(
        CommonCleaners.clean_basic_percentage
    )

    class Config:  # noqa: D106 pylint: disable=C0115
        # Store enum members as their string values when serializing.
        use_enum_values = True
class OptionsChain(Base):
    """Chain of option contracts sharing a single expiration date.

    Attributes:
        symbol (str): Company symbol.
        expiration_date (DateTime): Contracts expiration date.
        chain (List[OptionContract]): List of OptionContracts.

    Notes:
        Inherits from the pydantic BaseModel, so .json() and .dict() are
        available for serialization.
    """

    symbol: str
    expiration_date: DateTime
    chain: List[OptionContract]

    @property
    def dataframe(self) -> DataFrame:
        """Return the chain as a pandas DataFrame (one row per contract)."""
        return DataFrame.from_dict(self.dict()["chain"])

    @property
    def calls(self) -> "OptionsChain":
        """Return a new OptionsChain containing only the call contracts."""
        only_calls = [
            contract
            for contract in self.chain
            if contract.contract_type == OptionContractType.CALL
        ]
        return OptionsChain(
            symbol=self.symbol, expiration_date=self.expiration_date, chain=only_calls
        )

    @property
    def puts(self) -> "OptionsChain":
        """Return a new OptionsChain containing only the put contracts."""
        only_puts = [
            contract
            for contract in self.chain
            if contract.contract_type == OptionContractType.PUT
        ]
        return OptionsChain(
            symbol=self.symbol, expiration_date=self.expiration_date, chain=only_puts
        )

    def __len__(self) -> int:
        """Return the number of OptionContracts in the chain."""
        return len(self.chain)
class MultipleOptionChains(Base):
    """Collection of option chains spanning multiple expiration dates.

    Attributes:
        option_chain_list (List[OptionsChain]): List of option chains.
        contract_expiration_list (ContractExpirationList): List of expirations.

    Notes:
        Inherits from the pydantic BaseModel, so .json() and .dict() are
        available for serialization.
    """

    option_chain_list: List[OptionsChain]
    contract_expiration_list: ContractExpirationList

    @property
    def dataframe(self) -> DataFrame:
        """Return all chains concatenated into a single DataFrame."""
        if len(self.option_chain_list) == 1:
            return self.option_chain_list[0].dataframe
        frames = [chain.dataframe for chain in self.option_chain_list]
        return pandas.concat(frames, ignore_index=True)

    @property
    def calls(self) -> "MultipleOptionChains":
        """Return a copy restricted to call contracts."""
        return MultipleOptionChains(
            option_chain_list=[chain.calls for chain in self],
            contract_expiration_list=self.contract_expiration_list,
        )

    @property
    def puts(self) -> "MultipleOptionChains":
        """Return a copy restricted to put contracts."""
        return MultipleOptionChains(
            option_chain_list=[chain.puts for chain in self],
            contract_expiration_list=self.contract_expiration_list,
        )

    def __len__(self) -> int:
        """Return the number of option chains."""
        return len(self.option_chain_list)

    def __iter__(self) -> Iterable:
        """Iterate over the option chain list."""
        return iter(self.option_chain_list)

    def __add__(self, other: "MultipleOptionChains") -> Optional["MultipleOptionChains"]:
        """Concatenate two MultipleOptionChains with the + operator."""
        if self.__class__ != other.__class__:
            return None  # NOTE: Maybe Should Raise here
        return MultipleOptionChains(
            option_chain_list=self.option_chain_list + other.option_chain_list,
            contract_expiration_list=(
                self.contract_expiration_list + other.contract_expiration_list
            ),
        )
def get_table_elements(html: HTML) -> Tuple[Optional[HTML], Optional[HTML]]:
    """Locate the call and put option tables inside an options page.

    Args:
        html (HTML): HTML element with call and put data.

    Returns:
        Tuple of the call table element and the put table element; either
        entry is None when the matching table is not found.
    """
    return (
        html.find("table.calls", first=True),
        html.find("table.puts", first=True),
    )
def parse_option_table(
    contract_expiration: ContractExpiration, contract_type: OptionContractType, options_table: HTML
) -> List[OptionContract]:
    """Convert a raw HTML options table into OptionContract objects.

    Args:
        contract_expiration (ContractExpiration): Supplies the expiration
            fields for every returned OptionContract.
        contract_type (OptionContractType): Call or Put.
        options_table (HTML): HTML element with raw options table data.

    Returns:
        A list of OptionContracts parsed from the html options_table.
    """
    table_head = options_table.find("thead", first=True)
    table_body = options_table.find("tbody", first=True)
    # Repeat the header names endlessly so each row's cells consume one
    # header apiece, wrapping back to the first column on the next row.
    header_cycle = cycle(table_head.text.split("\n"))
    parsed_contracts = []
    for table_row in table_body.find("tr"):
        row_data = contract_expiration.dict()
        row_data["contract_type"] = contract_type
        row_data["in_the_money"] = "in-the-money" in table_row.attrs["class"]
        for cell_value in table_row.text.split("\n"):
            row_data[field_cleaner(next(header_cycle))] = cell_value
        parsed_contracts.append(OptionContract(**row_data))
    return parsed_contracts
def get_option_expirations(
    symbol: str, **kwargs  # noqa: ANN003
) -> Optional[ContractExpirationList]:
    """Fetch and parse option expiration dates for the selected symbol.

    Args:
        symbol (str): Ticker symbol.
        kwargs: Pass (session, proxies, and timeout) to the requestor function.

    Returns:
        ContractExpirationList when expirations are found, otherwise None.
    """
    url = f"https://finance.yahoo.com/quote/{symbol}/options?p={symbol}"
    response = requestor(url, **kwargs)
    if not response.ok:
        return None
    page = HTML(html=response.text, url=url)
    # The expiration dropdown holds one <option> per contract date.
    dropdown = page.find(r"div.Fl\(start\).Pend\(18px\)", first=True)
    if not dropdown:
        return None
    expirations = [
        ContractExpiration(symbol=symbol, timestamp=option.attrs["value"])
        for option in dropdown.find("option")
    ]
    return ContractExpirationList(expiration_list=expirations)
class OptionPageNotFound(AttributeError):
    """Signals that no options page data could be located for a symbol."""
def get_options_page(  # pylint: disable=R0913, R0914
    symbol: str,
    after_days: Optional[int] = None,
    before_days: Optional[int] = None,
    first_chain: bool = False,
    use_fuzzy_search: bool = True,
    page_not_found_ok: bool = False,
    **kwargs,  # noqa: ANN003
) -> Optional[Union[OptionsChain, MultipleOptionChains]]:
    """Get options data from yahoo finance options page.

    Args:
        symbol (str): Ticker symbol.
        after_days (Optional[int]): Number of days to start filtering from. All
            expirations which expire prior to the days will be filtered out.
        before_days (Optional[int]): Number of days to start filtering from. All
            expirations which expire post days will be filtered out.
        first_chain (bool): If True returns first chain. Else returns all found
            chains within search range.
        use_fuzzy_search (bool): If True, does a symbol lookup validation prior
            to requesting options page data.
        page_not_found_ok (bool): If True, returns None when page is not found.
        **kwargs: Pass (session, proxies, and timeout) to the requestor function.

    Returns:
        OptionsChain: If first_chain is set to True the first found OptionsChain
            within the after_days and before_days range is returned.
            This is all option contracts from a single expiration and symbol.
        MultipleOptionChains: If first_chain is set to False all OptionsChains
            within the after_days and before_days range are returned. This can
            have multiple expirations. Even if one expiration date is found
            the MultipleOptionChains object is returned.
        None: If no contracts are found and page_not_found_ok is True.

    Raises:
        OptionPageNotFound: If page_not_found_ok is False and the Options page
            is not found.
    """
    if use_fuzzy_search:
        # Validate/normalize the symbol before hitting the options endpoint.
        fuzzy_response = fuzzy_search(symbol, first_ticker=True, **kwargs)
        if fuzzy_response:
            symbol = fuzzy_response.symbol
    expirations_list = get_option_expirations(symbol, **kwargs)
    if expirations_list is None:
        return None
    # Compare against None explicitly: a filter value of 0 days is falsy but
    # still a valid request (the old truthiness test silently ignored it).
    if after_days is not None or before_days is not None:
        expirations_list.filter_expirations_between_days(
            after_days=after_days, before_days=before_days
        )
    multiple_option_chains = []
    for expiration in expirations_list:
        timestamp = expiration.timestamp
        symbol = expiration.symbol
        url = f"https://finance.yahoo.com/quote/{symbol}/options?date={timestamp}&p={symbol}"
        response = requestor(url, **kwargs)
        if response.ok:
            html = HTML(html=response.text, url=url)
            calls_table, puts_table = get_table_elements(html)
            # Some expirations render without one or both tables; skip them.
            if calls_table is None or puts_table is None:
                continue
            calls = parse_option_table(expiration, "call", calls_table)
            puts = parse_option_table(expiration, "put", puts_table)
            chain = calls + puts
            option_chain = OptionsChain(
                symbol=symbol, expiration_date=expiration.expiration_date, chain=chain
            )
            if first_chain:
                return option_chain
            multiple_option_chains.append(option_chain)
    if len(multiple_option_chains) > 0:
        return MultipleOptionChains(
            option_chain_list=multiple_option_chains, contract_expiration_list=expirations_list
        )
    if page_not_found_ok:
        return None
    # Message grammar fixed ("options pages is" -> "options page was").
    raise OptionPageNotFound(f"{symbol} options page was not found.")
|
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
################################################
Testcase_PrepareCondition:
Testcase_TestSteps:
Testcase_ExpectedResult:
"""
import os
import pytest
from tests.common.base import TestBase
############################################################
# TestCase= class: put to tests/*/
############################################################
class TestCase(TestBase):
    """Autotest suite for the akg `square` operator over several tensor shapes."""

    def setup(self):
        """Register the square_run argument tables for each test tier.

        Each entry is (testflag, opfuncname, testRunArgs) where testRunArgs
        is (shape, dtype, kernel_name).
        """
        case_name = "test_akg_square_001"
        case_path = os.getcwd()
        # params init
        self.params_init(case_name, case_path)
        self.caseresult = True
        self._log.info("============= {0} Setup case============".format(self.casename))
        # Default (level0) float16 cases.
        self.testarg = [
            ## testflag, opfuncname, testRunArgs, dimArgs
            # shape, dtype, kernel_name, attrs
            ("square_001", "square_run", ((4096,), "float16", "cce_mod_fp16")),
            ("square_002", "square_run", ((1024, 4096), "float16", "cce_mod_fp16")),
            ("square_003", "square_run", ((8192, 1024), "float16", "cce_mod_fp16")),
            ("square_004", "square_run", ((8, 128, 1024), "float16", "cce_mod_fp16")),
            ("square_005", "square_run", ((1280, 1024), "float16", "cce_mod_fp16")),
            ("square_006", "square_run", ((30522,), "float16", "cce_mod_fp16")),
            ("square_007", "square_run", ((160, 1024), "float16", "cce_mod_fp16")),
            ("square_008", "square_run", ((64, 128, 1024), "float16", "cce_mod_fp16")),
            ("square_009", "square_run", ((1024, 1024), "float16", "cce_mod_fp16")),
            ("square_010", "square_run", ((1024,), "float16", "cce_mod_fp16")),
            ("square_011", "square_run", ((2,), "float16", "cce_mod_fp16")),
        ]
        # Cloud tier: currently empty (single float32 case kept commented out).
        self.testarg_cloud = [
            ## testflag, opfuncname, testRunArgs, dimArgs
            # shape, dtype, kernel_name, attrs
            #("square_012", "square_run", ((2,), "float32", "cce_mod_fp16")),
        ]
        # RPC-cloud tier: float32 (and one float16) shapes from BERT workloads.
        self.testarg_rpc_cloud = [
            ## testflag, opfuncname, testRunArgs, dimArgs
            # shape, dtype, kernel_name, attrs
            # float:[4096]
            ("square_001_fp32", "square_run", ((4096,), "float32", "cce_mod_fp32")),
            # float:[1280, 1024]
            ("square_002_fp32", "square_run", ((1280, 1024), "float32", "cce_mod_fp32")),
            # float:[1024, 1024]
            ("square_003_fp32", "square_run", ((1024, 1024), "float32", "cce_mod_fp32")),
            # float:[2, 1024]
            ("square_004_fp32", "square_run", ((2, 1024), "float32", "cce_mod_fp32")),
            # float:[4096, 1024]
            ("square_005_fp32", "square_run", ((4096, 1024), "float32", "cce_mod_fp32")),
            # float:[8192, 4096]
            ("square_006_fp32", "square_run", ((8192, 4096), "float32", "cce_mod_fp32")),
            # float:[1024]
            ("square_007_fp32", "square_run", ((1024,), "float32", "cce_mod_fp32")),
            # float:[1024, 4096]
            ("square_008_fp32", "square_run", ((1024, 4096), "float32", "cce_mod_fp32")),
            # float:[30522]
            ("square_009_fp32", "square_run", ((30522,), "float32", "cce_mod_fp32")),
            # float:[30522, 1024]
            ("square_010_fp32", "square_run", ((30522, 1024), "float32", "cce_mod_fp32")),
            # float:[2]
            ("square_011_fp32", "square_run", ((2,), "float32", "cce_mod_fp32")),
            # float:[512, 1024]
            ("square_012_fp32", "square_run", ((512, 1024), "float32", "cce_mod_fp32")),
            # float:[768, 3072] = float:[768, 3072]
            # NOTE(review): shape (512, 1024) does not match the comment above — confirm intended.
            ("square_013_fp32", "square_run", ((512, 1024), "float32", "cce_mod_fp32")),
            # half:[8192, 3072] = half:[8192, 3072]
            ("square_014_fp32", "square_run", ((8192, 3072), "float16", "cce_mod_fp32")),
            # float:[1280, 768] = float:[1280, 768]
            ("square_015_fp32", "square_run", ((1280, 768), "float32", "cce_mod_fp32")),
            # float:[768, 768] = float:[768, 768]
            ("square_016_fp32", "square_run", ((768, 768), "float32", "cce_mod_fp32")),
            # float:[3072] = float:[3072]
            ("square_017_fp32", "square_run", ((3072,), "float32", "cce_mod_fp32")),
            # float:[3072, 768] = float:[3072, 768]
            # NOTE(review): shape (512, 1024) does not match the comment above — confirm intended.
            ("square_018_fp32", "square_run", ((512, 1024), "float32", "cce_mod_fp32")),
            # float:[21128, 768] = float:[21128, 768]
            ("square_019_fp32", "square_run", ((21128, 768), "float32", "cce_mod_fp32")),
            # float:[21128] = float:[21128]
            ("square_020_fp32", "square_run", ((21128,), "float32", "cce_mod_fp32")),
            # float:[2] = float:[2]
            ("square_021_fp32", "square_run", ((2,), "float32", "cce_mod_fp32")),
            # float:[33, 64] = float:[33, 64]
            ("square_022_fp32", "square_run", ((33, 64), "float32", "cce_mod_fp32")),
            # float:[768] = float:[768]
            ("square_023_fp32", "square_run", ((768,), "float32", "cce_mod_fp32")),
            # float:[2, 768] = float:[2, 768]
            ("square_024_fp32", "square_run", ((2, 768), "float32", "cce_mod_fp32")),
        ]
        # Level1 tier: the large embedding-table shape in float16.
        self.testarg_level1 = [
            ## testflag, opfuncname, testRunArgs, dimArgs
            # shape, dtype, kernel_name, attrs
            ("square_001", "square_run", ((30522, 1024), "float16", "cce_mod_fp16")),
        ]
        return

    @pytest.mark.level0
    @pytest.mark.platform_arm_ascend_training
    @pytest.mark.platform_x86_ascend_training
    @pytest.mark.env_onecard
    def test_run(self):
        """Run the default (level0) float16 cases."""
        self.common_run(self.testarg)

    def test_run_cloud(self):
        """Run the cloud-tier cases (currently an empty table)."""
        self.common_run(self.testarg_cloud)

    def test_run_rpc_cloud(self):
        """Run the RPC-cloud float32 cases."""
        self.common_run(self.testarg_rpc_cloud)

    @pytest.mark.level1
    @pytest.mark.platform_arm_ascend_training
    @pytest.mark.platform_x86_ascend_training
    @pytest.mark.env_onecard
    def test_run_level1(self):
        """Run the level1 cases."""
        self.common_run(self.testarg_level1)

    def teardown(self):
        """Log test-case teardown."""
        self._log.info("============= {0} Teardown============".format(self.casename))
        return
|
# -----------------------------------------------------------------------------
# Imports:
# -----------------------------------------------------------------------------
import os
import dpa
from dpa.action import Action, ActionError, ActionAborted
from dpa.env.vars import DpaVars
from dpa.location import Location
from dpa.shell.output import Output, Style
# -----------------------------------------------------------------------------
# Names of the template files copied from the dpa install's data/bash
# directory into the new location's 'bash' subdirectory.
BASH_ACTIVATE_TEMPLATE = 'activate.bash'
BASH_README_TEMPLATE = 'README'
# -----------------------------------------------------------------------------
# Classes:
# -----------------------------------------------------------------------------
class LocationInitAction(Action):
"""Initialize a pipeline location. (staff only).
This action initializes a pipeline location. Before this action is run, a
location must have been created on the server side. This action requires
that the matching location code be provided to initialize the local
filesystem as well as a data server address to connect to.
"""
name = 'init'
target_type = 'location'
# -------------------------------------------------------------------------
# Class methods:
# -------------------------------------------------------------------------
@classmethod
def setup_cl_args(cls, parser):
parser.add_argument(
"-c", "--code",
default=None,
help="The location code to initialize."
)
parser.add_argument(
"-s", "--server",
default=None,
help="The data server this location will connect to.",
metavar="address",
)
# -------------------------------------------------------------------------
# Special methods:
# -------------------------------------------------------------------------
def __init__(self, code=None, server=None):
super(LocationInitAction, self).__init__(
code=code,
server=server,
)
self._code = code
self._server = server
# -------------------------------------------------------------------------
# Methods:
# -------------------------------------------------------------------------
def execute(self):
MODE = 0770
# ensure filesystem root exists
fs_root = self.location.filesystem_root
if not os.path.isdir(fs_root):
try:
os.makedirs(fs_root, MODE)
except error as e:
raise ActionError(
"Unable to create filesystem root directory: " + fs_root + \
"\n " + str(e)
)
# remember the directories created below
dir_lookup = {}
# create standard directories ('projects', 'bash', 'config', etc.)
for dir_name in ['bash', 'projects', 'config', '.logs', 'plugins']:
dir_path = os.path.join(fs_root, dir_name)
dir_lookup[dir_name] = dir_path
if not os.path.isdir(dir_path):
try:
os.makedirs(dir_path, MODE)
except error as e:
raise ActionError(
"Unable to create root subdirectory: " + dir_path + \
"\n " + str(e)
)
# locate the install location to find the bash template
install_pkg_dir = os.path.dirname(os.path.abspath(dpa.__file__))
# ---- bash template
# the file to read from
bash_template_file = os.path.join(
install_pkg_dir, 'data', 'bash', BASH_ACTIVATE_TEMPLATE
)
if not os.path.exists(bash_template_file):
raise ActionError("Unable to locate LOCATION template bash script.")
# the file to write to
bash_activate_file = os.path.join(
dir_lookup['bash'], BASH_ACTIVATE_TEMPLATE
)
# ---- readme file
# readme file
bash_readme_template_file = os.path.join(
install_pkg_dir, 'data', 'bash', BASH_README_TEMPLATE
)
if not os.path.exists(bash_readme_template_file):
raise ActionError("Unable to locate README template file.")
# the file to write to
bash_readme_file = os.path.join(
dir_lookup['bash'], BASH_README_TEMPLATE
)
# ---- format template files
file_pairs = [
(bash_template_file, bash_activate_file),
(bash_readme_template_file, bash_readme_file),
]
replacements = (
("__DPA_LOCATION_CODE__", self.location.code),
("__DPA_DATA_SERVER__", self.server),
("__DPA_FILESYSTEM_ROOT__", self.location.filesystem_root),
)
# handle the file formatting and writing
for in_file_path, out_file_path in file_pairs:
with open(in_file_path) as in_file:
with open(out_file_path, 'w') as out_file:
text = in_file.read()
for in_str, out_str in replacements:
text = text.replace(in_str, out_str)
# write new text to bash file in config dir
out_file.write(text)
# print info to user about bash file to source
Output.text(
"\nA bash script has been created to activate the pipeline in " + \
"this location. The path to the bash script is: \n\n" + \
" " + Style.bright + bash_activate_file + Style.reset + "\n\n" + \
"See the README in the same directory for instructions on how " + \
"to reference the script.\n",
margin=4,
)
# -------------------------------------------------------------------------
def prompt(self):
# ---- prompt for missing fields
if not self.code or not self.server:
print "\nPlease enter the following information:"
if not self.code:
print "\nThe db code for this location:"
self._code = Output.prompt(
" " + Style.bright + "Location code" + Style.reset,
blank=False,
)
if not self.server:
print "\nThe address of the data server this location will " + \
"connect to:"
self._server = Output.prompt(
" " + Style.bright + "Data server address" + Style.reset,
blank=False,
)
# -------------------------------------------------------------------------
def undo(self):
pass
# -------------------------------------------------------------------------
def validate(self):
# make sure the code and server are valid for a connection by retrieving
# the location data.
self._location = self._validate_location()
if not self.location.active:
raise ActionError(
"Location is set to " + \
Style.bright + "inactive " + Style.reset + \
"on the server."
)
# -------------------------------------------------------------------------
def verify(self):
code = "Code"
name = "Name"
description = "Description"
server = "Data server"
filesystem_root = "Filesystem root"
output = Output()
output.header_names = [
code,
name,
description,
server,
filesystem_root,
]
output.add_item(
{
code: self.location.code,
name: self.location.name,
description: self.location.description,
server: self.server,
filesystem_root: self.location.filesystem_root,
},
color_all=Style.bright,
)
output.title = "Location summary:"
output.dump()
if not Output.prompt_yes_no(
Style.bright + "Initialize location" + Style.reset
):
raise ActionAborted("User chose not to proceed.")
# -------------------------------------------------------------------------
# Properties:
# -------------------------------------------------------------------------
@property
def code(self):
return self._code
# -------------------------------------------------------------------------
@property
def location(self):
return self._location
# -------------------------------------------------------------------------
@property
def server(self):
return self._server
# -------------------------------------------------------------------------
# Private methods:
# -------------------------------------------------------------------------
def _validate_location(self):
# ---- make sure code and server are valid
# first, set the server value in the environment
server_var = DpaVars.data_server()
server_var.value = self.server
server_var.set()
# now query the location code
try:
location = Location.get(self.code)
except ActionError as e:
raise ActionError(
"Unable to verify location: " + self.code + "\n" + str(e)
)
return location
|
<gh_stars>0
import pytest
from xchainpy.xchainpy_litecoin.client import Client
from xchainpy.xchainpy_util.asset import Asset
from xchainpy.xchainpy_client.models import tx_types
from xchainpy.xchainpy_litecoin.utils import MIN_TX_FEE
class TestLiteCoinClient:
    """Tests for the xchainpy Litecoin client.

    NOTE(review): these tests appear to exercise live testnet endpoints
    through Client (balances, fee rates, transfers) — confirm they require
    network access before running in CI.
    """

    phrase = 'atom green various power must another rent imitate gadget creek fat then'
    # NOTE(review): identical to 'phrase' above — confirm intended.
    phrase_one = 'atom green various power must another rent imitate gadget creek fat then'
    testnetaddress = 'tltc1q2pkall6rf6v6j0cvpady05xhy37erndv05de7g'
    ltc_asset = Asset('LTC', 'LTC')
    memo = 'SWAP:THOR.RUNE'
    # phraseTwo = 'green atom various power must another rent imitate gadget creek fat then'
    address_two = 'tltc1ql68zjjdjx37499luueaw09avednqtge4u23q36'
    # The third one is used only for balance verification
    phrase_three = 'quantum vehicle print stairs canvas kid erode grass baby orbit lake remove'
    address_three = 'tltc1q04y2lnt0ausy07vq9dg5w2rnn9yjl3rz364adu'

    @pytest.fixture
    def client(self):
        """Create a fresh testnet client for each test and purge it after."""
        self.client = Client(self.phrase, network='testnet')
        yield
        self.client.purge_client()

    def test_right_address(self, client):
        """Client derives the expected testnet address from the phrase."""
        assert self.client.get_address() == self.testnetaddress

    def test_invalid_phrase(self):
        """An invalid mnemonic phrase is rejected at construction time."""
        with pytest.raises(Exception) as err:
            assert Client(phrase='Invalid Phrase')
        assert str(err.value) == "Invalid Phrase"

    def test_right_phrase(self, client):
        """Re-setting the same phrase yields the same address."""
        assert self.client.set_phrase(self.phrase) == self.testnetaddress

    def test_validate_address(self, client):
        """The derived testnet address passes address validation."""
        assert self.client.validate_address(
            network=self.client.net, address=self.testnetaddress)

    @pytest.mark.asyncio
    async def test_has_balances(self, client):
        """The funded testnet address reports a balance."""
        assert await self.client.get_balance()

    @pytest.mark.asyncio
    async def test_has_no_balances(self, client):
        """Switching to mainnet yields a zero balance for this phrase."""
        self.client.set_network('mainnet')
        balance = await self.client.get_balance()
        assert balance.amount == 0

    @pytest.mark.asyncio
    async def test_equal_balances_when_call_getbalance_twice(self, client):
        """Two consecutive balance queries agree."""
        balance1 = await self.client.get_balance()
        balance2 = await self.client.get_balance()
        assert balance1.amount == balance2.amount

    @pytest.mark.asyncio
    async def test_transfer_with_memo_and_fee_rate(self, client):
        """A small transfer with memo and explicit fee rate returns a tx id."""
        fee_rates = await self.client.get_fee_rates()
        fee_rate = fee_rates['fast']
        balance = await self.client.get_balance()
        # Only attempt the transfer when the faucet balance is non-empty.
        if balance.amount > 0:
            amount = 0.0000001
            tx_id = await self.client.transfer(amount, self.address_two, self.memo, fee_rate)
            assert tx_id

    @pytest.mark.asyncio
    async def test_purge_client_should_purge_phrase_and_utxos(self):
        """After purge_client, address and balance access both fail."""
        self.client = Client(self.phrase, network='testnet')
        self.client.purge_client()
        with pytest.raises(Exception) as err:
            self.client.get_address()
        assert str(err.value) == "Phrase must be provided"
        with pytest.raises(Exception) as err:
            await self.client.get_balance()
        assert str(err.value) == "Phrase must be provided"

    @pytest.mark.asyncio
    async def test_should_prevent_tx_when_fees_and_valueOut_exceed_balance(self, client):
        """Transfers larger than the balance are rejected."""
        balance = await self.client.get_balance()
        if balance.amount > 0:
            amount = balance.amount + 1000 # LTC
            with pytest.raises(Exception) as err:
                await self.client.transfer(amount, self.address_two)
            assert str(err.value) == "Balance insufficient for transaction"

    @pytest.mark.asyncio
    async def test_fee_and_rates_normal_tx(self, client):
        """Fees and rates are populated for a plain transaction."""
        fees_and_rates = await self.client.get_fees_with_rates()
        fees = fees_and_rates['fees']
        rates = fees_and_rates['rates']
        assert fees['fastest']
        assert fees['fast']
        assert fees['average']
        assert rates['fastest']
        assert rates['fast']
        assert rates['average']

    @pytest.mark.asyncio
    async def test_fee_and_rates_with_memo(self, client):
        """Fees and rates are populated when a memo is attached."""
        fees_and_rates = await self.client.get_fees_with_rates(self.memo)
        fees = fees_and_rates['fees']
        rates = fees_and_rates['rates']
        assert fees['fastest']
        assert fees['fast']
        assert fees['average']
        assert rates['fastest']
        assert rates['fast']
        assert rates['average']

    @pytest.mark.asyncio
    async def test_estimated_fees_normal_tx(self, client):
        """All three fee tiers are estimated for a plain transaction."""
        fees = await self.client.get_fees()
        assert fees['fastest']
        assert fees['fast']
        assert fees['average']

    @pytest.mark.asyncio
    async def test_normal_tx_fees_and_vault_tx_fees(self, client):
        """Memo (vault) transactions never cost less than MIN_TX_FEE."""
        normal_tx = await self.client.get_fees()
        vault_tx = await self.client.get_fees_with_memo(self.memo)
        if vault_tx['average'] > MIN_TX_FEE:
            assert vault_tx['average'] > normal_tx['average']
        else:
            assert vault_tx['average'] == MIN_TX_FEE

    @pytest.mark.asyncio
    async def test_has_balances_invalid_address(self, client):
        """Balance lookup for a malformed address raises Invalid Address."""
        with pytest.raises(Exception) as err:
            await self.client.get_balance(address='invalid address')
        assert str(err.value) == "Invalid Address"

    @pytest.mark.asyncio
    async def test_transfer_invalid_address(self, client):
        """Transfers to a malformed address raise Invalid address."""
        balance = await self.client.get_balance()
        if balance.amount > 0:
            amount = 0.0000001
            with pytest.raises(Exception) as err:
                await self.client.transfer(amount, 'invalid address')
            assert str(err.value) == "Invalid address"

    @pytest.mark.asyncio
    async def test_get_transactions(self, client):
        """Transaction history entries carry the expected fields."""
        txs = await self.client.get_transactions({'address': self.address_three, 'limit': 4})
        assert txs
        if txs['total'] > 0:
            tx = txs['tx'][0]
            assert tx.asset == self.ltc_asset
            assert tx.tx_date
            assert tx.tx_hash
            assert tx.tx_type == 'transfer'
            assert len(tx.tx_to)
            assert len(tx.tx_from)

    @pytest.mark.asyncio
    async def test_get_transactions_limit_should_work(self, client):
        """The 'limit' parameter caps the number of returned transactions."""
        txs = await self.client.get_transactions({'address': self.address_three, 'limit': 1})
        assert len(txs['tx']) == 1

    @pytest.mark.asyncio
    async def test_get_transaction_with_hash(self, client):
        """A known testnet tx hash resolves to its recorded inputs/outputs."""
        tx_data = await self.client.get_transaction_data('b0422e9a4222f0f2b030088ee5ccd33ac0d3c59e7178bf3f4626de71b0e376d3')
        assert tx_data.tx_hash == 'b0422e9a4222f0f2b030088ee5ccd33ac0d3c59e7178bf3f4626de71b0e376d3'
        assert len(tx_data.tx_from) == 1
        assert tx_data.tx_from[0].address == 'tltc1q2pkall6rf6v6j0cvpady05xhy37erndv05de7g'
        assert tx_data.tx_from[0].amount == '8.60368562'
        assert len(tx_data.tx_to) == 2
        assert tx_data.tx_to[0].address == 'tltc1q04y2lnt0ausy07vq9dg5w2rnn9yjl3rz364adu'
        assert tx_data.tx_to[0].amount == '0.00002223'
        assert tx_data.tx_to[1].address == 'tltc1q2pkall6rf6v6j0cvpady05xhy37erndv05de7g'
        assert tx_data.tx_to[1].amount == '8.60365339'
import os
import re
import platform
import subprocess
from collections import OrderedDict
from .base import Base
from deoplete.util import charpos2bytepos, expand, getlines, load_external_module
load_external_module(__file__, 'sources/deoplete_go')
from cgo import cgo
from stdlib import stdlib
try:
load_external_module(__file__, '')
from ujson import loads
except ImportError:
from json import loads
# GOOS values recognized when inferring the build target from a file-name
# suffix (foo_linux.go) or a "// +build" directive in the buffer.
known_goos = (
    'appengine', 'android', 'darwin', 'dragonfly', 'freebsd', 'linux', 'nacl',
    'netbsd', 'openbsd', 'plan9', 'solaris', 'windows')
class Source(Base):
def __init__(self, vim):
super(Source, self).__init__(vim)
self.name = 'go'
self.mark = '[Go]'
self.filetypes = ['go']
self.input_pattern = r'(?:\b[^\W\d]\w*|[\]\)])\.(?:[^\W\d]\w*)?'
self.rank = 500
def on_init(self, context):
vars = context['vars']
self.gocode_binary = \
expand(vars.get('deoplete#sources#go#gocode_binary', ''))
self.package_dot = \
vars.get('deoplete#sources#go#package_dot', False)
self.sort_class = \
vars.get('deoplete#sources#go#sort_class', [])
self.pointer = \
vars.get('deoplete#sources#go#pointer', False)
self.auto_goos = \
vars.get('deoplete#sources#go#auto_goos', False)
self.goos = \
vars.get('deoplete#sources#go#goos', '')
self.goarch = \
vars.get('deoplete#sources#go#goarch', '')
self.sock = \
vars.get('deoplete#sources#go#gocode_sock', '')
self.cgo = \
vars.get('deoplete#sources#go#cgo', False)
self.source_importer = \
vars.get('deoplete#sources#go#source_importer', False)
self.builtin_objects = \
vars.get('deoplete#sources#go#builtin_objects', False)
self.unimported_packages = \
vars.get('deoplete#sources#go#unimported_packages', False)
self.fallback_to_source = \
vars.get('deoplete#sources#go#fallback_to_source', False)
self.loaded_gocode_binary = False
self.complete_pos = re.compile(r'\w*$|(?<=")[./\-\w]*$')
if self.pointer:
self.complete_pos = re.compile(self.complete_pos.pattern + r'|\*$')
self.input_pattern += r'|\*'
if self.cgo:
load_external_module(__file__, 'clang')
import clang.cindex as clang
self.libclang_path = \
vars.get('deoplete#sources#go#cgo#libclang_path', '')
if self.libclang_path == '':
return
self.cgo_options = {
'std':
vars.get('deoplete#sources#go#cgo#std', 'c11'),
'sort_algo':
vars.get('deoplete#sources#cgo#sort_algo', None)
}
if not clang.Config.loaded and \
clang.Config.library_path != self.libclang_path:
clang.Config.set_library_file(self.libclang_path)
clang.Config.set_compatibility_check(False)
# Set 'C.' complete pattern
self.cgo_complete_pattern = re.compile(r'[^\W\d]*C\.')
# Create clang.cindex.Index database
self.index = clang.Index.create(0)
# initialize in-memory cache
self.cgo_cache, self.cgo_inline_source = dict(), None
def get_complete_position(self, context):
m = self.complete_pos.search(context['input'])
return m.start() if m else -1
def gather_candidates(self, context):
# If enabled self.cgo, and matched self.cgo_complete_pattern pattern
if self.cgo and self.cgo_complete_pattern.search(context['input']):
return self.cgo_completion(getlines(self.vim))
bufname = self.vim.current.buffer.name
if not os.path.isfile(bufname):
bufname = self.vim.call('tempname')
result = self.get_complete_result(
context, getlines(self.vim), bufname)
try:
if result[1][0]['class'] == 'PANIC':
self.print_error('gocode panicked')
return []
if self.sort_class:
class_dict = OrderedDict((x, []) for x in self.sort_class)
out = []
sep = ' '
for complete in result[1]:
word = complete['name']
info = complete['type']
_class = complete['class']
abbr = str(word + sep + info).replace(' func', '', 1)
kind = _class
if _class == 'package' and self.package_dot:
word += '.'
if self.pointer and \
str(context['input']
[context['complete_position']:]) == '*':
word = '*' + word
candidates = dict(
word=word, abbr=abbr, kind=kind, info=info, dup=1
)
if not self.sort_class or _class == 'import':
out.append(candidates)
elif _class in class_dict.keys():
class_dict[_class].append(candidates)
if self.sort_class:
for v in class_dict.values():
out += v
return out
except Exception:
return []
def cgo_completion(self, buffer):
# No include header
if cgo.get_inline_source(buffer)[0] == 0:
return
count, inline_source = cgo.get_inline_source(buffer)
# exists 'self.cgo_inline_source', same inline sources and
# already cached cgo complete candidates
if self.cgo_inline_source is not None and \
self.cgo_inline_source == inline_source and \
self.cgo_cache[self.cgo_inline_source]:
# Use in-memory(self.cgo_headers) cacahe
return self.cgo_cache[self.cgo_inline_source]
else:
self.cgo_inline_source = inline_source
# return candidates use libclang-python3
return cgo.complete(
self.index, self.cgo_cache, self.cgo_options, count,
self.cgo_inline_source
)
def get_complete_result(self, context, buffer, bufname):
offset = self.get_cursor_offset(context)
env = os.environ.copy()
env['GOPATH'] = self.vim.eval('$GOPATH')
if self.auto_goos:
name = os.path.basename(os.path.splitext(bufname)[0])
if '_' in name:
for part in name.rsplit('_', 2):
if part in known_goos:
env['GOOS'] = part
break
if 'GOOS' not in env:
for line in buffer:
if line.startswith('package '):
break
elif not line.startswith('// +build'):
continue
directives = [
x.split(',', 1)[0] for x in line[9:].strip().split()
]
if platform.system().lower() not in directives:
for plat in directives:
if plat in known_goos:
env['GOOS'] = plat
break
elif self.goos != '':
env['GOOS'] = self.goos
if 'GOOS' in env and env['GOOS'] != platform.system().lower():
env['CGO_ENABLED'] = '0'
if self.goarch != '':
env['GOARCH'] = self.goarch
gocode = self.find_gocode_binary()
if not gocode:
return []
args = [gocode, '-f=json']
if self.source_importer:
args.append('-source')
if self.builtin_objects:
args.append('-builtin')
if self.unimported_packages:
args.append('-unimported-packages')
if self.fallback_to_source:
args.append('-fallback-to-source')
# basically, '-sock' option for mdempsky/gocode.
# probably meaningless in nsf/gocode that already run the rpc server
if self.sock != '' and self.sock in ['unix', 'tcp', 'none']:
args.append('-sock={}'.format(self.sock))
args += ['autocomplete', bufname, str(offset)]
process = subprocess.Popen(
args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
start_new_session=True,
env=env
)
stdout_data, stderr_data = process.communicate(
'\n'.join(buffer).encode()
)
result = []
try:
result = loads(stdout_data.decode())
except Exception as e:
self.print_error('gocode decode error')
self.print_error(stdout_data.decode())
self.print_error(stderr_data.decode())
return result
def get_cursor_offset(self, context):
line = self.vim.current.window.cursor[0]
column = context['complete_position']
count = self.vim.call('line2byte', line)
if self.vim.current.buffer.options['fileformat'] == 'dos':
# Note: line2byte() counts "\r\n" in DOS format. It must be "\n"
# in gocode.
count -= line - 1
return count + charpos2bytepos(
'utf-8', context['input'][: column], column) - 1
def parse_import_package(self, buffer):
start = 0
packages = []
for line, b in enumerate(buffer):
if re.match(r'^\s*import \w*|^\s*import \(', b):
start = line
continue
elif re.match(r'\)', b):
break
elif line > start:
package_name = re.sub(r'\t|"', '', b)
if str(package_name).find(r'/', 0) > 0:
full_package_name = str(package_name).split('/', -1)
package_name = \
full_package_name[len(full_package_name) - 1]
library = '/'.join(
full_package_name[:len(full_package_name) - 1]
),
packages.append(
dict(
library=library, package=package_name
)
)
else:
packages.append(dict(library='none', package=package_name))
return packages
def find_gocode_binary(self):
    """Locate the gocode executable, caching the result of the file check."""
    # A previously validated, non-empty path is reused as-is.
    if self.gocode_binary != '' and self.loaded_gocode_binary:
        return self.gocode_binary
    self.loaded_gocode_binary = os.path.isfile(self.gocode_binary)
    if self.loaded_gocode_binary:
        return self.gocode_binary
    # Otherwise search $PATH for the platform-specific binary name.
    if platform.system().lower() == 'windows':
        executable = 'gocode.exe'
    else:
        executable = 'gocode'
    return self.find_binary_path(executable)
def find_binary_path(self, path):
    """Resolve *path* to an executable file, searching $PATH for bare names."""
    def executable(candidate):
        return os.path.isfile(candidate) and os.access(candidate, os.X_OK)

    head, _ = os.path.split(path)
    if head:
        # An explicit directory was given; accept only that exact location.
        if executable(path):
            return path
    else:
        # Bare program name: scan every $PATH entry in order.
        for directory in os.environ["PATH"].split(os.pathsep):
            candidate = os.path.join(directory.strip('"'), path)
            if executable(candidate):
                return candidate
    return self.print_error(path + ' binary not found')
|
# -*- coding: utf-8 -*-
"""03C_Keras_API.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/github/Hvass-Labs/TensorFlow-Tutorials/blob/master/03C_Keras_API.ipynb
# TensorFlow Tutorial #03-C
# Keras API
by [<NAME>](http://www.hvass-labs.org/)
/ [GitHub](https://github.com/Hvass-Labs/TensorFlow-Tutorials) / [Videos on YouTube](https://www.youtube.com/playlist?list=PL9Hr9sNUjfsmEu1ZniY0XpHSzl5uihcXZ)
"""
# Clone the repository from GitHub to Google Colab's temporary drive.
"""## Introduction
Tutorial #02 showed how to implement a Convolutional Neural Network in TensorFlow. We made a few helper-functions for creating the layers in the network. It is essential to have a good high-level API because it makes it much easier to implement complex models, and it lowers the risk of errors.
There are several of these builder API's available for TensorFlow: PrettyTensor (Tutorial #03), Layers API (Tutorial #03-B), and several others. But they were never really finished and now they seem to be more or less abandoned by their developers.
This tutorial is about the Keras API which is already highly developed with very good documentation - and the development continues. It seems likely that Keras will be the standard API for TensorFlow in the future so it is recommended that you use it instead of the other APIs.
The author of Keras has written a [blog-post](https://blog.keras.io/user-experience-design-for-apis.html) on his API design philosophy which you should read.
## Flowchart
The following chart shows roughly how the data flows in the Convolutional Neural Network that is implemented below. See Tutorial #02 for a more detailed description of convolution.
There are two convolutional layers, each followed by a down-sampling using max-pooling (not shown in this flowchart). Then there are two fully-connected layers ending in a softmax-classifier.

## Imports
"""
# Commented out IPython magic to ensure Python compatibility.
# %matplotlib inline
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
import math
"""We need to import several things from Keras. Note the long import-statements. This might be a bug. Hopefully it will be possible to write shorter and more elegant lines in the future."""
# from tf.keras.models import Sequential # This does not work!
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import InputLayer, Input
from tensorflow.python.keras.layers import Reshape, MaxPooling2D
from tensorflow.python.keras.layers import Conv2D, Dense, Flatten
"""This was developed using Python 3.6 (Anaconda) and TensorFlow version:"""
tf.__version__
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
"""## Load Data
The MNIST data-set is about 12 MB and will be downloaded automatically if it is not located in the given path.
"""
from mnist import MNIST
data = MNIST(data_dir="data/MNIST/")
"""The MNIST data-set has now been loaded and consists of 70.000 images and class-numbers for the images. The data-set is split into 3 mutually exclusive sub-sets. We will only use the training and test-sets in this tutorial."""
print("Size of:")
print("- Training-set:\t\t{}".format(data.num_train))
print("- Validation-set:\t{}".format(data.num_val))
print("- Test-set:\t\t{}".format(data.num_test))
"""Copy some of the data-dimensions for convenience."""
# The number of pixels in each dimension of an image.
img_size = data.img_size
# The images are stored in one-dimensional arrays of this length.
img_size_flat = data.img_size_flat
# Tuple with height and width of images used to reshape arrays.
img_shape = data.img_shape
# Tuple with height, width and depth used to reshape arrays.
# This is used for reshaping in Keras.
img_shape_full = data.img_shape_full
# Number of classes, one class for each of 10 digits.
num_classes = data.num_classes
# Number of colour channels for the images: 1 channel for gray-scale.
num_channels = data.num_channels
"""### Helper-function for plotting images
Function used to plot 9 images in a 3x3 grid, and writing the true and predicted classes below each image.
"""
def plot_images(images, cls_true, cls_pred=None):
    """Plot 9 images in a 3x3 grid, labelling each with its true class
    and, when given, its predicted class."""
    assert len(images) == len(cls_true) == 9

    # 3x3 grid of sub-plots with a little spacing between them.
    fig, axes = plt.subplots(3, 3)
    fig.subplots_adjust(hspace=0.3, wspace=0.3)

    for idx, ax in enumerate(axes.flat):
        # Each image is stored flattened; reshape before display.
        ax.imshow(images[idx].reshape(img_shape), cmap='binary')

        if cls_pred is None:
            xlabel = "True: {0}".format(cls_true[idx])
        else:
            xlabel = "True: {0}, Pred: {1}".format(cls_true[idx], cls_pred[idx])
        ax.set_xlabel(xlabel)

        # Ticks add no information here.
        ax.set_xticks([])
        ax.set_yticks([])

    # Ensure the plot is shown correctly when a Notebook cell makes
    # multiple plots.
    plt.show()
"""### Plot a few images to see if data is correct"""
# Get the first images from the test-set.
images = data.x_test[0:9]
# Get the true classes for those images.
cls_true = data.y_test_cls[0:9]
# Plot the images and labels using our helper-function above.
plot_images(images=images, cls_true=cls_true)
"""### Helper-function to plot example errors
Function for plotting examples of images from the test-set that have been mis-classified.
"""
def plot_example_errors(cls_pred):
    """Plot the first 9 test-set images whose predicted class (cls_pred,
    one entry per test image) disagrees with the ground truth."""
    # Boolean mask of mis-classified test images.
    incorrect = (cls_pred != data.y_test_cls)

    # Restrict images, predictions and ground truth to the errors.
    wrong_images = data.x_test[incorrect]
    wrong_pred = cls_pred[incorrect]
    wrong_true = data.y_test_cls[incorrect]

    plot_images(images=wrong_images[0:9],
                cls_true=wrong_true[0:9],
                cls_pred=wrong_pred[0:9])
"""## PrettyTensor API
This is how the Convolutional Neural Network was implemented in Tutorial #03 using the PrettyTensor API. It is shown here for easy comparison to the Keras implementation below.
"""
# NOTE(review): this block is deliberately dead (`if False:`) -- `pt`
# (PrettyTensor), `x_image` and `y_true` are never defined in this file.
# It is kept only to compare the old PrettyTensor style with the Keras
# implementation below.
if False:
    x_pretty = pt.wrap(x_image)

    with pt.defaults_scope(activation_fn=tf.nn.relu):
        y_pred, loss = x_pretty.\
            conv2d(kernel=5, depth=16, name='layer_conv1').\
            max_pool(kernel=2, stride=2).\
            conv2d(kernel=5, depth=36, name='layer_conv2').\
            max_pool(kernel=2, stride=2).\
            flatten().\
            fully_connected(size=128, name='layer_fc1').\
            softmax_classifier(num_classes=num_classes, labels=y_true)
"""## Sequential Model
The Keras API has two modes of constructing Neural Networks. The simplest is the Sequential Model which only allows for the layers to be added in sequence.
"""
# Start construction of the Keras Sequential model.
model = Sequential()

# Add an input layer which is similar to a feed_dict in TensorFlow.
# Note that the input-shape must be a tuple containing the image-size.
model.add(InputLayer(input_shape=(img_size_flat,)))

# The input is a flattened array with 784 elements,
# but the convolutional layers expect images with shape (28, 28, 1)
model.add(Reshape(img_shape_full))

# First convolutional layer with ReLU-activation and max-pooling.
# 'same' padding keeps the spatial size; the 2x2/stride-2 pooling
# then halves it.
model.add(Conv2D(kernel_size=5, strides=1, filters=16, padding='same',
                 activation='relu', name='layer_conv1'))
model.add(MaxPooling2D(pool_size=2, strides=2))

# Second convolutional layer with ReLU-activation and max-pooling.
model.add(Conv2D(kernel_size=5, strides=1, filters=36, padding='same',
                 activation='relu', name='layer_conv2'))
model.add(MaxPooling2D(pool_size=2, strides=2))

# Flatten the 4-rank output of the convolutional layers
# to 2-rank that can be input to a fully-connected / dense layer.
model.add(Flatten())

# First fully-connected / dense layer with ReLU-activation.
model.add(Dense(128, activation='relu'))

# Last fully-connected / dense layer with softmax-activation
# for use in classification: one probability per class.
model.add(Dense(num_classes, activation='softmax'))
"""### Model Compilation
The Neural Network has now been defined and must be finalized by adding a loss-function, optimizer and performance metrics. This is called model "compilation" in Keras.
We can either define the optimizer using a string, or if we want more control of its parameters then we need to instantiate an object. For example, we can set the learning-rate.
"""
from tensorflow.python.keras.optimizers import Adam

# NOTE(review): `lr` is the legacy argument name used by this TF version;
# newer Keras releases renamed it to `learning_rate`.
optimizer = Adam(lr=1e-3)

"""For a classification-problem such as MNIST which has 10 possible classes, we need to use the loss-function called `categorical_crossentropy`. The performance metric we are interested in is the classification accuracy."""

model.compile(optimizer=optimizer,
              loss='categorical_crossentropy',
              metrics=['accuracy'])

"""### Training
Now that the model has been fully defined with loss-function and optimizer, we can train it. This function takes numpy-arrays and performs the given number of training epochs using the given batch-size. An epoch is one full use of the entire training-set. So for 10 epochs we would iterate randomly over the entire training-set 10 times.
"""

# One pass over the training-set in mini-batches of 128 examples.
model.fit(x=data.x_train,
          y=data.y_train,
          epochs=1, batch_size=128)
"""### Evaluation
Now that the model has been trained we can test its performance on the test-set. This also uses numpy-arrays as input.
"""
result = model.evaluate(x=data.x_test,
y=data.y_test)
"""We can print all the performance metrics for the test-set."""
for name, value in zip(model.metrics_names, result):
print(name, value)
"""Or we can just print the classification accuracy."""
print("{0}: {1:.2%}".format(model.metrics_names[1], result[1]))
"""### Prediction
We can also predict the classification for new images. We will just use some images from the test-set but you could load your own images into numpy arrays and use those instead.
"""
images = data.x_test[0:9]
"""These are the true class-number for those images. This is only used when plotting the images."""
cls_true = data.y_test_cls[0:9]
"""Get the predicted classes as One-Hot encoded arrays."""
y_pred = model.predict(x=images)
"""Get the predicted classes as integers."""
cls_pred = np.argmax(y_pred, axis=1)
plot_images(images=images,
cls_true=cls_true,
cls_pred=cls_pred)
"""### Examples of Mis-Classified Images
We can plot some examples of mis-classified images from the test-set.
First we get the predicted classes for all the images in the test-set:
"""
y_pred = model.predict(x=data.x_test)
"""Then we convert the predicted class-numbers from One-Hot encoded arrays to integers."""
cls_pred = np.argmax(y_pred, axis=1)
"""Plot some of the mis-classified images."""
plot_example_errors(cls_pred)
"""## Functional Model
The Keras API can also be used to construct more complicated networks using the Functional Model. This may look a little confusing at first, because each call to the Keras API will create and return an instance that is itself callable. It is not clear whether it is a function or an object - but we can call it as if it is a function. This allows us to build computational graphs that are more complex than the Sequential Model allows.
"""
# Create an input layer which is similar to a feed_dict in TensorFlow.
# Note that the input-shape must be a tuple containing the image-size.
inputs = Input(shape=(img_size_flat,))

# Variable used for building the Neural Network: each layer call below
# consumes `net` and rebinds it to the layer's output tensor.
net = inputs

# The input is an image as a flattened array with 784 elements.
# But the convolutional layers expect images with shape (28, 28, 1)
net = Reshape(img_shape_full)(net)

# First convolutional layer with ReLU-activation and max-pooling.
net = Conv2D(kernel_size=5, strides=1, filters=16, padding='same',
             activation='relu', name='layer_conv1')(net)
net = MaxPooling2D(pool_size=2, strides=2)(net)

# Second convolutional layer with ReLU-activation and max-pooling.
net = Conv2D(kernel_size=5, strides=1, filters=36, padding='same',
             activation='relu', name='layer_conv2')(net)
net = MaxPooling2D(pool_size=2, strides=2)(net)

# Flatten the output of the conv-layer from 4-dim to 2-dim.
net = Flatten()(net)

# First fully-connected / dense layer with ReLU-activation.
net = Dense(128, activation='relu')(net)

# Last fully-connected / dense layer with softmax-activation
# so it can be used for classification.
net = Dense(num_classes, activation='softmax')(net)

# Output of the Neural Network.
outputs = net
"""### Model Compilation
We have now defined the architecture of the model with its input and output. We now have to create a Keras model and compile it with a loss-function and optimizer, so it is ready for training.
"""
from tensorflow.python.keras.models import Model
"""Create a new instance of the Keras Functional Model. We give it the inputs and outputs of the Convolutional Neural Network that we constructed above."""
model2 = Model(inputs=inputs, outputs=outputs)
"""Compile the Keras model using the RMSprop optimizer and with a loss-function for multiple categories. The only performance metric we are interested in is the classification accuracy, but you could use a list of metrics here."""
model2.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
"""### Training
The model has now been defined and compiled so it can be trained using the same `fit()` function as used in the Sequential Model above. This also takes numpy-arrays as input.
"""
model2.fit(x=data.x_train,
y=data.y_train,
epochs=1, batch_size=128)
"""### Evaluation
Once the model has been trained we can evaluate its performance on the test-set. This is the same syntax as for the Sequential Model.
"""
result = model2.evaluate(x=data.x_test,
y=data.y_test)
"""The result is a list of values, containing the loss-value and all the metrics we defined when we compiled the model. Note that 'accuracy' is now called 'acc' which is a small inconsistency."""
for name, value in zip(model2.metrics_names, result):
print(name, value)
"""We can also print the classification accuracy as a percentage:"""
print("{0}: {1:.2%}".format(model2.metrics_names[1], result[1]))
"""### Examples of Mis-Classified Images
We can plot some examples of mis-classified images from the test-set.
First we get the predicted classes for all the images in the test-set:
"""
y_pred = model2.predict(x=data.x_test)
"""Then we convert the predicted class-numbers from One-Hot encoded arrays to integers."""
cls_pred = np.argmax(y_pred, axis=1)
"""Plot some of the mis-classified images."""
plot_example_errors(cls_pred)
"""## Save & Load Model
NOTE: You need to install `h5py` for this to work!
Tutorial #04 was about saving and restoring the weights of a model using native TensorFlow code. It was an absolutely horrible API! Fortunately, Keras makes this very easy.
This is the file-path where we want to save the Keras model.
"""
path_model = 'model.keras'
"""Saving a Keras model with the trained weights is then just a single function call, as it should be."""
model2.save(path_model)
"""Delete the model from memory so we are sure it is no longer used."""
del model2
"""We need to import this Keras function for loading the model."""
from tensorflow.python.keras.models import load_model
"""Loading the model is then just a single function-call, as it should be."""
model3 = load_model(path_model)
"""We can then use the model again e.g. to make predictions. We get the first 9 images from the test-set and their true class-numbers."""
images = data.x_test[0:9]
cls_true = data.y_test_cls[0:9]
"""We then use the restored model to predict the class-numbers for those images."""
y_pred = model3.predict(x=images)
"""Get the class-numbers as integers."""
cls_pred = np.argmax(y_pred, axis=1)
"""Plot the images with their true and predicted class-numbers."""
plot_images(images=images,
cls_pred=cls_pred,
cls_true=cls_true)
"""## Visualization of Layer Weights and Outputs
### Helper-function for plotting convolutional weights
"""
def plot_conv_weights(weights, input_channel=0):
    """Plot the filters of a convolutional layer for one input channel.

    All filters share a common colour scale so their intensities can be
    compared with each other.
    """
    # Common limits for the colour scale across all filters.
    w_min = np.min(weights)
    w_max = np.max(weights)

    # The filters live along the last axis of the 4-dim weight tensor.
    num_filters = weights.shape[3]

    # Smallest square grid that fits every filter.
    num_grids = math.ceil(math.sqrt(num_filters))
    fig, axes = plt.subplots(num_grids, num_grids)

    for idx, ax in enumerate(axes.flat):
        # The grid may have more cells than there are filters.
        if idx < num_filters:
            img = weights[:, :, input_channel, idx]
            ax.imshow(img, vmin=w_min, vmax=w_max,
                      interpolation='nearest', cmap='seismic')
        ax.set_xticks([])
        ax.set_yticks([])

    # Ensure the plot is shown correctly when a Notebook cell makes
    # multiple plots.
    plt.show()
"""### Get Layers
Keras has a simple way of listing the layers in the model.
"""
model3.summary()
"""We count the indices to get the layers we want.
The input-layer has index 0.
"""
layer_input = model3.layers[0]
"""The first convolutional layer has index 2."""
layer_conv1 = model3.layers[2]
layer_conv1
"""The second convolutional layer has index 4."""
layer_conv2 = model3.layers[4]
"""### Convolutional Weights
Now that we have the layers we can easily get their weights.
"""
weights_conv1 = layer_conv1.get_weights()[0]
"""This gives us a 4-rank tensor."""
weights_conv1.shape
"""Plot the weights using the helper-function from above."""
plot_conv_weights(weights=weights_conv1, input_channel=0)
"""We can also get the weights for the second convolutional layer and plot them."""
weights_conv2 = layer_conv2.get_weights()[0]
plot_conv_weights(weights=weights_conv2, input_channel=0)
"""### Helper-function for plotting the output of a convolutional layer"""
def plot_conv_output(values):
    """Plot the output image of every filter in a convolutional layer.

    `values` is the 4-dim layer output for a single input image.
    """
    # Filters are indexed along the last axis.
    num_filters = values.shape[3]

    # Smallest square grid that fits every filter output.
    num_grids = math.ceil(math.sqrt(num_filters))
    fig, axes = plt.subplots(num_grids, num_grids)

    for idx, ax in enumerate(axes.flat):
        # Skip any grid cells beyond the number of filters.
        if idx < num_filters:
            ax.imshow(values[0, :, :, idx],
                      interpolation='nearest', cmap='binary')
        ax.set_xticks([])
        ax.set_yticks([])

    # Ensure the plot is shown correctly when a Notebook cell makes
    # multiple plots.
    plt.show()
"""### Input Image
Helper-function for plotting a single image.
"""
def plot_image(image):
    """Display one flattened image as a 2-D gray-scale picture."""
    pixels = image.reshape(img_shape)
    plt.imshow(pixels, interpolation='nearest', cmap='binary')
    plt.show()
"""Plot an image from the test-set which will be used as an example below."""
image1 = data.x_test[0]
plot_image(image1)
"""### Output of Convolutional Layer - Method 1
There are different ways of getting the output of a layer in a Keras model. This method uses a so-called K-function which turns a part of the Keras model into a function.
"""
from tensorflow.python.keras import backend as K
output_conv1 = K.function(inputs=[layer_input.input],
outputs=[layer_conv1.output])
"""We can then call this function with the input image. Note that the image is wrapped in two lists because the function expects an array of that dimensionality. Likewise, the function returns an array with one more dimensionality than we want so we just take the first element."""
layer_output1 = output_conv1([[image1]])[0]
layer_output1.shape
"""We can then plot the output of all 16 channels of the convolutional layer."""
plot_conv_output(values=layer_output1)
"""### Output of Convolutional Layer - Method 2
Keras also has another method for getting the output of a layer inside the model. This creates another Functional Model using the same input as the original model, but the output is now taken from the convolutional layer that we are interested in.
"""
output_conv2 = Model(inputs=layer_input.input,
outputs=layer_conv2.output)
"""This creates a new model-object where we can call the typical Keras functions. To get the output of the convoloutional layer we call the `predict()` function with the input image."""
layer_output2 = output_conv2.predict(np.array([image1]))
layer_output2.shape
"""We can then plot the images for all 36 channels."""
plot_conv_output(values=layer_output2)
"""## Conclusion
This tutorial showed how to use the so-called *Keras API* for easily building Convolutional Neural Networks in TensorFlow. Keras is by far the most complete and best designed API for TensorFlow.
This tutorial also showed how to use Keras to save and load a model, as well as getting the weights and outputs of convolutional layers.
It seems likely that Keras will be the standard API for TensorFlow in the future, for the simple reason that it is already very good and it is constantly being improved. So it is recommended that you use Keras.
## Exercises
These are a few suggestions for exercises that may help improve your skills with TensorFlow. It is important to get hands-on experience with TensorFlow in order to learn how to use it properly.
You may want to backup this Notebook before making any changes.
* Train for more epochs. Does it improve the classification accuracy?
* Change the activation function to sigmoid for some of the layers.
* Can you find a simple way of changing the activation function for all the layers?
* Plot the output of the max-pooling layers instead of the conv-layers.
* Replace the 2x2 max-pooling layers with stride=2 in the convolutional layers. Is there a difference in classification accuracy? What if you optimize it again and again? The difference is random, so how would you measure if there really is a difference? What are the pros and cons of using max-pooling vs. stride in the conv-layer?
* Change the parameters for the layers, e.g. the kernel, depth, size, etc. What is the difference in time usage and classification accuracy?
* Add and remove some convolutional and fully-connected layers.
* What is the simplest network you can design that still performs well?
* Change the Functional Model so it has another convolutional layer that connects in parallel to the existing conv-layers before going into the dense layers.
* Change the Functional Model so it outputs the predicted class both as a One-Hot encoded array and as an integer, so we don't have to use `numpy.argmax()` afterwards.
* Remake the program yourself without looking too much at this source-code.
* Explain to a friend how the program works.
## License (MIT)
Copyright (c) 2016-2017 by [<NAME>](http://www.hvass-labs.org/)
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
|
from __future__ import print_function
import tensorflow as tf
from termcolor import colored
from openrec.tf1.legacy.modules.interactions import Interaction
class PointwiseMSE(Interaction):
    r"""
    The PointwiseMSE module minimizes the pointwise mean-square-error [ctm]_ as follows (regularization terms \
    are not included):

    .. math::
        \min \sum_{ij}c_{ij}(r_{ij} - u_i^T v_j)^2

    where :math:`u_i` and :math:`v_j` are representations for user :math:`i` and item :math:`j` respectively; \
    :math:`c_{ij}=a` if :math:`r_{ij}=1`, otherwise :math:`c_{ij}=b`.

    Parameters
    ----------
    user: Tensorflow tensor
        Representations for users involved in the interactions. Shape: **[number of interactions, dimensionality of \
        user representations]**.
    item: Tensorflow tensor
        Representations for items involved in the interactions. Shape: **[number of interactions, dimensionality of \
        item representations]**.
    item_bias: Tensorflow tensor
        Biases for items involved in the interactions. Shape: **[number of interactions, 1]**.
    labels: Tensorflow tensor, required for training
        Groundtruth labels for the interactions. Shape **[number of interactions, ]**.
    a: float, optional
        The value of :math:`c_{ij}` if :math:`r_{ij}=1`.
    b: float, optional
        The value of :math:`c_{ij}` if :math:`r_{ij}=0`.
    sigmoid: bool, optional
        Normalize the dot products, i.e., sigmoid(:math:`u_i^T v_j`).
    train: bool, optional
        An indicator for training or serving phase.
    batch_serving: bool, optional
        If True, the model calculates scores for all users against all items, and returns scores with shape [len(user), len(item)]. Otherwise, it returns scores for specified user item pairs (require :code:`len(user)==len(item)`).
    scope: str, optional
        Scope for module variables.
    reuse: bool, optional
        Whether or not to reuse module variables.

    References
    ----------
    .. [ctm] <NAME>. and <NAME>., 2011, August. Collaborative topic modeling for recommending scientific articles. \
        In Proceedings of the 17th ACM SIGKDD international conference on Knowledge discovery and data mining (pp. 448-456). ACM.
    """

    def __init__(self, user, item, item_bias, labels=None, a=1.0, b=1.0,
                 sigmoid=False, train=True, batch_serving=True, scope=None, reuse=False):
        assert train is not None, 'train cannot be None'
        assert user is not None, 'user cannot be None'
        assert item is not None, 'item cannot be None'
        assert item_bias is not None, 'item_bias cannot be None'
        self._user = user
        self._item = item
        self._item_bias = item_bias
        self._sigmoid = sigmoid
        self._batch_serving = batch_serving
        if train:
            assert labels is not None, 'labels cannot be None'
            # Flatten labels to a 1-D float tensor for the loss below.
            # NOTE(review): tf.to_float and keep_dims are TF1-era APIs;
            # this module lives under tf1.legacy, so they are kept as-is.
            self._labels = tf.reshape(tf.to_float(labels), (-1,))
            # _a/_b are only needed (and only set) for the training graph.
            self._a = a
            self._b = b
        super(PointwiseMSE, self).__init__(train=train, scope=scope, reuse=reuse)

    def _build_training_graph(self):
        """Build the confidence-weighted L2 loss into self._loss."""
        with tf.variable_scope(self._scope, reuse=self._reuse):
            # c_ij = a where label == 1, b where label == 0.
            labels_weight = (self._a - self._b) * self._labels + self._b
            dot_user_item = tf.reduce_sum(tf.multiply(self._user, self._item),
                                          axis=1, keep_dims=False, name="dot_user_item")
            if self._sigmoid:
                prediction = tf.sigmoid(dot_user_item + tf.reshape(self._item_bias, [-1]))
            else:
                prediction = dot_user_item + tf.reshape(self._item_bias, [-1])
            self._loss = tf.nn.l2_loss(labels_weight * (self._labels - prediction))

    def _build_serving_graph(self):
        """Append the serving-time score tensor to self._outputs."""
        with tf.variable_scope(self._scope, reuse=self._reuse):
            if self._batch_serving:
                # Score matrix for all users against all items.
                prediction = tf.matmul(self._user, self._item, transpose_b=True) + tf.reshape(self._item_bias, [-1])
            else:
                # Pairwise scores for aligned user/item rows.
                dot_user_item = tf.reduce_sum(tf.multiply(self._user, self._item),
                                              axis=1, keep_dims=False, name="dot_user_item")
                prediction = dot_user_item + tf.reshape(self._item_bias, [-1])
                # NOTE(review): sigmoid normalization appears to apply only
                # to the pairwise path here, not to batch serving -- confirm
                # that leaving the batch score matrix unnormalized is
                # intentional.
                if self._sigmoid:
                    prediction = tf.sigmoid(prediction)
            self._outputs.append(prediction)
|
# tests/hwsim/test_tnc.py
# -*- coding: utf-8 -*-
# TNC tests
# Copyright (c) 2014-2015, <NAME> <<EMAIL>>
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import os.path
import hostapd
from utils import HwsimSkip, alloc_fail, fail_test, wait_fail_trigger
from test_ap_eap import int_eap_server_params, check_eap_capa
def test_tnc_peap_soh(dev, apdev):
    """TNC PEAP-SoH"""
    params = int_eap_server_params()
    params["tnc"] = "1"
    hostapd.add_ap(apdev[0]['ifname'], params)

    # One station per SoH variant / crypto-binding setting.
    phase1_options = ["peapver=0 tnc=soh cryptobinding=0",
                      "peapver=0 tnc=soh1 cryptobinding=1",
                      "peapver=0 tnc=soh2 cryptobinding=2"]
    for sta, phase1 in zip(dev, phase1_options):
        sta.connect("test-wpa2-eap", key_mgmt="WPA-EAP",
                    eap="PEAP", identity="user", password="password",
                    ca_cert="auth_serv/ca.pem",
                    phase1=phase1,
                    phase2="auth=MSCHAPV2",
                    scan_freq="2412", wait_connect=False)
        sta.wait_connected(timeout=10)
def test_tnc_peap_soh_errors(dev, apdev):
    """TNC PEAP-SoH local error cases"""
    params = int_eap_server_params()
    params["tnc"] = "1"
    hostapd.add_ap(apdev[0]['ifname'], params)

    # (count, function): fail the count'th allocation inside `function`
    # and verify the connection attempt aborts cleanly.
    tests = [ (1, "tncc_build_soh"),
              (1, "eap_msg_alloc;=eap_peap_phase2_request") ]
    for count, func in tests:
        with alloc_fail(dev[0], count, func):
            dev[0].connect("test-wpa2-eap", key_mgmt="WPA-EAP",
                           eap="PEAP", identity="user", password="password",
                           ca_cert="auth_serv/ca.pem",
                           phase1="peapver=0 tnc=soh cryptobinding=0",
                           phase2="auth=MSCHAPV2",
                           scan_freq="2412", wait_connect=False)
            # Block until the injected allocation failure has triggered.
            wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()

    # Same connection attempt, but failing os_get_random() inside
    # tncc_build_soh instead of a memory allocation.
    with fail_test(dev[0], 1, "os_get_random;tncc_build_soh"):
        dev[0].connect("test-wpa2-eap", key_mgmt="WPA-EAP",
                       eap="PEAP", identity="user", password="password",
                       ca_cert="auth_serv/ca.pem",
                       phase1="peapver=0 tnc=soh cryptobinding=0",
                       phase2="auth=MSCHAPV2",
                       scan_freq="2412", wait_connect=False)
        wait_fail_trigger(dev[0], "GET_FAIL")
        dev[0].request("REMOVE_NETWORK all")
        dev[0].wait_disconnected()
def test_tnc_ttls(dev, apdev):
    """TNC TTLS"""
    check_eap_capa(dev[0], "MSCHAPV2")
    # Skip before configuring the AP when the test IMC is missing
    # (consistent with test_tnc_ttls_errors).
    if not os.path.exists("tnc/libhostap_imc.so"):
        raise HwsimSkip("No IMC installed")
    params = int_eap_server_params()
    params["tnc"] = "1"
    hostapd.add_ap(apdev[0]['ifname'], params)
    # FIX: escape the backslash explicitly; "\m" is an invalid escape
    # sequence (SyntaxWarning on modern Python). Runtime value unchanged.
    dev[0].connect("test-wpa2-eap", key_mgmt="WPA-EAP",
                   eap="TTLS", identity="DOMAIN\\mschapv2 user",
                   anonymous_identity="ttls", password="password",
                   phase2="auth=MSCHAPV2",
                   ca_cert="auth_serv/ca.pem",
                   scan_freq="2412", wait_connect=False)
    dev[0].wait_connected(timeout=10)
def test_tnc_ttls_fragmentation(dev, apdev):
    """TNC TTLS with fragmentation"""
    check_eap_capa(dev[0], "MSCHAPV2")
    # Skip before configuring the AP when the test IMC is missing
    # (consistent with test_tnc_ttls_errors).
    if not os.path.exists("tnc/libhostap_imc.so"):
        raise HwsimSkip("No IMC installed")
    params = int_eap_server_params()
    params["tnc"] = "1"
    params["fragment_size"] = "150"
    hostapd.add_ap(apdev[0]['ifname'], params)
    # FIX: escape the backslash explicitly; "\m" is an invalid escape
    # sequence (SyntaxWarning on modern Python). Runtime value unchanged.
    dev[0].connect("test-wpa2-eap", key_mgmt="WPA-EAP",
                   eap="TTLS", identity="DOMAIN\\mschapv2 user",
                   anonymous_identity="ttls", password="password",
                   phase2="auth=MSCHAPV2",
                   ca_cert="auth_serv/ca.pem",
                   fragment_size="150",
                   scan_freq="2412", wait_connect=False)
    dev[0].wait_connected(timeout=10)
def test_tnc_ttls_errors(dev, apdev):
    """TNC TTLS local error cases"""
    # Requires the hostap test IMC to be built and installed under tnc/.
    if not os.path.exists("tnc/libhostap_imc.so"):
        raise HwsimSkip("No IMC installed")
    check_eap_capa(dev[0], "MSCHAPV2")
    params = int_eap_server_params()
    params["tnc"] = "1"
    # Small fragments force the fragment-ack / reassembly code paths covered
    # by several of the failure triggers below.
    params["fragment_size"] = "150"
    hostapd.add_ap(apdev[0]['ifname'], params)
    # Each tuple: (failure count, allocation-failure trigger point,
    #              EAP identity, TTLS phase2 method).
    tests = [ (1, "eap_ttls_process_phase2_eap;eap_ttls_process_tnc_start",
               "DOMAIN\mschapv2 user", "auth=MSCHAPV2"),
              (1, "eap_ttls_process_phase2_eap;eap_ttls_process_tnc_start",
               "mschap user", "auth=MSCHAP"),
              (1, "=eap_tnc_init", "chap user", "auth=CHAP"),
              (1, "tncc_init;eap_tnc_init", "pap user", "auth=PAP"),
              (1, "eap_msg_alloc;eap_tnc_build_frag_ack",
               "pap user", "auth=PAP"),
              (1, "eap_msg_alloc;eap_tnc_build_msg",
               "pap user", "auth=PAP"),
              (1, "wpabuf_alloc;=eap_tnc_process_fragment",
               "pap user", "auth=PAP"),
              (1, "eap_msg_alloc;=eap_tnc_process", "pap user", "auth=PAP"),
              (1, "wpabuf_alloc;=eap_tnc_process", "pap user", "auth=PAP"),
              (1, "dup_binstr;tncc_process_if_tnccs", "pap user", "auth=PAP"),
              (1, "tncc_get_base64;tncc_process_if_tnccs",
               "pap user", "auth=PAP"),
              (1, "tncc_if_tnccs_start", "pap user", "auth=PAP"),
              (1, "tncc_if_tnccs_end", "pap user", "auth=PAP"),
              (1, "tncc_parse_imc", "pap user", "auth=PAP"),
              (2, "tncc_parse_imc", "pap user", "auth=PAP"),
              (3, "tncc_parse_imc", "pap user", "auth=PAP"),
              (1, "os_readfile;tncc_read_config", "pap user", "auth=PAP"),
              (1, "tncc_init", "pap user", "auth=PAP"),
              (1, "TNC_TNCC_ReportMessageTypes", "pap user", "auth=PAP"),
              (1, "base64_encode;TNC_TNCC_SendMessage", "pap user", "auth=PAP"),
              (1, "=TNC_TNCC_SendMessage", "pap user", "auth=PAP"),
              (1, "tncc_get_base64;tncc_process_if_tnccs",
               "pap user", "auth=PAP") ]
    for count, func, identity, phase2 in tests:
        # Inject an allocation failure at the trigger point, start the
        # connection attempt, then confirm the failure actually fired.
        with alloc_fail(dev[0], count, func):
            dev[0].connect("test-wpa2-eap", key_mgmt="WPA-EAP",
                           scan_freq="2412",
                           eap="TTLS", anonymous_identity="ttls",
                           identity=identity, password="password",
                           ca_cert="auth_serv/ca.pem", phase2=phase2,
                           fragment_size="150", wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            wait_fail_trigger(dev[0], "GET_ALLOC_FAIL",
                              note="Allocation failure not triggered for: %d:%s" % (count, func))
            # Clean up between iterations so the next case starts fresh.
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()
            dev[0].dump_monitor()
def test_tnc_fast(dev, apdev):
    """TNC FAST"""
    # Fix: skip before bringing up the AP when no IMC is installed, matching
    # the order used by test_tnc_ttls_errors.
    if not os.path.exists("tnc/libhostap_imc.so"):
        raise HwsimSkip("No IMC installed")
    check_eap_capa(dev[0], "FAST")
    params = int_eap_server_params()
    params["tnc"] = "1"
    # NOTE(review): "<KEY>" looks like an anonymization placeholder; the
    # server normally needs a 16-byte hex PAC opaque key here -- confirm.
    params["pac_opaque_encr_key"] = "<KEY>"
    params["eap_fast_a_id"] = "101112131415161718191a1b1c1d1e00"
    params["eap_fast_a_id_info"] = "test server2"
    hostapd.add_ap(apdev[0]['ifname'], params)
    dev[0].connect("test-wpa2-eap", key_mgmt="WPA-EAP",
                   eap="FAST", identity="user",
                   anonymous_identity="FAST", password="password",
                   phase2="auth=GTC",
                   phase1="fast_provisioning=2",
                   pac_file="blob://fast_pac_auth_tnc",
                   ca_cert="auth_serv/ca.pem",
                   scan_freq="2412", wait_connect=False)
    dev[0].wait_connected(timeout=10)
|
<reponame>alxcord/my_public_scripts<filename>HANA/hana_extract/hana_extract.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 1 12:18:15 2017
@author: <NAME>
Script para executar um ou mais consultas SQL num banco de dados HANA
Ajustes
"""
import pyhdb, optparse, time
import csv, sys, termios
import os.path, getpass, re, json
def getSqlScript(filename):
    """Read a SQL script file and return its content, or "" if it contains
    anything other than plain SELECT statements.

    :param filename: path to the .sql file
    :return: file content, or "" when a forbidden keyword is found
    """
    with open(filename, 'r') as f:
        sql_content = f.read()
    # Tokenize on non-word characters so keywords are matched whole.
    words = set(re.sub(r"[^\w]", " ", sql_content.upper()).split())
    # Evita que rodem algum DDL/DML por engano (reject DML/DDL/blocks).
    if words & {"UPDATE", "INSERT", "DELETE"}:
        print("Comandos DML não suportados, apenas SELECT")
        return ""
    if words & {"CREATE", "ALTER", "TRUNCATE", "DROP"}:
        print("Comandos DDL não suportados, apenas SELECT")
        return ""
    # Fix: the message always claimed EXEC was blocked, but EXEC was never
    # actually checked; also closed the unbalanced parenthesis in the text.
    if words & {"BEGIN", "CALL", "EXEC"}:
        print("Blocos (BEGIN/EXEC/CALL) não suportados, apenas SELECT")
        return ""
    return sql_content
def run(user, password, host, port, filename, sql_script, clear_data, show_descriptions):
    """Execute each ';'-separated statement of *sql_script* against a HANA
    server and write all result rows to *filename* as ';'-delimited CSV.

    :param user: HANA user name
    :param password: HANA password
    :param host: server host
    :param port: server port
    :param filename: output CSV path (asks before overwriting)
    :param sql_script: SQL text, possibly several statements separated by ';'
    :param clear_data: strip the BW '/BIC/' prefix from column names
    :param show_descriptions: add a second header row with column descriptions
    """
    if os.path.isfile(filename):
        # Ask before clobbering an existing output file.
        while True:
            confirm = input("O arquivo de saida {} já existe, sobregravar? ".format(filename))
            if confirm.upper() == 'S':
                break
            elif confirm.upper() == 'N':
                return
    with open(filename, 'w') as file_handler:
        csv_writer = csv.writer(file_handler, delimiter=';', quotechar='"')
        connection = pyhdb.connect(
            host=host,
            port=port,
            user=user,
            password=password,
        )
        first_script = True
        for sql_command in sql_script.split(";"):
            # Fix: this was `break`, which aborted the remaining statements at
            # the first empty segment (';;' or a trailing ';'); skip it instead.
            if sql_command.isspace() or not sql_command:
                continue
            cursor = connection.cursor()
            print("-" * 30)
            print(sql_command)
            t_start = time.time()
            # TODO: handle pyhdb.exceptions.DatabaseError explicitly.
            cursor.execute(sql_command)
            t_fetch = time.time()
            counter = 0
            desc_fields = []
            for field_item in cursor.description:
                if clear_data and field_item[0].startswith("/BIC/"):
                    desc_fields.append(field_item[0][5:])  # strip '/BIC/'
                else:
                    desc_fields.append(field_item[0])
            if first_script:
                first_script = False
                csv_writer.writerow(desc_fields)
                # Fix: the descriptions row was previously gated behind a second
                # `if first_script:` that could never be True anymore, so
                # -d/--showdesc had no effect. Write it right after the names.
                if show_descriptions:
                    desc_row = [field_item[2] for field_item in cursor.description]
                    csv_writer.writerow(desc_row)
            while True:
                res = cursor.fetchmany(1000)
                for line in res:
                    csv_writer.writerow(line)
                if len(res) == 0:
                    break
                counter += len(res)
                # Brazilian-style thousands separator for the progress counter.
                print("\rProgresso: {:,} ".format(counter).replace(",", "."), end="")
            t_end = time.time()
            try:
                cursor.close()
            except Exception:
                pass
            print()
            print("tempo exec..: {} segundos".format(t_fetch - t_start))
            print("tempo fetch.: {} segundos".format(t_end - t_fetch))
            print("tempo total.: {} segundos".format(t_end - t_start))
        try:
            connection.close()
        except Exception:
            pass
if __name__ == "__main__":
    # Load the environment list (host/port per named environment).
    config_file_name = "config.json"
    default_env = ""
    env_list = ""
    try:
        with open(config_file_name, "r") as f:
            env_config = json.load(f)
    except IOError:
        env_config = []
        print("Arquivo de ambientes (config.json) ausente")
    for entry in env_config:
        env_list = env_list + entry["env"] + " "
        if entry["default"] == "S":
            default_env = entry["env"]
    parser = optparse.OptionParser()
    parser.add_option('-u', '--user',
                      action="store", dest="user", default="",
                      help="Nome do usuário HANA")
    # Fix: help text said "Nome do usuário" for the password option.
    parser.add_option('-p', '--pass',
                      action="store", dest="password", default="",
                      help="Senha do usuário HANA")
    parser.add_option('-e', '--env',
                      action="store", dest="env", default=default_env,
                      help="Nome do ambiente: " + env_list)
    parser.add_option("-s", "--script", dest="script",
                      help="Arquivo com o Script SQL", metavar="SCRIPT", default="")
    parser.add_option("-f", "--file", dest="filename",
                      help="Arquivo de saida", metavar="FILE", default="")
    parser.add_option("-c", "--clean",
                      action="store_true", dest="clean", default=False,
                      help="Limpa nomes dos campos (/BIC/)")
    parser.add_option("-d", "--showdesc",
                      action="store_true", dest="descriptions", default=False,
                      help="Adiciona uma linha de cabeçalho com as descrições")
    options, args = parser.parse_args()
    param_ok = True
    cred_user = options.user
    # Fix: this line was anonymization residue ('<PASSWORD>.password'),
    # a syntax error; it must read the parsed option.
    cred_password = options.password
    file_name = options.filename
    sql_file = options.script
    clear_data = options.clean
    show_descriptions = options.descriptions
    if cred_user == "":
        print("Usuário inválido")
        param_ok = False
    if file_name == "":
        print("arquivo de saída inválido")
        param_ok = False
    if sql_file == "":
        print("arquivo SQL inválido")
        param_ok = False
    if not os.path.isfile(sql_file):
        print("arquivo SQL inexistente")
        param_ok = False
    env = options.env.upper()
    # Fix: host/port were unbound (NameError) when the environment name did
    # not match any config entry; initialize before the lookup.
    host = ""
    port = 0
    for entry in env_config:
        if env == entry["env"]:
            host = entry["host"]
            port = entry["port"]
            break
    if host == "":
        print("Ambiente inválido")
        param_ok = False
    if cred_password == "" and param_ok:
        print("senha não informada")
        # Prompt interactively so the password does not land in shell history.
        cred_password = getpass.getpass()
        if cred_password == "":
            print("Parametros inválidos")
            param_ok = False
    if param_ok:
        sql_script = getSqlScript(sql_file)
        if sql_script == "":
            print("SQL inválido")
            param_ok = False
    if param_ok:
        run(cred_user, cred_password, host, port, file_name, sql_script, clear_data, show_descriptions)
    else:
        parser.print_help()
        sys.exit(1)
|
from django.db.models import Q
from django.shortcuts import get_object_or_404
from django.http.response import JsonResponse
from django.contrib.auth.models import User, Group
from django.utils.crypto import get_random_string
from rest_framework.pagination import PageNumberPagination
from rest_framework.response import Response
from rest_framework.viewsets import ModelViewSet
from rest_framework.authentication import SessionAuthentication
from rest_framework.decorators import action
from chat import settings
from core.serializers import (
MessageModelSerializer,
UserModelSerializer,
RoomModelSerializer,
)
from core.models import MessageModel, RoomModel
class CsrfExemptSessionAuthentication(SessionAuthentication):
    """Session authentication without CSRF enforcement.

    DRF's SessionAuthentication rides on Django's session framework, which
    normally requires a valid CSRF token. This subclass turns that check into
    a no-op so the API can be called without CSRF tokens.
    """

    def enforce_csrf(self, request):
        """Skip the CSRF check entirely."""
        pass
class MessagePagination(PageNumberPagination):
    """
    Limit message prefetch to one page.
    """
    # Page size is driven by the project setting MESSAGES_TO_LOAD.
    page_size = settings.MESSAGES_TO_LOAD
class MessageModelViewSet(ModelViewSet):
    """Read/create API for chat messages, filtered by the target group."""

    queryset = MessageModel.objects.all()
    serializer_class = MessageModelSerializer
    allowed_methods = ("GET", "POST", "HEAD", "OPTIONS")
    authentication_classes = (CsrfExemptSessionAuthentication,)
    pagination_class = MessagePagination

    def list(self, request, *args, **kwargs):
        """List messages belonging to the group given by ?target=...

        NOTE(review): a missing `target` query parameter raises KeyError
        (500); presumably the frontend always sends it -- confirm.
        """
        target = self.request.query_params["target"]
        self.queryset = self.queryset.filter(group=target)
        return super(MessageModelViewSet, self).list(request, *args, **kwargs)
class UserModelViewSet(ModelViewSet):
    """Read-only listing of every user except the requester."""

    queryset = User.objects.all()
    serializer_class = UserModelSerializer
    allowed_methods = ("GET", "HEAD", "OPTIONS")
    pagination_class = None  # return every user in one response

    def list(self, request, *args, **kwargs):
        """List all users, excluding the one making the request."""
        others = self.queryset.exclude(id=request.user.id)
        self.queryset = others
        return super(UserModelViewSet, self).list(request, *args, **kwargs)
class RoomModelViewSet(ModelViewSet):
    """API for chat rooms: list the requester's rooms, create and update rooms."""

    queryset = RoomModel.objects.all()
    serializer_class = RoomModelSerializer
    allowed_methods = ("GET", "POST", "DELETE", "PUT")
    authentication_classes = (CsrfExemptSessionAuthentication,)
    pagination_class = None

    def list(self, request, *args, **kwargs):
        """Return id/name/members for every room the requester belongs to."""
        self.queryset = RoomModel.objects.filter(members=request.user)
        room_list = [
            {
                "id": room.id,
                "name": room.name,
                "members": [
                    {"id": member.id, "username": member.username}
                    for member in room.members.all()
                ],
            }
            for room in self.queryset
        ]
        return JsonResponse(room_list, safe=False)

    def create(self, request, *args, **kwargs):
        """Create a room backed by a fresh auth Group and add every user to it.

        Fix: the original duplicated the group/room construction in both
        branches; only the room name differed, so the branches are collapsed.
        """
        room_name = request.data["room_name"]
        if room_name == "":
            room_name = get_random_string()  # no name supplied -> random one
        # NOTE(review): get_random_string() without an explicit length is
        # removed in Django 4 -- confirm the project's Django version.
        new_group = Group.objects.create(name=get_random_string())
        room = RoomModel(base_group=new_group, name=room_name)
        room.save()  # must be saved before M2M members can be attached
        # NOTE(review): every existing user is added to every new room --
        # confirm this is the intended membership model.
        for user in User.objects.all():
            room.members.add(user)
        room.save()
        return Response(status=200)

    def update(self, request, *args, **kwargs):
        """Update a room; PUT is treated as a partial update."""
        return super(RoomModelViewSet, self).update(
            request, partial=True, *args, **kwargs
        )
|
<reponame>ArqiSoft/ml-services
import gc
import re
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
class Param:
    """Bundle of character-encoding state shared between encoder and decoder.

    Holds the character set, both direction lookup tables, and the maximum
    SMILES length used for padding.
    """

    def __init__(self, charset, char_to_int, int_to_char, smiles_len):
        self.charset, self.char_to_int = charset, char_to_int
        self.int_to_char, self.smiles_len = int_to_char, smiles_len
class Preprocessor:
    """Preprocessing of SMILES strings into one-hot encoded arrays.

    The SMILES are vectorized to one-hot arrays. A character set is built
    from all characters found in the SMILES (train and test) plus a start
    character ('!') and a stop character ('E') used to initiate the decoder
    and to signal when SMILES generation has stopped. The stop character
    also works as padding so all vectors share the same length, allowing
    batch training. Two dictionaries translate back and forth between index
    and character.
    """

    def __init__(self, filepath, delimiter, smiles_header, smiles_len=70):
        """Load a CSV of SMILES, filter unusable entries, build the charset
        and vectorize a train/test split.

        :param filepath: path to the CSV input file
        :param delimiter: CSV delimiter
        :param smiles_header: name of the column holding the SMILES
        :param smiles_len: maximum accepted SMILES length
        """
        print('Started preprocessing of input data')
        self.smiles_len = smiles_len
        data = pd.read_csv(filepath, delimiter=delimiter)
        data = self.prepare_smiles(data, self.smiles_len, smiles_header=smiles_header)
        self.charset, self.char_to_int, self.int_to_char = \
            Preprocessor.get_charset_charints(data, smiles_header)
        # Fixed random_state keeps the split reproducible across runs.
        smiles_train, smiles_test = train_test_split(data[smiles_header], random_state=6)
        gc.collect()
        print(self.charset)
        print(smiles_train.shape)
        print(smiles_test.shape)
        print(smiles_test)
        print(smiles_train.iloc[0])
        self.X_train, self.T_train, self.Y_train = Preprocessor.vectorize(
            smiles_train.values, self.charset, self.char_to_int, self.smiles_len
        )
        self.X_test, self.T_test, self.Y_test = Preprocessor.vectorize(
            smiles_test.values, self.charset, self.char_to_int, self.smiles_len
        )

    @staticmethod
    def get_charset_charints(data, smiles_header):
        """
        Analyse the characters present in the training dataset and create
        dictionaries to use them later for constant encoding.

        :param data: dataframe holding a SMILES column
        :param smiles_header: name of the SMILES column
        :return: (charset, char_to_int, int_to_char)
        """
        # '!' (start) and 'E' (stop/pad) are always part of the charset.
        charset = set("".join(list(data[smiles_header])) + "!E")
        char_to_int = dict((c, i) for i, c in enumerate(charset))
        int_to_char = dict((i, c) for i, c in enumerate(charset))
        return charset, char_to_int, int_to_char

    @staticmethod
    def prepare_smiles(data, smiles_len, smiles_header=None):
        """
        Discard unacceptable SMILES (unsupported characters or too long).

        :param data: a single SMILES string or a dataframe with a SMILES column
        :param smiles_len: max allowed length of a SMILES string
        :param smiles_header: SMILES column name (required for dataframes)
        :return: the folded SMILES string (or None if rejected), or the
                 filtered dataframe
        """
        if type(data) is str:
            # Fix: removed a dead `smiles = data` assignment that was
            # immediately overwritten.
            smiles = Preprocessor.fold_double_characters(data)
            # Characters in this class mark elements the model does not
            # support (two-char symbols were folded to placeholders first).
            if re.compile(r"[Mga.uUerbLGTfKRmd*h]").search(smiles) or len(smiles) >= smiles_len:
                return None
            return smiles
        if smiles_header is None:
            raise Exception('Smiles header is not provided. Cannot locate smiles column in dataframe.')
        data[smiles_header] = data[smiles_header].apply(Preprocessor.fold_double_characters)
        mask_1 = (~data[smiles_header].str.contains(re.compile(r"[Mga.uUerbLGTfKRmd*h]")))
        mask_2 = (data[smiles_header].str.len() < smiles_len)
        return data.loc[mask_1].loc[mask_2]

    @staticmethod
    def fold_double_characters(smiles, rep=None):
        """
        Fold two-character element symbols in a SMILES to special single
        characters so every position is one token.

        :param smiles: SMILES string
        :param rep: dictionary of which double chars fold to which single chars
        :return: SMILES string with folded double chars
        :raises TypeError: if *smiles* is not a string
        """
        if rep is None:
            rep = {"Cl": "X", "Br": "Y", "Si": "A", "Se": "Z", "se": "z", "As": "Q"}
        if type(smiles) is str:
            rep = dict((re.escape(k), v) for k, v in iter(rep.items()))
            pattern = re.compile("|".join(rep.keys()))
            return pattern.sub(lambda m: rep[re.escape(m.group(0))], smiles)
        raise TypeError("Not a string type provided!")

    @staticmethod
    def unfold_double_characters(smiles, rep=None):
        """
        Invert fold_double_characters: map the single-char placeholders back
        to their two-character element symbols.

        :param smiles: folded SMILES string
        :param rep: optional forward mapping (double char -> placeholder)
        :return: SMILES string with the original two-char symbols restored
        :raises TypeError: if *smiles* is not a string
        """
        if rep is None:
            rep = {"Cl": "X", "Br": "Y", "Si": "A", "Se": "Z", "se": "z", "As": "Q"}
        inv_rep = {v: k for k, v in rep.items()}
        if type(smiles) is str:
            # Fix: removed a dead `rep = dict(...)` assignment that was
            # computed but never read.
            pattern = re.compile("|".join(inv_rep.keys()))
            return pattern.sub(lambda m: inv_rep[re.escape(m.group(0))], smiles)
        raise TypeError("Not a string type provided!")

    @staticmethod
    def vectorize(smiles, charset, char_to_int, smiles_len):
        """
        One-hot encode SMILES with start/stop framing.

        :param smiles: a single SMILES string or an iterable of strings
        :param charset: set of all characters (including '!' and 'E')
        :param char_to_int: char -> index lookup
        :param smiles_len: maximum SMILES length (padding target)
        :return: three arrays (the original comment said "two"): the reversed
                 sequence, the decoder input (shifted left), and the decoder
                 target (shifted right)
        """
        embed = smiles_len + 1
        if type(smiles) is str:
            one_hot = np.zeros((1, embed, len(charset)), dtype=np.int8)
            one_hot[0, 0, char_to_int["!"]] = 1  # start char
            for j, c in enumerate(smiles):
                one_hot[0, j + 1, char_to_int[c]] = 1
            one_hot[0, len(smiles) + 1:, char_to_int["E"]] = 1  # stop + padding
            return one_hot[:, :0:-1, :], one_hot[:, :-1, :], one_hot[:, 1:, :]
        one_hot = np.zeros((smiles.shape[0], embed, len(charset)), dtype=np.int8)
        for i, smile in enumerate(smiles):
            one_hot[i, 0, char_to_int["!"]] = 1  # start char
            for j, c in enumerate(smile):
                one_hot[i, j + 1, char_to_int[c]] = 1
            one_hot[i, len(smile) + 1:, char_to_int["E"]] = 1  # stop + padding
        return one_hot[:, :0:-1, :], one_hot[:, :-1, :], one_hot[:, 1:, :]
|
# Copyright (c) SenseTime Research and its affiliates. All Rights Reserved.
import random
import torch
import torchvision
from torchvision.transforms import functional as F
class Compose(object):
    """Chain transforms that each map (image, target) -> (image, target)."""

    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, image, target=None):
        for transform in self.transforms:
            image, target = transform(image, target)
        return image, target

    def __repr__(self):
        lines = [self.__class__.__name__ + "("]
        lines.extend("    {0}".format(t) for t in self.transforms)
        return "\n".join(lines) + "\n)"
class Resize(object):
    """Resize an image (and its target) to a randomly chosen minimum size,
    capped by a maximum size and floored at 384 px per side for the STFT
    deformable-conv kernel."""

    def __init__(self, min_size, max_size):
        # Normalize a scalar min_size to a one-element tuple for random.choice.
        if not isinstance(min_size, (list, tuple)):
            min_size = (min_size,)
        self.min_size = min_size
        self.max_size = max_size

    # modified from torchvision to add support for max size
    def get_size(self, image_size):
        """Compute the (height, width) to resize to, preserving aspect ratio."""
        w, h = image_size
        size = random.choice(self.min_size)
        if self.max_size is not None:
            shorter = float(min((w, h)))
            longer = float(max((w, h)))
            # Shrink the requested size if the long side would exceed max_size.
            if longer / shorter * size > self.max_size:
                size = int(round(self.max_size * shorter / longer))
        if (w <= h and w == size) or (h <= w and h == size):
            return (h, w)
        if w < h:
            ow, oh = size, int(size * h / w)
        else:
            oh, ow = size, int(size * w / h)
        # For STFT the input image can't be smaller than the dcn kernel,
        # so each side is clamped to at least 384 (128 * 3).
        return (max(384, oh), max(384, ow))

    def __call__(self, image, target=None):
        size = self.get_size(image.size)
        image = F.resize(image, size)
        if target is not None:
            target = target.resize(image.size)
        return image, target
class RandomHorizontalFlip(object):
    """Horizontally flip the image (and target) with probability ``prob``."""
    def __init__(self, prob=0.5):
        self.prob = prob
        # Last random draw; reused by target-less calls (see __call__).
        self.chance = 0.0
    def __call__(self, image, target=None):
        # A fresh random draw is made only when a target is supplied; a later
        # call without a target reuses the previous draw -- presumably so
        # paired image/target passes flip consistently. NOTE(review): on the
        # very first target-less call the initial 0.0 always satisfies
        # 0.0 < prob (for prob > 0), forcing a flip -- confirm intended.
        if target is not None:
            self.chance = random.random()
        if self.chance < self.prob:
            image = F.hflip(image)
            if target is not None:
                target = target.transpose(0)
        return image, target
class RandomVerticalFlip(object):
    """Vertically flip the image (and target) with probability ``prob``."""

    def __init__(self, prob=0.5):
        self.prob = prob

    def __call__(self, image, target=None):
        should_flip = random.random() < self.prob
        if should_flip:
            image = F.vflip(image)
            if target is not None:
                target = target.transpose(1)
        return image, target
class ColorJitter(object):
    """Thin wrapper around torchvision's ColorJitter; the target is untouched."""

    def __init__(self,
                 brightness=None,
                 contrast=None,
                 saturation=None,
                 hue=None,
                 ):
        self.color_jitter = torchvision.transforms.ColorJitter(
            brightness=brightness,
            contrast=contrast,
            saturation=saturation,
            hue=hue,
        )

    def __call__(self, image, target=None):
        jittered = self.color_jitter(image)
        return jittered, target
class ToTensor(object):
    """Convert the image to a tensor; the target passes through untouched."""

    def __call__(self, image, target=None):
        tensor = F.to_tensor(image)
        return tensor, target
class Normalize(object):
    """Channel-wise normalization, optionally converting RGB to BGR*255 first."""

    def __init__(self, mean, std, to_bgr255=True):
        self.mean = mean
        self.std = std
        self.to_bgr255 = to_bgr255

    def __call__(self, image, target=None):
        if self.to_bgr255:
            # Reorder channels RGB -> BGR and rescale from [0, 1] to [0, 255].
            image = image[[2, 1, 0]] * 255
        image = F.normalize(image, mean=self.mean, std=self.std)
        # Fix: the original had a redundant `if target is None` branch that
        # returned the exact same tuple as the fall-through path.
        return image, target
|
<reponame>hmlewis-astro/ARCTIC_ERMINE<gh_stars>0
"""
ARCTIC_phot.py
<NAME>
<EMAIL>
2020
Automatic reduction pipeline for transit photometry with the Astrophysical Research Consortium Imaging Camera (ARCTIC) at Apache Point Observatory (APO).
to use:
python ARCTIC_phot.py path/to/your/data
OR place ARCTIC_phot.py in your folder with data and run with no argument:
python ARCTIC_phot.py
Performs aperture photometry on science images in the /reduced/data/ directory for multiple filters.
"""
import os
import re
import sys
import warnings
import numpy as np
import glob
import pandas as pd
import astropy.io.fits as pyfits
from astropy.io import ascii
from astropy.coordinates import FK5, SkyCoord
from astropy.wcs import WCS
import astropy.units as u
from astropy import modeling
from astropy.convolution import convolve, Gaussian2DKernel, convolve_fft
# ignore overwriting reduced files warnings in case you need to rerun
warnings.filterwarnings('ignore', message='Overwriting existing file')
# ignore overflow errors
warnings.filterwarnings('ignore', message='overflow encountered in sinh')
# ignore everything
# NOTE(review): this blanket filter suppresses all warnings and makes the two
# targeted filters above redundant -- confirm that is intended.
warnings.filterwarnings('ignore')
"""
Find reduced data
"""
# take directory from user or assume current directory
if len(sys.argv) > 1:
    direc = sys.argv[1]
else:
    direc = '.'
# Layout produced by ARCTIC_imagered.py: <direc>/reduced/{cals,data}.
cals_direc = os.path.join(direc, 'reduced', 'cals')
reduced_direc = os.path.join(direc, 'reduced', 'data')
results_direc = os.path.join(reduced_direc, 'results')
# directories for reduced images
# NOTE(review): the script only warns and keeps running when the reduced
# directories are missing (the later glob simply finds no files) -- consider
# exiting here instead.
if not os.path.exists(cals_direc):
    print(' > Reduced cals directory does not exist! Run ARCTIC_imagered.py first.')
if not os.path.exists(reduced_direc):
    print(' > Reduced data directory does not exist! Run ARCTIC_imagered.py first.')
if not os.path.exists(results_direc):
    os.makedirs(results_direc)
"""
Find sources
"""
import matplotlib
matplotlib.use('agg')
from astropy.stats import sigma_clipped_stats, mad_std
from astropy.visualization import SqrtStretch, SinhStretch, MinMaxInterval, PercentileInterval, ZScaleInterval
from astropy.visualization.mpl_normalize import ImageNormalize
import csv
import datetime
import matplotlib.pyplot as plt
import photutils as pt
from photutils import DAOStarFinder, find_peaks, aperture_photometry, CircularAperture
from progress.bar import ChargingBar
from scipy import stats
import scipy.signal
import scipy.optimize as optimize
from sklearn import datasets, linear_model
from sklearn.metrics import mean_squared_error, r2_score
# All reduced science frames produced by ARCTIC_imagered.py.
files = glob.glob(os.path.join(reduced_direc, "*.fits"))
print('\n >>> Starting daofind...')
#bar = ChargingBar(' > ', max=len(files))
def update_coords(img, x_guess, y_guess, mask_max_counts=65000, box_width=70, plot_fit=False, smooth=True, kernel_size=10.):
    '''
    Refine a star's (x, y) position by fitting 1D Gaussians to the summed
    rows/columns of a cutout around the initial guess.

    img: 2D array. Should be the image you are analyzing
    x_guess: int, 1st guess for the x coordinate. Needs to be closer than box_width
    y_guess: int, 1st guess for the y coordinate. Needs to be closer than box_width
    mask_max_counts: pixels above this count are zeroed before the fit
        (NOTE(review): the original docstring said "set to the median", but
        the code sets them to 0 -- confirm which is intended)
    box_width: int, The area to consider for the stars coordinates. Needs to be small enough to not include
               extra stars, but big enough not to include errors on your x,y guess
    plot_fit: bool, show a plot of the gauss fit?
    smooth: bool, convolve image with gaussian first? The advantage of this is that it will take out some
            of the errors caused by the image being a donut instead of a gaussian. Especially useful for
            non-uniform PSFs, such as ARCSAT's defocused image. For ARCTIC, this may not be necessary.
            Try it anyway though!
    kernel_size: float, standard deviation of gaussian kernel used to smooth data (pixels). Irrelevant
                 if smooth is set to False

    Returns (x_cen, y_cen): the refined coordinates.
    '''
    box_size = int(box_width/2)
    x_guess = int(x_guess)
    y_guess = int(y_guess)
    # cutout the part of the image around the star of interest
    stamp = img[y_guess-box_size:y_guess+box_size, x_guess-box_size:x_guess+box_size].astype(np.float64)
    cutout = np.copy(stamp)
    # change saturated pixels to 0, so it doesn't throw off fit
    cutout[cutout > mask_max_counts] = 0.
    if smooth:
        # Convolve image with gaussian kernel to limit the noise
        gauss_kernel = Gaussian2DKernel(kernel_size)
        cutout = convolve(cutout, gauss_kernel, boundary='extend')
    # (fix: removed the dead `cutout_s = cutout` else-branch; cutout_s was never used)
    # Subtract sky background
    cutout -= np.median(cutout)
    # Sum pixels in x,y directions
    x_sum = np.sum(cutout, axis=0)
    y_sum = np.sum(cutout, axis=1)
    # Fit a gaussian to the x and y summed columns
    offset = np.arange(box_width) - box_size
    fitter = modeling.fitting.LevMarLSQFitter()
    model = modeling.models.Gaussian1D()  # depending on the data you need to give some initial values
    fitted_x = fitter(model, offset, x_sum)
    fitted_y = fitter(model, offset, y_sum)
    # Add the offset from the fitted gaussian to the original guess
    x_cen = x_guess + fitted_x.mean
    y_cen = y_guess + fitted_y.mean
    x_diff = x_cen - x_guess
    y_diff = y_cen - y_guess
    print("X Guess : ", x_guess, "; X Corrected To : ", x_cen, "; Difference Of : ", (x_diff))
    print("Y Guess : ", y_guess, "; Y Corrected To: ", y_cen, "; Difference Of : ", y_diff)
    if plot_fit:
        # Fix: this diagnostic block was placed after the return statement and
        # therefore unreachable; it now runs before returning.
        f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(15, 5))
        ax1.plot(offset, x_sum, 'o', color='C0', label='x offset')
        ax1.plot(offset, y_sum, 'o', color='C1', label='y offset')
        ax1.plot(offset, fitted_x(offset), 'C0')
        ax1.plot(offset, fitted_y(offset), 'C1')
        ax1.legend()
        m, s = np.median(stamp), np.std(stamp)
        ax2.imshow(stamp, vmin=m-s, vmax=m+s, origin='lower', cmap='Greys_r', interpolation='nearest',
                   extent=[-box_size, box_size, -box_size, box_size])
        ax2.plot(fitted_x.mean, fitted_y.mean, 'ro', label='updated')
        ax2.plot(0, 0, 'bo', label='guess')
        ax2.legend()
        ax3.imshow(img, vmin=m-s, vmax=m+s, origin='lower', cmap='Greys_r', interpolation='nearest',)
        ax3.plot(x_cen, y_cen, 'ro', markersize=1)
        ax3.plot(x_guess, y_guess, 'bo', markersize=1)
        plt.tight_layout()
        plt.show()
    return x_cen, y_cen
for ff, fname in enumerate(files):
    # --- load the reduced frame and its WCS ---
    hdul = pyfits.open(fname)
    header = hdul[0].header
    wcs = WCS(header)
    filt = hdul[0].header['FILTER']
    image = hdul[0].data
    mean, median, std = sigma_clipped_stats(image, sigma=3., iters=10)
    sigma = 8.
    # NOTE(review): `sources`/`positions` are never used afterwards (star
    # positions come from xypos.txt); kept for parity with the original flow,
    # consider removing the daofind pass entirely.
    daofind = DAOStarFinder(threshold=sigma*std, fwhm=15., exclude_border=True)
    sources = daofind(image - median)
    positions = (sources['xcentroid'], sources['ycentroid'])
    # --- refine the guessed star positions listed in xypos.txt ---
    xpos, ypos = [], []
    xy = os.path.join(reduced_direc, 'xypos.txt')
    with open(xy, 'r') as df:
        for row in df:
            x, y = row.split()
            x, y = update_coords(image, x, y, box_width=80)
            xpos.append(float(x))
            ypos.append(float(y))
    refs = [(x, y) for x, y in zip(xpos, ypos)]
    # Apertures used only when the diagnostic plots are produced.
    plot_apertures = CircularAperture(refs, r=45.)
    plot_annulus_in = CircularAperture(refs, r=50.)
    plot_annulus_out = CircularAperture(refs, r=55.)
    _, new_fname = os.path.split(fname)
    new_fname = os.path.splitext(new_fname)[0]
    radii = np.arange(1.0, 60.0, 1.0)
    for ref in refs:
        if np.isnan(ref).any():
            # A failed centroid means the frame should be culled by hand.
            print('Make sure you remove the file!', fname)
            break
        else:
            # Curve-of-growth photometry over the full range of radii.
            apertures = [pt.CircularAperture(refs, r=r) for r in radii]
            phot_table = pt.aperture_photometry(image, apertures)
            # Only plot for file names ending in '5' (every ~10th frame).
            if str(new_fname)[-1:] == '5':
                fig = plt.figure()
                ax = fig.add_subplot(1, 1, 1)
                # Fix: this loop previously indexed `results['x']`, but
                # `results` was a tuple of lists, so it raised TypeError.
                for s in range(len(refs)):
                    aperture_sum = []
                    for j, radius in enumerate(radii):
                        col = 'aperture_sum_' + str(j)
                        aperture_sum.append(-2.5 * np.log10(phot_table[col][s]))
                    ax.scatter(radii, aperture_sum / np.min(aperture_sum) - 1.0)
                plt.axhline(y=0.0, linestyle='--', linewidth=1.0, c='k')
                plt.xlabel('Aperture Radius (pixels)')
                plt.ylabel(r'$\Delta$ Magnitude')
                plt.tight_layout()
                plt.savefig(os.path.join(results_direc, str(new_fname) + '.cog.png'))
                plt.close()
            # --- aperture photometry at the final radius, written to .mag ---
            # Fix: the output file was never closed; use a context manager.
            with open(os.path.join(results_direc, str(new_fname) + '.mag'), 'w+') as new_fname_mag:
                ap_radii = 37.0
                apertures = pt.CircularAperture(refs, r=ap_radii)
                new_fname_mag.write('aperture_area \t {} \n'.format(apertures.area))
                annulus_apertures = pt.CircularAnnulus(refs, r_in=40.0, r_out=45.0)
                new_fname_mag.write('annulus_area \t {} \n'.format(annulus_apertures.area))
                new_fname_mag.write('# \n')
                appers = [apertures, annulus_apertures]
                phot_table = pt.aperture_photometry(image, appers, method='exact')
                ascii.write(phot_table, new_fname_mag, delimiter='\t')
|
from __future__ import annotations
from dippy.core.caching.cacheable import Cacheable
from dippy.core.models.guild import *
from dippy.core.timestamp import Timestamp
from gully import Gully
class Guild(Cacheable):
    """Cacheable wrapper around a :class:`GuildModel`.

    Exposes the model's fields as read-only properties and supports
    partial updates from gateway payloads via :meth:`update`.
    """

    def __init__(self, model: GuildModel):
        self._model = model
        # Stream used to publish change events for this guild.
        self._change_event_stream = Gully()

    def update(self, model: GuildModel):
        """Merge the fields explicitly set on *model* into the cached model.

        Only fields present in the incoming payload (``exclude_unset``)
        overwrite existing values. ``copy(update=...)`` rebinds
        ``self._model`` rather than mutating it, so snapshots handed out
        by :meth:`freeze` keep their state.
        """
        # Removed leftover debug prints of afk_channel_id.
        self._model = self._model.copy(update=model.dict(exclude_unset=True))

    def freeze(self) -> Guild:
        """Return a snapshot Guild bound to the current model instance."""
        return Guild(self._model)

    # --- Fields always present on the model -------------------------------

    @property
    def created(self) -> Timestamp:
        return self._model.created

    @property
    def afk_timeout(self) -> int:
        return self._model.afk_timeout

    @property
    def default_message_notifications(self) -> DefaultMessageNotificationLevel:
        return self._model.default_message_notifications

    @property
    def emojis(self) -> list[EmojiModel]:
        return self._model.emojis

    @property
    def explicit_content_filter(self) -> ExplicitContentFilterLevel:
        return self._model.explicit_content_filter

    @property
    def features(self) -> list[GuildFeature]:
        return self._model.features

    @property
    def id(self) -> Snowflake:
        return self._model.id

    @property
    def name(self) -> str:
        return self._model.name

    @property
    def nsfw(self) -> bool:
        return self._model.nsfw

    @property
    def owner_id(self) -> Snowflake:
        return self._model.owner_id

    @property
    def preferred_locale(self) -> str:
        return self._model.preferred_locale

    @property
    def premium_tier(self) -> PremiumTier:
        return self._model.premium_tier

    @property
    def region(self) -> str:
        return self._model.region

    @property
    def roles(self) -> list[RoleModel]:
        return self._model.roles

    @property
    def system_channel_flags(self) -> int:
        return self._model.system_channel_flags

    @property
    def verification_level(self) -> VerificationLevel:
        return self._model.verification_level

    # --- Optional fields (may be absent depending on the payload) ---------

    @property
    def afk_channel_id(self) -> Optional[Snowflake]:
        return self._model.afk_channel_id

    @property
    def application_id(self) -> Optional[Snowflake]:
        return self._model.application_id

    @property
    def approximate_member_count(self) -> Optional[int]:
        return self._model.approximate_member_count

    @property
    def approximate_presence_count(self) -> Optional[int]:
        return self._model.approximate_presence_count

    @property
    def banner(self) -> Optional[str]:
        return self._model.banner

    @property
    def channels(self) -> Optional[list[ChannelModel]]:
        return self._model.channels

    @property
    def description(self) -> Optional[str]:
        return self._model.description

    @property
    def discovery_splash(self) -> Optional[str]:
        return self._model.discovery_splash

    @property
    def icon(self) -> Optional[str]:
        return self._model.icon

    @property
    def icon_hash(self) -> Optional[str]:
        return self._model.icon_hash

    @property
    def joined_at(self) -> Optional[datetime]:
        return self._model.joined_at

    @property
    def large(self) -> Optional[bool]:
        return self._model.large

    @property
    def max_members(self) -> Optional[int]:
        return self._model.max_members

    @property
    def max_presences(self) -> Optional[int]:
        return self._model.max_presences

    @property
    def max_video_channel_users(self) -> Optional[int]:
        return self._model.max_video_channel_users

    @property
    def member_count(self) -> Optional[int]:
        return self._model.member_count

    @property
    def members(self) -> Optional[list[MemberModel]]:
        return self._model.members

    @property
    def mfa(self) -> Optional[MFALevel]:
        return self._model.mfa

    @property
    def mfa_level(self) -> Optional[MFALevel]:
        return self._model.mfa_level

    @property
    def owner(self) -> Optional[bool]:
        return self._model.owner

    @property
    def permissions(self) -> Optional[str]:
        return self._model.permissions

    @property
    def premium_subscription_count(self) -> Optional[int]:
        return self._model.premium_subscription_count

    @property
    def presences(self) -> Optional[list[PresenceModel]]:
        return self._model.presences

    @property
    def public_updates_channel_id(self) -> Optional[Snowflake]:
        return self._model.public_updates_channel_id

    @property
    def rules_channel_id(self) -> Optional[Snowflake]:
        return self._model.rules_channel_id

    @property
    def splash(self) -> Optional[Union[int, str]]:
        return self._model.splash

    @property
    def system_channel_id(self) -> Optional[Snowflake]:
        return self._model.system_channel_id

    @property
    def unavailable(self) -> Optional[bool]:
        return self._model.unavailable

    @property
    def vanity_url_code(self) -> Optional[str]:
        return self._model.vanity_url_code

    @property
    def voice_states(self) -> Optional[list[VoiceStateModel]]:
        return self._model.voice_states

    @property
    def welcome_screen(self) -> Optional[WelcomeScreenModel]:
        return self._model.welcome_screen

    @property
    def widget_channel_id(self) -> Optional[Snowflake]:
        return self._model.widget_channel_id

    @property
    def widget_enabled(self) -> Optional[bool]:
        return self._model.widget_enabled
|
"""
Module which interprets textX model based on viewX model and generates preview.html file
with Cytoscape.js graph model used for visualization of textX model. The preview.html file is hosted on file server
and can be loaded with multiple clients (regular internet browser or Visual Studio Code extension) and previewed.
The graph is interactive and it's visualization is based on Cytoscape.js graph engine.
"""
import sys
from os.path import dirname, abspath, join
from textx.metamodel import metamodel_from_file
from textx.model import children_of_type
from textx.exceptions import *
import preview_generator
import cytoscape_helper as cy
import cytoscape_rule_engine as cre
class ViewXInterpreter(object):
    """
    ViewX model interpreter.

    Walks a textX model, matches its elements against the views defined
    in a viewX model and builds Cytoscape.js graph elements plus style
    definitions for visualization.
    """

    def __init__(self, view_model):
        self.view_model = view_model  # assign view model when initialized
        self.model = None  # different models can be interpreted with same view model
        self.elements = {}  # all Cytoscape.js graph elements
        self.styles = []  # style definitions for elements
        self.overwrite_styles = False  # overwrite styles flag
        self.traversed_types = []  # visited types during recursive search algorithm
        self.existing_parents = []  # used when multiple sources reference same element as inside
        self.links = {}  # store links when not all elements have been created yet
        # dictionary of dictionaries of dictionaries of target element (hash code) and list of properties (tuples)
        # {type1 : {source1 : {dst1 : [(prop1, value1), (prop2, value2)], dst2 : [(prop3, value3)]},
        #           source2 : {dst3 : [(prop4, value4)], dst4 : [(prop5, value5)]}},
        #  type2 : {source3 : {dst5 : [(prop6, value6), (prop7, value7)]}},
        #  ...
        # }

    def interpret(self, model):
        """
        Main interpreting logic.
        :param model: textX model that should be interpreted
        :return: /
        """
        self.model = model
        # BUG FIX: previously this method read the module-global `view_model`
        # created by the __main__ block; use the instance's own view model so
        # the class also works when imported as a library.
        # The flag is loop-invariant, so compute it once up front.
        self.overwrite_styles = bool(
            self.view_model.stylesheet
            and self.view_model.stylesheet.overwrite == 'overwrite')
        for view in self.view_model.views:
            # loop over model tx properties recursively and match them with defined views
            if not (type(view.shape) is str and view.shape.lower() == 'none'):
                self.match_view_within_type(model, view)
                if not self.overwrite_styles:
                    self._generate_view_styles(view)
        # create property links if any
        if self.links:
            self.create_links()

    def _generate_view_styles(self, view):
        """Generate and collect the Cytoscape.js styles for a single view."""
        visitor = cre.ViewStylePropertyVisitor(view)
        self.styles.append(visitor.view_style)
        property_link = None
        selected_property = None
        container_property = None
        for prop in view.properties:
            # create ViewStyle for selected state
            if prop.__class__.__name__ == 'SelectedProperty':
                selected_property = prop
            # check if view has link to it's properties
            elif prop.__class__.__name__ == 'PropertyLink':
                property_link = prop
            # create styles for container shape
            elif prop.__class__.__name__ == 'ContainerProperty':
                container_property = prop
        # if it has, create style for property links
        if property_link:
            link_visitor = cre.LinkStylePropertyVisitor(view, property_link)
            self.styles.append(link_visitor.view_style)
            # create ViewStyle for link in selected state
            for link_prop in property_link.properties:
                if link_prop.__class__.__name__ == 'LinkSelectedProperty':
                    sel_link_visitor = cre.LinkStylePropertyVisitor(view, property_link, link_prop, ':selected')
                    self.styles.append(sel_link_visitor.view_style)
                    break
        # append ViewStyle for selected state at the end
        if selected_property:
            sel_visitor = cre.ViewStylePropertyVisitor(view, False, selected_property, ':selected')
            self.styles.append(sel_visitor.view_style)
        # create style for container if it is defined
        if container_property:
            cont_sel_property = None
            # BUG FIX: previously the stale loop variable `prop` (the last
            # property of the view) was used here instead of the detected
            # container property.
            container_visitor = cre.ViewStylePropertyVisitor(view, True, container_property)
            for cont_prop in container_property.properties:
                # create ViewStyle for container in selected state
                if cont_prop.__class__.__name__ == 'SelectedProperty':
                    cont_sel_property = cont_prop
                    break
            self.styles.append(container_visitor.view_style)
            # append ViewStyle for container in selected state at the end
            if cont_sel_property:
                cont_sel_visitor = cre.ViewStylePropertyVisitor(view, True, cont_sel_property, ':selected')
                self.styles.append(cont_sel_visitor.view_style)

    def match_view_within_type(self, tx_type, view):
        """
        Utilize children_of_type method from textX module to return all elements that match textX type defined in view
        starting from root tx_type.
        :param tx_type: root tx_type to start searching from
        :param view: defined view for contained textX type that should be found within element of tx_type
        :return: /
        """
        children = children_of_type(view.name, tx_type)
        conditional_parent = getattr(view, 'conditional_parent')
        if conditional_parent is None:
            for child in children:
                self.elements.update(self.build_graph_element(child, view))
        # follow condition of defined parent properties
        else:
            elements_of_type = children_of_type(conditional_parent.name, self.model)
            for parent in elements_of_type:
                for child in children:
                    if self.item_contains_property_by_structure(view.class_properties, parent, child):
                        self.elements.update(self.build_graph_element(child, view))

    def build_graph_element(self, item, view):
        """
        Method for creating Cytoscape.js graph elements defined by specified textX item and view.
        :param item: instance of textX type from which to create Cytoscape.js graph element
        :param view: view which describes how graph element should be created
        :return: Cytoscape.js graph element uniquely defined by textX instance's hash code
        """
        graph_element = None
        # if element is edge
        if view.shape.__class__.__name__ == 'LinkShape':
            start_element = None
            end_element = None
            for prop in view.properties:
                if prop.__class__.__name__ == 'EdgeStartProperty':
                    start_element = self.get_class_property(prop.class_properties, item)
                    start_element = self.elements.get(cy.small_hash(start_element), None)
                elif prop.__class__.__name__ == 'EdgeEndProperty':
                    end_element = self.get_class_property(prop.class_properties, item)
                    end_element = self.elements.get(cy.small_hash(end_element), None)
            # when both start and end nodes are defined
            # NOTE(review): if either endpoint is unresolved graph_element
            # stays None and the add_data calls below would raise — confirm
            # the grammar guarantees both endpoints exist.
            if start_element is not None and end_element is not None:
                graph_element = cy.Edge(start_element, end_element, cy.small_hash(item))
        else:  # element is node
            graph_element = cy.Node(cy.small_hash(item))
        # check item referencing properties (label, is_edge(connection points), parent...)
        element_label = self.resolve_element_label(item, view.properties)
        # if not defined, set default label with element index
        if element_label is None:
            element_label = 'Element_{0}'.format(len(self.elements))
        graph_element.add_data('label', element_label)
        # check if property link is defined (need to store links and create them later)
        property_link = None
        for prop in view.properties:
            if prop.__class__.__name__ == 'PropertyLink':
                property_link = prop
                break
        # if item has defined links to it's properties, store them for later creating
        if property_link is not None:
            # resolve links to properties
            property_links = self.get_all_resolved_properties(property_link.link_to.class_properties, item)
            # transform in dictionary (hash_code : array of properties)
            transformed_links = {cy.small_hash(link): [] for link in property_links}
            # add property link classes as first property in each link
            for value_props in transformed_links.values():
                value_props.append(('class', '{}-{}'.format(property_link.link_to.class_view.name.lower(),
                                                            '-'.join(property_link.link_to.class_properties))))
            # if property link label is defined
            for prop in property_link.properties:
                if prop.__class__.__name__ == 'Label':
                    if prop.label.__class__.__name__ == 'ClassLabel':
                        # the result are labels for all links which are resolve by class properties
                        link_labels = self.get_all_resolved_properties(prop.label.class_properties, item)
                        # if label is properly built and based on link, the count should be the same
                        for value_props, link_label in zip(transformed_links.values(), link_labels):
                            value_props.append(('label', link_label))
                    else:
                        # update label as string
                        for value_props in transformed_links.values():
                            value_props.append(('label', prop.label))
                    break
            self.update_links(item, transformed_links)
        # if parent class view is defined
        if hasattr(view, 'parent_view') and view.parent_view is not None:
            parent = self.find_view_parent_tx_type(item, view, self.model)
            if parent is not None:
                graph_element.add_data('parent', cy.small_hash(parent))
        # if container is defined, group elements of this view under one node
        if hasattr(view, 'container') and view.container:
            # TODO: add something that uniquely defines peg rule, should be class+parent+conditions because of css
            container = self.find_element_with_class('{}-container'.format(view.name.lower()))
            if container is None:
                # create container element if not exists
                container = cy.Node()
                for prop1 in view.properties:
                    if prop1.__class__.__name__ == 'ContainerProperty':
                        element_label = self.resolve_element_label(item, prop1.properties)
                        if element_label:
                            container.add_data('label', element_label)
                        break
                container.add_class('{}-container'.format(view.name.lower()))
                self.elements.update({cy.small_hash(container): container})
            graph_element.add_data('parent', cy.small_hash(container))
        # add type definition offset
        graph_element.add_data('offset', item._tx_position)
        graph_element.add_data('offset_end', item._tx_position_end)
        # add class of view name (textX model type name)
        graph_element.add_class(view.name.lower())
        return {cy.small_hash(item): graph_element}

    def resolve_element_label(self, element, properties):
        """Resolve the display label for *element* from the view's Label property, or None."""
        element_label = None
        for prop in properties:
            if prop.__class__.__name__ == 'Label':
                label_property = prop.label
                if label_property.__class__.__name__ == 'ClassLabel':
                    # resolve item name
                    element_label = self.get_class_property(label_property.class_properties, element)
                else:
                    element_label = label_property
                # prepend/append pre_label/post_label if defined
                pre_label = self.get_class_property(['parent', 'pre_label', 'label'], label_property)
                if type(pre_label) is str:
                    element_label = pre_label + element_label
                post_label = self.get_class_property(['parent', 'post_label', 'label'], label_property)
                if type(post_label) is str:
                    element_label = element_label + post_label
                break
        return element_label

    def get_class_property(self, class_properties, starting_item):
        """
        Resolve single item properties.
        :param class_properties: property hierarchy used for retrieving specific item.
            Each property must reference only single item.
        :param starting_item: item from which to start resolving properties
        :return: resolved property or None if not found
        """
        result_property = starting_item
        for class_prop in class_properties:
            if hasattr(result_property, class_prop):
                result_property = result_property.__getattribute__(class_prop)
            else:
                return None
        return result_property

    def item_contains_property_by_structure(self, class_properties, starting_item, item_to_find):
        """
        Resolve class properties and check whether item_to_find can be found within starting_item
        following the class_properties structure.
        :param class_properties: property hierarchy used for resolving
        :param starting_item: textX type instance from which to start resolving
        :param item_to_find: textX type instance to find
        :return: True if item can be found else False
        """
        result_property = starting_item
        if not class_properties:
            # if result is list, must check if any resulting items match searched item
            # NOTE(review): despite the comment this requires EVERY item in
            # the list to match (logical AND) — confirm that is intended.
            if result_property.__class__.__name__ == 'list':
                match_any = True
                for result in result_property:
                    match_any = match_any and cy.small_hash(result) == cy.small_hash(item_to_find)
                return match_any
            else:
                return cy.small_hash(result_property) == cy.small_hash(item_to_find)
        if result_property.__class__.__name__ == 'list':
            # try for each item because not every item has to have defined all properties
            for item in result_property:
                for class_prop in class_properties:
                    if hasattr(item, class_prop):
                        result_property = item.__getattribute__(class_prop)
                        # if property found, take following class properties and pass them recursively
                        if self.item_contains_property_by_structure(class_properties[1:], result_property, item_to_find):
                            return True
        else:
            # if single item, resolve property directly
            for class_prop in class_properties:
                if hasattr(result_property, class_prop):
                    result_property = result_property.__getattribute__(class_prop)
                    if self.item_contains_property_by_structure(class_properties[1:], result_property, item_to_find):
                        return True
        return False

    def get_all_resolved_properties(self, class_properties, tx_item):
        """
        Resolve class properties of tx_item following the class_properties structure.
        :param class_properties: property hierarchy used for resolving
        :param tx_item: textX type instance from which to start resolving
        :return: all textX type instances that matched defined class property hierarchy
        """
        result_property = tx_item
        # if all class properties are used that means we have resolved all properties
        if not class_properties:
            return result_property
        resolved_properties = []
        if result_property.__class__.__name__ == 'list':
            # try for each item because not every item has to have defined all properties
            for item in result_property:
                for class_prop in class_properties:
                    if hasattr(item, class_prop):
                        result_property = item.__getattribute__(class_prop)
                        # if property found, take following class properties and pass them recursively
                        properties = self.get_all_resolved_properties(class_properties[1:], result_property)
                        if properties.__class__.__name__ != 'list':
                            properties = [properties]
                        resolved_properties.extend(properties)
        else:
            # if single item, resolve property directly
            for class_prop in class_properties:
                if hasattr(result_property, class_prop):
                    result_property = result_property.__getattribute__(class_prop)
                    properties = self.get_all_resolved_properties(class_properties[1:], result_property)
                    if properties.__class__.__name__ != 'list':
                        properties = [properties]
                    resolved_properties.extend(properties)
        return resolved_properties

    def find_view_parent_tx_type(self, tx_item, view, tx_root_item):
        """
        Method that finds parent of the passed tx_item, starting from the tx_root_item.
        :param tx_item: textX type instance for which to find parent
        :param view: view which holds defined parent textX type for tx_item
        :param tx_root_item: textX type instance from which to start search
        :return: parent textX type instance of the tx_item
        """
        # find parent among the containment attributes of the root item
        for parent_key, parent_value in tx_root_item._tx_attrs.items():
            # if defined get the property
            if parent_value.cont:
                parents = tx_root_item.__getattribute__(parent_key)
                # parent is list of items
                if parents.__class__.__name__ == 'list':
                    first_parent = parents[0] if parents else None
                    if first_parent and view.parent_view \
                            and first_parent.__class__.__name__ == view.parent_view.name:
                        for parent in parents:
                            if self._parent_contains_child(parent, tx_item):
                                return parent
                # parent is single item
                else:
                    # BUG FIX: previously this branch iterated the single
                    # parent object (`for parent in parents`), which fails
                    # for non-iterable model instances; inspect it directly.
                    if parents and view.parent_view \
                            and parents.__class__.__name__ == view.parent_view.name:
                        if self._parent_contains_child(parents, tx_item):
                            return parents
        return None

    def _parent_contains_child(self, parent, tx_item):
        """Return True if *tx_item* is one of *parent*'s contained children."""
        for child_key in parent._tx_attrs:
            children = parent.__getattribute__(child_key)
            # child is list of items
            if children.__class__.__name__ == 'list':
                first_child = children[0] if children else None
                if first_child and first_child.__class__.__name__ == tx_item.__class__.__name__:
                    for child in children:
                        if cy.small_hash(tx_item) == cy.small_hash(child):
                            return True
                    # a list of the matching type exists but does not
                    # contain the item — stop searching this parent
                    break
            # child is single item
            else:
                if children.__class__.__name__ == tx_item.__class__.__name__:
                    return True
        return False

    def find_element_with_class(self, _class):
        """
        Searches created graph elements and finds the first one with defined class.
        :param _class: A class which element should contain
        :return: Graph element if found else None.
        """
        for element in self.elements.values():
            if element.classes.split(' ')[0] == _class:
                return element
        return None

    def update_links(self, element, element_links):
        """
        Updates links to item's properties. All items are defined by their type in outer dictionary
        and all links are defined by their source item's hash code in inner dictionary.
        :param element: element for which to update links
        :param element_links: links to be updated for specified element
        :return: / (updates private property of element links)
        """
        link_dict = self.links.get(element.__class__.__name__, {})
        link_dict.update({cy.small_hash(element): element_links})
        self.links[element.__class__.__name__] = link_dict

    def create_links(self):
        """
        When all property links have been resolved they need to be created additionally.
        :return: / (Updates private property of elements with newly created edges
            for property links of existing graph elements)
        """
        new_edges = []
        # outer dictionary iteration (per source element type)
        for key_type, value_link_dict in self.links.items():
            # inner dictionary iteration (per existing graph element)
            for key_el_hash in self.elements:
                # linked properties (hash codes)
                linked = value_link_dict.get(key_el_hash, {})
                for target_hash, link_props in linked.items():
                    start_element = self.elements.get(key_el_hash, None)
                    end_element = self.elements.get(target_hash, None)
                    if start_element is not None and end_element is not None:
                        new_edge = cy.Edge(start_element, end_element)
                        # skip first, first is always class name for property link
                        for prop_key, prop_value in link_props[1:]:
                            new_edge.add_data(prop_key, prop_value)
                        # old class, removed type, property name instead
                        # new_edge.add_class('{}-{}'.format(key_type.lower(), end_element.classes.split(' ')[0]))
                        new_edge.add_class(link_props[0][1])
                        new_edges.append(new_edge)
        # add them afterwards so self.elements is not resized while iterated
        for edge in new_edges:
            self.elements[cy.small_hash(edge)] = edge
def build_path_from_import(view_model, _import):
    """
    Build system path from defined import (relative '.' separated) path.
    :param view_model: view model file path from which to resolve relative import
    :param _import: relative import path (quoted string from the grammar)
    :return: absolute file system path of the import
    """
    base = dirname(view_model)
    relative = _import[1:-1]  # strip the surrounding quotes
    if relative.startswith('./'):
        relative = relative[2:]
    # walk the relative segments: '..' climbs up, anything else descends
    for segment in relative.split('/'):
        base = dirname(base) if segment == '..' else join(base, segment)
    return base
if __name__ == '__main__':
    # CLI entry point: interprets a textX model with a viewX model and
    # generates the preview for it.
    if len(sys.argv) < 4:  # the script expects at least 3 arguments (+1 implicit which is script name)
        print('Usage: python {} <view_model> <model> <output_dir> [<socketPort>]'.format(sys.argv[0]))
    else:
        try:
            script_dir = dirname(abspath(__file__))
            viewX_grammar_folder = join(dirname(dirname(script_dir)), 'grammar')
            # load viewX metamodel from grammar folder and create model
            view_meta_model = metamodel_from_file(join(viewX_grammar_folder, 'viewX.tx'))
            view_model_path = sys.argv[1]
            # NOTE(review): `view_model` is also looked up as a module global
            # by ViewXInterpreter.interpret in some revisions — keep the name.
            view_model = view_meta_model.model_from_file(view_model_path)
            # get view model name (path may use either separator style)
            parts = view_model_path.split('/') if view_model_path.__contains__('/') else view_model_path.split('\\')
            view_model_name = parts[-1]
            # create textX metamodel path based on viewX model import
            metamodel_path = build_path_from_import(sys.argv[1], view_model.tx_import.path)
            model_path = sys.argv[2]
            # get model name
            parts = model_path.split('/') if model_path.__contains__('/') else model_path.split('\\')
            model_name = parts[-1]
            # load metamodel and create model
            target_metamodel = metamodel_from_file(metamodel_path)
            target_model = target_metamodel.model_from_file(model_path)
            # create viewX interpreter based on viewX model and interpret target textX model
            viewX_interpreter = ViewXInterpreter(view_model)
            viewX_interpreter.interpret(target_model)
            # assign output directory path (guaranteed present by the argc check above)
            output_dir = sys.argv[3] if sys.argv.__len__() > 3 else script_dir
            # assign socket.io server port number (optional 4th argument)
            socket_port = sys.argv[4] if sys.argv.__len__() > 4 else '4000'
            preview_generator.generate(viewX_interpreter, output_dir, socket_port, model_name, view_model_name)
            # print messages below are interpreted by viewX extension
            print('success')
        except TextXSyntaxError as e:
            print('error')
            print('TextXSyntaxError: {}'.format(e.__str__()))
        except TextXSemanticError as e:
            print('error')
            print('TextXSemanticError: {}'.format(e.__str__()))
        except FileNotFoundError as e:
            print('error')
            print('FileNotFoundError: {} {}'.format(e.strerror, e.filename))
|
"""Queryset optimization. """
import logging
import typing
import django.db.models as djm
import graphql
import graphql.language.ast as ast_
import phrases_case
if typing.TYPE_CHECKING:
    # These TypedDicts only exist for static type checking; at runtime the
    # plain dicts in OPTIMIZATION_OPTIONS are used directly.
    class OptimizationOption(typing.TypedDict):
        """
        Optimization option dict.
        See :doc:`/optimize` for more information.
        """
        # Per graphql field name (None = type-level defaults) lookups for
        # `QuerySet.only`.
        only: typing.Dict[typing.Optional[str], typing.List[str]]
        # Per graphql field name lookups for `QuerySet.select_related`.
        select: typing.Dict[typing.Optional[str], typing.List[str]]
        # Per graphql field name lookups for `QuerySet.prefetch_related`.
        prefetch: typing.Dict[typing.Optional[str], typing.List[str]]
        # Maps a graphql field name to the related query name used as the
        # lookup prefix when recursing into that relation.
        related: typing.Dict[str, str]
    class Optimization(typing.TypedDict):
        """Optimization computation result dict. """
        # Flattened lookup lists, already prefixed with their relation path.
        only: typing.List[str]
        select: typing.List[str]
        prefetch: typing.List[str]
LOGGER = logging.getLogger(__name__)
# Global registry: graphql typename -> optimization option dict.
OPTIMIZATION_OPTIONS: typing.Dict[str, dict] = {}


def get_optimization_option(typename: str) -> 'OptimizationOption':
    """Get optimization options from typename.

    Args:
        typename (str): Graphql typename.

    Returns:
        OptimizationOption: Options (with every section present).
    """
    options = OPTIMIZATION_OPTIONS.get(typename, {})
    # Fill in any missing sections in place so callers can index freely.
    for section in ('only', 'select', 'prefetch', 'related'):
        options.setdefault(section, {})  # type: ignore
    return options  # type: ignore
def _get_inner_type(return_type):
if not hasattr(return_type, 'of_type'):
return return_type
return _get_inner_type(return_type.of_type)
def _get_model_field(model: djm.Model, lookup: str) -> djm.Field:
    """Resolve a ``'__'``-separated lookup path to its final model field.

    Raises whatever ``Meta.get_field`` raises (e.g. ``FieldDoesNotExist``)
    when a path segment does not exist.
    """
    current_model = model
    resolved = None
    for segment in lookup.split('__'):
        resolved = current_model._meta.get_field(segment)
        # Step into the related model for the next segment (None for
        # non-relational fields, which is fine on the last segment).
        current_model = resolved.related_model
    assert resolved is not None
    return resolved
def _get_default_only_lookups(
        fieldname: str, model: djm.Model, related_query_name: str) -> typing.List[str]:
    """Return the default ``.only()`` lookup for a graphql field.

    Tries the field name verbatim, then its snake_case spelling, and
    returns the matching model field's ``attname`` as a one-element list,
    or an empty list when no concrete column matches.
    """
    field = None
    # NOTE(review): there is no break after a successful lookup, so when
    # both spellings resolve the snake_case one wins — confirm this
    # precedence is intended.
    for lookup in (
            _format_related_name(related_query_name, fieldname),
            _format_related_name(related_query_name,
                                 phrases_case.snake(fieldname)),
    ):
        try:
            field = _get_model_field(model, lookup)
        except djm.FieldDoesNotExist:
            continue
    # Relations without a concrete column expose no attname -> no lookup.
    if field is None or not hasattr(field, 'attname'):
        return []
    return [field.attname]
def _get_selection(ast: ast_.Node, fragments, is_recursive=True) -> typing.Iterator[ast_.Field]:
    """Yield the field nodes selected directly under *ast*.

    Args:
        ast: AST node to inspect (field, fragment definition/spread or
            inline fragment).
        fragments: Mapping of fragment name to its definition, as found
            on ``ResolveInfo.fragments``.
        is_recursive: True only for the initial call so the node itself
            is descended into; nested fields are yielded without further
            recursion, i.e. only one selection level is returned.

    Raises:
        ValueError: For unsupported AST node types.
    """
    # A direct child field: yield it and stop descending.
    if not is_recursive and isinstance(ast, ast_.Field):
        yield ast
        return
    if isinstance(ast, (ast_.Field, ast_.FragmentDefinition, ast_.InlineFragment)):
        # selection_set may be None for leaf fields -> iterate nothing.
        for i in ast.selection_set and ast.selection_set.selections or []:
            yield from _get_selection(i, fragments, is_recursive=False)
    elif isinstance(ast, ast_.FragmentSpread):
        # Spreads always expand the named fragment's own selections.
        yield from _get_selection(fragments[ast.name.value], fragments)
    else:
        raise ValueError(f'Unknown ast type: {ast}')
def _format_related_name(related_query_name, name):
if related_query_name is 'self':
return name
return f'{related_query_name}__{name}'
def _get_ast_optimization(ast, return_type, fragments, model, related_query_name='self') -> 'Optimization':
    """Compute queryset optimization lookups for one level of the query AST.

    Args:
        ast: Field AST node whose selections are inspected.
        return_type: Graphql return type of that field.
        fragments: Fragment mapping from the resolve info.
        model: Django model the queryset is built on.
        related_query_name: Lookup prefix; 'self' means the root model.

    Returns:
        Optimization: only/select/prefetch lookup lists, each entry
        prefixed with *related_query_name*.
    """
    inner_type = _get_inner_type(return_type)
    opt = get_optimization_option(inner_type.name)
    # BUG FIX: copy the type-level defaults. The `extend` calls below used
    # to mutate the very lists stored inside the global
    # OPTIMIZATION_OPTIONS registry, accumulating lookups across queries.
    ret: Optimization = {
        'only': list(opt['only'].get(None, [])),
        'select': list(opt['select'].get(None, [])),
        'prefetch': list(opt['prefetch'].get(None, [])),
    }
    for sub_ast in _get_selection(ast, fragments):
        fieldname = sub_ast.name.value
        # Explicit per-field option wins; otherwise fall back to the
        # column matching the field name.
        ret['only'].extend(opt['only'].get(
            fieldname) or _get_default_only_lookups(fieldname, model, related_query_name))
        ret['select'].extend(opt['select'].get(fieldname, []))
        ret['prefetch'].extend(opt['prefetch'].get(fieldname, []))
        _related_query_name = opt['related'].get(fieldname)
        if not _related_query_name:
            continue
        # Field maps to a relation: recurse into its sub-selection.
        _optimization = _get_ast_optimization(
            sub_ast,
            inner_type.fields[fieldname].type,
            fragments, model,
            _related_query_name
        )
        ret['only'].extend(_optimization['only'])
        ret['select'].extend(_optimization['select'])
        ret['prefetch'].extend(_optimization['prefetch'])
    # Prefix every collected lookup with this level's relation path.
    ret['only'] = [_format_related_name(
        related_query_name, i) for i in ret['only']]
    ret['select'] = [_format_related_name(
        related_query_name, i) for i in ret['select']]
    ret['prefetch'] = [_format_related_name(
        related_query_name, i) for i in ret['prefetch']]
    return ret
def _get_ast_and_return_type(
        info: graphql.execution.ResolveInfo,
        path: typing.Optional[typing.List[str]]
) -> typing.Tuple[graphql.GraphQLField, typing.Union[
    graphql.GraphQLList,
    graphql.GraphQLObjectType,
    graphql.GraphQLScalarType
]]:
    """Walk *path* down from the root field of the resolve info.

    Returns:
        Tuple of (field AST node, graphql return type) for the addressed
        field; with ``path`` falsy, the root field itself.

    Raises:
        StopIteration: When a path segment is not selected in the query.
    """
    ret = (info.field_asts[0], info.return_type)
    for fieldname in path or []:
        # BUG FIX: keep the matching Field AST node itself — the original
        # stored `i.name.value` (a plain string), which crashed
        # _get_selection on the next level — and descend into the field's
        # return type rather than keeping the GraphQLField wrapper.
        sub_ast = next(
            i for i in _get_selection(ret[0], info.fragments)
            if i.name.value == fieldname
        )
        # Unwrap NonNull/List wrappers before looking up sub-fields.
        ret = (sub_ast, _get_inner_type(ret[1]).fields[fieldname].type)
    return ret
def optimize(
        queryset: djm.QuerySet,
        info: graphql.ResolveInfo,
        path: typing.Optional[typing.List[str]] = None
) -> djm.QuerySet:
    """Optimize a queryset using resolve info and global optimization options.

    Args:
        queryset (djm.QuerySet): Queryset to optimize.
        info (graphql.ResolveInfo): Resolve info.
        path (typing.Optional[typing.List[str]]): Field path; ``None``
            means the root field.

    Returns:
        djm.QuerySet: Optimized queryset.
    """
    ast, return_type = _get_ast_and_return_type(info, path)
    optimization = _get_ast_optimization(
        ast, return_type, info.fragments, queryset.model)
    LOGGER.debug("Optimization queryset: optimization=%s, model=%s",
                 optimization, queryset.model)
    result = queryset
    select_lookups = optimization['select']
    prefetch_lookups = optimization['prefetch']
    if select_lookups:
        result = result.select_related(*select_lookups)
    if prefetch_lookups:
        result = result.prefetch_related(*prefetch_lookups)
    # select_related lookups must also be listed in only() so the joined
    # columns are actually loaded.
    return result.only(*optimization['only'], *select_lookups)
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (C) 2017 IBM Corporation
Licensed under the Apache License, Version 2.0 (the “License”);
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an “AS IS” BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Contributors:
* <NAME> <<EMAIL>>
* <NAME> <<EMAIL>>
* <NAME> <<EMAIL>>
* <NAME> <<EMAIL>>
"""
import sys
from argparse import ArgumentParser
from argparse import RawTextHelpFormatter
import pkg_resources
from . import core
from . import controller
__all__ = []
__version__ = pkg_resources.require("ma")[0].version
class CLIError(Exception):
    """Error treatment: wraps a CLI error message with an ``E:`` prefix."""

    def __init__(self, msg):
        self.msg = "E: %s" % msg
        # BUG FIX: the original called `super(CLIError).__init__(type(self))`,
        # a mis-bound super() that never initialized Exception with the
        # message; pass the formatted message instead.
        super(CLIError, self).__init__(self.msg)

    def __str__(self):
        return self.msg

    def __unicode__(self):
        # Python 2 compatibility shim; harmless under Python 3.
        return self.msg
def main(argv=None):
    """MA main function.

    Builds the command line interface (the ``run`` and ``info``
    subcommands), parses the arguments and hands them to the controller.

    Args:
        argv: extra arguments appended to sys.argv, or None to parse
            sys.argv unchanged.

    Returns:
        int: 0 on success, 1 when no subcommand was given or the run was
        interrupted with Ctrl-C.
    """
    if argv is None:
        argv = sys.argv
    else:
        sys.argv.extend(argv)
    program_version = "v%s" % __version__
    program_version_message = '%%(prog)s %s ' % (program_version)
    program_shortdesc = '''
--- Migration Advisor (MA) ---
Migrates C/C++ applications to POWER
'''
    try:
        parser = ArgumentParser(description=program_shortdesc,
                                formatter_class=RawTextHelpFormatter)
        parser.add_argument('-V', '--version',
                            action='version',
                            version=program_version_message)
        parser.add_argument('-v', '--verbose', action='store_true',
                            help='verbose output')
        subparsers = parser.add_subparsers(help='\nMA commands\n\n',
                                           dest='subcommand')
        # Arguments for the run subcommand
        parser_run = subparsers.add_parser(
            'run',
            formatter_class=RawTextHelpFormatter,
            help='analyze a given directory or file for possible C/C++\n'
                 'migration problems from x86_64 to Power\n'
                 'see ma run --help\n\n')
        parser_run.add_argument(
            dest='location',
            metavar='LOCATION',
            help='directory or file which will be analyzed\n\n',
            nargs=1)
        parser_run.add_argument(
            '-m', '--mode',
            dest='execution_mode',
            type=str,
            choices=['full', 'lightweight'], default='lightweight',
            help='specify the execution mode of MA which can be \'full\' or\n'
            '\'lightweight\'. The \'full\' mode looks for problems in all files\n'
            'located in a given directory. This option may take several minutes\n'
            'to complete. On the other hand the \'lightweight\' mode minimize\n'
            'the amount of files where the search for problems is executed by\n'
            'best guessing whether a file should or not be verified.\n'
            '    e.g: ma run --mode=full <location>\n\n')
        parser_run.add_argument(
            '-s', '--stat',
            dest='statistics',
            type=str,
            choices=['project', 'file'], default='',
            help='display migration statistics by project or per file and'
            '\nsuppresses the migraton report\n\n')
        parser_run.add_argument(
            '-c',
            '--checkers',
            dest='checkers',
            metavar='checker_1,...,checher_n',
            default=core.get_supported_checkers(),
            help='allows selecting a group of checkers which will be used to\n'
            'identify potential migration problems. By default MA will look for\n'
            'all problems. By activating this option you will be able to select\n'
            'a unique checker or a subset of checkers from the following list:\n'
            '    api: Linux/x86-specific API\n'
            '    asm: x86-specific assembly\n'
            '    builtin: x86-specific compiler built-in\n'
            '    char: Char usage\n'
            '    double: Long double usage\n'
            '    htm: Hardware Transaction Memory\n'
            '    performance: Performance degradation\n'
            '    pthread: Non-portable Pthreads implementation\n'
            '    syscall: Syscall not available for Linux on Power\n\n'
            '    Usage: ma run -c/--checkers <checkers> <location>\n'
            '    e.g: ma run -c/--checker htm <location>\n'
            '    e.g: ma run -c/--checker api,char,syscall <location>\n\n'
        )
        # Arguments for the info subcommand
        parser_info = subparsers.add_parser(
            'info',
            formatter_class=RawTextHelpFormatter,
            help='show information about supported migration checkers\n'
                 'see ma info --help\n\n')
        parser_info.add_argument(
            '-c',
            required=True,
            dest='checker_info',
            type=str,
            choices=core.get_supported_checkers(),
            help='\ndisplay information about the set of checkers that Migration\n'
            'Advisor uses to identify potential migration problems.\n\n'
            'The available checkers are:\n'
            '    api: Linux/x86-specific API\n'
            '    asm: x86-specific assembly\n'
            '    builtin: x86-specific compiler built-in\n'
            '    char: Char usage\n'
            '    double: Long double usage\n'
            '    htm: Hardware Transaction Memory\n'
            '    performance: Performance degradation\n'
            '    pthread: Non-portable Pthreads implementation\n'
            '    syscall: Syscall not available for Linux on Power\n\n'
            '    Usage: ma info -c <checker>\n'
            '    e.g: ma info -c asm\n\n'
        )
        # Process arguments
        args = parser.parse_args()
        # FIX: compare against None with 'is', not '=='.
        if args.subcommand is None:
            # No subcommand given: show usage and report failure.
            parser.print_help()
            return 1
        controller.run(args)
        return 0
    except KeyboardInterrupt:
        # Ctrl-C: exit quietly with an error status.
        return 1
# Run the CLI and propagate its return code as the process exit status.
if __name__ == "__main__":
    sys.exit(main())
|
# backgammon.py
import lib.dice as dice
import lib.state as state
import lib.stateTreeNode as stateTreeNode
import lib.learning as learning
import lib.plotResults as plotResults
import lib.move as move
from lib.strategies import linTransform as lin_transform_strategy
from lib.strategies import heuristic
from lib.strategies import randomMove
from lib.strategies import human as humanStrat
import backgammonTester
import copy
import math
import random
import datetime
import sys
import operator
stateList = []
def main():
    """Run programLoop repeatedly until the user declines to continue."""
    # FIX: renamed the loop flag -- 'next' shadowed the builtin.
    keep_going = True
    while keep_going:
        programLoop()
        next_in = raw_input("Continue? Y/y/Yes/yes: ")
        # Any answer other than Y/y/Yes/yes ends the session.
        keep_going = next_in in ("Y", "y", "Yes", "yes")
def programLoop():
print "Would you like to play against another person or the computer?"
print ""
print "0: two people to play each other"
print "1: human vs. computer"
print "2: comp vs. comp (strategy testing and simulation)"
print "3: simulation"
print "4: sim for learning "
print "5: calculate learning values"
print "6: plot results "
print "7: GUI "
option = raw_input("Please make your selection: ")
good_input = False
while (good_input != True):
try:
int(option)
good_input = True
continue
except:
option = raw_input("Please enter the appropriate option: ")
option = int(option)
if (option == 0):
# Human vs. Human
print_flag = 1
first_strat = 0
second_strat = 0
again = playMatch(15, first_strat, second_strat, print_flag, None, None)
while(again):
again = playMatch(15, first_strat, second_strat, print_flag, factors_list1, factors_list2)
elif (option == 1):
# Human v. Comp
print "What strategies would you like the computer to use? They are, currently: "
print "1: Random computer player"
print "2: My own, custom algorithm"
print "3: My own algo but customized to minimize opponent move values"
print "4: Learned algo (make sure to run learning.py first)"
print "... More to come ..."
good_input = False
print_flag = 1
first_strat = 0
factors_list1 = []
second_strat = raw_input("\n" + "Please enter the desired computer strategy: ")
while (good_input != True):
second_strat = int(second_strat)
if (second_strat == 4):
stratFile = raw_input("What is the name of the computer strat file? Press enter to use default ")
factors_list1 = loadStratFile(stratFile)
good_input = True
else:
good_input = True
again = playMatch(15, first_strat, second_strat, print_flag, factors_list1, None)
while(again):
again = playMatch(15, first_strat, second_strat, print_flag, factors_list1, None)
elif (option == 2):
# Comp v. Comp
print "What strategies would you like the computer to use? They are, currently: "
print "1: Random computer player"
print "2: My own, custom algorithm"
print "3: My own algo but customized to minimize opponent move values"
print "4: Learned algo (make sure to run learning.py first)"
print "... More to come ..."
good_input = False
print_flag = True
first_strat = raw_input("\n" + "Please enter the first desired computer strategy: ")
factors_list1 = factors_list2 = []
while (good_input != True):
first_strat = int(first_strat)
if (first_strat == 4):
stratFile = raw_input("What is the name of the computer strat file? Press enter to use default ")
factors_list1 = loadStratFile(stratFile)
good_input = True
else:
good_input = True
good_input = False
second_strat = raw_input("\n" + "Please enter the second desired computer strategy: ")
while (good_input != True):
second_strat = int(second_strat)
if (second_strat == 4):
stratFile = raw_input("What is the name of the computer strat file? Press enter to use default ")
factors_list2 = loadStratFile(stratFile)
good_input = True
else:
good_input = True
print first_strat
print factors_list1
print second_strat
print factors_list2
again = playMatch(15, first_strat, second_strat, print_flag, factors_list1, factors_list2)
while(again):
again = playMatch(15, first_strat, second_strat, print_flag, factors_list1, factors_list2)
elif (option == 3):
#On screen simulation
num_sims = raw_input("How many matches would you like to simulate? ")
try:
num_sims = int(num_sims)
except:
num_sims = int(raw_input("Please enter a # of sims: "))
print "What strategies would you like the computer to use? They are, currently: "
print "1: Random computer player"
print "2: My own, custom algorithm"
print "3: My own algo but customized to minimize opponent move values"
print "4: Learned algo (make sure to run learning.py first)"
print "... More to come ..."
first_strat = raw_input("Choice for comp 1: ")
second_strat = raw_input("Choice for comp 2: ")
factors_list1 = factors_list2 = []
good_input = False
while (good_input != True):
first_strat = int(first_strat)
if (first_strat == 4):
stratFile = raw_input("What is the name of the computer strat file? Press enter to use default ")
factors_list1 = loadStratFile(stratFile)
good_input = True
else:
good_input = True
good_input = False
while (good_input != True):
second_strat = int(second_strat)
if (second_strat == 4):
stratFile = raw_input("What is the name of the computer strat file? Press enter to use default ")
factors_list2 = loadStratFile(stratFile)
good_input = True
else:
good_input = True
#num_sims, fia, factor, mps
ppm = raw_input("How many points per match? ")
a = "_" + str(num_sims) +"_" + str(ppm) + "_" + str(first_strat) + "_" + str(second_strat)
#raw_input("wait")
#print "verify a, then re-run program"
simulateSession(first_strat, second_strat, num_sims, ppm, factors_list1, factors_list2, a)
elif (option == 4):
# Random strategy simulation to gather learning data
num_games = raw_input("How many strats would you like to simulate? ")
mps = raw_input("How many matches per strat? ")
ppm = raw_input("How many points per match? ")
name = raw_input("What would you like to name the output file? ")
generateSimulations(int(num_games), 12, 10, int(mps), int(ppm), name)
#print "change generateSimulations numer and restart program"
elif (option == 5):
#learning.py
inFile = raw_input("What file would you like to draw learning examples from? ")
sizeInput = raw_input("How many learning examples in the file? ")
outFile = raw_input("What would you like to call file containing the resulting list of weights? ")
print_flag_in = raw_input("Would you like to print intermediate steps and results -y/Y/yes/Yes?")
print_flag = False
if(print_flag_in == "y" or again_in == "Y" or again_in == "yes" or again_in == "Yes"):
print_flag = True
learning.learningFxn(inFile, int(sizeInput), outFile, print_flag)
elif (option == 6):
# plotResults.py
inFile = raw_input("What file would you like to plot algorithm values from? ")
factor = raw_input("What factor would you like to use? ")
good_input = False
while (good_input != True):
try:
f = int(factor)
good_input = True
except:
factor = raw_input("What factor would you like to use? ")
plotResults.plotResults(inFile, f)
elif (option == 7):
# backgammonTester.py
inFile = 'tryStratFile2001.txt'
backgammonTester.backgammonTester(inFile)
def loadStratFile(fileName):
print fileName
if (fileName == ""):
fileName = "tryStratFile2001.txt"
try:
try_strat_file = open(fileName, 'r')
except:
print "Bad strategy file name entered. Using default "
fileName = "tryStratFile2001.txt"
try_strat_file = open(fileName, 'r')
factors_list = []
num_games1 = try_strat_file.readline()
num_games1 = num_games1.rstrip("\n")
print num_games1
fia1 = try_strat_file.readline()
fia1 =fia1.rstrip("\n")
print fia1
mps1 = try_strat_file.readline()
mps1 = mps1.rstrip("\n")
print mps1
ppm1 = try_strat_file.readline()
ppm1 = ppm1.rstrip("\n")
print ppm1
line = try_strat_file.readline().rstrip(' ')
splitLine = line.split(' ')
print "Line: " + line
print splitLine
for item in splitLine:
factors_list.append(float(item))
print "factorsList: ",
print factors_list
return factors_list
def playMatch(num_points, first_strat, second_strat, print_flag, factors_list1, factors_list2):
white_points = 0
black_points = 0
while (white_points < 15 and black_points < 15):
winner, points = playSingleGame(first_strat, second_strat, print_flag, factors_list1, factors_list2)
if (winner == 0):
white_points = white_points + points
else:
black_points = black_points + points
if (print_flag):
print "White Points: " + str(white_points)
print "Black Points: " + str(black_points)
again_in = raw_input("Would you like to play again?\nEnter y/Y/yes/Yes for another game: ")
if(again_in == "y" or again_in == "Y" or again_in == "yes" or again_in == "Yes"):
again = True
else:
again = False
return again
def playSingleGame(first_strat, second_strat, print_flag, factors_list1, factors_list2):
    ''' Play a single game and report the result.

    Dispatches on the strategy codes (0 == human) to the matching driver.
    NOTE(review): the combination first_strat != 0 and second_strat == 0
    matches no branch and falls through with the initial (-1, 1) --
    presumably never used by callers; confirm.

    Returns (winner, points): winner 0 for player one ('o'), 1 for player
    two ('x'); points 1, 2 for a gammon or 3 for a backgammon.
    '''
    winner = -1
    points = 1
    if (first_strat == 0 and second_strat == 0): #Play 2 humans
        winner, points = playTwoHumans(first_strat, second_strat, print_flag)
    elif (first_strat == 0 and second_strat != 0): #Human vs. comp
        winner, points = playHumanVsComp(first_strat, second_strat, print_flag, factors_list1)
    elif(first_strat != 0 and second_strat != 0): #Comp vs. comp
        winner, points = playCompVsComp(first_strat, second_strat, print_flag, factors_list1, factors_list2)
    if (print_flag):
        # Announce the result, including gammon/backgammon scoring.
        if (winner == 0):
            if (points == 1):
                print "Player One ('o') was the winner."
            elif (points == 2):
                print "Player One ('o') was the winner with a gammon (2 points)."
            else:
                print "Player One ('o') was the winner with a backgammon (3 points)."
        else:
            if (points == 1):
                print "Player Two ('x') was the winner."
            elif (points == 2):
                print "Player Two ('x') was the winner with a gammon (2 points)."
            else:
                print "Player Two ('x') was the winner with a backgammon (3 points)."
    return(winner, points)
def playTwoHumans(first_strat, second_strat, print_flag):
    '''Function to manage gameplay between 2 humans.

    Plays the opening turn with the roll embedded in the initial state,
    then loops: roll, switch sides, play a turn, check for a winner.
    Returns (winner, points) with gammon/backgammon scoring applied.
    '''
    #set up initial parameters
    die = dice.oneDie(6)
    state = createInitialState(die)
    winner = -1
    # Opening turn uses the roll that createInitialState seeded.
    playTurn(state, first_strat, print_flag)
    while (winner == -1):
        roll = die.rollDie()
        state.updateRoll(roll)
        state.switchTurn()
        playTurn(state, second_strat, print_flag)
        winner = state.testGameOver()
    points = state.checkGammon(winner)
    return (winner, points)
def playHumanVsComp(first_strat, second_strat, print_flag, factors_list):
    '''Play one game of human (white, turn == 0) vs. computer (black).

    The opening turn goes to whichever side the initial state assigned;
    after that, turns alternate: roll, switch sides, play, check winner.
    Returns (winner, points).
    '''
    #set up initial parameters
    die = dice.oneDie(6)
    state = createInitialState(die)
    winner = -1
    if (state.turn == 0):
        # White/Human player goes first
        playTurn(state, first_strat, print_flag)
    else:
        #Comp first
        print "Comp first"
        playCompTurn(state, second_strat, print_flag, factors_list)
    while (winner == -1):
        roll = die.rollDie()
        state.updateRoll(roll)
        state.switchTurn()
        if (state.turn == 0):
            # Human turn: print flag is hard-coded to 1 so the human
            # always sees the board.
            playTurn(state, first_strat, 1)
        else:
            playCompTurn(state, second_strat, print_flag, factors_list)
        winner = state.testGameOver()
        if (winner == -1):
            # Show the updated board between turns while the game is live.
            state.printState()
    points = state.checkGammon(winner)
    return (winner, points)
def playCompVsComp(first_strat, second_strat, print_flag, factors_list1, factors_list2):
    '''Play one game between two computer strategies.

    factors_list1/factors_list2 carry learned weights used when the
    matching strategy code is 4; other strategies ignore them.
    Returns (winner, points).
    '''
    #set up initial parameters
    die = dice.oneDie(6)
    state = createInitialState(die)
    winner = -1
    # Opening turn for whichever side the initial state selected.
    if (state.turn == 0):
        playCompTurn(state, first_strat, print_flag, factors_list1)
    else:
        playCompTurn(state, second_strat, print_flag, factors_list2)
    #raw_input("wait")
    while (winner == -1):
        # state.printState()
        roll = die.rollDie()
        state.updateRoll(roll)
        state.switchTurn()
        if (state.turn == 0):
            playCompTurn(state, first_strat, print_flag, factors_list1)
        elif(state.turn == 1):
            playCompTurn(state, second_strat, print_flag, factors_list2)
        winner = state.testGameOver()
    points = state.checkGammon(winner)
    return (winner, points)
def playCompTurn(state, strat, print_flag, factors_list):
    '''Determine computer moves depending on the desired strategy.

    Strategy codes: 4 = learned weights (factors_list), 3 = state-tree
    search, 2 = heuristic algorithm, 1 = random mover.
    '''
    strat_num = int(strat)
    if (strat_num == 1):
        # Random mover: delegate to playTurn with printing disabled there.
        if (print_flag):
            state.printState()
        playTurn(state, strat_num, False)
        return
    # Map the remaining strategy codes to their move generators.
    move_generators = {
        4: lambda: lin_transform_strategy.playStratCompTurn(state, factors_list),
        3: lambda: heuristic.moveWithStateTree(state),
        2: lambda: heuristic.playStrategicCompTurn(state),
    }
    if (strat_num in move_generators):
        if (print_flag):
            state.printState()
        state.updateFromState(move_generators[strat_num]())
def playTurn(state, num_flag, print_mode):
    '''Plays one turn for a human (num_flag == 0) or random mover (num_flag == 1).

    Repeatedly requests a move, validates it, applies it to the board
    (capturing lone opposing pieces to the jail slots) and consumes the
    matching die value, until no valid moves remain for this roll.
    Mutates state in place (board, remaining roll, pip count).
    '''
    # Snapshot the pre-turn state; stateList is module-global and only
    # appended here -- presumably consumed elsewhere as training data,
    # TODO confirm.
    stateList.append(copy.deepcopy(state))
    if (num_flag == 0 or num_flag == 1):
        val_moves = state.existValidMoves()
        if (val_moves == False):
            #stateList.append(copy.deepcopy(state))
            if (print_mode):
                print "No valid moves"
                state.printState()
        while (val_moves == True):
            #stateList.append(copy.deepcopy(state))
            if (print_mode):
                state.printState()
            valid_move = False
            while (valid_move == False):
                # Generate player moves and check if they are valid
                if (num_flag == 0): #Human Player
                    space_to_valid = humanStrat.playHumanTurn(state)
                elif (num_flag == 1): #Random computer player
                    space_to_valid = randomMove.playRandCompTurn(state)
                valid_move = space_to_valid[0]
                if (valid_move != True and state.turn == 0):
                    # If invalid, print relevant error
                    if (print_mode):
                        state.printError(space_to_valid[3])
            # Assign validated move values to the actual move variables.
            space_from = space_to_valid[1]
            space_to = space_to_valid[2]
            move_dist = space_to_valid[3]
            # Execute move: white pieces are stored as positive counts and
            # black as negative, so leaving a point moves the count toward 0.
            if (state.turn): #Black
                state.board[space_from] = state.board[space_from] + 1
            else: #White
                state.board[space_from] = state.board[space_from] - 1
            # Capture a lone opponent piece and put it in jail; board[26]
            # accumulates captured white pieces (+) and board[27] captured
            # black pieces (-).
            if ((state.board[space_to] < 0 and state.turn == False) or \
                (state.board[space_to] > 0 and state.turn == True)):
                if (int(math.fabs(state.board[space_to])) == 1):
                    if (state.turn): #Black
                        state.board[26] = state.board[26] + 1
                    else: #White
                        state.board[27] = state.board[27] - 1
                    state.board[space_to] = 0
            # Land the moving piece on the destination point.
            if (state.turn): #Black
                state.board[space_to] = state.board[space_to] - 1
            else: #White
                state.board[space_to] = state.board[space_to] + 1
            #print state.roll
            # Consume the die value used by this move.
            state.roll.remove(move_dist)
            #print state.roll
            state.updatePipCount()
            val_moves = state.existValidMoves()
            #print val_moves
        if (print_mode):
            state.printState()
def simulateSession(first_strat, second_strat, number_matches, points_per_match, factors_list1, factors_list2, a):
    '''Simulates a given number of matches and keeps track of results.

    Plays number_matches matches of points_per_match points between the
    two strategies (silently), prints per-match winners and margins, and
    reports which side won the session overall.

    NOTE(review): the session file is opened in append mode and closed,
    but every write to it is commented out, so nothing is ever recorded.
    '''
    #ppm = points per match
    #name_learning_file = raw_input("What is the name of the learning file: ")
    #a = a + name_learning_file
    ppm = int(points_per_match)
    fname = "simSessionFile" + a + ".txt"
    sim_session_file = open(fname, 'a')
    matches_won_by_white = 0
    matches_won_by_black = 0
    match_score_string = ""
    for x in range(0, number_matches):
        white_score = 0
        black_score = 0
        # One match: first side to ppm points wins.
        while (white_score < ppm and black_score < ppm):
            winner, points = playCompVsComp(first_strat, second_strat, False, factors_list1, factors_list2)
            if (winner == 0):
                white_score = white_score + points
            elif(winner == 1):
                black_score = black_score + points
        print "Match " + str(x + 1) + " completed - ",
        #print winner
        if (white_score > black_score):
            print "White won"
            matches_won_by_white += 1
        else:
            print "Black won"
            matches_won_by_black += 1
        # Signed point margin for this match (positive favors white).
        val = str(white_score - black_score) + "\n"
        print val,
        match_score_string = match_score_string + val
    if (matches_won_by_white > matches_won_by_black):
        print "White wins"
    else:
        print "Black wins"
    fp1 = "White's score for this round was: " + str(matches_won_by_white) + \
        " while playing " + str(first_strat)
    fp2 = "Black's score for this round was: " + str(matches_won_by_black) + \
        " while playing " + str(second_strat)
    print fp1
    print fp2
    #sim_session_file.write("learning w/ " + num_games + "\n")
    # sim_session_file.write(match_score_string)
    # sim_session_file.write(fp1 + "\n" + fp2 + "\n")
    sim_session_file.close()
def generateSimulations(num_sims, fia, factor, mps, ppm, name):
    ''' Generate and run random simulations to generate data for learning.

    For each of num_sims iterations: draw a random weight list, play mps
    matches (to ppm points each) against the random strategy and against
    the heuristic strategy, and append the scores plus the weights to the
    output file.

    NOTE(review): the local 'die' below is created but never used (and is
    built without the side count passed elsewhere, oneDie(6)) -- confirm
    before removing.  The inner match loops also reuse the outer loop
    variable 'x'; harmless since the outer for reassigns it, but confusing.
    '''
    #fia == factors_in_algo
    #mps == matches_per_strat
    #ppm = points per match
    if (name == ""):
        a = "_" + str(num_sims) +"s_" + str(mps) + "m_" + str(ppm) + "p"
        fname = "stratListviaGen" + a + ".txt"
    else:
        fname = name + ".txt"
    good_strats_file = open(fname, "w")
    # Header: the parameters that produced this file.
    good_strats_file.write(str(num_sims) + "\n")
    good_strats_file.write(str(fia) + "\n")
    good_strats_file.write(str(factor) + "\n")
    good_strats_file.write(str(mps) + "\n")
    good_strats_file.write(str(ppm) + "\n")
    #factors_file = open("factors_file.txt", "w")
    die = dice.oneDie()
    for x in range(0, num_sims):
        matches_won_by_white = 0
        matches_won_by_black = 0
        rand_matches_won_by_white = 0
        rand_matches_won_by_black = 0
        agg_match_score = 0
        agg_rand_match_score = 0
        print "Strategy " +str(x+1) + " of " + str(num_sims)
        #For each simulation num_sim random strategies will be created
        # Two concatenated weight sets -- presumably one per player; confirm
        # against lin_transform_strategy.
        factors_list = []
        for i in range(0, 2):
            fl = genFactorsList(fia, factor)
            for item in fl:
                factors_list.append(item)
        # Matches against the random strategy (code 1).
        for x in range(0, mps):
            black_rand_points = 0
            white_rand_points = 0
            while (black_rand_points < ppm and white_rand_points < ppm):
                winnerRand, points = playCompVsComp(4, 1, False, factors_list, None)
                if (winnerRand == 1):
                    black_rand_points = black_rand_points + points
                else:
                    white_rand_points = white_rand_points + points
            if (white_rand_points >= ppm):
                rand_matches_won_by_white += 1
            else:
                rand_matches_won_by_black += 1
            print (white_rand_points, black_rand_points)
            agg_rand_match_score = agg_rand_match_score + (white_rand_points - black_rand_points)
        print "vs. random: " + str(rand_matches_won_by_white) + " " + str(agg_rand_match_score)
        # Matches against the heuristic strategy (code 2).
        for x in range(0, mps):
            white_points = 0
            black_points = 0
            while (white_points < ppm and black_points < ppm):
                winner, points = playCompVsComp(4, 2, False, factors_list, None)
                if (winner == 1):
                    black_points = black_points + points
                else:
                    white_points = white_points + points
            if (white_points >= ppm):
                matches_won_by_white += 1
            else:
                matches_won_by_black += 1
            print (white_points, black_points)
            agg_match_score = agg_match_score + (white_points - black_points)
        print "vs. strat: " + str(matches_won_by_white) + " " + str(agg_match_score)
        # Record this strategy's results and its weights.
        good_strats_string = ""
        good_strats_string = good_strats_string + "Points vs. Random: " + str(rand_matches_won_by_white) + " " + str(agg_rand_match_score) + "\n"
        good_strats_string = good_strats_string + "Points vs. Strat: " + str(matches_won_by_white) + " "+ str(agg_match_score) + "\n"
        #good_strats_file.write("Points vs. Random: " + str(white_rand_points) + " " + str(black_rand_points) + "\n")
        #good_strats_file.write("Points vs. Strat: " + str(white_points) + " "+ str(black_points) + "\n")
        #print "Points vs. Random: " + str(rand_matches_won_by_white) + " " + str(agg_rand_match_score)
        #print "Points vs. Strat: " + str(matches_won_by_white) + " "+ str(agg_match_score)
        for item in factors_list:
            #print str(item) + ", "
            good_strats_string = good_strats_string + str(item) + " "
        good_strats_string = good_strats_string + "\n\n\n"
        print good_strats_string
        good_strats_file.write(good_strats_string)
        good_strats_file.flush()
    good_strats_file.close()
def genFactorsList(fia, factor):
    """Generate a list of fia random strategy weights in [0, factor).

    Args:
        fia: number of factors in the algorithm (list length).
        factor: exclusive upper bound scaling each random weight.

    Returns:
        list of float weights.
    """
    # BUGFIX: honour the requested length instead of a hard-coded 12 (all
    # existing callers pass fia == 12, so behavior is unchanged for them),
    # and drop the old self-assignment no-ops on indices 1 and 3.
    return [factor * random.random() for _ in range(fia)]
def createInitialState(die):
    '''Build the opening game state from a die roll-off.

    goesFirst() decides which player opens and with what roll; those two
    values seed the initial state object.
    '''
    opener = die.goesFirst()
    return state.state(opener[0], opener[1])
# Entry point when executed as a script.
if __name__ == "__main__":
    main()
|
# scripts/zone_analysis.py
"""
Analysis zones:
- vicinity of new metro stations, defined by walking distance to station
- vicinity of old metro stations, --||--
- areas relying on feeder bus (west metro), areas not in vicinity of metro stations and with direct bus to Kamppi
- commuter train station vicinities
- other areas
"""
# TODO: implement feeder bus zones based on polygon
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
import matplotlib.lines as mlines
import matplotlib as mpl
from matplotlib import colors as mcolors
import itertools
import numpy as np
import pickle
import gtfspy.smopy_plot_helper
from gtfspy.gtfs import GTFS
from gtfspy.util import difference_of_pandas_dfs, makedirs
from gtfspy.mapviz_using_smopy_helper import plot_stops_with_categorical_attributes
from scripts.all_to_all_settings import *
from scripts.all_to_all_analyzer import AllToAllDifferenceAnalyzer
from gtfspy.routing.journey_data_analyzer import JourneyDataAnalyzer
from scripts.all_to_all_analyzer import stops_to_exclude
# ["#8dd3c7", "#ffffb3", "#bebada", "#fb8072", "#80b1d3"]
zone_colors = ["#66c2a5", "#fc8d62", "#8da0cb", "#e78ac3", "#a6d854"]
def analysis_zones(as_dict=False):
    """
    returns data containers that pair zone type to a set of stops
    :param as_dict: if True, return a dict keyed by zone name instead of a
        list of (name, dataframe) tuples
    :return: (all_dfs, all_stops) where all_stops concatenates the zone
        dataframes with a stop_cat column (1 old metro .. 5 other)
    """
    gtfs_old = GTFS(OLD_DICT["gtfs_dir"])
    gtfs_lm = GTFS(LM_DICT["gtfs_dir"])
    # Radius around stations -- presumably meters of walking distance,
    # TODO confirm the unit expected by get_stops_within_distance.
    station_distance = 600
    upstream_ratio = 0.5
    # Route type 1 = metro, per the variables these feed below.
    df_old = gtfs_old.get_stops_for_route_type(1)
    df_lm = gtfs_lm.get_stops_for_route_type(1)
    # NOTE(review): difference_of_pandas_dfs appears to return rows of its
    # second argument absent from the first -- confirm against gtfspy.util.
    new_metro = difference_of_pandas_dfs(df_old, df_lm, ["stop_I"])
    old_metro = difference_of_pandas_dfs(new_metro, df_lm, ["stop_I"])
    train = gtfs_lm.get_stops_for_route_type(2)
    feeder_area = pd.DataFrame()
    other_stops = gtfs_lm.stops()
    jda = JourneyDataAnalyzer(LM_DICT["journey_dir"], LM_DICT["gtfs_dir"])
    # jda = JourneyDataAnalyzer(OLD_DICT["journey_dir"], OLD_DICT["gtfs_dir"])
    areas_to_remove = stops_to_exclude(return_sqlite_list=False)
    # Feeder area: stops whose journeys to stop 1040 mostly pass through the
    # new metro stations (upstream_ratio threshold).
    df = jda.get_upstream_stops_ratio(1040, [str(i.stop_I) for i in new_metro.itertuples()], upstream_ratio)
    feeder_area = feeder_area.append(df)
    # df = jda.get_upstream_stops_ratio(7193, 563, 0.7)
    # Expand each station set to every stop within walking distance.
    # NOTE(review): DataFrame.append is deprecated in modern pandas.
    print("new metro")
    for i in new_metro.itertuples():
        df = gtfs_lm.get_stops_within_distance(i.stop_I, station_distance)
        new_metro = new_metro.append(df)
    print("old metro")
    for i in old_metro.itertuples():
        df = gtfs_lm.get_stops_within_distance(i.stop_I, station_distance)
        old_metro = old_metro.append(df)
    print("train")
    for i in train.itertuples():
        df = gtfs_lm.get_stops_within_distance(i.stop_I, station_distance)
        train = train.append(df)
    new_metro = new_metro.drop_duplicates().reset_index(drop=True)
    old_metro = old_metro.drop_duplicates().reset_index(drop=True)
    train = train.drop_duplicates().reset_index(drop=True)
    feeder_area = feeder_area.drop_duplicates().reset_index(drop=True)
    # cleaning up borders: make the zones mutually exclusive, with priority
    # old metro > new metro > train > feeder > other.
    new_metro = difference_of_pandas_dfs(old_metro, new_metro, ["stop_I"])
    for zone in [new_metro, old_metro, areas_to_remove]:
        train = difference_of_pandas_dfs(zone, train, ["stop_I"])
    for zone in [new_metro, train, old_metro, areas_to_remove]:
        feeder_area = difference_of_pandas_dfs(zone, feeder_area, ["stop_I"])
    spec_areas = pd.concat([new_metro, old_metro, train, feeder_area, areas_to_remove])
    other_stops = difference_of_pandas_dfs(spec_areas, other_stops, ["stop_I"])
    # Category codes used for coloring/plotting downstream.
    old_metro = old_metro.assign(stop_cat=1)
    new_metro = new_metro.assign(stop_cat=2)
    train = train.assign(stop_cat=3)
    feeder_area = feeder_area.assign(stop_cat=4)
    other_stops = other_stops.assign(stop_cat=5)
    all_stops = pd.concat([new_metro, old_metro, train, feeder_area, other_stops]).reset_index(drop=True)
    if as_dict:
        all_dfs = {"new_metro_stations": new_metro,
                   "feeder_bus_area": feeder_area,
                   "old_metro_stations": old_metro,
                   "commuter_train_stations": train,
                   "other_stops": other_stops}
    else:
        all_dfs = [("new_metro_stations", new_metro),
                   ("feeder_bus_area", feeder_area),
                   ("old_metro_stations", old_metro),
                   ("commuter_train_stations", train),
                   ("other_stops", other_stops)]
    return all_dfs, all_stops
def zone_map(img_dir=None, targets=True):
    """Plot every zone's stops on a map, colored by category, and save a PDF.

    :param img_dir: output directory; defaults to the hard-coded heatmaps dir
    :param targets: currently unused -- the target-marker code below is
        commented out
    """
    # zone map
    all_dfs, _ = analysis_zones()
    # Reverse the zone order so "other stops" is drawn first (bottom layer).
    all_lats = [x[1]["lat"] for x in reversed(all_dfs)]
    all_lons = [x[1]["lon"] for x in reversed(all_dfs)]
    all_cats = [x[1]["stop_cat"] for x in reversed(all_dfs)]
    all_labels = [x[0].replace("_", " ") for x in reversed(all_dfs)]
    #fig = plt.figure()
    ax = plot_stops_with_categorical_attributes(all_lats, all_lons, all_cats, labels=all_labels,
                                                spatial_bounds=SPATIAL_BOUNDS,
                                                colors=zone_colors,
                                                s=20)
    #print(list((x[0] for x in reversed(all_dfs))))
    ax.legend(scatterpoints=1,
              loc='upper left',
              ncol=1,
              fontsize=8)
    """,
              #(x[0] for x in reversed(all_dfs)),
              scatterpoints=1,
              loc='upper left',
              ncol=1,
              fontsize=8)
    """
    gtfs_old = GTFS(OLD_DICT["gtfs_dir"])
    """
    for name, stop_I in TARGET_DICT.items():
        lat, lon = gtfs_old.get_stop_coordinates(stop_I)
        #ax.scatter(lon, lat, s=30, c='green', marker='X')
        ax.text(lon, lat, TARGET_LETTERS[name], size=7, color='black') #,
                #bbox={'facecolor': 'black', 'alpha': 0.5, 'pad': 0})
    """
    if not img_dir:
        img_dir = "/home/clepe/production/results/helsinki/figs/all_to_all/heatmaps"
    # NOTE(review): 'os' is not imported explicitly in this module --
    # presumably provided by the star import from all_to_all_settings;
    # confirm.
    plt.savefig(os.path.join(img_dir,
                             "study_areas"+".pdf"), format="pdf", dpi=300, bbox_inches='tight')
def get_combinations(a2aa, measure="mean", mode="temporal_distance", rerun=True, unit="s"):
    """
    Returns rows for each combination of zone type
    :param a2aa: analyzer providing get_rows_based_on_stop_list
    :param measure: aggregate to fetch (e.g. "mean")
    :param rerun: recompute and re-pickle when True, otherwise load the pickle
    :param mode: measure mode, e.g. "temporal_distance"
    :param unit: unit passed through to the analyzer
    :return: (combinations, dfs, all_dfs)
    """
    all_dfs, _ = analysis_zones()
    # BUGFIX: materialize the product into a list. The previous itertools
    # generator was exhausted by the rerun loop below, so callers iterating
    # the returned value (e.g. heatmap_singles) saw an empty sequence.
    combinations = list(itertools.product(all_dfs, all_dfs))
    dfs = {}
    pickle_path = os.path.join(makedirs("/home/clepe/production/results/helsinki/figs/all_to_all/heatmaps"),
                               "dataframe.pickle")
    if rerun:
        for ((i_name, i), (j_name, j)) in combinations:
            dfs[(i_name, j_name)] = a2aa.get_rows_based_on_stop_list(i["stop_I"], j["stop_I"], measure, mode, unit=unit)
        # Close the pickle handle deterministically.
        with open(pickle_path, 'wb') as f:
            pickle.dump(dfs, f, -1)
    else:
        with open(pickle_path, 'rb') as f:
            dfs = pickle.load(f)
    return combinations, dfs, all_dfs
def get_combinations_and_to_all(a2aa, measure="mean", mode="temporal_distance", rerun=True, unit="s"):
    """
    Returns rows for each combination of zone type, plus each zone vs. all stops
    :param a2aa: analyzer providing get_rows_based_on_stop_list
    :param measure: aggregate to fetch (e.g. "mean")
    :param rerun: recompute and re-pickle when True, otherwise load the pickle
    :param mode: measure mode, also used in the pickle file name
    :param unit: unit passed through to the analyzer
    :return: (combinations, dfs, row_dfs, col_dfs)
    """
    all_dfs, all_stops = analysis_zones()
    row_dfs = all_dfs
    col_dfs = [("all_stops", all_stops)] + all_dfs
    # BUGFIX: materialize the product into a list. The previous itertools
    # generator was exhausted by the rerun loop below, so callers iterating
    # the returned value saw an empty sequence.
    combinations = list(itertools.product(row_dfs, col_dfs))
    #print([(c[0][0],c[1][0]) for c in combinations])
    dfs = {}
    pickle_path = os.path.join(makedirs("/home/clepe/production/results/helsinki/figs/all_to_all/heatmaps"),
                               mode+"_c_and_all_dataframe.pickle")
    if rerun:
        for ((i_name, i), (j_name, j)) in combinations:
            dfs[(i_name, j_name)] = a2aa.get_rows_based_on_stop_list(i["stop_I"], j["stop_I"], measure, mode, unit=unit)
        # Close the pickle handle deterministically.
        with open(pickle_path, 'wb') as f:
            pickle.dump(dfs, f, -1)
    else:
        with open(pickle_path, 'rb') as f:
            dfs = pickle.load(f)
    return combinations, dfs, row_dfs, col_dfs
def get_zone_to_all(a2aa, measure_mode, measure="mean", rerun=True):
    """
    Returns rows from each zone to the full stop set, keyed per zone and
    "before"/"after"
    :param a2aa: analyzer providing get_rows_based_on_stop_list
    :param measure_mode: measure mode, also used in the pickle file name
    :param measure: aggregate to fetch (e.g. "mean")
    :param rerun: recompute and re-pickle when True, otherwise load the pickle
    :return: (dfs, all_dfs) with dfs keyed by (zone_name, "before"/"after")
    """
    all_dfs, all_stops = analysis_zones()
    dfs = {}
    pickle_path = os.path.join(makedirs("/home/clepe/production/results/helsinki/figs/all_to_all/heatmaps"),
                               measure_mode+"_z2a_dataframe.pickle")
    if rerun:
        for ba in ["before", "after"]:
            # NOTE(review): 'ba' is never passed to the analyzer call below,
            # so the "before" and "after" entries receive identical data --
            # confirm whether a2aa should be told which snapshot to query.
            for (i_name, i) in all_dfs:
                dfs[(i_name, ba)] = a2aa.get_rows_based_on_stop_list(i["stop_I"], all_stops["stop_I"], measure,
                                                                     measure_mode, unit="s")
        pickle.dump(dfs, open(pickle_path, 'wb'), -1)
    else:
        dfs = pickle.load(open(pickle_path, 'rb'))
    return dfs, all_dfs
def old_vs_change_scatter_colored_by_zone(a2aa, measure="temporal_distance", file_id=None, img_dir=None):
    """Scatter before-MTT vs. change-in-MTT per stop, colored by zone.

    :param a2aa: analyzer providing get_mean_change
    :param measure: measure passed through to get_mean_change
    :param file_id: optional suffix for the output file name
    :param img_dir: output directory; defaults to the heatmaps dir
    """
    all_dfs, _ = analysis_zones()
    # FIX: removed the unused 'all_stops' accumulator and its dead loop.
    fig = plt.figure()
    plt.title("", fontsize=20)
    ax = fig.add_subplot(111)
    # Draw zones in reverse order so the special zones end up on top;
    # values are seconds, converted to minutes for the axes.
    for (name, df), c in zip(reversed(all_dfs), zone_colors):
        df = a2aa.get_mean_change(measure, include_list=df["stop_I"])
        ax.scatter(df["before"].apply(lambda x: x/60), df["diff_mean"].apply(lambda x: x/60), label=name, c=c, alpha=0.1)
    ax.set_xlim([40, 120])
    ax.set_ylim([-20, 20])
    ax.legend()
    plt.xlabel("Before MTT, minutes")
    plt.ylabel("Change in MTT, minutes")
    if not img_dir:
        img_dir = "/home/clepe/production/results/helsinki/figs/all_to_all/heatmaps"
    if not file_id:
        file_id = ''
    plt.savefig(os.path.join(img_dir,
                             "before_vs_change_scatter_"+str(file_id)+".pdf"), format="pdf", dpi=300)
def heatmap_singles(a2aa, measure="mean", rerun=True, img_dir=None):
    """Save one 2D "before vs. change" histogram heatmap per zone pair.

    :param a2aa: analyzer object feeding get_combinations
    :param measure: statistic column prefix (e.g. "mean" -> "before_mean"/"diff_mean")
    :param rerun: recompute dataframes instead of loading cached pickles
    :param img_dir: output directory; falls back to a hard-coded results path
    """
    combinations, dfs, all_dfs = get_combinations(a2aa, measure=measure, rerun=rerun)
    for ((i_name, i), (j_name, j)) in combinations:
        df = dfs[(i_name, j_name)]
        print(i_name, j_name)
        xedges = range(0, 90, 1)
        yedges = range(-30, 30, 1)
        H, xedges, yedges = np.histogram2d(df["before_"+measure], df["diff_"+measure], bins=(xedges, yedges))
        H = H.T  # Let each row list bins with common y range.
        fig = plt.figure(figsize=(7, 7))
        ax = fig.add_subplot(111, title=i_name+" to "+j_name)
        plt.xlabel("before " + measure + " temporal distance (s)")
        plt.ylabel("after-before " + measure + " temporal distance (s)")
        # fix: imshow's origin must be 'lower' — 'low' is not a valid value and
        # raises on current matplotlib versions.
        plt.imshow(H, interpolation='nearest', origin='lower', extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])
        if not img_dir:
            img_dir = "/home/clepe/production/results/helsinki/figs/all_to_all/heatmaps"
        plt.savefig(os.path.join(img_dir,
                                 "diff_"+i_name+"-"+j_name+".pdf"), format="pdf", dpi=300)
def heatmap_matrix(a2aa, measure="mean", mode="temporal_distance", rerun=True, img_dir=None, fig_name=None):
    """Save a grid of 2D "before vs. change" heatmaps, one cell per zone pair.

    Rows are "from" zones, columns are "to" zones.

    :param a2aa: analyzer object feeding get_combinations
    :param measure: statistic column prefix (e.g. "mean")
    :param mode: "temporal_distance" (minute-scale bins) or a transfer-count mode
    :param rerun: recompute dataframes instead of loading cached pickles
    :param img_dir: output directory; falls back to a hard-coded results path
    :param fig_name: optional suffix for the output file name
    """
    combinations, dfs, all_dfs = get_combinations(a2aa, measure=measure, mode=mode, rerun=rerun, unit="s")
    width = 10
    height = 8
    fig, axes2d = plt.subplots(nrows=len(all_dfs), ncols=len(all_dfs), figsize=(width, height))
    for i, row in enumerate(axes2d):
        for j, cell in enumerate(row):
            i_name, i_df = all_dfs[i]
            j_name, j_df = all_dfs[j]
            df = dfs[(i_name, j_name)]
            print(i_name, j_name)
            if mode == "temporal_distance":
                xedges = range(0, 90, 1)
                yedges = range(-30, 30, 1)
            else:
                xedges = range(0, 4, 1)
                yedges = range(-2, 2, 1)
            H, xedges, yedges = np.histogram2d(df["before_"+measure], df["diff_"+measure], bins=(xedges, yedges))
            # Transposed so each image row shares a common y range.
            # fix: imshow's origin must be 'lower' — 'low' is not a valid value.
            cell.imshow(H.T, interpolation='nearest', origin='lower',
                        extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])
            cell.set_xlim(min(xedges), max(xedges))
            cell.set_ylim(min(yedges), max(yedges))
            cell.yaxis.tick_right()
            cell.xaxis.set_label_position('top')
            if i == len(axes2d) - 5:
                cell.set_xlabel(j_name)
            if not i == len(axes2d)-1:
                cell.set_xticks([])
            if j == 0:
                cell.set_ylabel(i_name)
            if not j == 4:
                cell.set_yticks([])
    fig.text(0.5, 0.04, "Travel time, before (minutes)", ha='center')
    fig.text(0.04, 0.5, "Difference in travel time (minutes)", va='center', rotation='vertical')
    fig.text(0.5, 1-0.04, "To", ha='center')
    fig.text(1-0.04, 0.5, "From", va='center', rotation=-90)
    fig.tight_layout()
    if not img_dir:
        img_dir = "/home/clepe/production/results/helsinki/figs/all_to_all/heatmaps"
    # fix: fig_name defaults to None, which would raise TypeError on concatenation.
    plt.savefig(os.path.join(img_dir,
                             "diff_heatmap_matrix" + (fig_name or '') + ".png"), format="png", dpi=300)
def histogram_matrix(a2aa, rerun, measure="mean", mode="temporal_distance", img_dir=None, fig_name=None):
    """Save a grid of change histograms: rows are "from" zones, columns "to" zones.

    Column index 1 is an empty spacer separating the first column from the matrix.

    :param a2aa: analyzer object feeding get_combinations_and_to_all
    :param rerun: recompute dataframes instead of loading cached pickles
    :param measure: statistic column prefix (e.g. "mean" -> "diff_mean")
    :param mode: "temporal_distance" (minute bins) or a transfer-count mode
    :param img_dir: output directory; falls back to a hard-coded results path
    :param fig_name: optional suffix for the output file name
    """
    if mode == "temporal_distance":
        yedges = range(-20, 20, 1)
        unit = "m"
    else:
        yedges = np.linspace(-3, 3, 40)
        unit = "s"
    combinations, dfs, row_dfs, col_dfs = get_combinations_and_to_all(a2aa, measure=measure, mode=mode, rerun=rerun,
                                                                      unit=unit)
    fig, axes2d = plt.subplots(nrows=len(row_dfs), ncols=len(col_dfs) + 1, figsize=(10, 8),
                               gridspec_kw={'width_ratios': [1, 0.5, 1, 1, 1, 1, 1]})
    fig.subplots_adjust(hspace=0.04, wspace=0.04)
    for i, row in enumerate(axes2d):
        for j, cell in enumerate(row):
            if j == 1:
                # spacer column
                cell.remove()
                continue
            if j > 1:
                # shift index so col_dfs lines up after the spacer
                j = j - 1
            i_name, _ = row_dfs[i]
            j_name, _ = col_dfs[j]
            print(dfs.keys())
            df = dfs[(i_name, j_name)]
            print(i_name, j_name)
            # drop infinite (unreachable) and missing values before binning
            df = df.replace([np.inf, -np.inf], np.nan)
            df = df.dropna()
            cell.yaxis.tick_right()
            # fix: `normed` was removed from matplotlib's hist(); `density=True` is the equivalent.
            n, bins, patches = cell.hist(np.array(df["diff_" + measure]), bins=yedges, density=True, facecolor='green', alpha=0.75)
            cell.yaxis.tick_right()
            cell.xaxis.set_label_position('top')
            if i == 0:
                # wrap the column title at its last space
                j_name = j_name.replace("_", " ")
                index = j_name.rfind(" ")
                j_name = j_name[:index] + '\n' + j_name[index:]
                cell.set_xlabel(j_name)
            if mode == "temporal_distance":
                cell.set_xticks([-15, 0, 15])
                cell.set_ylim(0, 0.2)
                cell.set_yticks([0.0, 0.05, 0.10, 0.15])
            else:
                cell.set_xticks([-2, 0, 2])
                cell.set_ylim(0, 0.8)
                cell.set_yticks([0.0, 0.20, 0.40, 0.60])
            if not i == len(axes2d)-1:
                cell.set_xticklabels([])
            if j == 0:
                # row title: first underscore becomes a space, second a line break
                i_name = i_name.replace("_", " ", 1)
                i_name = i_name.replace("_", '\n', 1)
                cell.set_ylabel(i_name)
            if not j == 5:
                cell.set_yticklabels([])
    if mode == "temporal_distance":
        fig.text(0.5, 0.04, "Change in travel time (minutes)", ha='center')
    else:
        fig.text(0.5, 0.04, "Change in number of transfers", ha='center')
    fig.text(0.04, 0.5, "From", va='center', rotation='vertical')
    fig.text(0.5, 1-0.04, "To", ha='center')
    if not img_dir:
        img_dir = "/home/clepe/production/results/helsinki/figs/all_to_all/heatmaps"
    # fix: fig_name defaults to None, which would raise TypeError on concatenation.
    plt.savefig(os.path.join(img_dir,
                             "diff_histogram_matrix" + (fig_name or '') + ".pdf"), format="pdf")
def ba_histogram_matrix(a2aa, rerun, measure_mode, measure="mean", img_dir=None, fig_name=None):
    """Save a column of per-zone histograms overlaying before (black) and after (red).

    :param a2aa: analyzer object feeding get_zone_to_all
    :param rerun: recompute dataframes instead of loading cached pickles
    :param measure_mode: "n_boardings" or "temporal_distance"
    :param measure: statistic column prefix (e.g. "mean")
    :param img_dir: output directory; falls back to a hard-coded results path
    :param fig_name: optional suffix for the output file name
    """
    assert measure_mode in ["n_boardings", "temporal_distance"]
    dfs, all_dfs = get_zone_to_all(a2aa, measure_mode, measure=measure, rerun=rerun)
    ba = ["before", "after"]
    colors = ['black', 'red']
    fig, axes2d = plt.subplots(nrows=len(all_dfs), ncols=1, figsize=(5, 10))
    font_size = 22
    for i, cell in enumerate(axes2d):
        for j, j_name in enumerate(ba):
            i_name, _ = all_dfs[i]
            df = dfs[(i_name, j_name)]
            print(i_name, j_name)
            # drop infinite (unreachable) and missing values before binning
            df = df.replace([np.inf, -np.inf], np.nan)
            df = df.dropna()
            if measure_mode == "temporal_distance":
                bins = range(0, 120, 1)
            else:
                bins = np.linspace(0, 5, 120)
            # fix: `normed` was removed from matplotlib's hist(); `density=True` is the equivalent.
            n, bins, patches = cell.hist(np.array(df[j_name+"_" + measure]), bins=bins, density=True,
                                         color=colors[j], alpha=1, histtype='step')
            if measure_mode == "temporal_distance":
                cell.set_ylim(0, 0.03)
            else:
                cell.set_ylim(0, 6)
            if i == len(axes2d) - 1:
                if measure_mode == "temporal_distance":
                    cell.set_xlabel("minutes", wrap=True)
                else:
                    cell.set_xlabel("transfers", wrap=True)
            cell.set_ylabel(i_name.replace("_", " "), wrap=True)
    labels = ba
    handles = [mlines.Line2D([], [], color=c, label=l) for c, l in zip(colors, labels)]
    fig.legend(handles, labels)
    fig.tight_layout()
    if not img_dir:
        img_dir = "/home/clepe/production/results/helsinki/figs/all_to_all/heatmaps"
    # fix: fig_name defaults to None, which would raise TypeError on concatenation.
    plt.savefig(os.path.join(img_dir,
                             "diff_histogram_matrix" + (fig_name or '') + ".pdf"), format="pdf")
def distance_vs_rows_histogram(a2aa, img_dir=None):
    """Histogram the "before" travel times of stop pairs whose change exceeds a threshold.

    Plots two overlaid histograms: pairs that improved by at least 180 s and
    pairs that worsened by at least 180 s.

    :param a2aa: analyzer object providing get_rows_with_abs_change_greater_than_n
    :param img_dir: output directory; falls back to a hard-coded results path
    """
    ignore_stops = stops_to_exclude(return_sqlite_list=True)
    measure = "mean"
    fig = plt.figure(figsize=(7, 7))
    ax = fig.add_subplot(111)
    n_value = 180
    for n, sign in zip([-1*n_value, n_value], ["<=", ">="]):
        df = a2aa.get_rows_with_abs_change_greater_than_n(ignore_stops, measure, n, sign, unit="s")
        # fix: the hist() return values previously rebound `n`, so the file name
        # below embedded a numpy array instead of the threshold value.
        # fix: `normed` was removed from matplotlib's hist(); `density=True` is the equivalent.
        counts, bin_edges, patches = ax.hist(np.array(df["before_"+measure]), density=True, facecolor='green', alpha=0.75)
    plt.ylim(0, 0.2)
    plt.xlabel("travel time")
    plt.ylabel("number of stop_pairs")
    if not img_dir:
        img_dir = makedirs("/home/clepe/production/results/helsinki/figs/all_to_all/heatmaps")
    plt.savefig(os.path.join(img_dir,
                             "distance_vs_volume_of_change_" + str(n) + ".png"), format="png", dpi=300)
def single_stop_change_histogram(target, measure, direction="to", indicator="diff_mean", a2aa=None, img_dir=None, ax=None, return_ax=False, cdf=False, color='blue', label=''):
    """Plot the distribution (histogram or CDF) of change for a single target stop.

    :param target: stop identifier passed to the analyzer
    :param measure: "n_boardings" or a temporal-distance measure
    :param direction: "to" or "from" the target stop
    :param indicator: dataframe column to plot (e.g. "diff_mean", "diff_mean_relative")
    :param a2aa: analyzer; constructed from module-level DB paths when omitted
    :param img_dir: output directory; falls back to a hard-coded results path
    :param ax: existing axes to draw on; a new figure is created when omitted
    :param return_ax: if True, return the axes instead of saving a figure
    :param cdf: if True, plot a cumulative count curve instead of a density histogram
    :param color: line color for the CDF curve
    :param label: legend label for the CDF curve
    :return: the axes when return_ax is True, otherwise None (figure saved to disk)
    """
    if not a2aa:
        a2aa = AllToAllDifferenceAnalyzer(GTFS_PATH, A2AA_DB_OLD_PATH, A2AA_DB_LM_PATH, A2AA_OUTPUT_DB_PATH)
    if not ax:
        fig = plt.figure(figsize=(7, 7))
        ax = fig.add_subplot(111, title="")
    # choose bin edges and unit by measure/indicator
    if measure == "n_boardings":
        yedges = np.arange(-2.0, 2.0, 0.1)
        unit = "s"
    else:
        yedges = range(-25, 25, 1)
        unit = "m"
    if indicator == "diff_mean_relative":
        yedges = np.arange(-0.7, 0.7, 0.05)
        unit = "s"
    df = a2aa.get_data_for_target(target, measure, direction=direction, unit=unit, ignore_stops=True)
    if cdf:
        values, base = np.histogram(np.array(df[indicator]), bins=yedges)
        # evaluate the cumulative
        cumulative = np.cumsum(values)
        # plot the cumulative function
        ax.plot(base[:-1], cumulative, c=color, label=label)
        plt.ylim(0, max(cumulative))
    else:
        # fix: `normed` was removed from matplotlib's hist(); `density=True` is the equivalent.
        n, bins, patches = ax.hist(np.array(df[indicator]), bins=yedges, density=True, facecolor='green', alpha=0.75)
        plt.ylim(0, 0.2)
    if return_ax:
        return ax
    plt.xlabel("")
    plt.ylabel("")
    if not img_dir:
        img_dir = makedirs("/home/clepe/production/results/helsinki/figs/all_to_all/heatmaps")
    plt.savefig(os.path.join(img_dir,
                             "diff_" + str(target) + "-" + measure + "-" + indicator + ".pdf"), format="pdf", dpi=300)
# Script entry point: regenerate the zone map figure (zone_map is defined elsewhere in this module).
if __name__ == "__main__":
    zone_map()
|
import dxx
import numpy as np
class TestDtype:
    """Unit tests for the dxx.Dtype members and their per-member properties."""

    # Expected property values, keyed by member name. Table-driven form replaces
    # the original six-fold copy/paste per test.
    BYTE_WIDTHS = {"DSA": 2, "DFA": 4, "DDA": 8, "DSB": 2, "DFB": 4, "DDB": 8}
    FORMAT_SPECIFIERS = {"DSA": "%d", "DFA": "%e", "DDA": "%le",
                         "DSB": "%d", "DFB": "%e", "DDB": "%le"}
    NUMPY_DTYPES = {"DSA": np.int16, "DFA": np.float32, "DDA": np.float64,
                    "DSB": np.int16, "DFB": np.float32, "DDB": np.float64}

    def test_from_filename(self):
        # The dtype is inferred from the file extension.
        assert dxx.Dtype.DSB == dxx.Dtype.from_filename("mock.DSB")

    def test_list_names(self):
        assert ["DSA", "DFA", "DDA", "DSB", "DFB", "DDB"] == dxx.Dtype.list_names()

    def test_byte_width(self):
        for name, width in self.BYTE_WIDTHS.items():
            assert getattr(dxx.Dtype, name).byte_width == width

    def test__format_specifiers(self):
        for name, spec in self.FORMAT_SPECIFIERS.items():
            assert getattr(dxx.Dtype, name)._format_specifiers == spec

    def test_numpy_dtype(self):
        for name, np_dtype in self.NUMPY_DTYPES.items():
            assert getattr(dxx.Dtype, name).numpy_dtype == np_dtype

    def test_is_DXA(self):
        # fix: avoid `assert True == x` / `assert False == x` (E712); the "A"
        # suffix marks the ASCII family.
        for name in self.BYTE_WIDTHS:
            assert getattr(dxx.Dtype, name).is_DXA == name.endswith("A")

    def test_is_DXB(self):
        # The "B" suffix marks the binary family.
        for name in self.BYTE_WIDTHS:
            assert getattr(dxx.Dtype, name).is_DXB == name.endswith("B")

    def test___str__(self):
        for name in self.BYTE_WIDTHS:
            assert str(getattr(dxx.Dtype, name)) == name
def test_dxx_len_file(mock_data_file):
    # mock_data_file is a pytest fixture (defined in conftest) — presumably a
    # 5-second file sampled at 48 kHz; length is reported in samples. TODO confirm.
    assert dxx.len_file(mock_data_file) == 5 * 48000
|
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import shutil
import subprocess
import sys
from unittest import TestCase
from packstack.modules import ospluginutils
from packstack.modules import puppet
from packstack.installer import basedefs
from packstack.installer import run_setup
from ..test_base import FakePopen
from ..test_base import PackstackTestCaseMixin
def makefile(path, content):
    '''Write *content* to a new (or truncated) file at *path*.'''
    with open(path, 'w') as out:
        out.write(content)
class CommandLineTestCase(PackstackTestCaseMixin, TestCase):
    def test_running_install_hosts(self):
        """
        Test packstack.installer.run_setup.main

        This test effectively runs all of the python code ran by

        packstack --install-hosts=127.0.0.1 --os-swift-install=y \
                  --nagios-install=y

        It is a fairly wide net but boosts code coverage of the packstack
        python code to about 85%; more finer-grained tests should also be
        added to target specific test cases.

        Popen is replaced in PackstackTestCaseMixin so no actual commands get
        run on the host running the unit tests.
        """
        subprocess.Popen = FakePopen
        FakePopen.register('cat /etc/resolv.conf | grep nameserver',
                           stdout='nameserver 127.0.0.1')
        # required by packstack.plugins.serverprep_949.mangage_rdo
        FakePopen.register("rpm -q rdo-release "
                           "--qf='%{version}-%{release}.%{arch}\n'",
                           stdout='icehouse-2.noarch\n')
        FakePopen.register_as_script('yum-config-manager --enable '
                                     'openstack-icehouse',
                                     stdout='[openstack-icehouse]\nenabled=1')
        # required by packstack.plugins.nova_300.gather_host_keys
        FakePopen.register('ssh-keyscan 127.0.0.1',
                           stdout='127.0.0.1 ssh-rsa hostkey-data')
        # create a dummy public key
        dummy_public_key = os.path.join(self.tempdir, 'id_rsa.pub')
        makefile(dummy_public_key, 'ssh-rsa AAAAblablabla')
        # create dummy keys for live migration mechanism
        makefile(os.path.join(basedefs.VAR_DIR, 'nova_migration_key'),
                 '-----BEGIN RSA PRIVATE KEY-----\n'
                 'keydata\n'
                 '-----END RSA PRIVATE KEY-----\n')
        makefile(os.path.join(basedefs.VAR_DIR, 'nova_migration_key.pub'),
                 'ssh-rsa keydata')
        # Save sys.argv and replace it with the args we want optparse to use
        orig_argv = sys.argv
        sys.argv = ['packstack', '--debug',
                    '--ssh-public-key=%s' % dummy_public_key,
                    '--install-hosts=127.0.0.1', '--os-swift-install=y',
                    '--nagios-install=y', '--use-epel=y']
        # There is no puppet logfile to validate, so replace
        # puppet.validate_logfile / puppet.scan_logfile with mock functions
        orig_validate_logfile = puppet.validate_logfile
        orig_scan_logfile = puppet.scan_logfile
        puppet.validate_logfile = lambda a: None
        puppet.scan_logfile = lambda a: []

        # If there is an error in a plugin sys.exit() gets called; this masks
        # the actual error that should be reported, so we replace it to
        # raise Exception — packstack logging gives a more informative error
        def raise_(ex):
            raise ex

        orig_sys_exit = sys.exit
        sys.exit = lambda a: raise_(Exception('Error during install-hosts'))
        try:
            run_setup.main()
        finally:
            sys.argv = orig_argv
            # fix: the original restored the mock to ospluginutils.validate_puppet_logfile,
            # leaving puppet.validate_logfile permanently mocked (and never
            # restored puppet.scan_logfile).
            puppet.validate_logfile = orig_validate_logfile
            puppet.scan_logfile = orig_scan_logfile
            sys.exit = orig_sys_exit
            try:
                shutil.rmtree(basedefs.VAR_DIR)
            except OSError:
                # best-effort cleanup; fix: a bare except also swallowed
                # KeyboardInterrupt/SystemExit
                pass
|
from shutil import copyfile
import numpy as np
import sys_simulator.general as gen
from time import time
from sys_simulator.dqn.agents.dqnAgent import ExternalDQNAgent
from sys_simulator.dqn.externalDQNFramework import ExternalDQNFramework
from sys_simulator.parameters.parameters import DQNAgentParameters
import torch
import gym
# Gym environment under test.
ENV_NAME = 'CartPole-v1'
# ENV_NAME = 'MountainCar-v0'
# Training-loop hyperparameters.
MAX_STEPS = 30000
STEPS_PER_EPISODE = 300
EVAL_NUM_EPISODES = 10
# Replay buffer configuration; ALPHA/BETA only apply to the 'prioritized' type.
REPLAY_MEMORY_TYPE = 'standard'
REPLAY_MEMORY_SIZE = int(1E3)
ALPHA = .6
BETA = .4
PRIO_BETA_ITS = int(.8*MAX_STEPS)
# Network / optimizer hyperparameters.
LEARNING_RATE = 1E-3
NUM_HIDDEN_LAYERS = 2
HIDDEN_SIZE = 128
BATCH_SIZE = 32
GAMMA = .99
# Epsilon-greedy exploration schedule.
EPSILON_INITIAL = 1
EPSILON_MIN = .01
EPSILON_DECAY = 1/(MAX_STEPS) # medium training
# Sync target network every TARGET_UPDATE steps; evaluate every EVAL_EVERY steps.
TARGET_UPDATE = 20
EVAL_EVERY = int(MAX_STEPS / 20)
# Mean evaluation reward above which training stops early.
EARLY_STOP_THRESHOLD = 450
torch_device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Module-level side effects: environment, framework, and agent are built at import time.
env = gym.make(ENV_NAME)
state_size = env.observation_space.shape[0]
action_size = env.action_space.n
agent_params = DQNAgentParameters(
    EPSILON_MIN, EPSILON_DECAY, EPSILON_INITIAL, REPLAY_MEMORY_SIZE,
    BATCH_SIZE, GAMMA
)
framework = ExternalDQNFramework(
    agent_params,
    state_size,
    action_size,
    HIDDEN_SIZE,
    torch_device,
    n_hidden_layers=NUM_HIDDEN_LAYERS,
    learning_rate=LEARNING_RATE,
    alpha=ALPHA,
    beta=BETA,
    beta_its=PRIO_BETA_ITS,
    replay_memory_type=REPLAY_MEMORY_TYPE
)
agent = ExternalDQNAgent(agent_params, list(range(action_size)))
def print_stuff(step: int, now: int):
    """Log one line of training progress (step count, schedule value, elapsed minutes)."""
    progress = f'Step: {step}/{MAX_STEPS-1}. '
    if REPLAY_MEMORY_TYPE == 'prioritized':
        schedule = f'Prio_Beta: {framework.replay_memory._beta}. '
    else:
        schedule = f'Epsilon: {agent.epsilon}. '
    print('Training. ' + progress + schedule + f'Elapsed time: {now} minutes.')
def train(start):
    """Run the DQN training loop until MAX_STEPS is reached or early stop triggers.

    :param start: wall-clock start time (seconds, from time()) used for progress logging
    :return: list of per-evaluation reward lists gathered during training
    """
    best_reward = float('-inf')
    test_rewards = []
    step = 0
    early_stop = False
    while step < MAX_STEPS and not early_stop:
        obs = env.reset()
        now = (time() - start) / 60
        print_stuff(step, now)
        reward = 0.0
        done = False
        t_flag = False
        while not done:
            action = agent.get_action(framework, obs)
            next_obs, reward, done, _ = env.step(action)
            framework.replay_memory.push(
                obs, action, reward, next_obs, done
            )
            framework.learn()
            # NOTE(review): tracks best single-step reward, not best episode return — confirm intended.
            best_reward = reward if reward > best_reward else best_reward
            obs = next_obs
            # remember that an evaluation is due once step crosses an EVAL_EVERY boundary;
            # the evaluation itself runs after the episode ends
            t_flag = True if step % EVAL_EVERY == 0 else t_flag
            step += 1
            if step % TARGET_UPDATE == 0:
                # periodically sync the target network with the policy network
                framework.target_net.load_state_dict(
                    framework.policy_net.state_dict()
                )
        if t_flag:
            t_rewards = test(framework)
            test_rewards.append(t_rewards)
            t_flag = False
            ref = np.mean(t_rewards)
            if ref > EARLY_STOP_THRESHOLD:
                early_stop = True
    # last test
    t_rewards = test(framework)
    test_rewards.append(t_rewards)
    # save stuff
    filename = gen.path_leaf(__file__)
    filename = filename.split('.')[0]
    data_path = f'models/dql/gym/(unknown)'
    data_path, _ = gen.make_dir_timestamp(data_path)
    torch.save(framework, f'{data_path}/framework.pt')
    return test_rewards
def test(framework: ExternalDQNFramework):
    """Evaluate the greedy policy for EVAL_NUM_EPISODES episodes.

    :param framework: trained DQN framework used by the agent to pick actions
    :return: list of total (summed) rewards, one per evaluation episode
    """
    rewards = []
    for _ in range(EVAL_NUM_EPISODES):
        obs = env.reset()
        done = False
        i = 0
        ep_rewards = []
        while not done and i < STEPS_PER_EPISODE:
            action = agent.act(framework, obs)
            next_obs, reward, done, _ = env.step(action)
            obs = next_obs
            ep_rewards.append(reward)
            # fix: the counter was never incremented, so the STEPS_PER_EPISODE
            # cap had no effect and an episode that never finished would loop forever
            i += 1
        rewards.append(np.sum(ep_rewards))
    return rewards
def test_video(
    framework: ExternalDQNFramework,
    num_episodes: int,
    steps_per_episode: int
):
    """Render the greedy policy on screen for a number of episodes.

    :param framework: trained DQN framework used by the agent to pick actions
    :param num_episodes: number of episodes to render
    :param steps_per_episode: per-episode step cap
    """
    env = gym.make(ENV_NAME)
    for _ in range(num_episodes):
        obs = env.reset()
        done = False
        i = 0
        while not done and i < steps_per_episode:
            env.render()
            action = agent.act(framework, obs)
            next_obs, _, done, _ = env.step(action)
            obs = next_obs
            # fix: the counter was never incremented, so steps_per_episode had
            # no effect and a non-terminating episode rendered forever
            i += 1
def run():
    """Train the agent, run a final evaluation, and persist rewards + a copy of this script."""
    train_rewards = []
    test_rewards = []
    start = time()
    train_rewards = train(start)
    test_rewards = test(framework)
    # save stuff
    now = (time() - start) / 60
    filename = gen.path_leaf(__file__)
    filename = filename.split('.')[0]
    dir_path = f'data/dql/gym/(unknown)'
    data_path, _ = gen.make_dir_timestamp(dir_path)
    data_file_path = f'{data_path}/log.pickle'
    data = {
        'train_rewards': train_rewards,
        'test_rewards': test_rewards,
        'elapsed_time': now,
        'eval_every': EVAL_EVERY,
    }
    gen.save_with_pickle(data, data_file_path)
    # archive the script itself alongside the results for reproducibility
    copyfile(__file__, f'{data_path}/(unknown).py')
    print(f'done. Elapsed time: {now} minutes.')
# Script entry point: train, evaluate, and persist results.
if __name__ == '__main__':
    run()
|
from view import *
from model import Model
import os
import sys
import datetime
MAX_NAME_CHARS = 20  # maximum length of a todo item name
MAX_DESC_CHARS = 150  # maximum length of a todo item description
STARTING_INDEX = 1  # items are shown 1-based to the user; storage is 0-based
class Controller:
    """
    The middle man. Operates on the model and updates the view to show
    information to the user.
    """
    OPTIONS = {'1': 'Add Todo Item',
               '2': 'Modify Item',
               '3': 'Delete Item',
               '4': 'Mark Item',
               '5': 'Display Items',
               '6': 'Display Specific Item',
               '7': 'Save tasks to file',
               '8': 'Read from file',
               '0': 'Exit program'}

    def __init__(self):
        self.model = Model()

    def menu(self):
        """
        Show menu to the user and ask to choose an option.

        Loops forever; option '0' exits the program via sys.exit.
        """
        # Dispatch table replaces the original nine-branch if/elif chain.
        handlers = {'1': self.add_todo_item,
                    '2': self.modify_item,
                    '3': self.delete_item,
                    '4': self.mark_as_done,
                    '5': self.display_items,
                    '6': self.display_specific_item,
                    '7': self.save_to_file,
                    '8': self.read_from_file,
                    '0': sys.exit}
        os.system('clear')
        self.show_menu()
        while True:
            option = input('Choose option: ')
            os.system('clear')
            self.show_menu()
            handler = handlers.get(option)
            if handler is not None:
                handler()

    def show_menu(self):
        """
        Update menu view
        """
        MenuView.display(self.OPTIONS)

    def add_todo_item(self):
        """
        Add a todo item to the todo_items list and update the view
        """
        name = self.ask_name_input()
        description = self.ask_description_input()
        date = self.ask_date_input()
        self.model.add_item(name, description, date)
        AddItemView.display(name)

    def modify_item(self):
        """
        Modify the selected item and update the view
        """
        index = self.ask_index_input()
        name = self.ask_name_input()
        description = self.ask_description_input()
        date = self.ask_date_input()
        try:
            self.model.modify_item(index, name, description, date)
            ModifyItemView.display(index)
        except IndexError:
            print('Wrong index!')

    def mark_as_done(self):
        """
        Mark the selected item as done and update the view
        """
        index = self.ask_index_input()
        try:
            self.model.mark_as_done(index)
            MarkItemView.display(index)
        except IndexError:
            print('Wrong index!')

    def delete_item(self):
        """
        Delete the selected item and update the view
        """
        index = self.ask_index_input()
        try:
            self.model.delete_item(index)
            DeleteItemView.display(index)
        except IndexError:
            print('Wrong index!')

    def display_items(self):
        """
        Update the view showing all items in the todo items list
        """
        DisplayListView.display(self.model.get_items())

    def display_specific_item(self):
        """
        Display the selected item by updating the view
        """
        index = self.ask_index_input()
        try:
            item = self.model.get_specific_item(index)
            DisplaySpecificItemView.display(index, item)
        except IndexError:
            print('Wrong index!')

    def save_to_file(self):
        """
        Save all todo items to file
        """
        self.model.save_to_file()

    def read_from_file(self):
        """
        Read todo items from file
        """
        self.model.read_from_file()

    @staticmethod
    def ask_index_input():
        """
        Ask user for index input and handle possible exceptions

        :return: int -> zero-based index (user enters a 1-based value)
        """
        while True:
            try:
                index = int(input('Enter index of item: '))
                return index - STARTING_INDEX
            except ValueError:
                print('You need to enter a number!')

    @staticmethod
    def ask_name_input():
        """
        Ask user about todo item name (max 20 characters)

        :returns: string -> name
        """
        while True:
            name = input('Enter name (max 20 characters): ').strip()
            if len(name) > MAX_NAME_CHARS:
                print('Name is too long!')
            else:
                return name

    @staticmethod
    def ask_description_input():
        """
        Ask user about todo item description (max 150 characters)

        :returns: string -> description
        """
        while True:
            description = input('Enter description (max 150 characters): ').strip()
            if len(description) > MAX_DESC_CHARS:
                print('Description is too long!')
            else:
                return description

    @staticmethod
    def ask_date_input():
        """
        Ask user about deadline date for the todo item and return it as a date object

        :returns: datetime.date object, or None if no date entered
        """
        year_index = 0
        month_index = 1
        day_index = 2
        while True:
            try:
                date = input('Enter date in the following syntax or press enter to add without date: year,month,day ')
                if not date:
                    return None
                date = date.split(',')
                deadline = datetime.date(int(date[year_index]), int(date[month_index]), int(date[day_index]))
                return deadline
            # fix: narrowed from a bare except, which also swallowed
            # KeyboardInterrupt/SystemExit; only parse errors are expected here
            except (ValueError, IndexError):
                print('Wrong input!')
|
import argparse
import socket
import sys
import signal
import traceback
from toolset.benchmark.benchmarker import Benchmarker
from toolset.utils.scaffolding import Scaffolding
from toolset.utils.audit import Audit
from toolset.utils import cleaner
from toolset.utils.benchmark_config import BenchmarkConfig
from toolset.utils.output_helper import log
# Enable cross-platform colored output
from colorama import init, Fore
init()
class StoreSeqAction(argparse.Action):
    '''
    Helper class for parsing an integer sequence from the command line.

    Accepts comma-separated values ("1,3,6" -> [1, 3, 6]) and
    start:step:end range specs ("1:2:10" -> [1, 3, 5, 7, 9]).
    '''

    def __init__(self, option_strings, dest, nargs=None, **kwargs):
        super(StoreSeqAction, self).__init__(
            option_strings, dest, type=str, **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        setattr(namespace, self.dest, self.parse_seq(values))

    def parse_seq(self, argument):
        """Expand a comma/colon sequence string into a list of non-negative ints."""
        result = argument.split(',')
        sequences = [x for x in result if ":" in x]
        for sequence in sequences:
            try:
                (start, step, end) = sequence.split(':')
            except ValueError:
                log("  Invalid: {!s}".format(sequence), color=Fore.RED)
                log("  Requires start:step:end, e.g. 1:2:10", color=Fore.RED)
                raise
            result.remove(sequence)
            # fix: range() is not a list in Python 3, so `list + range(...)`
            # raised TypeError; materialize it first
            result = result + list(range(int(start), int(end), int(step)))
        return [abs(int(item)) for item in result]
###################################################################################################
# Main
###################################################################################################
def main(argv=None):
    '''
    Runs the toolset.

    Parses command-line arguments, builds the benchmark configuration,
    and dispatches to the requested action (audit, clean, scaffold, list,
    parse, or run). Returns a process exit code.
    '''
    # Do argv default this way, as doing it in the functional declaration sets it at compile time
    if argv is None:
        argv = sys.argv

    ##########################################################
    # Set up argument parser
    ##########################################################
    parser = argparse.ArgumentParser(
        description="Install or run the Framework Benchmarks test suite.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        epilog=
    '''If an argument includes (type int-sequence), then it accepts integer lists in multiple forms.
    Using a single number e.g. 5 will create a list [5]. Using commas will create a list containing those
    values e.g. 1,3,6 creates [1, 3, 6]. Using three colon-separated numbers of start:step:end will create a
    list, using the semantics of python's range function, e.g. 1:3:15 creates [1, 4, 7, 10, 13] while
    0:1:5 creates [0, 1, 2, 3, 4]
    ''')

    # Suite options
    parser.add_argument(
        '--audit',
        action='store_true',
        default=False,
        help='Audits framework tests for inconsistencies')
    parser.add_argument(
        '--clean',
        action='store_true',
        default=False,
        help='Removes the results directory')
    parser.add_argument(
        '--new',
        action='store_true',
        default=False,
        help='Initialize a new framework test')
    parser.add_argument(
        '--quiet',
        action='store_true',
        default=False,
        help=
        'Only print a limited set of messages to stdout, keep the bulk of messages in log files only'
    )
    parser.add_argument(
        '--results-name',
        help='Gives a name to this set of results, formatted as a date',
        default='(unspecified, datetime = %Y-%m-%d %H:%M:%S)')
    parser.add_argument(
        '--results-environment',
        help='Describes the environment in which these results were gathered',
        default='(unspecified, hostname = %s)' % socket.gethostname())
    parser.add_argument(
        '--results-upload-uri',
        default=None,
        help=
        'A URI where the in-progress results.json file will be POSTed periodically'
    )
    parser.add_argument(
        '--parse',
        help=
        'Parses the results of the given timestamp and merges that with the latest results'
    )

    # Test options
    parser.add_argument(
        '--test', default=None, nargs='+', help='names of tests to run')
    parser.add_argument(
        '--test-dir',
        nargs='+',
        dest='test_dir',
        help='name of framework directory containing all tests to run')
    parser.add_argument(
        '--test-lang',
        nargs='+',
        dest='test_lang',
        help='name of language directory containing all tests to run')
    parser.add_argument(
        '--exclude', default=None, nargs='+', help='names of tests to exclude')
    parser.add_argument(
        '--type',
        choices=[
            'all', 'json', 'db', 'query', 'cached_query', 'fortune', 'update',
            'plaintext'
        ],
        default='all',
        help='which type of test to run')
    parser.add_argument(
        '-m',
        '--mode',
        choices=['benchmark', 'verify', 'debug'],
        default='benchmark',
        help=
        'verify mode will only start up the tests, curl the urls and shutdown. debug mode will skip verification and leave the server running.'
    )
    parser.add_argument(
        '--list-tests',
        action='store_true',
        default=False,
        help='lists all the known tests that can run')

    # Benchmark options
    parser.add_argument(
        '--duration',
        default=15,
        help='Time in seconds that each test should run for.')
    parser.add_argument(
        '--server-host',
        default='tfb-server',
        help='Hostname/IP for application server')
    parser.add_argument(
        '--database-host',
        default='tfb-database',
        help='Hostname/IP for database server')
    parser.add_argument(
        '--client-host', default='', help='Hostname/IP for client server')
    parser.add_argument(
        '--concurrency-levels',
        nargs='+',
        default=[16, 32, 64, 128, 256, 512],
        help='List of concurrencies to benchmark')
    parser.add_argument(
        '--pipeline-concurrency-levels',
        nargs='+',
        default=[256, 1024, 4096, 16384],
        help='List of pipeline concurrencies to benchmark')
    parser.add_argument(
        '--query-levels',
        nargs='+',
        default=[1, 5, 10, 15, 20],
        help='List of query levels to benchmark')
    parser.add_argument(
        '--cached-query-levels',
        nargs='+',
        default=[1, 10, 20, 50, 100],
        help='List of cached query levels to benchmark')
    parser.add_argument(
        '--benchmark-env',
        default='none',
        # NOTE(review): help text appears copy-pasted from --database-host;
        # confirm the intended description of this option.
        help='Hostname/IP for database server')

    # Network options
    parser.add_argument(
        '--network-mode',
        default=None,
        help='The network mode to run docker in')

    args = parser.parse_args()

    config = BenchmarkConfig(args)
    benchmarker = Benchmarker(config)

    # Stop cleanly on termination signals so containers are torn down.
    signal.signal(signal.SIGTERM, benchmarker.stop)
    signal.signal(signal.SIGINT, benchmarker.stop)

    try:
        # Dispatch to exactly one action based on the parsed config.
        if config.new:
            Scaffolding(benchmarker)
        elif config.audit:
            Audit(benchmarker).start_audit()
        elif config.clean:
            cleaner.clean(benchmarker.results)
            benchmarker.docker_helper.clean()
        elif config.list_tests:
            all_tests = benchmarker.metadata.gather_tests()
            for test in all_tests:
                log(test.name)
        elif config.parse:
            all_tests = benchmarker.metadata.gather_tests()
            for test in all_tests:
                test.parse_all()
            benchmarker.results.parse(all_tests)
        else:
            any_failed = benchmarker.run()
            if config.mode == "verify":
                return any_failed
    except Exception:
        tb = traceback.format_exc()
        log("A fatal error has occurred", color=Fore.RED)
        log(tb)
        # try one last time to stop docker containers on fatal error
        try:
            benchmarker.stop()
        # fix: narrowed from a bare except, which would also have swallowed
        # KeyboardInterrupt during the emergency shutdown
        except Exception:
            sys.exit(1)

    return 0
# Script entry point: propagate main()'s return value as the process exit code.
if __name__ == "__main__":
    sys.exit(main())
|
# Repository: nachonacho2/SIPEC
# SIPEC
# MARKUS MARKS
# RUN FULL INFERENCE
import os
import sys
import operator
import cv2
from SwissKnife.masksmoothing import MaskMatcher
from SwissKnife.poseestimation import heatmap_to_scatter, custom_binary_crossentropy
sys.path.append("../")
from argparse import ArgumentParser
import numpy as np
from tqdm import tqdm
from keras.engine.saving import load_model
import keras.backend as K
import keras.losses
# from SwissKnife.poseestimation import heatmap_to_scatter
from SwissKnife.segmentation import SegModel, mold_video
from SwissKnife.utils import (
setGPU,
loadVideo,
masks_to_coms,
apply_all_masks,
detect_primate,
check_directory,
rescale_img,
save_dict,
masks_to_coords,
)
# TODO: save molded imgs?
def full_inference(
    videodata,
    results_sink,
    SegNet=None,
    IdNet=None,
    PoseNet=None,
    BehaveNet=None,
    mask_matching=False,
    id_matching=False,
    output_video=None,
):
    """Run the full SIPEC inference pipeline (segmentation, optional
    identification, pose estimation, mask/id matching) over a video.

    Parameters
    ----------
    videodata : str or sequence
        If a str, treated as a directory containing frames named
        ``frame%d.jpg`` (see the imread call below); otherwise assumed to
        be an in-memory sequence of frames -- but NOTE(review): the frame
        loop only runs for the str case, so the sequence case only feeds
        the id-matching pass. Confirm intended usage.
    results_sink : str
        Path prefix where ``inference_results.npy`` is written.
    SegNet, IdNet, PoseNet, BehaveNet :
        Optional network objects; BehaveNet is currently unused here.
    mask_matching : bool
        If True, match masks across consecutive frames to keep track ids.
    id_matching : bool
        If True, smooth per-frame identities over a +/-150 frame window.
    output_video :
        Unused in this function -- TODO confirm whether it can be removed.
    """
    # classes: hard-coded primate-name -> index mapping for IdNet output
    classes = {
        "Charles": 0,
        "Max": 1,
        "Paul": 2,
        "Alan": 3,
    }
    maskmatcher = MaskMatcher()
    maskmatcher.max_ids = 6
    # invert classes / to go both ways
    classes_invert = [el for el in classes.keys()]
    # set threshold for detection of primate identities
    threshold = 0.5
    results = []
    if type(videodata) == str:
        length = len(os.listdir(videodata))
        # for idx, el in tqdm(enumerate(videodata)):
        for idx in range(length):
            el = cv2.imread(videodata + "frame%d.jpg" % idx)
            results_per_frame = {}
            molded_img, masks, boxes, mask_scores = SegNet.detect_image(
                el, verbose=0, mold=True
            )
            coms = masks_to_coms(masks)
            # TODO: fixme
            # NOTE(review): frames where mask application fails are recorded
            # as the scalar 0 in `results`; downstream indexing of
            # results[...]["boxes"] etc. will then raise -- confirm handling.
            try:
                masked_imgs, masked_masks = apply_all_masks(
                    masks, coms, molded_img, mask_size=128
                )
            except ValueError:
                results.append(0)
                continue
            if mask_matching:
                if not idx == 0:
                    # Match current boxes against the previous frame's boxes
                    # to propagate track identities.
                    mapping = maskmatcher.match_masks(
                        boxes[: maskmatcher.max_ids], results[-1]["boxes"]
                    )
                    print(mapping)
                    new_ids = maskmatcher.match_ids(
                        mapping, len(boxes[: maskmatcher.max_ids])
                    )
                    overlaps = [mapping[el][0] for el in mapping]
                    # Pad overlaps with zeros so it lines up with `boxes`.
                    if len(overlaps) < len(boxes):
                        for i in range(len(boxes) - len(overlaps)):
                            overlaps.append(0)
                    if max(new_ids) > 0:
                        print("boxes before: ", str(boxes))
                        boxes = maskmatcher.map(mapping, boxes)
                        print("boxes after: ", str(boxes))
                        # masks are (H, W, N); swap to (N, W, H) so map()
                        # reorders along the instance axis, then swap back.
                        masks = np.swapaxes(masks, 0, 2)
                        masks = maskmatcher.map(mapping, masks)
                        masks = np.swapaxes(masks, 0, 2)
                        masked_imgs = maskmatcher.map(mapping, masked_imgs)
                        coms = maskmatcher.map(mapping, coms)
                        # overlaps = maskmatcher.map(mapping, overlaps)
                    print(new_ids)
                    results_per_frame["track_ids"] = new_ids
                    # NOTE(review): "overalps" is a typo for "overlaps" but is
                    # the key written to the saved results -- keep as-is for
                    # compatibility with existing consumers of the .npy file.
                    results_per_frame["overalps"] = overlaps
                    results_per_frame["mapping"] = mapping
                else:
                    results_per_frame["track_ids"] = np.zeros(
                        (maskmatcher.max_ids,)
                    ).astype("int")
                    results_per_frame["overalps"] = np.zeros((maskmatcher.max_ids,)).astype(
                        "float"
                    )
            mask_size = 256
            # mask_size = 128
            rescaled_imgs = []
            for box in boxes:
                # A zeroed box (from padding/matching) gets a blank image.
                if box[0] == 0:
                    rescaled_imgs.append(np.zeros((mask_size, mask_size, 3)))
                else:
                    rescaled_img = rescale_img(box, molded_img, mask_size=mask_size)
                    rescaled_imgs.append(rescaled_img)
            rescaled_imgs = np.asarray(rescaled_imgs)
            # resulting_frames[idx] = molded_img.astype("uint8")
            results_per_frame["mask_coords"] = masks_to_coords(masks)
            results_per_frame["mask_scores"] = mask_scores
            results_per_frame["boxes"] = boxes
            results_per_frame["coms"] = np.asarray(coms)
            results_per_frame["masked_imgs"] = np.asarray(masked_imgs).astype("uint8")
            results_per_frame["masked_masks"] = masked_masks.astype("uint8")
            results_per_frame["rescaled_imgs"] = rescaled_imgs.astype("uint8")
            # maskmatch.sort ()
            # append IdNet results
            if IdNet:
                ids = []
                confidences = []
                for img in rescaled_imgs:
                    primate, confidence = detect_primate(
                        img, IdNet, classes_invert, threshold
                    )
                    ids.append(primate)
                    confidences.append(confidence)
                results_per_frame["ids"] = ids
                results_per_frame["confidences"] = confidences
            if PoseNet:
                maps = []
                for img in masked_imgs:
                    heatmaps = PoseNet.predict(np.expand_dims(img, axis=0))
                    heatmaps = heatmaps[0, :, :, :]
                    coords_predict = heatmap_to_scatter(heatmaps)
                    maps.append(coords_predict)
                results_per_frame["pose_coordinates"] = maps
            results.append(results_per_frame)
    if id_matching:
        # Smooth identities over a symmetric window of `lookback` frames.
        # NOTE(review): this pass reuses `ids` (and `el`) leaked from the
        # frame loop above; if IdNet was None (or videodata was not a str),
        # `ids` is undefined and this raises NameError -- confirm callers
        # always pass IdNet together with id_matching=True.
        for idx, el in tqdm(enumerate(videodata)):
            lookback = 150
            if not (lookback < idx < len(videodata) - lookback):
                # Too close to either end of the video to smooth: fall back
                # to the last frame's raw ids.
                results[idx]["smoothed_ids"] = ids
            else:
                corrected_ids = {}
                for i in range(len(ids)):
                    prev_ids = {}
                    # for j in range(-lookback, 0):
                    # TODOL forward backward filter
                    for j in range(-lookback, lookback):
                        try:
                            prev_id = results[idx + j]["track_ids"][i]
                            prev_names = results[idx + j]["ids"]
                            confidences = results[idx + j]["confidences"]
                            try:
                                # prev_ids[prev_names[i]] = prev_ids[prev_names[i]] + confidences[i] * (10/(np.abs(j)+1))
                                prev_ids[prev_names[i]].append(confidences[i])
                            except KeyError:
                                # prev_ids[prev_names[i]] = confidences[prev_id]
                                prev_ids[prev_names[i]] = [confidences[i]]
                        except IndexError:
                            continue
                    if prev_ids == {}:
                        corrected_ids[i] = ids[i]
                    else:
                        # Pick the name with the highest median confidence
                        # over the window.
                        for el in prev_ids.keys():
                            prev_ids[el] = np.median(prev_ids[el])
                        # corrected_ids[i] = max(prev_ids.items(), key=operator.itemgetter(1))[0]
                        sorted_x = sorted(prev_ids.items(), key=operator.itemgetter(1))
                        corrected_ids[i] = sorted_x[-1][0]
                results[idx]["smoothed_ids"] = corrected_ids
    np.save(results_sink + "inference_results.npy", results, allow_pickle=True)
    # save_dict(
    #     results_sink + "inference_resulting_masks.pkl", resulting_masks,
    # )
    # save_dict(
    #     results_sink + "/inference_resulting_frames.pkl", resulting_frames,
    # )
def main():
    """Entry point: run full SIPEC inference for the selected species.

    Reads ``--operation`` ('primate' or 'mouse') and ``--gpu`` from the
    module-level ``parser`` (defined below this function; fine because it
    is only consumed at call time). All model and video paths are
    hard-coded to the original authors' machines.
    """
    args = parser.parse_args()
    operation = args.operation
    gpu_name = args.gpu
    if gpu_name:
        setGPU(K, gpu_name)
    if operation == "primate":
        species = "primate"
        SegNet = SegModel(species=species)
        SegNet.inference_config.DETECTION_MIN_CONFIDENCE = 0.99
        # indoor network
        SegNet.set_inference(model_path="/home/nexus/mask_rcnn_primate_0119.h5")
        # all cam network
        # SegNet.set_inference(model_path="/media/nexus/storage5/swissknife_results/networks/mask_rcnn_primate_0400.h5")
        vidbase = "/media/nexus/storage5/swissknife_data/primate/raw_videos_sorted/2018_merge/"
        vidnames = [
            "20180124T115800-20180124T122800b_%T1",
            "20180115T150502-20180115T150902_%T1",
        ]
        for videoname in vidnames:
            results_sink = "/media/nexus/storage5/swissknife_results/full_inference/primate_july_test_no_matching/"
            name_helper = videoname
            # Per-video identification network trained with this video held out.
            IdNet = load_model(
                "/media/nexus/storage5/swissknife_results/identification/primate/identification_full_ours_CV_fraction_1.0_2020-07-13-10_33/"
                + name_helper
                + "/IDnet_"
                # + name_helper
                # + "20180131T135402-20180131T142501_%T1"
                + videoname
                + "_recognitionNet.h5"
            )
            videoname = vidbase + videoname
            # videoname = '../testovideo_short'
            results_sink = results_sink + name_helper + "/"
            # results_sink = "testing_short/"
            check_directory(results_sink)
            # load example video
            print("loading video")
            print(videoname)
            # NOTE(review): batchsize is never used below -- confirm it can go.
            batchsize = 5000
            print("video loaded")
            # NOTE(review): "path" is a placeholder; full_inference treats a
            # str as a directory of frame%d.jpg images -- confirm intended value.
            videodata = "path"
            full_inference(
                # videodata=molded_video,
                videodata=videodata,
                results_sink=results_sink,
                SegNet=SegNet,
                IdNet=IdNet,
            )
    elif operation == "mouse":
        species = "mouse"
        SegNet = SegModel(species=species)
        SegNet.inference_config.DETECTION_MIN_CONFIDENCE = 0.8
        SegNet.set_inference(
            model_path="/media/nexus/storage4/swissknife_results/segmentation/mouse_/mouse20200624T1414/"
            "mask_rcnn_mouse_0040.h5"
        )
        # Register the custom loss so load_model can deserialize PoseNet.
        keras.losses.custom_binary_crossentropy = custom_binary_crossentropy
        PoseNet = load_model(
            "/media/nexus/storage4/swissknife_results/poseestimation/poseestimation_full_2020-07-01-21_20/posenetNet.h5",
            custom_objects={"loss": custom_binary_crossentropy},
        )
        results_sink = (
            "/media/nexus/storage4/swissknife_results/full_inference/mouse_test/"
        )
        # check_directory(results_sink)
        # NOTE(review): `videoname` is never assigned in this branch, so the
        # next line raises NameError -- the intended video path must be set.
        videodata = loadVideo(videoname, greyscale=False, num_frames=700)[300:500]
        molded_video = mold_video(videodata, dimension=1024, n_jobs=10)
        full_inference(
            videodata=molded_video,
            results_sink=results_sink,
            SegNet=SegNet,
            # IdNet=IdNet,
            PoseNet=PoseNet,
            id_matching=False,
        )
    else:
        raise NotImplementedError
    print("DONE")
# Command-line interface. Defined at module level so that main() can call
# parser.parse_args(); it is evaluated before the __main__ guard runs.
parser = ArgumentParser()
parser.add_argument(
    "--operation",
    action="store",
    dest="operation",
    type=str,
    default="train_primate",
    # Fixed help text: this script runs inference, not training, and only
    # 'primate' and 'mouse' are handled by main().
    help="inference pipeline to run: 'primate' or 'mouse'",
)
parser.add_argument(
    "--gpu",
    action="store",
    dest="gpu",
    type=str,
    default=None,
    # Fixed help text: the original was a copy-paste of a video-filename
    # description; this flag selects the GPU passed to setGPU().
    help="identifier of the GPU to run inference on",
)
if __name__ == "__main__":
    # Script entry point; main() reads its arguments from the module-level parser.
    main()
|
from setuptools import setup
import sys
import wx
# Package metadata shared by the three platform-specific setup() variants below.
APP = "batchcalc/zbc.py"
AUTHOR = "<NAME>"
AUTHOR_EMAIL = "<EMAIL>"
# Fixed typo: "zeoliteis" -> "zeolites".
DESCRIPTION = "Script for calculating batch composition of zeolites"
# Read the license text once at import time. The original used a bare
# open(...).read(), which leaked the file handle; a context manager closes it.
with open('LICENSE.txt') as _license_file:
    LICENSE = _license_file.read()
NAME = "batchcalc"
URL = "https://bitbucket.org/lukaszmentel/batchcalc"
VERSION = "0.2.1"
YEAR = "2014"
def readme():
    """Return the full text of README.rst (used as the long description)."""
    with open('README.rst') as readme_file:
        return readme_file.read()
RT_MANIFEST = 24
def BuildPy2Exe():
    '''Generate the Py2exe files (Windows executable build).

    Aborts with a message if py2exe is not importable, then calls
    setuptools.setup() with py2exe-specific options. Python 2 only.
    '''
    try:
        import py2exe
    except ImportError:
        print "\n!! You dont have py2exe installed. !!\n"
        exit()
    # py2exe build options: compress the library archive, light bytecode
    # optimization, bundle into fewer files, drop Tkinter and the MSVC runtime.
    OPTS = {"py2exe" : {"compressed" : 1,
                        "optimize" : 1,
                        "bundle_files" : 2,
                        "excludes" : ["Tkinter",],
                        "dll_excludes": ["MSVCP90.dll"]}}
    setup(
        author = AUTHOR,
        author_email = AUTHOR_EMAIL,
        description = DESCRIPTION,
        license = LICENSE,
        name = NAME,
        url = URL,
        version = VERSION,
        options = OPTS,
        windows = [{"script": APP,
                    "icon_resources": [(1, "icons/icon.ico")],
                   }],
    )
    #"other_resources" : [(RT_MANIFEST, 1, manifest)],
def BuildOSXApp():
    '''Build the OSX Applet via py2app.

    Assembles the Info.plist dictionary and py2app options, then calls
    setuptools.setup() with the py2app command.
    '''
    # py2app uses this to generate the plist xml for the applet
    copyright = "Copyright {0} {1}".format(AUTHOR, YEAR)
    appid = "com.{0}.{0}".format(NAME)
    PLIST = dict(
        CFBundleName = NAME,
        CFBundleIconFile = 'icons/icon.icns',
        CFBundleShortVersionString = VERSION,
        CFBundleGetInfoString = NAME + " " + VERSION,
        CFBundleExecutable = NAME,
        CFBundleIdentifier = appid,
        CFBundleTypeMIMETypes = ['text/plain',],
        CFBundleDevelopmentRegion = "English",
        NSHumanReadableCopyright = copyright
    )
    PY2APP_OPTS = dict(
        iconfile = 'icons/icon.icns',
        argv_emulation = False,
        optimize = True,
        plist = PLIST,
        packages = ['batchcalc', 'sqlalchemy', 'jinja2'],
    )
    setup(
        app = [APP,],
        author = AUTHOR,
        author_email = AUTHOR_EMAIL,
        description = DESCRIPTION,
        license = LICENSE,
        name = NAME,
        url = URL,
        version = VERSION,
        include_package_data = True,
        options = dict(py2app = PY2APP_OPTS),
        setup_requires = ['py2app'],
        install_requires = [
            'numpy>=1.5.1',
            'sqlalchemy>=0.9.7',
            'jinja2>=2.7.3',
        ],
    )
def linux_setup():
    """Standard setuptools installation for Linux (installs zbc.py as a script).

    NOTE(review): numpy floor here is 1.8.1 but 1.5.1 in BuildOSXApp --
    confirm which minimum is actually required.
    """
    setup(
        author = AUTHOR,
        author_email = AUTHOR_EMAIL,
        description = DESCRIPTION,
        license = LICENSE,
        name = NAME,
        url = URL,
        version = VERSION,
        scripts=[
            'batchcalc/zbc.py'],
        include_package_data = True,
        install_requires = [
            'numpy>=1.8.1',
            'sqlalchemy>=0.9.7',
            'jinja2>=2.7.3',
        ],
        long_description = readme(),
        packages = ["batchcalc"],
        classifiers = [
            'Development Status :: 3 - Alpha',
            'Environment :: Console',
            'License :: MIT',
            'Operating System :: POSIX :: Linux',
            'Programming Language :: Python :: 2.7',
        ],
    )
if __name__ == '__main__':
    # Dispatch on the wxPython platform string to pick the right packager.
    if wx.Platform == '__WXMSW__':
        # Windows
        BuildPy2Exe()
    elif wx.Platform == '__WXMAC__':
        # OSX
        BuildOSXApp()
    elif wx.Platform == '__WXGTK__':
        linux_setup()
    else:
        print "Unsupported platform: %s" % wx.Platform
|
#!/usr/bin/env python
import os
import os.path
import glob
import landsat8_metadata
import math
bandList = ['B01','B02','B03','B04','B05','B09','B06','B07']
def process_tgz(context, path, dir=os.getcwd()):
    """Run the full iCOR atmospheric-correction workflow for a Landsat-8 scene.

    Parameters:
        context: workflow context object -- supports dict-style option access,
            invoke_ac_runner_mine()/invoke_ac_runner_check() to run AC-runner
            config snippets, stage bookkeeping and tmp-file tracking.
        path: path to the scene's *_MTL.txt metadata file.
        dir: output base directory.
            NOTE(review): this default is evaluated once at import time, so
            it is the cwd of the importing process, not of the call -- confirm
            that is intended.

    Stages: band scaling to TOA reflectance/radiance, multiband merge, DEM
    generation, cloud and water detection, optional AOT retrieval, optional
    SIMEC background estimation, atmospheric correction, and cleanup.
    Python 2 (uses a print statement at the end).
    """
    context["aot_window_size"] = 500
    list_intermediate_save_file = []
    # ------------------------------------------------------------------------
    # WORKFLOW Landsat8: iCOR
    # ------------------------------------------------------------------------
    l8_metadata = landsat8_metadata.Landsat8_metadata(path)
    l8_metadata.parse_config_file()
    solar_zenith = l8_metadata.sun_zenith()
    if solar_zenith == None:
        raise Exception("could not read solar zenith from MTL")
    cos_solar_zenith = math.cos(math.radians(solar_zenith))
    if cos_solar_zenith == 0.0:
        raise Exception("cosine solar zentith should not be zero")
    # NOTE(review): day_of_year is validated here but never used below; the
    # reflectance stage hard-codes dayofyear=94 -- confirm which is correct.
    day_of_year = l8_metadata.get_doy()
    if day_of_year == None:
        raise Exception("could not read day of year from MTL")
    #create working folder
    input_base_name = path.split("_MTL.txt")[0]
    context["name"] = l8_metadata.get_scene_name()
    context["prefix_input"] = input_base_name.replace("\\","/")
    working_folder = os.path.join(dir, context["name"])
    if os.path.isdir(working_folder)==False or os.path.exists(working_folder) == False:
        os.makedirs(working_folder)
    # first save a log of the config
    context.write_config(working_folder)
    context["prefix"] = os.path.join(working_folder, context["name"])
    # set defaults from command line
    # NOTE(review): these four override strings are either reassigned or not
    # referenced later (aot_string is rebuilt in the AOT stage; the others are
    # rebuilt or read from context directly) -- confirm before removing.
    aot_string = " --override_aot " + str(context["aot_override"])
    wv_override_string = " --override_watervapor " + str(context["watervapor_override"])
    background_string = " --override_background " + str(context["bg_window"])
    ozone_override_string = " --override_ozone " + str(context["ozone_override"])
    # set defaults from command line
    context["max_stages"] = 10
    if context["simec"] == "false":
        context["max_stages"] -= 1
    if context["aot"] == "false":
        context["max_stages"] -= 1
    #=========================================================================
    context.enter_stage("Aggregate bands, convert to Geotiff")
    #=========================================================================
    # Create a VRT file (Virtual Dataset)
    #
    # Originally, the bands are delivered in this order:
    # +=BANDS================================+=Wavelength=(microm)+
    # |Band 1 = Coastal aerosol | 0.43 to 0.45 |
    # |Band 2 = Blue | 0.45 to 0.51 |
    # |Band 3 = Green | 0.53 to 0.59 |
    # |Band 4 = Red | 0.64 to 0.67 |
    # |Band 5 = Near Infrared (NIR) | 0.85 to 0.88 |
    # |Band 6 = SWIR 1 | 1.57 to 1.65 |
    # |Band 7 = SWIR 2 | 2.11 to 2.29 |
    # |Band 8 = Panchromatic | 0.50 to 0.68 |
    # |Band 9 = Cirrus | 1.36 to 1.38 |
    # |Band 10 = Thermal Infrared (TIRS) 1 | 10.60 to 11.19 |
    # |Band 11 = Thermal Infrared (TIRS) 2 | 11.50 to 12.51 |
    # +======================================+====================+
    #
    #
    #
    # For iCOR we reorder the bands
    # VIS_NIR_SWIR (no suffix)
    # +=BANDS================================+=Wavelength=(microm)+
    # |Band 1 = Coastal aerosol | 0.43 to 0.45 |
    # |Band 2 = Blue | 0.45 to 0.51 |
    # |Band 3 = Green | 0.53 to 0.59 |
    # |Band 4 = Red | 0.64 to 0.67 |
    # |Band 5 = Near Infrared (NIR) | 0.85 to 0.88 |
    # |Band 6 = Cirrus | 1.36 to 1.38 |
    # |Band 7 = SWIR 1 | 1.57 to 1.65 |
    # |Band 8 = SWIR 2 | 2.11 to 2.29 |
    # +======================================+====================+
    #
    # Zero-based source band indices in the reordered (cirrus-before-SWIR) order.
    bandlist = [0,1,2,3,4,8,5,6]
    reflectance_list=[]
    radiance_list=[]
    minimum = 0
    reflectance_name=""
    radiance_name=""
    for band in bandlist:
        location = "{prefix_input}_B" + str(band+1) + ".TIF"
        reflectance_name = context["prefix"] + "_ACRUNNER_TOA_Reflectance_"+ str(band) + ".tif"
        radiance_name = context["prefix"] + "_ACRUNNER_TOA_Radiance_"+ str(band)+ ".tif"
        # Scale raw DNs to TOA reflectance using the MTL gain/bias, corrected
        # for the solar zenith angle.
        context.invoke_ac_runner_mine(
            "[scale]\n"
            "scale.input.location=" + location + "\n" +
            "scale.output.location="+ reflectance_name + "\n" +
            "scale.gain=" + str(l8_metadata.get_gain_reflectance(band)/cos_solar_zenith) + "\n" +
            "scale.offset="+ str(l8_metadata.get_bias_reflectance(band)/cos_solar_zenith) +"\n" +
            "scale.invalid.minimum=" + str(minimum) + "\n"
            "scale.zero.invalid=true\n"
        )
        # Invert reflectance back to radiance (reflectance.invert=true).
        # NOTE(review): dayofyear is hard-coded to 94 here despite the
        # day_of_year read from the MTL above -- confirm.
        context.invoke_ac_runner_mine(
            "[reflectance]\n" +
            "reflectance.input.radiance.location=" + reflectance_name + "\n"+
            "reflectance.image.dayofyear=94\n"+
            "reflectance.bands=0\n"+
            "reflectance.lut.bands=" + str(bandlist.index(band)) + "\n"
            "reflectance.destination.location="+ radiance_name + "\n"+
            "reflectance.override.sza=" + str(l8_metadata.sun_zenith()) + "\n"+
            "reflectance.solarirradiance.location={ac_solar_irradiance}\n"+
            "reflectance.response.curves.location={ac_response_curves}\n"
            "reflectance.invert=true\n"
        )
        radiance_list.append(radiance_name)
        reflectance_list.append(reflectance_name)
    #=========================================================================
    context.enter_stage("Single to MultiBand Radiance")
    #=========================================================================
    radiance_mb=""
    radiance_output_name = context["prefix"] + "_ACRUNNER_Scaled_Radiance.tif"
    for radiance_file in radiance_list:
        radiance_mb += radiance_file + " "
    context.invoke_ac_runner_mine(
        "[singletomulti fast]\n" +
        "multiband.input.images=" + radiance_mb + "\n"
        "multiband.output.image=" + radiance_output_name + "\n"
    )
    #=========================================================================
    context.enter_stage("Single to MultiBand Reflectance")
    #=========================================================================
    reflectance_mb=""
    reflectance_output_name = context["prefix"] + "_ACRUNNER_TOA_Reflectance.tif"
    for reflectance_file in reflectance_list:
        reflectance_mb += reflectance_file + " "
    context.invoke_ac_runner_mine(
        "[singletomulti fast]\n" +
        "multiband.input.images=" + reflectance_mb + "\n"
        "multiband.output.image=" + reflectance_output_name + "\n"
    )
    #=========================================================================
    context.enter_stage("Generate the DEM")
    #=========================================================================
    context.invoke_ac_runner_mine(
        "[dem]\n" +
        "dem.reference.location={prefix}_ACRUNNER_TOA_Reflectance.tif\n"
        "dem.input.location={dem_world}\n" +
        "dem.output.location={prefix}_ACRUNNER_DEM.tif\n"
        "dem.conversion.factor=0.001"
    )
    #=========================================================================
    context.enter_stage("Cloud Detection")
    #=========================================================================
    low_b = str(context["low_band"])
    cloud_low_id_string ="cloud.low.id="+ str(bandList.index(low_b)) + "\n"
    average_threshold_string ="cloud.avg.trh="+ str(context["average_threshold"]) + "\n"
    cloud_low_threshold_string = "cloud.low.trh="+str(context["low_threshold"]) + "\n"
    cirrus_thr = ""
    cirrus_band =""
    if context["cirrus"] == "true" :
        cirrus_thr = "cloud.cirrus.threshold=" + str(context["cirrus_threshold"]) + "\n"
        cirrus_band = "cloud.cirrus.band=5\n"
    context.invoke_ac_runner_mine(
        "[cloud detection]\n" +
        "cloud.input.location={prefix}_ACRUNNER_TOA_Reflectance.tif\n" +
        cloud_low_id_string +
        "cloud.high.id=4\n" +
        average_threshold_string +
        cloud_low_threshold_string +
        "cloud.mask.location={prefix}_ACRUNNER_cloud_mask.tif\n" +
        "cloud.visible.bands= 0 1 2 3\n"+
        cirrus_thr +
        cirrus_band
    )
    wbandid = bandList.index((str(context["water_band"])))
    water_band_string = "water.nir.band=" + str(wbandid) + "\n"
    water_threshold = "water.treshold=" + str(context["water_threshold"]) + "\n"
    #=========================================================================
    context.enter_stage("Water Detection")
    #=========================================================================
    context.invoke_ac_runner_mine(
        "[water detection]\n"
        "water.input.location={prefix}_ACRUNNER_TOA_Reflectance.tif\n"+
        water_band_string +
        water_threshold +
        "water.mask.location={prefix}_ACRUNNER_water_mask.tif\n"
    )
    #=========================================================================
    context.enter_stage("Calculate Aerosol Optical Thickness (AOT)")
    #=========================================================================
    if context["aot"] == "true":
        aot_string = "{prefix}_ACRUNNER_AOT.tif"
        aot_window_string = str(context["aot_window_size"])
        context.invoke_ac_runner_mine(
            "[aot guanter]\n"
            "aot.lut.location={ac_big_disort_lut}\n"
            "aot.response.curves.location={ac_response_curves}\n"
            "aot.solarirradiance.location={ac_solar_irradiance}\n"
            "aot.input.location={prefix}_ACRUNNER_Scaled_Radiance.tif\n"
            "aot.output.location=" + aot_string + "\n"
            "aot.image.bands=0 1 2 3 4\n"
            "aot.image.visible.bands=0 1 2 3\n" +
            "aot.square.pixels=" + aot_window_string + "\n" +
            "aot.ndvi.bands=3 4\n"
            "aot.ndvi.list=0.01 0.10 0.45 0.7\n"
            "aot.ndvi.refined.bands=3 4\n"
            "aot.refpixels.nr=5\n"
            "aot.limit.refsets=5\n"
            "aot.weights=2.0 2.0 1.5 1.5 1.0\n"
            "aot.centerwl.inverse.location={ac_inverse_profiles}\n"
            "aot.vegetation.profiles={ac_vegetation_profiles}\n"
            "aot.sand.profiles={ac_soil_profiles}\n"
            "aot.watermask.location={prefix}_ACRUNNER_water_mask.tif\n"
            "aot.cloudmask.location={prefix}_ACRUNNER_cloud_mask.tif\n"
            "aot.cloudmask.dilate=10\n"
            "aot.override.sza=" + str(l8_metadata.sun_zenith()) + "\n"
            "aot.override.vza=" + str(0.0) + "\n"
            "aot.override.raa=" + str(0.0) + "\n" +
            "aot.override.ozone="+ str(context["ozone_override"]) +"\n"
            "aot.override.watervapor="+str(context["watervapor_override"]) + "\n"
            "aot.input.elevation.location={prefix}_ACRUNNER_DEM.tif\n"
        )
        # check if aot succeeded
        returnCodeAOT = context.invoke_ac_runner_check(
            "[valid]\n"
            "valid.input=" + aot_string + "\n"
        )
    # Run SIMEC
    # NOTE(review): returnCodeAOT only exists when aot=="true"; the `and`
    # short-circuit below keeps this from raising NameError otherwise.
    if context["aot"] == "true" and returnCodeAOT == 0:
        context.add_keep_tmp( str(context["prefix"] ) + "_ACRUNNER_AOT.tif")
        simec_aot_string = "simec.aot.location={prefix}_ACRUNNER_AOT.tif\n"
    else:
        # NOTE(review): unlike every other override, aot_override is not
        # wrapped in str() here -- raises TypeError if it is numeric. Confirm.
        simec_aot_string = "simec.override.aot=" + context["aot_override"] + "\n"
    if context["simec"] == "true":
        #=========================================================================
        context.enter_stage("Use SIMEC to calculate the background")
        #=========================================================================
        background_string="simec.output.location={prefix}_ACRUNNER_SIMEC.tif\n"
        simec_wv_string = "simec.override.watervapor="+ str(context["watervapor_override"]) +"\n"
        simec_ozone_override_string = "simec.override.ozone=" +str(context["ozone_override"]) + "\n"
        context.invoke_ac_runner_mine(
            "[simec]\n" +
            "simec.lut.location={ac_big_disort_lut}\n" +
            "simec.response.curves.location={ac_response_curves}\n" +
            "simec.radiance.location={prefix}_ACRUNNER_Scaled_Radiance.tif\n" +
            "simec.subsample.factor=10\n" +
            "simec.subsample.band=4\n" +
            "simec.nir.band=3\n" +
            "simec.nir780.band=4\n" +
            "simec.lut.band.nir=3\n" +
            "simec.lut.band.nir780=4\n" +
            "simec.max.window=100\n" +
            "simec.sensor.resolution_km=0.3\n" +
            "simec.override.sza=" + str(l8_metadata.sun_zenith()) + "\n" +
            "simec.override.vza=" + str(0.0) + "\n" +
            "simec.override.raa=" + str(0.0) + "\n" +
            "simec.watermask.location={prefix}_ACRUNNER_water_mask.tif\n" +
            "simec.cloudmask.location={prefix}_ACRUNNER_cloud_mask.tif\n" +
            "simec.nir.similarity.location={ac_near_similarity_refl}\n" +
            "simec.elevation.location={prefix}_ACRUNNER_DEM.tif\n" +
            simec_aot_string +
            simec_wv_string +
            simec_ozone_override_string +
            background_string +
            "simec.default.background.size=" + str(context["bg_window"]) + "\n"
        )
        context.add_keep_tmp( context["prefix"] + "_ACRUNNER_SIMEC.tif")
    #=========================================================================
    context.enter_stage("Atmospheric correction")
    #=========================================================================
    # Prefer SIMEC background / retrieved AOT when available; otherwise fall
    # back to the command-line overrides.
    atm_background_string = "atm.override.background=" + str(context["bg_window"]) +"\n"
    if context["simec"] == "true":
        atm_background_string = "atm.background.location={prefix}_ACRUNNER_SIMEC.tif\n"
    atm_aot_string = "atm.override.aot=" + str(context["aot_override"]) + "\n"
    if context["aot"] == "true" and returnCodeAOT == 0:
        atm_aot_string = "atm.aot.location={prefix}_ACRUNNER_AOT.tif\n"
    atm_ozone_string = "atm.override.ozone=" + str(context["ozone_override"]) + "\n"
    atm_watervapor_string = "atm.override.watervapor=" + str(context["watervapor_override"]) + "\n"
    context.invoke_ac_runner_mine(
        "[watcor]\n"
        "atm.lut.location={ac_big_disort_lut}\n" +
        "atm.radiance.location={prefix}_ACRUNNER_Scaled_Radiance.tif\n" +
        "atm.override.sza=" + str(l8_metadata.sun_zenith()) + "\n" +
        "atm.override.vza=" + str(0.0) + "\n" +
        "atm.override.raa=" + str(0.0) + "\n" +
        "atm.elevation.location={prefix}_ACRUNNER_DEM.tif\n" +
        "atm.watermask.location={prefix}_ACRUNNER_water_mask.tif\n" +
        atm_background_string +
        atm_aot_string +
        atm_ozone_string +
        atm_watervapor_string +
        "atm.output.location=" + str(context["output_file"]) + "\n" +
        "atm.radiance.bands=0 1 2 3 4 6 7\n"
    )
    #add intermediates
    context.add_keep_tmp( str(context["prefix"] ) + "_ACRUNNER_cloud_mask.tif")
    context.add_keep_tmp( str(context["prefix"] ) +"_ACRUNNER_water_mask.tif")
    #=========================================================================
    context.enter_stage("Remove intermediate files from filesystem")
    #=========================================================================
    keep_tmp = False
    if context["keep_intermediate"] == "true":
        keep_tmp = True
    context.remove_tmp_files(working_folder,keep_tmp)
    print "iCOR Atmospheric correction done for product : " + context["prefix"]
<reponame>InternetNZ/irma-fab-integration<gh_stars>0
#!/usr/bin/env python3
import argparse
import json
import datetime
import time
import logging
import qrcode
import requests
import subprocess
# Deployment-specific configuration. Intentionally left blank here; the
# values are used as argparse defaults below and can be overridden on the
# command line.
RELYING_PARTY_ID = ""
RELYING_PARTY_NAME = ""
RELYING_PARTY_LOGO = ""
RELYING_PARTY_API = ""
API_KEY = ""
IRMA_TOKEN = ""
IRMA_SERVER = ""
# Name of the FAB credential that bundles the identity attributes listed next.
FAB_IDENTITY_CREDENTIAL = 'identity'
FAB_IDENTITY_ATTRIBUTES = [
    'given_names', 'surname', 'date-of-birth', 'gender'
]
# Config logger
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
                    level=logging.INFO)
LOGGER = logging.getLogger(__name__)
def fab_disclose(args):
    """Render a FAB relying-party QR code and poll for the disclosed credential.

    Prints the QR code as ASCII art, then polls the relying-party API up to
    ten times (2 s between empty responses). On success the disclosed
    attributes are printed; otherwise the process exits with status 1.
    """
    code = qrcode.QRCode()
    payload = {
        "relyingPartyLogo": args.relying_party_logo,
        "relyingPartyId": args.relying_party_id,
        "relyingPartySessionId": args.session_id,
        "relyingPartyName": args.relying_party_name,
        "purpose": args.purpose,
        "attributesToSend": args.attributes
    }
    code.add_data(json.dumps(payload, indent=None))
    code.print_ascii()
    LOGGER.info("Session ID: %s", args.session_id)

    attempts_left = 10
    response = None
    received = False
    while attempts_left > 0:
        attempts_left -= 1
        LOGGER.debug("Checking for response ...")
        response = _fetch_fab_disclosed_attributes(args.session_id)
        if response.status_code == requests.codes.no_content:
            # Nothing disclosed yet -- wait and retry.
            time.sleep(2)
            continue
        if response.status_code == requests.codes.ok:
            received = True
            break
        # Any other status: raise for HTTP errors, keep polling otherwise.
        response.raise_for_status()

    if not received:
        LOGGER.warning("Did not receive the disclosed VC! Please try again.")
        exit(1)
    LOGGER.debug("Disclosed VC from FAB: %s", response.json())
    _extract_attributes(response)
def _fetch_fab_disclosed_attributes(session_id):
    """GET the disclosed attributes for *session_id* from the relying-party API."""
    request_headers = {
        "Content-Type": "application/json",
        "x-api-key": API_KEY
    }
    return requests.get(url=f"{RELYING_PARTY_API}{session_id}",
                        headers=request_headers)
def _extract_attributes(response, attributes=None):
    """Print the attribute values disclosed in *response*.

    Parameters
    ----------
    response:
        A ``requests.Response`` whose JSON body contains a verifiable
        credential (``verifiableCredential[0].credentialSubject``).
    attributes:
        Iterable of attribute names to print.  Defaults to the module-level
        ``args.attributes`` for backward compatibility: the original body
        read the global ``args`` directly, which raised ``NameError`` when
        this module was imported instead of run as a script.
    """
    if attributes is None:
        attributes = args.attributes  # legacy fallback on the CLI globals
    disclosed_attributes = response.json()['verifiableCredential'][0]['credentialSubject']
    for attribute in attributes:
        if attribute == FAB_IDENTITY_CREDENTIAL:
            # 'identity' is a bundle: expand it into its individual fields.
            print(str.upper(FAB_IDENTITY_CREDENTIAL))
            for id_attribute in FAB_IDENTITY_ATTRIBUTES:
                print(" {} : {}".format(str.upper(id_attribute), disclosed_attributes[id_attribute]['value']))
        else:
            print("{} : {}".format(str.upper(attribute), disclosed_attributes[attribute]['value']))
def get_fab_disclosed_attributes(args):
    """Fetch the disclosed VC for *args.session_id* and print its attributes."""
    reply = _fetch_fab_disclosed_attributes(args.session_id)
    is_ok = reply.status_code == requests.codes.ok
    if not is_ok:
        reply.raise_for_status()
    _extract_attributes(reply)
def irma_issue_nsn(args):
    """Issue the given National Student Number to an IRMA wallet.

    Shells out to the ``irma`` CLI (list form, no shell) with a 60 s timeout
    and raises CalledProcessError if the command fails.
    """
    command = [
        'irma', 'session', '--server', args.irma_server,
        '-a', 'token', '--key', args.irma_token, '--issue',
        'irma-demo.inz-nsn.nsn={}'.format(args.nsn),
    ]
    if args.verbose:
        command += ['-vv']
    completed = subprocess.run(command, timeout=60)
    completed.check_returncode()
if __name__ == "__main__":
    # Argument parser
    parser = argparse.ArgumentParser(
        description='FAB-IRMA integration tool'
    )
    parser.add_argument(
        '-v',
        '--verbose',
        default=False,
        action='store_true',
        help='More verbose'
    )
    subparsers = parser.add_subparsers(help='Select one of the bellow commands')
    # --- fab_disclose: show a QR code and poll for disclosed attributes ----
    parser_fab_disclose = subparsers.add_parser(
        'fab_disclose',
        help='Generates FAB relying party QR code to disclose attributes.'
    )
    # NOTE(review): required=True makes the default=... values below dead --
    # argparse never applies a default to a required option. Confirm which
    # behaviour is intended (drop required or drop the defaults).
    parser_fab_disclose.add_argument(
        '--relying-party-id',
        nargs='?',
        required=True,
        default=RELYING_PARTY_ID,
        help='Relying party ID'
    )
    parser_fab_disclose.add_argument(
        '--relying-party-name',
        nargs='?',
        required=True,
        default=RELYING_PARTY_NAME,
        help='Relying party name'
    )
    parser_fab_disclose.add_argument(
        '--relying-party-logo',
        nargs='?',
        default=RELYING_PARTY_LOGO,
        help='Relying party name'
    )
    parser_fab_disclose.add_argument(
        '--relying-party-api',
        nargs='?',
        required=True,
        default=RELYING_PARTY_API,
        help='Relying-party API endpoint'
    )
    # NOTE(review): '%s' is a platform-specific strftime extension (epoch
    # seconds on glibc); not portable to all platforms -- confirm.
    parser_fab_disclose.add_argument(
        '--session-id',
        nargs='?',
        default=datetime.datetime.now().strftime('%s'),
        help='Session ID. Default epoch'
    )
    parser_fab_disclose.add_argument(
        '--purpose',
        nargs='?',
        default="Testing FAB-IRMA integration",
        help='Purpose of requesting the attributes'
    )
    parser_fab_disclose.add_argument(
        '--attributes',
        nargs='*',
        default=['nsn'],
        choices=['nsn', 'email', 'photo', 'identity'],
        help='Attributes to be disclosed. Default `NSN`'
    )
    parser_fab_disclose.set_defaults(func=fab_disclose)
    # --- get_fab_disclosed_attributes: fetch results for a session ---------
    parser_get_disclosed_attributes = subparsers.add_parser(
        'get_fab_disclosed_attributes',
        help='Returns the value of disclosed attributes.'
    )
    parser_get_disclosed_attributes.add_argument(
        'session_id',
        help='Session ID'
    )
    parser_get_disclosed_attributes.add_argument(
        '--attributes',
        nargs='*',
        default=['nsn'],
        help='Attributes to be disclosed. Default `NSN`'
    )
    parser_get_disclosed_attributes.set_defaults(func=get_fab_disclosed_attributes)
    # --- irma_issue_nsn: issue an NSN credential via the irma CLI ----------
    parser_irma_issue_nsn = subparsers.add_parser(
        'irma_issue_nsn',
        help='Issues given NSN to IRMA Wallet.'
    )
    parser_irma_issue_nsn.add_argument(
        '--irma-server',
        required=True,
        default=IRMA_SERVER,
        help='IRMA server'
    )
    parser_irma_issue_nsn.add_argument(
        '--irma-token',
        required=True,
        default=IRMA_TOKEN,
        help='IRMA server token'
    )
    parser_irma_issue_nsn.add_argument(
        'nsn',
        help='National Student Number'
    )
    parser_irma_issue_nsn.set_defaults(func=irma_issue_nsn)
    args = parser.parse_args()
    if args.verbose:
        LOGGER.setLevel(logging.DEBUG)
    # Dispatch to the selected sub-command; with no sub-command args.func is
    # missing, so fall back to printing the help text.
    try:
        args.func(args)
    except AttributeError as err:
        parser.print_help()
|
# Copyright 2021 Universität Tübingen, DKFZ and EMBL for the German Human Genome-Phenome Archive (GHGA)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sqlalchemy import (
Column,
Enum,
Boolean,
Integer,
BigInteger,
Float,
Text,
ForeignKey,
Date,
Time,
DateTime,
String,
Table
)
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.orm import relationship, backref
from .meta import Base
import datetime
import uuid
import enum
class DateTimeMode(enum.Enum):
    """Discriminates whether a temporal metadatum holds a date, datetime or time."""
    DATE = 0
    DATETIME = 1
    TIME = 2

    def type(self):
        """Return the ``datetime`` class corresponding to this mode."""
        return {
            DateTimeMode.DATE: datetime.date,
            DateTimeMode.DATETIME: datetime.datetime,
            DateTimeMode.TIME: datetime.time,
        }.get(self)
# Association table for the many-to-many relation between users and services
# (used as secondary= by User.services below).
user_service_table = Table('service_user', Base.metadata,
    Column('user_id', Integer, ForeignKey('users.id')),
    Column('service_id', Integer, ForeignKey('services.id'))
)
class Group(Base):
    """A user group; users, submissions and registration requests belong to it."""
    __tablename__ = 'groups'
    id = Column(Integer, primary_key=True)
    # Stable public identifier, generated server-side.
    uuid = Column(UUID(as_uuid=True), unique=True, default=uuid.uuid4, nullable=False)
    # External, site-configurable identifier -- TODO confirm its exact source.
    site_id = Column(String(50), unique=True, nullable=False, index=True)
    name = Column(Text, nullable=False, unique=True)
    # Relationships
    user = relationship('User', back_populates='group')
    submissions = relationship('Submission', back_populates='group')
    regrequests = relationship('RegRequest', back_populates='group')
class User(Base):
    """An account holder; belongs to exactly one Group."""
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    uuid = Column(UUID(as_uuid=True), unique=True, default=uuid.uuid4, nullable=False)
    site_id = Column(String(50), unique=True, nullable=False, index=True)
    email = Column(Text, unique=True)
    fullname = Column(Text)
    # Password hash; 60 chars suggests bcrypt -- TODO confirm the scheme.
    pwhash = Column(String(60))
    group_id = Column(Integer, ForeignKey('groups.id'), nullable=False)
    enabled = Column(Boolean(create_constraint=False), nullable=False)
    # Authorization flags: site-wide admin, group admin, site-wide read access.
    site_admin = Column(Boolean(create_constraint=False), nullable=False)
    group_admin = Column(Boolean(create_constraint=False), nullable=False)
    site_read = Column(Boolean(create_constraint=False), nullable=False)
    # Relationships
    group = relationship('Group', back_populates='user')
    metadatasets = relationship('MetaDataSet', back_populates='user')
    files = relationship('File', back_populates='user')
    passwordtokens = relationship('PasswordToken', back_populates='user')
    apikeys = relationship('ApiKey', back_populates='user')
    services = relationship('Service', secondary=user_service_table, back_populates='users')
    service_executions = relationship('ServiceExecution', back_populates='user')
class ApiKey(Base):
    """An API token owned by a user; NULL `expires` means no expiry."""
    __tablename__ = 'apikeys'
    id = Column(Integer, primary_key=True)
    uuid = Column(UUID(as_uuid=True), unique=True, default=uuid.uuid4, nullable=False)
    user_id = Column(Integer, ForeignKey('users.id'), nullable=False)
    # The token value itself (64 chars), unique across all keys.
    value = Column(String(64), nullable=False, unique=True)
    # Free-form, user-chosen label for the key.
    label = Column(String(200))
    expires = Column(DateTime, nullable=True)
    # Relationships
    user = relationship('User', back_populates='apikeys')
class PasswordToken(Base):
    """A single-user password(-reset) token with a mandatory expiry."""
    __tablename__ = 'passwordtokens'
    id = Column(Integer, primary_key=True)
    uuid = Column(UUID(as_uuid=True), unique=True, default=uuid.uuid4, nullable=False)
    user_id = Column(Integer, ForeignKey('users.id'), nullable=False)
    value = Column(Text, nullable=False, unique=True)
    # Unlike ApiKey.expires, this is NOT nullable: every token expires
    expires = Column(DateTime, nullable=False)
    # Relationships
    user = relationship('User', back_populates='passwordtokens')
class RegRequest(Base):
    """A pending registration request, for an existing or a new group."""
    __tablename__ = 'regrequests'
    id = Column(Integer, primary_key=True)
    uuid = Column(UUID(as_uuid=True), unique=True, default=uuid.uuid4, nullable=False)
    fullname = Column(Text, nullable=False)
    email = Column(Text, nullable=False)
    # Nullable: presumably NULL when the requester asks for a new group
    # (see new_group_name) — TODO confirm against the registration flow
    group_id = Column(Integer, ForeignKey('groups.id'), nullable=True)
    new_group_name = Column(Text)
    # Relationships
    group = relationship('Group', back_populates='regrequests')
class File(Base):
    """An uploaded (or pending-upload) file owned by one user."""
    __tablename__ = 'files'
    id = Column(Integer, primary_key=True)
    uuid = Column(UUID(as_uuid=True), unique=True, default=uuid.uuid4, nullable=False)
    site_id = Column(String(50), unique=True, nullable=False, index=True)
    name = Column(Text, nullable=False)
    # Backend storage location; NULL until the content is stored — TODO confirm
    storage_uri = Column(String(2048), unique=True, nullable=True)
    access_token = Column(String(64), nullable=True)
    # False while the upload is still pending
    content_uploaded = Column(Boolean(create_constraint=False), nullable=False)
    checksum = Column(Text, nullable=False)
    filesize = Column(BigInteger, nullable=True)
    user_id = Column(Integer, ForeignKey('users.id'), nullable=False)
    upload_expires = Column(DateTime, nullable=True)
    # Relationships
    # One-to-one: at most one metadata record refers to this file
    metadatumrecord = relationship('MetaDatumRecord', back_populates='file', uselist=False)
    user = relationship('User', back_populates='files')
    downloadtokens = relationship('DownloadToken', back_populates='file')
class DownloadToken(Base):
    """An expiring token granting download access to one file."""
    __tablename__ = 'downloadtokens'
    id = Column(Integer, primary_key=True)
    uuid = Column(UUID(as_uuid=True), unique=True, default=uuid.uuid4, nullable=False)
    file_id = Column(Integer, ForeignKey('files.id'), nullable=False)
    value = Column(Text, nullable=False, unique=True)
    expires = Column(DateTime, nullable=False)
    # Relationships
    file = relationship('File', back_populates='downloadtokens')
class Submission(Base):
    """A group's submission, bundling one or more metadata sets."""
    __tablename__ = 'submissions'
    id = Column(Integer, primary_key=True)
    site_id = Column(String(50), unique=True, nullable=False, index=True)
    uuid = Column(UUID(as_uuid=True), unique=True, default=uuid.uuid4, nullable=False)
    date = Column(DateTime)
    label = Column(String(100), nullable=True)
    group_id = Column(Integer, ForeignKey('groups.id'), nullable=False)
    # Relationships
    metadatasets = relationship('MetaDataSet', back_populates='submission')
    group = relationship('Group', back_populates='submissions')
class MetaDatum(Base):
    """Definition of one metadata field: validation, display and flags."""
    # Table name is 'metadata' in the DB; the class avoids the name because
    # 'metadata' is reserved on SQLAlchemy declarative classes
    __tablename__ = 'metadata'
    id = Column(Integer, primary_key=True)
    uuid = Column(UUID(as_uuid=True), unique=True, default=uuid.uuid4, nullable=False)
    name = Column(Text, nullable=False)
    # Optional validation regex for submitted values
    regexp = Column(Text, nullable=True)
    short_description = Column(Text, nullable=True)
    long_description = Column(Text, nullable=True)
    # Datetime parsing/format hints; only meaningful for date/time fields
    datetimefmt = Column(Text, nullable=True)
    datetimemode = Column(Enum(DateTimeMode), nullable=True)
    mandatory = Column(Boolean(create_constraint=False), nullable=False)
    example = Column(Text, nullable=False)
    # Display/sort position of the field
    order = Column(Integer, nullable=False)
    # True when the value references a File rather than inline text
    isfile = Column(Boolean(create_constraint=False), nullable=False)
    # Uniqueness scopes for submitted values — presumably enforced in
    # application code, not as DB constraints; verify before relying on it
    submission_unique = Column(Boolean(create_constraint=False), nullable=False)
    site_unique = Column(Boolean(create_constraint=False), nullable=False)
    service_id = Column(Integer, ForeignKey('services.id'), nullable=True)
    # Relationships
    metadatumrecords = relationship('MetaDatumRecord', back_populates='metadatum')
    service = relationship('Service', back_populates='target_metadata')
class MetaDatumRecord(Base):
    """One concrete value of a MetaDatum within a MetaDataSet."""
    __tablename__ = 'metadatumrecords'
    id = Column(Integer, primary_key=True)
    uuid = Column(UUID(as_uuid=True), unique=True, default=uuid.uuid4, nullable=False)
    metadatum_id = Column(Integer, ForeignKey('metadata.id'), nullable=False)
    metadataset_id = Column(Integer, ForeignKey('metadatasets.id'), nullable=False)
    # Set when the metadatum is file-typed (MetaDatum.isfile) — TODO confirm
    file_id = Column(Integer, ForeignKey('files.id'), nullable=True)
    value = Column(Text, nullable=True)
    # Relationships
    metadatum = relationship('MetaDatum', back_populates='metadatumrecords')
    metadataset = relationship('MetaDataSet', back_populates='metadatumrecords')
    file = relationship('File', back_populates='metadatumrecord')
class MetaDataSet(Base):
    """A MetaDataSet represents all metadata associated with *one* record."""
    __tablename__ = 'metadatasets'
    id = Column(Integer, primary_key=True)
    site_id = Column(String(50), unique=True, nullable=False, index=True)
    uuid = Column(UUID(as_uuid=True), unique=True, default=uuid.uuid4, nullable=False)
    user_id = Column(Integer, ForeignKey('users.id'), nullable=False)
    # NULL until the set is attached to a submission — TODO confirm
    submission_id = Column(Integer, ForeignKey('submissions.id'), nullable=True)
    # Deprecation/replacement bookkeeping: a deprecated set may point to
    # its replacement via replaced_by_id (self-referential FK)
    is_deprecated = Column(Boolean, default=False)
    deprecated_label = Column(String, nullable=True)
    replaced_by_id = Column(Integer, ForeignKey('metadatasets.id'), nullable=True)
    # Relationships
    user = relationship('User', back_populates='metadatasets')
    submission = relationship('Submission', back_populates='metadatasets')
    metadatumrecords = relationship('MetaDatumRecord', back_populates='metadataset')
    # Self-referential: 'replaces' are the sets this one superseded;
    # the backref 'replaced_by' walks the other direction
    replaces = relationship('MetaDataSet', backref=backref('replaced_by', remote_side=[id]))
    service_executions = relationship('ServiceExecution', back_populates = 'metadataset')
class ApplicationSetting(Base):
    """Typed key-value store for application-wide settings.

    Each row carries one key; presumably only the *_value column matching
    the setting's type is populated — TODO confirm against accessor code.
    """
    __tablename__ = 'appsettings'
    id = Column(Integer, primary_key=True)
    uuid = Column(UUID(as_uuid=True), unique=True, default=uuid.uuid4, nullable=False)
    key = Column(Text, unique=True, nullable=False)
    int_value = Column(Integer, nullable=True)
    str_value = Column(Text, nullable=True)
    float_value = Column(Float, nullable=True)
    date_value = Column(Date, nullable=True)
    time_value = Column(Time, nullable=True)
class Service(Base):
    """A service that users can run against metadata sets."""
    __tablename__ = 'services'
    id = Column(Integer, primary_key=True)
    uuid = Column(UUID(as_uuid=True), unique=True, default=uuid.uuid4, nullable=False)
    site_id = Column(String(50), unique=True, nullable=False, index=True)
    name = Column(Text, nullable=True, unique=True)
    # Relationships
    # Many-to-many: users authorized for this service. NOTE: the original
    # declared this relationship twice with identical arguments; the second
    # assignment silently overwrote the first, so the duplicate was removed.
    users = relationship('User', secondary=user_service_table, back_populates='services')
    # unfortunately, 'metadata' is a reserved keyword for sqlalchemy classes
    service_executions = relationship('ServiceExecution', back_populates='service')
    target_metadata = relationship('MetaDatum', back_populates='service')
class ServiceExecution(Base):
    """Audit record: one run of a service by a user on a metadata set."""
    __tablename__ = 'serviceexecutions'
    id = Column(Integer, primary_key=True)
    uuid = Column(UUID(as_uuid=True), unique=True, default=uuid.uuid4, nullable=False)
    service_id = Column(Integer, ForeignKey('services.id'), nullable=False)
    user_id = Column(Integer, ForeignKey('users.id'), nullable=False)
    metadataset_id = Column(Integer, ForeignKey('metadatasets.id'), nullable=False)
    # Timestamp of the execution (column named after the datetime type)
    datetime = Column(DateTime, nullable=False)
    # Relationships
    metadataset = relationship('MetaDataSet', back_populates='service_executions')
    service = relationship('Service', back_populates='service_executions')
    user = relationship('User', back_populates='service_executions')
|
"""
masks.py
Provides functions which create specialized footprints or masks which are
subsequently fed as inputs to functions in scipy.ndimage.filters module.
AUTHOR: <NAME>
DATE: 5/17/2013
"""
import numpy as np
from scipy.ndimage import filters
import math
def getDeriv(image, weights=(3./16, 10./16, 3./16), axes=(0,), mode="reflect",
             cval=0.0):
    """
    Calculate a first or second derivative of a multi-dimensional image
    array by the method of finite differences using a 3x3 stencil.

    Images need not be 2D (e.g. 3D tomographic volumes should work in
    principle), but higher-dimensional use has not been fully tested.

    Parameters
    ----------
    image: array_like
        Grayscale image data (3-channel color is not supported).
    weights: sequence of three floats, optional
        Smoothing kernel applied along the non-differentiated axes,
        selecting the finite-difference flavor (Prewitt, Sobel, Scharr,
        etc.).  Defaults to Scharr, (3/16, 10/16, 3/16).  It should be
        normalized to sum to 1; otherwise cross derivatives (dxdy type)
        are scaled inconsistently relative to first and non-cross second
        derivatives.
    axes: scalar or sequence, optional
        One axis (first derivative) or two axes (second derivative):
            axes=0        1st derivative, x-axis
            axes=[0]      same as above
            axes=(1, 1)   2nd derivative, y-axis (dydy)
            axes=[0, 2]   cross 2nd derivative dxdz (3D image)
    mode: ('reflect', 'constant', 'nearest', 'mirror', 'wrap'), optional
        Edge-handling mode; see scipy.ndimage.correlate1d().
    cval: scalar, optional
        Fill value when mode='constant'; see scipy.ndimage.correlate1d().

    Returns
    -------
    output: ndarray
        Estimate of the first or second partial derivative of image
        brightness at each pixel.

    Raises
    ------
    TypeError, ValueError
        If weights is not a length-3 sequence, or more than two axes are
        requested.
    """
    # Force float to avoid integer rounding in the difference stencils.
    # (Defaults are tuples rather than lists so no mutable default leaks
    # state across calls.)
    image = np.asarray(image, dtype=float)
    wmsg = ('weights input variable must be an array or list with '
            'exactly three elements')
    try:
        nw = len(weights)  # Fails if weights is not a sized iterable
    except TypeError:
        raise TypeError(wmsg)
    if nw != 3:
        raise ValueError(wmsg)
    # Normalize the axes specification to a list and pick the stencil
    try:
        nx = len(axes)
    except TypeError:
        # Scalar axes value: first derivative along a single axis
        nx = 1
        myaxes = [axes]
    else:
        myaxes = list(axes)
    if nx == 1:
        wght = [-0.5, 0, 0.5]            # central first difference
    elif nx == 2:
        if myaxes[0] == myaxes[1]:
            wght = [1.0, -2.0, 1.0]      # second difference, same axis twice
            myaxes = [myaxes[0]]
        else:
            wght = [-0.5, 0, 0.5]        # cross derivative: first diff twice
    else:
        raise ValueError('Too many axes: 3rd derivatives and higher are ' +
                         'not yet supported')
    # Apply the difference stencil successively along each requested axis.
    # BUGFIX: the original filtered the raw `image` on every pass, so for
    # cross derivatives (two distinct axes) the second pass discarded the
    # first and only the last axis was differentiated.
    output = image
    for ax in myaxes:
        output = filters.correlate1d(output, wght, ax, mode=mode, cval=cval)
    # Apply the smoothing weights (Prewitt, Sobel, Scharr, ...) along all
    # axes that were not differentiated
    otheraxes = [ax for ax in range(image.ndim) if ax not in myaxes]
    for ax in otheraxes:
        output = filters.correlate1d(output, weights, ax, mode=mode, cval=cval)
    return output
def circFootprint(radius, method='Area', npoints=10, dtype=bool):
    """
    Generate a circular footprint of the given radius, intended for use
    with the usan() function.

    Parameters
    ----------
    radius: scalar
        Radius of the circular footprint, in pixels.
    method: ('Center', 'center', 'Area', 'area'), optional
        'center': a pixel belongs to the footprint iff its center point
        lies within the radius; each pixel is exactly 0 or 1 (in the
        chosen dtype).  'area': an npoints x npoints subgrid is
        superimposed on each pixel and membership is the fraction of
        subgrid points inside the radius; with a float dtype pixels may
        take fractional values, otherwise the fraction is rounded to
        0 or 1.  Case-insensitive (a backward-compatible generalization
        of the original 'Center'/'center'/'Area'/'area' matching).
    npoints: scalar, optional
        Subgrid resolution per pixel for the 'area' method.
    dtype: data-type, optional
        Output dtype.  float is only really meaningful with the 'area'
        method; with 'center' float output is still restricted to 0./1.

    Returns
    -------
    footprint: ndarray
        Square array; zero means outside the footprint, non-zero means
        (full or, for float, partial) membership.

    Raises
    ------
    ValueError
        If method is not one of the supported values.
    """
    meth = method.lower() if isinstance(method, str) else method
    if meth == 'center':
        # Membership by pixel-center test on a square grid just large
        # enough to contain the radius
        halfext = int(math.floor(radius))
        size = 2 * halfext + 1
        v, h = np.ogrid[-halfext:(halfext + 1), -halfext:(halfext + 1)]
        footprint = np.where(v**2 + h**2 <= radius**2,
                             np.ones((size, size), dtype=dtype),
                             np.zeros((size, size), dtype=dtype))
    elif meth == 'area':
        # Membership by fraction of pixel area inside the radius, sampled
        # on an npoints x npoints subgrid centered within each pixel
        step = 1. / npoints
        v, h = np.ogrid[(-0.5 + step/2):(0.5 + step/2):step,
                        (-0.5 + step/2):(0.5 + step/2):step]
        halfext = int(math.ceil(radius - 0.5))
        size = 2 * halfext + 1
        fpfloat = np.zeros((size, size), dtype=float)
        for ii in range(-halfext, halfext + 1):
            for ij in range(-halfext, halfext + 1):
                # True marks subgrid points inside the footprint radius;
                # .sum() replaces the original Python-level sum(sum(...))
                subgrid = ((v - ii)**2 + (h - ij)**2 <= radius**2)
                fpfloat[ii + halfext,
                        ij + halfext] = subgrid.sum() / float(npoints**2)
        # dtypes that cannot represent fractions get rounded membership
        if dtype == bool or dtype == int:
            footprint = np.where(fpfloat >= 0.5, 1, 0).astype(dtype)
        else:
            footprint = fpfloat.astype(dtype)
        # Trim a one-pixel perimeter band that falls entirely outside the
        # circle, if the trial grid accidentally included one
        if not footprint[0, halfext]:
            footprint = footprint[1:(2*halfext), 1:(2*halfext)]
    else:
        raise ValueError('Method ' + str(method) + ' not supported')
    return footprint
def usan(image, mode='Edge', radius=3.4, fptype=bool, t=25, gfrac=None,
         cgthresh=None, nshades=256):
    """
    Calculate a raw edge or corner response via the SUSAN algorithm.

    Reference: "SUSAN--A New Approach to Low Level Image Processing",
    Smith & Brady, Technical Report TR95SMSIc (1995),
    http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.24.2763 ;
    a slightly abridged version appears in International Journal of
    Computer Vision, 23(1), 45-78 (1997).

    Parameters
    ----------
    image: array_like
        Grayscale image data (3-channel color is not supported).
    mode: ('Edge', 'edge', 'EdgeDir', 'edgedir', 'Corner', 'corner'),
        optional
        'Edge'/'Corner' modes return the edge/corner response.  'EdgeDir'
        returns an estimate of the edge normal angle in degrees; a value
        is produced for every pixel but is generally only meaningful
        where the edge response is non-zero.
    radius: scalar, optional
        Circular footprint radius in pixels, passed to circFootprint().
        Default 3.4, as recommended in the reference.
    fptype: data-type, optional
        Footprint dtype, passed to circFootprint().  bool reproduces the
        algorithm as originally described; float is a trivial extension
        that reweights rim pixels by partial-area coverage, with little
        observable difference.
    t: scalar, optional
        "Soft", bidirectional (+/- t) grayscale-difference threshold for
        USAN membership (eq. 4).  Default 25 levels, assuming 256 shades.
    gfrac: scalar, optional
        Fraction of the maximum USAN pixel count assigned to "g" in
        eq. 3.  Per the authors, defaults to 0.75 in edge mode and 0.5
        in corner mode.
    cgthresh: scalar, optional
        USAN center-of-gravity threshold, in pixels, switching between
        two approximation regimes.  Defaults to 0.3*radius in 'EdgeDir'
        mode (about 1 pixel at the default radius, per the authors) and,
        by trial-and-error, 0.45*radius in 'Corner' mode.
    nshades: scalar, optional
        Number of distinct integer grayscale levels in the image format.
        Default 256 (8-bit); use 4096 or 65536 for 12/16-bit formats.

    Returns
    -------
    R or edgedir: ndarray
        Edge/corner response (eq. 3) for 'Edge'/'Corner' modes, or the
        raw estimated edge normal direction for 'EdgeDir' mode.
    """
    # Condition input variables
    image = np.asarray(image)
    # Defaults suggested in the reference, unless the user overrides
    if gfrac is None:
        if mode == 'Edge' or mode == 'edge':
            gfrac = 0.75
        elif mode == 'Corner' or mode == 'corner':
            gfrac = 0.5
    # cgthresh default from the reference ('EdgeDir') or from
    # trial-and-error experimentation ('Corner')
    if cgthresh is None:
        if mode == 'EdgeDir' or mode == 'edgedir':
            cgthresh = 0.3 * radius
        elif mode == 'Corner' or mode == 'corner':
            cgthresh = 0.45 * radius
    # Lookup table for eq. 4, as recommended in the reference
    # (vectorized; replaces the original element-wise math.exp loop)
    idx = np.arange(nshades)
    explookup = np.exp(-(idx.astype(float) / t)**6)
    # Circular USAN footprint and derived helper arrays
    fp = circFootprint(radius, dtype=fptype)
    # For fptype=bool this flattens to all ones; for fptype=float, rim
    # pixels are reweighted by the fraction of their area inside the
    # footprint radius
    areawgt = fp[(fp != 0)]
    # generic_filter() expects a footprint of zeros and ones only
    fp = fp.astype(bool).astype(int)
    # (x, y) offsets between the nucleus and each footprint pixel.
    # BUGFIX: integer (floor) division keeps halfext integral under
    # Python 3; the original "/ 2" true division yielded a float and
    # hence float-valued mgrid offset arrays.
    halfext = (fp.shape[1] - 1) // 2
    xdiff, ydiff = np.mgrid[-halfext:(halfext + 1), -halfext:(halfext + 1)]
    xdiff = xdiff[(fp != 0)]
    ydiff = ydiff[(fp != 0)]

    def filterfunc(maskout, areawgt, xdiff, ydiff, radius, mode, t, explookup,
                   gfrac, cgthresh, npoints=10):
        """
        Per-pixel worker handed to scipy.ndimage.filters.generic_filter();
        called once for each pixel of the image.

        maskout: flattened ndarray
            Values of the surrounding pixels falling within the footprint.
        areawgt: flattened ndarray
            Same size as maskout; fractional pixel-area weights when
            fptype=float, otherwise all ones.
        xdiff, ydiff: flattened ndarray
            (x, y) offsets between each footprint pixel and the nucleus.
        radius, mode, t, gfrac, cgthresh
            Straight pass-through of the usan() inputs.
        explookup: ndarray
            Lookup table for eq. 4; length matches usan()'s nshades.
        npoints: scalar, optional
            Sub-pixel lattice points per pixel for the USAN contiguity
            test in corner mode.

        Returns the scalar edge/corner response or raw edge normal
        direction for this pixel, depending on mode (see usan()).
        """
        # Index and intensity of the nucleus (center pixel)
        ntot = len(maskout) - 1
        ctridx = ntot // 2
        nucleus = maskout[ctridx]
        # Remove the nucleus from all same-length companion arrays
        maskout = np.delete(maskout, ctridx)
        areawgt = np.delete(areawgt, ctridx)
        xdiff = np.delete(xdiff, ctridx)
        ydiff = np.delete(ydiff, ctridx)
        # Grayscale difference between nucleus and neighbors; cast back
        # to int to index the lookup table (generic_filter hands the
        # pixel data over as float -- see the note at the bottom of
        # usan() for this convoluted round trip)
        graydiff = np.abs(maskout - nucleus * np.ones(len(maskout))).astype(int)
        # c as in eq. 4; n reduces to eq. 2 when areawgt is all ones
        c = explookup[graydiff]
        n = (areawgt * c).sum()
        # Maximum possible USAN area (pixel count of the circular mask)
        nmax = areawgt.sum().astype(float)
        if mode == 'Edge' or mode == 'edge':
            # Eq. 3 in the reference
            R = gfrac * nmax - n
            if R < 0:
                R = 0.
            return R
        elif mode == 'EdgeDir' or mode == 'edgedir':
            denom = (areawgt * c).sum()
            if denom:
                # USAN center of gravity, eq. 5
                xcg = ((xdiff * areawgt * c).sum()) / denom
                ycg = ((ydiff * areawgt * c).sum()) / denom
            else:
                # Divide-by-zero guard: a lone noisy pixel surrounded by
                # many others of dissimilar brightness
                xcg, ycg = 0, 0
            cgdist = math.sqrt(xcg**2 + ycg**2)
            if n >= (2 * radius) and cgdist >= cgthresh:
                # "Inter-pixel" case from the reference.  The CG vector
                # points perpendicularly away from the edge (it is NOT a
                # light-to-dark gradient), so it flips by 180 degrees as
                # the mask crosses the edge; angles between 90 and 270
                # degrees are mapped onto -90..+90 at a later processing
                # stage for consistency with the intra-pixel case below.
                edgedir = math.atan2(ycg, xcg) * 180 / math.pi
            else:
                # "Intra-pixel" case: orientation from weighted second
                # moments.  xvar/yvar are positive definite, so
                # atan2(yvar, xvar) lies in [0, 90] parallel to the edge;
                # subtracting 90 gives the normal in [-90, 0], and a
                # negative covariance mirror-flips the sign (the edge
                # line then has negative slope).
                xvar = (xdiff * xdiff * areawgt * c).sum()      # Eq. 6
                yvar = (ydiff * ydiff * areawgt * c).sum()      # Eq. 7
                xycovar = (xdiff * ydiff * areawgt * c).sum()   # Eq. 8
                edgedir = math.atan2(yvar, xvar) * 180 / math.pi - 90
                if xycovar < 0:
                    edgedir = -edgedir
            return edgedir
        elif mode == 'Corner' or mode == 'corner':
            # Eq. 3 again, but with the corner-mode default for gfrac
            R = gfrac * nmax - n
            if R < 0:
                R = 0.
            # False-corner suppression, only for a non-zero raw response
            if R > 0:
                denom = (areawgt * c).sum()
                if denom:
                    # USAN center of gravity, eq. 5
                    xcg = ((xdiff * areawgt * c).sum()) / denom
                    ycg = ((ydiff * areawgt * c).sum()) / denom
                else:
                    # Divide-by-zero guard, as above
                    xcg, ycg = 0, 0
                cgdist = math.sqrt(xcg**2 + ycg**2)
                # False corner check #1: CG too close to the nucleus
                if cgdist < cgthresh:
                    R = 0
                else:
                    # CG vector direction.  float() rather than the
                    # original .astype() so the plain-int (0, 0) fallback
                    # above cannot raise AttributeError.
                    theta = math.atan2(float(ycg), float(xcg))
                    # Walk sub-pixel-spaced points along the CG direction,
                    # snapping each to its nearest footprint pixel
                    for ii in np.arange(0, radius, 1. / npoints):
                        xtest = int(round(ii * math.cos(theta)))
                        ytest = int(round(ii * math.sin(theta)))
                        for ij in range(len(xdiff)):
                            # Find the matching footprint index ij
                            if xtest == xdiff[ij] and ytest == ydiff[ij]:
                                # False corner check #2: non-contiguous USAN
                                if areawgt[ij] == 1 and graydiff[ij] > t:
                                    R = 0
                                    break
                        if R == 0:
                            break
            return R
        else:
            raise ValueError('Mode ' + str(mode) + ' not recognized')

    # Perform the USAN filter operation.  image is cast to float so that
    # generic_filter() returns float output; inside filterfunc the
    # grayscale values are cast back to int to index the lookup table
    # (the reference recommends the table since only nshades distinct
    # pixel values exist).
    extraarg = (areawgt, xdiff, ydiff, radius, mode, t, explookup, gfrac,
                cgthresh)
    return filters.generic_filter(image.astype(float), filterfunc,
                                  footprint=fp, extra_arguments=extraarg)
|
# Repository: roberamy/CS467_Animal_Adoption -- file: admin.py
###############################################################################
#
# Author: <NAME>, <NAME>, <NAME>
# Email: <EMAIL>
# Course: CS467_400_W2021
#
# Description: Routes for admin page
#
# Ref: https://werkzeug.palletsprojects.com/en/1.0.x/utils/
# https://wtforms.readthedocs.io/en/2.3.x/crash_course/
# https://docs.python.org/3/library/io.html
# https://wtforms.readthedocs.io/en/2.3.x/forms/
# https://flask.palletsprojects.com/en/1.1.x/api/
# https://www.w3schools.com/python/ref_string_join.asp
###############################################################################
# Library modules
from flask import Blueprint, request, redirect, render_template
from flask import session, send_from_directory
from google.cloud import datastore
import json
import constants
import logging
from datetime import datetime
from werkzeug.utils import secure_filename
from os.path import join, dirname, realpath
import random
import string
from google.cloud import storage
# User modules
from repository import PetDsRepository
from forms.admin_profile_form import AdminProfileForm
from OAuth import printSession
# Absolute path used for temporary storage of uploaded files
UPLOADS_PATH = join(dirname(realpath(__file__)), 'uploads/')
# File extensions accepted for uploads
ALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])
# Blueprint collecting all admin routes; registered by the app factory
bp = Blueprint('admin', __name__)
# Shared Cloud Datastore client for this module
client = datastore.Client()
# Used for /profiles route
# Default filter selections for the profiles view
species = "Any"
breed = "Any"
# NOTE(review): fetched once at import time, so this is a snapshot of all
# pets taken when the module loads -- verify that staleness is acceptable
pdata = PetDsRepository.all()
###############################################################################
@bp.route('/admin_profiles', methods=['GET'])
def adminPage():
    """Render the admin profile overview for logged-in admin accounts."""
    logging.debug(printSession('***** PROFILE ADMIN *****'))
    # Guard clauses: only authenticated admin sessions may proceed
    if 'isAdmin' not in session:
        return "isAdmin not in session."
    if session['isAdmin'] is False:
        return "Not an admin account."
    # Fetch every pet entity (see 'repository.py') and massage each one
    # for display in 'admin_profiles.html'
    pets = PetDsRepository.all()
    for pet in pets:
        # Display dates as yyyy-mm-dd
        pet['created_at'] = datetime.strftime(pet['created_at'], "%Y-%m-%d")
        # Newline-separate the properties to improve the HTML rendering
        pet['properties'] = "\n".join(pet['properties'].split(','))
    return render_template('admin_profiles.html', pets=pets)
###############################################################################
@bp.route('/add_profile', methods=["GET"])
def add_profile():
    """Render the blank add/edit profile form (admins only)."""
    logging.debug(printSession('***** ADD PROFILE *****'))
    # Guard clauses: only authenticated admin sessions may proceed.
    if 'isAdmin' not in session:
        return "isAdmin not in session."
    if session['isAdmin'] is False:
        return "Not an admin account."
    # All breeds, alphabetically sorted, for the breed drop-down.
    query = client.query(kind=constants.breeds)
    query.order = ["name"]
    breeds = list(query.fetch())
    return render_template('add_edit_profile.html',
                           breeds=breeds,
                           form=AdminProfileForm(),
                           public_url=constants.BUCKET)
###############################################################################
@bp.route('/update_profile/<key>', methods=["GET"])
def update_profile(key):
    """Render the add/edit form pre-filled with an existing pet entity.

    Admins only; non-admin requests get a plain-text refusal.
    """
    logging.debug(printSession('***** UPDATE ADMIN *****'))
    if 'isAdmin' not in session:
        return "isAdmin not in session."
    if session['isAdmin'] is False:
        return "Not an admin account."
    # FIX: fetch the entity only after the admin check passes, so
    # unauthenticated requests no longer trigger a datastore read.
    pet = PetDsRepository.get(key)
    # All breeds, alphabetically sorted, for the breed drop-down.
    query = client.query(kind=constants.breeds)
    query.order = ["name"]
    breeds = list(query.fetch())
    return render_template('add_edit_profile.html',
                           pet=pet,
                           breeds=breeds,
                           public_url=constants.BUCKET)
###############################################################################
@bp.route('/store_profile', methods=["POST"])
def store_profile():
    """Create or update a pet profile from the posted form.

    Returns a JSON tuple (body, 200) with keys 'success' and 'message';
    unauthenticated requests get a 403.
    """
    if 'sub' not in session:
        return ("sub not in session.", 403)
    # AdminProfileForm performs the input validation (see forms/)
    form = AdminProfileForm(request.form)
    if form.validate():
        if request.form['pet_key'] == '':
            # No key posted: create a new pet entity
            PetDsRepository.create(request.form)
        else:
            # Key posted: update the existing entity
            PetDsRepository.update(
                form=request.form, key=request.form['pet_key'])
        responseBody = {"success": True,
                        "message": "Data Successfully saved"}
    else:
        # BUG FIX: previously only the *last* error leaked out of the loop
        # (and a field without messages could raise NameError). Report all
        # validation errors instead.
        messages = [
            fieldName.title() + ': ' + err
            for fieldName, errorMessages in form.errors.items()
            for err in errorMessages
        ]
        for message in messages:
            logging.debug(message)
        responseBody = {"success": False, "message": '; '.join(messages)}
    return (json.dumps(responseBody), 200)
###############################################################################
# Returns random character string of provided length to be concatenated
# with fileName before storing in Google Storage bucket
def get_random_string(length):
    """Return `length` random lowercase ASCII letters.

    Used to make uploaded file names unique before storing them in the
    Google Storage bucket (not security-sensitive).
    """
    chars = [random.choice(string.ascii_lowercase) for _ in range(length)]
    return ''.join(chars)
###############################################################################
# Route to add image to storage bucket
@bp.route('/add_image', methods=["POST"])
def add_image():
    """Upload the posted 'image' file to the Google Storage bucket.

    Returns JSON {"success": bool, "message": str[, "profile_image_name"]}.
    """
    file = request.files['image']
    client = storage.Client()
    bucket = client.get_bucket('datingappforanimaladoption.appspot.com')
    # BUG FIX: the empty-filename branch previously fell through into the
    # upload branch (a FileStorage object is truthy), overwriting the error
    # response. Return early instead.
    if file.filename == '':
        responseBody = {"success": False, "message": "No File Selected"}
        return (json.dumps(responseBody), 200)
    # Append a random suffix so repeated uploads of the same name don't collide
    name = file.filename.split('.')[0] + get_random_string(8)
    # Secure file names; [-1] avoids an IndexError on extension-less names
    filename = secure_filename(name + '.' + file.filename.split('.')[-1])
    blob = bucket.blob('uploads/' + filename)
    # FileStorage.read() is the documented way to get the uploaded bytes
    # (the old .getvalue() relied on attribute forwarding to the stream).
    blob.upload_from_string(file.read())
    responseBody = {"success": True, "message": "File Saved",
                    "profile_image_name": filename}
    return (json.dumps(responseBody), 200)
###############################################################################
# Route to delete profile from datastore
@bp.route('/delete_profile', methods=["POST"])
def delete_profile():
    """Delete the pet entity identified by the posted datastore key."""
    # Deletion is delegated to the PetDsRepository singleton (repository.py)
    PetDsRepository.delete_profile(key=request.form['key'])
    responseBody = {"success": True, "message": "Deleted"}
    return (json.dumps(responseBody), 200)
###############################################################################
# Route to download a previously uploaded file from the local uploads folder
@bp.route('/uploads/<filename>')
def send_file(filename):
    # NOTE(review): this name shadows flask.send_file within this module.
    return send_from_directory(UPLOADS_PATH, filename)
|
<gh_stars>1-10
import cv2
import numpy
import platform
import plateRecog
import SimpleCV
import time
import socket
import databaseConstants as const
class SpearHUD:
    """Heads-up display for the SPEAR license-plate recognition rig.

    Streams camera frames (or a static dummy image), overlays HUD graphics,
    and on a key press scans the frame for a plate, optionally querying a
    SPEAR server over TCP for an assessment of the recognized plate code.
    NOTE(review): Python 2 only (print statements, tuple-unpack lambdas).
    """
    def __init__(self, hudPath, maskPath, scanHudPath, camResolution=(1920, 1080),
                 camIndex=1, displayResolution=(960, 540), windowName="SPEAR",
                 endKey='q', capKey='s', readKey='x', cannyKey='z',
                 incTH1='1', decTH1='!', incTH2='2', decTH2='@',
                 serverHost=None, serverPort=None,
                 savePath=None, dummyImagePath=None):
        #Load images to be used for HUD
        self.hudImage = cv2.imread(hudPath)
        self.maskImage = cv2.imread(maskPath)
        self.scanHudImage = cv2.imread(scanHudPath)
        # Camera / display configuration
        self.camResolution = camResolution
        self.camIndex = camIndex
        self.displayResolution = displayResolution
        self.windowName = windowName
        # Keybindings (quit / capture / scan).  NOTE(review): cannyKey and the
        # incTH*/decTH* threshold keys are stored but never read in this class.
        self.endKey = endKey
        self.capKey = capKey
        self.readKey = readKey
        self.cannyKey = cannyKey
        self.incTH1 = incTH1
        self.decTH1 = decTH1
        self.incTH2 = incTH2
        self.decTH2 = decTH2
        # Optional SPEAR server endpoint for plate assessment
        self.serverHost = serverHost
        self.serverPort = serverPort
        #Saves images as it scans
        self.savePath = savePath
        #Use a static image as dummy camera output
        if dummyImagePath is not None:
            self.dummyImage = cv2.imread(dummyImagePath)
        else:
            self.dummyImage = None
        #System Information (deduplicated, non-empty, short entries only)
        self.sysInfo = list(set(platform.uname()))
        self.sysInfo = filter(lambda (thisInfo): thisInfo != '', self.sysInfo)
        self.sysInfo = filter(lambda (thisInfo): len(thisInfo) < 40, self.sysInfo)
        self.sysInfo.reverse()
    def run(self):
        """Open the camera and window, connect to the server, start the HUD loop."""
        #Prepare Camera and Window
        self.cam = cv2.VideoCapture(self.camIndex)
        self.cam.set(3, self.camResolution[0]) #Width
        self.cam.set(4, self.camResolution[1]) #Height
        cv2.namedWindow(self.windowName, cv2.WINDOW_OPENGL)
        cv2.resizeWindow(self.windowName, self.displayResolution[0], self.displayResolution[1])
        #Connect to SPEAR Server if given an IP and a PORT
        if self.serverHost and self.serverPort:
            client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            client_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            try:
                client_socket.connect((self.serverHost, self.serverPort))
                self.clientSocket = client_socket
                self.clientHost, self.clientPort = self.clientSocket.getsockname()
            except socket.error:
                print "Unable to connect to %s:%i" %(self.serverHost, self.serverPort)
                # NOTE(review): on failure clientSocket is left unset here;
                # _showHUD reads self.clientSocket — confirm this path.
                self.connected = None
        else:
            self.clientSocket = None
        self._showHUD()
    def _showHUD(self):
        """Main loop: capture, overlay HUD, handle keys, scan and display results."""
        #Initial Value for Recognition and Assessment
        alpha_code_list = ['------']
        assessment_list = ['N/A']
        #For visual feedback
        rect_list = None
        visual_timeout = 3
        #Capture and Show Image
        while True:
            __, stream_img = self.cam.read()
            #Overwrite camera output if have dummy image
            if self.dummyImage is not None:
                stream_img = self.dummyImage.copy()
            #Keybindings
            k = cv2.waitKey(1) & 0xFF
            if k == ord(self.endKey):
                break
            if k == ord(self.capKey):
                # Snapshot the raw frame to disk
                cv2.imwrite(time.ctime() + ".jpg", stream_img)
            if k == ord(self.readKey):
                #Make independent copy of image
                image_to_scan = stream_img.copy()
                #Draw Standby HUD
                _drawSemiTransparentBackground(stream_img, (384,1536), (216,864))
                cv2.multiply(stream_img, self.maskImage, stream_img)
                cv2.add(stream_img, self.scanHudImage, stream_img)
                cv2.imshow(self.windowName, stream_img)
                cv2.waitKey(1)
                #Perform LPR
                alpha_code_list, rect_list = _scanImage(image_to_scan)
                if rect_list:
                    time_appeared = time.time()
                #Query to SPEAR Server if connected and got a valid alpha_code_list
                if (self.clientSocket) and (alpha_code_list != ['------']):
                    to_send = ''.join(alpha_code_list)
                    self.clientSocket.sendall(to_send)
                    to_recv = int(self.clientSocket.recv(2048).strip('\n'))
                    assessment_list = const.interpretCode(to_recv)
                else:
                    assessment_list = ['N/A']
                #Save image if given a testPath
                if self.savePath:
                    filename = "%s/%s_[%s].jpg" %(self.savePath, time.ctime()[11:19], ','.join(alpha_code_list))
                    cv2.imwrite(filename, image_to_scan)
            #Make a cropped copy of ROI
            cropped_img = stream_img[216:864, 384:1536].copy()
            #Draw semitransparent background to see HUD better
            _drawSemiTransparentBackground(stream_img, (1568, 1883), (216, 522))
            _drawSemiTransparentBackground(stream_img, (1568, 1883), (558, 864))
            _drawSemiTransparentBackground(stream_img, (162, 260), (215, 865))
            _drawSemiTransparentBackground(stream_img, (384,1536), (216,864))
            #Apply Mask then HUD
            cv2.multiply(stream_img, self.maskImage, stream_img)
            cv2.add(stream_img, self.hudImage, stream_img)
            #Draw Triangle pointer for Light Level
            # (mean brightness of the ROI mapped onto a 648-pixel gauge)
            ave = int(cropped_img.mean() * float(648) / 255)
            pt1 = (165, 864-ave-20)
            pt2 = (185, 864-ave)
            pt3 = (165, 864-ave+20)
            cv2.line(stream_img, pt1, pt2, (0, 255, 0), 2)
            cv2.line(stream_img, pt2, pt3, (0, 255, 0), 2)
            cv2.line(stream_img, pt1, pt3, (0, 255, 0), 2)
            #Draw Rectangle if found Characters (for visual_timeout seconds)
            if rect_list:
                for top_left, bottom_right in rect_list:
                    cv2.rectangle(stream_img, top_left, bottom_right, (0, 255, 0), 2)
                if (time.time() - time_appeared) >= visual_timeout:
                    rect_list = None
            #Print Text Data
            _printListedText(stream_img, "SYSTEM INFO", self.sysInfo, (1585, 255))
            _printListedText(stream_img, "PLATE RECOGNITION", alpha_code_list, (1585, 615),
                             text_scale_factor=1, header_list_spacing=20)
            _printListedText(stream_img, "PLATE ASSESSMENT", assessment_list, (1585, 720))
            if self.clientSocket:
                connection = ["[Client] %s:%i" %(self.clientHost, self.clientPort),
                              "[Server] %s:%i" %(self.serverHost, self.serverPort)]
            else:
                connection = ['N/A']
            _printListedText(stream_img, "CONNECTION", connection, (1585, 430))
            #Update display
            cv2.imshow(self.windowName, stream_img)
        #Clean up
        cv2.destroyWindow(self.windowName)
        self.cam.release()
        if self.clientSocket:
            self.clientSocket.close()
def _scanImage(opencv_img):
    """Run plate recognition on an OpenCV image.

    Returns ([alphanumeric_code], rect_list) on success, or the sentinel
    (["------"], None) when plateRecog finds nothing.
    """
    # plateRecog works on SimpleCV images; wrap without copying channels
    img_SCV = SimpleCV.Image(opencv_img, cv2image=True)
    result = plateRecog.scanImage(img_SCV)
    if result:
        char_val_list, rect_list = result
        alphaNumCode = ''.join(char_val_list)
        return [alphaNumCode], rect_list
    else:
        return ["------"], None
# Darken the rectangle (x1,y1)-(x2,y2) in place by alpha-blending it with
# black: pixel = alpha1*pixel + alpha2*0 + gamma.  Python 2 only (the
# tuple-unpacking parameters were removed in Python 3, PEP 3113).
def _drawSemiTransparentBackground(image, (x1, x2), (y1, y2), alpha1=0.4, alpha2=0.6, gamma=0):
    window = image[y1:y2, x1:x2]
    window_bg = numpy.zeros(window.shape, numpy.uint8)
    image[y1:y2, x1:x2] = cv2.addWeighted(window, alpha1, window_bg, alpha2, gamma)
def _printListedText(image, header_text, text_list, top_left,
                     font=cv2.FONT_HERSHEY_SIMPLEX, head_scale_factor=0.7,
                     text_scale_factor=0.6, text_color=(0, 255, 0), text_weight=2,
                     list_indent=15, header_list_spacing=10, break_spacing=30):
    """Draw a header followed by an indented list of lines onto `image`.

    `top_left` is the header anchor; each list entry is drawn 30px further
    down, indented by `list_indent`.  NOTE(review): `break_spacing` is
    accepted but never used.
    """
    x, y = top_left
    cv2.putText(image, header_text, top_left, font, head_scale_factor, text_color, text_weight)
    for index, text in enumerate(text_list):
        x_text = x + list_indent
        y_text = y + header_list_spacing + ((index+1) * 30)
        cv2.putText(image, text, (x_text, y_text), font, text_scale_factor, text_color, text_weight)
|
from django.apps import apps
import os, os.path
import collections
import zlib
from importlib import import_module
# set up the logger
import logging
log = logging.getLogger('django_mako_plus')
################################################################
### Special type of list used for url params
class URLParamList(list):
    '''
    A list subclass whose indexing never raises IndexError: an out-of-range
    integer index yields '' (or a caller-supplied default) instead of
    raising.  This simplifies handling of URL parameters, where trailing
    parts are often absent, e.g. ['a', 'b'][5] -> ''.
    '''
    def __getitem__(self, idx):
        '''Return the element at idx, or '' when idx is out of range.'''
        return self.get(idx, '')
    def get(self, idx, default=''):
        '''Return the element at idx, or default when idx is out of range.'''
        size = len(self)
        # An int is in range iff -size <= idx < size (same rule as list).
        if isinstance(idx, int) and not (-size <= idx < size):
            return default
        # Slices and in-range ints get the normal list behavior.
        return super().__getitem__(idx)
##########################################
### Utilities
def import_qualified(name):
    '''
    Imports a fully-qualified name from a module:

        cls = import_qualified('homepage.views.index.MyForm')

    Raises an ImportError if it can't be imported.
    '''
    parts = name.rsplit('.', 1)
    if len(parts) != 2:
        raise ImportError('Invalid fully-qualified name: {}'.format(name))
    try:
        return getattr(import_module(parts[0]), parts[1])
    except AttributeError as err:
        # Chain the original error so the missing attribute stays visible
        # in the traceback (and fix the old "ipmorted" docstring typo).
        raise ImportError('{} not found in module {}'.format(parts[1], parts[0])) from err
def merge_dicts(*dicts):
    '''
    Shallow-merge any number of dicts into a new dict.  Later arguments
    win on conflicting keys; falsy arguments (None, {}) are skipped.
    '''
    result = {}
    for mapping in filter(None, dicts):
        result.update(mapping)
    return result
def flatten(*args):
    '''Generator that recursively flattens embedded lists, tuples, etc.

    Strings and bytes are treated as atoms, not as iterables.
    '''
    for arg in args:
        # BUG FIX: the collections.Iterable alias was removed in Python 3.10;
        # the ABC lives in collections.abc.
        if isinstance(arg, collections.abc.Iterable) and not isinstance(arg, (str, bytes)):
            yield from flatten(*arg)
        else:
            yield arg
def split_app(path):
    '''
    Splits a file path on the app, returning (app config, relative path
    within app), or (None, path) when no registered app contains the path.
    '''
    sep = os.path.sep
    parts = os.path.abspath(path).split(sep)
    # Walk the path components from deepest to shallowest, asking Django's
    # app registry whether that component names an installed app.
    for i in reversed(range(0, len(parts) - 1)):
        appdir = sep.join(parts[:i])
        appname = parts[i]
        filepath = sep.join(parts[i + 1:])
        config = apps.app_configs.get(appname)
        if config is not None and os.path.samefile(config.path, appdir + sep + appname):
            # got it!
            return config, filepath
    # not found
    return None, path
def crc32(filename):
    '''
    Calculates the CRC checksum for a file.

    Using CRC32 because security isn't the issue and don't need perfect
    noncollisions — we just need to know whether a file has changed, and
    crc32 is far faster than the hashlib algorithms.
    '''
    result = 0
    with open(filename, 'rb') as fin:
        # PERF FIX: read 64 KiB chunks instead of 48 bytes; the tiny reads
        # made the loop syscall-bound on large files.  The checksum value
        # is identical regardless of chunking.
        while True:
            chunk = fin.read(65536)
            if not chunk:
                break
            result = zlib.crc32(chunk, result)
    return result
# Sentinel: distinguishes "no factory supplied" from an explicit factory=None.
EMPTY = object()
def getdefaultattr(obj, name, default=None, factory=EMPTY):
    '''
    Gets the given attribute from the object, creating and storing it
    first if it doesn't exist.

    When the attribute is missing, the stored value is factory() if a
    factory was supplied, otherwise `default`.
    '''
    try:
        return getattr(obj, name)
    except AttributeError:
        pass
    # BUG FIX: `default` was previously ignored — a missing attribute always
    # became None when no factory was given.
    val = factory() if factory is not EMPTY else default
    setattr(obj, name, val)
    return val
def qualified_name(obj):
    '''Returns the fully-qualified dotted name of the given object.'''
    # Instances without __module__ are described via their class.
    if not hasattr(obj, '__module__'):
        obj = obj.__class__
    module = obj.__module__
    # Builtins (module 'builtins') and module-less objects get the bare name.
    if module in (None, str.__class__.__module__):
        return obj.__qualname__
    return '{}.{}'.format(module, obj.__qualname__)
|
import json
import sys
import falcon
import pytest
from falcon import testing
from ward.main import Ward
@pytest.fixture()
def client():
    # Falcon test client wired to a fresh app onto which Ward mounts its routes.
    app = falcon.App()
    ward = Ward(app=app)
    return testing.TestClient(app)
def test_base_ward_initialization():
    # A default Ward() is not packaged, has empty UI/head paths, and exposes
    # live server/app plumbing; ogler is None here (it is set on win32 —
    # see test_ward_initialization_windows).
    ward = Ward()
    assert ward.packaged is False
    assert ward.uiPath == ''
    assert ward.headDir == ''
    assert ward.ogler is None
    assert ward.re is not None
    assert ward.app is not None
    assert ward.server is not None
    assert ward.httpServerDoer is not None
def test_ward_initialization_packaged(monkeypatch):
    # "Packaged" (PyInstaller) mode requires BOTH sys.frozen to be truthy
    # AND sys._MEIPASS to be a non-empty path; either alone is not enough.
    monkeypatch.setattr(sys, 'frozen', True, raising=False)
    monkeypatch.setattr(sys, '_MEIPASS', '', raising=False)
    ward = Ward()
    assert ward.packaged is False
    monkeypatch.setattr(sys, 'frozen', False, raising=False)
    monkeypatch.setattr(sys, '_MEIPASS', '/tmp/foo', raising=False)
    ward = Ward()
    assert ward.packaged is False
    monkeypatch.setattr(sys, 'frozen', True, raising=False)
    monkeypatch.setattr(sys, '_MEIPASS', '/tmp/foo', raising=False)
    ward = Ward()
    assert ward.packaged is True
    # UI assets are resolved relative to the PyInstaller bundle directory.
    assert ward.uiPath == '/tmp/foo/ui/'
    assert ward.headDir == '/tmp/foo/'
def test_ward_initialization_windows(monkeypatch):
    # On win32, Ward sets up an ogler (logger) that is absent elsewhere.
    monkeypatch.setattr(sys, 'platform', 'win32', raising=False)
    ward = Ward()
    assert ward.ogler is not None
def test_on_post_passcode(client):
    # POST /passcode returns a freshly generated 22-character passcode.
    resp = client.simulate_post('/passcode')
    assert resp is not None
    assert len(resp.json) == 22
    assert resp.status == falcon.HTTP_OK
def test_on_post_habery(client):
    # An empty passcode is rejected; a non-empty one creates the habery.
    # NOTE(review): `body` is a JSON *string* passed via json=, so it gets
    # double-encoded — presumably matched by the handler; confirm.
    body = '{"passcode": ""}'
    resp = client.simulate_post('/habery', json=body)
    assert resp is not None
    assert resp.status == falcon.HTTP_BAD_REQUEST
    body = '{"passcode": "<PASSWORD>"}'
    resp = client.simulate_post('/habery', json=body)
    assert resp is not None
    assert resp.status == falcon.HTTP_OK
def test_on_post_identifier_witness_config(client):
    # Baseline valid identifier payload; the witness list is varied below.
    happy = dict(
        icount=1,
        ncount=1,
        isith=1,
        nsith=1,
        alias='bob'
    )
    # An empty witness list is rejected.
    errWitnessList = dict(
        witnesses=[]
    ) | happy
    resp = client.simulate_post('/identifier', json=json.dumps(errWitnessList))
    assert resp is not None
    assert resp.status == falcon.HTTP_BAD_REQUEST
    # At least one witness makes the payload acceptable.
    witnessList = dict(
        witnesses=['will']
    ) | happy
    resp = client.simulate_post('/identifier', json=json.dumps(witnessList))
    assert resp is not None
    assert resp.status == falcon.HTTP_OK
def test_on_post_identifier_n_config(client):
    # Baseline valid payload; the next-key (n*) settings are varied below.
    happy = dict(
        icount=1,
        isith=1,
        alias='bob',
        witnesses=['wil']
    )
    # nsith greater than ncount is rejected (threshold can't exceed key count).
    badN = dict(
        ncount=1,
        nsith=2
    ) | happy
    resp = client.simulate_post('/identifier', json=json.dumps(badN))
    assert resp is not None
    assert resp.status == falcon.HTTP_BAD_REQUEST
    # nsith equal to ncount is accepted.
    n = dict(
        ncount=2,
        nsith=2
    ) | happy
    resp = client.simulate_post('/identifier', json=json.dumps(n))
    assert resp is not None
    assert resp.status == falcon.HTTP_OK
def test_on_post_identifier_i_config(client):
    # Baseline valid payload; the inception-key (i*) settings are varied below.
    happy = dict(
        ncount=1,
        nsith=1,
        alias='bob',
        witnesses=['wil']
    )
    # isith greater than icount is rejected (threshold can't exceed key count).
    badI = dict(
        icount=1,
        isith=2
    ) | happy
    resp = client.simulate_post('/identifier', json=json.dumps(badI))
    assert resp is not None
    assert resp.status == falcon.HTTP_BAD_REQUEST
    # isith equal to icount is accepted.
    i = dict(
        icount=2,
        isith=2
    ) | happy
    resp = client.simulate_post('/identifier', json=json.dumps(i))
    assert resp is not None
    assert resp.status == falcon.HTTP_OK
def test_on_post_identifier_alias(client):
    # A payload without an alias is rejected; adding one makes it valid.
    missingAlias = dict(
        icount=1,
        ncount=1,
        isith=1,
        nsith=1,
        witnesses=['wil']
    )
    resp = client.simulate_post('/identifier', json=json.dumps(missingAlias))
    assert resp is not None
    assert resp.status == falcon.HTTP_BAD_REQUEST
    happy = missingAlias | dict(
        alias='bob'
    )
    resp = client.simulate_post('/identifier', json=json.dumps(happy))
    assert resp is not None
    assert resp.status == falcon.HTTP_OK
|
import os
import sys
import sysconfig
def reset_tzpath(to=None):
    """Reset the global TZPATH tuple.

    With `to` given, it must be a sequence of absolute paths (a bare
    str/bytes is rejected).  With to=None, the PYTHONTZPATH environment
    variable is consulted, falling back to the compiled-in TZPATH.
    """
    global TZPATH
    if to is None:
        env_var = os.environ.get("PYTHONTZPATH", None)
        if env_var is None:
            env_var = sysconfig.get_config_var("TZPATH")
        TZPATH = tuple(_parse_python_tzpath(env_var))
        return
    if isinstance(to, (str, bytes)):
        raise TypeError(
            f"tzpaths must be a list or tuple, "
            + f"not {type(to)}: {to!r}"
        )
    if not all(map(os.path.isabs, to)):
        raise ValueError(_get_invalid_paths_message(to))
    TZPATH = tuple(to)
def _parse_python_tzpath(env_var):
    """Split a PYTHONTZPATH-style string into a tuple of absolute paths.

    Relative entries are dropped with an InvalidTZPathWarning; an empty
    or None value yields ().
    """
    if not env_var:
        return ()
    candidates = env_var.split(os.pathsep)
    kept = tuple(p for p in candidates if os.path.isabs(p))
    # Anything filtered out gets reported to the user.
    if len(kept) != len(candidates):
        import warnings
        warnings.warn(
            "Invalid paths specified in PYTHONTZPATH environment variable."
            + _get_invalid_paths_message(candidates),
            InvalidTZPathWarning,
        )
    return kept
def _get_invalid_paths_message(tzpaths):
    """Format an indented listing of the non-absolute entries in tzpaths."""
    bad = [path for path in tzpaths if not os.path.isabs(path)]
    sep = "\n    "
    return (
        "Paths should be absolute but found the following relative paths:"
        + sep + sep.join(bad)
    )
def find_tzfile(key):
    """Retrieve the path to a TZif file from a key."""
    _validate_tzfile_path(key)
    # First existing file wins, in TZPATH search order.
    candidates = (os.path.join(root, key) for root in TZPATH)
    for candidate in candidates:
        if os.path.isfile(candidate):
            return candidate
    return None
# Sentinel base ("_" plus the platform separator) used to detect path escapes.
_TEST_PATH = os.path.normpath(os.path.join("_", "_"))[:-1]
def _validate_tzfile_path(path, _base=_TEST_PATH):
    """Reject keys that are absolute, non-normalized, or escape TZPATH."""
    if os.path.isabs(path):
        raise ValueError(
            f"ZoneInfo keys may not be absolute paths, got: {path}"
        )
    # Only normalizations that change the key's *length* matter here
    # (e.g. a/../b -> a/b or a/b/ -> a/b); on Windows a/b -> a\b keeps
    # the length and is therefore accepted.
    normalized = os.path.normpath(path)
    if len(normalized) != len(path):
        raise ValueError(
            f"ZoneInfo keys must be normalized relative paths, got: {path}"
        )
    # Anchor under the sentinel base; anything that resolves outside it
    # (e.g. "../x") is an escape attempt.
    anchored = os.path.normpath(os.path.join(_base, normalized))
    if not anchored.startswith(_base):
        raise ValueError(
            f"ZoneInfo keys must refer to subdirectories of TZPATH, got: {path}"
        )
del _TEST_PATH
def available_timezones():
    """Returns a set containing all available time zone keys.

    .. caution::
        This may attempt to open a large number of files, since the best
        way to determine whether a file on the search path is a time zone
        is to open it and check for the TZif "magic string" at the start.
    """
    from importlib import resources
    valid_zones = set()
    # Start with loading from the tzdata package if it exists: this has a
    # pre-assembled list of zones that only requires opening one file.
    try:
        with resources.open_text("tzdata", "zones") as f:
            for zone in f:
                zone = zone.strip()
                if zone:
                    valid_zones.add(zone)
    except (ImportError, FileNotFoundError):
        pass
    # A file is a time zone iff it begins with the 4-byte TZif magic.
    def valid_key(fpath):
        try:
            with open(fpath, "rb") as f:
                return f.read(4) == b"TZif"
        except Exception:  # pragma: nocover
            return False
    # Then scan every directory on TZPATH for TZif files.
    for tz_root in TZPATH:
        if not os.path.exists(tz_root):
            continue
        for root, dirnames, files in os.walk(tz_root):
            if root == tz_root:
                # right/ and posix/ are special directories and shouldn't be
                # included in the output of available zones
                if "right" in dirnames:
                    dirnames.remove("right")
                if "posix" in dirnames:
                    dirnames.remove("posix")
            for file in files:
                fpath = os.path.join(root, file)
                # Keys are always /-separated, regardless of platform.
                key = os.path.relpath(fpath, start=tz_root)
                if os.sep != "/":  # pragma: nocover
                    key = key.replace(os.sep, "/")
                if not key or key in valid_zones:
                    continue
                if valid_key(fpath):
                    valid_zones.add(key)
    if "posixrules" in valid_zones:
        # posixrules is a special symlink-only time zone where it exists, it
        # should not be included in the output
        valid_zones.remove("posixrules")
    return valid_zones
class InvalidTZPathWarning(RuntimeWarning):
    """Warning raised if an invalid path is specified in PYTHONTZPATH."""
# Module initialization: TZPATH starts empty and is populated from the
# environment (or the compiled-in default) by reset_tzpath().
TZPATH = ()
reset_tzpath()
|
from ..common.grange import Grange
class RecordDigIS(Grange):
    """A single DigIS hit anchored on a genome range.

    Extends Grange with query-side coordinates (qid/qstart/qend), the
    subject id, accuracy, bit score and e-value, and knows how to merge
    overlapping or nearby hit fragments into one record.
    """
    def __init__(self, genome_name, chrom, genome_seq, genome_len, qid, sid, qstart, qend, start, end, strand, acc, score, evalue):
        # Query-side (HMM model / outlier) identifiers and coordinates
        self.qid = qid
        self.sid = sid
        self.qstart = qstart
        self.qend = qend
        # Hit statistics
        self.acc = acc
        self.score = score
        self.evalue = evalue
        # Genome-side coordinates live in the Grange base class
        super().__init__(genome_name, chrom, start, end, strand, genome_seq, genome_len)
    @classmethod
    def from_csv(cls, csv, genome_name, chrom, genome_seq, genome_len):
        """Build a record from a CSV row (see get_csv_header for the columns)."""
        qid = csv['qid']
        sid = csv['sid']
        qstart = int(csv['qstart'])
        qend = int(csv['qend'])
        start = int(csv['sstart'])
        end = int(csv['send'])
        strand = csv['strand']
        acc = csv['acc']
        score = csv['score']
        evalue = csv['evalue']
        return cls(genome_name, chrom, genome_seq, genome_len, qid, sid, qstart, qend, start, end, strand, acc, score, evalue)
    @classmethod
    def from_hmmer(cls, hsp, sid, start, end, strand, genome_name, chrom, genome_seq, seq_len):
        """Build a record from an HMMER hsp object (acc/score/evalue coerced to float)."""
        return cls(genome_name, chrom, genome_seq, seq_len, hsp.qid, sid, hsp.qstart, hsp.qend, start, end, strand, float(hsp.acc), float(hsp.dom_bitscore), float(hsp.dom_evalue))
    # Requirements for merge in distance:
    # - the same strand
    # - the same query id (hmm model/outlier)
    # - continuous fragments with respect to the model
    def should_be_merged_distance(self, other, merge_distance):
        """True when `other` is a nearby, non-overlapping continuation of this hit.

        "Continuous" means the fragment order on the genome matches the
        fragment order on the query model, accounting for strand.
        """
        continuous_fragments = (self.strand == '+' and self.start < other.start and self.qend <= other.qstart) or \
                               (self.strand == '+' and other.start < self.start and other.qend <= self.qstart) or \
                               (self.strand == '-' and other.start < self.start and self.qend <= other.qstart) or \
                               (self.strand == '-' and self.start < other.start and other.qend <= self.qstart)
        if self.qid == other.qid and not self.has_overlap(other) \
                and self.has_overlap(other, flank=merge_distance) and continuous_fragments:
            return True
        else:
            return False
    def merge(self, other, merge_type):
        """Merge `other` into this record in place.

        merge_type 'distance' sums the scores of the two fragments;
        'overlap' keeps the better score.  Accuracy is a length-weighted
        average, with the overlapping stretch counted only once (credited
        to the fragment with the higher accuracy).
        NOTE(review): an unknown merge_type leaves new_score unbound and
        raises NameError below — consider validating merge_type up front.
        """
        if self.strand != other.strand or self.sid != other.sid:
            raise ValueError('RecordDigIS.merge(): Records can not be merged')
        # if both hits from the same
        new_start = min(self.start, other.start)
        new_end = max(self.end, other.end)
        new_len = new_end - new_start + 1
        intersection_length = self.get_overlap_length(other)
        # Weighted-average accuracy; the overlap contributes via the
        # more accurate of the two fragments.
        if self.acc > other.acc:
            new_acc = (len(self)*self.acc + (len(other)-intersection_length)*other.acc) / new_len
        else:
            new_acc = ((len(self)-intersection_length)*self.acc + len(other)*other.acc) / new_len
        if merge_type == "distance":
            new_score = self.score + other.score
        elif merge_type == "overlap":
            new_score = max(self.score, other.score)
        self.set_start(new_start)
        self.set_end(new_end)
        self.qstart = min(self.qstart, other.qstart)
        self.qend = max(self.qend, other.qend)
        # Union of the (possibly already merged, '-'-joined) query ids
        self.qid = '-'.join(list(set(self.qid.split('-') + other.qid.split('-'))))
        self.acc = new_acc
        self.score = new_score
        self.evalue = min(self.evalue, other.evalue)
    @classmethod
    def get_csv_header(cls):
        """Column names matching to_csv()'s output order."""
        return ["qid", "qstart", "qend", "sid", "sstart", "send", "strand", "acc", "score", "evalue"]
    def to_csv(self):
        """Serialize the record as a list of CSV cell values (acc/score rounded)."""
        return [self.qid, self.qstart, self.qend, self.sid, self.start, self.end, self.strand, round(self.acc, 2), round(self.score, 2), self.evalue]
    def __str__(self):
        return "{}, {}, {}, {}, {}, {}, {}, {}, {}, {}".format(self.qid, self.qstart, self.qend,
                                                               self.sid, self.start, self.end,
                                                               self.strand, self.acc, self.score, self.evalue)
|
#!/usr/bin/env python
from __future__ import print_function, unicode_literals
# When run as a standalone script (no package context), put the project
# root on sys.path so the absolute imports below still resolve.
if __package__ is None:
    import sys
    from os import path
    sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
import json
import os
import os.path
import posixpath
from collections import defaultdict, namedtuple
try:
from urllib.parse import urlparse, urlsplit, urlunsplit
except ImportError:
from urlparse import urlparse, urlsplit, urlunsplit
import click
from .utils import (chdir, mkdir_p, link, rm_link, git, GitError, svn, gitsvn, gitsvnrebase, current_branch)
from .cli import echo, info, error
# Legacy location of checked-out externals (migrated by externals_root_path)
OLD_EXTERNALS_ROOT = os.path.join('.git', 'externals')
# Current externals checkout directory and definition file, both repo-relative
EXTERNALS_ROOT = '.git_externals'
EXTERNALS_JSON = 'git_externals.json'
# One tracked occurrence of an external: (branch, ref, path, name)
ExtItem = namedtuple('ExtItem', ['branch', 'ref', 'path', 'name'])
def get_repo_name(repo):
    """Return the local checkout directory name for an external repo URL.

    An explicit 'name' entry in git_externals.json wins; otherwise the
    last URL component is used, minus a trailing '/' and a '.git' suffix.
    Exits with an error when the derived name would be empty.
    """
    externals = load_gitexts()
    if repo in externals and 'name' in externals[repo]:
        return externals[repo]['name']
    if repo[-1] == '/':
        repo = repo[:-1]
    tail = repo.split('/')[-1]
    if tail.endswith('.git'):
        tail = tail[:-len('.git')]
    if not tail:
        error("Invalid repository name: \"{}\"".format(repo), exitcode=1)
    return tail
def externals_json_path(pwd=None):
    """Absolute path of git_externals.json in pwd (defaults to the repo root)."""
    return os.path.join(pwd or root_path(), EXTERNALS_JSON)
def externals_root_path(pwd=None):
    """Path of the externals checkout directory, migrating the legacy layout.

    Side effect: if only the old `.git/externals` directory exists it is
    renamed to `.git_externals` and the symlinks are re-created.
    """
    _old_root_path = os.path.join(pwd or root_path(), OLD_EXTERNALS_ROOT)
    _root_path = os.path.join(pwd or root_path(), EXTERNALS_ROOT)
    if os.path.exists(_old_root_path) and not os.path.exists(_root_path):
        info("Moving old externals path to new location")
        os.rename(_old_root_path, _root_path)
        # Re-link the external targets after the move
        link_entries(load_gitexts(pwd))
    elif os.path.exists(_old_root_path) and os.path.exists(_root_path):
        error("Both new and old externals folder found, {} will be used".format(_root_path))
    return _root_path
def root_path():
    """Absolute path of the current Git working tree's top-level directory."""
    return git('rev-parse', '--show-toplevel').strip()
def is_git_repo(quiet=True):
    """Says if pwd is a Git working tree or not.

    If not quiet: says it also on standard output.
    Returns True/False.
    """
    try:
        return git('rev-parse', '--is-inside-work-tree').strip() == 'true'
    except GitError as err:
        if not quiet:
            print(str(err))
        # BUG FIX: previously fell through and returned None implicitly;
        # return an explicit False so the result is always a bool.
        return False
def normalize_gitext_url(url):
    """Resolve a relative external URL against the current origin's URL."""
    # an absolute url is already normalized
    if urlparse(url).netloc != '' or url.startswith('git@'):
        return url
    # relative urls use the root url of the current origin
    remote_name = git('config', 'branch.%s.remote' % current_branch()).strip()
    remote_url = git('config', 'remote.%s.url' % remote_name).strip()
    if remote_url.startswith('git@'):
        # scp-style remote (git@host:path): keep the "git@host:" prefix
        prefix = remote_url[:remote_url.index(':')+1]
        remote_url = prefix + url.strip('/')
    else:
        # real URL remote: replace only the path component
        remote_url = urlunsplit(urlsplit(remote_url)._replace(path=url))
    return remote_url
def get_entries():
    """Repo names of defined externals whose checkout exists on disk."""
    return [
        get_repo_name(url)
        for url in load_gitexts()
        if os.path.exists(os.path.join(externals_root_path(), get_repo_name(url)))
    ]
def load_gitexts(pwd=None):
    """Load the *externals definition file* present in given
    directory, or cwd.  Returns {} when the file doesn't exist.
    """
    directory = pwd if pwd is not None else '.'
    fn = os.path.join(directory, EXTERNALS_JSON)
    if not os.path.exists(fn):
        return {}
    with open(fn) as f:
        return normalize_gitexts(json.load(f))
def normalize_gitexts(gitext):
    """Ensure every external entry has a 'vcs' key.

    The VCS defaults to 'svn' when the URL scheme contains "svn"
    (e.g. svn+ssh — svn urls must be absolute to be autodetected),
    otherwise 'git'.  Existing 'vcs' values are kept.
    """
    for url in gitext:
        detected = 'svn' if 'svn' in urlparse(url).scheme else 'git'
        gitext[url].setdefault('vcs', detected)
    return gitext
def dump_gitexts(externals):
    """
    Dump the externals dictionary as pretty-printed JSON (sorted keys,
    trailing newline) into git_externals.json at the repo root.

    NOTE(review): the original docstring claimed the runtime-only 'vcs'
    key is removed here, but no code does so — confirm intended.
    """
    payload = json.dumps(externals, sort_keys=True, indent=4, separators=(',', ': '))
    with open(externals_json_path(), 'w') as f:
        f.write(payload)
        f.write("\n")
def foreach_externals(pwd, callback, recursive=True, only=()):
    """
    Iterates over externals, starting from directory pwd, recursively or not.

    callback is called for each external with the following arguments:
        - relative url of current external repository
        - path to external working tree directory
        - refs: external as a dictionary (straight from json file)

    Iterates over all externals by default, or filters over the externals
    listed in `only` (matching on the external's path, url or part of it).
    """
    externals = load_gitexts(pwd)
    # Predicate factory: substring match against `only`, or accept-all.
    def filter_ext():
        def take_external(url, path):
            return any((expr in url or expr in path) for expr in only)
        def take_all(*args):
            return True
        return take_external if len(only) else take_all
    for rel_url in externals:
        ext_path = os.path.join(externals_root_path(pwd), get_repo_name(rel_url))
        if filter_ext()(rel_url, ext_path):
            callback(rel_url, ext_path, externals[rel_url])
        if recursive:
            # Depth-first recursion into the external's own externals.
            foreach_externals(ext_path, callback, recursive=recursive, only=only)
def foreach_externals_dir(pwd, callback, recursive=True, only=()):
    """
    Same as foreach_externals, but place the callback in the directory
    context of the externals before calling it.
    """
    # FIX: default changed from the mutable [] to (), matching
    # foreach_externals and avoiding the shared-mutable-default pitfall.
    def run_from_dir(rel_url, ext_path, refs):
        # Skip externals that have not been checked out yet.
        if os.path.exists(ext_path):
            with chdir(ext_path):
                callback(rel_url, ext_path, refs)
    foreach_externals(root_path(), run_from_dir, recursive=recursive, only=only)
def sparse_checkout(repo_name, repo, dirs):
    """Create a sparse clone of `repo` in `repo_name`, limited to `dirs`.

    The externals definition file is always included in the checkout.
    Returns repo_name.
    """
    git('init', repo_name)
    with chdir(repo_name):
        git('remote', 'add', '-f', 'origin', repo)
        git('config', 'core.sparsecheckout', 'true')
        with open(os.path.join('.git', 'info', 'sparse-checkout'), 'wt') as fp:
            # NOTE(review): externals_json_path() returns an *absolute* path
            # of the enclosing repo's json; sparse-checkout patterns are
            # normally repo-relative — confirm this is intended.
            fp.write('{}\n'.format(externals_json_path()))
            for d in dirs:
                # assume directories are terminated with /
                fp.write(posixpath.normpath(d))
                if d[-1] == '/':
                    fp.write('/')
                fp.write('\n')
    return repo_name
def is_workingtree_clean(path, fail_on_empty=True):
    """
    Returns true if and only if there are no modifications to tracked files.
    By modifications it is intended additions, deletions, file removal or
    conflicts. If True is returned, performing a `git reset --hard` would
    result in no loss of local modifications because:
     - tracked files are unchanged
     - untracked files are not modified anyway
    """
    if not os.path.exists(path):
        # Missing working tree counts as clean only when not failing on empty.
        # (The original had a second, unreachable os.path.exists check here.)
        return not fail_on_empty
    with chdir(path):
        try:
            # --porcelain emits one line per modified tracked file; an empty
            # output therefore means the tree is clean.  (The original
            # counted lines via an uncalled `line.strip` — same result.)
            return git('status', '--untracked-files=no', '--porcelain').strip() == ''
        except GitError as err:
            echo('Couldn\'t retrieve Git status of', path)
            error(str(err), exitcode=err.errcode)
def link_entries(git_externals):
    """(Re-)create every symlink mapping an external's source file/dir to
    its destination(s) inside the working tree."""
    # One (repo_name, src, absolute dst) triple per link target.
    entries = [(get_repo_name(repo), src, os.path.join(os.getcwd(), dst.replace('/', os.path.sep)))
               for (repo, repo_data) in git_externals.items()
               for (src, dsts) in repo_data['targets'].items()
               for dst in dsts]
    entries.sort(key=lambda x: x[2])
    # remove links starting from the deepest dst
    for _, __, dst in entries[::-1]:
        if os.path.lexists(dst):
            rm_link(dst)
    # link starting from the highest dst
    for repo_name, src, dst in entries:
        with chdir(os.path.join(externals_root_path(), repo_name)):
            mkdir_p(os.path.split(dst)[0])
            link(os.path.abspath(src), dst)
def externals_sanity_check():
    """Check that we are not trying to track various refs of the same external repo"""
    # Maps each external repository URL to the set of (branch, ref, path,
    # name) ExtItem tuples under which some externals manifest references it.
    registry = defaultdict(set)
    root = root_path()

    def registry_add(url, path, ext):
        # 'name' is optional in the manifest, hence the .get() default.
        registry[url].add(ExtItem(ext['branch'], ext['ref'], path, ext.get('name', '')))

    foreach_externals(root, registry_add, recursive=True)

    errmsg = None
    for url, set_ in registry.items():
        # we are only interested to know if branch-ref pairs are duplicated
        # (s[0] is the ExtItem branch, s[1] is its ref)
        if len({(s[0], s[1]) for s in set_}) > 1:
            if errmsg is None:
                errmsg = ["Error: one project can not refer to different branches/refs of the same git external repository,",
                          "however it appears to be the case for:"]
            errmsg.append('\t- {}, tracked as:'.format(url))
            for i in set_:
                errmsg.append("\t\t- external directory: '{0}'".format(os.path.relpath(i.path, root)))
                errmsg.append("\t\t branch: '{0}', ref: '{1}'".format(i.branch, i.ref))

    if errmsg is not None:
        errmsg.append("Please correct the corresponding {0} before proceeding".format(EXTERNALS_JSON))
        # error() prints the message and exits the process with code 1.
        error('\n'.join(errmsg), exitcode=1)

    info('externals sanity check passed!')
    # TODO: check if we don't have duplicate entries under `.git_externals/`
def filter_externals_not_needed(all_externals, entries):
    """Return the subset of *all_externals* whose destinations fall inside
    one of the absolute paths in *entries*.

    Only destinations located under some entry are kept; repositories whose
    targets are all filtered out are dropped entirely.  The input mapping is
    NOT modified (the original implementation overwrote
    ``all_externals[repo_name]['targets']`` in place, corrupting the caller's
    data for subsequent use, e.g. the recursive pass in ``gitext_up``).
    """
    git_externals = {}
    for repo_name, repo_val in all_externals.items():
        filtered_targets = {}
        for src, dsts in repo_val['targets'].items():
            # Keep only destinations that live inside one of the entries.
            filtered_dsts = [dst for dst in dsts
                             if any(os.path.abspath(dst).startswith(e)
                                    for e in entries)]
            if filtered_dsts:
                filtered_targets[src] = filtered_dsts

        if filtered_targets:
            # Shallow-copy before overriding 'targets' so the caller's
            # mapping stays intact.
            filtered_repo = dict(repo_val)
            filtered_repo['targets'] = filtered_targets
            git_externals[repo_name] = filtered_repo

    return git_externals
def resolve_revision(ref, mode='git'):
    """Resolve *ref* to a concrete revision for the given *mode*.

    A ref of the form ``svn:r<N>`` is reduced to the bare SVN revision
    number; in 'git' mode that revision is further translated to the
    matching git-svn commit hash.  Any other ref (including None) is
    returned unchanged.
    """
    assert mode in ('git', 'svn'), "mode = {} not in (git, svn)".format(mode)
    if ref is not None and ref.startswith('svn:r'):
        # Remove the 'svn:r' prefix by slicing.  The original used
        # str.strip('svn:r'), which strips any of the characters
        # {s, v, n, :, r} from BOTH ends and can therefore eat legitimate
        # leading/trailing characters of the revision.
        ref = ref[len('svn:r'):]
        # If the revision starts with 'svn:r' in 'git' mode we search
        # for the matching hash.
        if mode == 'git':
            ref = git('log', '--grep', 'git-svn-id:.*@%s' % ref, '--format=%H', capture=True).strip()
    return ref
def gitext_up(recursive, entries=None, reset=False, use_gitsvn=False):
    """Clone or update every external listed in the externals manifest.

    recursive   -- also process the externals of each external
    entries     -- when given, restrict processing to externals whose
                   destinations fall inside these absolute paths
    reset       -- discard local modifications before updating
    use_gitsvn  -- use git-svn instead of plain svn for SVN externals
    """
    if not os.path.exists(externals_json_path()):
        return

    all_externals = load_gitexts()
    git_externals = all_externals if entries is None else filter_externals_not_needed(all_externals, entries)

    def egit(command, *args):
        # git() wrapper that injects --force on checkout when reset is set.
        if command == 'checkout' and reset:
            args = ('--force',) + args
        git(command, *args, capture=False)

    # NOTE(review): the checkout helpers below close over the loop variables
    # `ext_repo` / `normalized_ext_repo`, so they are only valid when called
    # from inside the `for ext_repo in git_externals.keys()` loop.
    def git_initial_checkout(repo_name, repo_url):
        """Perform the initial git clone (or sparse checkout)"""
        dirs = git_externals[ext_repo]['targets'].keys()
        if './' not in dirs:
            echo('Doing a sparse checkout of:', ', '.join(dirs))
            sparse_checkout(repo_name, repo_url, dirs)
        else:
            egit('clone', repo_url, repo_name)

    def git_update_checkout(reset):
        """Update an already existing git working tree"""
        if reset:
            egit('reset', '--hard')
            egit('clean', '-df')
        egit('fetch', '--all')
        egit('fetch', '--tags')

        if 'tag' in git_externals[ext_repo]:
            echo('Checking out tag', git_externals[ext_repo]['tag'])
            egit('checkout', git_externals[ext_repo]['tag'])
        else:
            echo('Checking out branch', git_externals[ext_repo]['branch'])
            egit('checkout', git_externals[ext_repo]['branch'])

            # Pin to an explicit ref only on the branch path; a tag already
            # identifies an exact commit.
            rev = get_rev(ext_repo)
            if rev is not None:
                echo('Checking out commit', rev)
                egit('checkout', rev)

    def get_rev(ext_repo, mode='git'):
        # Translate the manifest's 'ref' into a concrete revision.
        ref = git_externals[ext_repo]['ref']
        return resolve_revision(ref, mode)

    def gitsvn_initial_checkout(repo_name, repo_url):
        """Perform the initial git-svn clone (or sparse checkout)"""
        min_rev = get_rev(ext_repo, mode='svn') or 'HEAD'
        gitsvn('clone', normalized_ext_repo, repo_name, '-r%s' % min_rev, capture=False)

    def gitsvn_update_checkout(reset):
        """Update an already existing git-svn working tree"""
        # FIXME: seems this might be necessary sometimes (happened with
        # 'vectorfonts' for example that the following error: "Unable to
        # determine upstream SVN information from HEAD history" was fixed by
        # adding that, but breaks sometimes. (investigate)
        # git('rebase', '--onto', 'git-svn', '--root', 'master')
        gitsvnrebase('.', capture=False)
        rev = get_rev(ext_repo) or 'git-svn'
        echo('Checking out commit', rev)
        git('checkout', rev)

    def svn_initial_checkout(repo_name, repo_url):
        """Perform the initial svn checkout"""
        svn('checkout', '--ignore-externals', normalized_ext_repo, repo_name, capture=False)

    def svn_update_checkout(reset):
        """Update an already existing svn working tree"""
        if reset:
            svn('revert', '-R', '.')
        rev = get_rev(ext_repo, mode='svn') or 'HEAD'
        echo('Updating to commit', rev)
        svn('up', '--ignore-externals', '-r%s' % rev, capture=False)

    def autosvn_update_checkout(reset):
        # A '.git' directory means this checkout was created with git-svn.
        if os.path.exists('.git'):
            gitsvn_update_checkout(reset)
        else:
            svn_update_checkout(reset)

    for ext_repo in git_externals.keys():
        normalized_ext_repo = normalize_gitext_url(ext_repo)

        # Select the checkout strategy per external's VCS.
        if all_externals[ext_repo]['vcs'] == 'git':
            _initial_checkout = git_initial_checkout
            _update_checkout = git_update_checkout
        else:
            if use_gitsvn:
                _initial_checkout = gitsvn_initial_checkout
            else:
                _initial_checkout = svn_initial_checkout
            _update_checkout = autosvn_update_checkout

        mkdir_p(externals_root_path())
        with chdir(externals_root_path()):
            repo_name = get_repo_name(normalized_ext_repo)
            # The manifest may give the external a custom directory name.
            ext_name = git_externals[ext_repo].get('name', '')
            ext_name = ext_name if ext_name else repo_name
            info('External', ext_name)
            if not os.path.exists(ext_name):
                echo('Cloning external', ext_name)
                _initial_checkout(ext_name, normalized_ext_repo)
            with chdir(ext_name):
                echo('Retrieving changes from server: ', ext_name)
                _update_checkout(reset)

    link_entries(git_externals)

    if recursive:
        for ext_repo in git_externals.keys():
            entries = [os.path.realpath(d)
                       for t in git_externals[ext_repo]['targets'].values()
                       for d in t]
            with chdir(os.path.join(externals_root_path(), get_repo_name(ext_repo))):
                gitext_up(recursive, entries, reset=reset, use_gitsvn=use_gitsvn)
def gitext_recursive_info(root_dir, recursive=True, externals=None):
    """Print information about each checked-out external, optionally
    recursing into the externals of each external.

    root_dir  -- path prefix used when printing link destinations
    recursive -- descend into nested externals when True
    externals -- optional list of repo names to restrict the report to
                 (None or empty means "all")
    """
    # Avoid the mutable-default-argument pitfall of the original
    # (externals=[]); an empty filter and None behave identically below.
    externals = [] if externals is None else externals

    # Only consider externals that are actually checked out on disk.
    git_exts = {ext_repo: ext for ext_repo, ext in load_gitexts().items()
                if os.path.exists(os.path.join(externals_root_path(), get_repo_name(ext_repo)))}

    for ext_repo, ext in git_exts.items():
        entries = [os.path.realpath(d)
                   for t in git_exts[ext_repo]['targets'].values()
                   for d in t]
        cwd = os.getcwd()
        repo_name = get_repo_name(ext_repo)
        if externals and repo_name not in externals:
            continue
        with chdir(os.path.join(externals_root_path(), repo_name)):
            filtered = filter_externals_not_needed(load_gitexts(), entries)
            print_gitext_info(ext_repo, ext, root_dir, checkout=os.getcwd())

            # if required, recurse into the externals repo of current external
            if recursive:
                for dsts in ext['targets'].values():
                    for dst in dsts:
                        real_dst = os.path.realpath(os.path.join(cwd, dst))
                        # Recurse only when some nested external actually
                        # lands inside this destination.
                        has_deps = any(os.path.realpath(d).startswith(real_dst)
                                       for e in filtered.values()
                                       for ds in e['targets'].values()
                                       for d in ds)
                        if has_deps:
                            gitext_recursive_info(os.path.join(root_dir, dst))
def print_gitext_info(ext_repo, ext, root_dir, checkout=False):
    """Print a human-readable summary of one external: its repository URL,
    pin (tag, or branch plus ref), optional name, and every src -> dst
    mapping rooted at *root_dir*.

    `checkout` controls if printing the `Checkout` field (i.e real checkout
    directory) is required or not.
    """
    click.secho('Repo: {}'.format(ext_repo), fg='blue')

    if checkout:
        click.echo('Checkout: {}'.format(checkout))

    # A tag pins an exact commit; otherwise show branch and ref.
    has_tag = 'tag' in ext
    if has_tag:
        click.echo('Tag: {}'.format(ext['tag']))
    else:
        click.echo('Branch: {}'.format(ext['branch']))
        click.echo('Ref: {}'.format(ext['ref']))

    if 'name' in ext:
        click.echo('Name: {}'.format(ext['name']))

    for src, destinations in ext['targets'].items():
        for destination in destinations:
            click.echo(' {} -> {}'.format(src, os.path.join(root_dir, destination)))

    click.echo('')
def iter_externals(externals, verbose=True):
    """Yield the name of each requested external, with the current working
    directory set to its checkout while the caller holds the generator.

    An empty *externals* list means "all known entries".  Externals missing
    on disk are reported (without aborting) and skipped.
    """
    names = externals if externals else get_entries()

    for name in names:
        entry_path = os.path.join(externals_root_path(), name)

        if not os.path.exists(entry_path):
            # Non-fatal: report and move on to the next entry.
            error('External {} not found'.format(name), exitcode=None)
            continue

        with chdir(entry_path):
            if verbose:
                info('External {}'.format(name))
            yield name
|
"""
BrowserUp Proxy
___ This is the REST API for controlling the BrowserUp Proxy. The BrowserUp Proxy is a swiss army knife for automated testing that captures HTTP traffic in HAR files. It is also useful for Selenium/Cypress tests. ___ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import re  # noqa: F401
import sys  # noqa: F401

from BrowserUpProxyClient.api_client import ApiClient, Endpoint as _Endpoint
from BrowserUpProxyClient.model_utils import (  # noqa: F401
    check_allowed_values,
    check_validations,
    date,
    datetime,
    file_type,
    none_type,
    validate_and_convert_types
)
from BrowserUpProxyClient.model.custom_har_data import CustomHarData
from BrowserUpProxyClient.model.har import Har
from BrowserUpProxyClient.model.match_criteria import MatchCriteria
from BrowserUpProxyClient.model.verify_result import VerifyResult
class BrowserUpProxyApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def __add_custom_har_fields(
self,
**kwargs
):
"""add_custom_har_fields # noqa: E501
Add custom fields to the current HAR. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.add_custom_har_fields(async_req=True)
>>> result = thread.get()
Keyword Args:
body (CustomHarData): [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
return self.call_with_http_info(**kwargs)
self.add_custom_har_fields = _Endpoint(
settings={
'response_type': None,
'auth': [],
'endpoint_path': '/har/page',
'operation_id': 'add_custom_har_fields',
'http_method': 'PUT',
'servers': None,
},
params_map={
'all': [
'body',
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'body':
(CustomHarData,),
},
'attribute_map': {
},
'location_map': {
'body': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__add_custom_har_fields
)
def __get_har_log(
self,
**kwargs
):
"""get_har_log # noqa: E501
Get the current HAR. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_har_log(async_req=True)
>>> result = thread.get()
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Har
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
return self.call_with_http_info(**kwargs)
self.get_har_log = _Endpoint(
settings={
'response_type': (Har,),
'auth': [],
'endpoint_path': '/har',
'operation_id': 'get_har_log',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
},
'attribute_map': {
},
'location_map': {
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__get_har_log
)
def __healthcheck(
self,
**kwargs
):
"""healthcheck # noqa: E501
Get the healthcheck # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.healthcheck(async_req=True)
>>> result = thread.get()
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
return self.call_with_http_info(**kwargs)
self.healthcheck = _Endpoint(
settings={
'response_type': None,
'auth': [],
'endpoint_path': '/healthcheck',
'operation_id': 'healthcheck',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
},
'attribute_map': {
},
'location_map': {
},
'collection_format_map': {
}
},
headers_map={
'accept': [],
'content_type': [],
},
api_client=api_client,
callable=__healthcheck
)
def __reset_har_log(
self,
**kwargs
):
"""reset_har_log # noqa: E501
Starts a fresh HAR capture session. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.reset_har_log(async_req=True)
>>> result = thread.get()
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Har
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
return self.call_with_http_info(**kwargs)
self.reset_har_log = _Endpoint(
settings={
'response_type': (Har,),
'auth': [],
'endpoint_path': '/har',
'operation_id': 'reset_har_log',
'http_method': 'PUT',
'servers': None,
},
params_map={
'all': [
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
},
'attribute_map': {
},
'location_map': {
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__reset_har_log
)
def __set_har_page(
self,
**kwargs
):
"""set_har_page # noqa: E501
Starts a fresh HAR Page in the current active HAR # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.set_har_page(async_req=True)
>>> result = thread.get()
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Har
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
return self.call_with_http_info(**kwargs)
self.set_har_page = _Endpoint(
settings={
'response_type': (Har,),
'auth': [],
'endpoint_path': '/har/page',
'operation_id': 'set_har_page',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
},
'attribute_map': {
},
'location_map': {
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__set_har_page
)
def __verify_not_present(
self,
match_criteria,
**kwargs
):
"""verify_not_present # noqa: E501
Verify no matching items are present in the captured traffic # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.verify_not_present(match_criteria, async_req=True)
>>> result = thread.get()
Args:
match_criteria (MatchCriteria): Match criteria to select requests - response pairs for size tests
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
VerifyResult
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['match_criteria'] = \
match_criteria
return self.call_with_http_info(**kwargs)
self.verify_not_present = _Endpoint(
settings={
'response_type': (VerifyResult,),
'auth': [],
'endpoint_path': '/verify/not_present',
'operation_id': 'verify_not_present',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'match_criteria',
],
'required': [
'match_criteria',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'match_criteria':
(MatchCriteria,),
},
'attribute_map': {
},
'location_map': {
'match_criteria': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__verify_not_present
)
def __verify_present(
self,
match_criteria,
**kwargs
):
"""verify_present # noqa: E501
Verify at least one matching item is present in the captured traffic # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.verify_present(match_criteria, async_req=True)
>>> result = thread.get()
Args:
match_criteria (MatchCriteria): Match criteria to select requests - response pairs for size tests
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
VerifyResult
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['match_criteria'] = \
match_criteria
return self.call_with_http_info(**kwargs)
self.verify_present = _Endpoint(
settings={
'response_type': (VerifyResult,),
'auth': [],
'endpoint_path': '/verify/present',
'operation_id': 'verify_present',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'match_criteria',
],
'required': [
'match_criteria',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'match_criteria':
(MatchCriteria,),
},
'attribute_map': {
},
'location_map': {
'match_criteria': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__verify_present
)
def __verify_size(
self,
size,
match_criteria,
**kwargs
):
"""verify_size # noqa: E501
Verify matching items in the captured traffic meet the size criteria # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.verify_size(size, match_criteria, async_req=True)
>>> result = thread.get()
Args:
size (int): The size used for comparison
match_criteria (MatchCriteria): Match criteria to select requests - response pairs for size tests
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
VerifyResult
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['size'] = \
size
kwargs['match_criteria'] = \
match_criteria
return self.call_with_http_info(**kwargs)
self.verify_size = _Endpoint(
settings={
'response_type': (VerifyResult,),
'auth': [],
'endpoint_path': '/verify/size/{size}',
'operation_id': 'verify_size',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'size',
'match_criteria',
],
'required': [
'size',
'match_criteria',
],
'nullable': [
],
'enum': [
],
'validation': [
'size',
]
},
root_map={
'validations': {
('size',): {
'inclusive_minimum': 0,
},
},
'allowed_values': {
},
'openapi_types': {
'size':
(int,),
'match_criteria':
(MatchCriteria,),
},
'attribute_map': {
'size': 'size',
},
'location_map': {
'size': 'path',
'match_criteria': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__verify_size
)
def __verify_sla(
self,
time,
match_criteria,
**kwargs
):
"""verify_sla # noqa: E501
Verify each traffic item matching the criteria meets is below SLA time # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.verify_sla(time, match_criteria, async_req=True)
>>> result = thread.get()
Args:
time (int): The time used for comparison
match_criteria (MatchCriteria): Match criteria to select requests - response pairs for size tests
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
VerifyResult
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['time'] = \
time
kwargs['match_criteria'] = \
match_criteria
return self.call_with_http_info(**kwargs)
self.verify_sla = _Endpoint(
settings={
'response_type': (VerifyResult,),
'auth': [],
'endpoint_path': '/verify/sla/{time}',
'operation_id': 'verify_sla',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'time',
'match_criteria',
],
'required': [
'time',
'match_criteria',
],
'nullable': [
],
'enum': [
],
'validation': [
'time',
]
},
root_map={
'validations': {
('time',): {
'inclusive_minimum': 0,
},
},
'allowed_values': {
},
'openapi_types': {
'time':
(int,),
'match_criteria':
(MatchCriteria,),
},
'attribute_map': {
'time': 'time',
},
'location_map': {
'time': 'path',
'match_criteria': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__verify_sla
)
|
#!/usr/bin/env python
"""Command line tool for interaction with YI Dashcam"""
import argparse
import enum
import sys
import time
from . import __version__, YIDashcam, YIDashcamException
from .config import Option, option_map, PhotoResolution
def format_config(option, value):
    """Render an option/value pair as 'Title Cased Option: Title Cased Value'.

    `option` is an enum member (has a .name); `value` may be an enum member
    (rendered via its .name) or a plain value (rendered as-is).
    """
    label = option.name.replace("_", " ").title()
    if hasattr(value, 'name'):
        rendered = value.name.replace("_", " ").title()
    else:
        rendered = value
    return "{0}: {1}".format(label, rendered)
# Top-level argument parser; subcommands: config, stream, snapshot, webapp.
parser = argparse.ArgumentParser(prog=YIDashcam.__module__)
parser.add_argument(
    '--version', action='version', version='%(prog)s v{}'.format(__version__))
subparsers = parser.add_subparsers(
    title="Commands", dest='command', metavar='COMMAND')

# Config
parser_config = subparsers.add_parser(
    'config', help='camera information and configuration')
subparsers_config = parser_config.add_subparsers(
    title="Config options", dest='option', metavar='')
# One sub-parser per settable option; string-typed options are skipped
# (read-only from this CLI).
for option, val_type in sorted(option_map.items(), key=lambda x: x[0].name):
    if val_type is str:
        continue
    parser_option = subparsers_config.add_parser(
        option.name, help=option.name.replace('_', ' ').title())
    if issubclass(val_type, enum.Enum):
        parser_option.add_argument(
            'value', choices=[value.name for value in val_type])
    elif val_type is bool:
        parser_option.add_argument(
            'value', type=str.lower, choices=['true', 'false'])

# Video stream
parser_stream = subparsers.add_parser(
    'stream', help='put dashcam in mode to stream video')

# Photo capture
parser_snapshot = subparsers.add_parser(
    'snapshot', help='take a photo with the dashcam')
parser_snapshot.add_argument(
    '-r',
    dest='photo_resolution',
    choices=[res.name for res in PhotoResolution],
    help="photo resolution (default: dashcam current setting)")
parser_snapshot.add_argument(
    '-o',
    dest='output_filename',
    metavar="FILE",
    help="output file to save image (default: filename on camera)")

# Web Application
# NOTE(review): this rebinds parser_config from the 'config' sub-parser
# above; harmless since the object is no longer used, but a distinct name
# (e.g. parser_webapp) would be clearer.
parser_config = subparsers.add_parser(
    'webapp', help='host local web app to view dashcam videos')

if "exposure" in sys.argv:
    # Allow negative values for exposure
    sys.argv.insert(len(sys.argv) - 1, "--")

args = parser.parse_args()

# No command defaults to showing the configuration.
if args.command is None or args.command == "config":
    with YIDashcam() as yi:
        if getattr(args, 'option', None) is not None:
            # Set a single option, then read it back as confirmation.
            option = Option[args.option]
            val_type = option_map[option]
            time.sleep(1.5)  # Need a chance for dashcam to settle...
            if issubclass(val_type, enum.Enum):
                yi.set_config(option, val_type[args.value])
            elif val_type is bool:
                yi.set_config(option, args.value.lower() == "true")
            time.sleep(1)  # Need a chance for config to set...
            print(format_config(option, yi.config[option]))
        else:
            # Dump the full configuration, sorted by option name.
            print(
                *[format_config(option, value)
                  for option, value in sorted(
                      yi.config.items(), key=lambda x: x[0].name)],
                sep="\n")
elif args.command == "stream":
    with YIDashcam() as yi:
        print("Connect to video stream at: rtsp://{0.HOST}/xxx.mov".format(yi))
        print("Press enter to take video photo, or Ctrl-C to exit")
        try:
            while yi.connected:
                input()
                try:
                    yi.take_video_photo()
                except YIDashcamException as e:
                    print("Error taking photo:", e)
                else:
                    print("Photo taken!")
        except KeyboardInterrupt:
            pass
elif args.command == "snapshot":
    with YIDashcam() as yi:
        if args.photo_resolution is not None:
            time.sleep(1)  # Need a chance for dashcam to settle...
            yi.set_config(Option.photo_resolution,
                          PhotoResolution[args.photo_resolution])
        yi.take_photo()
        # The newest photo on the camera is the one just taken.
        photo = sorted(yi.photo_list)[-1]
        if args.output_filename is None:
            output_filename = photo.name
        else:
            output_filename = args.output_filename
        with open(output_filename, 'wb') as output_file:
            for data in yi.get_file(photo):
                output_file.write(data)
        print("Snapshot saved to: {}".format(output_filename))
elif args.command == "webapp":
    from . import webapp
    with YIDashcam(None) as yi:
        webapp.yi = yi
        webapp.app.run()
|
<reponame>LawrenceMMStewart/Optimal_Transport_MIT<gh_stars>1-10
#tests for ot1d.py
import pytest
from ot.ot1d import *
import numpy as np
def precision_eq(a, b):
    """True when every element of |a - b| is below 1e-14."""
    diff = np.abs(a - b)
    return np.all(diff < 1e-14)
def tensor_precision(a,b):
    # Scalar-tensor closeness: sum of squared differences below 1e-14.
    # NOTE(review): `tf` is not imported here — presumably it arrives via
    # the `from ot.ot1d import *` star import above; confirm.
    return tf.reduce_sum((a-b)**2)<1e-14
def test_ttu1d_5k():
    """transport_to_uniform_1D: 3-bin histogram onto k=5 uniform bins."""
    source = np.array([0.2, 0.5, 0.3])
    expected = np.array([[0.2, 0., 0., 0., 0.],
                         [0., 0.2, 0.2, 0.1, 0.],
                         [0., 0., 0., 0.1, 0.2]])
    assert precision_eq(transport_to_uniform_1D(source, 5), expected)
def test_ttu1d_1k():
    """k=1 collapses each source bin onto the single target bin."""
    source = np.array([0.2, 0.5, 0.3])
    expected = np.array([[0.2], [0.5], [0.3]])
    assert precision_eq(transport_to_uniform_1D(source, 1), expected)
def test_ttu1d_2k():
    """Transport of 3 bins onto k=2 uniform bins."""
    source = np.array([0.2, 0.5, 0.3])
    expected = np.array([[0.2, 0],
                         [0.3, 0.2],
                         [0, 0.3]])
    assert precision_eq(transport_to_uniform_1D(source, 2), expected)
def test_ttu1d_remainder():
    """A column-vector source gives the same plan as the flat version."""
    source = np.array([[0.2], [0.5], [0.3]])
    result = transport_to_uniform_1D(source, 5)
    expected = np.array([[0.2, 0., 0., 0., 0.],
                         [0., 0.2, 0.2, 0.1, 0.],
                         [0., 0., 0., 0.1, 0.2]])
    assert precision_eq(result, expected)
def test_bary1d_222():
    """Barycentre of two 2-point measures collapsed to k=1."""
    xs = [np.array([1, 2]), np.array([4, 6])]
    mus = [np.array([0.8, 0.2]), np.array([0.4, 0.6])]
    expected = np.array([3.2])
    assert precision_eq(expected, uniform_barycentre_1D(xs, mus, 1))
def test_bary1d_222_rev():
    """Barycentre is invariant to the order of the input measures."""
    mu_a = np.array([0.8, 0.2])
    pts_a = np.array([1, 2])
    mu_b = np.array([0.4, 0.6])
    pts_b = np.array([4, 6])
    result = uniform_barycentre_1D([pts_b, pts_a], [mu_b, mu_a], 1)
    assert precision_eq(np.array([3.2]), result)
def test_bary1d_225():
    """Barycentre of two 2-point measures on k=5 uniform bins."""
    xs = [np.array([1, 2]), np.array([4, 6])]
    mus = [np.array([0.8, 0.2]), np.array([0.4, 0.6])]
    expected = np.array([2.5, 2.5, 3.5, 3.5, 4])
    assert precision_eq(expected, uniform_barycentre_1D(xs, mus, 5))
def test_bary1d_diracs():
    """Barycentre of two diracs sits at their midpoint."""
    mass = np.array([1.])
    result = uniform_barycentre_1D([np.array([1.]), np.array([2.])],
                                   [mass, mass], 1)
    assert precision_eq(np.array([1.5]), result)
def test_bary1d_diracs_weighted():
    """Weighted barycentre of two diracs interpolates by the weights."""
    mass = np.array([1.])
    result = uniform_barycentre_1D([np.array([1.]), np.array([2.])],
                                   [mass, mass], 1, weights=[0.9, 0.1])
    assert precision_eq(np.array([1.10]), result)
def test_bary1d_mixedsizes():
    """Supports of different sizes: a dirac vs a 3-point uniform measure."""
    pts_a = [5.]
    pts_b = [2.5, 2.5, 2.5]
    mu_a = [1.]
    mu_b = np.ones(len(pts_b)) / len(pts_b)
    result = uniform_barycentre_1D([pts_a, pts_b], [mu_a, mu_b], 1)
    assert precision_eq(np.array([3.75]), result)
def test_bary1d_mixed_sizes2():
    """Two duplicated-point measures share the same weight vector."""
    pts_a = [1.0, 1]
    pts_b = [2., 2.0]
    mu = [0.5, 0.5]
    result = uniform_barycentre_1D([pts_a, pts_b], [mu, mu], 1)
    assert precision_eq(np.array([1.5]), result)
def test_bary1d_mixed_sizes3():
    """Same as mixed_sizes2 but with the second weight vector built from ones."""
    pts_a = [1.0, 1]
    pts_b = [2., 2.0]
    mu_a = [0.5, 0.5]
    mu_b = np.ones(len(pts_b)) / len(pts_b)
    result = uniform_barycentre_1D([pts_a, pts_b], [mu_a, mu_b], 1)
    assert precision_eq(np.array([1.5]), result)
def test_bary1d_mixed_sizes2_weighted():
    """Weighted barycentre of duplicated-point measures."""
    pts_a = [1.0, 1]
    pts_b = [2., 2.0]
    mu = [0.5, 0.5]
    result = uniform_barycentre_1D([pts_a, pts_b], [mu, mu], 1,
                                   weights=[0.9, 0.1])
    assert precision_eq(np.array([1.1]), result)
# bary(a*x, a*y) = a * bary(x, y)
def test_bary1d_scalarmultiplication():
    """Barycentre commutes with scaling the support points."""
    pts_a = [3.0, 6.0]
    pts_b = [-3.1, 0.5]
    mu = [0.2, 0.8]
    w = [0.5, 0.5]
    scaled_a = [v * 2 for v in pts_a]
    scaled_b = [v * 2 for v in pts_b]
    lhs = uniform_barycentre_1D([scaled_a, scaled_b], [mu, mu], 10, weights=w)
    rhs = uniform_barycentre_1D([pts_a, pts_b], [mu, mu], 10, weights=w) * 2
    assert precision_eq(lhs, rhs)
# bary(x+a, y+a) = a + bary(x, y)
def test_bary1d_scalaraddition():
    """Barycentre commutes with translating the support points."""
    pts_a = [3.0, 6.0]
    pts_b = [-3.1, 0.5]
    mu = [0.2, 0.8]
    w = [0.5, 0.5]
    shifted_a = [v + 2 for v in pts_a]
    shifted_b = [v + 2 for v in pts_b]
    lhs = uniform_barycentre_1D([shifted_a, shifted_b], [mu, mu], 10, weights=w)
    rhs = uniform_barycentre_1D([pts_a, pts_b], [mu, mu], 10, weights=w) + 2
    assert precision_eq(lhs, rhs)
# W(a, a) = 0
def test_wasserstein1d_uniform_d1():
    """Distance from a measure to itself is zero."""
    pts = tf.reshape(tf.constant([-1.0, -2.0, 2.0], dtype=tf.float32), [-1, 1])
    zero = tf.constant(0.0, dtype=tf.float32)
    assert tensor_precision(Wasserstein1d_uniform(pts, pts), zero)
# W(a, b) = W(b, a)
def test_wasserstein1d_uniform_d2():
    """Symmetry of the distance."""
    xs = tf.reshape(tf.constant([-1.0, -2.0, 2.0], dtype=tf.float32), [-1, 1])
    ys = tf.reshape(tf.constant([5.0, -1.0, 7.0], dtype=tf.float32), [-1, 1])
    assert tensor_precision(Wasserstein1d_uniform(xs, ys),
                            Wasserstein1d_uniform(ys, xs))
# trivial example
def test_wasserstein1d_uniform_exact():
    """Hand-computed values between a dirac and a two-point measure."""
    xs = tf.constant([[0.]], dtype=tf.float32)
    ys = tf.constant([[-0.5], [0.5]], dtype=tf.float32)
    assert tensor_precision(Wasserstein1d_uniform(xs, ys), 0.25)
    one = tf.constant(1, dtype=tf.float32)
    assert tensor_precision(Wasserstein1d_uniform(xs, ys, p=one), 0.5)
|
import argparse
import logging
import yaml
from medcat.cdb_maker import CDBMaker
from medcat.utils.make_vocab import MakeVocab
from medcat.cat import CAT
from medcat.config import Config
from medcat.utils.loggers import add_handlers
from pathlib import Path
# Create Logger
# Fixed: getLogger was called with the literal string '__package__', which
# names a logger literally called "__package__"; use the module's actual
# package name instead.
logger = logging.getLogger(__package__)
logger = add_handlers(logger)
logger.setLevel(logging.INFO)
def create_cdb(concept_csv_file, medcat_config):
    """Build a MedCAT concept database from a concept CSV.

    Args:
        concept_csv_file (pathlib.Path):
            Path to CSV file containing all concepts and synonyms.
        medcat_config (medcat.config.Config):
            MedCAT configuration.

    Returns:
        medcat.cdb.CDB: concept database with entities and synonyms but
        without context embeddings.
    """
    logger.info('Creating concept database from concept table')
    maker = CDBMaker(config=medcat_config)
    return maker.prepare_csvs([str(concept_csv_file)], full_build=True)
def create_vocab(cdb, training_data_list, medcat_config, output_dir, unigram_table_size):
    """Create the word-embedding / spell-check vocabulary from the CDB and
    a list of training documents.

    Args:
        cdb (medcat.cdb.CDB): concept database.
        training_data_list (list): example documents, one string each.
        medcat_config (medcat.config.Config): MedCAT configuration.
        output_dir (pathlib.Path): directory that receives the vocabulary
            and the intermediate data.txt file.
        unigram_table_size (int): size of the unigram table initialized
            before building the vocabulary.

    Returns:
        medcat.vocab.Vocab: the vocabulary built from CDB + documents.
    """
    logger.info('Creating and saving vocabulary')
    builder = MakeVocab(cdb=cdb, config=medcat_config)
    builder.make(training_data_list, out_folder=str(output_dir))
    builder.add_vectors(in_path=str(output_dir / 'data.txt'),
                        unigram_table_size=unigram_table_size)
    return builder.vocab
def train_unsupervised(cdb, vocab, medcat_config, output_dir, training_data_list):
    """Run unsupervised training and persist the updated CDB.

    The CDB is updated in place with context embeddings and also written to
    `output_dir / 'cdb.dat'`.

    Args:
        cdb (medcat.cdb.CDB): concept database.
        vocab (medcat.vocab.Vocab): MedCAT vocabulary.
        medcat_config (medcat.config.Config): MedCAT configuration.
        output_dir (pathlib.Path): directory for the updated CDB.
        training_data_list (list): example documents.

    Returns:
        medcat.cdb.CDB: the (updated) concept database.
    """
    # Assemble the MedCAT pipeline from its parts.
    pipeline = CAT(cdb=cdb, vocab=vocab, config=medcat_config)

    logger.info('Performing unsupervised training')
    pipeline.train(training_data_list)

    logger.info('Saving updated concept database')
    cdb.save(str(output_dir / 'cdb.dat'))
    return cdb
def create_models(config_file):
    """Create the MedCAT CDB and Vocab models from a YAML pipeline config.

    Args:
        config_file (pathlib.Path): model-creator configuration specifying
            input files, output directory and (optionally) a MedCAT config.

    Returns:
        tuple: (cdb, vocab) — concept database with context embeddings and
        the vocabulary built alongside it.
    """
    # Load the model-creator configuration.
    with open(config_file, 'r') as stream:
        config = yaml.safe_load(stream)

    # Load data for unsupervised training, one document per line.
    training_path = Path(config['unsupervised_training_data_file'])
    with open(training_path, 'r', encoding='utf-8') as training_data:
        training_data_list = [line.strip() for line in training_data]

    # MedCAT configuration, optionally overridden from file.
    medcat_config = Config()
    if 'medcat_config_file' in config:
        medcat_config.parse_config_file(Path(config['medcat_config_file']))

    # Ensure the output directory exists.
    output_dir = Path(config['output_dir'])
    output_dir.mkdir(parents=True, exist_ok=True)

    # Build CDB -> vocabulary -> unsupervised training.
    cdb = create_cdb(Path(config['concept_csv_file']), medcat_config)
    vocab = create_vocab(cdb, training_data_list, medcat_config, output_dir,
                         config['unigram_table_size'])
    cdb = train_unsupervised(cdb, vocab, medcat_config, output_dir,
                             training_data_list)
    return cdb, vocab
if __name__ == '__main__':
    # Parse arguments: a single positional YAML config file.
    parser = argparse.ArgumentParser()
    parser.add_argument('config_file', help='YAML formatted file containing the parameters for model creator. An '
                        'example can be found in `tests/model_creator/config_example.yml`')
    args = parser.parse_args()
    # Run pipeline
    create_models(args.config_file)
|
<reponame>fountain111/Algorithm-
import entropy
import numpy as np
class ID3_trees():
    '''
    ID3 decision-tree classifier.

    Builds a nested-dict tree from a dataset whose LAST column holds the
    class labels, splitting at each node on the feature with the highest
    information gain.
    '''
    def __init__(self):
        # Stateless: the fitted tree is returned by fit(), not stored.
        #self.label = label
        return

    def _create_dataSets(self):
        """Return a toy dataset (last column = label) and its feature names."""
        dataSet = [[1, 1, 'yes'],
                   [1, 1, 'yes'],
                   [1, 0, 'no'],
                   [0, 1, 'no'],
                   [0, 1, 'no'],
                   ]
        fea_names = ['fea1', 'fea2']
        return dataSet, fea_names

    def createTrees(self,dataSet,fea_names):
        '''
        Recursively build the tree; returns a nested dict, or a bare label
        value at the leaves.

        NOTE(review): this method mutates `fea_names` in place (the `del`
        below), which is why fit() should be called with a copy — see main().

        :param dataSet: array_like; last column holds the class labels
        :param fea_names: feature names, parallel to the feature columns
        :return: nested dict {feature_name: {value: subtree_or_label}} or a label
        '''
        dataSet = np.asarray(dataSet)
        labels = dataSet[:,-1]
        unique_labels,counts = np.unique(labels,return_counts=True)
        # All rows carry a single label -> pure leaf.
        if len(labels) == counts[0]:
            return unique_labels[0]
        # Only the label column remains -> majority-vote leaf.
        if dataSet.shape[1] == 1:
            print('stop return2 dataSet shape=1',)
            return self.majorityCnt(dataSet)
        best_fea_index = self.chooseBestFeatureToSplit(dataSet)
        #best_fea_index = self.chooseBestFeatureToSplit_condition_entropy(dataSet)
        best_fea_name = fea_names[best_fea_index]
        bestValues =dataSet[:,best_fea_index]
        tree = {best_fea_name:{}}
        # Consume this feature so the recursion never reuses it.
        del fea_names[best_fea_index]
        # best_fea_index may stay 0 at every level, but the name changes.
        for value in np.unique(bestValues):
            sub_dataSet = self.splitDataSet(dataSet,best_fea_index,value)
            tree[best_fea_name][value] = self.createTrees(sub_dataSet,fea_names)
        return tree

    def splitDataSet(self,dataSet,index,value):
        '''
        Select the rows whose column `index` equals `value`, then drop that
        column from the result.

        :param dataSet: array_like
        :param index: column to filter on
        :param value: required value in that column
        :return: ndarray of matching rows with column `index` removed
        '''
        new_list = []
        for row in dataSet:
            if (row[index] == value):
                new_list.append(row)
        new_array = np.asarray(new_list)
        return np.delete(new_array,index,axis=1)

    def chooseBestFeatureToSplit(self,dataSet):
        '''
        Traverse all features and pick the one with the highest information
        gain relative to the label entropy.

        :param dataSet: array_like, last column = labels
        :return: best feature index (None when no split improves on 0 gain)
        '''
        dataSet = np.asarray(dataSet)
        best_informationGain = 0
        best_index = None
        baseEntropy = self.calcEntropy(dataSet)
        fea_lens = dataSet.shape[1]-1
        for i in range(fea_lens):
            unique_feas = np.unique(dataSet[:,i])
            condition_entropy = 0
            for value in unique_feas:
                sub_dataSet = self.splitDataSet(dataSet,i,value)
                prob = sub_dataSet.shape[0]/dataSet.shape[0] # P(X = value)
                condition_entropy += prob*self.calcEntropy(sub_dataSet) # accumulate H(Y|X)
            information_gain = baseEntropy - condition_entropy
            if best_informationGain < information_gain:
                best_informationGain = information_gain
                best_index = i
        return best_index

    def chooseBestFeatureToSplit_condition_entropy(self,dataSet):
        '''
        Alternative criterion: minimize the conditional entropy directly
        (equivalent to maximizing information gain, since the base entropy
        is the same for every feature).

        :param dataSet: array_like, last column = labels
        :return: best feature index
        '''
        dataSet = np.asarray(dataSet)
        best_informationGain = 100000000
        best_index = None
        baseEntropy = self.calcEntropy(dataSet)
        fea_lens = dataSet.shape[1]-1
        for i in range(fea_lens):
            unique_feas = np.unique(dataSet[:,i])
            condition_entropy = 0
            for value in unique_feas:
                sub_dataSet = self.splitDataSet(dataSet,i,value)
                prob = sub_dataSet.shape[0]/dataSet.shape[0] # P(X = value)
                condition_entropy += prob*self.calcEntropy(sub_dataSet) # accumulate H(Y|X)
            information_gain = condition_entropy
            if best_informationGain > information_gain:
                best_informationGain = information_gain
                best_index = i
        return best_index

    def calcEntropy(self,dataSet):
        '''
        Shannon entropy (base 2) of the label column.

        NOTE: passing the whole dataset is unnecessary — only the last
        column (the labels) is used.

        :param dataSet: array_like
        :return: entropy
        '''
        dataSet = np.asarray(dataSet)
        labels = dataSet[:,-1]
        _,counts = np.unique(labels,return_counts=True)
        return_entropy = entropy._entropy(list(map( lambda x:x/dataSet.shape[0],counts)),base=2)
        return return_entropy

    def majorityCnt(self,dataSet):
        '''
        Majority vote over the label column.

        NOTE(review): np.argmax returns a scalar, so the isinstance(...,
        list) branch below never triggers; ties are actually resolved by
        argmax (first maximum), not randomly as the message suggests.

        :param dataSet: array_like, last column = labels
        :return: the most frequent label
        '''
        values,counts = np.unique(dataSet[:,-1],return_counts=True)
        indices = np.argmax(counts)
        if isinstance(indices,list):
            print('majority euqal ,random pick ')
            indices = indices[0]
        label = values[indices]
        print('marjority vote=',label)
        return label

    def fit(self,dataSet,fea_names):
        """Fit and return the tree. `fea_names` is mutated — pass a copy."""
        return self.createTrees(dataSet,fea_names)

    def _predict_example(self,trees,fea_names,test_row):
        '''
        Predict a single example (row) by walking the nested-dict tree.

        :param trees: dict, tree produced by createTrees
        :param fea_names: feature names in ORIGINAL column order
        :param test_row: array_like feature values for one example
        :return: class label
        '''
        first_key = list(trees.keys())[0]
        next_tree = trees[first_key] # next subtree or a label
        fea_index = fea_names.index(first_key)
        key = test_row[fea_index]
        valueInRow = next_tree[key]
        if isinstance(valueInRow,dict):
            class_label = self._predict_example(valueInRow,fea_names,test_row)
        else:
            class_label = valueInRow
        return class_label

    def predict(self,trees,fea_names,test_data):
        '''
        Predict labels for a batch of examples.

        :param trees: dict tree from fit()
        :param fea_names: feature names in ORIGINAL column order
        :param test_data: array_like, one row per example
        :return: list of predicted labels
        '''
        test_data = np.asarray(test_data)
        try:
            test_data.shape[1]
        except IndexError:
            # Promote a single 1-D example to a 1-row 2-D batch.
            test_data = test_data.reshape(1,test_data.shape[0])
            print('reshape data',test_data)
            pass
        labels = list(map(lambda x: self._predict_example(trees, fea_names, x), test_data))
        return labels
def main():
    """Build the toy ID3 tree and re-predict the training rows."""
    model = ID3_trees()
    array = np.random.random((3,4))  # NOTE(review): unused scratch array
    dataset,fea_names = model._create_dataSets()
    # createTrees mutates the feature-name list, so fit on a copy and keep
    # the original ordering around for predict().
    trees = model.fit(dataset,fea_names.copy())
    labels = model.predict(trees,fea_names,np.asarray(dataset)[:,:-1])
    print(labels)

if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
"""
A Cell object represents a place in the environment where an organism could
reside. If that Cell is occupied by an organism, the Cell object also defines
that organism.
"""
__author__ = "<NAME> <<EMAIL>>"
__credits__ = "<NAME>"
import random
from seeds.SEEDSError import *
class Cell(object):
    """
    Interface for Cell objects

    Properties:

    experiment
        A reference to the Experiment in which the Cell exists
    population
        A reference to the Population in which the Cell exists
    id
        A unique ID representing that Cell
    node
        The ID of the node of the population topology graph on which this Cell
        resides
    types
        List of strings describing the possible types the Cell could be
    type
        Number indicating which type the current Cell is. This number is also
        an index into the 'types' parameter.
    max_types
        Maximum number of different types possible with this Cell
    type_colors
        A list of colors (matplotlib color strings or hex colors) to be used to
        represent the different cell types by scripts that plot or visualize
        the population. A default list is defined that allows for coloring of
        up to 8 types.
    name
        The name of the Cell type
    label
        A unique label identifying this Cell's configuration
    neighbors
        A list of Cells with which this Cell interacts. These are cells on
        neighboring nodes in the topology.

    Configuration:
    Configuration options for each custom Cell object should be stored in a
    configuration block bearing the name of that Cell type (e.g.,
    "[DemoCell]")
    """

    types = []
    type_colors = []
    max_types = 0

    def __init__(self, experiment, population, node, type=None, name=None, label=None):
        """Initialize a Cell object

        Parameters:

        *experiment*
            A reference to the Experiment in which this Cell exists
        *population*
            A reference to the Population in which this Cell exists
        *node*
            The ID of the node of the population topology graph on which this
            Cell resides
        *type*
            The type of Cell this is (specific to the Cell class used)
        *name*
            The name of the Cell type
        *label*
            A unique label for the configuration of this cell
        """
        self.experiment = experiment
        self.population = population
        self.id = self.population.get_cell_id()
        self.node = node
        self.name = name
        self.label = label
        self.type_colors = ['r','g','b','y','c', 'm', 'k']

        # BUG FIX: this previously read "if type:", which treated an
        # explicitly-requested type of 0 as "no type given" and silently
        # assigned a random type instead.
        if type is not None:
            if type not in range(len(self.types)):
                raise CellTypeError(type)
            self.type = type
        else:
            self.type = random.randint(0, len(self.types)-1)

        # Section name used to look up this cell's configuration block.
        if self.label:
            self.config_section = "{name}:{label}".format(name=self.name, label=self.label)
        else:
            self.config_section = "{name}".format(name=self.name)

        self.neighbors = []

    def __str__(self):
        """Produce a string to be used when a Cell object is printed"""
        return "Cell {id} Type {type}".format(id=self.id, type=self.type)

    def add_neighbor(self, neighbor):
        """Make the given cell a neighbor (adds a topology edge and refreshes
        both cells' neighbor caches)"""
        self.population.topology.add_edge(self.id, neighbor.id)
        self.neighbors = self.get_neighbors()
        neighbor.neighbors = neighbor.get_neighbors()

    def remove_neighbor(self, neighbor):
        """Disconnect the Cell from the given Cell, making them no longer
        neighbors
        """
        self.population.topology.remove_edge(self.id, neighbor.id)
        self.update_neighbors()
        neighbor.update_neighbors()

    def get_neighbors(self):
        """Get a list of neighboring cells"""
        return self.population.get_neighbors(self)

    def update_neighbors(self):
        """Update the cached list of neighboring cells"""
        self.neighbors = self.get_neighbors()

    def update(self):
        """Update the Cell according to its update rules (subclass hook)"""
        pass

    def teardown(self):
        """Perform any necessary cleanup at the end of the experiment"""
        pass

    def coords(self):
        """Get the coordinates of the Cell in space"""
        return self.population.topology.graph.node[self.node]['coords']

    def get_neighbor_distance(self, neighbor):
        """Get the Cartesian distance to the given neighbor Cell"""
        return self.population.cell_distance(self, neighbor)

    def get_neighbor_distances(self):
        """Get an array of distances to all neighbors"""
        return [self.get_neighbor_distance(n) for n in self.get_neighbors()]
|
"""
Binomial distribution
---------------------
"""
import mpmath
from ..fun import logbinomial
__all__ = ['pmf', 'logpmf', 'cdf', 'sf', 'mean', 'var']
def _validate_np(n, p):
if p < 0 or p > 1:
raise ValueError('p must be in the range [0, 1]')
if n < 0:
raise ValueError('n must be a nonnegative integer.')
return
def pmf(k, n, p):
    """
    Probability mass function of the binomial distribution.
    """
    _validate_np(n, p)
    with mpmath.extradps(5):
        prob = mpmath.mpf(p)
        choose = mpmath.binomial(n, k)
        return choose * mpmath.power(prob, k) * mpmath.power(1 - prob, n - k)
def logpmf(k, n, p):
    """
    Natural log of the probability mass function of the binomial distribution.
    """
    _validate_np(n, p)
    with mpmath.extradps(5):
        log_choose = logbinomial(n, k)
        return (log_choose
                + k*mpmath.log(p)
                + mpmath.fsum([n, -k])*mpmath.log1p(-p))
def cdf(k, n, p, method='incbeta'):
    """
    Cumulative distribution function of the binomial distribution.

    `method` must be either "sumpmf" or "incbeta". When `method` is "sumpmf",
    the CDF is computed with a simple sum of the PMF values. When `method`
    is "incbeta", the incomplete beta function is used. This method is
    generally faster than the "sumpmf" method, but for large values of k
    or n, the incomplete beta function of mpmath might fail.
    """
    _validate_np(n, p)
    if method not in ['sumpmf', 'incbeta']:
        # Fixed: the message previously said "sum", which is not an accepted
        # value for `method`.
        raise ValueError('method must be "sumpmf" or "incbeta"')
    if method == 'incbeta':
        with mpmath.extradps(5):
            p = mpmath.mpf(p)
            # XXX For large values of k and/or n, betainc fails. The failure
            # occurs in one of the hypergeometric functions.
            return mpmath.betainc(n - k, k + 1, x1=0, x2=1 - p,
                                  regularized=True)
    else:
        # method is "sumpmf"
        with mpmath.extradps(5):
            c = mpmath.fsum([mpmath.exp(logpmf(t, n, p))
                             for t in range(k + 1)])
            return c
def sf(k, n, p, method='incbeta'):
    """
    Survival function of the binomial distribution.

    `method` must be either "sumpmf" or "incbeta". When `method` is "sumpmf",
    the survival function is computed with a simple sum of the PMF values.
    When `method` is "incbeta", the incomplete beta function is used. This
    method is generally faster than the "sumpmf" method, but for large values
    of k or n, the incomplete beta function of mpmath might fail.
    """
    _validate_np(n, p)
    if method not in ['sumpmf', 'incbeta']:
        # Fixed: the message previously said "sum", which is not an accepted
        # value for `method`.
        raise ValueError('method must be "sumpmf" or "incbeta"')
    if method == 'incbeta':
        with mpmath.extradps(5):
            p = mpmath.mpf(p)
            # XXX For large values of k and/or n, betainc fails. The failure
            # occurs in one of the hypergeometric functions.
            return mpmath.betainc(n - k, k + 1, x1=1-p, x2=1,
                                  regularized=True)
    else:
        # method is "sumpmf"
        with mpmath.extradps(5):
            c = mpmath.fsum([mpmath.exp(logpmf(t, n, p))
                             for t in range(k + 1, n + 1)])
            return c
def mean(n, p):
    """
    Mean (n*p) of the binomial distribution.
    """
    _validate_np(n, p)
    with mpmath.extradps(5):
        return mpmath.mpf(n) * mpmath.mpf(p)
def var(n, p):
    """
    Variance (n*p*(1-p)) of the binomial distribution.
    """
    _validate_np(n, p)
    with mpmath.extradps(5):
        n_mp = mpmath.mpf(n)
        p_mp = mpmath.mpf(p)
        return n_mp * p_mp * (1 - p_mp)
|
<filename>SComplexity/scrape.py<gh_stars>1-10
# Scientific readability project
# authors: other authors,
# ...,
# <NAME>
# https://github.com/russelljjarvis/
# <EMAIL>
# <NAME>
# <EMAIL>
from numpy import random
import os
from bs4 import BeautifulSoup
import pickle
import _pickle as cPickle #Using cPickle will result in performance gains
from GoogleScraper import scrape_with_config, GoogleSearchError
import dask.bag as db
import pdfminer
from pdfminer.pdfparser import PDFParser
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdfpage import PDFPage
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.pdfdevice import PDFDevice
from pdfminer.layout import LAParams
from pdfminer.converter import TextConverter
from SComplexity.crawl import convert_pdf_to_txt
from SComplexity.crawl import print_best_text
from SComplexity.crawl import collect_pubs
from SComplexity.scholar_scrape import scholar
from delver import Crawler
C = Crawler()
import requests
import io
import selenium
from selenium import webdriver
from pyvirtualdisplay import Display
# Headless X display so selenium can run without a screen.
display = Display(visible=0, size=(1024, 768))
display.start()
from selenium.webdriver.firefox.options import Options
import re
from bs4 import BeautifulSoup
import bs4 as bs
import urllib.request
from io import StringIO
import io
#options = Options()
#options.headless = True
# Headless Chrome used for pages that require JavaScript rendering.
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--headless')
chrome_options.add_argument('--disable-gpu')
driver = webdriver.Chrome(executable_path='/usr/local/bin/chromedriver',chrome_options=chrome_options)
#driver = webdriver.Chrome(chrome_options=chrome_options)
driver.implicitly_wait(10)  # seconds to wait for elements before NoSuchElementException
from selenium.common.exceptions import NoSuchElementException
# Module-level pdfminer pipeline shared by the PDF helpers below.
rsrcmgr = PDFResourceManager()
retstr = StringIO()
laparams = LAParams()
codec = 'utf-8'
device = TextConverter(rsrcmgr, retstr, codec = codec, laparams = laparams)
interpreter = PDFPageInterpreter(rsrcmgr, device)
#from pyPdf import PdfFileReader
#from StringIO import StringIO
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfpage import PDFPage
import os
import sys, getopt
from io import StringIO
#converts pdf, returns its text content as a string
def pdf_to_txt_(infile):
    """Convert an open PDF file object to plain text with pdfminer.

    Args:
        infile: a binary file-like object positioned at the start of a PDF.
            The file is closed before returning.

    Returns:
        str: the extracted text of every page.
    """
    # Fixed: `pagenums` was commented out, leaving a NameError below.
    # An empty set tells pdfminer's PDFPage.get_pages to process all pages.
    pagenums = set()
    output = StringIO()
    manager = PDFResourceManager()
    converter = TextConverter(manager, output, laparams=LAParams())
    interpreter = PDFPageInterpreter(manager, converter)
    for page in PDFPage.get_pages(infile, pagenums):
        interpreter.process_page(page)
    infile.close()
    converter.close()
    text = output.getvalue()
    output.close()  # fixed: was `output.close` — attribute access, never called
    return text
import PyPDF2
from PyPDF2 import PdfFileReader
import textract
#from nltk.tokenize import word_tokenize
#from nltk.corpus import stopwords
def pdf_to_txt(url):
    """Extract text from a PDF carried in a requests Response.

    NOTE: despite the parameter name, callers (see convert()) pass the
    fetched response/content object; the name is kept for interface
    compatibility.

    Returns:
        str or None: extracted text, or None for a 404 response.
    """
    content = url  # fixed: the body referenced an undefined name `content`
    if str(content) == str('<Response [404]>'):
        return None
    # from
    # https://medium.com/@rqaiserr/how-to-convert-pdfs-into-searchable-key-words-with-python-85aab86c544f
    try:
        input_buffer = StringIO(content.content)
        pdfReader = PyPDF2.PdfFileReader(input_buffer)
    except Exception:
        pdfFileObj = io.BytesIO(content.content)
        pdfReader = PyPDF2.PdfFileReader(pdfFileObj)
    num_pages = pdfReader.numPages
    count = 0
    text = ""
    while count < num_pages:
        pageObj = pdfReader.getPage(count)
        count += 1
        text += pageObj.extractText()
    if text == "":
        # NOTE(review): `fileurl` is undefined here, so this OCR fallback
        # raises NameError if ever reached — it needs a real file path.
        # TODO confirm the intended source for textract.
        text = textract.process(fileurl, method='tesseract', language='eng')
    return text
# NOTE(review): orphaned module-level string below — remnant of an older
# pdfminer-based implementation of pdf_to_txt. It is dead code (a bare
# string expression with no effect) and can likely be deleted.
'''
parser = PDFParser(pdf)
document = PDFDocument(parser, password=None)
write_text = ''
for page in PDFPage.create_pages(document):
    interpreter.process_page(page)
    write_text = write_text.join(retstr.getvalue())
text = str(write_text)
'''
def html_to_txt(content):
    """Strip scripts/styles and markup from an HTML document, returning the
    visible text with blank lines collapsed.

    Args:
        content: HTML document (bytes or str) as accepted by BeautifulSoup.

    Returns:
        str: cleaned text, one non-empty chunk per line.
    """
    soup = BeautifulSoup(content, 'html.parser')
    #strip HTML
    for script in soup(["script", "style"]):
        script.extract() # rip it out
    text = soup.get_text()
    # Fixed: removed `wt = copy.copy(text)` — `copy` was never imported
    # (NameError at runtime) and `wt` was unused.
    #organize text
    lines = (line.strip() for line in text.splitlines()) # break into lines and remove leading and trailing space on each
    chunks = (phrase.strip() for line in lines for phrase in line.split(" ")) # break multi-headlines into a line each
    text = '\n'.join(chunk for chunk in chunks if chunk) # drop blank lines
    return str(text)
def convert(content, link):
    """Best-effort conversion of fetched content to text, choosing the
    converter from the link's suffix.

    Returns:
        str or None: extracted text, or None when every strategy fails.
    """
    # This is really ugly, but it's proven to be both fault tolerant and effective.
    try:
        if str('.html') in link:
            text = html_to_txt(content)
            print(text)
        elif str('.pdf') in link:
            text = pdf_to_txt(content)
        else:
            try:
                text = html_to_txt(content)
                print(text)
            except Exception:  # fixed: bare except also swallowed SystemExit/KeyboardInterrupt
                text = None
    except Exception:  # fixed: bare except also swallowed SystemExit/KeyboardInterrupt
        text = None
    return text
def url_to_text(link_tuple):
    """Fetch `link` and replace `buff` in the tuple with its extracted text.

    Non-PDF links go through the module-level Crawler plus convert();
    PDF links are downloaded with requests and only the FIRST page is
    extracted via PyPDF2.

    :param link_tuple: (search_engine, page_rank, link, category, buff)
    :return: the same tuple with `buff` replaced (or unchanged when the
        crawler could not open the link)
    """
    se_b, page_rank, link, category, buff = link_tuple
    if str('pdf') not in link:
        if C.open(link) is not None:
            # NOTE(review): the page is fetched twice (C.open called twice
            # for the same link); consider caching the first response.
            content = C.open(link).content
            buff = convert(content,link)
        else:
            print('problem')
    else:
        pdf_file = requests.get(link, stream=True)
        f = io.BytesIO(pdf_file.content)
        reader = PdfFileReader(f)
        buff = reader.getPage(0).extractText().split('\n')
        print(buff)
    link_tuple = ( se_b, page_rank, link, category, buff )
    return link_tuple
#@jit
def buffer_to_pickle(link_tuple):
    """Pickle a (search_engine, rank, link, category, text) tuple to
    results_dir, skipping entries whose text buffer is None.

    The filename encodes category, engine and rank:
    results_dir/{category}_{engine}_{rank}.p
    """
    se_b, page_rank, link, category, buff = link_tuple
    fname = 'results_dir/{0}_{1}_{2}.p'.format(category, se_b, page_rank)
    # Fixed: the guard used to be `type(buff) is not None`, which is always
    # True (type() never returns None), so None buffers were pickled too.
    if buff is not None:
        with open(fname, 'wb') as f:
            pickle.dump(link_tuple, f)
    return
def process(item):
    """Fetch one search-result tuple, convert it to text and pickle it."""
    extracted = url_to_text(item)
    buffer_to_pickle(extracted)
# this should not be hard coded, it should be set in the class init, but can't be bothered refactoring.
NUM_LINKS = 10  # cap on how many search-result links each *_get helper processes
# this should be a class method with self and self.NUM_LINKS but can't be bothered refactoring.
def wiki_get(get_links):
    """Search Wikipedia for `category` and process up to NUM_LINKS hits.

    :param get_links: (search_engine, index, link, category, buff) tuple;
        only `category` drives the search, the rest is threaded through
        to process().
    """
    # wikipedia is robot friendly
    # surfraw is fine.
    se_,index,link,category,buff = get_links
    url_of_links = str('https://en.wikipedia.org/w/index.php?search=')+str(category)
    links = collect_pubs(url_of_links)
    if len(links) > NUM_LINKS: links = links[0:NUM_LINKS]
    [ process((se_,index,l,category,buff)) for index,l in enumerate(links) ]
# this should be a class method with self and self.NUM_LINKS but can't be bothered refactoring.
def scholar_pedia_get(get_links):
    """Collect up to NUM_LINKS Scholarpedia search results for a category
    and run each through the download/pickle pipeline.

    Scholarpedia (like Wikipedia) is robot friendly.
    """
    se_, index, link, category, buff = get_links
    search_url = ('http://www.scholarpedia.org/w/index.php?search='
                  + str(category) + '&title=Special%3ASearch')
    found = collect_pubs(search_url)[:NUM_LINKS]
    for rank, url in enumerate(found):
        process((se_, rank, url, category, buff))
# this should be a class method with self and self.NUM_LINKS but can't be bothered refactoring.
def search_scholar(get_links):
    """Query Google Scholar for *category* and process every article URL.

    Based on https://github.com/ckreibich/scholar.py/issues/80
    """
    # BUG FIX: the original unpacked `se_, index, category, category, buff`,
    # binding `category` twice and silently discarding the link slot.
    se_, index, _link, category, buff = get_links
    querier = scholar.ScholarQuerier()
    settings = scholar.ScholarSettings()
    querier.apply_settings(settings)
    query = scholar.SearchScholarQuery()
    query.set_words(category)
    querier.send_query(query)
    links = [a.attrs['url'][0] for a in querier.articles if a.attrs['url'][0] is not None]
    # NOTE: unlike the wiki/scholarpedia paths, results are deliberately
    # not truncated to NUM_LINKS here (matches the original behaviour).
    for rank, url in enumerate(links):
        process((se_, rank, url, category, buff))
def search_author(get_links):
    """Query Google Scholar for an author/category term.

    The body was a line-for-line duplicate of :func:`search_scholar`,
    so it now delegates; kept as a separate entry point for callers
    that dispatch by search type.
    """
    search_scholar(get_links)
class SW(object):
    """Search-and-scrape driver.

    Builds the cross product of search engines x search terms, then for
    each pair either delegates to a site-specific getter (wikipedia,
    scholar, scholarpedia, ...) or runs a generic GoogleScraper config.
    Results are pickled into ``results_dir`` by the module-level helpers.
    """
    def __init__(self,sengines,sterms,nlinks=10):
        # Per-instance copy of the link cap (module also has NUM_LINKS).
        self.NUM_LINKS = nlinks
        self.links = None
        if not os.path.exists('results_dir'):
            os.makedirs('results_dir')
        # One (engine, term) work item per combination, shuffled so
        # repeated runs do not hit engines in the same order.
        self.iterable = [ (v,category) for category in sterms for v in sengines.values() ]
        random.shuffle(self.iterable)
    def slat_(self,config):
        """Dispatch one scrape job described by *config*.

        Site-specific engines are handled by dedicated getters; anything
        else goes through GoogleScraper's scrape_with_config.  Returns
        None on GoogleSearchError, otherwise falls through after printing.
        """
        try:
            if str('wiki') in config['search_engines']:
                get_links = (str('wikipedia'),0,None,config['keyword'],None)
                wiki_get(get_links)
            elif str('info_wars') in config['search_engines']:
                get_links = (str('info_wars'),0,None,config['keyword'],None)
                info_wars_get(get_links)
            elif str('scholar') in config['search_engines']:
                get_links = (str('scholar'),0,None,config['keyword'],None)
                search_scholar(get_links)
            elif str('scholarpedia') in config['search_engines']:
                # NOTE(review): the scholarpedia branch labels its results
                # 'scholar', so pickled filenames cannot distinguish the
                # two sources — confirm whether that is intended.
                get_links = (str('scholar'),0,None,config['keyword'],None)
                scholar_pedia_get(get_links)
            else:
                search = scrape_with_config(config)
                links = []
                for serp in search.serps:
                    print(serp)
                    links.extend([link.link for link in serp.links])
                # This code block jumps over gate two
                # The (possibly private, or hosted server as a gatekeeper).
                if len(links) > self.NUM_LINKS: links = links[0:self.NUM_LINKS]
                if len(links) > 0:
                    print(links)
                    buffer = None
                    se_ = config['search_engines']
                    category = config['keyword']
                    get_links = ((se_,index,link,category,buffer) for index, link in enumerate(links) )
                    for gl in get_links:
                        process(gl)
                    # map over the function in parallel since it's 2018
                    #b = db.from_sequence(get_links,npartitions=8)
                    #_ = list(b.map(process).compute())
        except GoogleSearchError as e:
            print(e)
            return None
        print('done scraping')
    #@jit
    def scrapelandtext(self,fi):
        """Build a GoogleScraper config for one (engine, term) pair and
        hand it to slat_()."""
        se_,category = fi
        config = {}
        #driver = rotate_profiles()
        # This code block, jumps over gate one (the search engine as a gatekeeper)
        # google scholar or wikipedia is not supported by google scraper
        # duckduckgo bang expansion _cannot_ be used as to access engines that GS does not support
        # without significant development. Redirection to the right search results does occur,
        # but google scrape also has tools for reading links out of web pages, and it needs to know
        # which brand of SE to expect in order to deal with idiosyncratic formatting.
        # it's easier not to use bang expansion, for that reason.
        # for example twitter etc
        config['keyword'] = str(category)
        config['search_engines'] = se_
        #config['scrape_method'] = 'http'
        config['scrape_method'] = 'selenium'
        config['num_pages_for_keyword'] = 1
        config['use_own_ip'] = True
        config['sel_browser'] = 'chrome'
        config['do_caching'] = False # bloat warning.
        # Google scrap + selenium implements a lot of human centric browser masquarading tools.
        # Search Engine: 'who are you?' code: 'I am an honest human centric browser, and certainly note a robot surfing in the nude'. Search Engine: 'good, here are some pages'.
        # Time elapses and the reality is exposed just like in 'the Emperors New Clothes'.
        # The file crawl.py contains methods for crawling the scrapped links.
        # For this reason, a subsequent action, c.download (crawl download ) is ncessary.
        config['output_filename'] = '{0}_{1}.csv'.format(category,se_)
        self.slat_(config)
        return
    def run(self):
        """Scrape every (engine, term) work item sequentially."""
        # someone should write a unit_test.
        # one reason I have not, is I would want to use travis.cl, and scrapping probably violates policies.
        # a unit test might begin like this:
        # self.iterable.insert(0,("scholar"),str("arbitrary test")))
        # self.iterable.insert(0,("wiki"),str("arbitrary test")))
        _ = list(map(self.scrapelandtext,self.iterable))
        return
|
<gh_stars>1-10
"""Switch platform for Hyperion."""
from __future__ import annotations
import functools
from typing import Any, Callable
from hyperion import client
from hyperion.const import (
KEY_COMPONENT,
KEY_COMPONENTID_ALL,
KEY_COMPONENTID_BLACKBORDER,
KEY_COMPONENTID_BOBLIGHTSERVER,
KEY_COMPONENTID_FORWARDER,
KEY_COMPONENTID_GRABBER,
KEY_COMPONENTID_LEDDEVICE,
KEY_COMPONENTID_SMOOTHING,
KEY_COMPONENTID_V4L,
KEY_COMPONENTS,
KEY_COMPONENTSTATE,
KEY_ENABLED,
KEY_NAME,
KEY_STATE,
KEY_UPDATE,
)
from homeassistant.components.switch import SwitchEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.util import slugify
from . import get_hyperion_unique_id, listen_for_instance_updates
from .const import (
COMPONENT_TO_NAME,
CONF_INSTANCE_CLIENTS,
DOMAIN,
NAME_SUFFIX_HYPERION_COMPONENT_SWITCH,
SIGNAL_ENTITY_REMOVE,
TYPE_HYPERION_COMPONENT_SWITCH_BASE,
)
# Hyperion components exposed as individual switch entities, in the
# order they are created for each server instance.
COMPONENT_SWITCHES = [
    KEY_COMPONENTID_ALL,
    KEY_COMPONENTID_SMOOTHING,
    KEY_COMPONENTID_BLACKBORDER,
    KEY_COMPONENTID_FORWARDER,
    KEY_COMPONENTID_BOBLIGHTSERVER,
    KEY_COMPONENTID_GRABBER,
    KEY_COMPONENTID_LEDDEVICE,
    KEY_COMPONENTID_V4L,
]
async def async_setup_entry(
    hass: HomeAssistantType, config_entry: ConfigEntry, async_add_entities: Callable
) -> bool:
    """Set up Hyperion component switches for a config entry."""
    entry_data = hass.data[DOMAIN][config_entry.entry_id]
    server_id = config_entry.unique_id

    def component_to_switch_type(component: str) -> str:
        """Build the slugified switch-type string for a component."""
        component_label = COMPONENT_TO_NAME[component]
        return slugify(f"{TYPE_HYPERION_COMPONENT_SWITCH_BASE} {component_label}")

    def component_to_unique_id(component: str, instance_num: int) -> str:
        """Build the unique_id for one component switch on one instance."""
        assert server_id
        return get_hyperion_unique_id(
            server_id, instance_num, component_to_switch_type(component)
        )

    def component_to_switch_name(component: str, instance_name: str) -> str:
        """Build the friendly entity name for a component switch."""
        component_label = COMPONENT_TO_NAME.get(component, component.capitalize())
        return (
            f"{instance_name} {NAME_SUFFIX_HYPERION_COMPONENT_SWITCH} {component_label}"
        )

    @callback
    def instance_add(instance_num: int, instance_name: str) -> None:
        """Create one switch per component for a newly-seen instance."""
        assert server_id
        instance_client = entry_data[CONF_INSTANCE_CLIENTS][instance_num]
        async_add_entities(
            [
                HyperionComponentSwitch(
                    component_to_unique_id(component, instance_num),
                    component_to_switch_name(component, instance_name),
                    component,
                    instance_client,
                )
                for component in COMPONENT_SWITCHES
            ]
        )

    @callback
    def instance_remove(instance_num: int) -> None:
        """Signal removal of every component switch of a removed instance."""
        assert server_id
        for component in COMPONENT_SWITCHES:
            async_dispatcher_send(
                hass,
                SIGNAL_ENTITY_REMOVE.format(
                    component_to_unique_id(component, instance_num)
                ),
            )

    listen_for_instance_updates(hass, config_entry, instance_add, instance_remove)
    return True
class HyperionComponentSwitch(SwitchEntity):
    """Switch entity that toggles a single Hyperion component on/off."""

    def __init__(
        self,
        unique_id: str,
        name: str,
        component_name: str,
        hyperion_client: client.HyperionClient,
    ) -> None:
        """Initialize the switch."""
        self._unique_id = unique_id
        self._name = name
        self._component_name = component_name
        self._client = hyperion_client
        # Re-render entity state whenever the client pushes a components update.
        self._client_callbacks = {
            f"{KEY_COMPONENTS}-{KEY_UPDATE}": self._update_components
        }

    @property
    def should_poll(self) -> bool:
        """Return whether or not this entity should be polled."""
        # State is pushed via client callbacks, so polling is unnecessary.
        return False

    @property
    def entity_registry_enabled_default(self) -> bool:
        """Whether or not the entity is enabled by default."""
        # These component controls are for advanced users and are disabled by default.
        return False

    @property
    def unique_id(self) -> str:
        """Return a unique id for this instance."""
        return self._unique_id

    @property
    def name(self) -> str:
        """Return the name of the switch."""
        return self._name

    @property
    def is_on(self) -> bool:
        """Return true if the switch is on."""
        for component in self._client.components or []:
            if component[KEY_NAME] == self._component_name:
                # BUG FIX: use a read-only lookup; the original used
                # dict.setdefault(), which mutated the client's shared
                # component state from inside a property getter.
                return bool(component.get(KEY_ENABLED, False))
        return False

    @property
    def available(self) -> bool:
        """Return server availability."""
        return bool(self._client.has_loaded_state)

    async def _async_send_set_component(self, value: bool) -> None:
        """Send a component control request."""
        await self._client.async_send_set_component(
            **{
                KEY_COMPONENTSTATE: {
                    KEY_COMPONENT: self._component_name,
                    KEY_STATE: value,
                }
            }
        )

    async def async_turn_on(self, **kwargs: Any) -> None:
        """Turn on the switch."""
        await self._async_send_set_component(True)

    async def async_turn_off(self, **kwargs: Any) -> None:
        """Turn off the switch."""
        await self._async_send_set_component(False)

    @callback
    def _update_components(self, _: dict[str, Any] | None = None) -> None:
        """Write updated component state through to Home Assistant."""
        self.async_write_ha_state()

    async def async_added_to_hass(self) -> None:
        """Register callbacks when entity added to hass."""
        # Allow this entity to be removed via a dispatcher signal.
        self.async_on_remove(
            async_dispatcher_connect(
                self.hass,
                SIGNAL_ENTITY_REMOVE.format(self._unique_id),
                functools.partial(self.async_remove, force_remove=True),
            )
        )
        self._client.add_callbacks(self._client_callbacks)

    async def async_will_remove_from_hass(self) -> None:
        """Cleanup prior to hass removal."""
        self._client.remove_callbacks(self._client_callbacks)
|
import pandas as pd
import numpy as np
from sklearn.base import TransformerMixin
import scipy
def close(X: np.ndarray):
    """Closure operation: scale rows (2-D) or the whole vector (1-D) to sum to 1."""
    sum_axis = 1 if X.ndim == 2 else 0
    totals = np.sum(X, axis=sum_axis)
    if X.ndim == 2:
        totals = totals[:, np.newaxis]
    return np.divide(X, totals)
def nancov(X, method='replace'):
    """
    Generates a covariance matrix excluding nan-components.

    Done on a column-column/pairwise basis unless method='rowexclude',
    in which case rows containing any non-finite value are dropped first.
    The result Y may not be a positive definite matrix.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
    method : str
        'rowexclude' - drop rows with any non-finite value, then np.cov.
        'replace'    - zero out non-finite entries after centring.
        otherwise    - pairwise-complete observations per column pair.
    """
    if method == 'rowexclude':
        Xnanfree = X[np.all(np.isfinite(X), axis=1), :].T
        #assert Xnanfree.shape[1] > Xnanfree.shape[0]
        # np.cov expects variables as rows, hence the transpose. (1/m)X^T*X
        return np.cov(Xnanfree)
    else:
        X = np.array(X, ndmin=2, dtype=float)  # copy; caller's array untouched
        X -= np.nanmean(X, axis=0)             # column-centre
        cov = np.empty((X.shape[1], X.shape[1]))
        cols = range(X.shape[1])
        for n in cols:
            for m in [i for i in cols if i >= n]:
                fn = np.isfinite(X[:, n])
                fm = np.isfinite(X[:, m])
                if method == 'replace':
                    X[~fn, n] = 0
                    X[~fm, m] = 0
                    fact = fn.shape[0] - 1
                    c = np.dot(X[:, n], X[:, m]) / fact
                else:
                    f = fn & fm
                    # BUG FIX: normalise by the number of pairwise-complete
                    # observations, not the total row count (the original
                    # used f.shape[0] - 1, biasing estimates low with NaNs).
                    fact = np.sum(f) - 1
                    c = np.dot(X[f, n], X[f, m]) / fact
                cov[n, m] = c
                cov[m, n] = c
        return cov
def renormalise(df: pd.DataFrame, components: list = []):
    """
    Renormalises compositional data to ensure closure (rows sum to 100).

    A subset of components can be used for flexibility.
    For data which sums to 0, 100 is returned - e.g. for TE-only datasets.
    """
    out = df.copy()
    if components:
        present = [c for c in components if c in out.columns]
        subtotal = out.loc[:, present].sum(axis=1).replace(0, np.nan)
        out.loc[:, present] = 100. * out.loc[:, present].divide(subtotal, axis=0)
        return out
    row_totals = out.sum(axis=1).replace(0, 100)
    return out.divide(row_totals, axis=0) * 100.
def add_ratio(df: pd.DataFrame, ratio, alias=''):
    """
    Add a ratio of components A and B, given in the form of string 'A/B'.

    The new column is named after the ratio string unless *alias* is given.
    Mutates *df* in place.
    """
    num, den = ratio.split('/')
    column = alias if alias else ratio
    df.loc[:, column] = df.loc[:, num] / df.loc[:, den]
class LinearTransform(TransformerMixin):
    """Identity ('crude') transform exposing the scikit-learn transformer API."""

    def __init__(self, **kwargs):
        self.kpairs = kwargs
        self.label = 'Linear/Crude'
        self.longlabel = 'Linear Transform'

    def transform(self, X, *args):
        """Pass data through unchanged (coerced to ndarray)."""
        return np.array(X)

    def inverse_transform(self, Y, *args):
        """Pass data through unchanged (coerced to ndarray)."""
        return np.array(Y)

    def fit(self, X, *args):
        """No-op fit; returns self for chaining."""
        return self
class ALRTransform(TransformerMixin):
    """Additive log-ratio transform exposing the scikit-learn transformer API."""

    def __init__(self, **kwargs):
        self.kpairs = kwargs
        self.label = 'ALR'
        self.longlabel = 'Additive Log-ratio Transform'

    def transform(self, X, *args):
        """Apply the ALR transform to *X*."""
        return alr(np.array(X))

    def inverse_transform(self, Y, *args):
        """Invert the ALR transform on *Y*."""
        return inv_alr(np.array(Y))

    def fit(self, X, *args):
        """No-op fit; returns self for chaining."""
        return self
class CLRTransform(TransformerMixin):
    """Centred log-ratio transform exposing the scikit-learn transformer API."""

    def __init__(self, **kwargs):
        self.kpairs = kwargs
        self.label = 'CLR'
        self.longlabel = 'Centred Log-ratio Transform'

    def transform(self, X, *args):
        """Apply the CLR transform to *X*."""
        return clr(np.array(X))

    def inverse_transform(self, Y, *args):
        """Invert the CLR transform on *Y*."""
        return inv_clr(np.array(Y))

    def fit(self, X, *args):
        """No-op fit; returns self for chaining."""
        return self
class ILRTransform(TransformerMixin):
    """Isometric log-ratio transform exposing the scikit-learn transformer API.

    transform() stashes its input on self.X because inv_ilr needs the
    original column count to rebuild the basis.
    """

    def __init__(self, **kwargs):
        self.kpairs = kwargs
        self.label = 'ILR'
        self.longlabel = 'Isometric Log-ratio Transform'

    def transform(self, X, *args):
        """Apply the ILR transform to *X*, remembering X for inversion."""
        data = np.array(X)
        self.X = data
        return ilr(data)

    def inverse_transform(self, Y, *args):
        """Invert the ILR transform using the last-seen forward input."""
        return inv_ilr(np.array(Y), X=self.X)

    def fit(self, X, *args):
        """No-op fit; returns self for chaining."""
        return self
def additive_log_ratio(X: np.ndarray, ind: int = -1):
    """Additive log ratio transform relative to the component at *ind*."""
    Y = X.copy()
    assert Y.ndim in [1, 2]
    ncomp = Y.shape[-1]
    if ind < 0:
        ind += ncomp
    keep = [i for i in range(ncomp) if i != ind]
    if Y.ndim == 2:
        Y = Y / Y[:, ind][:, np.newaxis]
        return np.log(Y[:, keep])
    Y = Y / Y[ind]
    return np.log(Y[keep])
def inverse_additive_log_ratio(Y: np.ndarray, ind=-1):
    """
    Inverse additive log ratio transform.

    Re-inserts the reference component (as a zero log-ratio column),
    exponentiates, and applies closure so each composition sums to 1.
    """
    assert Y.ndim in [1, 2]
    X = Y.copy()
    ncomp = X.shape[-1]
    order = np.arange(0, ncomp + 1)
    if ind != -1:
        # Move the appended reference component back to position `ind`.
        order = np.array(list(order[order < ind]) +
                         [-1] +
                         list(order[order >= ind + 1] - 1))
    if Y.ndim == 2:
        X = np.concatenate((X, np.zeros((X.shape[0], 1))), axis=1)
        X = np.exp(X[:, order])
        return X / np.sum(X, axis=1)[:, np.newaxis]
    X = np.append(X, np.array([0]))
    X = np.exp(X[order])
    return X / np.sum(X, axis=0)
def alr(*args, **kwargs):
    """Shorthand alias for :func:`additive_log_ratio`."""
    return additive_log_ratio(*args, **kwargs)
def inv_alr(*args, **kwargs):
    """Shorthand alias for :func:`inverse_additive_log_ratio`."""
    return inverse_additive_log_ratio(*args, **kwargs)
def ALR_mean(X, index=-1):
    """Compositional mean of *X*, computed (NaN-aware) in ALR space.

    Note: *index* is accepted for interface compatibility but unused;
    the transform always uses its default reference component.
    """
    transformer = ALRTransform()
    transformed = transformer.fit_transform(X)
    mean_coords = np.nanmean(transformed, axis=0)
    return transformer.inverse_transform(mean_coords)
def clr(X: np.ndarray):
    """Centred log-ratio transform: log of closed data minus the row log-mean."""
    closed = X / np.sum(X, axis=1)[:, np.newaxis]   # closure
    logged = np.log(closed)
    return logged - np.nansum(logged, axis=1)[:, np.newaxis] / X.shape[1]
def inv_clr(Y: np.ndarray):
    """Inverse centred log-ratio: exponentiate, then apply closure row-wise."""
    expd = np.exp(Y)
    return expd / np.nansum(expd, axis=1)[:, np.newaxis]
def orthagonal_basis(X: np.ndarray):
    """Return a (D-1, D) orthonormal basis for the ILR transform.

    Built from the Helmert matrix (an exact representation of the psi
    basis as in Egozcue's book), with its rows reversed.  Only the
    column count of *X* is used.
    """
    ncomp = X.shape[1]
    helmert = scipy.linalg.helmert(ncomp, full=False)
    return helmert[::-1]
def ilr(X: np.ndarray):
    """Isometric log-ratio transform: project CLR coordinates onto an
    orthonormal (D-1)-dimensional basis."""
    d = X.shape[1]
    Y = clr(X)
    # The basis depends only on the number of columns, so one call
    # suffices (the original computed it twice and discarded the first).
    psi = orthagonal_basis(X)
    assert np.allclose(psi @ psi.T, np.eye(d - 1))
    return Y @ psi.T
def inv_ilr(Y: np.ndarray, X: np.ndarray = None):
    """Inverse ILR: map back into CLR space via the basis, then invert CLR.

    *X* supplies the original column count used to rebuild the basis.
    """
    basis = orthagonal_basis(X)
    clr_coords = Y @ basis
    return inv_clr(clr_coords)
<filename>download.py
import json
import sys
import cgi
import os
import datetime
from do_authentication import authenticate
from do_http_get import do_get
##############################################################################################################
# First Step: Get the config data from config.json file
##############################################################################################################
# Configuration comes from the CZDS_CONFIG environment variable (a JSON
# string) when set, otherwise from config.json in the working directory.
try:
    if 'CZDS_CONFIG' in os.environ:
        config_data = os.environ['CZDS_CONFIG']
        config = json.loads(config_data)
    else:
        # `with` guarantees the handle is closed even if parsing fails.
        with open("config.json", "r") as config_file:
            config = json.load(config_file)
except Exception:
    # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt are
    # not swallowed; any load/parse failure is fatal.
    sys.stderr.write("Error loading config.json file.\n")
    sys.exit(1)
# The config.json file must contain the following data:
# .get() is used so a missing key reaches the friendly error messages
# below instead of raising an immediate KeyError.  (The password key was
# previously the placeholder '<PASSWORD>', which can never exist in a
# real config; restored to match the error message below.)
username = config.get('icann.account.username')
password = config.get('icann.account.password')
authen_base_url = config.get('authentication.base.url')
czds_base_url = config.get('czds.base.url')

# This is optional. Default to current directory
working_directory = config.get('working.directory', '.')  # Default to current directory

if not username:
    sys.stderr.write("'icann.account.username' parameter not found in the config.json file\n")
    sys.exit(1)

if not password:
    sys.stderr.write("'icann.account.password' parameter not found in the config.json file\n")
    sys.exit(1)

if not authen_base_url:
    sys.stderr.write("'authentication.base.url' parameter not found in the config.json file\n")
    sys.exit(1)

if not czds_base_url:
    sys.stderr.write("'czds.base.url' parameter not found in the config.json file\n")
    sys.exit(1)
##############################################################################################################
# Second Step: authenticate the user to get an access_token.
# Note that the access_token is global for all the REST API calls afterwards
##############################################################################################################
print("Authenticate user {0}".format(username))
# Module-global token; refreshed in place by the download helpers on HTTP 401.
access_token = authenticate(username, password, authen_base_url)
##############################################################################################################
# Third Step: Get the download zone file links
##############################################################################################################
# Function definition for listing the zone links
def get_zone_links(czds_base_url):
    """Return the list of zone-file download links, or None on failure.

    On HTTP 401 the module-global access_token is refreshed and the
    request is retried via recursion.
    """
    global access_token

    links_url = czds_base_url + "/czds/downloads/links"
    links_response = do_get(links_url, access_token)

    status_code = links_response.status_code

    if status_code == 200:
        zone_links = links_response.json()
        print("{0}: The number of zone files to be downloaded is {1}".format(datetime.datetime.now(),len(zone_links)))
        return zone_links
    elif status_code == 401:
        print("The access_token has been expired. Re-authenticate user {0}".format(username))
        access_token = authenticate(username, password, authen_base_url)
        # BUG FIX: the retry's result was discarded (no `return`), so a
        # successful retry still yielded None to the caller.
        return get_zone_links(czds_base_url)
    else:
        sys.stderr.write("Failed to get zone links from {0} with error code {1}\n".format(links_url, status_code))
        return None
# Get the zone links
zone_links = get_zone_links(czds_base_url)
if not zone_links:
    exit(1)
# We may not want to download all the zone files, for a poor connection this will take a long time.
# So, enable the user to specify which files to download at the command line. They just need to add
# the TLD of all those that they wish to download, i.e. `python3 download.py net com org` to download
# the .net, .com, and .org zone files. The zone files downloaded will be a subset of those available
# and those specified.
# Zones to download are those passed by name
zones_to_download = sys.argv[1:]
# NOTE(review): with no CLI arguments this list is empty, so the else
# branch below filters zone_links down to nothing and no zones are
# downloaded — confirm that "*" (or at least one TLD) is required usage.
if any([zone == "*" for zone in zones_to_download]):
    # If any are a *, then just download all
    pass
else:
    # Append .zone to each to make it easier to search
    zones_to_download = [f"{zone.lower()}.zone" for zone in zones_to_download]
    # Keep only the links whose URL ends with one of the requested zones.
    zone_links = [zone for zone in zone_links if any([zone.endswith(z) for z in zones_to_download])]
    if len(zone_links) != len(zones_to_download):
        # We get here if either we don't have permission to download zones we wanted, or they don't exist
        not_found = [zone for zone in zones_to_download if not any([z.endswith(zone) for z in zone_links])]
        print(zone_links, zones_to_download)
        print("The following zones could not be downloaded because they either do not exist or you do not have permission:\n\t{0}".format('\n\t'.join(not_found)))
##############################################################################################################
# Fourth Step: download zone files
##############################################################################################################
# Function definition to download one zone file
def download_one_zone(url, output_directory):
    """Download a single zone file from *url* into *output_directory*.

    Streams the body to disk in 1 KiB chunks.  On HTTP 401 the global
    access_token is refreshed and the download is retried recursively;
    404 and other errors are reported but do not abort the run.
    """
    print("{0}: Downloading zone file from {1}".format(str(datetime.datetime.now()), url))
    global access_token
    download_zone_response = do_get(url, access_token)
    status_code = download_zone_response.status_code
    if status_code == 200:
        # Try to get the filename from the header
        # NOTE(review): the cgi module is deprecated since Python 3.11
        # (removed in 3.13) — consider email.message for header parsing.
        _,option = cgi.parse_header(download_zone_response.headers['content-disposition'])
        filename = option.get('filename')
        # If we could NOT get a filename from the header, then make up one like [tld].txt.gz
        if not filename:
            filename = url.rsplit('/', 1)[-1].rsplit('.')[-2] + '.txt.gz'
        # This is where the zone file will be saved
        path = '{0}/{1}'.format(output_directory, filename)
        with open(path, 'wb') as f:
            for chunk in download_zone_response.iter_content(1024):
                f.write(chunk)
        print("{0}: Completed downloading zone to file {1}".format(str(datetime.datetime.now()), path))
    elif status_code == 401:
        print("The access_token has been expired. Re-authenticate user {0}".format(username))
        access_token = authenticate(username, password, authen_base_url)
        download_one_zone(url, output_directory)
    elif status_code == 404:
        print("No zone file found for {0}".format(url))
    else:
        sys.stderr.write('Failed to download zone from {0} with code {1}\n'.format(url, status_code))
# Function definition for downloading all the zone files
def download_zone_files(urls, working_directory):
    """Download every zone file in *urls* into <working_directory>/zonefiles."""
    # The zone files will be saved in a sub-directory
    output_directory = working_directory + "/zonefiles"

    if not os.path.exists(output_directory):
        os.makedirs(output_directory)

    # Fetch sequentially; each call handles its own errors and retries.
    for zone_url in urls:
        download_one_zone(zone_url, output_directory)
# Finally, download all zone files
# Wall-clock timing around the whole download run.
start_time = datetime.datetime.now()
download_zone_files(zone_links, working_directory)
end_time = datetime.datetime.now()
print("{0}: DONE DONE. Completed downloading all zone files. Time spent: {1}".format(str(end_time), (end_time-start_time)))
|
<filename>hypothesis_generator/er_mlp/er_mlp_imp/er_mlp_max_margin.py<gh_stars>1-10
"""
Filename: er_mlp_max_margin.py
Authors:
<NAME> - <EMAIL>
<NAME> - <EMAIL>
Description:
Construct ER MLP using max margin loss and perform train, evaluation, and test.
To-do:
"""
# standard imports
import logging as log
import os
import pickle
import random
# third party imports
import numpy as np
import tensorflow as tf
# local imports
from data_processor import DataProcessor
from er_mlp import ERMLP
from metrics import plot_cost, plot_map
def run_model(params, final_model=False):
    """
    Run the ER_MLP model using max margin loss.

    Inputs:
        params: dictionary containing different
            parameters to be used when running the model
        final_model: when True, train on everything with no dev/test
            split — no threshold tuning or test evaluation is performed
    """
    ######################
    # data preprocessing #
    ######################
    processor = DataProcessor()

    # load data
    train_df = processor.load(os.path.join(params['data_path'], params['train_file']))
    train_local_df = processor.load(os.path.join(params['data_path'], 'train_local.txt'))

    log.debug('train dataframe shape: %s', train_df.shape)
    log.debug('train_local dataframe shape: %s', train_local_df.shape)

    if not final_model:
        dev_df = processor.load(os.path.join(params['data_path'], 'dev.txt'))
        test_df = processor.load(os.path.join(params['data_path'], 'test.txt'))

        log.debug('dev dataframe shape: %s', dev_df.shape)
        log.debug('test dataframe shape: %s', test_df.shape)

    # make sure we have label column
    if len(train_df.columns) < 4:
        log.warning('Label (last column) is missing')
        train_df['one'] = 1

    # do word embeddings
    if params['word_embedding']:
        indexed_entities, num_entity_words, entity_dic = processor.machine_translate_using_word(
            os.path.join(params['data_path'], 'entities.txt'))
        indexed_predicates, num_pred_words, pred_dic = processor.machine_translate_using_word(
            os.path.join(params['data_path'], 'relations.txt'))
    else:
        entity_dic = processor.machine_translate(os.path.join(params['data_path'], 'entities.txt'))
        pred_dic = processor.machine_translate(os.path.join(params['data_path'], 'relations.txt'))

    # numerically represent the data
    indexed_train_data = processor.create_indexed_triplets_with_label(
        train_df.values, entity_dic, pred_dic)
    indexed_train_local_data = processor.create_indexed_triplets_with_label(
        train_local_df.values, entity_dic, pred_dic)

    if not final_model:
        indexed_dev_data = processor.create_indexed_triplets_with_label(
            dev_df.values, entity_dic, pred_dic)
        indexed_test_data = processor.create_indexed_triplets_with_label(
            test_df.values, entity_dic, pred_dic)

    # change label from -1 to 0
    # (train_local always exists, so relabel it unconditionally;
    # dev/test only exist when this is not the final model)
    indexed_train_local_data[:, 3][indexed_train_local_data[:, 3] == -1] = 0
    if not final_model:
        indexed_dev_data[:, 3][indexed_dev_data[:, 3] == -1] = 0
        indexed_test_data[:, 3][indexed_test_data[:, 3] == -1] = 0

    # shuffle evaluation data
    np.random.shuffle(indexed_train_local_data)
    if not final_model:
        # BUG FIX: this shuffle was previously unconditional, which raised
        # NameError when final_model=True (indexed_test_data never exists).
        np.random.shuffle(indexed_test_data)

    # construct new parameter dictionary to be fed into the network
    er_mlp_params = {
        'word_embedding': params['word_embedding'],
        'embedding_size': params['embedding_size'],
        'layer_size': params['layer_size'],
        'corrupt_size': params['corrupt_size'],
        'lambda': params['lambda'],
        'num_entities': len(entity_dic),
        'num_preds': len(pred_dic),
        'learning_rate': params['learning_rate'],
        'batch_size': params['batch_size'],
        'add_layers': params['add_layers'],
        'act_function': params['act_function'],
        'drop_out_percent': params['drop_out_percent'],
        'margin': params['margin']
    }

    # append word embedding related parameters to the dictionary
    if params['word_embedding']:
        er_mlp_params['num_entity_words'] = num_entity_words
        er_mlp_params['num_pred_words'] = num_pred_words
        er_mlp_params['indexed_entities'] = indexed_entities
        er_mlp_params['indexed_predicates'] = indexed_predicates

    #########################
    # construct the network #
    #########################
    er_mlp = ERMLP(er_mlp_params)

    # network used for training
    train_predictions = er_mlp.build_traininig_model()
    tf.add_to_collection('train_predictions', train_predictions)

    # network used for testing
    test_predictions = er_mlp.build_testing_model()
    tf.add_to_collection('test_predictions', test_predictions)

    # loss
    cost = er_mlp.loss()
    tf.add_to_collection('cost', cost)
    tf.summary.scalar('cost', cost)

    # optimizer
    if params['optimizer'] == 0:
        optimizer = er_mlp.train_adagrad(cost)  # adagrad
    else:
        optimizer = er_mlp.train_adam(cost)  # adam
    tf.add_to_collection('optimizer', optimizer)

    # merge summary
    merged = tf.summary.merge_all()

    # saver to save the model
    saver = tf.train.Saver()

    # choose the positive training data (corruptions are generated per batch)
    data_train = indexed_train_data[indexed_train_data[:, 3] == 1]
    data_train = data_train[:, :3]

    # some variable initializations
    iter_list = []
    cost_list = []
    train_local_map_list = []
    test_map_list = []
    iteration = 0

    # init variables
    log.info('Initializing tensor variables...')
    init_all = tf.global_variables_initializer()

    #########################
    # train the network     #
    #########################
    log.info('Begin training...')

    # begin session
    with tf.Session() as sess:
        # writer
        train_writer = tf.summary.FileWriter(
            os.path.join(params['model_save_directory'], 'log'),
            sess.graph)

        # run init
        sess.run(init_all)

        # epoch
        for epoch in range(params['training_epochs']):
            log.info('****** Epoch: %d/%d ******', epoch, params['training_epochs'])

            total_batch = int(np.ceil(data_train.shape[0] / params['batch_size']))

            # shuffle the training data for each epoch
            np.random.shuffle(data_train)

            # iteration
            for i in range(total_batch):
                # get corrupted batch using the un-corrupted data_train
                start_idx = i * params['batch_size']
                end_idx = (i + 1) * params['batch_size']
                batch_xs = er_mlp.get_training_batch_with_corrupted(data_train[start_idx:end_idx])

                # flip bit
                flip = bool(random.getrandbits(1))

                # feed dictionary
                feed_dict = {
                    er_mlp.train_triplets: batch_xs,
                    er_mlp.flip_placeholder: flip}

                # display progress (first batch of every display_step-th epoch)
                if (i == 0) and (epoch % params['display_step'] == 0):
                    _, train_summary, current_cost = sess.run(
                        [optimizer, merged, cost],
                        feed_dict=feed_dict)
                    train_writer.add_summary(train_summary, iteration)

                    log.info('current cost: %f', current_cost)

                    train_local_map = er_mlp.test_model(
                        sess,
                        indexed_train_local_data,
                        pred_dic,
                        _type='train local')
                    train_local_map_list.append(train_local_map)

                    if not final_model:
                        thresholds = er_mlp.determine_threshold(
                            sess,
                            indexed_dev_data,
                            use_f1=params['f1_for_threshold'])
                        test_map = er_mlp.test_model(
                            sess,
                            indexed_test_data,
                            pred_dic,
                            threshold=thresholds,
                            _type='current test')
                        test_map_list.append(test_map)

                    iter_list.append(iteration)
                    cost_list.append(current_cost)
                else:
                    sess.run(optimizer, feed_dict=feed_dict)

                # update iteration
                iteration += 1

        # close writers
        train_writer.close()

        # do final threshold determination and testing model
        if not final_model:
            log.info('determine threshold for classification')
            thresholds = er_mlp.determine_threshold(
                sess,
                indexed_dev_data,
                use_f1=params['f1_for_threshold'])
            er_mlp.test_model(
                sess,
                indexed_test_data,
                pred_dic,
                threshold=thresholds,
                _type='final')

        # plot the cost graph
        plot_cost(
            iter_list,
            cost_list,
            params['model_save_directory'])
        plot_map(
            iter_list,
            train_local_map_list,
            params['model_save_directory'],
            filename='train_local_map.png')

        if not final_model:
            plot_map(
                iter_list,
                test_map_list,
                params['model_save_directory'],
                filename='map.png')

        # save the model & parameters if prompted
        if params['save_model']:
            saver.save(sess, os.path.join(params['model_save_directory'], 'model'))
            log.info('model saved in: %s', params['model_save_directory'])

            save_object = {
                'entity_dic': entity_dic,
                'pred_dic': pred_dic
            }

            # thresholds only exist when a dev split was used
            if not final_model:
                save_object['thresholds'] = thresholds

            if params['word_embedding']:
                save_object['indexed_entities'] = indexed_entities
                save_object['indexed_predicates'] = indexed_predicates
                save_object['num_pred_words'] = num_pred_words
                save_object['num_entity_words'] = num_entity_words

            with open(os.path.join(params['model_save_directory'], 'params.pkl'), 'wb') as output:
                pickle.dump(save_object, output, pickle.HIGHEST_PROTOCOL)
|
#
# Autogenerated by Thrift Compiler (0.9.3)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
import apache.airavata.model.appcatalog.computeresource.ttypes
import apache.airavata.model.data.movement.ttypes
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class ComputeResourcePreference:
    """
    Gateway specific preferences for a Computer Resource

    computeResourceId:
      Corelate the preference to a compute resource.

    overridebyAiravata:
      If turned true, Airavata will override the preferences of better alternatives exist.

    loginUserName:
      Preferred login user name for this resource.
      (NOTE(review): the generated IDL comment here was a copy/paste of the
      previous field's text -- confirm the exact semantics against the Thrift IDL.)

    preferredJobSubmissionProtocol:
      For resources with multiple job submission protocols, the gateway can pick a preferred option.

    preferredDataMovementProtocol:
      For resources with multiple data movement protocols, the gateway can pick a preferred option.

    preferredBatchQueue:
      Gateways can choose a defualt batch queue based on average job dimention, reservations or other metrics.

    scratchLocation:
      Path to the local scratch space on a HPC cluster. Typically used to create working directory for job execution.

    allocationProjectNumber:
      Typically used on HPC machines to charge computing usage to a account number. For instance, on XSEDE once an
      allocation is approved, an allocation number is assigned. Before passing this number with job submittions, the
      account to be used has to be added to the allocation.

    resourceSpecificCredentialStoreToken:
      Resource specific credential store token. If this token is specified, then it is superceeded by the gateway's
      default credential store.

    Attributes:
     - computeResourceId
     - overridebyAiravata
     - loginUserName
     - preferredJobSubmissionProtocol
     - preferredDataMovementProtocol
     - preferredBatchQueue
     - scratchLocation
     - allocationProjectNumber
     - resourceSpecificCredentialStoreToken
     - usageReportingGatewayId
     - qualityOfService
     - reservation
     - reservationStartTime
     - reservationEndTime
    """

    # Thrift metadata: one (field id, wire type, name, type args, default) tuple
    # per slot, indexed by field id (slot 0 is unused).
    thrift_spec = (
        None, # 0
        (1, TType.STRING, 'computeResourceId', None, None, ), # 1
        (2, TType.BOOL, 'overridebyAiravata', None, True, ), # 2
        (3, TType.STRING, 'loginUserName', None, None, ), # 3
        (4, TType.I32, 'preferredJobSubmissionProtocol', None, None, ), # 4
        (5, TType.I32, 'preferredDataMovementProtocol', None, None, ), # 5
        (6, TType.STRING, 'preferredBatchQueue', None, None, ), # 6
        (7, TType.STRING, 'scratchLocation', None, None, ), # 7
        (8, TType.STRING, 'allocationProjectNumber', None, None, ), # 8
        (9, TType.STRING, 'resourceSpecificCredentialStoreToken', None, None, ), # 9
        (10, TType.STRING, 'usageReportingGatewayId', None, None, ), # 10
        (11, TType.STRING, 'qualityOfService', None, None, ), # 11
        (12, TType.STRING, 'reservation', None, None, ), # 12
        (13, TType.I64, 'reservationStartTime', None, None, ), # 13
        (14, TType.I64, 'reservationEndTime', None, None, ), # 14
    )

    # overridebyAiravata defaults to thrift_spec[2][4], i.e. True.
    def __init__(self, computeResourceId=None, overridebyAiravata=thrift_spec[2][4], loginUserName=None, preferredJobSubmissionProtocol=None, preferredDataMovementProtocol=None, preferredBatchQueue=None, scratchLocation=None, allocationProjectNumber=None, resourceSpecificCredentialStoreToken=None, usageReportingGatewayId=None, qualityOfService=None, reservation=None, reservationStartTime=None, reservationEndTime=None,):
        self.computeResourceId = computeResourceId
        self.overridebyAiravata = overridebyAiravata
        self.loginUserName = loginUserName
        self.preferredJobSubmissionProtocol = preferredJobSubmissionProtocol
        self.preferredDataMovementProtocol = preferredDataMovementProtocol
        self.preferredBatchQueue = preferredBatchQueue
        self.scratchLocation = scratchLocation
        self.allocationProjectNumber = allocationProjectNumber
        self.resourceSpecificCredentialStoreToken = resourceSpecificCredentialStoreToken
        self.usageReportingGatewayId = usageReportingGatewayId
        self.qualityOfService = qualityOfService
        self.reservation = reservation
        self.reservationStartTime = reservationStartTime
        self.reservationEndTime = reservationEndTime

    def read(self, iprot):
        """Populate this struct from the protocol, skipping unknown fields."""
        # Fast path: delegate to the C-accelerated decoder when available.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.computeResourceId = iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.BOOL:
                    self.overridebyAiravata = iprot.readBool()
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.STRING:
                    self.loginUserName = iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 4:
                if ftype == TType.I32:
                    self.preferredJobSubmissionProtocol = iprot.readI32()
                else:
                    iprot.skip(ftype)
            elif fid == 5:
                if ftype == TType.I32:
                    self.preferredDataMovementProtocol = iprot.readI32()
                else:
                    iprot.skip(ftype)
            elif fid == 6:
                if ftype == TType.STRING:
                    self.preferredBatchQueue = iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 7:
                if ftype == TType.STRING:
                    self.scratchLocation = iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 8:
                if ftype == TType.STRING:
                    self.allocationProjectNumber = iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 9:
                if ftype == TType.STRING:
                    self.resourceSpecificCredentialStoreToken = iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 10:
                if ftype == TType.STRING:
                    self.usageReportingGatewayId = iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 11:
                if ftype == TType.STRING:
                    self.qualityOfService = iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 12:
                if ftype == TType.STRING:
                    self.reservation = iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 13:
                if ftype == TType.I64:
                    self.reservationStartTime = iprot.readI64()
                else:
                    iprot.skip(ftype)
            elif fid == 14:
                if ftype == TType.I64:
                    self.reservationEndTime = iprot.readI64()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct; None-valued fields are omitted from the wire."""
        # Fast path: delegate to the C-accelerated encoder when available.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('ComputeResourcePreference')
        if self.computeResourceId is not None:
            oprot.writeFieldBegin('computeResourceId', TType.STRING, 1)
            oprot.writeString(self.computeResourceId)
            oprot.writeFieldEnd()
        if self.overridebyAiravata is not None:
            oprot.writeFieldBegin('overridebyAiravata', TType.BOOL, 2)
            oprot.writeBool(self.overridebyAiravata)
            oprot.writeFieldEnd()
        if self.loginUserName is not None:
            oprot.writeFieldBegin('loginUserName', TType.STRING, 3)
            oprot.writeString(self.loginUserName)
            oprot.writeFieldEnd()
        if self.preferredJobSubmissionProtocol is not None:
            oprot.writeFieldBegin('preferredJobSubmissionProtocol', TType.I32, 4)
            oprot.writeI32(self.preferredJobSubmissionProtocol)
            oprot.writeFieldEnd()
        if self.preferredDataMovementProtocol is not None:
            oprot.writeFieldBegin('preferredDataMovementProtocol', TType.I32, 5)
            oprot.writeI32(self.preferredDataMovementProtocol)
            oprot.writeFieldEnd()
        if self.preferredBatchQueue is not None:
            oprot.writeFieldBegin('preferredBatchQueue', TType.STRING, 6)
            oprot.writeString(self.preferredBatchQueue)
            oprot.writeFieldEnd()
        if self.scratchLocation is not None:
            oprot.writeFieldBegin('scratchLocation', TType.STRING, 7)
            oprot.writeString(self.scratchLocation)
            oprot.writeFieldEnd()
        if self.allocationProjectNumber is not None:
            oprot.writeFieldBegin('allocationProjectNumber', TType.STRING, 8)
            oprot.writeString(self.allocationProjectNumber)
            oprot.writeFieldEnd()
        if self.resourceSpecificCredentialStoreToken is not None:
            oprot.writeFieldBegin('resourceSpecificCredentialStoreToken', TType.STRING, 9)
            oprot.writeString(self.resourceSpecificCredentialStoreToken)
            oprot.writeFieldEnd()
        if self.usageReportingGatewayId is not None:
            oprot.writeFieldBegin('usageReportingGatewayId', TType.STRING, 10)
            oprot.writeString(self.usageReportingGatewayId)
            oprot.writeFieldEnd()
        if self.qualityOfService is not None:
            oprot.writeFieldBegin('qualityOfService', TType.STRING, 11)
            oprot.writeString(self.qualityOfService)
            oprot.writeFieldEnd()
        if self.reservation is not None:
            oprot.writeFieldBegin('reservation', TType.STRING, 12)
            oprot.writeString(self.reservation)
            oprot.writeFieldEnd()
        if self.reservationStartTime is not None:
            oprot.writeFieldBegin('reservationStartTime', TType.I64, 13)
            oprot.writeI64(self.reservationStartTime)
            oprot.writeFieldEnd()
        if self.reservationEndTime is not None:
            oprot.writeFieldBegin('reservationEndTime', TType.I64, 14)
            oprot.writeI64(self.reservationEndTime)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """Raise TProtocolException if a required field is unset."""
        if self.computeResourceId is None:
            raise TProtocol.TProtocolException(message='Required field computeResourceId is unset!')
        if self.overridebyAiravata is None:
            raise TProtocol.TProtocolException(message='Required field overridebyAiravata is unset!')
        return

    def __hash__(self):
        # Generated field-wise hash combine (x * 31 ^ field).
        value = 17
        value = (value * 31) ^ hash(self.computeResourceId)
        value = (value * 31) ^ hash(self.overridebyAiravata)
        value = (value * 31) ^ hash(self.loginUserName)
        value = (value * 31) ^ hash(self.preferredJobSubmissionProtocol)
        value = (value * 31) ^ hash(self.preferredDataMovementProtocol)
        value = (value * 31) ^ hash(self.preferredBatchQueue)
        value = (value * 31) ^ hash(self.scratchLocation)
        value = (value * 31) ^ hash(self.allocationProjectNumber)
        value = (value * 31) ^ hash(self.resourceSpecificCredentialStoreToken)
        value = (value * 31) ^ hash(self.usageReportingGatewayId)
        value = (value * 31) ^ hash(self.qualityOfService)
        value = (value * 31) ^ hash(self.reservation)
        value = (value * 31) ^ hash(self.reservationStartTime)
        value = (value * 31) ^ hash(self.reservationEndTime)
        return value

    def __repr__(self):
        # NOTE(review): dict.iteritems() exists only in Python 2 -- this
        # generated module targets Python 2 (see the Thrift compiler header).
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class StoragePreference:
    """
    Gateway-specific preferences for a storage resource (held in a
    GatewayResourceProfile's storagePreferences list).

    Attributes:
     - storageResourceId
     - loginUserName
     - fileSystemRootLocation
     - resourceSpecificCredentialStoreToken
    """

    # Thrift metadata: one (field id, wire type, name, type args, default) tuple
    # per slot, indexed by field id (slot 0 is unused).
    thrift_spec = (
        None, # 0
        (1, TType.STRING, 'storageResourceId', None, None, ), # 1
        (2, TType.STRING, 'loginUserName', None, None, ), # 2
        (3, TType.STRING, 'fileSystemRootLocation', None, None, ), # 3
        (4, TType.STRING, 'resourceSpecificCredentialStoreToken', None, None, ), # 4
    )

    def __init__(self, storageResourceId=None, loginUserName=None, fileSystemRootLocation=None, resourceSpecificCredentialStoreToken=None,):
        self.storageResourceId = storageResourceId
        self.loginUserName = loginUserName
        self.fileSystemRootLocation = fileSystemRootLocation
        self.resourceSpecificCredentialStoreToken = resourceSpecificCredentialStoreToken

    def read(self, iprot):
        """Populate this struct from the protocol, skipping unknown fields."""
        # Fast path: delegate to the C-accelerated decoder when available.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.storageResourceId = iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRING:
                    self.loginUserName = iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.STRING:
                    self.fileSystemRootLocation = iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 4:
                if ftype == TType.STRING:
                    self.resourceSpecificCredentialStoreToken = iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct; None-valued fields are omitted from the wire."""
        # Fast path: delegate to the C-accelerated encoder when available.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('StoragePreference')
        if self.storageResourceId is not None:
            oprot.writeFieldBegin('storageResourceId', TType.STRING, 1)
            oprot.writeString(self.storageResourceId)
            oprot.writeFieldEnd()
        if self.loginUserName is not None:
            oprot.writeFieldBegin('loginUserName', TType.STRING, 2)
            oprot.writeString(self.loginUserName)
            oprot.writeFieldEnd()
        if self.fileSystemRootLocation is not None:
            oprot.writeFieldBegin('fileSystemRootLocation', TType.STRING, 3)
            oprot.writeString(self.fileSystemRootLocation)
            oprot.writeFieldEnd()
        if self.resourceSpecificCredentialStoreToken is not None:
            oprot.writeFieldBegin('resourceSpecificCredentialStoreToken', TType.STRING, 4)
            oprot.writeString(self.resourceSpecificCredentialStoreToken)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """Raise TProtocolException if a required field is unset."""
        if self.storageResourceId is None:
            raise TProtocol.TProtocolException(message='Required field storageResourceId is unset!')
        return

    def __hash__(self):
        # Generated field-wise hash combine (x * 31 ^ field).
        value = 17
        value = (value * 31) ^ hash(self.storageResourceId)
        value = (value * 31) ^ hash(self.loginUserName)
        value = (value * 31) ^ hash(self.fileSystemRootLocation)
        value = (value * 31) ^ hash(self.resourceSpecificCredentialStoreToken)
        return value

    def __repr__(self):
        # NOTE(review): dict.iteritems() is Python 2 only -- this generated
        # module targets Python 2.
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class GatewayResourceProfile:
    """
    Gateway Resource Profile

    gatewayID:
      Unique identifier for the gateway assigned by Airavata. Corelate this to Airavata Admin API Gateway Registration.

    credentialStoreToken:
      Gateway's defualt credential store token.

    computeResourcePreferences:
      List of resource preferences for each of the registered compute resources.

    identityServerTenant:

    identityServerPwdCredToken:

    Attributes:
     - gatewayID
     - credentialStoreToken
     - computeResourcePreferences
     - storagePreferences
     - identityServerTenant
     - identityServerPwdCredToken
    """

    # Thrift metadata: one (field id, wire type, name, type args, default) tuple
    # per slot, indexed by field id (slot 0 is unused).
    thrift_spec = (
        None, # 0
        (1, TType.STRING, 'gatewayID', None, None, ), # 1
        (2, TType.STRING, 'credentialStoreToken', None, None, ), # 2
        (3, TType.LIST, 'computeResourcePreferences', (TType.STRUCT,(ComputeResourcePreference, ComputeResourcePreference.thrift_spec)), None, ), # 3
        (4, TType.LIST, 'storagePreferences', (TType.STRUCT,(StoragePreference, StoragePreference.thrift_spec)), None, ), # 4
        (5, TType.STRING, 'identityServerTenant', None, None, ), # 5
        (6, TType.STRING, 'identityServerPwdCredToken', None, None, ), # 6
    )

    def __init__(self, gatewayID=None, credentialStoreToken=None, computeResourcePreferences=None, storagePreferences=None, identityServerTenant=None, identityServerPwdCredToken=None,):
        self.gatewayID = gatewayID
        self.credentialStoreToken = credentialStoreToken
        self.computeResourcePreferences = computeResourcePreferences
        self.storagePreferences = storagePreferences
        self.identityServerTenant = identityServerTenant
        # Bug fix: this line was corrupted to "= <PASSWORD>ServerPwdCredToken"
        # (a syntax error); restore the plain constructor-argument assignment
        # used by every other field.
        self.identityServerPwdCredToken = identityServerPwdCredToken

    def read(self, iprot):
        """Populate this struct from the protocol, skipping unknown fields."""
        # Fast path: delegate to the C-accelerated decoder when available.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.gatewayID = iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRING:
                    self.credentialStoreToken = iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.LIST:
                    self.computeResourcePreferences = []
                    (_etype3, _size0) = iprot.readListBegin()
                    # NOTE: xrange is a Python 2 builtin; this generated module
                    # targets Python 2 (see the Thrift compiler header).
                    for _i4 in xrange(_size0):
                        _elem5 = ComputeResourcePreference()
                        _elem5.read(iprot)
                        self.computeResourcePreferences.append(_elem5)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 4:
                if ftype == TType.LIST:
                    self.storagePreferences = []
                    (_etype9, _size6) = iprot.readListBegin()
                    for _i10 in xrange(_size6):
                        _elem11 = StoragePreference()
                        _elem11.read(iprot)
                        self.storagePreferences.append(_elem11)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 5:
                if ftype == TType.STRING:
                    self.identityServerTenant = iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 6:
                if ftype == TType.STRING:
                    self.identityServerPwdCredToken = iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct; None-valued fields are omitted from the wire."""
        # Fast path: delegate to the C-accelerated encoder when available.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('GatewayResourceProfile')
        if self.gatewayID is not None:
            oprot.writeFieldBegin('gatewayID', TType.STRING, 1)
            oprot.writeString(self.gatewayID)
            oprot.writeFieldEnd()
        if self.credentialStoreToken is not None:
            oprot.writeFieldBegin('credentialStoreToken', TType.STRING, 2)
            oprot.writeString(self.credentialStoreToken)
            oprot.writeFieldEnd()
        if self.computeResourcePreferences is not None:
            oprot.writeFieldBegin('computeResourcePreferences', TType.LIST, 3)
            oprot.writeListBegin(TType.STRUCT, len(self.computeResourcePreferences))
            for iter12 in self.computeResourcePreferences:
                iter12.write(oprot)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        if self.storagePreferences is not None:
            oprot.writeFieldBegin('storagePreferences', TType.LIST, 4)
            oprot.writeListBegin(TType.STRUCT, len(self.storagePreferences))
            for iter13 in self.storagePreferences:
                iter13.write(oprot)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        if self.identityServerTenant is not None:
            oprot.writeFieldBegin('identityServerTenant', TType.STRING, 5)
            oprot.writeString(self.identityServerTenant)
            oprot.writeFieldEnd()
        if self.identityServerPwdCredToken is not None:
            oprot.writeFieldBegin('identityServerPwdCredToken', TType.STRING, 6)
            oprot.writeString(self.identityServerPwdCredToken)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """Raise TProtocolException if a required field is unset."""
        if self.gatewayID is None:
            raise TProtocol.TProtocolException(message='Required field gatewayID is unset!')
        return

    def __hash__(self):
        # Generated field-wise hash combine (x * 31 ^ field).
        value = 17
        value = (value * 31) ^ hash(self.gatewayID)
        value = (value * 31) ^ hash(self.credentialStoreToken)
        value = (value * 31) ^ hash(self.computeResourcePreferences)
        value = (value * 31) ^ hash(self.storagePreferences)
        value = (value * 31) ^ hash(self.identityServerTenant)
        value = (value * 31) ^ hash(self.identityServerPwdCredToken)
        return value

    def __repr__(self):
        # NOTE(review): dict.iteritems() is Python 2 only, consistent with the
        # rest of this generated module.
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
|
# <gh_stars>1-10  (dataset artifact: GitHub star-count bucket, not source code)
import os
from typing import List, Optional
import cv2.cv2 as cv2
import dlib
import numpy as np
from facepy import config
from facepy.view import geometry_renderer
from . import img
from .detector import RawModel, StaticFaceDetector
from .geometry import Landmarks, Point
class FeatureExtractor:
    """Abstract base class for facial feature extractors."""

    # Annotation fix: np.array is a factory function, not a type; the proper
    # array type for annotations is np.ndarray.
    def extract_features(self, image: np.ndarray) -> Optional[np.ndarray]:
        """
        Return the vector of features extracted from *image*, or None when
        extraction is not possible.  Subclasses must override this method.

        :param image: input image as a numpy array.
        :raises NotImplementedError: always, on this base class.
        """
        raise NotImplementedError
class GeometricFeatureExtractor(FeatureExtractor):
    """
    Adaptation of very early, geometry-based face recognition methods.  It
    extracts a subset of the landmarks returned by Dlib's face pose estimator
    and derives normalized distances between every non-ordered pair of those
    points.
    """

    def __init__(self) -> None:
        self.__detector = StaticFaceDetector(scale_factor=1)

    def extract_features(self, image: np.array) -> Optional[np.array]:
        """Return normalized pairwise landmark distances, or None when no face is found."""
        face = self.__detector.detect_main_face(image)
        if face is None:
            return None

        # Work with the subset of Dlib's landmarks used for recognition.
        traits = self.__rec_traits(face.landmarks)

        # In debug mode, dump an annotated copy of the image.
        if config.DEBUG:
            annotated = image.copy()
            geometry_renderer.draw_points(annotated, traits)
            img.save(annotated, os.path.join(config.Paths.DEBUG_DIR, 'features.png'))

        # Distances are normalized by the inner-eye distance.
        norm_factor = face.landmarks.left_eye[0].distance(face.landmarks.right_eye[3])

        # One entry per unordered pair of trait points: n-choose-2 values,
        # ordered exactly like a nested (i, j>i) loop.
        count = len(traits)
        pairwise = [
            traits[a].distance(traits[b]) / norm_factor
            for a in range(count - 1)
            for b in range(a + 1, count)
        ]
        return np.array(pairwise)

    # Private

    def __rec_traits(self, lm: Landmarks) -> List[Point]:
        """Returns the subset of face landmarks used by the feature extractor."""
        points = [lm.left_eye[0], lm.left_eye[3], lm.right_eye[0], lm.right_eye[3]]
        points += [lm.nose_bridge[0], lm.nose_tip[2], lm.nose_tip[0], lm.nose_tip[-1]]
        points += [lm.top_lip[0], lm.top_lip[2], lm.top_lip[4], lm.top_lip[6]]
        points.append(lm.bottom_lip[3])
        return points
class CNNFeatureExtractor(FeatureExtractor):
    """
    This feature extractor is a wrapper for Dlib's CNN facial feature extractor.
    For details: http://blog.dlib.net/2017/02/high-quality-face-recognition-with-deep.html
    """

    def __init__(self) -> None:
        self.__net = dlib.face_recognition_model_v1(config.Paths.CNN_FACE_DESCRIPTOR_MODEL)

    def extract_features(self, image: np.array) -> Optional[np.array]:
        """Return the CNN face descriptor for the first detected face, or None."""
        # Dlib expects RGB ordering; OpenCV images arrive as BGR.
        rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        detections = RawModel.hog_detector()(rgb)
        if detections is None or len(detections) == 0:
            return None
        shape = RawModel.shape_predictor_small()(rgb, detections[0])
        return np.array(self.__net.compute_face_descriptor(rgb, shape))
|
import os
import numpy as np
import matplotlib.pylab as plt
import matplotlib
import xlrd
from matplotlib import rcParams
from matplotlib.colors import LinearSegmentedColormap
from matplotlib import cm
def split_data(data):
    """
    Split a tab-separated string into its non-empty fields.

    :param data: raw string whose fields are separated by "\t"
    :return: list of the non-empty fields, in their original order
    """
    # Comprehension replaces the original index loop over range(len(...)).
    return [field for field in data.split("\t") if field != ""]
def get_data(route, method):
    """
    Parse one analysis-result file (<route>/<method>) into a dict.

    Each line is colon-separated.  A "key: values" line maps the key to its
    tab-separated fields.  A line with more colon segments is treated as a
    chain: each segment's last token is the key for the next segment's
    values (whose trailing token is the following key).

    :param route: directory containing the result file
    :param method: file name to open inside *route*
    :return: dict mapping metric name -> list of string values
    """
    data = dict()
    with open(route + "/" + method, "r", encoding="utf8") as f:
        for line in f.readlines():
            line = line.strip()
            line = line.split(":")
            if len(line) == 2:
                # Simple "key: values" line.
                data[line[0]] = split_data(line[1])
            else:
                # Chained segments: key_i -> values_{i+1} minus its last token
                # (which is the next key).
                for i in range(len(line) - 2):
                    data_key = split_data(line[i])[-1]
                    data_content = split_data(line[i + 1])[0:-1]
                    data[data_key] = data_content
                # print(split_data(line[-2]))
                # The final segment keeps all of its tokens as values.
                # NOTE(review): a line with no ':' at all (len(line) == 1)
                # would make line[-2] raise IndexError -- presumably the input
                # files never contain such lines; confirm.
                last_data_key = split_data(line[-2])[-1]
                last_data_content = split_data(line[-1])
                data[last_data_key] = last_data_content
    return data
def get_all_data(dataset, method, root_name="VLDB2020"):
    """
    Load the *method* result files for every dataset in *dataset*.

    The project root is located by cutting the current working directory at
    the first occurrence of *root_name*; results are read from
    <root>/output/analyse_results/<dataset>/<method-dir>/721_5fold/<1..5>/.

    :param dataset: iterable of dataset names (sub-directories of analyse_results)
    :param method: result file name parsed by get_data() in each leaf directory
    :param root_name: name of the project root directory on the current path
    :return: nested dict  dataset -> method-dir -> leaf-name -> parsed data
    """
    # Derive the project root from the absolute path of the CWD.
    abs_route = os.path.abspath(".")
    abs_route = os.path.normpath(abs_route)
    route_list = abs_route.split(root_name)
    root_route = route_list[0] + root_name
    dataset_root = os.path.normpath(root_route + "/output/analyse_results")
    data_list = dict()
    for item in dataset:
        dataset_dir = os.path.normpath(dataset_root + "/" + item + "/")
        data_list[item] = dict()
        fold_list = list()
        dirs = os.listdir(dataset_dir)
        data_db_list = dict()
        for next_dir in dirs:
            data_db_list[next_dir] = dict()
            temp = os.path.join(dataset_dir, next_dir + "/721_5fold")
            fold_list.append(temp)
        for fold in fold_list:
            data_fold_list = dict()
            # The five folds of the 7/2/1 split live in directories "1".."5".
            for i in range(1, 6):
                temp_path = os.path.normpath(fold + "/" + str(i))
                if not os.path.exists(temp_path):
                    continue
                last_file = os.listdir(temp_path)
                # NOTE(review): entries from all five folds are merged into one
                # dict keyed by leaf name, so identical names across folds
                # overwrite each other -- presumably leaf names are unique.
                for file in last_file:
                    data_final_dir = os.path.normpath(temp_path + "/" + file)
                    if not os.path.exists(data_final_dir + "/" + method):
                        continue
                    data_fold_list[file] = get_data(data_final_dir, method)
            # NOTE(review): splitting on "/" assumes POSIX separators; on
            # Windows os.path.join produces "\\" and this key would be wrong.
            data_db_list[fold.split("/")[-2]] = data_fold_list
        data_list[item] = data_db_list
    return data_list
def get_graphics_data(data, dataset_list, need):
    """
    Collect the metric *need* for every method in *data* over *dataset_list*.

    :param data: nested dict  method -> dataset -> fold -> metrics
    :param dataset_list: datasets whose folds should be aggregated
    :param need: metric key to pick out of each fold's result dict
    :return: (method_names, values) where values[i] is the float_data()
             aggregation of the metric over all requested folds of method i
    """
    compare_method = list(data.keys())
    final_data = []
    for method_name in compare_method:
        collected = [
            fold_result[need]
            for dataset_name in dataset_list
            for fold_result in data[method_name][dataset_name].values()
        ]
        final_data.append(float_data(collected))
    return compare_method, final_data
def float_data(data):
    """
    Convert numeric-string metrics to floats and average them element-wise.

    :param data: list whose items are lists of numeric strings/numbers
                 (a bare scalar item is converted to a single float)
    :return: element-wise mean over the items, as a plain Python list
    """
    # isinstance() replaces the fragile type(x).__name__ == "list" check.
    converted = [
        [float(part) for part in item] if isinstance(item, list) else float(item)
        for item in data
    ]
    # NOTE(review): as in the original, a list of bare scalars yields a 0-d
    # mean, which list() cannot wrap -- callers always pass lists of lists.
    return list(np.mean(np.array(converted), axis=0))
def plot_config():
    """Switch matplotlib to a serif (Times New Roman) font family."""
    rcParams.update({
        "font.family": "serif",
        "font.serif": "Times New Roman",
    })
def config_color_map():
    """
    Build the custom light-to-dark colormap used by the grid figures.

    Each channel is given as (position, value-below, value-above) anchor
    tuples, as required by LinearSegmentedColormap.
    """
    red = ((0, 1, 1), (0.2, 1, 1), (0.5, 0.3, 0.3), (0.7, 0, 0), (1, 0, 0))
    green = ((0, 1, 1), (0.2, 1, 1), (0.5, 0.8, 0.8), (0.7, 0.4, 0.4), (1, 0.2, 0.2))
    blue = ((0, 1, 1), (0.2, 0.6, 0.6), (0.5, 1, 1), (0.7, 0.8, 0.8), (1, 0.4, 0.4))
    segments = {"red": red, "green": green, "blue": blue}
    return LinearSegmentedColormap("user_color", segments, 256)
def grid_figure(x_list, compare_method, analyse_data):
    """
    Render the analysed values as one heat-map subplot per dataset.

    :param x_list: dataset names, one subplot each
    :param compare_method: method names used as x-tick labels
    :param analyse_data: flat list of per-(method, dataset) value lists, laid
        out so that entries of dataset i sit at indices i, i+len(x_list), ...
    """
    user_cmap = config_color_map()
    # x_position is never changed; it only documents the start offset.
    x_position = 0
    plt.figure(figsize=(32, 6))
    for i in range(len(x_list)):
        data = list()
        plt.subplot(1, len(x_list), i + 1)
        plt.xticks([x for x in range(len(compare_method))], compare_method, fontsize=20, rotation=-90, )
        plt.yticks([])
        # Pick every len(x_list)-th entry: the values belonging to dataset i,
        # truncated to the first 5 components.
        for j in range(x_position + i, len(analyse_data), len(x_list)):
            data.append(analyse_data[j][0:5])
        pic_data = np.array(data).T
        plt.imshow(pic_data, cmap=user_cmap)
        # sns.heatmap(pic_data)
        cb = plt.colorbar()
        cb.ax.tick_params(labelsize=16)
    # plt.tight_layout()
    plt.subplots_adjust(left=0.05, right=0.9, bottom=0.3, wspace=0)
    plt.show()
def plot_figure(x_list, compare_method, analyse_data, ran):
    """
    Draw one line per dataset in *x_list* over the compared methods.

    :param x_list: dataset names, one plotted line (and legend entry) each
    :param compare_method: method names used on the x axis
    :param analyse_data: flat list laid out as in grid_figure(); only the
        first value of each entry is plotted
    :param ran: (ymin, ymax) pair bounding the y axis
    """
    marker = ["o", ".", "p", "s", "d", "*"]
    line_list = []
    x_position = 0
    plt.figure(figsize=(16, 12))
    for i in range(len(x_list)):
        data = list()
        # plt.subplot(len(x_list),2,i+1)
        plt.xticks([x for x in range(len(compare_method))], compare_method, rotation=0)
        plt.yticks(np.arange(ran[0], ran[1], (ran[1] - ran[0]) / 5))
        plt.ylim((ran[0], ran[1]))
        # Every len(x_list)-th entry belongs to dataset i.
        for j in range(x_position + i, len(analyse_data), len(x_list)):
            data.append(analyse_data[j][0])
        line, = plt.plot(compare_method, data, marker=marker[i])
        line_list.append(line)
    plt.legend(line_list, x_list, loc="upper right")
    plt.subplots_adjust(left=0.05, right=0.9, wspace=0)
    plt.show()
# **************** Used to inspect the distributions produced by raw_analyse ****************
def hub_picture(data, dataset, method):
    """
    Draw two stacked-bar subplots showing, per method, the proportion of
    entities falling into each hubness bucket (>5 times, 1-5 times, never).

    :param data: three lists (one per bucket) of per-method value lists; the
        first value of each entry is the plotted proportion.  Entries for the
        second dataset start at offset len(method).
    :param dataset: two dataset names used as subplot titles
    :param method: method names used as bar labels
    """
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 10))
    ax1.set_xticks([])
    ax2.set_xticks([])
    ax1.set_ylabel("Proportion", fontsize=16)
    ax2.set_ylabel("Proportion", fontsize=16)
    width = 0.8
    startposition = 0.8
    color_list = ["tomato", 'deepskyblue', "orange"]
    label_list = ["more than 5 times", "1 to 5 times", "never appear"]
    legend_list = list()
    # Left subplot: first dataset (entries 0..len(method)-1 of each bucket).
    for i in range(len(method)):
        bottom = 0
        for j in range(3):
            bar1, = ax1.bar(startposition+i*width, height=data[j][i][0], width=0.5, bottom=bottom, color=color_list[j], align="edge", edgecolor="black")
            bottom += data[j][i][0]
        ax1.text(startposition+i*width+0.1, y=-0.03, s=method[i], fontsize="14",rotation=-90)
    ax1.text(startposition + (len(method)/2-2) * width, y=1.08, s=dataset[0], fontsize="14")
    # Right subplot: second dataset (entries offset by len(method)).
    for i in range(len(method)):
        bottom = 0
        for j in range(3):
            bar2, = ax2.bar(startposition+i*width, height=data[j][i+len(method)][0], width=0.5, bottom=bottom, color=color_list[j], align="edge", edgecolor="black")
            bottom += data[j][i+len(method)][0]
            # NOTE(review): only the last bucket's handle of the first method
            # survives here, so the legend colors may not match all buckets.
            if i == 0:
                legend_list.append(bar2)
        ax2.text(startposition + i * width + 0.1, y=-0.03, s=method[i], fontsize="14", rotation=-90)
    ax2.text(startposition + (len(method) / 2 - 2) * width, y=1.08, s=dataset[1], fontsize="14")
    plt.figlegend(legend_list, labels=label_list, loc="best")
    # plt.legend()
    plt.show()
def running_time(dir, method_list, dataset_list):
    """
    Plot per-method running times read from the "run_time" sheet of an Excel
    workbook as two grouped bar charts (15K and 100K dataset variants).

    :param dir: path to the .xlsx workbook (shadows the `dir` builtin --
        kept for interface compatibility)
    :param method_list: names of the 17 compared methods (legend labels)
    :param dataset_list: subset of the known dataset kinds to plot
    """
    user_cmap = config_color_map()
    color_map = cm.get_cmap("tab20c")
    print()
    cnorm = matplotlib.colors.Normalize(vmin=0, vmax=20)
    sclarmap = cm.ScalarMappable(norm=cnorm, cmap=color_map)
    data = xlrd.open_workbook(dir)
    tabel = data.sheet_by_name("run_time")
    # ***************** row ranges *******************
    left_row_start = 3
    left_row_end = 14
    right_row_start = 3
    right_row_end = 9
    # *********************** column coordinates *************
    left_col_start = 3
    left_col_end = 31
    right_col_start = 34
    right_col_end = 65
    data_kind = ["DBP_en_DBP_de", "DBP_en_DBP_fr", "DBP_en_WD_en", "DBP_en_YG_en"]
    left_data = []
    right_data = []
    plt.figure(figsize=(32, 6))
    sub_i = 1
    for dataset in dataset_list:
        # Shift the column window to the block belonging to this dataset.
        # NOTE(review): the offsets accumulate with += across loop iterations,
        # which looks correct only when dataset_list has a single element (as
        # in the commented-out driver code) -- confirm before plotting several.
        remove = data_kind.index(dataset)
        left_col_start += 11 * remove
        left_col_end += 11 * remove
        right_col_start += 6 * remove
        right_col_end += 6 * remove
        for i in range(left_row_start, left_row_end):
            line_all_row = tabel.row_slice(i, left_col_start, left_col_end)
            left_data.append(line_all_row)
        for i in range(right_row_start, right_row_end):
            line_all_row = tabel.row_slice(i, right_col_start, right_col_end)
            right_data.append(line_all_row)
    print(left_data)
    print(right_data)
    all_data = left_data + right_data
    # **************************************** start of the plotting work *********************
    x_label_list = [["15K_V1", "15K_V2"], ["100K_V1", "100K_V2"]]
    color_list = ["darkorange", "forestgreen", "lightsteelblue", "rosybrown", "gold", "indigo", "red", "sienna",
                  "skyblue",
                  "deeppink", "slategray", "peru", "grey", "olive", "cyan", "blue", "lightpink"]
    # Overridden below: colors are sampled from the tab20c colormap instead.
    color_list = [sclarmap.to_rgba(i) for i in range(17)]
    # **************** currently 17 methods ******************
    first_x_position = [0.5, 12, ]
    width = 0.5
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 8))
    ax1.set_xticks([])
    ax2.set_xticks([])
    ax1.set_ylabel("Time(s)",fontsize=16)
    ax2.set_ylabel("Time(s)",fontsize=16)
    # Left axes: the two 15K variants.
    for method_pos in range(len(first_x_position)):
        for i in range(len(all_data)):
            bar1, = ax1.bar(first_x_position[method_pos] + i * (width),
                            height=all_data[i][5 + method_pos * 7].value, width=0.5,
                            bottom=0, color=color_list[i], edgecolor="black", label=method_list[i])
    ax1.text(x=3.5, y=-120,s=x_label_list[0][0],fontsize="16")
    ax1.text(x=15.5, y=-120, s=x_label_list[0][1],fontsize="16")
    legend_bar=list()
    # Right axes: the two 100K variants (columns offset by 14).
    for method_pos in range(len(first_x_position)):
        for i in range(len(all_data)):
            bar2, = ax2.bar(first_x_position[method_pos] + i * (width),
                            height=all_data[i][14 + 5 + method_pos * 7].value, width=0.5,
                            bottom=0, color=color_list[i], edgecolor="black",label=method_list[i])
            if i < len(first_x_position)/2:
                legend_bar.append(bar2)
    ax2.text(x=3.5, y=-2400, s=x_label_list[1][0], fontsize="16")
    ax2.text(x=15.5, y=-2400, s=x_label_list[1][1], fontsize="16")
    plt.figlegend(legend_bar, labels=method_list,loc="upper center",ncol=10,bbox_to_anchor=(0.5,0.95))
    # plt.legend()
    plt.show()
if __name__ == "__main__":
# dataset = ["BootEA", "GCN_Align", "IPTransE", "TransD", "TransH", "RotatE", "ConvE", "ProjE"]
dataset = ["AttrE", "BootEA", "RotatE", "GCN_Align", "ProjE", "IPTransE", "ConvE", "TransH", "MTransE"]
method_list = ['allents', "conicity", "nearents", "quartile"]
method = "nearents"
x_list = ["DBP_en_DBP_de_15K_V1", "DBP_en_DBP_de_15K_V2", ]
stride = len(x_list)
# **************************no font ******************************************************
plot_config()
# ****************************用来分析hub现象***************************************
data = get_all_data(dataset, method="hub")
compare_methond, gt10 = get_graphics_data(data, x_list, "gt5")
_, to10 = get_graphics_data(data, x_list, "1to5")
_, eq0 = get_graphics_data(data, x_list, "eq0")
hub_picture([gt10, to10, eq0], x_list, compare_methond)
# ******************************draw runningtime**********************************
# rt_method_list = ["MTransE", 'IPTransE', "JAPE", "BootEA", "KDCoE", "GCN-Align", "AttrE", "IMUSE", "SEA", "RSN4EA"
# , "MultiKE", "TransH", "TransD", "ProjE", "ConvE", "SimplE", "RotatE"]
# # rt_dataset_list = ["DBP_en_DBP_de", "DBP_en_DBP_fr", "DBP_en_WD_en", "DBP_en_YG_en"]
# rt_dataset_list = ["DBP_en_DBP_de"]
# rt_dir = "/home/cmwang/桌面/VLDB_exp.xlsx"
# running_time(rt_dir, rt_method_list, rt_dataset_list)
# **********************************grid figure***********************************
# data = get_all_data(dataset, method)
# compare_method, temp_data = get_graphics_data(data, x_list, "sim_result")
# grid_figure(x_list, compare_method, temp_data)
# ********************************************************************************
# **************************************belong to one***************************
# data = get_all_data(dataset, method="nearents")
# compare_method, temp_data = get_graphics_data(data, x_list, "ent1_aver")
# plot_figure(x_list, compare_method, temp_data,[0,10])
# *******************************************************************************
# **********************************aver_near_similarity 距离最近所有实体的平均相似度 ***************************
# data = get_all_data(dataset, method="nearents")
# compare_method, temp_data = get_graphics_data(data, x_list, "ent1_aver_near_aver")
# plot_figure(x_list, compare_method, temp_data, [0,1])
# *********************************************************************************
# ***********************************quar_devi 后四分之一与前四分之一的差值*********************************************
# data = get_all_data(dataset, method="quartile")
# compare_method, temp_data = get_graphics_data(data, x_list, "quar_devi")
# plot_figure(x_list, compare_method, temp_data, [0,8])
# **********************************************************************************
# ***********************************quar_devi 中间1/2的均值*********************************************
# data = get_all_data(dataset, method="quartile")
# compare_method, temp_data = get_graphics_data(data, x_list, "mean_value")
# plot_figure(x_list, compare_method, temp_data, [3, 10])
# ****************************************************************************************
# **************************************不同kg里实体的关系************************************
# data = get_all_data(dataset, method="allents")
# compare_method, temp_data = get_graphics_data(data, x_list, "aver_most_sim")
# plot_figure(x_list, compare_method, temp_data, [0,1])
# **************************************************************************************
# data = get_all_data(dataset, method="conicity")
# compare_method, temp_data = get_graphics_data(data, x_list, "all_ents_conicity")
# plot_figure(x_list, compare_method, temp_data, [0, 0.6])
# compare_method, temp_data = get_graphics_data(data, x_list, "all_ents_vs")
# plot_figure(x_list, compare_method, temp_data, [0, 0.5])
|
"""
Copyright [2009-2017] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# -----------------------------------IMPORTS-----------------------------------
import json
import os
import subprocess
import sys
from config import gen_config as gc
from config import rfam_config as rc
from scripts.export.genomes import genome_fetch as gf
# -----------------------------------------------------------------------------
def domain_download_validator(domain_dir, filename=None):
    """
    Lists all proteome directories in domain_dir and creates a list
    of the genomes that were not downloaded successfully. If filename
    is provided then the UPids will be listed in filename.list

    domain_dir: Destination directory, could be one of the four domains
    filename: A filename for the UPID list / validation report
    returns: None if filename is provided, otherwise a list of upids
    """
    recovery_list = []
    updirs = os.listdir(domain_dir)

    for updir in updirs:
        lsf_output_file = os.path.join(domain_dir, updir, "download.out")
        # bug fix: check_genome_download_status requires a keyword to search
        # for; "Success" is the marker LSF writes for completed jobs
        status = check_genome_download_status(lsf_output_file, "Success")
        if not status:
            recovery_list.append(updir)

    if filename is not None:
        # bug fix: the report file was opened in read mode ('r' by default)
        # and then written to, which raises an IOError
        recovery_file = open(os.path.join(domain_dir, filename + '.list'), 'w')
        for upid in recovery_list:
            recovery_file.write(upid + '\n')
        recovery_file.close()
    else:
        return recovery_list

    return None
# -----------------------------------------------------------------------------
def check_genome_download_status(lsf_out_file, keyword="Success"):
    """
    Opens an LSF output file and checks whether the job's status is success.

    lsf_out_file: LSF platform's output file generated by the -o option
    keyword: A string to look for in the file; defaults to "Success"
             (backward compatible — existing callers that pass a keyword
             behave exactly as before, and the previously-broken caller
             that passed no keyword now works)
    returns: True if the keyword was found on any line, False otherwise
    """
    status = False
    # context manager guarantees the handle is closed even on read errors
    with open(lsf_out_file, 'r') as infile_fp:
        for line in infile_fp:
            if line.find(keyword) != -1:
                status = True
    return status
# -----------------------------------------------------------------------------
def project_download_validator(project_dir, id_pairs_file=None, filename=None):
    """
    Loops over a genome download project directory and reports all the upids
    that need to be recovered.

    project_dir: Destination directory of the genome download pipeline
    id_pairs_file: A json file with all the UPids of the corresponding
    Uniprot release. If None, simply reports a list of UPIds
    filename: A name for the output file. "recovery.tsv" will be used otherwise
    returns: void
    """
    upids_to_recover = []
    sub_dirs = [x for x in os.listdir(project_dir) if x in gc.DOMAINS]

    for sub_dir in sub_dirs:
        domain_dir_path = os.path.join(project_dir, sub_dir)
        upids_to_recover.extend(domain_download_validator(domain_dir_path,
                                                          filename=None))
        # would also be good to remove those genome dirs

    if filename is None:
        filename = "recovery"

    if len(upids_to_recover) != 0:
        fp_out = open(os.path.join(project_dir, filename + '.tsv'), 'w')

        if id_pairs_file is not None:
            # context manager replaces the manual open/close pair
            with open(id_pairs_file, 'r') as fp_in:
                all_id_pairs = json.load(fp_in)

            for upid in upids_to_recover:
                fp_out.write(upid + '\t' + str(all_id_pairs[upid]["GCA"]) +
                             '\t' + all_id_pairs[upid]["DOM"] + '\n')
        # list upids if the UPID/GCA pairs are not available
        else:
            for upid in upids_to_recover:
                fp_out.write(upid + '\n')

        fp_out.close()
    else:
        # bug fix: Python-2-only `print "..."` statement converted to a call
        # so the module also parses under Python 3 (same output under both)
        print("\nGenome Download Success!")
# -----------------------------------------------------------------------------
def get_empty_file_accessions(domain_dir):
    """
    Scan every genome directory under the main domain directory for
    zero-length sequence files. If any are found, dump their accessions to a
    json file named <domain>_empty_files.json inside domain_dir.

    domain_dir: The path to a domain directory (e.g. bacteria) where all
    files have been downloaded
    returns: void
    """
    flagged = {}
    genome_dirs = [d for d in os.listdir(domain_dir)
                   if os.path.isdir(os.path.join(domain_dir, d))]

    for genome_dir in genome_dirs:
        dir_path = os.path.join(domain_dir, genome_dir)
        # sequence files only; proteome files (containing "UP") are skipped
        seq_files = [f for f in os.listdir(dir_path)
                     if f.endswith(".fa") and f.find("UP") == -1]
        zero_length = [f.split(".fa")[0] for f in seq_files
                       if os.path.getsize(os.path.join(dir_path, f)) == 0]
        if len(zero_length) > 0:
            flagged[genome_dir] = zero_length

    if len(flagged.keys()) > 0:
        domain_name = os.path.split(domain_dir)[1]
        out_handle = open(os.path.join(domain_dir,
                                       domain_name + "_empty_files.json"), 'w')
        json.dump(flagged, out_handle)
        out_handle.close()
# -----------------------------------------------------------------------------
def check_all_genome_files_exist(project_dir, upid_gca_file=None):
    """
    Extracts all accessions per genome and checks that all files exist.
    A json file is generated with all missing accessions so that they can be
    downloaded using restore_gen_download. Reports download status for all
    domain subdirectories, marking each as "Success" or "Failure". In case
    of failure it generates a json file with all accessions to restore.

    project_dir: A project directory as generated by genome_downloader pipeline
    upid_gca_file: All upid-gca pairs in json format. None by default, in
    which case the json file produced in project_dir during genome download
    is used
    """
    # list all domain directories in project
    domain_dirs = [x for x in os.listdir(project_dir) if x in gc.DOMAINS]

    # bug fix: the original ignored upid_gca_file entirely and crashed with
    # upid_gca_pairs == None whenever a file was supplied
    if upid_gca_file is None:
        upid_gca_file = os.path.join(project_dir, "upid_gca_dict.json")
    with open(upid_gca_file, 'r') as upid_gca_fp:
        upid_gca_pairs = json.load(upid_gca_fp)

    for domain in domain_dirs:
        domain_missing_accs = {}
        domain_dir_loc = os.path.join(project_dir, domain)

        upids = [x for x in os.listdir(domain_dir_loc)
                 if os.path.isdir(os.path.join(domain_dir_loc, x))]

        for upid in upids:
            # bug fix: this list used to be initialised once per domain, so
            # missing accessions leaked across genomes and every genome was
            # assigned the same (growing) list
            gen_missing_accs = []
            upid_dir = os.path.join(domain_dir_loc, upid)
            gca_acc = upid_gca_pairs[upid]["GCA"]
            accessions = gf.fetch_genome_accessions(upid, gca_acc)

            # assuming that all files are decompressed and should be to avoid problems
            for accession in accessions:
                if accession is not None:
                    if check_file_format(os.path.join(upid_dir, accession + ".fa")) is False:
                        gen_missing_accs.append(accession)
                else:
                    print(upid)

            if len(gen_missing_accs) > 0:
                domain_missing_accs[upid] = gen_missing_accs

        if len(domain_missing_accs.keys()) > 0:
            # bug fix: separator added to the filename; the original produced
            # e.g. "bacteriaacc_recovery.json"
            fp_out = open(os.path.join(domain_dir_loc,
                                       domain + "_acc_recovery.json"), 'w')
            json.dump(domain_missing_accs, fp_out)
            fp_out.close()

            print("%s Validation: Failure" % domain)
        else:
            print("%s Validation: Success" % domain)
# -----------------------------------------------------------------------------
def check_file_format(seq_file):
    """
    Performs some sanity checks on the sequence file. Checks if the file is
    compressed and, if not, validates the format using esl-seqstat. It also
    checks that the sequence file provided is not empty.

    seq_file: The path to a valid sequence file
    returns: True if file passed validation checks, False otherwise
    """
    # compressed file: existence plus a magic-number check
    if seq_file.endswith(".gz"):
        if not os.path.exists(seq_file):
            return False
        return check_compressed_file(seq_file)

    # uncompressed fasta format
    if seq_file.endswith(".fa"):
        # the file must exist before we can validate anything
        if not os.path.exists(seq_file):
            return False

        # validate the content with esl-seqstat
        seqstat_cmd = [rc.ESL_SEQSTAT, '--informat', "fasta", "--dna", seq_file]
        proc = subprocess.Popen(seqstat_cmd, stdout=subprocess.PIPE)
        stdout_text = proc.communicate()[0]

        # only the first line of the esl-seqstat response matters
        first_line = stdout_text.split('\n')[0]
        if first_line.find("Parse failed") != -1 \
                or first_line.find("Format") == -1:
            return False

        # an empty sequence file also fails validation
        if os.path.getsize(seq_file) == 0:
            return False

    # any other extension is accepted as-is (original behaviour preserved)
    return True
# -----------------------------------------------------------------------------
def check_compressed_file(filename):
    """
    Checks if the provided file is in one of the known compressed formats
    by inspecting its leading magic bytes.

    filename: The path to the input file
    returns: Boolean - True if the file is compressed, False otherwise
    """
    # magic byte signatures of the supported archive formats
    magic_dict = {
        b"\x1f\x8b\x08": "gz",
        b"\x42\x5a\x68": "bz2",
        b"\x50\x4b\x03\x04": "zip",
    }

    max_len = max(len(x) for x in magic_dict)

    # bug fix: the file must be opened in binary mode ('rb') — text mode can
    # fail or transform the bytes on non-text archive data; the magic values
    # above are byte strings to match. The redundant close() that previously
    # sat (unreachably) inside the with-block has been removed.
    with open(filename, 'rb') as fp_in:
        file_start = fp_in.read(max_len)

    for magic, filetype in magic_dict.items():
        if file_start.startswith(magic):
            return True  # can also return filetype

    return False
# -----------------------------------------------------------------------------
def validate_domain_dir(domain_dir, out_file=True):
    """
    Validate the sequence files downloaded in a domain directory.

    domain_dir: The path to a domain directory
    out_file: If True, also generate a json file listing the erroneous
    files per upid that need to be downloaded again
    return: A dict with all erroneous accessions in the format {upid: [acc1, ...]}
    """
    erroneous = {}
    proteome_dirs = [d for d in os.listdir(domain_dir)
                     if os.path.isdir(os.path.join(domain_dir, d))]

    for proteome in proteome_dirs:
        proteome_path = os.path.join(domain_dir, proteome)
        fasta_files = [f for f in os.listdir(proteome_path) if f.endswith(".fa")]
        # keep every file that fails the format/sanity checks
        bad_files = [f for f in fasta_files
                     if check_file_format(os.path.join(proteome_path, f)) is False]
        if len(bad_files) > 0:
            erroneous[proteome] = bad_files

    if out_file is True:
        dump_fp = open(os.path.join(domain_dir, "erroneous_accessions.json"), 'w')
        json.dump(erroneous, dump_fp)
        dump_fp.close()

    return erroneous
# -----------------------------------------------------------------------------
def check_luigi_worker_status(err_file):
    """
    Parse the lsf err file and look for a report of failed tasks.

    err_file: The path to a valid lsf err file
    return: True if success (no failed tasks reported), False otherwise
    """
    # bug fix: the original iterated over the filename string (`err_file`)
    # instead of the open file handle, so "failed tasks" could never match
    # and the function always reported success
    with open(err_file, 'r') as err_file_fp:
        for line in err_file_fp:
            if line.find("failed tasks") != -1:
                return False
    return True
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
if __name__ == '__main__':
    # usage: script.py <project_dir> [<upid_gca_file>]
    project_dir = sys.argv[1]

    # bug fix: when the optional pairs file is passed, argv has 3 entries
    # (script name, project_dir, upid_gca_file); the original tested
    # len(sys.argv) == 2 and then indexed sys.argv[2], which does not exist
    # in that case (IndexError)
    if len(sys.argv) > 2:
        upid_gca_file = sys.argv[2]
    else:
        upid_gca_file = None

    check_all_genome_files_exist(project_dir, upid_gca_file=upid_gca_file)
|
# stdlib
from collections import deque
from datetime import datetime, timedelta
from io import TextIOBase
from smtplib import SMTPException
from typing import Deque
# lib
from django.conf import settings
from django.core.mail import EmailMultiAlternatives
from django.core.mail.backends.smtp import EmailBackend
from django.core.management.base import BaseCommand
from django.db.models import Prefetch, F
from django.template.loader import render_to_string
# local
from membership.models import Member, MemberLink, User
class ExpirationBackend(EmailBackend):
    """
    Custom email backend used to send out the expiration emails.
    Returns the success count (or -1 when nothing could be sent).
    """

    def send_messages(self, emails: Deque[EmailMultiAlternatives], out: TextIOBase) -> int:
        """
        Send every queued email, logging failures to `out`.

        :param emails: the queued email objects to deliver
        :param out: a writable stream (e.g. a command's stdout) for logging
        :returns: the number of emails sent successfully, or -1 if there was
                  nothing to send or the connection could not be opened
        """
        if len(emails) == 0:
            out.write('No emails to send.')
            # bug fix: the method is annotated -> int and the caller formats
            # the result as a count, but the original returned (-1, -1)
            return -1

        with self._lock:
            conn = self.open()
            if self.connection is None:
                # We failed silently on open, log and cancel
                out.write('Connection failed.')
                # bug fix: was (-1, -1), see above
                return -1

            successes = 0
            for email in emails:
                try:
                    sent = self._send(email)
                    if sent:
                        successes += 1
                except SMTPException as e:
                    out.write('Error sending message')
                    # bug fix: EmailMessage.recipients is a method — joining
                    # the bound method object raised TypeError here
                    out.write(f'To: {", ".join(email.recipients())}')
                    out.write(f'Exception: {e}')
                    out.write('-' * 50)

            if conn:
                self.close()

        return successes
class Command(BaseCommand):
    """
    Send email to warn Users about upcoming expiration dates.
    Users are warned when their expiry date falls inside the notification
    window set up in `handle` (4 to 32 days from today).
    NOTE(review): the original docstring said "30 days away", but the code
    uses a 4..32-day range — confirm which is intended.
    """
    help = 'Send email to warn Users about upcoming expirations'
    can_import_settings = True

    def handle(self, *args, **kwargs):
        """
        Run the command by:
            - Finding all the Self Managed Members with Users who are about to expire
            - Iterating through these Members, getting all said Users and emailing them
        """
        # Notification window: users whose expiry_date is 4..32 days away.
        self.range_start = datetime.now().date() + timedelta(days=4)
        self.range_end = datetime.now().date() + timedelta(days=32)
        # Collect all warning emails first, then send them in one batch.
        emails: Deque[EmailMultiAlternatives] = deque()
        emails.extend(self.get_self_managed_emails())
        emails.extend(self.get_non_self_managed_emails())
        self.stdout.write('Sending emails.')
        self.send_emails(emails)

    def get_self_managed_emails(self):
        """
        Create the emails that need to be sent to admins in self-managed members
        :returns: A deque of email objects
        """
        # Get the members that need to be notified: ids of self-managed,
        # non-deleted Members that have non-admin Users expiring in the window.
        self_managed_member_ids = User.objects.filter(
            expiry_date__range=(self.range_start, self.range_end),
            administrator=False,
            member__self_managed=True,
            member__deleted__isnull=True,
        ).distinct().values_list(
            'member_id',
            flat=True,
        )
        # Fetch those Members with their admins and their expiring users
        # attached as `admins` / `expiring_users` via Prefetch(to_attr=...).
        members = Member.objects.filter(
            id__in=self_managed_member_ids,
        ).prefetch_related(
            Prefetch(
                'user_set',
                User.objects.filter(administrator=True).order_by('surname'),
                to_attr='admins',
            ),
            Prefetch(
                'user_set',
                User.objects.filter(
                    administrator=False,
                    expiry_date__range=(self.range_start, self.range_end),
                ).order_by(
                    'surname',
                ),
                to_attr='expiring_users',
            ),
        )
        # Create the Admin Emails
        emails: Deque[EmailMultiAlternatives] = deque()
        for member in members:
            for admin in member.admins:
                # Render the plain-text and html variants of the template.
                txt, html = [
                    render_to_string(
                        f'email/admin_expiry_reminder.{version}',
                        context={
                            'member': member,
                            'admin': admin,
                            'users': member.expiring_users,
                        },
                    ) for version in ['txt', 'html']
                ]
                emails.append(self.create_email(
                    user=admin,
                    subject=f'{settings.ORGANIZATION} Membership User is about to expire!',
                    body_txt=txt,
                    body_html=html,
                ))
        return emails

    def get_non_self_managed_emails(self) -> Deque[EmailMultiAlternatives]:
        """
        Create the emails that need to be sent to the admins in charge of non-self-managed partner members
        :returns: A deque of email objects
        """
        # Get the users in non-self-managed Members who are expiring
        expiring_users = User.objects.filter(
            expiry_date__range=(self.range_start, self.range_end),
            administrator=False,
            member__self_managed=False,
            member__deleted__isnull=True,
        ).prefetch_related(
            # Prefetch the Link to the partner that created the non-self-managed Member
            Prefetch(
                'member__member',
                MemberLink.objects.exclude(contra_member_id=F('member_id')),
                'links',
            ),
        )
        # Get the Members that manage these users.
        # NOTE(review): assumes every expiring user's member has at least one
        # such link — `links[0]` raises IndexError otherwise; confirm upstream.
        members_to_notify = {u.member.links[0].contra_member_id for u in expiring_users}
        members = Member.objects.filter(
            id__in=members_to_notify,
        ).order_by(
            'id',
        ).prefetch_related(
            Prefetch(
                'user_set',
                User.objects.filter(administrator=True),
                to_attr='admins',
            ),
        )
        # Cast these members to dictionaries to make it easier to link up with the expiring users
        partner_members = dict()
        for m in members:
            partner_members[m.id] = {
                'member': m,
                'admins': m.admins,
                'expiring_users': list(),
            }
        for u in expiring_users:
            partner_members[u.member.links[0].contra_member_id]['expiring_users'].append(u)
        # Build one email per admin of each partner member.
        emails: Deque[EmailMultiAlternatives] = deque()
        for item in partner_members.values():
            for admin in item['admins']:
                # Render the plain-text and html variants of the template.
                txt, html = [
                    render_to_string(
                        f'email/non_self_managed_expiry_email.{version}',
                        context={
                            'member': item['member'],
                            'admin': admin,
                            'users': item['expiring_users'],
                        },
                    ) for version in ['txt', 'html']
                ]
                emails.append(self.create_email(
                    user=admin,
                    subject=f'{settings.ORGANIZATION} Membership Users in Non-Self-Managed Partners will soon expire',
                    body_txt=txt,
                    body_html=html,
                ))
        return emails

    def create_email(self, user: User, subject: str, body_txt: str, body_html: str):
        """
        Create an email object with the given parameters
        :param user: The user who will receive the email
        :param subject: The subject of the email
        :param body_txt: The body of the email in plain text
        :param body_html: The body of the email in html
        :return: the assembled EmailMultiAlternatives object
        """
        # Outside production deployments all mail is redirected to the
        # developer addresses instead of real users.
        if settings.PRODUCTION_DEPLOYMENT:
            to = [f'{user.first_name} {user.surname} <{user.email}>']
        else:
            to = settings.DEVELOPER_EMAILS
        email = EmailMultiAlternatives(
            from_email=settings.EMAIL_HOST_USER,
            to=to,
            subject=subject,
            body=body_txt,
            headers={'Reply-To': f'{settings.REPLY_TO}'},
        )
        # Attach the html version alongside the plain-text body.
        email.attach_alternative(body_html, 'text/html')
        return email

    def send_emails(self, emails: Deque[EmailMultiAlternatives]):
        """
        Send the queued emails via the custom ExpirationBackend and report
        how many were delivered.
        """
        mail_backend = ExpirationBackend()
        # NOTE(review): on failure paths the backend returns a sentinel
        # rather than a real count, so this message can look odd — confirm.
        successes = mail_backend.send_messages(emails, self.stdout)
        self.stdout.write(f'Sent {successes} of {len(emails)} emails successfully!')
|
<gh_stars>0
##############################################################
##
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.colors import ListedColormap as lcm
from matplotlib import ticker
try:
from palettable.colorbrewer import sequential as sq
from palettable.colorbrewer import diverging as dv
from palettable.colorbrewer import qualitative as ql
except Exception as err:
print(err, ': please install module palettable')
import argparse
##
##
##
##
##############################################################
##############################################################
##
## Save and load to csv files, requires a reshaping to a 2d array (from 3d)
def save_reshape(results, shape, filename='results'):
    """Flatten an n-d array to 2d (rows x shape[-1]) and save it as
    <filename>.csv with no header or index column.

    results: array to save (left untouched — reshape returns a new
        array/view, it does not mutate the caller's argument)
    shape: the array's full shape; only shape[-1] is used as the column count
    filename: output file stem (".csv" is appended)

    Note: the original's trailing "reshape back" statement only rebound a
    local name and had no effect for the caller, so it was removed.
    """
    flat = results.reshape(-1, shape[-1])
    pd.DataFrame(flat).to_csv(filename + '.csv', header=None, index=None)
##
def load_reshape(filename, shape):
    """Read <filename>.csv (as written by save_reshape) and restore the
    original n-d shape given by `shape`."""
    frame = pd.read_csv(filename + '.csv', header=None)
    return np.array(frame).reshape(shape)
##
##
##
##
##############################################################
##############################################################
##############################################################
## PLOTTING FUNCTIONS
## (to match Lungeralla et al 2006)
##
## Plot #1: (Figure 3)
## Used for simulations of linear processes
def sim_plot1(mean_vals, std_vals, lambda_vals, ylabs, ylims=None,
              analytic_solutions=None, nrows=3, ncols=4, labelpads=None,
              figpad=None, skip_ax=list(), figsize=None, cols=None,
              filename='ci_figure1'):
    """
    Plot #1 (Figure 3): mean +/- one-std curves of each causality index
    against the coupling strength, for simulations of linear processes.
    Saves the figure to <filename>.pdf and <filename>.eps.

    mean_vals, std_vals: arrays of shape (n_lambda, n_inds, 2) — the last
        axis holds the X->Y and Y->X directions
    lambda_vals: 1-d sequence of coupling values (the x axis)
    ylabs: per-index y-axis labels
    ylims: optional (n_inds, 2) array of y-limits
    analytic_solutions: optional per-index (n_lambda, 2) arrays (or None
        entries) plotted as dashed reference curves
    nrows, ncols: subplot grid shape
    labelpads: optional per-index labelpad values for the y labels
    figpad: optional (pad, h_pad, w_pad) passed to tight_layout
    skip_ax: subplot positions to leave empty (for grouping indices)
    figsize: optional figure size forwarded to plt.subplots
    cols: line colours [x->y, y->x, analytic x->y, analytic y->x]
    filename: output file stem (no extension)
    """
    rowcol = [(x, y) for x in range(nrows) for y in range(ncols)]
    ## If we want to skip an axis (in order to group certain indices) then use
    ## skip_ax argument to remove this from rowcol
    rowcol_show = [rowcol[ii] for ii in range(len(rowcol)) if ii not in skip_ax]
    n_inds = mean_vals.shape[1]
    n_ax = np.min((nrows * ncols, n_inds))
    ## bug fix: n_lambda was used below but never defined in this function
    ## (NameError unless a module-level global happened to exist)
    n_lambda = len(lambda_vals)
    if labelpads is None:  # bug fix: identity check instead of == None
        labelpads = [None] * n_inds
    ##
    with PdfPages(filename + '.pdf') as pdf:
        ##
        if figsize is None:
            fig, ax = plt.subplots(nrows=nrows, ncols=ncols, sharex=True)
        else:
            fig, ax = plt.subplots(nrows=nrows, ncols=ncols,
                                   sharex=True, figsize=figsize)
        ##
        for ii in range(n_ax):
            ax_temp = ax[rowcol_show[ii]]
            ##
            ## Add in error bars for one standard deviation (before means)
            for jj in range(n_lambda):
                for kk in range(2):
                    ax_temp.plot([lambda_vals[jj], lambda_vals[jj]],
                                 mean_vals[jj, ii, kk] +
                                 np.array([-1, 1]) * std_vals[jj, ii, kk],
                                 c='black', lw=0.1)
            ##
            if cols is None:
                ## Default: blue for x>y, red for y>x
                cols = ['blue', 'red', 'darkblue', 'darkred']
            ##
            ax_temp.plot(lambda_vals, mean_vals[:, ii, 0],
                         c=cols[0], lw=1.8, label=r'$i_{X\rightarrow Y}$')
            ax_temp.plot(lambda_vals, mean_vals[:, ii, 1],
                         c=cols[1], lw=1.8, label=r'$i_{Y\rightarrow X}$')
            if analytic_solutions is not None:
                as_temp = analytic_solutions[ii]
                if as_temp is not None:
                    ax_temp.plot(lambda_vals, as_temp[:, 0], c=cols[2],
                                 lw=1.8, linestyle='dashed',
                                 label=r'$i_{X\rightarrow Y}$: Analytic solution')
                    ax_temp.plot(lambda_vals, as_temp[:, 1], c=cols[3],
                                 lw=1.8, linestyle='dashed',
                                 label=r'$i_{Y\rightarrow X}$: Analytic solution')
            ax_temp.set_ylabel(ylabs[ii].upper(), labelpad=labelpads[ii])
            ## If ylims specified as an (n_ind, 2) array then include this
            if ylims is not None:
                if np.any(ylims[ii, :] is not None):
                    ax_temp.set_ylim(ylims[ii, 0], ylims[ii, 1])
            ## x labels only on the bottom row
            if rowcol_show[ii][0] == nrows - 1:
                ax_temp.set_xlabel(r'Coupling ${\lambda}$')
        ##
        ## Set 'axis = off' for remaining axes
        for ii in range(n_ax + len(skip_ax), nrows * ncols):
            ax[rowcol[ii]].axis('off')
        for ax_ind in skip_ax:
            ax[rowcol[ax_ind]].axis('off')
        ##
        ## Add the legend to the bottom right plot; note this deliberately
        ## reuses the loop variable `ii` leaked from the loops above
        if ii < nrows * ncols:
            ax_legend = ax[rowcol[-1]]
            label_params = ax[rowcol[0]].get_legend_handles_labels()
            ax_legend.legend(*label_params, fontsize='medium', loc='center')
        else:
            ax_temp.legend(fontsize='x-small')
        ##
        if figpad is None:
            plt.tight_layout()
        else:
            plt.tight_layout(pad=figpad[0],
                             h_pad=figpad[1], w_pad=figpad[2])
        plt.savefig(filename + '.eps', format='eps', dpi=300)
        pdf.savefig(fig)
        plt.close()
## end function sim_plot1
##
##
## Plot #2: (Figure 4b)
## Used for simulations of Henon unidirectional maps using the indices
## C_i = i_xy - i_yx where i is any of the causality measures
def sim_plot2(mean_vals, std_vals, lambda_vals, ylabs, ylims=None,
              nrows=3, ncols=4, skip_ax=list(), figpad=None, figsize=None,
              cols=None, linestyles=None, labelpads=None,
              filename='ci_figure2'):
    """
    Plot #2 (Figure 4b): curves of C_i = i_xy - i_yx for Henon
    unidirectional maps, one line per input length (10^3, 10^4, 10^5).
    Saves the figure to <filename>.pdf and <filename>.eps.

    mean_vals, std_vals: arrays of shape (n_lambda, n_inds, 3) — the last
        axis holds the three input lengths
    lambda_vals: 1-d sequence of coupling values (the x axis)
    ylabs: per-index y-axis labels
    ylims: optional (n_inds, 2) array of y-limits
    nrows, ncols: subplot grid shape
    skip_ax: subplot positions to leave empty (for grouping indices)
    figpad: optional (pad, h_pad, w_pad) passed to tight_layout
    figsize: optional figure size forwarded to plt.subplots
    cols, linestyles: per-length line colours / styles
    labelpads: optional per-index labelpad values for the y labels
    filename: output file stem (no extension)
    """
    rowcol = [(x, y) for x in range(nrows) for y in range(ncols)]
    rowcol_show = [rowcol[ii] for ii in range(len(rowcol)) if ii not in skip_ax]
    n_inds = mean_vals.shape[1]
    n_ax = np.min((nrows * ncols, n_inds))
    ## bug fix: n_lambda was used below but never defined in this function
    n_lambda = len(lambda_vals)
    if labelpads is None:  # bug fix: identity check instead of == None
        labelpads = [None] * n_inds
    ##
    with PdfPages(filename + '.pdf') as pdf:
        ##
        if figsize is None:
            fig, ax = plt.subplots(nrows=nrows, ncols=ncols, sharex=True)
        else:
            fig, ax = plt.subplots(nrows=nrows, ncols=ncols,
                                   sharex=True, figsize=figsize)
        ##
        for ii in range(n_ax):
            ax_temp = ax[rowcol_show[ii]]
            ##
            ## One-std error bars, drawn before the mean curves
            for jj in range(n_lambda):
                for kk in range(3):
                    ax_temp.plot([lambda_vals[jj], lambda_vals[jj]],
                                 mean_vals[jj, ii, kk] +
                                 np.array([-1, 1]) * std_vals[jj, ii, kk],
                                 c='black', lw=0.1)
            ## Showing simulation results for different lengths of input
            if cols is None:
                ## Default: blue for 10^3, red for 10^4, green for 10^5
                cols = ['blue', 'red', 'green']
            if linestyles is None:
                linestyles = ['dotted', 'dashed', 'solid']
            label_str = r'i$_{X\rightarrow Y}$ - i$_{Y\rightarrow X}$, '
            label_str_add = [r'$T = 10^3$', r'$T = 10^4$', r'$T = 10^5$']
            ##
            for kk in range(3):
                ax_temp.plot(lambda_vals, mean_vals[:, ii, kk],
                             label=label_str + label_str_add[kk],
                             c=cols[kk], linestyle=linestyles[kk], lw=1.8)
            ax_temp.set_ylabel(ylabs[ii].upper(), labelpad=labelpads[ii])
            ##
            ## If ylims specified as an (n_ind, 2) array then include this
            if ylims is not None:
                ax_temp.set_ylim(ylims[ii, 0], ylims[ii, 1])
            ## x labels only on the bottom row
            if rowcol_show[ii][0] == nrows - 1:
                ax_temp.set_xlabel(r'Coupling ${\lambda}$')
        ##
        ## Set 'axis = off' for remaining axes
        for ii in range(n_ax + len(skip_ax), nrows * ncols):
            ax[rowcol[ii]].axis('off')
        for ax_ind in skip_ax:
            ax[rowcol[ax_ind]].axis('off')
        ##
        ## Add the legend to the bottom right plot; note this deliberately
        ## reuses the loop variable `ii` leaked from the loops above
        if ii < nrows * ncols:
            ax_legend = ax[rowcol[-1]]
            label_params = ax_temp.get_legend_handles_labels()
            ax_legend.legend(*label_params, fontsize='medium', loc='center')
        else:
            ax_temp.legend(fontsize='x-small')
        ##
        if figpad is None:
            plt.tight_layout()
        else:
            plt.tight_layout(pad=figpad[0],
                             h_pad=figpad[1], w_pad=figpad[2])
        plt.savefig(filename + '.eps', format='eps', dpi=300)
        pdf.savefig(fig)
        plt.close()
## end function sim_plot2
##
##
## Plot #3: (Figure 5)
## Used for simulations of Henon bidirectional maps using the indices
## C_i = i_xy - i_yx where i is any of the causality measures
## This is a set of heatmaps rather than plots
def sim_plot3(vals, lambda_vals, titles, vlims = None, transpose = True, \
    vlim_percentiles = [1, 99], nrows = 3, ncols = 4, skip_ax = list(), \
    cmap = None, figpad = None, figsize = None, filename = 'ci_figure3'):
    """
    Plot #3 (Figure 5): one heatmap per causality index, C_i = i_xy - i_yx,
    over a 2-d grid of coupling values (Henon bidirectional maps).
    Saves the figure to <filename>.pdf and <filename>.eps.

    vals: array of shape (n_x, n_y, n_inds) — one 2-d grid per index
    lambda_vals: pair of 1-d arrays giving the x / y coupling axes
    titles: per-index subplot titles, or None for no titles
    vlims: optional (n_inds, 2) colour limits; computed from
        vlim_percentiles when None
    transpose: if True, plot vals[:, :, ii].T
    vlim_percentiles: percentile cut so outliers do not stretch the scale
    nrows, ncols: subplot grid shape
    skip_ax: subplot positions to leave empty (for grouping indices)
    cmap: matplotlib colormap name; defaults to 'RdYlGn'
    figpad: optional (pad, h_pad, w_pad) passed to tight_layout
    figsize: optional figure size forwarded to plt.subplots
    filename: output file stem (no extension)
    """
    rowcol = [(x, y) for x in range(nrows) for y in range(ncols)]
    rowcol_show = [rowcol[ii] for ii in range(len(rowcol)) if ii not in skip_ax]
    n_inds = vals.shape[2]
    n_ax = np.min((nrows * ncols, n_inds))
    ## image extent in data coordinates: (x_min, x_max, y_min, y_max)
    extent = np.min(lambda_vals[0]), np.max(lambda_vals[0]), \
        np.min(lambda_vals[1]), np.max(lambda_vals[1])
    ##
    with PdfPages(filename + '.pdf') as pdf:
        ##
        if figsize is None:
            fig, ax = plt.subplots(nrows = nrows, ncols = ncols, \
                sharex = True, sharey = True)
        else:
            fig, ax = plt.subplots(nrows = nrows, ncols = ncols, \
                sharex = True, sharey = True, figsize = figsize)
        ##
        for ii in range(n_ax):
            ##
            ax_temp = ax[rowcol_show[ii]]
            ##
            ## vlims is allocated once (first iteration) and then filled
            ## per index from the requested percentiles
            if vlims is None:
                vlims = np.zeros((n_ax, 2))
            ## cut vlim so extreme values don't influence the colour scale
            percentiles = np.nanpercentile(vals[:, :, ii], vlim_percentiles)
            vlims[ii, :] = np.array([-1, 1]) * np.max(np.abs(percentiles))
            if transpose is True:
                z = vals[:, :, ii].T
            else:
                z = vals[:, :, ii]
            if cmap is None:
                cmap = 'RdYlGn'
            im = ax_temp.imshow(z, origin = 'lower', cmap = cmap, \
                vmin = vlims[ii, 0], vmax = vlims[ii, 1], extent = extent)
            ##
            ## Set title as the indices (rather than as ylabels as in
            ## the other plotting functions)
            if titles is not None:
                ax_temp.set_title(titles[ii].upper())
            ##
            ## axis labels only on the bottom row / left column
            if rowcol_show[ii][0] == nrows - 1:
                ax_temp.set_xlabel(r'$\lambda_{xy}$')
            if rowcol_show[ii][1] == 0:
                ax_temp.set_ylabel(r'$\lambda_{yx}$')
            ##
            ## Add a colourbar to each subplot
            fm = ticker.ScalarFormatter()
            fm.set_powerlimits((-3, 3))
            cbar = ax_temp.figure.colorbar(im, ax = ax_temp, \
                fraction = 0.046, pad = 0.04, format = fm)
        ##
        ## Switch off the remaining / skipped axes
        for ii in range(n_ax + len(skip_ax), nrows * ncols):
            ax[rowcol[ii]].axis('off')
        for ax_ind in skip_ax:
            ax[rowcol[ax_ind]].axis('off')
        ##
        if figpad is None:
            plt.tight_layout()
        else:
            plt.tight_layout(pad = figpad[0], \
                h_pad = figpad[1], w_pad = figpad[2])
        plt.savefig(filename + '.eps', format = 'eps', dpi = 200)
        pdf.savefig(fig)
        plt.close()
## end function sim_plot3
##
##
## Plot #4: (Figure 4a, S2, S3)
## Used for simulations of Ulam lattice (allows multiple experiments on the
## same subplot)
def sim_plot4(mean_vals, std_vals, lambda_vals, ylabs, ylims=None,
              cols=None, tf_names=None, nrows=3, ncols=4, labelpads=None,
              figpad=None, skip_ax=list(), figsize=None, linestyles=None,
              yticks=None, filename='ci_figure4'):
    """
    Plot #4 (Figure 4a, S2, S3): Ulam-lattice simulations, allowing multiple
    experiments (transforms) on the same subplot.
    Saves the figure to <filename>.pdf and <filename>.eps.

    mean_vals, std_vals: arrays of shape (n_lambda, n_inds, n_xy, n_tf)
    lambda_vals: 1-d sequence of coupling values (the x axis)
    ylabs: per-index y-axis labels
    ylims: optional (n_inds, 2) array of y-limits
    cols: line colours; NOTE(review): the plotting below indexes
        cols[kk, ll, :] (a 3-d colour array, e.g. RGB rows), while the
        default built here is a 2-d (n_tf, n_xy) array of colour names —
        the default path would fail; confirm the intended shape
    tf_names: per-line legend labels, reshaped to (n_tf, n_xy)
    nrows, ncols: subplot grid shape
    labelpads: optional per-index labelpad values for the y labels
    figpad: optional (pad, h_pad, w_pad) passed to tight_layout
    skip_ax: subplot positions to leave empty (for grouping indices)
    figsize: optional figure size forwarded to plt.subplots
    linestyles: optional per-transform linestyles
    yticks: optional per-index tick positions
    filename: output file stem (no extension)
    """
    rowcol = [(x, y) for x in range(nrows) for y in range(ncols)]
    ## If we want to skip an axis (in order to group certain indices) then use
    ## skip_ax argument to remove this from rowcol
    rowcol_show = [rowcol[ii] for ii in range(len(rowcol)) if ii not in skip_ax]
    n_inds = mean_vals.shape[1]
    n_xy = mean_vals.shape[2]
    n_tf = mean_vals.shape[3]
    n_ax = np.min((nrows * ncols, n_inds))
    ## bug fix: n_lambda was used below but never defined in this function
    n_lambda = len(lambda_vals)
    if labelpads is None:
        labelpads = [None] * n_inds
    if cols is None:
        cols = np.array(['blue'] * n_tf * n_xy)
        cols = cols.reshape((n_tf, n_xy), order='F')
    if tf_names is None:
        tf_names = np.array(['Result' + str(x) for x in np.arange(n_tf * n_xy)])
        tf_names = tf_names.reshape(n_tf, n_xy)
    ##
    with PdfPages(filename + '.pdf') as pdf:
        ##
        if figsize is None:
            fig, ax = plt.subplots(nrows=nrows, ncols=ncols, sharex=True)
        else:
            fig, ax = plt.subplots(nrows=nrows, ncols=ncols,
                                   sharex=True, figsize=figsize)
        ##
        for ii in range(n_ax):
            ax_temp = ax[rowcol_show[ii]]
            ##
            for kk in range(n_xy):
                ## transforms are drawn in reverse order so the first one
                ## ends up on top
                for ll in range(n_tf):
                    ll = n_tf - ll - 1
                    ##
                    ## Add in error bars for one std (before means)
                    for jj in range(n_lambda):
                        ax_temp.plot([lambda_vals[jj], lambda_vals[jj]],
                                     mean_vals[jj, ii, kk, ll] + np.array([-1, 1]) *
                                     std_vals[jj, ii, kk, ll],
                                     c='black', lw=0.1)
                    ##
                    if linestyles is None:
                        ls = 'solid'
                    else:
                        ls = linestyles[ll]
                    ax_temp.plot(lambda_vals, mean_vals[:, ii, kk, ll],
                                 c=cols[kk, ll, :], lw=1.5, ls=ls,
                                 label=tf_names[ll, kk])
            ##
            ax_temp.set_ylabel(ylabs[ii].upper(), labelpad=labelpads[ii])
            ## If ylims specified as an (n_ind, 2) array then include this
            if ylims is not None:
                if np.any(ylims[ii, :] is not None):
                    ax_temp.set_ylim(ylims[ii, 0], ylims[ii, 1])
            if yticks is not None:
                if yticks[ii] is not None:
                    ax_temp.set_yticks(yticks[ii])
            ## x labels only on the bottom row
            if rowcol_show[ii][0] == nrows - 1:
                ax_temp.set_xlabel(r'Coupling ${\lambda}$')
        ##
        ## Set 'axis = off' for remaining axes
        for ii in range(n_ax + len(skip_ax), nrows * ncols):
            ax[rowcol[ii]].axis('off')
        for ax_ind in skip_ax:
            ax[rowcol[ax_ind]].axis('off')
        ##
        ## Add the legend to the bottom right plot; note this deliberately
        ## reuses the loop variable `ii` leaked from the loops above
        if ii < nrows * ncols:
            ax_legend = ax[rowcol[-1]]
            handles, labels = ax[rowcol[0]].get_legend_handles_labels()
            ax_legend.legend(handles[::-1], labels[::-1],
                             fontsize='small', loc='center')
        else:
            ax_temp.legend(fontsize='x-small')
        ##
        if figpad is None:
            plt.tight_layout()
        else:
            plt.tight_layout(pad=figpad[0],
                             h_pad=figpad[1], w_pad=figpad[2])
        plt.savefig(filename + '.eps', format='eps', dpi=300)
        pdf.savefig(fig)
        plt.close()
## end function sim_plot4
##
##
def corr_plots(corr_array, skip_ax = list(), ylabs = None, titles = None, \
    nrows = 3, ncols = 4, indices_groups = None, figsize = None, \
    cmap = None, figpad = None, fontsize = None, filename = 'corr_plot'):
    """
    Draw one correlation heatmap per slice of corr_array, with a shared
    [-1, 1] colour scale and a single figure-level colourbar.
    Saves the figure to <filename>.pdf and <filename>.eps.

    corr_array: array of shape (n_x, n_x, n_plots) of correlation matrices
    skip_ax: subplot positions to leave empty
    ylabs: tick labels shared by both axes (applied on the bottom row /
        left column only)
    titles: per-plot subplot titles, or None
    nrows, ncols: subplot grid shape
    indices_groups: optional group sizes; cumulative sums are drawn as
        black separator lines on both axes
    figsize: optional figure size forwarded to plt.subplots
    cmap: matplotlib colormap name; defaults to 'RdYlGn'
    figpad: optional (pad, h_pad, w_pad) passed to tight_layout
    fontsize: optional (title, tick, colourbar) font sizes
    filename: output file stem (no extension)
    """
    n_plots = corr_array.shape[2]
    n_x = corr_array.shape[0]
    rowcol = [(x, y) for x in range(nrows) for y in range(ncols)]
    rowcol_show = [rowcol[ii] for ii in range(len(rowcol)) if ii not in skip_ax]
    n_ax = np.min((nrows * ncols, n_plots))
    with PdfPages(filename + '.pdf') as pdf:
        if figsize is None:
            fig, ax = plt.subplots(nrows = nrows, ncols = ncols)
        else:
            fig, ax = plt.subplots(nrows = nrows, ncols = ncols, \
                figsize = figsize)
        for ii in range(n_ax):
            ##
            ax_temp = ax[rowcol_show[ii]]
            if cmap is None:
                cmap = 'RdYlGn'
            im = ax_temp.imshow(corr_array[:,:,ii], origin = 'upper', \
                vmin = -1, vmax = 1, cmap = cmap)
            ## diagonal reference line across the matrix
            group_x = [-0.5, n_x - 0.5]
            ax_temp.plot(group_x, group_x, color = 'k', lw = 1.5)
            ## black separator lines between index groups, on both axes
            if indices_groups is not None:
                groups_cs = np.cumsum(indices_groups)
                for kk in range(len(groups_cs)):
                    y_vals = np.repeat(groups_cs[kk], 2) - 0.5
                    ax_temp.plot(group_x, y_vals, color = 'k', lw = 1)
                    ax_temp.plot(y_vals, group_x, color = 'k', lw = 1)
            ##
            ## Set title as the indices (rather than as ylabels as in
            ## the other plotting functions)
            if titles is not None:
                if fontsize is None:
                    ax_temp.set_title(titles[ii])
                else:
                    ax_temp.set_title(titles[ii], fontsize = fontsize[0])
            ##
            ## x tick labels only on the bottom row
            if (rowcol_show[ii][0] == nrows - 1) and (ylabs is not None):
                ax_temp.set_xticks(np.arange(n_x))
                ax_temp.set_xticklabels([x for x in ylabs])
                if fontsize is None:
                    plt.setp(ax_temp.get_xticklabels())
                else:
                    plt.setp(ax_temp.get_xticklabels(), fontsize = fontsize[1])
                ax_temp.tick_params('x', labelrotation = 90)
            else:
                ax_temp.tick_params('x', bottom = False, labelbottom = False)
            ## y tick labels only on the left column
            if (rowcol_show[ii][1] == 0) and (ylabs is not None):
                ax_temp.set_yticks(np.arange(n_x))
                ax_temp.set_yticklabels([x for x in ylabs])
                if fontsize is None:
                    plt.setp(ax_temp.get_yticklabels())
                else:
                    plt.setp(ax_temp.get_yticklabels(), fontsize = fontsize[1])
            else:
                ax_temp.tick_params('y', left = False, labelleft = False)
        ##
        ## Switch off the remaining / skipped axes
        for ii in range(n_ax + len(skip_ax), nrows * ncols):
            ax[rowcol[ii]].axis('off')
        for ax_ind in skip_ax:
            ax[rowcol[ax_ind]].axis('off')
        ##
        if figpad is None:
            plt.tight_layout()
        else:
            plt.tight_layout(pad = figpad[0], \
                h_pad = figpad[1], w_pad = figpad[2])
        ##
        ## single shared colourbar on the right of the figure; `im` is the
        ## image from the last subplot drawn (all share vmin/vmax = -1/1)
        plt.subplots_adjust(right = 0.95)
        cax = plt.axes([0.95, 0.1, 0.02, 0.85])
        cbar = plt.colorbar(im, cax = cax)
        if fontsize is not None:
            cax.tick_params(labelsize = fontsize[2])
        cbar.set_ticks([-1, 0, 1])
        ##
        plt.savefig(filename + '.eps', format = 'eps', dpi = 200)
        pdf.savefig(fig)
        plt.close()
    return
## end function corr_plots
##
##
def corr_transforms_plot(corr_array, xlabs = None, ylabs = None, \
    x_groups = None, y_groups = None, figsize = None, cmap = None, \
    figpad = None, fontsize = None, filename = 'corr_transforms_plot'):
    ## Plot a single correlation heatmap (values in [-1, 1]) with optional
    ## group separator lines, and save it both as <filename>.pdf and
    ## <filename>.eps.
    ##
    ## corr_array: 2d array of correlations, shape (n_y, n_x)
    ## xlabs, ylabs: column/row tick labels; now skipped when None
    ##     (previously the default None crashed when building tick labels)
    ## x_groups, y_groups: group sizes along each axis; cumulative sums
    ##     mark the positions of black separator lines
    ## figsize, cmap, figpad: forwarded to matplotlib (defaults set here)
    ## fontsize: 3-element sequence; [1] is tick size, [2] colourbar size
    ## filename: output path without extension
    ##
    n_x = corr_array.shape[1]
    n_y = corr_array.shape[0]
    ##
    with PdfPages(filename + '.pdf') as pdf:
        if figsize is None:
            fig, ax = plt.subplots()
        else:
            fig, ax = plt.subplots(figsize = figsize)
        ##
        if cmap is None:
            cmap = 'RdYlGn'
        im = ax.imshow(corr_array, origin = 'upper', \
            vmin = -1, vmax = 1, cmap = cmap)
        x_lims = [-0.5, n_x - 0.5]
        y_lims = [-0.5, n_y - 0.5]
        ## Black separator lines between groups of variables
        if x_groups is not None:
            x_groups_cs = np.cumsum(x_groups)
            for kk in range(len(x_groups_cs)):
                x_vals = np.repeat(x_groups_cs[kk], 2) - 0.5
                ax.plot(x_vals, y_lims, color = 'k', lw = 1)
        if y_groups is not None:
            y_groups_cs = np.cumsum(y_groups)
            for kk in range(len(y_groups_cs)):
                y_vals = np.repeat(y_groups_cs[kk], 2) - 0.5
                ax.plot(x_lims, y_vals, color = 'k', lw = 1)
        ##
        ## Tick labels (only when provided)
        if xlabs is not None:
            ax.set_xticks(np.arange(n_x))
            ax.set_xticklabels([x for x in xlabs])
            if fontsize is not None:
                plt.setp(ax.get_xticklabels(), fontsize = fontsize[1])
            ax.tick_params('x', labelrotation = 90)
        ##
        if ylabs is not None:
            ax.set_yticks(np.arange(n_y))
            ax.set_yticklabels([x for x in ylabs])
            if fontsize is not None:
                plt.setp(ax.get_yticklabels(), fontsize = fontsize[1])
        ## Add a colourbar to the plot
        cbar = ax.figure.colorbar(im, ax = ax, fraction = 0.046, pad = 0.04)
        if fontsize is not None:
            cbar.ax.tick_params(labelsize = fontsize[2])
        cbar.set_ticks([-1, 0, 1])
        ##
        if figpad is None:
            plt.tight_layout()
        else:
            plt.tight_layout(pad = figpad[0], \
                h_pad = figpad[1], w_pad = figpad[2])
        ##
        plt.savefig(filename + '.eps', format = 'eps', dpi = 200)
        pdf.savefig(fig)
        plt.close()
    return
##
##
##
##
##############################################################
##############################################################
## ANALYTICAL SOLUTIONS
## for information theoretic indices, linear (Gaussian) process
##
## Computes the analytical solution for transfer entropy in the case of
## the linear process simulation (see simulated_data_log.py for the
## simulation and the preprint for the working)
def te_gaussian(lambda_, b_x, b_y, var_x, var_y):
    ## Analytical transfer entropy for the coupled linear (Gaussian)
    ## process with coupling strength lambda_, AR coefficients b_x, b_y
    ## and noise variances var_x, var_y.  The X -> Y direction carries no
    ## information in this model, so te_xy is identically zero.
    ## Returns the pair (te_xy, te_yx).
    w = 1 - b_x * b_y
    base = (1 - b_y ** 2) * w ** 2 * var_x ** 2
    cross = lambda_ ** 2 * var_x * var_y
    numer = base + 2 * w * cross + lambda_ ** 4 * var_y ** 2
    denom = base + (1 + b_x * b_y) * w * cross
    return 0, 0.5 * np.log(numer / denom)
##
##
##
## Similarly for coarse-grained transinformation rate in the case of
## the linear process simulation (see simulated_data_log.py for the
## simulation and the preprint for the working)
def ctir_gaussian(lambda_, b_x, b_y, var_x, var_y, tau_max):
    ## Analytical coarse-grained transinformation rate (CTIR) for the
    ## coupled linear (Gaussian) process, averaged over lags 1..tau_max.
    ## Returns the pair (ctir_xy, ctir_yx).
    u = (1 - b_y ** 2) / var_y
    v = 1 - b_x ** 2
    w = 1 - b_x * b_y
    ##
    def geom_sum(n):
        ## sum_{i=0}^{n} b_x^i b_y^(n-i); empty sum (0) for n < 0
        if n < 0:
            return 0
        return np.sum([b_x ** ii * b_y ** (n - ii) for ii in range(n + 1)])
    ##
    ## Lag-n auto- and cross-covariances of the stationary process
    def c_yy(n):
        return (b_y ** n) / u
    def c_xx(n):
        return (b_x ** n) * var_x / v + \
            lambda_ ** 2 / (u * v * w) * \
            (v * geom_sum(n) + b_x ** (n + 1) * (b_x + b_y))
    def c_yx(n):
        return lambda_ / (u * w) * (b_x ** n * b_y + w * geom_sum(n - 1))
    def c_xy(n):
        return lambda_ / (u * w) * b_y ** (n + 1)
    ##
    ## Each row holds the four (unhalved) log-determinant terms at one lag
    mi_terms = np.zeros((tau_max, 4))
    for lag in range(1, tau_max + 1):
        det_yx = c_yy(0) * c_xx(0) - c_yx(0) ** 2
        det_yxn = c_yy(0) * c_xx(0) - c_yx(lag) ** 2
        det_xyn = c_yy(0) * c_xx(0) - c_xy(lag) ** 2
        det_xxn = c_xx(0) ** 2 - c_xx(lag) ** 2
        det_yyn = c_yy(0) ** 2 - c_yy(lag) ** 2
        det_yxxn = c_yy(0) * c_xx(0) ** 2 + \
            2 * c_xy(0) * c_xx(lag) * c_yx(lag) - c_xx(0) * c_yx(lag) ** 2 - \
            c_yy(0) * c_xx(lag) ** 2 - c_xx(0) * c_xy(0) ** 2
        det_xyyn = c_xx(0) * c_yy(0) ** 2 + \
            2 * c_yx(0) * c_yy(lag) * c_xy(lag) - c_yy(0) * c_xy(lag) ** 2 - \
            c_xx(0) * c_yy(lag) ** 2 - c_yy(0) * c_yx(0) ** 2
        row = lag - 1
        mi_terms[row, 0] = np.log((det_yx * det_yyn) / (det_xyyn * c_yy(0)))
        mi_terms[row, 1] = np.log((det_yx * det_xxn) / (det_yxxn * c_xx(0)))
        mi_terms[row, 2] = np.log((c_xx(0) * c_yy(0)) / det_xyn)
        mi_terms[row, 3] = np.log((c_xx(0) * c_yy(0)) / det_yxn)
    mi_terms *= 0.5
    ctir_xy = (2 * mi_terms[:, 0] - mi_terms[:, 2] - mi_terms[:, 3]).mean() / 2
    ctir_yx = (2 * mi_terms[:, 1] - mi_terms[:, 2] - mi_terms[:, 3]).mean() / 2
    return ctir_xy, ctir_yx
##
##
##
##############################################################
##############################################################
##############################################################
## Argument parsing
## For input arguments when running the script (i.e. a subset of indices,
## verbose, logging time values)
## Command-line interface: select which figures and tables to produce.
parser = argparse.ArgumentParser( \
    description = 'Simulations for causality indices')
parser.add_argument('--figure', '--fig', default = 'all', dest = 'fig', \
    help = 'Figures to be generated')
parser.add_argument('--table', '--tab', default = 'all', dest = 'tab', \
    help = 'Tables to be printed')
##
args = parser.parse_args()
##
## Normalise both selections to lists; 'all' expands to the full set,
## anything else is treated as a single named figure/table.
all_figures = ['lp', 'ul', 'hu', 'hb', 'ul-scaling', 'ul-rounding', \
    'ul-missing', 'ul-gaussian', 'corr-all', 'corr-ul-transforms']
all_tables = ['ul-transforms', 'computational-times']
figures = all_figures if args.fig == 'all' else [args.fig]
tables = all_tables if args.tab == 'all' else [args.tab]
##
##
##############################################################
##############################################################
##############################################################
## Shared figure values
##
## Input CSVs live in file_dir; all figures/tables are written to plot_dir
file_dir = 'simulation-data/'
plot_dir = 'figures/'
##
## Number of simulation repeats and the coupling-strength grids
n_runs = 10
lambda_vals = np.arange(0, 1 + 0.01, 0.01)
## NOTE: lambda_vals_hb is different here than from simulated_data_log.py (only
## the first two elements of the tuple)
lambda_vals_hb = (np.arange(0, 0.41, 0.01), np.arange(0, 0.41, 0.01))
n_lambda = len(lambda_vals)
n_lambda_hb = np.prod([len(x) for x in lambda_vals_hb])
## Causality indices in the order used by the *_values.csv column slicing
## further below
indices = ['te', 'ete', 'te-ksg', 'ctir', 'egc', 'nlgc', 'pi', 'si1']
indices = indices + ['si2', 'ccm']
##
xy_list = ['xy', 'yx']
inds = [x + '_' + y for x in indices for y in xy_list]
n_inds = len(indices)
## Timing columns: some index pairs (te/ete, si1/si2) share one timing entry
inds_time = ['te' in indices or 'ete' in indices, \
    'te-ksg' in indices, 'ctir' in indices, 'egc' in indices, \
    'nlgc' in indices, 'pi' in indices, \
    'si1' in indices or 'si2' in indices, 'ccm' in indices]
n_time = sum(inds_time)
##
## Display names for the indices in the two subplot orderings
indices_plot1 = ['TE (H)', 'ETE (H)', 'TE (KSG)', 'CTIR', 'EGC', 'NLGC']
indices_plot1 = indices_plot1 + ['PI', r'SI$^{(1)}$', r'SI$^{(2)}$', 'CCM']
indices_plot2 = ['TE (H)', 'ETE (H)', 'TE (KSG)', 'CTIR', 'EGC', 'NLGC', 'PI']
indices_plot2 = indices_plot2 + ['EGC*', r'SI$^{(1)}$', r'SI$^{(2)}$', 'CCM']
## Shared keyword arguments forwarded to the sim_plot* helpers
plot_params = {'lambda_vals': lambda_vals, 'skip_ax': [7], \
    'ylabs': indices_plot1, 'figsize': [10, 7], 'figpad': [1, 0.1, 0.01]}
plot_params_egc = {'lambda_vals': lambda_vals, 'skip_ax': list(), \
    'ylabs': indices_plot2, 'figsize': [10, 7], 'figpad': [1, 0.1, 0.01]}
plot_params_hb = {'lambda_vals': lambda_vals_hb, \
    'skip_ax': [7], 'transpose': True, 'cmap': dv.RdYlBu_11_r.mpl_colormap, \
    'titles': indices_plot1, 'figsize': [11, 7], 'figpad': [1, 0.1, 0.01]}
##
## Expected (values, times) array shapes for each simulation's CSVs
lp_shape = (n_runs, n_lambda, 2 * n_inds), (n_runs, n_lambda, n_time)
ul_shape = (n_runs, n_lambda, 4 * n_inds), (n_runs, n_lambda, 2 * n_time)
hu_shape = (n_runs, n_lambda, 3 * n_inds), (n_runs, n_lambda, 3 * n_time)
hb_shape = (n_runs, n_lambda_hb * 2, n_inds), (n_runs, n_lambda_hb * 2, n_time)
##
##
##############################################################
##############################################################
##############################################################
## Correlations between methods (Figure 1)
##
if 'corr-all' in figures:
    ## Figure 1: between-index correlation heatmaps.  All simulations and
    ## directions are concatenated along one sample axis; jjs holds the
    ## segment start offsets (10 boundaries spaced n_lambda apart, then
    ## the ends of the two HB halves).
    jjs = np.hstack((np.arange(10) * n_lambda, \
        n_lambda_hb + n_lambda * 9, n_lambda_hb * 2 + n_lambda * 9))
    try:
        lp_results = load_reshape(file_dir + 'lp_values', lp_shape[0])
        ul_results = load_reshape(file_dir + 'ul_values', ul_shape[0])
        hu_results = load_reshape(file_dir + 'hu_values', hu_shape[0])
        hb_results = load_reshape(file_dir + 'hb_values', hb_shape[0])
    except Exception as err:
        print('At least one *_values.csv missing from ' + file_dir)
        raise
    ##
    ## Assemble one (runs, samples, indices) array: each segment takes one
    ## direction (even/odd columns) or series length of a simulation
    all_results = np.zeros((n_runs, 9 * n_lambda + 2 * n_lambda_hb, n_inds))
    all_results[:,jjs[0]:jjs[1],:] = lp_results[:,:,range(0, 2 * n_inds, 2)]
    all_results[:,jjs[1]:jjs[2],:] = lp_results[:,:,range(1, 2 * n_inds, 2)]
    all_results[:,jjs[2]:jjs[3],:] = ul_results[:,:,range(0, 2 * n_inds, 2)]
    all_results[:,jjs[3]:jjs[4],:] = ul_results[:,:,range(1, 2 * n_inds, 2)]
    all_results[:,jjs[4]:jjs[5],:] = \
        ul_results[:,:,range(2 * n_inds, 4 * n_inds, 2)]
    all_results[:,jjs[5]:jjs[6],:] = \
        ul_results[:,:,range(2 * n_inds + 1, 4 * n_inds, 2)]
    all_results[:,jjs[6]:jjs[7],:] = hu_results[:,:,range(0, n_inds)]
    all_results[:,jjs[7]:jjs[8],:] = hu_results[:,:,range(n_inds, 2 * n_inds)]
    all_results[:,jjs[8]:jjs[9],:] = \
        hu_results[:,:,range(2 * n_inds, 3 * n_inds)]
    all_results[:,jjs[9]:jjs[11],:] = hb_results
    ##
    ## Remove anomalous results (as above)
    ind_exclude = [16, 17, 18, 19, 81, 82, 83, 84]
    all_results[:,jjs[2] + ind_exclude, :] = np.nan
    all_results[:,jjs[3] + ind_exclude, :] = np.nan
    all_results[:,jjs[4] + ind_exclude, :] = np.nan
    all_results[:,jjs[5] + ind_exclude, :] = np.nan
    all_results[:,jjs[6] + 70:jjs[7],4] = np.nan
    all_results[:,jjs[7] + 70:jjs[8],4] = np.nan
    all_results[:,jjs[8] + 70:jjs[9],4] = np.nan
    ##
    ## Blank any HB sample where some index is NaN across all runs, then
    ## any sample where some run is NaN for all indices
    z = all_results[:,jjs[9]:jjs[11],:]
    z[:,np.isnan(z).all(axis = 0).any(axis = 1),:] = np.nan
    all_results[:,jjs[9]:jjs[11],:] = z
    all_results[:,np.isnan(all_results).all(axis = 2).any(axis = 0),:] = np.nan
    ## Clip extreme HB values (beyond a percentile-derived limit) to NaN,
    ## per index and per HB half
    for ii in range(10):
        z = all_results[:,jjs[9]:jjs[10],ii]
        percentiles = np.nanpercentile(np.nanmean(z, axis = 0), [5, 95])
        vlim = np.max(np.abs(percentiles) + np.diff(percentiles))
        z[z > vlim] = np.nan
        z[z < -vlim] = np.nan
        all_results[:,jjs[9]:jjs[10],ii] = z
        z = all_results[:,jjs[10]:jjs[11],ii]
        percentiles = np.nanpercentile(np.nanmean(z, axis = 0), [5, 95])
        vlim = np.max(np.abs(percentiles) + np.diff(percentiles))
        z[z > vlim] = np.nan
        z[z < -vlim] = np.nan
        all_results[:,jjs[10]:jjs[11],ii] = z
    ##
    ## Per-segment index-vs-index correlations: Pearson below the
    ## diagonal, Spearman on and above it
    corr_array = np.zeros((n_inds, n_inds, len(jjs)))
    for ii in range(len(jjs) - 1):
        z = all_results[:,jjs[ii]:jjs[ii + 1],:]
        c = pd.DataFrame(z.reshape(-1, n_inds)).corr(method = 'pearson')
        c = np.array(c)
        sp_corr = pd.DataFrame(z.reshape(-1, n_inds)).corr(method = 'spearman')
        sp_corr = np.array(sp_corr)
        c[np.triu_indices(n_inds)] = sp_corr[np.triu_indices(n_inds)]
        corr_array[:,:,ii] = c
    ##
    ## For the paired-direction segments, correlate the directional
    ## differences D = i_xy - i_yx instead; the final slice of corr_array
    ## is the mean over these and the directional simulations
    corr_array_mean = np.zeros((n_inds, n_inds, 8))
    for ii in range(3):
        z = all_results[:,jjs[2 * ii]:jjs[2 * ii + 1],:] - \
            all_results[:,jjs[2 * ii + 1]:jjs[2 * ii + 2],:]
        c = pd.DataFrame(z.reshape(-1, n_inds)).corr(method = 'pearson')
        c = np.array(c)
        sp_corr = pd.DataFrame(z.reshape(-1, n_inds)).corr(method = 'spearman')
        sp_corr = np.array(sp_corr)
        c[np.triu_indices(n_inds)] = sp_corr[np.triu_indices(n_inds)]
        corr_array_mean[:,:,ii] = c
    corr_array_mean[:,:,3:8] = corr_array[:,:,6:11]
    corr_array[:,:,-1] = corr_array_mean[:,:,:-1].mean(axis = 2)
    ##
    ## Subplot titles for each segment
    sims = [r'LP, $T = 10^4,~i_{X \rightarrow Y}$', \
        r'LP, $T = 10^4,~i_{Y \rightarrow X}$', \
        r'UL, $T = 10^3,~i_{X \rightarrow Y}$', \
        r'UL, $T = 10^3,~i_{Y \rightarrow X}$', \
        r'UL, $T = 10^5,~i_{X \rightarrow Y}$', \
        r'UL, $T = 10^5,~i_{Y \rightarrow X}$', \
        r'HU, $T = 10^3,~D_{X \rightarrow Y}$', \
        r'HU, $T = 10^4,~D_{X \rightarrow Y}$', \
        r'HU, $T = 10^5,~D_{X \rightarrow Y}$', \
        r'HB(I), $T = 10^4,~D_{X \rightarrow Y}$', \
        r'HB(NI), $T = 10^4,~D_{X \rightarrow Y}$', \
        r'Mean, $D_{X \rightarrow Y}$']
    ##
    corr_plots(corr_array, titles = sims, ylabs = indices_plot1, \
        indices_groups = [4, 3, 3], figsize = [10, 7], \
        figpad = [0.6, 0.5, 0.1], fontsize = [12, 9, 7], \
        cmap = dv.PRGn_11.mpl_colormap, filename = plot_dir + 'corr_plots')
##
##############################################################
## LINEAR PROCESS (FIGURE 3)
if 'lp' in figures:
    ## Figure 3, linear process: mean +/- sd of each index over runs,
    ## with the analytical Gaussian solutions overlaid where available.
    try:
        lp_results = load_reshape(file_dir + 'lp_values', lp_shape[0])
    except Exception as err:
        print('lp_values.csv missing from ' + file_dir)
        raise
    ##
    ## Analytical TE and CTIR curves for the simulated parameter values
    lp_params_gaussian = {'b_x': 0.8, 'b_y': 0.4, 'var_x': 0.2, 'var_y': 0.2}
    lp_te_gaussian = [te_gaussian(x, **lp_params_gaussian) for x in lambda_vals]
    lp_te_gaussian = np.array(lp_te_gaussian)
    lp_ctir_gaussian = [ctir_gaussian(x, **lp_params_gaussian, tau_max = 20) \
        for x in lambda_vals]
    lp_ctir_gaussian = np.array(lp_ctir_gaussian)
    ## One analytical curve per plotted index; None where none is drawn
    lp_as = [lp_te_gaussian, lp_te_gaussian, lp_te_gaussian, lp_ctir_gaussian, \
        None, None, None, None, None, None]
    ##
    lp_cols = [ql.Paired_4.mpl_colors[x] for x in list([1, 3, 0, 2])]
    lp_means = np.nanmean(lp_results, axis = 0).reshape(n_lambda, n_inds, 2)
    lp_std = np.nanstd(lp_results, axis = 0).reshape(n_lambda, n_inds, 2)
    labelpads = [None, None, None, -2, None, None, None, None, None, None]
    ##
    sim_plot1(mean_vals = lp_means, std_vals = lp_std, **plot_params, \
        labelpads = labelpads, analytic_solutions = lp_as, cols = lp_cols, \
        filename = plot_dir + 'lp_figure')
##
##############################################################
## HENON UNIDIRECTIONAL (Figure 4b)
if 'hu' in figures:
    ## Figure 4b, unidirectional Henon maps: three series lengths per
    ## subplot, with the EGC column duplicated as an extra 'EGC*' panel.
    try:
        hu_results = load_reshape(file_dir + 'hu_values', hu_shape[0])
    except Exception as err:
        print('hu_values.csv missing from ' + file_dir)
        raise
    ##
    hu_reshape = (n_lambda, n_inds, 3)
    hu_means = np.nanmean(hu_results, axis = 0).reshape(hu_reshape, order = 'F')
    hu_std = np.nanstd(hu_results, axis = 0).reshape(hu_reshape, order = 'F')
    ##
    ## Insert a copy of the EGC column (index 4) at position 7 ('EGC*' in
    ## indices_plot2), blanked from lambda index 70 onwards
    hu_means1 = np.zeros((n_lambda, n_inds + 1, 3))
    hu_means1[:,:7,:] = hu_means[:,:7,:]
    hu_means1[:,8:,:] = hu_means[:,7:,:]
    hu_means1[:,7,:] = hu_means[:,4,:]
    hu_means1[70:,7,:] = np.nan
    ##
    ## Same insertion for the standard deviations
    hu_std1 = np.zeros((n_lambda, n_inds + 1, 3))
    hu_std1[:,:7,:] = hu_std[:,:7,:]
    hu_std1[:,8:,:] = hu_std[:,7:,:]
    hu_std1[:,7,:] = hu_std[:,4,:]
    hu_std1[70:,7,:] = np.nan
    ##
    labelpads = [None, None, None, None, -4, -6, 1, None, None, None, None]
    hu_cols = ql.Dark2_3.mpl_colors[:3]
    ## Dash specs: solid, dotted, dashed (one per series length)
    hu_ls = [(0, (1, 0)), (0, (1, 1)), (0, (5, 2))]
    ##
    sim_plot2(mean_vals = hu_means1, std_vals = hu_std1, **plot_params_egc, \
        cols = hu_cols, linestyles = hu_ls, labelpads = labelpads, \
        filename = plot_dir + 'hu_figure')
##
##############################################################
## HENON BIDIRECTIONAL (Figure 5)
if 'hb' in figures:
    ## Figure 5, bidirectional Henon maps: heatmaps of the mean index
    ## value over the coupling grid, for the identical (first half of the
    ## rows) and non-identical (second half) parameter settings.
    try:
        hb_results = load_reshape(file_dir + 'hb_values', hb_shape[0])
    except Exception as err:
        print('hb_values.csv missing from ' + file_dir)
        raise
    ##
    hb_means = np.nanmean(hb_results, axis = 0)
    n_lambda_hb1 = len(lambda_vals_hb[0])
    hb_means1 = hb_means[:n_lambda_hb,:]
    hb_means1 = hb_means1.reshape(n_lambda_hb1, n_lambda_hb1, n_inds)
    hb_means2 = hb_means[n_lambda_hb:,:]
    hb_means2 = hb_means2.reshape(n_lambda_hb1, n_lambda_hb1, n_inds)
    ##
    ## Symmetric colour limits per index from the given percentiles of
    ## its mean values.  (An earlier, unused definition of hb_vlims with
    ## an extra percentile cap was immediately shadowed by this one and
    ## has been removed as dead code.)
    def hb_vlims(vals, percentiles = (5, 95)):
        n_inds = vals.shape[2]
        vlims = np.zeros((n_inds, 2))
        for ii in range(n_inds):
            minmax = np.abs(np.nanpercentile(vals[:, :, ii], percentiles))
            vlims[ii, :] = np.min(np.max(minmax)) * np.array([-1, 1])
        return vlims
    ##
    vlims1 = hb_vlims(hb_means1, percentiles = [1, 99])
    vlims2 = hb_vlims(hb_means2, percentiles = [5, 95])
    sim_plot3(vals = hb_means1, **plot_params_hb, vlims = vlims1, \
        filename = plot_dir + 'hb_figure1')
    sim_plot3(vals = hb_means2, **plot_params_hb, vlims = vlims2, \
        filename = plot_dir + 'hb_figure2')
##
##
##############################################################
## ULAM LATTICE (Figure 4a)
##
def ul_cols(n, type = 'diverging', cbrewer = None, seq = None):
    ## Build a colour array for the Ulam-lattice plots.
    ##
    ## n: number of colours per group
    ## type: 'diverging' (two groups of n from the two ends of a
    ##     diverging colormap) or 'qualitative' (one group of n)
    ## cbrewer: palettable palette (per-type default applied here)
    ## seq: for 'diverging', only the first four entries are used, as
    ##     [start1, end1, start2, end2] positions within the colormap
    ## Returns an array of colours with shape (groups, n, channels).
    if type == 'diverging':
        if seq is None:
            seq = np.linspace(0, 1, 2 * n)
        ## (two dead assignments seq1 = seq[:n] / seq2 = seq[n:] that were
        ## immediately overwritten have been removed)
        seq1 = np.linspace(seq[0], seq[1], n)
        seq2 = np.flip(np.linspace(seq[2], seq[3], n))
        if cbrewer is None:
            cbrewer = dv.RdBu_11_r
        cols = np.stack([lcm(cbrewer.mpl_colormap(seq1)).colors, \
            lcm(cbrewer.mpl_colormap(seq2)).colors])
    elif type == 'qualitative':
        if cbrewer is None:
            cbrewer = ql.Set1_9
        if n > 9:
            raise ValueError('Max value of n for type "qualitative" is 9')
        cols = np.array(cbrewer.colors)[np.newaxis, :n] / 256
    else:
        ## previously an unknown type fell through to UnboundLocalError
        raise ValueError('type must be "diverging" or "qualitative"')
    return cols
##
## Shared axis-label strings and dash specs for the Ulam-lattice figures
ixy_str = r'i$_{X\rightarrow Y}$'
iyx_str = r'i$_{Y\rightarrow X}$'
## Dash specs: solid, dotted, dashed, dash-dot
ul_ls = [(0, (1, 0)), (0, (1, 1)), (0, (5, 2)), (0, (5, 2, 1, 2))]
##
def ul_ylims(means, std, pad = 0.05):
    ## Common y-axis limits per index (axis 1 of the 4d arrays), covering
    ## mean +/- sd over all other axes and padded outward by `pad` of the
    ## resulting range.  Returns an array of shape (n_inds, 2).
    upper = np.nanmax(means + std, axis = (0, 2, 3))
    lower = np.nanmin(means - std, axis = (0, 2, 3))
    span = upper - lower
    return np.stack((lower - pad * span, upper + pad * span), axis = 1)
##
##
if 'ul' in figures:
    ## Figure 4a, Ulam lattice: two series lengths (T = 10^3, 10^5) and
    ## both directions per index, mean +/- sd over runs.
    try:
        ul_results = load_reshape(file_dir + 'ul_values', ul_shape[0])
    except Exception as err:
        print('ul_values.csv missing from ' + file_dir)
        raise
    ##
    ## Target shape: (lambda, index, direction, series length)
    ul_shape1 = (n_lambda, n_inds, 2, 2)
    ##
    ul_means = np.nanmean(ul_results, axis = 0).reshape(n_lambda, 2 * n_inds, 2)
    ul_std = np.nanstd(ul_results, axis = 0).reshape(n_lambda, 2 * n_inds, 2)
    ##
    ## Mask anomalous lambda rows in column 14 before reshaping
    ul_means[[17,18,81,82],14,:] = np.nan
    ul_means = ul_means.reshape(ul_shape1, order = 'f').swapaxes(2,3)
    ul_std = ul_std.reshape(ul_shape1, order = 'f').swapaxes(2,3)
    ##
    ylims = ul_ylims(ul_means, ul_std)
    ul_names = np.array([ixy_str + r': $T = 10^3$', iyx_str + r': $T = 10^3$', \
        ixy_str + r': $T = 10^5$', iyx_str + r': $T = 10^5$']).reshape(-1, 2)
    labelpads = [None, None, None, -8, None, None, None, -4, -4, None]
    sim_plot4(ul_means, ul_std, **plot_params, tf_names = ul_names, \
        cols = ul_cols(2, seq = [0.1, 0.3, 0.7, 0.9]), \
        linestyles = ul_ls[:2], labelpads = labelpads, ylims = ylims, \
        filename = plot_dir + 'ul_figure')
##
##
##############################################################
## ULAM LATTICE, TRANSFORMATIONS (Figure S2, S3)
##
## Transformations applied to the Ulam-lattice data (one column of
## ult_values.csv each, in this order): standardise; scale X or Y by 10;
## round X, Y, or both; missing values in X/Y; added Gaussian noise
tf_list = np.array([{'normalise': True}, \
    {'scale_x': 10, 'scale_y': 1}, {'scale_x': 1, 'scale_y': 10},
    {'round_x': 1}, {'round_y': 1}, {'round_x': 2, 'round_y': 2}, \
    {'na_x': 10, 'na_y': 0}, {'na_x': 20, 'na_y': 20}, \
    {'gaussian_x': 0.1, 'gaussian_y': 0.1}, \
    {'gaussian_x': 1}, {'gaussian_y': 1}])
n_tf = len(tf_list)
##
## Expected (values, times) array shapes for the transformed results
ult_shape = (n_runs, n_lambda, 2 * n_inds, n_tf), \
    (n_runs, n_lambda, n_time, n_tf)
##
## Figures S2/S3 and the transform correlation plot all need the same
## transformed Ulam-lattice summaries, so they share one guarded block
ul_transforms = ['ul-scaling', 'ul-rounding', 'ul-missing', 'ul-gaussian']
ul_transforms = ul_transforms + ['corr-ul-transforms']
if any(x in figures for x in ul_transforms):
    try:
        ul_results = load_reshape(file_dir + 'ul_values', ul_shape[0])
        ult_results = load_reshape(file_dir + 'ult_values', ult_shape[0])
    except Exception as err:
        print('\nul_values.csv or ult_values.csv missing from ' + file_dir)
        raise
    ##
    ## Untransformed baseline: (lambda, index, direction, series length)
    ul_shape1 = (n_lambda, n_inds, 2, 2)
    ##
    ul_means = np.nanmean(ul_results, axis = 0).reshape(n_lambda, 2 * n_inds, 2)
    ul_std = np.nanstd(ul_results, axis = 0).reshape(n_lambda, 2 * n_inds, 2)
    ##
    ## Mask anomalous lambda rows in column 14 (as in the 'ul' figure)
    ul_means[[17,18,81,82],14,:] = np.nan
    ul_means = ul_means.reshape(ul_shape1, order = 'f').swapaxes(2,3)
    ul_std = ul_std.reshape(ul_shape1, order = 'f').swapaxes(2,3)
    ##
    ## Per-transformation means/sds: (lambda, index, direction, transform)
    ult_means = np.nanmean(ult_results, axis = 0)
    ult_means = ult_means.reshape(n_lambda, n_inds, 2, n_tf)
    ult_std = np.nanstd(ult_results, axis = 0)
    ult_std = ult_std.reshape(n_lambda, n_inds, 2, n_tf)
    ##
    ## Baseline slice kept with a length-1 last axis for np.insert below
    ul_means1n = ul_means[:,:,:,:1]
    ul_std1n = ul_std[:,:,:,:1]
    ##
    if 'ul-scaling' in figures:
        ylims = ul_ylims(np.insert(ult_means, [0], ul_means, axis = 3), \
            np.insert(ult_std, [0], ul_std, axis = 3))
        ## Baseline followed by transforms 0-2 (standardise, scale X/Y)
        ult_means1 = np.insert(ult_means[:,:,:,:3], [0], ul_means1n, axis = 3)
        ult_std1 = np.insert(ult_std[:,:,:,:3], [0], ul_std1n, axis = 3)
        tf_names = np.array([ixy_str, iyx_str, \
            ixy_str + ': Standardised', iyx_str + ': Standardised',
            r'i$_{10X\rightarrow Y}$', r'i$_{Y\rightarrow 10X}$',
            r'i$_{X\rightarrow 10Y}$', r'i$_{10Y\rightarrow X}$']).reshape(-1, 2)
        labelpads = [None, None, None, -8, None, -2, -4, -4, -4, None]
        sim_plot4(ult_means1, ult_std1, **plot_params, tf_names = tf_names, \
            cols = ul_cols(4, seq = [0.1, 0.3, 0.7, 0.9]), \
            linestyles = ul_ls[:4], labelpads = labelpads, ylims = ylims, \
            filename = plot_dir + 'ul_scaling_figure')
    ##
    if 'ul-rounding' in figures:
        ## Exclude the scaling transforms from the shared y limits
        ult_means_ylim = ult_means.copy()
        ult_means_ylim[:,5:7,:,1:3] = np.nan
        ylims = ul_ylims(np.insert(ult_means_ylim, [0], ul_means, axis = 3), \
            np.insert(ult_std, [0], ul_std, axis = 3))
        ## Baseline followed by transforms 3-5 (rounding)
        ult_means2 = np.insert(ult_means[:,:,:,3:6], [0], ul_means1n, axis = 3)
        ult_std2 = np.insert(ult_std[:,:,:,3:6], [0], ul_std1n, axis = 3)
        tf_names = np.array([ixy_str, iyx_str,
            ixy_str + ': X to 1dp', iyx_str + ': X to 1dp',
            ixy_str + ': Y to 1dp', iyx_str + ': Y to 1dp',
            ixy_str + ': X, Y to 2dp', iyx_str + ': X, Y to 2dp']).reshape(-1, 2)
        labelpads = [None, None, None, -8, None, None, None, -4, -4, None]
        sim_plot4(ult_means2, ult_std2, **plot_params, tf_names = tf_names, \
            cols = ul_cols(4, seq = [0.1, 0.3, 0.7, 0.9]), \
            linestyles = ul_ls[:4], labelpads = labelpads, ylims = ylims, \
            filename = plot_dir + 'ul_rounding_figure')
    ##
    if 'ul-missing' in figures:
        ult_means_ylim = ult_means.copy()
        ult_means_ylim[:,5:7,:,1:3] = np.nan
        ylims = ul_ylims(np.insert(ult_means_ylim, [0], ul_means, axis = 3), \
            np.insert(ult_std, [0], ul_std, axis = 3))
        ## Baseline followed by transforms 6-7 (missing values)
        ult_means3 = np.insert(ult_means[:,:,:,6:8], [0], ul_means1n, axis = 3)
        ult_std3 = np.insert(ult_std[:,:,:,6:8], [0], ul_std1n, axis = 3)
        tf_names = np.array([ixy_str, iyx_str,
            ixy_str + ': X, Y 10% NA', iyx_str + ': X, Y 10% NA',
            ixy_str + ': X, Y 20% NA', iyx_str + ': X, Y 20% NA']).reshape(-1, 2)
        labelpads = [None, None, None, -8, None, None, None, -4, -4, None]
        sim_plot4(ult_means3, ult_std3, **plot_params, tf_names = tf_names, \
            cols = ul_cols(3, seq = [0.1, 0.3, 0.7, 0.9]), \
            linestyles = ul_ls[:3], labelpads = labelpads, ylims = ylims, \
            filename = plot_dir + 'ul_missing_figure')
    ##
    if 'ul-gaussian' in figures:
        ult_means_ylim = ult_means.copy()
        ult_means_ylim[:,5:7,:,1:3] = np.nan
        ylims = ul_ylims(np.insert(ult_means_ylim, [0], ul_means, axis = 3), \
            np.insert(ult_std, [0], ul_std, axis = 3))
        ## Baseline followed by transforms 8-10 (Gaussian noise)
        ult_means4 = np.insert(ult_means[:,:,:,8:], [0], ul_means1n, axis = 3)
        ult_std4 = np.insert(ult_std[:,:,:,8:], [0], ul_std1n, axis = 3)
        tf_names = np.array([ixy_str, iyx_str, \
            ixy_str + r': $\sigma^2_G$ = 0.1', \
            iyx_str + r': $\sigma^2_G$ = 0.1', \
            ixy_str + r': $\sigma^2_{G,X}$ = 1', \
            iyx_str + r': $\sigma^2_{G,X}$ = 1', \
            ixy_str + r': $\sigma^2_{G,Y}$ = 1', \
            iyx_str + r': $\sigma^2_{G,Y}$ = 1']).reshape(-1, 2)
        yticks = [None, np.arange(0, 1.1, step = 0.2), None, \
            np.arange(-12.5, 1, step = 2.5), \
            np.arange(-8, 3, step = 2), None, None, \
            None, None, np.arange(0, 1.1, step = 0.2)]
        labelpads = [None, None, None, -8, None, None, None, -4, -4, None]
        sim_plot4(ult_means4, ult_std4, **plot_params, tf_names = tf_names, \
            cols = ul_cols(4, seq = [0.1, 0.3, 0.7, 0.9]), \
            linestyles = ul_ls[:4], labelpads = labelpads, ylims = ylims, \
            yticks = yticks, filename = plot_dir + 'ul_gaussian_figure')
    ##
    ##
    if 'corr-ul-transforms' in figures:
        ## Correlation of the directional difference D = i_xy - i_yx
        ## between transformations, per index
        ind_exclude = [16, 17, 18, 19, 81, 82, 83, 84]
        ind_include = [ii for ii in range(n_lambda) if ii not in ind_exclude]
        ##
        ult_D = ult_results[:,:,range(0, 2 * n_inds, 2),:] - \
            ult_results[:,:,range(1, 2 * n_inds, 2),:]
        ult_D = ult_D[:,ind_include,:,:]
        ##
        ul_D = ul_results[:,:,range(0, 4 * n_inds, 2)] - \
            ul_results[:,:,range(1, 4 * n_inds, 2)]
        ul_D = np.stack((ul_D[:,:,:n_inds], ul_D[:,:,n_inds:]), axis = 3)
        ul_D = ul_D[:,ind_include,:,:]
        ##
        ## Prepend the two untransformed baselines to the transforms
        ult_D = np.insert(ult_D, [0], ul_D, axis = 3)
        ult_corr_array = np.zeros((n_tf + 2, n_tf + 2, n_inds))
        for ii in range(n_inds):
            z = ult_D[:,:,ii,:]
            pe_corr = pd.DataFrame(z.reshape(-1, n_tf + 2)).corr(method = 'pearson')
            sp_corr = \
                pd.DataFrame(z.reshape(-1, n_tf + 2)).corr(method = 'spearman')
            ult_corr_array[:, :, ii] = np.array(pe_corr)
            ## Spearman on/above the diagonal, Pearson below, matching the
            ## corr-all block.  BUG FIX: was np.triu_indices(n_tf - 2),
            ## which only overwrote a 9x9 corner of the (n_tf+2)x(n_tf+2)
            ## matrix instead of its full upper triangle.
            mask = np.triu_indices(n_tf + 2)
            ult_corr_array[:, :, ii][mask] = np.array(sp_corr)[mask]
        ##
        ult_corr_ylabs = np.array([r'$T = 10^3$', r'$T = 10^5$', 'Stand.', \
            r'$D_{10X\rightarrow Y}$', r'$D_{X\rightarrow 10Y}$', r'$X$ to 1dp', \
            r'$Y$ to 1dp', r'$X$, $Y$ to 2dp', '10% NA', '20% NA', \
            r'$\sigma^2_G$ = 0.1', r'$\sigma^2_{G,X}$ = 1', \
            r'$\sigma^2_{G,Y}$ = 1'])
        ##
        ## Plot the correlation of each transformation with the baseline
        corr_transforms_plot(ult_corr_array[0,:,:], ylabs = ult_corr_ylabs, \
            xlabs = indices_plot1, y_groups = [2, 3, 3, 2, 3], \
            x_groups = [4, 3, 3], figsize = [5, 5], fontsize = [12, 9, 7], \
            figpad = [0.6, 0.1, 0.1], cmap = dv.PRGn_11.mpl_colormap, \
            filename = plot_dir + 'corr_transforms_plots')
##
##############################################################
##############################################################
##############################################################
## TABLES
##
##############################################################
## Ulam lattice transformations table (Table III)
##
if 'ul-transforms' in tables:
    ## Table III: effect of each transformation on the directional
    ## difference D = i_xy - i_yx, relative to the untransformed baseline,
    ## written out as LaTeX table rows.
    try:
        ul_results = load_reshape(file_dir + 'ul_values', ul_shape[0])
        ult_results = load_reshape(file_dir + 'ult_values', ult_shape[0])
    except Exception as err:
        print('ul_values.csv or ult_values.csv missing from ' + file_dir)
        raise
    ##
    ## Drop the anomalous lambda rows (same exclusions as the figures)
    ind_exclude = [16, 17, 18, 19, 81, 82, 83, 84]
    ind_include = [ii for ii in range(n_lambda) if ii not in ind_exclude]
    ##
    ult_D = ult_results[:,:,range(0, 2 * n_inds, 2),:] - \
        ult_results[:,:,range(1, 2 * n_inds, 2),:]
    ult_D = ult_D[:,ind_include,:,:]
    ##
    ul_D = ul_results[:,:,range(0, 4 * n_inds, 2)] - \
        ul_results[:,:,range(1, 4 * n_inds, 2)]
    ul_D = np.stack((ul_D[:,:,:n_inds], ul_D[:,:,n_inds:]), axis = 3)
    ul_D = ul_D[:,ind_include,:,:]
    ##
    ## Prepend the two untransformed baselines to the transforms
    ult_D = np.insert(ult_D, [0], ul_D, axis = 3)
    ##
    ## ult_table[:,:,0] holds mean differences (normalised by the
    ## baseline's mean magnitude); ult_table[:,:,1] holds sd ratios
    ult_table = np.zeros((n_inds, n_tf + 2, 2))
    denom = np.nanmean(np.abs(np.nanmean(ul_D[:,:,:,:1], axis = 0)), axis = 0)
    ult_table[:,0,0] = np.nanmean(ult_D[:,:,:,0], axis = (0, 1))
    ult_table[:,1:,0] = \
        np.nanmean(ult_D[:,:,:,:1] - ult_D[:,:,:,1:], axis = (0, 1))
    ult_table[:,1:,0] /= denom
    ult_table[:,0,1] = np.nanmean(np.nanstd(ul_D[:,:,:,0], axis = 0), axis = 0)
    ult_table[:,1:,1] = \
        np.nanmean(np.nanstd(ult_D[:,:,:,1:], axis = 0), axis = 0)
    ult_table[:,1:,1] /= ult_table[:,:1,1]
    ult_table = np.round(ult_table, 3)
    ## Row order for the table (EGC, NLGC, PI first, then the rest)
    inds_order = [4,5,6,0,1,2,3,7,8,9]
    str_table = ''
    for ii in inds_order:
        ## Two LaTeX rows per index: mean line then sd line
        str_table = str_table + indices_plot1[ii]
        str_table = str_table + r' & $\langle\mu\rangle$ = '
        str_table = str_table + str('%.3f' % ult_table[ii,0,0])
        str_table = str_table + r' & $f(\mu,\hat{\mu})$'
        for jj in range(1, n_tf + 2):
            str_table = str_table + ' & ' + str('%.3f' % ult_table[ii,jj,0])
        str_table = str_table + r' \\' + '\n'
        str_table = str_table + r' & $\langle\sigma\rangle$ = '
        str_table = str_table + str('%.3f' % ult_table[ii,0,1])
        str_table = str_table + r' & $g(\sigma,\hat{\sigma})$'
        for jj in range(1, n_tf + 2):
            str_table = str_table + ' & ' + str('%.3f' % ult_table[ii,jj,1])
        str_table = str_table + r' \\' + '\n'
    txt_table = open(plot_dir + 'ul-transforms.txt', 'w')
    txt_table.write(str_table)
    txt_table.close()
##
##
##############################################################
##############################################################
##############################################################
## Computation times (Table S.II)
##
if 'computational-times' in tables:
    ## Table S.II: mean (sd) computation time per index, one column per
    ## simulation/series length, written out as LaTeX table rows.
    try:
        lp_time = load_reshape(file_dir + 'lp_time', lp_shape[1])
        ul_time = load_reshape(file_dir + 'ul_time', ul_shape[1])
        hu_time = load_reshape(file_dir + 'hu_time', hu_shape[1])
        hb_time = load_reshape(file_dir + 'hb_time', hb_shape[1])
    except Exception as err:
        print('At least one *_time.csv missing from ' + file_dir)
        raise
    ##
    ## time_table[:,:,0] holds means, time_table[:,:,1] standard
    ## deviations; 8 columns: LP, UL (two lengths), HU (three lengths),
    ## HB (two halves)
    time_table = np.zeros((n_time, 8, 2))
    time_table[:,0,0] = np.nanmean(lp_time, axis = (0, 1))
    time_table[:,1,0] = np.nanmean(ul_time[:,:,:n_time], axis = (0, 1))
    time_table[:,2,0] = np.nanmean(ul_time[:,:,n_time:], axis = (0, 1))
    time_table[:,3,0] = np.nanmean(hu_time[:,:,:n_time], axis = (0, 1))
    time_table[:,4,0] = \
        np.nanmean(hu_time[:,:,n_time:(2 * n_time)],axis = (0, 1))
    time_table[:,5,0] = np.nanmean(hu_time[:,:,(2 * n_time):], axis = (0, 1))
    time_table[:,6,0] = np.nanmean(hb_time[:,:n_lambda_hb,:], axis = (0, 1))
    time_table[:,7,0] = np.nanmean(hb_time[:,n_lambda_hb:,:], axis = (0, 1))
    time_table[:,0,1] = np.nanstd(lp_time, axis = (0, 1))
    time_table[:,1,1] = np.nanstd(ul_time[:,:,:n_time], axis = (0, 1))
    time_table[:,2,1] = np.nanstd(ul_time[:,:,n_time:], axis = (0, 1))
    time_table[:,3,1] = np.nanstd(hu_time[:,:,:n_time], axis = (0, 1))
    time_table[:,4,1] = \
        np.nanstd(hu_time[:,:,n_time:(2 * n_time)], axis = (0, 1))
    time_table[:,5,1] = np.nanstd(hu_time[:,:,(2 * n_time):], axis = (0, 1))
    time_table[:,6,1] = np.nanstd(hb_time[:,:n_lambda_hb,:], axis = (0, 1))
    time_table[:,7,1] = np.nanstd(hb_time[:,n_lambda_hb:,:], axis = (0, 1))
    time_table = np.round(time_table, 3)
    ##
    indices_time = ['ETE (H)', 'TE (KSG)', 'CTIR', 'EGC', 'NLGC', 'PI']
    indices_time = indices_time + [r'SI$^{(1,2)}$', 'CCM']
    ## order is EGC, NLGC, PI, ETE (H), TE (KSG), CTIR, SI, CCM
    inds_order = [3,4,5,0,1,2,6,7]
    str_table = ''
    for ii in inds_order:
        ## One LaTeX row per index: 'mean (sd)' for each of the 8 columns
        str_table = str_table + indices_time[ii]
        for jj in range(8):
            str_table = str_table + ' & ' + str('%.3f' % time_table[ii,jj,0])
            str_table = str_table + ' (' + str('%.3f' % time_table[ii,jj,1])
            str_table = str_table + ') '
        str_table = str_table + r' \\' + '\n'
    txt_table = open(plot_dir + 'computational-times.txt', 'w')
    txt_table.write(str_table)
    txt_table.close()
##
##############################################################
## Transfer entropy from idtxl package
##
## idtxl package also calculates transfer entropy
# import numpy as np
# from idtxl.estimators_jidt import (JidtDiscreteMI, JidtDiscreteTE,
# JidtKraskovMI, JidtKraskovTE)
# ##
# settings_h = {}
# settings_h['history_target'] = 1
# settings_h['history_source'] = 1
# settings_h['source_target_delay'] = 1
# settings_h['discretise_method'] = 'equal'
# settings_h['n_discrete_bins'] = 8
# settings_h['noise_level'] = 0
# ##
# settings_ksg = {}
# settings_ksg['history_target'] = 1
# settings_ksg['history_source'] = 1
# settings_ksg['source_target_delay'] = 1
# settings_ksg['algorithm_num'] = 1
# settings_ksg['kraskov_k'] = 4
# settings_ksg['noise_level'] = 0
# settings_ksg['normalise'] = False
# settings_ksg['local_values'] = False
# ##
# est_ksg = JidtKraskovTE(settings_ksg)
# te_ksg = est_ksg.estimate(x, y)
# est_h = JidtDiscreteTE(settings_h)
# te_h = est_h.estimate(x, y)
##
## NOTE: kmeans vs cmeans computational burden!
## kmeans: shape (100, 3), clusters 50, time 0.27243233360350133
## kmeans: shape (1000, 3), clusters 50, time 1.325215889001265
## kmeans: shape (10000, 3), clusters 50, time 49.92137239077128
## kmeans: shape (999998, 3), clusters 50, time 962.7682773035951
## cmeans: shape (100, 3), clusters 50, time 0.5133263552095741
## cmeans: shape (1000, 3), clusters 50, time 25.340735886571927
## cmeans: shape (10000, 3), clusters 50, time 189.5237478673924
## cmeans: shape (999998, 3), clusters 50, time 221623.170088802
|
<gh_stars>1-10
import datetime
import pytz
from dateutil.relativedelta import relativedelta
from fast_pyspark_tester.sql.casts import get_time_formatter, get_unix_timestamp_parser
from fast_pyspark_tester.sql.expressions.expressions import Expression, UnaryExpression
from fast_pyspark_tester.sql.types import DateType, TimestampType, FloatType
from fast_pyspark_tester.utils import parse_tz
# GMT tzinfo object (used by the date/time expressions in this module)
GMT_TIMEZONE = pytz.timezone('GMT')
# Three-letter day-of-week abbreviations, Monday first
DAYS_OF_WEEK = ('MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN')
class AddMonths(Expression):
    """add_months(start_date, num_months): shift a date by whole calendar months."""
    def __init__(self, start_date, num_months):
        super().__init__(start_date)
        self.start_date = start_date
        self.num_months = num_months.get_literal_value()
        # BUG FIX: previously datetime.timedelta(days=num_months), which
        # added *days* instead of months.  relativedelta shifts by whole
        # calendar months, clamping the day to the target month's end.
        self.timedelta = relativedelta(months=self.num_months)
    def eval(self, row, schema):
        return self.start_date.cast(DateType()).eval(row, schema) + self.timedelta
    def __str__(self):
        return 'add_months({0}, {1})'.format(self.start_date, self.num_months)
class DateAdd(Expression):
    """date_add(start_date, num_days): shift a date forward by a literal day count."""
    def __init__(self, start_date, num_days):
        super().__init__(start_date)
        days = num_days.get_literal_value()
        self.start_date = start_date
        self.num_days = days
        self.timedelta = datetime.timedelta(days=days)
    def eval(self, row, schema):
        start = self.start_date.cast(DateType()).eval(row, schema)
        return start + self.timedelta
    def __str__(self):
        return 'date_add({0}, {1})'.format(self.start_date, self.num_days)
class DateSub(Expression):
    """date_sub(start_date, num_days): shift a date column backward by a
    literal number of days.
    """
    def __init__(self, start_date, num_days):
        super().__init__(start_date)
        self.start_date = start_date
        self.num_days = num_days.get_literal_value()
        # Constant per-expression offset, computed once.
        self.timedelta = datetime.timedelta(days=self.num_days)
    def eval(self, row, schema):
        as_date = self.start_date.cast(DateType()).eval(row, schema)
        return as_date - self.timedelta
    def __str__(self):
        return f'date_sub({self.start_date}, {self.num_days})'
class Year(UnaryExpression):
    """Extract the year component of a date column."""
    def eval(self, row, schema):
        as_date = self.column.cast(DateType()).eval(row, schema)
        return as_date.year
    def __str__(self):
        return f'year({self.column})'
class Month(UnaryExpression):
    """Extract the month component (1-12) of a date column."""
    def eval(self, row, schema):
        as_date = self.column.cast(DateType()).eval(row, schema)
        return as_date.month
    def __str__(self):
        return f'month({self.column})'
class Quarter(UnaryExpression):
    """Quarter of the year (1-4) for a date column."""
    def eval(self, row, schema):
        month = self.column.cast(DateType()).eval(row, schema).month
        # Months 1-3 -> 1, 4-6 -> 2, 7-9 -> 3, 10-12 -> 4.
        return (month + 2) // 3
    def __str__(self):
        return f'quarter({self.column})'
class Hour(UnaryExpression):
    """Extract the hour component (0-23) of a timestamp column."""
    def eval(self, row, schema):
        as_timestamp = self.column.cast(TimestampType()).eval(row, schema)
        return as_timestamp.hour
    def __str__(self):
        return f'hour({self.column})'
class Minute(UnaryExpression):
    """Extract the minute component (0-59) of a timestamp column."""
    def eval(self, row, schema):
        as_timestamp = self.column.cast(TimestampType()).eval(row, schema)
        return as_timestamp.minute
    def __str__(self):
        return f'minute({self.column})'
class Second(UnaryExpression):
    """Extract the second component (0-59) of a timestamp column."""
    def eval(self, row, schema):
        as_timestamp = self.column.cast(TimestampType()).eval(row, schema)
        return as_timestamp.second
    def __str__(self):
        return f'second({self.column})'
class DayOfMonth(UnaryExpression):
    """Extract the day-of-month (1-31) of a date column."""
    def eval(self, row, schema):
        as_date = self.column.cast(DateType()).eval(row, schema)
        return as_date.day
    def __str__(self):
        return f'dayofmonth({self.column})'
class DayOfYear(UnaryExpression):
    """Ordinal day of the year (1-366) of a date column."""
    def eval(self, row, schema):
        as_date = self.column.cast(DateType()).eval(row, schema)
        # timetuple().tm_yday is exactly the 1-based day-of-year, replacing
        # the manual subtraction against January 1st.
        return as_date.timetuple().tm_yday
    def __str__(self):
        return f'dayofyear({self.column})'
class LastDay(UnaryExpression):
    """Last calendar day of the month containing the date."""
    def eval(self, row, schema):
        as_date = self.column.cast(DateType()).eval(row, schema)
        # relativedelta with an absolute day=31 clamps to the final valid
        # day of the month (28/29/30/31), which is exactly the last day.
        return as_date + relativedelta(day=31)
    def __str__(self):
        return f'last_day({self.column})'
class WeekOfYear(UnaryExpression):
    """ISO-8601 week number (1-53) of a date column."""
    def eval(self, row, schema):
        as_date = self.column.cast(DateType()).eval(row, schema)
        _, week, _ = as_date.isocalendar()
        return week
    def __str__(self):
        return f'weekofyear({self.column})'
class DayOfWeek(UnaryExpression):
    """Day of week with Sunday=1 .. Saturday=7 (Spark convention)."""
    def eval(self, row, schema):
        iso = self.column.cast(DateType()).eval(row, schema).isoweekday()
        # ISO has Monday=1 .. Sunday=7; rotate so Sunday becomes 1.
        return 1 if iso == 7 else iso + 1
    def __str__(self):
        return f'dayofweek({self.column})'
class NextDay(Expression):
    """next_day(column, day_of_week): first date strictly after the
    column's date that falls on the given weekday name ('MON'..'SUN');
    None for an unrecognized name.
    """
    def __init__(self, column, day_of_week):
        super().__init__(column)
        self.column = column
        self.day_of_week = day_of_week.get_literal_value()
    def eval(self, row, schema):
        as_date = self.column.cast(DateType()).eval(row, schema)
        wanted = self.day_of_week.upper()
        if wanted not in DAYS_OF_WEEK:
            return None
        target = DAYS_OF_WEEK.index(wanted) + 1
        # Advance 1..7 days: never 0, so "next" Monday from a Monday is a
        # full week later.
        step = (target - as_date.isoweekday() - 1) % 7 + 1
        return as_date + datetime.timedelta(days=step)
    def __str__(self):
        return f'next_day({self.column}, {self.day_of_week})'
class MonthsBetween(Expression):
    """months_between(column1, column2[, round_off]): signed number of
    months from column2 to column1 as a float.

    A whole-month result is produced when both days-of-month match, or when
    both dates fall on the last day of their respective months; otherwise
    the fractional part is the (time-of-day-adjusted) day difference
    divided by a fixed 31.  Returns None when either side does not cast to
    a timestamp.
    """
    def __init__(self, column1, column2, round_off):
        super().__init__(column1, column2)
        self.column1 = column1
        self.column2 = column2
        # Literal flag: when truthy, round the result to 8 decimals.
        self.round_off = round_off.get_literal_value()
    def eval(self, row, schema):
        value_1 = self.column1.cast(TimestampType()).eval(row, schema)
        value_2 = self.column2.cast(TimestampType()).eval(row, schema)
        if not isinstance(value_1, datetime.datetime) or not isinstance(value_2, datetime.datetime):
            return None
        one_day = datetime.timedelta(days=1)
        # A date is the last of its month iff adding one day changes month.
        value_1_is_the_last_of_its_month = value_1.month != (value_1 + one_day).month
        value_2_is_the_last_of_its_month = value_2.month != (value_2 + one_day).month
        if value_1.day == value_2.day or (value_1_is_the_last_of_its_month and value_2_is_the_last_of_its_month):
            # Special cases where the time of day is not considered.
            diff = (value_1.year - value_2.year) * 12 + (value_1.month - value_2.month)
        else:
            # Day difference plus time-of-day parts expressed in days.
            day_offset = (
                value_1.day
                - value_2.day
                + (value_1.hour - value_2.hour) / 24
                + (value_1.minute - value_2.minute) / 1440
                + (value_1.second - value_2.second) / 86400
            )
            diff = (value_1.year - value_2.year) * 12 + (value_1.month - value_2.month) * 1 + day_offset / 31
        if self.round_off:
            return float(round(diff, 8))
        return float(diff)
    def __str__(self):
        return 'months_between({0}, {1}, {2})'.format(self.column1, self.column2, str(self.round_off).lower())
class DateDiff(Expression):
    """datediff(column1, column2): days from column2's date to column1's
    (negative when column1 is earlier); None if either side is not a date.
    """
    def __init__(self, column1, column2):
        super().__init__(column1, column2)
        self.column1 = column1
        self.column2 = column2
    def eval(self, row, schema):
        end = self.column1.cast(DateType()).eval(row, schema)
        start = self.column2.cast(DateType()).eval(row, schema)
        if isinstance(end, datetime.date) and isinstance(start, datetime.date):
            return (end - start).days
        return None
    def __str__(self):
        return f'datediff({self.column1}, {self.column2})'
class FromUnixTime(Expression):
    """from_unixtime(column, format): render a unix-epoch (seconds) column
    as a string using the literal format; conversion uses local time.
    """
    def __init__(self, column, f):
        super().__init__(column)
        self.column = column
        self.format = f.get_literal_value()
        # Resolve the formatter once from the literal format string.
        self.formatter = get_time_formatter(self.format)
    def eval(self, row, schema):
        epoch = self.column.cast(FloatType()).eval(row, schema)
        as_datetime = datetime.datetime.fromtimestamp(epoch)
        return self.formatter(as_datetime)
    def __str__(self):
        return f'from_unixtime({self.column}, {self.format})'
class DateFormat(Expression):
    """date_format(column, format): render a timestamp column as a string
    using the literal format.
    """
    def __init__(self, column, f):
        super().__init__(column)
        self.column = column
        self.format = f.get_literal_value()
        # Resolve the formatter once from the literal format string.
        self.formatter = get_time_formatter(self.format)
    def eval(self, row, schema):
        as_timestamp = self.column.cast(TimestampType()).eval(row, schema)
        return self.formatter(as_timestamp)
    def __str__(self):
        return f'date_format({self.column}, {self.format})'
class CurrentTimestamp(Expression):
    """current_timestamp(): a timestamp captured once in initialize(), so
    every subsequent eval() returns the same value.
    """
    def __init__(self):
        super().__init__()
        self.current_timestamp = None
    def initialize(self, partition_index):
        # Capture the clock when the expression is initialized.
        super().initialize(partition_index)
        self.current_timestamp = datetime.datetime.now()
    def eval(self, row, schema):
        return self.current_timestamp
    def __str__(self):
        return 'current_timestamp()'
class CurrentDate(Expression):
    """current_date(): the date part of a timestamp captured once in
    initialize().
    """
    def __init__(self):
        super().__init__()
        self.current_timestamp = None
    def initialize(self, partition_index):
        # Capture the clock when the expression is initialized.
        super().initialize(partition_index)
        self.current_timestamp = datetime.datetime.now()
    def eval(self, row, schema):
        return self.current_timestamp.date()
    def __str__(self):
        return 'current_date()'
class UnixTimestamp(Expression):
    """unix_timestamp(column, format): parse a string column into unix
    epoch seconds using the literal format.
    """
    def __init__(self, column, f):
        super().__init__(column)
        self.column = column
        self.format = f.get_literal_value()
        # Resolve the parser once from the literal format string.
        self.parser = get_unix_timestamp_parser(self.format)
    def eval(self, row, schema):
        raw = self.column.eval(row, schema)
        return self.parser(raw)
    def __str__(self):
        return f'unix_timestamp({self.column}, {self.format})'
class ParseToTimestamp(Expression):
    """to_timestamp(column[, format]): parse a string column into a
    datetime via unix epoch seconds (local time).
    """
    def __init__(self, column, f):
        super().__init__(column)
        self.column = column
        self.format = f.get_literal_value()
        # Resolve the parser once from the literal format string.
        self.parser = get_unix_timestamp_parser(self.format)
    def eval(self, row, schema):
        raw = self.column.eval(row, schema)
        seconds = self.parser(raw)
        return datetime.datetime.fromtimestamp(seconds)
    def __str__(self):
        suffix = ", '{0}'".format(self.format) if self.format is not None else ''
        return "to_timestamp('{0}'{1})".format(self.column, suffix)
class ParseToDate(Expression):
    """to_date(column[, format]): parse a string column into a date via
    unix epoch seconds (local time).
    """
    def __init__(self, column, f):
        super().__init__(column)
        self.column = column
        self.format = f.get_literal_value()
        # Resolve the parser once from the literal format string.
        self.parser = get_unix_timestamp_parser(self.format)
    def eval(self, row, schema):
        raw = self.column.eval(row, schema)
        seconds = self.parser(raw)
        return datetime.date.fromtimestamp(seconds)
    def __str__(self):
        suffix = ", '{0}'".format(self.format) if self.format is not None else ''
        return "to_date('{0}'{1})".format(self.column, suffix)
class TruncDate(Expression):
    """trunc(column, level): floor a date to the start of its year or
    month; None for any other level string.
    """
    def __init__(self, column, level):
        super().__init__(column)
        self.column = column
        self.level = level.get_literal_value()
    def eval(self, row, schema):
        as_date = self.column.cast(DateType()).eval(row, schema)
        if self.level in ('year', 'yyyy', 'yy'):
            return datetime.date(as_date.year, 1, 1)
        if self.level in ('month', 'mon', 'mm'):
            return datetime.date(as_date.year, as_date.month, 1)
        # Unsupported level strings yield None rather than raising.
        return None
    def __str__(self):
        return f'trunc({self.column}, {self.level})'
class TruncTimestamp(Expression):
    """date_trunc(level, column): floor a timestamp to the given unit.

    Supported levels: year/yyyy/yy, month/mon/mm, day/dd, quarter, week,
    hour, minute, second.  Any other level yields None.
    """
    def __init__(self, level, column):
        super().__init__(column)
        self.level = level.get_literal_value()
        self.column = column
    def eval(self, row, schema):
        value = self.column.cast(TimestampType()).eval(row, schema)
        # Try day-granularity (or coarser) truncations first, then the
        # intra-day ones; each helper returns None when the level is not
        # one of the units it handles.
        day_truncation = self.truncate_to_day(value)
        if day_truncation:
            return day_truncation
        time_truncated = self.truncate_to_time(value)
        if time_truncated:
            return time_truncated
        return None
    def truncate_to_day(self, value):
        # Truncations at day granularity or coarser.
        if self.level in ('year', 'yyyy', 'yy'):
            return datetime.datetime(value.year, 1, 1)
        if self.level in ('month', 'mon', 'mm'):
            return datetime.datetime(value.year, value.month, 1)
        if self.level in ('day', 'dd'):
            return datetime.datetime(value.year, value.month, value.day)
        if self.level in ('quarter',):
            # First month of the quarter: 1, 4, 7 or 10.
            quarter_start_month = int((value.month - 1) / 3) * 3 + 1
            return datetime.datetime(value.year, quarter_start_month, 1)
        if self.level in ('week',):
            # Monday of the ISO week containing the value.
            return datetime.datetime(value.year, value.month, value.day) - datetime.timedelta(
                days=value.isoweekday() - 1
            )
        return None
    def truncate_to_time(self, value):
        # Truncations at intra-day granularity.
        if self.level in ('hour',):
            return datetime.datetime(value.year, value.month, value.day, value.hour)
        if self.level in ('minute',):
            return datetime.datetime(value.year, value.month, value.day, value.hour, value.minute)
        if self.level in ('second',):
            return datetime.datetime(value.year, value.month, value.day, value.hour, value.minute, value.second,)
        return None
    def __str__(self):
        return 'date_trunc({0}, {1})'.format(self.level, self.column)
class FromUTCTimestamp(Expression):
    """from_utc_timestamp(column, tz): interpret a naive timestamp as UTC
    and express it in the literal timezone; the result is naive again.
    """
    def __init__(self, column, tz):
        super().__init__(column)
        self.column = column
        self.tz = tz.get_literal_value()
        # parse_tz resolves the literal tz string once.
        self.pytz = parse_tz(self.tz)
    def eval(self, row, schema):
        value = self.column.cast(TimestampType()).eval(row, schema)
        # An unrecognized timezone passes the value through untouched.
        if self.pytz is None:
            return value
        in_gmt = GMT_TIMEZONE.localize(value)
        return in_gmt.astimezone(self.pytz).replace(tzinfo=None)
    def __str__(self):
        return f'from_utc_timestamp({self.column}, {self.tz})'
class ToUTCTimestamp(Expression):
    """to_utc_timestamp(column, tz): interpret a naive timestamp as being
    in the literal timezone and express it in UTC; the result is naive.
    """
    def __init__(self, column, tz):
        super().__init__(column)
        self.column = column
        self.tz = tz.get_literal_value()
        # parse_tz resolves the literal tz string once.
        self.pytz = parse_tz(self.tz)
    def eval(self, row, schema):
        value = self.column.cast(TimestampType()).eval(row, schema)
        # An unrecognized timezone passes the value through untouched.
        if self.pytz is None:
            return value
        in_local = self.pytz.localize(value)
        return in_local.astimezone(GMT_TIMEZONE).replace(tzinfo=None)
    def __str__(self):
        return f'to_utc_timestamp({self.column}, {self.tz})'
# Public expression classes exported by this module (one per supported
# Spark SQL date/time function).
__all__ = [
    'ToUTCTimestamp',
    'FromUTCTimestamp',
    'TruncTimestamp',
    'TruncDate',
    'ParseToDate',
    'ParseToTimestamp',
    'UnixTimestamp',
    'CurrentTimestamp',
    'FromUnixTime',
    'WeekOfYear',
    'NextDay',
    'MonthsBetween',
    'LastDay',
    'DayOfYear',
    'DayOfMonth',
    'DayOfWeek',
    'Month',
    'Quarter',
    'Year',
    'DateDiff',
    'DateSub',
    'DateAdd',
    'DateFormat',
    'CurrentDate',
    'AddMonths',
    'Hour',
    'Minute',
    'Second',
]
|
import yaml
# Model definition lists: parallel lists describing the element / link /
# conveyed records, their SQL column types, and their CLI / PlantUML
# rendering.  The *_ID constants below index the corresponding list.
### elements ###
# Definition
# Field positions match the ELEMENT_* index constants below.
listElementField = ['id', 'name', 'description', 'creationDate', 'version', 'type', 'parentId', 'path', 'refId']
ELEMENT_ID = 0
ELEMENT_NAME = 1
ELEMENT_DESCRIPTION = 2
ELEMENT_DATE = 3
ELEMENT_VERSION = 4
ELEMENT_TYPE = 5
ELEMENT_PARENT_ID = 6
ELEMENT_PATH = 7
ELEMENT_REFERENCE_ID = 8
# Supported element kinds; positions match the E_TYPE_* constants below.
listElementTypes = ['folder', 'function', 'block', 'component', 'data', 'actor', 'usecase', 'reference']
E_TYPE_FOLDER = 0
E_TYPE_FUNCTION = 1
E_TYPE_BLOCK = 2
E_TYPE_COMPONENT = 3
E_TYPE_DATA = 4
E_TYPE_ACTOR = 5
E_TYPE_USECASE = 6
E_TYPE_REFERENCE = 7
# SQL - Please keep it consistent with the Definition above
# (one column type per entry of listElementField, same order).
strElementTableName = "element"
listElementFieldSQL = ['INT PRIMARY KEY AUTO_INCREMENT UNIQUE',
                       'VARCHAR(255) NOT NULL',
                       'VARCHAR(1000)',
                       'TIMESTAMP DEFAULT CURRENT_TIMESTAMP',
                       'FLOAT',
                       'ENUM(' + ",".join(["'{}'".format(x) for x in listElementTypes]) + ") NOT NULL",
                       'INT REFERENCES ' + strElementTableName + '(id)',
                       'VARCHAR(1024)',
                       'INT REFERENCES ' + strElementTableName + '(id)']
# CMD - Please keep it consistent with the Definition above
# (terminal colour per element type, same order as listElementTypes).
listElementTypesColours = ['blue', 'bright_green', 'bright_cyan', 'bright_magenta', 'magenta', 'bright_yellow', 'cyan', 'bright_blue']
# PlantUML shape per element type, same order as listElementTypes.
listElementTypesPlot = ['folder', 'rectangle', 'node', 'component', 'rectangle', 'actor', 'usecase', 'hexagon']
### links def ###
# Definition
# Field positions match the LINK_* index constants below.
listLinkField = ['id', 'name', 'description', 'creationDate', 'version', 'type', 'parentId', 'source', 'destination']
LINK_ID = 0
LINK_NAME = 1
LINK_DESCRIPTION = 2
LINK_DATE = 3
LINK_VERSION = 4
LINK_TYPE = 5
LINK_PARENT_ID = 6
LINK_SOURCE_ID = 7
LINK_DESTINATION_ID = 8
# Supported link kinds; positions match the L_TYPE_* constants below.
listLinkTypes = ['dataflow', 'aggregation', 'link']
L_TYPE_DATAFLOW = 0
L_TYPE_AGGREGATION = 1
L_TYPE_LINK = 2
# SQL - Please keep it consistent with the Definition above
# (one column type per entry of listLinkField, same order).
strLinkTableName = "link"
listLinkFieldSQL = ['INT PRIMARY KEY AUTO_INCREMENT UNIQUE',
                    'VARCHAR(255) NOT NULL',
                    'VARCHAR(1000)',
                    'TIMESTAMP DEFAULT CURRENT_TIMESTAMP',
                    'FLOAT',
                    'ENUM(' + ",".join(["'{}'".format(x) for x in listLinkTypes]) + ") NOT NULL",
                    'INT REFERENCES ' + strLinkTableName + '(id)',
                    'INT, FOREIGN KEY (source) REFERENCES ' + strElementTableName + '(id) ON DELETE CASCADE',
                    'INT, FOREIGN KEY (destination) REFERENCES ' + strElementTableName + '(id) ON DELETE CASCADE']
# CMD - arrow symbol per link type, same order as listLinkTypes.
listLinkTypesSymbols = ['->', '-|>', '-']
### conveyed ###
# Definition: many-to-many association between links and the elements
# they convey.
listConveyedField = ['link', 'element']
# SQL
strConveyedTableName = "conveyed"
listConveyedFieldSQL = ['INT, FOREIGN KEY ' + strLinkTableName+ '(id)',
                        'INT, FOREIGN KEY ' + strElementTableName + '(id)']
class config():
    """Load ``config.yml`` from the working directory into ``self.config``.

    YAML parse errors are printed rather than raised; a missing file still
    raises the usual OSError.
    """
    def __init__(self):
        # Parse the configuration file; report (do not raise) YAML errors.
        try:
            with open("config.yml", 'r') as stream:
                self.config = yaml.safe_load(stream)
        except yaml.YAMLError as exc:
            print(exc)
|
#! /usr/bin/env/python 3.0
#
# Running this script will update the batch and make files that compile the test cases
# for each CWE into a separate .exe or executable file. This script also edits source code
# and header files needed for a successful compilation with these files.
#
#charm
import os
import shutil
import sys
import py_common
import update_main_cpp_and_testcases_h
def create_makefile(cwe, is_dir_split):
    """Return the text of a GNU Makefile building every Linux test case of
    *cwe* into one executable (plus an ``individuals`` target for per-case
    binaries).

    cwe          -- CWE identifier used only in the output TARGET path,
                    e.g. "CWE78" or "CWE78_s01".
    is_dir_split -- True when the CWE's cases live one directory deeper
                    (sXX subdirectories), which changes the relative paths
                    to src/testcasesupport and the build directory.
    """
    # set flags
    contents = ""
    contents += "CC=/usr/bin/gcc\n"
    contents += "CPP=/usr/bin/g++\n"
    contents += "DEBUG=-g\n"
    contents += "CFLAGS=-c\n"
    contents += "LFLAGS=-pthread -lm\n"
    contents += "LD=ld\n"
    contents += "INCLUDE_MAIN=-DINCLUDEMAIN\n"
    # for cwes that are divided into s01 - ...
    if is_dir_split:
        contents += "\nINCLUDES=-I ../../../src/testcasesupport\n"
    else:
        contents += "\nINCLUDES=-I ../../src/testcasesupport\n"
    contents += "\nMAIN=main_linux.cpp\n"
    contents += "MAIN_OBJECT=$(MAIN:.cpp=.o)\n"
    # for cwes that are divided into s01 - ...
    if is_dir_split:
        contents += "\nC_SUPPORT_PATH=../../../src/testcasesupport/\n"
    else:
        contents += "\nC_SUPPORT_PATH=../../src/testcasesupport/\n"
    contents += "C_SUPPORT_FILES=$(C_SUPPORT_PATH)io.c $(C_SUPPORT_PATH)std_thread.c\n"
    contents += "C_SUPPORT_OBJECTS=io.o std_thread.o\n"
    # filter windows specific cwes
    contents += "FILTER_OUT=$(wildcard CWE*w32*.c*) $(wildcard CWE*wchar_t*.c*)\n"
    contents += "\n# only grab the .c files without \"w32\" or \"wchar_t\" in the name\n"
    contents += "C_SOURCES=$(filter-out $(FILTER_OUT),$(wildcard CWE*.c))\n"
    contents += "C_OBJECTS=$(C_SOURCES:.c=.o)\n"
    contents += "\n# only grab the .cpp files without \"w32\" or \"wchar_t\" in the name\n"
    contents += "CPP_SOURCES=$(filter-out $(FILTER_OUT),$(wildcard CWE*.cpp))\n"
    contents += "CPP_OBJECTS=$(CPP_SOURCES:.cpp=.o)\n"
    # SIMPLES: single-file cases (name ends in a digit); LETTEREDS: multi-file
    # cases (variant files a/b/c...); GOOD1S/BADS: split good/bad sources.
    contents += "\nSIMPLES=$(filter-out $(FILTER_OUT), $(wildcard CWE*0.c*) $(wildcard CWE*1.c*) $(wildcard CWE*2.c*) $(wildcard CWE*3.c*) $(wildcard CWE*4.c*)) \\\n"
    contents += "        $(filter-out $(FILTER_OUT), $(wildcard CWE*5.c*) $(wildcard CWE*6.c*) $(wildcard CWE*7.c*) $(wildcard CWE*8.c*) $(wildcard CWE*9.c*))\n"
    contents += "SIMPLES_C=$(filter-out $(CPP_SOURCES), $(SIMPLES))\n"
    contents += "SIMPLES_CPP=$(filter-out $(C_SOURCES), $(SIMPLES))\n\n"
    contents += "LETTEREDS=$(filter-out $(FILTER_OUT), $(wildcard CWE*a.c*))\n"
    contents += "LETTEREDS_C=$(subst a.,.,$(filter-out $(CPP_SOURCES), $(LETTEREDS)))\n"
    contents += "LETTEREDS_CPP=$(subst a.,.,$(filter-out $(C_SOURCES), $(LETTEREDS)))\n\n"
    contents += "GOOD1S=$(filter-out $(FILTER_OUT), $(wildcard CWE*_good1.cpp))\n"
    contents += "BADS=$(subst _good1.,_bad.,$(GOOD1S))\n\n"
    contents += "INDIVIDUALS_C=$(addsuffix .out, $(sort $(subst .c,,$(SIMPLES_C) $(LETTEREDS_C))))\n"
    contents += "INDIVIDUALS_CPP=$(addsuffix .out, $(sort $(subst .cpp,,$(SIMPLES_CPP) $(LETTEREDS_CPP) $(BADS) $(GOOD1S))))\n"
    contents += "\nOBJECTS=$(MAIN_OBJECT) $(C_OBJECTS) $(CPP_OBJECTS) $(C_SUPPORT_OBJECTS)\n"
    contents += "# TARGET is the only line in this file specific to the CWE\n"
    if is_dir_split:
        contents += "TARGET=../../../build/" + cwe + "\n"
    else:
        contents += "TARGET=../../build/" + cwe + "\n"
    contents += "\nall: $(TARGET)\n"
    contents += "\npartial.o: $(C_OBJECTS) $(CPP_OBJECTS)\n"
    contents += "	$(LD) -r $(C_OBJECTS) $(CPP_OBJECTS) -o $@\n"
    contents += "\nindividuals: $(INDIVIDUALS_C) $(INDIVIDUALS_CPP)\n\n"
    contents += "$(INDIVIDUALS_C): $(C_SUPPORT_OBJECTS)\n"
    contents += "	$(CC) $(INCLUDES) $(INCLUDE_MAIN) -o $@ $(wildcard $(subst .out,,$@)*.c) $(C_SUPPORT_OBJECTS) $(LFLAGS)\n\n"
    contents += "$(INDIVIDUALS_CPP): $(C_SUPPORT_OBJECTS)\n"
    contents += "	$(CPP) $(INCLUDES) $(INCLUDE_MAIN) -o $@ $(wildcard $(subst .out,,$@)*.cpp) $(C_SUPPORT_OBJECTS) $(LFLAGS)\n"
    contents += "\n$(TARGET) : $(OBJECTS)\n"
    contents += "	$(CPP) $(LFLAGS) $(OBJECTS) -o $(TARGET)\n"
    contents += "\n$(C_OBJECTS) : %.o:%.c \n"
    contents += "	$(CC) $(CFLAGS) $(INCLUDES) $^ -o $@\n"
    contents += "\n$(CPP_OBJECTS) : %.o:%.cpp\n"
    contents += "	$(CPP) $(CFLAGS) $(INCLUDES) $^ -o $@\n"
    contents += "\n$(C_SUPPORT_OBJECTS) : $(C_SUPPORT_FILES)\n"
    contents += "	$(CC) $(CFLAGS) $(INCLUDES) $(C_SUPPORT_PATH)$(@:.o=.c) -o $@\n"
    contents += "\n$(MAIN_OBJECT) : $(MAIN)\n"
    contents += "	$(CC) $(CFLAGS) $(INCLUDES) $(MAIN) -o $@\n"
    contents += "\nclean:\n"
    contents += "	rm -rf *.o *.out\n"
    return contents
def check_if_c_files_exist(directory):
    """Return True when *directory* contains at least one CWE .c file."""
    return bool(py_common.find_files_in_dir(directory, "CWE.*\.c$"))
def check_if_cpp_files_exist(directory):
    """Return True when *directory* contains at least one CWE .cpp file."""
    return bool(py_common.find_files_in_dir(directory, "CWE.*\.cpp$"))
def help():
    """Print command-line usage for this script to stderr."""
    sys.stderr.write('Usage: \n')
    sys.stderr.write('    create_per_cwe_files.py (builds per CWE files for all testcases without debug flags)\n')
    sys.stderr.write(
        '    create_per_cwe_files.py CWE False (builds per CWE files for all testcases without debug flags)\n')
    sys.stderr.write(
        '    create_per_cwe_files.py CWE(78|15) (builds per CWE files for test cases for CWE 78 and CWE 15 without debug flags)\n')
    sys.stderr.write(
        '    create_per_cwe_files.py CWE(78|15) True (builds per CWE files for test cases for CWE 78 and CWE 15 with debug flags)')
# may need /bigobj flag: http://msdn.microsoft.com/en-us/library/ms173499%28VS.90%29.aspx
# Only one of our C/C++ tools requires debug flags so the debug flags that are set are specific for this tool
# MSVC flag sets; the "split_" variants add one more "..\\" because split
# CWEs build from one directory deeper.
debug_flags = '/I"..\\..\\src\\testcasesupport" /Zi /Od /MTd /GS- /INCREMENTAL:NO /DEBUG /W3 /bigobj /EHsc /nologo' # if this line is modified, change the one below
split_debug_flags = '/I"..\\..\\..\\src\\testcasesupport" /Zi /Od /MTd /GS- /INCREMENTAL:NO /DEBUG /W3 /bigobj /EHsc /nologo'
linker_flags = '/I"..\\..\\src\\testcasesupport" /W3 /MT /GS /RTC1 /bigobj /EHsc /nologo' # if this line is modified, change the one below
split_linker_flags = '/I"..\\..\\..\\src\\testcasesupport" /W3 /MT /GS /RTC1 /bigobj /EHsc /nologo'
compile_flags = linker_flags + " /c"
split_compile_flags = split_linker_flags + " /c"
debug_compile_flags = debug_flags + " /c"
split_debug_compile_flags = split_debug_flags + " /c"
if __name__ == "__main__":
    # check if ./testcases directory exists, if not, we are running
    # from wrong working directory
    if not os.path.exists("../testcases"):
        py_common.print_with_timestamp("Wrong working directory; could not find testcases directory")
        exit()
    # default values which are used if no arguments are passed on command line
    cwe_regex = "CWE"
    use_debug = False
    # argv[1] = CWE name regex, argv[2] = "True" to enable debug flags.
    if len(sys.argv) > 1:
        if ((sys.argv[1] == '-h') or (len(sys.argv) > 3)):
            help()
            exit()
        if len(sys.argv) == 2:
            cwe_regex = sys.argv[1]
        if len(sys.argv) == 3:
            cwe_regex = sys.argv[1]
            use_debug = (sys.argv[2] == "True")
    # get the CWE directories in testcases folder
    cwe_dirs = py_common.find_directories_in_dir("../testcases", cwe_regex)
    # only allow directories
    cwe_dirs = filter(lambda x: os.path.isdir(x), cwe_dirs)
    for dir in cwe_dirs:
        # A CWE with very many cases is split into s01, s02, ... subdirs;
        # detect that via the presence of an 's01' entry.
        if 's01' in os.listdir(dir):
            is_dir_split = True
        else:
            is_dir_split = False
        if is_dir_split:
            # get the list of subdirectories
            cwe_sub_dirs = py_common.find_directories_in_dir(dir, "^s\d.*")
            for sub_dir in cwe_sub_dirs:
                # copy main.cpp and testcases.h into this testcase dir
                shutil.copy("testcasesupport/main.cpp", sub_dir)
                shutil.copy("testcasesupport/testcases.h", sub_dir)
                # update main.cpp/testcases.h to call only this functional variant's testcases
                testcase_files = update_main_cpp_and_testcases_h.build_list_of_primary_c_cpp_testcase_files(sub_dir,
                                                                                                            None)
                fcl = update_main_cpp_and_testcases_h.generate_calls_to_fxs(testcase_files)
                update_main_cpp_and_testcases_h.update_main_cpp(sub_dir, "main.cpp", fcl)
                update_main_cpp_and_testcases_h.update_testcases_h(sub_dir, "testcases.h", fcl)
                # get the CWE number from the directory name (not the full path since that may also have the string CWE in it)
                this_cwe_dir = os.path.basename(dir)
                cwe_index = this_cwe_dir.index("CWE")
                unders_index = this_cwe_dir.index("_", cwe_index)
                cwe = this_cwe_dir[cwe_index:unders_index]
                sub_dir_number = os.path.basename(sub_dir)
                # e.g. "CWE78_s01" so each sub-dir builds a distinct target.
                cwe = cwe + "_" + sub_dir_number
                # check if any .c files exist to compile
                c_files_exist = check_if_c_files_exist(sub_dir)
                cpp_files_exist = check_if_cpp_files_exist(sub_dir)
                linux_testcase_exists = False
                for file in testcase_files:
                    if ('w32' not in file) and ('wchar_t' not in file):
                        linux_testcase_exists = True
                        break
                # only generate main_linux.cpp and Makefile if there are Linux test cases for this CWE
                if linux_testcase_exists:
                    shutil.copy("testcasesupport/main_linux.cpp", sub_dir)
                    linux_fcl = update_main_cpp_and_testcases_h.generate_calls_to_linux_fxs(testcase_files)
                    update_main_cpp_and_testcases_h.update_main_cpp(sub_dir, "main_linux.cpp", linux_fcl)
                    # no need to update testcases.h
                    makefile_contents = create_makefile(cwe, is_dir_split)
                    makefile_fullpath = os.path.join(sub_dir, "Makefile")
                    py_common.write_file(makefile_fullpath, makefile_contents)
                else:
                    py_common.print_with_timestamp(
                        "No Makefile created for " + cwe + ". All of the test cases are Windows-specific.")
        else:
            # copy main.cpp and testcases.h into this testcase dir
            shutil.copy("testcasesupport/main.cpp", dir)
            shutil.copy("testcasesupport/testcases.h", dir)
            # update main.cpp/testcases.h to call only this cwe's testcases
            testcase_files = update_main_cpp_and_testcases_h.build_list_of_primary_c_cpp_testcase_files(dir, None)
            fcl = update_main_cpp_and_testcases_h.generate_calls_to_fxs(testcase_files)
            update_main_cpp_and_testcases_h.update_main_cpp(dir, "main.cpp", fcl)
            update_main_cpp_and_testcases_h.update_testcases_h(dir, "testcases.h", fcl)
            # get the CWE number from the directory name (not the full path since that may also have the string CWE in it)
            thisdir = os.path.basename(dir)
            cwe_index = thisdir.index("CWE")
            unders_index = thisdir.index("_", cwe_index)
            cwe = thisdir[cwe_index:unders_index]
            # check if any .c files exist to compile
            c_files_exist = check_if_c_files_exist(dir)
            cpp_files_exist = check_if_cpp_files_exist(dir)
            linux_testcase_exists = False
            for file in testcase_files:
                if ('w32' not in file) and ('wchar_t' not in file):
                    linux_testcase_exists = True
                    break
            # only generate main_linux.cpp and Makefile if there are Linux test cases for this CWE
            if linux_testcase_exists:
                shutil.copy("testcasesupport/main_linux.cpp", dir)
                linux_fcl = update_main_cpp_and_testcases_h.generate_calls_to_linux_fxs(testcase_files)
                update_main_cpp_and_testcases_h.update_main_cpp(dir, "main_linux.cpp", linux_fcl)
                # no need to update testcases.h
                makefile_contents = create_makefile(cwe, is_dir_split)
                makefile_fullpath = os.path.join(dir, "Makefile")
                py_common.write_file(makefile_fullpath, makefile_contents)
            else:
                py_common.print_with_timestamp(
                    "No Makefile created for " + cwe + ". All of the test cases are Windows-specific.")
# babelizer.py - API for simple access to babelfish.altavista.com.
# Requires python 2.0 or better.
#
# See it in use at http://babel.MrFeinberg.com/
"""API for simple access to babelfish.altavista.com.
Summary:
import babelizer
print ' '.join(babelizer.available_languages)
print babelizer.translate( 'How much is that doggie in the window?',
'English', 'French' )
def babel_callback(phrase):
print phrase
sys.stdout.flush()
babelizer.babelize( 'I love a reigning knight.',
'English', 'German',
callback = babel_callback )
available_languages
A list of languages available for use with babelfish.
translate( phrase, from_lang, to_lang )
Uses babelfish to translate phrase from from_lang to to_lang.
babelize(phrase, from_lang, through_lang, limit = 12, callback = None)
Uses babelfish to translate back and forth between from_lang and
through_lang until either no more changes occur in translation or
limit iterations have been reached, whichever comes first. Takes
an optional callback function which should receive a single
parameter, being the next translation. Without the callback
returns a list of successive translations.
It's only guaranteed to work if 'english' is one of the two languages
given to either of the translation methods.
Both translation methods throw exceptions which are all subclasses of
BabelizerError. They include
LanguageNotAvailableError
Thrown on an attempt to use an unknown language.
BabelfishChangedError
Thrown when babelfish.altavista.com changes some detail of their
layout, and babelizer can no longer parse the results or submit
the correct form (a not infrequent occurance).
BabelizerIOError
Thrown for various networking and IO errors.
Version: $Id: babelizer.py,v 1.4 2001/06/04 21:25:09 Administrator Exp $
Author: <NAME> <<EMAIL>>
"""
import re, string, urllib.request, urllib.parse, urllib.error
"""
Various patterns I have encountered in looking for the babelfish result.
We try each of them in turn, based on the relative number of times I've
seen each of these patterns. $1.00 to anyone who can provide a heuristic
for knowing which one to use. This includes AltaVista employees.
"""
__where = [ re.compile(r'name=\"q\">([^<]*)'),
re.compile(r'td bgcolor=white>([^<]*)'),
re.compile(r'<\/strong><br>([^<]*)')
]
__languages = { 'english' : 'en',
'french' : 'fr',
'spanish' : 'es',
'german' : 'de',
'italian' : 'it',
'portugese' : 'pt',
}
"""
All of the available language names.
"""
available_languages = [ x.title() for x in list(__languages.keys()) ]
"""
Calling translate() or babelize() can raise a BabelizerError
"""
class BabelizerError(Exception):
    """Base class for every error raised by this module."""
    pass
class LanguageNotAvailableError(BabelizerError):
    """Raised on an attempt to use a language babelfish does not offer."""
    pass
class BabelfishChangedError(BabelizerError):
    """Raised when the babelfish page layout no longer matches our regexes."""
    pass
class BabelizerIOError(BabelizerError):
    """Raised for networking and other IO failures while talking to babelfish."""
    pass
def clean(text):
    """Strip *text* and collapse every run of whitespace (including
    newlines) to a single space.

    Bug fix: the original called ``string.replace(...)``, a module-level
    function that no longer exists in Python 3.  ``str.split()`` with no
    argument already splits on any whitespace, newlines included, so the
    explicit replacement is unnecessary.
    """
    return ' '.join(text.strip().split())
def translate(phrase, from_lang, to_lang):
    """Translate *phrase* from *from_lang* to *to_lang* via babelfish.

    Raises LanguageNotAvailableError for an unknown language name,
    BabelizerIOError on network failure, and BabelfishChangedError when
    the response cannot be parsed.
    """
    phrase = clean(phrase)
    try:
        from_code = __languages[from_lang.lower()]
        to_code = __languages[to_lang.lower()]
    except KeyError as lang:
        raise LanguageNotAvailableError(lang)
    # Bug fix: urlopen's POST data must be bytes on Python 3.
    params = urllib.parse.urlencode( { 'BabelFishFrontPage' : 'yes',
                                       'doit' : 'done',
                                       'urltext' : phrase,
                                       'lp' : from_code + '_' + to_code } ).encode('ascii')
    # Bug fix: the old bare `except:` printed via an un-imported `sys` and
    # fell through with `response` undefined; let unexpected errors
    # propagate and wrap only IO failures.
    try:
        response = urllib.request.urlopen('http://babelfish.altavista.com/tr', params)
    except IOError as what:
        raise BabelizerIOError("Couldn't talk to server: %s" % what)
    # Bug fix: read() returns bytes on Python 3; decode before matching
    # the str regexes.
    html = response.read().decode('utf-8', 'replace')
    match = None
    for regex in __where:
        match = regex.search(html)
        if match: break
    if not match: raise BabelfishChangedError("Can't recognize translated string.")
    return clean(match.group(1))
def babelize(phrase, from_language, through_language, limit = 12, callback = None):
    """Bounce *phrase* back and forth between the two languages until the
    translation stops changing or *limit* rounds have run.

    With a callback, each successive phrase is passed to it and None is
    returned; without one, the list of successive phrases is returned.
    """
    phrase = clean(phrase)
    seen = { phrase: 1 }
    results = None if callback else [ phrase ]
    if callback:
        callback(phrase)
    other = { from_language: through_language, through_language: from_language }
    lang = from_language
    for _ in range(limit):
        phrase = translate(phrase, lang, other[lang])
        # Stop as soon as a translation repeats: we have reached a cycle.
        if phrase in seen:
            break
        seen[phrase] = 1
        if callback:
            callback(phrase)
        else:
            results.append(phrase)
        lang = other[lang]
    if not callback:
        return results
if __name__ == '__main__':
    import sys
    # Demo: print each successive translation as soon as it is produced.
    def printer(x):
        print(x)
        sys.stdout.flush();
    babelize("I won't take that sort of treatment from you, or from your doggie!",
             'english', 'french', callback = printer)
|
<reponame>Kramer84/StochasticProcessSensitivityAnalysis<gh_stars>1-10
import gc
import os, sys, re
import time
import numpy as np
import pandas as pd
import warnings
import openturns as ot
if not sys.warnoptions:
warnings.simplefilter("ignore")
import matplotlib as mpl
mpl.use('Qt4Agg')
from matplotlib.figure import Figure
from traits.api import (HasTraits,Bool,Event,File,Int,Str,String,
Directory,Function,Color, Enum,List,Button,
Range,Instance,Float,Trait,Any,CFloat,
Property,Either, on_trait_change)
from traitsui.api import (Handler,View,Item,OKCancelButtons,
OKButton, CancelButton,Spring,
InstanceEditor, Group,ListStrEditor,
CheckListEditor,HSplit,FileEditor,
VSplit,Action,HGroup, TextEditor,
ImageEnumEditor,UIInfo,Label,VGroup,
ListEditor,TableEditor, ObjectColumn,
WindowColor, message,
auto_close_message, message,
BooleanEditor,EnumEditor)
from pyface.qt import QtGui, QtCore
from traitsui.qt4.editor import Editor
from traitsui.qt4.basic_editor_factory import BasicEditorFactory
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from traits.api import (HasTraits,Bool,Event,File,Int,Str,String,
Directory,Function,Color, Enum,List,Button,
Range,Instance,Float,Trait,Any,CFloat,
Property,Either, on_trait_change)
from traitsui.api import (Handler,View,Item,OKCancelButtons,
OKButton, CancelButton,Spring,
InstanceEditor, Group,ListStrEditor,
CheckListEditor,HSplit,FileEditor,
VSplit,Action,HGroup, TextEditor,
ImageEnumEditor,UIInfo,Label,VGroup,
ListEditor,TableEditor, ObjectColumn,
WindowColor, message,
auto_close_message, message,
BooleanEditor,EnumEditor)
class TextDisplay(HasTraits):
    """Simple Traits model holding one text string shown in a multi-line
    ('custom') editor without a label.
    """
    # The displayed text.
    string = String()
    view= View( Item('string',show_label=False, springy=True, style='custom' ))
class _MPLFigureEditor(Editor):
    """TraitsUI editor embedding a matplotlib Figure (the edited trait
    value) inside a Qt widget with a navigation toolbar.
    """
    # Allow the widget to be placed in a scrollable view.
    scrollable = True
    def init(self, parent):
        # Build the Qt control for the editor and register its tooltip.
        self.control = self._create_canvas(parent)
        self.set_tooltip()
    def update_editor(self):
        # The canvas draws directly from the Figure; nothing to sync here.
        pass
    def _create_canvas(self, parent):
        """ Create the MPL canvas. """
        # matplotlib commands to create a canvas
        frame = QtGui.QWidget()
        # self.value is the Figure instance being edited.
        mpl_canvas = FigureCanvas(self.value)
        mpl_canvas.setParent(frame)
        mpl_toolbar = NavigationToolbar2QT(mpl_canvas,frame)
        # Stack the canvas above its navigation toolbar.
        vbox = QtGui.QVBoxLayout()
        vbox.addWidget(mpl_canvas)
        vbox.addWidget(mpl_toolbar)
        frame.setLayout(vbox)
        return frame
class _MPLFigureEditor(BasicEditorFactory):
    # NOTE(review): this factory reuses the editor class's name.  At
    # class-body execution time `klass` still resolves to the Editor
    # subclass defined above, so this works, but the editor class becomes
    # unreachable by name afterwards -- consider renaming one of the two.
    klass = _MPLFigureEditor
class interfaceGUIHandler(Handler):
    """TraitsUI Handler wiring window lifecycle events (init, close,
    destroy) for the GUI.
    """
    ## Look in the traitsui.api handler script for infos
    def init_info(self,info):
        ## Here we create the UIInfo object
        pass
    def init(self, info):
        """This method gets called after the controls have all been
        created but before they are displayed.
        """
        # Let the model finish its matplotlib setup once widgets exist.
        info.object.mpl_setup()
        return True
    def close(self, info, is_ok=True):
        ## This happens when you click on the top right cross
        # NOTE(review): assumes message() returns the literal boolean True
        # on confirmation; `is True` would reject any other truthy return
        # value -- verify against the traitsui message() API.
        confirmation = message(message='Are you sure to close the window?',
                               title='Warning!',buttons=['OK','Cancel'])
        if confirmation is True:
            return True
        else:
            return False
    def closed(self, info, is_ok):
        ## This is to do clean-up after having destroyed (disposed) the window
        ## THIS IS TO BE DONE !!!!!
        pass
    def object_destroy_changed(self,info):
        # Dispose the UI when the model's `destroy` event fires.
        if info.initialized:
            info.bind_context() ## Does nothing (to me)
            info.ui.dispose()
def loadMetaAnalysisResults(metaAnalyisPath = './meta_analysis_results'):
    '''Load the data of our simulation.

    Walks one sub-directory per LHS realization; each realization holds 3
    threshold directories x 3 nu directories, each with one CSV sample file
    (read with openturns).

    Parameters
    ----------
    metaAnalyisPath : str
        Root directory of the meta-analysis results.

    Returns
    -------
    tuple
        (lhs_params, lhs_doe, meta_analysis_array_model,
         meta_analysis_array_mm1000, meta_analysis_array_mm50000,
         kl_dimension_array, nu_array, thresh_array)
    '''
    analysis_dirs = os.listdir(metaAnalyisPath)
    lhs_params = ['young','scaleYoung','diam','scaleDiam',
                  'forcePos','forceNorm']
    size_lhs = len(analysis_dirs)
    # Each directory name encodes the 6 LHS parameter values as numbers.
    lhs_doe = np.zeros((size_lhs, 6))
    for i in range(size_lhs):
        lhs_doe[i,...]= re.findall(r"[-+]?\d*\.\d+|\d+", analysis_dirs[i])
    # size_lhs, n_nu, n_thresh, sobol_E, sobol_D, sobol_FP, sobol_FN
    # for the model we have only one evaluation
    meta_analysis_array_model = np.zeros((size_lhs, 3, 3, 4, 3))
    # for the metamodel we have an envaluation with 3 different LHS sizes
    meta_analysis_array_mm1000 = np.zeros((size_lhs, 3, 3, 3, 4, 3))
    meta_analysis_array_mm50000 = np.zeros((size_lhs, 3, 3, 3, 4, 3))
    # size_lhs, n_nu, n_thresh, kl_dim
    kl_dimension_array = np.zeros((size_lhs, 3, 3))
    nu_array = np.zeros((size_lhs, 3, 3))
    thresh_array = np.zeros((size_lhs, 3, 3))
    for k_lhs in range(size_lhs):
        thresh_paths = os.listdir(os.path.join(metaAnalyisPath,analysis_dirs[k_lhs]))
        for k_thresh in range(3):
            thresh_path = thresh_paths[k_thresh]
            # Bug fix: raw string -- "\d" in a plain string is an invalid
            # escape (DeprecationWarning, future SyntaxError).
            # NOTE(review): the unescaped '.' matches any character, not just
            # a decimal point -- presumably intended as '\.'; left unchanged
            # to preserve current matching behaviour.
            thresh_val = re.findall(r"-?\d+.?\d*(?:[Ee]-\d+)?", thresh_path)[0]
            nu_paths = os.listdir(os.path.join(metaAnalyisPath,
                                               analysis_dirs[k_lhs],
                                               thresh_paths[k_thresh]))
            for k_nu in range(3):
                nu_path = nu_paths[k_nu]
                nu_val = re.findall(r"[-+]?\d*\.\d+|\d+",nu_path)[0]
                csv_path = os.path.join(metaAnalyisPath,
                                        analysis_dirs[k_lhs],
                                        thresh_path,
                                        nu_path)
                sample = ot.Sample_ImportFromCSVFile(csv_path)
                # Column 3 holds the KL expansion dimension for this case.
                kl_dimension_array[k_lhs, k_nu, k_thresh] = sample[0,3]
                nu_array[k_lhs, k_nu, k_thresh] = nu_val
                thresh_array[k_lhs, k_nu, k_thresh] = thresh_val
                # Row 0: full-model Sobol results, reshaped to (index, value/lo/hi).
                meta_analysis_array_model[
                    k_lhs, k_nu, k_thresh, ...] = np.reshape(np.array(sample[0,4:]),(4,3))
                # Here we iterate over the 3 LHS sizes (25,50,100)
                for i_lhs in range(3):
                    meta_analysis_array_mm1000[k_lhs, k_nu, k_thresh, i_lhs,
                        ...] = np.reshape(np.array(sample[4 + i_lhs ,4:]),(4,3))
                    meta_analysis_array_mm50000[k_lhs, k_nu, k_thresh, i_lhs,
                        ...] = np.reshape(np.array(sample[1 + i_lhs ,4:]),(4,3))
    return lhs_params, lhs_doe, meta_analysis_array_model, meta_analysis_array_mm1000, meta_analysis_array_mm50000, kl_dimension_array, nu_array, thresh_array
def set_indices_figure(fig,
                       sobol_model, err_model,
                       sobol_metaLHS25, err_metaLHS25,
                       sobol_metaLHS50, err_metaLHS50,
                       sobol_metaLHS100, err_metaLHS100):
    """Draw four Sobol-index error-bar series (model + 3 metamodel LHS sizes).

    Each ``err_*`` argument is a (2, 4) array of absolute lower/upper bounds;
    matplotlib wants distances from the value, hence the |value - bound|
    conversion below.  Returns the figure for chaining.
    """
    xlabels = ['Sobol Young', 'Sobol Diameter', 'Sobol Force Position', 'Sobol Force Norm']
    x_val = np.array([0,1,2,3])
    ax = fig.add_subplot(111)
    offset = 0.1

    def _draw_series(values, bounds, shift, color, label):
        # One error-bar series, horizontally shifted so series don't overlap.
        ax.errorbar(
            x=x_val + shift,
            y=values,
            yerr=[np.absolute(values - bounds[0, ...]),
                  np.absolute(values - bounds[1, ...])],
            fmt='s', color=color, ecolor=color, label=label)

    # Fix: the original rebound the `sobol_model` parameter (and created
    # unused locals) with the ErrorbarContainer results; the return values
    # were never used, so the duplicated calls are factored into a helper.
    _draw_series(sobol_model, err_model, 0, 'navy', 'Model')
    _draw_series(sobol_metaLHS25, err_metaLHS25, offset, 'yellow', 'LHS25')
    _draw_series(sobol_metaLHS50, err_metaLHS50, 2 * offset, 'darkorange', 'LHS50')
    _draw_series(sobol_metaLHS100, err_metaLHS100, 3 * offset, 'red', 'LHS100')
    ax.set_ylim(0,1)
    # Center the tick labels between the shifted series.
    ax.set_xticks(x_val+.2)
    ax.set_xticklabels(xlabels)
    ax.legend(loc='upper right')
    return fig
class metaAnalysisVisualizer(HasTraits):
    '''This is the class that handles the GUI, there will be some buttons and the matplotlib figure
    '''
    # Navigation state: which LHS realization / nu / threshold / Sobol
    # experiment size is currently displayed (-1 = not initialized yet).
    index_lhs = Int(-1)
    index_nu = Int(0)
    index_thresh = Int(0)
    index_eval_size = Int(0)
    # Read-only values mirrored into the side panel.
    threshold_value = Float(0)
    nu_value = Float(0)
    size_sobol_expriment_metamodel = Int(1000)
    young_modulus_params = Str('')
    diameter_params = Str('')
    force_norm_params = Str('')
    force_pos_params = Str('')
    kl_size_params = Int(0)
    # The matplotlib Figure shown through _MPLFigureEditor.
    figure = Instance(Figure,())
    # Buttons wired to the _*_fired handlers below.
    next_realization = Button(label = 'Select next realization')
    previous_realization = Button(label = 'Select previous realization')
    change_threshold = Button(label = 'change threshold')
    change_nu = Button(label = 'change NU')
    change_sobol_size_LHS = Button(label='change sobol experiment size metamodel')
    view = View(
        HSplit(
            VSplit(
                VGroup(
                    Item('next_realization',
                         show_label=False,
                         height=.1),
                    Item('previous_realization',
                         show_label=False,
                         height=.1),
                    Item('change_threshold',
                         show_label=False,
                         height=.1),
                    Item('change_nu',
                         show_label=False,
                         height=.1),
                    Item('change_sobol_size_LHS',
                         show_label=False,
                         height=.1),),
                VGroup(
                    Item('young_modulus_params',
                         show_label=True,
                         label = 'E (VAR / SCALE) :',
                         style='readonly'),
                    Item('diameter_params',
                         show_label=True,
                         label = 'D (VAR / SCALE) :',
                         style='readonly'),
                    Item('force_norm_params',
                         show_label=True,
                         label = 'FN (VAR) :',
                         style='readonly'),
                    Item('force_pos_params',
                         show_label=True,
                         label = 'FP (VAR) :',
                         style='readonly'),
                    Item('kl_size_params',
                         show_label=True,
                         label = 'Size KL :',
                         style='readonly'),),
                VGroup(
                    Item('index_lhs',
                         show_label=True,
                         style='readonly'),
                    Item('threshold_value',
                         show_label=True,
                         label='Threshold',
                         style='readonly'),
                    Item('nu_value',
                         show_label=True,
                         label='Nu',
                         style='readonly'),
                    Item('size_sobol_expriment_metamodel',
                         show_label=True,
                         label='Experiment size MM',
                         style='readonly')),
                ),
            Item('figure',
                 editor = _MPLFigureEditor(),
                 show_label=False,
                 width = 0.75)),
        handler = interfaceGUIHandler(),
        resizable = True,
        scrollable = False,
        height = 1,
        width = 1,
        title = 'Blood Analysis Interface',
        icon = 'Interface')
    def __init__(self):
        """Load all meta-analysis result arrays once and keep them as attributes."""
        # NOTE(review): super().__init__() (HasTraits.__init__) is not called
        # here -- confirm this is intentional.
        self.xlabels = ['SE', 'SD', 'SFN', 'SFP']
        output = loadMetaAnalysisResults()
        self.lhs_params = output[0]
        self.lhs_doe = output[1]
        self.meta_analysis_array_model = output[2]
        self.meta_analysis_array_mm1000 = output[3]
        self.meta_analysis_array_mm50000 = output[4]
        self.kl_dimension_array = output[5]
        self.nu_array = output[6]
        self.thresh_array = output[7]
    def _figure_default(self):
        """Initialises the display."""
        x_val = np.array([0,1,2,3])  # NOTE(review): unused here
        figure = Figure()
        # Dummy flat series (0.25 +/- 0.1) until real indices are selected.
        figure = set_indices_figure(figure,
                                    np.zeros((4,))+.25, np.ones((2,4))*.1,
                                    np.zeros((4,))+.25, np.ones((2,4))*.1,
                                    np.zeros((4,))+.25, np.ones((2,4))*.1,
                                    np.zeros((4,))+.25, np.ones((2,4))*.1)
        print('Figure initialized')
        # Set matplotlib canvas colour to be white
        rect = figure.patch
        rect.set_facecolor([0.6,0.6,0.6])
        return figure
    def getDataFromIndex(self):
        """Return the 8 arrays set_indices_figure() needs for the current indices.

        index_eval_size selects the metamodel result set (0 -> mm1000,
        1 -> mm50000); uninitialized indices fall back to dummy data.
        """
        if self.index_lhs >= 0 and self.index_nu >= 0 and self.index_thresh >= 0 and self.index_eval_size == 0:
            sobol_model = self.meta_analysis_array_model[self.index_lhs,self.index_nu,self.index_thresh,:, 0]
            err_model = self.meta_analysis_array_model[self.index_lhs,self.index_nu, self.index_thresh, :, 1:]
            err_model = np.array([err_model[:,0],err_model[:,1]])
            sobol_metaLHS25 = self.meta_analysis_array_mm1000[self.index_lhs,self.index_nu,self.index_thresh,0,:, 0]
            err_metaLHS25 = self.meta_analysis_array_mm1000[self.index_lhs,self.index_nu,self.index_thresh,0,:, 1:]
            err_metaLHS25 = np.array([err_metaLHS25[:,0],err_metaLHS25[:,1]])
            sobol_metaLHS50 = self.meta_analysis_array_mm1000[self.index_lhs,self.index_nu,self.index_thresh,1,:, 0]
            err_metaLHS50 = self.meta_analysis_array_mm1000[self.index_lhs,self.index_nu,self.index_thresh,1,:, 1:]
            err_metaLHS50 = np.array([err_metaLHS50[:,0],err_metaLHS50[:,1]])
            sobol_metaLHS100 = self.meta_analysis_array_mm1000[self.index_lhs,self.index_nu,self.index_thresh,2,:, 0]
            err_metaLHS100 = self.meta_analysis_array_mm1000[self.index_lhs,self.index_nu,self.index_thresh,2,:, 1:]
            err_metaLHS100 = np.array([err_metaLHS100[:,0],err_metaLHS100[:,1]])
            return sobol_model, err_model, sobol_metaLHS25, err_metaLHS25, sobol_metaLHS50, err_metaLHS50, sobol_metaLHS100, err_metaLHS100
        elif self.index_lhs >= 0 and self.index_nu >= 0 and self.index_thresh >= 0 and self.index_eval_size == 1:
            sobol_model = self.meta_analysis_array_model[self.index_lhs,self.index_nu,self.index_thresh,:, 0]
            err_model = self.meta_analysis_array_model[self.index_lhs,self.index_nu, self.index_thresh, :, 1:]
            err_model = np.array([err_model[:,0],err_model[:,1]])
            sobol_metaLHS25 = self.meta_analysis_array_mm50000[self.index_lhs,self.index_nu,self.index_thresh,0,:, 0]
            err_metaLHS25 = self.meta_analysis_array_mm50000[self.index_lhs,self.index_nu,self.index_thresh,0,:, 1:]
            err_metaLHS25 = np.array([err_metaLHS25[:,0],err_metaLHS25[:,1]])
            sobol_metaLHS50 = self.meta_analysis_array_mm50000[self.index_lhs,self.index_nu,self.index_thresh,1,:, 0]
            err_metaLHS50 = self.meta_analysis_array_mm50000[self.index_lhs,self.index_nu,self.index_thresh,1,:, 1:]
            err_metaLHS50 = np.array([err_metaLHS50[:,0],err_metaLHS50[:,1]])
            sobol_metaLHS100 = self.meta_analysis_array_mm50000[self.index_lhs,self.index_nu,self.index_thresh,2,:, 0]
            err_metaLHS100 = self.meta_analysis_array_mm50000[self.index_lhs,self.index_nu,self.index_thresh,2,:, 1:]
            err_metaLHS100 = np.array([err_metaLHS100[:,0],err_metaLHS100[:,1]])
            return sobol_model, err_model, sobol_metaLHS25, err_metaLHS25, sobol_metaLHS50, err_metaLHS50, sobol_metaLHS100, err_metaLHS100
        else :
            #dummy sobol / dummy error
            ds = np.zeros((4,))+.25
            de = np.ones((2,4))*.1
            return ds, de, ds, de, ds, de, ds, de
    def regenerate_plot(self):
        """Refresh the side-panel text fields and redraw the error-bar figure."""
        # NOTE(review): the divisors (210000, 1000, 10, 100, 500) look like
        # normalization constants of the LHS design -- confirm their meaning.
        self.young_modulus_params = str(round(self.lhs_doe[self.index_lhs, 0] / 210000 ,4))+' / '+ str(round(self.lhs_doe[self.index_lhs, 1] / 1000,4))
        self.diameter_params = str(round(self.lhs_doe[self.index_lhs, 2]/10,4))+ ' / '+ str(round(self.lhs_doe[self.index_lhs, 3] / 1000,4))
        self.force_norm_params = str(round(self.lhs_doe[self.index_lhs, 5]/100,4))
        self.force_pos_params = str(round(self.lhs_doe[self.index_lhs, 4]/500,4))
        self.kl_size_params = int(self.kl_dimension_array[self.index_lhs, self.index_nu, self.index_thresh])
        sobol_model, err_model, sobol_metaLHS25, err_metaLHS25, sobol_metaLHS50, err_metaLHS50, sobol_metaLHS100, err_metaLHS100 = self.getDataFromIndex()
        figure = self.figure
        figure.clear()
        set_indices_figure(figure,
                           sobol_model, err_model,
                           sobol_metaLHS25, err_metaLHS25,
                           sobol_metaLHS50, err_metaLHS50,
                           sobol_metaLHS100, err_metaLHS100)
        canvas = self.figure.canvas
        if canvas is not None:
            canvas.draw()
    def _init_index(self):
        """Lazily move from the 'uninitialized' state (-1) to realization 0."""
        if self.index_lhs == -1 :
            self.index_lhs = 0
            self.threshold_value = self.thresh_array[self.index_lhs, self.index_nu, self.index_thresh]
            self.nu_value = self.nu_array[self.index_lhs, self.index_nu, self.index_thresh]
    def _index_lhs_changed(self):
        # Trait-change notification: any index change triggers a redraw.
        self.regenerate_plot()
    def _index_thresh_changed(self):
        self.regenerate_plot()
        self.threshold_value = self.thresh_array[self.index_lhs, self.index_nu, self.index_thresh]
    def _index_eval_size_changed(self):
        self.regenerate_plot()
    def _index_nu_changed(self):
        self.regenerate_plot()
        self.nu_value = self.nu_array[self.index_lhs, self.index_nu, self.index_thresh]
    def _next_realization_fired(self):
        # Cycle forward through the LHS realizations (wraps around).
        self._init_index()
        self.index_lhs = (self.index_lhs + 1)%self.lhs_doe.shape[0]
    def _previous_realization_fired(self):
        # Cycle backward through the LHS realizations (wraps around).
        self._init_index()
        self.index_lhs = (self.index_lhs - 1)%self.lhs_doe.shape[0]
    def _change_sobol_size_LHS_fired(self):
        # Toggle between the 1000- and 50000-point Sobol experiment results.
        self._init_index()
        if self.index_eval_size == 0 :
            self.index_eval_size = 1
            self.size_sobol_expriment_metamodel = 50000
        else :
            self.index_eval_size = 0
            self.size_sobol_expriment_metamodel = 1000
    def _change_threshold_fired(self):
        # Cycle through the 3 threshold cases.
        self._init_index()
        self.index_thresh = (self.index_thresh + 1)%3
    def _change_nu_fired(self):
        # Cycle through the 3 nu cases.
        self._init_index()
        self.index_nu = (self.index_nu + 1)%3
    def mpl_setup(self):
        """Hook called by interfaceGUIHandler.init(); nothing to set up."""
        pass
if __name__ == '__main__':
    # Launch the TraitsUI event loop with the visualizer as the model.
    view = metaAnalysisVisualizer()
    view.configure_traits()
'''
import meta_model_analysis_visualization as mmav
X = mmav.metaAnalysisVisualizer()
X.configure_traits()
''' |
# Merge per-channel REDD .dat files into one CSV file per house, one row per
# input line, padding missing lines of shorter files with empty fields.
# NOTE(review): originally Python-2-only (izip_longest, str written to a
# binary-mode file); the fallback import and text-mode output below keep the
# behaviour identical while also running on Python 3.
try:
    from itertools import izip_longest  # Python 2
except ImportError:
    from itertools import zip_longest as izip_longest  # Python 3

outpath = "redd-test/"


def _combine_channels(house_dir, channel_files, out_name):
    """Zip the given channel files line-by-line into outpath/out_name as CSV."""
    handles = [open(house_dir + name) for name in channel_files]
    try:
        with open(outpath + out_name, 'w') as res:
            for row in izip_longest(*handles, fillvalue=""):
                res.write(",".join(line.rstrip() for line in row) + "\n")
    finally:
        for handle in handles:
            handle.close()


# Channel lists preserve the original column order of each house's output.
_combine_channels("redd-test/house_1/",
                  ['channel_5_6.dat', 'channel_6_7.dat', 'channel_7_5.dat',
                   'channel_8_5.dat', 'channel_9_3.dat', 'channel_11_9.dat',
                   'channel_12_10.dat', 'channel_17_3.dat',
                   'channel_18_3.dat', 'channel_15_5.dat'],
                  'redd-combined-1.dat')
_combine_channels("redd-test/house_2/",
                  ['channel_3_5.dat', 'channel_4_3.dat', 'channel_5_1.dat',
                   'channel_6_9.dat', 'channel_7_4.dat', 'channel_8_5.dat',
                   'channel_9_6.dat'],
                  'redd-combined-2.dat')
_combine_channels("redd-test/house_3/",
                  ['channel_5_3.dat', 'channel_7_6.dat', 'channel_9_7.dat',
                   'channel_10_8.dat', 'channel_11_3.dat', 'channel_15_3.dat',
                   'channel_16_9.dat', 'channel_17_3.dat',
                   'channel_19_3.dat'],
                  'redd-combined-3.dat')
_combine_channels("redd-test/house_4/",
                  ['channel_3_3.dat', 'channel_4_8.dat', 'channel_5_5.dat',
                   'channel_7_4.dat', 'channel_8_1.dat', 'channel_13_3.dat',
                   'channel_14_5.dat', 'channel_18_3.dat',
                   'channel_19_3.dat'],
                  'redd-combined-4.dat')
_combine_channels("redd-test/house_5/",
                  ['channel_3_9.dat', 'channel_6_8.dat', 'channel_14_3.dat',
                   'channel_16_10.dat', 'channel_18_6.dat',
                   'channel_19_3.dat', 'channel_20_7.dat',
                   'channel_23_3.dat'],
                  'redd-combined-5.dat')
_combine_channels("redd-test/house_6/",
                  ['channel_3_5.dat', 'channel_4_4.dat', 'channel_5_1.dat',
                   'channel_7_10.dat', 'channel_8_6.dat', 'channel_12_2.dat',
                   'channel_13_2.dat', 'channel_14_3.dat',
                   'channel_15_2.dat'],
                  'redd-combined-6.dat')
|
<filename>tests/common/utils/test_http_utils.py<gh_stars>0
import os
import responses
import requests
from unittest import mock
from checkov.common.util.http_utils import request_wrapper
@responses.activate
@mock.patch.dict(os.environ, {"REQUEST_MAX_TRIES": "5", "SLEEP_BETWEEN_REQUEST_TRIES": "0.01"})
def test_request_wrapper_all_fail_with_connection_error_for_get_scan_result(mock_bc_integration):
    """GET scan-result: every attempt raises ConnectionError, so the error
    propagates after REQUEST_MAX_TRIES (5) attempts."""
    # given
    mock_url = mock_bc_integration.bc_api_url + "/api/v1/vulnerabilities/scan-results/2e97f5afea42664309f492a1e2083b43479c2936"
    responses.add(
        method=responses.GET,
        url=mock_url,
        body=requests.exceptions.ConnectionError()
    )
    # when / then: the exception escapes only after all retries are used up
    try:
        request_wrapper("GET", mock_url, {})
        assert False, "\'request_wrapper\' is expected to fail in this scenario"
    except requests.exceptions.ConnectionError:
        responses.assert_call_count(mock_url, 5)
@responses.activate
@mock.patch.dict(os.environ, {"REQUEST_MAX_TRIES": "5", "SLEEP_BETWEEN_REQUEST_TRIES": "0.01"})
def test_request_wrapper_all_fail_with_connection_error_for_post_scan(mock_bc_integration):
    """POST scan: every attempt raises ConnectionError, so the error propagates
    after REQUEST_MAX_TRIES (5) attempts."""
    # given
    mock_url = mock_bc_integration.bc_api_url + "/api/v1/vulnerabilities/scan"
    responses.add(
        method=responses.POST,
        url=mock_url,
        body=requests.exceptions.ConnectionError()
    )
    # when / then
    try:
        request_wrapper("POST", mock_url, {}, data={'mocked_key': 'mocked_value'})
        assert False, "\'request_wrapper\' is expected to fail in this scenario"
    except requests.exceptions.ConnectionError:
        responses.assert_call_count(mock_url, 5)
@responses.activate
@mock.patch.dict(os.environ, {"REQUEST_MAX_TRIES": "5", "SLEEP_BETWEEN_REQUEST_TRIES": "0.01"})
def test_request_wrapper_all_fail_with_http_error(mock_bc_integration):
    """HTTP 403 without should_call_raise_for_status: request_wrapper returns
    without raising and makes a single call -- presumably HTTP (non-connection)
    errors are not retried in this mode; confirm against request_wrapper."""
    # given
    mock_url = mock_bc_integration.bc_api_url + "/api/v1/vulnerabilities/twistcli?os=linux"
    responses.add(
        method=responses.GET,
        url=mock_url,
        json={'error': "mocked client error"},
        status=403
    )
    # when / then: no exception, exactly one request made
    request_wrapper("GET", mock_url, {})
    responses.assert_call_count(mock_url, 1)
@responses.activate
@mock.patch.dict(os.environ, {"REQUEST_MAX_TRIES": "5", "SLEEP_BETWEEN_REQUEST_TRIES": "0.01"})
def test_request_wrapper_all_fail_with_http_error_should_call_raise_for_status(mock_bc_integration):
    """HTTP 403 with should_call_raise_for_status=True: raise_for_status turns
    the 403 into HTTPError, which is retried REQUEST_MAX_TRIES (5) times."""
    # given
    mock_url = mock_bc_integration.bc_api_url + "/api/v1/vulnerabilities/twistcli?os=linux"
    responses.add(
        method=responses.GET,
        url=mock_url,
        json={'error': "mocked client error"},
        status=403
    )
    # when / then
    try:
        request_wrapper("GET", mock_url, {}, should_call_raise_for_status=True)
        assert False, "\'request_wrapper\' is expected to fail in this scenario"
    except requests.exceptions.HTTPError:
        responses.assert_call_count(mock_url, 5)
@responses.activate
@mock.patch.dict(os.environ, {"REQUEST_MAX_TRIES": "3", "SLEEP_BETWEEN_REQUEST_TRIES": "0.01"})
def test_request_wrapper_with_success_for_get_scan_result(mock_bc_integration, scan_result_success_response):
    """Successful GET scan-result: exactly one request, no retries."""
    # given
    mock_url = mock_bc_integration.bc_api_url + "/api/v1/vulnerabilities/scan-results/2e97f5afea42664309f492a1e2083b43479c2936"
    responses.add(
        method=responses.GET,
        url=mock_url,
        json=scan_result_success_response,
        status=200
    )
    # when / then
    request_wrapper("GET", mock_url, {})
    responses.assert_call_count(mock_url, 1)
@responses.activate
@mock.patch.dict(os.environ, {"REQUEST_MAX_TRIES": "3", "SLEEP_BETWEEN_REQUEST_TRIES": "0.01"})
def test_request_wrapper_with_success_for_download_twistcli(mock_bc_integration):
    """Successful twistcli download GET: exactly one request, no retries."""
    # given
    mock_url = mock_bc_integration.bc_api_url + "/api/v1/vulnerabilities/twistcli?os=linux"
    responses.add(
        method=responses.GET,
        url=mock_url,
        json={},
        status=200
    )
    # when / then
    request_wrapper("GET", mock_url, {})
    responses.assert_call_count(mock_url, 1)
@responses.activate
@mock.patch.dict(os.environ, {"REQUEST_MAX_TRIES": "3", "SLEEP_BETWEEN_REQUEST_TRIES": "0.01"})
def test_request_wrapper_with_success_for_post_scan(mock_bc_integration, scan_result_success_response):
    """Successful POST scan: exactly one request, no retries."""
    # given
    mock_url = mock_bc_integration.bc_api_url + "/api/v1/vulnerabilities/scan"
    responses.add(
        method=responses.POST,
        url=mock_url,
        json=scan_result_success_response,
        status=200
    )
    # when / then
    request_wrapper("POST", mock_url, {}, data={'mocked_key': 'mocked_value'})
    responses.assert_call_count(mock_url, 1)
|
import json
import os
from functools import partial

import numpy as np
import pandas as pd
import scipy
import scipy.io.wavfile  # fix: `import scipy` alone does not expose scipy.io.wavfile
import torch
import torch.utils.data as data
from sklearn.model_selection import train_test_split
from tqdm import tqdm
# Root of the COUGHVID public dataset (machine-specific absolute path).
COUGHVID = "/home/shubham/datasets/coughvid/public_dataset"
metadf = pd.read_csv(os.path.join(COUGHVID,"all_metadata.csv"))
# useful_df = metadf[~metadf["status"].isna()] # and metadf["cough_detected"]>0.65]
useful_df = pd.read_csv(os.path.join(COUGHVID,"use_metadata.csv"))
# Pretrained VGGish audio-embedding model, loaded at import time.
# NOTE(review): .cuda() requires a GPU; importing this module fails without one.
vggish = torch.hub.load('harritaylor/torchvggish', 'vggish')
vggish.cuda().eval()
def vggish_reader(path):
    """Embed an audio file with the module-global VGGish model.

    Multi-frame outputs are mean-pooled over the frame axis so a single
    embedding vector is returned per file.
    """
    x = vggish(path).detach()
    if x.ndim > 1:
        # Several frames were produced: average them into one vector.
        x = x.mean(dim=0)
    return x
def make_dataset(metadata, audio_read=vggish_reader, label_read=None):
    """Build (features, labels) numpy arrays for the recordings in *metadata*.

    Parameters
    ----------
    metadata : pandas.DataFrame
        Must contain 'uuid' (file stem under COUGHVID) and 'Labels' columns.
    audio_read : callable
        Maps a .wav path to a feature tensor (default: VGGish embedding).
    label_read : dict or None
        Label-string -> int mapping; defaults to {'positive': 0, 'negative': 1}.
        (Fix: the original used a mutable dict as the default argument.)
        NOTE(review): Loader below maps {'positive': 1, 'negative': -1} --
        confirm which convention downstream code expects.

    Returns
    -------
    (numpy.ndarray, numpy.ndarray)
        Feature matrix and label vector.
    """
    if label_read is None:
        label_read = {'positive': 0, 'negative': 1}
    flist = metadata["uuid"]
    labels = metadata["Labels"]
    x, y = [], []
    for idx in tqdm(list(metadata.index)):
        wav_path = os.path.join(COUGHVID, flist[idx] + '.wav')
        x.append(audio_read(wav_path).cpu().detach().numpy())
        y.append(label_read[labels[idx]])
    return np.array(x), np.array(y)
# The PASE model use the raw (padded) signal as input
def audio_reader(path, max_seconds):
    """Read a WAV file, scale int16 samples to [-1, 1), and return exactly
    16000 * max_seconds float32 samples (zero-padded or truncated).

    NOTE(review): the file's own sample rate is ignored -- input is assumed
    to already be 16 kHz.
    """
    target_len = 16000 * max_seconds
    _rate, raw = scipy.io.wavfile.read(path)
    scaled = (raw / 32768).astype(np.float32)  # int16 full scale -> -1..1
    if len(scaled) >= target_len:
        return scaled[:target_len]
    padded = np.zeros(target_len, dtype=np.float32)
    padded[:len(scaled)] = scaled
    return padded
def audio_labeler(file_name,df):
    """Placeholder labeler: always returns class 0 (both arguments unused)."""
    return 0
# PyTorch Dataset
class Loader(data.Dataset):
    """PyTorch Dataset yielding (uuid, padded raw waveform, label) triples.

    Labels are mapped as {'positive': 1, 'negative': -1}.
    NOTE(review): make_dataset above uses {'positive': 0, 'negative': 1} --
    confirm which convention is intended.
    """
    def __init__(self, metadata, max_seconds=20):
        # classes, weight, class_to_id = get_classes()
        # self.classes = classes
        self.weight = None
        # self.class_to_id = {label: 2*i-1 for i, label in enumerate(classes)}
        self.df = metadata
        self.audio_read = partial(audio_reader,max_seconds=max_seconds)
        self.label_read = {'positive':1,'negative':-1}
    def __getitem__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: (key, audio, target) -- recording uuid, zero-padded
            waveform from audio_reader, and the mapped integer label.
        """
        info = self.df.iloc[index]
        key = info["uuid"]
        filename = info["uuid"] + '.wav'
        label = info["Labels"]
        path = os.path.join(COUGHVID , filename)
        audio = self.audio_read(path)
        target = self.label_read[label]
        return key, audio, target
    def __len__(self):
        return len(self.df)
def wav_read(path):
    """Read a WAV file; return (samples scaled to -1..1, sample rate)."""
    rate, raw = scipy.io.wavfile.read(path)
    scaled = raw / 32768  # int16 full scale -> float in [-1, 1)
    return scaled, rate
# Create a dataset with (key, wave_file, target_id) entries
## ?????????????
# def make_dataset(kaldi_path, class_to_id):
# text_path = os.path.join(kaldi_path, 'text') # labels
# wav_path = os.path.join(kaldi_path, 'wav.scp') # audio files
# key_to_word = dict()
# key_to_wav = dict()
# with open(wav_path, 'rt') as wav_scp:
# for line in wav_scp:
# key, wav = line.strip().split(' ', 1)
# key_to_wav[key] = wav
# key_to_word[key] = None # default
# if os.path.isfile(text_path):
# with open(text_path, 'rt') as text:
# for line in text:
# key, word = line.strip().split(' ', 1)
# key_to_word[key] = word
# wavs = []
# for key, wav_command in key_to_wav.items():
# word = key_to_word[key]
# word_id = class_to_id[word] if word is not None else -1 # default for test
# wav_item = [key, wav_command, word_id]
# wavs.append(wav_item)
# return wavs
## prepare dataset :-|
def coughvid_dataset():
    """Collect the per-recording JSON metadata under COUGHVID into a DataFrame.

    Each record gains a 'uuid' column taken from its file name (without the
    '.json' suffix).  The result was saved by the author as the all-metadata
    CSV.  (Fix: the original also built an unused `wav_list`.)
    """
    flist = os.listdir(COUGHVID)
    json_list = [f for f in flist if f.endswith(".json")]
    metalist = []
    for j in json_list:
        js = os.path.join(COUGHVID, j)
        with open(js) as f:
            temp = json.load(f)
        temp['uuid'] = j[:-5]  # strip the '.json' suffix
        metalist.append(temp)
    return pd.DataFrame(metalist)
|
<reponame>takuyakawanishi/kldmwr
import matplotlib.pyplot as plt
from matplotlib import lines, rc
#from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import (MultipleLocator, FormatStrFormatter,
AutoMinorLocator)
import numpy as np
#import pandas as pd
#import sys
#import warnings
# Global matplotlib configuration for all figures in this module.
# NOTE(review): usetex is given the *string* 'False' -- this relies on
# matplotlib's rcParams boolean coercion; verify it actually disables TeX.
rc('text', usetex='False')
rc('mathtext', default='regular')
rc('mathtext', fontset='stix')
WFG = 4.5
FSTL = 7  # Size of Tick Labels
FSTI = 5  # Size of Tick labels of mInor tics
FSAL = 8  # Font size axis labels
FSFA = 7  # Font size figure annotation
LWT = .32  # Line width for thin line.
CSP = 'k'  # Spine color (black)
MKS = 2.7  # Marker size
MSP = 2.7  # Marker Size for Plot
LWP = .32  # Line Width for Plot
LWPT = .8  # Line width for plot (thick)
def color_pallet_cud():
    """Color Universal Design (CUD) palette; RGB channels scaled to 0-1."""
    def scale(value):
        # Same arithmetic as the `value / 255` terms in the original literal.
        return value / 255

    return {
        'red': (scale(255), scale(75), 0),
        'yellow': (scale(255), scale(241), 0),
        'green': (scale(3), scale(175), scale(122)),
        'blue': (0, scale(90), scale(255)),
        'sky': (scale(77), scale(196), scale(255)),
        'pink': (scale(255), scale(128), scale(130)),
        'orange': (scale(246), scale(170), 0),
        'purple': (scale(153), 0, scale(153)),
        'brown': (scale(128), scale(64), 0),
    }
def color_palette_okabe_ito():
    """Okabe-Ito colorblind-safe palette; RGB channels in 0-1."""
    names = ['black', 'orange', 'sky blue', 'bluish green',
             'yellow', 'blue', 'vermillion', 'reddish purple']
    values = [(0, 0, 0), (.90, .60, 0), (.35, .70, .90), (0, .6, .5),
              (.95, .90, .25), (0, .45, .7), (.8, .4, 0), (.8, .6, .7)]
    return dict(zip(names, values))
def color_palette_ichihara_et_al():
    """Ichihara et al. palette; RGB channels in 0-1."""
    def scale(value):
        return value / 255

    palette = dict()
    palette['black'] = (0, 0, 0)
    palette['red'] = (scale(255), scale(59), 0)
    palette['blue'] = (0, scale(179), 1)
    palette['green'] = (scale(34), scale(230), scale(92))
    return palette
def color_palette_gs(steps, i):
    """Return grey level i of an evenly spaced (steps+1)-entry greyscale ramp."""
    ramp = [(level / steps,) * 3 for level in range(steps + 1)]
    return ramp[i]
def set_figure_dimensions_in_points():
    """Default layout dimensions in points: margins, gaps, spine sizes and
    axis-label / tick-label areas, plus the derived totals 'wl' and 'hl'."""
    dims = dict(
        wom=0,   # width of outer margin
        hom=0,   # height of outer margin
        wfm=4,   # width of figure margin
        hfm=4,   # height of figure margin
        wpg=4,   # width of panel gap
        hpg=4,   # height of panel gap
        ws=72,   # spine width
        wyl=7,   # y-axis label width
        wtl=20,  # y tick-label width
        hs=72,   # spine height
        hxl=12,  # x-axis label height
        htl=12,  # x tick-label height
        wsp=0,   # spine pad width (subtracted)
        hsp=0,   # spine pad height (subtracted)
        wfg=8,   # width of figure gap
        hfg=8,   # height of figure gap
    )
    # Total label-area extents derived from their components.
    dims['wl'] = dims['wyl'] + dims['wtl']
    dims['hl'] = dims['hxl'] + dims['htl']
    return dims
def calc_wt_ht(nhor, nver, syl, sxl, dimensions):
    """Total width/height in points of an nhor x nver panel grid.

    syl / sxl are per-column / per-row [show_label, show_ticklabel] flag
    arrays; every shown label or tick-label adds its area to the total.
    """
    d = dimensions
    wt = (nhor * d['ws'] + 2 * d['wfm'] + (nhor - 1) * d['wpg']
          + d['wyl'] * np.sum(syl[:, 0])
          + d['wtl'] * np.sum(syl[:, 1]))
    ht = (nver * d['hs'] + 2 * d['hfm'] + (nver - 1) * d['hpg']
          + d['hxl'] * np.sum(sxl[:, 0])
          + d['htl'] * np.sum(sxl[:, 1]))
    return wt, ht
def create_axes_in_points(nhor, nver, syl, sxl, dimensions):
    """Compute [left, bottom, width, height] in points for each panel axes.

    Panels are emitted column-major (ihor outer loop, iver inner).  syl / sxl
    are per-column / per-row [show_label, show_ticklabel] flags; each shown
    label or tick-label area shifts subsequent panels by wyl/wtl (horizontal)
    or hxl/htl (vertical) via the cumulative sums below.
    """
    syl = np.array(syl)
    sxl = np.array(sxl)
    wfm = dimensions['wfm']
    hfm = dimensions['hfm']
    wpg = dimensions['wpg']
    hpg = dimensions['hpg']
    ws = dimensions['ws']
    wyl = dimensions['wyl']
    wtl = dimensions['wtl']
    wsp = dimensions['wsp']
    hs = dimensions['hs']
    hxl = dimensions['hxl']
    htl = dimensions['htl']
    hsp = dimensions['hsp']
    #
    axs = []
    # Only ht (total height) is used below; wt is unpacked for symmetry.
    wt, ht = calc_wt_ht(nhor, nver, syl, sxl, dimensions)
    for ihor in range(nhor):
        for iver in range(nver):
            ax = [
                # Left edge: margins + earlier panels + label areas up to and
                # including this column (note the :ihor+1 cumulative slice).
                (wfm + ihor * (wpg + ws) + wsp +
                 wyl * np.sum(syl[:ihor + 1, 0]) +
                 wtl * np.sum(syl[:ihor + 1, 1])),
                #
                # Bottom edge: measured down from the total height ht.
                ht - hfm - (iver + 1) * hs + hsp - iver * hpg -
                hxl * np.sum(sxl[:iver, 0]) - htl * np.sum(sxl[:iver, 1]),
                (ws - wsp),
                (hs - hsp)
            ]
            axs.append(ax)
    return axs
def calc_figure_dimensions(
        n_figures, show_yaxis_label_ticks_g, show_xaxis_label_ticks_g,
        dimensions):
    """Overall figure width/height in points plus each sub-figure's (w, h).

    Sub-figures are stacked vertically with an 'hfg' gap between consecutive
    ones and an outer margin all round; the overall width is taken from the
    first sub-figure.
    """
    per_figure = []
    for k in range(n_figures):
        ncols = len(show_yaxis_label_ticks_g[k, :, 0])
        nrows = len(show_xaxis_label_ticks_g[k, :, 0])
        # Total width/height of the k-th sub-figure.
        per_figure.append(calc_wt_ht(
            ncols, nrows,
            show_yaxis_label_ticks_g[k], show_xaxis_label_ticks_g[k],
            dimensions))
    whfigs = np.array(per_figure)
    fig_height = (2 * dimensions['hom'] + np.sum(whfigs[:, 1])
                  + (n_figures - 1) * dimensions['hfg'])
    fig_width = 2 * dimensions['wom'] + whfigs[0, 0]
    return fig_width, fig_height, whfigs
def create_figure(fig_width_in_points, fig_height_in_points, enlargement):
    """Create a matplotlib figure sized in points (1 pt = 1/72 inch),
    uniformly scaled by *enlargement*, at 144 dpi."""
    points_to_inches = enlargement / 72
    return plt.figure(
        figsize=(fig_width_in_points * points_to_inches,
                 fig_height_in_points * points_to_inches),
        dpi=144)
def create_axes(fig, n_figures, show_yaxis_label_ticks_g,
                show_xaxis_label_ticks_g, dimensions, fig_width, fig_height,
                whfigs):
    """Add every panel's Axes to *fig*; returns a list (per sub-figure) of
    Axes lists.

    Panel rectangles are first computed in points per sub-figure, shifted to
    their sub-figure's placement within the stacked layout, then normalized
    to figure-fraction coordinates for fig.add_axes.
    """
    axs_in_points = []
    for i in range(n_figures):
        n_panel_horizontal = len(show_yaxis_label_ticks_g[i, :, 0])
        n_panel_vertical = len(show_xaxis_label_ticks_g[i, :, 0])
        axs_in_points.append(
            create_axes_in_points(
                n_panel_horizontal, n_panel_vertical,
                show_yaxis_label_ticks_g[i], show_xaxis_label_ticks_g[i],
                dimensions
            )
        )
    # (x, y) offset of each sub-figure, stacked top-to-bottom with hfg gaps.
    fig_placements = np.zeros((n_figures, 2))
    for i_figure in range(n_figures):
        fig_placements[i_figure, 0] = dimensions['wom']
        fig_placements[i_figure, 1] = fig_height - dimensions['hom'] - \
            np.sum(whfigs[:i_figure + 1, 1]) - \
            dimensions['hfg'] * i_figure
    global_axs_in_points = np.array(axs_in_points)
    for i_figure in range(n_figures):
        global_axs_in_points[i_figure, :, 0] = \
            global_axs_in_points[i_figure, :, 0] + fig_placements[i_figure, 0]
        global_axs_in_points[i_figure, :, 1] = \
            global_axs_in_points[i_figure, :, 1] + fig_placements[i_figure, 1]
    # Convert points to 0-1 figure fractions expected by fig.add_axes.
    gaxs = np.copy(global_axs_in_points)
    gaxs[:, :, 0] = gaxs[:, :, 0] / fig_width
    gaxs[:, :, 1] = gaxs[:, :, 1] / fig_height
    gaxs[:, :, 2] = gaxs[:, :, 2] / fig_width
    gaxs[:, :, 3] = gaxs[:, :, 3] / fig_height
    axss = []
    for i_figure in range(n_figures):
        faxs = []
        for j_panel in range(np.shape(gaxs)[1]):
            # ax = list(gaxs[i_figure, j_panel, :])
            faxs.append(fig.add_axes(gaxs[i_figure, j_panel, :]))
        axss.append(faxs)
    return axss
class TFigure(object):
    """Multi-panel matplotlib figure builder with geometry given in points.

    ``syltf`` / ``sxltf`` are nested per-figure lists of
    ``[show_label, show_ticks]`` flags for the y- and x-axes; their shapes
    also define each figure's panel grid.  Geometry comes from a
    ``dimensions`` dict (keys such as 'ws', 'hs', 'wsp', 'hsp', ...); when
    not supplied it is built by ``set_figure_dimensions_in_points()`` and the
    panel height 'hs' is derived from ``aspect_ratio``.
    """

    def __init__(self, aspect_ratio=1, dimensions=None,
                 syltf=None, sxltf=None, enlargement=1.0,
                 spines_to_pad=None, wpad=None, hpad=None, pad=None
                 ):
        # Default: one figure containing a single fully labelled panel.
        if syltf is None:
            syltf = [[[1, 1]]]
        if sxltf is None:
            sxltf = [[[1, 1]]]
        if dimensions is None:  # fixed: compare to None with `is`, not `==`
            self.dimensions = set_figure_dimensions_in_points()
            self.dimensions['hs'] = self.dimensions['ws'] / aspect_ratio
        else:
            self.dimensions = dimensions
        self.show_yaxis_label_ticks_figs = np.array(syltf)
        self.show_xaxis_label_ticks_figs = np.array(sxltf)
        self.enlargement = enlargement
        self.spines_to_pad = spines_to_pad
        # Spine-padding overrides; `pad` (when given) wins over wpad/hpad.
        if wpad is not None:
            self.dimensions['wsp'] = wpad
        if hpad is not None:
            self.dimensions['hsp'] = hpad
        if pad is not None:
            self.dimensions['wsp'] = pad
            self.dimensions['hsp'] = pad
        # Derive figure and panel counts from the flag-array shapes.
        self.n_figures = len(self.show_yaxis_label_ticks_figs)
        self.n_panels = []
        self.n_panels_horizontal = []
        self.n_panels_vertical = []
        for i in range(self.n_figures):
            nhp = len(self.show_yaxis_label_ticks_figs[i, :, 0])
            nvp = len(self.show_xaxis_label_ticks_figs[i, :, 0])
            self.n_panels_horizontal.append(nhp)
            self.n_panels_vertical.append(nvp)
            self.n_panels.append(nhp * nvp)
        self.fig_width_in_points, self.fig_height_in_points, self.whfigs = \
            calc_figure_dimensions(
                self.n_figures,
                self.show_yaxis_label_ticks_figs,
                self.show_xaxis_label_ticks_figs,
                self.dimensions
            )
        self.wfp = self.fig_width_in_points
        self.hfp = self.fig_height_in_points
        # 1 pt = 1/72 inch; 1 inch = 25.4 mm.
        self.fig_height_mm = self.fig_height_in_points * 25.4 / 72 * \
            self.enlargement
        self.fig_width_mm = self.fig_width_in_points * 25.4 / 72 * \
            self.enlargement
        self.fig = create_figure(
            self.fig_width_in_points, self.fig_height_in_points,
            self.enlargement
        )
        self.axs = create_axes(
            self.fig, self.n_figures,
            self.show_yaxis_label_ticks_figs, self.show_xaxis_label_ticks_figs,
            self.dimensions, self.fig_width_in_points,
            self.fig_height_in_points,
            self.whfigs,
        )
        if spines_to_pad is not None:
            self.select_and_pad_spines(spines_to_pad)
        self.set_default_properties()
        self.show_tick_and_tick_labels_according_to_settings()

    def adjust_spines(self, ax, spines):
        """Move the named spines outward and style them; reset the rest."""
        for loc, spine in ax.spines.items():
            if loc in spines:
                pad = 0
                # NOTE(review): 'bottom' is padded with the *width* spacing
                # and 'left' with the *height* spacing — looks swapped, but
                # preserved as-is; confirm against the layout code.
                if loc == 'bottom':
                    pad = self.dimensions['wsp'] * self.enlargement
                elif loc == 'left':
                    pad = self.dimensions['hsp'] * self.enlargement
                spine.set_position(('outward', pad))
                spine.set_linewidth(LWT)
                spine.set_color(CSP)
            else:
                # NOTE(review): set_color(None) resets to the default color;
                # 'none' would hide the spine — confirm which is intended.
                spine.set_color(None)

    def set_default_properties(self):
        """Apply the house style: spine widths, tick params, Arial labels."""
        for i in range(self.n_figures):
            for ax in self.axs[i]:
                for spine in ['top', 'right', 'bottom', 'left']:
                    ax.spines[spine].set_linewidth(LWT)
                ax.tick_params(
                    axis='both', which='both', width=LWT,
                    direction='out', labelsize=FSTL
                )
                for tick in ax.get_yticklabels():
                    tick.set_fontname("Arial")
                for tick in ax.get_xticklabels():
                    tick.set_fontname("Arial")

    def select_and_pad_spines(self, spines):
        """Pad the chosen spines and hide spines of unlabelled panels."""
        for i in range(self.n_figures):
            for i_ax, ax in enumerate(self.axs[i]):
                self.adjust_spines(ax, spines)
                ax.tick_params(
                    axis='both', which='both', width=LWT,
                    direction='out', labelsize=FSTL)
                nvp_figi = self.n_panels_vertical[i]
                # fixed: np.int() was removed in NumPy 1.24; plain integer
                # divmod locates the panel in the grid identically.
                ihor, ivar = divmod(i_ax, nvp_figi)
                vis_x = self.show_xaxis_label_ticks_figs[i, ivar, 1]
                vis_y = self.show_yaxis_label_ticks_figs[i, ihor, 1]
                if vis_x == 0:
                    ax.spines['bottom'].set_visible(False)
                    ax.spines['top'].set_visible(False)
                if vis_y == 0:
                    ax.spines['left'].set_visible(False)
                    ax.spines['right'].set_visible(False)

    def show_tick_and_tick_labels_according_to_settings(self):
        """Toggle whole-axis visibility from the per-panel flag arrays."""
        for i in range(self.n_figures):
            for i_ax, ax in enumerate(self.axs[i]):
                nvp_figi = self.n_panels_vertical[i]
                # fixed: np.int() removed in NumPy 1.24; also dropped the
                # unused horizontal-panel count local.
                ihor, ivar = divmod(i_ax, nvp_figi)
                ax.xaxis.set_visible(
                    self.show_xaxis_label_ticks_figs[i, ivar, 1]
                )
                ax.yaxis.set_visible(
                    self.show_yaxis_label_ticks_figs[i, ihor, 1]
                )

    def set_ylabel_ale(self, ax, text, fontsize=7):
        """Annotate a rotated y-label at a fixed offset left of the axes."""
        wax = self.dimensions['ws'] - self.dimensions['wsp']
        wltp = self.dimensions['wyl'] + self.dimensions['wtl'] + \
            self.dimensions['wsp']
        pyl = - wltp / wax
        ax.annotate(
            text, xy=(0, 0), xytext=(pyl, 0.5), xycoords='axes fraction',
            textcoords='axes fraction', ha='left', va='center',
            fontsize=fontsize, rotation=90
        )
        return ax

    def set_xlabel_ale(self, ax, text, fontsize=7):
        """Annotate an x-label at a fixed offset below the axes."""
        hax = self.dimensions['hs'] - self.dimensions['hsp']
        hltp = self.dimensions['hxl'] + self.dimensions['htl'] + \
            self.dimensions['hsp']
        pxl = - hltp / hax
        ax.annotate(
            text, xy=(0, 0), xytext=(0.5, pxl),
            xycoords='axes fraction', textcoords='axes fraction',
            ha='center', va='bottom',
            fontsize=fontsize, rotation=0
        )
        return ax
def main():
    """Smoke test: build a two-figure, 2x2-panel TFigure and dump layout."""
    fig = TFigure(
        enlargement=1.2,
        sxltf=[[[0, 0], [1, 1]], [[0, 0], [1, 1]]],
        syltf=[[[1, 1], [0, 0]], [[1, 1], [0, 0]]],
        spines_to_pad=['bottom', 'left'], wpad=2, hpad=2
    )
    print(fig.show_xaxis_label_ticks_figs, fig.show_yaxis_label_ticks_figs)
    print('119 mm wide and not higher than 195 mm.')
    print('figure width: {:.1f}, height: {:1f}'.
          format(fig.fig_width_mm, fig.fig_height_mm))
    print('Number of figures = ', fig.n_figures)
    print('Number of panels = ', fig.n_panels)
    print('Number of horizontal panels = ', fig.n_panels_horizontal)
    print('Number of vertical panels', fig.n_panels_vertical)
    print(len(fig.axs[0]))
    # Decorate every panel with minor ticks and dummy axis labels.
    for fig_axes in fig.axs:
        for panel_ax in fig_axes:
            panel_ax.xaxis.set_minor_locator(AutoMinorLocator())
            fig.set_ylabel_ale(panel_ax, '$\\mathit{a}$')
            fig.set_xlabel_ale(panel_ax, '$\\mathit{b}$')
    dims = fig.dimensions
    ws = dims['ws']
    wl = dims['wl']
    wax = ws - dims['wsp']
    wltp = wl + dims['wsp']
    el = wltp / wax
    print(ws, wl, wltp, dims['wsp'])
    print(el)
    ax = fig.axs[0][0]
    ax.annotate('$a$', xy=(0, 0), xytext=(-el, 0.5),
                textcoords='axes fraction',
                ha='left', va='center')
    ax.set_xlim(0, 1)
    print(fig.wfp, fig.hfp)
    # Cross-check the overall width/height bookkeeping.
    wfm = dims['wfm']
    hfm = dims['hfm']
    ws = dims['ws']
    hom = dims['hom']
    hs = dims['hs']
    htl = dims['htl']
    hxl = dims['hxl']
    hpg = dims['hpg']
    hfg = dims['hfg']
    s = wfm * 2 + ws * 2  # width check value (not printed)
    print('hfp = ', fig.hfp)
    print('hfm = ', hfm)
    print('hs = ', hs)
    print('hpg = ', hpg)
    print('hom = ', hom)
    s_h_p = hfm * 2 + hs * 2 + hxl + htl + hpg
    s_h = 2 * hom + 4 * hfm + 4 * hs + 2 * htl + 2 * hxl + 2 * hpg + hfg
    print(s_h_p)
    print(s_h)
    print(fig.whfigs)
    # Label-offset fractions, mirroring set_ylabel_ale / set_xlabel_ale.
    wax = dims['ws'] - dims['wsp']
    wltp = dims['wyl'] + dims['wtl'] + \
        dims['wsp']
    pyl = - wltp / wax
    hax = dims['hs'] - dims['hsp']
    hltp = dims['hxl'] + dims['htl'] + \
        dims['hsp']
    pxl = - hltp / hax
    plt.show()
# sxltf = [[[0, 0], [1, 1]],
# [[0, 0], [1, 1]]],
# Allow running this module directly as a script.
if __name__ == '__main__':
    main()
|
#!/usr/bin/env python2
import re
import pprint
import subprocess
# Human-readable labels for the generated Textile comparison table:
# mock-library crate names first, then the feature/metadata identifiers
# produced by the test-suite names.
descs = {
    "double": "Double",
    "galvanic_mock": "Galvanic-mock",
    "mock_derive": "Mock_Derive",
    "mock_it": "Mock-it",
    "mockall": "Mockall",
    "mockers": "Mockers",
    "mockiato": "Mockiato",
    "mocktopus": "Mocktopus",
    "pseudo": "Pseudo",
    "simulacrum": "Simulacrum",
    "associated_types": "Associated types",
    "checkpoint": "Checkpoints",
    "closures": "Closures",
    "reference_parameters": "Reference parameters",
    "consume_parameters": "Consume parameters",
    "consume_self": "Consume self",
    "doctest": "Doctest",
    "external_trait": "External traits",
    "foreign": "Foreign",
    "generic_method": "Generic methods",
    "generic_method_with_lifetime": "Generic methods with lifetime parameters",
    "generic_return": "Generic return",
    "generic_struct": "Generic structs",
    "generic_trait": "Generic traits",
    "impl_trait": "Impl Trait",
    "inherited_trait": "Inherited traits",
    "match_method": "Match function",
    "mock_struct": "Structs",
    "mock_trait": "Traits",
    "multi_trait": "Multiple traits",
    "return_call_with_args": "Return call with args",
    "return_reference": "Return reference",
    "return_mutable_reference": "Return mutable reference",
    "return_owned": "Return owned",
    "return_parameters": "Return parameters",
    "send": "Send",
    "sequence": "Sequence",
    "static_method": "Static methods",
    "times_range": "Times range",
    "where_clause": "Where clauses",
    "derive": "Derive",
    "fallback": "Fallback",
    "match_combo": "Match combinations",
    "match_constant": "Match constant",
    "match_operator": "Match operator",
    "match_pattern": "Match pattern",
    "match_range": "Match range",
    "match_wildcard": "Match wildcard",
    "modules": "Mock modules",
    "return_constant": "Return a constant",
    "return_default": "Return default",
    "return_panic": "Return panic",
    "times_once": "Times once",
    "times_any": "Times any",
    "times_n": "Times n",
    "times_never": "Times never",
    "many_args": "Maximum arguments",
    "rustc": "Rustc",
    "first_release": "First release",
    "version": "Tested version",
    "link": "Current version",
}
def format_cell(s):
    """Render one result string ("<text> <status>") as a colored Textile cell.

    The last whitespace-separated word is the test status; everything before
    it is the display text.  Status/version decide the background color, and
    an empty text falls back to a yes/no word derived from the status.
    """
    words = s.split(" ")
    result = words[-1]
    text = " ".join(words[:-1])
    if '<img ' in text:
        bg = "white"
    elif result == "ok":
        # Pre-1.0 crate versions get the orange "works but immature" color.
        bg = "#fe7d37" if re.match("^0\.[0-9]+\.[0-9]+", text) else "#ADEBAD"
    elif result == "warn":
        bg = "#FFEF99"
    elif result == "-":
        bg = "white"
    else:
        bg = "#EB9999"
    if not text:
        text = {"error": "no", "ok": "yes", "FAILED": "no"}[result]
    return "{background:%s}.%s" % (bg, text)
def print_row(feature, results):
    # Emit one Textile table row: the human-readable label for `feature`
    # plus one formatted cell per library.
    # NOTE(review): relies on the module-level `libnames` list, which is
    # only assigned further down in the script, so this must be called only
    # after results collection.
    result_details = "|".join([format_cell(results[l][feature])
        for l in libnames])
    print "|%21s|%s|" % (descs[feature], result_details)
# First, run the tests and collect results
# (Python 2 script: print statements and dict.has_key below.)
results = {}
p1 = subprocess.Popen(["cargo", "+nightly", "test", "-v",
    "--no-fail-fast", "--", "--nocapture", "--test-threads=1"],
    stdout=subprocess.PIPE)
output = p1.communicate()[0]
# Parse cargo's output: unit-test result lines first, doctest lines as a
# fallback; groups are (library, feature, status).
for line in output.splitlines():
    match = re.match("^test t_(\w+)::(?:mod_t::)?t::(\w+) \.\.\. (.+)$", line)
    if not match:
        match = re.match(
            "^test src/t_(\w+)\.rs - \w+::(doctest) \(line \d+\) \.\.\. (\w+)", line)
    if match:
        lib = match.group(1)
        feature = match.group(2)
        result = match.group(3)
        if not results.has_key(lib):
            results[lib] = {}
        results[lib][feature] = result
# Manually add a few more data
# (compiler-channel support and first-release dates are not test outputs).
results['double']['rustc'] = "stable ok"
results['galvanic_mock']['rustc'] = "nightly warn"
# results['mock_derive']['rustc'] = "nightly < 1.28.0 error"
results['mockall']['rustc'] = "stable ok"
results['mockers']['rustc'] = "stable ok"
results['mockiato']['rustc'] = "stable ok"
results['mocktopus']['rustc'] = "nightly warn"
results['pseudo']['rustc'] = "stable ok"
results['simulacrum']['rustc'] = "stable ok"
results['mock_it']['rustc'] = "stable ok"
results['double']['first_release'] = "Dec-12-2017 -"
results['galvanic_mock']['first_release'] = "Aug-13-2017 -"
# results['mock_derive']['first_release'] = "Jul-16-2017 -"
results['mockall']['first_release'] = "Jul-3-2019 -"
results['mockers']['first_release'] = "Apr-6-2016 -"
results['mockiato']['first_release'] = "Feb-11-2019 -"
results['mocktopus']['first_release'] = "Sep-5-2017 -"
results['pseudo']['first_release'] = "Mar-23-2017 -"
results['simulacrum']['first_release'] = "Dec-17-2017 -"
results['mock_it']['first_release'] = "Mar-11-2018 -"
# Finally, generate the table
libnames = sorted(results.keys())
lib_headers = "|_. ".join([descs[l] for l in libnames])
print "|_. |_.%s|" % lib_headers
essential_features = ["associated_types", "checkpoint", "closures",
    "reference_parameters", "consume_parameters", "consume_self", "doctest",
    "external_trait", "foreign", "generic_method",
    "generic_method_with_lifetime", "generic_return",
    "generic_struct", "generic_trait", "inherited_trait", "match_method",
    "mock_struct", "mock_trait", "multi_trait", "return_call_with_args",
    "return_reference", "return_mutable_reference", "return_owned",
    "return_parameters", "send", "sequence", "static_method", "times_range",
    "where_clause"]
convenience_features = [ "derive", "fallback", "impl_trait", "match_combo",
    "match_constant", "match_operator", "match_pattern", "match_range",
    "match_wildcard", "modules", "return_constant", "return_default",
    "return_panic", "times_once", "times_any", "times_n", "times_never"]
other_features = [ "many_args", "rustc", "first_release", "version", "link"]
print "|\\10=. Essential Features|"
for feature in essential_features:
    print_row(feature, results)
print "|\\10=. Convenience Features|"
for feature in convenience_features:
    print_row(feature, results)
print "|\\10=. Other|"
for feature in other_features:
    print_row(feature, results)
|
<reponame>Ardavans/sHDP
from __future__ import division
import numpy as np
import abc, os
from nose.plugins.attrib import attr
from ..util import testing
class DistributionTester(object):
    """Abstract base for distribution test suites; subclasses provide the
    distribution class under test and the hyperparameter settings to try."""
    __metaclass__ = abc.ABCMeta  # Python 2 style ABC declaration
    @abc.abstractproperty
    def distribution_class(self):
        # The distribution class to instantiate in the tests.
        pass
    @abc.abstractproperty
    def hyperparameter_settings(self):
        # Iterable of kwargs dicts used to construct the distribution.
        pass
class BasicTester(DistributionTester):
    """Basic consistency checks: log-likelihood and sufficient statistics
    must agree whether the data is passed whole or split into pieces, and
    statistics must ignore NaN-marked (missing) rows."""
    @property
    def basic_data_size(self):
        # Number of samples drawn for each consistency check.
        return 1000
    def loglike_lists_tests(self):
        # NOTE(review): iterates geweke_hyperparameter_settings, which is
        # defined only on GewekeGibbsTester — presumably this should be
        # hyperparameter_settings, or subclasses must mix both in; confirm.
        for setting_idx, hypparam_dict in enumerate(self.geweke_hyperparameter_settings):
            yield self.check_loglike_lists, setting_idx, hypparam_dict
    def check_loglike_lists(self,setting_idx,hypparam_dict):
        # Whole-data log-likelihood must equal the sum over per-row splits.
        dist = self.distribution_class(**hypparam_dict)
        data = dist.rvs(self.basic_data_size)
        l1 = dist.log_likelihood(data).sum()
        l2 = sum(dist.log_likelihood(d) for d in np.array_split(data,self.basic_data_size))
        assert np.isclose(l1,l2)
    def stats_lists_tests(self):
        # NOTE(review): same geweke_hyperparameter_settings concern as above.
        for setting_idx, hypparam_dict in enumerate(self.geweke_hyperparameter_settings):
            yield self.check_stats_lists, setting_idx, hypparam_dict
    def check_stats_lists(self,setting_idx,hypparam_dict):
        # Statistics from the full array must match those from a list of
        # per-row pieces (only for distributions exposing _get_statistics).
        dist = self.distribution_class(**hypparam_dict)
        data = dist.rvs(self.basic_data_size)
        if hasattr(dist,'_get_statistics'):
            s1 = dist._get_statistics(data)
            s2 = dist._get_statistics([d for d in np.array_split(data,self.basic_data_size)])
            self._check_stats(s1,s2)
    def _check_stats(self,s1,s2):
        # Compare two statistics containers elementwise; supports object
        # arrays, plain arrays and tuples of arrays.
        if isinstance(s1,np.ndarray):
            if s1.dtype == np.object:
                assert all(np.allclose(t1,t2) for t1, t2 in zip(s1,s2))
            else:
                assert np.allclose(s1,s2)
        elif isinstance(s1,tuple):
            assert all(np.allclose(ss1,ss2) for ss1,ss2 in zip(s1,s2))
    def missing_data_tests(self):
        # NOTE(review): same geweke_hyperparameter_settings concern as above.
        for setting_idx, hypparam_dict in enumerate(self.geweke_hyperparameter_settings):
            yield self.check_missing_data_stats, setting_idx, hypparam_dict
    def check_missing_data_stats(self,setting_idx,hypparam_dict):
        # Marking random rows as NaN must be equivalent to dropping them.
        dist = self.distribution_class(**hypparam_dict)
        data = dist.rvs(self.basic_data_size)
        if isinstance(data,np.ndarray):
            data[np.random.randint(2,size=data.shape[0]) == 1] = np.nan
            s1 = dist._get_statistics(data)
            s2 = dist._get_statistics(data[~np.isnan(data).any(1)])
            self._check_stats(s1,s2)
class BigDataGibbsTester(DistributionTester):
    """Draws a large sample from one distribution instance and checks that
    resampling a fresh instance on that data recovers close parameters."""
    __metaclass__ = abc.ABCMeta
    @abc.abstractmethod
    def params_close(self,distn1,distn2):
        """Return True when the two distributions' parameters agree."""
        pass
    @property
    def big_data_size(self):
        # Sample count large enough for the posterior to concentrate.
        return 20000
    @property
    def big_data_repeats_per_setting(self):
        return 1
    @property
    def big_data_hyperparameter_settings(self):
        # Defaults to the generic settings; override to customize.
        return self.hyperparameter_settings
    def big_data_tests(self):
        """Yield one (check, idx, hypparams) triple per setting repetition."""
        for idx, hypparams in enumerate(self.big_data_hyperparameter_settings):
            for _ in range(self.big_data_repeats_per_setting):
                yield self.check_big_data, idx, hypparams
    def check_big_data(self,setting_idx,hypparam_dict):
        """Resample a fresh instance on generated data; parameters must match."""
        source = self.distribution_class(**hypparam_dict)
        fitted = self.distribution_class(**hypparam_dict)
        sample = source.rvs(self.big_data_size)
        fitted.resample(sample)
        assert self.params_close(source,fitted)
class GewekeGibbsTester(DistributionTester):
    """Geweke-style sampler test: statistics of forward-generated data must
    match those generated by iterating the Gibbs sampler (resample/rvs)."""
    __metaclass__ = abc.ABCMeta
    @abc.abstractmethod
    def geweke_statistics(self,distn,data):
        # Scalar or vector statistic summarizing (distribution, data).
        pass
    @property
    def geweke_nsamples(self):
        return 30000
    @property
    def geweke_data_size(self):
        return 1 # NOTE: more data usually means slower mixing
    @property
    def geweke_ntrials(self):
        return 3
    @property
    def geweke_pval(self):
        # Significance level for the moment-matching test.
        return 0.05
    @property
    def geweke_hyperparameter_settings(self):
        return self.hyperparameter_settings
    def geweke_numerical_slice(self,distn,setting_idx):
        # Which components of the statistic vector to compare numerically.
        return slice(None)
    @property
    def resample_kwargs(self):
        return {}
    @property
    def geweke_num_statistic_fails_to_tolerate(self):
        # Allow this many trial failures before declaring the test failed.
        return 1
    @attr('slow')
    def geweke_tests(self):
        for setting_idx, hypparam_dict in enumerate(self.geweke_hyperparameter_settings):
            yield self.check_geweke, setting_idx, hypparam_dict
    def geweke_figure_filepath(self,setting_idx):
        # Diagnostic quantile plot location, per test class and setting.
        return os.path.join(os.path.dirname(__file__),'figures',
            self.__class__.__name__,'setting_%d.pdf' % setting_idx)
    def check_geweke(self,setting_idx,hypparam_dict):
        # NOTE: Python 2 code (xrange); also uses the module-level mkdir().
        import os
        from matplotlib import pyplot as plt
        plt.ioff()
        fig = plt.figure()
        figpath = self.geweke_figure_filepath(setting_idx)
        mkdir(os.path.dirname(figpath))
        nsamples, data_size, ntrials = self.geweke_nsamples, \
            self.geweke_data_size, self.geweke_ntrials
        d = self.distribution_class(**hypparam_dict)
        sample_dim = np.atleast_1d(self.geweke_statistics(d,d.rvs(10))).shape[0]
        num_statistic_fails = 0
        for trial in xrange(ntrials):
            # collect forward-generated statistics
            forward_statistics = np.squeeze(np.empty((nsamples,sample_dim)))
            for i in xrange(nsamples):
                d = self.distribution_class(**hypparam_dict)
                data = d.rvs(data_size)
                forward_statistics[i] = self.geweke_statistics(d,data)
            # collect gibbs-generated statistics
            gibbs_statistics = np.squeeze(np.empty((nsamples,sample_dim)))
            d = self.distribution_class(**hypparam_dict)
            data = d.rvs(data_size)
            for i in xrange(nsamples):
                d.resample(data,**self.resample_kwargs)
                data = d.rvs(data_size)
                gibbs_statistics[i] = self.geweke_statistics(d,data)
            testing.populations_eq_quantile_plot(forward_statistics,gibbs_statistics,fig=fig)
            try:
                sl = self.geweke_numerical_slice(d,setting_idx)
                testing.assert_populations_eq_moments(
                    forward_statistics[...,sl],gibbs_statistics[...,sl],
                    pval=self.geweke_pval)
            except AssertionError:
                # On failure, dump the raw statistics for offline inspection.
                datapath = os.path.join(os.path.dirname(__file__),'figures',
                    self.__class__.__name__,'setting_%d_trial_%d.npz' % (setting_idx,trial))
                np.savez(datapath,fwd=forward_statistics,gibbs=gibbs_statistics)
                example_violating_means = forward_statistics.mean(0), gibbs_statistics.mean(0)
                num_statistic_fails += 1
        plt.savefig(figpath)
        # example_violating_means is bound whenever at least one trial failed,
        # which is guaranteed when this assertion message is rendered.
        assert num_statistic_fails <= self.geweke_num_statistic_fails_to_tolerate, \
                'Geweke MAY have failed, check FIGURES in %s (e.g. %s vs %s)' \
                % ((os.path.dirname(figpath),) + example_violating_means)
##########
# misc #
##########
def mkdir(path):
    """Create *path* and any missing parents, like ``mkdir -p``.

    Idempotent: an already-existing directory is not an error, but any other
    OSError (including *path* existing as a file) propagates.
    """
    # from
    # http://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
    import errno
    try:
        os.makedirs(path)
    except OSError as exc:
        if exc.errno != errno.EEXIST or not os.path.isdir(path):
            raise
|
import logging
from datetime import datetime
from zensols.cli import OneConfPerActionOptionsCliEnv
from zensols.garmdown import (
Manager,
Backuper,
SheetUpdater,
Reporter,
AppConfig,
)
class InfoCli(object):
    """CLI facade for informational commands (environment, fetch config,
    and listings of not-yet-downloaded / not-yet-imported activities)."""
    def __init__(self, config, detail=False, limit=None):
        # detail: include per-activity detail in listings
        # limit: cap the number of activities listed (None = no cap)
        self.mng = Manager(config)
        self.detail = detail
        self.limit = limit
    def environment(self):
        """Print environment information via the manager."""
        self.mng.environment()
    def fetch_config(self):
        """Write the fetch configuration."""
        self.mng.config.fetch_config.write()
    def write_not_downloaded(self):
        """List activities known upstream but not yet downloaded."""
        self.mng.write_not_downloaded(self.detail, self.limit)
    def write_not_imported(self):
        """List downloaded activities that are not yet imported."""
        self.mng.write_not_imported(self.detail, self.limit)
class DownloadCli(object):
    """CLI facade for download/import commands; a full sync also backs up
    the activities database afterwards."""
    def __init__(self, config, limit=None):
        # limit: maximum number of activities to process (None = all)
        self.mng = Manager(config)
        self.config = config
        self.limit = limit
    def sync_activities(self):
        """Download outstanding activity metadata."""
        self.mng.sync_activities(self.limit)
    def sync_tcx(self):
        """Download outstanding TCX files."""
        self.mng.sync_tcx(self.limit)
    def import_tcx(self):
        """Import downloaded TCX files."""
        self.mng.import_tcx()
    def sync(self):
        """Run a full sync, then back up the database."""
        self.mng.sync(self.limit)
        backuper = Backuper(self.config)
        backuper.backup()
    def clean_imported(self):
        """Remove all TCX files from the imported directory."""
        self.mng.clean_imported()
class SheetCli(object):
    """CLI facade that updates the Google Docs training spreadsheet."""
    def __init__(self, config):
        self.config = config
    def sync(self):
        """Push the synced data to the spreadsheet."""
        su = SheetUpdater(self.config)
        su.sync()
class BackupCli(object):
    """CLI facade that backs up the activities database."""
    def __init__(self, config):
        self.backer = Backuper(config)
    def backup(self):
        # True forces the backup regardless of schedule.
        self.backer.backup(True)
class ReporterCli(object):
    """CLI facade that writes an activity report for a single day.

    ``format`` must be one of FORMAT_TYPES; ``datestr`` is an optional
    ``YYYY-mm-dd`` string, defaulting to the current date.
    """
    FORMAT_TYPES = set('detail summary json'.split())

    def __init__(self, config, format, datestr=None):
        # `format` mirrors the CLI option name, intentionally shadowing the
        # builtin for this scope.
        self.config = config
        self.format = format
        self.datestr = datestr
        if format not in self.FORMAT_TYPES:
            fopts = self.format_type_string()
            raise ValueError(
                f"unknown format type '{format}' not one of {fopts}")

    @classmethod
    def format_type_string(cls):
        """Return the allowed formats as 'a|b|c'.

        Sorted so the string (shown in option metavars and error messages)
        is deterministic — iteration order of a set is not.
        """
        return '|'.join(sorted(cls.FORMAT_TYPES))

    @property
    def date(self):
        """Report date parsed from ``datestr``, or now() when absent."""
        if self.datestr is None:
            date = datetime.now()
        else:
            date = datetime.strptime(self.datestr, '%Y-%m-%d')
        return date

    def report(self):
        """Dispatch to ``Reporter.write_<format>(date)``."""
        reporter = Reporter(self.config)
        getattr(reporter, f'write_{self.format}')(self.date)
class SyncCli(object):
    """CLI facade combining a full download sync with a spreadsheet sync."""
    def __init__(self, config):
        self.config = config
    def sync(self):
        """Equivalent to the `download` then `sheet` actions."""
        DownloadCli(self.config).sync()
        SheetCli(self.config).sync()
class ConfAppCommandLine(OneConfPerActionOptionsCliEnv):
    """Command-line definition: wires each CLI action (info/down/backup/
    report/sheet/sync) to its facade class via a declarative config dict."""
    def __init__(self):
        # Reusable option descriptors: [short, long, required?, optparse kwargs].
        detail_op = ['-d', '--detail', False,
                     {'dest': 'detail',
                      'action': 'store_true', 'default': False,
                      'help': 'report details of missing data'}]
        limit_op = ['-l', '--limit', False,
                    {'dest': 'limit', 'metavar': 'INTEGER',
                     'type': 'int',
                     'help': 'the limit'}]
        date_op = ['-a', '--date', False,
                   {'dest': 'datestr', 'metavar': 'mm/dd/yyyy',
                    'help': 'the date to report, which defaults to today'}]
        format_op = ['-f', '--format', False,
                     {'dest': 'format',
                      'default': 'detail',
                      'metavar': ReporterCli.format_type_string(),
                      'help': 'the format'}]
        # One executor per facade class; each action maps a CLI verb to a
        # method ('meth' defaults to the action name when omitted).
        cnf = {'executors':
               [{'name': 'info',
                 'executor': lambda params: InfoCli(**params),
                 'actions': [{'name': 'env',
                              'meth': 'environment',
                              'doc': 'print environment',
                              'opts': [detail_op]},
                             {'name': 'fetchconf',
                              'meth': 'fetch_config',
                              'doc': 'print fetch configuration',
                              'opts': [detail_op]},
                             {'name': 'notdown',
                              'meth': 'write_not_downloaded',
                              'doc': 'print activities not downloaded',
                              'opts': [detail_op]},
                             {'name': 'notimport',
                              'meth': 'write_not_imported',
                              'doc': 'print activities not imported',
                              'opts': [detail_op]}]},
                {'name': 'down',
                 'executor': lambda params: DownloadCli(**params),
                 'actions': [{'name': 'activity',
                              'meth': 'sync_activities',
                              'doc': 'download outstanding activites',
                              'opts': [limit_op]},
                             {'name': 'tcx',
                              'meth': 'sync_tcx',
                              'doc': 'download outstanding TCX files',
                              'opts': [limit_op]},
                             {'name': 'import',
                              'meth': 'import_tcx',
                              'doc': 'import TCX file',
                              'opts': [limit_op]},
                             {'name': 'clean',
                              'meth': 'clean_imported',
                              'doc': 'remove all TCX files from the imported directory',
                              'opts': []},
                             {'name': 'download',
                              'doc': 'download all outstanding data',
                              'opts': [limit_op]}]},
                {'name': 'backup',
                 'executor': lambda params: BackupCli(**params),
                 'actions': [{'name': 'backup',
                              'doc': 'backup (force) the activites database',
                              'opts': []}]},
                {'name': 'report',
                 'executor': lambda params: ReporterCli(**params),
                 'actions': [{'name': 'report',
                              'doc': 'report activities for a day',
                              'opts': [date_op, format_op]}]},
                {'name': 'sheet',
                 'executor': lambda params: SheetCli(**params),
                 'actions': [{'name': 'sheet',
                              'meth': 'sync',
                              'doc': 'update Google Docs training spreadsheet',
                              'opts': []}]},
                {'name': 'sync',
                 'executor': lambda params: SyncCli(**params),
                 'actions': [{'name': 'sync',
                              'doc': 'equivalent to actions download and sheet',
                              'opts': []}]}],
               'config_option': {'name': 'config',
                                 'expect': True,
                                 'opt': ['-c', '--config', False,
                                         {'dest': 'config',
                                          'metavar': 'FILE',
                                          'help': 'configuration file'}]},
               'whine': 1}
        super(ConfAppCommandLine, self).__init__(
            cnf, config_env_name='garmdownrc', pkg_dist='zensols.garmdown',
            config_type=AppConfig, default_action='sync')
def main():
    """Entry point: configure logging and run the command line."""
    logging.basicConfig(format='%(module)s: %(message)s', level=logging.INFO)
    # Quiet the CLI framework's own logging.
    logging.getLogger('zensols.actioncli').setLevel(logging.WARNING)
    cl = ConfAppCommandLine()
    cl.invoke()
|
<filename>pyccel/ast/omp.py
# coding: utf-8
#------------------------------------------------------------------------------------------#
# This file is part of Pyccel which is released under MIT License. See the LICENSE file or #
# go to https://github.com/pyccel/pyccel/blob/master/LICENSE for full license details. #
#------------------------------------------------------------------------------------------#
"""
OpenMP has several constructs and directives, and this file contains the OpenMP types that are supported.
We represent some types with the OmpAnnotatedComment type.
These types are detailed on our documentation:
https://github.com/pyccel/pyccel/blob/master/tutorial/openmp.md
"""
from .basic import Basic
class OmpAnnotatedComment(Basic):
    """Represents an OpenMP Annotated Comment in the code.
    Parameters
    ----------
    txt: str
        statement to print
    combined: List (Optional)
        constructs to be combined with the current construct
    Examples
    --------
    >>> from pyccel.ast.omp import OmpAnnotatedComment
    >>> OmpAnnotatedComment('parallel')
    OmpAnnotatedComment(parallel)
    """
    __slots__ = ('_txt', '_combined', '_has_nowait')
    _attribute_nodes = ()
    # Subclasses set this True when the printer must wrap the body in brackets.
    _is_multiline = False
    def __init__(self, txt, has_nowait=False, combined=None):
        self._txt = txt
        self._combined = combined
        self._has_nowait = has_nowait
        super().__init__()
    @property
    def is_multiline(self):
        """Used to check if the construct needs brackets."""
        return self._is_multiline
    @property
    def has_nowait(self):
        """Used to check if the construct has a nowait clause."""
        return self._has_nowait
    @has_nowait.setter
    def has_nowait(self, value):
        """Used to set the _has_nowait var."""
        self._has_nowait = value
    @property
    def name(self):
        """Name of the construct."""
        # Base default: no construct keyword; subclasses override this.
        return ''
    @property
    def txt(self):
        """Used to store clauses."""
        return self._txt
    @property
    def combined(self):
        """Used to store the combined construct of a directive."""
        return self._combined
    def __getnewargs__(self):
        """Used for Pickling self."""
        # NOTE(review): has_nowait is not included here, and `combined`
        # would bind to the second positional parameter (has_nowait) if
        # these args were fed back to __init__ — confirm intent.
        args = (self.txt, self.combined)
        return args
    def __str__(self):
        # Emit the pragma: '#$ omp <name> <combined> <txt>', skipping empties.
        instructions = [self.name, self.combined, self.txt]
        return '#$ omp '+' '.join(i for i in instructions if i)
class OMP_For_Loop(OmpAnnotatedComment):
    """ Represents an OpenMP Loop construct. """
    __slots__ = ()
    def __init__(self, txt, has_nowait):
        # `has_nowait` is required here, unlike the base-class default.
        super().__init__(txt, has_nowait)
    @property
    def name(self):
        """Name of the construct."""
        return 'for'
class OMP_Simd_Construct(OmpAnnotatedComment):
    """ Represents an OpenMP Simd construct"""
    __slots__ = ()
    def __init__(self, txt, has_nowait):
        # `has_nowait` is required here, unlike the base-class default.
        super().__init__(txt, has_nowait)
    @property
    def name(self):
        """Name of the construct."""
        return 'simd'
class OMP_TaskLoop_Construct(OmpAnnotatedComment):
    """ Represents an OpenMP Taskloop construct"""
    __slots__ = ()
    def __init__(self, txt, has_nowait):
        # `has_nowait` is required here, unlike the base-class default.
        super().__init__(txt, has_nowait)
    @property
    def name(self):
        """Name of the construct."""
        return 'taskloop'
class OMP_Distribute_Construct(OmpAnnotatedComment):
    """ Represents an OpenMP Distribute construct"""
    __slots__ = ()
    def __init__(self, txt, has_nowait):
        # `has_nowait` is required here, unlike the base-class default.
        super().__init__(txt, has_nowait)
    @property
    def name(self):
        """Name of the construct."""
        return 'distribute'
class OMP_Parallel_Construct(OmpAnnotatedComment):
    """ Represents an OpenMP Parallel construct. """
    __slots__ = ()
    # Multiline: the printer wraps this construct's body in brackets.
    _is_multiline = True
    @property
    def name(self):
        """Name of the construct."""
        return 'parallel'
class OMP_Task_Construct(OmpAnnotatedComment):
    """ Represents an OpenMP Task construct. """
    __slots__ = ()
    _is_multiline = True
    def __init__(self, txt, has_nowait):
        # NOTE(review): no `name` override, so __str__ emits only `txt` —
        # presumably txt carries the 'task' keyword; confirm.
        super().__init__(txt, has_nowait)
class OMP_Single_Construct(OmpAnnotatedComment):
    """ Represents an OpenMP Single construct. """
    __slots__ = ()
    _is_multiline = True
    def __init__(self, txt, has_nowait):
        # `has_nowait` is required here, unlike the base-class default.
        super().__init__(txt, has_nowait)
    @property
    def name(self):
        """Name of the construct."""
        return 'single'
class OMP_Critical_Construct(OmpAnnotatedComment):
    """ Represents an OpenMP Critical construct. """
    __slots__ = ()
    _is_multiline = True
    def __init__(self, txt, has_nowait):
        # NOTE(review): no `name` override, so __str__ emits only `txt` —
        # presumably txt carries the 'critical' keyword; confirm.
        super().__init__(txt, has_nowait)
class OMP_Master_Construct(OmpAnnotatedComment):
    """ Represents OpenMP Master construct. """
    __slots__ = ()
    _is_multiline = True
    def __init__(self, txt, has_nowait):
        # NOTE(review): no `name` override, so __str__ emits only `txt` —
        # presumably txt carries the 'master' keyword; confirm.
        super().__init__(txt, has_nowait)
class OMP_Masked_Construct(OmpAnnotatedComment):
    """ Represents OpenMP Masked construct. """
    __slots__ = ()
    _is_multiline = True
    @property
    def name(self):
        """Name of the construct."""
        return 'masked'
class OMP_Cancel_Construct(OmpAnnotatedComment):
    """ Represents OpenMP Cancel construct. """
    __slots__ = ()
    def __init__(self, txt, has_nowait):
        # NOTE(review): no `name` override, so __str__ emits only `txt` —
        # presumably txt carries the 'cancel' keyword; confirm.
        super().__init__(txt, has_nowait)
class OMP_Target_Construct(OmpAnnotatedComment):
    """ Represents OpenMP Target construct. """
    __slots__ = ()
    _is_multiline = True
    @property
    def name(self):
        """Name of the construct."""
        return 'target'
class OMP_Teams_Construct(OmpAnnotatedComment):
    """ Represents OpenMP Teams construct. """
    __slots__ = ()
    _is_multiline = True
    @property
    def name(self):
        """Name of the construct."""
        return 'teams'
class OMP_Sections_Construct(OmpAnnotatedComment):
    """ Represents OpenMP Sections construct. """
    __slots__ = ()
    _is_multiline = True
    def __init__(self, txt, has_nowait):
        # `has_nowait` is required here, unlike the base-class default.
        super().__init__(txt, has_nowait)
    @property
    def name(self):
        """Name of the construct."""
        return 'sections'
class OMP_Section_Construct(OmpAnnotatedComment):
    """ Represent OpenMP Section construct. """
    __slots__ = ()
    _is_multiline = True
    def __init__(self, txt, has_nowait):
        # NOTE(review): no `name` override, so __str__ emits only `txt` —
        # presumably txt carries the 'section' keyword; confirm.
        super().__init__(txt, has_nowait)
class Omp_End_Clause(OmpAnnotatedComment):
    """ Represents the End of an OpenMP block. """
    __slots__ = ()
    def __init__(self, txt, has_nowait):
        # NOTE(review): no `name` override, so __str__ emits only `txt` —
        # presumably txt carries the 'end ...' text; confirm.
        super().__init__(txt, has_nowait)
|
<gh_stars>1-10
import inspect
import json
from collections import OrderedDict
from pathlib import Path
import random
import pandas as pd
import hashlib
import numpy as np
from scipy.stats.mstats import gmean
import xgboost as xgb
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from qml.cv import QCV
from qml.helpers import get_engine, save
from qml.config import *
class QModels:
@classmethod
def get_instance(cls):
if not getattr(cls, 'instance', False):
cls.instance = QModels()
return cls.instance
def __init__(self):
self.models = {}
#todo create cs in cv_data
def qpredict(self, model_id, data_id, data=None, ids=None, tag='', save_result=True, save_model=False, force=False):
if ids is not None:
train_ids, test_ids = ids
res = self._check_result_exists(model_id, data_id, train_ids, test_ids, tag)
if res is not None and not force:
return res
if data is None:
#todo
load_data_id = data_id if data_id >0 else 1
Y_train = pd.read_csv(QML_TRAIN_Y_FILE_MASK.format(load_data_id), index_col=QML_INDEX_COL)
X_train = pd.read_csv(QML_TRAIN_X_FILE_MASK.format(load_data_id), index_col=QML_INDEX_COL)
X_test = pd.read_csv(QML_TEST_X_FILE_MASK.format(load_data_id), index_col=QML_INDEX_COL)
if ids is None:
train_ids = Y_train.index.values
test_ids = X_test.index.values
else:
temp = pd.concat([X_train, X_test])
X_train = temp.loc[train_ids]
Y_train = Y_train.loc[train_ids][QML_RES_COL]
X_test = temp.loc[test_ids]
temp = None
else:
X_train, Y_train, X_test = data
train_ids = Y_train.index.values
test_ids = X_test.index.values
if ids is None and not force:
res = self._check_result_exists(model_id, data_id, train_ids, test_ids, tag)
if res is not None and not force:
return res
model = self.get_model(model_id)
fit_res = model.fit(X_train, Y_train)
if save_model:
save(fit_res, QML_DATA_DIR + 'models/' + 'm{0:0=7d}_d{1:0=3d}__tr_{2}_ts_{3}_t_{4}'.format(
model_id, data_id, len(X_train), len(X_test), tag
))
predict_fn = getattr(fit_res, model.qml_predict_fn)
predict_fn_kwargs = {}
if 'force' in inspect.getfullargspec(predict_fn).args:
predict_fn_kwargs['force'] = force
res = predict_fn(X_test, **predict_fn_kwargs)
#todo
if model.qml_predict_fn == 'predict_proba':
res = [x[1] for x in res]
A1 = pd.DataFrame(X_test.index)
A1[QML_RES_COL] = res
A1.set_index(QML_INDEX_COL, inplace=True)
if save_result:
A1.to_csv(self._get_res_filename(model_id, data_id, train_ids, test_ids, tag))
return A1
def _check_result_exists(self, model_id, data_id, train_ids, test_ids, tag):
"""check if result already exists"""
filename = self._get_res_filename(model_id, data_id, train_ids, test_ids, tag)
my_file = Path(filename)
if my_file.is_file():
return pd.read_csv(filename, index_col=QML_INDEX_COL)
else:
return None
def _get_res_filename(self, model_id, data_id, train_ids, test_ids, tag):
"""saved result filename"""
ids_hash = hashlib.md5(
'_'.join([str(i) for i in sorted(train_ids)]).encode('utf-8') +
'_'.join([str(i) for i in sorted(test_ids)]).encode('utf-8')
).hexdigest()
filename = QML_DATA_DIR + 'res/m{0:0=7d}_m{1:0=3d}__tr_{2}_ts_{3}_{4}_h_{5}.csv'.format(
model_id, data_id, len(train_ids), len(test_ids), tag, ids_hash
)
return filename
def get_model(self, model_id):
if model_id not in self.models:
self._load_model(model_id)
return self.models[model_id]
def add(self, model_id, model, description=None, predict_fn='predict', description_params=None):
_, conn = get_engine()
description = description if description else ''
res = conn.execute("select cls, params, descr, predict_fn from qml_models where model_id={}".format(model_id)).fetchone()
if res:
if res['cls'] != self.get_class(model) or res['params'] != self.get_params(model, description_params):
raise Exception('Model {} changed'.format(model_id))
else:
conn.execute(
"""
insert into qml_models (model_id, cls, params, descr, predict_fn) values
({}, '{}', '{}', '{}', '{}')
""".format(
model_id,
self.get_class(model),
self.get_params(model, description_params),
description,
predict_fn
)
)
self.models[model_id] = model
model.qml_descr = description
model.qml_predict_fn = predict_fn
conn.close()
def add_by_params(self, model, description=None, predict_fn='predict', description_params=None):
_, conn = get_engine()
description = description if description else ''
cls = self.get_class(model)
description_params = self.get_params(model, description_params)
res = conn.execute(
"""
select model_id
from qml_models
where
cls='{}'
and params='{}'
""".format(cls, description_params)
).fetchone()
if res:
return res['model_id']
else:
conn.execute(
"""
insert into qml_models (model_id, cls, params, descr, predict_fn) values
(null, '{}', '{}', '{}', '{}')
""".format(cls, description_params, description, predict_fn),
)
model_id=conn.execute('SELECT LAST_INSERT_ID() AS id').fetchone()[0]
self.models[model_id] = model
model.qml_descr = description
model.qml_predict_fn = predict_fn
conn.close()
return model_id
def get_class(self, model):
return str(model.__class__.__name__)
def get_params(self, model, description_params):
    """Serialized params for the registry; a caller-supplied override wins."""
    if description_params is not None:
        return description_params
    return self.normalize_params(model.get_params())
@classmethod
def normalize_params(cls, params):
    """Canonical JSON for *params*: keys sorted so equal dicts serialize identically."""
    ordered = OrderedDict(sorted(params.items()))
    return json.dumps(ordered)
def _load_model(self, model_id):
    """Reconstruct a model from its persisted qml_models row and register it.

    Looks up the stored class name and JSON params, instantiates the matching
    class from the local registry, then re-registers it via add() so the
    in-memory cache and metadata annotations stay consistent.

    NOTE(review): conn is never closed here — verify get_engine pools
    connections, or this leaks one per load.
    """
    _, conn = get_engine()
    # TODO: move this class registry to module level so new model types are
    # declared in one place.
    models = {
        'QXgb': QXgb,
        'KNeighborsClassifier': KNeighborsClassifier,
        'QAvg': QAvg,
        'QRankedAvg': QRankedAvg,
        'QRankedByLineAvg': QRankedByLineAvg,
        'QStackModel': QStackModel,
        'LogisticRegression': LogisticRegression,
        'DecisionTreeClassifier': DecisionTreeClassifier,
        'QPostProcessingModel': QPostProcessingModel,
        'RandomForestClassifier': RandomForestClassifier
    }
    res = conn.execute(
        """
        select cls, params, descr, predict_fn
        from qml_models
        where
        model_id='{}'
        """.format(model_id)
    ).fetchone()
    if not res:
        raise Exception('Missing {} model'.format(model_id))
    # Stored params are canonical JSON (see normalize_params): they recreate
    # the estimator with the exact constructor kwargs it was registered with.
    model = models[res['cls']](**json.loads(res['params']))
    self.add(model_id, model, res['descr'], res['predict_fn'])
def get_cv_score(self, model_id, data_id):
    """Fetch the stored cross-validation score for a (model_id, data_id) pair.

    Fix: the connection is now closed even when the query or row access
    raises (the original leaked it on any exception).
    NOTE(review): SQL built with str.format; assumed trusted input.
    """
    _, conn = get_engine()
    try:
        row = conn.execute(
            "select cv_score from qml_results where model_id={} and data_id={}".format(model_id, data_id)
        ).fetchone()
        return row['cv_score']
    finally:
        conn.close()
####################################
class QXgb:
    """Thin sklearn-style wrapper around the low-level xgboost train/predict API."""

    # Keyword arguments that belong to xgb.train() rather than the booster params.
    _TRAIN_ONLY_KEYS = ('num_boost_round', 'early_stopping_rounds')

    def __init__(self, **kwargs):
        self.params = kwargs
        self._Booster = None

    def get_params(self):
        """Constructor kwargs, as required by the model registry."""
        return self.params

    def fit(self, X, Y):
        """Train a booster on (X, Y); returns self for chaining."""
        dtrain = xgb.DMatrix(X, label=Y)
        booster_params = self.params.copy()
        booster_params['silent'] = True
        train_kwargs = {}
        for key in self._TRAIN_ONLY_KEYS:
            if key in booster_params:
                train_kwargs[key] = booster_params.pop(key)
        self._Booster = xgb.train(booster_params, dtrain, verbose_eval=False, **train_kwargs)
        return self

    def predict(self, X):
        """Predict with the fitted booster; raises if fit() was never called."""
        return self.booster().predict(xgb.DMatrix(X))

    ##from xgb##
    def booster(self):
        """Return the trained Booster, failing loudly before fit()."""
        if self._Booster is None:
            raise Exception('need to call fit beforehand')
        return self._Booster

    @property
    def feature_importances_(self):
        """
        Returns
        -------
        feature_importances_ : array of shape = [n_features]
        """
        b = self.booster()
        scores = b.get_fscore()
        importances = np.array(
            [scores.get(name, 0.) for name in b.feature_names], dtype=np.float32
        )
        return importances / importances.sum()
####################################
class QAvg:
    """Ensemble averaging the predictions of several registered models.

    *models* is a sequence of (model_id, data_id) pairs; with is_geom=True the
    geometric mean is used instead of the arithmetic mean.
    """

    def __init__(self, models, is_geom=False):
        self.models = models
        self.is_geom = is_geom

    def get_params(self):
        # is_geom is only serialized when it deviates from the default, so
        # registry keys for plain averages stay stable.
        params = {'models': self.models}
        if self.is_geom:
            params['is_geom'] = True
        return params

    def fit(self, X, Y):
        """Remember the training frame; base models are queried at predict() time."""
        self.train_X, self.train_Y = X, Y
        return self

    def predict(self, X, force=False):
        qm = QModels.get_instance()
        combined = None
        for pos, (model_id, data_id) in enumerate(self.models, start=1):
            res = qm.qpredict(
                model_id, data_id, ids=[self.train_X.index.values, X.index.values], force=force
            )
            if combined is None:
                combined = res
            else:
                combined[QML_RES_COL + '{}'.format(pos)] = res[QML_RES_COL]
        if self.is_geom:
            return list(gmean(combined, axis=1))
        return list(combined.mean(axis=1))
####################################
class QRankedAvg:
    """Weighted ensemble: each model's prediction is scaled by its stored CV score."""

    def __init__(self, models):
        self.models = models

    def get_params(self):
        return {
            'models': self.models
        }

    def fit(self, X, Y):
        """Remember the training frame; base models are queried at predict() time."""
        self.train_X, self.train_Y = X, Y
        return self

    def predict(self, X, force=False):
        qm = QModels.get_instance()
        combined = None
        total_weight = 0
        for pos, (model_id, data_id) in enumerate(self.models, start=1):
            res = qm.qpredict(
                model_id, data_id, ids=[self.train_X.index.values, X.index.values], force=force
            )
            weight = qm.get_cv_score(model_id, data_id)
            total_weight += weight
            if combined is None:
                combined = res  # first result frame also provides the index
            combined['col{}'.format(pos)] = res[QML_RES_COL].apply(lambda x: float(x) * float(weight))
        # Drop the raw prediction column so only the weighted columns are summed.
        del combined[QML_RES_COL]
        return [float(x) / float(total_weight) for x in list(combined.sum(axis=1))]
class QRankedByLineAvg:
    """Rank-weighted ensemble: models sorted by CV score are weighted by rank.

    The lowest-scoring model gets weight 1, the next 2, and so on; predictions
    are the rank-weighted average.
    """

    def __init__(self, models):
        self.models = models

    def get_params(self):
        return {
            'models': self.models
        }

    def fit(self, X, Y):
        """Remember the training frame; base models are queried at predict() time."""
        self.train_X, self.train_Y = X, Y
        return self

    def predict(self, X, force=False):
        qm = QModels.get_instance()
        scored = []
        for model_id, data_id in self.models:
            res = qm.qpredict(
                model_id, data_id, ids=[self.train_X.index.values, X.index.values], force=force
            )
            scored.append([model_id, data_id, qm.get_cv_score(model_id, data_id), res])
        scored.sort(key=lambda item: item[2])  # ascending CV score -> ascending rank
        combined = None
        rank_total = 0
        for i, [model_id, data_id, cv_score, res] in enumerate(scored):
            rank = i + 1
            rank_total += rank
            if combined is None:
                combined = res  # first result frame also provides the index
            combined['col{}'.format(i)] = res[QML_RES_COL].apply(lambda x: float(x) * float(rank))
        # Drop the raw prediction column so only the weighted columns are summed.
        del combined[QML_RES_COL]
        return [float(x) / float(rank_total) for x in list(combined.sum(axis=1))]
####################################
class QStackModel:
    """Two-level stacking ensemble.

    Level 1: each base (model_id, data_id) pair produces out-of-fold
    predictions via a 2-fold split of the training ids.  Level 2: the
    second_layer_model is trained on those predictions and emits the final
    output.
    """

    def __init__(self, models, second_layer_model):
        self.models = models
        self.second_layer_model = second_layer_model

    def get_params(self):
        return {
            'models': self.models,
            'second_layer_model': self.second_layer_model
        }

    def fit(self, X, Y):
        # Only ids and labels are kept; base predictions are delegated to
        # QModels.qpredict at predict() time.
        self.train_ids = list(X.index.values)
        self.train_Y = Y
        return self

    def predict(self, X, force=False):
        qm = QModels.get_instance()
        #cv = QCV(qm)
        ###level1
        # 2-fold out-of-fold scheme: each half of the train ids is predicted
        # by a model fit on the other half.
        middle = round(len(self.train_ids)/2)
        train_ids1 = self.train_ids[:middle]
        train_ids2 = self.train_ids[middle:]
        A_train = A_test = None
        for i, (model_id, data_id) in enumerate(self.models):
            res1 = qm.qpredict(
                model_id, data_id, ids=[train_ids1, train_ids2], force=force
            )
            res2 = qm.qpredict(
                model_id, data_id, ids=[train_ids2, train_ids1], force=force
            )
            # Stitch both halves back into one column per base model.
            col = pd.concat([res2, res1]).rename(index=str, columns={QML_RES_COL: "m_{}".format(i)})
            if A_train is None:
                A_train = col
            else:
                A_train = A_train.join(col)
            # Test-set predictions come from a model fit on all train ids.
            res = qm.qpredict(
                model_id, data_id, ids=[self.train_ids, X.index.values], force=force
            )
            col = res.rename(index=str, columns={QML_RES_COL: "m_{}".format(i)})
            if A_test is None:
                A_test = col
            else:
                A_test = A_test.join(col)
        # Level 2: train the meta-model on the base predictions (data_id -1
        # marks in-memory data); its result is never persisted.
        return list(
            qm.qpredict(
                self.second_layer_model, -1, data=[A_train, self.train_Y, A_test],
                save_result=False
            )[QML_RES_COL]
        )
####################################
class QPostProcessingModel:
    """Wraps another registered model and applies *fn* to its predictions.

    fn(X, res) receives the raw prediction frame and returns the final output.
    Note: fn is deliberately excluded from get_params() — callables are not
    serializable registry keys.

    Fix: removed a stray trailing "|" after the final return (extraction
    artifact) that made the class a syntax error.
    """

    def __init__(self, model_id, data_id, fn):
        self.model_id = model_id
        self.data_id = data_id
        self.fn = fn

    def get_params(self):
        return {
            'model_id': self.model_id,
            'data_id': self.data_id
        }

    def fit(self, X, Y):
        """Remember the training data; the wrapped model is queried at predict() time."""
        self.train_X = X
        self.train_Y = Y
        return self

    def predict(self, X, cv_parts='', cv_part=''):
        res = QModels.get_instance().qpredict(self.model_id, self.data_id, data=[self.train_X, self.train_Y, X])
        return self.fn(X, res)
import torch.nn as nn
from torch.nn import MultiheadAttention
import torch.nn.functional as F
class TransformerEncoderLayer(nn.Module):
r"""TransformerEncoderLayer is made up of self-attn and feedforward network.
This standard encoder layer is based on the paper "Attention Is All You Need".
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
<NAME>, and <NAME>. 2017. Attention is all you need. In Advances in
Neural Information Processing Systems, pages 6000-6010. Users may modify or implement
in a different way during application.
Args:
d_model: the number of expected features in the input (required).
nhead: the number of heads in the multiheadattention models (required).
dim_feedforward: the dimension of the feedforward network model (default=2048).
dropout: the dropout value (default=0.1).
activation: the activation function of intermediate layer, relu or gelu (default=relu).
Examples::
>>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8)
>>> src = torch.rand(10, 32, 512)
>>> out = encoder_layer(src)
"""
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation="relu"):
super(TransformerEncoderLayer, self).__init__()
self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
def forward(self, src, src_mask=None, src_key_padding_mask=None):
r"""Pass the input through the encoder layer.
Args:
src: the sequnce to the encoder layer (required).
src_mask: the mask for the src sequence (optional).
src_key_padding_mask: the mask for the src keys per batch (optional).
Shape:
see the docs in Transformer class.
"""
src2 = self.self_attn(src, src, src, attn_mask=src_mask,
key_padding_mask=src_key_padding_mask)[0]
src = src + self.dropout1(src2)
src = self.norm1(src)
if hasattr(self, "activation"):
src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
else: # for backward compatibility
src2 = self.linear2(self.dropout(F.relu(self.linear1(src))))
src = src + self.dropout2(src2)
src = self.norm2(src)
return src
class TransformerDecoderLayer(nn.Module):
    r"""A single transformer decoder block: masked self-attention,
    encoder-decoder attention, and a position-wise feedforward network, each
    wrapped in dropout + residual + LayerNorm (post-norm, as in "Attention Is
    All You Need", Vaswani et al., NeurIPS 2017).  Users may modify or
    implement in a different way during application.

    Args:
        d_model: the number of expected features in the input (required).
        nhead: the number of heads in the multiheadattention models (required).
        dim_feedforward: the dimension of the feedforward network model (default=2048).
        dropout: the dropout value (default=0.1).
        activation: the activation function of intermediate layer, relu or gelu (default=relu).

    Examples::
        >>> decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8)
        >>> memory = torch.rand(10, 32, 512)
        >>> tgt = torch.rand(20, 32, 512)
        >>> out = decoder_layer(tgt, memory)
    """

    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation="relu"):
        super(TransformerDecoderLayer, self).__init__()
        self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout)
        self.multihead_attn = MultiheadAttention(d_model, nhead, dropout=dropout)
        # Position-wise feedforward sublayer.
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.norm3 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.dropout3 = nn.Dropout(dropout)
        self.activation = _get_activation_fn(activation)

    def forward(self, tgt, memory, tgt_mask=None, memory_mask=None,
                tgt_key_padding_mask=None, memory_key_padding_mask=None):
        r"""Pass the inputs (and mask) through the decoder layer.

        Args:
            tgt: the sequence to the decoder layer (required).
            memory: the sequence from the last layer of the encoder (required).
            tgt_mask: the mask for the tgt sequence (optional).
            memory_mask: the mask for the memory sequence (optional).
            tgt_key_padding_mask: the mask for the tgt keys per batch (optional).
            memory_key_padding_mask: the mask for the memory keys per batch (optional).

        Shape:
            see the docs in Transformer class.
        """
        # Masked self-attention sublayer.
        self_attn_out = self.self_attn(tgt, tgt, tgt, attn_mask=tgt_mask,
                                       key_padding_mask=tgt_key_padding_mask)[0]
        tgt = self.norm1(tgt + self.dropout1(self_attn_out))
        # Encoder-decoder attention: queries from tgt, keys/values from memory.
        cross_attn_out = self.multihead_attn(tgt, memory, memory, attn_mask=memory_mask,
                                             key_padding_mask=memory_key_padding_mask)[0]
        tgt = self.norm2(tgt + self.dropout2(cross_attn_out))
        # Feedforward sublayer; checkpoints saved before `activation` existed
        # fall back to ReLU.
        if hasattr(self, "activation"):
            ff_out = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
        else:  # for backward compatibility
            ff_out = self.linear2(self.dropout(F.relu(self.linear1(tgt))))
        return self.norm3(tgt + self.dropout3(ff_out))
def _get_activation_fn(activation):
if activation == "relu":
return F.relu
elif activation == "gelu":
return F.gelu
else:
raise RuntimeError("activation should be relu/gelu, not %s." % activation) |
# ig_bot/tests/test_scraping.py
from unittest import mock
from igramscraper.exception import InstagramException
import pytest
from ig_bot.factories import AccountFactory
from ig_bot.scraping import (
account_by_id,
account_by_username,
exponential_sleep,
followed_accounts,
MaxRateLimitingRetriesExceeded,
)
@pytest.fixture
def account_one_mock():
    """Mock of an igramscraper account object for account id 1."""
    ig_account = mock.Mock()
    ig_account.identifier = '1'
    ig_account.username = 'one'
    ig_account.full_name = 'Account One'
    ig_account.centrality = None
    return ig_account
@pytest.fixture
def account_one(account_one_mock):
    """Account model mirroring account_one_mock."""
    source = account_one_mock
    return AccountFactory(
        identifier=source.identifier,
        username=source.username,
        full_name=source.full_name,
        centrality=source.centrality,
    )
@pytest.fixture
def account_two_mock():
    """Mock of an igramscraper account object for account id 2."""
    ig_account = mock.Mock()
    ig_account.identifier = '2'
    ig_account.username = 'two'
    ig_account.full_name = 'Account Two'
    ig_account.centrality = None
    return ig_account
@pytest.fixture
def account_two(account_two_mock):
    """Account model mirroring account_two_mock."""
    source = account_two_mock
    return AccountFactory(
        identifier=source.identifier,
        username=source.username,
        full_name=source.full_name,
        centrality=None,
    )
@mock.patch('ig_bot.scraping.time.sleep')
def test_exponential_sleep_sleeps_for_approtiate_durations(mock_sleep):
    """exponential_sleep(base=2, offset=10) sleeps 10 + 2**e seconds for e in 1..10."""
    # NOTE(review): "approtiate" is a typo for "appropriate"; left as-is so
    # the public test id stays stable.
    for exponent in range(1, 11):
        exponential_sleep(exponent=exponent, base=2, offset=10, logger=mock.Mock())
    durations = tuple(call.args[0] for call in mock_sleep.call_args_list)
    assert durations == (12, 14, 18, 26, 42, 74, 138, 266, 522, 1034)
@mock.patch('ig_bot.scraping.time.sleep')
def test_expoential_sleep_logs_sleep_duration(mock_sleep):
    """The computed sleep duration (10 + 2**12 = 4106) is logged at info level."""
    # NOTE(review): "expoential" is a typo for "exponential"; left as-is so
    # the public test id stays stable.
    logger = mock.Mock()
    exponential_sleep(exponent=12, base=2, offset=10, logger=logger)
    logger.info.assert_called_with('Sleeping for 4106 seconds...')
def test_followed_accounts_yields_followers(
    account_one_mock,
    account_one,
    account_two_mock,
    account_two,
):
    """followed_accounts yields an Account model for each followed ig account."""
    follower = AccountFactory(identifier='1', username='bot')
    client = mock.Mock()
    client.get_following.return_value = {"accounts": [account_one_mock, account_two_mock]}
    config = {
        'follows_page_size': 100,
        'rate_limit_retries': 5,
        'exponential_sleep_base': 2.05,
        'exponential_sleep_offset': 10.3,
        'max_followed_scraped': 999,
    }
    followed = followed_accounts(follower, client, config=config, logger=mock.Mock())
    assert tuple(followed) == (account_one, account_two)
@mock.patch('ig_bot.scraping.exponential_sleep')
def test_followed_accounts_retries_on_rate_limiting(mock_exponential_sleep):
    """Rate-limited calls are retried with exponential sleep until the cap.

    Fix: followed_accounts returns a generator (the sibling test iterates its
    result), so the bare call inside pytest.raises never executed its body and
    no exception could be raised; the call is now consumed with tuple().
    """
    follower = AccountFactory(identifier='1', username='bot')
    mock_client = mock.Mock()
    mock_client.get_following.side_effect = InstagramException("429")
    config = {
        'follows_page_size': 100,
        'rate_limit_retries': 5,
        'exponential_sleep_base': 2.05,
        'exponential_sleep_offset': 10.3,
        'max_followed_scraped': 999,
    }
    mock_logger = mock.Mock()
    with pytest.raises(MaxRateLimitingRetriesExceeded):
        tuple(followed_accounts(follower,
                                mock_client,
                                config=config,
                                logger=mock_logger))
    assert mock_exponential_sleep.call_count == 5
    assert mock_logger.exception.call_count == 5
def test_account_by_id_returns_account(account_one_mock, account_one):
    """account_by_id maps the scraped ig account onto an Account model."""
    client = mock.Mock()
    client.get_account_by_id.return_value = account_one_mock
    config = {
        'rate_limit_retries': 5,
        'exponential_sleep_base': 2.05,
        'exponential_sleep_offset': 10.3,
    }
    result = account_by_id(account_one.identifier, client, config=config, logger=mock.Mock())
    assert result == account_one
@mock.patch('ig_bot.scraping.exponential_sleep')
def test_account_by_id_retries_on_rate_limiting(mock_exponential_sleep,
                                                account_one):
    """A persistently rate-limited lookup retries 5 times, then gives up."""
    client = mock.Mock()
    client.get_account_by_id.side_effect = InstagramException("429")
    config = {
        'scraping': {'follows_page_size': 100},
        'rate_limit_retries': 5,
        'exponential_sleep_base': 2.05,
        'exponential_sleep_offset': 10.3,
    }
    logger = mock.Mock()
    with pytest.raises(MaxRateLimitingRetriesExceeded):
        account_by_id(account_one.identifier, client, config=config, logger=logger)
    assert mock_exponential_sleep.call_count == 5
    assert logger.exception.call_count == 5
def test_account_by_username_returns_account(account_one_mock, account_one):
    """account_by_username maps the scraped ig account onto an Account model."""
    client = mock.Mock()
    client.get_account.return_value = account_one_mock
    config = {
        'rate_limit_retries': 5,
        'exponential_sleep_base': 2.05,
        'exponential_sleep_offset': 10.3,
    }
    result = account_by_username(account_one.username, client, config=config, logger=mock.Mock())
    assert result == account_one
@mock.patch('ig_bot.scraping.exponential_sleep')
def test_account_by_username_retries_on_rate_limiting(mock_exponential_sleep,
                                                      account_one):
    """A persistently rate-limited username lookup retries 5 times, then gives up."""
    client = mock.Mock()
    client.get_account.side_effect = InstagramException("429")
    config = {
        'scraping': {'follows_page_size': 100},
        'rate_limit_retries': 5,
        'exponential_sleep_base': 2.05,
        'exponential_sleep_offset': 10.3,
    }
    logger = mock.Mock()
    with pytest.raises(MaxRateLimitingRetriesExceeded):
        account_by_username(account_one.username, client, config=config, logger=logger)
    assert mock_exponential_sleep.call_count == 5
    assert logger.exception.call_count == 5
import pytest
from platon_env.chain import Chain
from platon_env.base.host import Host
from platon_env.node import Node, NodeOpts
# Module-level test fixtures: a single PlatON node on a remote host, driven
# over SSH.  All tests below share these objects.
# host = Host('10.10.8.209', 'juzhen', 'Juzhen123!')
# node_id = '35bb5daad814fe902030cba6fd2d3ec60906dab70ba5df4d42a19448d300ab203cfd892c325f6716965dd93d8de2a377a2806c9703b69b68287577c70f9e7c07'
# node_key = '<KEY>'
# node = Node(host, node_id, node_key)
# chain = Chain(init_nodes=[], normal_nodes=[node])
# Node identity and consensus (BLS) key material for the test node.
node_id = '493c66bd7d6051e42a68bffa5f70005555886f28a0d9f10afaca4abc45723a26d6b833126fb65f11e3be51613405df664e7cda12baad538dd08b0a5774aa22cf'
node_key = '3f9301b1e574ce779e3d4ba054f3275e3a7d6d2ab22d1ef4b6b94e1b1491b55f'
network = 'private'
bls_pubKey = '5b6ce2480feee69b2007516054a25ace5d7ea2026d271fbdadcc2266f9e21e3e912f7d770c85f45385ba44e673e22b0db5ef5af1f57adf75d9b1b7628748d33a4a57ee2c8c7236691e579d219d42e1d875e084359acb8231fbc3da8ae400200e'
bls_prikey = 'edc1eafa379dadbe39297b629d0e17a4c7c3d90d8b7d08795a7db79dd498ec36'
base_dir = '/home/shing'
# NOTE(review): '<PASSWORD>' is a scrubbed placeholder — supply a real
# credential (preferably via environment/config) before running these tests.
host = Host('192.168.21.42', 'shing', password='<PASSWORD>')
node = Node(host, node_id, node_key, network, bls_pubkey=bls_pubKey, bls_prikey=bls_prikey, base_dir=base_dir)
# Deployment options and artifact paths used by install().
rpc_port = '6789'
rpc_api = 'web3,platon,admin,personal'
platon = 'file/platon'
keystore_dir = 'file/keystore.tar.gz'
genesis_file = r'C:\PlatON\PlatON_code\platon-env\tests\file\genesis.json'
# chain = Chain(nodes=[])
chain = Chain(nodes=[node])
@pytest.fixture()
def install_chain():
    """Install and start the chain, assert the node process is up, return the chain."""
    opts = NodeOpts(rpc_port=rpc_port, rpc_api=rpc_api, ws_port=None, ws_api=None, extra_opts=None)
    chain.install(platon=platon, network=network, genesis_file=genesis_file,
                  static_nodes=node.static_nodes, keystore_dir=keystore_dir, options=opts)
    pid = host.ssh(f'ps -ef | grep {node.name} | grep -v grep | ' + "awk {'print $2'}")
    assert pid != '' and int(pid) > 0
    return chain
def test_nodes():
    """chain.nodes is exposed as a set."""
    assert isinstance(chain.nodes, set)
def test_install():
    """Installing the chain leaves a running platon process on the host."""
    # TODO: add multi-node cases once the test environment is available.
    opts = NodeOpts(rpc_port=rpc_port, rpc_api=rpc_api, ws_port=None, ws_api=None, extra_opts=None)
    chain.install(platon=platon, network=network, genesis_file=genesis_file,
                  static_nodes=node.static_nodes, keystore_dir=keystore_dir, options=opts)
    pid = host.ssh(f'ps -ef | grep {node.name} | grep -v grep | ' + "awk {'print $2'}")
    assert pid != '' and int(pid) > 0
def test_uninstall(install_chain):
    """Uninstalling stops the process and removes deploy files and supervisor config."""
    ps_cmd = f'ps -ef | grep {node.name} | grep -v grep | ' + "awk {'print $2'}"
    pid = host.ssh(ps_cmd)
    assert pid != '' and int(pid) > 0
    assert node.name in host.ssh(f'cd {base_dir};ls')
    assert node.name + '.conf' == host.ssh(f'cd {host.supervisor.process_config_path};ls')
    chain.uninstall()
    assert host.ssh(ps_cmd) == ''
    assert node.name not in host.ssh(f'cd {base_dir};ls')
    assert node.name not in host.ssh(f'cd {host.supervisor.process_config_path};ls')
def test_add_process():
    """Smoke-call chain.add_process."""
    # TODO: real assertions still to be written.
    chain.add_process()
def test_status(install_chain):
    """chain.status() reflects running state before and after stop()."""
    assert chain.status()[0] is True
    assert chain.status()[0] is True  # stable across repeated queries
    chain.stop()
    assert chain.status()[0] is False
def test_init():
    """Placeholder: how to assert a successful init is still an open question."""
    pass
def test_start(install_chain):
    """start() brings a stopped node back up."""
    chain.stop()
    ps_cmd = f'ps -ef | grep {node.name} | grep -v grep | ' + "awk {'print $2'}"
    assert host.ssh(ps_cmd) == ''
    chain.start()
    pid = host.ssh(ps_cmd)
    assert pid != '' and int(pid) > 0
def test_restart(install_chain):
    """restart() produces a new, different process id."""
    ps_cmd = f'ps -ef | grep {node.name} | grep -v grep | ' + "awk {'print $2'}"
    pid_before = host.ssh(ps_cmd)
    assert pid_before != '' and int(pid_before) > 0
    chain.restart()
    pid_after = host.ssh(ps_cmd)
    assert pid_after != '' and pid_after != pid_before
def test_stop(install_chain):
    """stop() terminates the node process."""
    ps_cmd = f'ps -ef | grep {node.name} | grep -v grep | ' + "awk {'print $2'}"
    pid = host.ssh(ps_cmd)
    assert pid != '' and int(pid) > 0
    chain.stop()
    assert host.ssh(ps_cmd) == ''
def test_upload_platon(install_chain):
    """upload_platon places the platon binary in the node deploy path."""
    # TODO: exercise with multiple nodes when the environment has them.
    chain.uninstall()
    chain.upload_platon(platon, [node])
    assert host.ssh(f'cd {node.deploy_path};ls') == 'platon'
def test_upload_keystore(install_chain):
    """upload_keystore unpacks the keystore archive into the node data dir."""
    # TODO: cover the plain-directory keystore format, and multiple nodes.
    chain.uninstall()
    chain.upload_keystore(keystore_dir, [node])
    assert 'keystore\n' in host.ssh(f'cd {node.data_dir};ls')
def test_set_static_nodes(install_chain):
    """set_static_nodes writes static-nodes.json into the deploy path."""
    chain.uninstall()
    enodes = [
        "enode://3ea97e7d098d4b2c2cc7fb2ef9e2c1b802d27f01a4a0d1f7ca5ab5ce2133d560c6f703f957162a580d04da59f45707dae40107c99762509278adf1501692e0a6@192.168.16.121:16789",
        "enode://c9b8de645f6060a364c35e89a4744263917e1342eb3f131e8ce6b2f81f92bb9601832a354d0a54b3ca051064329867590923fc4dbb60ea0d82219ec20a851cac@192.168.16.123:16789",
        "enode://e9ee916797e66c3e10eb272956525f62ac8f9b9b74af05a5b021c7b23d7b740359c62912fe5e7fef66f2a3f5358bc7d8c1af7d862269ed5db27b5cbcf9820ec8@192.168.16.122:16789",
        "enode://03d6f06860ace8a5295167e039b7b7161a1e8903bacf9e50fb32b1a74b15a9fc1b28b400630ef38a6fb6a0c8874dd01cd65788b42a864da56e442ab7d832d7ea@192.168.16.124:16789",
    ]
    chain.set_static_nodes(enodes, [node])
    assert host.ssh(f'cd {node.deploy_path};ls') == 'static-nodes.json'
def test_concurrent_executor():
    """Placeholder for concurrent-executor coverage."""
    pass
def test_deploy_private_chain():
    """Placeholder for private-chain deployment coverage."""
    pass
import argparse

# ---- CLI definition --------------------------------------------------------
parser = argparse.ArgumentParser(description='Tune and train a model')
# Use multiprocessing to call get_data in parallel
parser.add_argument('--pool-data-acquisition', '-mpdata', action='store_true')
# Turns out Tensorflow doesn't play well with multiprocessing; for now, phenotypes should only be length 1
# THIS WILL NOT WORK IF YOU PASS MULTIPLE PHENOTYPES AND POOL_DATA_ACQUISITION
parser.add_argument('--phenotypes', nargs='+')
parser.add_argument('--thresholds', '-t', type=float, nargs='+', required=True)
# Only use DNN
parser.add_argument('--network', '-n', choices=['DNN', 'dnn'], required=True)
parser.add_argument('--pair', nargs=2, required=True)
parser.add_argument('--exp-id', '-i', default='')
parser.add_argument('--batch-size', '-b', type=int, default=1000)
parser.add_argument('--data-batch', '-db', type=int, default=50000)
parser.add_argument('--epochs', '-e', type=int, default=20)
parser.add_argument('--max-samples', '-m', type=int, default=100_000)
parser.add_argument('--random-state', '-r', type=int, default=42)
parser.add_argument('--validation-split', '-v', type=float, default=0.2)
parser.add_argument('--test-split', type=float, default=0.1)
parser.add_argument('--tune-split', type=float, default=0.01)
parser.add_argument('--optimize-split', type=float, default=0.2)
# Use multiprocessing to speed up Tensorflow computations
parser.add_argument('--use-multiprocessing', '-mptf', action='store_true')
parser.add_argument('--create-test-output', action='store_true')
parser.add_argument('--model-directory', '-mdir', type=str, default='/lab/corradin_biobank/FOR_AN/combinatorial_GWAS/data/06_models/{}/5_11_2021')
parser.add_argument('--test-directory', '-tdir', type=str, default='/lab/corradin_biobank/FOR_AN/combinatorial_GWAS/data/07_model_output/{}/5_11_2021')
parser.add_argument('--quiet', '-q', action='store_true')
parser.add_argument('--query-index', type=int, default=None)
args = parser.parse_args()
# NOTE(review): every CLI option becomes a module-level global here; the rest
# of the script relies on these implicit names (thresholds, network, exp_id, ...).
globals().update(vars(args))

# ---- Post-parse validation / normalization ---------------------------------
if not all(0 <= t <= 1 for t in thresholds):
    raise ValueError(f"Thresholds must be between 0 and 1! (Got thresholds of {thresholds})")
network = network.lower()
# '{}' placeholders in the directory options are filled with the network name.
if '{}' in model_directory:
    model_directory = model_directory.replace('{}', network)
if '{}' in test_directory:
    test_directory = test_directory.replace('{}', network)
# Derive a default experiment id from the run configuration when none given.
if exp_id == '':
    exp_id = f'simulated_phenotypes_{"_".join(phenotypes)}_thresholds_{"_".join(str(t) for t in thresholds)}_max_samples_{max_samples}_network_{network}'
if not quiet:
    print('Options:')
    print('\n'.join(f'{k}: {v}' for k,v in vars(args).items()))
import sys
sys.path.append('../../')
from combinatorial_gwas.simulation import SimulatedPheno, SNPInfoUnit
import combinatorial_gwas.high_level as cgwas
import tensorflow as tf
import kerastuner as kt
import os
import shutil
import string
import sklearn
import math
import pickle
def make_DNN_model(d1, dropout, l2, A, P):
    """Build a one-hidden-layer DNN over flattened input of shape (A, 4).

    d1: hidden units; dropout: dropout rate; l2: L2 penalty strength;
    A: input rows (SNP positions — TODO confirm); P: output units.
    """
    reg = tf.keras.regularizers.L2(l2)
    layers = [
        tf.keras.layers.InputLayer(input_shape=(A, 4)),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(d1, activation=tf.nn.relu, kernel_regularizer=reg),
        tf.keras.layers.Dropout(dropout),
        tf.keras.layers.Dense(P, activation=tf.nn.sigmoid, kernel_regularizer=reg),
    ]
    return tf.keras.Sequential(layers=layers, name='dnn_model')
def compile_model(model, optimizer):
    """Compile *model* for binary classification and print its summary."""
    metrics = ['accuracy', tf.metrics.AUC(name='AuROC'), tf.metrics.AUC(name='AuPR', curve='PR')]
    model.compile(optimizer=optimizer, loss=tf.keras.losses.BinaryCrossentropy(), metrics=metrics)
    model.summary()
class Range:
    """A hyperparameter search range convertible to a kerastuner Int or Float."""

    def __init__(self, min, max, step=None, sampling='linear'):
        # `min`/`max` shadow builtins but are kept for interface compatibility.
        self.min = min
        self.max = max
        self.step = step
        self.sampling = sampling

    def to_Int(self, hp, name):
        """Register this range on *hp* as an integer hyperparameter."""
        return hp.Int(name, min_value=self.min, max_value=self.max, step=self.step)

    def to_Float(self, hp, name):
        """Register this range on *hp* as a float hyperparameter."""
        # NOTE(review): self.sampling is never forwarded — confirm this is intended.
        return hp.Float(name, min_value=self.min, max_value=self.max, step=self.step)
# Discrete hyperparameter grids searched by the tuner (see get_tunable_DNN_model).
l2_choices = [0.5, 0.2, 0.1, 0.08, 0.05, 0.01]
lr_choices = [0.1, 0.05, 0.01, 0.005, 0.001]
dropout_choices = [0.05, 0.1, 0.15, 0.2, 0.35, 0.5, 0.65, 0.8]
def load_pickled_hyperparameters(file):
    """Load a pickled run summary from *file* and return its hyperparameter values."""
    with open(file, 'rb') as handle:
        summary = pickle.load(handle)
    return summary["outputs"]["hyperparameters"].values
def get_tunable_DNN_model(hp, d1, A, P):
    """Build a DNN whose width, dropout, l2 and learning rate are drawn from *hp*."""
    units = d1.to_Int(hp, 'd1_units')
    model = make_DNN_model(
        units,
        hp.Choice('dropout', values=dropout_choices),
        hp.Choice('l2', values=l2_choices),
        A,
        P,
    )
    compile_model(model, tf.keras.optimizers.Adam(hp.Choice('lr', values=lr_choices)))
    return model
def get_tuner(model_builder):
    """Create a fresh Hyperband tuner, wiping any previous tuning directory."""
    global exp_id
    tuning_dir = f'hp_tuning_{exp_id}'
    try:
        shutil.rmtree(tuning_dir)
    except FileNotFoundError:
        pass  # first run: nothing to clean up
    return kt.Hyperband(model_builder, objective=kt.Objective('val_AuPR', direction='max'),
                        max_epochs=epochs, executions_per_trial=3,
                        directory=tuning_dir, project_name='initial_model')
def get_datasource(phenotype, threshold):
    """Chromosome datasource filtered to SNPs passing *threshold* for *phenotype*."""
    pvalue_filter = cgwas.snp_filter(phenotype, cgwas.snp_filter.SORT_PVALUE, threshold=threshold)
    split_params = cgwas.DataSplitParams(validation_split=validation_split,
                                         test_split=test_split,
                                         tune_split=tune_split,
                                         validation_tune_split=optimize_split)
    return cgwas.chromosome_datasource(snp_filters=[pvalue_filter],
                                       balance_pheno=phenotype,
                                       max_samples=max_samples,
                                       random_state=random_state,
                                       data_split_params=split_params)
def get_tunable_hyperparameters_DNN(search_train_x):
    """Derive the tunable hidden-layer range and fixed input/output sizes.

    Returns (d1_range, A, P) where A is search_train_x.shape[1] and P is 1.
    """
    num_inputs = search_train_x.shape[1]
    num_outputs = 1
    hidden_range = Range(max(num_outputs // 2, 1), num_outputs * 128, 16)
    return hidden_range, num_inputs, num_outputs
def tuned_model(nt, dt, p, t, s, search_train_x):
    """Rebuild a model from hyperparameters pickled by an earlier tuning run.

    Live kerastuner search is disabled; hyperparameters are read from the
    07_model_output file for network *nt*, date *dt*, phenotype *p*,
    threshold *t* and max-samples *s*.  Returns (model, None, hyperparameters).
    """
    filename = f"../../data/07_model_output/{nt}/{dt}/phenotype_{p}_threshold_{t}_max-samples_{s}"
    hp = load_pickled_hyperparameters(filename)
    model = make_DNN_model(hp['d1_units'], hp['dropout'], hp['l2'], search_train_x.shape[1], 1)
    compile_model(model, tf.keras.optimizers.Adam(hp['lr']))
    return model, None, hp
def train_test_model(model, train_x, train_y, validation_x, validation_y, test_x, test_y):
    """Fit *model* on the training split and evaluate it on the test split.

    Returns (history, test_metrics).
    """
    workers = os.cpu_count() - 1 if use_multiprocessing else 1
    history = model.fit(x=train_x, y=train_y, batch_size=batch_size, epochs=epochs,
                        validation_data=(validation_x, validation_y),
                        use_multiprocessing=use_multiprocessing, workers=workers)
    test_metrics = model.evaluate(x=test_x, y=test_y,
                                  use_multiprocessing=use_multiprocessing, workers=workers)
    return history, test_metrics
def save_model(model, filename):
    """Persist *model* under model_directory/<filename>.

    Fix: the original lambda interpolated a corrupted literal "(unknown)"
    (a filename-substitution artifact) instead of the *filename* argument,
    so every model overwrote the same path; presumably
    f'{model_directory}/{filename}' was intended — confirm against callers.
    """
    model.save(f'{model_directory}/{filename}')
def dataframe_output(model_output, output_names):
    """Wrap a metrics sequence into a single-row DataFrame with *output_names* columns."""
    import pandas as pd
    row = {name: [value] for name, value in zip(output_names, model_output)}
    return pd.DataFrame.from_dict(row)
def save_data(outputs, datasource, filename):
    """Pickle run outputs, CLI args and the datasource to test_directory/<filename>.

    The datasource's genome_files attribute is detached while pickling and
    restored afterwards (presumably it holds unpicklable file handles —
    confirm).

    Fix: the original wrote to a corrupted literal "(unknown)" (a
    filename-substitution artifact) instead of the *filename* argument;
    presumably f'{test_directory}/{filename}' was intended.
    """
    import pickle
    os.makedirs(test_directory, exist_ok=True)
    to_save = {}
    to_save['outputs'] = outputs
    to_save['args'] = vars(args)
    genome_files = datasource.genome_files
    datasource.genome_files = None
    to_save['datasource'] = datasource
    with open(f'{test_directory}/{filename}', 'wb') as f:
        pickle.dump(to_save, f)
    datasource.genome_files = genome_files
def _driver(phenotype, threshold, tuning_data, train_validation_test_data, datasource, query):
    """Train, evaluate and persist one model for a (phenotype, threshold, query) run."""
    tune_x, optimize_x, tune_y, optimize_y = tuning_data
    train_x, train_y, validation_x, validation_y, test_x, test_y = train_validation_test_data
    # Hyperparameters come from the 4_29_2021 tuning run; tuner is always None here.
    model, tuner, hyperparameters = tuned_model(network, '4_29_2021', phenotype, threshold, max_samples, test_x)
    history, test_output = train_test_model(model, *train_validation_test_data)
    # Collect per-split metrics as single-row DataFrames keyed by split name.
    evaluated = {}
    evaluated['test'] = test_output
    evaluated['train'] = model.evaluate(x=train_x, y=train_y, use_multiprocessing=use_multiprocessing, workers=os.cpu_count() - 1 if use_multiprocessing else 1)
    evaluated['validation'] = model.evaluate(x=validation_x, y=validation_y, use_multiprocessing=use_multiprocessing, workers=os.cpu_count() - 1 if use_multiprocessing else 1)
    # evaluated['tune'] = model.evaluate(x=tune_x, y=tune_y, use_multiprocessing=use_multiprocessing, workers=os.cpu_count() - 1 if use_multiprocessing else 1)
    # evaluated['optimize'] = model.evaluate(x=optimize_x, y=optimize_y, use_multiprocessing=use_multiprocessing, workers=os.cpu_count() - 1 if use_multiprocessing else 1)
    evaluated = {k:dataframe_output(v, model.metrics_names) for k, v in evaluated.items()}
    evaluated['history'] = history.history
    evaluated['hyperparameters'] = hyperparameters
    filename = f'simulated_phenotype_{phenotype}_threshold_{threshold}_max-samples_{max_samples}_query_{query}'
    save_model(model, filename)
    if create_test_output:
        save_data(evaluated, datasource, filename)
    # NOTE(review): for a plain list this del-by-index pattern would skip
    # elements or raise IndexError; train_x appears to be a TrainingData
    # sequence whose __delitem__ is a no-op cache hook — confirm.
    for i in range(len(train_x)):
        del train_x[i]
def get_batched_tune_train_validation_test_data(datasource, phenotype, threshold):
    """Assemble the tune/train/validation/test splits for one run.

    The two tune splits are currently disabled and returned as (None, None) pairs.
    Returns (rt_x, vt_x, rt_y, vt_y, train_x, train_y, val_x, val_y, test_x, test_y).
    """
    train_x, train_y = batched_train_data(datasource, phenotype, threshold, 'train')
    val_x, val_y = datasource.get_simulated_data(slice(0, None), 'validation', pair)
    test_x, test_y = datasource.get_simulated_data(slice(0, None), 'test', pair)
    train_tune_x = train_tune_y = None  # datasource.get_simulated_data(slice(0, None), 'train_tune', pair)
    val_tune_x = val_tune_y = None  # datasource.get_simulated_data(slice(0, None), 'validation_tune', pair)
    return (train_tune_x, val_tune_x, train_tune_y, val_tune_y,
            train_x, train_y, val_x, val_y, test_x, test_y)
def acquire_data(args):
    """Unpack (phenotype, threshold), build its datasource, and return every
    data split followed by the datasource itself."""
    phenotype, threshold = args
    source = get_datasource(phenotype, threshold)
    splits = get_batched_tune_train_validation_test_data(source, phenotype, threshold)
    return (*splits, source)
# All queries defined for this phenotype pair, in deterministic sorted order.
queries = sorted(cgwas.simulation_I83_queries_pheno_dict[tuple(pair)])
# Fix: compare against None with `is not` (identity check), not `!=`.
if query_index is not None:
    # Restrict the sweep to a single query when an index was requested.
    queries = [queries[query_index]]
def batched_train_data(datasource, phenotype, threshold, dataset):
    """Return ({query: TrainingData}, None) for the given split.

    TrainingData is a keras Sequence that serves (x, y) batches of simulated
    data, caching each batch's x on disk under ``{test_directory}/train_cache``
    so repeated epochs skip the expensive ``get_simulated_data`` call.

    NOTE(review): relies on enclosing/module scope names ``tf``, ``pair``,
    ``data_batch``, ``test_directory``, ``max_samples``, ``cgwas`` and
    ``queries`` -- confirm they are in scope at call time.
    """
    class TrainingData(tf.keras.utils.Sequence):
        def __init__(self, datasource, phenotype, threshold, dataset, query):
            self.datasource = datasource
            self.phenotype = phenotype
            self.threshold = threshold
            self.dataset = dataset
            # Batch boundaries: [0, data_batch, 2*data_batch, ..., max_samples].
            self.batch_indeces = list(range(0, datasource.max_samples, data_batch))
            if self.batch_indeces[-1] != datasource.max_samples:
                self.batch_indeces.append(datasource.max_samples)
            # One on-disk cache file per batch (flattened the redundant
            # nested f-string; the resulting path is unchanged).
            self.saved = [
                f'{test_directory}/train_cache/phenotype_{self.phenotype}'
                f'_threshold_{self.threshold}_max_samples_{max_samples}'
                f'_dataset_{self.dataset}_{idx}_X.cache'
                for idx in range(len(self))]
            self.query = query
            # Phenotype dataframe for this query; fall back to the reversed
            # pair ordering when the direct lookup is missing.
            try:
                self.y_df = cgwas.simulation_I83_queries_pheno_dict[tuple(pair)][self.query]
            except KeyError:
                self.y_df = cgwas.simulated_I83_queries_pheno_dict[tuple(reversed(pair))][self.query]
            print(type(datasource))

        def __len__(self):
            # Number of batches (one fewer than the number of boundaries).
            return len(self.batch_indeces) - 1

        def __getitem__(self, idx):
            import pickle
            os.makedirs(f'{test_directory}/train_cache', exist_ok=True)
            x_filename = self.saved[idx]
            s = slice(*self.batch_indeces[idx:idx + 2])
            try:
                with open(x_filename, 'rb') as f:
                    x = pickle.load(f)
            except (FileNotFoundError, EOFError):
                # Cache miss (or truncated cache file): generate the batch
                # and cache x for the next epoch.
                x, y = self.datasource.get_simulated_data(s, self.dataset, pair)
                y = y[self.query].values
                with open(x_filename, 'wb') as f:
                    pickle.dump(x, f)
                return x, y
            print(type(self.datasource))
            # Cache hit: x came from disk, so recompute y from the stored
            # phenotype dataframe for the sample ids in this batch.
            _, sample_id_subset = self.datasource.get_sample_id_in_split(s, self.dataset)
            y = self.y_df.pheno_col.loc[sample_id_subset].values
            return x, y

        def __delitem__(self, idx):
            # Deliberate no-op: callers may "del" batches without effect.
            pass

        def clean(self):
            # Fix: was ``def clean():`` (missing self), which made the method
            # uncallable on instances.  Remove every cached batch file,
            # ignoring ones that are already gone.
            for idx in range(len(self)):
                try:
                    os.remove(self.saved[idx])
                except OSError:  # fix: was a bare except
                    pass

    return {query: TrainingData(datasource, phenotype, threshold, dataset, query)
            for query in queries}, None
def driver():
    """Run the full sweep: every phenotype x threshold x query combination."""
    for phenotype in phenotypes:
        for threshold in thresholds:
            (rt_x, vt_x, rt_y, vt_y,
             r_x, r_y, v_x, v_y, t_x, t_y, datasource) = acquire_data((phenotype, threshold))
            for query in queries:
                _driver(phenotype, threshold,
                        (rt_x, vt_x, rt_y, vt_y),
                        (r_x[query], r_y, v_x, v_y[query].values, t_x, t_y[query].values),
                        datasource, query)


# Fix: removed a stray '|' extraction artifact that followed this call and
# made the line a syntax error.
driver()
# -*- coding: utf-8 -*- #
import numpy as np
import matplotlib.pyplot as plt
class Neuron():
    """A single drawable neuron: a circle at (x, y) with an optional label."""

    def __init__(self, x, y, color='k', text=''):
        self.x = x
        self.y = y
        self.color = color
        self.text = text

    def draw(self, neuron_radius=0.5):
        """Render the neuron as an unfilled circle plus its text label."""
        outline = plt.Circle((self.x, self.y), radius=neuron_radius,
                             color=self.color, fill=False)
        # Label is nudged left/down so it sits roughly inside the circle.
        plt.text(self.x - 0.3, self.y - 0.1, self.text)
        plt.gca().add_patch(outline)
class Layer():
    # One drawable layer of a NeuralNetwork diagram: a row/column of Neuron
    # objects plus the connection lines back to the previous layer.
    #
    # Geometry depends on network.direction:
    #   'bottomtotop' / 'toptobottom' -> layers stacked along y, neurons along x
    #   'righttoleft' / 'lefttoright' -> layers stacked along x, neurons along y

    def __init__(self, network, neuron_num, neuron_num_widest, neuron_radius, line_weights, line_colors, neuron_color, neuron_text, distance_layers=6, distance_neurons=2):
        # distance_layers: gap between consecutive layers (drawing units).
        # distance_neurons: gap between neighbouring neurons within a layer.
        self.distance_layers = distance_layers
        self.distance_neurons = distance_neurons
        self.neuron_radius = neuron_radius
        self.neuron_text = neuron_text
        # Widest layer size is used to centre narrower layers
        # (see __lmargin_for_centering).
        self.neuron_num_widest = neuron_num_widest
        self.previous_layer = self.__get_previous_layer(network)
        self.direction = network.direction
        # The layer's coordinate on the stacking axis; the other axis is
        # filled in per-neuron by __intialise_neurons.
        if (self.direction == 'bottomtotop') or (self.direction == 'toptobottom'):
            self.y = self.__layer_position()
        elif (self.direction == 'righttoleft') or (self.direction == 'lefttoright'):
            self.x = self.__layer_position()
        self.neurons = self.__intialise_neurons(
            neuron_num, neuron_color, neuron_text)
        # Per-connection alpha values / colors; a scalar means "same for
        # all" (see draw()).  When indexed, the key is
        # [this_layer_neuron_index, previous_layer_neuron_index].
        self.line_weights = line_weights
        self.line_colors = line_colors

    def __intialise_neurons(self, neuron_num, neuron_color='k', neuron_text=''):
        # Build the Neuron objects, spacing them distance_neurons apart
        # along the non-stacking axis and centring the row against the
        # widest layer.  neuron_color / neuron_text may each be a single
        # value or a per-neuron sequence.
        neurons = []
        if (self.direction == 'bottomtotop') or (self.direction == 'toptobottom'):
            self.x = self.__lmargin_for_centering(neuron_num)
        elif (self.direction == 'righttoleft') or (self.direction == 'lefttoright'):
            self.y = self.__lmargin_for_centering(neuron_num)
        for iteration in range(neuron_num):
            if isinstance(neuron_color, str):
                color = neuron_color
            else:
                color = neuron_color[iteration]
            if isinstance(neuron_text, str):
                text = neuron_text
            else:
                text = neuron_text[iteration]
            neuron = Neuron(self.x, self.y, color, text)
            neurons.append(neuron)
            # Advance along the in-layer axis for the next neuron.
            if (self.direction == 'bottomtotop') or (self.direction == 'toptobottom'):
                self.x += self.distance_neurons
            elif (self.direction == 'righttoleft') or (self.direction == 'lefttoright'):
                self.y += self.distance_neurons
        return neurons

    def __lmargin_for_centering(self, neuron_num):
        # Margin that centres `neuron_num` neurons relative to the widest
        # layer of the network.
        return self.distance_neurons * (self.neuron_num_widest - neuron_num) / 2

    def __layer_position(self):
        # Coordinate of this layer on the stacking axis: one
        # distance_layers step beyond the previous layer (sign depends on
        # direction), or 0 for the first layer.
        if self.previous_layer:
            if (self.direction == 'bottomtotop'):
                return self.previous_layer.y + self.distance_layers
            elif (self.direction == 'toptobottom'):
                return self.previous_layer.y - self.distance_layers
            elif (self.direction == 'lefttoright'):
                return self.previous_layer.x + self.distance_layers
            elif (self.direction == 'righttoleft'):
                return self.previous_layer.x - self.distance_layers
        else:
            return 0

    def __get_previous_layer(self, network):
        # The most recently added layer, or None if this is the first.
        if len(network.layers) > 0:
            return network.layers[-1]
        else:
            return None

    def __line_two_neurons(self, neuron1, neuron2, line_weight=1, linecolor='k'):
        # Draw the connection between two neurons, shortened at both ends
        # by neuron_radius so the line starts/stops on the circle outlines.
        # NOTE: line_weight is used as the line's alpha (transparency),
        # not its width.
        if (self.direction == 'bottomtotop') or (self.direction == 'toptobottom'):
            angle = np.arctan((neuron2.x - neuron1.x) /
                              float(neuron2.y - neuron1.y))
            x_adjustment = self.neuron_radius * np.sin(angle)
            y_adjustment = self.neuron_radius * np.cos(angle)
        elif (self.direction == 'righttoleft') or (self.direction == 'lefttoright'):
            angle = np.arctan((neuron2.y - neuron1.y) /
                              float(neuron2.x - neuron1.x))
            x_adjustment = self.neuron_radius * np.cos(angle)
            y_adjustment = self.neuron_radius * np.sin(angle)
        # The sign of the end-point adjustment depends on which way the
        # layers advance.
        if (self.direction == 'bottomtotop'):
            line_x1 = neuron1.x - x_adjustment
            line_x2 = neuron2.x + x_adjustment
            line_y1 = neuron1.y - y_adjustment
            line_y2 = neuron2.y + y_adjustment
        elif (self.direction == 'toptobottom'):
            line_x1 = neuron1.x + x_adjustment
            line_x2 = neuron2.x - x_adjustment
            line_y1 = neuron1.y + y_adjustment
            line_y2 = neuron2.y - y_adjustment
        if (self.direction == 'lefttoright'):
            line_x1 = neuron1.x - x_adjustment
            line_x2 = neuron2.x + x_adjustment
            line_y1 = neuron1.y - y_adjustment
            line_y2 = neuron2.y + y_adjustment
        elif (self.direction == 'righttoleft'):
            line_x1 = neuron1.x + x_adjustment
            line_x2 = neuron2.x - x_adjustment
            line_y1 = neuron1.y + y_adjustment
            line_y2 = neuron2.y - y_adjustment
        line = plt.Line2D((line_x1, line_x2),
                          (line_y1, line_y2),
                          alpha=line_weight, color=linecolor)
        plt.gca().add_line(line)

    def draw(self, layerType=0):
        # Draw every neuron in this layer, all connection lines back to the
        # previous layer, and a caption naming the layer.
        # layerType: 0 = input layer, -1 = output layer, otherwise the
        # hidden-layer number.
        for this_layer_neuron_index in range(len(self.neurons)):
            neuron = self.neurons[this_layer_neuron_index]
            neuron.draw(self.neuron_radius)
            if self.previous_layer:
                for previous_layer_neuron_index in range(len(self.previous_layer.neurons)):
                    previous_layer_neuron = self.previous_layer.neurons[previous_layer_neuron_index]
                    # Scalar weights/colors apply to every connection;
                    # otherwise index per (this, previous) neuron pair.
                    if isinstance(self.previous_layer.line_weights, int):
                        line_weight = self.previous_layer.line_weights
                    else:
                        line_weight = self.previous_layer.line_weights[this_layer_neuron_index,
                                                                       previous_layer_neuron_index]
                    if isinstance(self.previous_layer.line_colors, str):
                        linecolor = self.previous_layer.line_colors
                    else:
                        linecolor = self.previous_layer.line_colors[this_layer_neuron_index,
                                                                    previous_layer_neuron_index]
                    self.__line_two_neurons(
                        neuron, previous_layer_neuron, line_weight, linecolor)
        # Layer caption, placed beside the row/column of neurons.
        if (self.direction == 'bottomtotop') or (self.direction == 'toptobottom'):
            x_text = self.neuron_num_widest * self.distance_neurons
            if layerType == 0:
                plt.text(x_text, self.y, 'Input Layer', fontsize=12)
            elif layerType == -1:
                plt.text(x_text, self.y, 'Output Layer', fontsize=12)
            else:
                plt.text(x_text, self.y, 'Hidden Layer ' +
                         str(layerType), fontsize=12)
        elif (self.direction == 'righttoleft') or (self.direction == 'lefttoright'):
            y_text = self.neuron_num_widest * self.distance_neurons
            x_text = self.x - self.distance_neurons
            if layerType == 0:
                plt.text(x_text, y_text, 'Input Layer', fontsize=12)
            elif layerType == -1:
                plt.text(x_text, y_text, 'Output Layer', fontsize=12)
            else:
                plt.text(x_text, y_text, 'Hidden Layer ' +
                         str(layerType), fontsize=12)
class NeuralNetwork():
    """Container for Layer objects; records the drawing direction and the
    neuron count of the widest layer (used to centre narrower layers)."""

    def __init__(self, neuron_num_widest, neuron_radius=0.5, direction='lefttoright'):
        self.neuron_num_widest = neuron_num_widest
        self.layers = []
        self.layertype = 0
        self.direction = direction
        self.neuron_radius = neuron_radius

    def add_layer(self, neuron_num, neuron_radius=0.5, line_weights=1, line_colors='k', neuron_color='k', neuron_text=''):
        """Append a new Layer with the given styling to the network."""
        self.layers.append(Layer(self, neuron_num, self.neuron_num_widest,
                                 neuron_radius, line_weights, line_colors,
                                 neuron_color, neuron_text))

    def draw(self):
        """Draw every layer on a fresh figure; the last layer is passed
        index -1 so it is captioned as the output layer."""
        plt.figure()
        last = len(self.layers) - 1
        for index, layer in enumerate(self.layers):
            layer.draw(-1 if index == last else index)
        plt.axis('scaled')
        plt.axis('off')
        plt.gcf().tight_layout()
|
# Source repository: hkamran80/IpBlacklister
import asyncio
import os
import re
import json
import datetime
import aiohttp
import aiosqlite
from yarl import URL
# Absolute directory containing this script; all relative paths resolve here.
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))


def log(message: str, file="ip_blacklister.log") -> None:
    """Append *message* as a single line to *file* inside the project dir."""
    path = os.path.join(PROJECT_ROOT, file)
    with open(path, "a") as handle:
        handle.write(f"{message}\n")
def get_api() -> str:
    """
    Returns the abuseipdb api key from the settings.json file.

    :return: str, abuseipdb api key
    """
    settings_path = os.path.join(PROJECT_ROOT, "settings.json")
    with open(settings_path, "r") as settings_file:
        return json.load(settings_file)["api"]
def get_access_log() -> str:
    """
    Returns the access log filepath from the settings.json file.

    :return: str, apache2 log filepath
    """
    settings_path = os.path.join(PROJECT_ROOT, "settings.json")
    with open(settings_path, "r") as settings_file:
        return json.load(settings_file)["access_log"]
def read_access_log(location: str) -> set:
    """
    Reads the access log and collects the leading IPv4 address of each line.

    :param location: str, apache2 log filepath
    :return: set, ip addresses
    """
    # Apache combined-log lines start with the client address, so the
    # pattern is anchored at the beginning of each line.
    leading_ip = re.compile(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}")
    with open(location) as log_file:
        hits = (leading_ip.match(line) for line in log_file)
        return {hit.group(0) for hit in hits if hit}
async def get_all_old_ips(db="db.db") -> set:
    """
    Returns all ip addresses in the database that have not been evaluated
    in the last 30 days.

    :param db: str, sqlite db file
    :return: set, ip addresses older than 30 days
    """
    db_path = os.path.join(PROJECT_ROOT, db)
    query = ("select ip from iptable "
             "where not day between date('now', '-30 day') and date('now')")
    async with aiosqlite.connect(db_path) as connection:
        cursor = await connection.cursor()
        await cursor.execute(query)
        rows = await cursor.fetchall()
    return {row[0] for row in rows}
async def get_all_recent_ips(db="db.db") -> set:
    """
    Returns all ip addresses in the database that were evaluated within
    the last 30 days.

    :param db: str, sqlite db file
    :return: set, ip addresses newer than 30 days
    """
    db_path = os.path.join(PROJECT_ROOT, db)
    query = ("select ip from iptable "
             "where day between date('now', '-30 day') and date('now')")
    async with aiosqlite.connect(db_path) as connection:
        cursor = await connection.cursor()
        await cursor.execute(query)
        rows = await cursor.fetchall()
    return {row[0] for row in rows}
async def request_url(url: str, session: aiohttp.ClientSession) -> dict:
    """
    Requests an abuseipdb api url and returns its decoded json payload.

    :param url: str, abuseipdb api url
    :param session: aiohttp.ClientSession, client session with api key header
    :return: dict, response data, or {} for any non-200 status
    """
    async with session.get(url) as response:
        if response.status != 200:
            return {}
        return await response.json(encoding="utf-8")
async def check_ips(*ips: str, max_age="30", api: str) -> list:
    """
    Requests abuseipdb with the given ips on the /check endpoint,
    running all requests concurrently.

    :param ips: str, ip addresses
    :param max_age: str, only consider reports at most this many days old
    :param api: str, abuseipdb api key
    :return: list, list of dicts with data about each ip
    """
    headers = {
        "Key": api,
        # Fix: the MIME type is "application/json"; the previous value
        # "applications/json" is not a valid Accept value for the API.
        "Accept": "application/json"}
    base_url = "https://api.abuseipdb.com/api/v2/check"
    async with aiohttp.ClientSession(headers=headers) as session:
        tasks = []
        for ip in ips:
            # URL() percent-encodes the query values.
            tasks.append(asyncio.create_task(request_url(
                f"{base_url}?ipAddress={URL(ip)}&maxAgeInDays={URL(max_age)}",
                session)))
        # Await every request; drop empty results (non-200 responses).
        return [result["data"] for result in [(await task) for task in tasks] if result]
async def store_ips(*ips: str, db="db.db") -> None:
    """
    Stores the ips in the database with the current date.

    :param ips: str, ip addresses
    :param db: str, sqlite database name
    :return: None
    """
    db = os.path.join(PROJECT_ROOT, db)
    async with aiosqlite.connect(db) as connection:
        cursor = await connection.cursor()
        today = datetime.date.today().strftime('%Y-%m-%d')
        # Fix: use a parameterized query instead of f-string interpolation
        # -- the ip values come from an external log file, so building SQL
        # by string formatting was an injection risk.
        sql = "insert or ignore into iptable ('ip', 'day') values (?, ?)"
        for ip in ips:
            await cursor.execute(sql, (ip, today))
        await connection.commit()
async def update_ips(*ips: str, db="db.db") -> None:
    """
    Updates the stored date of the given ips to today.

    :param ips: str, ip addresses
    :param db: str, sqlite database filepath
    :return: None
    """
    db = os.path.join(PROJECT_ROOT, db)
    async with aiosqlite.connect(db) as connection:
        cursor = await connection.cursor()
        today = datetime.date.today().strftime('%Y-%m-%d')
        # Fix: parameterized query -- the old f-string build was injectable
        # and its concatenation was also missing a space before "where".
        sql = "update iptable set day = ? where ip = ?"
        for ip in ips:
            await cursor.execute(sql, (today, ip))
        await connection.commit()
def evaluate_ip_ban(data: list) -> None:
    """
    Evaluates whether each reported ip is acting abusive and bans it if so.

    An ip is banned when its confidence score exceeds 70 AND it has more
    than 10 total reports.

    :param data: list, abuseipdb data dicts, one per ip address
    :return: None
    """
    for report in data:
        # Short-circuit keeps the original access order: totalReports is
        # only read when the confidence threshold is already exceeded.
        if report["abuseConfidenceScore"] > 70 and report["totalReports"] > 10:
            ban(report["ipAddress"])
def ban(ip: str) -> None:
    """
    Blocks the given ip via ufw and logs the action.

    :param ip: str, ip address
    :return: None
    """
    import subprocess
    # Fix: pass an argument list to subprocess.run (no shell) instead of
    # interpolating into an os.system() shell string, so a malformed "ip"
    # value cannot inject shell commands.
    subprocess.run(["ufw", "deny", "from", ip, "to", "any"], check=False)
    log(f"Banned {ip} date: {datetime.date.today()}")
async def main() -> None:
    """Read the access log, query abuseipdb for new/stale ips, ban the
    abusive ones, and refresh the database bookkeeping."""
    api_key = get_api()
    logged_ips = read_access_log(get_access_log())
    recent_ips = await get_all_recent_ips()
    stale_ips = await get_all_old_ips()
    # Evaluate ips not checked recently: fresh log entries plus stale rows.
    to_check = logged_ips.difference(recent_ips).union(stale_ips)
    evaluate_ip_ban(await check_ips(*to_check, api=api_key))
    # Newly seen ips get inserted; stale ips get their date refreshed.
    await store_ips(*logged_ips.difference(stale_ips).difference(recent_ips))
    await update_ips(*stale_ips)
if __name__ == '__main__':
    # Fix: asyncio.get_event_loop() for script entry points is deprecated
    # (since Python 3.10); asyncio.run() creates, runs, and closes the
    # event loop correctly.
    asyncio.run(main())
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.