input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
proxy = True
verbose_name = "22 Kontrak ATL DLH"
verbose_name_plural = "22 Kontrak ATL DLH"
def __unicode__(self):
return self.nomor_sp2d
class HargaATLDLH(HargaATL):
    """Proxy of HargaATL relabeled for the DLH (unit 22) admin; same DB table."""
    class Meta:
        proxy = True
        verbose_name = "22 Harga ATL DLH"
        verbose_name_plural = "22 Harga ATL DLH"

    def __unicode__(self):
        return "%s" % (self.id_atl)
class ATLPenghapusanDLH(ATL):
    """Proxy of ATL relabeled for the DLH (unit 22) admin; same DB table."""
    class Meta:
        proxy = True
        verbose_name = "22 ATL Penghapusan DLH"
        verbose_name_plural = "22 ATL Penghapusan DLH"

    def __unicode__(self):
        return self.nama_barang
class TahunBerkurangATLDLH(TahunBerkurangATL):
    """Proxy of TahunBerkurangATL relabeled for the DLH (unit 22) admin; same DB table."""
    class Meta:
        proxy = True
        verbose_name = "22 Tahun Berkurang ATL DLH"
        verbose_name_plural = "22 Tahun Berkurang ATL DLH"

    def __unicode__(self):
        return "%s" % (self.id)
class PenghapusanATLDLH(PenghapusanATL):
    """Proxy of PenghapusanATL relabeled for the DLH (unit 22) admin; same DB table."""
    class Meta:
        proxy = True
        verbose_name = "22 Penghapusan ATL DLH"
        verbose_name_plural = "22 Penghapusan ATL DLH"

    def __unicode__(self):
        return "%s" % (self.id)
class SKPDAsalATLDLH(SKPDAsalATL):
    """Proxy of SKPDAsalATL relabeled for the DLH (unit 22) admin; same DB table."""
    class Meta:
        proxy = True
        verbose_name = "22 SKPD Asal ATL DLH"
        verbose_name_plural = "22 SKPD Asal ATL DLH"

    def __unicode__(self):
        return "%s" % (self.id)
class SKPDTujuanATLDLH(SKPDTujuanATL):
    """Proxy of SKPDTujuanATL relabeled for the DLH (unit 22) admin; same DB table."""
    class Meta:
        proxy = True
        verbose_name = "22 SKPD Tujuan ATL DLH"
        verbose_name_plural = "22 SKPD Tujuan ATL DLH"

    def __unicode__(self):
        return "%s" % (self.id)
class FotoATLDLH(FotoATL):
    """Proxy of FotoATL relabeled for the DLH (unit 22) admin; same DB table."""
    class Meta:
        proxy = True
        verbose_name = "22 Foto ATL DLH"
        verbose_name_plural = "22 Foto ATL DLH"

    def __unicode__(self):
        return "%s" % (self.id_atl)
##DKO
##models for the DKO app
class ATLDKO(ATL):
    """Proxy of ATL relabeled for the DKO (unit 23) admin; same DB table."""
    class Meta:
        proxy = True
        verbose_name = "23 ATL DKO"
        verbose_name_plural = "23 ATL DKO"

    def __unicode__(self):
        return self.nama_barang
class ATLUsulHapusDKO(ATL):
    """Proxy of ATL relabeled for the DKO (unit 23) 'Usul Hapus' admin listing; same DB table."""
    class Meta:
        proxy = True
        verbose_name = "23 ATL Usul Hapus DKO"
        verbose_name_plural = "23 ATL Usul Hapus DKO"

    def __unicode__(self):
        return self.nama_barang
class TahunBerkurangUsulHapusATLDKO(TahunBerkurangUsulHapusATL):
    """Proxy of TahunBerkurangUsulHapusATL relabeled for the DKO (unit 23) admin; same DB table."""
    class Meta:
        proxy = True
        verbose_name = "23 Usul Hapus ATL DKO"
        verbose_name_plural = "23 Usul Hapus ATL DKO"

    def __unicode__(self):
        return "%s" % (self.id)
class KontrakATLDKO(KontrakATL):
    """Proxy of KontrakATL relabeled for the DKO (unit 23) admin; same DB table."""
    class Meta:
        proxy = True
        verbose_name = "23 Kontrak ATL DKO"
        verbose_name_plural = "23 Kontrak ATL DKO"

    def __unicode__(self):
        return self.nomor_sp2d
class HargaATLDKO(HargaATL):
    """Proxy of HargaATL relabeled for the DKO (unit 23) admin; same DB table."""
    class Meta:
        proxy = True
        verbose_name = "23 Harga ATL DKO"
        verbose_name_plural = "23 Harga ATL DKO"

    def __unicode__(self):
        return "%s" % (self.id_atl)
class ATLPenghapusanDKO(ATL):
    """Proxy of ATL relabeled for the DKO (unit 23) 'Penghapusan' admin listing; same DB table."""
    class Meta:
        proxy = True
        verbose_name = "23 ATL Penghapusan DKO"
        verbose_name_plural = "23 ATL Penghapusan DKO"

    def __unicode__(self):
        return self.nama_barang
class TahunBerkurangATLDKO(TahunBerkurangATL):
    """Proxy of TahunBerkurangATL relabeled for the DKO (unit 23) admin; same DB table."""
    class Meta:
        proxy = True
        verbose_name = "23 Tahun Berkurang ATL DKO"
        verbose_name_plural = "23 Tahun Berkurang ATL DKO"

    def __unicode__(self):
        return "%s" % (self.id)
class PenghapusanATLDKO(PenghapusanATL):
    """Proxy of PenghapusanATL relabeled for the DKO (unit 23) admin; same DB table."""
    class Meta:
        proxy = True
        verbose_name = "23 Penghapusan ATL DKO"
        verbose_name_plural = "23 Penghapusan ATL DKO"

    def __unicode__(self):
        return "%s" % (self.id)
class SKPDAsalATLDKO(SKPDAsalATL):
    """Proxy of SKPDAsalATL relabeled for the DKO (unit 23) admin; same DB table."""
    class Meta:
        proxy = True
        verbose_name = "23 SKPD Asal ATL DKO"
        verbose_name_plural = "23 SKPD Asal ATL DKO"

    def __unicode__(self):
        return "%s" % (self.id)
class SKPDTujuanATLDKO(SKPDTujuanATL):
    """Proxy of SKPDTujuanATL relabeled for the DKO (unit 23) admin; same DB table."""
    class Meta:
        proxy = True
        verbose_name = "23 SKPD Tujuan ATL DKO"
        verbose_name_plural = "23 SKPD Tujuan ATL DKO"

    def __unicode__(self):
        return "%s" % (self.id)
class FotoATLDKO(FotoATL):
    """Proxy of FotoATL relabeled for the DKO (unit 23) admin; same DB table."""
    class Meta:
        proxy = True
        verbose_name = "23 Foto ATL DKO"
        verbose_name_plural = "23 Foto ATL DKO"

    def __unicode__(self):
        return "%s" % (self.id_atl)
##KESBANGPOL
##models for the KESBANGPOL app
class ATLKESBANGPOL(ATL):
    """Proxy of ATL relabeled for the KESBANGPOL (unit 24) admin; same DB table."""
    class Meta:
        proxy = True
        verbose_name = "24 ATL KESBANGPOL"
        verbose_name_plural = "24 ATL KESBANGPOL"

    def __unicode__(self):
        return self.nama_barang
class ATLUsulHapusKESBANGPOL(ATL):
    """Proxy of ATL relabeled for the KESBANGPOL (unit 24) 'Usul Hapus' admin listing; same DB table."""
    class Meta:
        proxy = True
        verbose_name = "24 ATL Usul Hapus KESBANGPOL"
        verbose_name_plural = "24 ATL Usul Hapus KESBANGPOL"

    def __unicode__(self):
        return self.nama_barang
class TahunBerkurangUsulHapusATLKESBANGPOL(TahunBerkurangUsulHapusATL):
    """Proxy of TahunBerkurangUsulHapusATL relabeled for the KESBANGPOL (unit 24) admin; same DB table."""
    class Meta:
        proxy = True
        verbose_name = "24 Usul Hapus ATL KESBANGPOL"
        verbose_name_plural = "24 Usul Hapus ATL KESBANGPOL"

    def __unicode__(self):
        return "%s" % (self.id)
class KontrakATLKESBANGPOL(KontrakATL):
    """Proxy of KontrakATL relabeled for the KESBANGPOL (unit 24) admin; same DB table."""
    class Meta:
        proxy = True
        verbose_name = "24 Kontrak ATL KESBANGPOL"
        verbose_name_plural = "24 Kontrak ATL KESBANGPOL"

    def __unicode__(self):
        return self.nomor_sp2d
class HargaATLKESBANGPOL(HargaATL):
    """Proxy of HargaATL relabeled for the KESBANGPOL (unit 24) admin; same DB table."""
    class Meta:
        proxy = True
        verbose_name = "24 Harga ATL KESBANGPOL"
        verbose_name_plural = "24 Harga ATL KESBANGPOL"

    def __unicode__(self):
        return "%s" % (self.id_atl)
class ATLPenghapusanKESBANGPOL(ATL):
    """Proxy of ATL relabeled for the KESBANGPOL (unit 24) 'Penghapusan' admin listing; same DB table."""
    class Meta:
        proxy = True
        verbose_name = "24 ATL Penghapusan KESBANGPOL"
        verbose_name_plural = "24 ATL Penghapusan KESBANGPOL"

    def __unicode__(self):
        return self.nama_barang
class TahunBerkurangATLKESBANGPOL(TahunBerkurangATL):
    """Proxy of TahunBerkurangATL relabeled for the KESBANGPOL (unit 24) admin; same DB table."""
    class Meta:
        proxy = True
        verbose_name = "24 Tahun Berkurang ATL KESBANGPOL"
        verbose_name_plural = "24 Tahun Berkurang ATL KESBANGPOL"

    def __unicode__(self):
        return "%s" % (self.id)
class PenghapusanATLKESBANGPOL(PenghapusanATL):
    """Proxy of PenghapusanATL relabeled for the KESBANGPOL (unit 24) admin; same DB table."""
    class Meta:
        proxy = True
        verbose_name = "24 Penghapusan ATL KESBANGPOL"
        verbose_name_plural = "24 Penghapusan ATL KESBANGPOL"

    def __unicode__(self):
        return "%s" % (self.id)
class SKPDAsalATLKESBANGPOL(SKPDAsalATL):
    """Proxy of SKPDAsalATL relabeled for the KESBANGPOL (unit 24) admin; same DB table."""
    class Meta:
        proxy = True
        verbose_name = "24 SKPD Asal ATL KESBANGPOL"
        verbose_name_plural = "24 SKPD Asal ATL KESBANGPOL"

    def __unicode__(self):
        return "%s" % (self.id)
class SKPDTujuanATLKESBANGPOL(SKPDTujuanATL):
    """Proxy of SKPDTujuanATL relabeled for the KESBANGPOL (unit 24) admin; same DB table."""
    class Meta:
        proxy = True
        verbose_name = "24 SKPD Tujuan ATL KESBANGPOL"
        verbose_name_plural = "24 SKPD Tujuan ATL KESBANGPOL"

    def __unicode__(self):
        return "%s" % (self.id)
class FotoATLKESBANGPOL(FotoATL):
    """Proxy of FotoATL relabeled for the KESBANGPOL (unit 24) admin; same DB table."""
    class Meta:
        proxy = True
        verbose_name = "24 Foto ATL KESBANGPOL"
        verbose_name_plural = "24 Foto ATL KESBANGPOL"

    def __unicode__(self):
        return "%s" % (self.id_atl)
##SATPOLPP
##models for the SATPOLPP app
class ATLSATPOLPP(ATL):
    """Proxy of ATL relabeled for the SATPOLPP (unit 25) admin; same DB table."""
    class Meta:
        proxy = True
        verbose_name = "25 ATL SATPOLPP"
        verbose_name_plural = "25 ATL SATPOLPP"

    def __unicode__(self):
        return self.nama_barang
class ATLUsulHapusSATPOLPP(ATL):
    """Proxy of ATL relabeled for the SATPOLPP (unit 25) 'Usul Hapus' admin listing; same DB table."""
    class Meta:
        proxy = True
        verbose_name = "25 ATL Usul Hapus SATPOLPP"
        verbose_name_plural = "25 ATL Usul Hapus SATPOLPP"

    def __unicode__(self):
        return self.nama_barang
class TahunBerkurangUsulHapusATLSATPOLPP(TahunBerkurangUsulHapusATL):
    """Proxy of TahunBerkurangUsulHapusATL relabeled for the SATPOLPP (unit 25) admin; same DB table."""
    class Meta:
        proxy = True
        verbose_name = "25 Usul Hapus ATL SATPOLPP"
        verbose_name_plural = "25 Usul Hapus ATL SATPOLPP"

    def __unicode__(self):
        return "%s" % (self.id)
class KontrakATLSATPOLPP(KontrakATL):
    """Proxy of KontrakATL relabeled for the SATPOLPP (unit 25) admin; same DB table."""
    class Meta:
        proxy = True
        verbose_name = "25 Kontrak ATL SATPOLPP"
        verbose_name_plural = "25 Kontrak ATL SATPOLPP"

    def __unicode__(self):
        return self.nomor_sp2d
class HargaATLSATPOLPP(HargaATL):
    """Proxy of HargaATL relabeled for the SATPOLPP (unit 25) admin; same DB table."""
    class Meta:
        proxy = True
        verbose_name = "25 Harga ATL SATPOLPP"
        verbose_name_plural = "25 Harga ATL SATPOLPP"

    def __unicode__(self):
        return "%s" % (self.id_atl)
class ATLPenghapusanSATPOLPP(ATL):
    """Proxy of ATL relabeled for the SATPOLPP (unit 25) 'Penghapusan' admin listing; same DB table."""
    class Meta:
        proxy = True
        verbose_name = "25 ATL Penghapusan SATPOLPP"
        verbose_name_plural = "25 ATL Penghapusan SATPOLPP"

    def __unicode__(self):
        return self.nama_barang
class TahunBerkurangATLSATPOLPP(TahunBerkurangATL):
    """Proxy of TahunBerkurangATL relabeled for the SATPOLPP (unit 25) admin; same DB table."""
    class Meta:
        proxy = True
        verbose_name = "25 Tahun Berkurang ATL SATPOLPP"
        verbose_name_plural = "25 Tahun Berkurang ATL SATPOLPP"

    def __unicode__(self):
        return "%s" % (self.id)
class PenghapusanATLSATPOLPP(PenghapusanATL):
    """Proxy of PenghapusanATL relabeled for the SATPOLPP (unit 25) admin; same DB table."""
    class Meta:
        proxy = True
        verbose_name = "25 Penghapusan ATL SATPOLPP"
        verbose_name_plural = "25 Penghapusan ATL SATPOLPP"

    def __unicode__(self):
        return "%s" % (self.id)
class SKPDAsalATLSATPOLPP(SKPDAsalATL):
    """Proxy of SKPDAsalATL relabeled for the SATPOLPP (unit 25) admin; same DB table."""
    class Meta:
        proxy = True
        verbose_name = "25 SKPD Asal ATL SATPOLPP"
        verbose_name_plural = "25 SKPD Asal ATL SATPOLPP"

    def __unicode__(self):
        return "%s" % (self.id)
class SKPDTujuanATLSATPOLPP(SKPDTujuanATL):
    """Proxy of SKPDTujuanATL relabeled for the SATPOLPP (unit 25) admin; same DB table."""
    class Meta:
        proxy = True
        verbose_name = "25 SKPD Tujuan ATL SATPOLPP"
        verbose_name_plural = "25 SKPD Tujuan ATL SATPOLPP"

    def __unicode__(self):
        return "%s" % (self.id)
class FotoATLSATPOLPP(FotoATL):
    """Proxy of FotoATL relabeled for the SATPOLPP (unit 25) admin; same DB table."""
    class Meta:
        proxy = True
        verbose_name = "25 Foto ATL SATPOLPP"
        verbose_name_plural = "25 Foto ATL SATPOLPP"

    def __unicode__(self):
        return "%s" % (self.id_atl)
##BKPPD
##models for the BKPPD app
class ATLBKPPD(ATL):
    """Proxy of ATL relabeled for the BKPPD (unit 26) admin; same DB table."""
    class Meta:
        proxy = True
        verbose_name = "26 ATL BKPPD"
        verbose_name_plural = "26 ATL BKPPD"

    def __unicode__(self):
        return self.nama_barang
class ATLUsulHapusBKPPD(ATL):
    """Proxy of ATL relabeled for the BKPPD (unit 26) 'Usul Hapus' admin listing; same DB table."""
    class Meta:
        proxy = True
        verbose_name = "26 ATL Usul Hapus BKPPD"
        verbose_name_plural = "26 ATL Usul Hapus BKPPD"

    def __unicode__(self):
        return self.nama_barang
class TahunBerkurangUsulHapusATLBKPPD(TahunBerkurangUsulHapusATL):
    """Proxy of TahunBerkurangUsulHapusATL relabeled for the BKPPD (unit 26) admin; same DB table."""
    class Meta:
        proxy = True
        verbose_name = "26 Usul Hapus ATL BKPPD"
        verbose_name_plural = "26 Usul Hapus ATL BKPPD"

    def __unicode__(self):
        return "%s" % (self.id)
class KontrakATLBKPPD(KontrakATL):
    """Proxy of KontrakATL relabeled for the BKPPD (unit 26) admin; same DB table."""
    class Meta:
        proxy = True
        verbose_name = "26 Kontrak ATL BKPPD"
        verbose_name_plural = "26 Kontrak ATL BKPPD"

    def __unicode__(self):
        return self.nomor_sp2d
class HargaATLBKPPD(HargaATL):
    """Proxy of HargaATL relabeled for the BKPPD (unit 26) admin; same DB table."""
    class Meta:
        proxy = True
        verbose_name = "26 Harga ATL BKPPD"
        verbose_name_plural = "26 Harga ATL BKPPD"

    def __unicode__(self):
        return "%s" % (self.id_atl)
class ATLPenghapusanBKPPD(ATL):
    """Proxy of ATL relabeled for the BKPPD (unit 26) 'Penghapusan' admin listing; same DB table."""
    class Meta:
        proxy = True
        verbose_name = "26 ATL Penghapusan BKPPD"
        verbose_name_plural = "26 ATL Penghapusan BKPPD"

    def __unicode__(self):
        return self.nama_barang
class TahunBerkurangATLBKPPD(TahunBerkurangATL):
    """Proxy of TahunBerkurangATL relabeled for the BKPPD (unit 26) admin; same DB table."""
    class Meta:
        proxy = True
        verbose_name = "26 Tahun Berkurang ATL BKPPD"
        verbose_name_plural = "26 Tahun Berkurang ATL BKPPD"

    def __unicode__(self):
        return "%s" % (self.id)
class PenghapusanATLBKPPD(PenghapusanATL):
    """Proxy of PenghapusanATL relabeled for the BKPPD (unit 26) admin; same DB table."""
    class Meta:
        proxy = True
        verbose_name = "26 Penghapusan ATL BKPPD"
        verbose_name_plural = "26 Penghapusan ATL BKPPD"

    def __unicode__(self):
        return "%s" % (self.id)
class SKPDAsalATLBKPPD(SKPDAsalATL):
    """Proxy of SKPDAsalATL relabeled for the BKPPD (unit 26) admin; same DB table."""
    class Meta:
        proxy = True
        verbose_name = "26 SKPD Asal ATL BKPPD"
        verbose_name_plural = "26 SKPD Asal ATL BKPPD"

    def __unicode__(self):
        return "%s" % (self.id)
class SKPDTujuanATLBKPPD(SKPDTujuanATL):
    """Proxy of SKPDTujuanATL relabeled for the BKPPD (unit 26) admin; same DB table."""
    class Meta:
        proxy = True
        verbose_name = "26 SKPD Tujuan ATL BKPPD"
        verbose_name_plural = "26 SKPD Tujuan ATL BKPPD"

    def __unicode__(self):
        return "%s" % (self.id)
class FotoATLBKPPD(FotoATL):
    """Proxy of FotoATL relabeled for the BKPPD (unit 26) admin; same DB table."""
    class Meta:
        proxy = True
        verbose_name = "26 Foto ATL BKPPD"
        verbose_name_plural = "26 Foto ATL BKPPD"

    def __unicode__(self):
        return "%s" % (self.id_atl)
##SekretariatKorpri
##models for the SekretariatKorpri app
class ATLSekretariatKorpri(ATL):
class Meta:
proxy = True
verbose_name = "27 ATL Sekretariat Korpri"
verbose_name_plural = "27 ATL Sekretariat Korpri"
| |
<reponame>carbonblack/cbapi
#!/usr/bin/python
import os
import sys
import time
import json
import random
import pprint
import socket
import struct
import syslog
import requests
import optparse
sys.path.insert(0, "lib/")
from eventHelpers import *
# Cache of sensor id -> simplified host-detail dict, filled lazily by
# lookup_host_details().
sensorid_to_details_map = {}
# CB REST API settings ('url', 'apitoken', 'ssl_verify'); presumably populated
# from the CLI options in __main__ -- TODO confirm (that code is truncated here).
cbapi = {}
# Active EventOutput instance used by dumpEvent().
g_output = None
class EventOutput(object):
    """Base class for event sinks.

    Validates the requested format/destination and renders a sensor-event
    dict to a single output string; subclasses implement output() to
    deliver the rendered string to their destination.
    """
    FORMATS = ['json', 'table', 'csv']
    DESTINATIONS = ['udp', 'tcp', 'syslog', 'file', 'stdout']

    def __init__(self, out_format, out_dest):
        if out_format not in EventOutput.FORMATS:
            raise ValueError("output format (%s) not a valid format value" % out_format)
        if out_dest not in EventOutput.DESTINATIONS:
            raise ValueError("output destination (%s) not a valid destination value" % out_dest)
        self.oformat = out_format
        self.dest = out_dest

    def _getPathFromEvent(self, event):
        """
        Get a "path" representation of a sensor event
        """
        etype = event["type"]
        # these event types all carry their payload in the "path" field
        if etype in ("filemod", "proc", "regmod", "modload"):
            return event["path"]
        elif "netconn" == etype:
            # protocol 6 is TCP; anything else (default 17) is treated as UDP
            if event.get('protocol', 17) == 6:
                proto = "tcp"
            else:
                proto = "udp"
            return "%s:%s (%s) via %s %s" % (event.get("ipv4", "<no IP>"), event["port"], event.get("domain", "<no domain>"), proto, event.get("direction", "<unknown direction>"))
        elif "childproc" == etype:
            return event["created"]
        return ""

    def format(self, event):
        """Render *event* according to self.oformat and return the string."""
        if "json" == self.oformat:
            try:
                # prefer the faster cjson encoder when installed
                import cjson
                return cjson.encode(event)
            except Exception:
                return json.dumps(event)
        # 'table' and 'csv' share the same field list
        # NOTE(review): .encode('hex') is Python-2-only; md5 is presumably raw
        # bytes here -- confirm before porting to Python 3.
        fields = (time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(event["timestamp"])),
                  event.get('computer_name', ""),
                  event['type'],
                  event.get("md5", "").encode('hex'),
                  self._getPathFromEvent(event))
        if 'table' == self.oformat:
            return "%-19s | %-20s | %10s | %33s | %s" % fields
        # 'csv' is the only remaining format (enforced by __init__)
        return "%s ; %s ; %s ; %s ; %s" % fields

    def output(self, eventdata):
        # must be overridden by concrete sinks
        raise NotImplementedError("output() must be implemented by a subclass")
class StdOutOutput(EventOutput):
    """Event sink that writes each formatted event to stdout."""
    def __init__(self, format):
        super(StdOutOutput, self).__init__(format, 'stdout')

    def output(self, eventdata):
        # call form works identically for a single argument on Python 2 and 3,
        # unlike the original Python-2-only print statement
        print(eventdata)
class FileOutput(EventOutput):
    """Event sink that appends formatted events, one per line, to a file."""
    def __init__(self, format, outfile):
        super(FileOutput, self).__init__(format, 'file')
        # NOTE(review): the handle stays open for the object's lifetime and is
        # never explicitly closed/flushed -- relies on interpreter cleanup.
        self.fout = open(outfile, 'a')

    def output(self, eventdata):
        self.fout.write(eventdata + '\n')
class SyslogOutput(EventOutput):
    """Event sink that writes formatted events to syslog."""
    def __init__(self, format, identity='eventExporter.py', facility=syslog.LOG_LOCAL0, priority=syslog.LOG_INFO):
        super(SyslogOutput, self).__init__(format, 'syslog')
        self.priority = priority
        # openlog() is process-global; LOG_PID adds the PID to every message
        syslog.openlog(identity, syslog.LOG_PID, facility)

    def output(self, eventdata):
        syslog.syslog(self.priority, eventdata)
class UdpOutput(EventOutput):
    """Event sink that sends each formatted event as one UDP datagram."""
    def __init__(self, format, host, port):
        super(UdpOutput, self).__init__(format, 'udp')
        # resolved once up front; a DNS change during the run is not picked up
        self.ip = socket.gethostbyname(host)
        self.port = port
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

    def output(self, eventdata):
        self.sock.sendto(eventdata+'\n', (self.ip, self.port))
class TcpOutput(EventOutput):
    """Event sink that streams newline-delimited formatted events over TCP."""
    def __init__(self, format, host, port):
        super(TcpOutput, self).__init__(format, 'tcp')
        ip = socket.gethostbyname(host)
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.connect((ip, port))

    def output(self, eventdata):
        # send() may transmit only part of the buffer on a stream socket;
        # sendall() retries until the whole line is handed to the kernel.
        self.sock.sendall(eventdata + '\n')
def lookup_host_details(sensor_id):
    """
    return a dictionary describing a sensor, as identified by its id
    use the documented CB API, caching the results for subsequent faster lookup
    return an empty dictionary on lookup failure
    """
    global sensorid_to_details_map
    try:
        # without cbapi access, nothing to do
        # ('in' replaces dict.has_key(), which was removed in Python 3)
        if 'url' not in cbapi or 'apitoken' not in cbapi:
            return {}
        # use the cached copy if available
        if sensor_id in sensorid_to_details_map:
            return sensorid_to_details_map[sensor_id]
        # perform the lookup
        # this will fail if the CB server is not available, if the cb
        # api parameters are incorrect, or if the sensor id does not exist
        url = "%s/api/v1/sensor/%s" % (cbapi['url'], sensor_id)
        r = requests.get(url, headers={'X-Auth-Token': cbapi['apitoken']}, verify=cbapi['ssl_verify'])
        r.raise_for_status()
        host_details = r.json()
        # the sensor endpoint provides a lot more detail than is required
        # strip down to just computer name, computer sid, and sensor id
        host_simple = {}
        if 'computer_name' in host_details:
            host_simple['computer_name'] = host_details['computer_name']
        if 'computer_sid' in host_details:
            host_simple['computer_sid'] = host_details['computer_sid']
        host_simple['sensor_id'] = sensor_id
        # cache off the host details
        sensorid_to_details_map[sensor_id] = host_simple
        return host_simple
    except Exception:
        # deliberate best-effort: any lookup failure yields an empty dict
        # (the original's second, bare 'except:' after this handler was
        # redundant and also swallowed KeyboardInterrupt/SystemExit)
        return {}
def dumpEvent(event):
    """Format *event* with the module-global output sink and emit it."""
    global g_output
    fevent = g_output.format(event)
    g_output.output(fevent)
def processEventLogDir(directory, outputformat, remove):
    """
    recursively enumerate a directory, processing each file as a
    Carbon Black sensor event log
    """
    for root, dirnames, filenames in os.walk(directory):
        for filename in filenames:
            # the sensor id is the name of the leaf directory;
            # os.path.basename is portable (the original split on '/'),
            # and assigning outside the try guarantees it is bound below
            sensor_id = os.path.basename(root)
            hostinfo = {}
            try:
                hostinfo = lookup_host_details(sensor_id)
            except Exception:
                # best-effort enrichment; proceed with empty host info
                pass
            processEventLogFile(os.path.join(root, filename), outputformat, remove, hostinfo, sensor_id)
def getEventLogDirFromCfg():
    """
    determine the directory for archived CB sensor logs based on current configuration
    """
    # 'with' closes the config file deterministically (original leaked the handle)
    with open("/etc/cb/datastore/archive.properties") as cfg:
        for line in cfg:
            if line.strip().startswith('cbfs-http.log-archive.filesystem.location'):
                return line.split('=')[1].strip()
    raise Exception("Unable to determine value of the cbfs-http.log-archive.filesystem.location config option")
def getBusUsernameFromConfig():
    """Return the RabbitMQUser value from /etc/cb/cb.conf (None when absent)."""
    # 'with' closes the config file deterministically (original leaked the handle)
    with open('/etc/cb/cb.conf') as cfg:
        for line in cfg:
            if line.strip().startswith('RabbitMQUser'):
                return line.split('=')[1].strip()
def getBusPasswordFromConfig():
    """Return the RabbitMQPassword value from /etc/cb/cb.conf (None when absent)."""
    # 'with' closes the config file deterministically (original leaked the handle)
    with open('/etc/cb/cb.conf') as cfg:
        for line in cfg:
            if line.strip().startswith('RabbitMQPassword'):
                return line.split('=')[1].strip()
def processEventLogFile(filename, outputformat, remove, hostinfo, sensorid):
    """
    read an entire event log file from disk, break it into its
    component protobuf events, re-package each protobuf event as
    json, and output
    """
    sys.stderr.write("-> Processing %s...\n" % (filename,))
    events = []
    # binary mode: records are raw length-prefixed protobuf blobs
    # (original used text mode and leaked the handle on error)
    with open(filename, 'rb') as f:
        while True:
            cb = f.read(4)
            if len(cb) < 4:
                # EOF, or a truncated trailing length header (the original
                # raised struct.error on a short read)
                break
            # native-endian 4-byte record length precedes each event
            cb = struct.unpack('i', cb)[0]
            msg = f.read(cb)
            events.append(msg)
    sys.stderr.write("-> Read %d events\n" % (len(events),))
    num_events_attempted = 0
    num_events_succeeded = 0
    for event in events:
        try:
            # get the event as a native python object (dictionary)
            # this means de-protobuf-ing
            event_as_obj = protobuf_to_obj(event, sensorid)
            event_as_obj.update(hostinfo)
            dumpEvent(event_as_obj)
            num_events_succeeded += 1
        except Exception:
            # best-effort: a bad record is counted but does not stop the run
            pass
        num_events_attempted += 1
    sys.stderr.write("-> Events Sent        : %d\n" % (num_events_succeeded,))
    sys.stderr.write("-> Events Send Failed : %d\n" % (num_events_attempted - num_events_succeeded,))
    if remove:
        os.remove(filename)
def handle_event_pb(protobuf_bytes):
    # Decode the raw protobuf into (sensor id, event dict), enrich the event
    # with cached host details, then hand it to the configured output sink.
    sensorid, event_obj = protobuf_to_obj_and_host(protobuf_bytes)
    host_details = lookup_host_details(sensorid)
    event_obj.update(host_details)
    dumpEvent(event_obj)
def on_bus_msg(channel, method_frame, header_frame, body):
    '''
    callback that gets called for any event on the CB pub/sub event bus
    '''
    try:
        # only protobuf payloads are sensor events we know how to decode
        if "application/protobuf" == header_frame.content_type:
            handle_event_pb(body)
    except Exception as e:
        # 'as e' replaces the Python-2-only 'except Exception, e' syntax
        sys.stderr.write("-> Exception processing bus msg: %s\n" % e)
    finally:
        # need to make sure we ack the messages so they don't get left un-acked in the queue
        # we set multiple to true to ensure that we ack all previous messages
        channel.basic_ack(delivery_tag=method_frame.delivery_tag, multiple=True)
def processEventsFromBus(rabbit_mq_user, rabbit_mq_pass):
    """Subscribe to the CB RabbitMQ event bus and stream events until Ctrl-C."""
    # import this here so the other functions (file, directory)
    # work without pika installed
    import pika
    credentials = pika.PlainCredentials(rabbit_mq_user, rabbit_mq_pass)
    # CB exposes RabbitMQ on localhost:5004, default '/' vhost
    parameters = pika.ConnectionParameters('localhost',
                                           5004,
                                           '/',
                                           credentials)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()
    # per-process queue name so multiple exporters can run side by side
    queue_name = 'event_exporter_pid_%d' % os.getpid()
    # make sure you use auto_delete so the queue isn't left filling
    # with events when this program exits.
    channel.queue_declare(queue=queue_name, auto_delete=True)
    # '#' routing key subscribes to every event type on the exchange
    channel.queue_bind(exchange='api.events', queue=queue_name, routing_key='#')
    channel.basic_consume(on_bus_msg, queue=queue_name)
    sys.stderr.write("-> Subscribed to Pub/Sub bus (press Ctl-C to quit)\n")
    try:
        channel.start_consuming()
    except KeyboardInterrupt:
        # Ctrl-C: drain cleanly and close the connection
        channel.stop_consuming()
    connection.close()
def build_cli_parser():
    """Build the optparse parser for the event exporter CLI.

    Help-text fixes versus the original: '-f' claimed the default was
    'table' while the actual default is 'json'; 'writting' -> 'writing';
    the tcp/udp formats are HOST:PORT, not HOST:IP.
    """
    parser = optparse.OptionParser(usage="%prog [options]", description="Process Carbon Black Sensor Event Logs")
    #
    # CB server info (needed for host information lookups)
    #
    group = optparse.OptionGroup(parser, "CB server options")
    group.add_option("-c", "--cburl", action="store", default=None, dest="url",
                     help="CB server's URL.  e.g., http://127.0.0.1; only useful when -A is specified")
    group.add_option("-a", "--apitoken", action="store", default=None, dest="token",
                     help="API Token for Carbon Black server; only useful when -A and -c are specified")
    group.add_option("-n", "--no-ssl-verify", action="store_false", default=True, dest="ssl_verify",
                     help="Do not verify server SSL certificate; only useful when -c is specified.")
    parser.add_option_group(group)
    #
    # Input options (ie - where should I grab the raw events from)
    #
    group = optparse.OptionGroup(parser, "Event input source options")
    group.add_option("-i", "--in-file", action="store", default=None, dest="infile",
                     help="Single CB sensor event log filename to process")
    group.add_option("-d", "--directory", action="store", default=None, dest="directory",
                     help="Directory to enumerate looking for Carbon Black event log files")
    group.add_option("-r", "--remove", action="store_true", default=False, dest="remove",
                     help="Remove event log file(s) after processing; use with caution!")
    group.add_option("-A", "--auto", action="store_true", default=False, dest="auto",
                     help="Automatically find the event log directory from CB server config")
    group.add_option("-b", "--bus", action="store_true", default=False, dest="bus",
                     help="Pull events out of the CB pub/sub event bus")
    group.add_option("-u", "--user", action="store", default=None, dest="user",
                     help="The username for the rabbitMQ pub/sub event bus (default is to pull it from config)")
    group.add_option("-p", "--pass", action="store", default=None, dest="pwd",
                     help="The password for the rabbitMQ pub/sub event bus (default is to pull it from config)")
    parser.add_option_group(group)
    #
    # Output options (ie - where do we put the formatted events and how are they formatted)
    #
    group = optparse.OptionGroup(parser, "Output source options",
                                 "Output options for events that control both the formatting and destination")
    group.add_option("-f", "--format", action="store", default="json", dest="format",
                     help="Output format; must be one of [json|table|csv]; default is json")
    group.add_option("-o", "--out-file", action="store", default=None, dest="outfile",
                     help="Write the formatted events to a log file (default is writing to stdout)")
    group.add_option("-s", "--syslog", action="store_true", default=False, dest="syslog",
                     help="Write the formatted events to the syslog file (default is writing to stdout)")
    group.add_option('-t', '--tcp-out', action='store', default=None, dest='tcpout',
                     help='Write the formatted events to a tcp host and port (format is HOST:PORT)')
    group.add_option('-U', '--udp-out', action='store', default=None, dest='udpout',
                     help='Write the formatted events to a udp host and port (format is HOST:PORT)')
    parser.add_option_group(group)
    return parser
if __name__ == '__main__':
parser = build_cli_parser()
opts, args = parser.parse_args(sys.argv)
# check for | |
<gh_stars>1-10
from helpers import Project2Box, ProjOperator, squaredNorm, clearFile
import logging
import random
import math
import sys
from topologyGenerator import Problem
import numpy as np
import argparse
from SparseVector import SparseVector
import time
import pickle
class boxOptimizer():
def __init__(self):
    """Trust-region optimizer for box-constrained problems of the form

        Minimize   F(x)
        Subject to 0 <= x <= B,

    implementing the algorithm proposed in "Global Convergence of a Class of
    Trust Region Algorithms for Optimization with Simple Bounds".
    """
    # acceptance thresholds: steps with ratio <= mu are rejected,
    # ratio >= eta triggers the strongest trust-region expansion
    self.mu = 0.5
    self.eta = 0.6
    # shrink factors applied (averaged) after a rejected step
    self.gamma0 = 0.3
    self.gamma1 = 0.7
    #self.gamma2 = 2.0
    # growth factor applied (averaged with 1.0) after a strongly accepted step
    self.gamma2 = 10.0
    # scaling between the trust-region radius Delta and the step threshold
    self.nu = 1.0
    # initial trust-region radius (re-initialized at the top of optimizer())
    self.Delta = 0.5
def initialPoint(self, Pr, SHIFTS, startFromLast=False):
    """Choose a starting point for problem Pr.

    Keeps the current Pr.VAR when startFromLast is set or when the current
    point is feasible for the shifted constraints; otherwise resets each
    variable (tuple-keyed ones to their box upper bound, the rest to 0).
    """
    if startFromLast:
        return
    constraint_func, constraint_grads_dummy, constraint_Hessian_dummy = Pr.evalFullConstraintsGrad(0)
    FEAS = True
    # If the current point already satisfies every shifted constraint, keep it
    for constraint in constraint_func:
        FEAS = FEAS and constraint_func[constraint] + SHIFTS[constraint] >= 0.0
    if FEAS:
        return
    for key in Pr.VAR:
        # NOTE(review): a tuple in key[1] distinguishes the two variable
        # families below -- the key scheme is defined in Problem (not visible here)
        if type(key[1]) == tuple:
            #Remainder variables
            Pr.VAR[key] = Pr.BOX[key]
        else:
            #Caching variables
            Pr.VAR[key] = 0.0
def evaluate(self, Pr, LAMBDAS, SHIFTS, degree=2, debug=False):
    """Evaluate the shifted log-barrier objective for problem Pr.

    degree = -1 only computes LAMBDA_BAR
    degree =  0 computes LAMBDA_BAR plus the objective value
    degree = +1 computes LAMBDA_BAR, the objective, and the objective's gradient
    degree = +2 computes LAMBDA_BAR, the objective, the objective's gradient,
                and the objective's Hessian

    Returns (LAMBDA_BAR, obj, grad, Hessian) with grad/Hessian as SparseVectors.
    """
    obj_barrier = 0.0
    grad_barrier = {}
    Hessian_barrier = {}
    LAMBDA_BAR = {}
    # w.r.t. constraints
    constraint_func, constraint_grads, constraint_Hessian = Pr.evalFullConstraintsGrad(degree)
    # w.r.t. objective
    obj_func, obj_grads, obj_Hessian = Pr.evalGradandUtilities(degree)
    for obj in obj_func:
        if degree < 0:
            continue
        # accumulate the raw objective value
        obj_barrier += obj_func[obj]
        if degree<1:
            continue
        # accumulate the objective gradient entries
        for index in obj_grads[obj]:
            if index in grad_barrier:
                grad_barrier[index] += obj_grads[obj][index]
            else:
                grad_barrier[index] = obj_grads[obj][index]
        if degree<2:
            continue
        # accumulate the objective Hessian entries
        for index_pair in obj_Hessian[obj]:
            if index_pair in Hessian_barrier:
                Hessian_barrier[index_pair] += obj_Hessian[obj][index_pair]
            else:
                Hessian_barrier[index_pair] = obj_Hessian[obj][index_pair]
    for constraint in constraint_func:
        # effective multiplier: lambda * s / (g(x) + s)
        LAMBDA_BAR[constraint] = LAMBDAS[constraint] * SHIFTS[constraint] / (constraint_func[constraint] + SHIFTS[constraint])
        if degree < 0:
            continue
        # barrier term: -lambda * s * log(g(x) + s); infeasible -> +inf
        try:
            obj_barrier += -1.0 * LAMBDAS[constraint] * SHIFTS[constraint] * math.log(constraint_func[constraint] + SHIFTS[constraint])
        except ValueError:
            obj_barrier = float("inf")
        if degree<1:
            continue
        # barrier gradient: -lambda_bar * grad g(x)
        for index in constraint_grads[constraint]:
            grad_index = -1.0 * LAMBDA_BAR[constraint] * constraint_grads[constraint][index]
            if index in grad_barrier:
                grad_barrier[index] += grad_index
            else:
                grad_barrier[index] = grad_index
        if degree<2:
            continue
        # NOTE(review): the constraint Hessian is added unweighted here (no
        # LAMBDA_BAR factor and no rank-one grad term) -- confirm this matches
        # what evalFullConstraintsGrad returns.
        for index_pair in constraint_Hessian[constraint]:
            if index_pair in Hessian_barrier:
                Hessian_barrier[index_pair] += constraint_Hessian[constraint][index_pair]
            else:
                Hessian_barrier[index_pair] = constraint_Hessian[constraint][index_pair]
    return LAMBDA_BAR, obj_barrier, SparseVector(grad_barrier), SparseVector(Hessian_barrier)
def optimizer(self, Pr, Lambdas, Shifts, FirstOrderOptThreshold, iterations=100, debug=False, logger=None):
    """Run the trust-region loop on Pr until the projected-gradient measure
    drops below FirstOrderOptThreshold or `iterations` steps have been taken.

    Returns (LAMBDA_BAR, firstOrderOpt) from the final evaluation.
    """
    REJ = False
    LAMBDA_BAR, obj, grad, Hessian = self.evaluate(Pr, Lambdas, Shifts)
    # Initialize delta
    self.Delta = 0.5
    for i in range(iterations):
        TrustRegionThreshold = self.Delta * self.nu
        # Find a direction for update
        s_k = self._findCauchyPoint(grad, Hessian, Pr.VAR, Pr.BOX, TrustRegionThreshold, debug=False)
        # Update the current solution
        Pr.VAR += s_k
        # Evaluate only the objective for the new point
        LAMBDA_BAR, obj_toBeTested, grad_NULL, Hessian_NULL = self.evaluate(Pr, Lambdas, Shifts, 0)
        # Measure the improvement ratio: actual decrease over the decrease
        # predicted by the local quadratic model
        if -1.0 * s_k.dot(grad) - 0.5 * s_k.dot( s_k.MatMul(Hessian) ) != 0:
            rho_k = (obj - obj_toBeTested) / (-1.0 * s_k.dot(grad) - 0.5 * s_k.dot( s_k.MatMul(Hessian) ) )
        else:
            # If in the current interval the local min is t = 0, make the trust
            # region larger so that the algorithm can find a better local min.
            rho_k = self.eta
        if rho_k <= self.mu:
            # Point rejected: undo the step and shrink the trust region
            REJ = True
            Pr.VAR -= s_k
            self.Delta *= 0.5 * (self.gamma0 + self.gamma1)
        elif rho_k < self.eta:
            # Point accepted with a modest ratio: mild radius update
            REJ = False
            self.Delta *= 0.5 * (self.gamma1 + 1.0)
        else:
            # Point accepted with a strong ratio: grow the trust region
            REJ = False
            self.Delta *= 0.5 * (1.0 + self.gamma2)
        if not REJ:
            # re-evaluate gradient/Hessian at the accepted point
            LAMBDA_BAR, obj, grad, Hessian = self.evaluate(Pr, Lambdas, Shifts)
        if debug:
            print "Iteration ", i
            print "Thershold ", TrustRegionThreshold
            print "Grad norm", squaredNorm(grad), " accpted: ", not REJ
            print "Direction norm ", squaredNorm( s_k)
            print "Obj improvement is, ", obj - obj_toBeTested
            print "Quad obj improevment is ", -1.0 * s_k.dot(grad) - 0.5 * s_k.dot( s_k.MatMul(Hessian) )
            print "Improvement ratio", rho_k
        #    print "s_k is ", s_k
        #    print "Var is ", Pr.VAR
        #    print "grad is ", grad
        # Stopping criterion: norm of the projected gradient
        firstOrderOpt = squaredNorm( ProjOperator(Pr.VAR, grad, Pr.BOX) )
        if i % 50 == 0:
            logger.info("Inner iteration %d, current obejctive value is %.8f and current optimality gap is %.10f" %(i, obj, firstOrderOpt ) )
        #    print "Direction is ", s_k, " rejection ", REJ, " ratio ", rho_k, " variables ", Pr.VAR
        if firstOrderOpt <= FirstOrderOptThreshold:
            break
    LAMBDA_BAR, obj_toBeTested, grad_NULL, Hessian_NULL = self.evaluate(Pr, Lambdas, Shifts, 0)
    return LAMBDA_BAR, firstOrderOpt
#m =
#rho = (f(x_k) - f(x_k+s_k)) / (f(x_k) - )
# if rho > mu
#else
def _getQudraticQuoeff(self, S_independant_k, S_dependant_k, grad, Hessian):
    """Coefficients (a, b) of the 1-D model a*t**2 + b*t along the Cauchy path
    s(t) = S_independant_k + t * S_dependant_k."""
    H_dep = S_dependant_k.MatMul(Hessian)
    H_ind = S_independant_k.MatMul(Hessian)
    a = 0.5 * S_dependant_k.dot(H_dep)
    b = S_dependant_k.dot(grad) + S_dependant_k.dot(H_ind)
    return a, b
def _findCauchyPoint(self, grad, Hessian, Vars, Box, TrustRegionThreshold, scaling=False, debug=False):
"Return the direction s_k as in Step 1 of the algorithm. Note that grad and Hessian are SparseVectors."
if not scaling:
scalingD = dict( [(key, 1.0) for key in Vars] )
else:
scalingD = {}
for key in Vars:
try:
if grad[key] >= 0:
scalingD[key] = Vars[key]
else:
scalingD[key] = (Box[key] - Vars[key] )
except ZeroDivisionError:
scalingD[key] = 10.0
#Compute hitting times
hitting_times = {'dummy':0.0 }
for key in Vars:
if grad[key] == 0.0:
hitting_times[key] = sys.maxsize
elif grad[key] > 0:
hitting_times[key] = Vars[key] / (scalingD[key] * grad[key])
else:
hitting_times[key] = (Box[key] - Vars[key]) / abs( scalingD[key] * grad[key] )
sorted_hitting_times_items = sorted(hitting_times.items(), key = lambda x: x[1])
#Decompose S_k = S_independant_k + S_dependant_k * t
S_independant_k = SparseVector({})
S_dependant_k = SparseVector( dict([(key, -1.0 * scalingD[key] * grad[key]) for key in grad]) )
vars_indepnedant_of_t = []
end_deriavative_sgn = 0
t_threshold = sys.maxsize
for i in range(len( sorted_hitting_times_items )):
key, t_key = sorted_hitting_times_items[i]
if i < len( sorted_hitting_times_items ) -1:
next_key, next_t_key = sorted_hitting_times_items[i+1]
else:
next_t_key = -1 #dummy value
if key != 'dummy':
vars_indepnedant_of_t.append( key )
del S_dependant_k[key]
if grad[key] > 0.0:
S_independant_k[key] = -1.0 * Vars[key]
elif grad[key] < 0.0:
S_independant_k[key] = Box[key] - Vars[key]
else:
S_independant_k[key] = 0.0
if next_t_key == t_key:
continue
if debug:
print "Search ointerval is :", t_key, next_t_key
#for key in vars_indepnedant_of_t:
# del S_dependant_k[key]
a, b = self._getQudraticQuoeff(S_independant_k, S_dependant_k, grad, Hessian)
#Check if the current interval is inside the trusts region
if squaredNorm( S_independant_k + S_dependant_k * next_t_key ) >= TrustRegionThreshold:
A = S_dependant_k.dot(S_dependant_k)
B = 2.0 * S_dependant_k.dot(S_independant_k)
C = S_independant_k.dot(S_independant_k) - TrustRegionThreshold**2
D = B**2 - 4.0 * A * C
try:
root_1_tc = (-1.0 * B-math.sqrt(D))/(2*A)
root_2_tc = (-1.0 * B+math.sqrt(D))/(2*A)
except ZeroDivisionError:
try:
root_1_tc = -1.0 * C / B
root_2_tc = root_1_tc
except ZeroDivisionError:
root_1_tc = sys.maxsize
root_2_tc = sys.maxsize
if root_1_tc > t_key and root_1_tc <= next_t_key:
t_threshold = root_1_tc
else:
t_threshold = root_2_tc
#Find the first local minimum of the piece-wise quadratic function a * t**2 + b * t
# this happens in two cases
# (a) if the quadratic function is convex and its peak is in the interval [t_key, next_t_key]
# (b) if the quadratic function was decreasing in the last interval and it is increasing now.
# check (a)
if a > 0.0 and -1.0 * b /(2 * a) > t_key and -1.0 * b /(2 * a) < next_t_key:
t_C_k = -1.0 * b /(2 * a)
if t_C_k > t_threshold:
if debug:
print "Iter ", i, " Convexity Rechaed the threhsold, T_thre is ", t_threshold
return S_independant_k + S_dependant_k * t_threshold
else:
if debug:
print "Convexity"
return S_independant_k + S_dependant_k * t_C_k
# check (b)
beg_deriavative_sgn = np.sign( 2*a * t_key + b)
if beg_deriavative_sgn == 0:
#Check if the quadratic functions peaks coincide with the hitting_times
if a > 0.0:
beg_deriavative_sgn = 1
elif a == | |
#print np.size(self.m_RES_peff_approx), (weight_enb), (resTraffic_enb),enb_idx
self.o_powRes_enb[enb_idx] = np.dot(weight_enb, resTraffic_enb) # 1xN * 1xN^T: 1x1
self.o_powSum_norm = np.dot(self.m_WeightPow, self.o_powRes_enb) # inner product
    def f_update_sgw_results(self):
        """Refresh the SGW-related output members from the latest solver state.

        Slices the SGW portion out of the global load/load-ratio vectors,
        derives the on/off status from the threshold ``mc_reOnOff_th``, and
        computes the load-ratio standard deviation against the ideal SGW
        balance point.  Finally triggers the LB objective update
        (``f_update_lb_sgw``) and the actual per-eNodeB LB calculation
        (``f_calculate_lb_actual``).

        Updates: o_SgwLoad, o_SgwLoadRatio, o_SGW_status, o_SgwLR_std.
        """
        # get SGW load,load ratio and status
        # SGW entries occupy [m_sgw_start, m_mux_start) of the resource vectors.
        self.o_SgwLoad = self.m_load_update[self.m_sgw_start:self.m_mux_start] #-1)
        #self.o_SGW_status = self.o_SgwLoad > self.mc_reOnOff_th
        self.o_SgwLoadRatio = self.o_ResLoadRatio[self.m_sgw_start:self.m_mux_start] #-1)
        # Status is 1 when the load ratio exceeds the re-on/off threshold, else 0.
        self.o_SGW_status = (self.o_SgwLoadRatio > self.mc_reOnOff_th).astype(int)
        # Variance of the load ratios around the ideal balance, normalised by
        # the current (dynamic) number of active SGWs.
        SgwLR_var = sum((self.o_SgwLoadRatio - self.m_ideal_lb_sgw)**2 / self.md_new_SgwNum)
        self.o_SgwLR_std = np.sqrt(SgwLR_var)
        # get SGW LB output
        self.f_update_lb_sgw(self.m_sdiag)
        #disp(self.m_sdiag)
        #self.f_update_lb_sgw(0.00001)
        # get actual SGW LB for enbs
        self.f_calculate_lb_actual()
    def f_update_lb_sgw(self, sdiag):
        """Compute the per-eNodeB SGW load-balancing variance objective.

        Builds the quadratic form matrix Q (capacity-coupled, with ``sdiag``
        added on the diagonal), restricts it to each eNodeB's SGW set, maps it
        onto routes via the SGW-route incidence matrix, and evaluates the
        quadratic at the current rate allocation.

        Parameters:
            sdiag: diagonal regularisation term (the \\tau in diag(1/c_i+\\tau)).

        Updates: o_SgwLBVarEnb (per eNodeB), o_SgwLBVarSum (weighted sum).
        """
        # similar to f_form_fmin_lb_sgw
        # get active system capacity
        C_sys = self.md_C_sys_sgw # C_sys
        # Rank-one coupling term: every pair of SGWs shares -1/C_sys.
        Q_mat = -(1.0/C_sys)*np.ones((self.i_SGW_num,self.i_SGW_num)) # -1/C_sys, No need for dynamic SGW number (IxI)
        t_diag = np.diag(1./self.m_SGW_cap + sdiag) # diag(1/c_i+\tau)
        Q_mat = Q_mat + t_diag # IxI, C_i -1/S_sys at diag
        # variance weight: 1/(I*S^hat)
        var_w = 1 / (self.md_new_SgwNum * self.md_var_norm / self.m_normScale)
        # initial output per ENB
        self.o_SgwLBVarEnb = np.zeros(self.m_EnbNum,) # (1,self.m_EnbNum)
        for enb_idx in range(self.m_EnbNum):
            # Mask Q down to the SGWs reachable from this eNodeB.
            Q_enb_mat = np.dot(np.diag(self.m_EnbSgwMat[enb_idx,:]), Q_mat) # IxI
            Q_enb_sq = np.dot(Q_enb_mat.T, Q_enb_mat) # IxI
            # get A(I)^T B_j Q^2 A(I)
            QtildaSgwEnb = np.dot(np.dot(self.m_SgwRouteMat.T, Q_enb_sq), self.m_SgwRouteMat) # RxR
            # Quadratic form d^T * Qtilda * d at the current rate vector.
            self.o_SgwLBVarEnb[enb_idx] = var_w * np.dot(np.dot(self.m_rate_update, QtildaSgwEnb), self.m_rate_update)
        self.o_SgwLBVarSum = np.dot(self.m_WeightSgw, self.o_SgwLBVarEnb) # 1xJ * 1xJ inner product
    def f_calculate_lb_actual(self):
        """Compute the *actual* (measured) SGW load-balance variance per eNodeB.

        Two modes, selected by ``i_LB_mode``:
          - 'global': deviation of every SGW's load ratio from the single
            system-wide ideal ``m_ideal_lb_sgw``;
          - 'user': deviation from a per-eNodeB target ratio recomputed by
            ``f_calculate_loadUser`` (stored in ``m_sgw_user_raio``).
        In both modes, each eNodeB only sums over the SGWs it is mapped to
        (row of ``m_EnbSgwMat``), normalised by that SGW count.

        Updates: o_SgwLBVarEnbActual, o_SgwLBVarSumActual.
        """
        # get SGW load ratio
        self.o_SgwLBVarEnbActual = np.zeros(self.m_EnbNum,) # (1,self.m_EnbNum)
        sgw_load_all = self.o_ResLoadRatio[self.m_sgw_start:self.m_mux_start] # -1)
        if (self.i_LB_mode=='global'):
            diff_ratio = sgw_load_all - self.m_ideal_lb_sgw
            diff_ratio_square = diff_ratio**2 # 1xI
            for enb_idx in range(self.m_EnbNum):
                enbSgwVet = self.m_EnbSgwMat[enb_idx,:] # 1xI
                sgw_en_num = np.sum(enbSgwVet)
                # Dot with the 0/1 map vector keeps only this eNodeB's SGWs.
                enbSgwLdVar_raw = np.dot(enbSgwVet, diff_ratio_square) # 1xI * (1xI)' --> 1x1
                self.o_SgwLBVarEnbActual[enb_idx] = enbSgwLdVar_raw / (sgw_en_num * self.md_var_norm / self.m_normScale)
            # self.m_debug.diff_ratio = diff_ratio
        elif (self.i_LB_mode=='user'):
            #disp('user mode to get LB')
            # recalculate the user specific total load ratio over
            # allocated DCs (self.m_sgw_user_raio)
            self.f_calculate_loadUser()
            for enb_idx in range(self.m_EnbNum):
                # get loadRatio diff against a user specific ratio
                diff_ratio = sgw_load_all - self.m_sgw_user_raio[enb_idx] # 1xI
                diff_ratio_square = diff_ratio**2 # 1xI
                enbSgwVet = self.m_EnbSgwMat[enb_idx,:] # 1xI
                sgw_en_num = sum(enbSgwVet)
                # remove the unallocated DCs and sum the rest up
                enbSgwLdVar_raw = np.dot(enbSgwVet, diff_ratio_square) # 1xI * (1xI)^T --> 1x1
                self.o_SgwLBVarEnbActual[enb_idx] = enbSgwLdVar_raw / (sgw_en_num * self.md_var_norm / self.m_normScale)
        self.o_SgwLBVarSumActual = np.dot(self.m_WeightSgw, self.o_SgwLBVarEnbActual) # 1xJ * 1xJ^T
    def f_update_mux_results(self):
        """Refresh MUX load/status outputs and per-MUX queueing delay.

        Only runs when there are MUXs and MUX load balancing is enabled.
        Delay per MUX is modelled as md_mux_config_new / (capacity - load),
        with the remaining capacity floored at a tiny positive value to
        avoid division by zero when a MUX is saturated.

        Updates: o_MuxLoad, o_MuxLoadRatio, o_MUX_status, o_MuxLR_std,
        o_MuxDelay; then delegates to f_calculate_enb_delay().
        """
        # get MUX load,load ratio and status
        if self.i_MUX_num > 0 and self.m_lbMux_en:
            # MUX entries occupy [m_mux_start, m_link_start) of the resource vectors.
            self.o_MuxLoad = self.m_load_update[self.m_mux_start:self.m_link_start] # -1)
            #self.o_MUX_status = self.o_MuxLoad > self.mc_reOnOff_th
            self.o_MuxLoadRatio = self.o_ResLoadRatio[self.m_mux_start:self.m_link_start] #-1)
            self.o_MUX_status = (self.o_MuxLoadRatio > self.mc_reOnOff_th).astype(float)
            #disp(self.md_new_MuxNum)
            # Std of the MUX load ratios around their own mean (not an ideal target).
            MuxLR_var = sum((self.o_MuxLoadRatio - np.mean(self.o_MuxLoadRatio))**2 / self.md_new_MuxNum)
            self.o_MuxLR_std = np.sqrt(MuxLR_var)
            # calculate delay for each MUX
            tcapLeft_mux = self.m_MUX_cap - self.o_ResLoad[self.m_mux_start:self.m_link_start] #-1)
            # Floor at a tiny epsilon so the delay stays finite at saturation.
            capLeft_mux = np.maximum(tcapLeft_mux,0.0000001) # 1xM
            tem_MuxDelay = self.md_mux_config_new / capLeft_mux # 1xM
            self.o_MuxDelay = tem_MuxDelay # .* self.o_MUX_status # 1xM
            # calculate delay sum for each ENB
            self.f_calculate_enb_delay()
def f_calculate_enb_delay(self):
self.m_MuxRouteMat = self.m_ResRouteMat[self.m_mux_start:self.m_link_start,:] # MxR
self.md_mux_norm = 1.0 / (self.md_MUX_ntime / self.m_normScale * self.md_new_MuxNum) # 1xM
muxDelayNorm = self.o_MuxDelay * self.md_mux_norm # 1xM, /(S^mux * M)
route_delay = np.dot(self.m_MuxRouteMat.T, muxDelayNorm) # MxR' * 1xM'-->Rx1
self.o_EnbDelay = np.dot(self.i_EnbRouteMat, route_delay) # (JxR * Rx1)'-->1xJ
self.o_DelaySum = np.dot(self.m_WeightMux, self.o_EnbDelay) # 1xJ * 1xJ'
def f_record_results(self):
if self.m_firstopen==1:
fid = open(self.i_fileName, 'w')
else:
fid = open(self.i_fileName, 'a')
self.m_firstopen=0
if self.m_title_on ==0:
fid.write('#########constraint mode is %s \n' %self.i_constraint_mode)
for j in range(self.m_EnbNum):
fid.write('user%d,weight_pow=%4.5f,weight_sgw=%4.5f,weight_mux=%4.5f \n'
%(j+1,self.m_WeightPow[j],self.m_WeightSgw[j],self.m_WeightMux[j]))
self.m_title_on =1
fid.write('iteration=%d \n' %self.m_iteration)
fid.write('route load allocation, \n')
fid.write('BS/Route index,')
for item in range(1):
for r in range(self.m_RtNum):
fid.write('r%d,' %r)
fid.write('$,')
fid.write('\n')
for j in range(self.m_EnbNum):
fid.write('eNodB%d,' %(j+1))
# print out traffic allocation
for r in range(self.m_RtNum):
fid.write('%4.5f,' %self.o_traffic[j,r])
# fid.write('$,')
# % print out traffic matrix
# for j=1:self.m_Ndim
# fid.write('%4.5f,',self.o_Amatrix(i,j))
# end
fid.write('\n')
# print out sum per BS
fid.write('sum,')
for r in range(self.m_RtNum):
#print 'self.m_rate_update[r]=',self.m_rate_update[r]
fid.write('%4.5f,' %(self.m_rate_update[r]))
# fid.write('$,')
# for j=1:self.m_Ndim
# fid.write('%4.5f,',sum(self.o_Amatrix(:,j)))
# end
fid.write('\n')
fid.write('SgwIndex,load,load ratio,load status, price, slop, load ratio STD \n')
for i in range(self.m_SgwNum):
fid.write('SGW%d, %2.5f, %2.5f, %d,%2.5f,%2.5f,%2.5f\n' %(i+1,self.o_SgwLoad[i],
self.o_SgwLoadRatio[i],self.o_SGW_status[i],
999,
self.m_RES_peff_approx[i],
self.o_SgwLR_std))
if self.i_MUX_num > 0 :
fid.write('MuxIndex,load,load ratio,load status, price, slop, load ratio STD, Delay \n')
mux_start = self.m_mux_start
mux_end = self.m_link_start #-1
for h in range(mux_start,mux_end):
fid.write('MUX%d, %2.5f, %2.5f, %d,%2.5f,%2.5f,%2.5f,%2.5f\n'
%(h-mux_start+1,self.o_ResLoad[h],
self.o_ResLoadRatio[h],self.o_RES_status[h],
999,
self.m_RES_peff_approx[h],
self.o_MuxLR_std,
self.o_MuxDelay[h-mux_start])) #+1
if self.i_link_num > 0:
fid.write('LinkIndex,load,load ratio,load status, price, slop \n')
link_start = self.m_link_start
link_end = self.m_ResNum
for l in range(link_start,link_end):
fid.write('LINK%d, %2.5f, %2.5f, %d,%2.5f,%2.5f\n'
%(l-link_start+1,self.o_ResLoad[l],
self.o_ResLoadRatio[l],self.o_RES_status[l],
999,
self.m_RES_peff_approx[l]))
fid.write('EnbIndex,SumPowNorm,LB variance,MUX delay \n')
for j in range(self.m_EnbNum):
fid.write('ENB%d, %2.5f, %2.5f, %2.5f\n'
%(j,
self.o_powRes_enb[j],
self.o_SgwLBVarEnbActual[j],
self.o_EnbDelay[j]))
fid.write('o_PowConsum1,o_PowConsum2, min value, effctive min,o_powSum_norm,o_SgwLBVarSum,o_DelaySum\n')
# print self.o_powSum_norm,self.o_SgwLBVarSum,self.o_DelaySum
fid.write('%2.5f, %2.5f, %2.5f, %2.5f, %2.5f, %2.5f, %2.5f \n'
%(self.o_PowConsum1,self.o_PowConsum2, self.o_fval, self.f_calculate_min_value(),
self.o_powSum_norm,self.o_SgwLBVarSum,self.o_DelaySum))
fid.close()
def f_calculate_min_value(self):
min_value_pow = np.dot(self.m_fvec, self.m_rate_update) # inner product of 2 vectors
min_value_lb_sgw = np.dot(np.dot(self.m_rate_update, self.md_QtildaSgw), self.m_rate_update)
min_delay_mux = self.f_form_mux_delay(self.m_rate_update)
#disp(min_value_pow);disp(min_value_lb_sgw);disp(min_delay_mux);
eff_min = min_value_pow + min_value_lb_sgw + min_delay_mux
return eff_min
class userSolverTemplate(object):
    def __init__(self,j,D,C,I,H,L,RM,EM,E,pnorm,R_sgw,D_mux,V_norm,sdiag,
                 Weight,numbs,BsAgg_step):
        """Per-user (per-eNodeB) solver template.

        Parameters (as stored below):
            j: eNodeB index for this solver instance.
            D: incoming traffic for this eNodeB.
            C: resource capacities.
            I, H, L: numbers of SGWs, MUXs and links respectively.
            RM, EM: resource-route and eNodeB-route incidence matrices.
            E: per-resource power-efficiency figures.
            pnorm: RES normalised power consumption (scalar).
            R_sgw: expected SGW load ratio target.
            D_mux: MUX normalised time delay, dim 1xM.
            V_norm: SGW variance norming factor.
            sdiag: diagonal regularisation term for the LB quadratic.
            Weight: (pow, sgw, mux) objective weight triple.
            numbs: number of active BSs (scaling only).
            BsAgg_step: aggregated-traffic gradient step size \\alpha_{\\lambda}.
        """
        self.i_SGW_num = I
        self.i_MUX_num = H
        self.i_link_num= L
        self.i_enb_idx=j
        self.i_ResRouteMat=RM
        self.i_EnbRouteMat=EM
        self.i_traffic_in=D
        self.i_RES_peff=E
        self.id_RES_pnorm = pnorm # RES normalised power cosumption dim: 1
        self.id_load_ratio_sgw = R_sgw # expected load ratio R=T/S^sgw_sys or T_j/S^srv_sys,j
        self.id_MUX_ntime = D_mux # MUX normalised time delay,dim: 1xM
        self.id_sgwVar_norm= V_norm # SGW variance norming factor
        self.i_sdiag = sdiag
        self.i_RES_cap=C
        self.i_WeightPow=Weight[0]
        self.i_WeightSgw=Weight[1]
        self.i_WeightMux=Weight[2]
        self.io_numbs=numbs # number of active BS,used for scaling but may not be needed
        self.io_BsAgg_step = BsAgg_step # BS aggregated traffic gradient step size: \alpha_{\lambda}
        # Initialise the remaining members to None, then sanity-check inputs.
        self._f_init_m()
        self.f_check()
def _f_init_m(self):
# Optinal inputs
self.io_BsAllo_step = None # BS traffic rate allocation gradient step size: \alpha_{d}
self.io_max_iteration = None
# dynamic input
#innter loop
self.id_sgw_loads = None # dynamic loads over all S-GWs dim: I
self.id_mux_loads = None # dynamic loads over all MUXs dim: M
self.id_price_load = None # lagragian multiplier for RES n regarding capacity constrain \sum_r A_{n,r}d_r <= S_n
self._id_res_loads = None # dim: N
#outter loop
# id_load_ratio_mux # expected load ratio R=T/S^mux_sys !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! to be removed
self.id_SGW_status = None # Renewed SGW status 1--> en, 0-->dis dim: 1xI
self.id_RES_status = None # updated RES status 1--> en, 0-->dis dim: 1xN
self.id_ResRouteMat = None # dynamic active RES-Route map
self.id_iteration = None
# i_LinkGr_step # Link constrain gradient step size: \alpha_{\mu}
# class member
self.m_SgwNum = None # Initial number of SGWs as I
self.m_ResNum = None # Initial number of RESs as N
self.m_BsAgg_step = None # actual BS aggregated traffic gradient step size: \alpha_{\lambda}
self.m_BsAgg_price = None # BS aggregated traffic Lagragian multiplier
self.m_rescap_price = None # gradient for RES capacity over d_r
self.m_init_allo = None # Initial traffic allocation with static LB: d_r=D_j/ActiveRtNum,dim: 1 * R
self.m_pre_allo = None # Rate allocation of the previous literation,dim: 1 * R
self.m_enbRt_vect = None # eNodeB j route map vecter,dim: 1 * R
self.m_enbRt_num = None # total route number
self.m_enbAtRt_num = None # active route number
self.m_enbRes_vect = None # eNodeB-RES map vector,dim: 1 * N
self.m_enbSgw_vect = None # eNodeB-SGW map vector,dim: 1 * I
self.m_enbMux_vect = None # eNodeB-MUX map vector,dim: 1 * M
self.m_BsAllo_step = None # actual BS rate allocation step size: \alpha_{d}
self.m_sgw_start = None # SGW start postion
self.m_mux_start = None # MUX start postion
self.m_link_start = None # Link start postion
self.m_RES_peff = None # dynamic RES peff
self.m_lbMux_en = None # MUX LB enabler
self.m_SGW_cap = None # SGW capacity
self.m_MUX_cap = None # MUX capacity
self.m_sgwVarNorm_en = None # LB var norm enable
self.m_obj_gradient = None # gradient without lag or pen
self.m_gradient = None # completed gradient
self.m_userSgwSysCap = None
self.m_normScale = 1 # constant for level normalisation factors for POW,LB and delay (0.08,0.05,0.02
###############penalty related############
self.m_pen_eq_weight =3.0 #best is 1.3
self.m_pen_ieq_weight =5.0
# dynamic member
self.md_RES_cap = None
self.md_SgwNum = None # dynamic active number of SGWs
self.md_ResNum = None # dynamic active number of RES
self.md_MuxNum = None # dynamic active number of MUXs
self.md_SgwRouteMat = None # dynamic active SGW-Route map
| |
= op_mat_just_corners[2, self._topleft_mask]
op_mat_cnr3 = op_mat_just_corners[3, self._topright_mask]
op_mat_just_active_cnrs = np.vstack(
(op_mat_cnr0, op_mat_cnr1, op_mat_cnr2, op_mat_cnr3)
)
self._operating_matrix_corner_int_IDs = self._realIDtointerior(
op_mat_just_active_cnrs
)
# ^(4corners,4nodesactivepercorner)
self._operating_matrix_bottom_int_IDs = self._realIDtointerior(
self._operating_matrix_ID_map[self._bottom_interior_IDs, :][
:, self._bottom_mask
]
)
# ^(nbottomnodes,6activenodeseach)
self._operating_matrix_top_int_IDs = self._realIDtointerior(
self._operating_matrix_ID_map[self._top_interior_IDs, :][:, self._top_mask]
)
self._operating_matrix_left_int_IDs = self._realIDtointerior(
self._operating_matrix_ID_map[self._left_interior_IDs, :][
:, self._left_mask
]
)
self._operating_matrix_right_int_IDs = self._realIDtointerior(
self._operating_matrix_ID_map[self._right_interior_IDs, :][
:, self._right_mask
]
)
    def _gear_timestep(self, timestep_in, new_grid):
        """This method allows the gearing between the model run step and the
        component (shorter) step.

        If the steepest elevation offset between neighbouring nodes exceeds
        the critical-slope limit, the external timestep ``timestep_in`` is
        subdivided into ``self._internal_repeats`` equal internal steps, and
        the uplift implied by ``new_grid`` is spread evenly over them.

        The method becomes unstable if S>Scrit, so we test to prevent
        this. We implicitly assume the initial condition does not
        contain slopes > Scrit. If the method persistently explodes,
        this may be the problem.

        Returns the internal timestep ``self._delta_t``.
        """
        # Extra trailing NaN slot so that "missing neighbour" indices (-1)
        # resolve to NaN and drop out of the nanmax below.
        extended_elevs = np.empty(self._grid.number_of_nodes + 1, dtype=float)
        extended_elevs[-1] = np.nan
        node_neighbors = self._grid.active_adjacent_nodes_at_node
        extended_elevs[:-1] = new_grid["node"][self._values_to_diffuse]
        # Largest absolute elevation difference between any node and its
        # active neighbours.
        max_offset = np.nanmax(
            np.fabs(
                extended_elevs[:-1][node_neighbors]
                - extended_elevs[:-1].reshape((self._grid.number_of_nodes, 1))
            )
        )
        if max_offset > np.tan(self._S_crit) * min(self._grid.dx, self._grid.dy):
            # ^using S not tan(S) adds a buffer - but not appropriate
            self._internal_repeats = (
                int(
                    max_offset
                    // (np.tan(self._S_crit) * min(self._grid.dx, self._grid.dy))
                )
                + 1
            )
            # now we rig it so the actual timestep is an integer divisor
            # of T_in:
            self._delta_t = timestep_in / self._internal_repeats
            self._uplift_per_step = (
                new_grid["node"][self._values_to_diffuse]
                - self._grid["node"][self._values_to_diffuse]
            ) / self._internal_repeats
            if self._internal_repeats > 10000:
                raise ValueError(
                    """Uplift rate is too high; solution is not
                stable!!"""
                )
        else:
            # Slopes are everywhere sub-critical: one internal step suffices.
            self._internal_repeats = 1
            self._delta_t = timestep_in
            self._uplift_per_step = (
                new_grid["node"][self._values_to_diffuse]
                - self._grid["node"][self._values_to_diffuse]
            )
        return self._delta_t
def _set_variables(self, grid):
"""This function sets the variables needed for update().
Now vectorized, shouold run faster. At the moment, this method
can only handle fixed value BCs.
"""
n_interior_nodes = grid.number_of_interior_nodes
# Initialize the local builder lists
_mat_RHS = np.zeros(n_interior_nodes)
try:
elev = grid["node"][self._values_to_diffuse]
except KeyError:
raise NameError("elevations not found in grid!")
try:
_delta_t = self._delta_t
except AttributeError:
raise NameError(
"""Timestep not set! Call _gear_timestep(tstep)
after initializing the component, but before
running it."""
)
_one_over_delta_x = self._one_over_delta_x
_one_over_delta_x_sqd = self._one_over_delta_x_sqd
_one_over_delta_y = self._one_over_delta_y
_one_over_delta_y_sqd = self._one_over_delta_y_sqd
_kappa = self._kappa
_b = self._b
_S_crit = self._S_crit
_core_nodes = self._core_nodes
corenodesbyintIDs = self._corenodesbyintIDs
operating_matrix_core_int_IDs = self._operating_matrix_core_int_IDs
operating_matrix_corner_int_IDs = self._operating_matrix_corner_int_IDs
_interior_corners = self._interior_corners
corners_antimasks = self._corners_antimasks
corner_interior_IDs = self._corner_interior_IDs
modulator_mask = self._modulator_mask
corner_flags = self._corner_flags
bottom_interior_IDs = self._bottom_interior_IDs
top_interior_IDs = self._top_interior_IDs
left_interior_IDs = self._left_interior_IDs
right_interior_IDs = self._right_interior_IDs
bottom_antimask = self._bottom_antimask
_bottom_list = self._bottom_list
top_antimask = self._top_antimask
_top_list = self._top_list
left_antimask = self._left_antimask
_left_list = self._left_list
right_antimask = self._right_antimask
_right_list = self._right_list
# Need to modify the "effective" values of the edge nodes if any of
# the edges are inactive:
if self._bottom_flag == 4:
bottom_edge, inside_bottom_edge = grid.nodes[(0, 1), :]
elev[bottom_edge] = elev[inside_bottom_edge]
# corners are special cases, and assumed linked to the bottom and
# top edge BCs...
elev[bottom_edge[0]] = elev[inside_bottom_edge[1]]
elev[bottom_edge[-1]] = elev[inside_bottom_edge[-2]]
if self._top_flag == 4:
top_edge, inside_top_edge = grid.nodes[(-1, -2), :]
elev[top_edge] = elev[inside_top_edge]
# corners are special cases, and assumed linked to the bottom and
# top edge BCs...
elev[top_edge[0]] = elev[inside_top_edge[1]]
elev[top_edge[-1]] = elev[inside_top_edge[-2]]
if self._left_flag == 4:
left_edge = grid.nodes[1:-1, 0]
inside_left_edge = grid.nodes[1:-1, 1]
elev[left_edge] = elev[inside_left_edge]
if self._right_flag == 4:
right_edge = grid.nodes[1:-1, -1]
inside_right_edge = grid.nodes[1:-1, -2]
elev[right_edge] = elev[inside_right_edge]
# replacing loop:
cell_neighbors = grid.active_adjacent_nodes_at_node
# ^E,N,W,S
cell_diagonals = grid.diagonal_adjacent_nodes_at_node # NE,NW,SW,SE
# ^this should be dealt with by active_neighbors... (skips bad nodes)
_z_x = (
(elev[cell_neighbors[:, 0]] - elev[cell_neighbors[:, 2]])
* 0.5
* _one_over_delta_x
)
_z_y = (
(elev[cell_neighbors[:, 1]] - elev[cell_neighbors[:, 3]])
* 0.5
* _one_over_delta_y
)
_z_xx = (
elev[cell_neighbors[:, 0]] - 2.0 * elev + elev[cell_neighbors[:, 2]]
) * _one_over_delta_x_sqd
_z_yy = (
elev[cell_neighbors[:, 1]] - 2.0 * elev + elev[cell_neighbors[:, 3]]
) * _one_over_delta_y_sqd
_z_xy = (
(
elev[cell_diagonals[:, 0]]
- elev[cell_diagonals[:, 1]]
- elev[cell_diagonals[:, 3]]
+ elev[cell_diagonals[:, 2]]
)
* 0.25
* _one_over_delta_x
* _one_over_delta_y
)
_d = 1.0 / (1.0 - _b * (_z_x * _z_x + _z_y * _z_y))
_abd_sqd = _kappa * _b * _d * _d
_F_ij = -2.0 * _kappa * _d * (
_one_over_delta_x_sqd + _one_over_delta_y_sqd
) - 4.0 * _abd_sqd * (
_z_x * _z_x * _one_over_delta_x_sqd + _z_y * _z_y * _one_over_delta_y_sqd
)
_F_ijminus1 = (
_kappa * _d * _one_over_delta_x_sqd
- _abd_sqd * _z_x * (_z_xx + _z_yy) * _one_over_delta_x
- 4.0
* _abd_sqd
* _b
* _d
* (_z_x * _z_x * _z_xx + _z_y * _z_y * _z_yy + 2.0 * _z_x * _z_y * _z_xy)
* _z_x
* _one_over_delta_x
- 2.0
* _abd_sqd
* (
_z_x * _z_xx * _one_over_delta_x
- _z_x * _z_x * _one_over_delta_x_sqd
+ _z_y * _z_xy * _one_over_delta_x
)
)
_F_ijplus1 = (
_kappa * _d * _one_over_delta_x_sqd
+ _abd_sqd * _z_x * (_z_xx + _z_yy) * _one_over_delta_x
+ 4.0
* _abd_sqd
* _b
* _d
* (_z_x * _z_x * _z_xx + _z_y * _z_y * _z_yy + 2.0 * _z_x * _z_y * _z_xy)
* _z_x
* _one_over_delta_x
+ 2.0
* _abd_sqd
* (
_z_x * _z_xx * _one_over_delta_x
+ _z_x * _z_x * _one_over_delta_x_sqd
+ _z_y * _z_xy * _one_over_delta_x
)
)
_F_iminus1j = (
_kappa * _d * _one_over_delta_y_sqd
- _abd_sqd * _z_y * (_z_xx + _z_yy) * _one_over_delta_y
- 4.0
* _abd_sqd
* _b
* _d
* (_z_x * _z_x * _z_xx + _z_y * _z_y * _z_yy + 2.0 * _z_x * _z_y * _z_xy)
* _z_y
* _one_over_delta_y
- 2.0
* _abd_sqd
* (
_z_y * _z_yy * _one_over_delta_y
- _z_y * _z_y * _one_over_delta_y_sqd
+ _z_x * _z_xy * _one_over_delta_y
)
)
_F_iplus1j = (
_kappa * _d * _one_over_delta_y_sqd
+ _abd_sqd * _z_y * (_z_xx + _z_yy) * _one_over_delta_y
+ 4.0
* _abd_sqd
* _b
* _d
* (_z_x * _z_x * _z_xx + _z_y * _z_y * _z_yy + 2.0 * _z_x * _z_y * _z_xy)
* _z_y
* _one_over_delta_y
+ 2.0
* _abd_sqd
* (
_z_y * _z_yy * _one_over_delta_y
+ _z_y * _z_y * _one_over_delta_y_sqd
+ _z_x * _z_xy * _one_over_delta_y
)
)
_F_iplus1jplus1 = _abd_sqd * _z_x * _z_y * _one_over_delta_x * _one_over_delta_y
_F_iminus1jminus1 = _F_iplus1jplus1
_F_iplus1jminus1 = -_F_iplus1jplus1
_F_iminus1jplus1 = _F_iplus1jminus1
_equ_RHS_calc_frag = (
_F_ij * elev
+ _F_ijminus1 * elev[cell_neighbors[:, 2]]
+ _F_ijplus1 * elev[cell_neighbors[:, 0]]
+ _F_iminus1j * elev[cell_neighbors[:, 3]]
+ _F_iplus1j * elev[cell_neighbors[:, 1]]
+ _F_iminus1jminus1 * elev[cell_diagonals[:, 2]]
+ _F_iplus1jplus1 * elev[cell_diagonals[:, 0]]
+ _F_iplus1jminus1 * elev[cell_diagonals[:, 1]]
+ _F_iminus1jplus1 * elev[cell_diagonals[:, 3]]
)
# NB- all _z_... and _F_... variables are nnodes long, and thus use
# real IDs (tho calcs will be flawed for Bnodes)
# RHS of equ 6 (see para [20])
_func_on_z = self._rock_density / self._sed_density * self._uplift + _kappa * (
(_z_xx + _z_yy) / (1.0 - (_z_x * _z_x + _z_y * _z_y) / _S_crit * _S_crit)
+ 2.0
* (_z_x * _z_x * _z_xx + _z_y * _z_y * _z_yy + 2.0 * _z_x * _z_y * _z_xy)
/ (
_S_crit
* _S_crit
* (1.0 - (_z_x * _z_x + _z_y * _z_y) / _S_crit * _S_crit) ** 2.0
)
)
# Remember, the RHS is getting wiped each loop as part of
# self._set_variables()
# _mat_RHS is ninteriornodes long, but were only working on a
# ncorenodes long subset here
_mat_RHS[corenodesbyintIDs] += elev[_core_nodes] + _delta_t * (
_func_on_z[_core_nodes] - _equ_RHS_calc_frag[_core_nodes]
)
low_row = (
np.vstack((_F_iminus1jminus1, _F_iminus1j, _F_iminus1jplus1)) * -_delta_t
)
mid_row = np.vstack(
(-_delta_t * _F_ijminus1, 1.0 - _delta_t * _F_ij, -_delta_t * _F_ijplus1)
)
top_row = np.vstack((_F_iplus1jminus1, _F_iplus1j, _F_iplus1jplus1)) * -_delta_t
nine_node_map = np.vstack((low_row, mid_row, top_row)).T
# ^Note shape is (nnodes,9); it's realID indexed
core_op_mat_row = np.repeat(corenodesbyintIDs, 9)
core_op_mat_col = operating_matrix_core_int_IDs.astype(int).flatten()
core_op_mat_data = nine_node_map[_core_nodes, :].flatten()
# Now the interior corners; BL,BR,TL,TR
_mat_RHS[corner_interior_IDs] += elev[_interior_corners] + _delta_t * (
_func_on_z[_interior_corners] - _equ_RHS_calc_frag[_interior_corners]
)
corners_op_mat_row = np.repeat(self._corner_interior_IDs, 4)
corners_op_mat_col = operating_matrix_corner_int_IDs.astype(int).flatten()
corners_op_mat_data = nine_node_map[_interior_corners, :][
(np.arange(4).reshape((4, 1)), self._corners_masks)
].flatten()
# ^1st index gives (4,9), 2nd reduces to (4,4), then flattened
for i in | |
+ " alredy staked on chain " + str(c.on_chain) + " in AddLiquidity (NOP)")
readyQueue.put(c)
else:
Start(ctx, c, "AL")
LP = None
token = None
if (c.on_chain==1):
LP = ctx.contracts['LP_1']
token = '0x0000000000000000000000000000000000000000'
else:
LP = ctx.contracts['LP_2']
token = '0x4200000000000000000000000000000000000006'
t2 = LP.functions.addLiquidity(
amount,
token,
)
r = c.buildAndSubmit(ctx, t2, {'value': amount})
c.staked[c.on_chain] = True
Watch(ctx, c, "AL", r)
def AddLiquidity_NG(ctx, c,amount):
    """Toggle child c's NG (next-gen) liquidity position on the L1 ETH pool.

    If the child already has an NG stake, withdraw exactly that stake;
    otherwise deposit `amount` (wei) into the pool.  The resulting
    transaction is registered with Watch() under op "RN" (remove) or
    "AN" (add).
    """
    eth_token = '0x0000000000000000000000000000000000000000'
    if c.staked_NG:
        # Already staked: remove precisely what was deposited before.
        Start(ctx, c, "RN")
        pool = ctx.contracts['L1_EthPool']
        withdraw_call = pool.functions.withdrawLiquidity(
            c.staked_NG,
            eth_token,
            c.acct.address
        )
        receipt = c.buildAndSubmit(ctx, withdraw_call, {})
        c.staked_NG = 0
        Watch(ctx, c, "RN", receipt)
    else:
        Start(ctx, c, "AN")
        pool = ctx.contracts['L1_EthPool']
        deposit_call = pool.functions.addLiquidity(
            amount,
            eth_token,
        )
        receipt = c.buildAndSubmit(ctx, deposit_call, {'value':amount})
        c.staked_NG = amount
        Watch(ctx, c, "AN", receipt)
def AddLiquidity_2(ctx, chain, acct,amount):
    """Stake `amount` (wei) into the liquidity pool on the given chain.

    Unlike AddLiquidity(), this variant takes a raw account instead of a
    Child object, and builds/signs/sends the transaction directly.

    Parameters:
        ctx: shared context (rpc endpoints, contracts, chainIds).
        chain: 1 for L1, anything else for L2.
        acct: the web3 account to sign with.
        amount: deposit size in wei (also sent as the tx value).

    Returns the raw transaction hash from send_raw_transaction.
    """
    # Removed dead locals: `chainId = 0` was never read (the real chain id
    # comes from ctx.chainIds[chain]) and `token = None` was always
    # overwritten below.
    if (chain==1):
        LP = ctx.contracts['LP_1']
        token = '0x0000000000000000000000000000000000000000'  # native ETH on L1
    else:
        LP = ctx.contracts['LP_2']
        token = '0x4200000000000000000000000000000000000006'  # OVM wrapped ETH on L2
    t2 = LP.functions.addLiquidity(
        amount,
        token,
    )
    t = t2.buildTransaction({
        'from':acct.address,
        'nonce':ctx.rpc[chain].eth.get_transaction_count(acct.address),
        'chainId': ctx.chainIds[chain],
        'value':amount
    })
    signed_tx = ctx.rpc[chain].eth.account.sign_transaction(t, acct.key)
    r = ctx.rpc[chain].eth.send_raw_transaction(signed_tx.rawTransaction)
    return r
# FIXME - this is used by the Funder to transfer L1->L2 if needed on startup. Can't quite unify it with
# the Onramp_trad code path used by the workers.
def Onramp_2(ctx, acct, amount):
    """Deposit `amount` (wei) from L1 into L2 through the standard bridge.

    Takes a raw account (used by the Funder at startup) rather than a Child
    object; builds, signs and sends the depositETH transaction itself and
    returns the raw transaction hash.
    """
    chain = 1
    deposit_call = ctx.contracts['SB_1'].functions.depositETH(
        8000000, # FIXME / ref: 100000 == MIN_ROLLUP_TX_GAS from OVM_CanonicalTransactionChain.sol
                 # Values like 8*MIN can fail silently (successful Tx on L1 but
                 # no event ever on L2); 8000000 works sometimes(?)
        '0x',
    )
    tx_params = {
        'from':acct.address,
        'nonce':ctx.rpc[chain].eth.get_transaction_count(acct.address),
        'chainId': ctx.chainIds[chain],
        'value':amount
    }
    unsigned_tx = deposit_call.buildTransaction(tx_params)
    signed_tx = ctx.rpc[chain].eth.account.sign_transaction(unsigned_tx, acct.key)
    return ctx.rpc[chain].eth.send_raw_transaction(signed_tx.rawTransaction)
def Onramp_trad(ctx,c):
    """Move child c's spare L1 balance to L2 via the traditional (standard
    bridge) onramp.

    Sends everything above min_balance, marks the child as living on chain 2,
    and registers the tx under op "SO" for event watching.
    """
    Start(ctx,c,"SO")
    bb = ctx.rpc[1].eth.getBalance(c.acct.address)
    # Keep min_balance behind for gas on L1.
    amount = Web3.toWei(bb, 'wei') - min_balance
    sb = ctx.contracts['SB_1'].functions.depositETH(
        8000000, # FIXME / ref: 100000 == MIN_ROLLUP_TX_GAS from OVM_CanonicalTransactionChain.sol
                 # Values like 8*MIN can fail silently, successful Tx on L1 but no event ever on L2
                 # 8000000 works sometimes(?)
        '0x',
    )
    ret = c.buildAndSubmit(ctx, sb, {'value':amount})
    c.on_chain = 2
    WatchEv(ctx,c,"SO",ret)
def Onramp_fast(ctx,c):
    """Move child c's spare L1 ETH to L2 through the fast (liquidity pool)
    onramp.

    If the deposit would exceed half of the L2 pool's available depth, fall
    back to the traditional standard-bridge onramp instead.
    """
    acct = c.acct
    pool_info = ctx.contracts['LP_2'].functions.poolInfo('0x4200000000000000000000000000000000000006').call()
    # Everything above the reserved minimum gets onramped.
    bb = ctx.rpc[1].eth.getBalance(acct.address) - min_balance
    # pool_info[2] is used as the pool's available depth for the half-depth
    # safety check -- TODO confirm against the LiquidityPool poolInfo layout.
    if bb > (pool_info[2] / 2.0):
        lPrint(ctx.log, "***** WARNING Child " + str(c.num) + " falling back to traditional onramp")
        # BUG FIX: Onramp_trad takes (ctx, c); it was previously called as
        # Onramp_trad(acct), which raised a TypeError whenever the fallback
        # path was hit.
        Onramp_trad(ctx, c)
    else:
        Start(ctx,c,"FO")
        dep = ctx.contracts['LP_1'].functions.clientDepositL1(
            0,
            '0x0000000000000000000000000000000000000000'
        )
        r = c.buildAndSubmit(ctx, dep, {'value':bb})
        c.on_chain = 2
        WatchEv(ctx,c,"FO",r)
def Onramp_NG(ctx,c):
    """Move child c's spare L1 ETH to L2 through the NG unified onramp
    (L1_EthPool portal).

    NOTE: unlike Onramp_fast there is currently no pool-depth fallback to
    the traditional onramp here -- the original guard was disabled with an
    unreachable `if False:` branch, which has been removed along with the
    redundant `if True:` wrapper and the unused `chain` local.
    """
    acct = c.acct
    # Keep min_balance behind for gas on L1.
    amount = ctx.rpc[1].eth.getBalance(acct.address) - min_balance
    Start(ctx,c,"UO")
    dep = ctx.contracts['L1_EthPool'].functions.clientDepositL1(
        0,
        '0x0000000000000000000000000000000000000000'
    )
    r = c.buildAndSubmit(ctx, dep, {'value':amount})
    c.on_chain = 2
    WatchEv(ctx,c,"UO",r)
def SlowExit(ctx,c, ng):
    """Withdraw child c's spare L2 balance back to L1 via the slow path.

    Parameters:
        ng: True to exit through the NG portal (L2_BobaPortal, op "SN"),
            False for the traditional standard bridge (SB_2, op "SX").

    Everything above min_balance is withdrawn; the child is then marked as
    living on chain 1 and the tx is registered for event watching.
    """
    if ng:
        cn='L2_BobaPortal'
        optype="SN"
    else:
        cn='SB_2'
        optype="SX"
    Start(ctx,c,optype)
    chain = 2
    # Keep min_balance behind for gas on L2.
    amount = Web3.toWei(ctx.rpc[2].eth.getBalance(c.acct.address) - min_balance, 'wei')
    print("DBG Amount",amount,"of", Web3.toWei(ctx.rpc[2].eth.getBalance(c.acct.address), 'wei'))
    t = ctx.contracts[cn].functions.withdraw(
        '0x4200000000000000000000000000000000000006',
        amount,
        0, # L1-gas, unused
        '0x41424344',
    )
    r = c.buildAndSubmit(ctx, t, {})
    c.on_chain = 1
    WatchEv(ctx,c,optype,r)
def FastExit(ctx, c, ng):
    """Withdraw child c's spare L2 balance back to L1 via the fast path.

    Parameters:
        ng: True for the NG pool (L2_EthPool, op "FN"), False for the
            traditional liquidity pool (LP_2, op "FX").

    If the balance exceeds half of the corresponding pool's available L1
    depth, falls back to SlowExit() instead.
    """
    if ng:
        cn= 'L2_EthPool'
        optype="FN"
    else:
        cn='LP_2'
        optype="FX"
    # t[2] is used as the traditional pool's available depth for the
    # half-depth check -- presumably mirrors Onramp_fast; confirm against
    # the LP_1 poolInfo layout.
    t = ctx.contracts['LP_1'].functions.poolInfo('0x0000000000000000000000000000000000000000').call()
    bb = ctx.rpc[2].eth.getBalance(c.acct.address)
    if ng and bb > (ctx.contracts['L2_EthPool'].functions.safeL1Balance().call() / 2.0):
        print("Falling back to NG slow exit")
        SlowExit(ctx,c,ng)
    elif ng == False and bb > (t[2] / 2.0):
        print("Falling back to traditional exit")
        SlowExit(ctx,c,ng)
    else:
        Start(ctx,c,optype)
        # Keep min_balance behind for gas on L2.
        amount = Web3.toWei(bb - min_balance, 'wei')
        print("DBG exit amount = ",amount,"bb",bb)
        dep = ctx.contracts[cn].functions.clientDepositL2(
            amount,
            '0x4200000000000000000000000000000000000006'
        )
        r = c.buildAndSubmit(ctx, dep, {'value':Web3.toWei(amount,'wei')})
        c.on_chain = 1
        WatchEv(ctx,c,optype,r)
def SendFunds(ctx, c):
    """Send a tenth of child c's balance to a randomly chosen peer address.

    If the random pick happens to be the child's own address, the round
    becomes a NOP and the child goes straight back onto the ready queue.
    """
    bal = ctx.rpc[c.on_chain].eth.getBalance(c.acct.address) # FIXME - cache this in the Child structure
    target = addrs[randint(0, len(addrs) - 1)]
    if target == c.acct.address:
        # Picked ourselves: skip this round.
        lPrint(ctx.log, "Child " + str(c.num) + " NOP on chain " + str(c.on_chain))
        readyQueue.put(c)
        return
    Start(ctx,c,"PY")
    tt = xFund(ctx, c, target, bal / 10.0)
    myAssert(tt not in txWatch)
    Watch(ctx,c,"PY",tt)
def StopChild(ctx, c):
    """Wind a child down: refund its balances to the parent, then finish.

    Tries chain 1 then chain 2.  As soon as a refund tx is submitted on a
    chain, the function returns (it will be re-entered later for the other
    chain).  When neither chain holds more than min_balance, the child is
    marked done ("DN") and Finish()ed.
    """
    print("StopChild",c.num,"op",c.op)
    for ch in range(1,3):
        # FIXME - try removing liquidity instead, if staked on either chain.
        if c.staked[ch] or c.staked_NG:
            pass
        b = ctx.rpc[ch].eth.getBalance(c.acct.address)
        lPrint(ctx.log, "StopChild " + str(c.num) + " chain " + str(ch) + " balance " + str(b))
        if b > min_balance:
            try:
                Start(ctx,c,"RF")
                # Refund everything above min_balance back to the parent.
                r = Fund(ctx, c.acct, c.parent, ch, Web3.toWei(b - min_balance, 'wei'))
                c.gasEstimate = 21000
                lPrint(ctx.log, "Child " + str(c.num) + " refunding " + str(Web3.fromWei(b,'ether')) + " to " + c.parent + " on chain " + str(ch) + " tx " + Web3.toHex(r))
                Watch(ctx,c,"RF",r)
            except Exception as e:
                # Refund failed; record the failure and try the next chain.
                lPrint(ctx.log, "ERROR Refund attempt failed for child " + str(c.num) + " on chain " + str(ch) + " error " + str(e))
                Finish(c, False)
                continue
            # Refund submitted; come back later for the other chain.
            return
        else:
            lPrint(ctx.log, "Child " + str(c.num) + " is below minimum refund balance on chain " + str(ch))
    # Nothing left to refund on either chain: mark the child done.
    # NOTE(review): Watch is called here without a tx handle -- presumably it
    # supports closing an op without one; confirm against Watch's signature.
    Start(ctx, c,"DN")
    Watch(ctx, c,"DN")
    c.exiting = True
    Finish(c)
# Create a child process and give it 1/2 of my balance
def SpawnChild(ctx,c):
    """Placeholder: spawn a new funded child process.

    FIXME - not yet implemented.  Intended behaviour: create a new Child
    process, update num_children, append it to the child list, and start a
    Fund transaction transferring half of this child's balance to it.
    """
    pass
def RollDice(ctx, c, prob):
    """Decide whether child c performs an operation this round.

    If the child has a preload script, it overrides the RNG: the head of
    c.preload counts down the number of rounds to skip (returning False),
    and when it reaches zero it is popped and True is returned.  With no
    preload, returns True with probability `prob` percent.
    """
    if c.preload:
        if c.preload[0] <= 0:
            # Countdown exhausted: consume this entry and force the op.
            c.preload.pop(0)
            lPrint(ctx.log, "Overriding RNG for child " + str(c.num) + ", " + str(len(c.preload)) + " more operations pending")
            return True
        # Still counting down: decrement and skip this round.
        c.preload[0] -= 1
        return False
    # No preload: roll a percentage die.
    return randint(0, 100) < prob
def dispatch(ctx, prefix, c):
    """Pick and launch the next operation for child c.

    The operation is chosen by rolling weighted dice (env['op_pct'], row 0
    for children currently on L1, row 1 for L2).  On L2, contract approvals
    are performed first if not yet done, and exit operations are gated so
    that at least a minimum number of children stay active on L2.

    Parameters:
        prefix: log-line prefix identifying the worker/chain/child.
    """
    if c.on_chain == 1:
        # ERC20 approval not presently needed on L1
        if RollDice(ctx,c,env['op_pct'][0][0]):
            bal = ctx.rpc[c.on_chain].eth.getBalance(c.acct.address)
            lPrint(ctx.log, prefix + "will add/remove liquidity")
            AddLiquidity(ctx, c, Web3.toWei(bal / 4.0,'wei'))
        elif RollDice(ctx,c, env['op_pct'][0][1]):
            lPrint(ctx.log, prefix + "will fast-onramp")
            Onramp_fast(ctx, c)
        elif RollDice(ctx,c, env['op_pct'][0][2]):
            lPrint(ctx.log, prefix + "will traditonal-onramp")
            Onramp_trad(ctx, c)
        elif RollDice(ctx,c, env['op_pct'][0][3]):
            bal = ctx.rpc[c.on_chain].eth.getBalance(c.acct.address)
            # AddLiquidity_NG toggles: it removes the stake if one exists.
            if c.staked_NG:
                lPrint(ctx.log, prefix + "will remove NG liquidity")
            else:
                lPrint(ctx.log, prefix + "will add NG liquidity")
            AddLiquidity_NG(ctx, c, Web3.toWei(bal / 4.0,'wei'))
        elif RollDice(ctx,c, env['op_pct'][0][4]):
            lPrint(ctx.log, prefix + "will use NG unified onramp")
            Onramp_NG(ctx, c)
        else:
            # Fallthrough: plain peer-to-peer transfer.
            lPrint(ctx.log, prefix + "Will send funds")
            SendFunds(ctx, c)
    else:
        if not c.approved[2]:
            lPrint(ctx.log, prefix + "Approving contracts")
            # Currently synchronous, could do multi-step waits for completion
            Approve(ctx, boba_addrs['Proxy__L2LiquidityPool'], c.acct)
            if env['ng_enabled']:
                Approve(ctx, boba_addrs['L2_EthPool'], c.acct)
                Approve(ctx, boba_addrs['L2_BobaPortal'], c.acct)
            Approve(ctx, '0x4200000000000000000000000000000000000010', c.acct)
            c.approved[2] = True
            # Approvals consumed this round; requeue for a real op next time.
            readyQueue.put(c)
            return
        # Keep a minimum population on L2: block exits when too many
        # children are already idle or mid-operation.
        mayExit = True
        minActive = int(num_children / min_active_per)
        if (len(evWatch) + idleQueue.qsize()) >= (num_children - minActive):
            mayExit = False
        if RollDice(ctx,c,env['op_pct'][1][0]):
            bal = ctx.rpc[c.on_chain].eth.getBalance(c.acct.address)
            lPrint(ctx.log, prefix + "will add/remove liquidity")
            AddLiquidity(ctx, c, Web3.toWei(bal / 4.0,'wei'))
        elif mayExit and RollDice(ctx,c, env['op_pct'][1][1]):
            lPrint(ctx.log, prefix + "will fast-exit")
            r = FastExit(ctx, c, False)
        elif mayExit and RollDice(ctx,c, env['op_pct'][1][2]):
            lPrint(ctx.log, prefix + "will slow-exit")
            SlowExit(ctx, c,False)
        elif mayExit and RollDice(ctx,c, env['op_pct'][1][3]):
            lPrint(ctx.log, prefix + "will NG fast-exit")
            r = FastExit(ctx, c, True)
        elif mayExit and RollDice(ctx,c, env['op_pct'][1][4]):
            lPrint(ctx.log, prefix + "will NG slow-exit")
            SlowExit(ctx, c, True)
        else:
            lPrint(ctx.log, prefix + "Will send funds")
            SendFunds(ctx, c)
def worker_thread(env, A, num, cx, ch):
    # Worker loop: pulls child accounts off readyQueue, logs their balances on
    # both chains, and switches a child to the other chain (or parks it on
    # idleQueue) when its current chain is underfunded.
    # NOTE(review): the loop body appears to continue past this chunk; the
    # dispatch step after the funding checks is not visible here -- confirm.
    ctx = Context(env, A, "./logs/worker-"+str(num)+".log",None)
    wasBusy = True
    while shutdown.num_done < num_children and shutdown.level < 2:
        c = None
        try:
            c = readyQueue.get(False)  # non-blocking get; raises when empty
            wasBusy = True
        except:
            # Only log the empty-queue condition once per idle stretch.
            if wasBusy:
                lPrint(ctx.log, "Worker " + str(num) + " readyQueue is empty")
            wasBusy = False
        if not c:
            time.sleep(2)
            continue
        b1 = ctx.rpc[1].eth.getBalance(c.acct.address)
        b2 = ctx.rpc[2].eth.getBalance(c.acct.address)
        # Request funding if necessary
        bal = ctx.rpc[c.on_chain].eth.getBalance(c.acct.address)
        if shutdown.level > 0:
            StopChild(ctx, c) # A child might get here several times before it's done
            continue
        prefix = "W " + str(num) + " ch " + str(c.on_chain) + " Child " + str(c.num) + " "
        s = prefix + "dispatching at " + str(time.time()) + " b1 " + str(Web3.fromWei(b1,'ether'))
        s += " b2 " + str(Web3.fromWei(b2,'ether')) + " L2_Share " + str(int(100*b2/(b1+b2))) + "%"
        s += " Total " + str(Web3.fromWei(b1+b2,'ether'))
        lPrint(ctx.log, s)
        if (bal >= min_balance):
            pass  # sufficiently funded on the current chain
        elif c.on_chain == 1 and b2 >= min_balance:
            lPrint(ctx.log, prefix + "balance low, switching to chain 2")
            c.on_chain = 2
        elif c.on_chain == 2 and b1 >= min_balance:
            lPrint(ctx.log, prefix + "balance low, switching to chain 1")
            c.on_chain = 1
        else:
            lPrint(ctx.log, prefix + "has insufficient funding on either chain")
            idleQueue.put(c)  # park until funds arrive
| |
"""
A collection of useful functions and data structures for working with data captured by Heimann HTPA32x32d and other thermopile sensor arrays.
Data types:
* txt - Heimann HTPA recordings in *.TXT,
* np - NumPy array of thermopile sensor array data, shaped [frames, height, width],
* csv - thermopile sensor array data in *.csv file, NOT a pandas dataframe!
* df - pandas dataframe,
* pc - NumPy array of pseudocolored thermopile sensor array data, shaped [frames, height, width, channels],
Warnings:
when converting TXT -> other types the array is rotated 90 deg. CW
numpy array order is 'K'
txt array order is 'F'
"""
import numpy as np
import pandas as pd
import cv2
import os
import imageio
from scipy.spatial.distance import cdist
import matplotlib.pyplot as plt
import pickle
import itertools
import json
import glob
import collections
import shutil
import pickle
import re
DTYPE = "float32"  # NumPy dtype used for all decoded frame arrays
PD_SEP = ","  # field separator of the legacy CSV format
PD_NAN = np.inf  # placeholder written to the PTAT column by write_np2csv
PD_DTYPE = np.float32  # dtype rows are cast to before CSV serialization
READ_CSV_ARGS = {"skiprows": 1}  # skip the legacy "HTPA 32x32d" banner row
PD_TIME_COL = "Time (sec)"  # timestamp column name in CSV files
PD_PTAT_COL = "PTAT"  # PTAT (sensor temperature) column name in CSV files
HTPA_UDP_MODULE_WEBCAM_IMG_EXT = "jpg"  # extension of webcam images from the HTPA UDP module (not used in this chunk)
# Maps a lowercase file extension to the reader/writer family used by
# read_tpa_file()/write_tpa_file().
READERS_EXTENSIONS_DICT = {
    "txt": "txt",
    "csv": "csv",
    "pickle": "pickle",
    "pkl": "pickle",
    "p": "pickle",
}
SUPPORTED_EXTENSIONS = list(READERS_EXTENSIONS_DICT.keys())  # extensions accepted by read/write_tpa_file
def remove_extension(filepath):
    """Return *filepath* with its final extension stripped.

    Uses os.path.splitext so dots inside directory names or multi-dot
    file names are handled correctly; the previous ``split(".")[0]``
    returned everything before the FIRST dot in the whole path.
    """
    return os.path.splitext(filepath)[0]
def get_extension(filepath):
    """Return the final extension of *filepath*, without the dot.

    Uses os.path.splitext so dots inside directory names or multi-dot
    file names are handled correctly; the previous ``split(".")[1]``
    returned the token after the FIRST dot in the whole path.
    """
    return os.path.splitext(filepath)[1][1:]
def ensure_path_exists(path):
    """Create directory *path* (including parents) if it does not exist."""
    if not os.path.exists(path):
        # exist_ok guards against the race where another process creates the
        # directory between the exists() check and makedirs().
        os.makedirs(path, exist_ok=True)
def ensure_parent_exists(path):
    """Make sure the directory that will contain *path* exists."""
    parent_dir = os.path.dirname(path)
    ensure_path_exists(parent_dir)
def read_tpa_file(filepath: str, array_size: int = 32):
    """
    Convert Heimann HTPA file to NumPy array shaped [frames, height, width].
    Currently supported: see SUPPORTED_EXTENSIONS flag
    Parameters
    ----------
    filepath : str
    array_size : int, optional (for txt files only)
        Sensor array edge length, default 32.
    Returns
    -------
    np.array
        3D array of temperature distribution sequence, shaped [frames, height, width].
    list
        list of timestamps
    """
    extension_lowercase = get_extension(filepath).lower()
    assert (extension_lowercase in SUPPORTED_EXTENSIONS)
    reader = READERS_EXTENSIONS_DICT[extension_lowercase]
    if reader == 'txt':
        # Fix: array_size was accepted but silently ignored; forward it.
        return txt2np(filepath, array_size=array_size)
    if reader == 'csv':
        return csv2np(filepath)
    if reader == 'pickle':
        return pickle2np(filepath)
def write_tpa_file(filepath: str, array, timestamps: list, header=None) -> bool:
    """
    Write a Heimann HTPA sequence to disk, choosing the writer from the
    file extension (see SUPPORTED_EXTENSIONS).
    Parameters
    ----------
    filepath : str
        Destination path, including the file name.
    array : np.array
        Temperature distribution sequence, shaped [frames, height, width].
    timestamps : list
        Timestamps of the corresponding frames.
    header : str, optional
        Custom file header; only supported for .txt output.
    """
    extension = get_extension(filepath).lower()
    assert (extension in SUPPORTED_EXTENSIONS)
    target_format = READERS_EXTENSIONS_DICT[extension]
    if target_format == 'pickle':
        assert not header
        return write_np2pickle(filepath, array, timestamps)
    if target_format == 'csv':
        assert not header
        return write_np2csv(filepath, array, timestamps)
    if target_format == 'txt':
        return write_np2txt(filepath, array, timestamps, header=header)
def modify_txt_header(filepath : str, new_header):
    """Replace the first line of *filepath* with *new_header*.

    Trailing whitespace of the new header is stripped and a single
    newline appended before writing the file back.
    """
    normalized = new_header.rstrip() + "\n"
    with open(filepath) as src:
        contents = src.readlines()
    contents[0] = normalized
    with open(filepath, "w") as dst:
        dst.writelines(contents)
def read_txt_header(filepath: str):
    """
    Read Heimann HTPA .txt header.
    Parameters
    ----------
    filepath : str
    Returns
    -------
    str
        First line of the file with trailing whitespace removed.
    """
    with open(filepath) as txt_file:
        return txt_file.readline().rstrip()
def txt2np(filepath: str, array_size: int = 32):
    """
    Parse a Heimann HTPA .txt recording into a NumPy array shaped
    [frames, height, width].
    Parameters
    ----------
    filepath : str
    array_size : int, optional
        Sensor edge length (default 32 for a 32x32 array).
    Returns
    -------
    np.array
        3D array of temperature distribution sequence, shaped [frames, height, width].
    list
        list of timestamps
    """
    pixel_count = array_size ** 2
    frames = []
    timestamps = []
    with open(filepath) as f:
        f.readline()  # discard the header line
        for line in f:
            tokens = line.split(" ")
            # pixel values are stored as integer centi-degrees
            raw = np.array([int(token) for token in tokens[:pixel_count]], dtype=DTYPE)
            frame = raw.reshape([array_size, array_size], order="F")
            frame *= 1e-2
            frames.append(frame)
            timestamps.append(float(tokens[-1]))
    # the array needs rotating 90 CW
    sequence = np.rot90(np.array(frames), k=-1, axes=(1, 2))
    return sequence, timestamps
def write_np2txt(output_fp: str, array, timestamps: list, header: str = None) -> bool:
    """
    Convert and save Heimann HTPA NumPy array shaped [frames, height, width] to a txt file.
    Parameters
    ----------
    output_fp : str
        Filepath to destination file, including the file name.
    array : np.array
        Temperature distribution sequence, shaped [frames, height, width].
    timestamps : list
        List of timestamps of corresponding array frames.
    header : str, optional
        TXT header; defaults to "HTPA32x32d".
    Returns
    -------
    bool
        True if success (consistent with the other writers).
    """
    ensure_parent_exists(output_fp)
    # undo the TXT -> np rotation applied by txt2np (see module docstring)
    frames = np.rot90(array, k=1, axes=(1, 2))
    if header:
        header = header.rstrip()
        header += "\n"
    else:
        header = "HTPA32x32d\n"
    with open(output_fp, 'w') as file:
        file.write(header)
        for step, t in zip(frames, timestamps):
            line = ""
            for val in step.flatten("F"):
                # legacy fixed-width field: drop the decimal point, keep 4 chars
                line += ("%02.2f" % val).replace(".", "")[:4] + " "
            file.write("{}t: {}\n".format(line, t))
    # Fix: function is annotated -> bool and write_tpa_file forwards its
    # result, but the original fell through returning None.
    return True
def write_np2pickle(output_fp: str, array, timestamps: list) -> bool:
    """
    Pickle a Heimann HTPA recording as an (array, timestamps) pair.
    Parameters
    ----------
    output_fp : str
        Filepath to destination file, including the file name.
    array : np.array
        Temperature distribution sequence, shaped [frames, height, width].
    timestamps : list
        List of timestamps of corresponding array frames.
    Returns
    -------
    bool
        True if success.
    """
    ensure_parent_exists(output_fp)
    payload = (array, timestamps)
    with open(output_fp, "wb") as handle:
        pickle.dump(payload, handle)
    return True
def pickle2np(filepath: str):
    """
    Load a pickled Heimann HTPA recording: an (array, timestamps) pair,
    as written by write_np2pickle().  (The previous docstring wrongly
    described this reader as a .txt parser.)
    Parameters
    ----------
    filepath : str
    Returns
    -------
    np.array
        3D array of temperature distribution sequence, shaped [frames, height, width].
    list
        list of timestamps
    """
    with open(filepath, "rb") as f:
        frames, timestamps = pickle.load(f)
    return frames, timestamps
def write_np2csv(output_fp: str, array, timestamps: list) -> bool:
    """
    Convert and save Heimann HTPA NumPy array shaped [frames, height, width] to .CSV dataframe.
    CSV should preferably represent the data collected without preprocessing, cropping or any data manipulation.
    Parameters
    ----------
    output_fp : str
        Filepath to destination file, including the file name.
    array : np.array
        Temperatue distribution sequence, shaped [frames, height, width].
    timestamps : list
        List of timestamps of corresponding array frames.
    Returns
    -------
    bool
        True if success.
    """
    ensure_parent_exists(output_fp)
    # initialize csv template (and append frames later)
    # prepend first row for compatibility with legacy format
    first_row = pd.DataFrame({"HTPA 32x32d": []})
    first_row.to_csv(output_fp, index=False, sep=PD_SEP)
    # column header row: time, PTAT, then one "P%04d" column per pixel
    headers = {PD_TIME_COL: [], PD_PTAT_COL: []}
    df = pd.DataFrame(headers)
    for idx in range(np.prod(array.shape[1:])):
        df.insert(len(df.columns), "P%04d" % idx, [])
    df.to_csv(output_fp, mode="a", index=False, sep=PD_SEP)
    # append one CSV row per frame; PD_NAN fills the PTAT column, for which
    # no value is available here
    for idx in range(array.shape[0]):
        frame = array[idx, ...]
        timestamp = timestamps[idx]
        temps = list(frame.flatten())
        row_data = [timestamp, PD_NAN]
        row_data.extend(temps)
        row = pd.DataFrame([row_data])
        row = row.astype(PD_DTYPE)
        # mode="a": each row is appended to the file written above
        row.to_csv(output_fp, mode="a", header=False, sep=PD_SEP, index=False)
    return True
def csv2np(csv_fp: str):
    """
    Read and convert .CSV dataframe to a Heimann HTPA NumPy array shaped [frames, height, width]
    Parameters
    ----------
    csv_fp : str
        Filepath to the csv file to read.
    Returns
    -------
    array : np.array
        Temperature distribution sequence, shape [frames, height, width].
    timestamps : list
        List of timestamps of corresponding array frames.
    """
    df = pd.read_csv(csv_fp, **READ_CSV_ARGS)
    # Fix: return a plain list, as documented and as txt2np/pickle2np do;
    # the original leaked a pandas Series.
    timestamps = df[PD_TIME_COL].tolist()
    array = df.drop([PD_TIME_COL, PD_PTAT_COL], axis=1).to_numpy(dtype=DTYPE)
    array = reshape_flattened_frames(array)
    return array, timestamps
def apply_heatmap(array, cv_colormap: int = cv2.COLORMAP_JET) -> np.ndarray:
    """
    Applies pseudocoloring (heatmap) to a sequence of thermal distribution. Same as np2pc().
    np2pc() is preferred.
    Parameters
    ----------
    array : np.array
        (frames, height, width)
    cv_colormap : int, optional
        OpenCV colormap id.
    Returns
    -------
    np.array
        (frames, height, width, channels)
    """
    # renamed locals: the original shadowed the builtins min/max
    vmin, vmax = array.min(), array.max()
    shape = array.shape
    # NOTE(review): a constant-valued array gives vmax == vmin and a
    # division by zero here, same as the original behavior.
    array_normalized = (255 * ((array - vmin) / (vmax - vmin))).astype(np.uint8)
    heatmap_flat = cv2.applyColorMap(array_normalized.flatten(), cv_colormap)
    return heatmap_flat.reshape([shape[0], shape[1], shape[2], 3])
def np2pc(array, cv_colormap: int = cv2.COLORMAP_JET) -> np.ndarray:
    """
    Pseudocolor (heatmap) a thermal sequence; preferred alias of apply_heatmap().
    Parameters
    ----------
    array : np.array
        (frames, height, width)
    cv_colormap : int, optional
        OpenCV colormap id.
    Returns
    -------
    np.array
        (frames, height, width, channels)
    """
    pseudocolored = apply_heatmap(array, cv_colormap)
    return pseudocolored
def save_frames(array, dir_name: str, extension: str = ".bmp") -> bool:
    """
    Extracts and saves frames from a sequence array into a folder dir_name
    Parameters
    ----------
    array : np.array
        (frames, height, width, channels)
    dir_name : str
        Output directory; created (including parents) when missing.
    extension : str, optional
        Image file extension, default ".bmp".
    Returns
    -------
    bool
        True if success
    """
    # Fix: os.mkdir failed when the parent directory was missing and raced
    # with concurrent creation; reuse the module helper instead.
    ensure_path_exists(dir_name)
    for idx, frame in enumerate(array):
        cv2.imwrite(os.path.join(dir_name, "%d" % idx + extension), frame)
    return True
def flatten_frames(array):
    """
    Flattens array of shape [frames, height, width] into array of shape [frames, height*width]
    Parameters
    ----------
    array : np.array
        (frames, height, width)
    Returns
    -------
    np.array
        flattened array (frames, height*width)
    """
    # docstring fix only: the return shape was documented as the unflattened
    # (frames, height, width)
    _, height, width = array.shape
    return array.reshape((-1, height * width))
def write_pc2gif(array, fp: str, fps=10, loop: int = 0, duration=None):
"""
Converts and saves NumPy array of pseudocolored thermopile sensor array data, shaped [frames, height, width, channels], into a .gif file
Parameters
----------
array : np.array
Pseudocolored data (frames, height, width, channels).
fp : str
The filepath to write to.
fps : float, optional
Default 10, approx. equal to a typical thermopile sensor array FPS value.
loop : int, | |
1
ATOM 552 N N . ALA A 1 99 ? 26.253 -16.456 25.181 1.00 18.00 ? 100 ALA A N 1
ATOM 553 C CA . ALA A 1 99 ? 26.103 -15.041 24.775 1.00 19.23 ? 100 ALA A CA 1
ATOM 554 C C . ALA A 1 99 ? 27.438 -14.338 24.535 1.00 18.66 ? 100 ALA A C 1
ATOM 555 O O . ALA A 1 99 ? 27.509 -13.373 23.800 1.00 20.15 ? 100 ALA A O 1
ATOM 556 C CB . ALA A 1 99 ? 25.259 -14.278 25.817 1.00 20.91 ? 100 ALA A CB 1
ATOM 557 N N . ASP A 1 100 ? 28.508 -14.874 25.102 1.00 17.21 ? 101 ASP A N 1
ATOM 558 C CA . ASP A 1 100 ? 29.853 -14.282 24.947 1.00 17.31 ? 101 ASP A CA 1
ATOM 559 C C . ASP A 1 100 ? 30.493 -14.543 23.583 1.00 18.65 ? 101 ASP A C 1
ATOM 560 O O . ASP A 1 100 ? 31.454 -13.819 23.198 1.00 17.99 ? 101 ASP A O 1
ATOM 561 C CB . ASP A 1 100 ? 30.782 -14.828 26.003 1.00 17.91 ? 101 ASP A CB 1
ATOM 562 C CG . ASP A 1 100 ? 30.493 -14.326 27.366 1.00 20.29 ? 101 ASP A CG 1
ATOM 563 O OD1 . ASP A 1 100 ? 29.918 -13.198 27.507 1.00 25.36 ? 101 ASP A OD1 1
ATOM 564 O OD2 . ASP A 1 100 ? 30.824 -15.057 28.330 1.00 19.87 ? 101 ASP A OD2 1
ATOM 565 N N . LEU A 1 101 ? 30.039 -15.591 22.894 1.00 16.84 ? 102 LEU A N 1
ATOM 566 C CA . LEU A 1 101 ? 30.626 -15.982 21.664 1.00 17.27 ? 102 LEU A CA 1
ATOM 567 C C . LEU A 1 101 ? 30.337 -14.943 20.601 1.00 18.38 ? 102 LEU A C 1
ATOM 568 O O . LEU A 1 101 ? 29.263 -14.342 20.537 1.00 17.12 ? 102 LEU A O 1
ATOM 569 C CB . LEU A 1 101 ? 30.215 -17.386 21.230 1.00 16.87 ? 102 LEU A CB 1
ATOM 570 C CG . LEU A 1 101 ? 30.561 -18.532 22.180 1.00 17.21 ? 102 LEU A CG 1
ATOM 571 C CD1 . LEU A 1 101 ? 29.955 -19.784 21.544 1.00 17.38 ? 102 LEU A CD1 1
ATOM 572 C CD2 . LEU A 1 101 ? 32.056 -18.687 22.400 1.00 16.56 ? 102 LEU A CD2 1
ATOM 573 N N . VAL A 1 102 ? 31.406 -14.610 19.896 1.00 16.46 ? 103 VAL A N 1
ATOM 574 C CA . VAL A 1 102 ? 31.365 -13.680 18.834 1.00 16.53 ? 103 VAL A CA 1
ATOM 575 C C . VAL A 1 102 ? 31.599 -14.469 17.545 1.00 19.06 ? 103 VAL A C 1
ATOM 576 O O . VAL A 1 102 ? 30.954 -15.501 17.368 1.00 19.67 ? 103 VAL A O 1
ATOM 577 C CB . VAL A 1 102 ? 32.208 -12.438 19.149 1.00 16.44 ? 103 VAL A CB 1
ATOM 578 C CG1 . VAL A 1 102 ? 31.522 -11.631 20.245 1.00 15.98 ? 103 VAL A CG1 1
ATOM 579 C CG2 . VAL A 1 102 ? 33.657 -12.822 19.475 1.00 16.45 ? 103 VAL A CG2 1
ATOM 580 N N . ASN A 1 103 ? 32.423 -13.975 16.636 1.00 18.02 ? 104 ASN A N 1
ATOM 581 C CA . ASN A 1 103 ? 32.468 -14.454 15.254 1.00 17.00 ? 104 ASN A CA 1
ATOM 582 C C . ASN A 1 103 ? 33.614 -15.387 14.967 1.00 18.35 ? 104 ASN A C 1
ATOM 583 O O . ASN A 1 103 ? 33.752 -15.884 13.841 1.00 20.76 ? 104 ASN A O 1
ATOM 584 C CB . ASN A 1 103 ? 32.541 -13.291 14.286 1.00 17.26 ? 104 ASN A CB 1
ATOM 585 C CG . ASN A 1 103 ? 33.858 -12.517 14.365 1.00 17.20 ? 104 ASN A CG 1
ATOM 586 O OD1 . ASN A 1 103 ? 34.357 -12.038 13.364 1.00 20.04 ? 104 ASN A OD1 1
ATOM 587 N ND2 . ASN A 1 103 ? 34.355 -12.332 15.540 1.00 14.77 ? 104 ASN A ND2 1
ATOM 588 N N . TYR A 1 104 ? 34.536 -15.510 15.883 1.00 15.38 ? 105 TYR A N 1
ATOM 589 C CA . TYR A 1 104 ? 35.678 -16.390 15.635 1.00 15.75 ? 105 TYR A CA 1
ATOM 590 C C . TYR A 1 104 ? 36.170 -16.792 17.005 1.00 13.14 ? 105 TYR A C 1
ATOM 591 O O . TYR A 1 104 ? 36.819 -15.956 17.701 1.00 13.64 ? 105 TYR A O 1
ATOM 592 C CB . TYR A 1 104 ? 36.725 -15.649 14.846 1.00 15.97 ? 105 TYR A CB 1
ATOM 593 C CG . TYR A 1 104 ? 37.817 -16.574 14.465 1.00 17.40 ? 105 TYR A CG 1
ATOM 594 C CD1 . TYR A 1 104 ? 37.590 -17.650 13.582 1.00 21.32 ? 105 TYR A CD1 1
ATOM 595 C CD2 . TYR A 1 104 ? 39.063 -16.451 15.024 1.00 19.41 ? 105 TYR A CD2 1
ATOM 596 C CE1 . TYR A 1 104 ? 38.591 -18.554 13.285 1.00 21.69 ? 105 TYR A CE1 1
ATOM 597 C CE2 . TYR A 1 104 ? 40.073 -17.347 14.714 1.00 19.56 ? 105 TYR A CE2 1
ATOM 598 C CZ . TYR A 1 104 ? 39.827 -18.391 13.850 1.00 21.69 ? 105 TYR A CZ 1
ATOM 599 O OH . TYR A 1 104 ? 40.835 -19.271 13.538 1.00 26.80 ? 105 TYR A OH 1
ATOM 600 N N . ASN A 1 105 ? 35.862 -18.022 17.410 1.00 13.63 ? 106 ASN A N 1
ATOM 601 C CA . ASN A 1 105 ? 35.988 -18.416 18.800 1.00 14.68 ? 106 ASN A CA 1
ATOM 602 C C . ASN A 1 105 ? 36.567 -19.835 18.845 1.00 15.58 ? 106 ASN A C 1
ATOM 603 O O . ASN A 1 105 ? 36.015 -20.704 19.496 1.00 15.64 ? 106 ASN A O 1
ATOM 604 C CB . ASN A 1 105 ? 34.603 -18.445 19.461 1.00 17.64 ? 106 ASN A CB 1
ATOM 605 C CG . ASN A 1 105 ? 33.701 -17.318 19.004 1.00 17.58 ? 106 ASN A CG 1
ATOM 606 O OD1 . ASN A 1 105 ? 34.002 -16.147 19.196 1.00 18.58 ? 106 ASN A OD1 1
ATOM 607 N ND2 . ASN A 1 105 ? 32.564 -17.673 18.461 1.00 18.83 ? 106 ASN A ND2 1
ATOM 608 N N . PRO A 1 106 ? 37.695 -20.091 18.139 1.00 15.82 ? 107 PRO A N 1
ATOM 609 C CA . PRO A 1 106 ? 38.205 -21.460 17.986 1.00 16.60 ? 107 PRO A CA 1
ATOM 610 C C . PRO A 1 106 ? 38.517 -22.138 19.320 1.00 15.46 ? 107 PRO A C 1
ATOM 611 O O . PRO A 1 106 ? 38.388 -23.370 19.478 1.00 17.06 ? 107 PRO A O 1
ATOM 612 C CB . PRO A 1 106 ? 39.433 -21.273 17.091 1.00 16.95 ? 107 PRO A | |
self.element_type == "hex":
self.GetFaces()
self.GetBoundaryFaces()
self.GetBoundaryEdges()
return
def ReadFRO(self, filename, element_type):
    """Read a .fro surface mesh file.

    filename     : path to the .fro file
    element_type : only "tri" is supported
    """
    if self.elements is not None and self.points is not None:
        self.__reset__()
    if element_type == "tri":
        el = 5
    else:
        raise NotImplementedError("Reading FRO files for {} elements not yet implemented".format(element_type))
    content = np.fromfile(filename, dtype=np.float64, sep=" ")
    nelem = int(content[0])
    nnode = int(content[1])
    # content[3] holds the surface count; it is not needed here
    points = content[8:8+4*nnode].reshape(nnode,4)[:,1:]
    # hoisted: the original reshaped the same cell slice twice
    cells = content[8+4*nnode:].reshape(nelem, el)
    elements = cells[:,1:-1].astype(np.int64) - 1
    face_to_surface = cells[:,-1].astype(np.int64) - 1
    self.nelem = nelem
    self.nnode = nnode
    self.elements = np.ascontiguousarray(elements)
    self.element_type = element_type
    self.points = np.ascontiguousarray(points)
    if self.element_type == "tri" or self.element_type == "quad":
        self.GetEdges()
        self.GetBoundaryEdges()
    elif self.element_type == "tet" or self.element_type == "hex":
        self.GetFaces()
        self.GetBoundaryFaces()
        self.GetBoundaryEdges()
    self.face_to_surface = np.ascontiguousarray(face_to_surface)
    return
def ReadHDF5(self,filename):
    """Read mesh from MATLAB HDF5 file format.

    Every ndarray in the file is assigned as an attribute of the mesh;
    element/edge/face connectivities are cast to uint64.
    """
    if self.elements is not None and self.points is not None:
        self.__reset__()
    DictOutput = loadmat(filename)
    # GENERIC READER - READS EVERYTHING FROM HDF5 AND ASSIGNS IT TO MESH OBJECT
    for key, value in DictOutput.items():
        if isinstance(DictOutput[key],np.ndarray):
            if "elements" in key or "edge" in key or "face" in key:
                setattr(self, key, np.ascontiguousarray(value).astype(np.uint64))
            else:
                setattr(self, key, np.ascontiguousarray(value))
        else:
            setattr(self, key, value)
    # unwrap scalars that MATLAB stored as 1-element arrays
    if isinstance(self.element_type,np.ndarray):
        self.element_type = str(self.element_type[0])
    if isinstance(self.nelem,np.ndarray):
        self.nelem = int(self.nelem[0])
    for key in self.__dict__.keys():
        if isinstance(self.__dict__[str(key)],np.ndarray):
            if self.__dict__[str(key)].size == 1:
                # np.asscalar() was deprecated in NumPy 1.16 and removed in
                # 1.23; ndarray.item() is the documented equivalent.
                self.__dict__[str(key)] = self.__dict__[str(key)].item()
def ReadDCM(self, filename, element_type="quad", ndim=2):
    """EZ4U (.dcm) mesh reader; only quadrilateral meshes are supported."""
    if element_type != "quad":
        raise NotImplementedError("DCM/EZ4U reader for {} elements not yet implemented".format(element_type))
    self.__reset__()
    self.element_type = element_type
    raw = np.fromfile(filename, dtype=np.float64, sep=" ")
    self.nnode = int(raw[0])
    self.nelem = int(raw[1])
    coord_block = raw[3:self.nnode*4+3].reshape(self.nnode, 4)
    if ndim == 2:
        self.points = coord_block[:, [1, 2]]
    else:
        self.points = coord_block[:, 1:]
    self.elements = raw[self.nnode*4+3:].astype(np.int64).reshape(self.nelem, 11)[:, 7:] - 1
    # collapse a redundant all-zero z-coordinate to a true 2D mesh
    if self.points.shape[1] == 3 and np.allclose(self.points[:, 2], 0.):
        self.points = np.ascontiguousarray(self.points[:, :2])
    self.GetEdgesQuad()
    self.GetBoundaryEdgesQuad()
def SimplePlot(self, to_plot='faces', color=None, edge_color=None, point_color=None,
    plot_points=False, plot_faces=None, plot_edges=True, point_radius=None,
    save=False, filename=None, figure=None, show_plot=True, show_axis=False, grid="off"):
    """Simple mesh plot

    Uses matplotlib for 2D element types (tri/quad/pent) and mayavi for
    3D element types (tet/hex) and line meshes.

    to_plot: [str] only for 3D. 'faces' to plot only boundary faces
        or 'all_faces' to plot all faces
    grid: [str] None, "on" or "off"
    """
    self.__do_essential_memebers_exist__()
    # REDIRECT FOR 3D SURFACE MESHES
    if self.element_type == "tri" or self.element_type == "quad":
        if self.points.ndim == 2 and self.points.shape[1] == 3:
            mesh = self.CreateDummy3DMeshfrom2DMesh()
            mesh.SimplePlot(to_plot=to_plot, color=color, plot_points=plot_points,
                plot_edges=plot_edges, point_radius=point_radius,
                save=save, filename=filename, figure=figure, show_plot=show_plot,
                show_axis=show_axis, grid=grid)
            return
    ndim = self.InferSpatialDimension()
    edim = self.InferElementalDimension()
    # default appearance
    if color is None:
        color=(197/255.,241/255.,197/255.)
    if edge_color is None:
        edge_color = (0,0,0)
    if point_color is None:
        point_color = (0,0,0)
    if grid is None:
        grid = "off"
    if point_radius is None:
        if ndim == 2:
            point_radius = 0.75
        else:
            point_radius = 0.1
    # resolve the output file name when saving
    if save:
        if filename is None:
            warn('File name not given. I am going to write one in the current directory')
            filename = PWD(__file__) + "/output.png"
        else:
            if filename.split(".")[-1] == filename:
                filename += ".png"
    import matplotlib as mpl
    # pick the plotting backend based on the element type
    if self.element_type == "tri" or self.element_type == "quad" or self.element_type == "pent":
        import matplotlib.pyplot as plt
        if figure is None:
            figure = plt.figure()
    elif self.element_type == "tet" or self.element_type == "hex":
        import os
        os.environ['ETS_TOOLKIT'] = 'qt4'
        # os.environ['ETS_TOOLKIT'] = 'wx'
        from mayavi import mlab
        if to_plot == 'all_faces':
            if self.all_faces is None:
                self.GetFaces()
            faces = self.all_faces
        else:
            if self.faces is None:
                self.GetBoundaryFaces()
            faces = self.faces
        if figure is None:
            figure = mlab.figure(bgcolor=(1,1,1),fgcolor=(1,1,1),size=(1000,800))
    # normalize colors: 0-255 RGB tuples -> 0-1 floats, hex strings -> tuples
    if color is not None:
        if isinstance(color,tuple):
            if len(color) != 3:
                raise ValueError("Color should be given in a rgb/RGB tuple format with 3 values i.e. (x,y,z)")
            if color[0] > 1.0 or color[1] > 1.0 or color[2] > 1.0:
                color = (color[0]/255.,color[1]/255.,color[2]/255.)
        elif isinstance(color,str):
            color = mpl.colors.hex2color(color)
    if edge_color is not None:
        if isinstance(edge_color,tuple):
            if len(edge_color) != 3:
                raise ValueError("Color should be given in a rgb/RGB tuple format with 3 values i.e. (x,y,z)")
            if edge_color[0] > 1.0 or edge_color[1] > 1.0 or edge_color[2] > 1.0:
                edge_color = (edge_color[0]/255.,edge_color[1]/255.,edge_color[2]/255.)
        elif isinstance(edge_color,str):
            edge_color = mpl.colors.hex2color(edge_color)
    # by default fill faces only for 3D elements
    if plot_faces is None:
        if edim == 3:
            plot_faces = True
        else:
            plot_faces = False
    if self.element_type == "tri":
        plt.triplot(self.points[:,0],self.points[:,1], self.elements[:,:3],color=edge_color)
        if plot_faces:
            plt.tricontourf(self.points[:,0], self.points[:,1], self.elements[:,:3],
                np.ones(self.points.shape[0]), 100, alpha=0.3)
        if plot_points:
            plt.plot(self.points[:,0],self.points[:,1], "o", color=point_color, markersize=point_radius)
        plt.axis("equal")
        if not show_axis:
            plt.axis('off')
        if grid == "on":
            plt.grid("on")
        if show_plot:
            plt.show()
    elif self.element_type == "tet":
        if plot_faces:
            mlab.triangular_mesh(self.points[:,0],self.points[:,1],
                self.points[:,2],faces[:,:3],color=color)
        radius = 1e-00
        if plot_edges:
            # second surface in wireframe mode draws the edges
            mlab.triangular_mesh(self.points[:,0],self.points[:,1],self.points[:,2], faces[:,:3],
                line_width=radius,tube_radius=radius,color=edge_color,
                representation='wireframe')
        if plot_points:
            mlab.points3d(self.points[:,0],self.points[:,1],self.points[:,2],
                color=point_color,mode='sphere',scale_factor=point_radius)
        # svpoints = self.points[np.unique(self.faces),:]
        # mlab.points3d(svpoints[:,0],svpoints[:,1],svpoints[:,2],color=(0,0,0),mode='sphere',scale_factor=0.005)
        # mlab.view(azimuth=135, elevation=45, distance=7, focalpoint=None,
        #     roll=0, reset_roll=True, figure=None)
        if show_plot:
            mlab.show()
    elif self.element_type=="quad":
        # build per-edge coordinate arrays so high-order edges plot correctly
        C = self.InferPolynomialDegree() - 1
        pdim = self.points.shape[1]
        edge_elements = self.GetElementsEdgeNumberingQuad()
        reference_edges = NodeArrangementQuad(C)[0]
        reference_edges = np.concatenate((reference_edges,reference_edges[:,1,None]),axis=1)
        reference_edges = np.delete(reference_edges,1,1)
        self.GetEdgesQuad()
        x_edges = np.zeros((C+2,self.all_edges.shape[0]))
        y_edges = np.zeros((C+2,self.all_edges.shape[0]))
        z_edges = np.zeros((C+2,self.all_edges.shape[0]))
        BasesOneD = np.eye(2,2)
        for iedge in range(self.all_edges.shape[0]):
            ielem = edge_elements[iedge,0]
            edge = self.elements[ielem,reference_edges[edge_elements[iedge,1],:]]
            if pdim == 2:
                x_edges[:,iedge], y_edges[:,iedge] = self.points[edge,:].T
            elif pdim == 3:
                x_edges[:,iedge], y_edges[:,iedge], z_edges[:,iedge] = self.points[edge,:].T
        plt.plot(x_edges,y_edges,'-', color=edge_color)
        if plot_points:
            plt.plot(self.points[:,0],self.points[:,1], "o", color=point_color, markersize=point_radius)
        if plot_faces:
            # fill each quad as two triangles
            plt.tricontourf(self.points[:,0], self.points[:,1], self.elements[:,:3],
                np.ones(self.points.shape[0]), 100, alpha=0.3)
            plt.tricontourf(self.points[:,0], self.points[:,1], self.elements[:,[0,2,3]],
                np.ones(self.points.shape[0]), 100, alpha=0.3)
        plt.axis('equal')
        if not show_axis:
            plt.axis('off')
        if grid == "on":
            plt.grid("on")
        if show_plot:
            plt.show()
    elif self.element_type == "hex":
        if to_plot == "all_faces":
            ProjectionFlags = np.ones(faces.shape[0],dtype=np.int64)
        else:
            ProjectionFlags = None
        from Florence.PostProcessing import PostProcess
        # tessellate hexes into triangles for mayavi
        tmesh = PostProcess.TessellateHexes(self,np.zeros_like(self.points),plot_points=True,
            interpolation_degree=0, ProjectionFlags=ProjectionFlags)
        Xplot = tmesh.points
        Tplot = tmesh.elements
        # color=(197/255.,241/255.,197/255.)
        point_line_width = .002
        if plot_faces:
            trimesh_h = mlab.triangular_mesh(Xplot[:,0], Xplot[:,1], Xplot[:,2], Tplot,
                line_width=point_line_width,color=color)
        if plot_edges:
            src = mlab.pipeline.scalar_scatter(tmesh.x_edges.T.copy().flatten(),
                tmesh.y_edges.T.copy().flatten(), tmesh.z_edges.T.copy().flatten())
            src.mlab_source.dataset.lines = tmesh.connections
            h_edges = mlab.pipeline.surface(src, color = edge_color, line_width=3)
            # AVOID WARNINGS
            # lines = mlab.pipeline.stripper(src)
            # h_edges = mlab.pipeline.surface(lines, color = edge_color, line_width=3)
        # mlab.view(azimuth=135, elevation=45, distance=7, focalpoint=None,
        #     roll=0, reset_roll=True, figure=None)
        if plot_points:
            mlab.points3d(self.points[:,0],self.points[:,1],self.points[:,2],
                color=point_color,mode='sphere',scale_factor=point_radius)
        if show_plot:
            mlab.show()
    elif self.element_type == "line":
        import os
        os.environ['ETS_TOOLKIT'] = 'qt4'
        from mayavi import mlab
        if figure is None:
            figure = mlab.figure(bgcolor=(1,1,1),fgcolor=(1,1,1),size=(1000,800))
        if self.points.ndim == 1:
            self.points = self.points[:,None]
        # embed 1D/2D point coordinates into 3D for mayavi
        points = np.zeros((self.points.shape[0],3))
        if self.points.shape[1] == 1:
            points[:,0] = np.copy(self.points[:,0])
        if self.points.shape[1] == 2:
            points[:,:2] = np.copy(self.points)
        elif self.points.shape[1] == 3:
            points = np.copy(self.points)
        if plot_edges:
            src = mlab.pipeline.scalar_scatter(points[:,0],points[:,1],points[:,2])
            src.mlab_source.dataset.lines = self.elements[:,:2]
            lines = mlab.pipeline.stripper(src)
            h_edges = mlab.pipeline.surface(lines, color = (0,0,0), line_width=2)
        if plot_points:
            h_points = mlab.points3d(points[:,0],points[:,1],points[:,2],color=(0,0,0),mode='sphere',scale_factor=point_radius)
        if show_plot:
            mlab.show()
    else:
        raise NotImplementedError("SimplePlot for {} not implemented yet".format(self.element_type))
    if save:
        ndim = self.InferSpatialDimension()
        if ndim == 2:
            plt.savefig(filename,format="png",dpi=300)
        else:
            mlab.savefig(filename,dpi=300)
def PlotMeshNumbering(self, figure=None, show_plot=True):
"""Plots element and node numbers on top of the triangular mesh"""
self.__do_essential_memebers_exist__()
import matplotlib.pyplot as plt
import matplotlib as mpl
if self.element_type == "tri":
if figure is None:
figure = plt.figure()
plt.triplot(self.points[:,0],self.points[:,1], self.elements[:,:3])
plt.tricontourf(self.points[:,0], self.points[:,1], self.elements[:,:3], np.ones(self.points.shape[0]), 100,alpha=0.3)
for i in range(0,self.elements.shape[0]):
coord = self.points[self.elements[i,:],:]
x_avg = np.sum(coord[:,0])/self.elements.shape[1]
y_avg = np.sum(coord[:,1])/self.elements.shape[1]
plt.text(x_avg,y_avg,str(i),backgroundcolor='#F88379',ha='center')
for i in range(0,self.points.shape[0]):
plt.text(self.points[i,0],self.points[i,1],str(i),backgroundcolor='#0087BD',ha='center')
plt.axis('equal')
if show_plot:
plt.show()
elif self.element_type == "quad":
if figure is None:
figure = plt.figure()
point_radius = 3.
C = self.InferPolynomialDegree() - 1
edge_elements = self.GetElementsEdgeNumberingQuad()
reference_edges = NodeArrangementQuad(C)[0]
reference_edges = np.concatenate((reference_edges,reference_edges[:,1,None]),axis=1)
reference_edges = np.delete(reference_edges,1,1)
self.GetEdgesQuad()
x_edges = np.zeros((C+2,self.all_edges.shape[0]))
y_edges = np.zeros((C+2,self.all_edges.shape[0]))
BasesOneD = np.eye(2,2)
for iedge in range(self.all_edges.shape[0]):
ielem = edge_elements[iedge,0]
edge = self.elements[ielem,reference_edges[edge_elements[iedge,1],:]]
x_edges[:,iedge], y_edges[:,iedge] = self.points[edge,:].T
plt.plot(x_edges,y_edges,'-k')
for i in range(self.elements.shape[0]):
coord = self.points[self.elements[i,:],:]
x_avg = np.sum(coord[:,0])/self.elements.shape[1]
y_avg = np.sum(coord[:,1])/self.elements.shape[1]
plt.text(x_avg,y_avg,str(i),backgroundcolor='#F88379',ha='center')
for i in range(0,self.points.shape[0]):
plt.text(self.points[i,0],self.points[i,1],str(i),backgroundcolor='#0087BD',ha='center')
plt.axis('equal')
if show_plot:
plt.show()
elif self.element_type == "tet" or self.element_type == "hex":
import matplotlib as mpl
import os
os.environ['ETS_TOOLKIT'] = 'qt4'
from mayavi import mlab
if figure is None:
figure = mlab.figure(bgcolor=(1,1,1),fgcolor=(1,1,1),size=(800,600))
view = mlab.view()
figure.scene.disable_render = True
color = mpl.colors.hex2color('#F88379')
linewidth = 3.
# trimesh_h = mlab.triangular_mesh(self.points[:,0],
# self.points[:,1], self.points[:,2], self.faces[:,:3],
# line_width=linewidth,tube_radius=linewidth,color=(0,0.6,0.4),
# representation='wireframe') # representation='surface'
# # CHANGE LIGHTING OPTION
# trimesh_h.actor.property.interpolation = 'phong'
# trimesh_h.actor.property.specular = 0.1
# trimesh_h.actor.property.specular_power = 5
# PLOTTING EDGES
from Florence.PostProcessing import PostProcess
tmesh = PostProcess(3,3).Tessellate(self, np.zeros_like(self.points), interpolation_degree=0,
plot_points=True, plot_edges=True, plot_surfaces=False)
x_edges = tmesh.x_edges
y_edges = tmesh.y_edges
z_edges = tmesh.z_edges
connections = tmesh.connections
src = mlab.pipeline.scalar_scatter(x_edges.T.copy().flatten(), y_edges.T.copy().flatten(), z_edges.T.copy().flatten())
src.mlab_source.dataset.lines = connections
h_edges = mlab.pipeline.surface(src, color = (0,0.6,0.4), line_width=linewidth)
# AVOID WARNINGS
# lines = mlab.pipeline.stripper(src)
# h_edges = mlab.pipeline.surface(lines, color = (0,0.6,0.4), line_width=linewidth)
# ELEMENT NUMBERING
# for i in range(0,self.elements.shape[0]):
# coord = self.points[self.elements[i,:],:]
# x_avg = np.sum(coord[:,0])/self.elements.shape[1]
# y_avg = np.sum(coord[:,1])/self.elements.shape[1]
# z_avg = np.sum(coord[:,2])/self.elements.shape[1]
# # mlab.text3d(x_avg,y_avg,z_avg,str(i),color=color)
# mlab.text3d(x_avg,y_avg,z_avg,str(i),color=(0,0,0.),scale=2)
# POINT NUMBERING
for i in range(self.elements.shape[0]):
for j in range(self.elements.shape[1]):
text_obj | |
for i in equip_list:
if i['imei'] == i0:
equip_list.remove(i)
exist.remove(i0)
break
ret = json.dumps(equip_list, ensure_ascii=True, indent=4)
return ret
def equip_tower_mapping(querydict):
    """Map bird-repelling-device IMEIs to the towers they are mounted on.

    A tower is a GeoJSON feature whose ``properties.metals`` array contains
    an entry of type ``u'多功能驱鸟装置'``.  If the request carries an
    ``imei`` only that device is resolved; otherwise every device with a
    non-empty imei on any tower is returned.

    Returns a JSON object string: {imei: {tower_id, name, lng, lat, alt}, ...}
    """
    def tower_to_obj(feature):
        # Flatten a GeoJSON tower feature into the small dict the client expects.
        return {
            'tower_id': feature['_id'],
            'name': feature['properties']['name'],
            'lng': feature['geometry']['coordinates'][0],
            'lat': feature['geometry']['coordinates'][1],
            'alt': feature['geometry']['coordinates'][2],
        }
    def find_towers(elem_match):
        # All tower features whose metals array matches elem_match.
        return db_util.mongo_find(
            gConfig['webgis']['mongodb']['database'],
            'features',
            {
                "properties.webgis_type": "point_tower",
                "properties.metals": {"$elemMatch": elem_match},
            },
            0,
            'webgis'
        )
    ret = {}
    if 'imei' in querydict:
        matches = find_towers({"type": u"多功能驱鸟装置", "imei": querydict['imei']})
        if len(matches) > 0:
            ret[querydict['imei']] = tower_to_obj(matches[0])
    else:
        for feature in find_towers({"type": u"多功能驱鸟装置"}):
            for metal in feature['properties']['metals']:
                # Only device entries that actually carry a non-empty imei.
                if metal.get('type') == u'多功能驱鸟装置' and len(metal.get('imei', '')) > 0:
                    ret[metal['imei']] = tower_to_obj(feature)
    return json.dumps(ret, ensure_ascii=True, indent=4)
# --- request dispatch for the anti-bird equipment endpoints ---
statuscode, headers, body = '200 OK', {}, ''
urls = gUrlMap.bind_to_environ(environ)
querydict, buf = get_querydict_by_GET_POST(environ)
endpoint, args = urls.match()
# Fold URL path arguments into the query dict so the nested handlers only
# need to look in one place.
if args.has_key('_id') and isinstance(querydict, dict):
    querydict['_id'] = args['_id']
if args.has_key('imei') and isinstance(querydict, dict):
    querydict['imei'] = args['imei']
if args.has_key('records_num') and isinstance(querydict, dict):
    querydict['records_num'] = args['records_num']
# Route to the matching nested handler by endpoint name.
if endpoint == 'get_equip_list':
    body = get_equip_list(environ, querydict)
elif endpoint == 'get_latest_records_by_imei':
    body = get_latest_records_by_imei(environ, querydict)
elif endpoint == 'equip_tower_mapping':
    body = equip_tower_mapping(querydict)
return statuscode, headers, body
def handle_bayesian(environ):
    def get_collection(collection):
        """Return the named MongoDB collection, creating it on first use."""
        db_util.mongo_init_client('webgis')
        db = db_util.gClientMongo['webgis'][gConfig['webgis']['mongodb']['database']]
        # collection_names(False) excludes system collections.
        if collection not in db.collection_names(False):
            return db.create_collection(collection)
        return db[collection]
# def convert_strkey_to_bool(obj):
# if isinstance(obj, list):
# for i in range(0, len(obj)):
# obj[i] = convert_strkey_to_bool(obj[i])
# if isinstance(obj, dict):
# for k in obj.keys():
# if k in ['true', u'true']:
# obj[True] = obj[k]
# del obj['true']
# del obj[u'true']
# elif k in ['false', u'false']:
# obj[False] = obj[k]
# del obj['false']
# del obj[u'false']
# obj[k] = convert_strkey_to_bool(obj[k])
#
# return obj
def save_by_id(querydict, collection_name):
    """Upsert one record (dict) or many (list) and return the stored copies as JSON.

    Records whose '_id' is missing or None are inserted (MongoDB assigns an
    id); records carrying an '_id' are replaced.
    """
    ret = []
    collection = get_collection(collection_name)
    if isinstance(querydict, list):
        ids = []
        for item in querydict:
            # Drop a null '_id' so MongoDB generates one.  The original code
            # indexed item['_id'] directly and raised KeyError when the key
            # was absent entirely; tolerate that case as well.
            if item.get('_id') is None:
                item.pop('_id', None)
            id = collection.save(db_util.add_mongo_id(item))
            if id:
                ids.append(id)
        ret = list(collection.find({'_id': {'$in': ids}}))
    elif isinstance(querydict, dict):
        # NOTE(review): the single-dict path does not strip a null '_id'
        # before saving -- kept as-is to preserve existing behaviour.
        id = collection.save(db_util.add_mongo_id(querydict))
        ret = collection.find_one({'_id': id})
    return json.dumps(db_util.remove_mongo_id(ret), ensure_ascii=True, indent=4)
def delete_by_id(querydict, collection_name):
    """Delete record(s) by '_id' and return the deleted info as JSON.

    querydict['_id'] may be a single id string or a list of id strings.
    """
    ret = ''
    collection = get_collection(collection_name)
    if '_id' in querydict:
        if isinstance(querydict['_id'], (str, unicode)):
            existone = collection.find_one({'_id': db_util.add_mongo_id(querydict['_id'])})
            if existone:
                collection.remove({'_id': existone['_id']})
                ret = json.dumps(db_util.remove_mongo_id(existone), ensure_ascii=True, indent=4)
            else:
                ret = json.dumps({'result': u'record_not_exist'}, ensure_ascii=True, indent=4)
        if isinstance(querydict['_id'], list):
            ids = db_util.add_mongo_id(querydict['_id'])
            collection.remove({'_id': {'$in': ids}})
            ret = json.dumps(db_util.remove_mongo_id(querydict['_id']), ensure_ascii=True, indent=4)
    return ret
def bayesian_query_domains_range(querydict):
    """Return every document in 'bayesian_domains_range' as a JSON list."""
    collection = get_collection('bayesian_domains_range')
    docs = list(collection.find({}))
    return json.dumps(db_util.remove_mongo_id(docs), ensure_ascii=True, indent=4)
def bayesian_save_domains_range(querydict):
    """Persist domain-range record(s); thin wrapper over save_by_id."""
    collection_name = 'bayesian_domains_range'
    return save_by_id(querydict, collection_name)
def bayesian_delete_domains_range(querydict):
    """Delete domain-range record(s); thin wrapper over delete_by_id."""
    collection_name = 'bayesian_domains_range'
    return delete_by_id(querydict, collection_name)
def bayesian_query_node(querydict):
    """Return all Bayesian nodes belonging to the requested line, as JSON."""
    nodes = []
    if querydict.has_key('line_name') and len(querydict['line_name']):
        collection = get_collection('bayesian_nodes')
        nodes = list(collection.find({'line_name': querydict['line_name']}))
    return json.dumps(db_util.remove_mongo_id(nodes), ensure_ascii=True, indent=4)
def bayesian_query_graphiz(querydict):
    """Render the line's Bayesian network as Graphviz source text."""
    source = ''
    if querydict.has_key('line_name') and len(querydict['line_name']):
        graph = create_bbn_by_line_name(querydict['line_name'])
        # Rendering options with their defaults; 'rankdir' controls the
        # Graphviz layout direction.
        dpi, rankdir = 100, 'LL'
        if querydict.has_key('dpi') and len(querydict['dpi']):
            dpi = int(querydict['dpi'])
        if querydict.has_key('rankdir') and len(querydict['rankdir']):
            rankdir = querydict['rankdir']
        source = graph.get_graphviz_source(dpi, rankdir)
    return enc(source)
def bayesian_save_node(querydict):
    """Persist Bayesian node record(s); thin wrapper over save_by_id."""
    return save_by_id(querydict, 'bayesian_nodes')
def bayesian_delete_node(querydict):
    # Delete node documents by id, then scrub every remaining node's
    # conditional-probability table of references to the deleted names.
    # Returns the line's surviving nodes as JSON when 'line_name' is given.
    ret = '[]'
    delete_by_id(querydict, 'bayesian_nodes')
    collection = get_collection('bayesian_nodes')
    if querydict.has_key('names'):
        if isinstance(querydict['names'], list):
            # names = [str(i) for i in querydict['names']]
            names = querydict['names']
            # Nodes whose deeply nested 'conditions' structure (a list of
            # [condition-list, ...] entries, each condition a [name, value]
            # pair) mentions any deleted name.
            l = list(collection.find({'conditions': {'$elemMatch': {'$elemMatch': {'$elemMatch': {'$elemMatch':{'$in': names}}}}}}))
            for i in l:
                existlist = []
                conditions = []
                # Pass 1: strip [name, value] pairs whose name was deleted.
                # NOTE(review): list.index(ii) returns the FIRST matching
                # entry -- with duplicate condition rows this could update
                # the wrong slot; confirm rows are unique.
                for ii in i['conditions']:
                    idx = i['conditions'].index(ii)
                    tmp = []
                    for iii in ii[0]:
                        # idx1 = ii[0].index(iii)
                        if not iii[0] in names:
                            tmp.append(iii)
                    ii[0] = tmp
                    i['conditions'][idx] = ii
                # Pass 2: stripping can collapse entries into duplicates;
                # keep only the first entry for each 'name:value|' key.
                for ii in i['conditions']:
                    key = ''
                    for iii in ii[0]:
                        key += iii[0] + ':' + iii[1] + '|'
                    if not key in existlist:
                        existlist.append(key)
                        conditions.append(ii)
                i['conditions'] = conditions
                collection.save(i)
    if querydict.has_key('line_name') and len(querydict['line_name'])>0:
        ret = bayesian_query_node(querydict)
    return ret
def bayesian_query_predict(querydict):
    # Run inference on the line's Bayesian network.  Every query parameter
    # other than 'line_name' is treated as evidence; 'line_state' may be a
    # list, in which case the query is repeated once per requested state.
    ret = []
    if querydict.has_key('line_name') and len(querydict['line_name']):
        g = create_bbn_by_line_name(querydict['line_name'])
        del querydict['line_name']
        qd = {}
        querymulti = False
        for k in querydict.keys():
            # Keyword-argument names must be byte strings in Python 2, so
            # coerce unicode keys/values via str().
            # NOTE(review): str() raises UnicodeEncodeError for non-ASCII
            # values -- presumably evidence values are ASCII; confirm.
            if isinstance(querydict[k], unicode):
                qd[str(k)] = str(querydict[k])
            elif isinstance(querydict[k], list) and k == u'line_state':
                querymulti = True
            else:
                qd[str(k)] = querydict[k]
        if querymulti:
            # One inference run per requested line state.
            for i in querydict['line_state']:
                qd['line_state'] = str(i)
                ret.append({'line_state':i, 'result':bayes_util.query_bbn_condition(g, **qd)})
        else:
            ret = bayes_util.query_bbn_condition(g, **qd)
    ret = json.dumps(ret, ensure_ascii=True, indent=4)
    return ret
def reset_unit_by_line_name(line_name):
    # Recompute the conditional-probability tables of the per-unit nodes
    # (names matching 'unit_<digit>') from the latest state-examination
    # data, then return all of the line's nodes sorted by name.
    collection = get_collection('bayesian_nodes')
    units = list(collection.find({'line_name':line_name, 'name':{'$regex':'^unit_[0-9]$'}}))
    data = bayes_util.get_state_examination_data_by_line_name(line_name)
    o = bayes_util.calc_probability_unit(data)
    for unit in units:
        # Only units for which fresh probabilities were computed are saved.
        if o.has_key(unit['name']):
            unit['conditions'] = o[unit['name']]
            # print(unit['name'])
            # print(unit['conditions'])
            collection.save(unit)
    ret = list(collection.find({'line_name':line_name}).sort('name', pymongo.ASCENDING))
    return ret
def bayesian_reset_unit(querydict):
    """Recompute unit-node probabilities for a line; return its nodes as JSON."""
    nodes = []
    if querydict.has_key('line_name') and len(querydict['line_name']):
        nodes = reset_unit_by_line_name(querydict['line_name'])
    return json.dumps(db_util.remove_mongo_id(nodes), ensure_ascii=True, indent=4)
def build_additional_condition(line_name, cond):
    """Overlay each stored node's CPT onto cond (keyed by node name).

    Mutates and returns cond.
    """
    collection = get_collection('bayesian_nodes')
    for node in collection.find({'line_name': line_name}):
        cond[node['name']] = node['conditions']
    return cond
def create_bbn_by_line_name(line_name):
    """Build the Bayesian belief network for a line from stored conditionals."""
    cond = bayes_util.build_state_examination_condition(line_name)
    cond = build_additional_condition(line_name, cond)
    # Prefer the C-accelerated builder when it is available.
    if bayes_util.USE_C_MODULE:
        print('using c-accelerate module...')
        return bayes_util.build_bbn_from_conditionals(cond)
    print('using pure-python module...')
    return bayes_util.build_bbn_from_conditionals_plus(cond)
# --- request dispatch for the Bayesian-network endpoints ---
statuscode, headers, body = '200 OK', {}, ''
urls = gUrlMap.bind_to_environ(environ)
querydict, buf = get_querydict_by_GET_POST(environ)
endpoint, args = urls.match()
# Fold a URL path '_id' into the query dict so handlers only look in one place.
if args.has_key('_id') and isinstance(querydict, dict):
    querydict['_id'] = args['_id']
# Route to the matching nested handler by endpoint name.
if endpoint == 'bayesian_query_node':
    body = bayesian_query_node(querydict)
elif endpoint == 'bayesian_save_node':
    body = bayesian_save_node(querydict)
elif endpoint == 'bayesian_query_predict':
    body = bayesian_query_predict(querydict)
elif endpoint == 'bayesian_reset_unit':
    body = bayesian_reset_unit(querydict)
elif endpoint == 'bayesian_query_graphiz':
    body = bayesian_query_graphiz(querydict)
    # Graphviz source is plain text, not JSON.
    headers['Content-Type'] = 'text/plain'
elif endpoint == 'bayesian_delete_node':
    body = bayesian_delete_node(querydict)
elif endpoint == 'bayesian_save_domains_range':
    body = bayesian_save_domains_range(querydict)
elif endpoint == 'bayesian_delete_domains_range':
    body = bayesian_delete_domains_range(querydict)
elif endpoint == 'bayesian_query_domains_range':
    body = bayesian_query_domains_range(querydict)
return statuscode, headers, body
# --- top-level URL routing: initialise the per-request response state ---
headers = {}
headerslist = []
cookie_header = None
statuscode = '200 OK'
body = ''
path_info = environ['PATH_INFO']
# Dispatch on the request path; each handler returns (status, headers, body).
if 'proxy.cgi' in path_info:
    statuscode, headers, body = handle_proxy_cgi(environ)
elif path_info == '/test':
    statuscode, headers, body = handle_test(environ)
elif path_info == '/get':
    statuscode, headers, body = handle_get_method(environ)
elif path_info == '/post':
    statuscode, headers, body = handle_post_method(environ)
elif path_info == '/wmts':
    statuscode, headers, body = handle_wmts(environ)
elif path_info == '/tiles':
    statuscode, headers, body = handle_tiles(environ)
elif '/arcgistile' in path_info:
    statuscode, headers, body = handle_arcgistile(environ)
elif path_info == '/terrain/layer.json' or path_info[-8:] == '.terrain':
    statuscode, headers, body = handle_terrain(environ)
#elif path_info[-8:] == '.terrain':
#return handle_terrain1(environ)
# elif path_info == '/wfs':
#     statuscode, headers, body = handle_wfs(environ)
elif path_info =='/create_cluster' or path_info =='/kill_cluster':
    statuscode, headers, body = handle_cluster(environ)
elif path_info == '/websocket':
    statuscode, headers, body = handle_websocket(environ)
elif len(path_info)>6 and path_info[:6] == '/proxy':
    statuscode, headers, body = proxy(environ)
    # Proxied responses must never be cached by the browser.
    headers['Cache-Control'] = 'no-cache'
# elif path_info == '/anti_bird_equip_list':
# statuscode, headers, body = anti_bird_equip_list(environ)
# elif path_info == '/anti_bird_equip_tower_mapping':
# statuscode, headers, body = anti_bird_equip_tower_mapping(environ)
# elif path_info == '/anti_bird_get_latest_records_by_imei':
# statuscode, headers, body = anti_bird_get_latest_records_by_imei(environ)
else:
if path_info[-1:] == '/':
path_info = gConfig['web']['indexpage']
if str(gConfig['webgis']['session']['enable_session'].lower()) == 'true' :
# and path_info in ['/login', '/logout', gConfig['web']['loginpage'], gConfig['web']['indexpage'], gConfig['web']['mainpage']]:
if gSessionStore is None:
gSessionStore = FilesystemSessionStore()
is_expire = False
with session_manager(environ):
sess, cookie_header, is_expire = session_handle(environ, gRequest, gSessionStore)
if path_info == str(gConfig['web']['unauthorizedpage']):
if not sess.has_key('ip'):
sess['ip'] = environ['REMOTE_ADDR']
gSessionStore.save_if_modified(sess)
headerslist.append(('Content-Type', str(gConfig['mime_type']['.html'])))
headerslist.append(cookie_header)
statuscode, headers, body = handle_static(environ, gConfig['web']['unauthorizedpage'])
start_response('401 Unauthorized', headerslist)
return [body]
if path_info == '/logout':
gSessionStore.delete(sess)
sess, cookie_header, is_expire = session_handle(environ, gRequest, gSessionStore)
headerslist.append(cookie_header)
headerslist.append(('Content-Type', 'text/json;charset=' + ENCODING))
start_response('200 OK', headerslist)
return [json.dumps({'result':u'ok'}, ensure_ascii=True, indent=4)]
if is_expire:
if not sess.has_key('ip'):
sess['ip'] = environ['REMOTE_ADDR']
gSessionStore.save_if_modified(sess)
headerslist.append(('Content-Type', str(gConfig['mime_type']['.html'])))
headerslist.append(cookie_header)
statuscode, headers, body = handle_static(environ, gConfig['web']['unauthorizedpage'])
start_response('401 Unauthorized', headerslist)
return [body]
# headerslist.append(('Location', str(gConfig['web']['expirepage'])))
# start_response('302 Redirect', headerslist)
# return ['']
# headerslist.append(('Content-Type', 'text/json;charset=' + ENCODING))
# statuscode = '200 OK'
# body = json.dumps({'result':u'session_expired'}, ensure_ascii=True, indent=4)
if path_info == '/login':
user = handle_login(environ)
if user:
sess = gSessionStore.session_class(user, sess.sid, False)
sess['username'] = user['username']
cookie_header = set_cookie_data(gRequest, {'_id':user['_id'], 'username': user['username'], 'displayname': user['displayname']})
gSessionStore.save_if_modified(sess)
headerslist.append(cookie_header)
headerslist.append(('Content-Type', 'text/json;charset=' + ENCODING))
start_response('200 OK', headerslist)
return [json.dumps(sess, ensure_ascii=True, indent=4)]
else:
headerslist.append(cookie_header)
headerslist.append(('Content-Type', 'text/json;charset=' + ENCODING))
start_response('200 OK', headerslist)
return [json.dumps({'result':u'用户名或密码错误'}, ensure_ascii=True, indent=4)]
if path_info == str(gConfig['web']['mainpage']):
#401 Unauthorized
#if session_id is None or token is None:
headerslist.append(('Content-Type', str(gConfig['mime_type']['.html'])))
headerslist.append(cookie_header)
if sess is None or len(sess.keys())==0 or len(sess.sid)==0 or not sess.has_key('username'):
statuscode, headers, body = handle_static(environ, gConfig['web']['unauthorizedpage'])
statuscode = '401 Unauthorized'
start_response(statuscode, headerslist)
return | |
| (forever + stmt)
| (repeat + LPAR + expr + RPAR + stmt)
| (while_ + LPAR + expr + RPAR + stmt)
| (
for_
+ LPAR
+ assgnmt
+ SEMI
+ Group(expr)
+ SEMI
+ assgnmt
+ RPAR
+ stmt
)
| (fork + ZeroOrMore(stmt) + join)
| (
fork
+ COLON
+ identifier
+ ZeroOrMore(blockDecl)
+ ZeroOrMore(stmt)
+ end
)
| (wait + LPAR + expr + RPAR + stmtOrNull)
| ("->" + identifier + SEMI)
| (disable + identifier + SEMI)
| (assign + assgnmt + SEMI)
| (deassign + lvalue + SEMI)
| (force + assgnmt + SEMI)
| (release + lvalue + SEMI)
| (
begin
+ COLON
+ identifier
+ ZeroOrMore(blockDecl)
+ ZeroOrMore(stmt)
+ end
).setName("begin:label-end")
|
# these *have* to go at the end of the list!!!
(assgnmt + SEMI)
| (nbAssgnmt + SEMI)
| (
Combine(Optional("$") + identifier)
+ Optional(LPAR + delimitedList(expr | empty) + RPAR)
+ SEMI
)
).setName("stmtBody")
"""
x::=<blocking_assignment> ;
x||= <non_blocking_assignment> ;
x||= if ( <expression> ) <statement_or_null>
x||= if ( <expression> ) <statement_or_null> else <statement_or_null>
x||= case ( <expression> ) <case_item>+ endcase
x||= casez ( <expression> ) <case_item>+ endcase
x||= casex ( <expression> ) <case_item>+ endcase
x||= forever <statement>
x||= repeat ( <expression> ) <statement>
x||= while ( <expression> ) <statement>
x||= for ( <assignment> ; <expression> ; <assignment> ) <statement>
x||= <delay_or_event_control> <statement_or_null>
x||= wait ( <expression> ) <statement_or_null>
x||= -> <name_of_event> ;
x||= <seq_block>
x||= <par_block>
x||= <task_enable>
x||= <system_task_enable>
x||= disable <name_of_task> ;
x||= disable <name_of_block> ;
x||= assign <assignment> ;
x||= deassign <lvalue> ;
x||= force <assignment> ;
x||= release <lvalue> ;
"""
# 'always' construct: optional event control (e.g. @(posedge clk)) then a statement.
alwaysStmt = Group("always" + Optional(eventControl) + stmt).setName(
    "alwaysStmt"
)
# 'initial' construct: a statement executed once at simulation start.
initialStmt = Group("initial" + stmt).setName("initialStmt")
# Charge strength for trireg nets: (small|medium|large).
chargeStrength = Group(LPAR + oneOf("small medium large") + RPAR).setName(
    "chargeStrength"
)
# Continuous assignment: assign [strength] [delay] lhs = rhs {, lhs = rhs} ;
continuousAssign = Group(
    assign
    + Optional(driveStrength)
    + Optional(delay)
    + delimitedList(assgnmt)
    + SEMI
).setName("continuousAssign")
# Declarations allowed inside task/function bodies.
tfDecl = (
    parameterDecl
    | inputDecl
    | outputDecl
    | inoutDecl
    | regDecl
    | timeDecl
    | integerDecl
    | realDecl
)
# Function declaration: optional return range/type, name, declarations,
# statements, terminated by 'endfunction'.
functionDecl = Group(
    "function"
    + Optional(range | "integer" | "real")
    + identifier
    + SEMI
    + Group(OneOrMore(tfDecl))
    + Group(ZeroOrMore(stmt))
    + "endfunction"
)
inputOutput = oneOf("input output")
netDecl1Arg = (
nettype
+ Optional(expandRange)
+ Optional(delay)
+ Group(delimitedList(~inputOutput + identifier))
)
netDecl2Arg = (
"trireg"
+ Optional(chargeStrength)
+ Optional(expandRange)
+ Optional(delay)
+ Group(delimitedList(~inputOutput + identifier))
)
netDecl3Arg = (
nettype
+ Optional(driveStrength)
+ Optional(expandRange)
+ Optional(delay)
+ Group(delimitedList(assgnmt))
)
netDecl1 = Group(netDecl1Arg + SEMI).setName("netDecl1")
netDecl2 = Group(netDecl2Arg + SEMI).setName("netDecl2")
netDecl3 = Group(netDecl3Arg + SEMI).setName("netDecl3")
gateType = oneOf(
"and nand or nor xor xnor buf bufif0 bufif1 "
"not notif0 notif1 pulldown pullup nmos rnmos "
"pmos rpmos cmos rcmos tran rtran tranif0 "
"rtranif0 tranif1 rtranif1"
)
gateInstance = (
Optional(Group(identifier + Optional(range)))
+ LPAR
+ Group(delimitedList(expr))
+ RPAR
)
gateDecl = Group(
gateType
+ Optional(driveStrength)
+ Optional(delay)
+ delimitedList(gateInstance)
+ SEMI
)
udpInstance = Group(
Group(identifier + Optional(range | subscrRef))
+ LPAR
+ Group(delimitedList(expr))
+ RPAR
)
udpInstantiation = Group(
identifier
- Optional(driveStrength)
+ Optional(delay)
+ delimitedList(udpInstance)
+ SEMI
).setName("udpInstantiation")
parameterValueAssignment = Group(
Literal("#") + LPAR + Group(delimitedList(expr)) + RPAR
)
namedPortConnection = Group(DOT + identifier + LPAR + expr + RPAR).setName(
"namedPortConnection"
) # .setDebug()
assert r".\abc (abc )" == namedPortConnection
modulePortConnection = expr | empty
# ~ moduleInstance = Group( Group ( identifier + Optional(range) ) +
# ~ ( delimitedList( modulePortConnection ) |
# ~ delimitedList( namedPortConnection ) ) )
inst_args = Group(
LPAR
+ (delimitedList(namedPortConnection) | delimitedList(modulePortConnection))
+ RPAR
).setName("inst_args")
moduleInstance = Group(Group(identifier + Optional(range)) + inst_args).setName(
"moduleInstance"
) # .setDebug()
moduleInstantiation = Group(
identifier
+ Optional(parameterValueAssignment)
+ delimitedList(moduleInstance).setName("moduleInstanceList")
+ SEMI
).setName("moduleInstantiation")
parameterOverride = Group("defparam" + delimitedList(paramAssgnmt) + SEMI)
task = Group(
"task" + identifier + SEMI + ZeroOrMore(tfDecl) + stmtOrNull + "endtask"
)
specparamDecl = Group("specparam" + delimitedList(paramAssgnmt) + SEMI)
pathDescr1 = Group(LPAR + subscrIdentifier + "=>" + subscrIdentifier + RPAR)
pathDescr2 = Group(
LPAR
+ Group(delimitedList(subscrIdentifier))
+ "*>"
+ Group(delimitedList(subscrIdentifier))
+ RPAR
)
pathDescr3 = Group(
LPAR
+ Group(delimitedList(subscrIdentifier))
+ "=>"
+ Group(delimitedList(subscrIdentifier))
+ RPAR
)
pathDelayValue = Group(
(LPAR + Group(delimitedList(mintypmaxExpr | expr)) + RPAR)
| mintypmaxExpr
| expr
)
pathDecl = Group(
(pathDescr1 | pathDescr2 | pathDescr3) + EQ + pathDelayValue + SEMI
).setName("pathDecl")
portConditionExpr = Forward()
portConditionTerm = Optional(unop) + subscrIdentifier
portConditionExpr << portConditionTerm + Optional(binop + portConditionExpr)
polarityOp = oneOf("+ -")
levelSensitivePathDecl1 = Group(
if_
+ Group(LPAR + portConditionExpr + RPAR)
+ subscrIdentifier
+ Optional(polarityOp)
+ "=>"
+ subscrIdentifier
+ EQ
+ pathDelayValue
+ SEMI
)
levelSensitivePathDecl2 = Group(
if_
+ Group(LPAR + portConditionExpr + RPAR)
+ LPAR
+ Group(delimitedList(subscrIdentifier))
+ Optional(polarityOp)
+ "*>"
+ Group(delimitedList(subscrIdentifier))
+ RPAR
+ EQ
+ pathDelayValue
+ SEMI
)
levelSensitivePathDecl = levelSensitivePathDecl1 | levelSensitivePathDecl2
edgeIdentifier = posedge | negedge
edgeSensitivePathDecl1 = Group(
Optional(if_ + Group(LPAR + expr + RPAR))
+ LPAR
+ Optional(edgeIdentifier)
+ subscrIdentifier
+ "=>"
+ LPAR
+ subscrIdentifier
+ Optional(polarityOp)
+ COLON
+ expr
+ RPAR
+ RPAR
+ EQ
+ pathDelayValue
+ SEMI
)
edgeSensitivePathDecl2 = Group(
Optional(if_ + Group(LPAR + expr + RPAR))
+ LPAR
+ Optional(edgeIdentifier)
+ subscrIdentifier
+ "*>"
+ LPAR
+ delimitedList(subscrIdentifier)
+ Optional(polarityOp)
+ COLON
+ expr
+ RPAR
+ RPAR
+ EQ
+ pathDelayValue
+ SEMI
)
edgeSensitivePathDecl = edgeSensitivePathDecl1 | edgeSensitivePathDecl2
edgeDescr = oneOf("01 10 0x x1 1x x0").setName("edgeDescr")
timCheckEventControl = Group(
posedge | negedge | (edge + LBRACK + delimitedList(edgeDescr) + RBRACK)
)
timCheckCond = Forward()
timCondBinop = oneOf("== === != !==")
timCheckCondTerm = (expr + timCondBinop + scalarConst) | (Optional("~") + expr)
timCheckCond << ((LPAR + timCheckCond + RPAR) | timCheckCondTerm)
timCheckEvent = Group(
Optional(timCheckEventControl)
+ subscrIdentifier
+ Optional("&&&" + timCheckCond)
)
timCheckLimit = expr
controlledTimingCheckEvent = Group(
timCheckEventControl + subscrIdentifier + Optional("&&&" + timCheckCond)
)
notifyRegister = identifier
systemTimingCheck1 = Group(
"$setup"
+ LPAR
+ timCheckEvent
+ COMMA
+ timCheckEvent
+ COMMA
+ timCheckLimit
+ Optional(COMMA + notifyRegister)
+ RPAR
+ SEMI
)
systemTimingCheck2 = Group(
"$hold"
+ LPAR
+ timCheckEvent
+ COMMA
+ timCheckEvent
+ COMMA
+ timCheckLimit
+ Optional(COMMA + notifyRegister)
+ RPAR
+ SEMI
)
systemTimingCheck3 = Group(
"$period"
+ LPAR
+ controlledTimingCheckEvent
+ COMMA
+ timCheckLimit
+ Optional(COMMA + notifyRegister)
+ RPAR
+ SEMI
)
systemTimingCheck4 = Group(
"$width"
+ LPAR
+ controlledTimingCheckEvent
+ COMMA
+ timCheckLimit
+ Optional(COMMA + expr + COMMA + notifyRegister)
+ RPAR
+ SEMI
)
systemTimingCheck5 = Group(
"$skew"
+ LPAR
+ timCheckEvent
+ COMMA
+ timCheckEvent
+ COMMA
+ timCheckLimit
+ Optional(COMMA + notifyRegister)
+ RPAR
+ SEMI
)
systemTimingCheck6 = Group(
"$recovery"
+ LPAR
+ controlledTimingCheckEvent
+ COMMA
+ timCheckEvent
+ COMMA
+ timCheckLimit
+ Optional(COMMA + notifyRegister)
+ RPAR
+ SEMI
)
systemTimingCheck7 = Group(
"$setuphold"
+ LPAR
+ timCheckEvent
+ COMMA
+ timCheckEvent
+ COMMA
+ timCheckLimit
+ COMMA
+ timCheckLimit
+ Optional(COMMA + notifyRegister)
+ RPAR
+ SEMI
)
systemTimingCheck = (
FollowedBy("$")
+ (
systemTimingCheck1
| systemTimingCheck2
| systemTimingCheck3
| systemTimingCheck4
| systemTimingCheck5
| systemTimingCheck6
| systemTimingCheck7
)
).setName("systemTimingCheck")
sdpd = (
if_
+ Group(LPAR + expr + RPAR)
+ (pathDescr1 | pathDescr2)
+ EQ
+ pathDelayValue
+ SEMI
)
specifyItem = ~Keyword("endspecify") + (
specparamDecl
| pathDecl
| levelSensitivePathDecl
| edgeSensitivePathDecl
| systemTimingCheck
| sdpd
)
"""
x::= <specparam_declaration>
x||= <path_declaration>
x||= <level_sensitive_path_declaration>
x||= <edge_sensitive_path_declaration>
x||= <system_timing_check>
x||= <sdpd>
"""
# specify ... endspecify block: zero or more timing-related items.
specifyBlock = Group(
    "specify" + ZeroOrMore(specifyItem) + "endspecify"
).setName("specifyBlock")
# One item legal inside a module body.  The ~Keyword lookahead stops the
# alternation from consuming 'endmodule'.  Instantiations must come last
# because they begin with a bare identifier and would otherwise shadow
# the keyword-introduced declarations.
moduleItem = ~Keyword("endmodule") + (
    parameterDecl
    | inputDecl
    | outputDecl
    | inoutDecl
    | regDecl
    | netDecl3
    | netDecl1
    | netDecl2
    | timeDecl
    | integerDecl
    | realDecl
    | eventDecl
    | gateDecl
    | parameterOverride
    | continuousAssign
    | specifyBlock
    | initialStmt
    | alwaysStmt
    | task
    | functionDecl
    |
    # these have to be at the end - they start with identifiers
    moduleInstantiation
    | udpInstantiation
)
""" All possible moduleItems, from Verilog grammar spec
x::= <parameter_declaration>
x||= <input_declaration>
x||= <output_declaration>
x||= <inout_declaration>
?||= <net_declaration> (spec does not seem consistent for this item)
| |
assert 'OUTPUT: hello' in res.output
assert res.exit_code == ExitCode.FAILED
def test_cli_stop_on_fail(cli, cp):
    """Verify the --stop option works correctly."""
    result = cli.invoke(build_magic, ['--verbose', '--stop', '-c', 'execute', f'{cp}', '-c', 'execute', 'echo hello'])
    # The failing cp command's error message differs per platform.
    if sys.platform == 'linux':
        assert 'cp: missing file operand' in result.output
    elif sys.platform == 'win32':
        assert 'The syntax of the command is incorrect.' in result.output
    else:
        assert 'usage: cp' in result.output or 'cp: missing file operand' in result.output
    # --stop must prevent the second command from ever running.
    assert 'OUTPUT: hello' not in result.output
    assert result.exit_code == ExitCode.FAILED
def test_cli_parameters(cli):
    """Verify the --parameter option works correctly."""
    result = cli.invoke(build_magic, ['-p', 'keytype', 'rsa', '--parameter', 'keypass', '1234', 'echo hello'])
    assert result.exit_code == ExitCode.PASSED
    assert '( 1/1 ) EXECUTE : echo hello ........................................ RUNNING' in result.output
    assert 'Stage 1 finished with result DONE' in result.output
def test_cli_parameters_invalid_parameter(cli):
    """Test the case where an invalid parameter is provided."""
    result = cli.invoke(build_magic, ['-p', 'dummy', '1234', 'echo hello'])
    assert result.output == 'Parameter dummy is not a valid parameter.\n'
    assert result.exit_code == ExitCode.INPUT_ERROR
def test_cli_parameters_invalid_parameter_value(cli):
    """Test the case where an invalid parameter value is provided."""
    result = cli.invoke(build_magic, ['-p', 'keytype', 'dummy', 'echo hello'])
    assert "Validation failed: Value dummy is not one of " in result.output
    assert result.exit_code == ExitCode.INPUT_ERROR
def test_cli_config_template(cli):
    """Verify the --template option works correctly."""
    template = Path.cwd().resolve() / 'build-magic_template.yaml'
    outcome = cli.invoke(build_magic, ['--template'])
    assert template.exists()
    template.unlink()
    assert outcome.exit_code == ExitCode.PASSED
def test_cli_template_exists(cli):
    """Test the case where a template config file cannot be generated because one already exists."""
    template = Path.cwd().resolve() / 'build-magic_template.yaml'
    template.touch()
    outcome = cli.invoke(build_magic, ['--template'])
    template.unlink()
    assert outcome.exit_code == ExitCode.INPUT_ERROR
    assert outcome.output == 'Cannot generate the config template because it already exists!\n'
def test_cli_template_permission_error(cli, mocker):
    """Test the case where a template config file cannot be generated because the user does not have permission."""
    mocker.patch('build_magic.core.generate_config_template', side_effect=PermissionError)
    outcome = cli.invoke(build_magic, ['--template'])
    assert outcome.exit_code == ExitCode.INPUT_ERROR
    assert outcome.output == "Cannot generate the config template because build-magic doesn't have permission.\n"
def test_cli_config(cli, config_file, ls):
    """Verify the --config option works correctly."""
    outcome = cli.invoke(build_magic, ['--config', str(config_file)])
    assert outcome.exit_code == ExitCode.PASSED
    expected_fragments = (
        'Starting Stage 1: Test stage',
        '( 1/2 ) EXECUTE : echo hello',
        f'( 2/2 ) EXECUTE : {ls}',
        'Stage 1: Test stage - finished with result DONE',
        'build-magic finished in',
    )
    for fragment in expected_fragments:
        assert fragment in outcome.output
def test_cli_config_multi(cli, config_file, multi_config):
    """Verify assigning multiple config files works correctly."""
    outcome = cli.invoke(build_magic, ['--config', str(config_file), '--config', str(multi_config)])
    assert outcome.exit_code == ExitCode.PASSED
    for number, name in ((1, 'Test stage'), (2, 'Stage A'), (3, 'Stage B')):
        assert f'Starting Stage {number}: {name}' in outcome.output
        assert f'Stage {number}: {name} - finished with result DONE' in outcome.output
def test_cli_config_parameters(cli, mocker, parameters_config):
    """Verify assigning parameters from a config file works correctly.

    The SSH key loading, connection and remote command execution are mocked
    so the remote runner appears to succeed with 'hello' on stdout.
    """
    mocker.patch('paramiko.ECDSAKey.from_private_key_file')
    mocker.patch('build_magic.runner.Remote.connect', return_value=paramiko.SSHClient)
    mocker.patch(
        'paramiko.SSHClient.exec_command',
        return_value=(
            None,
            MagicMock(readlines=lambda: 'hello', channel=MagicMock(recv_exit_status=lambda: 0)),
            MagicMock(readlines=lambda: '')
        )
    )
    mocker.patch('paramiko.SSHClient.close')
    res = cli.invoke(build_magic, ['--config', str(parameters_config)])
    # Removed a leftover debug print(res.output) that cluttered test output.
    assert res.exit_code == ExitCode.PASSED
    assert "Starting Stage 1" in res.output
    assert "( 1/1 ) EXECUTE : echo hello ........................................ RUNNING" in res.output
    assert "Stage 1 finished with result DONE" in res.output
def test_cli_target(cli, targets_config):
    """Verify the --target option works correctly."""
    outcome = cli.invoke(build_magic, ['-C', str(targets_config), '--target', 'Stage D', '-t', 'Stage B'])
    assert outcome.exit_code == ExitCode.PASSED
    text = outcome.output
    assert 'Stage D - Test Stage D' in text
    # Skip past Stage D's section so the next check proves Stage B ran second.
    text = text.split('\n', maxsplit=8)[-1]
    assert 'Stage B - Test Stage B' in text
    assert '( 1/1 ) EXECUTE : echo "B" .......................................... RUNNING' in outcome.output
    assert "Stage 2: Stage B - finished with result DONE" in outcome.output
def test_cli_invalid_target(cli, targets_config):
    """Test the case where an invalid target name is provided."""
    outcome = cli.invoke(build_magic, ['-C', str(targets_config), '-t', 'blarg'])
    assert outcome.exit_code == ExitCode.INPUT_ERROR
    assert outcome.output == "Target blarg not found among ['Stage A', 'Stage B', 'Stage C', 'Stage D'].\n"
def test_cli_yaml_parsing_error(cli, config_file, mocker):
    """Test the case where there's an error when parsing a config file."""
    yaml_load = mocker.patch('yaml.safe_load', side_effect=ComposerError('YAML error'))
    outcome = cli.invoke(build_magic, ['-C', str(config_file)])
    assert outcome.exit_code == ExitCode.INPUT_ERROR
    assert outcome.output == 'YAML error\n'
    assert yaml_load.call_count == 1
def test_cli_default_config_all_stages(cli, default_config):
    """Verify the "all" argument works with a default config file."""
    outcome = cli.invoke(build_magic, ['all'])
    assert outcome.exit_code == ExitCode.PASSED
    for expected in ('Starting Stage 1: build', 'Starting Stage 2: deploy', 'Starting Stage 3: release'):
        assert expected in outcome.output
def test_cli_default_config_single_stage(cli, default_config):
    """Verify running a single stage by name as an argument works with a default config file."""
    outcome = cli.invoke(build_magic, ['deploy'])
    assert outcome.exit_code == ExitCode.PASSED
    assert 'Starting Stage 1: build' not in outcome.output
    assert 'Starting Stage 1: deploy' in outcome.output
    assert 'Starting Stage 3: release' not in outcome.output
def test_cli_default_config_reorder_stages(cli, default_config):
    """Verify running stages in a custom order by arguments works with a default config file."""
    outcome = cli.invoke(build_magic, ['release', 'deploy', 'build'])
    assert outcome.exit_code == ExitCode.PASSED
    for expected in ('Starting Stage 1: release', 'Starting Stage 2: deploy', 'Starting Stage 3: build'):
        assert expected in outcome.output
def test_cli_default_config_repeat_stages(cli, default_config):
    """Verify running stages more than once by arguments works with a default config file."""
    outcome = cli.invoke(build_magic, ['release', 'release'])
    assert outcome.exit_code == ExitCode.PASSED
    for expected in ('Starting Stage 1: release', 'Starting Stage 2: release'):
        assert expected in outcome.output
def test_cli_default_config_with_targets(cli, default_config):
"""Verify running stages using the --target option works with a default config file."""
res = cli.invoke(build_magic, ['-t', 'release', '-t', 'deploy', '-t', 'build'])
out = res.output
assert res.exit_code == ExitCode.PASSED
assert 'Starting Stage 3: build' in out
assert 'Starting Stage 2: deploy' in out
assert 'Starting Stage 1: release' in out
def test_cli_default_config_repeat_stages_all(cli, default_config):
"""Verify running stages more than once by using all works with a default config file."""
res = cli.invoke(build_magic, ['all', 'build'])
out = res.output
assert res.exit_code == ExitCode.PASSED
assert 'Starting Stage 1: build' in out
assert 'Starting Stage 2: deploy' in out
assert 'Starting Stage 3: release' in out
assert 'Starting Stage 4: build' in out
def test_cli_default_config_with_ad_hoc_command(cli, default_config):
    """Verify running an ad hoc command works correctly with a default config file."""
    res = cli.invoke(build_magic, ['--name', 'test', 'echo "hello world"'])
    out = res.output
    assert res.exit_code == ExitCode.PASSED
    # Direct `in` asserts replace the redundant `(...) is True` form, which
    # adds nothing and hides the string diff in pytest failure reports.
    assert 'Starting Stage 1: test' in out
    assert 'echo "hello world"' in out
def test_cli_default_config_with_ad_hoc_command_no_quotes(cli, default_config):
    """Verify running an un-quoted ad hoc command works correctly with a default config file.

    This test covers an edge case where a default config exists, but an un-quoted ad hoc command is provided,
    causing the command to be executed n times where n is the number of args in the command.
    """
    res = cli.invoke(build_magic, ['echo', 'hello', 'world'])
    out = res.output
    assert res.exit_code == ExitCode.PASSED
    # `in`/`not in` replace the `(...) is True/False` anti-idiom: same truth
    # value, but pytest can report the actual output on failure.
    assert 'Starting Stage 1' in out
    assert 'echo hello world' in out
    # The un-quoted args must collapse into ONE command, i.e. no extra stages.
    assert 'Starting Stage 2' not in out
    assert 'Starting Stage 3' not in out
def test_cli_default_config_not_repeated(cli, default_config):
    """Test the case where the default config file is also added explicitly with the -C/--config option."""
    res = cli.invoke(build_magic, ['-C', 'build-magic.yaml', '-t', 'deploy'])
    out = res.output
    assert res.exit_code == ExitCode.PASSED
    # The stage must run exactly once even though the same config is picked up
    # both implicitly (as the default) and explicitly (via -C).
    assert 'Starting Stage 1: deploy' in out
    assert 'Starting Stage 2: deploy' not in out
def test_cli_default_config_usage(cli, default_config):
    """Verify the usage is printed when a default config file is present but no arguments are given."""
    result = cli.invoke(build_magic)
    # No stages were requested, so nothing runs and the usage text is emitted verbatim.
    assert result.exit_code == ExitCode.NO_TESTS
    assert result.output == USAGE
def test_cli_default_config_multiple_commands(cli, default_config):
    """Verify running multiple commands works when a default config file is present."""
    args = ['-c', 'execute', 'echo hello', '-c', 'execute', 'echo world']
    result = cli.invoke(build_magic, args)
    output = result.output
    assert result.exit_code == ExitCode.PASSED
    # Both -c commands must appear in the run output.
    for line in ('EXECUTE : echo hello', 'EXECUTE : echo world'):
        assert line in output
def test_cli_default_config_multiple_defaults_error(cli, default_config, second_default):
    """Test the case where an error is raised if there's more than one default config file."""
    result = cli.invoke(build_magic, ['all'])
    # Ambiguous defaults are an input error, and the message should say so.
    assert result.exit_code == ExitCode.INPUT_ERROR
    assert 'More than one config file found:' in result.output
def test_cli_variable(cli, variables_config):
"""Verify adding variables from the | |
<filename>{{cookiecutter.project_slug}}/config/settings.py
"""Base settings to build other settings files upon."""
{% if cookiecutter.use_sentry == 'y' -%}
import logging
{%- endif %}
import pathlib
from typing import Any, Dict, List, Tuple
import environs
{%- if cookiecutter.use_sentry == 'y' %}
import sentry_sdk
{%- endif %}
from marshmallow.validate import OneOf
{%- if cookiecutter.use_sentry == 'y' -%}
{%- if cookiecutter.use_celery == 'y' %}
from sentry_sdk.integrations.celery import CeleryIntegration
{%- endif %}
from sentry_sdk.integrations.django import DjangoIntegration
from sentry_sdk.integrations.logging import LoggingIntegration
{%- endif %}
# Environment-variable access: reads os.environ plus any .env file found by read_env().
env = environs.Env()
env.read_env()

# ('{{ cookiecutter.project_slug }}/config/settings.py'.parents[1] = '{{ cookiecutter.project_slug }}/')
ROOT_DIR = pathlib.Path(__file__).parents[1]
APPS_DIR = ROOT_DIR / '{{ cookiecutter.project_slug }}'
# GENERAL
# ------------------------------------------------------------------------------
# Setting is not intended to be used as is - only as a single point to toggle some environment-related values.
ENVIRONMENT_DEBUG = 'debug'
ENVIRONMENT_TEST = 'test'
ENVIRONMENT_PRODUCTION = 'production'
# Defaults to production so a missing variable never accidentally enables debug features.
PROJECT_ENVIRONMENT = env(
    'PROJECT_ENVIRONMENT',
    ENVIRONMENT_PRODUCTION,
    validate=OneOf([ENVIRONMENT_DEBUG, ENVIRONMENT_TEST, ENVIRONMENT_PRODUCTION]),  # type: ignore
)
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
# DJANGO_DEBUG can force-override; otherwise DEBUG follows the environment switch above.
DEBUG = env.bool('DJANGO_DEBUG', PROJECT_ENVIRONMENT == ENVIRONMENT_DEBUG)
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# No default on purpose: the process must fail fast if the secret is missing.
SECRET_KEY = env('DJANGO_SECRET_KEY')
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=['{{ cookiecutter.domain_name }}'])
# Local time zone. Choices are
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# though not all of them may be available with every OS.
# In Windows, this must be set to your system time zone.
TIME_ZONE = env('DJANGO_TIMEZONE', 'UTC')
# https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# DATABASES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
{% if cookiecutter.use_docker == 'y' -%}
# Under Docker the database URL always comes from the environment (compose/stack config).
DATABASES = {
    'default': env.dj_db_url('DATABASE_URL'),
}
{%- else %}
DATABASES = {
    'default': env.dj_db_url(
        'DATABASE_URL',
        default='postgres://{{cookiecutter.project_slug}}@localhost:5432/{{cookiecutter.project_slug}}',
    ),
}
{%- endif %}
# Wrap every HTTP request in a database transaction.
DATABASES['default']['ATOMIC_REQUESTS'] = True
if PROJECT_ENVIRONMENT == ENVIRONMENT_PRODUCTION:
    # Persistent connections only in production; local/test runs use per-request connections.
    DATABASES['default']['CONN_MAX_AGE'] = env.int('CONN_MAX_AGE', default=60)
# CACHES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#caches
if PROJECT_ENVIRONMENT == ENVIRONMENT_PRODUCTION:
    CACHES = {
        'default': {
            'BACKEND': 'django_redis.cache.RedisCache',
            'LOCATION': env('REDIS_URL'),
            'OPTIONS': {
                'CLIENT_CLASS': 'django_redis.client.DefaultClient',
                # Mimicking memcached behavior: treat cache errors as misses instead of raising.
                # http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
                'IGNORE_EXCEPTIONS': True,
            },
        },
    }
else:
    # In-process memory cache outside production; no external service required.
    CACHES = {
        'default': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
            'LOCATION': '',
        },
    }
# URLS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = 'config.urls'
# https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# APPS
# ------------------------------------------------------------------------------
DJANGO_APPS = [
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.admin',
]
THIRD_PARTY_APPS = [
    'allauth',
    'allauth.account',
    'allauth.socialaccount',
{%- if cookiecutter.use_whitenoise == 'n' %}
    # https://github.com/antonagestam/collectfast#installation
    'collectfast',
{%- endif %}
{% if cookiecutter.use_compressor == 'y' %}
    # https://django-compressor.readthedocs.io/en/latest/quickstart/#installation
    'compressor',
{%- endif %}
    'rest_framework',
]
# Environment-specific apps are appended at import time based on PROJECT_ENVIRONMENT.
if PROJECT_ENVIRONMENT == ENVIRONMENT_DEBUG:
    THIRD_PARTY_APPS += [
        # https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#prerequisites
        'debug_toolbar',
        # https://django-extensions.readthedocs.io/en/latest/installation_instructions.html#configuration
        'django_extensions',
    ]
elif PROJECT_ENVIRONMENT == ENVIRONMENT_PRODUCTION:
    THIRD_PARTY_APPS += [
        # https://anymail.readthedocs.io/en/stable/installation/#installing-anymail
        'anymail',
        'gunicorn',
        # https://django-storages.readthedocs.io/en/latest/#installation
        'storages',
    ]
LOCAL_APPS = [
    '{{ cookiecutter.project_slug }}.users.apps.UsersAppConfig',
    # Your stuff: custom apps go here
]
# https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
# Local apps come first so their templates/static files take precedence over third-party and Django defaults.
INSTALLED_APPS = LOCAL_APPS + THIRD_PARTY_APPS + DJANGO_APPS
# MIGRATIONS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#migration-modules
# Redirect django.contrib.sites migrations to a project-local module.
MIGRATION_MODULES = {'sites': '{{ cookiecutter.project_slug }}.contrib.sites.migrations'}
# AUTHENTICATION
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#authentication-backends
AUTHENTICATION_BACKENDS = [
    'django.contrib.auth.backends.ModelBackend',
    'allauth.account.auth_backends.AuthenticationBackend',
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-user-model
AUTH_USER_MODEL = 'users.User'
# https://docs.djangoproject.com/en/dev/ref/settings/#login-redirect-url
LOGIN_REDIRECT_URL = 'users:redirect'
# https://docs.djangoproject.com/en/dev/ref/settings/#login-url
LOGIN_URL = 'account_login'
# PASSWORDS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#password-hashers
if PROJECT_ENVIRONMENT == ENVIRONMENT_TEST:
    # MD5 is insecure but fast; used only under test to speed up user creation in fixtures.
    PASSWORD_HASHERS = ['django.contrib.auth.hashers.MD5PasswordHasher']
else:
    PASSWORD_HASHERS = [
        # https://docs.djangoproject.com/en/dev/topics/auth/passwords/#using-argon2-with-django
        'django.contrib.auth.hashers.Argon2PasswordHasher',
        'django.contrib.auth.hashers.PBKDF2PasswordHasher',
        'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
        'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
    ]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
if PROJECT_ENVIRONMENT in {ENVIRONMENT_DEBUG, ENVIRONMENT_TEST}:
    # No validators locally / under test so trivial passwords are accepted.
    AUTH_PASSWORD_VALIDATORS: List[Dict[str, Any]] = []
else:
    AUTH_PASSWORD_VALIDATORS = [
        {'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
        {'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator'},
        {'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator'},
        {'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator'},
    ]
# MIDDLEWARE
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#middleware
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
{%- if cookiecutter.use_whitenoise == 'y' %}
    # http://whitenoise.evans.io/en/latest/django.html#enable-whitenoise
    'whitenoise.middleware.WhiteNoiseMiddleware',
{%- endif %}
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
if PROJECT_ENVIRONMENT == ENVIRONMENT_DEBUG:
    # https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#middleware
    MIDDLEWARE += ['debug_toolbar.middleware.DebugToolbarMiddleware']
# SECURITY
# ------------------------------------------------------------------------------
# HTTPS/HSTS hardening applies only to production deployments.
if PROJECT_ENVIRONMENT == ENVIRONMENT_PRODUCTION:
    # https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
    # Trust the reverse proxy's X-Forwarded-Proto header when deciding if a request was HTTPS.
    SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
    # https://docs.djangoproject.com/en/dev/ref/settings/#secure-ssl-redirect
    SECURE_SSL_REDIRECT = env.bool('DJANGO_SECURE_SSL_REDIRECT', default=True)
    # https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-secure
    SESSION_COOKIE_SECURE = True
    # https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-secure
    CSRF_COOKIE_SECURE = True
    # https://docs.djangoproject.com/en/dev/topics/security/#ssl-https
    # https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-seconds
    # TODO: set this to 60 seconds first and then to 518400 once you prove the former works
    SECURE_HSTS_SECONDS = 60
    # https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-include-subdomains
    SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool('DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS', default=True)
    # https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-preload
    SECURE_HSTS_PRELOAD = env.bool('DJANGO_SECURE_HSTS_PRELOAD', default=True)
    # https://docs.djangoproject.com/en/dev/ref/middleware/#x-content-type-options-nosniff
    SECURE_CONTENT_TYPE_NOSNIFF = env.bool('DJANGO_SECURE_CONTENT_TYPE_NOSNIFF', default=True)
{% if cookiecutter.cloud_provider == 'AWS' %}
# STORAGES
# ------------------------------------------------------------------------------
# S3 storage credentials/settings are only read in production; other environments
# serve files locally and never touch these env vars.
if PROJECT_ENVIRONMENT == ENVIRONMENT_PRODUCTION:
    # https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
    AWS_ACCESS_KEY_ID = env('DJANGO_AWS_ACCESS_KEY_ID')
    # https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
    AWS_SECRET_ACCESS_KEY = env('DJANGO_AWS_SECRET_ACCESS_KEY')
    # https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
    AWS_STORAGE_BUCKET_NAME = env('DJANGO_AWS_STORAGE_BUCKET_NAME')
    # https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
    AWS_QUERYSTRING_AUTH = False
    # DO NOT change these unless you know what you're doing.
    _AWS_EXPIRY = 60 * 60 * 24 * 7  # one week, in seconds
    # https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
    AWS_S3_OBJECT_PARAMETERS = {'CacheControl': 'max-age={0}, s-maxage={0}, must-revalidate'.format(_AWS_EXPIRY)}
    # https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
    AWS_DEFAULT_ACL = None
    # https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
    AWS_S3_REGION_NAME = env('DJANGO_AWS_S3_REGION_NAME', default=None)
{% elif cookiecutter.cloud_provider == 'GCE' %}
# STORAGES
# ------------------------------------------------------------------------------
if PROJECT_ENVIRONMENT == ENVIRONMENT_PRODUCTION:
    DEFAULT_FILE_STORAGE = 'storages.backends.gcloud.GoogleCloudStorage'
    GS_BUCKET_NAME = env('DJANGO_GCE_STORAGE_BUCKET_NAME')
    GS_DEFAULT_ACL = 'publicRead'
{% endif %}
# STATIC
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR.joinpath('staticfiles'))
# https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
{%- if cookiecutter.cloud_provider == 'AWS' %}
# In production the static URL is rewritten to point at the cloud bucket.
if PROJECT_ENVIRONMENT == ENVIRONMENT_PRODUCTION:
    STATIC_URL = 'https://{0}.s3.amazonaws.com/static/'.format(AWS_STORAGE_BUCKET_NAME)
{%- elif cookiecutter.cloud_provider == 'GCE' %}
if PROJECT_ENVIRONMENT == ENVIRONMENT_PRODUCTION:
    STATIC_URL = 'https://storage.googleapis.com/{0}/static/'.format(GS_BUCKET_NAME)
{%- endif %}
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = [str(APPS_DIR.joinpath('static'))]
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = [
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
{%- if cookiecutter.use_compressor == 'y' %}
    # https://django-compressor.readthedocs.io/en/latest/quickstart/#installation
    'compressor.finders.CompressorFinder',
{%- endif %}
]
{%- if cookiecutter.cloud_provider == 'AWS' %}
if PROJECT_ENVIRONMENT == ENVIRONMENT_PRODUCTION:
    STATICFILES_STORAGE = 'config.custom_storages.StaticRootS3Boto3Storage'
{%- elif cookiecutter.use_whitenoise == 'y' %}
if PROJECT_ENVIRONMENT == ENVIRONMENT_PRODUCTION:
    STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
{%- endif %}
# MEDIA
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR.joinpath('media'))
# https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
{%- if cookiecutter.cloud_provider == 'AWS' %}
# https://docs.djangoproject.com/en/dev/ref/settings/#media-url
if PROJECT_ENVIRONMENT == ENVIRONMENT_PRODUCTION:
    MEDIA_URL = 'https://{0}.s3.amazonaws.com/media/'.format(AWS_STORAGE_BUCKET_NAME)
    DEFAULT_FILE_STORAGE = 'config.custom_storages.MediaRootS3Boto3Storage'
{%- elif cookiecutter.cloud_provider == 'GCE' %}
if PROJECT_ENVIRONMENT == ENVIRONMENT_PRODUCTION:
    MEDIA_URL = 'https://storage.googleapis.com/{0}/media/'.format(GS_BUCKET_NAME)
    # NOTE(review): MEDIA_ROOT set to a URL looks suspect (MEDIA_ROOT is normally a
    # filesystem path) — confirm this is intentional for the GCE backend.
    MEDIA_ROOT = 'https://storage.googleapis.com/{0}/media/'.format(GS_BUCKET_NAME)
{%- endif %}
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
loaders = [
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
]
if PROJECT_ENVIRONMENT != ENVIRONMENT_DEBUG:
    # Outside debug, wrap the loaders in the cached loader so templates are compiled once.
    loaders = (  # type: ignore
        'django.template.loaders.cached.Loader',
        loaders,
    )
TEMPLATES = [
    {
        # https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
        'DIRS': [str(APPS_DIR.joinpath('templates'))],
        'OPTIONS': {
            # https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
            'debug': DEBUG,
            # https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
            # https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
            'loaders': loaders,
            # https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.i18n',
                'django.template.context_processors.media',
                'django.template.context_processors.static',
                'django.template.context_processors.tz',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
# FIXTURES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#fixture-dirs
FIXTURE_DIRS = (str(APPS_DIR.joinpath('fixtures')),)
# SECURITY
# ------------------------------------------------------------------------------
# Cookie/header hardening that is safe to apply in every environment.
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-httponly
SESSION_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-httponly
CSRF_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-browser-xss-filter
SECURE_BROWSER_XSS_FILTER = True
# https://docs.djangoproject.com/en/dev/ref/settings/#x-frame-options
X_FRAME_OPTIONS = 'DENY'
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
if PROJECT_ENVIRONMENT == ENVIRONMENT_DEBUG:
{% if cookiecutter.use_mailhog == 'y' and cookiecutter.use_docker == 'y' -%}
# https://docs.djangoproject.com/en/dev/ref/settings/#email-host
EMAIL_HOST = env('EMAIL_HOST', default='mailhog')
{%- elif cookiecutter.use_mailhog == 'y' and cookiecutter.use_docker == 'n' -%}
# https://docs.djangoproject.com/en/dev/ref/settings/#email-host
EMAIL_HOST = 'localhost'
{%- else -%}
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.console.EmailBackend')
# https://docs.djangoproject.com/en/dev/ref/settings/#email-host
EMAIL_HOST = 'localhost'
{%- endif %}
# https://docs.djangoproject.com/en/dev/ref/settings/#email-port
EMAIL_PORT = 1025
elif PROJECT_ENVIRONMENT == ENVIRONMENT_TEST:
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend'
# https://docs.djangoproject.com/en/dev/ref/settings/#email-host
EMAIL_HOST = 'localhost'
# https://docs.djangoproject.com/en/dev/ref/settings/#email-port
EMAIL_PORT = 1025
elif PROJECT_ENVIRONMENT == ENVIRONMENT_PRODUCTION:
# https://docs.djangoproject.com/en/dev/ref/settings/#default-from-email
DEFAULT_FROM_EMAIL = env(
'DJANGO_DEFAULT_FROM_EMAIL', default='{{cookiecutter.project_name}} <<EMAIL>ply@{{<EMAIL>}}>',
)
# https://docs.djangoproject.com/en/dev/ref/settings/#server-email
SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-subject-prefix
EMAIL_SUBJECT_PREFIX = env('DJANGO_EMAIL_SUBJECT_PREFIX', default='[{{cookiecutter.project_name}}]')
# Anymail (Mailgun)
# ------------------------------------------------------------------------------
# https://anymail.readthedocs.io/en/stable/installation/#installing-anymail
EMAIL_BACKEND = 'anymail.backends.mailgun.EmailBackend'
# https://anymail.readthedocs.io/en/stable/installation/#anymail-settings-reference
ANYMAIL = {
'MAILGUN_API_KEY': env('MAILGUN_API_KEY'),
'MAILGUN_SENDER_DOMAIN': env('MAILGUN_DOMAIN'),
}
else:
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL.
ADMIN_URL = env('DJANGO_ADMIN_URL', 'admin/')
# https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS: List[Tuple[str, str]] = []
# https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
{% if cookiecutter.use_celery == 'y' -%}
# Celery
# ------------------------------------------------------------------------------
INSTALLED_APPS += ['{{cookiecutter.project_slug}}.taskapp.celery.CeleryAppConfig']
if USE_TZ:
    # http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-timezone
    CELERY_TIMEZONE = TIME_ZONE
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-broker_url
CELERY_BROKER_URL = env('CELERY_BROKER_URL')
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-result_backend
# Results are stored in the same broker (Redis/RabbitMQ) as the task queue.
CELERY_RESULT_BACKEND = CELERY_BROKER_URL
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-accept_content
CELERY_ACCEPT_CONTENT = ['json']
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-task_serializer
CELERY_TASK_SERIALIZER = 'json'
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-result_serializer
CELERY_RESULT_SERIALIZER = 'json'
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#task-time-limit
# TODO: set to whatever value is adequate in your circumstances
# NOTE(review): the CELERYD_* spelling is the legacy (pre-4.0) setting name —
# confirm it matches the Celery version pinned for this project.
CELERYD_TASK_TIME_LIMIT = 5 * 60
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#task-soft-time-limit
# TODO: set to whatever value is adequate in your circumstances
CELERYD_TASK_SOFT_TIME_LIMIT = 60
{%- if cookiecutter.use_docker == 'n' %}
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#task-always-eager
# Without Docker there may be no broker locally, so tasks run inline when DEBUG.
CELERY_TASK_ALWAYS_EAGER = DEBUG
{%- endif %}
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#task-eager-propagates
CELERY_TASK_EAGER_PROPAGATES = DEBUG
{%- endif %}
# django-allauth
# ------------------------------------------------------------------------------
ACCOUNT_ALLOW_REGISTRATION = env.bool('DJANGO_ACCOUNT_ALLOW_REGISTRATION', True)  # noqa: WPS425
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_AUTHENTICATION_METHOD = 'username'
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_EMAIL_REQUIRED = True
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_ADAPTER = '{{cookiecutter.project_slug}}.users.adapters.AccountAdapter'
# https://django-allauth.readthedocs.io/en/latest/configuration.html
SOCIALACCOUNT_ADAPTER = '{{cookiecutter.project_slug}}.users.adapters.SocialAccountAdapter'
# django-debug-toolbar
# ------------------------------------------------------------------------------
# https://django-debug-toolbar.readthedocs.io/en/latest/configuration.html#debug-toolbar-config
DEBUG_TOOLBAR_CONFIG = {
    'DISABLE_PANELS': ['debug_toolbar.panels.redirects.RedirectsPanel'],
    'SHOW_TEMPLATE_CONTEXT': True,
}
if PROJECT_ENVIRONMENT == ENVIRONMENT_DEBUG:
    # https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#internal-ips
    # 10.0.2.2 is the host address as seen from a VirtualBox/Vagrant guest.
    INTERNAL_IPS = ['127.0.0.1', '10.0.2.2']
{%- if cookiecutter.use_docker == 'y' %}
    if env('USE_DOCKER') == 'yes':
        import socket  # noqa: WPS433
        # Whitelist the Docker bridge gateway (container IP with last octet set to 1)
        # so the toolbar renders for requests coming through the Docker network.
        hostname, _, ips = socket.gethostbyname_ex(socket.gethostname())
        INTERNAL_IPS += [ip[:-1] + '1' for ip in ips]
{%- endif %}
{%- if cookiecutter.use_compressor == 'y' %}
# django-compressor
# ------------------------------------------------------------------------------
if PROJECT_ENVIRONMENT == ENVIRONMENT_PRODUCTION:
    # https://django-compressor.readthedocs.io/en/latest/settings/#django.conf.settings.COMPRESS_ENABLED
    COMPRESS_ENABLED = env.bool('COMPRESS_ENABLED', default=True)
    # https://django-compressor.readthedocs.io/en/latest/settings/#django.conf.settings.COMPRESS_STORAGE
    COMPRESS_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
    # https://django-compressor.readthedocs.io/en/latest/settings/#django.conf.settings.COMPRESS_URL
    COMPRESS_URL = STATIC_URL
{% endif %}
{%- if cookiecutter.use_whitenoise == 'n' %}
# Collectfast
# ------------------------------------------------------------------------------
# https://github.com/antonagestam/collectfast#installation
AWS_PRELOAD_METADATA = True
{% endif %}
# LOGGING
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#logging
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
{% if cookiecutter.use_sentry == 'n' -%}
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {'require_debug_false': {'()': 'django.utils.log.RequireDebugFalse'}},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s',
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
},
'loggers': {
'django.request': {
'handlers': [],
'level': 'ERROR',
'propagate': True,
},
'django.security.DisallowedHost': {
'level': 'ERROR',
'handlers': ['console'],
'propagate': True,
},
},
}
{% else %}
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s',
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
},
'loggers': {
'django.db.backends': {
'level': 'ERROR',
'handlers': ['console'],
'propagate': False,
},
# Errors logged by the SDK itself
'sentry_sdk': {'level': 'ERROR', 'handlers': ['console'], 'propagate': False},
'django.security.DisallowedHost': {
'level': 'ERROR',
'handlers': | |
'test_course', 'test_run', TEST_USER_ID, BRANCH_NAME_DRAFT
)
new_locator = new_course.location
# check index entry
index_info = modulestore().get_course_index_info(new_locator.course_key)
assert index_info['org'] == 'test_org'
assert index_info['edited_by'] == TEST_USER_ID
# check structure info
structure_info = modulestore().get_course_history_info(new_locator.course_key)
assert structure_info['original_version'] == index_info['versions'][BRANCH_NAME_DRAFT]
assert structure_info['previous_version'] is None
assert structure_info['edited_by'] == TEST_USER_ID
# check the returned course object
assert isinstance(new_course, CourseBlock)
assert new_course.category == 'course'
assert not new_course.show_calculator
assert new_course.allow_anonymous
assert len(new_course.children) == 0
assert new_course.edited_by == TEST_USER_ID
assert len(new_course.grading_policy['GRADER']) == 4
self.assertDictEqual(new_course.grade_cutoffs, {"Pass": 0.5})
def test_cloned_course(self):
    """
    Test making a course which points to an existing draft and published but not making any changes to either.

    Creates a new course index whose branch pointers reuse the original
    course's version GUIDs (a "clone by reference"), then verifies that a
    subsequent edit forks the clone onto a new version while leaving the
    original untouched.
    """
    original_locator = CourseLocator(org='testx', course='wonderful', run="run", branch=BRANCH_NAME_DRAFT)
    original_index = modulestore().get_course_index_info(original_locator)
    # New course whose draft/published pointers reuse the original's versions.
    new_draft = modulestore().create_course(
        'best', 'leech', 'leech_run', TEST_OTHER_USER_ID, BRANCH_NAME_DRAFT,
        versions_dict=original_index['versions'])
    new_draft_locator = new_draft.location
    self.assertRegex(new_draft_locator.org, 'best')
    # the edited_by and other meta fields on the new course will be the original author not this one
    assert new_draft.edited_by == TEST_USER_ID
    assert new_draft_locator.version_guid == original_index['versions'][BRANCH_NAME_DRAFT]
    # however the edited_by and other meta fields on course_index will be this one
    new_index = modulestore().get_course_index_info(new_draft_locator.course_key)
    assert new_index['edited_by'] == TEST_OTHER_USER_ID
    # The published branch likewise points at the original published version.
    new_published_locator = new_draft_locator.course_key.for_branch(BRANCH_NAME_PUBLISHED)
    new_published = modulestore().get_course(new_published_locator)
    assert new_published.edited_by == TEST_USER_ID
    assert new_published.location.version_guid == original_index['versions'][BRANCH_NAME_PUBLISHED]
    # changing this course will not change the original course
    # using new_draft.location will insert the chapter under the course root
    new_item = modulestore().create_child(
        TEST_OTHER_USER_ID, new_draft.location, 'chapter',
        fields={'display_name': 'new chapter'}
    )
    new_draft_locator = new_draft_locator.course_key.version_agnostic()
    new_index = modulestore().get_course_index_info(new_draft_locator)
    # The clone's draft branch must have forked onto a brand-new version GUID.
    assert new_index['versions'][BRANCH_NAME_DRAFT] != original_index['versions'][BRANCH_NAME_DRAFT]
    new_draft = modulestore().get_course(new_draft_locator)
    assert new_item.edited_by == TEST_OTHER_USER_ID
    assert new_item.location.version_guid != original_index['versions'][BRANCH_NAME_DRAFT]
    assert new_draft.location.version_guid != original_index['versions'][BRANCH_NAME_DRAFT]
    structure_info = modulestore().get_course_history_info(new_draft_locator)
    assert structure_info['edited_by'] == TEST_OTHER_USER_ID
    # ...while the original course still points at its pre-clone draft version.
    original_course = modulestore().get_course(original_locator)
    assert original_course.location.version_guid == original_index['versions'][BRANCH_NAME_DRAFT]
def test_derived_course(self):
    """
    Create a new course which overrides metadata and course_data
    """
    original_locator = CourseLocator(org='guestx', course='contender', run="run", branch=BRANCH_NAME_DRAFT)
    original = modulestore().get_course(original_locator)
    original_index = modulestore().get_course_index_info(original_locator)
    fields = {
        'grading_policy': original.grading_policy,
        'display_name': 'Derivative',
    }
    # NOTE(review): this mutates original.grading_policy in place (fields holds
    # a reference, not a copy) — presumably harmless because the original course
    # is not re-read afterwards, but confirm before reusing this pattern.
    fields['grading_policy']['GRADE_CUTOFFS'] = {'A': .9, 'B': .8, 'C': .65}
    # Derive from the original's draft version but apply field overrides.
    new_draft = modulestore().create_course(
        'counter', 'leech', 'leech_run', TEST_OTHER_USER_ID, BRANCH_NAME_DRAFT,
        versions_dict={BRANCH_NAME_DRAFT: original_index['versions'][BRANCH_NAME_DRAFT]},
        fields=fields
    )
    new_draft_locator = new_draft.location
    self.assertRegex(new_draft_locator.org, 'counter')
    # the edited_by and other meta fields on the new course will be the original author not this one
    assert new_draft.edited_by == TEST_OTHER_USER_ID
    # Unlike a pure clone, the field overrides force a new version immediately.
    assert new_draft_locator.version_guid != original_index['versions'][BRANCH_NAME_DRAFT]
    # however the edited_by and other meta fields on course_index will be this one
    new_index = modulestore().get_course_index_info(new_draft_locator.course_key)
    assert new_index['edited_by'] == TEST_OTHER_USER_ID
    assert new_draft.display_name == fields['display_name']
    self.assertDictEqual(
        new_draft.grading_policy['GRADE_CUTOFFS'],
        fields['grading_policy']['GRADE_CUTOFFS']
    )
@patch('xmodule.tabs.CourseTab.from_json', side_effect=mock_tab_from_json)
def test_update_course_index(self, _from_json):
    """
    Test the versions pointers. NOTE: you can change the org, course, or other things, but
    it's not clear how you'd find them again or associate them w/ existing student history since
    we use course_key so many places as immutable.
    """
    locator = CourseLocator(org='testx', course='GreekHero', run="run", branch=BRANCH_NAME_DRAFT)
    course_info = modulestore().get_course_index_info(locator)
    # an allowed but not necessarily recommended way to revert the draft version
    # (rewinds the draft branch pointer to the head's previous version)
    head_course = modulestore().get_course(locator)
    versions = course_info['versions']
    versions[BRANCH_NAME_DRAFT] = head_course.previous_version
    modulestore().update_course_index(None, course_info)
    course = modulestore().get_course(locator)
    assert course.location.version_guid == versions[BRANCH_NAME_DRAFT]
    # an allowed but not recommended way to publish a course
    # (points the published branch at the same version as draft)
    versions[BRANCH_NAME_PUBLISHED] = versions[BRANCH_NAME_DRAFT]
    modulestore().update_course_index(None, course_info)
    course = modulestore().get_course(locator.for_branch(BRANCH_NAME_PUBLISHED))
    assert course.location.version_guid == versions[BRANCH_NAME_DRAFT]
def test_create_with_root(self):
    """
    Verify that create_course honours an explicitly requested root block id
    and root category instead of the default 'course/course' root.
    """
    user_id = random.getrandbits(32)
    course = modulestore().create_course(
        'test_org', 'test_transaction', 'test_run', user_id, BRANCH_NAME_DRAFT,
        root_block_id='top', root_category='chapter'
    )
    assert course.location.block_id == 'top'
    assert course.category == 'chapter'

    # confirm directly against the stored structure document in the db
    structure = modulestore().db_connection.get_structure(
        course.location.as_object_id(course.location.version_guid)
    )
    assert structure is not None, "Didn't find course"
    root_key = BlockKey('chapter', 'top')
    assert BlockKey('course', 'course') not in structure['blocks']
    assert root_key in structure['blocks']
    assert structure['blocks'][root_key].block_type == 'chapter'
def test_create_id_dupe(self):
    """
    Test that create_course rejects a duplicate course id with
    DuplicateCourseError.
    """
    user = random.getrandbits(32)
    courses = modulestore().get_courses(BRANCH_NAME_DRAFT)
    # compute the duplicate key OUTSIDE the raises-block: only the
    # create_course call itself should be allowed to raise, otherwise an
    # unrelated failure in the lookup could satisfy (or confuse) the assertion
    dupe_course_key = courses[0].location.course_key
    with pytest.raises(DuplicateCourseError):
        modulestore().create_course(
            dupe_course_key.org, dupe_course_key.course, dupe_course_key.run, user, BRANCH_NAME_DRAFT
        )
@patch('xmodule.tabs.CourseTab.from_json', side_effect=mock_tab_from_json)
def test_bulk_ops_get_courses(self, _from_json):
    """
    Test get_courses when some are created, updated, and deleted w/in a bulk operation
    """
    # create 3 courses before bulk operation
    split_store = modulestore()
    user = random.getrandbits(32)
    to_be_created = split_store.make_course_key('new', 'created', 'course')
    # the three bulk operations are deliberately nested so get_courses runs
    # while all three (create / update / delete) are still pending in memory
    with split_store.bulk_operations(to_be_created):
        split_store.create_course(
            to_be_created.org, to_be_created.course, to_be_created.run, user, master_branch=BRANCH_NAME_DRAFT,
        )
        modified_course_loc = CourseLocator(org='testx', course='GreekHero', run="run", branch=BRANCH_NAME_DRAFT)
        with split_store.bulk_operations(modified_course_loc):
            modified_course = modulestore().get_course(modified_course_loc)
            modified_course.advertised_start = 'coming soon to a theater near you'
            split_store.update_item(modified_course, user)
            to_be_deleted = split_store.make_course_key("guestx", "contender", "run")
            with split_store.bulk_operations(to_be_deleted):
                split_store.delete_course(to_be_deleted, user)
                # now get_courses: must reflect all three pending operations
                courses = split_store.get_courses(BRANCH_NAME_DRAFT)
                assert len(courses) == 3
                course_ids = [course.id.for_branch(None) for course in courses]
                assert to_be_deleted not in course_ids
                assert to_be_created in course_ids
                # the pending update must be visible in the fetched course
                fetched_modified = [course for course in courses if course.id == modified_course_loc][0]
                assert fetched_modified.advertised_start == modified_course.advertised_start
class TestInheritance(SplitModuleTest):
    """
    Test the metadata inheritance mechanism.
    """
    @patch('xmodule.tabs.CourseTab.from_json', side_effect=mock_tab_from_json)
    def test_inheritance(self, _from_json):
        """
        Verify a child inherits a setting from an ancestor and that a local
        override on the child wins over the inherited value.
        """
        # Note, not testing value where defined (course) b/c there's no
        # defined accessor for it on CourseBlock.
        locator = BlockUsageLocator(
            CourseLocator(org='testx', course='GreekHero', run="run", branch=BRANCH_NAME_DRAFT), 'problem', 'problem3_2'
        )
        node = modulestore().get_item(locator)
        # inherited
        assert node.graceperiod == datetime.timedelta(hours=2)

        locator = BlockUsageLocator(
            CourseLocator(org='testx', course='GreekHero', run="run", branch=BRANCH_NAME_DRAFT), 'problem', 'problem1'
        )
        node = modulestore().get_item(locator)
        # overridden
        assert node.graceperiod == datetime.timedelta(hours=4)

    def test_inheritance_not_saved(self):
        """
        Was saving inherited settings with updated blocks causing inheritance to be sticky
        """
        # set on parent, retrieve child, verify setting
        chapter = modulestore().get_item(
            BlockUsageLocator(
                CourseLocator(org='testx', course='GreekHero', run="run", branch=BRANCH_NAME_DRAFT), 'chapter', 'chapter3'  # lint-amnesty, pylint: disable=line-too-long
            )
        )
        problem = modulestore().get_item(
            BlockUsageLocator(
                CourseLocator(org='testx', course='GreekHero', run="run", branch=BRANCH_NAME_DRAFT), 'problem', 'problem3_2'  # lint-amnesty, pylint: disable=line-too-long
            )
        )
        assert not problem.visible_to_staff_only
        chapter.visible_to_staff_only = True
        modulestore().update_item(chapter, self.user_id)
        # the child must now report the inherited value
        problem = modulestore().get_item(problem.location.version_agnostic())
        assert problem.visible_to_staff_only

        # unset on parent, retrieve child, verify unset: if the inherited
        # value had been persisted on the child, it would stick here
        chapter = modulestore().get_item(chapter.location.version_agnostic())
        del chapter.visible_to_staff_only
        modulestore().update_item(chapter, self.user_id)
        problem = modulestore().get_item(problem.location.version_agnostic())
        assert not problem.visible_to_staff_only

    def test_dynamic_inheritance(self):
        """
        Test inheritance for create_item with and without a parent pointer
        """
        course_key = CourseLocator(org='testx', course='GreekHero', run="run", branch=BRANCH_NAME_DRAFT)
        chapter = modulestore().get_item(BlockUsageLocator(course_key, 'chapter', 'chapter3'))
        chapter.visible_to_staff_only = True

        # an orphan (no parent) must NOT pick up the chapter's setting
        orphan_problem = modulestore().create_item(self.user_id, course_key, 'problem')
        assert not orphan_problem.visible_to_staff_only
        parented_problem = modulestore().create_child(self.user_id, chapter.location.version_agnostic(), 'problem')  # lint-amnesty, pylint: disable=unused-variable
        # FIXME LMS-11376
        # self.assertTrue(parented_problem.visible_to_staff_only)

        orphan_problem = modulestore().create_xblock(chapter.runtime, course_key, 'problem')
        assert not orphan_problem.visible_to_staff_only
        parented_problem = modulestore().create_xblock(chapter.runtime, course_key, 'problem', parent_xblock=chapter)
        # FIXME LMS-11376
        # self.assertTrue(parented_problem.visible_to_staff_only)
class TestPublish(SplitModuleTest):
"""
Test the publishing api
"""
@patch('xmodule.tabs.CourseTab.from_json', side_effect=mock_tab_from_json)
def test_publish_safe(self, _from_json):
    """
    Test the standard patterns: publish to new branch, revise and publish
    """
    source_course = CourseLocator(org='testx', course='GreekHero', run="run", branch=BRANCH_NAME_DRAFT)
    dest_course = CourseLocator(org='testx', course='GreekHero', run="run", branch=BRANCH_NAME_PUBLISHED)
    head = source_course.make_usage_key('course', "head12345")
    chapter1 = source_course.make_usage_key('chapter', 'chapter1')
    chapter2 = source_course.make_usage_key('chapter', 'chapter2')
    chapter3 = source_course.make_usage_key('chapter', 'chapter3')
    # publish the course root while excluding the chapter2/chapter3 subtrees
    modulestore().copy(self.user_id, source_course, dest_course, [head], [chapter2, chapter3])
    expected = [BlockKey.from_usage_key(head), BlockKey.from_usage_key(chapter1)]
    unexpected = [
        BlockKey.from_usage_key(chapter2),
        BlockKey.from_usage_key(chapter3),
        BlockKey("problem", "problem1"),
        BlockKey("problem", "problem3_2")
    ]
    self._check_course(source_course, dest_course, expected, unexpected)

    # add a child under chapter1
    new_module = modulestore().create_child(
        self.user_id, chapter1, "sequential",
        fields={'display_name': 'new sequential'},
    )
    # remove chapter1 from expected b/c its pub'd version != the source anymore since source changed
    expected.remove(BlockKey.from_usage_key(chapter1))
    # check that it's not in published course
    with pytest.raises(ItemNotFoundError):
        modulestore().get_item(new_module.location.map_into_course(dest_course))
    # publish it
    modulestore().copy(self.user_id, source_course, dest_course, [new_module.location], None)
    expected.append(BlockKey.from_usage_key(new_module.location))
    # check that it is in the published course and that its parent is the chapter
    pub_module = modulestore().get_item(new_module.location.map_into_course(dest_course))
    assert modulestore().get_parent_location(pub_module.location).block_id == chapter1.block_id

    # ensure intentionally orphaned blocks work (e.g., course_info)
    new_module = modulestore().create_item(
        self.user_id, source_course, "course_info", block_id="handouts"
    )
    # publish it
    modulestore().copy(self.user_id, source_course, dest_course, [new_module.location], None)
    expected.append(BlockKey.from_usage_key(new_module.location))
    # check that it is in the published course (no error means it worked)
    pub_module = modulestore().get_item(new_module.location.map_into_course(dest_course))
    self._check_course(source_course, dest_course, expected, unexpected)
def test_exceptions(self):
    """
    Test the exceptions which preclude successful publication
    """
    source_course = CourseLocator(org='testx', course='GreekHero', run="run", branch=BRANCH_NAME_DRAFT)
    # destination does not exist
    destination_course = CourseLocator(org='fake', course='Unknown', run="run", branch=BRANCH_NAME_PUBLISHED)
    head = source_course.make_usage_key('course', "head12345")
    chapter3 = source_course.make_usage_key('chapter', 'chapter3')
    problem1 = source_course.make_usage_key('problem', 'problem1')
    # copying into a nonexistent course must fail
    with pytest.raises(ItemNotFoundError):
        modulestore().copy(self.user_id, source_course, destination_course, [chapter3], None)
    # publishing into a new branch w/o publishing the root
    destination_course = CourseLocator(org='testx', course='GreekHero', run='run', branch=BRANCH_NAME_PUBLISHED)
    with pytest.raises(ItemNotFoundError):
        modulestore().copy(self.user_id, source_course, destination_course, [chapter3], None)
    # publishing a subdag w/o the parent already in course: first publish the
    # root while excluding chapter3 (problem1's ancestor stays unpublished)
    modulestore().copy(self.user_id, source_course, destination_course, [head], [chapter3])
    with pytest.raises(ItemNotFoundError):
        modulestore().copy(self.user_id, source_course, destination_course, [problem1], [])
@patch('xmodule.tabs.CourseTab.from_json', side_effect=mock_tab_from_json)
def test_move_delete(self, _from_json):
"""
Test publishing moves and deletes.
"""
source_course = CourseLocator(org='testx', course='GreekHero', run='run', branch=BRANCH_NAME_DRAFT)
dest_course = CourseLocator(org='testx', course='GreekHero', run='run', branch=BRANCH_NAME_PUBLISHED)
head = source_course.make_usage_key('course', "head12345")
chapter2 = source_course.make_usage_key('chapter', 'chapter2')
problem1 = source_course.make_usage_key('problem', 'problem1')
modulestore().copy(self.user_id, source_course, dest_course, [head], [chapter2])
expected = [
BlockKey("course", "head12345"),
BlockKey("chapter", "chapter1"),
BlockKey("chapter", "chapter3"),
BlockKey("problem", "problem1"),
BlockKey("problem", "problem3_2"),
]
self._check_course(source_course, dest_course, expected, [BlockKey("chapter", "chapter2")])
# now move problem1 and delete | |
# Source repository: floodlight-sports/floodlight
import os.path
import warnings
from typing import Dict, Tuple, Union
from pathlib import Path
import numpy as np
import pandas as pd
from lxml import etree
from floodlight.io.utils import download_from_url, get_and_convert
from floodlight.core.code import Code
from floodlight.core.events import Events
from floodlight.core.pitch import Pitch
from floodlight.core.xy import XY
from floodlight.settings import DATA_DIR
# ----------------------------- StatsPerform Open Format -------------------------------
def _create_metadata_from_open_csv_df(
    csv_df: pd.DataFrame,
) -> Tuple[Dict[int, tuple], Pitch]:
    """Creates meta information from a pd.DataFrame that results from parsing the open
    StatsPerform event data csv file.

    Parameters
    ----------
    csv_df: pd.DataFrame
        Data Frame with the parsed event data csv file.

    Returns
    -------
    periods: Dict[int, tuple]
        Dictionary with start and end frames of all segments:
        ``periods[segment] = (startframe, endframe)``.
    pitch: Pitch
        Playing Pitch object.
    """
    # create pitch from the dimension columns (identical in every row)
    pi_len = csv_df["pitch_dimension_long_side"].values[0]
    pi_wid = csv_df["pitch_dimension_short_side"].values[0]
    pitch = Pitch.from_template(
        "statsperform_open",
        length=pi_len,
        width=pi_wid,
        sport="football",
    )

    # create periods for segments, coded as jumps in the frame sequence
    periods = {}
    frame_values = csv_df["frame_count"].unique()
    # np.where returns a tuple of index arrays; take the integer index array
    # explicitly instead of relying on np.insert/np.append flattening the tuple
    seg_idx = np.where(np.diff(frame_values, prepend=frame_values[0]) > 1)[0]
    seg_idx = np.insert(seg_idx, 0, 0)
    seg_idx = np.append(seg_idx, len(frame_values))
    for segment in range(len(seg_idx) - 1):
        start = int(frame_values[seg_idx[segment]])
        end = int(frame_values[seg_idx[segment + 1] - 1])
        periods[segment] = (start, end)

    return periods, pitch
def _create_links_from_open_csv_df(
csv_df: pd.DataFrame, team_ids: Dict[str, float]
) -> Dict[str, Dict[int, int]]:
"""Checks the entire parsed open StatsPerform event data csv file for unique jIDs
(jerseynumbers) and creates a dictionary linking jIDs to xIDs in ascending order.
Parameters
----------
csv_df: pd.DataFrame
Data Frame with the parsed event data csv file.
team_ids: Dict[str, float]
Dictionary that stores the StatsPerform team id of the Home and Away team
Returns
-------
links: Dict[str, Dict[int, int]]
A link dictionary of the form ``links[team][jID] = xID``.
"""
links = {}
for team in team_ids:
links[team] = {
int(jID): xID
for xID, jID in enumerate(
csv_df[csv_df["team_id"] == team_ids[team]]["jersey_no"].unique()
)
}
return links
def _read_open_event_csv_single_line(
line: str,
) -> Tuple[Dict, str, str]:
"""Extracts all relevant information from a single line of StatsPerform's Event csv
file (i.e. one single event in the data).
Parameters
----------
line: str
One full line from StatsPerform's Event csv file.
Returns
-------
event: Dict
Dictionary with relevant event information in the form:
``event[attribute] = value``.
"""
event = {}
attrib = line.split(sep=",")
# description
event["eID"] = attrib[5].replace(" ", "")
# relative time
event["gameclock"] = float(attrib[4])
event["frameclock"] = float(attrib[2])
# segment, player and team
segment = attrib[3]
team = attrib[9]
event["tID"] = team
event["pID"] = attrib[8]
# outcome
event["outcome"] = np.nan
if "Won" in attrib[5].split(" "):
event["outcome"] = 1
elif "Lost" in attrib[5].split(" "):
event["outcome"] = 0
# minute and second of game
event["minute"] = np.floor(event["gameclock"] / 60)
event["second"] = np.floor(event["gameclock"] - event["minute"] * 60)
# additional information (qualifier)
event["qualifier"] = {
"event_id": attrib[1],
"event_type_id": attrib[6],
"sequencenumber": attrib[7],
"jersey_no": attrib[10],
"is_pass": attrib[11],
"is_cross": attrib[12],
"is_corner": attrib[13],
"is_free_kick": attrib[14],
"is_goal_kick": attrib[15],
"passtypeid": attrib[16],
"wintypeid": attrib[17],
"savetypeid": attrib[18],
"possessionnumber": attrib[19],
}
return event, team, segment
def create_links_from_open_tracking_data_csv(
    filepath_tracking: Union[str, Path]
) -> Dict[str, Dict[int, int]]:
    """Parses the entire open StatsPerform tracking data csv file for unique
    jIDs (jerseynumbers) and creates a dictionary linking jIDs to xIDs in
    ascending order.

    Parameters
    ----------
    filepath_tracking: str or pathlib.Path
        csv file where the position data in StatsPerform format is saved.

    Returns
    -------
    links: Dict[str, Dict[int, int]]
        A link dictionary of the form ``links[team][jID] = xID``.
    """
    # read the tracking file into a pd.DataFrame
    tracking_df = pd.read_csv(str(filepath_tracking))

    # known team ids plus the id used for the ball
    team_ids = {"Home": 1.0, "Away": 2.0}
    ball_id = 4

    # warn about any id that is neither one of the two teams nor the ball
    for ID in tracking_df["team_id"].unique():
        if ID not in team_ids.values() and ID != ball_id:
            warnings.warn(f"Team ID {ID} did not match any of the standard IDs!")

    return _create_links_from_open_csv_df(tracking_df, team_ids)
def read_open_event_data_csv(
    filepath_events: Union[str, Path],
) -> Tuple[Events, Events, Events, Events]:
    """Parses an open StatsPerform Match Event csv file and extracts the event data.

    This function provides a high-level access to the particular openly published
    StatsPerform match events csv file (e.g. for the Pro Forum '22) and returns Event
    objects for both teams.

    Parameters
    ----------
    filepath_events: str or pathlib.Path
        Full path to the file where the Event data in StatsPerform csv format is
        saved.

    Returns
    -------
    data_objects: Tuple[Events, Events, Events, Events]
        Events-objects for both teams and both halves, in the order
        (home_ht1, home_ht2, away_ht1, away_ht2).

    Notes
    -----
    StatsPerform's open format of handling provides certain additional event attributes,
    which attach additional information to certain events. As of now, these information
    are parsed as a string in the ``qualifier`` column of the returned DataFrame and can
    be transformed to a dict of form ``{attribute: value}``.
    """
    # initialize bins and variables; events are collected in plain lists and
    # turned into DataFrames once at the end, since row-wise
    # pd.DataFrame.append is quadratic and was removed in pandas 2.0
    teams = ["1.0", "2.0"]
    segments = ["1", "2"]
    events = {team: {segment: [] for segment in segments} for team in teams}

    # parse event data
    with open(str(filepath_events), "r") as f:
        while True:
            line = f.readline()
            # terminate if at end of file
            if len(line) == 0:
                break
            # skip the head
            if line.split(sep=",")[3] == "current_phase":
                continue
            # read single line
            event, team, segment = _read_open_event_csv_single_line(line)
            # insert to bin
            if team:
                events[team][segment].append(event)
            else:  # if no clear assignment possible, insert to bins for both teams
                for team in teams:
                    events[team][segment].append(event)

    # assembly
    t1_ht1 = Events(
        events=pd.DataFrame(events["1.0"]["1"]),
    )
    t1_ht2 = Events(
        events=pd.DataFrame(events["1.0"]["2"]),
    )
    t2_ht1 = Events(
        events=pd.DataFrame(events["2.0"]["1"]),
    )
    t2_ht2 = Events(
        events=pd.DataFrame(events["2.0"]["2"]),
    )
    data_objects = (t1_ht1, t1_ht2, t2_ht1, t2_ht2)

    return data_objects
def read_open_tracking_data_csv(
filepath_tracking: Union[str, Path],
links: Dict[str, Dict[int, int]] = None,
) -> Tuple[XY, XY, XY, XY, XY, XY, Code, Code, Pitch]:
"""Parses an open StatsPerform csv file and extract position data and possession
codes as well as pitch information.
Openly published StatsPerform position data (e.g. for the Pro Forum '22) is stored
in a csv file containing all position data (for both halves) as well as information
about players, the pitch, and the ball possession. This function provides a
high-level access to StatsPerform data by parsing the csv file.
Parameters
----------
filepath_tracking: str or pathlib.Path
Full path to the csv file.
links: Dict[str, Dict[int, int]], optional
A link dictionary of the form ``links[team][jID] = xID``. Player's are
identified in StatsPerform files via jID, and this dictionary is used to map
them to a specific xID in the respective XY objects. Should be supplied if that
order matters. If None is given (default), the links are automatically extracted
from the csv file at the cost of a second pass through the entire file.
Returns
-------
data_objects: Tuple[XY, XY, XY, XY, XY, XY, Code, Code, Pitch]
XY-, Code-, and Pitch-objects for both teams and both halves. The order is
(home_ht1, home_ht2, away_ht1, away_ht2, ball_ht1, ball_ht2,
possession_ht1, possession_ht2, pitch)
"""
# parse the csv file into pd.DataFrame
dat_df = pd.read_csv(str(filepath_tracking))
# initialize team and ball ids
team_ids = {"Home": 1.0, "Away": 2.0}
ball_id = 4
# check for additional tIDs
for ID in dat_df["team_id"].unique():
if not (ID in team_ids.values() or ID == ball_id):
warnings.warn(f"Team ID {ID} did not match any of the standard IDs!")
# create or check links
if links is None:
links = _create_links_from_open_csv_df(dat_df, team_ids)
else:
pass
# potential check vs jerseys in dat file
# create periods and pitch
periods, pitch = _create_metadata_from_open_csv_df(dat_df)
segments = list(periods.keys())
# infer data shapes
number_of_players = {team: len(links[team]) for team in links}
number_of_frames = {}
for segment in segments:
start = periods[segment][0]
end = periods[segment][1]
number_of_frames[segment] = end - start + 1
# bins
codes = {"possession": {segment: [] for segment in segments}}
xydata = {
"Home": {
segment: np.full(
[
number_of_frames[segment],
number_of_players[list(links.keys())[0]] * 2,
],
np.nan,
)
for segment in periods
},
"Away": {
segment: np.full(
[
number_of_frames[segment],
number_of_players[list(links.keys())[1]] * 2,
],
np.nan,
)
for segment in periods
},
"Ball": {
segment: np.full([number_of_frames[segment], 2], np.nan)
for segment in periods
},
}
# loop
for segment in segments:
# teams
for team in team_ids:
team_df = dat_df[dat_df["team_id"] == team_ids[team]]
for pID in team_df["player_id"].unique():
# extract player information
pl_df = team_df[team_df["player_id"] == pID]
frames = pl_df["frame_count"].values
x_position = pl_df["pos_x"].values
y_position = pl_df["pos_y"].values
# compute appearance of player in segment
appearance = np.array(
[
(periods[segment][0] <= | |
# (extraction artifact removed: GitHub stars marker "1-10")
import numpy as np
import copy
from models.get_model import get_model
from util.collect_stat import CollectStatistics
from util.utils import get_indices_each_node_case
from util.sampling import MinibatchSampling
from data_reader.dataset import get_data
from control_algorithm.gtop_k import GTopK
from control_algorithm.periodic_k import PERIODIC_K
from control_algorithm.online_gradient_descent import ONLINE_GRADIENT_DESCENT
import random
from control_algorithm.mab_exp3 import EXP3
from control_algorithm.continuous_bandit import CONTINUOUS_BANDIT
import time
# Configurations are now in a separate config.py file
from config import *
"""
The above are configuration scripts, the "real" program starts here
"""
# ---------------------------------------------------------------------------
# Model and experiment initialization for a single simulation run.
# All configuration names (model_name, step_size, batch_size, n_nodes, ...)
# come from config.py, imported with * above.
# ---------------------------------------------------------------------------
model = get_model(model_name)
if hasattr(model, 'create_graph'):
    model.create_graph(learning_rate=step_size)

sim = 0  # simulation index; reused as the random seed below
MAX_CASE = 4  # number of supported data-distribution cases
case = 1  # data-distribution case selected for this run
# NOTE(review): `case` is reused as the loop variable in the data_size loop
# below, which leaves it equal to MAX_CASE - 1 afterwards rather than 1 --
# confirm which case the client-initialization loop is meant to use.

if batch_size < total_data:  # Read all data once when using stochastic gradient descent
    train_image, train_label, test_image, test_label, train_label_orig = get_data(dataset, total_data,
                                                                                  dataset_file_path)
    indices_each_node_case = get_indices_each_node_case(n_nodes, MAX_CASE, train_label_orig)

stat = CollectStatistics(results_file_name=single_run_results_file_path)

data_size = np.zeros([MAX_CASE, n_nodes])  # Data size for different cases and different n
for case in range(MAX_CASE):
    for n in range(n_nodes):
        data_size[case][n] = len(indices_each_node_case[case][n])

unique_labels = np.unique(train_label,axis=0).tolist()

dim_w = model.get_weight_dimension(train_image, train_label)  # total number of model weights
w_global_init = model.get_init_weight(dim_w, rand_seed=sim)
w_global = copy.deepcopy(w_global_init)
random.seed(sim)
w_global_prev = copy.deepcopy(w_global)

total_time = 0  # Actual total time, where use_fixed_averaging_slots has no effect

# per-client state, filled in the initialization loop below
sampler_list = []
train_indices_list = []
w_list = []
# -------------------------------------- initialize each client
# For every client n, set up: a mini-batch sampler (or None when doing
# full-batch gradient descent), its initial training indices, and a local
# copy of the initial global weights.
for n in range(0, n_nodes):
    indices_this_node = indices_each_node_case[case][n]
    if batch_size >= total_data:
        # full (deterministic) gradient descent: train on all local samples
        sampler = None
        train_indices = indices_this_node
    else:
        if batch_size > len(indices_this_node):
            sampler = MinibatchSampling(indices_this_node, len(indices_this_node), sim)  # For nodes whose sample length are less than batch size
        else:
            sampler = MinibatchSampling(indices_this_node, batch_size, sim)
        train_indices = None  # To be defined later
    sampler_list.append(sampler)
    train_indices_list.append(train_indices)
    w_list.append(copy.deepcopy(w_global_init))
grad_array = np.zeros([n_nodes, dim_w])  # per-client accumulated (residual) gradients

# -------------------------------------- initialize compression method
# `k` is the number of gradient coordinates transmitted per round; the
# chosen comp_method / k_adaptive_method pair determines how k is picked
# and (optionally) adapted during training.
print("compression method:", comp_method)
print("adaptive method:", k_adaptive_method)
tau_setup = 1  # number of local iterations between global aggregations
if comp_method == 'Always_send_all':
    # no compression: transmit every weight dimension
    k = dim_w
elif comp_method == 'FedAvg':
    # FedAvg trades communication for extra local steps instead of sparsifying
    tau_setup = int(np.ceil(dim_w / k_init / 2))
    print('FedAvg:', tau_setup)
elif comp_method == 'U_top_K' or comp_method == 'FAB_top_K':
    g_top_k = GTopK(dim_w)
    if k_adaptive_method == 'OGD_SIGN' or k_adaptive_method == 'OGD_VALUE':
        # online-gradient-descent adaptation of k; an auxiliary, slightly
        # smaller k_aux is tracked to estimate the cost gradient w.r.t. k
        loss = np.zeros([n_nodes])
        loss_prev = np.zeros([n_nodes])
        loss_aux = np.zeros([n_nodes])
        if k_init is None:
            k = int(np.floor(compression_ratio_init * dim_w))  # k_up
        else:
            k = k_init
        k_prev = k
        k_down = k
        k_aux = int(np.ceil(k * 0.9))
        k_aux_down = k_aux
        ogd = ONLINE_GRADIENT_DESCENT(int(np.round(dim_w * 0.002)), dim_w)  # Min. cannot be 0, use 0.2% of all weights as min.
        w_global_aux = copy.deepcopy(w_global)
    elif k_adaptive_method == 'EXP3':
        # multi-armed bandit (EXP3) choosing k from a discrete candidate list
        loss = np.zeros([n_nodes])
        loss_prev = np.zeros([n_nodes])
        k_list = [i+10 for i in range(dim_w-10)]
        T = 5780  # 1000
        ep3 = EXP3(dim_w, T, k_list)
        k = ep3.pick_choice()  # Initial value
        prob = copy.deepcopy(ep3.prob)
        k_down = k
    elif k_adaptive_method == 'Continuous_bandit':
        # continuous-action bandit choosing k within [10, dim_w]
        loss = np.zeros([n_nodes])
        loss_prev = np.zeros([n_nodes])
        action_dimension = 1
        max_k = np.ones(action_dimension)*dim_w
        min_k = np.zeros(action_dimension)+10
        T = 5780  # 1000
        cb = CONTINUOUS_BANDIT(action_dimension, max_k, min_k, T)
        k = cb.get_initial_action()  # Get initial compression ratio
        k_down = k
    elif k_adaptive_method == 'NONE':
        # fixed k for the entire run
        if k_init is None:
            k = int(np.floor(compression_ratio_init * dim_w))  # k_up
        else:
            k = k_init
        k_prev = k
        k_down = k
    else:
        raise Exception("Unknown adaptive compression name")
elif comp_method == 'FUB_top_K':
    g_top_k = GTopK(dim_w)
    if k_init is None:
        k = int(np.floor(compression_ratio_init * dim_w))  # k_up
    else:
        k = k_init
    k_prev = k
elif comp_method == 'PERIODIC_K':
    # periodically rotating mask over the weight dimensions
    if k_init is None:
        k = int(np.floor(compression_ratio_init * dim_w))  # k_up
    else:
        k = k_init
    k_prev = k
    pk = PERIODIC_K(k, dim_w)
else:
    raise Exception("Unknown compression name")

num_iter = 0  # total local-iteration counter across the run
expr_start_time = time.time()
# Loop for multiple rounds of local iterations + global aggregation
while True:
num_iter = num_iter + tau_setup
if (comp_method == 'U_top_K' or comp_method == 'FAB_top_K') and (k_adaptive_method == 'OGD_SIGN'or k_adaptive_method == 'OGD_VALUE'):
w_global_prev_2 = copy.deepcopy(w_global_prev)
w_global_prev = copy.deepcopy(w_global)
train_indices_current_list = []
for n in range(0, n_nodes):
train_indices = train_indices_list[n]
use_consecutive_training = False
if tau_setup > 1:
use_consecutive_training = True
for i in range(0, tau_setup):
if batch_size < total_data:
sample_indices = sampler_list[n].get_next_batch()
train_indices = sample_indices
train_indices_current_list.append(train_indices)
if use_consecutive_training:
model.run_one_step_consecutive_training(train_image, train_label, train_indices)
else:
grad = model.gradient(train_image, train_label, w_list[n], train_indices)
grad_array[n] += grad
if tau_setup > 1:
w_list[n] = w_list[n] - step_size * grad # For multiple tau
if use_consecutive_training:
w_list[n] = model.end_consecutive_training_and_get_weights()
if comp_method == 'U_top_K':
g_global, mask, local_mask = g_top_k.general_top_k(k, k_down, grad_array, n_nodes, data_size[case])
w_global = w_global_prev - step_size * g_global
if k_adaptive_method == 'OGD_SIGN' or k_adaptive_method == 'OGD_VALUE':
g_global_aux, _, _ = g_top_k.general_top_k(k_aux, k_aux_down, grad_array, n_nodes, data_size[case])
w_global_aux = w_global_prev - step_size * g_global_aux
elif comp_method == 'FAB_top_K':
import time
g_global, mask, local_mask = g_top_k.fairness_aware_top_k(k, k_down, grad_array, n_nodes, data_size[case])
w_global = w_global_prev - step_size * g_global
if k_adaptive_method == 'OGD_SIGN' or k_adaptive_method == 'OGD_VALUE':
g_global_aux, _, _ = g_top_k.fairness_aware_top_k(k_aux, k_aux_down, grad_array, n_nodes,
data_size[case])
w_global_aux = w_global_prev - step_size * g_global_aux
elif comp_method == "FUB_top_K":
g_global, mask, local_mask = g_top_k.global_top_k(k, grad_array, n_nodes, data_size[case])
w_global = w_global_prev - step_size * g_global
elif comp_method == 'PERIODIC_K':
mask = pk.generate_mask()
grad_array_ = np.multiply(grad_array, mask)
g_global = np.dot(data_size[case], grad_array_)/sum(data_size[case])
w_global = w_global_prev - step_size * g_global
mask_r = np.zeros(dim_w)
mask_r[mask == 0] = 1
grad_array = np.multiply(grad_array, mask_r) # Update gradient residual
else:
if use_consecutive_training:
w_global = np.dot(data_size[case], w_list) / sum(data_size[case])
else:
g_global = np.dot(data_size[case], grad_array) / sum(data_size[case])
w_global = w_global_prev - step_size * g_global
grad_array = np.zeros([n_nodes, dim_w])
# ----------------------- synchronize weights among all clients
if True in np.isnan(w_global):
print('*** w_global is NaN, using previous value')
w_global = copy.deepcopy(w_global_prev) # If current w_global contains NaN value, use previous w_global
for n in range(0, n_nodes):
w_list[n] = copy.deepcopy(w_global)
# ------------------------ find the next k
if comp_method == 'U_top_K' or comp_method == 'FAB_top_K':
if k_adaptive_method == 'OGD_SIGN' or k_adaptive_method == 'OGD_VALUE':
for n in range(0, n_nodes):
train_indices = train_indices_current_list[n]
tmp_indices = [random.choice(train_indices)]
loss[n] = model.loss(train_image, train_label, w_list[n], tmp_indices)
loss_prev[n] = model.loss(train_image, train_label, w_global_prev, tmp_indices) # Get the loss base on new mini-batch
loss_aux[n] = model.loss(train_image, train_label, w_global_aux, tmp_indices)
global_loss = np.sum(np.multiply(loss, data_size[case])) / sum(data_size[case]) # Get the global loss in aggregator by collecting local losses
global_loss_prev = np.sum(np.multiply(loss_prev, data_size[case])) / sum(data_size[case]) # Get the global loss base on new mini-batch
global_loss_aux = np.sum(np.multiply(loss_aux, data_size[case])) / sum(data_size[case])
k_prev = k
cost = 0
if num_iter > 2:
cost = comp_time + comm_time
if np.isnan(global_loss) or np.isnan(global_loss_aux):
print('*** loss is NaN!')
w_global = copy.deepcopy(w_global_prev)
w_global_prev = copy.deepcopy(w_global_prev_2)
cost_aux = None
elif global_loss_prev - global_loss > 0 and global_loss_prev - global_loss_aux > 0:
cost_aux = (comp_time + comm_time_aux) * (global_loss_prev - global_loss) / (global_loss_prev - global_loss_aux)
if np.isnan(cost_aux):
cost_aux = None
else:
cost_aux = None
print('global_loss_prev:', global_loss_prev)
print('global_loss:', global_loss)
if k_adaptive_method == 'OGD_SIGN':
k, k_aux = ogd.tuning_k_grad_sign(k, k_aux, cost, cost_aux, num_iter)
else: # 'VALUE'
k, k_aux = ogd.tuning_k_grad_value(k, k_aux, cost, cost_aux, num_iter)
k_down = k
k_aux_down = k_aux
elif k_adaptive_method == 'EXP3':
for n in range(0, n_nodes):
train_indices = train_indices_current_list[n]
tmp_indices = [random.choice(train_indices)]
loss[n] = model.loss(train_image, train_label, w_list[n], tmp_indices)
loss_prev[n] = model.loss(train_image, train_label, w_global_prev, tmp_indices) # Get the loss base on new mini-batch
global_loss = np.sum(np.multiply(loss, data_size[case])) / sum(data_size[case]) # Get the global loss in aggregator by collecting local losses
global_loss_prev = np.sum(np.multiply(loss_prev, data_size[case])) / sum(data_size[case]) # Get the global loss base on new mini-batch
reward = 0
k_prev = k
if num_iter > 2:
reward = (comp_time + comm_time) / (global_loss - global_loss_prev) # To be maximized
k, prob = ep3.step(k_prev, reward) # Get next k
print('EXP3 rewards:' + str(reward) + ', next k:' + str(k))
k_down = k
elif k_adaptive_method == 'Continuous_bandit':
for n in range(0, n_nodes):
train_indices = train_indices_current_list[n]
tmp_indices = [random.choice(train_indices)]
loss[n] = model.loss(train_image, train_label, w_list[n], tmp_indices)
loss_prev[n] = model.loss(train_image, train_label, w_global_prev, tmp_indices) # Get the loss base on new mini-batch
global_loss = np.sum(np.multiply(loss, data_size[case])) / sum(data_size[case]) # Get the global loss in aggregator by collecting local losses
global_loss_prev = np.sum(np.multiply(loss_prev, data_size[case])) / sum(data_size[case]) # Get the global loss base on new mini-batch
cost = 0
k_prev = k
if num_iter > 2:
if global_loss_prev - global_loss == 0:
cost = None
else:
cost = (comp_time + comm_time) / (global_loss_prev - global_loss) # To be minimized
k = cb.get_next_action(cost)
print('Continuous_bandit cost:' + str(cost) + ', next k:' + str(k))
k_down = k
# ------------------------ accumulate residual gradients and collect information
if comp_method == "U_top_K" or comp_method == 'FAB_top_K' or comp_method == "FUB_top_K": # Update residuals
for i in range(n_nodes):
local_mask[i][mask == 0] = 0
local_mask_r = np.zeros([n_nodes, dim_w])
local_mask_r[local_mask == 0] = 1
grad_array = np.multiply(grad_array, local_mask_r) # Update gradient residual
k_down_actual = | |
#
# Copyright (c) 2016 Nutanix Inc. All rights reserved.
#
# Thread-safety: this class is not thread-safe.
#
import glob
import logging
import os
import random
import time
from collections import namedtuple
import gflags
from curie.acropolis_node import AcropolisNode
from curie.acropolis_types import AcropolisTaskInfo
from curie.acropolis_unix_vm import AcropolisUnixVm
from curie.cluster import Cluster
from curie.curie_error_pb2 import CurieError
from curie.curie_metrics_pb2 import CurieMetric
from curie.exception import CurieException, CurieTestException
from curie.goldimage_manager import GoldImageManager
from curie.log import CHECK, patch_trace
from curie.metrics_util import MetricsUtil
from curie.name_util import CURIE_GOLDIMAGE_VM_DISK_PREFIX, NameUtil
from curie.node import NodePropertyNames
from curie.nutanix_cluster_dp_mixin import NutanixClusterDPMixin
from curie.task import PrismTask, PrismTaskPoller, TaskPoller, TaskStatus
from curie.util import CurieUtil
from curie.vm import VmDescriptor, VmParams
log = logging.getLogger(__name__)
patch_trace()
FLAGS = gflags.FLAGS
class AcropolisCluster(NutanixClusterDPMixin, Cluster):
__CURIE_METRIC_NAME_MAP__ = {
"CpuUsage.Avg.Percent": "hypervisor_cpu_usage_ppm",
"CpuUsage.Avg.Megahertz": "hypervisor_cpu_usage_ppm",
"MemUsage.Avg.Percent": "hypervisor_memory_usage_ppm",
"NetReceived.Avg.KilobytesPerSecond": "hypervisor_num_received_bytes",
"NetTransmitted.Avg.KilobytesPerSecond":
"hypervisor_num_transmitted_bytes",
}
@classmethod
def identifier_to_cluster_uuid(cls, rest_client, cluster_id_or_name):
try:
return rest_client.clusters_get(
cluster_name=cluster_id_or_name)["clusterUuid"]
except Exception:
log.warning("Failed to lookup cluster by name, assuming '%s' is a UUID.",
cluster_id_or_name, exc_info=True)
# This will raise an appropriate exception on failure.
return rest_client.clusters_get(
cluster_id=cluster_id_or_name)["clusterUuid"]
  @classmethod
  def identifier_to_node_uuid(cls, rest_client, node_id_name_or_ip):
    """Resolve a node identifier (IPv4 address, UUID, Prism name, or
    resolvable hostname) to the node's UUID.

    The fallback order is deliberate: IPv4, UUID, Prism name, and finally
    DNS resolution of a hostname back into an IPv4 lookup.

    Raises:
      CurieException: If the identifier cannot be resolved to an address.
    """
    # These will raise appropriate exceptions on failure, so it's safe to
    # assume that otherwise accessing the 'uuid' key is safe.
    if CurieUtil.is_ipv4_address(node_id_name_or_ip):
      return rest_client.hosts_get(host_ip=node_id_name_or_ip)["uuid"]
    elif CurieUtil.is_uuid(node_id_name_or_ip):
      try:
        return rest_client.hosts_get(host_id=node_id_name_or_ip)["uuid"]
      except Exception:
        log.debug("Failed to lookup node via UUID '%s'", node_id_name_or_ip)
    # The provided node identifier is not an IPv4 address or a UUID. It may
    # be either an unresolved hostname or a Prism name. Try Prism name first
    # to avoid potential overhead in name resolution.
    try:
      return rest_client.hosts_get(host_name=node_id_name_or_ip)["uuid"]
    except Exception:
      log.debug("Failed to lookup node via Prism name '%s'",
                node_id_name_or_ip)
    try:
      ip = CurieUtil.resolve_hostname(node_id_name_or_ip)
    except Exception:
      raise CurieException(
        CurieError.kInvalidParameter, "Unable to resolve IP address for '%s'"
        % node_id_name_or_ip)
    # Allow this to raise its own exception on failure, as there are no
    # further methods to which we can fall back.
    return rest_client.hosts_get(host_ip=ip)["uuid"]
  def __init__(self, cluster_metadata):
    """Initialize the cluster wrapper from its Curie metadata proto.

    Requires AHV hypervisor info, Nutanix software info, and Prism
    management-server info to be present in 'cluster_metadata'.
    """
    # TODO (jklein): Would be nice to standardize this in a cleaner way.
    CHECK(cluster_metadata.cluster_hypervisor_info.HasField("ahv_info"))
    CHECK(cluster_metadata.cluster_software_info.HasField("nutanix_info"))
    CHECK(
      cluster_metadata.cluster_management_server_info.HasField("prism_info"))
    # Prism information for the PE/PC server that manages this cluster.
    self._mgmt_server_info = \
      cluster_metadata.cluster_management_server_info.prism_info
    # Keep the software-info Prism host consistent with the management server.
    cluster_metadata.cluster_software_info.nutanix_info.prism_host = \
      self._mgmt_server_info.prism_host
    # Map of VM UUIDs to host UUIDs on which they should be scheduled.
    self.__vm_uuid_host_uuid_map = {}
    # ID Caches, resolved lazily by the corresponding properties below.
    self.__cluster_id = None
    self.__container_id = None
    self.__network_id = None
    self.__host_ip_cvm_ip_map = None
    super(AcropolisCluster, self).__init__(cluster_metadata)
  # Allow clusters to be specified by name or UUID.
  @property
  def _cluster_id(self):
    """UUID of this cluster, resolved lazily from the metadata identifier."""
    if self.__cluster_id is None:
      self.__cluster_id = self.identifier_to_cluster_uuid(
        self._prism_client, self._mgmt_server_info.prism_cluster_id)
    return self.__cluster_id
  # Allow containers to be specified by name or UUID.
  @property
  def _container_id(self):
    """UUID of the configured storage container, resolved lazily."""
    if self.__container_id is None:
      self.__container_id = self.__identifier_to_container_uuid(
        self._mgmt_server_info.prism_container_id)
    return self.__container_id
@property
def _network_id(self):
if self.__network_id is None:
mgmt_info = self._metadata.cluster_management_server_info
network_name = mgmt_info.prism_info.prism_network_id
for network_json in self._prism_client.networks_get()["entities"]:
if network_json["name"] == network_name:
self.__network_id = network_json["uuid"]
break
else:
raise CurieException(CurieError.kInvalidParameter,
"Unknown network '%s'" % network_name)
return self.__network_id
  @property
  def _host_ip_cvm_ip_map(self):
    """Map of hypervisor host IPs to their CVM external IPs (cached)."""
    if self.__host_ip_cvm_ip_map is None:
      self.__host_ip_cvm_ip_map = {}
      entities = self._prism_client.hosts_get().get("entities")
      for entity in entities:
        log.debug("Adding CVM for host %s: %s",
                  entity["hypervisorAddress"], entity["serviceVMExternalIP"])
        self.__host_ip_cvm_ip_map[entity["hypervisorAddress"]] = \
          entity["serviceVMExternalIP"]
    return self.__host_ip_cvm_ip_map
#----------------------------------------------------------------------------
#
# Public base Cluster methods.
#
#----------------------------------------------------------------------------
  def update_metadata(self, include_reporting_fields):
    """Refresh node identifiers (and optionally hardware/software details)
    in the Curie metadata from live Prism data.

    Node protos are re-keyed by their Prism UUIDs; nodes present in Prism
    but absent from the metadata are skipped.
    """
    cluster_json = self.__lookup_cluster_json()
    self._node_id_metadata_map = {node.id: node
                                  for node in self._metadata.cluster_nodes}
    node_uuid_metadata_id_map = self.get_node_uuid_metadata_id_map()
    for node_json in self._prism_client.hosts_get().get("entities", []):
      # Ignore hosts belonging to other clusters managed by the same Prism.
      if node_json["clusterUuid"] != cluster_json["clusterUuid"]:
        continue
      try:
        curr_node_identifier = node_uuid_metadata_id_map[node_json["uuid"]]
      except KeyError:
        # If the node is missing in the metadata, skip it.
        continue
      node_proto = self._node_id_metadata_map.get(curr_node_identifier)
      CHECK(node_proto)
      node_proto.id = node_json["uuid"]
      if include_reporting_fields:
        node_hw = node_proto.node_hardware
        node_hw.num_cpu_packages = node_json["numCpuSockets"]
        node_hw.num_cpu_cores = node_json["numCpuCores"]
        node_hw.num_cpu_threads = node_json["numCpuThreads"]
        node_hw.cpu_hz = node_json["cpuFrequencyInHz"]
        node_hw.memory_size = node_json["memoryCapacityInBytes"]
    if include_reporting_fields:
      # TODO (jklein): AHV info per-node.
      cluster_software_info = self._metadata.cluster_software_info
      nutanix_version = self._prism_client.get_nutanix_metadata().version
      if nutanix_version is not None:
        cluster_software_info.nutanix_info.version = nutanix_version
  def get_node_uuid_metadata_id_map(self):
    """Map each metadata node's Prism UUID to its metadata identifier.

    Raises:
      CurieTestException: If any metadata node cannot be found in the AHV
        cluster.
    """
    node_uuid_metadata_id_map = {}
    for node_id in [n.id for n in self._metadata.cluster_nodes]:
      try:
        curr_node_uuid = self.identifier_to_node_uuid(self._prism_client,
                                                      node_id)
      except Exception:
        raise CurieTestException(
          cause=
          "Node with ID '%s' is in the Curie cluster metadata, but not "
          "found in the AHV cluster." % node_id,
          impact=
          "The cluster configuration is invalid.",
          corrective_action=
          "Please check that all of the nodes in the Curie cluster metadata "
          "are part of the AHV cluster. For example, if the cluster "
          "configuration has four nodes, please check that all four nodes "
          "are present in the AHV cluster."
        )
      else:
        node_uuid_metadata_id_map[curr_node_uuid] = node_id
    return node_uuid_metadata_id_map
def nodes(self):
nodes = []
for index, node_metadata in enumerate(self._metadata.cluster_nodes):
nodes.append(AcropolisNode(self, node_metadata.id, index))
return nodes
  def power_off_nodes_soft(self, nodes, timeout_secs=None, async=False):
    """See 'Cluster.power_off_nodes_soft' for definition."""
    success = True
    node_power_state_map = self.get_power_state_for_nodes(nodes)
    powered_off = \
      AcropolisNode.get_management_software_value_for_attribute(
        NodePropertyNames.POWERED_OFF)
    try:
      for node in nodes:
        if node_power_state_map[node.node_id()] == powered_off:
          log.info("Skipping power-off request for node '%s' which is already "
                   "powered off", node.node_id())
          continue
        log.info("Requesting genesis shutdown for node '%s'", node.node_id())
        # Two-step genesis flow: prepare the node, then shut the hypervisor
        # down. Abort the whole loop on the first failure.
        curr_success = self._prism_client.genesis_prepare_node_for_shutdown(
          node.node_id())
        if curr_success:
          success = self._prism_client.genesis_shutdown_hypervisor(
            node.node_id())
        else:
          success = False
          log.error("Failed to perform soft power off on node %s",
                    node.node_id())
          break
      if not success:
        raise CurieTestException("Failed to power off nodes")
    except BaseException as exc:
      # Capture stacktrace here in case an exception is raised clearing the
      # genesis shutdown token.
      log.exception(str(exc))
      raise
    else:
      log.info("Successfully powered off nodes %s",
               ", ".join([n.node_id() for n in nodes]))
    finally:
      # The shutdown token must always be released, even on failure,
      # otherwise subsequent shutdown requests would be blocked.
      log.info("Clearing genesis shutdown token")
      if not self._prism_client.genesis_clear_shutdown_token():
        raise CurieTestException("Failed to clear genesis shutdown token")
  def vms(self):
    """Return a curie VM object for every VM known to Prism.

    NOTE: under Python 2, 'map' returns a fully-materialized list here.
    """
    return map(self.__vm_json_to_curie_vm,
               self._prism_client.vms_get().get("entities", []))
def get_power_state_for_vms(self, vms):
"""See 'Cluster.get_power_state_for_vms' for documentation."""
vm_ids = set([vm.vm_id() for vm in vms])
vm_id_power_state_map = {}
for vm_json in self._prism_client.vms_get().get("entities", []):
if vm_json["uuid"] not in vm_ids:
continue
vm_ids.discard(vm_json["uuid"])
vm_id_power_state_map[vm_json["uuid"]] = vm_json["powerState"]
if vm_ids:
raise CurieTestException("Invalid VM ID(s) '%s'" % ", ".join(vm_ids))
return vm_id_power_state_map
def get_power_state_for_nodes(self, nodes):
"""See 'Cluster.get_power_state_for_nodes' for documentation."""
ret = {}
ip_status_map = self._prism_client.genesis_cluster_status().get("svms", {})
for node in nodes:
status_map = ip_status_map.get(self._host_ip_cvm_ip_map[node.node_ip()])
if status_map and status_map["state"].strip().lower() == "down":
log.debug("Translating 'state' == 'down' to 'kNormalDisconnected'")
curr_status = "kNormalDisconnected"
else:
log.debug("Translating 'state' == '%s' to 'kNormalConnected'",
status_map.get("state"))
curr_status = "kNormalConnected"
host_json = self._prism_client.hosts_get_by_id(node.node_id())
for key in ["hypervisorState", "state"]:
log.debug("Via REST API, AHV reports '%s' == '%s'",
key, host_json.get(key))
ret[node.node_id()] = curr_status
return ret
  def sync_power_state_for_nodes(self, nodes, timeout_secs=None):
    """See 'Cluster.sync_power_state_for_nodes' for documentation."""
    # No-op: It is not known that syncing is required on AHV.
    # 'timeout_secs' is accepted for interface compatibility but unused.
    return self.get_power_state_for_nodes(nodes)
  def power_on_vms(self, vms, max_parallel_tasks=None):
    """
    See 'Cluster.power_on_vms' for documentation.
    """
    # Waits until each VM has acquired an IP address before returning.
    self.__set_power_state_for_vms(
      vms, "on", wait_for_ip=True,
      max_parallel_tasks=self._get_max_parallel_tasks(max_parallel_tasks))
  def power_off_vms(self, vms, max_parallel_tasks=None):
    """
    See 'Cluster.power_off_vms' for documentation.
    """
    # NOTE(review): unlike power_on_vms/delete_vms, 'max_parallel_tasks' is
    # passed through raw rather than via self._get_max_parallel_tasks() —
    # confirm whether __set_power_state_for_vms handles None itself.
    self.__set_power_state_for_vms(
      vms, "off", max_parallel_tasks=max_parallel_tasks)
  def delete_vms(self, vms, ignore_errors=False, max_parallel_tasks=None,
                 timeout_secs=None):
    """Delete VMs.
    Acropolis DELETE requests for /vms/{vm_id} are async. This method collects
    all taskUuids and polls until completion.
    Args:
      vms (list<CurieVM>): List of VMs to delete.
      ignore_errors (bool): Optional. Whether to allow individual tasks to
        fail. Default False.
      max_parallel_tasks (int): Max number of requests to have in-flight at
        any given time. (Currently ignored)
      timeout_secs (int): If provided, overall timeout for VM deletion tasks.
    Raises:
      CurieTestException:
        - If any VM is not already powered off.
        - All VMs are not destroyed with in the timeout.
        - Destroy task fails and ignore_errors is False.
    """
    # TODO (jklein): max_parallel_tasks won't work unless this is changed to
    # use task descriptors.
    if timeout_secs is None:
      # Default: one minute per VM to be deleted.
      timeout_secs = len(vms) * 60
    # Cluster timestamp taken before issuing deletes, used as the polling
    # cutoff so stale tasks are not matched.
    task_t0 = self._prism_client.get_cluster_timestamp_usecs()
    vm_id_task_map = {}
    for vm_id, tid in self._prism_client.vms_delete(
        [vm.vm_id() for vm in vms]).iteritems():
      if tid is None:
        raise CurieTestException("Failed to delete VM %s" % vm_id)
      vm_id_task_map[vm_id] = PrismTask.from_task_id(self._prism_client, tid)
    try:
      PrismTaskPoller.execute_parallel_tasks(
        tasks=vm_id_task_map.values(),
        max_parallel=self._get_max_parallel_tasks(max_parallel_tasks),
        timeout_secs=timeout_secs,
        prism_client=self._prism_client,
        cutoff_usecs=task_t0)
    except CurieTestException:
      if not ignore_errors:
        raise
      log.debug("Ignoring exception in delete_vms", exc_info=True)
    # Even when errors are ignored above, per-task statuses are inspected so
    # failures can at least be logged (or raised when ignore_errors=False).
    failed_to_delete_vm_ids = []
    for vm_id, task in vm_id_task_map.iteritems():
      if task.get_status() != TaskStatus.kSucceeded:
        failed_to_delete_vm_ids.append(vm_id)
    if failed_to_delete_vm_ids:
      msg = "Failed to delete vms: %s" % ", ".join(failed_to_delete_vm_ids)
      if ignore_errors:
        log.error(msg)
      else:
        raise CurieTestException(msg)
def import_vm(self, goldimages_directory, goldimage_name, vm_name, node_id=None):
"""
Creates a VM from the specified gold image. If 'node_id' is specified, the
VM is created on that node, else a random node is selected. The VM will be
created on the datastore associated with the curie server's settings for
this cluster.
"""
if node_id is None:
node_id = random.choice(self.nodes()).node_id()
ovfs = glob.glob(os.path.join(goldimages_directory,
goldimage_name, "*.ovf"))
if len(ovfs) == 0:
raise CurieException(CurieError.kInternalError,
"Unable to locate .ovf file in '%s'" %
os.path.join(goldimages_directory,
goldimage_name))
elif len(ovfs) > 1:
raise CurieException(CurieError.kInternalError,
"Unique .ovf file expected. Found: '%s'" % ovfs)
vm = | |
from chiscore import davies_pvalue, optimal_davies_pvalue
class StructLMM:
r"""
Structured linear mixed model that accounts for genotype-environment interactions.
Let n be the number of samples.
StructLMM [1] extends the conventional linear mixed model by including an
additional per-individual effect term that accounts for genotype-environment
interaction, which can be represented as an n×1 vector, 𝛃.
The model is given by
𝐲 = 𝙼𝛂 + 𝐠𝛽 + 𝐠⊙𝛃 + 𝐞 + 𝛆,
where
𝛽 ∼ 𝓝(0, 𝓋₀⋅ρ), 𝛃 ∼ 𝓝(𝟎, 𝓋₀(1-ρ)𝙴𝙴ᵀ), 𝐞 ∼ 𝓝(𝟎, 𝓋₁𝚆𝚆ᵀ), and 𝛆 ∼ 𝓝(𝟎, 𝓋₂𝙸).
The vector 𝐲 is the outcome, matrix 𝙼 contains the covariates, and vector 𝐠 is the
genetic variant.
The matrices 𝙴 and 𝚆 are generally the same, and represent the environment
configuration for each sample.
The parameters 𝓋₀, 𝓋₁, and 𝓋₂ are the overall variances.
The parameter ρ ∈ [𝟶, 𝟷] dictates the relevance of genotype-environment interaction
versus the genotype effect alone.
The term 𝐞 accounts for additive environment-only effects while 𝛆 accounts for
noise effects.
The above model is equivalent to
𝐲 = 𝙼𝛂 + 𝐠⊙𝛃 + 𝐞 + 𝛆,
where
𝛃 ∼ 𝓝(𝟎, 𝓋₀(ρ𝟏𝟏ᵀ + (1-ρ)𝙴𝙴ᵀ)), 𝐞 ∼ 𝓝(𝟎, 𝓋₁𝚆𝚆ᵀ), and 𝛆 ∼ 𝓝(𝟎, 𝓋₂𝙸).
Its marginalised form is given by
𝐲 ∼ 𝓝(𝙼𝛂, 𝓋₀𝙳(ρ𝟏𝟏ᵀ + (1-ρ)𝙴𝙴ᵀ)𝙳 + 𝓋₁𝚆𝚆ᵀ + 𝓋₂𝙸),
where 𝙳 = diag(𝐠).
StructLMM method is used to perform two types of statistical tests.
The association one compares the following hypothesis:
𝓗₀: 𝓋₀ = 0
𝓗₁: 𝓋₀ > 0
𝓗₀ denotes no genetic association, while 𝓗₁ models any genetic association.
In particular, 𝓗₁ includes genotype-environment interaction as part of genetic
association.
The interaction test is slightly more complicated as the term 𝐠𝛽 is now considered
    a fixed one. In practice, we include 𝐠 in the covariates matrix 𝙼 and set ρ = 0.
We refer to this modified model as the interaction model.
    The compared hypotheses are:
𝓗₀: 𝓋₀ = 0 (given the interaction model)
𝓗₁: 𝓋₀ > 0 (given the interaction model)
Implementation
--------------
We employ the score-test statistic [2] for both tests
𝑄 = ½𝐲ᵀ𝙿(∂𝙺)𝙿𝐲,
where
𝙿 = 𝙺⁻¹ - 𝙺⁻¹𝙼(𝙼ᵀ𝙺⁻¹𝙼)⁻¹𝙼ᵀ𝙺⁻¹ and cov(𝐲) = 𝙺
for the REML-estimated parameters under the null hypothesis.
The derivative is taken over the parameter being tested.
    Let us, for now, assume that ρ is given.
In practice, we have
𝙺ᵨ = 𝓋₀𝙳(ρ𝟏𝟏ᵀ + (1-ρ)𝙴𝙴ᵀ)𝙳 + 𝓋₁𝚆𝚆ᵀ + 𝓋₂𝙸
∂𝙺ᵨ = 𝙳(ρ𝟏𝟏ᵀ + (1-ρ)𝙴𝙴ᵀ)𝙳
for association test and
𝙺₀ = 𝓋₀𝙳𝙴𝙴ᵀ𝙳 + 𝓋₁𝚆𝚆ᵀ + 𝓋₂𝙸
∂𝙺₀ = 𝙳𝙴𝙴ᵀ𝙳
for interaction test, for parameters estimated via REML.
The outcome distribution under null is
𝐲 ∼ 𝓝(𝙼𝛂, 𝓋₁𝚆𝚆ᵀ + 𝓋₂𝙸).
    It can be shown [2] that
𝑄 ∼ ∑ᵢ𝜆ᵢ𝜒²(1),
where the weights 𝜆ᵢ are the non-zero eigenvalues of ½√𝙿(∂𝙺)√𝙿.
We employ modified Liu approximation to 𝑄 proposed [3] and modified in [4].
References
----------
.. [1] <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., Barroso,
I., & <NAME>. (2018). A linear mixed-model approach to study multivariate
gene–environment interactions (p. 1). Nature Publishing Group.
.. [2] <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>.,
& <NAME>. (2014). Greater power and computational efficiency for
kernel-based association testing of sets of genetic variants. Bioinformatics,
30(22), 3206-3214.
.. [3] <NAME>., <NAME>., & <NAME>. (2009). A new chi-square approximation
to the distribution of non-negative definite quadratic forms in non-central
normal variables. Computational Statistics & Data Analysis, 53(4), 853-856.
.. [4] <NAME>, <NAME>, and <NAME>. "Optimal tests for rare
variant effects in sequencing association studies." Biostatistics 13.4 (2012):
762-775.
"""
    def __init__(self, y, M, E, W=None):
        """
        Parameters
        ----------
        y : array_like
            Outcome vector; reshaped into an n×1 column.
        M : array_like
            Covariates matrix 𝙼.
        E : array_like
            Environment matrix 𝙴.
        W : array_like or tuple, optional
            Environment matrix 𝚆 for the 𝐞 term. Defaults to ``E``. A tuple
            is interpreted as an eigen-decomposition ``(U, S)`` of 𝚆𝚆ᵀ, from
            which 𝚆 = U⋅√S is reconstructed.

        Raises
        ------
        ValueError
            If the number of samples of M, E, or W differs from that of y.
        """
        from numpy import asarray, atleast_2d, sqrt
        from numpy_sugar import ddot

        self._y = atleast_2d(asarray(y, float).ravel()).T
        self._E = atleast_2d(asarray(E, float).T).T
        if W is None:
            self._W = self._E
        elif isinstance(W, tuple):
            # W must be an eigen-decomposition of 𝚆𝚆ᵀ
            self._W = ddot(W[0], sqrt(W[1]))
        else:
            self._W = atleast_2d(asarray(W, float).T).T
        self._M = atleast_2d(asarray(M, float).T).T
        nsamples = len(self._y)
        if nsamples != self._M.shape[0]:
            raise ValueError("Number of samples mismatch between y and M.")
        if nsamples != self._E.shape[0]:
            raise ValueError("Number of samples mismatch between y and E.")
        if nsamples != self._W.shape[0]:
            raise ValueError("Number of samples mismatch between y and W.")
        self._lmm = None
        # Grid of ρ values scanned by the association test (SKAT-O style):
        # squares of 0.1..0.5, then 0.5, and 0.999 in place of 1.
        self._rhos = [0.0, 0.1 ** 2, 0.2 ** 2, 0.3 ** 2, 0.4 ** 2, 0.5 ** 2, 0.5, 0.999]
    def fit(self, verbose=True):
        """Fit the null model 𝐲 ∼ 𝓝(𝙼𝛂, 𝓋₁𝚆𝚆ᵀ + 𝓋₂𝙸) via REML.

        Uses glimix-core's Kron2Sum and stores the two fitted covariance
        scalars for later use by the score tests.
        """
        from glimix_core.lmm import Kron2Sum

        self._lmm = Kron2Sum(self._y, [[1]], self._M, self._W, restricted=True)
        self._lmm.fit(verbose=verbose)
        # Scalar covariance parameters of the fitted (1-trait) model.
        self._covarparam0 = self._lmm.C0[0, 0]
        self._covarparam1 = self._lmm.C1[0, 0]
    def _P(self, v):
        """
        Let 𝙺 be the optimal covariance matrix under the null hypothesis.
        Given 𝐯, this method computes
            𝙿𝐯 = 𝙺⁻¹𝐯 - 𝙺⁻¹𝙼(𝙼ᵀ𝙺⁻¹𝙼)⁻¹𝙼ᵀ𝙺⁻¹𝐯.
        """
        from numpy_sugar.linalg import rsolve
        from scipy.linalg import cho_solve

        # 𝙺⁻¹𝐯 via least-squares solve against the fitted covariance.
        x = rsolve(self._lmm.covariance(), v)
        if self._lmm.X is not None:
            # Subtract the covariate-projection term using the Cholesky
            # factor of 𝙼ᵀ𝙺⁻¹𝙼 cached by the fitted LMM ("Lh" term).
            Lh = self._lmm._terms["Lh"]
            t = self._lmm.X @ cho_solve(Lh, self._lmm.M.T @ x)
            x -= rsolve(self._lmm.covariance(), t)
        return x
def _score_stats(self, g, rhos):
"""
Let 𝙺 be the optimal covariance matrix under the null hypothesis.
For a given ρ, the score-based test statistic is given by
𝑄ᵨ = ½𝐲ᵀ𝙿ᵨ(∂𝙺ᵨ)𝙿ᵨ𝐲,
where
∂𝙺ᵨ = 𝙳(ρ𝟏𝟏ᵀ + (1-ρ)𝙴𝙴ᵀ)𝙳
and 𝙳 = diag(𝐠).
"""
from numpy import zeros
from numpy_sugar import ddot
Q = zeros(len(rhos))
DPy = ddot(g, self._P(self._y))
s = DPy.sum()
l = s * s
DPyE = DPy.T @ self._E
r = DPyE @ DPyE.T
for i, rho in enumerate(rhos):
Q[i] = (rho * l + (1 - rho) * r) / 2
return Q
def _score_stats_null_dist(self, g):
"""
Under the null hypothesis, the score-based test statistic follows a weighted sum
of random variables:
𝑄 ∼ ∑ᵢ𝜆ᵢχ²(1),
where 𝜆ᵢ are the non-zero eigenvalues of ½√𝙿(∂𝙺)√𝙿.
Note that
∂𝙺ᵨ = 𝙳(ρ𝟏𝟏ᵀ + (1-ρ)𝙴𝙴ᵀ)𝙳 = (ρ𝐠𝐠ᵀ + (1-ρ)𝙴̃𝙴̃ᵀ)
for 𝙴̃ = 𝙳𝙴.
By using SVD decomposition, one can show that the non-zero eigenvalues of 𝚇𝚇ᵀ
are equal to the non-zero eigenvalues of 𝚇ᵀ𝚇.
Therefore, 𝜆ᵢ are the non-zero eigenvalues of
½[√ρ𝐠 √(1-ρ)𝙴̃]𝙿[√ρ𝐠 √(1-ρ)𝙴̃]ᵀ.
"""
from math import sqrt
from numpy import empty
from numpy.linalg import eigvalsh
from numpy_sugar import ddot
Et = ddot(g, self._E)
Pg = self._P(g)
PEt = self._P(Et)
gPg = g.T @ Pg
EtPEt = Et.T @ PEt
gPEt = g.T @ PEt
n = Et.shape[1] + 1
F = empty((n, n))
lambdas = []
for i in range(len(self._rhos)):
rho = self._rhos[i]
F[0, 0] = rho * gPg
F[0, 1:] = sqrt(rho) * sqrt(1 - rho) * gPEt
F[1:, 0] = F[0, 1:]
F[1:, 1:] = (1 - rho) * EtPEt
lambdas.append(eigvalsh(F) / 2)
return lambdas
def _score_stats_pvalue(self, Qs, lambdas):
"""
Computes Pr(𝑄 > q) for 𝑄 ∼ ∑ᵢ𝜆ᵢχ²(1).
Pr(𝑄 > q) is the p-value for the score statistic.
Parameters
----------
Qs : array_like
𝑄ᵨ statistic.
lambdas : array_like
𝜆ᵢ from the null distribution for each ρ.
"""
from numpy import stack
return stack([_mod_liu(Q, lam) for Q, lam in zip(Qs, lambdas)], axis=0)
def _qmin(self, pliumod):
import scipy.stats as st
from numpy import zeros
# T statistic
T = pliumod[:, 0].min()
qmin = zeros(len(self._rhos))
percentile = 1 - T
for i in range(len(self._rhos)):
q = st.chi2.ppf(percentile, pliumod[i, 3])
mu_q = pliumod[i, 1]
sigma_q = pliumod[i, 2]
dof = pliumod[i, 3]
qmin[i] = (q - dof) / (2 * dof) ** 0.5 * sigma_q + mu_q
return qmin
    # SKAT
    def score_2dof_inter(self, X):
        """
        Interaction test.
        Parameters
        ----------
        X : 1d-array
            Genetic variant.
        Returns
        -------
        float
            P-value.
        """
        from numpy import empty
        from numpy_sugar import ddot

        # Interaction test fixes ρ = 0, so only the 𝙴̃ = 𝙳𝙴 term contributes.
        Q_rho = self._score_stats(X.ravel(), [0])
        g = X.ravel()
        Et = ddot(g, self._E)
        PEt = self._P(Et)
        EtPEt = Et.T @ PEt
        gPEt = g.T @ PEt
        n = Et.shape[1] + 1
        # Null-distribution weight matrix at ρ = 0: the [0, 0] burden entry
        # vanishes, leaving the cross and 𝙴̃ terms.
        F = empty((n, n))
        F[0, 0] = 0
        F[0, 1:] = gPEt
        F[1:, 0] = F[0, 1:]
        F[1:, 1:] = EtPEt
        F /= 2
        return davies_pvalue(Q_rho[0], F)
# SKAT-O
def score_2dof_assoc(self, X, return_rho=False):
"""
Association test.
Parameters
----------
X : 1d-array
Genetic variant.
return_rho : bool (optional)
``True`` to return the optimal ρ; ``False`` otherwise (Default).
Returns
-------
float
P-value.
float
Optimal ρ. Returned only if ``return_rho == True``.
"""
from numpy | |
<filename>pylink/model.py
#!/usr/bin/python
import inspect
import math
import os
import pprint
import re
import sys
import tempfile
import traceback
import pylink.utils as utils
from pylink.tagged_attribute import TaggedAttribute
class LoopException(Exception):
    """Raised when a dependency cycle is detected during node calculation."""
    pass
class DAGModel(object):
"""DAG Solver
After instantiating this class with a series of nodes, those nodes
(and their recursively-calculated values) can be referenced as
instance variables. It also includes a solver, and the ability to
override values.
"""
    def __init__(self, contrib=[], **extras):
        """Creates a new DAG Model
        contrib -- Iterable of tributaries
        extras -- Any additional node to add/override
        Please note that nodes are processed in the order in which
        they are received.  That means that if you add your node in
        the last tributary, it'll override any previous definition of
        that node.  Similarly, if you add your node in the kwargs,
        you'll override any node definition in the tributaries.
        """
        # NOTE: the mutable default 'contrib=[]' is safe here because it is
        # only iterated, never mutated.
        # Make sure the input looks sane
        for t in contrib:
            try:
                t.tribute
            except AttributeError:
                msg = ("One of your Tributaries doesn't have "
                       + "'tribute' defined: %s" % t.__class__.__name__)
                raise AttributeError(msg)
        # calculate the list of node names & enum
        names = []
        for mod in contrib:
            names += mod.tribute.keys()
        names.extend(extras.keys())
        self.enum = utils.sequential_enum(*names)
        # associate names to nodes
        (self._names, self._nodes,) = utils.node_associations(self.enum)
        # merge the contributions (later tributes override earlier ones)
        self._calc = {}
        self._values = {}
        self._meta = {}
        tributes = [m.tribute for m in contrib]
        tributes.append(extras)
        for t in tributes:
            self.accept_tribute(t)
        # Record the calculation stack for dependency tracking
        self._stack = []
        self._init_cache()
        # We start with an empty dependency tree and update as able
        self._deps = {}
        self._map_dependencies()
def accept_tribute(self, t):
for name, v, in t.items():
node = self._nodes[name]
if hasattr(v, '__call__'):
self._calc[node] = v
elif isinstance(v, TaggedAttribute):
self._meta[node] = v.meta
self._values[node] = v.value
else:
self._values[node] = v
    def clear_cache(self):
        """Clears the cache.
        Useful if you do something like accept_tribute().  Currently
        only used in testing.
        """
        self._init_cache()
def is_calculated_node(self, node):
"""Determines whether or not the given node is calculated.
"""
return node in self._calc
def is_static_node(self, node):
"""Determines whether or not the given node is static.
"""
return not self.is_calculated_node(node)
def is_overridden(self, node):
"""Determines whether or not the given node is overridden.
"""
return self.is_calculated_node(node) and node in self._values
def get_meta(self, node):
"""Returns the metadata dict associated with this node
"""
if node in self._meta:
return self._meta[node]
return None
def set_meta(self, node, k, v):
"""Sets the desired key/value in this node's metadata dict
"""
self._meta.setdefault(node, {k:v})
self._meta[node][k] = v
    def _map_dependencies(self):
        """Rebuild the derived dependency maps from self._deps.

        Expensive; called lazily (see _cache_clear / print_dependencies)
        whenever self._deps_are_stale has been set.
        """
        # nodes => list of direct dependencies
        # self._deps is maintained by the cache system
        self._dep_names = self._named_deplist(self._deps)
        # nodes => flattened dependency list
        self._flat_deps = self._flattened_deps()
        self._flat_dep_names = self._named_deplist(self._flat_deps)
        # nodes => flattened list of nodes depending upon it
        self._clients = self._client_list(self._flat_deps)
        self._client_names = self._named_deplist(self._clients)
        self._deps_are_stale = False
    def _init_cache(self):
        """Reset the node-value cache to empty."""
        self._cache = {}
def _record_parent(self, node):
if len(self._stack):
dep = self._stack[-1]
self._add_dependency_impl(node, dep)
    def print_dependencies(self):
        """Pretty Prints the dependency information for all nodes.
        First is the one-layer dependencies (ie link_margin_db depends
        upon required_ebn0_db and received_ebn0_db).
        Next the flattened dependencies are printed (so link_margin_db
        ends up depending upon lots and lots of things).
        Finally the flattened reverse dependencies are printed (we
        need these for proper cache invalidation).
        """
        if self._deps_are_stale:
            # This call is quite expensive, so we only want to do so
            # if necessary.
            self._map_dependencies()
        pprint.pprint(self._dep_names)
        pprint.pprint(self._flat_dep_names)
        pprint.pprint(self._client_names)
def _add_dependency_impl(self, node, dep):
self._deps.setdefault(dep, {node:0})
self._deps[dep].setdefault(node, 0)
self._deps[dep][node] += 1
if self._deps[dep][node] == 1:
self._deps_are_stale = True
    def cached_calculate(self, node, clear_stack=False):
        """Either return the cached value, or calculate/lookup the node's value.
        You really only need this method if you're introducing a
        cycle.
        clear_stack -- This kwarg permits the calculation with an
        empty stack.  That way the cycle-checker doesn't complain, and
        you can happily introduce cycles.  See the package README for
        more information about how to do this safely.
        """
        if clear_stack:
            # Temporarily hide the in-progress stack so the cycle detector
            # in _calculate() does not fire; restored before returning.
            orig_stack = self._stack
            self._stack = []
        # Register this node as a dependency of whichever node (if any) is
        # currently being calculated.
        self._record_parent(node)
        if node in self._cache:
            retval = self._cache[node]
        else:
            retval = self._calculate(node)
        if clear_stack:
            self._stack = orig_stack
        return retval
    def _calculate(self, node, stack=None):
        """Compute (or look up) a node's value, detecting dependency loops.

        stack -- optional replacement calculation stack; the original is
        restored before returning.

        Raises:
          LoopException: if 'node' is already on the calculation stack.
        """
        if stack:
            orig_stack = self._stack
            self._stack = stack
        if node in self._stack:
            # Already being computed further up the stack => cycle.
            stack = self._stack + [node]
            stack = [self.node_name(n) for n in stack]
            s = pprint.pformat(stack)
            raise LoopException("\n=== LOOP DETECTED ===\n%s" % s)
        self._stack.append(node)
        # Overrides/static values take precedence over calculators.
        if node in self._values:
            retval = self._values[node]
        else:
            retval = self._calc[node](self)
        self._cache_put(node, retval)
        self._stack.pop()
        if stack:
            self._stack = orig_stack
        return retval
    def _cache_clear(self, node=None):
        """Invalidate cached values.

        node -- when given, evict that node and every node that
        (transitively) depends upon it; otherwise drop the whole cache.
        """
        if self._deps_are_stale:
            # This call is quite expensive, so we only want to do so
            # if necessary.
            self._map_dependencies()
        if node is not None:
            if node in self._cache:
                del self._cache[node]
            # Also evict all transitive clients of this node.
            for client in self._clients[node]:
                if client in self._cache:
                    del self._cache[client]
        else:
            self._init_cache()
    def _cache_put(self, node, value):
        """Store a computed value in the cache for 'node'."""
        self._cache[node] = value
    def __getattr__(self, name):
        """Expose nodes as instance attributes: reading model.<node_name>
        triggers (cached) calculation of that node.

        Raises:
          AttributeError: if the name is not a known node, or the node has
            neither a value nor a calculator registered.
        """
        if name in self._nodes:
            node = self.node_num(name)
            if node not in self._values and node not in self._calc:
                name = self.node_name(node)
                msg = "It looks like you're missing an item: %s" % name
                raise AttributeError(msg)
            return self.cached_calculate(node)
        raise AttributeError("It looks like you're missing a node: %s" % name)
def _client_list(self, flat_deps):
retval = {}
for dep in self._names:
retval[dep] = []
for node in flat_deps:
if dep in flat_deps[node]:
retval[dep].append(node)
return retval
def _named_deplist(self, deps):
retval = {}
for node in deps:
n = self.node_name(node)
retval[n] = [ self.node_name(x) for x in deps[node] ]
return retval
def __flattened_deps_r(self, node, res={}):
if node in self._deps:
# We only register dependencies for dependent items
for dep in self._deps[node]:
res[dep] = 1
self.__flattened_deps_r(dep, res=res)
return res.keys()
def _flattened_deps(self):
retval = {}
for node in self._deps:
retval[node] = self.__flattened_deps_r(node, res={})
return retval
    def node_name(self, node):
        """Returns the name of the node
        node -- this is the integer from the enum
        Raises KeyError for unknown node numbers.
        """
        return self._names[node]
    def node_num(self, name):
        """Returns the node number from the name
        Raises KeyError for unknown names.
        """
        return self._nodes[name]
    def override(self, node, value):
        """Overrides a given node's value.
        If it's a static node, it redefines it.  If it's a calculated
        node it'll serve this static value instead of executing node.
        """
        # Invalidate this node and its clients BEFORE installing the new
        # value so no stale results survive.
        self._cache_clear(node=node)
        self._values[node] = value
    def revert(self, node):
        """Reverts an override on a node.
        Please note that this operation only makes sense if you're
        reverting an override on a calculator.

        Raises:
          AttributeError: if the node is static (has no calculator).
        """
        if node in self._values:
            if node in self._calc:
                # Drop the override and any cached results based on it.
                self._cache_clear(node=node)
                del self._values[node]
            else:
                name = self.node_name(node)
                msg = "You can't revert a static value: %s" % name
                raise AttributeError(msg)
def override_value(self, node):
"""Returns the override value for a node.
If you override a calculated node, this method will return the
value to which it was overridden, otherwise None. And if
you're overriding the calculated node to return None, well,
you're out of luck.
"""
if node in self._values:
return self._values[node]
return None
    def _solve_for(self, var, fixed, fixed_value, start, stop, step):
        """Single-round linear search: find the value of 'var' in
        [start, stop] (at intervals of 'step') that brings 'fixed' closest
        to 'fixed_value'.  Leaves 'var' overridden to the last value tried.
        """
        # The output variable should always be reverted
        self.revert(fixed)
        best_val = start
        self.override(var, start)
        best_diff = abs(fixed_value - self.cached_calculate(fixed))
        for i in range(0, int(math.ceil((stop-start)/step))+1, 1):
            # min() clamps the final point so we never step past 'stop'.
            val = min(start + step*i, stop)
            self.override(var, val)
            diff = abs(fixed_value - self.cached_calculate(fixed))
            if diff < best_diff:
                best_diff = diff
                best_val = val
        assert(best_val <= stop)
        return best_val
def solve_for(self, var, fixed, fixed_value, start, stop, step, rounds=3):
"""Solve for a fixed variable by varying another.
Using multiple <rounds>, this method solves for <var> by
searching for the value that results in <fixed> being closest
to <fixed_value> between <start> and <stop> at intervals of
<step>. Subsequent rounds will use the same number of steps
though the step size will, of course, shrink. After it will
search within the winning range at higher precision.
This method only works for either monotonic functions. If
there are two values that satisfy the constraint, it will find
the one closest to <start>.
var -- Node number to solve for (from the enum)
fixed -- Node number constraining the search
fixed_value -- The target value for the fixed node
start -- The value for <var> at which to start
stop -- The value for <var> at which to stop
step -- The delta between points to try in | |
= "Lat_N"
cdf['Latitude'].attrs['SCALETYP'] = "linear"
cdf['Latitude'].attrs['VALIDMAX'] = 90.0
cdf['Latitude'].attrs['VALIDMIN'] = -90.0
cdf['Latitude'].attrs['VAR_NOTES'] = "Latitude of virtual magnetic observatories"
cdf['Latitude'].attrs['VAR_TYPE'] = "support_data"
cdf.new('Longitude', data=Longitude, type=pycdf.const.CDF_REAL4,
recVary=False)
cdf['Longitude'].attrs['CATDESC'] = "Longitude"
cdf['Longitude'].attrs['FIELDNAM'] = "Longitude"
cdf['Longitude'].attrs['FILLVAL'] = -1e+31
cdf['Longitude'].attrs['FORMAT'] = "E12.2"
cdf['Longitude'].attrs['LABLAXIS'] = "Lon_E"
cdf['Longitude'].attrs['SCALETYP'] = "linear"
cdf['Longitude'].attrs['VALIDMAX'] = 360.0
cdf['Longitude'].attrs['VALIDMIN'] = -180.0
cdf['Longitude'].attrs['VAR_NOTES'] = "Longitude of virtual magnetic observatories"
cdf['Longitude'].attrs['VAR_TYPE'] = "support_data"
cdf.new('Radius', data=Radius, type=pycdf.const.CDF_REAL4,
recVary=False)
cdf['Radius'].attrs['CATDESC'] = "Radius"
cdf['Radius'].attrs['FIELDNAM'] = "Radius"
cdf['Radius'].attrs['FILLVAL'] = -1e+31
cdf['Radius'].attrs['FORMAT'] = "E12.2"
cdf['Radius'].attrs['LABLAXIS'] = "Lon_E"
cdf['Radius'].attrs['SCALETYP'] = "linear"
cdf['Radius'].attrs['VALIDMAX'] = 1e+31
cdf['Radius'].attrs['VALIDMIN'] = 0
cdf['Radius'].attrs['VAR_NOTES'] = "Radius of interpolated sites"
cdf['Radius'].attrs['VAR_TYPE'] = "support_data"
cdf.new('X', data=X, type=pycdf.const.CDF_REAL8,
recVary=True)
cdf['X'].attrs['CATDESC'] = "X component of estimated magnetic field"
cdf['X'].attrs['DEPEND_0'] = "Epoch"
cdf['X'].attrs['DEPEND_1'] = "Latitude"
cdf['X'].attrs['DEPEND_2'] = "Longitude"
cdf['X'].attrs['DISPLAY_TYPE'] = 'no_plot'
cdf['X'].attrs['ElemRec'] = 'X'
cdf['X'].attrs['FIELDNAM'] = "Geomagnetic Field Element 1"
cdf['X'].attrs['FILLVAL'] = -1e+31
cdf['X'].attrs['FORMAT'] = "F12.4"
cdf['X'].attrs['OrigFreq'] = 99999.0
cdf['X'].attrs['SCALETYP'] = "linear"
cdf['X'].attrs['SampPer'] = 60.0
cdf['X'].attrs['UNITS'] = 'nT'
cdf['X'].attrs['VALIDMAX'] = 88000.0
cdf['X'].attrs['VALIDMIN'] = -88000.0
cdf['X'].attrs['VAR_NOTES'] = "X component points toward geographic north"
cdf['X'].attrs['VAR_TYPE'] = "data"
cdf.new('Y', data=Y, type=pycdf.const.CDF_REAL8,
recVary=True)
cdf['Y'].attrs['CATDESC'] = "Y component of estimated magnetic field"
cdf['Y'].attrs['DEPEND_0'] = "Epoch"
cdf['Y'].attrs['DEPEND_1'] = "Latitude"
cdf['Y'].attrs['DEPEND_2'] = "Longitude"
cdf['Y'].attrs['DISPLAY_TYPE'] = 'no_plot'
cdf['Y'].attrs['ElemRec'] = 'Y'
cdf['Y'].attrs['FIELDNAM'] = "Geomagnetic Field Element 2"
cdf['Y'].attrs['FILLVAL'] = -1e+31
cdf['Y'].attrs['FORMAT'] = "F12.4"
cdf['Y'].attrs['OrigFreq'] = 99999.0
cdf['Y'].attrs['SCALETYP'] = "linear"
cdf['Y'].attrs['SampPer'] = 60.0
cdf['Y'].attrs['UNITS'] = 'nT'
cdf['Y'].attrs['VALIDMAX'] = 88000.0
cdf['Y'].attrs['VALIDMIN'] = -88000.0
cdf['Y'].attrs['VAR_NOTES'] = "Y component points toward geographic east"
cdf['Y'].attrs['VAR_TYPE'] = "data"
cdf.new('Z', data=Z, type=pycdf.const.CDF_REAL8,
recVary=True)
cdf['Z'].attrs['CATDESC'] = "Z component of estimated magnetic field"
cdf['Z'].attrs['DEPEND_0'] = "Epoch"
cdf['Z'].attrs['DEPEND_1'] = "Latitude"
cdf['Z'].attrs['DEPEND_2'] = "Longitude"
cdf['Z'].attrs['DISPLAY_TYPE'] = 'no_plot'
cdf['Z'].attrs['ElemRec'] = 'Z'
cdf['Z'].attrs['FIELDNAM'] = "Geomagnetic Field Element 3"
cdf['Z'].attrs['FILLVAL'] = -1e+31
cdf['Z'].attrs['FORMAT'] = "F12.4"
cdf['Z'].attrs['OrigFreq'] = 99999.0
cdf['Z'].attrs['SCALETYP'] = "linear"
cdf['Z'].attrs['SampPer'] = 60.0
cdf['Z'].attrs['UNITS'] = 'nT'
cdf['Z'].attrs['VALIDMAX'] = 88000.0
cdf['Z'].attrs['VALIDMIN'] = -88000.0
cdf['Z'].attrs['VAR_NOTES'] = "Z component points toward center of Earth"
cdf['Z'].attrs['VAR_TYPE'] = "data"
# close the CDF file
cdf.close()
def read_imp_ASCII(filename):
    """Read Antti Pulkinnen's multi-file (ASCII) data from a zip archive.

    Parameters
    ----------
    filename : str
        Path to a zip archive containing DateTime.txt, LatLon.txt,
        B[XYZ].txt, obsB[XYZ].txt, and Stations.txt.

    Returns
    -------
    tuple
        (DT, (Lat, Lon, Rad), BX, BY, BZ, Label,
         (obsLat, obsLon, obsRad), obsX, obsY, obsZ, obsInc, obsID)
    """
    # create a temporary directory; the try/finally guarantees it is
    # removed even when extraction or parsing raises (the previous
    # version leaked the directory on any error)
    tmpDir = tempfile.mkdtemp()
    try:
        # unzip filename to tmpDir
        with zipfile.ZipFile(filename, 'r') as inZip:
            inZip.extractall(tmpDir)
        # set filenames
        dt_file = os.path.join(tmpDir, 'DateTime.txt')
        location_file = os.path.join(tmpDir, 'LatLon.txt')
        bx_file = os.path.join(tmpDir, 'BX.txt')
        by_file = os.path.join(tmpDir, 'BY.txt')
        bz_file = os.path.join(tmpDir, 'BZ.txt')
        obx_file = os.path.join(tmpDir, 'obsBX.txt')
        oby_file = os.path.join(tmpDir, 'obsBY.txt')
        obz_file = os.path.join(tmpDir, 'obsBZ.txt')
        station_file = os.path.join(tmpDir, 'Stations.txt')
        DT = _read_antti_datetime(dt_file)
        Lat, Lon, Rad, Label = _read_antti_location(location_file)
        BX = _read_antti_component(bx_file)
        BY = _read_antti_component(by_file)
        BZ = _read_antti_component(bz_file)
        obsX = _read_antti_component(obx_file)
        obsY = _read_antti_component(oby_file)
        obsZ = _read_antti_component(obz_file)
        obsLat, obsLon, obsRad, obsInc, obsID = _read_antti_stations(station_file)
    finally:
        shutil.rmtree(tmpDir)
    return (DT, (Lat, Lon, Rad), BX, BY, BZ, Label,
            (obsLat, obsLon, obsRad), obsX, obsY, obsZ, obsInc, obsID)
def write_imp_ASCII(DT, lat_lon_r, BX, BY, BZ, Label,
                    olat_olon_or, obsX, obsY, obsZ, obsInc, obsID,
                    filename='impOut.zip'):
    """
    Write Antti Pulkinnen's multi-file (ASCII) data to a zipfile.

    DT            -- sequence of datetimes
    lat_lon_r     -- (Lat, Lon, Rad) tuple for the interpolated sites
    BX, BY, BZ    -- field components (rows: locations, columns: time steps)
    Label         -- site labels
    olat_olon_or  -- (obsLat, obsLon, obsRad) tuple for the observatories
    obsX/Y/Z      -- observed components (not part of the original ASCII
                     format, but included for completeness)
    obsInc, obsID -- observatory data written to Stations.txt (semantics
                     per _write_antti_stations)
    filename      -- name of the output zip archive
    """
    # unpack former tuple arguments (see PEP-3113)
    Lat, Lon, Rad = lat_lon_r
    obsLat, obsLon, obsRad = olat_olon_or
    # create a temporary working directory; try/finally guarantees cleanup
    # even if a writer or the zip step fails (the previous version leaked
    # the directory on error)
    tmpDir = tempfile.mkdtemp()
    try:
        # set filenames
        dt_file = os.path.join(tmpDir, 'DateTime.txt')
        location_file = os.path.join(tmpDir, 'LatLon.txt')
        bx_file = os.path.join(tmpDir, 'BX.txt')
        by_file = os.path.join(tmpDir, 'BY.txt')
        bz_file = os.path.join(tmpDir, 'BZ.txt')
        obx_file = os.path.join(tmpDir, 'obsBX.txt')
        oby_file = os.path.join(tmpDir, 'obsBY.txt')
        obz_file = os.path.join(tmpDir, 'obsBZ.txt')
        station_file = os.path.join(tmpDir, 'Stations.txt')
        # write out ASCII files
        _write_antti_datetime(DT, dt_file)
        _write_antti_location(Lat, Lon, Rad, Label, location_file)
        _write_antti_component(BX, 'X (northward) component', bx_file)
        _write_antti_component(BY, 'Y (eastward) component', by_file)
        _write_antti_component(BZ, 'Z (downward) component', bz_file)
        _write_antti_stations(obsLat, obsLon, obsRad, obsInc, obsID, station_file)
        # not a part of original ASCII format, but included for completeness
        _write_antti_component(obsX, 'observed X (northward) component', obx_file)
        _write_antti_component(obsY, 'observed Y (eastward) component', oby_file)
        _write_antti_component(obsZ, 'observed Z (downward) component', obz_file)
        # bundle everything into the output zip file
        with zipfile.ZipFile(filename, 'w', zipfile.ZIP_DEFLATED) as outZip:
            for path in (dt_file, location_file, bx_file, by_file, bz_file,
                         obx_file, oby_file, obz_file, station_file):
                outZip.write(path, os.path.basename(path))
    finally:
        shutil.rmtree(tmpDir)
def _read_antti_datetime(dt_file):
"""
Read datetimes from <NAME>'s DateTime.txt[.gz] file
"""
# NOTE: genfromtxt() doesn't work with gzipped files as it should, so we
# unzip the file ourself, and use io.BytesIO to fake out genfromtext()
if dt_file.split('.')[-1] == 'gz':
ff = gzip.open(dt_file, 'r')
else:
ff = open(dt_file, 'r')
sIO = io.BytesIO(ff.read().encode())
ff.close()
ymdHMS = np.genfromtxt(sIO, comments="%")
DT = np.array([dt.datetime(*elem) for elem in ymdHMS.astype('int')])
sIO.close()
return DT
def _write_antti_datetime(DT, dt_file):
"""
Write datetimes into the ASCII format used by <NAME>
"""
if dt_file.split('.')[-1] == 'gz':
ff = gzip.open(dt_file, 'w')
else:
ff = open(dt_file, 'w')
ff.write("%% Date and time of the geoelectric field distribution. " +
" Data produced on %s\n"%(dt.datetime.utcnow()))
ff.write("%% \n")
ff.write("%% This data comes together with files BX.txt, BY.txt, LatLon.txt" +
" and Stations.txt. \n")
ff.write("%% \n")
ff.write("%% Contact: \n")
ff.write("%% \n")
ff.write("%% The format of the data is as follows:\n")
ff.write("%% \n")
ff.write("%% year1 month1 day1 hour1 minute1 second1 \n")
ff.write("%% year2 month2 day2 hour2 minute2 second2 \n")
ff.write("%% . . . . . . \n")
ff.write("%% . . . . . . \n")
ff.write("%% . . . . . . \n")
ff.write("%% \n")
ff.write("\n")
for d in DT:
ff.write("%02.0f %02.0f %02.0f %02.0f %02.0f %02.0f\n"%
(d.year, d.month, d.day, d.hour, d.minute, d.second))
ff.close()
def _read_antti_component(component_file):
"""
Read vector component from Antti Pulkinnen's [BX|BY|BZ].txt[.gz] file
"""
# NOTE: genfromtxt() doesn't work with gzipped files as it should, so we
# unzip the file ourself, and use io.BytesIO to fake out genfromtext()
if component_file.split('.')[-1] == 'gz':
ff = gzip.open(component_file, 'r')
else:
ff = open(component_file, 'r')
sIO = io.BytesIO(ff.read().encode())
ff.close()
# read array
component = np.genfromtxt(sIO, comments="%").T
sIO.close()
return component
def _write_antti_component(component, component_id, component_file):
"""
Write vector components into the ASCII format used by Antti Pulkinnen.
component - 2D matrix, rows for locations, columns for time steps
component_ID - string describing component (e.g., 'X (northward) component')
component_file - name of file to write out
"""
if component_file.split('.')[-1] == 'gz':
ff = gzip.open(component_file, 'w')
else:
ff = open(component_file, 'w')
ff.write("%%%% %s of the magnetic field distribution."%component_id +
" Data produced on %s\n"%dt.datetime.utcnow())
ff.write("%% \n")
ff.write("%% This data comes together with files DateTime.txt, LatLon.txt" +
" and Stations.txt. \n")
ff.write("%% \n")
ff.write("%% Contact: \n")
ff.write("%% \n")
ff.write("%% The format of the data is as follows:\n")
ff.write("%% \n")
ff.write("%% Comp(loc1,t1) Comp(loc1,t2) Comp(loc1,t3) ... \n")
ff.write("%% Comp(loc2,t1) Comp(loc2,t2) Comp(loc2,t3) ... \n")
ff.write("%% . . . \n")
ff.write("%% . . . \n")
ff.write("%% . . . \n")
ff.write("%% \n")
ff.write("\n")
fmt = ''.join(['%02.4f ' for row in component] + ['\n'])
for loc in component.T:
ff.write(fmt%tuple(loc))
ff.close()
def _read_antti_location(location_file):
"""
Read latitudes, longitudes, and (possibly blank) IDs from <NAME>'s
latlon.txt[.gz] file
"""
# NOTE: genfromtxt() doesn't work with gzipped files as it should, so we
# unzip the file ourself, and use io.BytesIO to fake out genfromtext()
if location_file.split('.')[-1] == 'gz':
ff = gzip.open(location_file, 'r')
else:
ff = open(location_file, 'r')
sIO = io.BytesIO(ff.read().encode())
ff.close()
# read LatLon array (with optional labels...
# either all have labels, or none, else genfromtxt() chokes)
lll = list(zip(*np.atleast_1d(np.genfromtxt(
sIO, comments="%", dtype=None,
names=['latReal','lonReal','radReal','labelString']
))))
# handles older style(s) with no radius and/or labels
if len(lll) > 3:
lat, lon, rad = np.array(lll[0:3])
label = np.array(lll[3])
elif len(lll) > 2:
lat, lon, rad = np.array(lll[0:3])
if isinstance(rad[0], (str, bytes)):
label = rad
rad = np.ones(lat.shape)
else:
label = np.tile('', lat.shape)
elif len(lll) == 2:
lat, lon = np.array(lll[0:2])
rad = np.ones(lat.shape)
label = np.tile('', lat.shape)
else:
raise Exception('Requires (at least) latitude and longitude')
return lat, lon, rad, label
def _write_antti_location(lat, lon, rad, label, location_file):
"""
Write latitudes, longitudes, radius, and IDs of the locations of vector
components into the ASCII format used by <NAME>
"""
if location_file.split('.')[-1] == 'gz':
ff = gzip.open(location_file, 'w')
else:
ff = open(location_file, 'w')
ff.write("%% Geographic coordinates of the geoelectric field distribution " +
" Data produced on %s\n"%(dt.datetime.utcnow()))
ff.write("%% \n")
ff.write("%% This data comes together with files DateTime.txt, B?.txt," +
" and Stations.txt. \n")
ff.write("%% \n")
ff.write("%% Contact: | |
user:
if driver['races_done'] < 5:
if 'rank_pos' in driver:
driverItem = {
'Name':user['username'],
'Races':driver['races_done'],
'Points': 0,
'First':driver['top10']['1'],
'Second':driver['top10']['2'],
'Third':driver['top10']['3'],
'Fourth':driver['top10']['4'],
'Fifth':driver['top10']['5'],
'id':user['_id'],
'Class': driver['classimg'],
'Position': driver['rank_pos'],
'Votes': driver['votes'],
'Races_15': driver['races_15'],
}
else:
driverItem = {
'Name':user['username'],
'Races':driver['races_done'],
'Points': 0,
'First':driver['top10']['1'],
'Second':driver['top10']['2'],
'Third':driver['top10']['3'],
'Fourth':driver['top10']['4'],
'Fifth':driver['top10']['5'],
'id':user['_id'],
'Class': driver['classimg'],
'Position': 999,
'Votes': driver['votes'],
'Races_15': driver['races_15'],
}
else:
#print(driver)
driverItem = {
'Name':user['username'],
'Races':driver['races_done'],
'Points':round(driver['points'] * 1000),
'First':driver['top10']['1'],
'Second':driver['top10']['2'],
'Third':driver['top10']['3'],
'Fourth':driver['top10']['4'],
'Fifth':driver['top10']['5'],
'id':user['_id'],
'Class': driver['classimg'],
'Position': driver['rank_pos'],
'Votes': driver['votes'],
'Races_15': driver['races_15'],
}
if driver['races_done'] == 0:
driverItem['Incidents'] = 0
elif 'incident_ave' in driver:
driverItem['Incidents'] = round((driver['incident_ave']), 2)
else:
driverItem['Incidents'] = round((driver['incidents'] / driver['races_done']), 2)
driverList.append(driverItem)
else:
continue
#sortdriverList = sorted(driverList, key=itemgetter('Points'), reverse=True)
#print(driverList)
except Exception as e:
return str(e)
return json.dumps(driverList)
@application.route("/getDriverList2", methods=['POST'])
@cross_origin()
@csrf.exempt
def getDriverList2():
    """Return the season driver standings as a JSON list.

    Drivers with fewer than 5 races are treated as provisional: they get
    0 points and, when the DB record has no 'rank_pos' yet, a fallback
    position of 999 (matching getDriverList). Everyone else gets their
    points scaled by 1000 and their actual rank position.

    On any failure the exception text is returned as the response body.
    """
    try:
        drivers = db.SeasonDrivers.find().sort([('points', -1)])
        driverList = []
        for driver in drivers:
            user = dbusers.users.find_one({'steam_id': driver['steamID']})
            if not user:
                continue
            provisional = driver['races_done'] < 5
            driverItem = {
                'Name': user['username'],
                'Races': driver['races_done'],
                # provisional drivers are unranked: zero points
                'Points': 0 if provisional else round(driver['points'] * 1000),
                'First': driver['top10']['1'],
                'Second': driver['top10']['2'],
                'Third': driver['top10']['3'],
                'Fourth': driver['top10']['4'],
                'Fifth': driver['top10']['5'],
                'id': user['_id'],
                'Class': driver['classimg'],
                # guard a missing 'rank_pos' for provisional drivers the same
                # way getDriverList does (999 = not yet ranked); previously
                # this raised KeyError and the route returned the error text
                'Position': (driver.get('rank_pos', 999) if provisional
                             else driver['rank_pos']),
                'Votes': driver['votes'],
                'Races_15': driver['races_15'],
            }
            # average incidents per race; prefer a precomputed average
            if driver['races_done'] == 0:
                driverItem['Incidents'] = 0
            elif 'incident_ave' in driver:
                driverItem['Incidents'] = round(driver['incident_ave'], 2)
            else:
                driverItem['Incidents'] = round(
                    driver['incidents'] / driver['races_done'], 2)
            driverList.append(driverItem)
    except Exception as e:
        return str(e)
    return json.dumps(driverList)
@application.route('/updateprofile', methods=['POST'])
@login_required
def updateProfile():
    """Update the current user's profile from the EditProfile form.

    Only the owner of the profile may edit it; anyone else is simply
    redirected back to the driver page. Always redirects to the driver
    page afterwards (leftover debug prints were removed).
    """
    userid = request.args.get('userid')
    if current_user.get_id() != userid:
        return redirect(url_for('driver', userid=userid))
    form = EditProfile()
    if form.validate_on_submit():
        try:
            dbusers.users.update_one({'_id': userid}, {'$set': {
                'phrase': form.phrase.data,
                'about': form.about.data,
                'name': form.name.data,
                'lastname': form.lastname.data,
                'gender': form.gender.data,
                # stored as a DD-MM-YYYY string
                'birthday': str(form.birthday.data.strftime('%d-%m-%Y')),
                'state': form.state.data,
                'city': form.city.data
            }})
        except Exception as e:
            return str(e)
    else:
        # surface validation problems in the server log
        print(form.errors)
    return redirect(url_for('driver', userid=userid))
@application.route('/upload', methods=['POST', 'GET'])
@login_required
def photoupload():
    """Set the current user's avatar to an image URL from the UploadPhoto form.

    Validates ownership, then issues a HEAD request on the URL and only
    accepts png/jpeg/gif content types. Always redirects back to the
    driver page.
    """
    userid = request.args.get('userid')
    if current_user.get_id() != userid:
        return redirect(url_for('driver', userid=userid))
    form = UploadPhoto()
    if form.validate_on_submit():
        try:
            url = form.photo.data
            response = urlopen(HeadRequest(url))
            maintype = response.headers['Content-Type'].split(';')[0].lower()
            if maintype not in ('image/png', 'image/jpeg', 'image/gif'):
                flash('A URL deve ser uma imagem ou não é válida!')
            else:
                dbusers.users.update_one({'_id': userid}, {'$set': {
                    'avatar': url}})
        # BUGFIX: was a bare 'except:', which also swallows SystemExit and
        # KeyboardInterrupt; narrowed to Exception (network/HTTP errors).
        except Exception:
            flash('URL inacessível ou inválida!')
    else:
        # surface validation problems in the server log
        print(form.errors)
    return redirect(url_for('driver', userid=userid))
def gen_password():
    """Generate a random 5-character lowercase ASCII password.

    Uses the ``secrets`` module rather than ``random``: passwords are
    security-sensitive and ``random`` is not cryptographically strong.
    """
    import secrets
    from string import ascii_lowercase
    return ''.join(secrets.choice(ascii_lowercase) for _ in range(5))
@application.route('/schedulerace', methods=['POST', 'GET'])
@bookrace_required
def schedulerace():
options = db.ServerOptions.find_one()
form = scheduleRace()
userid = current_user.get_id()
user = dbusers.users.find_one({'_id': userid})
form2 = deleteRace()
scheduled_race_before = False
tz = pytz.timezone('Brazil/East')
race_db = db.ScheduledRace.find()
for item in race_db:
if item['user']['id'] == user['_id']:
scheduled_race_before = True
if form2.validate_on_submit() and form2.submitdelete.data:
try:
race = db.ScheduledRace.find_one({'user.id': user['_id']})
if race['Online'] == True:
db.ScheduledRace.update_one({'_id': race['_id']}, {'$set': {'Close': True}})
flash('Servidor foi fechado com Sucesso!')
else:
db.ScheduledRace.delete_one({'user.id': user['_id']})
flash('Corrida excluída com Sucesso!')
except:
flash('A corrida não foi excluída. Algo de errado aconteceu!')
if form.validate_on_submit() and form.registerrace.data:
try:
#Check if time is valid
if tz.localize(datetime.combine(form.date.data, form.time.data)) < datetime.now(tz) and user['admin'] == False:
flash('O horário/dia escolhidos são inválidos. A data deve ser posterior ao horário atual que é: ' +
datetime.now(tz).strftime('%d-%m-%Y %H:%M'))
else:
#TIME IS VALID
#CREATES A RANDOM PASSWORD OR BLANK PASSWORD
if form.password.data == '1':
form.password.data = gen_password()
else:
form.password.data = ''
answers = {
'tracks': form.track.data,
'cars': form.car.data,
'date': form.date.data.strftime('%d-%m-%Y'),
'time': form.time.data.strftime('%H:%M'),
'carviews': form.carview.data,
'damages': form.damage.data,
'fixsetups': form.fixsetup.data,
'fueltires': form.fueltire.data,
'pitreturns': form.pitreturn.data,
'help': [form.traction.data,
form.antilock.data,
form.stability.data,
form.gear.data,
form.clutch.data,
form.invulnerability.data,
form.opposite.data,
form.steering.data,
form.breakhelp.data,
form.spinhelp.data,
form.autopit.data,
form.autolift.data,
form.autoblip.data,
form.driveline.data
],
'mechfailures': form.mechfailure.data,
'maxplayers': str(form.maxplayers.data),
'ip': form.ip.data,
'password': <PASSWORD>,
'flags': form.rules.data,
'tiresets': form.tireset.data,
'session': [str(form.practice.data),
str(form.qualify.data),
str(form.qualylaps.data),
str(form.warmuptime.data),
str(form.racetime.data),
str(form.racelaps.data)],
'starttime': [form.starthour.data,
form.startminute.data],
'starttypes': form.starttype.data,
'trackconds': form.trackcond.data,
'trackprogresses': form.trackprogress.data,
'Started': False,
'racefinishes': form.racefinish.data,
'downstream': form.downstream.data,
'upstream': form.upstream.data,
'fixupgrades': form.fixupgrade.data,
'warmups': form.turnwarmup.data,
'privatequalies': form.privatequaly.data,
'timescales': form.timescale.data,
'Done': False,
'participants': [],
'user': {
'id': user['_id'],
'username': user['username']},
'official': form.official.data,
'cdc': form.cdc.data,
'public': form.public.data,
'Online': False,
'Close': False
}
if answers['cdc'] == True: #Makes sure CDC races are always OFFICIAL races
answers['official'] = True
if answers['official']: #Makes sure OFFICIAL races are always PUBLIC races
answers['public'] = True
answers['timestamp_start'] = time.mktime(datetime.strptime(
answers['date']+' '+answers['time'], "%d-%m-%Y %H:%M").timetuple())
answers['timestamp_end'] = answers['timestamp_start'] + ((form.practice.data + form.qualify.data + form.racetime.data + 15 )*60)
if answers['warmups'] == 'Sim':
answers['timestamp_end'] += (form.warmuptime.data) * 60
for k,v in answers.items(): #Transforma as definições de answers em dados de configuração
for key, value in options.items():
if key != '_id':
for i in range(len(value)):
if k == key:
if value[i][0] == v:
answers[k] = value[i]
scheduled_race_before = False
if user['admin'] == False:
for item in db.ScheduledRace.find():
if item['user']['id'] == answers['user']['id']:
scheduled_race_before = True
#We have to find if the car can only be used on special tracks (e.g: Karts and FTruck)
if answers['cars'][3] in answers['tracks'][3]: #Checks if the selected car can be used in the track
spc_track = True
else:
spc_track = False
racefinish_ok = True
if form.racefinish.data == 'Voltas' or form.racefinish.data == 'Tempo e voltas' and user['admin'] == False:
#ONLY ADMINS CAN CREATE RACES USING LAPS AS FINITH TYPE, REGULAR PLAYERS SHALL USE ONLY TIME RACES
racefinish_ok = False
start_time_ok = True
finish_time_ok = True
race_db = db.ScheduledRace.find()
for item in race_db:
if start_time_ok:
if answers['timestamp_start'] <= item['timestamp_start']: #Corrida começa mais cedo que a outra?
if answers['timestamp_end'] >= item['timestamp_start']: #Corrida termina depois do começo da outra?
if answers['official'] and item['official'] == False:
db.ScheduledRace.delete_one({'_id': item['_id']})
flash('A corrida não oficial: ' + item['tracks'][0] + ' // ' + item['cars'][0] + ' ' + item['date'] + ' ' + item['time'] + ' foi desmarcada')
else:
start_time_ok = False
flash('Choque de horário com: ' + item['tracks'][0] + ' // ' + item['cars'][0] + ' ' + item['date'] + ' ' + item['time'])
elif answers['timestamp_start'] <= item['timestamp_end']:
if answers['official'] and item['official'] == False:
db.ScheduledRace.delete_one({'_id': item['_id']})
flash('A corrida não oficial: ' + item['tracks'][0] + ' // ' + item['cars'][0] + ' ' + item['date'] + ' ' + item['time'] + ' foi desmarcada')
else:
start_time_ok = False
flash('Choque de horário com: ' + item['tracks'][0] + ' // ' + item['cars'][0] + ' ' + item['date'] + ' ' + item['time'])
elif answers['timestamp_start'] > item['timestamp_end']:
start_time_ok == True
for item in race_db:
if start_time_ok == True:
if answers['timestamp_start'] <= item['timestamp_end']:
if answers['official'] and item['official'] == False:
db.ScheduledRace.delete_one({'_id': item['_id']})
flash('A corrida não oficial: ' + item['tracks'][0] + ' // ' + item['cars'][0] + ' ' + item['date'] + ' ' + item['time'] + ' foi desmarcada')
else:
start_time_ok = False
flash('Choque de horário com: ' + item['tracks'][0] + ' // ' + item['cars'][0] + ' ' + item['date'] + ' ' + item['time'])
else:
if finish_time_ok == True:
if answers['timestamp_end'] >= item['timestamp_start']:
finish_time_ok = False
flash('Choque de horário com: ' + item['tracks'][0] + ' // ' + item['cars'][0] + ' ' + item['date'] + ' ' + item['time'])
if scheduled_race_before == False:
if spc_track == True:
if racefinish_ok == True:
if start_time_ok == True:
if finish_time_ok == True:
db.ScheduledRace.insert_one(answers)
flash('Corrida Marcada com Sucesso')
pw = form.password.data
if pw == '':
pw = '<PASSWORD>'
flash('A SENHA DA CORRIDA É: ' + pw )
else:
flash('O horário de término do servidor está chocando com outro Servidor já agendado. Diminua o tempo das sessões')
else:
flash('O horário de início marcado está chocando com outro Servidor já agendado')
else:
flash('Você não tem autorização para criar servidores com o critério de fim de corrida diferente de "TEMPO"')
else:
flash('Carro e pista selecionados não podem ser utilizados em conjunto!')
else:
flash('Você já marcou uma corrida e só poderá marcar outra caso delete a anterior!')
except Exception as e:
print(str(e))
elif form.registerrace.data:
flash('Não foi possível agendar essa corrida!')
| |
from contextlib import contextmanager
from rust import RustHelperBackend
from stone import ir
from stone.backends.helpers import split_words
def fmt_shouting_snake(name):
    """Convert *name* to SHOUTING_SNAKE_CASE (e.g. for constant names)."""
    words = split_words(name)
    return '_'.join(map(str.upper, words))
class RustBackend(RustHelperBackend):
def __init__(self, target_folder_path, args):
super(RustBackend, self).__init__(target_folder_path, args)
self._modules = []
self.preserve_aliases = True
# File Generators
def generate(self, api):
self._all_types = {ns.name: {typ.name: typ for typ in ns.data_types}
for ns in api.namespaces.values()}
for namespace in api.namespaces.values():
self._emit_namespace(namespace)
self._generate_mod_file()
def _generate_mod_file(self):
with self.output_to_relative_path('mod.rs'):
self._emit_header()
self.emit(u'#![allow(missing_docs)]')
self.emit()
for module in self._modules:
self.emit(u'if_feature! {{ "dbx_{}", pub mod {}; }}'.format(
module, self.namespace_name_raw(module)))
self.emit()
with self.block(u'pub(crate) fn eat_json_fields<\'de, V>(map: &mut V)'
u' -> Result<(), V::Error>'
u' where V: ::serde::de::MapAccess<\'de>'):
with self.block(u'while map.next_entry::<&str, ::serde_json::Value>()?.is_some()'):
self.emit(u'/* ignore */')
self.emit(u'Ok(())')
# Type Emitters
def _emit_namespace(self, namespace):
ns = self.namespace_name(namespace)
with self.output_to_relative_path(ns + '.rs'):
self._current_namespace = namespace.name
self._emit_header()
if namespace.doc is not None:
self._emit_doc(namespace.doc, prefix=u'//!')
self.emit()
for alias in namespace.aliases:
self._emit_alias(alias)
if namespace.aliases:
self.emit()
for fn in namespace.routes:
self._emit_route(ns, fn)
for typ in namespace.data_types:
self._current_type = typ
if isinstance(typ, ir.Struct):
if typ.has_enumerated_subtypes():
self._emit_polymorphic_struct(typ)
else:
self._emit_struct(typ)
elif isinstance(typ, ir.Union):
self._emit_union(typ)
else:
raise RuntimeError('WARNING: unhandled type "{}" of field "{}"'
.format(type(typ).__name__, typ.name))
self._modules.append(namespace.name)
def _emit_header(self):
self.emit(u'// DO NOT EDIT')
self.emit(u'// This file was @generated by Stone')
self.emit()
self.emit(u'#![allow(')
self.emit(u' clippy::too_many_arguments,')
self.emit(u' clippy::large_enum_variant,')
self.emit(u' clippy::doc_markdown,')
self.emit(u')]')
self.emit()
def _emit_struct(self, struct):
struct_name = self.struct_name(struct)
self._emit_doc(struct.doc)
self.emit(u'#[derive(Debug)]')
with self.block(u'pub struct {}'.format(struct_name)):
for field in struct.all_fields:
self._emit_doc(field.doc)
self.emit(u'pub {}: {},'.format(
self.field_name(field),
self._rust_type(field.data_type)))
self.emit()
if not struct.all_required_fields:
self._impl_default_for_struct(struct)
self.emit()
if struct.all_required_fields or struct.all_optional_fields:
with self._impl_struct(struct):
self._emit_new_for_struct(struct)
self.emit()
self._impl_serde_for_struct(struct)
def _emit_polymorphic_struct(self, struct):
enum_name = self.enum_name(struct)
self._emit_doc(struct.doc)
self.emit(u'#[derive(Debug)]')
with self.block(u'pub enum {}'.format(enum_name)):
for subtype in struct.get_enumerated_subtypes():
self.emit(u'{}({}),'.format(
self.enum_variant_name(subtype),
self._rust_type(subtype.data_type)))
if struct.is_catch_all():
self.emit(u'_Unknown')
self.emit()
self._impl_serde_for_polymorphic_struct(struct)
def _emit_union(self, union):
enum_name = self.enum_name(union)
self._emit_doc(union.doc)
self.emit(u'#[derive(Debug)]')
with self.block(u'pub enum {}'.format(enum_name)):
for field in union.all_fields:
if field.catch_all:
# Handle the 'Other' variant at the end.
continue
self._emit_doc(field.doc)
variant_name = self.enum_variant_name(field)
if isinstance(field.data_type, ir.Void):
self.emit(u'{},'.format(variant_name))
else:
self.emit(u'{}({}),'.format(variant_name, self._rust_type(field.data_type)))
if not union.closed:
self.emit_wrapped_text(
u'Catch-all used for unrecognized values returned from the server.'
u' Encountering this value typically indicates that this SDK version is'
u' out of date.',
prefix=u'/// ', width=100)
self.emit(u'Other,')
self.emit()
self._impl_serde_for_union(union)
if union.name.endswith('Error'):
self._impl_error(enum_name)
def _emit_route(self, ns, fn, auth_trait = None):
route_name = self.route_name(fn)
host = fn.attrs.get('host', 'api')
if host == 'api':
endpoint = u'crate::client_trait::Endpoint::Api'
elif host == 'content':
endpoint = u'crate::client_trait::Endpoint::Content'
elif host == 'notify':
endpoint = u'crate::client_trait::Endpoint::Notify'
else:
raise RuntimeError(u'ERROR: unsupported endpoint: {}'.format(host))
if auth_trait is None:
auths_str = fn.attrs.get('auth', 'user')
auths = list(map(lambda s: s.strip(), auths_str.split(',')))
auths.sort()
if auths == ['user']:
auth_trait = u'crate::client_trait::UserAuthClient'
elif auths == ['team']:
auth_trait = u'crate::client_trait::TeamAuthClient'
elif auths == ['app']:
auth_trait = u'crate::client_trait::AppAuthClient'
elif auths == ['app', 'user']:
# This is kind of lame, but there's no way to have a marker trait for either User
# OR App auth, so to get around this, we'll emit two functions, one for each.
# Emit the User auth route with no suffix via a recursive call.
self._emit_route(ns, fn, u'crate::client_trait::UserAuthClient')
# Now modify the name to add a suffix, and emit the App auth version by continuing.
route_name += "_app_auth"
auth_trait = u'crate::client_trait::AppAuthClient'
elif auths == ['noauth']:
auth_trait = u'crate::client_trait::NoauthClient'
else:
raise Exception('route {}/{}: unsupported auth type(s): {}'.format(
ns, name_with_version, auths_str))
# This is the name of the HTTP route. Almost the same as the 'route_name', but without any
# mangling to avoid Rust keywords and such.
if fn.version > 1:
name_with_version = "{}_v{}".format(fn.name, fn.version)
else:
name_with_version = fn.name
self._emit_doc(fn.doc)
arg_void = isinstance(fn.arg_data_type, ir.Void)
style = fn.attrs.get('style', 'rpc')
if style == 'rpc':
with self.emit_rust_function_def(
route_name,
[u'client: &impl {}'.format(auth_trait)]
+ ([] if arg_void else
[u'arg: &{}'.format(self._rust_type(fn.arg_data_type))]),
u'crate::Result<Result<{}, {}>>'.format(
self._rust_type(fn.result_data_type),
self._rust_type(fn.error_data_type)),
access=u'pub'):
self.emit_rust_fn_call(
u'crate::client_helpers::request',
[u'client',
endpoint,
u'crate::client_trait::Style::Rpc',
u'"{}/{}"'.format(ns, name_with_version),
u'&()' if arg_void else u'arg',
u'None'])
elif style == 'download':
with self.emit_rust_function_def(
route_name,
[u'client: &impl {}'.format(auth_trait)]
+ ([] if arg_void else
[u'arg: &{}'.format(self._rust_type(fn.arg_data_type))])
+ [u'range_start: Option<u64>',
u'range_end: Option<u64>'],
u'crate::Result<Result<crate::client_trait::HttpRequestResult<{}>, {}>>'.format(
self._rust_type(fn.result_data_type),
self._rust_type(fn.error_data_type)),
access=u'pub'):
self.emit_rust_fn_call(
u'crate::client_helpers::request_with_body',
[u'client',
endpoint,
u'crate::client_trait::Style::Download',
u'"{}/{}"'.format(ns, name_with_version),
u'&()' if arg_void else u'arg',
u'None',
u'range_start',
u'range_end'])
elif style == 'upload':
with self.emit_rust_function_def(
route_name,
[u'client: &impl {}'.format(auth_trait)]
+ ([] if arg_void else
[u'arg: &{}'.format(self._rust_type(fn.arg_data_type))])
+ [u'body: &[u8]'],
u'crate::Result<Result<{}, {}>>'.format(
self._rust_type(fn.result_data_type),
self._rust_type(fn.error_data_type)),
access=u'pub'):
self.emit_rust_fn_call(
u'crate::client_helpers::request',
[u'client',
endpoint,
u'crate::client_trait::Style::Upload',
u'"{}/{}"'.format(ns, name_with_version),
u'&()' if arg_void else u'arg',
u'Some(body)'])
else:
raise RuntimeError(u'ERROR: unknown route style: {}'.format(style))
self.emit()
def _emit_alias(self, alias):
alias_name = self.alias_name(alias)
self.emit(u'pub type {} = {};'.format(alias_name, self._rust_type(alias.data_type)))
# Serialization
def _impl_serde_for_struct(self, struct):
"""
Emit internal_deserialize() and possibly internal_deserialize_opt().
internal_deserialize[_opt] takes a map and deserializes it into the struct. It reads the
fields in whatever order; missing fields will be given their default value, or an error
returned if they have no default. Errors will also be raised if a field is present more
than once.
The _opt deserializer returns a None if it reads exactly zero map keys, and is used for
cases where the JSON has a tag, but omits all the fields to signify a null value. It is
only emitted for types which have at least one required field, because if all fields are
optional, there's no way to differentiate between a null value and one where all fields
are default.
"""
type_name = self.struct_name(struct)
field_list_name = u'{}_FIELDS'.format(fmt_shouting_snake(struct.name))
self.generate_multiline_list(
list(u'"{}"'.format(field.name) for field in struct.all_fields),
before='const {}: &[&str] = &'.format(field_list_name),
after=';',
delim=(u'[', u']'))
# Only emit the _opt deserializer if there are required fields.
optional = len(struct.all_required_fields) > 0
with self._impl_struct(struct):
if optional:
# Convenience wrapper around _opt for the more common case where the struct is
# NOT optional.
with self.emit_rust_function_def(
u'internal_deserialize<\'de, V: ::serde::de::MapAccess<\'de>>',
[u'map: V'],
u'Result<{}, V::Error>'.format(type_name),
access=u'pub(crate)'):
self.emit(u'Self::internal_deserialize_opt(map, false)'
u'.map(Option::unwrap)')
self.emit()
else:
self.emit(u'// no _opt deserializer')
with self.emit_rust_function_def(
(u'internal_deserialize_opt' if optional else u'internal_deserialize')
+ u'<\'de, V: ::serde::de::MapAccess<\'de>>',
[u'mut map: V']
+ ([u'optional: bool'] if optional else []),
(u'Result<Option<{}>, V::Error>' if optional else u'Result<{}, V::Error>')
.format(type_name),
access=u'pub(crate)'):
if len(struct.all_fields) == 0:
self.emit(u'// ignore any fields found; none are presently recognized')
self.emit(u'crate::eat_json_fields(&mut map)?;')
if optional:
self.emit(u'Ok(None)')
else:
self.emit(u'Ok({} {{}})'.format(type_name))
else:
for field in struct.all_fields:
self.emit(u'let mut field_{} = None;'.format(self.field_name(field)))
if optional:
self.emit(u'let mut nothing = true;')
with self.block(u'while let Some(key) = map.next_key::<&str>()?'):
if optional:
self.emit(u'nothing = false;')
with self.block(u'match key'):
for field in struct.all_fields:
field_name = self.field_name(field)
with self.block(u'"{}" =>'.format(field.name)):
with self.block(u'if field_{}.is_some()'.format(field_name)):
self.emit(u'return Err(::serde::de::Error::duplicate_field('
u'"{}"));'
.format(field.name))
self.emit(u'field_{} = Some(map.next_value()?);'
.format(field_name))
with self.block(u'_ =>'):
self.emit(u'// unknown field allowed and ignored')
self.emit(u'map.next_value::<::serde_json::Value>()?;')
if optional:
with self.block(u'if optional && nothing'):
self.emit(u'return Ok(None);')
with self.block(u'let result = {}'.format(type_name), delim=(u'{', u'};')):
for field in struct.all_fields:
field_name = self.field_name(field)
if isinstance(field.data_type, ir.Nullable):
self.emit(u'{}: field_{},'.format(field_name, field_name))
elif field.has_default:
default_value = self._default_value(field)
if isinstance(field.data_type, ir.String) \
and not field.default:
self.emit(u'{}: field_{}.unwrap_or_else(String::new),'
.format(field_name, field_name))
elif (ir.is_primitive_type(ir.unwrap_aliases(field.data_type)[0])
# Also, as a rough but effective heuristic, consider values
# that have no parentheses in them to be "trivial", and
# don't enclose them in a closure. This avoids running
# afoul of the clippy::unnecessary_lazy_evaluations lint.
or not "(" in default_value):
self.emit(u'{}: field_{}.unwrap_or({}),'
.format(field_name,
field_name,
default_value))
else:
self.emit(u'{}: field_{}.unwrap_or_else(|| {}),'
.format(field_name,
field_name,
default_value))
else:
self.emit(u'{}: field_{}.ok_or_else(|| '
u'::serde::de::Error::missing_field("{}"))?,'
.format(field_name, field_name, field.name))
if optional:
self.emit(u'Ok(Some(result))')
else:
self.emit(u'Ok(result)')
if struct.all_fields:
self.emit()
with self.emit_rust_function_def(
u'internal_serialize<S: ::serde::ser::Serializer>',
[u'&self', u's: &mut S::SerializeStruct'],
u'Result<(), S::Error>',
access=u'pub(crate)'):
self.emit(u'use serde::ser::SerializeStruct;')
self.generate_multiline_list(
list(u's.serialize_field("{}", &self.{})'
.format(field.name, self.field_name(field))
for field in struct.all_fields),
delim=(u'', u''),
sep='?;',
skip_last_sep=True)
self.emit()
with self._impl_deserialize(self.struct_name(struct)):
self.emit(u'// struct deserializer')
self.emit(u'use serde::de::{MapAccess, Visitor};')
self.emit(u'struct StructVisitor;')
with self.block(u'impl<\'de> Visitor<\'de> for StructVisitor'):
self.emit(u'type Value = {};'.format(type_name))
with self.emit_rust_function_def(
u'expecting',
[u'&self', u'f: &mut ::std::fmt::Formatter<\'_>'],
u'::std::fmt::Result'):
self.emit(u'f.write_str("a {} struct")'.format(struct.name))
with self.emit_rust_function_def(
u'visit_map<V: MapAccess<\'de>>',
[u'self', u'map: V'],
u'Result<Self::Value, V::Error>'):
self.emit(u'{}::internal_deserialize(map)'.format(type_name))
self.emit(u'deserializer.deserialize_struct("{}", {}, StructVisitor)'
.format(struct.name,
field_list_name))
self.emit()
with self._impl_serialize(type_name):
self.emit(u'// struct serializer')
self.emit(u'use serde::ser::SerializeStruct;')
if not struct.all_fields:
self.emit(u'serializer.serialize_struct("{}", 0)?.end()'.format(struct.name))
else:
self.emit(u'let mut s = serializer.serialize_struct("{}", {})?;'
.format(struct.name,
len(struct.all_fields)))
self.emit(u'self.internal_serialize::<S>(&mut s)?;')
self.emit(u's.end()')
self.emit()
def _impl_serde_for_polymorphic_struct(self, struct):
type_name = self.enum_name(struct)
with self._impl_deserialize(type_name):
self.emit(u'// polymorphic struct deserializer')
self.emit(u'use serde::de::{self, MapAccess, Visitor};')
self.emit(u'struct EnumVisitor;')
with self.block(u'impl<\'de> Visitor<\'de> for EnumVisitor'):
self.emit(u'type Value = {};'.format(type_name))
with self.emit_rust_function_def(
u'expecting',
[u'&self', u'f: &mut ::std::fmt::Formatter<\'_>'],
u'::std::fmt::Result'):
self.emit(u'f.write_str("a {} structure")'.format(struct.name))
with self.emit_rust_function_def(
u'visit_map<V: MapAccess<\'de>>',
[u'self', u'mut map: V'],
u'Result<Self::Value, V::Error>'):
with self.block(u'let tag = match map.next_key()?', after=';'):
self.emit(u'Some(".tag") => map.next_value()?,')
self.emit(u'_ => return | |
time.strftime("{0} {1}".format(self.date_format,
self.time_format
),
alert_expires_time.timetuple()
)
alerts_states_list.append({'key': u"{0}{1}".format('alertExpires', alert_counter),
'value': u"{0}".format(alert_expires)})
# ================================ Alert Info =================================
alerts_states_list.append({'key': u"{0}{1}".format('alertDescription', alert_counter),
'value': u"{0}".format(alert_array[alert][0])}
)
alerts_states_list.append({'key': u"{0}{1}".format('alertRegions', alert_counter),
'value': u"{0}".format(alert_array[alert][2])}
)
alerts_states_list.append({'key': u"{0}{1}".format('alertSeverity', alert_counter),
'value': u"{0}".format(alert_array[alert][3])}
)
alerts_states_list.append({'key': u"{0}{1}".format('alertTitle', alert_counter),
'value': u"{0}".format(alert_array[alert][5])}
)
alerts_states_list.append({'key': u"{0}{1}".format('alertUri', alert_counter),
'value': u"{0}".format(alert_array[alert][6])}
)
alert_counter += 1
# Write alert to the log?
# Sample:
# Patchy freezing drizzle is expected this morning, possibly mixed with light snow showers or
# flurries. With temperatures in the lower 30s, any freezing drizzle could cause patchy icy
# conditions on untreated roadways. Motorists are advised to check for the latest forecasts and
# check road conditions before driving. Temperatures will rise above freezing in many areas by
# midday.
if alerts_logging and not alerts_suppressed:
alert_text = textwrap.wrap(alert_array[alert][0], 120)
alert_text_wrapped = u""
for _ in alert_text:
alert_text_wrapped += u"{0}\n".format(_)
self.logger.info(u"\n{0}".format(alert_text_wrapped))
alerts_states_list.append({'key': 'alertCount', 'value': len(alert_array)})
dev.updateStatesOnServer(alerts_states_list)
except Exception:
self.Fogbert.pluginErrorHandler(sub_error=traceback.format_exc())
self.logger.error(u"Problem parsing weather alert data.")
alerts_states_list.append({'key': 'onOffState', 'value': False, 'uiValue': u" "})
dev.updateStateImageOnServer(indigo.kStateImageSel.SensorOff)
# =============================================================================
    def parse_astronomy_data(self, dev):
        """
        Parse astronomy data to devices

        The parse_astronomy_data() method takes astronomy data and parses it to device
        states. See Dark Sky API for value meaning.

        All states are accumulated into a single list and pushed to the server in one
        updateStatesOnServer() call; on any failure the device's onOffState is turned
        off instead.

        -----

        :param indigo.Device dev:
        """
        astronomy_states_list = []

        try:
            # Device location keys into the shared weather data cache.
            location = (dev.pluginProps['latitude'], dev.pluginProps['longitude'])
            weather_data = self.masterWeatherDict[location]
            astronomy_data = weather_data['daily']['data']
            preferred_time = dev.pluginProps.get('time_zone', 'time_here')
            timezone = pytz.timezone(zone=weather_data['timezone'])

            # NOTE(review): nested_lookup presumably returns the first matching value
            # found under the given key(s) within the (list-shaped) daily data — confirm.
            epoch = self.nested_lookup(obj=weather_data, keys=('currently', 'time'))
            sun_rise = self.nested_lookup(obj=astronomy_data, keys=('sunriseTime',))
            sun_set = self.nested_lookup(obj=astronomy_data, keys=('sunsetTime',))
            moon_phase = float(self.nested_lookup(obj=astronomy_data, keys=('moonPhase',)))

            # ============================= Observation Epoch =============================
            current_observation_epoch = int(epoch)
            astronomy_states_list.append({'key': 'currentObservationEpoch', 'value': current_observation_epoch})

            # ============================= Observation Time ==============================
            current_observation_time = u"Last updated on {0}".format(time.strftime('%b %d, %H:%M %p %z', time.localtime(current_observation_epoch)))
            astronomy_states_list.append({'key': 'currentObservation', 'value': current_observation_time})

            # ============================= Observation 24hr ==============================
            current_observation_24hr = time.strftime("{0} {1}".format(self.date_format, self.time_format), time.localtime(current_observation_epoch))
            astronomy_states_list.append({'key': 'currentObservation24hr', 'value': current_observation_24hr})

            # ============================= Sunrise / Sunset ==============================
            # Local Time (server timezone)
            if preferred_time == "time_here":
                sunrise_local = time.localtime(int(sun_rise))
                sunrise_local = time.strftime("{0} {1}".format(self.date_format, self.time_format), sunrise_local)
                astronomy_states_list.append({'key': 'sunriseTime', 'value': sunrise_local})
                # The [11:16] slice assumes the formatted string carries "HH:MM" at
                # that position — TODO confirm this holds for all date_format values.
                astronomy_states_list.append({'key': 'sunriseTimeShort', 'value': sunrise_local[11:16]})
                sunset_local = time.localtime(int(sun_set))
                sunset_local = time.strftime("{0} {1}".format(self.date_format, self.time_format), sunset_local)
                astronomy_states_list.append({'key': 'sunsetTime', 'value': sunset_local})
                astronomy_states_list.append({'key': 'sunsetTimeShort', 'value': sunset_local[11:16]})

            # Location Time (location timezone)
            elif preferred_time == "time_there":
                # Build UTC-aware datetimes, then let pytz convert them into the
                # device location's timezone via normalize().
                sunrise_aware = dt.datetime.fromtimestamp(int(sun_rise), tz=pytz.utc)
                sunset_aware = dt.datetime.fromtimestamp(int(sun_set), tz=pytz.utc)
                sunrise_normal = timezone.normalize(dt=sunrise_aware)
                sunset_normal = timezone.normalize(dt=sunset_aware)
                sunrise_local = time.strftime("{0} {1}".format(self.date_format, self.time_format), sunrise_normal.timetuple())
                astronomy_states_list.append({'key': 'sunriseTime', 'value': sunrise_local})
                astronomy_states_list.append({'key': 'sunriseTimeShort', 'value': sunrise_local[11:16]})
                sunset_local = time.strftime("{0} {1}".format(self.date_format, self.time_format), sunset_normal.timetuple())
                astronomy_states_list.append({'key': 'sunsetTime', 'value': sunset_local})
                astronomy_states_list.append({'key': 'sunsetTimeShort', 'value': sunset_local[11:16]})

            # ================================ Moon Phase =================================
            # Float
            moon_phase_new, moon_phase_ui = self.fix_corrupted_data(val=moon_phase * 100)
            moon_phase_ui = self.ui_format_percentage(dev=dev, val=moon_phase_ui)
            astronomy_states_list.append({'key': 'moonPhase', 'value': moon_phase_new, 'uiValue': moon_phase_ui})

            # ============================== Moon Phase Icon ==============================
            # Integer
            moon_phase_icon, moon_phase_icon_ui = self.fix_corrupted_data(val=int(moon_phase_new))
            moon_phase_icon_ui = self.ui_format_percentage(dev=dev, val=moon_phase_icon_ui)
            astronomy_states_list.append({'key': 'moonPhaseIcon', 'value': moon_phase_icon, 'uiValue': moon_phase_icon_ui})

            # ============================== Moon Phase Name ==============================
            # String
            #
            # moonPhase optional, only on daily
            # The fractional part of the lunation number during the given day: a value of
            # 0 corresponds to a new moon, 0.25 to a first quarter moon, 0.5 to a full
            # moon, and 0.75 to a last quarter moon. (The ranges in between these represent
            # waxing crescent, waxing gibbous, waning gibbous, and waning crescent moons,
            # respectively.) Sources: https://darksky.net/dev/docs and
            # https://en.wikipedia.org/wiki/Lunar_phase#Phases_of_the_Moon
            #
            # The conditions below are mutually exclusive and jointly cover [0, 1],
            # so dict iteration order does not affect the result.
            criteria = {
                'New': moon_phase == 0,
                'Waxing Crescent': 0 < moon_phase < .25,
                'First Quarter': moon_phase == .25,
                'Waxing Gibbous': .25 < moon_phase < .50,
                'Full': moon_phase == .50,
                'Waning Gibbous': .50 < moon_phase < .75,
                'Last Quarter': moon_phase == .75,
                'Waning Crescent': .75 < moon_phase,
            }

            # for/else: the else branch only runs if no criterion matched (no break).
            for k, v in criteria.items():
                if v:
                    astronomy_states_list.append({'key': 'moonPhaseName', 'value': k})
                    break
            else:
                astronomy_states_list.append({'key': 'moonPhaseName', 'value': u'Unknown'})

            # Refresh the device's address display with the configured coordinates.
            new_props = dev.pluginProps
            new_props['address'] = u"{0:.5f}, {1:.5f}".format(float(dev.pluginProps.get('latitude', 'lat')), float(dev.pluginProps.get('longitude', 'long')))
            dev.replacePluginPropsOnServer(new_props)

            astronomy_states_list.append({'key': 'onOffState', 'value': True, 'uiValue': u" "})
            dev.updateStatesOnServer(astronomy_states_list)
            dev.updateStateImageOnServer(indigo.kStateImageSel.SensorOn)

        except Exception:
            # Any parse failure marks the device off rather than crashing the plugin.
            self.Fogbert.pluginErrorHandler(sub_error=traceback.format_exc())
            self.logger.error(u"Problem parsing astronomy data.")
            dev.updateStateOnServer('onOffState', value=False, uiValue=u" ")
            dev.updateStateImageOnServer(indigo.kStateImageSel.SensorOff)
# =============================================================================
def parse_hourly_forecast_data(self, dev):
"""
Parse hourly forecast data to devices
The parse_hourly_forecast_data() method takes hourly weather forecast data and parses
it to device states. See Dark Sky API for value meaning.
-----
:param indigo.Device dev:
"""
hourly_forecast_states_list = []
try:
hour_temp = 0
location = (dev.pluginProps['latitude'], dev.pluginProps['longitude'])
weather_data = self.masterWeatherDict[location]
forecast_data = weather_data['hourly']['data']
preferred_time = dev.pluginProps.get('time_zone', 'time_here')
timezone = pytz.timezone(zone=weather_data['timezone'])
# ============================== Hourly Summary ===============================
hourly_forecast_states_list.append({'key': 'hourly_summary', 'value': self.masterWeatherDict[location]['hourly']['summary']})
# ============================= Observation Epoch =============================
current_observation_epoch = int(self.nested_lookup(weather_data, keys=('currently', 'time')))
hourly_forecast_states_list.append({'key': 'currentObservationEpoch', 'value': current_observation_epoch})
# ============================= Observation Time ==============================
current_observation_time = u"Last updated on {0}".format(time.strftime('%b %d, %H:%M %p %z', time.localtime(current_observation_epoch)))
hourly_forecast_states_list.append({'key': 'currentObservation', 'value': current_observation_time})
# ============================= Observation 24hr ==============================
current_observation_24hr = time.strftime("{0} {1}".format(self.date_format, self.time_format), time.localtime(current_observation_epoch))
hourly_forecast_states_list.append({'key': 'currentObservation24hr', 'value': current_observation_24hr})
forecast_counter = 1
for observation in forecast_data:
if forecast_counter <= 24:
cloud_cover = self.nested_lookup(observation, keys=('cloudCover',))
forecast_time = self.nested_lookup(observation, keys=('time',))
humidity = self.nested_lookup(observation, keys=('humidity',))
icon = self.nested_lookup(observation, keys=('icon',))
ozone = self.nested_lookup(observation, keys=('ozone',))
precip_intensity = self.nested_lookup(observation, keys=('precipIntensity',))
precip_probability = self.nested_lookup(observation, keys=('precipProbability',))
precip_type = self.nested_lookup(observation, keys=('precipType',))
pressure = self.nested_lookup(observation, keys=('pressure',))
summary = self.nested_lookup(observation, keys=('summary',))
temperature = self.nested_lookup(observation, keys=('temperature',))
uv_index = self.nested_lookup(observation, keys=('uvIndex',))
visibility = self.nested_lookup(observation, keys=('visibility',))
wind_bearing = self.nested_lookup(observation, keys=('windBearing',))
wind_gust = self.nested_lookup(observation, keys=('windGust',))
wind_speed = self.nested_lookup(observation, keys=('windSpeed',))
# Add leading zero to counter value for device state names 1-9.
if forecast_counter < 10:
fore_counter_text = u"0{0}".format(forecast_counter)
else:
fore_counter_text = forecast_counter
# ========================= Forecast Day, Epoch, Hour =========================
# Local Time (server timezone)
if preferred_time == "time_here":
local_time = time.localtime(float(forecast_time))
forecast_day_long = time.strftime('%A', local_time)
forecast_day_short = time.strftime('%a', local_time)
forecast_hour = time.strftime('%H:%M', local_time)
forecast_hour_ui = time.strftime(self.time_format, local_time)
hourly_forecast_states_list.append({'key': u"h{0}_day".format(fore_counter_text),
'value': forecast_day_long,
'uiValue': forecast_day_long
}
)
hourly_forecast_states_list.append({'key': u"h{0}_day_short".format(fore_counter_text),
'value': forecast_day_short,
'uiValue': forecast_day_short
}
)
hourly_forecast_states_list.append({'key': u"h{0}_epoch".format(fore_counter_text),
'value': forecast_time
}
)
hourly_forecast_states_list.append({'key': u"h{0}_hour".format(fore_counter_text),
'value': forecast_hour,
'uiValue': forecast_hour_ui
}
)
# Location Time (location timezone)
elif preferred_time == "time_there":
aware_time = dt.datetime.fromtimestamp(int(forecast_time), tz=pytz.utc)
forecast_day_long = timezone.normalize(aware_time).strftime("%A")
forecast_day_short = timezone.normalize(aware_time).strftime("%a")
forecast_hour = timezone.normalize(aware_time).strftime("%H:%M")
forecast_hour_ui = time.strftime(self.time_format, timezone.normalize(aware_time).timetuple())
zone = dt.datetime.fromtimestamp(forecast_time, timezone)
zone_tuple = zone.timetuple() # tuple
zone_posix = int(time.mktime(zone_tuple)) # timezone timestamp
hourly_forecast_states_list.append({'key': u"h{0}_day".format(fore_counter_text), 'value': forecast_day_long, 'uiValue': forecast_day_long})
hourly_forecast_states_list.append({'key': u"h{0}_day_short".format(fore_counter_text), 'value': forecast_day_short, 'uiValue': forecast_day_short})
hourly_forecast_states_list.append({'key': u"h{0}_epoch".format(fore_counter_text), 'value': zone_posix})
hourly_forecast_states_list.append({'key': u"h{0}_hour".format(fore_counter_text), 'value': forecast_hour, 'uiValue': forecast_hour_ui})
# ================================ Cloud Cover ================================
cloud_cover, cloud_cover_ui = self.fix_corrupted_data(val=cloud_cover * 100)
cloud_cover_ui = self.ui_format_percentage(dev=dev, val=cloud_cover_ui)
hourly_forecast_states_list.append({'key': u"h{0}_cloudCover".format(fore_counter_text), 'value': cloud_cover, 'uiValue': cloud_cover_ui})
# ================================= Humidity ==================================
humidity, humidity_ui = self.fix_corrupted_data(val=humidity * 100)
humidity_ui = self.ui_format_percentage(dev=dev, val=humidity_ui)
hourly_forecast_states_list.append({'key': u"h{0}_humidity".format(fore_counter_text), 'value': humidity, 'uiValue': humidity_ui})
# ============================= Precip Intensity ==============================
precip_intensity, precip_intensity_ui = self.fix_corrupted_data(val=precip_intensity)
precip_intensity_ui = self.ui_format_rain(dev=dev, val=precip_intensity_ui)
hourly_forecast_states_list.append({'key': u"h{0}_precipIntensity".format(fore_counter_text), 'value': precip_intensity, 'uiValue': precip_intensity_ui})
# ============================ Precip Probability =============================
precip_probability, precip_probability_ui = self.fix_corrupted_data(val=precip_probability * 100)
precip_probability_ui = self.ui_format_percentage(dev=dev, val=precip_probability_ui)
hourly_forecast_states_list.append({'key': u"h{0}_precipChance".format(fore_counter_text), 'value': precip_probability, 'uiValue': precip_probability_ui})
# =================================== Icon ====================================
hourly_forecast_states_list.append({'key': u"h{0}_icon".format(fore_counter_text), 'value': u"{0}".format(icon.replace('-', '_'))})
# =================================== Ozone ===================================
ozone, ozone_ui = self.fix_corrupted_data(val=ozone)
ozone_ui = self.ui_format_index(dev, val=ozone_ui)
hourly_forecast_states_list.append({'key': u"h{0}_ozone".format(fore_counter_text), 'value': ozone, 'uiValue': ozone_ui})
# ================================ Precip Type ================================
hourly_forecast_states_list.append({'key': u"h{0}_precipType".format(fore_counter_text), 'value': precip_type})
# ================================= Pressure ==================================
pressure, pressure_ui = self.fix_corrupted_data(val=pressure)
pressure_ui = self.ui_format_pressure(dev=dev, val=pressure_ui)
hourly_forecast_states_list.append({'key': u"h{0}_pressure".format(fore_counter_text), 'value': pressure, 'uiValue': pressure_ui})
# ================================== Summary ==================================
hourly_forecast_states_list.append({'key': u"h{0}_summary".format(fore_counter_text), 'value': summary})
# ================================ Temperature ================================
temperature, temperature_ui = self.fix_corrupted_data(val=temperature)
temperature_ui = self.ui_format_temperature(dev=dev, val=temperature_ui)
hourly_forecast_states_list.append({'key': u"h{0}_temperature".format(fore_counter_text), 'value': temperature, 'uiValue': temperature_ui})
if forecast_counter == int(dev.pluginProps.get('ui_display', '1')):
hour_temp = round(temperature)
# ================================= UV Index ==================================
uv_index, uv_index_ui = self.fix_corrupted_data(val=uv_index)
uv_index_ui = self.ui_format_index(dev, val=uv_index_ui)
hourly_forecast_states_list.append({'key': u"h{0}_uvIndex".format(fore_counter_text), 'value': uv_index, 'uiValue': uv_index_ui})
# =============================== Wind Bearing ================================
wind_bearing, wind_bearing_ui = self.fix_corrupted_data(val=wind_bearing)
# We don't need fractional wind speed values for the UI, so we try to fix that
# here. However, sometimes it comes through as "--" so we need to account for
# that, too.
try:
int(float(wind_bearing_ui))
except ValueError:
pass
hourly_forecast_states_list.append({'key': u"h{0}_windBearing".format(fore_counter_text), 'value': wind_bearing, 'uiValue': wind_bearing_ui})
# ============================= Wind Bearing Name =============================
wind_bearing_name = self.ui_format_wind_name(val=wind_bearing)
hourly_forecast_states_list.append({'key': u"h{0}_windBearingName".format(fore_counter_text), 'value': wind_bearing_name})
# ================================= Wind Gust =================================
wind_gust, wind_gust_ui = self.fix_corrupted_data(val=wind_gust)
wind_gust_ui = self.ui_format_wind(dev=dev, val=wind_gust_ui)
hourly_forecast_states_list.append({'key': u"h{0}_windGust".format(fore_counter_text), 'value': wind_gust, 'uiValue': wind_gust_ui})
# ================================ Wind Speed =================================
wind_speed, wind_speed_ui = self.fix_corrupted_data(val=wind_speed)
wind_speed_ui = self.ui_format_wind(dev=dev, val=wind_speed_ui)
hourly_forecast_states_list.append({'key': u"h{0}_windSpeed".format(fore_counter_text), 'value': wind_speed, 'uiValue': wind_speed_ui})
# ================================ Visibility =================================
visibility, visibility_ui = self.fix_corrupted_data(val=visibility)
visibility_ui = self.ui_format_distance(dev, val=visibility_ui)
hourly_forecast_states_list.append({'key': u"h{0}_visibility".format(fore_counter_text), 'value': visibility, 'uiValue': visibility_ui})
forecast_counter += 1
new_props = dev.pluginProps
new_props['address'] = u"{0:.5f}, {1:.5f}".format(float(dev.pluginProps.get('latitude', 'lat')), float(dev.pluginProps.get('longitude', 'long')))
dev.replacePluginPropsOnServer(new_props)
display_value = u"{0}{1}".format(int(hour_temp), dev.pluginProps['temperatureUnits'])
hourly_forecast_states_list.append({'key': 'onOffState', | |
# -*- coding: utf-8 -*-
import re
from csv import writer
from datetime import datetime
from flask import Blueprint
from flask import flash, redirect, render_template, request, url_for, abort, \
session
from flask_babel import _
from flask_login import current_user, login_user, logout_user, login_required
from io import StringIO
from app import db, login_manager, get_locale
from app.decorators import require_role, response_headers
from app.exceptions.base import ResourceNotFoundException, \
AuthorizationException, ValidationException, BusinessRuleException
from app.forms import init_form
from app.forms.user import (EditUserForm, EditUserInfoForm, SignUpForm,
SignInForm, ResetPasswordForm, RequestPassword,
ChangePasswordForm, EditUvALinkingForm)
from app.models.activity import Activity
from app.models.custom_form import CustomFormResult, CustomForm
from app.models.education import Education
from app.models.user import User
from app.roles import Roles
from app.service import password_reset_service, user_service, \
role_service, file_service, saml_service
from app.utils import copernica
from app.utils.google import HttpError
from app.utils.user import UserAPI
blueprint = Blueprint('user', __name__)
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login hook: resolve a session's user ID to a user record."""
    return user_service.get_user_by_id(user_id)
def view_single(user_id):
    """
    View user for admins and edit for admins and users.

    User is passed based on routes below.
    """
    user = user_service.get_user_by_id(user_id)
    user.avatar = UserAPI.avatar(user)
    user.groups = UserAPI.get_groups_for_user_id(user)
    user.groups_amount = len(user.groups)

    # Request a larger gravatar rendition for the profile page.
    if "gravatar" in user.avatar:
        user.avatar = user.avatar + "&s=341"

    # Get all activity entrees from these forms, order by start_time of
    # activity.
    # BUG FIX: the conditions were previously chained with the Python `and`
    # operator, which evaluates the truthiness of the SQLAlchemy expressions
    # instead of building a SQL AND — effectively applying only one of the
    # three conditions. Passing them as separate filter() arguments ANDs
    # them in SQL.
    activities = Activity.query.join(CustomForm).join(CustomFormResult). \
        filter(CustomFormResult.owner_id == user_id,
               CustomForm.id == CustomFormResult.form_id,
               Activity.form_id == CustomForm.id)

    user.activities_amount = activities.count()

    # Upcoming activities, soonest first.
    new_activities = activities \
        .filter(Activity.end_time > datetime.today()).distinct() \
        .order_by(Activity.start_time)

    # Past activities, most recent first.
    old_activities = activities \
        .filter(Activity.end_time < datetime.today()).distinct() \
        .order_by(Activity.start_time.desc())

    can_write = role_service.user_has_role(current_user, Roles.USER_WRITE)

    return render_template('user/view_single.htm', user=user,
                           new_activities=new_activities,
                           old_activities=old_activities,
                           can_write=can_write)
@blueprint.route('/users/view/self/', methods=['GET'])
@login_required
def view_single_self():
    """Show the profile page of the currently signed-in user."""
    return view_single(user_id=current_user.id)
@blueprint.route('/users/view/<int:user_id>/', methods=['GET'])
@require_role(Roles.USER_READ)
@login_required
def view_single_user(user_id):
    """Show the profile page of an arbitrary user (requires USER_READ)."""
    return view_single(user_id)
@blueprint.route('/users/remove_avatar/<int:user_id>/', methods=['DELETE'])
@login_required
@require_role(Roles.USER_WRITE)
def remove_avatar(user_id=None):
    """Delete the avatar of the given user.

    Returns 403 unless the caller is acting on their own account; 200 on
    success.
    """
    # FIX: authorize before touching the database. The previous version
    # fetched the user first, so an unauthorized caller could probe whether
    # a user ID exists (a lookup failure surfaced before the 403).
    if current_user.is_anonymous or current_user.id != user_id:
        return "", 403

    user = user_service.get_user_by_id(user_id)
    user_service.remove_avatar(user.id)
    return "", 200
def edit(user_id, form_cls):
    """
    Create user for admins and edit for admins and users.

    User and form type are passed based on routes below.

    :param user_id: ID of the user to edit, or a falsy value to create one.
    :param form_cls: WTForms class to use (admin vs. self-service form).
    """
    if user_id:
        user = user_service.get_user_by_id(user_id)
        user.avatar = user_service.user_has_avatar(user_id)
    else:
        # Fresh, unsaved user object; its id is 0 until committed.
        user = User()

    form = init_form(form_cls, obj=user)
    form.new_user = user.id == 0

    # Add education.
    educations = Education.query.all()
    form.education_id.choices = [(e.id, e.name) for e in educations]

    def edit_page():
        # Re-render the edit form (used for both GET and validation errors).
        is_admin = role_service.user_has_role(current_user, Roles.USER_WRITE)
        return render_template('user/edit.htm', form=form, user=user,
                               is_admin=is_admin)

    if form.validate_on_submit():
        # Only new users need a unique email.
        query = User.query.filter(User.email == form.email.data)
        if user_id:
            query = query.filter(User.id != user_id)

        if query.count() > 0:
            # NOTE(review): "exist" should read "exists", but this literal is a
            # flask-babel msgid — fixing it requires updating the translation
            # catalogs in the same change.
            flash(_('A user with this e-mail address already exist.'),
                  'danger')
            return edit_page()

        # Because the user model is constructed to have an ID of 0 when it is
        # initialized without an email adress provided, reinitialize the user
        # with a default string for email adress, so that it will get a unique
        # ID when committed to the database.
        if not user_id:
            user = User('_')

        # TODO Move this into the service call.
        try:
            user.update_email(form.email.data.strip())
        except HttpError as e:
            if e.resp.status == 404:
                # The Google directory has no account for this address.
                flash(_('According to Google this email does not exist. '
                        'Please use an email that does.'), 'danger')
                return edit_page()
            raise e

        # Note: student id is updated separately.
        user.first_name = form.first_name.data.strip()
        user.last_name = form.last_name.data.strip()
        user.locale = form.locale.data

        # Membership/administrative flags may only be set by admins; the
        # self-service form does not expose these fields.
        if role_service.user_has_role(current_user, Roles.USER_WRITE):
            user.has_paid = form.has_paid.data
            user.honorary_member = form.honorary_member.data
            user.favourer = form.favourer.data
            user.disabled = form.disabled.data
            user.alumnus = form.alumnus.data

        user.education_id = form.education_id.data
        user.birth_date = form.birth_date.data
        user.study_start = form.study_start.data
        user.receive_information = form.receive_information.data

        user.phone_nr = form.phone_nr.data.strip()
        user.address = form.address.data.strip()
        user.zip = form.zip.data.strip()
        user.city = form.city.data.strip()
        user.country = form.country.data.strip()

        db.session.add(user)
        db.session.commit()

        avatar = request.files.get('avatar')
        if avatar:
            user_service.set_avatar(user.id, avatar)

        # Sync the user to the Copernica mailing system; new users are also
        # subscribed. (Msgids below contain "succesfully" — see note above
        # about fixing typos together with the translation catalogs.)
        if user_id:
            copernica.update_user(user)
            flash(_('Profile succesfully updated'))
        else:
            copernica.update_user(user, subscribe=True)
            flash(_('Profile succesfully created'))

        if current_user.id == user_id:
            return redirect(url_for('user.view_single_self'))
        else:
            return redirect(url_for('user.view_single_user', user_id=user.id))

    return edit_page()
@blueprint.route('/users/edit/<int:user_id>/student-id-linking',
                 methods=['GET', 'POST'])
@login_required
@require_role(Roles.USER_WRITE)
def edit_student_id_linking(user_id):
    """Edit the student ID linked to a user, confirmed or unconfirmed."""
    user = user_service.get_user_by_id(user_id)
    form = EditUvALinkingForm(request.form, obj=user)

    # Fix student_id_confirmed not being set...
    if request.method == 'GET':
        form.student_id_confirmed.data = user.student_id_confirmed

    def render_page():
        # Re-render the form, keeping any validation errors visible.
        return render_template('user/edit_student_id.htm',
                               user=user, form=form)

    if not form.validate_on_submit():
        return render_page()

    if not form.student_id.data:
        # An empty field means: unlink the student ID entirely.
        user_service.remove_student_id(user)
    elif form.student_id_confirmed.data:
        # A confirmed student ID must be unique across accounts.
        other_user = user_service.find_user_by_student_id(
            form.student_id.data)
        if other_user is not None and other_user != user:
            error = _('The UvA account corresponding with this student ID '
                      'is already linked to another user '
                      '(%(name)s - %(email)s). Please unlink the account '
                      'from the other user first before linking it '
                      'to this user.', name=other_user.name,
                      email=other_user.email)
            form.student_id_confirmed.errors.append(error)
            return render_page()

        user_service.set_confirmed_student_id(user, form.student_id.data)
    else:
        user_service.set_unconfirmed_student_id(user, form.student_id.data)

    flash(_('Student ID information saved.'), 'success')
    return redirect(url_for('.edit_user', user_id=user_id))
@blueprint.route('/users/edit/self/', methods=['GET', 'POST'])
@login_required
def edit_self():
    # Self-service profile edit: same edit() flow as the admin view, but
    # always targets the signed-in user and uses the restricted
    # EditUserInfoForm.
    return edit(current_user.id, EditUserInfoForm)
@blueprint.route('/users/create/', methods=['GET', 'POST'])
@blueprint.route('/users/edit/<int:user_id>', methods=['GET', 'POST'])
@login_required
@require_role(Roles.USER_WRITE)
def edit_user(user_id=None):
    # Administrative create (user_id is None on the /users/create/ route)
    # or edit of an arbitrary user, with the full EditUserForm.
    # Requires the USER_WRITE role.
    return edit(user_id, EditUserForm)
@blueprint.route('/sign-up/', methods=['GET', 'POST'])
@response_headers({"X-Frame-Options": "SAMEORIGIN"})
def sign_up():
    # Landing page letting the visitor choose between the manual and the
    # SAML (UvA account) sign-up flows.
    return render_template('user/sign_up_chooser.htm')
@blueprint.route('/sign-up/manual/', methods=['GET', 'POST'])
@response_headers({"X-Frame-Options": "SAMEORIGIN"})
def sign_up_manual():
    """Manual (non-SAML) registration form.

    Renders the sign-up form and, on a valid submission, registers the
    new user, logs them in and redirects to the home page. If a user
    with the submitted e-mail already exists, the form is re-shown with
    an error flash.
    """
    # Redirect the user to the index page if he or she has been authenticated
    # already.
    if current_user.is_authenticated:
        return redirect(url_for('home.home'))
    form = SignUpForm(request.form)
    # Fill the education dropdown from the database.
    educations = Education.query.all()
    form.education_id.choices = [(e.id, e.name) for e in educations]
    if form.validate_on_submit():
        try:
            user = user_service.register_new_user(
                email=form.email.data,
                # Fix: a redacted placeholder had been left here in place
                # of the submitted password (a syntax error). Restored to
                # mirror the SAML sign-up flow below.
                password=form.password.data,
                first_name=form.first_name.data,
                last_name=form.last_name.data,
                student_id=form.student_id.data,
                education_id=form.education_id.data,
                birth_date=form.birth_date.data,
                study_start=form.study_start.data,
                receive_information=form.receive_information.data,
                phone_nr=form.phone_nr.data,
                address=form.address.data,
                zip_=form.zip.data,
                city=form.city.data,
                country=form.country.data,
                locale=get_locale())
            login_user(user)
            flash(_('Welcome %(name)s! Your profile has been succesfully '
                    'created and you have been logged in!',
                    name=current_user.first_name), 'success')
            return redirect(url_for('home.home'))
        except BusinessRuleException:
            flash(_('A user with this e-mail address already exists'),
                  'danger')
    return render_template('user/sign_up.htm', form=form)
@blueprint.route('/sign-up/process-saml-response/', methods=['GET', 'POST'])
@saml_service.ensure_data_cleared
def sign_up_saml_response():
    """Handle the SAML-based (UvA account) sign-up flow.

    Reached after the identity provider posts back. Manages a 'sign up
    session' in which the form is pre-filled from SAML attributes and the
    student ID is pinned, then registers the user on a valid submission.
    """
    redir_url = saml_service.get_redirect_url(url_for('home.home'))
    # Redirect the user to the index page if he or she has been authenticated
    # already.
    if current_user.is_authenticated:
        # End the sign up session when it is still there somehow
        if saml_service.sign_up_session_active():
            saml_service.end_sign_up_session()
        return redirect(redir_url)
    if saml_service.sign_up_session_active():
        # Delete the old sign up session when
        # the user re-authenticates
        if saml_service.user_is_authenticated():
            saml_service.end_sign_up_session()
        # Otherwise, refresh the timeout timestamp of the session
        else:
            saml_service.update_sign_up_session_timestamp()
    form = SignUpForm(request.form)
    # Add education.
    educations = Education.query.all()
    form.education_id.choices = [(e.id, e.name) for e in educations]
    if not saml_service.sign_up_session_active():
        # No session yet: validate the fresh SAML authentication before
        # starting one.
        if not saml_service.user_is_authenticated():
            flash(_('Authentication failed. Please try again.'), 'danger')
            return redirect(redir_url)
        if not saml_service.user_is_student():
            flash(_('You must authenticate with a student '
                    'UvA account to register.'), 'danger')
            return redirect(redir_url)
        if saml_service.uid_is_linked_to_other_user():
            flash(_('There is already an account linked to this UvA account. '
                    'If you are sure that this is a mistake please send '
                    'an email to the board.'), 'danger')
            return redirect(redir_url)
        # Start a new sign up session and pre-fill the form
        saml_service.start_sign_up_session()
        saml_service.fill_sign_up_form_with_saml_attributes(
            form)
    # When we encounter a GET request but a session is already active,
    # this means that the user did a refresh without submitting the form.
    # We redirect him/her to the SAML sign up, since otherwise all
    # pre-filled data would be gone.
    elif request.method == 'GET':
        return redirect(url_for('saml.sign_up'))
    else:
        # Make sure that it is not possible to change the student id
        form.student_id.data = \
            saml_service.get_sign_up_session_linking_student_id()
    if form.validate_on_submit():
        try:
            user = user_service.register_new_user(
                email=form.email.data,
                password=form.password.data,
                first_name=form.first_name.data,
                last_name=form.last_name.data,
                student_id=form.student_id.data,
                education_id=form.education_id.data,
                birth_date=form.birth_date.data,
                study_start=form.study_start.data,
                receive_information=form.receive_information.data,
                phone_nr=form.phone_nr.data,
                address=form.address.data,
                zip_=form.zip.data,
                city=form.city.data,
                country=form.country.data,
                locale=get_locale(),
                # The student ID came from SAML, so link it as confirmed.
                link_student_id=True)
            login_user(user)
            saml_service.end_sign_up_session()
            flash(_('Welcome %(name)s! Your profile has been succesfully '
                    'created and you have been logged in!',
                    name=current_user.first_name), 'success')
            return redirect(redir_url)
        except BusinessRuleException:
            flash(_('A user with this e-mail address already exists'),
                  'danger')
    return render_template('user/sign_up.htm', form=form,
                           disable_student_id=True)
@blueprint.route('/sign-in/', methods=['GET', 'POST'])
@response_headers({"X-Frame-Options": "SAMEORIGIN"})
def sign_in():
    """Email/password sign-in form.

    On success, redirects to the ?next= target (site-relative paths
    only), the referring page, the page access was originally denied
    from, or home — in that order of preference.
    """
    # Redirect the user to the index page if he or she has been authenticated
    # already.
    if current_user.is_authenticated:
        return redirect(url_for('home.home'))
    form = SignInForm(request.form)
    if form.validate_on_submit():
        try:
            user = user_service.get_user_by_login(form.email.data,
                                                  form.password.data)
            # Notify the login manager that the user has been signed in.
            login_user(user)
            # Only follow ?next= targets that are site-relative paths,
            # to avoid open redirects.
            next_ = request.args.get("next", '')
            if next_ and next_.startswith("/"):
                return redirect(next_)
            # If referer is empty for some reason (browser policy, user entered
            # address in address bar, etc.), use empty string
            referer = request.headers.get('Referer', '')
            # 'denied' means the user arrived from the sign-in page itself,
            # so redirecting back to the referer would loop.
            denied = (re.match(
                r'(?:https?://[^/]+)%s$' % (url_for('user.sign_in')),
                referer) is not None)
            denied_from = session.get('denied_from')
            if not denied:
                if referer:
                    return redirect(referer)
            elif denied_from:
                # Bounced here by an access-denied: return to that page.
                return redirect(denied_from)
            return redirect(url_for('home.home'))
        except ResourceNotFoundException:
            flash(_(
                'It appears that this account does not exist. Try again, or '
                'contact the website administration at '
                'ict (at) svia (dot) nl.'))
        except AuthorizationException:
            flash(_('Your account has been disabled, you are not allowed '
                    'to log in'), 'danger')
        except ValidationException:
            flash(_('The password you entered appears to be incorrect.'),
                  'danger')
    return render_template('user/sign_in.htm', form=form,
                           show_uvanetid_login=True)
@blueprint.route('/sign-in/process-saml-response/', methods=['GET'])
@response_headers({"X-Frame-Options": "SAMEORIGIN"})
def sign_in_saml_response():
has_redirected = False
redir_url = saml_service.get_redirect_url(url_for('home.home'))
try:
# Redirect the user to the index page if he or she has been
# authenticated already.
if current_user.is_authenticated:
return redirect(redir_url)
if not saml_service.user_is_authenticated():
flash(_('Authentication failed. Please try again.'), 'danger')
return redirect(redir_url)
try:
user = saml_service.get_user_by_uid(needs_confirmed=False)
if user.student_id_confirmed:
login_user(user)
else:
has_redirected = True
return | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''astrokep.py - <NAME> (<EMAIL>) - 05/2016
Contains various useful tools for analyzing Kepler light curves.
'''
#############
## LOGGING ##
#############
import logging
from datetime import datetime
from traceback import format_exc
# setup a logger
LOGGER = None
LOGMOD = __name__
DEBUG = False
def set_logger_parent(parent_name):
    """Re-bind this module's LOGGER as a child of *parent_name*'s logger."""
    global LOGGER
    LOGGER = logging.getLogger('%s.%s' % (parent_name, LOGMOD))
def LOGDEBUG(message):
    """Log *message* at debug level via LOGGER; if no LOGGER is set up,
    fall back to printing, but only when the module DEBUG flag is on."""
    if LOGGER:
        LOGGER.debug(message)
    elif DEBUG:
        stamp = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
        print('[%s - DBUG] %s' % (stamp, message))
def LOGINFO(message):
    """Log *message* at info level via LOGGER, or print it if no LOGGER."""
    if LOGGER:
        LOGGER.info(message)
        return
    stamp = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
    print('[%s - INFO] %s' % (stamp, message))
def LOGERROR(message):
    """Log *message* at error level via LOGGER, or print it if no LOGGER."""
    if LOGGER:
        LOGGER.error(message)
        return
    stamp = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
    print('[%s - ERR!] %s' % (stamp, message))
def LOGWARNING(message):
    """Log *message* at warning level via LOGGER, or print it if no LOGGER."""
    if LOGGER:
        LOGGER.warning(message)
        return
    stamp = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
    print('[%s - WRN!] %s' % (stamp, message))
def LOGEXCEPTION(message):
    """Log *message* plus the active traceback via LOGGER, or print both."""
    if LOGGER:
        LOGGER.exception(message)
        return
    stamp = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
    print(
        '[%s - EXC!] %s\nexception was: %s' % (
            stamp, message, format_exc()
        )
    )
#############
## IMPORTS ##
#############
from time import time as unixtime
import glob
import fnmatch
import sys
import os.path
try:
import cPickle as pickle
except:
import pickle
import gzip
import numpy as np
from numpy import nan as npnan, sum as npsum, abs as npabs, \
roll as nproll, isfinite as npisfinite, std as npstd, \
sign as npsign, sqrt as npsqrt, median as npmedian, \
array as nparray, percentile as nppercentile, \
polyfit as nppolyfit, var as npvar, max as npmax, min as npmin, \
log10 as nplog10, arange as nparange, pi as MPI, floor as npfloor, \
argsort as npargsort, cos as npcos, sin as npsin, tan as nptan, \
where as npwhere, linspace as nplinspace, \
zeros_like as npzeros_like, full_like as npfull_like, all as npall, \
correlate as npcorrelate, zeros as npzeros, ones as npones, \
column_stack as npcolumn_stack, in1d as npin1d, append as npappend, \
unique as npunique, argwhere as npargwhere, concatenate as npconcatenate
from numpy.polynomial.legendre import Legendre
from scipy.optimize import leastsq
from scipy.signal import medfilt
# FIXME: should probably add this to setup.py requirements
try:
from sklearn.ensemble import RandomForestRegressor
SKLEARN = True
except:
SKLEARN = False
from .lcmath import sigclip_magseries, find_lc_timegroups
import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
try:
from astropy.io import fits as pyfits
except:
import pyfits
###########################################
## UTILITY FUNCTIONS FOR FLUXES AND MAGS ##
###########################################
def keplerflux_to_keplermag(keplerflux, f12=1.74e5):
    '''Convert Kepler flux in electrons/sec to Kepler magnitude.

    Uses the Kepler mag/flux relation:
    - fkep = (10.0**(-0.4*(kepmag - 12.0)))*f12
    - f12 = 1.74e5 electrons/sec (flux of a kepmag = 12.0 source)
    '''
    return 12.0 - 2.5*nplog10(keplerflux/f12)
def keplermag_to_keplerflux(keplermag, f12=1.74e5):
    '''Invert keplerflux_to_keplermag: Kepler mag -> flux in electrons/sec.'''
    return f12*(10.0**(-0.4*(keplermag - 12.0)))
def keplermag_to_sdssr(keplermag, kic_sdssg, kic_sdssr):
    '''Convert a Kepler magnitude to an SDSS r magnitude.

    Requires the target's SDSS g and r mags (from UCAC4 or other
    transforms; extension 0 of a Kepler llc.fits file carries both).
    This appears to be a very rough transformation, with different
    coefficients on either side of g-r = 0.8.
    '''
    gr_color = kic_sdssg - kic_sdssr
    if gr_color < 0.8:
        coeff, slope = 0.2, 0.8
    else:
        coeff, slope = 0.1, 0.9
    return (keplermag - coeff*kic_sdssg)/slope
def flux_ppm_to_magnitudes(ppm):
    '''Convert Kepler's flux parts-per-million to (differential) magnitudes.'''
    flux_fraction = 1.0 - ppm/1.0e6
    return -2.5*nplog10(flux_fraction)
######################################################
## FUNCTIONS FOR READING KEPLER AND K2 LIGHT CURVES ##
######################################################
# this is the list of keys to pull out of the light curve FITS table
LCDATAKEYS = ['TIME','TIMECORR','CADENCENO',
              'SAP_QUALITY',
              'PSF_CENTR1','PSF_CENTR1_ERR','PSF_CENTR2','PSF_CENTR2_ERR',
              'MOM_CENTR1','MOM_CENTR1_ERR','MOM_CENTR2','MOM_CENTR2_ERR']
# simple-aperture-photometry flux/background columns
LCSAPKEYS = ['SAP_FLUX','SAP_FLUX_ERR','SAP_BKG','SAP_BKG_ERR']
# PDC (pre-search data conditioning) detrended flux columns
LCPDCKEYS = ['PDCSAP_FLUX','PDCSAP_FLUX_ERR']
# this is the list of keys to pull out of the light curve header
LCHEADERKEYS = ['TIMESYS','BJDREFI','BJDREFF',
                'OBJECT','KEPLERID',
                'RA_OBJ','DEC_OBJ','EQUINOX',
                'EXPOSURE',
                'CDPP3_0','CDPP6_0','CDPP12_0',
                'PDCVAR','PDCMETHD','CROWDSAP','FLFRCSAP']
# this is the list of keys to pull out of the top header of the FITS
LCTOPKEYS = ['CHANNEL','SKYGROUP','MODULE','OUTPUT',
             'QUARTER','SEASON','CAMPAIGN',
             'DATA_REL','OBSMODE',
             'PMRA','PMDEC','PMTOTAL','PARALLAX',
             'GLON','GLAT',
             'GMAG','RMAG','IMAG','ZMAG','D51MAG',
             'JMAG','HMAG','KMAG','KEPMAG',
             'GRCOLOR','JKCOLOR','GKCOLOR',
             'TEFF','LOGG','FEH',
             'EBMINUSV','AV','RADIUS','TMINDEX']
# this is the list of keys to pull out of the aperture part of the light curve
# we also pull out the whole pixel mask, which looks something like:
# array([[0, 1, 1, 1, 1, 1, 1, 0],
#        [1, 1, 1, 3, 3, 1, 1, 1],
#        [1, 1, 3, 3, 3, 3, 1, 1],
#        [1, 1, 3, 3, 3, 3, 3, 1],
#        [1, 1, 3, 3, 3, 3, 3, 1],
#        [1, 1, 1, 1, 3, 3, 1, 1],
#        [0, 1, 1, 1, 1, 1, 1, 0]], dtype=int32)
# where the value 3 means the actual pixels used to sum the flux for this
# particular object (the optimal aperture). 1 means the pixel was collected by
# the telescope, so its flux is available
# we use CDELT1 and CDELT2 below to get the pixel scale in arcsec/px
# it should be about 3.96 arcsec/pixel in most cases
LCAPERTUREKEYS = ['NPIXSAP','NPIXMISS','CDELT1','CDELT2']
def read_kepler_fitslc(lcfits,
headerkeys=LCHEADERKEYS,
datakeys=LCDATAKEYS,
sapkeys=LCSAPKEYS,
pdckeys=LCPDCKEYS,
topkeys=LCTOPKEYS,
apkeys=LCAPERTUREKEYS,
normalize=False,
appendto=None):
'''This extracts the light curve from a single Kepler or K2 LC FITS file.
This works on the light curves available at MAST:
-> kplr{kepid}-{somedatething}_llc.fits files from the Kepler mission
-> ktwo{epicid}-c{campaign}_llc.fits files from the K2 mission
Returns an lcdict.
If normalize == True, then each component light curve's flux measurements
will be normalized to 1.0 by dividing out the median flux for the component
light curve.
If appendto is an lcdict, will append measurements to that dict. This is
used for consolidating light curves for the same object across different
files (quarters). The appending does not care about the time order. To
consolidate light curves in time order, use consolidate_kepler_fitslc below.
'''
# read the fits file
hdulist = pyfits.open(lcfits)
lchdr, lcdata = hdulist[1].header, hdulist[1].data
lctophdr, lcaperturehdr, lcaperturedata = (hdulist[0].header,
hdulist[2].header,
hdulist[2].data)
hdulist.close()
hdrinfo = {}
# now get the values we want from the header
for key in headerkeys:
if key in lchdr and lchdr[key] is not None:
hdrinfo[key.lower()] = lchdr[key]
else:
hdrinfo[key.lower()] = None
# get the number of detections
ndet = lchdr['NAXIS2']
# get the info from the topheader
for key in topkeys:
if key in lctophdr and lctophdr[key] is not None:
hdrinfo[key.lower()] = lctophdr[key]
else:
hdrinfo[key.lower()] = None
# get the info from the lcaperturehdr
for key in lcaperturehdr:
if key in lcaperturehdr and lcaperturehdr[key] is not None:
hdrinfo[key.lower()] = lcaperturehdr[key]
else:
hdrinfo[key.lower()] = None
# if we're appending to another lcdict
if appendto and isinstance(appendto, dict):
lcdict = appendto
lcdict['quarter'].append(hdrinfo['quarter'])
lcdict['season'].append(hdrinfo['season'])
lcdict['datarelease'].append(hdrinfo['data_rel'])
lcdict['obsmode'].append(hdrinfo['obsmode'])
lcdict['campaign'].append(hdrinfo['campaign'])
# we don't update the objectid
# update lcinfo
lcdict['lcinfo']['timesys'].append(hdrinfo['timesys'])
lcdict['lcinfo']['bjdoffset'].append(
hdrinfo['bjdrefi'] + hdrinfo['bjdreff']
)
lcdict['lcinfo']['exptime'].append(hdrinfo['exposure'])
lcdict['lcinfo']['lcaperture'].append(lcaperturedata)
lcdict['lcinfo']['aperpixused'].append(hdrinfo['npixsap'])
lcdict['lcinfo']['aperpixunused'].append(hdrinfo['npixmiss'])
lcdict['lcinfo']['pixarcsec'].append(
(npabs(hdrinfo['cdelt1']) +
npabs(hdrinfo['cdelt2']))*3600.0/2.0
)
lcdict['lcinfo']['channel'].append(hdrinfo['channel'])
lcdict['lcinfo']['skygroup'].append(hdrinfo['skygroup'])
lcdict['lcinfo']['module'].append(hdrinfo['module'])
lcdict['lcinfo']['output'].append(hdrinfo['output'])
lcdict['lcinfo']['ndet'].append(ndet)
# the objectinfo is not updated for the same object when appending to a
# light curve. FIXME: maybe it should be?
# update the varinfo for this light curve
lcdict['varinfo']['cdpp3_0'].append(hdrinfo['cdpp3_0'])
lcdict['varinfo']['cdpp6_0'].append(hdrinfo['cdpp6_0'])
lcdict['varinfo']['cdpp12_0'].append(hdrinfo['cdpp12_0'])
lcdict['varinfo']['pdcvar'].append(hdrinfo['pdcvar'])
lcdict['varinfo']['pdcmethod'].append(hdrinfo['pdcmethd'])
lcdict['varinfo']['aper_target_total_ratio'].append(hdrinfo['crowdsap'])
lcdict['varinfo']['aper_target_frac'].append(hdrinfo['flfrcsap'])
# update the light curve columns now
for key in datakeys:
if key.lower() in lcdict:
lcdict[key.lower()] = (
npconcatenate((lcdict[key.lower()], lcdata[key]))
)
for key in sapkeys:
if key.lower() in lcdict['sap']:
# normalize the current flux measurements if needed
if normalize and key == '<KEY>':
LOGINFO('normalizing SAP_FLUX')
thislcdata = lcdata[key] / np.nanmedian(lcdata[key])
else:
thislcdata = lcdata[key]
lcdict['sap'][key.lower()] = (
npconcatenate((lcdict['sap'][key.lower()], thislcdata))
)
for key in pdckeys:
if key.lower() in lcdict['pdc']:
# normalize the current flux measurements if needed
if normalize and key == '<KEY>':
LOGINFO('normalizing PDCSAP_FLUX')
thislcdata = lcdata[key] / np.nanmedian(lcdata[key])
else:
thislcdata = lcdata[key]
lcdict['pdc'][key.lower()] = (
npconcatenate((lcdict['pdc'][key.lower()], thislcdata))
)
# append some of the light curve information into existing numpy arrays
# so we can sort on them later
lcdict['lc_channel'] = npconcatenate(
(lcdict['lc_channel'],
npfull_like(lcdata['TIME'],
hdrinfo['channel']))
)
lcdict['lc_skygroup'] = npconcatenate(
(lcdict['lc_skygroup'],
npfull_like(lcdata['TIME'],
hdrinfo['skygroup']))
)
lcdict['lc_module'] = npconcatenate(
(lcdict['lc_module'],
npfull_like(lcdata['TIME'],
hdrinfo['module']))
)
lcdict['lc_output'] = npconcatenate(
(lcdict['lc_output'],
npfull_like(lcdata['TIME'],
hdrinfo['output']))
)
lcdict['lc_quarter'] = npconcatenate(
(lcdict['lc_quarter'],
npfull_like(lcdata['TIME'],
hdrinfo['quarter']))
)
lcdict['lc_season'] = npconcatenate(
(lcdict['lc_season'],
npfull_like(lcdata['TIME'],
hdrinfo['season']))
)
lcdict['lc_campaign'] = npconcatenate(
(lcdict['lc_campaign'],
npfull_like(lcdata['TIME'],
hdrinfo['campaign']))
)
# otherwise, this is a new lcdict
else:
# form the lcdict
# the metadata is one-elem arrays because we might add on to them later
lcdict = {
'quarter':[hdrinfo['quarter']],
'season':[hdrinfo['season']],
'datarelease':[hdrinfo['data_rel']],
'campaign':[hdrinfo['campaign']], # this is None for KepPrime
'obsmode':[hdrinfo['obsmode']],
'objectid':hdrinfo['object'],
'lcinfo':{
'timesys':[hdrinfo['timesys']],
'bjdoffset':[hdrinfo['bjdrefi'] + hdrinfo['bjdreff']],
'exptime':[hdrinfo['exposure']],
'lcaperture':[lcaperturedata],
'aperpixused':[hdrinfo['npixsap']],
'aperpixunused':[hdrinfo['npixmiss']],
'pixarcsec':[(npabs(hdrinfo['cdelt1']) +
npabs(hdrinfo['cdelt2']))*3600.0/2.0],
'channel':[hdrinfo['channel']],
'skygroup':[hdrinfo['skygroup']],
'module':[hdrinfo['module']],
'output':[hdrinfo['output']],
'ndet':[ndet],
},
'objectinfo':{
'objectid':hdrinfo['object'], # repeated here for checkplot use
'keplerid':hdrinfo['keplerid'],
'ra':hdrinfo['ra_obj'],
'decl':hdrinfo['dec_obj'],
'pmra':hdrinfo['pmra'],
'pmdecl':hdrinfo['pmdec'],
'pmtotal':hdrinfo['pmtotal'],
'sdssg':hdrinfo['gmag'],
'sdssr':hdrinfo['rmag'],
'sdssi':hdrinfo['imag'],
'sdssz':hdrinfo['zmag'],
'kepmag':hdrinfo['kepmag'],
'teff':hdrinfo['teff'],
'logg':hdrinfo['logg'],
'feh':hdrinfo['feh'],
'ebminusv':hdrinfo['ebminusv'],
'extinction':hdrinfo['av'],
'starradius':hdrinfo['radius'],
'twomassuid':hdrinfo['tmindex'],
},
'varinfo':{
'cdpp3_0':[hdrinfo['cdpp3_0']],
'cdpp6_0':[hdrinfo['cdpp6_0']],
'cdpp12_0':[hdrinfo['cdpp12_0']],
'pdcvar':[hdrinfo['pdcvar']],
'pdcmethod':[hdrinfo['pdcmethd']],
'aper_target_total_ratio':[hdrinfo['crowdsap']],
'aper_target_frac':[hdrinfo['flfrcsap']],
},
'sap':{},
'pdc':{},
}
# get the LC columns
for key in datakeys:
lcdict[key.lower()] = | |
<gh_stars>0
import sys, os, warnings, datetime, tempfile, glob
from natsort import natsorted
from tqdm import tqdm
from PyQt5 import QtGui, QtCore, Qt, QtWidgets
import pyqtgraph as pg
from pyqtgraph import GraphicsScene
import matplotlib.pyplot as plt
import numpy as np
import cv2
from scipy.ndimage import gaussian_filter1d
from scipy.interpolate import interp1d
from skimage import io
from skimage import transform, draw, measure, segmentation
import mxnet as mx
from mxnet import nd
from . import utils, transforms, models, guiparts, plot
try:
from google.cloud import storage
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'key/cellpose-data-writer.json')
SERVER_UPLOAD = True
except:
SERVER_UPLOAD = False
class QHLine(QtGui.QFrame):
    """A thin horizontal separator line (a sunken ``QFrame.HLine``)."""
    def __init__(self):
        super(QHLine, self).__init__()
        self.setFrameShape(QtGui.QFrame.HLine)
        self.setFrameShadow(QtGui.QFrame.Sunken)
def make_bwr():
    """Build a 256-entry blue-white-red pyqtgraph ColorMap."""
    ramp = np.linspace(0, 255, 128)
    # blue is full-on in the lower half, ramping down in the upper half;
    # red is the mirror image; green peaks in the middle (white center)
    b = np.append(255*np.ones(128), ramp[::-1])[:, np.newaxis]
    r = np.append(ramp, 255*np.ones(128))[:, np.newaxis]
    g = np.append(ramp, ramp[::-1])[:, np.newaxis]
    color = np.concatenate((r, g, b), axis=-1).astype(np.uint8)
    return pg.ColorMap(pos=np.linspace(0.0, 255, 256), color=color)
def make_cmap(cm=0):
    """Build a 256-entry single-channel ColorMap; cm picks R(0)/G(1)/B(2)."""
    color = np.zeros((256, 3))
    color[:, cm] = np.arange(0, 256)
    return pg.ColorMap(pos=np.linspace(0.0, 255, 256),
                       color=color.astype(np.uint8))
def run(zstack=None, images=None):
    """Launch the cellpose GUI application.

    zstack/images are optional file names passed through to MainW so the
    window opens with data pre-loaded. Blocks in the Qt event loop and
    terminates the process (sys.exit) when the window closes.
    """
    # Always start by initializing Qt (only once per application)
    warnings.filterwarnings("ignore")
    app = QtGui.QApplication(sys.argv)
    icon_path = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), 'logo/logo.png'
    )
    # register the icon at several sizes so the OS can pick the best fit
    app_icon = QtGui.QIcon()
    app_icon.addFile(icon_path, QtCore.QSize(16, 16))
    app_icon.addFile(icon_path, QtCore.QSize(24, 24))
    app_icon.addFile(icon_path, QtCore.QSize(32, 32))
    app_icon.addFile(icon_path, QtCore.QSize(48, 48))
    app.setWindowIcon(app_icon)
    # disable mxnet's cuDNN autotune via its env var
    # NOTE(review): presumably to avoid autotune overhead/nondeterminism
    # at model startup — confirm against mxnet docs
    os.environ['MXNET_CUDNN_AUTOTUNE_DEFAULT'] = '0'
    # make sure the pretrained model weights are present before the GUI opens
    models.download_model_weights()
    MainW(zstack=zstack, images=images)
    ret = app.exec_()
    sys.exit(ret)
def get_unique_points(set):
    """Return the unique 3-component points in *set* as a list of int32 rows.

    Note: the parameter name shadows the builtin ``set`` (kept for
    interface compatibility).
    """
    coords = np.zeros((len(set), 3), np.int32)
    for idx, point in enumerate(set):
        coords[idx, :] = np.array(point)
    # de-duplicate whole rows, then hand back a plain list of row arrays
    return list(np.unique(coords, axis=0))
class MainW(QtGui.QMainWindow):
    def __init__(self, zstack=None, images=None):
        """Build the main cellpose GUI window.

        zstack/images optionally name files to load immediately on start.
        """
        super(MainW, self).__init__()
        pg.setConfigOptions(imageAxisOrder="row-major")
        self.setGeometry(50, 50, 1200, 1000)
        self.setWindowTitle("cellpose")
        self.cp_path = os.path.dirname(os.path.realpath(__file__))
        app_icon = QtGui.QIcon()
        icon_path = os.path.abspath(os.path.join(
            self.cp_path, "logo/logo.png")
        )
        # register the icon at several sizes so the OS can pick the best fit
        app_icon.addFile(icon_path, QtCore.QSize(16, 16))
        app_icon.addFile(icon_path, QtCore.QSize(24, 24))
        app_icon.addFile(icon_path, QtCore.QSize(32, 32))
        app_icon.addFile(icon_path, QtCore.QSize(48, 48))
        self.setWindowIcon(app_icon)
        # ---- File menu ---- #
        main_menu = self.menuBar()
        file_menu = main_menu.addMenu("&File")
        # load processed data
        loadImg = QtGui.QAction("&Load image (*.tif, *.png, *.jpg)", self)
        loadImg.setShortcut("Ctrl+L")
        loadImg.triggered.connect(lambda: self.load_images(images))
        file_menu.addAction(loadImg)
        self.loadMasks = QtGui.QAction("Load &masks (*.tif, *.png, *.jpg)", self)
        self.loadMasks.setShortcut("Ctrl+M")
        self.loadMasks.triggered.connect(lambda: self.load_masks(None))
        file_menu.addAction(self.loadMasks)
        # disabled until an image is loaded
        self.loadMasks.setEnabled(False)
        loadManual = QtGui.QAction("Load &processed/labelled image (*_seg.npy)", self)
        loadManual.setShortcut("Ctrl+P")
        loadManual.triggered.connect(lambda: self.load_manual(None))
        file_menu.addAction(loadManual)
        loadStack = QtGui.QAction("Load &numpy z-stack (*.npy nimgs x nchan x pixels x pixels)", self)
        loadStack.setShortcut("Ctrl+N")
        loadStack.triggered.connect(lambda: self.load_zstack(None))
        file_menu.addAction(loadStack)
        self.saveSet = QtGui.QAction("&Save masks and images (as *.npy)", self)
        self.saveSet.setShortcut("Ctrl+S")
        self.saveSet.triggered.connect(self.save_sets)
        file_menu.addAction(self.saveSet)
        self.saveSet.setEnabled(False)
        self.saveServer = QtGui.QAction("Send manually labelled data to server", self)
        self.saveServer.triggered.connect(self.save_server)
        file_menu.addAction(self.saveServer)
        self.saveServer.setEnabled(False)
        # ---- Edit menu ---- #
        edit_menu = main_menu.addMenu("&Edit")
        self.undo = QtGui.QAction('Undo previous mask/trace', self)
        self.undo.setShortcut("Ctrl+Z")
        self.undo.triggered.connect(self.undo_action)
        self.undo.setEnabled(False)
        edit_menu.addAction(self.undo)
        self.ClearButton = QtGui.QAction('Clear all masks', self)
        self.ClearButton.setShortcut("Ctrl+0")
        self.ClearButton.triggered.connect(self.clear_all)
        self.ClearButton.setEnabled(False)
        edit_menu.addAction(self.ClearButton)
        self.remcell = QtGui.QAction('Remove selected cell (Ctrl+CLICK)', self)
        self.remcell.setShortcut("Ctrl+Click")
        self.remcell.triggered.connect(self.remove_action)
        self.remcell.setEnabled(False)
        edit_menu.addAction(self.remcell)
        # ---- Help menu ---- #
        help_menu = main_menu.addMenu("&Help")
        openHelp = QtGui.QAction("&Help window", self)
        openHelp.setShortcut("Ctrl+H")
        openHelp.triggered.connect(self.help_window)
        help_menu.addAction(openHelp)
        # NOTE(review): instantiated and discarded — presumably for a side
        # effect in guiparts.HelpWindow's constructor; verify
        guiparts.HelpWindow()
        self.cell_types = ["cytoplasm", "membrane", "plantsy", "nucleus only",
                           "bio (other)", "miscellaneous"]
        # ---- stylesheet strings reused for button states ---- #
        self.setStyleSheet("QMainWindow {background: 'black';}")
        self.stylePressed = ("QPushButton {Text-align: left; "
                             "background-color: rgb(100,50,100); "
                             "border-color: white;"
                             "color:white;}")
        self.styleUnpressed = ("QPushButton {Text-align: left; "
                               "background-color: rgb(50,50,50); "
                               "border-color: white;"
                               "color:white;}")
        self.styleInactive = ("QPushButton {Text-align: left; "
                              "background-color: rgb(30,30,30); "
                              "border-color: white;"
                              "color:rgb(80,80,80);}")
        self.loaded = False
        # ---- MAIN WIDGET LAYOUT ---- #
        self.cwidget = QtGui.QWidget(self)
        self.l0 = QtGui.QGridLayout()
        self.cwidget.setLayout(self.l0)
        self.setCentralWidget(self.cwidget)
        self.l0.setVerticalSpacing(4)
        self.imask = 0
        # make_buttons returns the row count used to size the drawing area
        b = self.make_buttons()
        # ---- drawing area ---- #
        self.win = pg.GraphicsLayoutWidget()
        self.l0.addWidget(self.win, 0, 3, b, 20)
        self.win.scene().sigMouseClicked.connect(self.plot_clicked)
        self.win.scene().sigMouseMoved.connect(self.mouse_moved)
        self.make_viewbox()
        # ---- colormaps ---- #
        bwrmap = make_bwr()
        self.bwr = bwrmap.getLookupTable(start=0.0, stop=255.0, alpha=False)
        self.cmap = []
        for i in range(3):
            self.cmap.append(make_cmap(i).getLookupTable(start=0.0, stop=255.0, alpha=False))
        self.colormap = (plt.get_cmap('gist_ncar')(np.linspace(0.0, .9, 1000)) * 255).astype(np.uint8)
        self.reset()
        self.is_stack = True  # always loading images of same FOV
        # if called with zstack / images, load them
        if zstack is not None:
            self.filename = zstack
            self.load_zstack(self.filename)
        elif images is not None:
            self.filename = images
            self.load_images(self.filename)
        self.setAcceptDrops(True)
        self.win.show()
        self.show()
def help_window(self):
HW = guiparts.HelpWindow(self)
HW.show()
def make_buttons(self):
self.boldfont = QtGui.QFont("Arial", 10, QtGui.QFont.Bold)
self.smallfont = QtGui.QFont("Arial", 8)
self.headings = ('color: rgb(150,255,150);')
self.dropdowns = ("color: white;"
"background-color: rgb(40,40,40);"
"selection-color: white;"
"selection-background-color: rgb(50,100,50);")
self.checkstyle = "color: rgb(190,190,190);"
label = QtGui.QLabel('Views:')#[\u2191 \u2193]')
label.setStyleSheet(self.headings)
label.setFont(self.boldfont)
self.l0.addWidget(label, 0,0,1,1)
label = QtGui.QLabel('[W/S]')
label.setStyleSheet('color: white')
#label.setFont(self.smallfont)
self.l0.addWidget(label, 1,0,1,1)
label = QtGui.QLabel('[pageup/down]')
label.setStyleSheet('color: white')
label.setFont(self.smallfont)
self.l0.addWidget(label, 1,1,1,1)
b=2
self.view = 0 # 0=image, 1=flowsXY, 2=flowsZ, 3=cellprob
self.color = 0 # 0=RGB, 1=gray, 2=R, 3=G, 4=B
self.RGBChoose = guiparts.RGBRadioButtons(self, b,1)
self.RGBDropDown = QtGui.QComboBox()
self.RGBDropDown.addItems(["RGB","gray","red","green","blue"])
self.RGBDropDown.currentIndexChanged.connect(self.color_choose)
self.RGBDropDown.setFixedWidth(60)
self.RGBDropDown.setStyleSheet(self.dropdowns)
self.l0.addWidget(self.RGBDropDown, b,0,1,1)
b+=3
self.resize = -1
self.X2 = 0
b+=1
line = QHLine()
line.setStyleSheet('color: white;')
self.l0.addWidget(line, b,0,1,2)
b+=1
label = QtGui.QLabel('Drawing:')
label.setStyleSheet(self.headings)
label.setFont(self.boldfont)
self.l0.addWidget(label, b,0,1,2)
b+=1
self.brush_size = 3
self.BrushChoose = QtGui.QComboBox()
self.BrushChoose.addItems(["1","3","5","7","9"])
self.BrushChoose.currentIndexChanged.connect(self.brush_choose)
self.BrushChoose.setFixedWidth(60)
self.BrushChoose.setStyleSheet(self.dropdowns)
self.l0.addWidget(self.BrushChoose, b, 1,1,1)
label = QtGui.QLabel('brush size: [, .]')
label.setStyleSheet('color: white;')
self.l0.addWidget(label, b,0,1,1)
# cross-hair
self.vLine = pg.InfiniteLine(angle=90, movable=False)
self.hLine = pg.InfiniteLine(angle=0, movable=False)
b+=1
# turn on draw mode
self.SCheckBox = QtGui.QCheckBox('single stroke')
self.SCheckBox.setStyleSheet(self.checkstyle)
self.SCheckBox.toggled.connect(self.autosave_on)
self.l0.addWidget(self.SCheckBox, b,0,1,2)
b+=1
# turn on crosshairs
self.CHCheckBox = QtGui.QCheckBox('cross-hairs')
self.CHCheckBox.setStyleSheet(self.checkstyle)
self.CHCheckBox.toggled.connect(self.cross_hairs)
self.l0.addWidget(self.CHCheckBox, b,0,1,1)
b+=1
# turn off masks
self.layer_off = False
self.masksOn = True
self.MCheckBox = QtGui.QCheckBox('MASKS ON [X]')
self.MCheckBox.setStyleSheet(self.checkstyle)
self.MCheckBox.setChecked(True)
self.MCheckBox.toggled.connect(self.toggle_masks)
self.l0.addWidget(self.MCheckBox, b,0,1,2)
b+=1
# turn off outlines
self.outlinesOn = True
self.OCheckBox = QtGui.QCheckBox('outlines on [Z]')
self.OCheckBox.setStyleSheet(self.checkstyle)
self.OCheckBox.setChecked(True)
self.OCheckBox.toggled.connect(self.toggle_masks)
self.l0.addWidget(self.OCheckBox, b,0,1,2)
b+=1
# send to server
self.ServerButton = QtGui.QPushButton(' send manual seg. to server')
self.ServerButton.clicked.connect(self.save_server)
self.l0.addWidget(self.ServerButton, b,0,1,2)
self.ServerButton.setEnabled(False)
self.ServerButton.setStyleSheet(self.styleInactive)
self.ServerButton.setFont(self.boldfont)
b+=1
line = QHLine()
line.setStyleSheet('color: white;')
self.l0.addWidget(line, b,0,1,2)
b+=1
label = QtGui.QLabel('Segmentation:')
label.setStyleSheet(self.headings)
label.setFont(self.boldfont)
self.l0.addWidget(label, b,0,1,2)
b+=1
self.diameter = 30
label = QtGui.QLabel('cell diameter (pix):')
label.setStyleSheet('color: white;')
self.l0.addWidget(label, b, 0,1,2)
self.Diameter = QtGui.QLineEdit()
self.Diameter.setText(str(self.diameter))
self.Diameter.returnPressed.connect(self.compute_scale)
self.Diameter.setFixedWidth(50)
b+=1
self.l0.addWidget(self.Diameter, b, 0,1,2)
# recompute model
self.SizeButton = QtGui.QPushButton(' calibrate')
self.SizeButton.clicked.connect(self.calibrate_size)
self.l0.addWidget(self.SizeButton, b,1,1,1)
self.SizeButton.setEnabled(False)
self.SizeButton.setStyleSheet(self.styleInactive)
self.SizeButton.setFont(self.boldfont)
# scale toggle
b+=1
self.scale_on = True
self.ScaleOn = QtGui.QCheckBox('scale disk on')
self.ScaleOn.setStyleSheet('color: red;')
self.ScaleOn.setChecked(True)
self.ScaleOn.toggled.connect(self.toggle_scale)
self.l0.addWidget(self.ScaleOn, b,0,1,2)
# use GPU
b+=1
self.useGPU = QtGui.QCheckBox('use GPU')
self.useGPU.setStyleSheet(self.checkstyle)
self.check_gpu()
self.l0.addWidget(self.useGPU, b,0,1,2)
b+=1
# choose models
self.ModelChoose = QtGui.QComboBox()
self.model_dir = os.path.abspath(os.path.join(self.cp_path, 'models/'))
#models = glob(self.model_dir+'/*')
#models = [os.path.split(m)[-1] for m in models]
models = ['cyto', 'nuclei']
self.ModelChoose.addItems(models)
self.ModelChoose.setFixedWidth(70)
self.ModelChoose.setStyleSheet(self.dropdowns)
self.l0.addWidget(self.ModelChoose, b, 1,1,1)
label = QtGui.QLabel('model: ')
label.setStyleSheet('color: white;')
self.l0.addWidget(label, b, 0,1,1)
b+=1
# choose channel
self.ChannelChoose = [QtGui.QComboBox(), QtGui.QComboBox()]
self.ChannelChoose[0].addItems(['gray','red','green','blue'])
self.ChannelChoose[1].addItems(['none','red','green','blue'])
cstr = ['chan to seg', 'chan2 (opt)']
for i in range(2):
self.ChannelChoose[i].setFixedWidth(70)
self.ChannelChoose[i].setStyleSheet(self.dropdowns)
label = QtGui.QLabel(cstr[i])
label.setStyleSheet('color: white;')
self.l0.addWidget(label, b, 0,1,1)
self.l0.addWidget(self.ChannelChoose[i], b, 1,1,1)
b+=1
b+=1
# recompute model
self.ModelButton = QtGui.QPushButton(' run segmentation')
self.ModelButton.clicked.connect(self.compute_model)
self.l0.addWidget(self.ModelButton, b,0,1,2)
self.ModelButton.setEnabled(False)
self.ModelButton.setStyleSheet(self.styleInactive)
self.ModelButton.setFont(self.boldfont)
b+=1
self.progress = QtGui.QProgressBar(self)
self.progress.setStyleSheet('color: gray;')
self.l0.addWidget(self.progress, b,0,1,2)
self.autobtn = QtGui.QCheckBox('auto-adjust')
self.autobtn.setStyleSheet(self.checkstyle)
self.autobtn.setChecked(True)
self.l0.addWidget(self.autobtn, b+2,0,1,1)
b+=1
label = QtGui.QLabel('saturation')
label.setStyleSheet(self.headings)
label.setFont(self.boldfont)
self.l0.addWidget(label, b,1,1,1)
b+=1
self.slider = guiparts.RangeSlider(self)
self.slider.setMinimum(0)
self.slider.setMaximum(255)
self.slider.setLow(0)
self.slider.setHigh(255)
self.slider.setTickPosition(QtGui.QSlider.TicksRight)
self.l0.addWidget(self.slider, b,1,1,1)
self.l0.setRowStretch(b, 1)
b+=2
# add scrollbar underneath
self.scroll = QtGui.QScrollBar(QtCore.Qt.Horizontal)
self.scroll.setMaximum(10)
self.scroll.valueChanged.connect(self.move_in_Z)
self.l0.addWidget(self.scroll, b,3,1,20)
return b
def check_gpu(self):
    """Enable and tick the GPU checkbox when a usable GPU is detected;
    otherwise untick, disable, and grey it out."""
    checkbox = self.useGPU
    if not utils.use_gpu():
        checkbox.setChecked(False)
        checkbox.setEnabled(False)
        checkbox.setStyleSheet("color: rgb(80,80,80);")
    else:
        checkbox.setEnabled(True)
        checkbox.setChecked(True)
def get_channels(self):
    """Return [segmentation-channel index, second-channel index] from the
    two channel dropdowns; the 'nuclei' model takes no second channel, so
    it is forced to 0 in that case."""
    seg_chan = self.ChannelChoose[0].currentIndex()
    chan2 = self.ChannelChoose[1].currentIndex()
    if self.current_model == 'nuclei':
        chan2 = 0
    return [seg_chan, chan2]
def calibrate_size(self):
    """Estimate the cell diameter on the currently shown Z-plane.

    Runs the size model on a copy of the current plane, clamps and
    rescales the estimate, writes it into the Diameter text box, and
    redraws the scale disk via compute_scale().
    """
    self.initialize_model()
    # size-model estimate on the currently displayed plane only
    diams, _ = self.model.sz.eval([self.stack[self.currentZ].copy()],
                                  channels=self.get_channels(), progress=self.progress)
    diams = np.maximum(5.0, diams)  # floor the estimate at 5 pixels
    # scale by 2/sqrt(pi) — presumably converting to an area-equivalent
    # diameter; TODO confirm against the size-model definition
    diams *= 2 / np.pi**0.5
    print('estimated diameter of cells using %s model = %0.1f pixels'%
          (self.current_model, diams))
    self.Diameter.setText('%0.1f'%diams[0])
    self.diameter = diams[0]
    self.compute_scale()
    self.progress.setValue(100)
def toggle_scale(self):
    """Show or hide the calibration scale disk in the main plot."""
    if not self.scale_on:
        self.p0.addItem(self.scale)
    else:
        self.p0.removeItem(self.scale)
    self.scale_on = not self.scale_on
def keyPressEvent(self, event):
    """Global keyboard shortcuts for the main window.

    Unmodified keys: Return finishes the in-progress point set; X/Z
    toggle masks/outlines; Left/Right and A/D step through Z-planes (or
    images when NZ==1); ,/. change brush size; PageUp/PageDown cycle the
    view; Up/W and Down/S cycle the color map. Ctrl+Z undoes, Ctrl+0
    clears everything. All shortcuts are inactive until an image is
    loaded.
    """
    if self.loaded:
        #self.p0.setMouseEnabled(x=True, y=True)
        # only handle plain keys here; Ctrl combos are handled below
        if (event.modifiers() != QtCore.Qt.ControlModifier and
            event.modifiers() != QtCore.Qt.ShiftModifier and
            event.modifiers() != QtCore.Qt.AltModifier):
            if not self.in_stroke:
                if len(self.current_point_set) > 0:
                    # a point set is pending: only Return (= finalize) acts
                    if event.key() == QtCore.Qt.Key_Return:
                        self.add_set()
                else:
                    if event.key() == QtCore.Qt.Key_X:
                        self.MCheckBox.toggle()
                    if event.key() == QtCore.Qt.Key_Z:
                        self.OCheckBox.toggle()
                    if event.key() == QtCore.Qt.Key_Left:
                        self.currentZ = max(0,self.currentZ-1)
                        if self.NZ==1:
                            # single-plane mode: arrows move between files
                            self.get_prev_image()
                    elif event.key() == QtCore.Qt.Key_Right:
                        self.currentZ = min(self.NZ-1, self.currentZ+1)
                        if self.NZ==1:
                            self.get_next_image()
                    elif event.key() == QtCore.Qt.Key_A:
                        self.currentZ = max(0,self.currentZ-1)
                        if self.NZ==1:
                            self.get_prev_image()
                    elif event.key() == QtCore.Qt.Key_D:
                        self.currentZ = min(self.NZ-1, self.currentZ+1)
                        if self.NZ==1:
                            self.get_next_image()
                    elif (event.key() == QtCore.Qt.Key_Comma or
                          event.key() == QtCore.Qt.Key_Period):
                        # ,/. step the brush-size dropdown down/up
                        count = self.BrushChoose.count()
                        gci = self.BrushChoose.currentIndex()
                        if event.key() == QtCore.Qt.Key_Comma:
                            gci = max(0, gci-1)
                        else:
                            gci = min(count-1, gci+1)
                        self.BrushChoose.setCurrentIndex(gci)
                        self.brush_choose()
                    elif event.key() == QtCore.Qt.Key_PageDown:
                        self.view = (self.view+1)%(len(self.RGBChoose.bstr))
                        self.RGBChoose.button(self.view).setChecked(True)
                    elif event.key() == QtCore.Qt.Key_PageUp:
                        self.view = (self.view-1)%(len(self.RGBChoose.bstr))
                        self.RGBChoose.button(self.view).setChecked(True)
            # can change background if stroke not finished
            if event.key() == QtCore.Qt.Key_Up or event.key() == QtCore.Qt.Key_W:
                self.color = (self.color-1)%(5)
                self.RGBDropDown.setCurrentIndex(self.color)
            elif event.key() == QtCore.Qt.Key_Down or event.key() == QtCore.Qt.Key_S:
                self.color = (self.color+1)%(5)
                self.RGBDropDown.setCurrentIndex(self.color)
            self.update_plot()
        elif event.modifiers() == QtCore.Qt.ControlModifier:
            if event.key() == QtCore.Qt.Key_Z:
                self.undo_action()
            if event.key() == QtCore.Qt.Key_0:
                self.clear_all()
def toggle_removals(self):
    """Enable the clear/remove/undo controls only while cells exist."""
    have_cells = self.ncells > 0
    self.ClearButton.setEnabled(have_cells)
    self.remcell.setEnabled(have_cells)
    self.undo.setEnabled(have_cells)
def remove_action(self):
    """Delete the currently selected cell, if any is selected."""
    selected = self.selected
    if selected > -1:
        self.remove_cell(selected)
def undo_action(self):
    """Undo: drop the newest stroke if it lives on the current plane,
    otherwise remove the most recently added cell (if any)."""
    stroke_on_this_plane = (len(self.strokes) > 0 and
                            self.strokes[-1][0][0] == self.currentZ)
    if stroke_on_this_plane:
        self.remove_stroke()
    elif self.ncells > 0:
        # nothing to undo stroke-wise -> remove the last-added cell
        self.remove_cell(self.ncells - 1)
def get_files(self):
images = []
images.extend(glob.glob(os.path.dirname(self.filename) + '/*.png'))
| |
<filename>mopidy_jellyfin/remote.py
from __future__ import unicode_literals
from mopidy import httpclient, models
from mopidy_jellyfin.utils import cache
import mopidy_jellyfin
from .http import JellyfinHttpClient
from unidecode import unidecode
import os
import logging
from collections import OrderedDict, defaultdict
import sys
if sys.version.startswith('3'):
from urllib.parse import (
parse_qs,
quote,
urlencode,
urljoin,
urlsplit,
urlunsplit
)
else:
from urllib import urlencode
from urllib2 import quote
from urlparse import parse_qs, urljoin, urlsplit, urlunsplit
logger = logging.getLogger(__name__)
class JellyfinHandler(object):
def __init__(self, config):
    """Read the mopidy-jellyfin config, build auth headers, and log in.

    :param config: mopidy config dict; the 'jellyfin' section supplies
        hostname, credentials, libraries and TLS client-cert options
    :type config: dict
    """
    self.config = config
    proxy = config.get('proxy')
    # bug fix: cert must be defined even when there is no 'jellyfin'
    # section, because it is passed to JellyfinHttpClient below
    # (previously a NameError in that case)
    cert = None
    jellyfin = config.get('jellyfin')
    if jellyfin:
        self.hostname = jellyfin.get('hostname')
        if jellyfin.get('port', False):
            logger.warn('Specifying port in the config file is '
                        'depreciated. This will be removed in a future '
                        'release. This should be combined with the '
                        'hostname field')
        self.username = jellyfin.get('username')
        # bug fix: restore the password lookup (line held a redacted
        # placeholder that was not valid Python)
        self.password = jellyfin.get('password')
        self.libraries = jellyfin.get('libraries')
        # If no libraries are provided, default to 'Music'
        if not self.libraries:
            self.libraries = 'Music'
        self.albumartistsort = jellyfin.get('albumartistsort')
        # If not overridden, default to using Album Artist sort method
        # This _really_ shouldn't be necessary, but it is for reasons
        if self.albumartistsort not in ['False', 'false']:
            self.albumartistsort = True
        else:
            self.albumartistsort = False
        if jellyfin.get('user_id', False):
            logger.warn('Specifying user_id in the config file is '
                        'depreciated. This will be removed in a future '
                        'release')
        client_cert = jellyfin.get('client_cert', None)
        client_key = jellyfin.get('client_key', None)
        if client_cert is not None and client_key is not None:
            cert = (client_cert, client_key)
        self.album_format = jellyfin.get('album_format', False)
        if not self.album_format:
            self.album_format = '{Name}'
    else:
        logger.info('No Jellyfin config found')
    # create authentication headers
    self.auth_data = self._password_data()
    headers = self._create_headers()
    self.http = JellyfinHttpClient(headers, cert, proxy)
    self._login()
def _save_token(self, token):
    """Persist the auth token into the extension cache dir so the
    frontend component can read it as well."""
    cache_dir = mopidy_jellyfin.Extension.get_cache_dir(self.config)
    with open(os.path.join(cache_dir, 'token'), 'w') as handle:
        handle.write(token)
def _login(self):
    """Authenticate against Jellyfin and install the session token.

    On success, records the user id, adds the token header to the HTTP
    session and writes the token to disk; otherwise logs an error.
    """
    url = self.api_url('/Users/AuthenticateByName')
    auth_details = self.http.post(url, self.auth_data)
    token = auth_details.get('AccessToken')
    if not token:
        logger.error('Unable to login to Jellyfin')
        return
    self.user_id = auth_details.get('User').get('Id')
    self.http.session.headers.update({'x-mediabrowser-token': token})
    self._save_token(token)
def _password_data(self):
    """Return the credential payload expected by AuthenticateByName."""
    return dict(username=self.username, Pw=self.password)
def _create_headers(self, token=None):
    """Return the header dict needed to talk to the Jellyfin API.

    :param token: optional access token to include in the headers
    :returns: headers dict with the emby authorization string
    :rtype: dict
    """
    headers = {}
    authorization = (
        'MediaBrowser , '
        'Client="Mopidy", '
        'Device="{device}", '
        'DeviceId="{device_id}", '
        'Version="{version}"'
    ).format(
        device=mopidy_jellyfin.Extension.device_name,
        device_id=mopidy_jellyfin.Extension.device_id,
        version=mopidy_jellyfin.__version__
    )
    headers['x-emby-authorization'] = authorization
    if token:
        # bug fix: use the token *argument*; self.token is never set on
        # this class, so the old code raised AttributeError here
        headers['x-mediabrowser-token'] = token
    return headers
def api_url(self, endpoint):
    """Build a full Jellyfin API URL for *endpoint*.

    Prepends 'http://' when the configured hostname carries no scheme,
    joins the endpoint onto it, and forces a 'format=json' query
    parameter on the result.
    """
    hostname = self.hostname
    if not (hostname.startswith('http://') or
            hostname.startswith('https://')):
        # no scheme configured -> default to plain http
        hostname = 'http://' + hostname
    joined = urljoin(hostname, endpoint)
    scheme, netloc, path, query_string, fragment = urlsplit(joined)
    params = parse_qs(query_string)
    params['format'] = ['json']
    new_query = urlencode(params, doseq=True)
    return urlunsplit((scheme, netloc, path, new_query, fragment))
def get_music_root(self):
    """Return the user's music/book libraries.

    :returns: list of {'Name', 'Id', 'CollectionType'} dicts
    :rtype: list
    :raises Exception: when no 'music' or 'books' library exists
    """
    url = self.api_url(
        '/Users/{}/Views'.format(self.user_id)
    )
    data = self.http.get(url)
    media_folders = [
        {'Name': library.get('Name'),
         'Id': library.get('Id'),
         'CollectionType': library.get('CollectionType')}
        for library in data.get('Items')
        if library.get('CollectionType') in ['books', 'music']
    ]
    if media_folders:
        logger.debug('Jellyfin: Found libraries')
        return media_folders
    # bug fix: the membership test was "'CollectionType' in i.items()",
    # which compares the key against (key, value) tuples and never
    # matches; test the dict itself. Also use the module logger for
    # consistency with the rest of this file.
    logger.debug(
        'Jellyfin: All directories found: {}'.format(
            [i.get('CollectionType')
             for i in data.get('Items')
             if 'CollectionType' in i]
        )
    )
    raise Exception('Jellyfin: Cant find music root directory')
def get_library_roots(self):
    """Return a mopidy directory Ref for each music/book library."""
    refs = []
    for library in self.get_music_root():
        if not library:
            continue
        refs.append(models.Ref.directory(
            uri='jellyfin:directory:{}'.format(library.get('Id')),
            name=library.get('Name')
        ))
    return refs
def get_playlists(self):
    """Return the raw playlist items from the 'Playlists' library, or
    [] when that library does not exist on the server."""
    url = self.api_url(
        '/Users/{}/Views'.format(self.user_id)
    )
    data = self.http.get(url)
    matching_ids = [library.get('Id') for library in data.get('Items')
                    if library.get('Name') == 'Playlists']
    if not matching_ids:
        return []
    return self.get_directory(matching_ids[0]).get('Items')
def get_playlist_contents(self, playlist_id):
    """Return the track items contained in the given playlist."""
    endpoint = '/Playlists/{}/Items?UserId={}'.format(
        playlist_id, self.user_id)
    return self.http.get(self.api_url(endpoint)).get('Items')
def create_playlist(self, name):
    """Create an empty audio playlist named *name* on the server."""
    payload = {
        'Name': name,
        'UserId': self.user_id,
        'MediaType': 'Audio'
    }
    return self.http.post(self.api_url('/Playlists'), payload)
def delete_playlist(self, playlist_id):
    """Delete the given playlist from the server."""
    endpoint = '/Items/{}?UserId={}'.format(playlist_id, self.user_id)
    return self.http.delete(self.api_url(endpoint))
def update_playlist(self, playlist_id, new_ids):
    """Replace the contents of a playlist with *new_ids*, in order.

    There is no in-place reorder endpoint, so every current entry is
    removed first and the new track ids are added back in one call.

    :param playlist_id: id of the playlist to rewrite
    :param new_ids: iterable of track id strings in the desired order
    """
    curr_tracks = self.get_playlist_contents(playlist_id)
    curr_ids = [i.get('PlaylistItemId') for i in curr_tracks]
    # consistency fix: the endpoint was missing its leading '/', unlike
    # every other api_url() call in this class
    del_url = self.api_url(
        '/Playlists/{}/Items?UserId={}&EntryIds={}'.format(
            playlist_id, self.user_id, ','.join(str(i) for i in curr_ids)
        )
    )
    self.http.delete(del_url)
    new_url = self.api_url(
        '/Playlists/{}/Items?UserId={}&Ids={}'.format(
            playlist_id, self.user_id, ','.join(new_ids)
        )
    )
    self.http.post(new_url)
@cache()
def browse_item(self, item_id):
    """Return mopidy Refs for the children of a directory item.

    Audio and AudioBook children become track refs; MusicAlbum children
    become album refs named via the configured album format; Folder
    children become album refs named by their plain name.
    """
    contents = self.get_directory(item_id).get('Items')
    ret_value = []
    for item in contents:
        item_type = item.get('Type')
        # 'Audio' and 'AudioBook' used to be two byte-identical
        # branches; merged into one membership test
        if item_type in ('Audio', 'AudioBook'):
            ret_value.append(models.Ref.track(
                uri='jellyfin:track:{}'.format(
                    item.get('Id')
                ),
                name=item.get('Name')
            ))
        elif item_type == 'MusicAlbum':
            ret_value.append(models.Ref.album(
                uri='jellyfin:album:{}'.format(item.get('Id')),
                name=self.format_album(item)
            ))
        elif item_type == 'Folder':
            ret_value.append(models.Ref.album(
                uri='jellyfin:album:{}'.format(item.get('Id')),
                name=item.get('Name')
            ))
    return ret_value
@cache()
def get_artists(self):
    """Return artist Refs collected from every configured library.

    Uses the AlbumArtists endpoint when albumartistsort is enabled,
    otherwise the plain Artists endpoint.
    """
    raw_artists = []
    for library in self.get_music_root():
        if library.get('Name') not in self.libraries:
            continue
        endpoint = ('/Artists/AlbumArtists' if self.albumartistsort
                    else '/Artists')
        url = self.api_url('{}?ParentId={}&UserId={}'.format(
            endpoint, library.get('Id'), self.user_id))
        raw_artists += self.http.get(url).get('Items')
    return [
        models.Ref.artist(
            uri='jellyfin:artist:{}'.format(artist.get('Id')),
            name=artist.get('Name')
        )
        for artist in raw_artists
    ]
@cache()
def get_albums(self, query):
    """Return mopidy Albums for the artist named in *query*.

    :param query: search/browse query dict; must contain an 'artist' or
        'albumartist' key, otherwise [] is returned
    :type query: dict
    :returns: list of mopidy.models.Album
    :rtype: list
    """
    raw_artist = [""]
    raw_albums = []
    # Check query for artist name
    if 'artist' in query:
        raw_artist = query.get('artist')
    elif 'albumartist' in query:
        raw_artist = query.get('albumartist')
    else:
        return []
    # URL encode artist string ('/' would break the URL path, so it is
    # mapped to '-')
    artist = quote(raw_artist[0].encode('utf8')).replace('/', '-')
    artist_ref = [models.Artist(name=raw_artist[0])]
    url = self.api_url(
        '/Artists/{}?UserId={}'.format(
            artist, self.user_id)
    )
    # Pull out artist_id
    artist_data = self.http.get(url)
    artist_id = artist_data.get('Id')
    # Get album list, honoring the album-artist vs track-artist setting
    if self.albumartistsort:
        url = self.api_url(
            '/Items?AlbumArtistIds={}&UserId={}&'
            'IncludeItemTypes=MusicAlbum&Recursive=true'.format(
                artist_id, self.user_id
            )
        )
    else:
        url = self.api_url(
            '/Items?ArtistIds={}&UserId={}&'
            'IncludeItemTypes=MusicAlbum&Recursive=true'.format(
                artist_id, self.user_id
            )
        )
    result = self.http.get(url)
    if result:
        raw_albums = result.get('Items')
    albums = [
        models.Album(
            uri='jellyfin:album:{}'.format(item.get('Id')),
            name=item.get('Name'),
            artists=artist_ref
        )
        for item in raw_albums
    ]
    return albums
@cache()
def get_directory(self, id):
    """Get a directory listing from the Jellyfin API.

    :param id: Directory ID
    :type id: int
    :returns: Directory
    :rtype: dict
    """
    endpoint = '/Users/{}/Items?ParentId={}&SortOrder=Ascending'.format(
        self.user_id, id)
    return self.http.get(self.api_url(endpoint))
@cache()
def get_item(self, id):
    """Get a single item from the Jellyfin API.

    :param id: Item ID
    :type id: int
    :returns: Item
    :rtype: dict
    """
    url = self.api_url('/Users/{}/Items/{}'.format(self.user_id, id))
    data = self.http.get(url)
    logger.debug('Jellyfin item: {}'.format(data))
    return data
def create_track(self, track):
    """Build a mopidy Track from a Jellyfin API track dict.

    :param track: Track from Jellyfin API
    :type track: dict
    :returns: Track
    :rtype: mopidy.models.Track
    """
    # TODO: add more metadata
    track_uri = 'jellyfin:track:{}'.format(track.get('Id'))
    duration_ms = self.ticks_to_milliseconds(track.get('RunTimeTicks'))
    return models.Track(
        uri=track_uri,
        name=track.get('Name'),
        track_no=track.get('IndexNumber'),
        disc_no=track.get('ParentIndexNumber'),
        genre=track.get('Genre'),
        artists=self.create_artists(track),
        album=self.create_album(track),
        length=duration_ms
    )
def create_album(self, track):
    """Build a mopidy Album from the album fields of a track dict.

    :param track: Track
    :type track: dict
    :returns: Album
    :rtype: mopidy.models.Album
    """
    album_name = track.get('Album')
    return models.Album(name=album_name,
                        artists=self.create_artists(track))
def create_artists(self, track):
    """Build a mopidy Artist for every artist listed on a track dict.

    :param track: Track
    :type track: dict
    :returns: List of artists
    :rtype: list of mopidy.models.Artist
    """
    artists = []
    for artist in track.get('ArtistItems'):
        artists.append(models.Artist(name=artist.get('Name')))
    return artists
@cache()
def get_track(self, track_id):
    """Fetch a track by id and convert it to a mopidy Track.

    :param track_id: ID of a Jellyfin track
    :type track_id: int
    :returns: track
    :rtype: mopidy.models.Track
    """
    return self.create_track(self.get_item(track_id))
def _get_search(self, itemtype, term):
    """Query the Jellyfin search-hints endpoint.

    :param itemtype: one of 'any', 'artist', 'albumartist', 'album',
        'track_name'
    :param term: search term
    :type itemtype: str
    :type term: str
    :returns: list of search-hint dicts
    :rtype: list
    :raises Exception: for an unrecognized itemtype
    """
    # map mopidy item types onto Jellyfin IncludeItemTypes values
    type_map = {
        'any': 'Audio,MusicAlbum,MusicArtist',
        'artist': 'MusicArtist',
        'albumartist': 'MusicArtist',
        'album': 'MusicAlbum',
        'track_name': 'Audio',
    }
    query = type_map.get(itemtype)
    if query is None:
        raise Exception('Jellyfin search: no itemtype {}'.format(itemtype))
    url = self.api_url(
        ('/Search/Hints?SearchTerm={}&'
         'IncludeItemTypes={}').format(
            quote(term.encode('utf8')),
            query
        )
    )
    return list(self.http.get(url).get('SearchHints', []))
@cache()
def search(self, query):
    """Search Jellyfin for a term.

    Fans each (itemtype, terms) pair in *query* out to the search-hints
    endpoint, then converts each hint into the matching mopidy model.

    :param query: Search query
    :type query: dict
    :returns: Search results
    :rtype: mopidy.models.SearchResult
    """
    logger.debug('Searching in Jellyfin for {}'.format(query))
    # something to store the results in
    data = []
    tracks = []
    albums = []
    artists = []
    # collect raw search hints for every term of every itemtype
    for itemtype, term in query.items():
        for item in term:
            data.extend(
                self._get_search(itemtype, item)
            )
    # walk through all items and create stuff
    for item in data:
        if item.get('Type') == 'Audio':
            track_artists = [
                models.Artist(
                    name=artist
                )
                for artist in item.get('Artists')
            ]
            tracks.append(
                models.Track(
                    uri='jellyfin:track:{}'.format(item.get('ItemId')),
                    track_no=item.get('IndexNumber'),
                    name=item.get('Name'),
                    artists=track_artists,
                    album=models.Album(
                        name=item.get('Album'),
                        artists=track_artists
                    )
                )
            )
        elif item.get('Type') == 'MusicAlbum':
            album_artists = [
                models.Artist(
                    name=artist
                )
                for artist in item.get('Artists')
            ]
            albums.append(
                models.Album(
                    uri='jellyfin:album:{}'.format(item.get('ItemId')),
                    name=item.get('Name'),
                    artists=album_artists
                )
            )
        elif item.get('Type') == 'MusicArtist':
            artists.append(
                models.Artist(
                    uri='jellyfin:artist:{}'.format(item.get('ItemId')),
                    name=item.get('Name')
                )
            )
    return models.SearchResult(
        uri='jellyfin:search',
        tracks=tracks,
        artists=artists,
        albums=albums
    )
@cache()
def exact_search(self, | |
<reponame>ccoutant/celebrationclassic
#!/usr/bin/env python
import os
import re
import quopri
import cgi
import logging
import webapp2
import datetime
import hashlib
import json
from google.appengine.ext import ndb
from google.appengine.api import users, memcache, images, mail
from django.template.loaders.filesystem import Loader
from django.template.loader import render_to_string
import tournament
import payments
import capabilities
import sponsorship
import auctionitem
import detailpage
import uploadedfile
import tz
import auditing
from sponsor import Sponsor, Team, Golfer, Substitute, DinnerGuest, TributeAd, get_handicap_index
# SERVER_SOFTWARE distinguishes the App Engine dev server from production;
# dev_server presumably gates development-only behavior elsewhere in the
# app — confirm at its call sites.
server_software = os.environ.get('SERVER_SOFTWARE')
dev_server = True if server_software and server_software.startswith("Development") else False
# Logout
def show_login_page(out, redirect_url):
    """Render the login page to *out* with login/logout links that
    return to *redirect_url*; logs when a signed-in user lacks the
    required capability."""
    email = None
    user = users.get_current_user()
    if user:
        email = user.email()
        logging.info("User %s (%s) does not have required capability" % (email, user.nickname()))
    out.write(render_to_string('login.html', {
        'email': email,
        'login_url': users.create_login_url(redirect_url),
        'logout_url': users.create_logout_url(redirect_url)
    }))
class Logout(webapp2.RequestHandler):
    """Log the current user out and send them back to the home page."""
    def get(self):
        destination = users.create_logout_url('/')
        self.redirect(destination)
# Users who can update parts of the site.
def make_bitmask(*caps):
    """Pack boolean capability flags into an integer bitmask.

    The first flag lands in the most-significant bit of the result and
    the last flag in bit 0; no flags yields 0.
    """
    bitmask = 0
    for cap in caps:
        bitmask = (bitmask << 1) | (1 if cap else 0)
    return bitmask
class ManageUsers(webapp2.RequestHandler):
    """Admin-only page for granting and revoking per-user capabilities."""

    # Show the form.
    def get(self):
        if not users.is_current_user_admin():
            show_login_page(self.response.out, self.request.uri)
            return
        caps = capabilities.get_current_user_caps()
        q = capabilities.all_caps()
        q = q.order(capabilities.Capabilities.email)
        allcaps = q.fetch(30)
        # Pack each user's flags into one bitmask so the POST handler
        # can cheaply detect which rows actually changed.
        for u in allcaps:
            u.capbits = make_bitmask(u.can_update_sponsorships,
                                     u.can_view_registrations,
                                     u.can_add_registrations,
                                     u.can_update_auction,
                                     u.can_edit_content,
                                     u.can_edit_tournament_properties,
                                     u.can_edit_payment_processor)
        template_values = {
            'capabilities': caps,
            'allcaps': allcaps
        }
        self.response.out.write(render_to_string('users.html', template_values))

    # Process the submitted info.
    def post(self):
        if not users.is_current_user_admin():
            show_login_page(self.response.out, '/admin/users')
            return
        count = int(self.request.get('count'))
        updated_records = [ ]
        # Update existing users whose capability bits changed.
        for i in range(1, count + 1):
            email = self.request.get('email%d' % i)
            us = True if self.request.get('us%d' % i) == 'u' else False
            vr = True if self.request.get('vr%d' % i) == 'v' else False
            ar = True if self.request.get('ar%d' % i) == 'a' else False
            ua = True if self.request.get('ua%d' % i) == 'u' else False
            ec = True if self.request.get('ec%d' % i) == 'e' else False
            et = True if self.request.get('et%d' % i) == 't' else False
            pp = True if self.request.get('pp%d' % i) == 'p' else False
            orig_capbits = int(self.request.get('capbits%d' % i))
            new_capbits = make_bitmask(us, vr, ar, ua, ec, et, pp)
            if orig_capbits != new_capbits:
                u = capabilities.get_caps(email)
                logging.info("updating user %s: orig caps %02x new caps %02x" % (email, orig_capbits, new_capbits))
                if u.email is None:
                    # bug fix: email is a string; "%d" raised TypeError here
                    logging.error("user %s not found" % email)
                else:
                    u.can_update_sponsorships = us
                    u.can_view_registrations = vr
                    u.can_add_registrations = ar
                    u.can_update_auction = ua
                    u.can_edit_content = ec
                    u.can_edit_tournament_properties = et
                    u.can_edit_payment_processor = pp
                    updated_records.append(u)
                    u.audit()
        if updated_records:
            ndb.put_multi(updated_records)
        # Optionally add a brand-new user from the bottom row of the form.
        email = self.request.get('email')
        if email:
            us = True if self.request.get('us') == 'u' else False
            vr = True if self.request.get('vr') == 'v' else False
            ar = True if self.request.get('ar') == 'a' else False
            ua = True if self.request.get('ua') == 'u' else False
            ec = True if self.request.get('ec') == 'e' else False
            et = True if self.request.get('et') == 't' else False
            pp = True if self.request.get('pp') == 'p' else False
            new_capbits = make_bitmask(us, vr, ar, ua, ec, et, pp)
            logging.info("adding user %s: caps %02x" % (email, new_capbits))
            capabilities.add_user(email = email,
                                  can_update_sponsorships = us,
                                  can_view_registrations = vr,
                                  can_add_registrations = ar,
                                  can_update_auction = ua,
                                  can_edit_content = ec,
                                  can_edit_tournament_properties = et,
                                  can_edit_payment_processor = pp)
        memcache.flush_all()
        self.redirect('/admin/users')
def reinitialize_counters(t):
    """Recount golfers and dinners for tournament *t* from its confirmed
    sponsor registrations and store the totals on the counters entity."""
    golfers = 0
    dinners = 0
    query = Sponsor.query(ancestor = t.key)
    query = query.filter(Sponsor.confirmed == True)
    query = query.order(Sponsor.timestamp)
    for sponsor in query:
        golfers += sponsor.num_golfers + sponsor.num_golfers_no_dinner
        dinners += sponsor.num_golfers + sponsor.num_dinners
    counters = tournament.get_counters(t)
    counters.golfer_count = golfers
    counters.dinner_count = dinners
    counters.put()
    logging.info("updated golfer_count = %d, dinner_count = %d" % (golfers, dinners))
# Tournament properties
class ManageTournament(webapp2.RequestHandler):
    """Admin page for viewing and editing tournament properties."""

    def _get_date(self, prefix):
        """Build a date from the <prefix>_month/_day/_year form fields."""
        return datetime.date(int(self.request.get(prefix + "_year")),
                             int(self.request.get(prefix + "_month")),
                             int(self.request.get(prefix + "_day")))

    # Show the form.
    def get(self):
        caps = capabilities.get_current_user_caps()
        if caps is None or not caps.can_edit_tournament_properties:
            show_login_page(self.response.out, self.request.uri)
            return
        t = tournament.get_tournament(self.request.get("t"))
        # Older tournaments may predate the separate dinner deadline.
        if t.dinner_early_bird_deadline is None:
            t.dinner_early_bird_deadline = t.early_bird_deadline
        counters = tournament.get_counters(t)
        template_values = {
            'capabilities': caps,
            'tournament': t,
            'counters': counters
        }
        self.response.out.write(render_to_string('tournament.html', template_values))

    # Process the submitted info.
    def post(self):
        caps = capabilities.get_current_user_caps()
        if caps is None or not caps.can_edit_tournament_properties:
            show_login_page(self.response.out, '/admin/tournament')
            return
        t = tournament.get_tournament(self.request.get("original_name"))
        # Blank counter fields mean "recompute from the registrations."
        if self.request.get("num_golfers") == "" or self.request.get("num_dinners") == "":
            reinitialize_counters(t)
        t.name = self.request.get("new_name")
        t.published = (self.request.get("published") == "y")
        t.accepting_registrations = (self.request.get("accepting") == "y")
        # The six date fields all arrive as <prefix>_month/_day/_year
        # triples; parse them through one helper instead of six copies
        # of the same boilerplate.
        t.golf_date = self._get_date("golf")
        t.dinner_date = self._get_date("dinner")
        t.early_bird_deadline = self._get_date("early_bird")
        t.dinner_early_bird_deadline = self._get_date("dinner_early_bird")
        t.deadline = self._get_date("deadline")
        t.tribute_deadline = self._get_date("tribute_deadline")
        t.golf_price_early = int(self.request.get("golf_price_early"))
        t.golf_price_late = int(self.request.get("golf_price_late"))
        t.golf_only_price_early = int(self.request.get("golf_only_price_early"))
        t.golf_only_price_late = int(self.request.get("golf_only_price_late"))
        t.dinner_price_early = int(self.request.get("dinner_price_early"))
        t.dinner_price_late = int(self.request.get("dinner_price_late"))
        t.limit_golfers = int(self.request.get("limit_golfers"))
        t.limit_dinners = int(self.request.get("limit_dinners"))
        t.golf_sold_out = (self.request.get("golf_sold_out") == "y")
        t.dinner_sold_out = (self.request.get("dinner_sold_out") == "y")
        t.wait_list_email = self.request.get("wait_list_email")
        t.dinner_choices = self.request.get("dinner_choices")
        t.go_discount_codes = self.request.get("go_discount_codes")
        t.red_course_rating = float(self.request.get("red_course_rating"))
        t.red_course_slope = float(self.request.get("red_course_slope"))
        t.white_course_rating = float(self.request.get("white_course_rating"))
        t.white_course_slope = float(self.request.get("white_course_slope"))
        t.blue_course_rating = float(self.request.get("blue_course_rating"))
        t.blue_course_slope = float(self.request.get("blue_course_slope"))
        t.put()
        tournament.set_tournament_cache(t)
        auditing.audit(t, "Updated tournament properties", request = self.request)
        self.redirect('/admin/tournament')
# Payment Gateway
class PaymentGateway(webapp2.RequestHandler):
    """Admin page for configuring the payment-processor gateway."""

    # Show the form.
    def get(self):
        caps = capabilities.get_current_user_caps()
        if caps is None or not caps.can_edit_payment_processor:
            show_login_page(self.response.out, self.request.uri)
            return
        t = tournament.get_tournament()
        payments_info = payments.get_payments_info(t)
        self.response.out.write(render_to_string('payments.html', {
            'capabilities': caps,
            'payments': payments_info
        }))

    # Process the submitted info.
    def post(self):
        caps = capabilities.get_current_user_caps()
        if caps is None or not caps.can_edit_payment_processor:
            show_login_page(self.response.out, self.request.uri)
            return
        t = tournament.get_tournament()
        gateway = payments.Payments.query(ancestor = t.key).get()
        if gateway is None:
            # first-time setup: create the record under this tournament
            gateway = payments.Payments(parent = t.key)
        for field in ("gateway_url", "relay_url", "receipt_url",
                      "api_login_id", "transaction_key"):
            setattr(gateway, field, self.request.get(field))
        gateway.test_mode = self.request.get("test_mode") == "true"
        gateway.put()
        auditing.audit(t, "Updated payment gateway", request = self.request)
        self.redirect('/admin/payments')
# Sponsorship information.
class Sponsorships(webapp2.RequestHandler):
    """Admin page for adding, editing, and deleting sponsorship levels."""

    # Show the form.
    def get(self):
        caps = capabilities.get_current_user_caps()
        if caps is None or not caps.can_update_sponsorships:
            show_login_page(self.response.out, self.request.uri)
            return
        t = tournament.get_tournament()
        sponsorships = sponsorship.Sponsorship.query(ancestor = t.key).order(sponsorship.Sponsorship.sequence).fetch(30)
        # Prefill the "add" row with the next sequence number and the
        # level of the last existing sponsorship.
        next_seq = 1
        last_level = "Double Eagle"
        for s in sponsorships:
            next_seq = s.sequence + 1
            last_level = s.level
        template_values = {
            'capabilities': caps,
            'next_seq': next_seq,
            'last_level': last_level,
            'sponsorships': sponsorships
        }
        self.response.out.write(render_to_string('sponsorships.html', template_values))

    # Process the submitted info.
    def post(self):
        caps = capabilities.get_current_user_caps()
        if caps is None or not caps.can_update_sponsorships:
            show_login_page(self.response.out, '/admin/sponsorships')
            return
        t = tournament.get_tournament()
        sponsorship.clear_sponsorships_cache()
        count = int(self.request.get('count'))
        # Update or delete the existing sponsorships named in the form.
        for i in range(1, count + 1):
            name = self.request.get('name%d' % i)
            q = sponsorship.Sponsorship.query(ancestor = t.key)
            q = q.filter(sponsorship.Sponsorship.name == name)
            s = q.get()
            if self.request.get('delete%d' % i) == 'd':
                auditing.audit(t, "Deleted Sponsorship", data = s.level + "/" + s.name, request = self.request)
                s.key.delete()
            else:
                # Invalid or missing numeric input keeps the stored value.
                # (bare "except:" narrowed; int() raises TypeError for a
                # missing field and ValueError for a non-numeric one)
                try:
                    # consistency fix: '%s' -> '%d' to match the sibling
                    # field lookups (same formatted result for ints)
                    price = int(self.request.get('price%d' % i))
                except (TypeError, ValueError):
                    price = s.price
                try:
                    golfers_included = int(self.request.get('golfers_included%d' % i))
                except (TypeError, ValueError):
                    golfers_included = s.golfers_included
                unique = True if self.request.get('unique%d' % i) == 'u' else False
                sold = True if self.request.get('sold%d' % i) == 's' else False
                if price != s.price or golfers_included != s.golfers_included or unique != s.unique or sold != s.sold:
                    s.price = price
                    s.golfers_included = golfers_included
                    s.unique = unique
                    s.sold = sold
                    s.put()
                    auditing.audit(t, "Updated Sponsorship", data = s.level + "/" + name, request = self.request)
        # Optionally add a new sponsorship from the bottom row.
        name = self.request.get('name')
        level = self.request.get('level')
        sequence = self.request.get('seq')
        price = self.request.get('price')
        golfers_included = self.request.get('golfers_included')
        if golfers_included is None:
            golfers_included = 1
        unique = True if self.request.get('unique') == 'u' else False
        sold = True if self.request.get('sold') == 's' else False
        if name and sequence and price:
            s = sponsorship.Sponsorship(parent = t.key,
                                        name = name,
                                        level = level,
                                        sequence = int(sequence),
                                        price = int(price),
                                        golfers_included = int(golfers_included),
                                        unique = unique,
                                        sold = sold)
            s.put()
            auditing.audit(t, "Added Sponsorship", data = level + "/" + name, request = self.request)
        self.redirect('/admin/sponsorships')
def get_tees(flight, gender):
    """Pick the tee set for a golfer: women play red, flight-2 men play
    blue, all other men play white. Returns 1=Red, 2=White, 3=Blue."""
    if gender == "F":
        return 1  # Red
    return 3 if flight == 2 else 2  # Blue for flight 2, otherwise White
def calc_course_handicap(tournament, handicap_index, tees):
    """Convert a handicap index to a course handicap for the chosen tees
    (1=Red, 2=White, 3=Blue), capped at 36. 113 is the standard slope
    used to normalize a handicap index."""
    slopes = (tournament.red_course_slope,
              tournament.white_course_slope,
              tournament.blue_course_slope)
    raw = int(round(handicap_index * slopes[tees - 1] / 113.0))
    return min(36, raw)
class ViewGolfer(object):
    """Read-only presentation wrapper for one golfer row in admin views.

    Flattens a golfer entity (plus its sponsor, substitute, and team)
    into display attributes: names, team/cart info, tee selection, and
    handicap columns.
    """
    def __init__(self, t, s, g, count):
        """Build the view model.

        :param t: tournament entity (supplies course slopes)
        :param s: sponsor entity, or a falsy value when unknown
        :param g: golfer entity
        :param count: row number to display
        """
        self.golfer_key = g.key.urlsafe()
        if s:
            self.sponsor_id = s.sponsor_id
            self.sponsor_name = s.first_name + " " + s.last_name
        # If a substitute is registered, display the substitute's info
        # in place of the original golfer's.
        if g.substitute:
            g1 = g.substitute.get()
            self.has_substitute = True
        else:
            g1 = g
            self.has_substitute = False
        self.golfer = g1
        self.sequence = g.sequence
        team = None
        if g.team:
            team = g.team.get()
        # Display name: real name (starred when a substitute), else a
        # "(sponsor #n)" placeholder, else "(TBD)".
        if g1.first_name or g1.last_name:
            self.golfer_name = g1.first_name + " " + g1.last_name
            if self.has_substitute:
                self.golfer_name = "*" + self.golfer_name
        elif s:
            self.golfer_name = "(%s #%d)" % (s.last_name, g.sequence)
        else:
            self.golfer_name = "(TBD)"
        self.count = count
        if s:
            # only the sponsor's last golfer carries the pairing request
            self.pairing = s.pairing if g.sequence == s.num_golfers + s.num_golfers_no_dinner else '' # TODO: remove this
        self.team_name = team.name if team else '-'
        self.starting_hole = team.starting_hole if team else ''
        self.cart = g.cart
        # Tees: explicit per-golfer choice wins; otherwise derive from
        # team flight and gender (flight defaults to 1 without a team).
        if g1.tees:
            self.tees = g1.tees
        else:
            flight = 1
            if team:
                flight = team.flight
            self.tees = get_tees(flight, g1.gender)
        # Handicap columns: an official index goes in handicap_index_str,
        # a derived one in computed_index; with neither, show 'n/a'.
        handicap_index = get_handicap_index(g1)
        if g1.has_index:
            self.handicap_index_str = "%.1f" % handicap_index
            self.computed_index = ''
            self.course_handicap = calc_course_handicap(t, handicap_index, self.tees)
        elif handicap_index is not None:
            self.handicap_index_str = ''
            self.computed_index = "%.1f" % handicap_index
            self.course_handicap = calc_course_handicap(t, handicap_index, self.tees)
        else:
            self.handicap_index_str = ''
            self.computed_index = ''
            self.course_handicap = 'n/a'
class ViewDinner(object):
    """Display row for one dinner guest in the admin dinner listing."""

    def __init__(self, s, first_name, last_name, choice, sequence, table_num, count):
        """Build the display fields for one dinner guest of sponsor *s*."""
        self.sponsor_id = s.sponsor_id
        self.sponsor_name = "%s %s" % (s.first_name, s.last_name)
        # Named guests show their own name; unnamed slots show sponsor + slot number.
        has_name = bool(first_name) or bool(last_name)
        if has_name:
            self.guest_name = "%s %s" % (first_name, last_name)
        else:
            self.guest_name = "(%s #%d)" % (s.last_name, sequence)
        self.dinner_choice = choice
        self.sequence = sequence
        self.count = count
        # Table numbers <= 0 mean "unassigned" and render as an empty string.
        self.table_num = "" if table_num <= 0 else str(table_num)
        # Only the sponsor's last guest row carries the seating request.
        last_slot = s.num_golfers + s.num_dinners
        self.seating = s.dinner_seating if sequence == last_slot else ''
class ViewRegistrations(webapp2.RequestHandler):
    """Admin page listing all confirmed sponsors with golfer/dinner totals."""
    def get(self):
        t = tournament.get_tournament()
        caps = capabilities.get_current_user_caps()
        # Require the view-registrations capability; otherwise show the login page.
        if caps is None or not caps.can_view_registrations:
            show_login_page(self.response.out, self.request.uri)
            return
        q = Sponsor.query(ancestor = t.key)
        q = q.filter(Sponsor.confirmed == True)
        # ?sort=date orders by registration time; the default is by name.
        if self.request.get('sort') == 'date':
            q = q.order(Sponsor.timestamp)
        else:
            q = q.order(Sponsor.sort_name)
        sponsors = q.fetch(limit = None)
        golfer_count = 0
        dinner_count = 0
        for s in sponsors:
            # Count golfers who opted out of dinner so the dinner total can be
            # adjusted below.
            no_dinners = 0
            total_golfers = s.num_golfers + s.num_golfers_no_dinner
            if total_golfers:
                golfers = ndb.get_multi(s.golfer_keys[:total_golfers])
                for g in golfers:
                    if g.dinner_choice == 'none':
                        no_dinners += 1
            # Annotate the entity in memory for the template (never persisted).
            s.total_golfers = total_golfers
            s.adjusted_dinners = total_golfers - no_dinners + s.num_dinners
            # Flag sponsors whose per-golfer dinner opt-outs disagree with the
            # no-dinner count they registered for.
            s.flag_dinners = True if no_dinners != s.num_golfers_no_dinner else False
            s.net_due = s.payment_due - s.payment_made
            if s.discount:
                s.net_due -= s.discount
            s.net_due = max(0, s.net_due)
            golfer_count += total_golfers
            dinner_count += total_golfers - no_dinners + s.num_dinners
        template_values = {
            'sponsors': sponsors,
            'sponsor_count': len(sponsors),
            'golfer_count': golfer_count,
            'dinner_count': dinner_count,
            'incomplete': '',
            'capabilities': caps
        }
        self.response.out.write(render_to_string('viewsponsors.html', template_values))
class ViewIncomplete(webapp2.RequestHandler):
def get(self):
t = tournament.get_tournament()
caps = capabilities.get_current_user_caps()
if caps is None or not caps.can_view_registrations:
show_login_page(self.response.out, self.request.uri)
return
q = Sponsor.query(ancestor = t.key)
q = q.filter(Sponsor.confirmed == True)
q = q.order(Sponsor.sort_name)
sponsors = []
for s in q:
golfers_complete = 0
ndinners = 0
no_dinners = 0
total_golfers = s.num_golfers + s.num_golfers_no_dinner
if total_golfers:
golfers = ndb.get_multi(s.golfer_keys[:total_golfers])
for g in golfers:
if g.first_name and g.last_name and g.gender and (g.ghin_number or g.has_index or g.average_score):
golfers_complete += 1
if g.dinner_choice:
ndinners += 1
if | |
default = True
Whether to keep tight layout or not
linewidth: float/int, default = 0.1
The linewidth to use for heatmap
fontsize: int, default = 10
The font size for the X and Y tick labels
cmap: str, default = 'Blues'
The colormap to be used for heatmap
Returns:
None
'''
self.data = data
self.columns_to_drop = columns_to_drop
self.figsize = figsize
self.mask_upper = mask_upper
self.tight_layout = tight_layout
self.linewidth = linewidth
self.fontsize = fontsize
self.cmap = cmap
    def plot_correlation_matrix(self):
        '''
        Function to plot the Correlation Matrix Heatmap.

        Side effect: stores the correlation DataFrame on ``self.corr_data``,
        which ``target_top_corr`` reads later.
        Inputs:
            self
        Returns:
            None
        '''
        # print('-' * 79)
        # building the correlation dataframe
        self.corr_data = self.data.drop(
            self.columns_to_drop + ['TARGET'], axis=1).corr()
        if self.mask_upper:
            # masking the heatmap to show only lower triangle. This is to save
            # the RAM.
            mask_array = np.ones(self.corr_data.shape)
            mask_array = np.triu(mask_array)
        else:
            # all-zero mask: show the full matrix
            mask_array = np.zeros(self.corr_data.shape)
        plt.figure(figsize=self.figsize, tight_layout=self.tight_layout)
        sns.heatmap(
            self.corr_data,
            annot=False,
            mask=mask_array,
            linewidth=self.linewidth,
            cmap=self.cmap)
        plt.xticks(rotation=90, fontsize=self.fontsize)
        plt.yticks(fontsize=self.fontsize)
        plt.title("Heatmap de corrélation des variables numériques", fontsize=20)
        plt.show()
        # print("-" * 100)
def target_top_corr(self, target_top_columns=10):
'''
Function to return the Top Correlated features with the Target
Inputs:
self
target_top_columns: int, default = 10
The number of top correlated features with target to display
Returns:
Top correlated features DataFrame.
'''
phik_target_arr = np.zeros(self.corr_data.shape[1])
# calculating the Phik-Correlation with Target
for index, column in enumerate(self.corr_data.columns):
phik_target_arr[index] = self.data[[
'TARGET', column]].phik_matrix().iloc[0, 1]
# getting the top correlated columns and their values
top_corr_target_df = pd.DataFrame(
{'Column Name': self.corr_data.columns, 'Phik-Correlation': phik_target_arr})
top_corr_target_df = top_corr_target_df.sort_values(
by='Phik-Correlation', ascending=False)
return top_corr_target_df.iloc[:target_top_columns]
# --------------------------------------------------------------------
# -- AFFICHE LA LISTE DES IDENTIFIANTS UNIQUES
# --------------------------------------------------------------------
def print_unique_categories(data, column_name, show_counts=False):
    '''
    Function to print the basic stats such as unique categories and their counts for categorical variables
    Inputs:
        data: DataFrame
            The DataFrame from which to print statistics
        column_name: str
            Column's name whose stats are to be printed
        show_counts: bool, default = False
            Whether to show counts of each category or not
    '''
    separator = '-' * 79
    print(separator)
    print(
        f"Les catégories uniques de la variable '{column_name}' sont :\n{data[column_name].unique()}")
    print(separator)
    # Optionally follow up with the per-category row counts.
    if show_counts:
        print(
            f"Répartition dans chaque catégorie :\n{data[column_name].value_counts()}")
        print(separator)
# --------------------------------------------------------------------
# -- BARPLOT DES VARIABLES CATEGORIELLES
# --------------------------------------------------------------------
def plot_categorical_variables_bar(data, column_name, figsize=(18, 6),
                                   percentage_display=True,
                                   plot_defaulter=True, rotation=0,
                                   horizontal_adjust=0,
                                   fontsize_percent='xx-small',
                                   palette1='Set1',
                                   palette2='Set2'):
    '''
    Function to plot Categorical Variables Bar Plots
    Inputs:
        data: DataFrame
            The DataFrame from which to plot
        column_name: str
            Column's name whose distribution is to be plotted
        figsize: tuple, default = (18,6)
            Size of the figure to be plotted
        percentage_display: bool, default = True
            Whether to display the percentages on top of Bars in Bar-Plot
        plot_defaulter: bool
            Whether to plot the Bar Plots for Defaulters or not
        rotation: int, default = 0
            Degree of rotation for x-tick labels
        horizontal_adjust: int, default = 0
            Horizontal adjustment parameter for percentages displayed on the top of Bars of Bar-Plot
        fontsize_percent: str, default = 'xx-small'
            Fontsize for percentage Display
        palette1: str, default = 'Set1'
            Seaborn palette for the overall-distribution subplot
        palette2: str, default = 'Set2'
            Seaborn palette for the defaulters subplot
    Returns:
        None
    '''
    print(
        f"Nombre de catégories uniques pour {column_name} = {len(data[column_name].unique())}")
    plt.figure(figsize=figsize, tight_layout=True)
    sns.set(style='whitegrid', font_scale=1.2)
    # plotting overall distribution of category
    plt.subplot(1, 2, 1)
    # bars ordered by decreasing frequency
    data_to_plot = data[column_name].value_counts().sort_values(ascending=False)
    ax = sns.barplot(x=data_to_plot.index, y=data_to_plot, palette=palette1)
    if percentage_display:
        # annotate each bar with its share of the non-null values
        total_datapoints = len(data[column_name].dropna())
        for p in ax.patches:
            ax.text(
                p.get_x() +
                horizontal_adjust,
                p.get_height() +
                0.005 *
                total_datapoints,
                '{:1.02f}%'.format(
                    p.get_height() *
                    100 /
                    total_datapoints),
                fontsize=fontsize_percent)
    plt.xlabel(column_name, labelpad=10)
    plt.title('Toutes TARGET', pad=20, fontsize=30)
    plt.xticks(rotation=rotation, fontsize=20)
    plt.yticks(fontsize=20)
    plt.ylabel('Nombre', fontsize=20)
    # plotting distribution of category for Defaulters
    if plot_defaulter:
        # share of defaulters (TARGET == 1) within each category
        percentage_defaulter_per_category = (data[column_name][data.TARGET == 1].value_counts(
        ) * 100 / data[column_name].value_counts()).dropna().sort_values(ascending=False)
        plt.subplot(1, 2, 2)
        sns.barplot(x=percentage_defaulter_per_category.index,
                    y=percentage_defaulter_per_category, palette=palette2)
        plt.ylabel(
            'Pourcentage par catégorie pour les défaillants',
            fontsize=20)
        plt.xlabel(column_name, labelpad=10)
        plt.xticks(rotation=rotation, fontsize=20)
        plt.yticks(fontsize=20)
        plt.title('Défaillants seuls', pad=20, fontsize=30)
    plt.suptitle(f'Répartition de {column_name}', fontsize=40)
    plt.show()
def plot_categorical_variable_bar(data, column_name, figsize=(18, 6),
                                  percentage_display=True, rotation=0,
                                  horizontal_adjust=0,
                                  fontsize_percent='xx-small',
                                  palette1='Set1'):
    '''
    Function to plot a single Categorical Variable Bar Plot
    Inputs:
        data: DataFrame
            The DataFrame from which to plot
        column_name: str
            Column's name whose distribution is to be plotted
        figsize: tuple, default = (18,6)
            Size of the figure to be plotted
        percentage_display: bool, default = True
            Whether to display the percentages on top of Bars in Bar-Plot
        rotation: int, default = 0
            Degree of rotation for x-tick labels
        horizontal_adjust: int, default = 0
            Horizontal adjustment parameter for percentages displayed on the top of Bars of Bar-Plot
        fontsize_percent: str, default = 'xx-small'
            Fontsize for percentage Display
        palette1: str, default = 'Set1'
            Seaborn palette name used for the bars
    Returns:
        None
    '''
    print(
        f"Nombre de catégories uniques pour {column_name} = {len(data[column_name].unique())}")
    plt.figure(figsize=figsize, tight_layout=True)
    sns.set(style='whitegrid', font_scale=1.2)
    # bars ordered by decreasing frequency
    data_to_plot = data[column_name].value_counts().sort_values(ascending=False)
    ax = sns.barplot(x=data_to_plot.index, y=data_to_plot, palette=palette1)
    if percentage_display:
        # annotate each bar with its share of the non-null values
        total_datapoints = len(data[column_name].dropna())
        for p in ax.patches:
            ax.text(
                p.get_x() +
                horizontal_adjust,
                p.get_height() +
                0.005 *
                total_datapoints,
                '{:1.02f}%'.format(
                    p.get_height() *
                    100 /
                    total_datapoints),
                fontsize=fontsize_percent)
    plt.xlabel(column_name, labelpad=10)
    plt.title(f'Barplot de {column_name}', pad=20, fontsize=30)
    plt.xticks(rotation=rotation, fontsize=20)
    plt.yticks(fontsize=20)
    plt.ylabel('Nombre', fontsize=20)
    plt.show()
# --------------------------------------------------------------------
# -- PIEPLOT DES VARIABLES CATEGORIELLES
# --------------------------------------------------------------------
def plot_categorical_variables_pie(
        data,
        column_name,
        plot_defaulter=True,
        hole=0):
    '''
    Function to plot categorical variables Pie Plots
    Inputs:
        data: DataFrame
            The DataFrame from which to plot
        column_name: str
            Column's name whose distribution is to be plotted
        plot_defaulter: bool
            Whether to plot the Pie Plot for Defaulters or not
        hole: int, default = 0
            Radius of hole to be cut out from Pie Chart
    Returns:
        None (renders an interactive plotly figure)
    '''
    # Layout: two domain subplots (all rows vs. defaulters) or a single pie.
    if plot_defaulter:
        cols = 2
        specs = [[{'type': 'domain'}, {'type': 'domain'}]]
        titles = ['Toutes TARGET', 'Défaillants seuls']
    else:
        cols = 1
        specs = [[{'type': 'domain'}]]
        titles = [f'Répartition de la variable {column_name}']
    values_categorical = data[column_name].value_counts()
    labels_categorical = values_categorical.index
    fig = make_subplots(rows=1, cols=cols,
                        specs=specs,
                        subplot_titles=titles)
    # Left pie: distribution over all rows.
    fig.add_trace(
        go.Pie(
            values=values_categorical,
            labels=labels_categorical,
            hole=hole,
            textinfo='percent',
            textposition='inside'),
        row=1,
        col=1)
    if plot_defaulter:
        # Right pie: percentage of defaulters (TARGET == 1) within each category.
        percentage_defaulter_per_category = data[column_name][data.TARGET == 1].value_counts(
        ) * 100 / data[column_name].value_counts()
        percentage_defaulter_per_category.dropna(inplace=True)
        percentage_defaulter_per_category = percentage_defaulter_per_category.round(
            2)
        fig.add_trace(
            go.Pie(
                values=percentage_defaulter_per_category,
                labels=percentage_defaulter_per_category.index,
                hole=hole,
                textinfo='percent',
                hoverinfo='label+value'),
            row=1,
            col=2)
    fig.update_layout(title=f'Répartition de la variable {column_name}')
    fig.show()
# --------------------------------------------------------------------
# -- AFFICHE DISTPLOT ou CDF ou BOXPLOT ou VIOLINPLOT DES VARIABLES CONTINUES
# --------------------------------------------------------------------
def plot_continuous_variables(data, column_name,
                              plots=['distplot', 'CDF', 'box', 'violin'],
                              scale_limits=None, figsize=(20, 9),
                              histogram=True, log_scale=False,
                              palette=['SteelBlue', 'Crimson']):
    '''
    Function to plot continuous variables distribution
    Inputs:
        data: DataFrame
            The DataFrame from which to plot.
        column_name: str
            Column's name whose distribution is to be plotted.
        plots: list, default = ['distplot', 'CDF', 'box', 'violin']
            List of plots to plot for Continuous Variable.
        scale_limits: tuple (left, right), default = None
            To control the limits of values to be plotted in case of outliers.
        figsize: tuple, default = (20,9)
            Size of the figure to be plotted.
        histogram: bool, default = True
            Whether to plot histogram along with distplot or not.
        log_scale: bool, default = False
            Whether to use log-scale for variables with outlying points.
        palette: list of two colors, default = ['SteelBlue', 'Crimson']
            Colors for the TARGET == 0 / TARGET == 1 classes in the box and
            violin plots.
    '''
    # Work on a copy so the caller's DataFrame is never modified.
    data_to_plot = data.copy()
    if scale_limits:
        # taking only the data within the specified limits
        data_to_plot[column_name] = data[column_name][(
            data[column_name] > scale_limits[0]) & (data[column_name] < scale_limits[1])]
    number_of_subplots = len(plots)
    plt.figure(figsize=figsize)
    sns.set_style('whitegrid')
    # One subplot per requested plot kind, drawn side by side.
    for i, ele in enumerate(plots):
        plt.subplot(1, number_of_subplots, i + 1)
        plt.subplots_adjust(wspace=0.25)
        if ele == 'CDF':
            # making the percentile DataFrame for both positive and negative
            # Class Labels
            # NOTE(review): the (len - 1) divisor raises ZeroDivisionError when
            # a class has exactly one non-null row — confirm inputs are larger.
            percentile_values_0 = data_to_plot[data_to_plot.TARGET == 0][[
                column_name]].dropna().sort_values(by=column_name)
            percentile_values_0['Percentile'] = [
                ele / (len(percentile_values_0) - 1) for ele in range(len(percentile_values_0))]
            percentile_values_1 = data_to_plot[data_to_plot.TARGET == 1][[
                column_name]].dropna().sort_values(by=column_name)
            percentile_values_1['Percentile'] = [
                ele / (len(percentile_values_1) - 1) for ele in range(len(percentile_values_1))]
            plt.plot(
                percentile_values_0[column_name],
                percentile_values_0['Percentile'],
                color='SteelBlue',
                label='Non-Défaillants')
            plt.plot(
                percentile_values_1[column_name],
                percentile_values_1['Percentile'],
                color='crimson',
                label='Défaillants')
            plt.xlabel(column_name, fontsize=16)
            plt.ylabel('Probabilité', fontsize=16)
            plt.title('CDF de {}'.format(column_name), fontsize=18)
            plt.legend(fontsize='medium')
            if log_scale:
                plt.xscale('log')
                plt.xlabel(column_name + ' - (log-scale)')
        if ele == 'distplot':
            sns.distplot(data_to_plot[column_name][data['TARGET'] == 0].dropna(
            ), label='Non-Défaillants', hist=False, color='SteelBlue')
            sns.distplot(data_to_plot[column_name][data['TARGET'] == 1].dropna(
            ), label='Défaillants', hist=False, color='Crimson')
            plt.xlabel(column_name, fontsize=16)
            plt.ylabel('Probability Density', fontsize=16)
            plt.xticks(fontsize=16)
            plt.yticks(fontsize=16)
            plt.legend(fontsize=18)
            plt.title("Dist-Plot de {}".format(column_name), fontsize=18)
            if log_scale:
                plt.xscale('log')
                plt.xlabel(f'{column_name} (log scale)', fontsize=16)
        if ele == 'violin':
            sns.violinplot(x='TARGET', y=column_name, data=data_to_plot, palette=palette)
            plt.title("Violin-Plot de {}".format(column_name), fontsize=18)
            if log_scale:
                plt.yscale('log')
                plt.ylabel(f'{column_name} (log Scale)')
        if ele == 'box':
            sns.boxplot(x='TARGET', y=column_name, data=data_to_plot, palette=palette)
            plt.title("Box-Plot de {}".format(column_name), fontsize=18)
            if log_scale:
                plt.yscale('log')
                plt.ylabel(f'{column_name} (log Scale)', fontsize=16)
            # NOTE(review): these labels override the log-scale ylabel set just
            # above — looks unintended; confirm before changing behavior.
            plt.xlabel('TARGET', fontsize=16)
            plt.ylabel(f'{column_name}', fontsize=16)
            plt.xticks(fontsize=16)
            plt.yticks(fontsize=16)
    plt.show()
def plot_continuous_variable(data, column_name,
plots=['distplot', 'CDF', 'box', 'violin'],
scale_limits=None, figsize=(20, 9),
histogram=True, log_scale=False,
palette=['SteelBlue', 'Crimson']):
'''
Function to plot continuous variables distribution
Inputs:
data: DataFrame
The DataFrame from which to plot.
column_name: str
Column's name whose distribution is to be plotted.
plots: list, default = ['distplot', 'CDF', box', 'violin']
List of plots to plot for Continuous | |
__author__ = 'aarongary'
import json
import ijson
import requests
import base64
import sys
import math
import numpy as np
if sys.version_info.major == 3:
from urllib.request import urlopen, Request, HTTPBasicAuthHandler, HTTPPasswordMgrWithDefaultRealm, \
build_opener, install_opener, HTTPError, URLError
else:
from urllib2 import urlopen, Request, HTTPBasicAuthHandler, HTTPPasswordMgrWithDefaultRealm, \
build_opener, install_opener, HTTPError, URLError
class NiceCXBuilder(object):
def __init__(self, cx=None, server=None, username='scratch', password='<PASSWORD>', uuid=None, networkx_G=None, data=None, **attr):
from ndex2.nice_cx_network import NiceCXNetwork
self.nice_cx = NiceCXNetwork(user_agent='niceCx Builder')
self.node_id_lookup = {}
self.node_id_counter = 0
self.edge_id_counter = 0
self.node_inventory = {}
self.node_attribute_inventory = []
self.node_attribute_map = {}
self.edge_inventory = {}
self.edge_attribute_inventory = []
self.edge_attribute_map = {}
self.opaque_aspect_inventory = []
self.context_inventory = []
self.network_attribute_inventory = {}
self.user_base64 = None
self.username = None
self.password = <PASSWORD>
if username and password:
self.username = username
self.password = password
if sys.version_info.major == 3:
encode_string = '%s:%s' % (username, password)
byte_string = encode_string.encode()
self.user_base64 = base64.b64encode(byte_string)#.replace('\n', '')
else:
self.user_base64 = base64.encodestring('%s:%s' % (username, password)).replace('\n', '')
def set_context(self, context):
"""
Set the @context information of the network. This information maps namespace prefixes to their defining URIs
Example:
``set_context({'pmid': 'https://www.ncbi.nlm.nih.gov/pubmed/'})``
:param context: dict of name, URI pairs
:type context: dict
:return: None
:rtype: none
"""
if isinstance(context, dict):
self.context_inventory = context
elif isinstance(context, list):
if len(context) > 0:
self.context_inventory = context[0]
def set_name(self, network_name):
"""
Set the network name
:param network_name: Network name
:type network_name: string
:return: None
:rtype:none
"""
self.network_attribute_inventory['name'] = {'n': 'name', 'v': network_name, 'd': 'string'}
def add_network_attribute(self, name=None, values=None, type=None, cx_element=None):
"""
Add an attribute to the network
:param name: Name of the attribute
:type name: str
:param values: The value(s) of the attribute
:type values: One of the allowable CX types. See `Supported data types`_
:param type: They type of data supplied in values. Default is string. See `Supported data types`_
:type type: str
:return: None
:rtype: None
"""
add_this_network_attribute = {'n': name, 'v': values}
if type:
add_this_network_attribute['d'] = type
self.network_attribute_inventory[name] = add_this_network_attribute
def add_node(self, name=None, represents=None, id=None, data_type=None, map_node_ids=False):
"""
Adds a new node with the corresponding name and represents (external id)
:param node_name: Name of the node
:type node_name: str
:param represents: Representation of the node (alternate identifier)
:type represents: str
:param id:
:type id:
:return: Node ID
:rtype: int
"""
if self.node_inventory.get(name) is not None:
return self.node_inventory.get(name).get('@id')
if id:
node_id = id
else:
node_id = self.node_id_counter
self.node_id_counter += 1
add_this_node = {'@id': node_id, 'n': name}
if represents:
add_this_node['r'] = represents
if data_type:
add_this_node['d'] = data_type
self.node_inventory[name] = add_this_node
if map_node_ids:
self.node_id_lookup[name] = node_id
return node_id
def add_edge(self, source=None, target=None, interaction=None, id=None):
"""
Adds a new edge in the network by specifying source-interaction-target
:param source: The source node of this edge, either its id or the node object itself.
:type source: int, dict (with @id property)
:param target: The target node of this edge, either its id or the node object itself.
:type target: int, dict (with @id property)
:param interaction: The interaction that describes the relationship between the source and target nodes
:type interaction: str
:param id: Edge id for this edge. If none is provided the builder will create one
:type id: int
:return: Edge ID
:rtype: int
"""
if id is not None:
edge_id = id
else:
edge_id = self.edge_id_counter
self.edge_id_counter += 1
add_this_edge = {'@id': edge_id, 's': source, 't': target}
if interaction:
add_this_edge['i'] = interaction
else:
add_this_edge['i'] = 'interacts-with'
self.edge_inventory[edge_id] = add_this_edge
return edge_id
    def add_node_attribute(self, property_of, name, values, type=None):
        """
        Set an attribute of a node, where the node may be specified by its id or passed in as a node dict.
        Duplicate (node, name) pairs are silently ignored.
        :param property_of: Node ID to add the attribute to
        :type property_of: int
        :param name: Attribute name
        :type name: str
        :param values: A value or list of values of the attribute
        :type values: list, string, int or float
        :param type: The datatype of the attribute values, defaults is string. See `Supported data types`_
        :type type: str
        :raises TypeError: if property_of, name or values is None
        :raises ValueError: if values cannot be coerced to a declared float type
        :return: None
        :rtype: None
        """
        if property_of is None:
            raise TypeError('Node value is None')
        if name is None:
            raise TypeError('Property name is None')
        if values is None:
            raise TypeError('Attribute value is None')
        add_this_node_attribute = {'po': property_of, 'n': name, 'v': values}
        # Track (node, attribute-name) pairs so duplicates are skipped.
        if self.node_attribute_map.get(property_of) is None:
            self.node_attribute_map[property_of] = {}
        elif self.node_attribute_map[property_of].get(name) is not None:
            # TODO - Raise warning/exception for duplicate attribute
            return
        if type:
            if type == 'float' or type == 'double':
                # CX stores floats as 'double'; coerce scalar values.
                type = 'double'
                try:
                    if not isinstance(values, float):
                        add_this_node_attribute['v'] = float(values)
                except ValueError as e:
                    raise ValueError('Value was not of type %s' % type)
            if type == 'list_of_float' or type == 'list_of_double':
                try:
                    if isinstance(values, list):
                        for value in values:
                            if not isinstance(value, float):
                                # This only validates convertibility: the float
                                # is bound to the local name and NOT written
                                # back into the stored list.
                                value = float(value)
                except ValueError as e:
                    raise ValueError('Value was not of type %s' % type)
                type = 'list_of_double'
            add_this_node_attribute['d'] = type
        else:
            # No declared type: infer one from the Python value.
            use_this_value, attr_type = self._infer_data_type(values)
            add_this_node_attribute['v'] = use_this_value
            add_this_node_attribute['d'] = attr_type
        if add_this_node_attribute['v'] is not None:
            self.node_attribute_inventory.append(add_this_node_attribute)
            self.node_attribute_map[property_of][name] = True
    def add_edge_attribute(self, property_of=None, name=None, values=None, type=None):
        """
        Set the value(s) of attribute of an edge, where the edge may be specified by its id or passed in an object.
        Duplicate (edge, name) pairs are silently ignored.
        Example:
            ``set_edge_attribute(0, 'weight', 0.5, type='float')``
            or
            ``set_edge_attribute(edge, 'Disease', 'Atherosclerosis')``
        :param property_of: Edge to add the attribute to
        :type property_of: int or edge dict with @id attribute
        :param name: Attribute name
        :type name: str
        :param values: A value or list of values of the attribute
        :type values: list, string, int or float
        :param type: The datatype of the attribute values, defaults to the python datatype of the values. See `Supported data types`_
        :type type: str
        :raises TypeError: if property_of, name or values is None
        :raises ValueError: if values cannot be coerced to a declared float type
        :return: None
        :rtype: None
        """
        if property_of is None:
            raise TypeError('Edge value is None')
        if name is None:
            raise TypeError('Property name is None')
        if values is None:
            raise TypeError('Attribute value is None')
        add_this_edge_attribute = {'po': property_of, 'n': name, 'v': values}
        # Track (edge, attribute-name) pairs so duplicates are skipped.
        if self.edge_attribute_map.get(property_of) is None:
            self.edge_attribute_map[property_of] = {}
        elif self.edge_attribute_map[property_of].get(name) is not None:
            return
        if type:
            if type == 'float' or type == 'double':
                # CX stores floats as 'double'; coerce scalar values.
                type = 'double'
                try:
                    if not isinstance(values, float):
                        add_this_edge_attribute['v'] = float(values)
                except ValueError as e:
                    raise ValueError('Value was not of type %s' % type)
            if type == 'list_of_float' or type == 'list_of_double':
                try:
                    if isinstance(values, list):
                        for value in values:
                            if not isinstance(value, float):
                                # Validation only: the converted float is not
                                # written back into the stored list.
                                value = float(value)
                except ValueError as e:
                    raise ValueError('Value was not of type %s' % type)
                type = 'list_of_double'
            add_this_edge_attribute['d'] = type
        else:
            # No declared type: infer one from the Python value.
            use_this_value, attr_type = self._infer_data_type(values)
            add_this_edge_attribute['v'] = use_this_value
            add_this_edge_attribute['d'] = attr_type
        if add_this_edge_attribute['v'] is not None:
            self.edge_attribute_inventory.append(add_this_edge_attribute)
            self.edge_attribute_map[property_of][name] = True
def add_opaque_aspect(self, oa_name, oa_list):
self.opaque_aspect_inventory.append({oa_name: oa_list})
    #===================================
    # methods to add data by fragment
    #===================================
    # These internal callbacks route streamed CX aspect fragments directly into
    # the underlying NiceCXNetwork, bypassing the builder's inventories.
    def _add_network_attributes_from_fragment(self, fragment):
        # Append one network-attribute fragment as-is.
        self.nice_cx.networkAttributes.append(fragment)
    def _add_node_from_fragment(self, fragment):
        # Index the node fragment by its CX @id.
        self.nice_cx.nodes[fragment.get('@id')] = fragment
    def _add_edge_from_fragment(self, fragment):
        # Index the edge fragment by its CX @id.
        self.nice_cx.edges[fragment.get('@id')] = fragment
    def _add_node_attribute_from_fragment(self, fragment):
        # Group node attributes by their owning node id ('po').
        if self.nice_cx.nodeAttributes.get(fragment.get('po')) is None:
            self.nice_cx.nodeAttributes[fragment.get('po')] = []
        self.nice_cx.nodeAttributes[fragment.get('po')].append(fragment)
    def _add_edge_attribute_from_fragment(self, fragment):
        # Group edge attributes by their owning edge id ('po').
        if self.nice_cx.edgeAttributes.get(fragment.get('po')) is None:
            self.nice_cx.edgeAttributes[fragment.get('po')] = []
        self.nice_cx.edgeAttributes[fragment.get('po')].append(fragment)
    def _add_citation_from_fragment(self, fragment):
        self.nice_cx.citations[fragment.get('@id')] = fragment
    def _add_supports_from_fragment(self, fragment):
        self.nice_cx.supports[fragment.get('@id')] = fragment
    def _add_edge_supports_from_fragment(self, fragment):
        # One fragment may apply to several edges ('po' is a list of ids).
        for po_id in fragment.get('po'):
            self.nice_cx.edgeSupports[po_id] = fragment.get('supports')
    def _add_node_citations_from_fragment(self, fragment):
        for po_id in fragment.get('po'):
            self.nice_cx.nodeCitations[po_id] = fragment.get('citations')
    def _add_edge_citations_from_fragment(self, fragment):
        for po_id in fragment.get('po'):
            self.nice_cx.edgeCitations[po_id] = fragment.get('citations')
def get_nice_cx(self):
#==========================
# ADD CONTEXT
#==========================
if isinstance(self.context_inventory, dict):
self.nice_cx.set_context(self.context_inventory)
else:
for c in self.context_inventory:
self.nice_cx.set_context(c)
#=============================
# ASSEMBLE NETWORK ATTRIBUTES
#=============================
#{'n': 'name', 'v': network_name, 'd': 'string'}
for k, v in self.network_attribute_inventory.items():
self.nice_cx.add_network_attribute(name=v.get('n'), values=v.get('v'), type=v.get('d'))
#==========================
# ASSEMBLE NODES
#==========================
for k, v in self.node_inventory.items():
self.nice_cx.nodes[v.get('@id')] = v
#==========================
# ASSEMBLE NODE ATTRIBUTES
#==========================
for a in self.node_attribute_inventory:
property_of = a.get('po')
if self.nice_cx.nodeAttributes.get(property_of) is None:
self.nice_cx.nodeAttributes[property_of] = []
self.nice_cx.nodeAttributes[property_of].append(a)
#==========================
# ASSEMBLE EDGES
#==========================
for k, v in self.edge_inventory.items():
self.nice_cx.edges[k] = v
#==========================
# ASSEMBLE EDGE ATTRIBUTES
#==========================
for a in self.edge_attribute_inventory:
property_of = a.get('po')
if self.nice_cx.edgeAttributes.get(property_of) is None:
self.nice_cx.edgeAttributes[property_of] = []
self.nice_cx.edgeAttributes[property_of].append(a)
#==========================
# ASSEMBLE OPAQUE ASPECTS
#==========================
for oa in self.opaque_aspect_inventory:
for k, v in oa.items():
self.nice_cx.add_opaque_aspect(k, v)
return self.nice_cx
def get_frag_from_list_by_key(self, cx, key):
return_list = []
for aspect in cx:
if key in aspect:
if isinstance(aspect[key], list):
for a_item in aspect[key]:
return_list.append(a_item)
else:
return_list.append(aspect[key])
return return_list
def load_aspect(self, aspect_name):
#with open('Signal1.cx', mode='r') | |
# format_gis_data.py
#!/usr/bin/python
import json
import re
import sys
def Feature(name2, id2, **kwargs):
	# Build a GeoJSON-style Feature dict with an (initially empty)
	# MultiPolygon geometry; any extra keyword args become properties
	# alongside the mandatory name2/id2 entries.
	properties = dict(kwargs, name2=name2, id2=id2)
	return {
		'type': 'Feature',
		'properties': properties,
		'geometry': {'type': 'MultiPolygon', 'coordinates': []},
	}
# Bit flags carried in a Feature's `flags` property (see Feature()).
FeatureFlag_SetBounds = 0x0001 # has partial subunits; set/extend item.bounds for this feature
FeatureFlag_SkipBounds = 0x0002 # don't extend parent/ancestor bounds for this feature
# Shared Feature objects: park units whose geometry is split across several
# polygons reuse a single Feature instance per unit.
NPS_NezPerce_BuffaloEddy = Feature('Buffalo Eddy', 'be')
NPS_NezPerce_Spalding = Feature('Spalding', 'sp', W2='Spalding,_Idaho')
NPS_FortVancouver = Feature('Fort Vancouver', 'fv')
NPS_WA_NetulLanding = Feature('Netul Landing', 'netul')
NPS_WA_SunsetBeach = Feature('Sunset Beach', 'sunset')
NPS_Rainier_Wilkeson = Feature('Wilkeson', 'wilkeson', flags=FeatureFlag_SkipBounds)
NPS_SanJuan_EnglishCamp = Feature('English Camp', 'e')
# Module-level processing state; G and XY2LL are assigned elsewhere
# (consuming code is outside this chunk).
G = None
XY2LL = None
SecondPoints = set()
HolesToRemove = ()
# Map ID -> tuple of (lng, lat) sample points. NOTE(review): consuming code
# is not visible here; presumably any polygon containing one of these points
# is dropped from that map's output (out-of-state or detached parcels) —
# confirm against the caller.
PolygonsToRemove = {
	'nps_ca': (
		#
		# Golden Gate National Recreation Area
		#
		(-122.519694, 37.533448), # coastal area including Fitzgerald Marine Reserve
		(-122.519702, 37.533489), # coastal area including Point Montara Light
		(-122.517480, 37.539958), # small area near Montara
		(-122.509973, 37.584858), # small area near Devil's Slide
		(-122.513501, 37.594477), # Pedro Point Headlands
		(-122.499031, 37.600852), # Pacifica State Beach
		(-122.425814, 37.874280), # Angel Island State Park
		#
		# Lassen Volcanic National Park
		#
		(-121.600877, 40.348154), # Headquarters in Mineral, CA
		#
		# World War II Valor in the Pacific National Monument
		#
		(-157.954368, 21.363719), # HI
		(-157.954916, 21.363748), # HI
		(-157.949648, 21.364684), # HI
		(-157.937538, 21.366363), # HI
		(-157.936582, 21.367199), # HI
		(-157.937516, 21.369237), # HI
	),
	'nps_hi': (
		#
		# World War II Valor in the Pacific National Monument
		#
		(-121.378507, 41.887758), # Tule Lake Unit (CA)
	),
	'nps_id': (
		#
		# Nez Perce National Historical Park
		#
		(-109.206051, 48.373056), # MT
		(-117.224839, 45.337427), # OR
		(-117.520354, 45.570223), # OR
		#
		# Minidoka National Historic Site
		#
		(-122.507464, 47.614825), # Bainbridge Island Japanese American Exclusion Memorial (WA)
	),
	'nps_mt': (
		#
		# Nez Perce National Historical Park
		#
		(-116.267642, 45.803354), # ID
		(-115.959301, 46.076778), # ID
		(-116.918370, 46.170823), # ID
		(-116.935269, 46.173151), # ID
		(-116.004092, 46.203655), # ID
		(-116.818266, 46.445164), # ID
		(-116.818326, 46.446846), # ID
		(-116.817829, 46.446914), # ID
		(-116.814847, 46.448688), # ID
		(-116.810456, 46.449968), # ID
		(-116.329538, 46.500551), # ID
		(-117.224839, 45.337427), # OR
		(-117.520354, 45.570223), # OR
	),
	'nps_nm': (
		#
		# Manhattan Project National Historical Park
		#
		(-84.317198, 35.928011), # Oak Ridge, TN: X-10 Graphite Reactor
		(-84.394445, 35.938672), # Oak Ridge, TN: K-25 Building
		(-84.256801, 35.984691), # Oak Ridge, TN: Y-12 Building 9204-3
		(-84.255495, 35.985871), # Oak Ridge, TN: Y-12 Building 9731
		(-119.387098, 46.587399), # Hanford, WA: Hanford High School
		(-119.646295, 46.628954), # Hanford, WA: B Reactor
		(-119.715131, 46.638641), # Hanford, WA: Bruggemann's Warehouse
		(-119.618684, 46.644369), # Hanford, WA: Allard Pump House
		(-119.478732, 46.660534), # Hanford, WA: White Bluffs Bank
	),
	'nps_or': (
		#
		# Nez Perce National Historical Park
		#
		(-116.267642, 45.803354), # ID
		(-115.959301, 46.076778), # ID
		(-116.918370, 46.170823), # ID
		(-116.935269, 46.173151), # ID
		(-116.004092, 46.203655), # ID
		(-116.818266, 46.445164), # ID
		(-116.818326, 46.446846), # ID
		(-116.817829, 46.446914), # ID
		(-116.814847, 46.448688), # ID
		(-116.810456, 46.449968), # ID
		(-116.329538, 46.500551), # ID
		(-109.206051, 48.373056), # MT
	),
	'nps_tn': (
		#
		# Manhattan Project National Historical Park
		#
		(-106.264959, 35.840842), # Los Alamos, NM: Pajarito Site
		(-106.345095, 35.843839), # Los Alamos, NM: V-Site
		(-106.347393, 35.855718), # Los Alamos, NM: Gun Site
		(-119.387098, 46.587399), # Hanford, WA: Hanford High School
		(-119.646295, 46.628954), # Hanford, WA: B Reactor
		(-119.715131, 46.638641), # Hanford, WA: Bruggemann's Warehouse
		(-119.618684, 46.644369), # Hanford, WA: Allard Pump House
		(-119.478732, 46.660534), # Hanford, WA: White Bluffs Bank
	),
	'nps_wa': (
		#
		# Manhattan Project National Historical Park
		#
		(-84.317198, 35.928011), # Oak Ridge, TN: X-10 Graphite Reactor
		(-84.394445, 35.938672), # Oak Ridge, TN: K-25 Building
		(-84.256801, 35.984691), # Oak Ridge, TN: Y-12 Building 9204-3
		(-84.255495, 35.985871), # Oak Ridge, TN: Y-12 Building 9731
		(-106.264959, 35.840842), # Los Alamos, NM: Pajarito Site
		(-106.345095, 35.843839), # Los Alamos, NM: V-Site
		(-106.347393, 35.855718), # Los Alamos, NM: Gun Site
		#
		# Minidoka National Historic Site
		#
		(-114.239971, 42.678247), # ID
		(-114.249704, 42.692788), # ID
		#
		# Nez Perce National Historical Park
		#
		(-116.267642, 45.803354), # ID
		(-115.959301, 46.076778), # ID
		(-116.918370, 46.170823), # ID
		(-116.935269, 46.173151), # ID
		(-116.004092, 46.203655), # ID
		(-116.818266, 46.445164), # ID
		(-116.818326, 46.446846), # ID
		(-116.817829, 46.446914), # ID
		(-116.814847, 46.448688), # ID
		(-116.810456, 46.449968), # ID
		(-116.329538, 46.500551), # ID
		(-109.206051, 48.373056), # MT
		(-117.224839, 45.337427), # OR
		(-117.520354, 45.570223), # OR
	),
}
# (lng, lat) -> Feature: the polygon found at each sample point is split out
# of its parent area as a separately named sub-feature. Several points may
# map to one shared Feature object (multi-polygon sub-units).
# NOTE(review): consuming code is outside this chunk — confirm semantics.
SeparateFeatures = {
	#
	# NPS -> AZ -> Montezuma Castle National Monument
	#
	(-111.825721, 34.613346): Feature('Montezuma Castle', 'castle'),
	(-111.749979, 34.645673): Feature('Montezuma Well', 'well', W2='Montezuma_Well', flags=FeatureFlag_SkipBounds),
	#
	# NPS -> AZ -> Navajo National Monument
	#
	(-110.817732, 36.670105): Feature('Inscription House', 'ih', flags=FeatureFlag_SkipBounds),
	(-110.537306, 36.689120): Feature('Betatakin', 'bt'),
	(-110.501645, 36.764945): Feature('Keet Seel', 'ks', flags=FeatureFlag_SkipBounds),
	#
	# NPS -> AZ -> Saguaro National Park
	#
	(-110.498832, 32.230070): Feature('Rincon Mountain District', 'rmd'),
	(-111.113908, 32.315900): Feature('Tucson Mountain District', 'tmd'),
	#
	# NPS -> AZ -> Tumacacori National Historical Park
	#
	(-110.901675, 31.408678): Feature('Los Santos Angeles de Guevavi', 'g',
		W2='Mission_Los_Santos_%C3%81ngeles_de_Guevavi', flags=FeatureFlag_SkipBounds),
	(-110.959223, 31.454015): Feature('San Cayetano de Calabazas', 'c',
		W2='Mission_San_Cayetano_de_Calabazas', flags=FeatureFlag_SkipBounds),
	(-111.043785, 31.569337): Feature('<NAME>', 't',
		W2='Mission_San_Jos%C3%A9_de_Tumac%C3%A1cori'),
	#
	# NPS -> CA -> Channel Islands National Park
	#
	(-119.037070, 33.448103): Feature('Santa Barbara Island', 'sb', W2='Santa_Barbara_Island'),
	(-120.270728, 34.001945): Feature('Santa Rosa Island', 'sr', W2='Santa_Rosa_Island_(California)'),
	(-119.339176, 34.022787): Feature('Anacapa Island', 'ac', W2='Anacapa_Island'),
	(-120.472997, 34.030254): Feature('San Miguel Island', 'sm', W2='San_Miguel_Island'),
	(-119.949952, 34.060441): Feature('Santa Cruz Island', 'sc', W2='Santa_Cruz_Island'),
	(-119.266819, 34.248069): Feature('Headquarters', 'hq', flags=FeatureFlag_SkipBounds),
	#
	# NPS -> CA -> Golden Gate National Recreation Area
	#
	(-122.486926, 37.642721): Feature('Milagra Ridge', 'mr', W2='Milagra_Ridge'),
	(-122.509305, 37.776285): Feature('Sutro Heights Park', 'sh', W2='Sutro_Heights_Park'),
	(-122.424849, 37.832023): Feature('Alcatraz Island', 'az', W2='Alcatraz_Island'),
	#
	# NPS -> CA -> Lava Beds National Monument
	#
	(-121.394089, 41.851072): Feature('Petroglyph Section', 'p', W2='Petroglyph_Point_Archeological_Site'),
	#
	# NPS -> CA -> World War II Valor in the Pacific National Monument
	#
	(-121.378507, 41.887758): Feature('Tule Lake Unit', 'tule',
		W2='Tule_Lake_Unit,_World_War_II_Valor_in_the_Pacific_National_Monument'),
	#
	# NPS -> ID -> Nez Perce National Historical Park
	#
	(-116.267642, 45.803354): Feature('White Bird Battlefield', 'wb', W2='Battle_of_White_Bird_Canyon'),
	(-115.959301, 46.076778): Feature('Clearwater Battlefield', 'cw', W2='Battle_of_the_Clearwater'),
	(-116.918370, 46.170823): NPS_NezPerce_BuffaloEddy,
	(-116.935269, 46.173151): NPS_NezPerce_BuffaloEddy,
	(-116.004092, 46.203655): Feature('Heart of the Monster', 'hm'),
	(-116.818266, 46.445164): NPS_NezPerce_Spalding,
	(-116.818326, 46.446846): NPS_NezPerce_Spalding,
	(-116.817829, 46.446914): NPS_NezPerce_Spalding,
	(-116.814847, 46.448688): NPS_NezPerce_Spalding,
	(-116.810456, 46.449968): NPS_NezPerce_Spalding,
	(-116.329538, 46.500551): Feature('Canoe Camp', 'cc'),
	#
	# NPS -> MT -> Little Bighorn Battlefield National Monument
	#
	(-107.384146, 45.521082): Feature('Reno-Benteen Battlefield', 'rb'),
	(-107.443667, 45.564359): Feature('Custer Battlefield', 'c'),
	#
	# NPS -> MT -> Nez Perce National Historical Park
	#
	(-109.206051, 48.373056): Feature('Bear Paw Battlefield', 'bp', W2='Battle_of_Bear_Paw'),
	#
	# NPS -> NM -> Bandelier National Monument
	#
	(-106.206648, 35.867916): Feature('Tsankawi Section', 't', W2='Tsankawi'),
	#
	# NPS -> NM -> Carlsbad Caverns National Park
	#
	(-104.460401, 32.109626): Feature('Rattlesnake Springs', 'rs', W2='Rattlesnake_Springs_Historic_District'),
	#
	# NPS -> NM -> Chaco Culture National Historical Park
	#
	(-108.109815, 35.674474): Feature("Kin Ya'a", 'ky', W2='Kin_Ya%27a', flags=FeatureFlag_SkipBounds),
	(-107.681287, 35.972367): Feature('Pueblo Pintado', 'pp', flags=FeatureFlag_SkipBounds),
	(-108.145752, 35.979813): Feature('Kin Bineola', 'kb', W2='Kin_Bineola', flags=FeatureFlag_SkipBounds),
	#
	# NPS -> NM -> El Malpais National Monument
	#
	(-107.819072, 35.096448): Feature('Northwest New Mexico Visitor Center', 'v', flags=FeatureFlag_SkipBounds),
	#
	# NPS -> NM -> Manhattan Project National Historical Park -> Los Alamos Unit
	#
	(-106.264959, 35.840842): Feature('Pajarito Site', 'p'),
	(-106.345095, 35.843839): Feature('V-Site', 'v'),
	(-106.347393, 35.855718): Feature('Gun Site', 'g'),
	#
	# NPS -> NM -> Pecos National Historical Park
	#
	(-105.817663, 35.539247): Feature('Glorieta Unit (Canoncito)', 'gwest', W2='Glorieta_Pass_Battlefield'),
	(-105.683200, 35.565951): Feature('Main Unit', 'main'),
	(-105.755533, 35.577073): Feature("Glorieta Unit (Pigeon's Ranch)", 'geast', W2='Glorieta_Pass_Battlefield'),
	#
	# NPS -> NM -> Petroglyph National Monument
	#
	(-106.749622, 35.153536): Feature('Southern Geologic Window', 'sgw'),
	(-106.758586, 35.174355): Feature('Northern Geologic Window', 'ngw'),
	(-106.688781, 35.189383): Feature('<NAME>', 'pmc'),
	#
	# NPS -> NM -> Salinas Pueblo Missions National Monument
	#
	(-106.075920, 34.260079): Feature('<NAME>', 'gq'),
	(-106.364623, 34.451208): Feature('<NAME>', 'a', W2='Abo_(historic_place)'),
	(-106.292308, 34.591781): Feature('Qu<NAME>', 'q', W2='Quarai'),
	#
	# NPS -> OR -> John Day Fossil Beds National Monument
	#
	(-119.618141, 44.596444): Feature('Sheep Rock Unit', 'sr'),
	(-119.643497, 44.629640): Feature('Sheep Rock Unit (Cathedral Rock)', 'cr'),
	(-119.632783, 44.659439): Feature('Sheep Rock Unit (Foree Area)', 'foree'),
	(-120.263931, 44.663927): Feature('Painted Hills Unit', 'ph'),
	(-120.402547, 44.929289): Feature('Clarno Unit', 'clarno'),
	#
	# NPS -> OR -> Oregon Caves National Monument and Preserve
	#
	(-123.644339, 42.160947): Feature('Illinois Valley Visitors Center', 'ivvc', flags=FeatureFlag_SkipBounds),
	#
	# NPS -> OR -> Nez Perce National Historical Park
	#
	(-117.224839, 45.337427): Feature('Old Chief Joseph Gravesite', 'cj', W2='Old_Chief_Joseph_Gravesite'),
	(-117.520354, 45.570223): Feature('Lostine Homesite', 'lost'),
	#
	# NPS -> TN -> Manhattan Project National Historical Park -> Oak Ridge Unit
	#
	(-84.317198, 35.928011): Feature('X-10 Graphite Reactor', 'x10',
		W2='X-10_Graphite_Reactor'),
	(-84.394445, 35.938672): Feature('K-25 Building', 'k25',
		W2='K-25'),
	(-84.256801, 35.984691): Feature('Y-12 Building 9204-3', 'y9204',
		W2='Clinton_Engineer_Works#Y-12_electromagnetic_separation_plant'),
	(-84.255495, 35.985871): Feature('Y-12 Building 9731', 'y9731',
		W2='Clinton_Engineer_Works#Y-12_electromagnetic_separation_plant'),
	#
	# NPS -> UT -> Canyonlands National Park
	#
	(-110.189552, 38.441484): Feature('Horseshoe Canyon Unit', 'hc', W2='Horseshoe_Canyon_(Utah)'),
	#
	# NPS -> UT -> Hovenweep National Monument
	#
	(-109.186458, 37.302006): Feature('Cajon', 'cajon'),
	(-109.082068, 37.388997): Feature('Square Tower', 'st'),
	(-109.038125, 37.397360): Feature('Holly', 'holly'),
	(-109.033020, 37.405043): Feature('Horseshoe and Hackberry', 'hh'),
	(-108.722510, 37.413030): Feature('Goodman Point', 'gp'),
	(-108.983395, 37.444011): Feature('Cutthroat Castle', 'cc'),
	#
	# NPS -> WA -> Fort Vancouver National Historic Site
	#
	(-122.605329, 45.357518): Feature('McLoughlin House', 'mh',
		W2='Fort_Vancouver_National_Historic_Site#McLoughlin_House_site'),
	(-122.668086, 45.620146): NPS_FortVancouver,
	(-122.670418, 45.621595): NPS_FortVancouver,
	(-122.666020, 45.624453): NPS_FortVancouver,
	#
	# NPS -> WA -> Lewis and Clark National Historical Park
	#
	(-123.932193, 45.984622): Feature('Salt Works', 'salt'),
	(-123.929978, 46.093451): NPS_WA_SunsetBeach,
	(-123.929925, 46.105998): NPS_WA_SunsetBeach,
	(-123.860655, 46.117749): NPS_WA_NetulLanding,
	(-123.861380, 46.117812): NPS_WA_NetulLanding,
	(-123.870444, 46.138536): Feature('Fort Clatsop', 'clatsop', W2='Fort_Clatsop'),
	(-123.897380, 46.243354): Feature('Station Camp', 'station'),
	(-123.858657, 46.248833): Feature('Dismal Nitch', 'dn', W2='Dismal_Nitch'),
	(-124.074637, 46.302412): Feature('Cape Disappointment', 'cd', W2='Cape_Disappointment_(Washington)'),
	#
	# NPS -> WA -> Manhattan Project National Historical Park -> Hanford Unit
	#
	(-119.387098, 46.587399): Feature('Hanford High School', 'hh', W2='Hanford_Site'),
	(-119.646295, 46.628954): Feature('B Reactor', 'br', W2='B_Reactor'),
	(-119.715131, 46.638641): Feature("Bruggemann's Warehouse", 'bw'),
	(-119.618684, 46.644369): Feature('Allard Pump House', 'ap'),
	(-119.478732, 46.660534): Feature('White Bluffs Bank', 'wb', W2='White_Bluffs,_Washington'),
	#
	# NPS -> WA -> Minidoka National Historic Site
	#
	(-122.507464, 47.614825): Feature('Bainbridge Island|Japanese American|Exclusion Memorial', 'jam',
		W2='Bainbridge_Island_Japanese_American_Exclusion_Memorial'),
	#
	# NPS -> WA -> Mount Rainier National Park
	#
	(-122.110922, 46.753557): Feature('Elbe', 'elbe', flags=FeatureFlag_SkipBounds),
	(-121.952864, 46.992832): Feature('Carbon River Ranger Station', 'cr'),
	(-122.045713, 47.103524): NPS_Rainier_Wilkeson,
	(-122.046664, 47.103544): NPS_Rainier_Wilkeson,
	(-122.046975, 47.103899): NPS_Rainier_Wilkeson,
	#
	# NPS -> WA -> San Juan Island National Historical Park
	#
	(-123.004894, 48.469481): Feature('American Camp', 'a'),
	(-123.153833, 48.584716): NPS_SanJuan_EnglishCamp,
	(-123.133915, 48.594316): NPS_SanJuan_EnglishCamp,
	(-123.132979, 48.595408): NPS_SanJuan_EnglishCamp,
}
def log(message, *formatArgs):
	# Write a formatted message (plus newline) to stderr.
	# BUG FIX: the original used the Python-2-only statement
	# `print >>sys.stderr, ...`, a SyntaxError under Python 3.
	# sys.stderr.write() behaves identically on both Python 2 and 3.
	sys.stderr.write(message.format(*formatArgs) + "\n")
def err(*args):
	# Log a formatted message (same arguments as log()) and terminate
	# the process via sys.exit().
	log(*args)
	sys.exit()
def getMapLink(point, numPoints):
	# Build a pmap.html URL for the (lng, lat) `point`, with a right-aligned
	# "(N points)" annotation. Uses module-global G (the current map object).
	count_label = '({} points)'.format(numPoints)
	return 'pmap.html?o={}&ll={:f},{:f} {:>14}'.format(G.id, point[1], point[0], count_label)
def featureKey(feature):
	# Sort key for features: the display name stored in the properties dict.
	props = feature['properties']
	return props['name']
def getBounds(points):
minLng, minLat = maxLng, maxLat = points[0]
for lng, lat in points[1:-1]:
if lng | |
= (F1[i][j] + cs) / 2
# if F1[i][j] > one:
# F1[i][j] = one
# stdout.write("{0:.5f}".format(F1[i][j]) + "\t")
for child in children:
# if (F1[i][child] / cs) * F1[i][j] == zero and F1[i][child] != zero:
# print(i, j, child, F1[i][j], F1[i][child])
F1[i][child] = (F1[i][child] / cs) * F1[i][j]
# for i in range(1, m):
# for j in order:
# children = children_dict[j]
# cs = 0
# for child in children:
# cs += F1[i][child]
# C[i][j] = F1[i][j] - cs
# for i in range(1, m):
# s = sum(C[i])
# for j in range(0, n):
# C[i][j] = C[i][j] / s
return C, F1
def get_p(F, parents, fail_threshold):
    """Return only the second result of get_c() (the corrected frequency
    matrix) for the tree encoded by the `parents` list."""
    tree = list_to_tree(parents)
    _, p = get_c(F, tree, fail_threshold)
    return p
def get_f(C, T):
    """Matrix product F = C x T using Decimal arithmetic.

    C is m x n, T is n x n (indexed over len(C[0])); returns an m x n
    list-of-lists of Decimals. Returns [] when C has no rows.
    """
    if not C:
        return []
    n = len(C[0])
    zero_d = Decimal('0.0')
    return [
        [sum((row[k] * T[k][j] for k in range(n)), zero_d) for j in range(n)]
        for row in C
    ]
def get_f_from_parents(C, parents):
    """Convenience wrapper: build the tree matrix from a parent list, then
    return F = C x tree (see get_f)."""
    return get_f(C, list_to_tree(parents))
def write_im(mat, out):
    """Write matrix `mat` to `out` via str(), tab-separated (each row keeps a
    trailing tab), one line per row. Assumes a rectangular matrix."""
    width = len(mat[0])
    for row in mat:
        for j in range(width):
            out.write(str(row[j]) + "\t")
        out.write("\n")
def write_dm(mat, out):
    """Write numeric matrix `mat` to `out` with 5-decimal fixed-point
    formatting, tab-separated (trailing tab per row), one line per row."""
    width = len(mat[0])
    for row in mat:
        for j in range(width):
            out.write("{0:.5f}".format(row[j]) + "\t")
        out.write("\n")
def read_F(infile):
    """Parse an F-matrix file: first line holds variant labels, every later
    line is a whitespace-separated row of Decimals.

    Returns (F, var): the Decimal rows and the header tokens.
    """
    lines = infile.readlines()
    header = lines[0].split(None)
    rows = [list(map(Decimal, line.split(None))) for line in lines[1:]]
    return rows, header
def read_dm(infile):
    """Read a whitespace-separated matrix of Decimals from `infile`,
    one row per line."""
    return [list(map(Decimal, line.split(None))) for line in infile.readlines()]
def print_dm(mat):
    """Print `mat` to stdout with 5-decimal formatting, tab-separated
    (trailing tab per row). Assumes a square matrix."""
    width = len(mat[0])
    for row in mat:
        for j in range(width):
            stdout.write("{0:.5f}".format(row[j]) + "\t")
        stdout.write("\n")
def print_dl(l):
    """Print the numbers in `l` on one line, 3 decimals each, tab-separated
    (with a trailing tab)."""
    for value in l:
        stdout.write("{0:.3f}".format(value) + "\t")
    stdout.write("\n")
def dl_to_str(l):
    """Format the numbers in `l` with 3 decimals and join them with tabs
    (no trailing tab)."""
    return "\t".join("{0:.3f}".format(value) for value in l)
def print_tree(T):
    """Print the square adjacency matrix `T` to stdout, space-separated
    (trailing space per row), one line per row."""
    size = len(T)
    for row in T:
        for j in range(size):
            stdout.write(str(row[j]) + " ")
        stdout.write("\n")
def get_children(parents, parent):
    """Return the indices (1..len(parents)-1) whose parent pointer equals
    `parent`. Index 0 (the root) is never considered a child."""
    return [i for i in range(1, len(parents)) if parents[i] == parent]
def parents_to_children(parents, num_clones):
    """Invert a parent list into a children dict {clone: [child, ...]}.

    Every clone 0..num_clones-1 gets an entry; clones whose parent is -1
    (unattached) are not linked anywhere.
    """
    children = {clone: [] for clone in range(num_clones)}
    for clone in range(1, num_clones):
        parent = parents[clone]
        if parent != -1:
            children[parent].append(clone)
    return children
def children_to_parents(children, num_clones):
    """Invert a children dict into a parents dict for clones
    1..num_clones-1, defaulting to -1 for clones with no recorded parent."""
    parents = {clone: -1 for clone in range(1, num_clones)}
    for clone in range(0, num_clones):
        for child in children[clone]:
            parents[child] = clone
    return parents
def read_variants_set(f):
    """Read one integer per line from `f` and return them as a set."""
    return {int(line.strip()) for line in f}
def read_variants_list(f):
    """Read one integer per line from `f`, preserving file order."""
    return [int(line.strip()) for line in f]
def reorder(l, order):
    """Return the elements of `l` picked in the sequence given by `order`."""
    return list(map(l.__getitem__, order))
def rearrange_rows(mat1, variants1, variants2):
    """Reorder the rows of mat1 (currently ordered by variants1) so they
    follow the order given by variants2."""
    return [mat1[variants1.index(variant)] for variant in variants2]
def remove_redundant_time_points(F, R):
    """Drop rows (time points) of F that add no step-structure information.

    A row is kept when its last non-zero column index advances past the
    previous kept row's (z - z_prev > 0) and the row is not entirely zero.
    Returns (new_F, new_R, selected_times, removed_times) where the two
    index lists record which rows were kept / dropped.

    NOTE(review): relies on module-global `zero` for comparisons, and the
    inner scan range reaches z == -1 (re-testing row[-1]) when a row has no
    entry above zero — confirm that edge case is intended.
    """
    selected_times = []
    removed_times = []
    z = 0
    z_prev = -1
    new_F = []
    new_R = []
    for j in range(0, len(F)):
        row = F[j]
        # Locate the last column whose value exceeds zero.
        for z in range(len(row) - 1, -2, -1):
            if row[z] > zero:
                break
        if z - z_prev > 0 and not all([v == zero for v in row]):
            new_F.append(F[j])
            new_R.append(R[j])
            selected_times.append(j)
        else:
            removed_times.append(j)
        z_prev = z
    return new_F, new_R, selected_times, removed_times
def rearrange_penalty(F, R, variants, drop_indices=False):
    """Column-sort F (and the matching penalty matrix R) into diagonal form.

    Columns are ordered by the row index of their first entry above `zero`
    (a module-global Decimal); R's columns are permuted identically and
    `variants` is reordered to match.

    Returns (F_out, R_out, variants_out, indices) by default, or just
    (F_out, R_out, variants_out) when `drop_indices` is truthy.

    BUG FIX: square_forms_penalty() calls this with a fourth positional
    argument (True) and unpacks three values; previously that call raised
    TypeError. `drop_indices` restores that calling convention while the
    three-argument behaviour is unchanged.
    NOTE(review): the early returns for empty input still yield three
    values (as in the original) — confirm default-path callers expect that.
    """
    F_out = deepcopy(F)
    R_temp = deepcopy(R)
    R_out = []
    m = len(F_out)
    if m == 0:
        return F, R, variants
    if len(F[0]) == 0:
        return F, R, variants
    # Make F_out a diagonal (step) matrix: work column-wise via transpose.
    F_out = list(map(list, zip(*F_out)))
    R_temp = list(map(list, zip(*R_temp)))
    order = []
    i = 0
    for row in F_out:
        # Row index of the column's first non-zero entry.
        for i in range(0, len(row)):
            if row[i] > zero:
                break
        order.append(i)
    indices = list(range(0, len(F_out)))
    [order, indices, F_out] = list(zip(*sorted(zip(order, indices, F_out), key=lambda x: x[0])))
    for index in indices:
        R_out.append(R_temp[index])
    variants_out = reorder(variants, indices)
    # Transpose back to row-major form.
    F_out = list(map(list, zip(*F_out)))
    R_out = list(map(list, zip(*R_out)))
    if drop_indices:
        return F_out, R_out, variants_out
    return F_out, R_out, variants_out, list(indices)
def rearrange(F, variants, drop_indices=False):
    """Column-sort F into diagonal (step) form and reorder `variants` to match.

    Columns are ordered by the row index of their first entry above `zero`
    (a module-global Decimal).

    Returns (F_out, variants_out, indices) by default, or just
    (F_out, variants_out) when `drop_indices` is truthy.

    BUG FIX: several callers in this file invoke rearrange(F, variants, True)
    and unpack two values; previously the third positional argument raised
    TypeError. `drop_indices` restores that calling convention while the
    two-argument behaviour is unchanged.
    NOTE(review): the early returns for empty input still yield two values
    (as in the original) — confirm default-path callers expect that.
    """
    F_out = deepcopy(F)
    m = len(F_out)
    if m == 0:
        return F, variants
    if len(F[0]) == 0:
        return F, variants
    # Make F_out a diagonal (step) matrix: work column-wise via transpose.
    F_out = list(map(list, zip(*F_out)))
    order = []
    i = 0
    for row in F_out:
        # Row index of the column's first non-zero entry.
        for i in range(0, len(row)):
            if row[i] > zero:
                break
        order.append(i)
    indices = list(range(0, len(F_out)))
    [order, indices, F_out] = list(zip(*sorted(zip(order, indices, F_out), key=lambda x: x[0])))
    variants_out = reorder(variants, indices)
    # Transpose back to row-major form.
    F_out = list(map(list, zip(*F_out)))
    if drop_indices:
        return F_out, variants_out
    return F_out, variants_out, list(indices)
def get_step_structure(F):
    """Derive the step structure of an already diagonal (step) matrix F.

    Assumes redundant time points have been removed. Returns
    (steps, arrival_times): steps[i] is the list of column indices that
    first become non-zero at time i, and arrival_times maps each column to
    its step index. Returns [] for an empty matrix.
    """
    if not F:
        return []
    n = len(F[0])
    steps = []
    prev_last = -1
    for row in F:
        last = n - 1
        # Last column with a positive entry in this row.
        for last in range(n - 1, -1, -1):
            if row[last] > 0:
                break
        steps.append(list(range(prev_last + 1, last + 1)))
        prev_last = last
    arrival_times = [t for t, step in enumerate(steps) for _ in step]
    return steps, arrival_times
def correct_steps_for_known_founders(steps, k):
    """Split the second step so the first k non-root columns form their own step.

    `k` is the number of known founder clones. Returns `steps` unchanged
    when k is 0 or already coincides with the second step's last column;
    exits with an error when k exceeds that boundary.
    """
    boundary = steps[1][-1]
    if k in (0, boundary):
        return steps
    if boundary < k:
        exit("Invalid input for known founders. The VAF for founder variants in first time point should be non-zero.\n")
    return [[0], list(range(1, k + 1)), list(range(k + 1, boundary + 1))] + steps[2:]
def squarify(F, step_structure):
    """Expand step matrix F into a square form following `step_structure`.

    Each step (sequence of column indices) spawns len(step) output rows;
    row j of a step reveals one more of that step's columns, with the rest
    padded using the module-global `zero`. Rows of F beyond the step
    structure are carried over unchanged.
    """
    # Assumes F's step structure matches with step_structure
    if not F:
        return []
    m = len(F)
    n = len(F[0])
    # Flatten the step structure into the target column order.
    new_order = list(chain.from_iterable(step_structure))
    r = len(new_order)
    if r < n:
        new_order = new_order + list(range(r, n))
    F_out = []
    k = 0
    for i in range(0, len(step_structure)):
        tup = step_structure[i]
        reordered = reorder(F[i], new_order)
        for j in range(0, len(tup)):
            # Keep the k already-revealed columns, reveal the first j+1
            # columns of this step, pad the remainder with zero.
            F_out.append(reordered[:k] + [F[i][x] for x in tup[:(j + 1)]] + [zero] * (n - j - 1 - k))
        k += len(tup)
    if r < n:
        # Rows past the step structure are appended as-is.
        for i in range(len(step_structure), m):
            F_out.append(F[i])
    return F_out
def square_forms_penalty(F, S, variants):
    """Yield every square form of step matrix F with its penalty matrix S.

    For each way of ordering the columns inside every step of F's step
    structure, yields (F_square, S_square, reordered_variants). Progress is
    written to stdout because the number of matrices is the product of the
    factorials of the step sizes.
    """
    F1, S1, variants1 = rearrange_penalty(F, S, variants, True)
    m = len(F1)
    if m == 0:
        return [], [], []
    # Getting the step structure.
    # BUG FIX: get_step_structure returns (steps, arrival_times); the
    # original iterated/permuted the whole 2-tuple instead of the steps.
    step_structure, _ = get_step_structure(F1)
    num_matrices = 1
    for item in step_structure:
        num_matrices = num_matrices * factorial(len(item))
    perm = map(permutations, step_structure)
    prod = product(*perm)
    stdout.write("\t" + str(num_matrices) + " matrices\n")
    stdout.flush()
    i = 0
    for order in prod:
        # if i % 100000 == 0:
        stdout.write("\tM" + str(i + 1) + "; " + str(datetime.now()) + "\n")
        stdout.flush()
        i += 1
        F_yield = squarify(F1, order)
        S_yield = squarify(S1, order)
        yield F_yield, S_yield, reorder(variants1, list(chain.from_iterable(order)))
def square_forms(F, variants):
    """Yield every square form of step matrix F.

    For each way of ordering the columns inside every step of F's step
    structure, yields (F_square, reordered_variants). Progress is written
    to stdout because the number of matrices is the product of the
    factorials of the step sizes.
    """
    F1, variants1 = rearrange(F, variants, True)
    m = len(F1)
    if m == 0:
        return [], []
    # Getting the step structure.
    # BUG FIX: get_step_structure returns (steps, arrival_times); the
    # original iterated/permuted the whole 2-tuple instead of the steps.
    step_structure, _ = get_step_structure(F1)
    num_matrices = 1
    for item in step_structure:
        num_matrices = num_matrices * factorial(len(item))
    perm = map(permutations, step_structure)
    prod = product(*perm)
    stdout.write("\t" + str(num_matrices) + " matrices\n")
    stdout.flush()
    i = 0
    for order in prod:
        # if i % 100000 == 0:
        stdout.write("\tM" + str(i + 1) + "; " + str(datetime.now()) + "\n")
        stdout.flush()
        i += 1
        F_yield = squarify(F1, order)
        yield F_yield, reorder(variants1, list(chain.from_iterable(order)))
def sub_f(in_F_file, in_var_file, sub_var_file, out_F_file, out_var_file):
    """Project an F matrix onto a subset of variants and write the result.

    Reads F (in_F_file) and its variant order (in_var_file), keeps only the
    variants listed in sub_var_file (preserving in_var order), re-diagonalises
    the reduced matrix, and writes the matrix / variant list to
    out_F_file / out_var_file.

    IMPROVEMENT: files are opened with context managers so handles are
    closed even when parsing or writing raises (the original leaked all five
    handles on error); membership testing uses a set instead of a list scan.
    NOTE(review): rearrange() is called with a third positional argument
    (as in the original) — confirm rearrange's signature supports it.
    """
    with open(in_F_file) as f_in, open(in_var_file) as f_var, open(sub_var_file) as f_sub:
        in_F = read_dm(f_in)
        in_var = read_variants_list(f_var)
        sub_var = read_variants_list(f_sub)
    # Work column-wise: transpose so each row corresponds to one variant.
    columns = list(map(list, zip(*in_F)))
    keep = set(sub_var)
    out_F = []
    out_var = []
    for i, var in enumerate(in_var):
        if var in keep:
            out_var.append(var)
            out_F.append(columns[i])
    # Transpose back and restore diagonal (step) form.
    out_F = list(map(list, zip(*out_F)))
    out_F, out_var = rearrange(out_F, out_var, True)
    with open(out_F_file, "w") as f_out:
        write_dm(out_F, f_out)
    with open(out_var_file, "w") as f_out_var:
        for var in out_var:
            f_out_var.write(str(var) + "\n")
def remap_parents(indices, parents):
    """Translate a parent array into the index space given by `indices`.

    Entry i (for i >= 1) describes clone i's parent; root children
    (parents[i] == 0) are left as 0 in the output.
    """
    remapped = [0] * len(parents)
    for child in range(1, len(parents)):
        parent = parents[child]
        if parent != 0:
            remapped[indices[child - 1]] = indices[parent - 1]
    return remapped
def read_results(f_path):
    """Read a whitespace-separated integer matrix from the file at f_path,
    one row per line."""
    with open(f_path) as handle:
        return [[int(token) for token in line.split(None)] for line in handle.readlines()]
def read_clones(f_path):
f = open(f_path)
out = []
lines = f.readlines()
for line in lines:
out.append(list(map(lambda x: [0] + list(map(int, | |
from archive.groups import Group
from archive.teams import Team
from logging import Logging
from itertools import cycle
import datetime
from copy import deepcopy
from sys import exit
import collections
from os import path, getcwd
class Tournament:
"""
Store (and initialise) dictionary and storage structures
Contains dictionaries for teams, groups, pitches which in turn
contain values which are Team, Group and Pitch object respectively.
"""
    def __init__(self, team_list, name="tournament"):
        """Initialise tournament storage and register the given teams.

        :param team_list: iterable of team names; seeding order = list order
        :param name: tournament name, also used for the log file name
        """
        self.name = name
        self.log_file = path.join(getcwd(), self.name + ".log")
        self.groups = {}    # group index -> Group
        self.teams = {}     # 1-based seed -> Team
        self.pitches = {}   # pitch id -> Pitch
        self.schedule = []  # ordered list of Fixture objects
        self.group_size = 0
        self.total_groups = 0
        self.total_teams = 0
        self.total_pitches = 0
        self.max_placement_game_slots = 0
        self.max_concurrent_games = 0
        self.req_group_games = 0
        # Timings
        # NOTE(review): `Timings` is not among this module's visible imports;
        # these two assignments store *class* objects as placeholders until
        # set_timings() replaces them with real values — confirm intent.
        self.timings = Timings
        self.current_time_slot = datetime.datetime
        # Populate teams with Team objects
        for pos, team in enumerate(team_list):
            self.teams[pos + 1] = Team(team, pos + 1)
        self.total_teams = len(team_list)
        Logging.write_log_event(self.log_file,
                                'Tournament object initialisation',
                                'Populated dict teams',
                                'Total teams: {}'.format(self.total_teams))
    def create_groups(self, group_size=4):
        """Partition the teams into groups of `group_size` and seed them.

        Seeding walks the groups cyclically, handing each group the current
        highest remaining seed (pop(0)) and the current lowest (pop(-1)) —
        a high/low pairing scheme. Stops when the seed list is exhausted.
        """
        self.group_size = group_size
        self.total_groups = self.total_teams // self.group_size
        print("{} groups of {} teams".format(self.total_groups, group_size))
        # Each group plays size*(size-1)/2 round-robin games.
        self.req_group_games = self.total_groups * (self.group_size * (self.group_size - 1)) // 2
        print("Total group games: {}".format(self.req_group_games))
        # Create group objects within dictionary
        for i in range(self.total_groups):
            self.groups[i] = Group(i, self.group_size)
        # Assign teams in initial self.groups by seed
        temp_seed_list = list(range(1, self.total_teams + 1))
        for i in cycle(range(self.total_groups)):
            try:
                team = self.teams[temp_seed_list.pop(0)]
                self.groups[i].addTeam(team)
                team = self.teams[temp_seed_list.pop(-1)]
                self.groups[i].addTeam(team)
            except IndexError:
                # Run out of teams to place into self.groups
                break
def create_pitches(self, total_pitches=2):
self.total_pitches = total_pitches
for id in range(total_pitches):
self.pitches[id] = Pitch(id)
    def set_timings(self, timings):
        """Store the timing configuration and derive scheduling capacity.

        Computes the total available time across both days, then how many
        placement-game slots remain once the required group games are
        accounted for; warns when group games dominate (>60% of the time)
        or placement slots are fewer than teams. Finally resets
        current_time_slot to the start of day 1.
        """
        self.timings = timings
        length_day1 = self.timings.day1_end - self.timings.day1_start
        length_day2 = self.timings.day2_end - self.timings.day2_start
        available_time = length_day1 + length_day2
        # A slot is the game itself plus the changeover break after it.
        adj_group_game_length = self.timings.group_game_length + self.timings.game_break
        adj_game_length = self.timings.game_length + self.timings.game_break
        self.max_placement_game_slots = self.total_pitches * (available_time -
                                                              adj_group_game_length * (
                                                                  self.req_group_games // self.total_pitches)
                                                              ) // adj_game_length
        total_group_game_time = (self.req_group_games * adj_group_game_length) / self.total_pitches
        print("Total Tournament Time: {}".format(available_time))
        if total_group_game_time / available_time > 0.6:
            print("{} group games lasting {} ({}% of available time!)".format(self.req_group_games,
                                                                              total_group_game_time,
                                                                              100 * total_group_game_time / available_time))
        if self.max_placement_game_slots < self.total_teams:
            print("Only {} game slots available for placement games!".format(self.max_placement_game_slots))
            print("Consider lengthening tournament hours, adding more pitches or removing teams")
        self.current_time_slot = self.timings.day1_start
    def create_group_stage(self):
        """Build all group-stage fixtures and assign them to the schedule.

        Creates a match_ups dictionary of Fixture objects (one round-robin
        list per group) and hands it to assign_fixtures_to_schedule().
        """
        # At most half a group can be on a pitch simultaneously.
        self.max_concurrent_games = min([self.total_pitches, self.group_size // 2])
        Logging.write_log_event(self.log_file,
                                "create_group_stage",
                                "",
                                'max concurrent games: {}'.format(self.max_concurrent_games))
        # Create dictionary of group game match ups (as fixtures)
        match_ups = {}
        for group in self.groups.values():
            match_ups[group.index] = self.create_group_fixtures(group)
        self.assign_fixtures_to_schedule(self.groups.keys(), match_ups, group_game=True, log_stage="create_group_stage")
@staticmethod
def get_group_match_up_indices(group_size):
"""
Create a list of tuples for possible team1 and team2 index
combinations. When a team is chosen as t1, it is removed from t2
to stop double counting
:rtype: tuple array
:param group_size: number of teams in a group
"""
match_ups = []
team_ones = list(range(0, group_size))
team_twos = list(range(0, group_size))
for t1 in team_ones:
for t2 in team_twos:
if t1 != t2:
match_ups.append((t1, t2))
team_twos.pop(team_twos.index(t1))
return match_ups
    def assign_fixtures_to_schedule(self, group_keys, fixtures, group_game, log_stage="-"):
        """
        Drain `fixtures` into self.schedule: repeatedly take the highest-
        priority remaining match for the current group, stamp it with the
        current pitch and time slot, and append it. Pitches cycle per game
        (time advances when the pitch cycle wraps); the group advances after
        every self.max_concurrent_games assignments.

        :param group_keys: list of groups, made into a cycled iterable
        :param fixtures: dictionary of fixtures with pitch, time emitted, containing only the teams involved
        :param group_game: True/False
        :param log_stage: information for logging
        :return: None
        """
        groups_it = cycle(iter(group_keys))
        pitches = cycle(range(self.total_pitches))
        pitch = next(pitches)
        group = next(groups_it)
        i_concurrent = 0
        assigned_fixtures = 0
        # NOTE(review): placeholder built with 5 args while create_bracket
        # constructs Fixture with 6 — confirm Fixture's signature.
        match_to_append = Fixture(None, None, None, None, None)
        Logging.write_log_event(self.log_file,
                                log_stage,
                                'assign_fixtures_to_schedule',
                                'Begin assigning {} games to schedule'.format(
                                    sum(len(matches) for matches in fixtures.values())))
        while True:
            # Get match priorities
            match_priority = self.return_match_priorities(fixtures)
            try:
                prio_i_match = match_priority[group].index(max(match_priority[group]))
            except ValueError:
                # max() of an empty list: this group has no fixtures left,
                # which (since groups cycle) ends the assignment loop.
                print("Assigned {} fixtures to the schedule".format(assigned_fixtures))
                break
            match_to_append = fixtures[group].pop(prio_i_match)
            # Assign Time and Pitch to match before adding to schedule
            match_to_append.game_start = self.current_time_slot
            match_to_append.pitch = pitch
            # Log chosen fixture to append
            Logging.write_log_event(path.join(getcwd(), 'create_group_stage.log'),
                                    log_stage,
                                    'highest priority match chosen',
                                    'T:{} P:{} T1:{} T2:{} Priority:{:5.0f}'
                                    .format(match_to_append.game_start.strftime('%H:%M'),
                                            match_to_append.pitch,
                                            match_to_append.team1.name,
                                            match_to_append.team2.name,
                                            match_priority[group][prio_i_match]))
            self.schedule.append(match_to_append)
            assigned_fixtures += 1
            i_concurrent += 1
            # Increment pitch for next game
            pitch = next(pitches)
            if pitch == 0:
                # Pitch choice has just cycled around: must move to next time slot
                self.current_time_slot = self.increment_current_time(self.current_time_slot, group_game=group_game)
            # Increment group if max concurrent games has been reached
            if i_concurrent == self.max_concurrent_games:
                i_concurrent = 0
                group = next(groups_it)
        return None
    def create_bracket(self):
        """
        Build level-1 bracket fixtures and assign them to the schedule.

        top x, the rest decided by group stage
        Match ups:
        Top8/bottom 8
        1-8,2-7,3-6,4-5
        """
        # Split the seeds into top/bottom halves; nudge top_half so it stays
        # even (small fields round up, larger fields round down).
        top_half = self.total_teams // 2
        if top_half % 2 != 0:
            if top_half <= 7:
                top_half += 1
            else:
                top_half -= 1
        grouped_seeds = {'top': list(range(1, top_half + 1)), 'bottom': list(range(top_half + 1, self.total_teams + 1))}
        if len(grouped_seeds['bottom']) % 2 != 0:
            print("Must have even number of teams in bottom half of bracket")
            print(len(grouped_seeds['bottom']))
            exit(1)
        # Create dictionary of lists of level 1 bracket match ups
        # todo dictionary creation should be in a method to avoid repetition for both group and bracket stages
        seed_combos = {}
        match_ups = {}
        for g in ['top', 'bottom']:
            seed_combos[g] = []
            # Pair best remaining seed with worst remaining seed (1-8, 2-7, ...).
            while len(grouped_seeds[g]) > 0:
                t1 = grouped_seeds[g].pop(0)
                t2 = grouped_seeds[g].pop(-1)
                seed_combos[g].append((t1, t2))
            # Turn match up seed combinations to a dict of fixtures
            # NOTE(review): Fixture is constructed with 6 args here but with
            # 5 elsewhere in this class — confirm Fixture's signature.
            match_ups[g] = []
            for t1, t2 in seed_combos[g]:
                match_ups[g].append(deepcopy(Fixture(self.teams[t1],
                                                     self.teams[t2],
                                                     -1,
                                                     None,
                                                     None,
                                                     None)))
        # Assign match ups to schedule
        self.assign_fixtures_to_schedule(['top', 'bottom'], match_ups, group_game=False, log_stage="create_bracket")
    def create_group_fixtures(self, group):
        """Build the round-robin Fixture list for one group.

        NOTE(review): deepcopy of each freshly built Fixture also deep-copies
        the referenced Team objects — confirm that detaching fixtures from
        the shared Team instances is intended.
        """
        # Generate list of (t1, t2) tuples for a generic group
        matchup_indices = self.get_group_match_up_indices(self.group_size)
        group_fixtures = []
        for t1, t2 in matchup_indices:
            group_fixtures.append(deepcopy(Fixture(group.get_team_by_index(t1),
                                                   group.get_team_by_index(t2),
                                                   -1,
                                                   None,
                                                   None,
                                                   group.index)))
        return group_fixtures
def assign_timings_to_schedule(self):
"""
Iterate through schedule, assigning timings to the fixture list
"""
# Iterate over schedule items and iterate timings
current_time = {}
for pitch in self.pitches.keys():
current_time[pitch] = self.timings.day1_start
for fixture in self.schedule:
if fixture.group is not None:
game_length = self.timings.group_game_length
else:
game_length = self.timings.game_length
# Move to 'next day' if required
if self.timings.day2_start > current_time[fixture.pitch] > self.timings.day1_end:
current_time[fixture.pitch] = self.timings.day2_start
if fixture.game_start is None:
fixture.game_start = current_time[fixture.pitch]
fixture.game_length = game_length
current_time[fixture.pitch] += game_length + self.timings.game_break
else:
# Fixture already has a time assigned, skip
current_time[fixture.pitch] += (fixture.game_length + self.timings.game_break)
def print_schedule(self):
"""Output schedule in easy to read format"""
fixtures_by_pitch = []
for pitch in range(self.total_pitches):
fixtures_by_pitch.append([])
assert len(fixtures_by_pitch) == self.total_pitches, "incorrect fixtures_by_pitch initialisation"
for fixture in self.schedule:
fixtures_by_pitch[fixture.pitch].append(fixture)
# Find longest dimension list
longest_length = len(max(fixtures_by_pitch, key=lambda col: len(col)))
# Time for printing to screen
header = "{:<16}".format("Game Time")
for pitch in range(self.total_pitches):
header += "Pitch {:<20}".format(pitch)
print("longest_length", longest_length)
print(header)
for i in range(longest_length):
fixture_info = [
" {} ".format(datetime.datetime.strftime(fixtures_by_pitch[0][i].game_start, '%d/%m %H:%M'))]
for pitch in range(self.total_pitches):
try:
fixture = fixtures_by_pitch[pitch][i]
fixture_info.append("{:<10s} vs {:<10s}".format(fixture.team1.name,
fixture.team2.name))
except IndexError:
fixture_info.append("{:^10s} vs {:^10s}".format("-", "-", "-", "-"))
print(" | ".join(fixture_info))
def return_match_priorities(self, remaining_matches):
"""
:return index linked match priority dictionary of lists
Prioritise a match according to:
Slots since last match
Already playing in current slot?
Games already played(?)
:parameter remaining_matches - dictionary of lists
"""
# Iterate through list, assessing current priority of match
# relative to current fixture list
priorities = {}
for g_key, group in remaining_matches.items():
priorities[g_key] = []
for match_up in group:
# Assess match priority
# Slots since last match
# Games already played
# Work backwards through schedule
t1_games_played = 0
t1_last_game_time = self.timings.day1_start
t2_games_played = 0
t2_last_game_time = self.timings.day1_start
for fixture in self.schedule:
if fixture.team1.id == match_up.team1.id or fixture.team2.id == match_up.team2.id:
t1_games_played += 1
t1_last_game_time = fixture.game_start
if fixture.team1.id == match_up.team1.id or fixture.team2.id == match_up.team2.id:
t2_last_game_time = fixture.game_start
t2_games_played += 1
# lowest_games_played = min([t1_games_played, t2_games_played])
total_games_played = t1_games_played + t2_games_played
time_since_last_game = min([self.current_time_slot - t1_last_game_time,
self.current_time_slot - t2_last_game_time])
priority = (24.0 - time_since_last_game.seconds / 3600.0) + (10 - total_games_played) * 10
if time_since_last_game < (
min([self.timings.game_length, self.timings.group_game_length]) + self.timings.game_break):
if t1_games_played == 0 and t2_games_played == 0:
pass
else:
priority = -1000
priorities[g_key].append(priority)
return priorities
def increment_current_time(self, current_time, group_game):
"""
:param current_time: datetime object
:param group_game: bool
:return: incremented time
"""
if group_game:
g_length = self.timings.group_game_length
else:
g_length = self.timings.game_length
current_time += (g_length + self.timings.game_break)
if current_time >= self.timings.day1_end:
current_time = self.timings.day2_end
return current_time
class Pitch:
def __init__(self, identifier):
self.id = | |
<gh_stars>0
#
# Copyright 2020 Nebulon, Inc.
# All Rights Reserved.
#
# DISCLAIMER: THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO
# EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES
# OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
from .graphqlclient import GraphQLParam, NebMixin
from datetime import datetime
from .common import PageInput, read_value
from .filters import StringFilter
from .sorting import SortDirection
from .npods import NPodSpuInput, \
BondType, \
BondLACPTransmitRate, \
BondTransmitHashPolicy
from .updates import UpdateHistory
from .tokens import TokenResponse
# Public names exported by this module (``from <module> import *`` surface).
__all__ = [
    "SpuSort",
    "SpuFilter",
    "NTPServerInput",
    "SecureEraseSPUInput",
    "ReplaceSpuInput",
    "SetNTPServersInput",
    "NTPServer",
    "IPInfoState",
    "Spu",
    "SpuList",
    "SpuCustomDiagnostic",
    "SpuMixin"
]
class SpuSort:
    """A sort object for services processing units (SPU)

    Allows sorting SPUs on common properties. Only a single property may be
    set on one sort object.
    """

    def __init__(
            self,
            serial: SortDirection = None
    ):
        """Constructs a new sort object for SPUs

        Only one property of the sort object may be specified.

        :param serial: Sort direction for the ``serial`` property
        :type serial: SortDirection, optional
        """
        self.__serial = serial

    @property
    def serial(self) -> SortDirection:
        """Sort direction for the ``serial`` property"""
        return self.__serial

    @property
    def as_dict(self):
        # Key names follow the GraphQL schema's naming
        return {
            "serial": self.serial,
        }
class SpuFilter:
    """A filter object to filter services processing units (SPU)

    Allows filtering for specific SPUs registered in nebulon ON. Only a
    single property may be set per filter object; combine several filters
    with the ``and_filter`` / ``or_filter`` options when filtering on more
    than one property.
    """

    def __init__(
            self,
            serial: StringFilter = None,
            not_in_npod: bool = None,
            and_filter=None,
            or_filter=None
    ):
        """Constructs a new filter object

        Only one property may be specified per filter; use ``and_filter``
        and ``or_filter`` to concatenate multiple filters.

        :param serial: Filter based on SPU serial number
        :type serial: StringFilter, optional
        :param not_in_npod: Filter for SPUs that are not in a nPod
        :type not_in_npod: bool, optional
        :param and_filter: Concatenate another filter with a logical AND
        :type and_filter: SpuFilter, optional
        :param or_filter: Concatenate another filter with a logical OR
        :type or_filter: SpuFilter, optional
        """
        self.__serial = serial
        self.__not_in_npod = not_in_npod
        self.__and = and_filter
        self.__or = or_filter

    @property
    def serial(self) -> StringFilter:
        """Filter based on SPU serial number"""
        return self.__serial

    @property
    def not_in_npod(self) -> bool:
        """Filter for SPUs that are not in a nPod"""
        return self.__not_in_npod

    @property
    def and_filter(self):
        """Allows concatenation of multiple filters via logical AND"""
        return self.__and

    @property
    def or_filter(self):
        """Allows concatenation of multiple filters via logical OR"""
        return self.__or

    @property
    def as_dict(self):
        # Key names follow the GraphQL schema's camelCase convention
        return {
            "serial": self.serial,
            "notInNPod": self.not_in_npod,
            "and": self.and_filter,
            "or": self.or_filter,
        }
class NTPServerInput:
    """An input object to configure a NTP server

    NTP servers are used for automatic time configuration on the services
    processing unit (SPU). SPUs ship with default network time servers
    configured, which customers can override when those defaults are not
    reachable or different time settings are required.
    """

    def __init__(
            self,
            server_hostname: str,
            pool: bool = None,
            prefer: bool = None
    ):
        """Constructs a new input object to configure NTP servers

        :param server_hostname: The DNS hostname of the NTP server to use
        :type server_hostname: str
        :param pool: Indicates if the specified NTP server hostname is a NTP
            pool. By default, this value is considered ``False``.
        :type pool: bool, optional
        :param prefer: Indicates if the specified NTP server is the preferred
            NTP server. By default, this value is considered ``False``.
        :type prefer: bool, optional
        """
        self.__server_hostname = server_hostname
        self.__pool = pool
        self.__prefer = prefer

    @property
    def server_hostname(self) -> str:
        """The DNS hostname of the NTP server"""
        return self.__server_hostname

    @property
    def pool(self) -> bool:
        """Indicates if the specified NTP server hostname is a NTP pool"""
        return self.__pool

    @property
    def prefer(self) -> bool:
        """Indicates if the specified NTP server is the preferred NTP server"""
        return self.__prefer

    @property
    def as_dict(self):
        # Key names follow the GraphQL schema's camelCase convention
        return {
            "serverHostname": self.server_hostname,
            "pool": self.pool,
            "prefer": self.prefer,
        }
class SecureEraseSPUInput:
    """An input object to secure-erase a services processing unit (SPU)

    The secure erase functionality allows a deep-erase of data stored on the
    physical drives attached to the SPU. Only SPUs that are not part of a
    nPod can be secure-erased.
    """

    def __init__(self, spu_serial: str):
        """Constructs a new input object for secure-erase a SPU

        :param spu_serial: The serial number of the SPU to secure-erase
        :type spu_serial: str
        """
        self.__spu_serial = spu_serial

    @property
    def spu_serial(self) -> str:
        """The serial number of the SPU"""
        return self.__spu_serial

    @property
    def as_dict(self):
        # Key names follow the GraphQL schema's camelCase convention
        return {
            "spuSerial": self.spu_serial,
        }
class ReplaceSpuInput:
    """An input object to replace a services processing unit (SPU)

    The replace SPU operation transitions the configuration of an old,
    likely failed, SPU to a new replacement unit and allows modifying the
    configuration during the process.
    """

    def __init__(
            self,
            npod_uuid: str,
            previous_spu_serial: str,
            new_spu_info: NPodSpuInput,
            sset_uuid: str
    ):
        """Constructs a new input object to replace a SPU

        :param npod_uuid: The unique identifier of the nPod of the old SPU
            that is being replaced
        :type npod_uuid: str
        :param previous_spu_serial: The serial number of the old SPU that is
            being replaced
        :type previous_spu_serial: str
        :param new_spu_info: Configuration information for the new SPU
        :type new_spu_info: NPodSpuInput
        :param sset_uuid: The storage set information for the existing SPU.
            This information can be obtained from the active replacement
            alert and only used to verify that the correct SPU is selected.
        :type sset_uuid: str
        """
        self.__npod_uuid = npod_uuid
        self.__previous_spu_serial = previous_spu_serial
        self.__new_spu_info = new_spu_info
        self.__sset_uuid = sset_uuid

    @property
    def npod_uuid(self) -> str:
        """The UUID of the nPod of the old SPU that is being replaced"""
        return self.__npod_uuid

    @property
    def previous_spu_serial(self) -> str:
        """The serial number of the old SPU that is being replaced"""
        return self.__previous_spu_serial

    @property
    def new_spu_info(self) -> NPodSpuInput:
        """Configuration information for the new SPU"""
        return self.__new_spu_info

    @property
    def sset_uuid(self) -> str:
        """The storage set information for the existing SPU"""
        return self.__sset_uuid

    @property
    def as_dict(self):
        # Key names follow the GraphQL schema's camelCase convention
        return {
            "nPodUUID": self.npod_uuid,
            "previousSPUSerial": self.previous_spu_serial,
            "newSPUInfo": self.new_spu_info,
            "ssetUUID": self.sset_uuid,
        }
class SetNTPServersInput:
"""An input object to configure NTP servers
NTP servers are used for automatic time configuration on the services
processing unit (SPU). The SPU has default network time servers (NTP)
configured. However, customers can customize them if the default NTP
servers are not accessible or different time settings are required.
"""
def __init__(
self,
servers: [NTPServerInput],
spu_serial: str = None,
npod_uuid: str = None
):
"""Constructs a new input object to configure NTP servers
NTP servers are used for automatic time configuration on the services
processing unit (SPU). The SPU has default network time servers (NTP)
configured. However, customers can customize them if the default NTP
servers are not accessible or different time settings are required.
Either a SPU serial number or a nPod uuid must be specified.
:param servers: List of NTP server configurations that shall be applied
to an SPU
:type servers: [NTPServerInput]
:param spu_serial: The serial number of the services processing unit
:type spu_serial: str, optional
:param | |
robot_pos - self.x[:3])
# Calculate jacobian for the back hight
torso_jac = np.array([[0], [0], [1]])
# Calculate object jacobian
# obj_jac = -1*np.array([np.cross(axis, obj_pos - gp - obj_trans[:3,3].flatten()) for axis in axises]).T
obj_jac = (
-1
* np.array(
[
np.cross(axis, obj_pos - obj_trans[:3, 3].flatten())
for axis in axises
]
).T
)
obj_jac = np.c_[-np.eye(3), obj_jac]
# Create final 3x26 jacobian matrix -> (Gradient checked to be correct)
dist_jac = np.hstack(
(base_jac, torso_jac, np.zeros((3, 8)), arm_jac, np.zeros((3, 1)), obj_jac)
)
return (dist_val, dist_jac)
    def rot_lock(self, obj_trans, robot_trans, axises, arm_joints):
        """
        This function calculates the value and the jacobian of the rotational error between
        robot gripper's rotational axis and object's rotational axis

        Returns a (3, 1) value vector (one alignment error per basis axis,
        zero when the frames are aligned) and a (3, 26) jacobian.

        obj_trans: object's rave_body transformation
        robot_trans: robot gripper's rave_body transformation
        axises: rotational axises of the object
        arm_joints: list of robot joints
        """
        rot_vals = []
        rot_jacs = []
        # One error term per basis axis: dot(obj_dir, world_dir) == 1 iff the
        # rotated axes coincide
        for local_dir in np.eye(3):
            obj_dir = np.dot(obj_trans[:3, :3], local_dir)
            world_dir = robot_trans[:3, :3].dot(local_dir)
            rot_vals.append(np.array([[np.dot(obj_dir, world_dir) - 1]]))
            # computing robot's jacobian
            arm_jac = np.array(
                [
                    np.dot(obj_dir, np.cross(joint.GetAxis(), world_dir))
                    for joint in arm_joints
                ]
            ).T.copy()
            arm_jac = arm_jac.reshape((1, len(arm_joints)))
            # Only the base rotation (about z) changes the gripper direction
            base_jac = np.array(np.dot(obj_dir, np.cross([0, 0, 1], world_dir)))
            base_jac = np.array([[0, 0, base_jac]])
            # computing object's jacobian
            obj_jac = np.array(
                [np.dot(world_dir, np.cross(axis, obj_dir)) for axis in axises]
            )
            obj_jac = np.r_[[0, 0, 0], obj_jac].reshape((1, 6))
            # Create final 1x26 jacobian matrix
            # Column layout: base(3) | zeros(9) | arm(7) | zeros(1) | object(6)
            # (presumably torso + left-arm DOFs are the zeroed 9 columns and
            # this is the right-arm variant - TODO confirm against the state
            # vector ordering used by the enclosing class)
            rot_jacs.append(
                np.hstack(
                    (base_jac, np.zeros((1, 9)), arm_jac, np.zeros((1, 1)), obj_jac)
                )
            )
        rot_val = np.vstack(rot_vals)
        rot_jac = np.vstack(rot_jacs)
        return (rot_val, rot_jac)
class PR2EEReachableRight(PR2EEReachable):
    """Right-arm end-effector reachability predicate.

    Inherits all behavior from PR2EEReachable unchanged (the base class
    evidently implements the right-arm case by default).
    """
    pass
class PR2EEReachableLeft(PR2EEReachable):
    """Left-arm end-effector reachability predicate.

    Overrides the base class to use the left gripper tool frame / left arm
    manipulator, and rearranges the jacobian column layout so the left-arm
    columns are populated while the right-arm columns are zeroed.
    """

    def get_robot_info(self, robot_body):
        # Provide functionality of Obtaining Robot information
        tool_link = robot_body.env_body.GetLink("l_gripper_tool_frame")
        robot_trans = tool_link.GetTransform()
        arm_inds = robot_body.env_body.GetManipulator("leftarm").GetArmIndices()
        return robot_trans, arm_inds

    def pos_error_rel_to_obj(self, obj_trans, robot_trans, axises, arm_joints, rel_pt):
        """
        This function calculates the value and the jacobian of the displacement between center of gripper and a point relative to the object

        Returns a (3, 1) displacement and a (3, 26) jacobian whose column
        layout is: base(3) | torso(1) | left arm(7) | zeros(9) | object(6).

        obj_trans: object's rave_body transformation
        robot_trans: robot gripper's rave_body transformation
        axises: rotational axises of the object
        arm_joints: list of robot joints
        """
        gp = rel_pt
        robot_pos = robot_trans[:3, 3]
        # Transform the relative point into world coordinates (homogeneous)
        obj_pos = np.dot(obj_trans, np.r_[gp, 1])[:3]
        dist_val = (robot_pos.flatten() - obj_pos.flatten()).reshape((3, 1))
        # Calculate the joint jacobian
        arm_jac = np.array(
            [
                np.cross(joint.GetAxis(), robot_pos.flatten() - joint.GetAnchor())
                for joint in arm_joints
            ]
        ).T.copy()
        # Calculate jacobian for the robot base
        base_jac = np.eye(3)
        base_jac[:, 2] = np.cross(np.array([0, 0, 1]), robot_pos - self.x[:3])
        # Calculate jacobian for the back hight
        torso_jac = np.array([[0], [0], [1]])
        # Calculate object jacobian
        # obj_jac = -1*np.array([np.cross(axis, obj_pos - gp - obj_trans[:3,3].flatten()) for axis in axises]).T
        obj_jac = (
            -1
            * np.array(
                [
                    np.cross(axis, obj_pos - obj_trans[:3, 3].flatten())
                    for axis in axises
                ]
            ).T
        )
        obj_jac = np.c_[-np.eye(3), obj_jac]
        # Create final 3x26 jacobian matrix -> (Gradient checked to be correct)
        dist_jac = np.hstack((base_jac, torso_jac, arm_jac, np.zeros((3, 9)), obj_jac))
        return (dist_val, dist_jac)

    def rot_lock(self, obj_trans, robot_trans, axises, arm_joints):
        """
        This function calculates the value and the jacobian of the rotational error between
        robot gripper's rotational axis and object's rotational axis

        Returns a (3, 1) alignment-error vector and a (3, 26) jacobian with
        column layout: base(3) | zeros(1) | left arm(7) | zeros(9) | object(6).

        obj_trans: object's rave_body transformation
        robot_trans: robot gripper's rave_body transformation
        axises: rotational axises of the object
        arm_joints: list of robot joints
        """
        rot_vals = []
        rot_jacs = []
        # One alignment error per basis axis: dot == 1 iff directions coincide
        for local_dir in np.eye(3):
            obj_dir = np.dot(obj_trans[:3, :3], local_dir)
            world_dir = robot_trans[:3, :3].dot(local_dir)
            rot_vals.append(np.array([[np.dot(obj_dir, world_dir) - 1]]))
            # computing robot's jacobian
            arm_jac = np.array(
                [
                    np.dot(obj_dir, np.cross(joint.GetAxis(), world_dir))
                    for joint in arm_joints
                ]
            ).T.copy()
            arm_jac = arm_jac.reshape((1, len(arm_joints)))
            base_jac = np.array(np.dot(obj_dir, np.cross([0, 0, 1], world_dir)))
            base_jac = np.array([[0, 0, base_jac]])
            # computing object's jacobian
            obj_jac = np.array(
                [np.dot(world_dir, np.cross(axis, obj_dir)) for axis in axises]
            )
            obj_jac = np.r_[[0, 0, 0], obj_jac].reshape((1, 6))
            # Create final 1x26 jacobian matrix
            rot_jacs.append(
                np.hstack(
                    (base_jac, np.zeros((1, 1)), arm_jac, np.zeros((1, 9)), obj_jac)
                )
            )
        rot_val = np.vstack(rot_vals)
        rot_jac = np.vstack(rot_jacs)
        return (rot_val, rot_jac)
class PR2EEReachablePosRight(PR2EEReachableRight):
    # EEUnreachable Robot, StartPose, EEPose
    """Positional reachability predicate for the right end effector."""

    def __init__(
        self,
        name,
        params,
        expected_param_types,
        env=None,
        debug=False,
        steps=const.EEREACHABLE_STEPS,
    ):
        # Stacked position error/gradient are evaluated over the approach
        # trajectory of `steps` timesteps
        self.coeff, self.opt_coeff = (
            const.EEREACHABLE_COEFF,
            const.EEREACHABLE_OPT_COEFF,
        )
        self.eval_f, self.eval_grad = self.stacked_f, self.stacked_grad
        self.attr_dim = 26
        super(PR2EEReachablePosRight, self).__init__(
            name, params, expected_param_types, env, debug, steps
        )
class PR2EEReachableRotRight(PR2EEReachableRight):
    # EEUnreachable Robot, StartPose, EEPose
    """Rotational reachability predicate for the right end effector."""

    def __init__(
        self, name, params, expected_param_types, env=None, debug=False, steps=0
    ):
        self.coeff = const.EEREACHABLE_COEFF
        self.opt_coeff = const.EEREACHABLE_ROT_OPT_COEFF
        # ee_rot_check returns (value, jacobian); split it across the two
        # evaluation callbacks
        self.eval_f = lambda state: self.ee_rot_check(state)[0]
        self.eval_grad = lambda state: self.ee_rot_check(state)[1]
        super(PR2EEReachableRotRight, self).__init__(
            name, params, expected_param_types, env, debug, steps
        )
class PR2EEReachablePosLeft(PR2EEReachableLeft):
    # EEUnreachable Robot, StartPose, EEPose
    """Positional reachability predicate for the left end effector."""

    def __init__(
        self,
        name,
        params,
        expected_param_types,
        env=None,
        debug=False,
        steps=const.EEREACHABLE_STEPS,
    ):
        # Stacked position error/gradient are evaluated over the approach
        # trajectory of `steps` timesteps
        self.coeff, self.opt_coeff = (
            const.EEREACHABLE_COEFF,
            const.EEREACHABLE_OPT_COEFF,
        )
        self.eval_f, self.eval_grad = self.stacked_f, self.stacked_grad
        self.attr_dim = 26
        super(PR2EEReachablePosLeft, self).__init__(
            name, params, expected_param_types, env, debug, steps
        )
class PR2EEReachableRotLeft(PR2EEReachableLeft):
    # EEUnreachable Robot, StartPose, EEPose
    """Rotational reachability predicate for the left end effector."""

    def __init__(
        self, name, params, expected_param_types, env=None, debug=False, steps=0
    ):
        self.coeff = const.EEREACHABLE_COEFF
        self.opt_coeff = const.EEREACHABLE_ROT_OPT_COEFF
        # ee_rot_check returns (value, jacobian); split it across the two
        # evaluation callbacks
        self.eval_f = lambda state: self.ee_rot_check(state)[0]
        self.eval_grad = lambda state: self.ee_rot_check(state)[1]
        super(PR2EEReachableRotLeft, self).__init__(
            name, params, expected_param_types, env, debug, steps
        )
class PR2Obstructs(robot_predicates.Obstructs):
    # Obstructs, Robot, RobotPose, RobotPose, Can
    """Obstruction predicate between the PR2 robot and a Can object."""

    def __init__(
        self,
        name,
        params,
        expected_param_types,
        env=None,
        debug=False,
        tol=const.COLLISION_TOL,
    ):
        # 20 = backHeight(1) + lArmPose(7) + lGripper(1) + rArmPose(7)
        #    + rGripper(1) + base pose(3); matches set_robot_poses below
        self.attr_dim = 20
        self.dof_cache = None
        self.coeff = -1
        self.neg_coeff = 1
        self.attr_inds = OrderedDict(
            [
                (params[0], list(ATTRMAP[params[0]._type])),
                (params[3], list(ATTRMAP[params[3]._type])),
            ]
        )
        super(PR2Obstructs, self).__init__(
            name, params, expected_param_types, env, debug, tol
        )

    def resample(self, negated, t, plan):
        """Resample base poses on a ring around the obstructing can at time t."""
        target_pose = self.can.pose[:, t]
        return resample_bp_around_target(
            self, t, plan, target_pose, dist=const.OBJ_RING_SAMPLING_RADIUS
        )

    def set_robot_poses(self, x, robot_body):
        """Write the flattened 20-dim state vector ``x`` into the robot body."""
        back_height = x[0]
        l_arm_pose, l_gripper = x[1:8], x[8]
        r_arm_pose, r_gripper = x[9:16], x[16]
        base_pose = x[17:20]
        robot_body.set_pose(base_pose)
        dof_value_map = {
            "backHeight": back_height,
            "lArmPose": l_arm_pose,
            "lGripper": l_gripper,
            "rArmPose": r_arm_pose,
            "rGripper": r_gripper,
        }
        robot_body.set_dof(dof_value_map)

    def set_active_dof_inds(self, robot_body, reset=False):
        """Activate torso + both arms/grippers + planar base for collision
        checking, caching the previous active DOF set so it can be restored.

        :param robot_body: body wrapper whose env_body is the OpenRAVE robot
        :param reset: when True, restore the cached DOF set
        :raises PredicateException: on a set/reset call that does not match
            the cache state (double set, or reset without a prior set)
        """
        robot = robot_body.env_body
        # BUG FIX: compare against None with `is`/`is not`. The original
        # `self.dof_cache != None` performs an elementwise comparison once
        # the cache holds a numpy index array, which fails truth-testing.
        if reset and self.dof_cache is not None:
            robot.SetActiveDOFs(self.dof_cache)
            self.dof_cache = None
        elif not reset and self.dof_cache is None:
            self.dof_cache = robot.GetActiveDOFIndices()
            # BUG FIX: `np.int` was deprecated in NumPy 1.20 and removed in
            # 1.24; the builtin `int` is the documented replacement.
            dof_inds = np.ndarray(0, dtype=int)
            dof_inds = np.r_[dof_inds, robot.GetJoint("torso_lift_joint").GetDOFIndex()]
            dof_inds = np.r_[dof_inds, robot.GetManipulator("leftarm").GetArmIndices()]
            dof_inds = np.r_[
                dof_inds, robot.GetManipulator("leftarm").GetGripperIndices()
            ]
            dof_inds = np.r_[dof_inds, robot.GetManipulator("rightarm").GetArmIndices()]
            dof_inds = np.r_[
                dof_inds, robot.GetManipulator("rightarm").GetGripperIndices()
            ]
            robot.SetActiveDOFs(
                dof_inds, DOFAffine.X + DOFAffine.Y + DOFAffine.RotationAxis, [0, 0, 1]
            )
        else:
            raise PredicateException("Incorrect Active DOF Setting")
class PR2ObstructsHolding(robot_predicates.ObstructsHolding):
    # ObstructsHolding, Robot, RobotPose, RobotPose, Can, Can
    """Obstruction predicate for the PR2 while it is holding a second object."""

    def __init__(self, name, params, expected_param_types, env=None, debug=False):
        # 20 = backHeight(1) + lArmPose(7) + lGripper(1) + rArmPose(7)
        #    + rGripper(1) + base pose(3); matches set_robot_poses below
        self.attr_dim = 20
        self.dof_cache = None
        self.coeff = -1
        self.neg_coeff = 1
        self.attr_inds = OrderedDict(
            [
                (params[0], list(ATTRMAP[params[0]._type])),
                (params[3], list(ATTRMAP[params[3]._type])),
                (params[4], list(ATTRMAP[params[4]._type])),
            ]
        )
        self.OBSTRUCTS_OPT_COEFF = const.OBSTRUCTS_OPT_COEFF
        super(PR2ObstructsHolding, self).__init__(
            name, params, expected_param_types, env, debug
        )
        self.dsafe = const.DIST_SAFE

    def resample(self, negated, t, plan):
        """Resample base poses on a ring around the obstructing object at t."""
        target_pose = self.obstruct.pose[:, t]
        return resample_bp_around_target(
            self, t, plan, target_pose, dist=const.OBJ_RING_SAMPLING_RADIUS
        )

    def set_active_dof_inds(self, robot_body, reset=False):
        """Activate torso + both arms/grippers + planar base for collision
        checking, caching the previous active DOF set so it can be restored.

        :param robot_body: body wrapper whose env_body is the OpenRAVE robot
        :param reset: when True, restore the cached DOF set
        :raises PredicateException: on a set/reset call that does not match
            the cache state (double set, or reset without a prior set)
        """
        robot = robot_body.env_body
        # BUG FIX: compare against None with `is`/`is not`. The original
        # `self.dof_cache != None` performs an elementwise comparison once
        # the cache holds a numpy index array, which fails truth-testing.
        if reset and self.dof_cache is not None:
            robot.SetActiveDOFs(self.dof_cache)
            self.dof_cache = None
        elif not reset and self.dof_cache is None:
            self.dof_cache = robot.GetActiveDOFIndices()
            # BUG FIX: `np.int` was deprecated in NumPy 1.20 and removed in
            # 1.24; the builtin `int` is the documented replacement.
            dof_inds = np.ndarray(0, dtype=int)
            dof_inds = np.r_[dof_inds, robot.GetJoint("torso_lift_joint").GetDOFIndex()]
            dof_inds = np.r_[dof_inds, robot.GetManipulator("leftarm").GetArmIndices()]
            dof_inds = np.r_[
                dof_inds, robot.GetManipulator("leftarm").GetGripperIndices()
            ]
            dof_inds = np.r_[dof_inds, robot.GetManipulator("rightarm").GetArmIndices()]
            dof_inds = np.r_[
                dof_inds, robot.GetManipulator("rightarm").GetGripperIndices()
            ]
            robot.SetActiveDOFs(
                dof_inds, DOFAffine.X + DOFAffine.Y + DOFAffine.RotationAxis, [0, 0, 1]
            )
        else:
            raise PredicateException("Incorrect Active DOF Setting")

    def set_robot_poses(self, x, robot_body):
        """Write the flattened 20-dim state vector ``x`` into the robot body."""
        back_height = x[0]
        l_arm_pose, l_gripper = x[1:8], x[8]
        r_arm_pose, r_gripper = x[9:16], x[16]
        base_pose = x[17:20]
        robot_body.set_pose(base_pose)
        dof_value_map = {
            "backHeight": back_height,
            "lArmPose": l_arm_pose,
            "lGripper": l_gripper,
            "rArmPose": r_arm_pose,
            "rGripper": r_gripper,
        }
        robot_body.set_dof(dof_value_map)
class PR2Collides(robot_predicates.Collides):
    """Object-object collision predicate; inherits base behavior unchanged."""
    pass
class PR2RCollides(robot_predicates.RCollides):
# RCollides Robot Obstacle
def __init__(self, name, params, expected_param_types, env=None, debug=False):
self.attr_dim = 20
self.dof_cache = None
self.coeff = -1
self.neg_coeff = 1
self.opt_coeff = const.RCOLLIDES_OPT_COEFF
self.attr_inds = OrderedDict(
[
(params[0], list(ATTRMAP[params[0]._type])),
(params[1], list(ATTRMAP[params[1]._type])),
]
)
super(PR2RCollides, self).__init__(
name, params, expected_param_types, env, debug
)
self.dsafe = const.RCOLLIDES_DSAFE
def resample(self, negated, t, plan):
target_pose = self.obstacle.pose[:, t]
return resample_bp_around_target(
self, t, plan, target_pose, dist=const.TABLE_SAMPLING_RADIUS
)
def set_active_dof_inds(self, robot_body, reset=False):
robot = robot_body.env_body
if reset == True and self.dof_cache != None:
robot.SetActiveDOFs(self.dof_cache)
self.dof_cache = None
elif reset == False and self.dof_cache == None:
self.dof_cache = robot.GetActiveDOFIndices()
dof_inds = np.ndarray(0, dtype=np.int)
dof_inds = np.r_[dof_inds, robot.GetJoint("torso_lift_joint").GetDOFIndex()]
dof_inds = np.r_[dof_inds, robot.GetManipulator("leftarm").GetArmIndices()]
dof_inds = np.r_[
dof_inds, robot.GetManipulator("leftarm").GetGripperIndices()
]
dof_inds = np.r_[dof_inds, robot.GetManipulator("rightarm").GetArmIndices()]
dof_inds = np.r_[
dof_inds, robot.GetManipulator("rightarm").GetGripperIndices()
]
robot.SetActiveDOFs(
dof_inds, DOFAffine.X + DOFAffine.Y + DOFAffine.RotationAxis, [0, 0, 1]
)
else:
raise PredicateException("Incorrect Active | |
import enum
import itertools
import solaredge_setapp.information
import solaredge_setapp.maintenance
import solaredge_setapp.status
# SetApp version these protocol mappings were written against (presumably;
# TODO confirm when bumping).
TARGET_VERSION = "1.4.10"

# Maps the SetApp numeric country/grid-code id to a pair of
# [display name, protocol constant name]. Both strings are registered as
# alias names of the same member in the ``Countries`` enum below, so entries
# must stay unique and stable - renaming a string is a breaking change.
_COUNTRIES = {
    -1: ["Not Set", "COUNTRY_NONE"],
    0: ["General", "COUNTRY_GENERAL"],
    1: ["Australia", "COUNTRY_AUSTRALIA"],
    2: ["France", "COUNTRY_FRANCE"],
    3: ["Germany", "COUNTRY_GERMANY"],
    4: ["Greece (Continent)", "COUNTRY_GREECE_CONTINENT"],
    5: ["Greece (Islands)", "COUNTRY_GREECE_ISLANDS"],
    6: ["Israel", "COUNTRY_ISRAEL"],
    7: ["Italy", "COUNTRY_ITALY"],
    8: ["Spain", "COUNTRY_SPAIN"],
    9: ["United Kingdom", "COUNTRY_UK"],
    10: ["US Auto", "COUNTRY_US_AUTO"],
    11: ["US 208V", "COUNTRY_US_208V"],
    12: ["US 240V", "COUNTRY_US_240V"],
    13: ["US 208V No-Neutral", "COUNTRY_US_208V_NO_NEUTRAL"],
    14: ["US 240V No-Neutral", "COUNTRY_US_240V_NO_NEUTRAL"],
    15: ["Bulgaria", "COUNTRY_BULGARIA"],
    16: ["Czech Republic", "COUNTRY_CZECH_REPUBLIC"],
    # NOTE(review): "CYPRESS" (here and below) looks like a misspelling of
    # "CYPRUS", but the constant presumably mirrors the SetApp protocol name.
    17: ["Cyprus", "COUNTRY_CYPRESS"],
    18: ["Belgium", "COUNTRY_BELGIUM"],
    19: ["Netherlands", "COUNTRY_NETHERLANDS"],
    20: ["Portugal", "COUNTRY_PORTUGAL"],
    21: ["Austria", "COUNTRY_AUSTRIA"],
    22: ["Thailand MEA", "COUNTRY_THAILAND_MEA"],
    23: ["Singapore", "COUNTRY_SINGAPORE"],
    24: ["Korea", "COUNTRY_KOREA"],
    25: ["Japan Auto", "COUNTRY_JAPAN_AUTO"],
    26: ["Japan 50Hz", "COUNTRY_JAPAN_50HZ"],
    27: ["Japan 60Hz", "COUNTRY_JAPAN_60HZ"],
    28: ["Taiwan", "COUNTRY_TAIWAN"],
    29: ["Denmark", "COUNTRY_DENMARK"],
    30: ["Sweden", "COUNTRY_SWEDEN"],
    31: ["Thailand PEA", "COUNTRY_THAILAND_PEA"],
    32: ["Sri Lanka", "COUNTRY_SRI_LANKA"],
    33: ["Mauritius", "COUNTRY_MAURITIUS"],
    34: ["Denmark (Residential)", "COUNTRY_DENMARK_RES"],
    35: ["US 277V", "COUNTRY_US_277V"],
    36: ["Slovenia", "COUNTRY_SLOVENIA"],
    37: ["Poland", "COUNTRY_POLAND"],
    38: ["Germany MVGC", "COUNTRY_GERMANY_MVGC"],
    39: ["UK 240V", "COUNTRY_UK_240V"],
    40: ["Lithuania", "COUNTRY_LITHUANIA"],
    41: ["China", "COUNTRY_CHINA"],
    # NOTE(review): "Philipines" is misspelled; fixing it would rename an
    # enum alias, so it is kept as-is - verify against SetApp strings.
    42: ["Philipines", "COUNTRY_PHILIPPINES"],
    43: ["Brazil", "COUNTRY_BRAZIL"],
    44: ["Mexico 220V", "COUNTRY_MEXICO_220"],
    45: ["Mexico 277V", "COUNTRY_MEXICO_277"],
    46: ["Romania", "COUNTRY_ROMANIA"],
    47: ["Latvia", "COUNTRY_LATVIA"],
    48: ["South Africa", "COUNTRY_SOUTH_AFRICA"],
    49: ["Turkey", "COUNTRY_TURKEY"],
    50: ["Italy No SPI", "COUNTRY_ITALY_NO_SPI"],
    51: ["US/Hawaii Auto", "COUNTRY_US_HAWAII_AUTO"],
    52: ["US/Hawaii 208V", "COUNTRY_US_HAWAII_208V"],
    53: ["US/Hawaii 240V", "COUNTRY_US_HAWAII_240V"],
    54: ["US/Hawaii 208V No-Neutral", "COUNTRY_US_HAWAII_208V_NO_NEUTRAL"],
    55: ["US/Hawaii 240V No-Neutral", "COUNTRY_US_HAWAII_240V_NO_NEUTRAL"],
    56: ["US/Hawaii 277V", "COUNTRY_US_HAWAII_277"],
    57: ["Switzerland", "COUNTRY_SWITZERLAND"],
    58: ["Custom", "COUNTRY_CUSTOM"],
    59: ["India", "COUNTRY_INDIA"],
    60: ["Croatia", "COUNTRY_CROATIA"],
    61: ["Jamaica 240V No-Neutral", "COUNTRY_JAMAICA_240_NO_NEUTRAL"],
    62: ["Jamaica 220V No-Neutral", "COUNTRY_JAMAICA_220_NO_NEUTRAL"],
    63: ["Barbados 230V No-Neutral", "COUNTRY_BARBADOS_230_NO_NEUTRAL"],
    64: ["St. Lucia", "COUNTRY_ST_LUCIA"],
    65: ["Australia Queensland", "COUNTRY_AUSTRALIA_QLD"],
    66: ["Denmark VDE", "COUNTRY_DENMARK_VDE"],
    67: ["Denmark VDE (Residential)", "COUNTRY_DENMARK_VDE_RES"],
    68: ["Ireland", "COUNTRY_IRELAND"],
    69: ["US/Kauai Auto", "COUNTRY_US_KAUAI_AUTO"],
    70: ["US/Kauai 208V", "COUNTRY_US_KAUAI_208"],
    71: ["US/Kauai 240V", "COUNTRY_US_KAUAI_240"],
    72: ["US/Kauai 208V No-Neutral", "COUNTRY_US_KAUAI_208_NO_NEUTRAL"],
    73: ["US/Kauai 240V No-Neutral", "COUNTRY_US_KAUAI_240_NO_NEUTRAL"],
    74: ["US/Kauai 277V", "COUNTRY_US_KAUAI_277"],
    75: ["Cyprus 240V", "COUNTRY_CYPRESS_240"],
    76: ["Curacao", "COUNTRY_CURACAO"],
    77: ["Northern Cyprus 240V", "COUNTRY_N_CYPRESS_240"],
    78: ["Israel (Commercial)", "COUNTRY_ISRAEL_COMMERCIAL"],
    79: ["Aruba", "COUNTRY_ARUBA"],
    80: ["Mexico 240V", "COUNTRY_MEXICO_240"],
    81: ["Barbados 115V No-Neutral", "COUNTRY_BARBADOS_115V_NO_NEUTRAL"],
    82: ["Malaysia", "COUNTRY_MALAYSIA"],
    83: ["Tahiti", "COUNTRY_TAHITI"],
    84: ["Hungary", "COUNTRY_HUNGARY"],
    85: ["Kuwait", "COUNTRY_KUWAIT"],
    86: ["Cyprus MV", "COUNTRY_CYPRUS_MV"],
    87: ["Norway", "COUNTRY_NORWAY"],
    88: ["Northern Ireland", "COUNTRY_NORTH_IRELAND"],
    89: ["Germany MV 480V", "COUNTRY_GERMANY_MV_480V"],
    90: ["US/Hawaii2 Auto", "COUNTRY_US_HAWAII2_AUTO"],
    91: ["US/Hawaii2 208V", "COUNTRY_US_HAWAII2_208V"],
    # NOTE(review): the display names and constant names for ids 92/93 look
    # crossed (240V paired with 208V_NO_NEUTRAL and vice versa) - verify
    # against the SetApp protocol before changing either string.
    92: ["US/Hawaii2 240V", "COUNTRY_US_HAWAII2_208V_NO_NEUTRAL"],
    93: ["US/Hawaii2 208V No-Neutral", "COUNTRY_US_HAWAII2_240V"],
    94: ["US/Hawaii2 240V No-Neutral", "COUNTRY_US_HAWAII2_240V_NO_NEUTRAL"],
    95: ["US/Hawaii2 277V", "COUNTRY_US_HAWAII2_277"],
    96: ["US/NY Auto", "COUNTRY_US_NY_AUTO"],
    97: ["US/NY 208V", "COUNTRY_US_NY_208V"],
    # NOTE(review): ids 98/99 show the same crossed pattern as 92/93, and the
    # display names for 98 and 101 carry a trailing space (the alias must
    # then be looked up as e.g. Countries["US/NY 240V "]) - verify upstream.
    98: ["US/NY 240V ", "COUNTRY_US_NY_208V_NO_NEUTRAL"],
    99: ["US/NY 208V No-Neutral", "COUNTRY_US_NY_240V"],
    100: ["US/NY 240V No-Neutral", "COUNTRY_US_NY_240V_NO_NEUTRAL"],
    101: ["US/NY 277V ", "COUNTRY_US_NY_277"],
    102: ["Japan MV 420V 50Hz", "COUNTRY_JAPAN_MV_380V_50HZ"],
    103: ["Japan MV 440V 60Hz", "COUNTRY_JAPAN_MV_380V_60HZ"],
    104: ["US/Rule21 Auto", "COUNTRY_US_AUTO_RULE21"],
    105: ["US/Rule21 208V", "COUNTRY_US_208V_RULE21"],
    106: ["US/Rule21 208V No-Neutral", "COUNTRY_US_208V_NO_NEUTRAL_RULE21"],
    107: ["US/Rule21 240V", "COUNTRY_US_240V_RULE21"],
    108: ["US/Rule21 240V No-Neutral", "COUNTRY_US_240V_NO_NEUTRAL_RULE21"],
    109: ["US/Rule21 277V", "COUNTRY_US_277V_RULE21"],
    110: ["Italy 277V No SPI", "COUNTRY_ITALY_277V_NO_SPI"],
    111: ["Philippines 230V Delta", "COUNTRY_PHILIPPINES_230V_DELTA"],
    112: ["UK 480V", "COUNTRY_UK_480V"],
    113: ["Zimbabwe 230V", "COUNTRY_ZIMBABWE_230V"],
    114: ["Indonesia", "COUNTRY_INDONESIA"],
    115: ["Japan MV 480V 50Hz", "COUNTRY_JAPAN_MV_480V_50_HZ"],
    116: ["Japan MV 480V 60Hz", "COUNTRY_JAPAN_MV_480V_60_HZ"],
    117: ["Europe EN50438", "COUNTRY_EUROPE_EN50438"],
    118: ["Cape Verde", "COUNTRY_CAPE_VERDE"],
    119: ["New Zealand", "COUNTRY_NEW_ZEALAND"],
    120: ["Ghana", "COUNTRY_GHANA"],
    121: ["Finland", "COUNTRY_FINLAND"],
    122: ["Grenada", "COUNTRY_GRENADA"],
    123: ["Dubai LV", "COUNTRY_DUBAI_LV"],
    124: ["Slovakia ZSED", "COUNTRY_SLOVAKIA_ZSED"],
    125: ["Slovakia SSED", "COUNTRY_SLOVAKIA_SSED"],
    126: ["Slovakia VSD", "COUNTRY_SLOVAKIA_VSD"],
    127: ["Puerto Rico 277V", "COUNTRY_PUERTO_RICO_277V"],
    128: ["South Africa MV", "COUNTRY_SOUTH_AFRICA_MV"],
    129: ["Philippines MV", "COUNTRY_PHILIPPINES_MV"],
    130: ["Taiwan MV", "COUNTRY_TAIWAN_MV"],
    131: ["India MV", "COUNTRY_INDIA_MV"],
    132: ["US/CO Auto", "COUNTRY_US_CO_AUTO"],
    133: ["US/CO 208V", "COUNTRY_US_CO_208V"],
    134: ["US/CO 208V No-Neutral", "COUNTRY_US_CO_208V_NO_NEUTRAL"],
    135: ["US/CO 240V No-Neutral", "COUNTRY_US_CO_240V_NO_NEUTRAL"],
    136: ["US/CO 240V", "COUNTRY_US_CO_240V"],
    137: ["US/CO 277V", "COUNTRY_US_CO_277V"],
    138: ["Australia Victoria", "COUNTRY_VICTORIA"],
    139: ["Kenya", "COUNTRY_KENYA"],
    140: ["Turkey MV", "COUNTRY_TURKEY_MV"],
    141: ["Spain MV", "COUNTRY_SPAIN_MV"],
    142: ["Thailand MEA MV", "COUNTRY_THAILAND_MEA_MV"],
    143: ["Thailand PEA MV", "COUNTRY_THAILAND_PEA_MV"],
    144: ["China MV", "COUNTRY_CHINA_MV"],
    145: ["Taiwan 220V No-Neutral", "COUNTRY_TAIWAN_220_NO_NEUTRAL"],
    146: ["Mauritius >220K", "COUNTRY_MAURITIUS_ABOVE_220K"],
    147: ["France MV", "COUNTRY_FRANCE_MV"],
    148: ["Czech Republic 16A", "COUNTRY_CEZ"],
    149: ["Belgium Delta", "COUNTRY_BELGIUM_DELTA"],
    150: ["Norway Delta", "COUNTRY_NORWAY_DELTA"],
    151: ["Netherlands MV", "COUNTRY_NETHERLANDS_MV"],
    152: ["Macau", "COUNTRY_MACAU"],
    153: ["Argentina", "COUNTRY_ARGENTINA"],
    154: ["Argentina (Commercial)", "COUNTRY_ARGENTINA_COMMERCIAL"],
    155: ["Sweden MV", "COUNTRY_SWEDEN_MV"],
    156: ["Vietnam", "COUNTRY_VIETNAM"],
    # NOTE(review): "Brzail" (here and at 166) looks like a typo of "Brazil";
    # kept as-is because the string is an enum alias - verify upstream.
    157: ["Brzail 127V-220V", "COUNTRY_BRAZIL_127V_220V"],
    158: ["Barbados 220V", "COUNTRY_BARBADOS_200"],
    159: ["ISO-NE 208V", "COUNTRY_US_NEW_ENGLAND_208"],
    160: ["ISO-NE 240V", "COUNTRY_US_NEW_ENGLAND_240"],
    161: ["ISO-NE 208V No-Neutral", "COUNTRY_US_NEW_ENGLAND_208_NO_NEUTRAL"],
    162: ["ISO-NE 240V No-Neutral", "COUNTRY_US_NEW_ENGLAND_240_NO_NEUTRAL"],
    163: ["ISO-NE 277V", "COUNTRY_US_NEW_ENGLAND_277"],
    164: ["Korea Low DC", "COUNTRY_KOREA_LOW_DC"],
    165: ["Israel 480 MV", "COUNTRY_ISRAEL_480V_MV"],
    166: ["Brzail 277V", "COUNTRY_BRAZIL_277"],
    167: ["Hungary EON", "COUNTRY_HUNGARY_EON"],
    168: ["Spain (Islands)", "COUNTRY_SPANISH_ISLANDS"],
    169: ["Peru", "COUNTRY_PERU"],
    # NOTE(review): "Columbia" is presumably the country "Colombia"; kept
    # as-is because the string is an enum alias - verify upstream.
    170: ["Columbia", "COUNTRY_COLUMBIA"],
    171: ["Chile", "COUNTRY_CHILE"],
    172: ["Ecuador", "COUNTRY_ECUADOR"],
    173: ["Qatar", "COUNTRY_QATAR"],
    174: ["Australia 480V", "COUNTRY_AUSTRALIA_480V"],
    175: ["Hong Kong", "COUNTRY_HONG_KONG"],
    176: ["Uruguay", "COUNTRY_URUGUAY"],
    177: ["Italy A68", "COUNTRY_ITALIY_A68"],
    178: ["Estonia", "COUNTRY_ESTONIA"],
    179: ["Mauritius", "COUNTRY_MAURITIUS_2"],
    180: ["Surinam", "COUNTRY_SURINAM"],
    181: ["Western Power", "COUNTRY_WESTERN_POWER"],
    182: ["India (Kerala)", "COUNTRY_INDIA_KERALA"]
}

# Functional enum: itertools.product(v, [k]) emits (display_name, id) and
# (constant_name, id) for every entry, so both strings become alias names of
# the same member. Lookup works via Countries["<either name>"].
Countries = enum.Enum(
    value="Countries",
    names=itertools.chain.from_iterable(
        itertools.product(v, [k]) for k, v in _COUNTRIES.items()
    )
)
# Maps the SetApp numeric inverter status id to [display name, constant
# name]; both strings become alias names of the same ``Status`` member below.
_STATUS = {
    -1: ["Not Set", "UNSET"],
    0: ["Shutting Down", "SHUTTING_DOWN"],
    1: ["Error", "ERROR"],
    2: ["Standby", "STANDBY"],
    3: ["Pairing", "PAIRING"],
    4: ["Producing", "POWER_PRODUCTION"],
    5: ["AC Charging", "AC_CHARGING"],
    6: ["Not Paired", "NOT_PAIRED"],
    7: ["Night Mode", "NIGHT_MODE"],
    8: ["Grid Monitoring", "GRID_MONITORING"],
    9: ["Idle", "IDLE"],
    10: ["Grid Pairing", "GRM_PAIRING"],
    11: ["PID Rectification", "PID_RECTIFICATION"]
}

# Functional enum built the same way as ``Countries``: each id gets its
# display name and its constant name as interchangeable alias names.
Status = enum.Enum(
    value="Status",
    names=itertools.chain.from_iterable(
        itertools.product(v, [k]) for k, v in _STATUS.items()
    )
)
class AfciTestResult(enum.Enum):
    """Outcome codes of an AFCI test (fail / error / pass)."""
    AFCI_TEST_FAIL = 0
    AFCI_TEST_ERROR = -1
    AFCI_TEST_PASS = 1
class InverterTestCondition(enum.Enum):
    """Inverter test precondition: OK, or a specific not-ready reason."""
    TEST_COND_OK = 0
    TEST_COND_NOT_READY_GRM = 1
    TEST_COND_NOT_READY_PROD = 2
    TEST_COND_NOT_READY_INV_OFF = 3
class InverterTestStatus(enum.Enum):
    """Result state of an inverter test."""
    TEST_STATUS_NOT_TESTED = 0
    TEST_STATUS_PASSED = 1
    TEST_STATUS_FAILED = 2
class BatterySelfTestPrecondition(enum.Enum):
    """Battery self-test precondition: OK, or a specific not-ready reason."""
    TEST_PRE_COND_OK = 0
    TEST_PRE_COND_NOT_READY_INV_OFF = 1
    TEST_PRE_COND_NOT_READY_INV_COMM_ERROR = 2
    TEST_PRE_COND_NOT_READY_INV_BATT_ERROR = 3
    TEST_PRE_COND_NOT_READY_MIN_SOE = 4
class BatterTestStatus(enum.Enum):
    """Progress/result state of a battery test.

    NOTE(review): class name looks like a typo of ``BatteryTestStatus``;
    kept as-is since renaming would break existing callers.
    """
    NOT_TESTED = 0
    IN_PROGRESS = 1
    PASSED = 2
    FAILED = 3
class LanStatus(enum.Enum):
    """LAN link status; only the OK state is defined."""
    OK = 0
class CellularSignal(enum.Enum):
    """Cellular signal-strength levels.

    NOTE(review): LOW=1 but LOWEST=2, so numeric order does not follow
    strength order — presumably protocol-defined codes; confirm upstream.
    """
    NONE = 0
    LOW = 1
    LOWEST = 2
    MEDIUM = 3
    HIGH = 4
    HIGHEST = 5
    UNKNOWN = 6
class WifiSignal(enum.Enum):
    """Wi-Fi signal-strength levels (ascending)."""
    NONE = 0
    LOW = 1
    MID = 2
    HIGH = 3
    EXCELLENT = 4
class ZigbeeSignal(enum.Enum):
    """Zigbee signal-strength levels (ascending)."""
    NONE = 0
    LOW = 1
    MID = 2
    HIGH = 3
class ZigbeeSlaveStatus(enum.Enum):
    """Connection state of a Zigbee slave device."""
    NOT_CONNECTED = 0
    CONNECTED = 1
    MASTER_NOT_FOUND = 2
class ZigbeeModuleStatus(enum.Enum):
    """Zigbee module state: initializing or ready."""
    INITIALIZING = 0
    OK = 1
class EvseCarStatus(enum.Enum):
    """Car-side state reported by the EVSE (note: lower-case member names)."""
    disconnected = 0
    connected = 1
    charging_car = 2
class EvseChargerStatus(enum.Enum):
    """Charger-side state of the EVSE (note: lower-case member names)."""
    ready = 0
    initializing = 1
    charging = 2
    charging_boost = 3
    charging_excess_pv = 4
class MeterConnectionType(enum.Enum):
    """Physical interface a meter is attached to (RS485 port 1/2 or S0)."""
    RS485_1 = 0
    RS485_2 = 1
    S0 = 2
class MeterStatus(enum.Enum):
    """Meter communication status."""
    OK = 0
    COMM_ERROR = 1
class BatteryStatus(enum.Enum):
    """Battery connection status.

    ``DISCNNECTED`` is the original (misspelled) member name and is kept
    for backward compatibility; ``DISCONNECTED`` is a correctly spelled
    alias of the same value (enum members sharing a value are aliases).
    """
    CONNECTED = 0
    DISCNNECTED = 1
    # Alias with the corrected spelling; resolves to the same member.
    DISCONNECTED = 1
class BatteryState(enum.Enum):
    """BMS operating states of the battery."""
    BMS_STATE_INVALID = 0
    BMS_STATE_OFF = 1
    BMS_STATE_STDBY = 2
    BMS_STATE_INIT = 3
    BMS_STATE_CHARGE = 4
    BMS_STATE_DISCHARGE = 5
    BMS_STATE_FAULT = 6
    BMS_STATE_IDLE = 7
    BMS_STATE_COMM_ERROR = 8
    BMS_STATE_RESERVED1 = 9
    BMS_STATE_RESERVED2 = 10
    BMS_STATE_SLEEP = 11
class HeaderType(enum.Enum):
    """Severity class of a message header."""
    ERROR = 0
    WARNING = 1
    INFORMATION = 2
class PairingStatusInfo(enum.Enum):
    """Pairing process status codes.

    NOTE(review): value 1 is unassigned here — presumably reserved by the
    device protocol; confirm against the protocol spec.
    """
    OK = 0
    INV_OFF = 2
    NIGHT_MODE = 3
    IN_PROCESS = 4
    ERROR = 5
    ERROR_OPT_DETECT = 6
    ERROR_STRING_DETECT = 7
    NOT_IN_PROCESS = 8
class PairingStatusStage(enum.Enum):
    """Stages of the pairing sequence, in order of progression."""
    NOT_ACTIVE = 0
    WAIT_VIN_DECREASE = 1
    PAIRING = 2
    SAVE_SESSION = 3
    OPT_DETECT = 4
    STRING_DETECT = 5
    END = 6
class CommTestStatus(enum.Enum):
    """Communication test result."""
    FAILED = 0
    PASSED = 1
    NOT_TESTED = 2
class ServerChannelMethod(enum.Enum):
    """How the server communication channel is chosen."""
    MANUAL_SELECT = 0
    AUTO_SELECT = 1
class OperationMode(enum.Enum):
    """Inverter operation mode (on-grid or the two off-grid variants)."""
    INV_OPER_MODE_ON_GRID = 0
    INV_OPER_MODE_STORAGE_OFF_GRID = 1
    INV_OPER_MODE_DG_OFF_GRID = 2
class LanNetStatus(enum.Enum):
    """Detailed LAN connectivity progress/error codes."""
    LAN_UNKNOWN = 0
    LAN_CABLE_NOT_CONNECTED = 1
    LAN_ERROR_DHCP = 2
    LAN_NO_INTERNET_CONNECTION = 3
    LAN_ERROR_DNS = 4
    LAN_CONNECTING_TO_SERVER = 5
    LAN_WAITING_FOR_SERVER_RESPONSE = 6
    LAN_SERVER_RESPONDED = 7
class WifiNetStatus(enum.Enum):
    """Detailed Wi-Fi connectivity progress/error codes."""
    WIFI_UNKNOWN = 0
    WIFI_NOT_CONNECTED = 1
    WIFI_CONNECTION_IN_PROGRESS = 2
    WIFI_ERROR_WRONG_PASSWORD = 3
    WIFI_ERROR_UNREACHABLE = 4
    WIFI_ERROR_DHCP = 5
    WIFI_ERROR_CONNECTION_OTHER = 6
    WIFI_NO_INTERNET_CONNECTION = 7
    WIFI_ERROR_DNS = 8
    WIFI_CONNECTING_TO_SERVER = 9
    WIFI_WAITING_FOR_SERVER_RESPONSE = 10
    WIFI_SERVER_RESPONDED = 11
class RS485SlaveNetStatus(enum.Enum):
    """RS485 slave link progress/error codes."""
    RS485_SLAVE_UNKNOWN = 0
    RS485_SLAVE_NOT_CONFIGURED = 1
    RS485_SLAVE_NO_MASTER = 2
    RS485_SLAVE_NO_GRANT_RECEIVED = 3
    RS485_SLAVE_CONNECTED_TO_MASTER = 4
    RS485_SLAVE_SERVER_RESPONDED = 5
class ZigbeeSlaveNetStatus(enum.Enum):
    """Zigbee slave link progress/error codes."""
    ZB_SLAVE_UNKNOWN = 0
    ZB_SLAVE_NOT_DETECTED = 1
    ZB_DETECTION_IN_PROGRESS = 2
    ZB_INIT_MODULE = 3
    ZB_SLAVE_NOT_CONFIGURED = 4
    ZB_SLAVE_DISASSOCIATED = 5
    ZB_SLAVE_NO_COMM_WITH_COORDINATOR = 6
    ZB_SLAVE_CONNECTED_TO_COORDINATOR = 7
    ZB_SLAVE_SERVER_RESPONDED = 8
class CellNetStatus(enum.Enum):
CELL_UNKNOWN = 0
CELL_NOT_DETECTED = 1
CELL_DETECTION_IN_PROGRESS = 2
CELL_INIT_MODULE = 3
CELL_NO_SIM_CARD = 4
CELL_MISSING_PIN = 5
CELL_SIM_NOT_REGISTERED = 6
CELL_MODEM_NOT_ACTIVATED = 7
CELL_MISSING_APN = 8
CELL_NO_SIGNAL = 9
CELL_NO_COMM_WITH_MODULE = 10
CELL_ESTABLISHING_INTERNET_CONNECTION = 11
CELL_NO_INTERNET_CONNECTION = 12
CELL_NO_TELEM_PLAN_SELECT = 13
CELL_ACTIVATING_TELEM_PLAN = 14
CELL_ACTIVATING_ERROR_NO_RESPONSE = 15
CELL_ACTIVATING_ERROR_UNIDENTIFIED_NUMBER = 16
CELL_ACTIVATING_ERROR_SMS_BLOCKED = 17
CELL_ACTIVATING_ERROR_NO_SMS = 18
CELL_ACTIVATING_ERROR | |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = [
'ProviderAssumeRoleArgs',
'ProviderEndpointArgs',
]
@pulumi.input_type
class ProviderAssumeRoleArgs:
    """Assume-role settings for the provider.

    Generated by the Pulumi Terraform Bridge (tfgen); only documentation has
    been added here.

    :param role_arn: role ARN to assume (required).
    :param policy: optional policy document applied to the session.
    :param session_expiration: optional session lifetime, in seconds
        (presumably — TODO confirm against provider docs).
    :param session_name: optional name for the assumed-role session.
    """
    def __init__(__self__, *,
                 role_arn: pulumi.Input[str],
                 policy: Optional[pulumi.Input[str]] = None,
                 session_expiration: Optional[pulumi.Input[int]] = None,
                 session_name: Optional[pulumi.Input[str]] = None):
        pulumi.set(__self__, "role_arn", role_arn)
        # Optional fields are only stored when provided.
        if policy is not None:
            pulumi.set(__self__, "policy", policy)
        if session_expiration is not None:
            pulumi.set(__self__, "session_expiration", session_expiration)
        if session_name is not None:
            pulumi.set(__self__, "session_name", session_name)

    @property
    @pulumi.getter(name="roleArn")
    def role_arn(self) -> pulumi.Input[str]:
        """Role ARN to assume (required)."""
        return pulumi.get(self, "role_arn")

    @role_arn.setter
    def role_arn(self, value: pulumi.Input[str]):
        pulumi.set(self, "role_arn", value)

    @property
    @pulumi.getter
    def policy(self) -> Optional[pulumi.Input[str]]:
        """Optional policy document applied to the session."""
        return pulumi.get(self, "policy")

    @policy.setter
    def policy(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "policy", value)

    @property
    @pulumi.getter(name="sessionExpiration")
    def session_expiration(self) -> Optional[pulumi.Input[int]]:
        """Optional session lifetime (seconds, presumably — TODO confirm)."""
        return pulumi.get(self, "session_expiration")

    @session_expiration.setter
    def session_expiration(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "session_expiration", value)

    @property
    @pulumi.getter(name="sessionName")
    def session_name(self) -> Optional[pulumi.Input[str]]:
        """Optional name for the assumed-role session."""
        return pulumi.get(self, "session_name")

    @session_name.setter
    def session_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "session_name", value)
@pulumi.input_type
class ProviderEndpointArgs:
def __init__(__self__, *,
actiontrail: Optional[pulumi.Input[str]] = None,
adb: Optional[pulumi.Input[str]] = None,
alb: Optional[pulumi.Input[str]] = None,
alidfs: Optional[pulumi.Input[str]] = None,
alidns: Optional[pulumi.Input[str]] = None,
alikafka: Optional[pulumi.Input[str]] = None,
apigateway: Optional[pulumi.Input[str]] = None,
arms: Optional[pulumi.Input[str]] = None,
bastionhost: Optional[pulumi.Input[str]] = None,
brain_industrial: Optional[pulumi.Input[str]] = None,
bssopenapi: Optional[pulumi.Input[str]] = None,
cas: Optional[pulumi.Input[str]] = None,
cassandra: Optional[pulumi.Input[str]] = None,
cbn: Optional[pulumi.Input[str]] = None,
cddc: Optional[pulumi.Input[str]] = None,
cdn: Optional[pulumi.Input[str]] = None,
cds: Optional[pulumi.Input[str]] = None,
clickhouse: Optional[pulumi.Input[str]] = None,
cloudauth: Optional[pulumi.Input[str]] = None,
cloudphone: Optional[pulumi.Input[str]] = None,
cloudsso: Optional[pulumi.Input[str]] = None,
cms: Optional[pulumi.Input[str]] = None,
config: Optional[pulumi.Input[str]] = None,
cr: Optional[pulumi.Input[str]] = None,
cs: Optional[pulumi.Input[str]] = None,
datahub: Optional[pulumi.Input[str]] = None,
dataworkspublic: Optional[pulumi.Input[str]] = None,
dbfs: Optional[pulumi.Input[str]] = None,
dcdn: Optional[pulumi.Input[str]] = None,
ddosbgp: Optional[pulumi.Input[str]] = None,
ddoscoo: Optional[pulumi.Input[str]] = None,
dds: Optional[pulumi.Input[str]] = None,
devopsrdc: Optional[pulumi.Input[str]] = None,
dg: Optional[pulumi.Input[str]] = None,
dm: Optional[pulumi.Input[str]] = None,
dms_enterprise: Optional[pulumi.Input[str]] = None,
dns: Optional[pulumi.Input[str]] = None,
drds: Optional[pulumi.Input[str]] = None,
dts: Optional[pulumi.Input[str]] = None,
eais: Optional[pulumi.Input[str]] = None,
eci: Optional[pulumi.Input[str]] = None,
ecs: Optional[pulumi.Input[str]] = None,
ehpc: Optional[pulumi.Input[str]] = None,
eipanycast: Optional[pulumi.Input[str]] = None,
elasticsearch: Optional[pulumi.Input[str]] = None,
emr: Optional[pulumi.Input[str]] = None,
ens: Optional[pulumi.Input[str]] = None,
ess: Optional[pulumi.Input[str]] = None,
eventbridge: Optional[pulumi.Input[str]] = None,
fc: Optional[pulumi.Input[str]] = None,
fnf: Optional[pulumi.Input[str]] = None,
ga: Optional[pulumi.Input[str]] = None,
gds: Optional[pulumi.Input[str]] = None,
gpdb: Optional[pulumi.Input[str]] = None,
gwsecd: Optional[pulumi.Input[str]] = None,
hbr: Optional[pulumi.Input[str]] = None,
hcs_sgw: Optional[pulumi.Input[str]] = None,
hitsdb: Optional[pulumi.Input[str]] = None,
imm: Optional[pulumi.Input[str]] = None,
imp: Optional[pulumi.Input[str]] = None,
ims: Optional[pulumi.Input[str]] = None,
iot: Optional[pulumi.Input[str]] = None,
kms: Optional[pulumi.Input[str]] = None,
kvstore: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
log: Optional[pulumi.Input[str]] = None,
market: Optional[pulumi.Input[str]] = None,
maxcompute: Optional[pulumi.Input[str]] = None,
mhub: Optional[pulumi.Input[str]] = None,
mns: Optional[pulumi.Input[str]] = None,
mscopensubscription: Optional[pulumi.Input[str]] = None,
mse: Optional[pulumi.Input[str]] = None,
nas: Optional[pulumi.Input[str]] = None,
ons: Optional[pulumi.Input[str]] = None,
onsproxy: Optional[pulumi.Input[str]] = None,
oos: Optional[pulumi.Input[str]] = None,
opensearch: Optional[pulumi.Input[str]] = None,
oss: Optional[pulumi.Input[str]] = None,
ots: Optional[pulumi.Input[str]] = None,
polardb: Optional[pulumi.Input[str]] = None,
privatelink: Optional[pulumi.Input[str]] = None,
pvtz: Optional[pulumi.Input[str]] = None,
quickbi: Optional[pulumi.Input[str]] = None,
quotas: Optional[pulumi.Input[str]] = None,
r_kvstore: Optional[pulumi.Input[str]] = None,
ram: Optional[pulumi.Input[str]] = None,
rds: Optional[pulumi.Input[str]] = None,
redisa: Optional[pulumi.Input[str]] = None,
resourcemanager: Optional[pulumi.Input[str]] = None,
resourcesharing: Optional[pulumi.Input[str]] = None,
ros: Optional[pulumi.Input[str]] = None,
sas: Optional[pulumi.Input[str]] = None,
scdn: Optional[pulumi.Input[str]] = None,
sddp: Optional[pulumi.Input[str]] = None,
serverless: Optional[pulumi.Input[str]] = None,
servicemesh: Optional[pulumi.Input[str]] = None,
sgw: Optional[pulumi.Input[str]] = None,
slb: Optional[pulumi.Input[str]] = None,
sts: Optional[pulumi.Input[str]] = None,
swas: Optional[pulumi.Input[str]] = None,
vod: Optional[pulumi.Input[str]] = None,
vpc: Optional[pulumi.Input[str]] = None,
vs: Optional[pulumi.Input[str]] = None,
waf: Optional[pulumi.Input[str]] = None,
waf_openapi: Optional[pulumi.Input[str]] = None):
if actiontrail is not None:
pulumi.set(__self__, "actiontrail", actiontrail)
if adb is not None:
pulumi.set(__self__, "adb", adb)
if alb is not None:
pulumi.set(__self__, "alb", alb)
if alidfs is not None:
pulumi.set(__self__, "alidfs", alidfs)
if alidns is not None:
pulumi.set(__self__, "alidns", alidns)
if alikafka is not None:
pulumi.set(__self__, "alikafka", alikafka)
if apigateway is not None:
pulumi.set(__self__, "apigateway", apigateway)
if arms is not None:
pulumi.set(__self__, "arms", arms)
if bastionhost is not None:
pulumi.set(__self__, "bastionhost", bastionhost)
if brain_industrial is not None:
pulumi.set(__self__, "brain_industrial", brain_industrial)
if bssopenapi is not None:
pulumi.set(__self__, "bssopenapi", bssopenapi)
if cas is not None:
pulumi.set(__self__, "cas", cas)
if cassandra is not None:
pulumi.set(__self__, "cassandra", cassandra)
if cbn is not None:
pulumi.set(__self__, "cbn", cbn)
if cddc is not None:
pulumi.set(__self__, "cddc", cddc)
if cdn is not None:
pulumi.set(__self__, "cdn", cdn)
if cds is not None:
pulumi.set(__self__, "cds", cds)
if clickhouse is not None:
pulumi.set(__self__, "clickhouse", clickhouse)
if cloudauth is not None:
pulumi.set(__self__, "cloudauth", cloudauth)
if cloudphone is not None:
pulumi.set(__self__, "cloudphone", cloudphone)
if cloudsso is not None:
pulumi.set(__self__, "cloudsso", cloudsso)
if cms is not None:
pulumi.set(__self__, "cms", cms)
if config is not None:
pulumi.set(__self__, "config", config)
if cr is not None:
pulumi.set(__self__, "cr", cr)
if cs is not None:
pulumi.set(__self__, "cs", cs)
if datahub is not None:
pulumi.set(__self__, "datahub", datahub)
if dataworkspublic is not None:
pulumi.set(__self__, "dataworkspublic", dataworkspublic)
if dbfs is not None:
pulumi.set(__self__, "dbfs", dbfs)
if dcdn is not None:
pulumi.set(__self__, "dcdn", dcdn)
if ddosbgp is not None:
pulumi.set(__self__, "ddosbgp", ddosbgp)
if ddoscoo is not None:
pulumi.set(__self__, "ddoscoo", ddoscoo)
if dds is not None:
pulumi.set(__self__, "dds", dds)
if devopsrdc is not None:
pulumi.set(__self__, "devopsrdc", devopsrdc)
if dg is not None:
pulumi.set(__self__, "dg", dg)
if dm is not None:
pulumi.set(__self__, "dm", dm)
if dms_enterprise is not None:
pulumi.set(__self__, "dms_enterprise", dms_enterprise)
if dns is not None:
pulumi.set(__self__, "dns", dns)
if drds is not None:
pulumi.set(__self__, "drds", drds)
if dts is not None:
pulumi.set(__self__, "dts", dts)
if eais is not None:
pulumi.set(__self__, "eais", eais)
if eci is not None:
pulumi.set(__self__, "eci", eci)
if ecs is not None:
pulumi.set(__self__, "ecs", ecs)
if ehpc is not None:
pulumi.set(__self__, "ehpc", ehpc)
if eipanycast is not None:
pulumi.set(__self__, "eipanycast", eipanycast)
if elasticsearch is not None:
pulumi.set(__self__, "elasticsearch", elasticsearch)
if emr is not None:
pulumi.set(__self__, "emr", emr)
if ens is not None:
pulumi.set(__self__, "ens", ens)
if ess is not None:
pulumi.set(__self__, "ess", ess)
if eventbridge is not None:
pulumi.set(__self__, "eventbridge", eventbridge)
if fc is not None:
pulumi.set(__self__, "fc", fc)
if fnf is not None:
pulumi.set(__self__, "fnf", fnf)
if ga is not None:
pulumi.set(__self__, "ga", ga)
if gds is not None:
pulumi.set(__self__, "gds", gds)
if gpdb is not None:
pulumi.set(__self__, "gpdb", gpdb)
if gwsecd is not None:
pulumi.set(__self__, "gwsecd", gwsecd)
if hbr is not None:
pulumi.set(__self__, "hbr", hbr)
if hcs_sgw is not None:
pulumi.set(__self__, "hcs_sgw", hcs_sgw)
if hitsdb is not None:
pulumi.set(__self__, "hitsdb", hitsdb)
if imm is not None:
pulumi.set(__self__, "imm", imm)
if imp is not None:
pulumi.set(__self__, "imp", imp)
if ims is not None:
pulumi.set(__self__, "ims", ims)
if iot is not None:
pulumi.set(__self__, "iot", iot)
if kms is not None:
pulumi.set(__self__, "kms", kms)
if kvstore is not None:
pulumi.set(__self__, "kvstore", kvstore)
if location is not None:
pulumi.set(__self__, "location", location)
if log is not None:
pulumi.set(__self__, "log", log)
if market is not None:
pulumi.set(__self__, "market", market)
if maxcompute is not None:
pulumi.set(__self__, "maxcompute", maxcompute)
if mhub is not None:
pulumi.set(__self__, "mhub", mhub)
if mns is not None:
pulumi.set(__self__, "mns", mns)
if mscopensubscription is not None:
pulumi.set(__self__, "mscopensubscription", mscopensubscription)
if mse is not None:
pulumi.set(__self__, "mse", mse)
if nas is not None:
pulumi.set(__self__, "nas", nas)
if ons is not None:
pulumi.set(__self__, "ons", ons)
if onsproxy is not None:
pulumi.set(__self__, "onsproxy", onsproxy)
if oos is not None:
pulumi.set(__self__, "oos", oos)
if opensearch is not None:
pulumi.set(__self__, "opensearch", opensearch)
if oss is not None:
pulumi.set(__self__, "oss", oss)
if ots is not None:
pulumi.set(__self__, "ots", ots)
if polardb is not None:
pulumi.set(__self__, "polardb", polardb)
if privatelink is not None:
pulumi.set(__self__, "privatelink", privatelink)
if pvtz is not None:
pulumi.set(__self__, "pvtz", pvtz)
if quickbi is not None:
pulumi.set(__self__, "quickbi", quickbi)
if quotas is not None:
pulumi.set(__self__, "quotas", quotas)
if r_kvstore is not None:
pulumi.set(__self__, "r_kvstore", r_kvstore)
if ram is not None:
pulumi.set(__self__, "ram", ram)
if rds is not None:
| |
#!/usr/bin/env python
import rospy
import roslib
import sys
import numpy as np
from geometry_msgs.msg import Point
# from pahap.srv import image_cmd
from pahap.srv import pcl_segment, pcl_segmentResponse
from pahap.msg import cluster
# from pahap.srv import display2Ddata, display2DdataResponse
from numpy import expand_dims
import sys
from sklearn import mixture
import matplotlib.pyplot as plt
class dataSegment_server:
    """ROS node exposing the ``pcl_segment`` service.

    The service callback clusters the request's 2-D points with an
    EM-trained Gaussian mixture (sklearn), estimates per-cluster boundary
    points, uses cluster adjacency to score candidate component counts,
    and returns the clustered point sets in a ``pcl_segmentResponse``.
    """

    def __init__(self):
        # self.image_topic = ""
        # self.image_file = ""
        # self.modelAdress = ""
        rospy.init_node('dataSegment') # initialize a node

    def callSpawnServiceTopic(self):
        """Advertise the ``pcl_segment`` service and spin at 50 Hz until
        ROS shutdown (blocking)."""
        rospy.Service('pcl_segment', pcl_segment, self.dataSegment_response) # advertise a service
        # rospy.Service('display_2D_data', display2Ddata, self.dataDisplay_response) # advertise a service
        r= rospy.Rate(50)
        # print("Check check check .........")
        while not rospy.is_shutdown():
            r.sleep()

    def dataSegment_response(self, data):
        """Service callback: cluster 2-D points from the request.

        data: pcl_segment request with ``cmd`` flag, ``dataNumber`` count and
        ``points`` (geometry_msgs/Point array; only x and y are used).
        Returns pcl_segmentResponse(True, per-cluster counts, clusters) when
        data.cmd is truthy; implicitly returns None otherwise.
        """
        numofCluster = 0
        if data.cmd:
            X =[]
            Y =[]
            D =[]
            n = data.dataNumber
            # print('The number of data is: ',n)
            for i in range(n):
                # X.append(data.points[i].x)
                # Y.append(data.points[i].y)
                D.append([data.points[i].x, data.points[i].y])
            labels = []
            clusterSet=[]
            numofData = []
            opt_neigbor = 0.0
            opt_cluster = 0
            # Model-selection pass: score candidate component counts by a
            # cluster-adjacency ratio and remember the best (opt_cluster).
            for i in range(3,7): # run from 3 -> 6
                # gaussian mixture model - maximum likelihood estimation
                em_gmm = mixture.GaussianMixture(n_components=i, covariance_type='full')
                # em_gmm = mixture.GaussianMixture(n_components=5, covariance_type='diag')
                # train model by fit function
                em_gmm.fit(D)
                # test model by predict function
                labels = em_gmm.predict(D)
                clusterSet1=[]
                boundarySet = []
                # push each labeled data into a set
                for k in range(i):
                    points = []
                    for j in range(n):
                        if labels[j] == k:
                            # point = Point()
                            point_x = D[j][0]
                            point_y = D[j][1]
                            if len(points) ==0:
                                points = np.array([[point_x, point_y]])
                            else:
                                points = np.append(points, [[point_x, point_y]], axis=0)
                    clusterSet1.append(points)
                # print('number of cluster is hehre: ', len(clusterSet1))
                # Estimate a boundary point set for every cluster.
                # 0.04 is the slicing width — tuned constant, units follow the
                # input coordinates (TODO confirm with data producer).
                for j in range(len(clusterSet1)):
                    data1 = clusterSet1[j]
                    min_max = self.det_maxmin_xy(data1)
                    # print('check min max position: ', min_max)
                    bound1 = self.est_2DBound(data1, min_max, 0.04)
                    # print('check how much data in the boundary: ', bound1)
                    boundarySet.append(bound1)
                # print('how much boundary do we have:', len(boundarySet))
                # 0.01 is the max boundary-point distance to count as touching.
                neighMatrix = self.determine_neighbors(boundarySet, 0.01)
                print(neighMatrix)
                max_neigh, second_neigh = self.max_clusterNeighbor(neighMatrix)
                print('the neighbor is: ', max_neigh)
                print('the cluster is: ', i)
                # ratio = (float(max_neigh) - float(second_neigh))/np.sqrt(float(i))
                ratio = float(max_neigh)/(float(max_neigh) + float(second_neigh)) + float(max_neigh)/np.sqrt(float(i))
                print('the neighbor ration is: ', ratio)
                if ratio > opt_neigbor:
                    opt_neigbor = ratio
                    opt_cluster = i
            # NOTE(review): the selected opt_cluster is NOT used below — the
            # final fit is hard-coded to 3 components (the opt_cluster call is
            # commented out). Looks like a debugging leftover; confirm intent.
            # em_gmm = mixture.GaussianMixture(n_components=opt_cluster, covariance_type='full')
            em_gmm = mixture.GaussianMixture(n_components=3, covariance_type='full')
            # em_gmm = mixture.GaussianMixture(n_components=5, covariance_type='diag')
            # train model by fit function
            em_gmm.fit(D)
            # test model by predict function
            labels = em_gmm.predict(D)
            # print('the length of labels: ', len(labels))
            # data to show
            # get the segmented data and send back to the service
            # Scans label ids 0..5 (max candidate count); empty ids are skipped.
            for i in range(6):
                points = cluster()
                x1 =[]
                y1 = []
                for j in range(n):
                    if labels[j] == i:
                        point = Point()
                        point.x = D[j][0]
                        point.y = D[j][1]
                        point.z = 0.0
                        x1.append(D[j][0])
                        y1.append(D[j][1])
                        points.cluster.append(point)
                        # print(point)
                if not (len(points.cluster) == 0):
                    print(len(points.cluster))
                    clusterSet.append(points)
                    numofData.append(len(points.cluster))
                    X.append(x1)
                    Y.append(y1)
            # print (labels)
            # x0 =[]
            # y0 =[]
            # for j in range(len(d0)):
            #     x0.append(d0[j][0])
            #     y0.append(d0[j][1])
            # Scatter plot of the clusters is built but never shown
            # (plt.show() is commented out below).
            plt.figure()
            # plt.subplot(221)
            color = ['r','b', 'y', 'm', 'g', 'c', 'k', 'w']
            for i in range(len(X)):
                plt.scatter(X[i], Y[i], c=color[i])
            # plt.subplot(222)
            # plt.scatter(x1, y1, c='b')
            # plt.subplot(223)
            # plt.scatter(x2, y2, c='g')
            # plt.title("cluster 1")
            # plt.scatter(X, Y, c=labels, s=40, cmap='viridis')
            plt.title("EM-GMM")
            # plt.show()
            return pcl_segmentResponse(True, numofData, clusterSet) # return the value for the service.response

    # get the maximum and minimum coordinate of 2D data
    def det_maxmin_xy(self, data):
        """Return np.array([min_x, max_x, min_y, max_y]) over an (N,2)
        point array. The +/-10 presets are overwritten at i == 0."""
        max_min = np.array([0.0,0.0,0.0,0.0])
        max_x = 10.0
        min_x = -10.0
        max_y = 10.0
        min_y = -10.0
        for i in range(len(data)):
            if i ==0:
                max_x = data[i][0]
                min_x = data[i][0]
                max_y = data[i][1]
                min_y = data[i][1]
            # For x
            if data[i][0] > max_x:
                max_x = data[i][0]
            if data[i][0] < min_x:
                min_x = data[i][0]
            # For y
            if data[i][1] > max_y:
                max_y = data[i][1]
            if data[i][1] < min_y:
                min_y = data[i][1]
        max_min[0] = min_x;
        max_min[1] = max_x;
        max_min[2] = min_y;
        max_min[3] = max_y;
        return max_min

    # get the set of boundary points
    def est_2DBound(self, data, max_minxy, slicing_factor): # slicing_factor =0.02
        """Estimate boundary points of a 2-D cluster by axis slicing.

        For each axis, the data range is swept in half-``slicing_factor``
        steps; within each slice the two mutually farthest points are kept
        (squared distances are compared, which preserves ordering).
        Duplicate coordinates already in the boundary set are skipped.
        Returns an (M,2) numpy array of boundary points.
        """
        twoDboundary = []
        point1 = np.array([0.0,0.0])
        point2 = np.array([0.0,0.0])
        start = 0.0
        distance1 = 0.0
        distance2 = 0.0
        max_distance = 0.0
        maxmin = np.array([0.0,0.0,0.0,0.0])
        for i in range(4):
            maxmin[i] = max_minxy[i]
        count_check = 0
        # slicing for each axis direction find the farthest point couples in each slicing axis
        for axis in range(2):
            start = maxmin[2*axis]
            # find the point cloud belong to one slicing
            while start < maxmin[2*axis+1]:
                for i in range(len(data)):
                    # select the right distance
                    dis_xy = 0.0
                    if axis == 0:
                        dis_xy = data[i][0] - start
                    if axis == 1:
                        dis_xy = data[i][1] - start
                    # if the point belong to the slicing, check whether it set a furthest distance pair
                    test1 = dis_xy - slicing_factor/2
                    test2 = dis_xy + slicing_factor/2
                    # if (dis_xy < slicing_factor/2) and (dis_xy > -slicing_factor/2):
                    if (test1 < 0) and (test2 > 0):
                        if count_check == 0: # this is the first point
                            point1[0] = data[i][0]
                            point1[1] = data[i][1]
                        elif count_check == 1: # this is the second point
                            point2[0] = data[i][0]
                            point2[1] = data[i][1]
                            max_distance = np.power((point2[0]-point1[0]),2) + np.power((point2[1]-point1[1]),2)
                        else:
                            distance1 = np.power((point1[0]-data[i][0]),2) + np.power((point1[1]-data[i][1]),2)
                            distance2 = np.power((point2[0]-data[i][0]),2) + np.power((point2[1]-data[i][1]),2)
                            # Replace whichever endpoint is nearer when the
                            # new pair would be farther apart than the best.
                            if distance2 < distance1:
                                if distance1 > max_distance:
                                    max_distance = distance1
                                    point2[0] = data[i][0]
                                    point2[1] = data[i][1]
                            if distance2 > distance1:
                                if distance2 > max_distance:
                                    max_distance = distance2
                                    point1[0] = data[i][0]
                                    point1[1] = data[i][1]
                        count_check = count_check + 1
                count_check = 0
                # check if there is points with common coordinate
                commonCoor1 = False
                commonCoor2 = False
                if len(twoDboundary) == 0:
                    twoDboundary = np.array([point1])
                    twoDboundary = np.append(twoDboundary, [point2], axis=0)
                else:
                    for j in range(len(twoDboundary)):
                        if (point1[0] == twoDboundary[j][0]) and (point1[1] == twoDboundary[j][1]):
                            commonCoor1 = True
                            # std::cout << "Point 1 has the same coordinate with a point in boundary set" << std::endl;
                        if (point2[0] == twoDboundary[j][0]) and (point2[1] == twoDboundary[j][1]):
                            commonCoor2 = True
                            # std::cout << "Point 2 has the same coordinate with a point in boundary set" << std::endl;
                    if (not commonCoor1):
                        # print('how many time they will come herelsdfkjasldjfl;asjdl;jkfasl;df')
                        # print('check the value of point 1', point1)
                        twoDboundary = np.append(twoDboundary, [point1], axis=0)
                    if (not commonCoor2):
                        twoDboundary = np.append(twoDboundary, [point2], axis=0)
                # print('the length of the boundary set: ', len(twoDboundary))
                start = start + slicing_factor/2
                point1[0] = 0.0
                point1[1] = 0.0
                point2[0] = 0.0
                point2[1] = 0.0
        # twoDboundary = np.array(twoDboundary)
        return twoDboundary

    def determine_neighbors(self, data, min_dis): # data here is the boundarySet
        """Build a symmetric 0/1 adjacency matrix over the cluster boundary
        sets in ``data``. Two clusters are neighbors when their boundaries
        come within ``min_dis`` of each other AND the shared border (midpoints
        of close pairs) spans more than 0.11 in length."""
        neig_matrix = np.zeros((len(data),len(data)))
        for i in range(len(data)-1):
            clus1 = data[i]
            for j in range(i+1,len(data)):
                borderPoints = []
                clus2 = data[j]
                for m in range(len(clus1)):
                    for n in range(len(clus2)):
                        distance = np.sqrt(np.power((clus1[m][0] -clus2[n][0]),2) + np.power((clus1[m][1] - clus2[n][1]),2))
                        # if two points closer than the threshold
                        if distance < min_dis:
                            p_x = (clus1[m][0] + clus2[n][0])/2
                            p_y = (clus1[m][1] + clus2[n][1])/2
                            if len(borderPoints) == 0:
                                borderPoints = np.array([[p_x, p_y]])
                            else:
                                borderPoints = np.append(borderPoints, [[p_x, p_y]], axis=0)
                # the border should be long enough to be considered as neighbors
                max_distance = 0.0
                if len(borderPoints) > 2:
                    for k in range(len(borderPoints)-1):
                        for h in range(k, len(borderPoints)):
                            dis = self.cal_distance(borderPoints[k], borderPoints[h])
                            if max_distance < dis:
                                max_distance = dis
                # condition to be neighbor is the border should longer than 0.1
                if max_distance > 0.11:
                    neig_matrix[i][j] = 1.0
                    neig_matrix[j][i] = 1.0
        # print(neig_matrix)
        return neig_matrix

    def cal_distance(self, p1, p2):
        """Euclidean distance between two 2-D points."""
        return np.sqrt(np.power((p1[0]-p2[0]),2) + np.power((p1[1]-p2[1]),2))

    def max_clusterNeighbor(self, neighMatrix):
        """Return (largest, second-largest) neighbor counts over the rows of
        the adjacency matrix; the second count excludes the row that produced
        the first."""
        max_neighbor = 0.0
        sec_neighbor = 0.0
        cluster_index = 0
        for i in range(len(neighMatrix)):
            count =0
            for j in range(len(neighMatrix)):
                if neighMatrix[i][j] == 1.0:
                    count += 1
            if count > max_neighbor:
                max_neighbor = count
                cluster_index = i
        # for the second most neighbor
        for i in range(len(neighMatrix)):
            count =0
            if not (i == cluster_index):
                for j in range(len(neighMatrix)):
                    if neighMatrix[i][j] == 1.0:
                        count += 1
                if count > sec_neighbor:
                    sec_neighbor = count
                    # cluster_index = i
        return max_neighbor, sec_neighbor
def main(args):
    """Start the segmentation service node and block until ROS shuts down."""
    server = dataSegment_server()
    server.callSpawnServiceTopic()
    try:
        rospy.spin()
    except KeyboardInterrupt:
        print("Shutting down")
# Script entry point: run the ROS segmentation service node.
if __name__ == '__main__':
    main(sys.argv)
# # topic name
# pub_dzungYolov3Keras = rospy.Publisher('Object_Centroid', String, queue_size=2)
| |
#!/usr/bin/env python
# vim: set fileencoding=utf-8 ts=4 sts=4 sw=4 et tw=80 :
#
# Second cut at astrometry fitting for UCD project.
#
# <NAME>
# Created: 2021-08-30
# Last modified: 2021-08-30
#--------------------------------------------------------------------------
#**************************************************************************
#--------------------------------------------------------------------------
## Logging setup:
import logging
#logging.basicConfig(level=logging.DEBUG)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
#logger.setLevel(logging.DEBUG)
logger.setLevel(logging.INFO)
## Current version:
__version__ = "0.1.0"
## Modules:
import os
import sys
import time
import numpy as np
from numpy.lib.recfunctions import append_fields
#import datetime as dt
#from dateutil import parser as dtp
#import scipy.linalg as sla
#import scipy.signal as ssig
#import scipy.ndimage as ndi
import scipy.optimize as opti
#import scipy.interpolate as stp
#import scipy.spatial.distance as ssd
#from functools import partial
#from collections import OrderedDict
#from collections.abc import Iterable
#import multiprocessing as mp
#np.set_printoptions(suppress=True, linewidth=160)
#import pandas as pd
#import statsmodels.api as sm
#import statsmodels.formula.api as smf
#from statsmodels.regression.quantile_regression import QuantReg
## Numpy version as a major.minor float (e.g. 1.21), for feature gating:
_have_np_vers = float('.'.join(np.__version__.split('.')[:2]))
import theil_sen as ts
## Useful stats routines:
def calc_ls_med_MAD(a, axis=None):
    """Compute the median of *a* and its median absolute deviation, with the
    MAD scaled by 1.4826 so it estimates a normal-distribution sigma.

    Returns the tuple (median, scaled_MAD), computed along *axis*.
    """
    center = np.median(a, axis=axis)
    deviations = np.abs(a - center)
    scaled_mad = 1.482602218 * np.median(deviations, axis=axis)
    return (center, scaled_mad)
## Median absolute residual:
def calc_MAR(residuals, scalefactor=1.482602218):
    """Return the median absolute residual (MAR) of *residuals*.

    By default the result is scaled by 1.4826 to estimate a
    normal-distribution sigma; pass ``scalefactor=1.0`` for the raw MAR.
    """
    abs_residuals = np.abs(residuals)
    return scalefactor * np.median(abs_residuals)
##--------------------------------------------------------------------------##
##------------------ Astrometry Fitting (5-par) ----------------##
##--------------------------------------------------------------------------##
## Angular unit conversions: arcseconds and milliarcseconds per radian.
_ARCSEC_PER_RADIAN = 180. * 3600.0 / np.pi
_MAS_PER_RADIAN = _ARCSEC_PER_RADIAN * 1e3
class AstFit(object):
"""
This module provides astrometric fitting capability. Internally, a
5-parameter model is maintained in a numpy array. Its contents are:
* RA (radians) at reference epoch
* DE (radians) at reference epoch
* pmRA (radians / yr). [this is pmRA* / cos(dec)]
* pmDE (radians / yr)
* parallax (radians)
"""
_need_eph_keys = ['jdtdb', 'x', 'y', 'z']
_need_data_keys = ['jdtdb', 'dra', 'dde', 'obs_x', 'obs_y', 'obs_z']
_asec_per_rad = _ARCSEC_PER_RADIAN
_mas_per_rad = _MAS_PER_RADIAN
def __init__(self):
self._jd_tdb = None
self._dt_yrs = None
self.obs_eph = None
self.ref_tdb = None
self.inliers = None
self.rweight = None
self._is_set = False
self._chiexp = 2
self._can_iterate = False
return
def set_exponent(self, exponent=2):
"""
Choose exponent used in penalty function (N below). The solver seeks
to minimize the sum over data points of:
((obs - model) / err)**N
"""
#Setting N=2 behaves like Chi-squared. Setting N=1 minimizes total
#absolute deviation
self._chiexp = exponent
return
#def setup(self, jd_tdb_ref, RA_deg, DE_deg, obs_eph,
def setup(self, data, reject_outliers=True,
jd_tdb_ref=None, RA_err=None, DE_err=None):
self._is_rdy = False
if not all([isinstance(data[x], np.ndarray) \
for x in self._need_data_keys]):
sys.stderr.write("Incomplete data set!\n")
sys.stderr.write("Required columns include:\n")
sys.stderr.write("--> %s\n" % str(self._need_data_keys))
return False
self._outrej = reject_outliers
#if not all([isinstance(obs_eph[x], np.ndarray) \
# for x in self._need_eph_keys]):
# sys.stderr.write("Incomplete ephemeris data!\n")
# sys.stderr.write("Required columns include:\n")
# sys.stderr.write("--> %s\n" % str(self._need_eph_keys))
# return False
#self.inliers = np.ones_like(RA_deg, dtype='bool')
#self.rweight = np.ones_like(RA_deg)
self.inliers = np.ones(len(data), dtype='bool')
self.rweight = np.ones(len(data), dtype='float')
#self.obs_eph = self._augmented_eph(obs_eph)
self.dataset = np.copy(data)
if jd_tdb_ref:
self.ref_tdb = jd_tdb_ref
else:
self.ref_tdb = data['jdtdb'][0]
#self.ref_tdb = jd_tdb_ref
self._dt_yrs = (self.dataset['jdtdb'] - self.ref_tdb) / 365.25
#self._RA_rad = np.radians(RA_deg)
#self._DE_rad = np.radians(DE_deg)
self._RA_rad = np.radians(self.dataset['dra'])
self._DE_rad = np.radians(self.dataset['dde'])
#self._RA_med, self._RA_MAD = calc_ls_med_MAD(self._RA_rad)
#self._DE_med, self._DE_MAD = calc_ls_med_MAD(self._DE_rad)
#self._RA_MAD *= np.cos(self._DE_med)
self._RA_err = RA_err
self._DE_err = DE_err
self._need_resid_errors = False
if not isinstance(RA_err, np.ndarray):
sys.stderr.write("WARNING: RA_err not given, using estimated\n")
self._need_resid_errors = True
if not isinstance(DE_err, np.ndarray):
sys.stderr.write("WARNING: DE_err not given, using estimated\n")
self._need_resid_errors = True
#if isinstance(RA_err, np.ndarray):
# self._RA_err = np.radians(RA_err)
#else:
# self._RA_err = self._RA_MAD
#if isinstance(DE_err, np.ndarray):
# self._DE_err = np.radians(DE_err)
#else:
# self._DE_err = self._DE_MAD
#self._DE_err = np.radians(DE_err) if DE_err else self._DE_MAD
self._is_set = True
self._can_iterate = False
return True
#def set_ref_time(self, t_ref):
# self.ref_time = t_ref
# return
@staticmethod
def _calc_parallax_factors(RA_rad, DE_rad, X_au, Y_au, Z_au):
"""Compute parallax factors in arcseconds. The RA component has
been divided by cos(dec) so that it can be used directly for
residual minimization."""
sinRA, cosRA = np.sin(RA_rad), np.cos(RA_rad)
sinDE, cosDE = np.sin(DE_rad), np.cos(DE_rad)
ra_factor = (X_au * sinRA - Y_au * cosRA) / cosDE
de_factor = X_au * cosRA * sinDE \
+ Y_au * sinRA * sinDE \
- Z_au * cosDE
return ra_factor, de_factor
#def ts_fit_coord(self, time_vals, coo_vals):
@staticmethod
def ts_fit_radec_pm(t_yrs, RA_rad, DE_rad, plx_as=0, weighted=False):
ts_ra_model = ts.linefit(t_yrs, RA_rad, weighted=weighted)
ts_de_model = ts.linefit(t_yrs, DE_rad, weighted=weighted)
return np.array([ts_ra_model[0], ts_de_model[0],
ts_ra_model[1], ts_de_model[1], plx_as])
def apparent_radec(self, t_ref, astrom_pars, eph_obs):
"""
t_ref -- chosen reference epoch
astrom_pars -- five astrometric parameters specified at the
reference epoch: meanRA (rad), meanDE (rad),
pmRA*cos(DE), pmDE, and parallax
eph_obs -- dict with x,y,z,t elements describing the times
and places of observations (numpy arrays)
FOR NOW, assume
[t_ref] = JD (TDB)
[t] = JD (TDB)
[pars] = rad, rad, arcsec/yr, arcsec/yr, arcsec
*no cos(d)*
"""
rra, rde, pmra, pmde, prlx = astrom_pars
t_diff_yr = (eph_obs['t'] - t_ref) / 365.25 # units of years
pfra, pfde = self._calc_parallax_factors(rra, rde,
eph_obs['x'], eph_obs['y'], eph_obs['z'])
delta_ra = (t_diff_yr * pmra + prlx * pfra)
delta_de = (t_diff_yr * pmde + prlx * pfde)
return (rra + delta_ra, rde + delta_de)
    def eval_model(self, params):
        """Evaluate the 5-parameter astrometric model at the stored epochs;
        thin public alias for the internal solver evaluation."""
        return self._solver_eval(params)
#def eval_model(self, params):
# rra, rde, pmra, pmde, prlx = params
# pfra, pfde = self._calc_parallax_factors(rra, rde,
# self.dataset['obs_x'], self.dataset['obs_y'],
# self.dataset['obs_z'])
# delta_ra = self._dt_yrs * pmra + prlx * pfra
# delta_de = self._dt_yrs * pmde + prlx * pfde
# return (rra + delta_ra, rde + delta_de)
def _solver_eval(self, params):
rra, rde, pmra, pmde, prlx = params
pfra, pfde = self._calc_parallax_factors(rra, rde,
self.dataset['obs_x'], self.dataset['obs_y'],
self.dataset['obs_z'])
delta_ra = self._dt_yrs * pmra + prlx * pfra
delta_de = self._dt_yrs * pmde + prlx * pfde
#delta_ra = self._dt_yrs * pmra - prlx * pfra
#delta_de = self._dt_yrs * pmde - prlx * pfde
return (rra + delta_ra, rde + delta_de)
def _calc_radec_residuals(self, params):
model_RA, model_DE = self._solver_eval(params)
return (self._RA_rad - model_RA, self._DE_rad - model_DE)
def _calc_radec_residuals_sigma(self, params):
model_RA, model_DE = self._solver_eval(params)
#rsigs_RA = (self._RA_rad - model_RA) / self._RA_err
#rsigs_DE = (self._DE_rad - model_DE) / self._DE_err
rsigs_RA = (self._RA_rad - model_RA) / self._use_RA_err
rsigs_DE = (self._DE_rad - model_DE) / self._use_DE_err
return rsigs_RA, rsigs_DE
    def _calc_total_residuals_sigma(self, params):
        """Quadrature-combined RA/DE residual significance per point."""
        return np.hypot(*self._calc_radec_residuals_sigma(params))
def _calc_chi_square(self, params, negplxhit=100.):
model_ra, model_de = self._solver_eval(params)
#resid_ra = (model_ra - self._RA_rad) #/ np.cos(model_de)
#resid_de = (model_de - self._DE_rad)
resid_ra = (self._RA_rad - model_ra) #/ np.cos(model_de)
resid_de = (self._DE_rad - model_de)
#resid_ra = (model_ra - self._RA_rad) / self._RA_err
#resid_de = (model_de - self._DE_rad) / self._DE_err
#if isinstance(self._RA_err, np.ndarray):
# resid_ra /= self._RA_err
#if isinstance(self._DE_err, np.ndarray):
# resid_de /= self._DE_err
if isinstance(self._use_RA_err, np.ndarray):
resid_ra /= self._use_RA_err
if isinstance(self._use_DE_err, np.ndarray):
resid_de /= self._use_DE_err
#return np.sum(np.hypot(resid_ra, resid_de))
#return np.sum(np.hypot(resid_ra, resid_de)**2)
resid_tot = np.hypot(resid_ra, resid_de)[self.inliers]
if (params[4] < 0.0):
resid_tot *= negplxhit
return np.sum(resid_tot**self._chiexp)
#return np.sum(np.hypot(resid_ra, resid_de)**self._chiexp)
#return np.sum(np.abs(resid_ra * resid_de)**self._chiexp)
def _calc_initial_parallax(self, params):
rra_resid, rde_resid = self._calc_radec_residuals(params)
mar_ra_rad = calc_MAR(rra_resid)
mar_ra_mas = _MAS_PER_RADIAN * mar_ra_rad
sys.stderr.write("mar_ra_rad: %f\n" % mar_ra_rad)
sys.stderr.write("mar_ra_mas: %f\n" % mar_ra_mas)
pfra, pfde = self._calc_parallax_factors(
self._RA_rad, self._DE_rad, self.dataset['obs_x'],
self.dataset['obs_y'], self.dataset['obs_z'])
#sys.stderr.write("pfra_arcsec: %s\n" % str(pfra_arcsec))
#pfra_rad = pfra_arcsec / _ARCSEC_PER_RADIAN
adjustment_arcsec = ts.linefit(pfra, _ARCSEC_PER_RADIAN * rra_resid)
sys.stderr.write("adjustment (arcsec): %s\n" % str(adjustment_arcsec))
return adjustment_arcsec
# Driver routine for 5-parameter astrometric fitting:
def fit_bestpars(self, sigcut=5):
if not self._is_set:
sys.stderr.write("Error: data not OK for fitting!\n")
sys.stderr.write("Run setup() first and retry ...\n")
return False
# robust initial guess with Theil-Sen:
uguess = self.ts_fit_radec_pm(self._dt_yrs, self._RA_rad, self._DE_rad)
wguess = self.ts_fit_radec_pm(self._dt_yrs, self._RA_rad, self._DE_rad,
weighted=True)
#sys.stderr.write("Initial guess: %s\n" % str(guess))
sys.stderr.write("Initial guess (unweighted):\n")
sys.stderr.write("==> %s\n" % str(self.nice_units(uguess)))
sys.stderr.write("\n")
sys.stderr.write("Initial guess (weighted):\n")
sys.stderr.write("==> %s\n" % str(self.nice_units(wguess)))
sys.stderr.write("\n")
guess = uguess # adopt unweighted for now
#guess[4] = 1000. / _MAS_PER_RADIAN
# initial crack at parallax and zero-point:
woohoo = self._calc_initial_parallax(guess)
sys.stderr.write("woohoo: %s\n" % str(woohoo))
self.woohoo = woohoo
ra_nudge_rad, plx_rad = woohoo / _ARCSEC_PER_RADIAN
guess[0] += ra_nudge_rad
guess[4] = plx_rad
# estimate RA,Dec uncertainty from residuals if not known a prior:
if self._need_resid_errors:
rra_resid, rde_resid = self._calc_radec_residuals(guess)
rra_scatter = calc_MAR(rra_resid)
rde_scatter = calc_MAR(rde_resid)
mra_scatter = _MAS_PER_RADIAN * rra_scatter
mde_scatter = _MAS_PER_RADIAN * rde_scatter
#sys.stderr.write("rra_resid: %s\n" % str(rra_resid))
#sys.stderr.write("rde_resid: %s\n" % str(rde_resid))
sys.stderr.write("rra_scatter: %e (rad)\n" % rra_scatter)
sys.stderr.write("rde_scatter: %e (rad)\n" % rde_scatter)
sys.stderr.write("mra_scatter: %10.5f (mas)\n" % mra_scatter)
sys.stderr.write("mde_scatter: %10.5f (mas)\n" % mde_scatter)
self._RA_err = np.ones_like(self._RA_rad) * rra_scatter
self._DE_err = np.ones_like(self._DE_rad) * rde_scatter
self._use_RA_err = np.copy(self._RA_err)
self._use_DE_err = np.copy(self._DE_err)
# check whether anything looks really bad:
self._par_guess = guess
#rsig_tot = np.hypot(*self._calc_radec_residuals_sigma(guess))
| |
tuple or list, optional
:param bd: Parent block diagram, defaults to None
:type bd: BlockDiagram, optional
:param nin: Number of inputs, defaults to None
:type nin: int, optional
:param nout: Number of outputs, defaults to None
:type nout: int, optional
:param ``*inputs``: Optional incoming connections
:type ``*inputs``: Block or Plug
    :param ``**kwargs``: Unknown arguments
:return: A Block superclass
:rtype: Block
A block object is the superclass of all blocks in the simulation environment.
This is the top-level initializer, and handles most options passed to
the superclass initializer for each block in the library.
"""
type: str # a string holding the name of a concrete block. Usually the lowercase
# name of the block's class definition
blockclass: Literal['source', 'sink', 'function', 'transfer', 'subsystem']
def __new__(cls, *args, bd=None, **kwargs):
"""
Construct a new Block object.
:param cls: The class to construct
:type cls: class type
:param *args: positional args passed to constructor
:type *args: list
:param **kwargs: keyword args passed to constructor
:type **kwargs: dict
:return: new Block instance
:rtype: Block instance
"""
# print('Block __new__', args,bd, kwargs)
block = super(Block, cls).__new__(cls) # create a new instance
# we overload setattr, so need to know whether it is being passed a port
# name. Add this attribute now to allow proper operation.
block.__dict__['portnames'] = [] # must be first, see __setattr__
block.bd = bd
block.nin = 0
block.nout = 0
block.nstates = 0
return block
_latex_remove = str.maketrans({
'$': '',
'\\': '',
'{': '',
'}': '',
'^': '',
'_': ''
})
def __init__(self,
name: str = None,
inames: List[str] = None,
onames: List[str] = None,
snames: List[str] = None,
pos: Tuple[int, int] = None,
nin: int = None,
nout: int = None,
bd: 'BlockDiagram' = None,
*inputs: Union['Block', Plug],
**kwargs):
# print('Block constructor, bd = ', bd)
if name is not None:
self.name_tex = name
self.name = self._fixname(name)
else:
self.name = None
self.pos = pos
self.id = None
self.out = []
self.updated = False
self.shape = 'block' # for box
self._inport_names = None
self._outport_names = None
self._state_names = None
self.initd = True
self.bd = self.bd or bd
self.nstates = self.nstates
# appease pylint
self.portnames = self.portnames # this gets set in Block.__new__()
# these get set in BlockDiagram.compile() They are None until wired..?
self.inports: List[Opt[Wire]] = []
self.outports: List[List[Wire]] = []
if nin is not None:
self.nin = nin
if nout is not None:
self.nout = nout
if inames is not None:
self.inport_names(inames)
if onames is not None:
self.outport_names(onames)
if snames is not None:
self.state_names(snames)
for i, input in enumerate(inputs):
self.bd.connect(input, Plug(self, port=i))
if len(kwargs) > 0:
print('WARNING: unused arguments', kwargs.keys())
@property
def info(self):
"""
Interactive display of block properties.
Displays all attributes of the block for debugging purposes.
"""
print("block: " + type(self).__name__)
for k, v in self.__dict__.items():
if k != 'sim':
print(" {:11s}{:s}".format(k + ":", str(v)))
self.inputs
# for use in unit testing
def _eval(self, *inputs: Any, t=None):
"""
Evaluate a block for unit testing.
:param *inputs: List of input port values
:type *inputs: list
:param t: Simulation time, defaults to None
:type t: float, optional
:return: Block output port values
:rtype: list
The output ports of the block are evaluated for a given set of input
port values and simulation time. Input and output port values are treated
as lists.
Mostly used for making concise unit tests.
"""
assert len(inputs) == self.nin, 'wrong number of inputs provided'
self.inputs = list(inputs)
out = self.output(t=t)
assert isinstance(out, list), 'result must be a list'
assert len(out) == self.nout, 'result list is wrong length'
return out
def __getitem__(self, port: int):
"""
Convert a block slice reference to a plug.
:param port: Port number
:type port: int
:return: A port plug
:rtype: Plug
Invoked whenever a block is referenced as a slice, for example::
c = bd.CONSTANT(1)
bd.connect(x, c[0])
bd.connect(c[0], x)
In both cases ``c[0]`` is converted to a ``Plug`` by this method.
"""
# block[i] is a plug object
#print('getitem called', self, port)
return Plug(self, port)
def __setitem__(self, port: int, src: SourceType):
"""
Convert a LHS block slice reference to a wire.
:param port: Port number
:type port: int
:param src: the RHS
:type src: Block or Plug
Used to create a wired connection by assignment, for example::
c = bd.CONSTANT(1)
c[0] = x
Ths method is invoked to create a wire from ``x`` to port 0 of
the constant block ``c``.
"""
# b[port] = src
# src --> b[port]
#print('connecting', src, self, port)
self.bd.connect(src, self[port])
    def __setattr__(self, name: str, value: SourceType):
        """
        Set an attribute, or wire a named port.

        :param name: Port name
        :type name: str
        :param value: the RHS
        :type value: Block or Plug

        Used to create a wired connection by assignment, for example::

            c = bd.CONSTANT(1, inames=['u'])
            c.u = x

        This method is invoked to create a wire from ``x`` to port 'u' of
        the constant block ``c``.

        Notes:

        - this overloaded method handles all instances of ``setattr`` and
          implements normal functionality as well, only creating a wire
          if ``name`` is a known port name.
        """
        # b[port] = src
        # src --> b[port]
        # gets called for regular attribute settings, as well as for wiring
        if name in self.portnames:
            # we're doing wiring
            #print('in __setattr___', self, name, value)
            self.bd.connect(value, getattr(self, name))
        else:
            #print('in __setattr___', self, name, value)
            # regular case, add attribute to the instance's dictionary
            self.__dict__[name] = value
def __mul__(self, right: SourceType):
"""
Operator for implicit wiring.
:param right: A block or plugto be wired to
:type right: Block or Plug
:return: ``right``
:rtype: Block or Plug
Implements implicit wiring, for example::
a = bd.CONSTANT(1) * bd.GAIN(2)
will connect the output of the CONSTANT block to the input of the
GAIN block. The result will be GAIN block, whose output in this case
will be assigned to ``a``.
Note that::
a = bd.CONSTANT(1) * func[1]
will connect port 0 of CONSTANT to port 1 of ``func``, and port 1 of ``func``
will be assigned to ``a``. To specify a different outport port on ``func``
we need to use parentheses::
a = (bd.CONSTANT(1) * func[1])[0]
which will connect port 0 of CONSTANT ` to port 1 of ``func``, and port 0 of ``func``
will be assigned to ``a``.
:seealso: Plug.__mul__
"""
# called for the cases:
# block * block
# block * plug
s = self.bd
#assert isinstance(right, Block), 'arguments to * must be blocks not ports (for now)'
w = s.connect(self, right) # add a wire
#print('block * ' + str(w))
return right
# make connection, return a plug
def __str__(self):
if hasattr(self, 'name') and self.name is not None:
return self.name
else:
return self.blockclass + '.??'
    def __repr__(self):
        """Same as ``__str__``: the block name or a placeholder."""
        return self.__str__()
    def _fixname(self, s):
        # strip LaTeX markup characters so the name is a plain identifier
        return s.translate(self._latex_remove)
def inport_names(self, names: Iterable[str]):
"""
Set the names of block input ports.
:param names: List of port names
:type names: list of str
Invoked by the ``inames`` argument to the Block constructor.
The names can include LaTeX math markup. The LaTeX version is used
where appropriate, but the port names are a de-LaTeXd version of the
given string with backslash, underscore, caret, braces and dollar signs
removed.
"""
self._inport_names = names
for port, name in enumerate(names):
fn = self._fixname(name)
setattr(self, fn, self[port])
self.portnames.append(fn)
def outport_names(self, names: Iterable[str]):
"""
Set the names of block output ports.
:param names: List of port names
:type names: list of str
Invoked by the ``onames`` argument to the Block constructor.
The names can include LaTeX math markup. The LaTeX version is used
where appropriate, but the port names are a de-LaTeXd version of the
given string with backslash, underscore, caret, braces and dollar signs
removed.
"""
self._outport_names = names
for port, name in enumerate(names):
fn = self._fixname(name)
setattr(self, fn, self[port])
self.portnames.append(fn)
    def state_names(self, names: Iterable[str]):
        """
        Set the names of the block's states.

        :param names: list of state names
        :type names: list of str
        """
        self._state_names = names
def sourcename(self, port: int):
"""
Get | |
import random
import numpy as np
import pynmmso as nmmso
class Swarm:
"""
Represents a swarm in the NMMSO algorithm.
Arguments
---------
id : int
Id used to refer to the swarm
swarm_size : int
Maximum number of particles in the swarm
problem :
Instance of the problem class. Must implement get_bounds and fitness functions.
listener : subclass of nmmso.listeners.BaseListener
Listener object to receive notification of events. Optional.
Attributes
----------
id : int
A unique identification number of this swarm.
mode_location : numpy array
The location of this mode.
mode_value : float
The fitness of the mode location.
number_of_particles : int
Number of particles in the swarm.
history_locations : 2D Numpy array
The current locations of each particle in the swarm.
history_values : 1D Numpy array
The fitness values for current locations of each particle in the swarm.
velocities : 2D Numpy array
Current velocity of each particle in the swarm.
    pbest_locations : 2D Numpy array
        The best location discovered for each particle.
    pbest_values : 1D Numpy array
        The fitness value associated with the best location for each particle in the swarm.
"""
    def __init__(self, id, swarm_size, problem, listener=None):
        """
        Create a swarm.

        :param id: unique identifier for this swarm
        :param swarm_size: maximum number of particles in the swarm
        :param problem: problem instance implementing get_bounds() and fitness()
        :param listener: optional event listener (nmmso.listeners.BaseListener)
        """
        self.id = id
        self.swarm_size = swarm_size
        self.problem = problem
        self.listener = listener
        # lower/upper corners of the search box
        self.mn = np.array(problem.get_bounds()[0])
        self.mx = np.array(problem.get_bounds()[1])
        self.changed = True
        self.converged = False
        self.num_dimensions = len(self.mn)
        self.mode_location = None  # Will be populated later on
        self.new_location = None  # Will be populated later on
        self.mode_value = None  # Will be populated later on
        # Initialize locations for swarm elements
        # current locations of swarm
        self.history_locations = np.zeros((self.swarm_size, self.num_dimensions))
        # current values of swarm
        self.history_values = np.full(self.swarm_size, -np.inf)
        # current best locations of swarm
        self.pbest_locations = np.zeros((self.swarm_size, self.num_dimensions))
        # current best values of swarm
        self.pbest_values = np.full(self.swarm_size, -np.inf)
        self.velocities = np.zeros((swarm_size, self.num_dimensions))
        self.number_of_particles = 1
        self.shifted_loc = None  # Will be populated later on
        self.dist = None  # Will be populated later on
def set_initial_location(self):
"""Sets the initial location of a swarm."""
self.changed = True
self.new_location = (np.random.rand(self.num_dimensions) * (self.mx-self.mn)) + self.mn
# random initial velocities of swarm
self.velocities[0, :] = (np.random.rand(self.num_dimensions) * (self.mx-self.mn)) + self.mn
def set_arbitrary_distance(self):
"""Set an arbitrary distance - this is done when we only have one swarm"""
self.dist = np.min(self.mx-self.mn)
    def increment(self):
        """
        Advance the swarm one step: while below capacity, add a new
        particle near the mode; otherwise move a randomly chosen particle
        with a PSO velocity update.  Candidate locations are re-drawn
        until they fall inside the bounds; after 20 rejections the
        violating velocity components are clamped toward the bounds.
        """
        new_location = self.mn - 1  # deliberately out of bounds so the loop runs
        d = self.dist
        shifted = False
        omega = 0.1  # inertia weight for the velocity update
        reject = 0
        r = random.randrange(self.swarm_size)  # select particle at random to move
        while np.sum(new_location < self.mn) > 0 or np.sum(new_location > self.mx) > 0:
            # if swarm is not yet at capacity, simply add a new particle
            if self.number_of_particles < self.swarm_size:
                usp = nmmso.Nmmso.uniform_sphere_points(1, self.num_dimensions)[0]
                new_location = self.mode_location + usp * (d/2)
            else:
                # move an existing particle
                shifted = True
                self.shifted_loc = r
                r1 = np.random.rand(self.num_dimensions)
                r2 = np.random.rand(self.num_dimensions)
                # PSO update: inertia + pull to swarm mode + pull to personal best
                temp_vel = omega * self.velocities[self.shifted_loc, :] + \
                    2.0 * r1 * \
                    (self.mode_location - self.history_locations[self.shifted_loc, :]) + \
                    2.0 * r2 * \
                    (self.pbest_locations[self.shifted_loc, :] -
                     self.history_locations[self.shifted_loc, :])
                if reject > 20:
                    # if we keep rejecting then put at extreme any violating design parameters
                    i_max = np.flatnonzero(
                        np.asarray(
                            self.history_locations[self.shifted_loc, :] + temp_vel > self.mx))
                    i_min = np.flatnonzero(
                        np.asarray(
                            self.history_locations[self.shifted_loc, :] + temp_vel < self.mn))
                    if i_max.size > 0:
                        temp_vel[i_max] = \
                            np.random.rand(i_max.size) * \
                            (self.mx[i_max] - self.history_locations[self.shifted_loc, i_max])
                    if i_min.size > 0:
                        temp_vel[i_min] = \
                            np.random.rand(i_min.size) * \
                            (self.history_locations[self.shifted_loc, i_min] - self.mn[i_min])
                new_location = self.history_locations[self.shifted_loc, :] + temp_vel
            reject = reject + 1
        if shifted:
            # moved particle keeps the velocity that produced the move
            self.velocities[self.shifted_loc, :] = temp_vel
        else:
            # otherwise initialise velocity in sphere based on distance from gbest to next
            # closest mode
            self.number_of_particles = self.number_of_particles + 1
            self.shifted_loc = self.number_of_particles - 1
            temp_vel = self.mn - 1
            reject = 0
            while np.sum(temp_vel < self.mn) > 0 or np.sum(temp_vel > self.mx) > 0:
                temp_vel = \
                    self.mode_location + \
                    nmmso.Nmmso.uniform_sphere_points(1, self.num_dimensions)[0] * (d / 2)
                reject = reject + 1
                if reject > 20:  # resolve if keep rejecting
                    temp_vel = np.random.rand(self.num_dimensions)*(self.mx-self.mn) + self.mn
            self.velocities[self.shifted_loc, :] = temp_vel
        self.new_location = new_location
        if self.listener is not None:
            if shifted:
                self.listener.swarm_moved_particle(self)
            else:
                self.listener.swarm_added_particle(self)
def initialise_with_uniform_crossover(self, swarm1, swarm2):
"""
Initialise a new swarm with the uniform crossover of the given swarms.
Arguments
---------
swarm1 : Swarm
swarm2 : Swarm
"""
self.new_location, _ = Swarm.uni(swarm1.mode_location, swarm2.mode_location)
self.evaluate_first()
self.changed = True
self.converged = False
def distance_to(self, swarm):
"""
Euclidean distance between this swarm and the given swarm, based on their mode locations.
Returns
-------
float
The distance between the two swarms.
"""
return np.linalg.norm(self.mode_location-swarm.mode_location)
def merge(self, swarm):
"""
Merges the give swarm into this swarm.
Arguments
----------
swarm : Swarm
Swarm to merge into this swarm.
"""
n1 = self.number_of_particles
n2 = swarm.number_of_particles
if n1 + n2 < self.swarm_size:
# simplest solution, where the combined active members of both populations
# are below the total size they can grow to
self.number_of_particles = n1 + n2
self.history_locations[n1:n1 + n2, :] = swarm.history_locations[0:n2, :]
self.history_values[n1:n1 + n2] = swarm.history_values[0:n2]
self.pbest_locations[n1:n1 + n2, :] = swarm.pbest_locations[0:n2, :]
self.pbest_values[n1:n1 + n2] = swarm.pbest_values[0:n2]
self.velocities[n1:n1 + n2, :] = swarm.velocities[0:n2, :]
else:
# select best out of combines population, based on current location (rather than pbest)
self.number_of_particles = self.swarm_size
temp_h_loc = \
np.concatenate((self.history_locations[0:n1, :], swarm.history_locations[0:n2, :]))
temp_h_v = \
np.concatenate((self.history_values[0:n1], swarm.history_values[0:n2]))
temp_p_loc = \
np.concatenate((self.pbest_locations[0:n1, :], swarm.pbest_locations[0:n2, :]))
temp_p_v = np.concatenate((self.pbest_values[0:n1], swarm.pbest_values[0:n2]))
temp_vel = np.concatenate((self.velocities[0:n1, :], swarm.velocities[0:n2, :]))
# get the indices of highest values
I = np.argsort(temp_h_v)[len(temp_h_v) - self.swarm_size:]
self.history_locations = temp_h_loc[I, :]
self.history_values = temp_h_v[I]
self.pbest_locations = temp_p_loc[I, :]
self.pbest_values = temp_p_v[I]
self.velocities = temp_vel[I, :]
def initialise_new_swarm_velocities(self):
"""Initialises velocities of a new swarm."""
reject = 0
temp_vel = self.mn - 1
while np.sum(temp_vel < self.mn) > 0 or np.sum(temp_vel > self.mx) > 0:
temp_vel = self.mode_location + \
nmmso.Nmmso.uniform_sphere_points(1, self.num_dimensions)[0] * \
(self.dist / 2)
reject += 1
if reject > 20:
temp_vel = (np.random.rand(self.num_dimensions) * (self.mx-self.mn)) + self.mn
self.velocities[0, :] = temp_vel
def update_location_and_value(self, location, value):
"""
Updates the location and value of this swarm.
Arguments
---------
location : numpy arrya
New location of swarm
value : float
New fitness value at swarm location.
"""
previous_location = self.mode_location
previous_value = self.mode_value
self.mode_location = location
self.mode_value = value
if self.listener is not None:
self.listener.swarm_peak_changed(self, previous_location, previous_value)
def evaluate_first(self):
"""
Evaluates the new location. This is the first evaluation so no need to examine
if a shift has occurred
"""
# new location is the only solution thus far in mode, so by definition
# is also the mode estimate, and the only history thus far
y = self.problem.fitness(self.new_location)
if not np.isscalar(y):
raise ValueError("Problem class's fitness method must return a scalar value.")
if self.listener is not None:
self.listener.location_evaluated(self.new_location, y)
self.mode_location = self.new_location # gbest location
self.mode_value = y # gbest value
self.history_locations[0, :] = self.mode_location
self.history_values[0] = y
self.pbest_locations[0, :] = self.mode_location
self.pbest_values[0] = y
def evaluate(self, y):
"""
Takes the value at the new location and updates the swarm statistics and history.
Arguments
---------
y : float
fitness value at the new location.
"""
if y > self.mode_value:
self.update_location_and_value(self.new_location, y)
self.changed = True
self.history_locations[self.shifted_loc, :] = self.new_location
self.history_values[self.shifted_loc] = y
if y > self.pbest_values[self.shifted_loc]:
self.pbest_values[self.shifted_loc] = y
self.pbest_locations[self.shifted_loc, :] = self.new_location
def find_nearest(self, swarms):
"""
Finds the nearest swarm from the given set of swarms.
Returns
-------
swarm
The nearest swarm this this swarm.
"""
best_swarm = None
distance = np.inf
for s in swarms:
if self != s:
d = np.sum((self.mode_location - s.mode_location) ** 2)
if d < distance:
distance = d
best_swarm = s
self.dist = np.sqrt(distance) # track Euc distance to nearest neighbour
return best_swarm, self.dist
@staticmethod
def uni(x1, x2):
"""
Uniform binary crossover.
Arguments
---------
x1 : numpy array of parameters
x2 : numpy array of parameters
Returns:
numpy array
New array of parameters formed from uniform crossover.
"""
# simulated binary crossover
x_c = x1.copy()
x_d = x2.copy()
l = len(x1)
r = np.flatnonzero(np.random.rand(l, | |
<filename>ledcontrol/animationpatterns.py
# led-control WS2812B LED Controller Server
# Copyright 2021 jackw01. Released under the MIT License (see LICENSE for details).
from random import random
from enum import Enum
import ledcontrol.driver as driver
import ledcontrol.utils as utils
# Color-space tag returned alongside every pattern color
ColorMode = Enum('ColorMode', ['hsv', 'rgb'])

# Primary animations that generate patterns in HSV or RGB color spaces
# return color, mode
def blank(t, dt, x, y, prev_state):
    """Pattern that renders every LED off (HSV black)."""
    return (0, 0, 0), ColorMode.hsv

# pattern IDs that display a solid color
static_patterns = [0, 1, 2]
default = {
0: {
'name': 'Static Color',
'primary_speed': 0.0,
'primary_scale': 1.0,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
return palette(0), hsv
'''
},
1: {
'name': 'Static White',
'primary_speed': 0.0,
'primary_scale': 1.0,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
return (0, 0, 1), hsv
'''
},
2: {
'name': 'Static Gradient 1D',
'primary_speed': 0.0,
'primary_scale': 1.0,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
return palette(x), hsv
'''
},
3: {
'name': 'Static Gradient Mirrored 1D',
'primary_speed': 0.0,
'primary_scale': 1.0,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
return palette_mirrored(x), hsv
'''
},
10: {
'name': 'Hue Cycle 1D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
return (t + x, 1, 1), hsv
'''
},
20: {
'name': 'Hue Cycle Quantized 1D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
hue = (t + x) % 1
return (hue - (hue % 0.1666), 1, 1), hsv
'''
},
30: {
'name': 'Hue Scan 1D',
'primary_speed': 0.1,
'primary_scale': 1.0,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
return (wave_triangle(t) + x, 1, 1), hsv
'''
},
31: {
'name': 'Hue Bounce 1D',
'primary_speed': 0.1,
'primary_scale': 1.0,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
return (wave_sine(t) + x, 1, 1), hsv
'''
},
40: {
'name': 'Hue Waves 1D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
h = (x + t) * 0.5 + x + wave_sine(t)
return (h, 1, wave_sine(h + t)), hsv
'''
},
50: {
'name': 'Hue Ripples 1D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
wave1 = wave_sine(t / 4 + x)
wave2 = wave_sine(t / 8 - x)
wave3 = wave_sine(x + wave1 + wave2)
return (wave3 % 0.15 + t, 1, wave1 + wave3), hsv
'''
},
100: {
'name': 'Palette Cycle 1D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
return palette(t + x), hsv
'''
},
110: {
'name': 'Palette Cycle Mirrored 1D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
return palette_mirrored(t + x), hsv
'''
},
120: {
'name': 'Palette Cycle Quantized 1D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
t = (t + x) % 1
return palette(t - (t % (1 / palette_length()))), hsv
'''
},
130: {
'name': 'Palette Cycle Random 1D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
t = t + x
i = (t - (t % 0.2)) / 0.2
return palette(i * 0.618034), hsv
'''
},
140: {
'name': 'Palette Scan Mirrored 1D',
'primary_speed': 0.1,
'primary_scale': 1.0,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
return palette_mirrored(wave_triangle(t) + x), hsv
'''
},
141: {
'name': 'Palette Bounce Mirrored 1D',
'primary_speed': 0.1,
'primary_scale': 1.0,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
return palette_mirrored(wave_sine(t) + x), hsv
'''
},
150: { # Performance isn't as good as it could be
'name': 'Palette Waves 1D',
'primary_speed': 0.05,
'primary_scale': 1.0,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
h = (x + t) * 0.1 + x + wave_sine(t)
c = palette(wave_triangle(h))
return (c[0], c[1], wave_sine(h + t)), hsv
'''
},
160: {
'name': 'Palette Ripples 1D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
wave1 = wave_sine(t / 4 + x)
wave2 = wave_sine(t / 8 - x)
wave3 = wave_sine(x + wave1 + wave2)
c = palette(wave3 % 0.15 + t)
return (c[0], c[1], wave1 + wave3), hsv
'''
},
161: {
'name': 'Palette Ripples (Fast Cycle) 1D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
wave1 = wave_sine(t / 4 + x)
wave2 = wave_sine(t / 8 - x)
wave3 = wave_sine(x + wave1 + wave2)
c = palette(wave3 % 0.8 + t)
return (c[0], c[1], wave1 + wave3), hsv
'''
},
170: {
'name': 'Palette Plasma 2D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
v = plasma_sines(x, y, t, 1.0, 0.5, 0.5, 1.0)
return palette(wave_triangle(v)), hsv
'''
},
180: {
'name': 'Palette Fractal Plasma 2D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
v = plasma_sines_octave(x, y, t, 7, 2.0, 0.5)
return palette(wave_triangle(v)), hsv
'''
},
190: {
'name': 'Palette Twinkle 1D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
v = prev_state[2] - dt
if v <= 0:
c = palette(t + x)
return (c[0], c[1], random.random()), hsv
elif v > 0:
return (prev_state[0], prev_state[1], v), hsv
else:
return (0, 0, 0), hsv
'''
},
200: {
'name': 'Palette Perlin Noise 2D',
'primary_speed': 0.3,
'primary_scale': 1.0,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
return palette(perlin_noise_3d(x, y, t)), hsv
'''
},
300: {
'name': 'RGB Sines 1D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
return (wave_sine(t + x),
wave_sine((t + x) * 1.2),
wave_sine((t + x) * 1.4)), rgb
'''
},
310: {
'name': 'RGB Cubics 1D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
return (wave_cubic(t + x),
wave_cubic((t + x) * 1.2),
wave_cubic((t + x) * 1.4)), rgb
'''
},
320: {
'name': 'RGB Ripples 1 1D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
v0 = x + (wave_sine(t)) + wave_sine(x + 0.666 * t)
v1 = x + (wave_sine(t + 0.05)) + wave_sine(x + 0.666 * t + 0.05)
v2 = x + (wave_sine(t + 0.1)) + wave_sine(x + 0.666 * t + 0.1)
return (0.01 / (wave_triangle(v0) + 0.01), 0.01 / (wave_triangle(v1) + 0.01), 0.01 / (wave_triangle(v2) + 0.01)), rgb
'''
},
330: {
'name': 'RGB Plasma (Spectrum Sines) 2D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
v = plasma_sines(x, y, t, 1.0, 0.5, 0.5, 1.0)
return (wave_sine(v),
wave_sine(v + 0.333),
wave_sine(v + 0.666)), rgb
'''
},
340: {
'name': 'RGB Plasma (Fire Sines) 2D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
v = plasma_sines(x, y, t, 1.0, 0.5, 0.5, 1.0)
return (0.9 - wave_sine(v),
wave_sine(v + 0.333) - 0.1,
0.9 - wave_sine(v + 0.666)), rgb
'''
},
350: {
'name': 'RGB Fractal Plasma (Fire Sines) 2D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
v = plasma_sines_octave(x, y, t, 7, 2.0, 0.5)
return (1.0 - wave_sine(v),
wave_sine(v + 0.333),
1.0 - wave_sine(v + 0.666)), rgb
'''
},
360: {
'name': 'Blackbody Cycle 1D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
v = wave_triangle(t + x)
c = blackbody_to_rgb(v * v * 5500 + 1000)
return (c[0] * v, c[1] * v, c[2] * v), rgb
'''
},
}
# Secondary animations that transform finalized colors to add brightness effects
# return brightness, colorRGB
def sine_1d(t, dt, x, y, z, prev_state, in_color):
    # Color passes through; brightness is a sine wave moving along x over time.
    return in_color, driver.wave_sine(t + x)
def cubic_1d(t, dt, x, y, z, prev_state, in_color):
    # Color passes through; brightness is a cubic wave moving along x over time.
    return in_color, driver.wave_cubic(t + x)
def ramp_1d(t, dt, x, y, z, prev_state, in_color):
    # Color passes through unchanged; brightness is a sawtooth ramp of (t + x)
    # that wraps around at 1.
    brightness = (t + x) % 1
    return in_color, brightness
def bounce_linear_1d(t, dt, x, y, z, prev_state, in_color):
    # Sine brightness profile whose phase bounces back and forth at constant
    # speed (triangle wave of t).
    return in_color, driver.wave_sine(x + driver.wave_triangle(t))
def bounce_sine_1d(t, dt, x, y, z, prev_state, in_color):
    # Sine brightness profile whose phase bounces with sinusoidal easing.
    return in_color, driver.wave_sine(x + driver.wave_sine(t))
def bounce_cubic_1d(t, dt, x, y, z, prev_state, in_color):
    # Sine brightness profile whose phase bounces with cubic easing.
    return in_color, driver.wave_sine(x + driver.wave_cubic(t))
def perlin_noise_2d(t, dt, x, y, z, prev_state, in_color):
    # Brightness sampled from 3D Perlin noise, using time as the third axis.
    return in_color, driver.perlin_noise_3d(x, y, t)
def twinkle_pulse_1d(t, dt, x, y, z, prev_state, in_color):
v = prev_state[1] - dt
if v <= -0.2:
return in_color, random()
elif v > 0:
return prev_state[0], | |
<gh_stars>0
import getpass
import hashlib
import json
import os
import pkgutil
import re
import sys
import time
import typing as t
import uuid
from io import BytesIO
from itertools import chain
from os.path import basename
from os.path import join
from zlib import adler32
from .._internal import _log
from ..exceptions import NotFound
from ..http import parse_cookie
from ..security import gen_salt
from ..utils import send_file
from ..wrappers.request import Request
from ..wrappers.response import Response
from .console import Console
from .tbtools import Frame
from .tbtools import get_current_traceback
from .tbtools import render_console_html
from .tbtools import Traceback
if t.TYPE_CHECKING:
from _typeshed.wsgi import StartResponse
from _typeshed.wsgi import WSGIApplication
from _typeshed.wsgi import WSGIEnvironment
# A week
PIN_TIME = 60 * 60 * 24 * 7
def hash_pin(pin: str) -> str:
    """Return a short, stable digest of *pin* for comparison purposes.

    The salt is a fixed string so the result is deterministic across
    processes and restarts.
    """
    salted = f"{pin} added salt".encode("utf-8", "replace")
    return hashlib.sha1(salted).hexdigest()[:12]
_machine_id: t.Optional[t.Union[str, bytes]] = None
def get_machine_id() -> t.Optional[t.Union[str, bytes]]:
    """Return a semi-stable per-machine identifier, or ``None`` if none
    could be determined. The value is computed once and cached in the
    module-level ``_machine_id``.
    """
    global _machine_id
    if _machine_id is not None:
        return _machine_id
    def _generate() -> t.Optional[t.Union[str, bytes]]:
        # Try, in order: Linux machine-id/boot_id (+cgroup), macOS serial
        # number via ioreg, Windows MachineGuid from the registry.
        linux = b""
        # machine-id is stable across boots, boot_id is not.
        for filename in "/etc/machine-id", "/proc/sys/kernel/random/boot_id":
            try:
                with open(filename, "rb") as f:
                    value = f.readline().strip()
            except OSError:
                continue
            if value:
                linux += value
                break
        # Containers share the same machine id, add some cgroup
        # information. This is used outside containers too but should be
        # relatively stable across boots.
        try:
            with open("/proc/self/cgroup", "rb") as f:
                linux += f.readline().strip().rpartition(b"/")[2]
        except OSError:
            pass
        if linux:
            return linux
        # On OS X, use ioreg to get the computer's serial number.
        try:
            # subprocess may not be available, e.g. Google App Engine
            # https://github.com/pallets/werkzeug/issues/925
            from subprocess import Popen, PIPE
            dump = Popen(
                ["ioreg", "-c", "IOPlatformExpertDevice", "-d", "2"], stdout=PIPE
            ).communicate()[0]
            match = re.search(b'"serial-number" = <([^>]+)', dump)
            if match is not None:
                return match.group(1)
        except (OSError, ImportError):
            pass
        # On Windows, use winreg to get the machine guid.
        try:
            import winreg
        except ImportError:
            pass
        else:
            try:
                with winreg.OpenKey(
                    winreg.HKEY_LOCAL_MACHINE,
                    "SOFTWARE\\Microsoft\\Cryptography",
                    0,
                    winreg.KEY_READ | winreg.KEY_WOW64_64KEY,
                ) as rk:
                    guid: t.Union[str, bytes]
                    guid_type: int
                    guid, guid_type = winreg.QueryValueEx(rk, "MachineGuid")
                    # Normalize registry strings to bytes; other value
                    # types are returned as-is.
                    if guid_type == winreg.REG_SZ:
                        return guid.encode("utf-8")  # type: ignore
                    return guid
            except OSError:
                pass
        # No strategy worked on this platform.
        return None
    _machine_id = _generate()
    return _machine_id
class _ConsoleFrame:
    """Minimal stand-in for a traceback frame.

    Lets the standalone console reuse the per-frame console machinery
    without a real traceback; it always uses frame id 0.
    """
    def __init__(self, namespace: t.Dict[str, t.Any]):
        self.id = 0
        self.console = Console(namespace)
def get_pin_and_cookie_name(
    app: "WSGIApplication",
) -> t.Union[t.Tuple[str, str], t.Tuple[None, None]]:
    """Given an application object this returns a semi-stable 9 digit pin
    code and a random key. The hope is that this is stable between
    restarts to not make debugging particularly frustrating. If the pin
    was forcefully disabled this returns `None`.
    Second item in the resulting tuple is the cookie name for remembering.
    """
    pin = os.environ.get("WERKZEUG_DEBUG_PIN")
    rv = None
    num = None
    # Pin was explicitly disabled
    if pin == "off":
        return None, None
    # Pin was provided explicitly
    if pin is not None and pin.replace("-", "").isdigit():
        # If there are separators in the pin, return it directly
        if "-" in pin:
            rv = pin
        else:
            num = pin
    modname = getattr(app, "__module__", t.cast(object, app).__class__.__module__)
    username: t.Optional[str]
    try:
        # getuser imports the pwd module, which does not exist in Google
        # App Engine. It may also raise a KeyError if the UID does not
        # have a username, such as in Docker.
        username = getpass.getuser()
    except (ImportError, KeyError):
        username = None
    mod = sys.modules.get(modname)
    # This information only exists to make the cookie unique on the
    # computer, not as a security feature.
    probably_public_bits = [
        username,
        modname,
        getattr(app, "__name__", type(app).__name__),
        getattr(mod, "__file__", None),
    ]
    # This information is here to make it harder for an attacker to
    # guess the cookie name. They are unlikely to be contained anywhere
    # within the unauthenticated debug page.
    private_bits = [str(uuid.getnode()), get_machine_id()]
    # NOTE: the order and content of the updates below determine the pin
    # and cookie name users already have; changing them invalidates both.
    h = hashlib.sha1()
    for bit in chain(probably_public_bits, private_bits):
        if not bit:
            continue
        if isinstance(bit, str):
            bit = bit.encode("utf-8")
        h.update(bit)
    h.update(b"cookiesalt")
    cookie_name = f"__wzd{h.hexdigest()[:20]}"
    # If we need to generate a pin we salt it a bit more so that we don't
    # end up with the same value and generate out 9 digits
    if num is None:
        h.update(b"pinsalt")
        num = f"{int(h.hexdigest(), 16):09d}"[:9]
    # Format the pincode in groups of digits for easier remembering if
    # we don't have a result yet.
    if rv is None:
        # for/else: if no group size divides evenly, fall back to the
        # raw digit string.
        for group_size in 5, 4, 3:
            if len(num) % group_size == 0:
                rv = "-".join(
                    num[x : x + group_size].rjust(group_size, "0")
                    for x in range(0, len(num), group_size)
                )
                break
        else:
            rv = num
    return rv, cookie_name
class DebuggedApplication:
"""Enables debugging support for a given application::
from werkzeug.debug import DebuggedApplication
from myapp import app
app = DebuggedApplication(app, evalex=True)
The `evalex` keyword argument allows evaluating expressions in a
traceback's frame context.
:param app: the WSGI application to run debugged.
:param evalex: enable exception evaluation feature (interactive
debugging). This requires a non-forking server.
:param request_key: The key that points to the request object in ths
environment. This parameter is ignored in current
versions.
:param console_path: the URL for a general purpose console.
:param console_init_func: the function that is executed before starting
the general purpose console. The return value
is used as initial namespace.
:param show_hidden_frames: by default hidden traceback frames are skipped.
You can show them by setting this parameter
to `True`.
:param pin_security: can be used to disable the pin based security system.
:param pin_logging: enables the logging of the pin system.
"""
_pin: str
_pin_cookie: str
    def __init__(
        self,
        app: "WSGIApplication",
        evalex: bool = False,
        request_key: str = "werkzeug.request",
        console_path: str = "/console",
        console_init_func: t.Optional[t.Callable[[], t.Dict[str, t.Any]]] = None,
        show_hidden_frames: bool = False,
        pin_security: bool = True,
        pin_logging: bool = True,
    ) -> None:
        """Wrap *app* and, when ``pin_security`` is on, set up and
        optionally log the debugger PIN."""
        # Normalize any falsy value to None so later checks can rely on
        # "is None".
        if not console_init_func:
            console_init_func = None
        self.app = app
        self.evalex = evalex
        # Traceback frames and tracebacks are kept by id so the
        # interactive console can find them later.
        self.frames: t.Dict[int, t.Union[Frame, _ConsoleFrame]] = {}
        self.tracebacks: t.Dict[int, Traceback] = {}
        self.request_key = request_key
        self.console_path = console_path
        self.console_init_func = console_init_func
        self.show_hidden_frames = show_hidden_frames
        # Per-process secret used to sign console/traceback exchanges.
        self.secret = gen_salt(20)
        self._failed_pin_auth = 0
        self.pin_logging = pin_logging
        if pin_security:
            # Print out the pin for the debugger on standard out.
            # WERKZEUG_RUN_MAIN is set by the reloader's child process, so
            # the PIN is only logged once. Accessing self.pin computes it.
            if os.environ.get("WERKZEUG_RUN_MAIN") == "true" and pin_logging:
                _log("warning", " * Debugger is active!")
                if self.pin is None:
                    _log("warning", " * Debugger PIN disabled. DEBUGGER UNSECURED!")
                else:
                    _log("info", " * Debugger PIN: %s", self.pin)
        else:
            self.pin = None
@property
def pin(self) -> t.Optional[str]:
if not hasattr(self, "_pin"):
pin_cookie = get_pin_and_cookie_name(self.app)
self._pin, self._pin_cookie = pin_cookie # type: ignore
return self._pin
    @pin.setter
    def pin(self, value: str) -> None:
        # Setting the pin explicitly bypasses the lazy computation above.
        self._pin = value
@property
def pin_cookie_name(self) -> str:
"""The name of the pin cookie."""
if not hasattr(self, "_pin_cookie"):
pin_cookie = get_pin_and_cookie_name(self.app)
self._pin, self._pin_cookie = pin_cookie # type: ignore
return self._pin_cookie
    def debug_application(
        self, environ: "WSGIEnvironment", start_response: "StartResponse"
    ) -> t.Iterator[bytes]:
        """Run the application and conserve the traceback frames."""
        app_iter = None
        try:
            app_iter = self.app(environ, start_response)
            yield from app_iter
            if hasattr(app_iter, "close"):
                app_iter.close()  # type: ignore
        except Exception:
            # Close the wrapped iterable even on failure so resources held
            # by the application are released.
            if hasattr(app_iter, "close"):
                app_iter.close()  # type: ignore
            traceback = get_current_traceback(
                skip=1,
                show_hidden_frames=self.show_hidden_frames,
                ignore_system_exceptions=True,
            )
            # Remember every frame by id so the interactive console can
            # look them up later.
            for frame in traceback.frames:
                self.frames[frame.id] = frame
            self.tracebacks[traceback.id] = traceback
            is_trusted = bool(self.check_pin_trust(environ))
            html = traceback.render_full(
                evalex=self.evalex, evalex_trusted=is_trusted, secret=self.secret
            ).encode("utf-8", "replace")
            response = Response(html, status=500, content_type="text/html")
            try:
                yield from response(environ, start_response)
            except Exception:
                # if we end up here there has been output but an error
                # occurred. in that situation we can do nothing fancy any
                # more, better log something into the error log and fall
                # back gracefully.
                environ["wsgi.errors"].write(
                    "Debugging middleware caught exception in streamed "
                    "response at a point where response headers were already "
                    "sent.\n"
                )
                traceback.log(environ["wsgi.errors"])
def execute_command(
self, request: Request, command: str, frame: t.Union[Frame, _ConsoleFrame]
) -> Response:
"""Execute a command in a console."""
return Response(frame.console.eval(command), mimetype="text/html")
def display_console(self, request: Request) -> Response:
"""Display a standalone shell."""
if 0 not in self.frames:
if self.console_init_func is None:
ns = {}
else:
ns = dict(self.console_init_func())
ns.setdefault("app", self.app)
self.frames[0] = _ConsoleFrame(ns)
is_trusted = bool(self.check_pin_trust(request.environ))
return Response(
render_console_html(secret=self.secret, evalex_trusted=is_trusted),
mimetype="text/html",
)
def get_resource(self, request: Request, filename: str) -> Response:
"""Return a static resource from the shared folder."""
path = join("shared", basename(filename))
try:
data = pkgutil.get_data(__package__, path)
except OSError:
return NotFound() # type: ignore[return-value]
else:
if data is None:
return | |
from __future__ import absolute_import, division, print_function
from nfldb.db import _upsert
class Entity (object):
"""
This is an abstract base class that handles most of the SQL
plumbing for entities in `nfldb`. Its interface is meant to be
declarative: specify the schema and let the methods defined here
do the SQL generation work. However, it is possible to override
methods (like `nfldb.Entity._sql_field`) when more customization
is desired.
Note that many of the methods defined here take an `aliases`
argument. This should be a dictionary mapping table name (defined
in `nfldb.Entity._sql_tables`) to some custom prefix. If it
isn't provided, then the table name itself is used.
"""
# This class doesn't introduce any instance variables, but we need
# to declare as such, otherwise all subclasses will wind up with a
# `__dict__`. (Thereby negating the benefit of using __slots__.)
__slots__ = []
_sql_tables = {}
"""
A dictionary with four keys: `primary`, `tables`, `managed` and
`derived`.
The `primary` key should map to a list of primary key
fields that correspond to a shared minimal subset of primary keys
in all tables that represent this entity. (i.e., It should be the
foreign key that joins all tables in the representation together.)
The `tables` key should map to an association list of table names
that map to lists of fields for that table. The lists of fields for
every table should be *disjoint*: no two tables may share a field
name in common (outside of the primary key).
The `managed` key should be a list of tables that are managed
directly by `nfldb`. `INSERT`, `UPDATE` and `DELETE` queries
will be generated appropriately. (Tables not in this list are
assumed to be maintained by the database itself, e.g., they are
actually views or materialized views maintained by triggers.)
The `derived` key should map to a list of *computed* fields. These
are fields that aren't directly stored in the table, but can be
computed from combining columns in the table (like `offense_tds` or
`points`). This API will expose such fields as regular SQL columns
in the API, and will handle writing them for you in `WHERE` and
`ORDER BY` statements. The actual implementation of each computed
field should be in an entity's `_sql_field` method (overriding the
one defined on `nfldb.Entity`). The derived fields must be listed
here so that the SQL generation code is aware of them.
"""
@classmethod
def _sql_columns(cls):
"""
Returns all columns defined for this entity. Every field
corresponds to a single column in a table.
The first `N` columns returned correspond to this entity's
primary key, where `N` is the number of columns in the
primary key.
"""
cols = cls._sql_tables['primary'][:]
for table, table_cols in cls._sql_tables['tables']:
cols += table_cols
return cols
@classmethod
def sql_fields(cls):
"""
Returns a list of all SQL fields across all tables for this
entity, including derived fields. This method can be used
in conjunction with `nfldb.Entity.from_row_tuple` to quickly
create new `nfldb` objects without every constructing a dict.
"""
if not hasattr(cls, '_cached_sql_fields'):
cls._cached_sql_fields = cls._sql_columns()
cls._cached_sql_fields += cls._sql_tables['derived']
return cls._cached_sql_fields
@classmethod
def from_row_dict(cls, db, row):
"""
Introduces a new entity object from a full SQL row result from
the entity's tables. (i.e., `row` is a dictionary mapping
column to value.) Note that the column names must be of the
form '{entity_name}_{column_name}'. For example, in the `game`
table, the `gsis_id` column must be named `game_gsis_id` in
`row`.
"""
obj = cls(db)
seta = setattr
prefix = cls._sql_primary_table() + '_'
slice_from = len(prefix)
for k in row:
if k.startswith(prefix):
seta(obj, k[slice_from:], row[k])
return obj
@classmethod
def from_row_tuple(cls, db, t):
"""
Given a tuple `t` corresponding to a result from a SELECT query,
this will construct a new instance for this entity. Note that
the tuple `t` must be in *exact* correspondence with the columns
returned by `nfldb.Entity.sql_fields`.
"""
cols = cls.sql_fields()
seta = setattr
obj = cls(db)
for i, field in enumerate(cols):
seta(obj, field, t[i])
return obj
    @classmethod
    def _sql_from(cls, aliases=None):
        """
        Return a valid SQL `FROM table AS alias [LEFT JOIN extra_table
        ...]` string for this entity.
        """
        # This is a little hokey. Pick the first table as the 'FROM' table.
        # Subsequent tables are joined.
        from_table = cls._sql_primary_table()
        as_from_table = cls._sql_table_alias(from_table, aliases)
        extra_tables = ''
        # Join every non-primary table so all of the entity's columns are
        # reachable from a single FROM clause.
        for table, _ in cls._sql_tables['tables'][1:]:
            extra_tables += cls._sql_join_to(cls,
                                             from_table=from_table,
                                             to_table=table,
                                             from_aliases=aliases,
                                             to_aliases=aliases)
        return '''
            FROM {from_table} AS {as_from_table}
            {extra_tables}
        '''.format(from_table=from_table, as_from_table=as_from_table,
                   extra_tables=extra_tables)
@classmethod
def _sql_select_fields(cls, fields, wrap=None, aliases=None):
"""
Returns correctly qualified SELECT expressions for each
field in `fields` (namely, a field may be a derived field).
If `wrap` is a not `None`, then it is applied to the result
of calling `cls._sql_field` on each element in `fields`.
All resulting fields are aliased with `AS` to correspond to
the name given in `fields`. Namely, this makes table aliases
opaque to the resulting query, but this also disallows
selecting columns of the same name from multiple tables.
"""
if wrap is None:
wrap = lambda x: x
sql = lambda f: wrap(cls._sql_field(f, aliases=aliases))
entity_prefix = cls._sql_primary_table()
return ['%s AS %s_%s' % (sql(f), entity_prefix, f) for f in fields]
@classmethod
def _sql_relation_distance(cls_from, cls_to):
primf = set(cls_from._sql_tables['primary'])
primt = set(cls_to._sql_tables['primary'])
if len(primf.intersection(primt)) == 0:
return None
outsiders = primf.difference(primt).union(primt.difference(primf))
if len(primf) > len(primt):
return -len(outsiders)
else:
return len(outsiders)
@classmethod
def _sql_join_all(cls_from, cls_tos):
"""
Given a list of sub classes `cls_tos` of `nfldb.Entity`,
produce as many SQL `LEFT JOIN` clauses as is necessary so
that all fields in all entity types given are available for
filtering.
Unlike the other join functions, this one has no alias support
or support for controlling particular tables.
The key contribution of this function is that it knows how to
connect a group of tables correctly. e.g., If the group of
tables is `game`, `play` and `play_player`, then `game` and
`play` will be joined and `play` and `play_player` will be
joined. (Instead of `game` and `play_player` or some other
erronoeous combination.)
In essence, each table is joined with the least general table
in the group.
"""
assert cls_from not in cls_tos, \
'cannot join %s with itself with `sql_join_all`' % cls_from
def dist(f, t):
return f._sql_relation_distance(t)
def relation_dists(froms, tos):
return filter(lambda p:(f:=p[0], t:=p[1], d:=p[2], d is not None),
((f, t, dist(f, t)) for f in froms for t in tos))
def more_general(froms, tos):
return filter(lambda p:(f:=p[0], t:=p[1], d:=p[2], d < 0), relation_dists(froms, tos))
def more_specific(froms, tos):
return filter(lambda p:(f:=p[0], t:=p[1], d:=p[2], d > 0), relation_dists(froms, tos))
joins = ''
froms, tos = set([cls_from]), set(cls_tos)
while len(tos) > 0:
general = more_general(froms, tos)
specific = more_specific(froms, tos)
assert len(general) > 0 or len(specific) > 0, \
'Cannot compute distances between sets. From: %s, To: %s' \
% (froms, tos)
def add_join(f, t):
tos.discard(t)
froms.add(t)
return f._sql_join_to_all(t)
if general:
f, t, _ = max(general, key=lambda p:(f:=p[0], t:=p[1], d:=p[2], d))
joins += add_join(f, t)
if specific:
f, t, _ = min(specific, key=lambda p:(f:=p[0], t:=p[1], d:=p[2], d))
joins += add_join(f, t)
return joins
    @classmethod
    def _sql_join_to_all(cls_from, cls_to, from_table=None,
                         from_aliases=None, to_aliases=None):
        """
        Given a **sub class** `cls_to` of `nfldb.Entity`, produce
        as many SQL `LEFT JOIN` clauses as is necessary so that all
        fields in `cls_to.sql_fields()` are available for filtering.
        See the documentation for `nfldb.Entity._sql_join_to` for
        information on the parameters.
        """
        to_primary = cls_to._sql_primary_table()
        # First join the two entities through the target's primary table...
        joins = cls_from._sql_join_to(cls_to,
                                      from_table=from_table,
                                      to_table=to_primary,
                                      from_aliases=from_aliases,
                                      to_aliases=to_aliases)
        # ...then join the target's remaining tables to its primary table
        # so every one of its fields is reachable.
        for table, _ in cls_to._sql_tables['tables'][1:]:
            joins += cls_to._sql_join_to(cls_to,
                                         from_table=to_primary,
                                         to_table=table,
                                         from_aliases=to_aliases,
                                         to_aliases=to_aliases)
        return joins
@classmethod
def _sql_join_to(cls_from, cls_to,
from_table=None, to_table=None,
from_aliases=None, to_aliases=None):
"""
Given a **sub class** `cls_to` of `nfldb.Entity`, produce
a SQL `LEFT JOIN` clause.
If the primary keys in `cls_from` and `cls_to` have an empty
intersection, then an assertion error is raised.
Note that the first table defined for each of `cls_from` and
`cls_to` is used to join them if `from_table` or `to_table`
are `None`.
`from_aliases` are only applied to the `from` tables and
`to_aliases` | |
settings for switch devices:")
_LOGGER.info("- Devices (%s):", dev_id)
switch_change = False
for new_state in [False, True, False]:
_LOGGER.info("- Switching %s", new_state)
try:
switch_change = await smile.set_switch_state(
dev_id, members, model, new_state
)
except (
pw_exceptions.ErrorSendingCommandError,
pw_exceptions.ResponseError,
):
if unhappy:
_LOGGER.info(" + failed as expected")
else: # pragma: no cover
_LOGGER.info(" - failed unexpectedly")
raise self.UnexpectedError
return switch_change
    @pytest.mark.asyncio
    async def tinker_thermostat_temp(self, smile, loc_id, unhappy=False):
        """Toggle temperature to test functionality.

        With ``unhappy=True`` the command errors raised by the (timeouting)
        backend are expected; otherwise they fail the test.
        """
        _LOGGER.info("Asserting modifying settings in location (%s):", loc_id)
        # Two distinct setpoints; each call must report a change.
        for new_temp in [20.0, 22.9]:
            _LOGGER.info("- Adjusting temperature to %s", new_temp)
            try:
                temp_change = await smile.set_temperature(loc_id, new_temp)
                assert temp_change
                _LOGGER.info(" + worked as intended")
            except (
                pw_exceptions.ErrorSendingCommandError,
                pw_exceptions.ResponseError,
            ):
                if unhappy:
                    _LOGGER.info(" + failed as expected")
                else:  # pragma: no cover
                    _LOGGER.info(" - failed unexpectedly")
                    raise self.UnexpectedError
    @pytest.mark.asyncio
    async def tinker_thermostat_preset(self, smile, loc_id, unhappy=False):
        """Toggle preset to test functionality."""
        # A leading "!" marks a negative test: the preset is bogus and the
        # call is expected to report no change.
        for new_preset in ["asleep", "home", "!bogus"]:
            assert_state = True
            warning = ""
            if new_preset[0] == "!":
                assert_state = False
                warning = " Negative test"
                new_preset = new_preset[1:]
            _LOGGER.info("%s", f"- Adjusting preset to {new_preset}{warning}")
            try:
                preset_change = await smile.set_preset(loc_id, new_preset)
                assert preset_change == assert_state
                _LOGGER.info(" + worked as intended")
            except (
                pw_exceptions.ErrorSendingCommandError,
                pw_exceptions.ResponseError,
            ):
                if unhappy:
                    _LOGGER.info(" + failed as expected")
                else:  # pragma: no cover
                    _LOGGER.info(" - failed unexpectedly")
                    raise self.UnexpectedError
@pytest.mark.asyncio
async def tinker_thermostat_schema(
self, smile, loc_id, good_schemas=None, unhappy=False
):
if good_schemas != []:
good_schemas.append("!VeryBogusSchemaNameThatNobodyEverUsesOrShouldUse")
for new_schema in good_schemas:
assert_state = True
warning = ""
if new_schema[0] == "!":
assert_state = False
warning = " Negative test"
new_schema = new_schema[1:]
_LOGGER.info("- Adjusting schedule to %s", f"{new_schema}{warning}")
try:
schema_change = await smile.set_schedule_state(
loc_id, new_schema, "auto"
)
assert schema_change == assert_state
_LOGGER.info(" + failed as intended")
except (
pw_exceptions.ErrorSendingCommandError,
pw_exceptions.ResponseError,
):
if unhappy:
_LOGGER.info(" + failed as expected before intended failure")
else: # pragma: no cover
_LOGGER.info(" - succeeded unexpectedly for some reason")
raise self.UnexpectedError
else: # pragma: no cover
_LOGGER.info("- Skipping schema adjustments")
    @pytest.mark.asyncio
    async def tinker_thermostat(self, smile, loc_id, good_schemas=None, unhappy=False):
        """Toggle various climate settings to test functionality."""
        # Delegate to the focused helpers; "Weekschema" is the fallback
        # schedule name when the caller supplies none.
        if good_schemas is None: # pragma: no cover
            good_schemas = ["Weekschema"]
        await self.tinker_thermostat_temp(smile, loc_id, unhappy)
        await self.tinker_thermostat_preset(smile, loc_id, unhappy)
        await self.tinker_thermostat_schema(smile, loc_id, good_schemas, unhappy)
    @pytest.mark.asyncio
    async def test_connect_legacy_anna(self):
        """Test a legacy Anna device."""
        # testdata is a dictionary with key ctrl_id_dev_id => keys:values
        testdata = {
            # Anna
            "0d266432d64443e283b5d708ae98b455": {
                "attributes": {
                    "available_schemas": ["Thermostat schedule"],
                    "selected_schema": "Thermostat schedule",
                },
                "last_used": "Thermostat schedule",
                "location": 0,
                "presets": {
                    "asleep": [19.0, 0],
                    "away": [19.0, 0],
                    "home": [20.0, 0],
                    "no_frost": [10.0, 0],
                    "vacation": [15.0, 0],
                },
                "active_preset": "home",
                "preset_modes": ["away", "vacation", "asleep", "home", "no_frost"],
                "schedule_temperature": 20.0,
                "sensors": [
                    {"id": "illuminance", "state": 151},
                    {"id": "setpoint", "state": 20.5},
                    {"id": "temperature", "state": 20.4},
                ],
            },
            # Central
            "04e4cbfe7f4340f090f85ec3b9e6a950": {
                "heating_state": True,
                "sensors": [
                    {"id": "water_temperature", "state": 23.6},
                    {"id": "intended_boiler_temperature", "state": 17.0},
                    {"id": "modulation_level", "state": 0.0},
                    {"id": "return_temperature", "state": 21.7},
                    {"id": "water_pressure", "state": 1.2},
                ],
            },
        }
        self.smile_setup = "legacy_anna"
        server, smile, client = await self.connect_wrapper()
        assert smile.smile_hostname is None
        _LOGGER.info("Basics:")
        _LOGGER.info(" # Assert type = thermostat")
        assert smile.smile_type == "thermostat"
        _LOGGER.info(" # Assert version")
        assert smile.smile_version[0] == "1.8.0"
        _LOGGER.info(" # Assert legacy")
        assert smile._smile_legacy  # pylint: disable=protected-access
        _LOGGER.info(" # Assert master thermostat")
        assert smile.single_master_thermostat()
        assert not smile.notifications
        await self.device_test(smile, testdata)
        assert smile._active_device_present
        await self.tinker_thermostat(
            smile,
            "c34c6864216446528e95d88985e714cc",
            good_schemas=[
                "Thermostat schedule",
            ],
        )
        await smile.close_connection()
        await self.disconnect(server, client)
        # Reconnect with forced timeouts to exercise the unhappy paths.
        server, smile, client = await self.connect_wrapper(raise_timeout=True)
        await self.tinker_thermostat(
            smile,
            "c34c6864216446528e95d88985e714cc",
            good_schemas=[
                "Thermostat schedule",
            ],
            unhappy=True,
        )
        await smile.close_connection()
        await self.disconnect(server, client)
    @pytest.mark.asyncio
    async def test_connect_legacy_anna_2(self):
        """Test a legacy Anna device."""
        # testdata is a dictionary with key ctrl_id_dev_id => keys:values
        # testdata={
        #     'ctrl_id': { 'outdoor+temp': 20.0, }
        #     'ctrl_id:dev_id': { 'type': 'thermostat', 'battery': None, }
        # }
        testdata = {
            # Anna
            "9e7377867dc24e51b8098a5ba02bd89d": {
                "sensors": [
                    {"id": "illuminance", "state": 19.5},
                    {"id": "setpoint", "state": 15.0},
                    {"id": "temperature", "state": 21.4},
                ],
            },
            # Central
            "ea5d8a7177e541b0a4b52da815166de4": {
                "sensors": [{"id": "water_pressure", "state": 1.7}]
            },
        }
        self.smile_setup = "legacy_anna_2"
        server, smile, client = await self.connect_wrapper()
        assert smile.smile_hostname is None
        _LOGGER.info("Basics:")
        _LOGGER.info(" # Assert type = thermostat")
        assert smile.smile_type == "thermostat"
        _LOGGER.info(" # Assert version")
        assert smile.smile_version[0] == "1.8.0"
        _LOGGER.info(" # Assert legacy")
        assert smile._smile_legacy  # pylint: disable=protected-access
        _LOGGER.info(" # Assert master thermostat")
        assert smile.single_master_thermostat()
        assert not smile.notifications
        await self.device_test(smile, testdata)
        assert smile._active_device_present
        # NOTE(review): the location id below does not appear in the
        # testdata above and matches the one from test_connect_legacy_anna
        # -- confirm it belongs to the legacy_anna_2 fixture.
        await self.tinker_thermostat(
            smile,
            "c34c6864216446528e95d88985e714cc",
            good_schemas=[
                "Thermostat schedule",
            ],
        )
        await smile.close_connection()
        await self.disconnect(server, client)
        # Reconnect with forced timeouts to exercise the unhappy paths.
        server, smile, client = await self.connect_wrapper(raise_timeout=True)
        await self.tinker_thermostat(
            smile,
            "c34c6864216446528e95d88985e714cc",
            good_schemas=[
                "Thermostat schedule",
            ],
            unhappy=True,
        )
        await smile.close_connection()
        await self.disconnect(server, client)
@pytest.mark.asyncio
async def test_connect_smile_p1_v2(self):
"""Test a legacy P1 device."""
# testdata dictionary with key ctrl_id_dev_id => keys:values
testdata = {
# Gateway / P1 itself
"938696c4bcdb4b8a9a595cb38ed43913": {
"location": "938696c4bcdb4b8a9a595cb38ed43913",
"sensors": [
{"id": "electricity_consumed_point", "state": 456.0},
{"id": "net_electricity_point", "state": 456.0},
{"id": "gas_consumed_cumulative", "state": 584.431},
{"id": "electricity_produced_peak_cumulative", "state": 1296.136},
{
"id": "electricity_produced_off_peak_cumulative",
"state": 482.598,
},
{"id": "net_electricity_cumulative", "state": 1019.161},
],
}
}
self.smile_setup = "smile_p1_v2"
server, smile, client = await self.connect_wrapper()
assert smile.smile_hostname == "smile000000"
_LOGGER.info("Basics:")
_LOGGER.info(" # Assert type = power")
assert smile.smile_type == "power"
_LOGGER.info(" # Assert version")
assert smile.smile_version[0] == "2.5.9"
_LOGGER.info(" # Assert legacy")
assert smile._smile_legacy # pylint: disable=protected-access
_LOGGER.info(" # Assert no master thermostat")
assert smile.single_master_thermostat() is None # it's not a thermostat :)
assert not smile.notifications
await self.device_test(smile, testdata)
await smile.close_connection()
await self.disconnect(server, client)
server, smile, client = await self.connect_wrapper(raise_timeout=True)
    @pytest.mark.asyncio
    async def test_connect_smile_p1_v2_2(self):
        """Test another legacy P1 device."""
        # testdata dictionary with key ctrl_id_dev_id => keys:values
        testdata = {
            # Gateway / P1 itself
            "199aa40f126840f392983d171374ab0b": {
                "sensors": [
                    {"id": "electricity_consumed_point", "state": 456.0},
                    {"id": "net_electricity_point", "state": 456.0},
                    {"id": "gas_consumed_cumulative", "state": 584.431},
                    {"id": "electricity_produced_peak_cumulative", "state": 1296.136},
                ]
            }
        }
        self.smile_setup = "smile_p1_v2_2"
        server, smile, client = await self.connect_wrapper()
        assert smile.smile_hostname == "smile000000"
        _LOGGER.info("Basics:")
        _LOGGER.info(" # Assert type = power")
        assert smile.smile_type == "power"
        _LOGGER.info(" # Assert version")
        assert smile.smile_version[0] == "2.5.9"
        _LOGGER.info(" # Assert legacy")
        assert smile._smile_legacy  # pylint: disable=protected-access
        _LOGGER.info(" # Assert no master thermostat")
        assert smile.single_master_thermostat() is None  # it's not a thermostat :)
        assert not smile.notifications
        await self.device_test(smile, testdata)
        await smile.close_connection()
        await self.disconnect(server, client)
    @pytest.mark.asyncio
    async def test_connect_anna_v4(self):
        """Test an Anna firmware 4 setup without a boiler."""
        # testdata is a dictionary with key ctrl_id_dev_id => keys:values
        # testdata={
        #     'ctrl_id': { 'outdoor+temp': 20.0, }
        #     'ctrl_id:dev_id': { 'type': 'thermostat', 'battery': None, }
        # }
        testdata = {
            # Anna
            "01b85360fdd243d0aaad4d6ac2a5ba7e": {
                "selected_schedule": None,
                "active_preset": "home",
                "sensors": [{"id": "illuminance", "state": 60.0}],
            },
            # Central
            "cd0e6156b1f04d5f952349ffbe397481": {
                "heating_state": True,
                "binary_sensors": [
                    {
                        "id": "flame_state",
                        "state": True,
                        "icon": pw_constants.FLAME_ICON,
                    }
                ],
                "sensors": [
                    {"id": "water_pressure", "state": 2.1},
                    {"id": "water_temperature", "state": 52.0},
                ],
            },
            "0466eae8520144c78afb29628384edeb": {
                "sensors": [{"id": "outdoor_temperature", "state": 7.44}]
            },
        }
        self.smile_setup = "anna_v4"
        server, smile, client = await self.connect_wrapper()
        assert smile.smile_hostname == "smile000000"
        _LOGGER.info("Basics:")
        _LOGGER.info(" # Assert type = thermostat")
        assert smile.smile_type == "thermostat"
        _LOGGER.info(" # Assert version")
        assert smile.smile_version[0] == "4.0.15"
        _LOGGER.info(" # Assert no legacy")
        assert not smile._smile_legacy  # pylint: disable=protected-access
        _LOGGER.info(" # Assert master thermostat")
        assert smile.single_master_thermostat()
        assert not smile.notifications
        await self.device_test(smile, testdata)
        assert smile._active_device_present
        await self.tinker_thermostat(
            smile,
            "eb5309212bf5407bb143e5bfa3b18aee",
            good_schemas=["Standaard", "Thuiswerken"],
        )
        await smile.close_connection()
        await self.disconnect(server, client)
        # Reconnect with forced timeouts to exercise the unhappy paths.
        server, smile, client = await self.connect_wrapper(raise_timeout=True)
        await self.tinker_thermostat(
            smile,
            "eb5309212bf5407bb143e5bfa3b18aee",
            good_schemas=["Standaard", "Thuiswerken"],
            unhappy=True,
        )
        await smile.close_connection()
        await self.disconnect(server, client)
    @pytest.mark.asyncio
    async def test_connect_anna_v4_no_tag(self):
        """Test an Anna firmware 4 setup without a boiler - no presets."""
        self.smile_setup = "anna_v4_no_tag"
        server, smile, client = await self.connect_wrapper()
        assert smile.smile_hostname == "smile000000"
        _LOGGER.info("Basics:")
        _LOGGER.info(" # Assert type = thermostat")
        assert smile.smile_type == "thermostat"
        _LOGGER.info(" # Assert version")
        assert smile.smile_version[0] == "4.0.15"
        _LOGGER.info(" # Assert no legacy")
        assert not smile._smile_legacy  # pylint: disable=protected-access
        _LOGGER.info(" # Assert master thermostat")
        assert smile.single_master_thermostat()
        await self.tinker_thermostat(
            smile,
            "eb5309212bf5407bb143e5bfa3b18aee",
            good_schemas=["Standaard", "Thuiswerken"],
        )
        await smile.close_connection()
        await self.disconnect(server, client)
        # Reconnect with forced timeouts to exercise the unhappy paths.
        server, smile, client = await self.connect_wrapper(raise_timeout=True)
        await self.tinker_thermostat(
            smile,
            "eb5309212bf5407bb143e5bfa3b18aee",
            good_schemas=["Standaard", "Thuiswerken"],
            unhappy=True,
        )
        await smile.close_connection()
        await self.disconnect(server, client)
@pytest.mark.asyncio
async def test_connect_anna_without_boiler_fw3(self):
"""Test an Anna firmware 3 without a boiler."""
# testdata is a dictionary with key ctrl_id_dev_id => keys:values
# testdata={
# 'ctrl_id': { 'outdoor+temp': 20.0, }
# 'ctrl_id:dev_id': { 'type': | |
data
scaler.fit(train_data[di+smoothing_window_size:,:])
train_data[di+smoothing_window_size:,:] = scaler.transform(train_data[di+smoothing_window_size:,:])
# NOTE(review): this script displays its OWN upcoming source lines via
# print_code(lines[lineno():lineno()+N]); keep line counts inside those
# windows unchanged or the on-screen listings will drift.
# Reshape both train and test data
train_data = train_data.reshape(-1)  # flatten (n, 1) column vector to 1-D
# Normalize test data
test_data = scaler.transform(test_data).reshape(-1)  # scaler fitted on train windows above
print (textwrap.dedent("""\
The test and training data is now normalized. To show this off lets take a
quick peak at a few data points
"""))
input ('[Press enter to continue]\n########################################################\n')
print_code(lines[lineno():lineno()+8])
print (df.iloc[[4058]]),
print (train_data[[4058]])
print (df.iloc[[4059]]),
print (train_data[[4059]])
print (df.iloc[[6985]]),
print (train_data[[6985]])
print (df.iloc[[7067]]),
print (train_data[[7067]])
print('')
print (textwrap.dedent("""\
Here we can see just how effetive batch normalization is at applying context and
scale to the data in a meaningful way. With the drop from $3.44 to $1.85 being
represented as 0.75 to 0.28, and a gain from $72.49 to $90.32 being represented
as 0.87 to 0.98.
Now before we can get onto different predictions methods we have to perfrom one
last data preparation step. We will perform exponential moving average smoothing
to just the training data.
This will smooth over the day to day roughness of the stock data effectively
removing the noise. This will allow the algorithms we use to more easily see
trends over longer periods.
"""))
input ('[Press enter to continue]\n########################################################\n')
print_code(lines[lineno():lineno()+10])
# Now perform exponential moving average smoothing
# So the data will have a smoother curve than the original ragged data
EMA = 0.0
gamma = 0.1  # smoothing factor: weight of the newest observation
for ti in range(cutoff):
    EMA = gamma*train_data[ti] + (1-gamma)*EMA
    train_data[ti] = EMA  # overwrite train series in place with smoothed values
# Used for visualization and test purposes
all_mid_data = np.concatenate([train_data,test_data],axis=0)
print (textwrap.dedent("""\
With the data smoothed we can explore the simplist form of stock prediction,
the Standard Average.
The Standard Average predicts the value of a stock 1 day in the future by
averaging all prices observed within an arbitrarily specified window.
Put simply...
The price at t + 1 is the average of all prices from t to t - N
The implementation of such an algorithm is very simple...
"""))
input ('[Press enter to continue]\n########################################################\n')
print_code(lines[lineno():lineno()+19])
# Standard Average: One day ahead predictions
window_size = 100
N = train_data.size
std_avg_predictions = []
std_avg_x = []
mse_errors = []
for pred_idx in range(window_size,N):
    if pred_idx >= N:  # NOTE(review): never true inside range(window_size, N) — dead branch
        date = dt.datetime.strptime(k, '%Y-%m-%d').date() + dt.timedelta(days=1)  # NOTE(review): 'k' is undefined here; would crash if reached
    else:
        date = df.loc[pred_idx,'Date']
    std_avg_predictions.append(np.mean(train_data[pred_idx-window_size:pred_idx]))
    mse_errors.append((std_avg_predictions[-1]-train_data[pred_idx])**2)
    std_avg_x.append(date)
print('MSE error for standard averaging: %.5f'%(0.5*np.mean(mse_errors)))
print('')
print (textwrap.dedent("""\
Taking a look at the MSE error, we can see that this model is relatively accurate.
The major downside though is that model can only accurately predict prices a single
day in the future. This is not very useful, especially considering that this model
can't predict huge jump or dips in value.
Let's take a peak at a comperative graph
"""))
input ('[Press enter to open graph]\n########################################################\n')
print_code(lines[lineno():lineno()+9])
plt.figure(figsize = (18,9))
plt.plot(range(df.shape[0]),all_mid_data,color='b',label='True')
plt.plot(range(window_size,N),std_avg_predictions,color='orange',label='Prediction')
#plt.xticks(range(0,df.shape[0],50),df['Date'].loc[::50],rotation=45)
plt.xlabel('Date')
plt.ylabel('Mid Price')
plt.legend(fontsize=18)
plt.axvline(x=(cutoff-1))  # mark the train/test split point
plt.show(block=False)
plt.savefig('fig2.png')
print (textwrap.dedent("""\
Next we will look at a more sophisticated alogrithm called Exponential Moving
Average (EMA). To calculate the prediction... (_of_ representing subscript)
x_of_(t+1) = EMA_of_t = gamma * EMA_of_(t-1) + (1 - gamma) * x_of_t
Where EMA_of_0 = 0
The Exponential Moving Average is the Standard Average, but with an extra moving
part. The weight of the most recent prediction is decided by gamma. This scales
the impact of recent prices to be much more influential on predictions. This
fixes the issue of the algorithm not being able to predict or keep up with
sudden jumps and dips in stock value.
"""))
input ('[Press enter to continue\n########################################################\n')
print_code(lines[lineno():lineno()+22])
# Exponential Averaging
window_size = 100
N = train_data.size
run_avg_predictions = []
run_avg_x = []
mse_errors = []
running_mean = 0.0
run_avg_predictions.append(running_mean)
decay = 0.5  # weight kept from the previous running mean
for pred_idx in range(1,N):
    running_mean = running_mean*decay + (1.0-decay)*train_data[pred_idx-1]
    run_avg_predictions.append(running_mean)
    mse_errors.append((run_avg_predictions[-1]-train_data[pred_idx])**2)
    run_avg_x.append(date)  # NOTE(review): 'date' is stale from the previous loop; likely meant df.loc[pred_idx,'Date']
print('MSE error for EMA averaging: %.5f'%(0.5*np.mean(mse_errors)))
print ('')
print (textwrap.dedent("""\
As you can see the error for EMA is several magnitudes smaller than that of
Standard Averaging.
Lets plot the EMA results.
"""))
input ('[Press enter view graph\n########################################################\n')
print_code(lines[lineno():lineno()+8])
plt.figure(figsize = (18,9))
plt.plot(range(df.shape[0]),all_mid_data,color='b',label='True')
plt.plot(range(0,N),run_avg_predictions,color='orange', label='Prediction')
#plt.xticks(range(0,df.shape[0],50),df['Date'].loc[::50],rotation=45)
plt.xlabel('Date')
plt.ylabel('Mid Price')
plt.legend(fontsize=18)
plt.show()  # blocking here, unlike fig2 — window must be closed to continue
plt.savefig('fig3.png')
print (textwrap.dedent("""\
Zoomed out you might not have been able to tell that the 'True' prices for
the training data was even graphed. It was, the model was just so accurate
that while zoomed out the two graphs appear to be completely overlapped.
While this model does seem impressive at being able to calculate a near
exact copy of the model, it can only calculate a given stock value one
day in advance which is not very useful. In fact, the way the algorithm is
setup, trying to predict values any further than a single step (a day
in this case) gives you the same answer for all future predictions.
What would be useful is a model that can predict any arbitrary number of steps
into the future. Luckily there does exist a model that can do this...
The Long Short-Term Memory Model (LSTM)
"""))
input ('[Press enter to continue\n########################################################\n')
print (textwrap.dedent("""\
Long Short-Term Memory Models are extreamly powerful at making predictions
based upon a time-series and can predict any number of steps into the future.
The exact accuracy and power of a given LSTM model though is very dependent
on its fine-tuning for a given application, thus falling heavily on the
programmer implementing it.
But before we get to all of that, lets break down what exactly is an LSTM.
"""))
input ('[Press enter to continue\n########################################################\n')
# TODO: have NN_example print here
print (textwrap.dedent("""\
An LSTM is a type of Neural Network (NN), a Recurrent Neural Network(RNN)
to be exact. NN are are effectively machine brains that are designed to
complete one abstract task that conventional programming cannot solve.
A common example is identifying handwritten numbers.
While it sounds simple even some of the best NNs only have an accuracy rate
of 98%. And at a huge computational overhead. This is because neural networks
are a network of connections between layers that contain thousands upon thousands
of nodes. These nodes are either an input, output, or some simple arithmatic
expression. The power of NN are breaking down complex tasks into a network of
simple arithmetic expressions. The output of these expressions are the weighted
by a bias that is calulated when the NN is trained. These biases and training
are an optimization problem that incrementaly makes the NN more accurate.
A NN has the issue of forgetting its output as soon as it finishes its
calulations. It can't use previously crunched data to influence it performance
outside of the training process. While this isn't an issue for identifying
handwritten numbers, it would be for predicting stock prices. You can't train
an NN with live data to have it adapt to new information unless you design it
that way, which is exactly what a Recurrent Neural Network is.
"""))
input ('[Press enter to continue\n########################################################\n')
# TODO insert RNN_ref here
print (textwrap.dedent("""\
RNN are designed with a memory of sorts that allow it to adjust its performance
based on data recieved during testing. This is essential for predicting stock
data especially if you want your predictions to be able to update without
retraining the whole system.
That is why LSTMs are a type of RNN.
"""))
input ('[Press enter to continue\n########################################################\n')
# TODO insert LSTM_diag here
print (textwrap.dedent("""\
An LSTM is comprised of 5 components:
*Cell State = The internal memory of a cell for both the short and long
term memories.
*Hidden State = The output state calculated from the current input,
current cell input, and the previous hidden state.
This is used to predict the future stock prices.
Can decide to retrive either both short and long
term memory or just one of the two.
*Input Gate = Determines the amount of information from the input state
that flows into the current cell state.
*Forget Gate = Determines the amount of information from the input state
and previous cell state flows into the current cell
state.
*Output Gate = Determines how much information from the current state
flows into the hidden state
These 5 components form modules that an LSTM is comprised of. While these
technical descriptions each component are useful at understanding what each
part is responsible for, it understates the ultimate goal of these components
within the module.
"""))
input ('[Press enter to continue\n########################################################\n')
# TODO insert LSTM_CORE_DIAG.png here
print (textwrap.dedent("""\
The Cell state as shown in the image is the bottom line. The best explaination
I've found for the Cell State is that it's like a converyor belt.
"It runs straight down the entire chain, with only some minor linear interactions.
It’s very easy for information to just flow along it unchanged."
The LSTM has the ability to add and remove information from this conveyor belt,
thats where the gates come into play.
"""))
input ('[Press enter to continue\n########################################################\n')
print (textwrap.dedent("""\
Let us get to building a Long Short-Term Memory Model. The first thing we | |
'''
(c) Copyright 2021
All rights reserved
Programs written by <NAME>
Department of Computer Science
New Jersey Institute of Technology
University Heights, Newark, NJ 07102, USA
Permission to use, copy, modify, and distribute this
software and its documentation for any purpose and without
fee is hereby granted, provided that this copyright
notice appears in all copies. Programmer(s) makes no
representations about the suitability of this
software for any purpose. It is provided "as is" without
express or implied warranty.
@author: <NAME>
'''
from __future__ import division
import warnings
warnings.filterwarnings('ignore')
import json
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import cross_val_score
from sklearn.metrics import multilabel_confusion_matrix
import numpy as np
import sys
import time
from contextlib import contextmanager
from sklearn.model_selection import KFold
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.neural_network import MLPClassifier
from sklearn_extensions.extreme_learning_machines.elm import GenELMClassifier
from sklearn_extensions.extreme_learning_machines.random_layer import RBFRandomLayer, MLPRandomLayer
from sklearn.linear_model import LogisticRegression
import pickle
from os import listdir
from os.path import isfile, join
import os
import datetime
from pathlib import Path
# --- Directory layout and limits for persisted models ---
custom_models_dir = "custom_models"            # user-trained models (.sav pickles)
custom_models_data_dir = "custom_models_data"  # data files backing custom models
custom_models_time_limit = 24 * 60  # 24 hours in minutes
default_models_dir = "models"                  # bundled default models
# Supported algorithm codes and their display names (index-aligned lists).
algorithms = ['ENS','RF','MLP','ELM']
algorithms_names = ['Ensemble','Random Forest','Multiple Layer Perceptron (MLP)' ,'Extreme Learning Machine (ELM)']
DEFAULT_INPUT_FILE = 'train_data/flaringar_simple.csv'
logFileHandler = None
timestr = time.strftime("%Y%m%d_%H%M%S")  # run timestamp, usable for file naming
loggingString = []          # verbose messages accumulated by log()
algorithm = 'rf,mlp,elm'    # default algorithm selection string
flares_col_name ='Flare Class'
logFile = "logs/ens_deepsun.log"
# Numeric class codes <-> flare class letters (B/C/M/X; -1 = not available).
mapping ={1:"B", 2:"C", 3:"M", 4:'X', -1:'N/A'}
B = mapping[1]
C = mapping[2]
M = mapping[3]
X = mapping[4]
class_to_num = {"B":1, "C":2, "M":3, 'X':4, 'N/A':-1}
# Columns required by the trained models: the label plus SHARP-style features.
req_columns =[flares_col_name, "TOTUSJH","TOTBSQ","TOTPOT","TOTUSJZ","ABSNJZH","SAVNCPP","USFLUX","AREA_ACR","TOTFZ","MEANPOT","R_VALUE","EPSZ","SHRGT45"]
# Placeholder "no result" prediction object.
no_ver_o = {}
no_ver_o['fcnumber'] = 0
no_ver_o['fcname'] = 'A'
# Globals filled in by set_results()/print_confusion_matrix() for reporting.
predicted = []
actual = []
confusion_matrix_result= []
cv_mean_value = None
overall_test_accuracy=None
feature_importances = None
partial_ens_trained = False
# Logging behavior switches.
noLogging = False
log_to_terminal = False
verbose = False
# Original stdout, restored by the redirection context managers below.
save_stdout = sys.stdout
@contextmanager
def stdout_redirected(new_stdout):
    """Temporarily route sys.stdout to *new_stdout*; always restore the
    module-level saved default on exit, even on error."""
    sys.stdout = new_stdout
    try:
        yield
    finally:
        sys.stdout = save_stdout
@contextmanager
def stdout_default():
    """Context manager that switches sys.stdout back to the saved default.

    BUG FIX: the original function was decorated with @contextmanager but
    contained no ``yield``; contextlib requires a generator, so entering
    the context raised TypeError at runtime. The ``yield`` below makes it
    a valid (no-op on exit) context manager while keeping the original
    side effect of restoring stdout on entry.
    """
    sys.stdout = save_stdout
    yield
def log(*message, verbose=True, logToTerminal=False, no_time=False, end=' '):
    """Append *message* parts to the log file and optionally echo them to
    the terminal.

    Args:
        *message: objects printed in order, separated by *end*.
        verbose: when True, messages are also accumulated in the module
            global ``loggingString`` for later retrieval.
        logToTerminal: force terminal echo for this call, regardless of
            the module-wide ``log_to_terminal`` switch.
        no_time: suppress the leading ``[timestamp]`` prefix.
        end: separator passed to print() between parts.
    """
    global noLogging
    if noLogging:
        return
    global log_to_terminal
    global loggingString
    if log_to_terminal or logToTerminal:
        if not no_time:
            print('[' + str(datetime.datetime.now().replace(microsecond=0)) + '] ', end=end)
        for msg in message:
            print(msg, end=end)
        print('')
    with open(logFile, "a+") as logFileHandler:
        with stdout_redirected(logFileHandler):
            # BUG FIX: the file branch previously tested `if no_time:`,
            # writing the timestamp only when the caller asked for NO
            # timestamp — inverted relative to the terminal branch above.
            if not no_time:
                print('[' + str(datetime.datetime.now().replace(microsecond=0)) + '] ', end=end)
            for msg in message:
                print(msg, end=end)
                if verbose:
                    loggingString.append(msg)
            print('')
def set_log_to_terminal(v):
    """Turn terminal echoing of log() messages on or off module-wide."""
    global log_to_terminal
    log_to_terminal = v
def set_verbose(v):
    """Set the module-wide ``verbose`` flag from any truthy-string value.

    BUG FIX: the original assignment lacked ``global verbose``, so it only
    bound a local variable and the module flag was never updated.
    """
    global verbose
    verbose = boolean(v)
def boolean(b):
    """Loosely parse *b* as a boolean.

    Accepts any object; returns True only for (case-insensitive,
    whitespace-trimmed) prefixes of "yes"/"true" and the strings "1"/"t".
    None and everything else map to False.
    """
    if b is None:  # idiom fix: identity check, not == None
        return False
    return str(b).strip().lower() in {
        'y', 'yes', 'ye', '1', 't', 'tr', 'tru', 'true'
    }
def create_default_model(trained_model, model_id):
    """Persist *trained_model* under the default models directory and
    return the saved file path."""
    return create_model(trained_model, model_id, default_models_dir)
def create_custom_model(trained_model, model_id):
    """Persist *trained_model* under the custom models directory and
    return the saved file path."""
    return create_model(trained_model, model_id, custom_models_dir)
def create_model(trained_model, model_id, model_dir):
    """Pickle *trained_model* to ``<model_dir>/<model_id>.sav`` and return
    the file path.

    Fixes: the file handle from ``open(...)`` was never closed (resource
    leak) — now uses a ``with`` block; the log message claimed "dill" while
    the implementation uses pickle.
    """
    model_file = model_dir + "/" + model_id + ".sav"
    log("create_model saving model with pickle " + model_id + " to file: " + model_file)
    with open(model_file, 'wb') as fh:
        pickle.dump(trained_model, fh)
    return model_file
def is_model_file_exists(file):
    """Return True when *file* exists under the custom models directory."""
    return (Path(custom_models_dir) / file).exists()
def is_file_exists(file):
    """Return True when *file* exists on disk; the check is logged."""
    exists = Path(file).exists()
    log("Check if file exists: " + file + " : " + str(exists))
    return exists
def are_model_files_exist(models_dir, modelId, alg='ENS'):
    """Check whether the trained model file(s) needed by *alg* exist.

    For 'ENS' all three of <modelId>_rf/_mlp/_elm .sav files must exist;
    otherwise a message listing the missing and available models is
    printed. For a single algorithm, only that file is checked.
    Side effect: sets the module global ``partial_ens_trained`` when an
    ENS check finds at least one (but not all) member models.
    """
    alg = str(alg).strip().upper()
    log("Searching for model is: " + modelId + " in directory: " + models_dir)
    modelExenstion = ".sav"
    # Probe each ensemble member's pickle file.
    fname = models_dir + "/" + modelId + "_rf" + modelExenstion
    rf_model_exist = is_file_exists(fname)
    fname = models_dir + "/" + modelId + "_mlp" + modelExenstion
    mlp_model_exist = is_file_exists(fname)
    fname = models_dir + "/" + modelId + "_elm" + modelExenstion
    elm_model_exist = is_file_exists(fname)
    if alg == 'ENS':
        exist = (rf_model_exist and mlp_model_exist and elm_model_exist)
        if exist:
            return True
        # Build the user-facing message; note the print below interleaves
        # the missing-model list BEFORE this text.
        msg ='exist for this model id: ' + modelId + '\nThe ENS algorithm requires the three models: RF, MLP, and ELM to be trained.'
        msg = msg +'\nPlease use the -a open to specify the algorithm you want to test with.\n'
        msg = msg +'Available models for this model id:'
        available_modes =[]
        models = []
        if not rf_model_exist:
            models.append('RF')
        else:
            available_modes.append('RF')
        if not mlp_model_exist:
            models.append('MLP')
        else:
            available_modes.append('MLP')
        if not elm_model_exist:
            models.append('ELM')
        else:
            available_modes.append('ELM')
        if len(available_modes) == 0:
            # Nothing trained at all; no partial flag, just fail.
            return False
        global partial_ens_trained
        partial_ens_trained = True
        models_exist = 'model does not'
        if len(models) > 1:
            models_exist = 'model(s) do not'
        print('\n' + ', '.join(models),models_exist, msg, ', '.join(available_modes))
        return False
    if alg == 'RF':
        return rf_model_exist
    if alg == 'MLP':
        return mlp_model_exist
    if alg == 'ELM':
        return elm_model_exist
    # Unknown algorithm codes fall through as "exists" — presumably
    # validated earlier by the caller; TODO confirm.
    return True
def get_partial_ens_trained():
    """Accessor for the module-level ``partial_ens_trained`` flag set by
    are_model_files_exist()."""
    return partial_ens_trained
def convert_class_to_num(c):
    """Map a flare-class label (e.g. "B", "m1.2") to its numeric code.

    Only the first significant character matters; unknown or empty labels
    return -1.

    BUG FIX: the original did ``c[0].strip()`` — indexing before
    stripping — which raised IndexError on an empty string and
    misclassified labels with leading whitespace.
    """
    c = c.strip().upper()
    if not c:
        return -1
    return class_to_num.get(c[0], -1)
def load_model(model_dir, model_id):
    """Unpickle and return ``<model_dir>/<model_id>.sav``.

    Returns the sentinel string "NO MODEL FILE" when the file is absent
    (kept for backward compatibility with existing callers).

    Fix: the file handle from ``open(...)`` was never closed — now uses a
    ``with`` block.
    """
    model_file = model_dir + "/" + model_id + ".sav"
    log("Loading model file: " + model_file)
    if is_file_exists(model_file):
        with open(model_file, 'rb') as fh:
            model = pickle.load(fh)
        log("Loaded model " + model_file)
        log("Returning loaded model")
        return model
    log("returning NO MODEL FILE exist")
    return "NO MODEL FILE"
def load_dataset_csv(data_file):
    """Read *data_file* as CSV and return the resulting DataFrame."""
    log("Reading data set from file: " + data_file)
    return pd.read_csv(data_file)
def load_dataset_csv_default():
    """Load the bundled default training CSV (DEFAULT_INPUT_FILE)."""
    return load_dataset_csv(DEFAULT_INPUT_FILE)
def removeDataColumn (col, data):
    """Drop column *col* from *data* if present; return the (possibly
    unchanged) DataFrame. Never raises on a missing column."""
    return data.drop(col, axis=1) if col in data.columns else data
def remove_default_columns(dataset):
    """Drop the metadata columns that are not model features and return
    the reduced DataFrame."""
    log('Removing default columns from data set')
    for col in ('goes', 'fdate', 'goesstime', 'flarec', 'noaaar'):
        dataset = removeDataColumn(col, dataset)
    return dataset
def remove_additional_columns(dataset):
    """Reduce *dataset* to exactly the required feature/label columns
    (``req_columns``) and return the reduced DataFrame.

    BUG FIX: the original called ``remove_default_columns(dataset)`` and
    discarded the return value, so the default metadata columns were
    never actually removed (removeDataColumn rebinds, it does not mutate
    in place).
    """
    log('Removing default columns from data set')
    dataset = remove_default_columns(dataset)
    # Snapshot the column list before dropping, since dataset is rebound
    # inside the loop.
    for c in list(dataset.columns):
        if c not in req_columns:
            dataset = removeDataColumn(c, dataset)
    return dataset
def split_data(dataset, target_column = 'flarecn', test_percent=0.1):
    """Split *dataset* into train/test features and label arrays.

    Returns (train_x, test_x, train_y, test_y); *test_percent* is the
    held-out fraction.
    """
    labels = np.array(dataset[target_column])
    features = removeDataColumn(target_column, dataset)
    train_x, test_x, train_y, test_y = train_test_split(
        features[features.columns], labels, test_size=test_percent
    )
    return (train_x, test_x, train_y, test_y)
def normalize_scale_data(d):
    """Min-max scale *d* (array-like) into the range [0, 1].

    Fix: the original bound its extrema to the names ``min``/``max``,
    shadowing the builtins — renamed to ``lo``/``hi``.
    Note: a constant input still divides by zero (yielding NaN/inf),
    matching the original behavior.
    """
    arr = np.array(d)
    lo = arr.min()
    hi = arr.max()
    return (d - lo) / (hi - lo)
def load_train_test_datasets_csv(testFile, trainFile = DEFAULT_INPUT_FILE, target_column = 'flarecn', additional_col=''):
    """Load train and test CSV files and return prepared splits.

    Both frames have the default metadata columns and *additional_col*
    removed; *target_column* is extracted as the label arrays.
    Returns (train_x, test_x, train_y, test_y).
    """
    dataset = load_dataset_csv(trainFile)
    dataset = remove_default_columns(dataset)
    dataset = removeDataColumn(additional_col, dataset)
    testData = pd.read_csv(testFile)
    testData = remove_default_columns(testData)
    testData = removeDataColumn(additional_col, testData)
    # Pull the labels out before dropping the target column.
    labels = np.array(dataset[target_column])
    labels1 = np.array(testData[target_column])
    dataset = removeDataColumn(target_column,dataset)
    testData = removeDataColumn(target_column,testData)
    log ("training labels are as follows:")
    log(labels)
    train_x = dataset[dataset.columns]
    train_y = labels
    test_x = testData[testData.columns]
    test_y = labels1
    log('test labels are as follows')
    log(labels1)
    return (train_x, test_x, train_y, test_y)
def get_train_test_datasets(trainData, testData, target_column = 'flarecn', additional_col=''):
    """Prepare already-loaded train/test DataFrames for modeling.

    Removes metadata columns and *additional_col*, extracts the label
    arrays, and returns (train_x, test_x, train_y, test_y).
    """
    trainData = removeDataColumn(additional_col, remove_default_columns(trainData))
    testData = removeDataColumn(additional_col, remove_default_columns(testData))
    train_labels = np.array(trainData[target_column])
    test_labels = np.array(testData[target_column])
    trainData = removeDataColumn(target_column, trainData)
    testData = removeDataColumn(target_column, testData)
    log ("training labels are as follows:")
    log(train_labels)
    train_x = trainData[trainData.columns]
    test_x = testData[testData.columns]
    log('test labels are as follows')
    log(test_labels)
    return (train_x, test_x, train_labels, test_labels)
def set_print_results(test_y, predictions):
    """Backward-compatible alias for set_results() with logging enabled."""
    return set_results(test_y, predictions)
def set_results(test_y, predictions, logging=True):
    """Compare actual labels against predictions and build result records.

    Logs per-item comparisons and per-class counts (when *logging* is
    True), records the predictions/actuals and overall accuracy in the
    module globals, and returns a list of {"fcnumber", "fcname"} dicts,
    one per prediction.

    Perf fix: the original rebuilt ``list(test_y)`` on every element
    access (accidental O(n^2)); the list is now materialized once. The
    dead ``index`` counter (used only by commented-out code) was removed.
    """
    actual_values = list(test_y)
    c = 0
    results = []
    c1 = 0
    c2 = 0
    c3 = 0
    c4 = 0
    total = 0
    for i in range(0, len(actual_values)):
        value = actual_values[i]
        # Tally per-class occurrences (1..4 == B/C/M/X).
        if value == 1:
            c1 = c1 + 1
        if value == 2:
            c2 = c2 + 1
        if value == 3:
            c3 = c3 + 1
        if value == 4:
            c4 = c4 + 1
        e = ""
        if value == predictions[i]:
            e = "match"
            c = c + 1
        if logging:
            log(str(i) + ") - Actual outcome :: {} and Predicted outcome :: {}".format(value, predictions[i]) + " " + e)
        obj = {}
        obj["fcnumber"] = predictions[i]
        obj["fcname"] = "" + mapping[predictions[i]]
        results.append(obj)
        total = total + 1
    if logging:
        log("c: " + str(c) + " total test " + str(len(actual_values)))
        log("c1: " + str(c1) + ", c2: " + str(c2) + ", c3: " + str(c3) + ", c4: " + str(c4) + ", total: " + str(total))
        log("Test Accuracy :: " + str(accuracy_score(test_y, predictions)))
    global overall_test_accuracy
    overall_test_accuracy = accuracy_score(test_y, predictions)
    global predicted
    predicted = predictions
    global actual
    actual = test_y
    return results
def print_confusion_matrix(test_y, predictions):
    """Log the confusion matrix (raw and as a crosstab) and cache it.

    Side effect: stores the matrix as a list of int lists in the module
    global ``confusion_matrix_result``. Returns the sklearn matrix.

    Cleanup: removed the unused ``row``/``col`` counters, replaced the
    manual tab-string assembly with ``'\\t'.join`` (identical output),
    and renamed the local that shadowed the historical builtin ``cmp``.
    """
    log(" Confusion matrix ")
    conf_matrix = confusion_matrix(test_y, predictions)
    log(conf_matrix)
    cross_tab = pd.crosstab(test_y, predictions, rownames=['Actual'], colnames=['Predicted'], margins=True)
    log("confusion matrix printed")
    log(cross_tab)
    global confusion_matrix_result
    confusion_matrix_result = []
    for matrix_row in conf_matrix:
        # Log one tab-separated line per row, as before.
        log('\t'.join(str(cell) for cell in matrix_row))
        confusion_matrix_result.append([int(cell) for cell in matrix_row])
    return conf_matrix
def rf_train_model(train_x=None,
test_x=None,
train_y=None,
test_y=None,
model_id="default_model"):
| |
# -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accesses the google.logging.v2 LoggingServiceV2 API."""
import functools
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.client_options
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.gapic_v1.routing_header
import google.api_core.grpc_helpers
import google.api_core.page_iterator
import google.api_core.path_template
import grpc
from google.api import monitored_resource_pb2
from google.cloud.logging_v2.gapic import enums
from google.cloud.logging_v2.gapic import logging_service_v2_client_config
from google.cloud.logging_v2.gapic.transports import logging_service_v2_grpc_transport
from google.cloud.logging_v2.proto import log_entry_pb2
from google.cloud.logging_v2.proto import logging_pb2
from google.cloud.logging_v2.proto import logging_pb2_grpc
from google.protobuf import empty_pb2
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution("google-cloud-logging").version
class LoggingServiceV2Client(object):
"""Service for ingesting and querying logs."""
SERVICE_ADDRESS = "logging.googleapis.com:443"
"""The default address of the service."""
# The name of the interface for this client. This is the key used to
# find the method configuration in the client_config dictionary.
_INTERFACE_NAME = "google.logging.v2.LoggingServiceV2"
@classmethod
def from_service_account_file(cls, filename, *args, **kwargs):
    """Creates an instance of this client using the provided credentials
    file.
    Args:
        filename (str): The path to the service account private key json
            file.
        args: Additional arguments to pass to the constructor.
        kwargs: Additional arguments to pass to the constructor.
    Returns:
        LoggingServiceV2Client: The constructed client.
    """
    kwargs["credentials"] = service_account.Credentials.from_service_account_file(
        filename
    )
    return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@classmethod
def billing_path(cls, billing_account):
    """Return a fully-qualified billing string."""
    template = "billingAccounts/{billing_account}"
    return google.api_core.path_template.expand(
        template, billing_account=billing_account
    )
@classmethod
def billing_log_path(cls, billing_account, log):
    """Return a fully-qualified billing_log string."""
    template = "billingAccounts/{billing_account}/logs/{log}"
    return google.api_core.path_template.expand(
        template, billing_account=billing_account, log=log
    )
@classmethod
def folder_path(cls, folder):
    """Return a fully-qualified folder string."""
    template = "folders/{folder}"
    return google.api_core.path_template.expand(template, folder=folder)
@classmethod
def folder_log_path(cls, folder, log):
    """Return a fully-qualified folder_log string."""
    template = "folders/{folder}/logs/{log}"
    return google.api_core.path_template.expand(template, folder=folder, log=log)
@classmethod
def log_path(cls, project, log):
    """Return a fully-qualified log string."""
    template = "projects/{project}/logs/{log}"
    return google.api_core.path_template.expand(template, project=project, log=log)
@classmethod
def organization_path(cls, organization):
    """Return a fully-qualified organization string."""
    template = "organizations/{organization}"
    return google.api_core.path_template.expand(template, organization=organization)
@classmethod
def organization_log_path(cls, organization, log):
    """Return a fully-qualified organization_log string."""
    template = "organizations/{organization}/logs/{log}"
    return google.api_core.path_template.expand(
        template, organization=organization, log=log
    )
@classmethod
def project_path(cls, project):
    """Return a fully-qualified project string."""
    template = "projects/{project}"
    return google.api_core.path_template.expand(template, project=project)
def __init__(
    self,
    transport=None,
    channel=None,
    credentials=None,
    client_config=None,
    client_info=None,
    client_options=None,
):
    """Constructor.
    Args:
        transport (Union[~.LoggingServiceV2GrpcTransport,
                Callable[[~.Credentials, type], ~.LoggingServiceV2GrpcTransport]): A transport
            instance, responsible for actually making the API calls.
            The default transport uses the gRPC protocol.
            This argument may also be a callable which returns a
            transport instance. Callables will be sent the credentials
            as the first argument and the default transport class as
            the second argument.
        channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
            through which to make calls. This argument is mutually exclusive
            with ``credentials``; providing both will raise an exception.
        credentials (google.auth.credentials.Credentials): The
            authorization credentials to attach to requests. These
            credentials identify this application to the service. If none
            are specified, the client will attempt to ascertain the
            credentials from the environment.
            This argument is mutually exclusive with providing a
            transport instance to ``transport``; doing so will raise
            an exception.
        client_config (dict): DEPRECATED. A dictionary of call options for
            each method. If not specified, the default configuration is used.
        client_info (google.api_core.gapic_v1.client_info.ClientInfo):
            The client info used to send a user-agent string along with
            API requests. If ``None``, then default info will be used.
            Generally, you only need to set this if you're developing
            your own client library.
        client_options (Union[dict, google.api_core.client_options.ClientOptions]):
            Client options used to set user options on the client. API Endpoint
            should be set through client_options.
    """
    # Raise deprecation warnings for things we want to go away.
    if client_config is not None:
        warnings.warn(
            "The `client_config` argument is deprecated.",
            PendingDeprecationWarning,
            stacklevel=2,
        )
    else:
        client_config = logging_service_v2_client_config.config
    if channel:
        warnings.warn(
            "The `channel` argument is deprecated; use " "`transport` instead.",
            PendingDeprecationWarning,
            stacklevel=2,
        )
    api_endpoint = self.SERVICE_ADDRESS
    if client_options:
        # Idiom fix: `type(x) == dict` replaced with isinstance so dict
        # subclasses are also accepted.
        if isinstance(client_options, dict):
            client_options = google.api_core.client_options.from_dict(
                client_options
            )
        if client_options.api_endpoint:
            api_endpoint = client_options.api_endpoint
    # Instantiate the transport.
    # The transport is responsible for handling serialization and
    # deserialization and actually sending data to the service.
    if transport:
        if callable(transport):
            self.transport = transport(
                credentials=credentials,
                default_class=logging_service_v2_grpc_transport.LoggingServiceV2GrpcTransport,
                address=api_endpoint,
            )
        else:
            if credentials:
                raise ValueError(
                    "Received both a transport instance and "
                    "credentials; these are mutually exclusive."
                )
            self.transport = transport
    else:
        self.transport = logging_service_v2_grpc_transport.LoggingServiceV2GrpcTransport(
            address=api_endpoint, channel=channel, credentials=credentials
        )
    if client_info is None:
        client_info = google.api_core.gapic_v1.client_info.ClientInfo(
            gapic_version=_GAPIC_LIBRARY_VERSION
        )
    else:
        client_info.gapic_version = _GAPIC_LIBRARY_VERSION
    self._client_info = client_info
    # Parse out the default settings for retry and timeout for each RPC
    # from the client configuration.
    # (Ordinarily, these are the defaults specified in the `*_config.py`
    # file next to this one.)
    self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
        client_config["interfaces"][self._INTERFACE_NAME]
    )
    # Save a dictionary of cached API call functions.
    # These are the actual callables which invoke the proper
    # transport methods, wrapped with `wrap_method` to add retry,
    # timeout, and the like.
    self._inner_api_calls = {}
# Service calls
def delete_log(
    self,
    log_name,
    retry=google.api_core.gapic_v1.method.DEFAULT,
    timeout=google.api_core.gapic_v1.method.DEFAULT,
    metadata=None,
):
    """
    Deletes all the log entries in a log.
    The log reappears if it receives new entries.
    Log entries written shortly before the delete operation might not be
    deleted.

    Example:
        >>> from google.cloud import logging_v2
        >>>
        >>> client = logging_v2.LoggingServiceV2Client()
        >>>
        >>> log_name = client.log_path('[PROJECT]', '[LOG]')
        >>>
        >>> client.delete_log(log_name)

    Args:
        log_name (str): Required. The resource name of the log to delete:
            ::
                "projects/[PROJECT_ID]/logs/[LOG_ID]"
                "organizations/[ORGANIZATION_ID]/logs/[LOG_ID]"
                "billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]"
                "folders/[FOLDER_ID]/logs/[LOG_ID]"
            ``[LOG_ID]`` must be URL-encoded. For example,
            ``"projects/my-project-id/logs/syslog"``,
            ``"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity"``.
            For more information about log names, see ``LogEntry``.
        retry (Optional[google.api_core.retry.Retry]): A retry object used
            to retry requests. If ``None`` is specified, requests will not
            be retried.
        timeout (Optional[float]): The amount of time, in seconds, to wait
            for the request to complete. Note that if ``retry`` is
            specified, the timeout applies to each individual attempt.
        metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
            that is provided to the method.

    Raises:
        google.api_core.exceptions.GoogleAPICallError: If the request
            failed for any reason.
        google.api_core.exceptions.RetryError: If the request failed due
            to a retryable error and retry attempts failed.
        ValueError: If the parameters are invalid.
    """
    # Wrap the transport method with retry/timeout logic once and cache it.
    if "delete_log" not in self._inner_api_calls:
        self._inner_api_calls[
            "delete_log"
        ] = google.api_core.gapic_v1.method.wrap_method(
            self.transport.delete_log,
            default_retry=self._method_configs["DeleteLog"].retry,
            default_timeout=self._method_configs["DeleteLog"].timeout,
            client_info=self._client_info,
        )

    request = logging_pb2.DeleteLogRequest(log_name=log_name)
    if metadata is None:
        metadata = []
    metadata = list(metadata)
    # Attach routing metadata so the backend routes the request by log_name.
    # The original wrapped the next line in `try/except AttributeError`, but
    # building a literal list of tuples cannot raise AttributeError, so the
    # dead handler has been removed.
    routing_header = [("log_name", log_name)]
    routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
        routing_header
    )
    metadata.append(routing_metadata)

    self._inner_api_calls["delete_log"](
        request, retry=retry, timeout=timeout, metadata=metadata
    )
def write_log_entries(
self,
entries,
log_name=None,
resource=None,
labels=None,
partial_success=None,
dry_run=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Writes log entries to Logging. This API method is the
only way to send log entries to Logging. This method
is used, directly or indirectly, by the Logging agent
(fluentd) and all logging libraries configured to use Logging.
A single request may contain log entries for a maximum of 1000
different resources (projects, organizations, billing accounts or
folders)
Example:
>>> from google.cloud import logging_v2
>>>
>>> client = logging_v2.LoggingServiceV2Client()
>>>
>>> # TODO: Initialize `entries`:
>>> entries = []
>>>
>>> response = client.write_log_entries(entries)
Args:
entries (list[Union[dict, ~google.cloud.logging_v2.types.LogEntry]]): Required. The log entries to send to Logging. The order of log entries
in this list does not matter. Values supplied in this method's
``log_name``, ``resource``, and ``labels`` fields are copied into those
log entries in this list that do not include values for their
corresponding fields. For more information, see the ``LogEntry`` type.
If the ``timestamp`` or ``insert_id`` fields are missing in log entries,
then this method supplies the current time or a unique identifier,
respectively. The supplied values are chosen so that, among the log
entries that did not supply their own values, the entries earlier in the
list will sort before the entries later in the list. See the
``entries.list`` method.
Log entries with timestamps that are more than the `logs retention
period <https://cloud.google.com/logging/quota-policy>`__ in the past or
more than 24 hours in the future will not be available when calling
``entries.list``. However, those log entries | |
# parser.py
import lexer
import ast
class Parser:
# Token kinds that terminate a block's statement list without belonging
# to it; the enclosing construct consumes them.
block_end_tokens = [lexer.TokenKind.KW_RETURN, lexer.TokenKind.EOF,
                    lexer.TokenKind.KW_END, lexer.TokenKind.KW_ELSE,
                    lexer.TokenKind.KW_ELSEIF, lexer.TokenKind.KW_UNTIL]
# Binary-operator binding powers (Lua precedence levels). 'left' is
# compared against the caller's priority; 'right' is passed to the
# recursive call, so left > right makes the operator right-associative
# ('^' and '..').
priority_table = {
    lexer.TokenKind.OP_ADD: {'left': 10, 'right': 10},  # +
    lexer.TokenKind.OP_SUB: {'left': 10, 'right': 10},  # -
    lexer.TokenKind.OP_MUL: {'left': 11, 'right': 11},  # *
    lexer.TokenKind.OP_MOD: {'left': 11, 'right': 11},  # %
    lexer.TokenKind.OP_DIV: {'left': 11, 'right': 11},  # /
    lexer.TokenKind.OP_IDIV: {'left': 11, 'right': 11},  # //
    lexer.TokenKind.OP_POW: {'left': 14, 'right': 13},  # ^ (right-assoc)
    lexer.TokenKind.OP_BAND: {'left': 6, 'right': 6},  # &
    lexer.TokenKind.OP_BOR: {'left': 4, 'right': 4},  # |
    lexer.TokenKind.OP_BNOT: {'left': 5, 'right': 5},  # ~ (binary xor)
    lexer.TokenKind.OP_SHL: {'left': 7, 'right': 7},  # <<
    lexer.TokenKind.OP_SHR: {'left': 7, 'right': 7},  # >>
    lexer.TokenKind.OP_CONCAT: {'left': 9, 'right': 8},  # .. (right-assoc)
    lexer.TokenKind.OP_EQ: {'left': 3, 'right': 3},  # ==
    lexer.TokenKind.OP_LE: {'left': 3, 'right': 3},  # <=
    lexer.TokenKind.OP_LT: {'left': 3, 'right': 3},  # <
    lexer.TokenKind.OP_NE: {'left': 3, 'right': 3},  # ~=
    lexer.TokenKind.OP_GT: {'left': 3, 'right': 3},  # >
    lexer.TokenKind.OP_GE: {'left': 3, 'right': 3},  # >=
    lexer.TokenKind.OP_AND: {'left': 2, 'right': 2},  # and
    lexer.TokenKind.OP_OR: {'left': 1, 'right': 1},  # or
}
# Token kinds usable as unary (prefix) operators.
unops = [
    lexer.TokenKind.OP_SUB, lexer.TokenKind.OP_NOT,
    lexer.TokenKind.OP_LEN, lexer.TokenKind.OP_BNOT
]
# Token kinds usable as binary operators.
# NOTE(review): OP_BXOR is listed here but has no priority_table entry;
# if the lexer ever emits OP_BXOR (instead of OP_BNOT) for binary '~',
# parse_exp would raise KeyError — confirm against the lexer.
binops = [
    lexer.TokenKind.OP_ADD, lexer.TokenKind.OP_SUB,
    lexer.TokenKind.OP_MUL, lexer.TokenKind.OP_MOD,
    lexer.TokenKind.OP_POW, lexer.TokenKind.OP_DIV,
    lexer.TokenKind.OP_IDIV, lexer.TokenKind.OP_BAND,
    lexer.TokenKind.OP_BOR, lexer.TokenKind.OP_BXOR,
    lexer.TokenKind.OP_SHL, lexer.TokenKind.OP_SHR,
    lexer.TokenKind.OP_CONCAT, lexer.TokenKind.OP_NE,
    lexer.TokenKind.OP_EQ, lexer.TokenKind.OP_LT,
    lexer.TokenKind.OP_LE, lexer.TokenKind.OP_GT,
    lexer.TokenKind.OP_GE, lexer.TokenKind.OP_AND,
    lexer.TokenKind.OP_OR
]
# Binding power applied to the operand of any unary operator.
unary_priority = 12
def __init__(self, lex):
    """Store the lexer that supplies the token stream."""
    self.lex = lex
def parse(self):
    """Parse a complete chunk: one block followed by end-of-file."""
    chunk = self.parse_block()
    self.lex.next_token_of_kind(lexer.TokenKind.EOF)
    return chunk
def parse_exp_list(self):
    """explist ::= exp {',' exp}"""
    exps = [self.parse_exp(0)[1]]
    while self.lex.look_ahead().kind == lexer.TokenKind.SEP_COMMA:
        self.lex.next_token()
        exps.append(self.parse_exp(0)[1])
    return exps
# exp ::= (simpleexp | unop exp) {binop exp}
def parse_exp(self, prev_priority):
    """Precedence-climbing expression parser.

    ``prev_priority`` is the binding power of the operator to the left;
    an operator is consumed only while its 'left' priority exceeds it.
    Returns a (next_operator_kind, exp) tuple — callers use index [1].
    """
    token = self.lex.look_ahead()
    if token.kind in self.unops:
        self.lex.next_token()
        # Unary operands bind with the fixed unary_priority (12).
        op_left = ast.UnopExp(self.parse_exp(self.unary_priority)[1], token.kind)
    else:
        op_left = self.parse_simple_exp()
    bin_op = self.lex.look_ahead().kind
    while bin_op in self.binops and self.priority_table[bin_op]['left'] > prev_priority:
        bin_op, op_left = self.parse_binop_exp(op_left, self.priority_table[bin_op]['right'])
    return bin_op, op_left
# args ::= '(' [explist] ')' | tableconstructor | LiteralString
# tableconstructor ::= '{' [fieldlist] '}'
def parse_func_args(self):
    """Parse call arguments; returns a list of argument expressions."""
    look_token = self.lex.look_ahead()
    exp_list = []
    if look_token.kind == lexer.TokenKind.SEP_LPAREN:
        self.lex.next_token()
        if self.lex.look_ahead().kind != lexer.TokenKind.SEP_RPAREN:
            exp_list = self.parse_exp_list()
        self.lex.next_token_of_kind(lexer.TokenKind.SEP_RPAREN)
    elif look_token.kind == lexer.TokenKind.SEP_LCURLY:
        exp_list = [self.parse_table_constructor_exp()]
    else:
        # f"literal" call form. Bug fix: the original built
        # `ast.String(token).data`, which wrapped the whole token and then
        # returned an attribute instead of a string-literal expression;
        # every other string literal in this parser is represented as
        # ast.StringExp(token.data).
        token = self.lex.next_token_of_kind(lexer.TokenKind.STRING)
        exp_list = [ast.StringExp(token.data)]
    return exp_list
def parse_simple_exp(self):
    """simpleexp ::= nil | false | true | Numeral | LiteralString | '...' |
    functiondef | prefixexp | tableconstructor
    """
    tok = self.lex.look_ahead()
    kind = tok.kind
    # Delegating forms first: these helpers consume their own tokens.
    if kind == lexer.TokenKind.NUMBER:
        return self.parse_number_exp()
    if kind == lexer.TokenKind.KW_FUNCTION:
        return self.parse_func_def_exp()
    if kind == lexer.TokenKind.SEP_LCURLY:
        return self.parse_table_constructor_exp()
    # Single-token literals: consume the token and build the node.
    if kind == lexer.TokenKind.KW_NIL:
        self.lex.next_token()
        return ast.NilExp()
    if kind == lexer.TokenKind.KW_FALSE:
        self.lex.next_token()
        return ast.BoolConstExp(False)
    if kind == lexer.TokenKind.KW_TRUE:
        self.lex.next_token()
        return ast.BoolConstExp(True)
    if kind == lexer.TokenKind.STRING:
        self.lex.next_token()
        return ast.StringExp(tok.data)
    if kind == lexer.TokenKind.VARARG:
        self.lex.next_token()
        return ast.VarargExp()
    # Anything else must start a prefix expression (name, call, paren).
    return self.parse_prefix_exp()
def parse_func_def_exp(self):
    """functiondef ::= function funcbody (anonymous function expression)."""
    self.lex.next_token_of_kind(lexer.TokenKind.KW_FUNCTION)
    return self.parse_func_body_exp(False)
def parse_table_constructor_exp(self):
    """tableconstructor ::= '{' [fieldlist] '}'"""
    self.lex.next_token_of_kind(lexer.TokenKind.SEP_LCURLY)
    keys, vals = [], []
    if self.lex.look_ahead().kind != lexer.TokenKind.SEP_RCURLY:
        keys, vals = self.parse_field_list()
    self.lex.next_token_of_kind(lexer.TokenKind.SEP_RCURLY)
    return ast.TableConstructorExp(keys, vals)
def parse_field_list(self):
    """fieldlist ::= field {fieldsep field} [fieldsep]
    fieldsep ::= ',' | ';'
    Returns parallel (key_list, val_list).
    """
    keys, vals = [], []
    k, v = self.parse_field()
    keys.append(k)
    vals.append(v)
    separators = (lexer.TokenKind.SEP_COMMA, lexer.TokenKind.SEP_SEMI)
    while self.lex.look_ahead().kind in separators:
        self.lex.next_token()
        # A trailing separator just before '}' ends the list.
        if self.lex.look_ahead().kind == lexer.TokenKind.SEP_RCURLY:
            break
        k, v = self.parse_field()
        keys.append(k)
        vals.append(v)
    return keys, vals
# field ::= '[' exp ']' '=' exp | Name '=' exp | exp
def parse_field(self):
    """Parse one table-constructor field.

    Returns a (key_exp, val_exp) pair; positional fields get a NilExp key
    so the caller can assign the implicit array index.
    """
    if self.lex.look_ahead().kind == lexer.TokenKind.SEP_LBRACK:
        # '[' exp ']' '=' exp
        self.lex.next_token()
        key_exp = self.parse_exp(0)[1]
        self.lex.next_token_of_kind(lexer.TokenKind.SEP_RBRACK)
        self.lex.next_token_of_kind(lexer.TokenKind.OP_ASSIGN)
        val_exp = self.parse_exp(0)[1]
        return key_exp, val_exp
    exp = self.parse_exp(0)[1]
    if self.lex.look_ahead().kind == lexer.TokenKind.OP_ASSIGN:
        # Name '=' exp: the left side must be a bare name.
        if not isinstance(exp, ast.NameExp):
            # Bug fix: the original raised with the undefined name `token`,
            # producing a NameError instead of a syntax error. Report the
            # (not yet consumed) '=' token instead.
            raise Exception("syntax error near '%s'" % self.lex.look_ahead())
        self.lex.next_token()
        key_exp = ast.StringExp(exp.id_name)
        val_exp = self.parse_exp(0)[1]
        return key_exp, val_exp
    # Positional field.
    return ast.NilExp(), exp
def parse_binop_exp(self, op_left, prev_priority):
    """Consume one binary operator and its right-hand operand.

    Returns (next_operator_kind, BinopExp) so parse_exp can keep climbing.
    """
    op_token = self.lex.next_token()
    if op_token.kind not in self.binops:
        raise Exception("syntax error near '%s'" % op_token)
    next_op, op_right = self.parse_exp(prev_priority)
    return next_op, ast.BinopExp(op_left, op_right, op_token.kind)
def parse_number_exp(self):
    """Parse a numeral token into an IntegerExp or FloatExp.

    Replaces the original ``eval(token.data)``: eval executes arbitrary
    Python, and also rejects Lua-legal forms such as leading-zero
    integers ("007", a SyntaxError under Python 3's eval).
    """
    text = self.lex.next_token_of_kind(lexer.TokenKind.NUMBER).data
    try:
        val = int(text, 0)  # decimal and 0x... hexadecimal integers
    except ValueError:
        try:
            val = int(text, 10)  # leading-zero decimals, e.g. "007"
        except ValueError:
            val = float(text)  # fractional / exponent forms
    if isinstance(val, int):
        return ast.IntegerExp(val)
    return ast.FloatExp(val)
def parse_retstat(self):
    """retstat ::= return [explist] [';']"""
    self.lex.next_token_of_kind(lexer.TokenKind.KW_RETURN)
    exps = []
    tok = self.lex.look_ahead()
    # A block terminator or ';' right after `return` means no values.
    if tok.kind != lexer.TokenKind.SEP_SEMI and not self.is_block_end(tok.kind):
        exps = self.parse_exp_list()
    return ast.RetStat(exps)
def parse_block(self):
    """block ::= {stat} [retstat]"""
    block = ast.Block(self.parse_stats())
    # An optional return statement may only close the block.
    if self.lex.look_ahead().kind == lexer.TokenKind.KW_RETURN:
        block.append_stat(self.parse_retstat())
    return block
def parse_goto_stat(self):
    """goto Name"""
    self.lex.next_token_of_kind(lexer.TokenKind.KW_GOTO)
    label_token = self.lex.next_token_of_kind(lexer.TokenKind.IDENTIFIER)
    # NOTE(review): the whole token (not token.data) is passed here,
    # unlike other statements — confirm ast.GotoStat expects a token.
    return ast.GotoStat(label_token)
def parse_do_stat(self):
    """do block end"""
    self.lex.next_token_of_kind(lexer.TokenKind.KW_DO)
    body = self.parse_block()
    self.lex.next_token_of_kind(lexer.TokenKind.KW_END)
    return ast.DoStat(body)
def parse_while_stat(self):
    """while exp do block end"""
    self.lex.next_token_of_kind(lexer.TokenKind.KW_WHILE)
    condition = self.parse_exp(0)[1]
    self.lex.next_token_of_kind(lexer.TokenKind.KW_DO)
    body = self.parse_block()
    self.lex.next_token_of_kind(lexer.TokenKind.KW_END)
    return ast.WhileStat(condition, body)
def parse_repeat_stat(self):
    """repeat block until exp"""
    self.lex.next_token_of_kind(lexer.TokenKind.KW_REPEAT)
    body = self.parse_block()
    self.lex.next_token_of_kind(lexer.TokenKind.KW_UNTIL)
    condition = self.parse_exp(0)[1]
    return ast.RepeatStat(condition, body)
def parse_if_stat(self):
    """if exp then block {elseif exp then block} [else block] end

    The `else` branch is stored as a final arm whose condition is the
    constant `true`.
    """
    self.lex.next_token_of_kind(lexer.TokenKind.KW_IF)
    conditions = [self.parse_exp(0)[1]]
    self.lex.next_token_of_kind(lexer.TokenKind.KW_THEN)
    blocks = [self.parse_block()]
    while self.lex.look_ahead().kind == lexer.TokenKind.KW_ELSEIF:
        self.lex.next_token_of_kind(lexer.TokenKind.KW_ELSEIF)
        conditions.append(self.parse_exp(0)[1])
        self.lex.next_token_of_kind(lexer.TokenKind.KW_THEN)
        blocks.append(self.parse_block())
    if self.lex.look_ahead().kind == lexer.TokenKind.KW_ELSE:
        self.lex.next_token_of_kind(lexer.TokenKind.KW_ELSE)
        conditions.append(ast.BoolConstExp(True))
        blocks.append(self.parse_block())
    self.lex.next_token_of_kind(lexer.TokenKind.KW_END)
    return ast.IfStat(conditions, blocks)
def parse_for_stat(self):
    """Dispatch numeric `for i = ...` vs generic `for k, v in ...`."""
    self.lex.next_token_of_kind(lexer.TokenKind.KW_FOR)
    first_name = ast.NameExp(self.lex.next_token_of_kind(lexer.TokenKind.IDENTIFIER).data)
    if self.lex.look_ahead().kind == lexer.TokenKind.OP_ASSIGN:
        return self.finish_for_num_stat(first_name)
    return self.finish_for_in_stat(first_name)
def finish_for_num_stat(self, var):
    """for Name '=' exp ',' exp [',' exp] do block end"""
    self.lex.next_token_of_kind(lexer.TokenKind.OP_ASSIGN)
    init_exp = self.parse_exp(0)[1]
    self.lex.next_token_of_kind(lexer.TokenKind.SEP_COMMA)
    limit_exp = self.parse_exp(0)[1]
    if self.lex.look_ahead().kind == lexer.TokenKind.SEP_COMMA:
        self.lex.next_token()
        step_exp = self.parse_exp(0)[1]
    else:
        step_exp = None  # no explicit step in the source
    self.lex.next_token_of_kind(lexer.TokenKind.KW_DO)
    body = self.parse_block()
    self.lex.next_token_of_kind(lexer.TokenKind.KW_END)
    return ast.ForNumStat(var, init_exp, limit_exp, step_exp, body)
def finish_for_in_stat(self, name):
    """for namelist in explist do block end (first name already consumed)."""
    variables = self.parse_name_list(name)
    self.lex.next_token_of_kind(lexer.TokenKind.KW_IN)
    iterators = self.parse_exp_list()
    self.lex.next_token_of_kind(lexer.TokenKind.KW_DO)
    body = self.parse_block()
    self.lex.next_token_of_kind(lexer.TokenKind.KW_END)
    return ast.ForInStat(variables, iterators, body)
def parse_func_def_stat(self):
    """function funcname funcbody — desugared into an assignment."""
    self.lex.next_token_of_kind(lexer.TokenKind.KW_FUNCTION)
    name_exp, has_colon = self.parse_func_name_exp()
    body_exp = self.parse_func_body_exp(has_colon)
    return ast.AssignStat([name_exp], [body_exp])
def parse_parlist(self):
    """parlist ::= namelist [',' '...'] | '...'
    namelist ::= Name {',' Name}

    Returns (param_names, is_vararg); names are StringExp nodes.
    """
    first_kind = self.lex.look_ahead().kind
    if first_kind == lexer.TokenKind.SEP_RPAREN:
        return [], False
    if first_kind == lexer.TokenKind.VARARG:
        self.lex.next_token()
        return [], True

    def take_name():
        return ast.StringExp(self.lex.next_token_of_kind(lexer.TokenKind.IDENTIFIER).data)

    params = [take_name()]
    is_var_arg = False
    while self.lex.look_ahead().kind == lexer.TokenKind.SEP_COMMA:
        self.lex.next_token()
        if self.lex.look_ahead().kind == lexer.TokenKind.IDENTIFIER:
            params.append(take_name())
        else:
            # '...' may only appear last.
            self.lex.next_token_of_kind(lexer.TokenKind.VARARG)
            is_var_arg = True
            break
    return params, is_var_arg
def parse_func_body_exp(self, has_colon):
    """funcbody ::= '(' [parlist] ')' block end

    When defined via ':', an implicit leading `self` parameter is added.
    """
    self.lex.next_token_of_kind(lexer.TokenKind.SEP_LPAREN)
    params, is_var_arg = self.parse_parlist()
    self.lex.next_token_of_kind(lexer.TokenKind.SEP_RPAREN)
    if has_colon:
        params.insert(0, ast.StringExp('self'))
    body = self.parse_block()
    self.lex.next_token_of_kind(lexer.TokenKind.KW_END)
    return ast.FunctionDefExp(params, is_var_arg, body)
def parse_func_name_exp(self):
    """funcname ::= Name {'.' Name} [':' Name]

    Returns (target_exp, has_colon); a ':' marks a method definition.
    """
    def take_name_data():
        return self.lex.next_token_of_kind(lexer.TokenKind.IDENTIFIER).data

    target = ast.NameExp(take_name_data())
    while self.lex.look_ahead().kind == lexer.TokenKind.SEP_DOT:
        self.lex.next_token()
        target = ast.TableAccessExp(target, ast.StringExp(take_name_data()))
    has_colon = False
    if self.lex.look_ahead().kind == lexer.TokenKind.SEP_COLON:
        self.lex.next_token()
        target = ast.TableAccessExp(target, ast.StringExp(take_name_data()))
        has_colon = True
    return target, has_colon
def parse_local_def_stat(self):
    """local function Name funcbody | local namelist ['=' explist]"""
    self.lex.next_token_of_kind(lexer.TokenKind.KW_LOCAL)
    if self.lex.look_ahead().kind == lexer.TokenKind.KW_FUNCTION:
        return self.parse_local_func_def_stat()
    return self.parse_local_var_decl_stat()
def parse_name_list(self, name=None):
    """namelist ::= Name {',' Name}

    If *name* is given it seeds the list (used by for-in, where the first
    identifier was already consumed). NOTE(review): the seed may then be a
    NameExp while the remaining entries are StringExp — confirm downstream
    consumers accept both node types.
    """
    if name is None:
        names = [ast.StringExp(self.lex.next_token_of_kind(lexer.TokenKind.IDENTIFIER).data)]
    else:
        names = [name]
    while self.lex.look_ahead().kind == lexer.TokenKind.SEP_COMMA:
        self.lex.next_token()
        names.append(ast.StringExp(self.lex.next_token_of_kind(lexer.TokenKind.IDENTIFIER).data))
    return names
def parse_local_func_def_stat(self):
    """local function Name funcbody"""
    self.lex.next_token_of_kind(lexer.TokenKind.KW_FUNCTION)
    func_name = ast.StringExp(self.lex.next_token_of_kind(lexer.TokenKind.IDENTIFIER).data)
    func_body = self.parse_func_body_exp(False)
    return ast.LocalDeclStat([func_name], [func_body])
def parse_local_var_decl_stat(self):
    """local namelist ['=' explist]"""
    names = self.parse_name_list()
    values = []
    if self.lex.look_ahead().kind == lexer.TokenKind.OP_ASSIGN:
        self.lex.next_token_of_kind(lexer.TokenKind.OP_ASSIGN)
        values = self.parse_exp_list()
    return ast.LocalDeclStat(names, values)
# var ::= Name | prefixexp '[' exp ']' | prefixexp '.' Name
# functioncall ::= prefixexp args | prefixexp ':' Name args
# prefixexp ::= var | functioncall | '(' exp ')'
# prefixexp ::= prefixexp args
#             | prefixexp ':' Name args
#             | prefixexp '[' exp ']'
#             | prefixexp '.' Name
#             | '(' exp ')'
#             | Name
# args ::= '(' [explist] ')' | tableconstructor | LiteralString
# tableconstructor ::= '{' [fieldlist] '}'
def parse_prefix_exp(self):
    """Parse a prefix expression: a Name or parenthesized expression head,
    followed by any chain of '.', ':', '[...]' accesses and call args."""
    look_token = self.lex.look_ahead()
    if look_token.kind == lexer.TokenKind.SEP_LPAREN:
        self.lex.next_token()
        exp = self.parse_exp(0)[1]
        self.lex.next_token_of_kind(lexer.TokenKind.SEP_RPAREN)
    else:
        name = self.lex.next_token_of_kind(lexer.TokenKind.IDENTIFIER)
        exp = ast.NameExp(name.data)
    # Greedily extend the head with accesses/calls until none apply.
    while True:
        look_token = self.lex.look_ahead()
        if look_token.kind == lexer.TokenKind.SEP_DOT:
            self.lex.next_token()
            idx_exp = ast.StringExp(self.lex.next_token_of_kind(lexer.TokenKind.IDENTIFIER).data)
            exp = ast.TableAccessExp(exp, idx_exp)
        elif look_token.kind == lexer.TokenKind.SEP_COLON:
            # Method-call sugar: obj:m(...) passes obj as the first argument.
            self.lex.next_token()
            args_exp = [exp]
            idx_exp = ast.StringExp(self.lex.next_token_of_kind(lexer.TokenKind.IDENTIFIER).data)
            exp = ast.TableAccessExp(exp, idx_exp)
            args_exp.extend(self.parse_func_args())
            exp = ast.FunctionCallExp(exp, args_exp)
        elif look_token.kind in [lexer.TokenKind.SEP_LPAREN, lexer.TokenKind.SEP_LCURLY, lexer.TokenKind.STRING]:
            # Plain call: parenthesized args, table-constructor, or string.
            args_exp = self.parse_func_args()
            exp = ast.FunctionCallExp(exp, args_exp)
        elif look_token.kind == lexer.TokenKind.SEP_LBRACK:
            self.lex.next_token()
            idx_exp = self.parse_exp(0)[1]
            exp = ast.TableAccessExp(exp, idx_exp)
            self.lex.next_token_of_kind(lexer.TokenKind.SEP_RBRACK)
        else:
            break
    return exp
def parse_assign_or_func_call_stat(self):
    """Statement beginning with a prefixexp: assignment or bare call."""
    exp = self.parse_prefix_exp()
    next_token = self.lex.look_ahead()
    if next_token.kind in (lexer.TokenKind.OP_ASSIGN, lexer.TokenKind.SEP_COMMA):
        return self.finsh_assign_stat(exp)  # sic: helper is named "finsh"
    if isinstance(exp, ast.FunctionCallExp):
        # A prefixexp standing alone is only legal as a function call.
        return exp
    raise Exception("syntax error near '%s'" % next_token)
def check_var(self, exp):
    """Ensure *exp* may appear on the left of an assignment.

    Only bare names and table accesses are assignable; anything else is
    a syntax error.
    """
    if isinstance(exp, (ast.TableAccessExp, ast.NameExp)):
        return exp
    # Bug fix: the original raised with the undefined name `token`,
    # producing a NameError instead of a syntax error message.
    raise Exception("syntax error near '%s'" % exp)
# varlist ‘=’ explist
# varlist ::= var {‘,’ var}
# var ::= Name | prefixexp ‘[’ exp ‘]’ | prefixexp ‘.’ Name
def finsh_assign_stat(self, first_var):
var_list = | |
SoftwarePlanVersion(
plan=self.plan,
role=role
)
self.new_product_rate.save()
new_version.product_rate = self.new_product_rate
new_version.save()
for feature_rate in self.new_feature_rates:
feature_rate.save()
new_version.feature_rates.add(feature_rate)
new_version.save()
messages.success(
request,
'The version for %s Software Plan was successfully updated.' % new_version.plan.name
)
if self.plan.is_customer_software_plan and self.cleaned_data['upgrade_subscriptions']:
upgrade_subscriptions_to_latest_plan_version(
self.plan_version,
self.admin_web_user,
upgrade_note="Immediately upgraded when creating a new version."
)
messages.success(
request,
"All subscriptions on the previous version of this plan were "
"also upgraded to this new version."
)
class FeatureRateForm(forms.ModelForm):
    """
    A form for creating a new FeatureRate.

    Rendered with crispy-forms inside a larger page form; each field
    carries a knockout ``data_bind`` so rates can be edited client-side.
    """
    # feature id will point to a select2 field, hence the CharField here.
    feature_id = forms.CharField(
        required=False,
        widget=forms.HiddenInput,
    )
    # Blank when the rate has not been persisted yet (see is_new()).
    rate_id = forms.CharField(
        required=False,
        widget=forms.HiddenInput,
    )

    class Meta(object):
        model = FeatureRate
        fields = ['monthly_fee', 'monthly_limit', 'per_excess_fee']

    def __init__(self, data=None, *args, **kwargs):
        super(FeatureRateForm, self).__init__(data, *args, **kwargs)
        self.helper = FormHelper()
        self.helper.label_class = 'col-sm-3 col-md-2'
        self.helper.field_class = 'col-sm-9 col-md-8 col-lg-6'
        # Rendered inside an enclosing form, so crispy must not emit
        # its own <form> tag.
        self.helper.form_tag = False
        self.helper.layout = crispy.Layout(
            crispy.HTML("""
                <h4><span data-bind="text: name"></span>
                <span class="label label-default"
                    style="display: inline-block; margin: 0 10px;"
                    data-bind="text: feature_type"></span></h4>
                <hr />
            """),
            crispy.Field('feature_id', data_bind="value: feature_id"),
            crispy.Field('rate_id', data_bind="value: rate_id"),
            crispy.Field('monthly_fee', data_bind="value: monthly_fee"),
            crispy.Field('monthly_limit', data_bind="value: monthly_limit"),
            # Excess fees only apply to some feature types; the knockout
            # model controls visibility.
            crispy.Div(
                crispy.Field('per_excess_fee',
                             data_bind="value: per_excess_fee"),
                data_bind="visible: isPerExcessVisible",
            ),
        )

    def is_new(self):
        # A missing rate_id means this rate was never saved.
        return not self['rate_id'].value()

    def get_instance(self, feature):
        # Build an unsaved FeatureRate bound to *feature*; caller saves it.
        instance = self.save(commit=False)
        instance.feature = feature
        return instance
class ProductRateForm(forms.ModelForm):
    """
    A form for creating a new ProductRate.

    Mirrors FeatureRateForm: crispy-rendered, knockout-bound fields.
    """
    # Kept hidden; the knockout model supplies/display the plan name.
    name = forms.CharField(
        required=True,
        widget=forms.HiddenInput,
    )
    # Blank when the rate has not been persisted yet (see is_new()).
    rate_id = forms.CharField(
        required=False,
        widget=forms.HiddenInput,
    )

    class Meta(object):
        model = SoftwareProductRate
        fields = ['monthly_fee', 'name']

    def __init__(self, data=None, *args, **kwargs):
        super(ProductRateForm, self).__init__(data, *args, **kwargs)
        self.helper = FormHelper()
        self.helper.label_class = 'col-sm-3 col-md-2'
        self.helper.field_class = 'col-sm-9 col-md-8 col-lg-6'
        # Rendered inside an enclosing form: suppress crispy's <form> tag.
        self.helper.form_tag = False
        self.helper.layout = crispy.Layout(
            crispy.HTML("""
                <h4><span data-bind="text: name"></span></h4>
                <hr />
            """),
            crispy.Field('monthly_fee', data_bind="value: monthly_fee"),
        )

    def is_new(self):
        # A missing rate_id means this rate was never saved.
        return not self['rate_id'].value()

    def get_instance(self):
        # Unsaved SoftwareProductRate; the caller is responsible for saving.
        return self.save(commit=False)
class EnterprisePlanContactForm(forms.Form):
    """
    Contact form through which a project requests an Enterprise plan
    quote; submissions are emailed to the billing team.
    """
    name = forms.CharField(
        label=ugettext_noop("Name")
    )
    company_name = forms.CharField(
        required=False,
        label=ugettext_noop("Company / Organization")
    )
    message = forms.CharField(
        required=False,
        label=ugettext_noop("Message"),
        widget=forms.Textarea
    )

    def __init__(self, domain, web_user, data=None, *args, **kwargs):
        # domain: project space name; web_user: requesting user (its email
        # is included in the sales request).
        self.domain = domain
        self.web_user = web_user
        super(EnterprisePlanContactForm, self).__init__(data, *args, **kwargs)
        # Imported here to avoid a circular import with the views module.
        from corehq.apps.domain.views.accounting import SelectPlanView
        self.helper = FormHelper()
        self.helper.label_class = 'col-sm-3 col-md-2'
        self.helper.field_class = 'col-sm-9 col-md-8 col-lg-6'
        self.helper.form_class = "form-horizontal"
        self.helper.layout = crispy.Layout(
            'name',
            'company_name',
            'message',
            hqcrispy.FormActions(
                hqcrispy.LinkButton(
                    _("Select different plan"),
                    reverse(SelectPlanView.urlname, args=[self.domain]),
                    css_class="btn btn-default"
                ),
                StrictButton(
                    _("Request Quote"),
                    type="submit",
                    css_class="btn-primary",
                ),
            )
        )

    def send_message(self):
        # Email the quote request (HTML + plain text) to the billing team.
        subject = "[Enterprise Plan Request] %s" % self.domain
        context = {
            'name': self.cleaned_data['name'],
            'company': self.cleaned_data['company_name'],
            'message': self.cleaned_data['message'],
            'domain': self.domain,
            'email': self.web_user.email
        }
        html_content = render_to_string('accounting/email/sales_request.html', context)
        text_content = """
        Email: %(email)s
        Name: %(name)s
        Company: %(company)s
        Domain: %(domain)s
        Message:
        %(message)s
        """ % context
        # Sent asynchronously via celery.
        send_html_email_async.delay(subject, settings.BILLING_EMAIL,
                                    html_content, text_content,
                                    email_from=settings.DEFAULT_FROM_EMAIL)
class AnnualPlanContactForm(forms.Form):
    """
    Contact form through which a project requests an annual plan;
    submissions are emailed to the billing team. Nearly identical to
    EnterprisePlanContactForm except for the back-link and subject line.
    """
    name = forms.CharField(
        label=ugettext_noop("Name")
    )
    company_name = forms.CharField(
        required=False,
        label=ugettext_noop("Company / Organization")
    )
    message = forms.CharField(
        required=False,
        label=ugettext_noop("Message"),
        widget=forms.Textarea
    )

    def __init__(self, domain, web_user, on_annual_plan, data=None, *args, **kwargs):
        # on_annual_plan: when True the back-button returns to the current
        # subscription page instead of the plan selector.
        self.domain = domain
        self.web_user = web_user
        super(AnnualPlanContactForm, self).__init__(data, *args, **kwargs)
        # Imported here to avoid a circular import with the views module.
        from corehq.apps.domain.views.accounting import SelectPlanView, DomainSubscriptionView
        self.helper = FormHelper()
        self.helper.label_class = 'col-sm-3 col-md-2'
        self.helper.field_class = 'col-sm-9 col-md-8 col-lg-6'
        self.helper.form_class = "form-horizontal"
        if on_annual_plan:
            back_button_text = "Back to my Subscription"
            urlname = DomainSubscriptionView.urlname
        else:
            back_button_text = "Select different plan"
            urlname = SelectPlanView.urlname
        self.helper.layout = crispy.Layout(
            'name',
            'company_name',
            'message',
            hqcrispy.FormActions(
                hqcrispy.LinkButton(
                    _(back_button_text),
                    reverse(urlname, args=[self.domain]),
                    css_class="btn btn-default"
                ),
                StrictButton(
                    _("Submit"),
                    type="submit",
                    css_class="btn-primary",
                ),
            )
        )

    def send_message(self):
        # Email the request (HTML + plain text) to the billing team.
        subject = "[Annual Plan Request] %s" % self.domain
        context = {
            'name': self.cleaned_data['name'],
            'company': self.cleaned_data['company_name'],
            'message': self.cleaned_data['message'],
            'domain': self.domain,
            'email': self.web_user.email
        }
        html_content = render_to_string('accounting/email/sales_request.html', context)
        text_content = """
        Email: %(email)s
        Name: %(name)s
        Company: %(company)s
        Domain: %(domain)s
        Message:
        %(message)s
        """ % context
        # Sent asynchronously via celery.
        send_html_email_async.delay(subject, settings.BILLING_EMAIL,
                                    html_content, text_content,
                                    email_from=settings.DEFAULT_FROM_EMAIL)
class TriggerInvoiceForm(forms.Form):
    """
    Accounting-admin form that (re)generates a domain's invoice for a past
    statement period, optionally overriding the recorded number of users
    (testing only).
    """
    month = forms.ChoiceField(label="Statement Period Month")
    year = forms.ChoiceField(label="Statement Period Year")
    domain = forms.CharField(label="Project Space", widget=forms.Select(choices=[]))
    num_users = forms.IntegerField(
        label="Number of Users",
        required=False,
        help_text="This is part of accounting tests and overwrites the "
                  "DomainUserHistory recorded for this month. Please leave "
                  "this blank to use what is already in the system."
    )

    def __init__(self, *args, **kwargs):
        self.show_testing_options = kwargs.pop('show_testing_options')
        super(TriggerInvoiceForm, self).__init__(*args, **kwargs)
        # Default the period to last month — the most recent closed period.
        today = datetime.date.today()
        one_month_ago = today - relativedelta(months=1)
        self.fields['month'].initial = one_month_ago.month
        self.fields['month'].choices = list(MONTHS.items())
        self.fields['year'].initial = one_month_ago.year
        self.fields['year'].choices = [
            (y, y) for y in range(one_month_ago.year, 2012, -1)
        ]
        self.helper = FormHelper()
        self.helper.label_class = 'col-sm-3 col-md-2'
        self.helper.field_class = 'col-sm-9 col-md-8 col-lg-6'
        self.helper.form_class = 'form form-horizontal'
        details = [
            'Trigger Invoice Details',
            crispy.Field('month', css_class="input-large"),
            crispy.Field('year', css_class="input-large"),
            crispy.Field(
                'domain',
                css_class="input-xxlarge accounting-async-select2",
                placeholder="Search for Project"
            )
        ]
        if self.show_testing_options:
            details.append(crispy.Field('num_users', css_class='input_large'))
        else:
            # Drop the field entirely so it can neither render nor validate.
            del self.fields['num_users']
        self.helper.layout = crispy.Layout(
            crispy.Fieldset(*details),
            hqcrispy.FormActions(
                StrictButton(
                    "Trigger Invoice",
                    css_class="btn-primary disable-on-submit",
                    type="submit",
                ),
            )
        )

    @transaction.atomic
    def trigger_invoice(self):
        """Generate the invoice for the selected domain and period.

        Raises InvoiceError if conflicting invoices already exist.
        """
        year = int(self.cleaned_data['year'])
        month = int(self.cleaned_data['month'])
        invoice_start, invoice_end = get_first_last_days(year, month)
        domain_obj = Domain.get_by_name(self.cleaned_data['domain'])
        self.clean_previous_invoices(invoice_start, invoice_end, domain_obj.name)

        if self.show_testing_options and self.cleaned_data['num_users']:
            # Testing hook: replace the month's recorded user count.
            num_users = int(self.cleaned_data['num_users'])
            existing_histories = DomainUserHistory.objects.filter(
                domain=domain_obj.name,
                record_date__gte=invoice_start,
                record_date__lte=invoice_end,
            )
            if existing_histories.exists():
                existing_histories.all().delete()
            DomainUserHistory.objects.create(
                domain=domain_obj.name,
                record_date=invoice_end,
                num_users=num_users
            )

        invoice_factory = DomainInvoiceFactory(
            invoice_start, invoice_end, domain_obj, recipients=[settings.ACCOUNTS_EMAIL]
        )
        invoice_factory.create_invoices()

    @staticmethod
    def clean_previous_invoices(invoice_start, invoice_end, domain_name):
        """Raise InvoiceError if invoices overlapping the period exist."""
        prev_invoices = Invoice.objects.filter(
            date_start__lte=invoice_end, date_end__gte=invoice_start,
            subscription__subscriber__domain=domain_name
        )
        if prev_invoices.count() > 0:
            # Imported here to avoid a circular import with the views module.
            from corehq.apps.accounting.views import InvoiceSummaryView
            raise InvoiceError(
                "Invoices exist that were already generated with this same "
                "criteria. You must manually suppress these invoices: "
                "{invoice_list}".format(
                    invoice_list=', '.join(
                        ['<a href="{edit_url}">{name}</a>'.format(
                            edit_url=reverse(InvoiceSummaryView.urlname,
                                             args=(x.id,)),
                            name=x.invoice_number
                        ) for x in prev_invoices.all()]
                    ),
                )
            )

    def clean(self):
        """Reject statement periods that are not strictly in the past.

        Bug fix: read month/year via ``cleaned_data.get`` so an invalid
        choice no longer raises KeyError before form validation finishes.
        """
        cleaned_data = super(TriggerInvoiceForm, self).clean()
        year = cleaned_data.get('year')
        month = cleaned_data.get('month')
        if year and month:
            today = datetime.date.today()
            if (int(year), int(month)) >= (today.year, today.month):
                raise ValidationError('Statement period must be in the past')
        return cleaned_data
class TriggerCustomerInvoiceForm(forms.Form):
    """
    Accounting-admin form that (re)generates a customer-level invoice for
    a billing account, respecting the account's invoicing cadence
    (monthly, quarterly, or yearly).
    """
    month = forms.ChoiceField(label="Statement Period Month")
    year = forms.ChoiceField(label="Statement Period Year")
    customer_account = forms.CharField(label="Billing Account", widget=forms.Select(choices=[]))

    def __init__(self, *args, **kwargs):
        super(TriggerCustomerInvoiceForm, self).__init__(*args, **kwargs)
        # Default the period to last month — the most recent closed period.
        today = datetime.date.today()
        one_month_ago = today - relativedelta(months=1)
        self.fields['month'].initial = one_month_ago.month
        self.fields['month'].choices = list(MONTHS.items())
        self.fields['year'].initial = one_month_ago.year
        self.fields['year'].choices = [
            (y, y) for y in range(one_month_ago.year, 2012, -1)
        ]
        self.helper = FormHelper()
        self.helper.label_class = 'col-sm-3 col-md-2'
        self.helper.field_class = 'col-sm-9 col-md-8 col-lg-6'
        self.helper.form_class = 'form form-horizontal'
        self.helper.layout = crispy.Layout(
            crispy.Fieldset(
                'Trigger Customer Invoice Details',
                crispy.Field('month', css_class="input-large"),
                crispy.Field('year', css_class="input-large"),
                crispy.Field('customer_account', css_class="input-xxlarge accounting-async-select2",
                             placeholder="Search for Customer Billing Account")
            ),
            hqcrispy.FormActions(
                StrictButton(
                    "Trigger Customer Invoice",
                    css_class="btn-primary disable-on-submit",
                    type="submit",
                ),
            )
        )

    @transaction.atomic
    def trigger_customer_invoice(self):
        """Generate the customer invoice for the selected account/period.

        Raises InvoiceError when the account does not exist, the month is
        not valid for the account's cadence, or overlapping invoices exist.
        """
        year = int(self.cleaned_data['year'])
        month = int(self.cleaned_data['month'])
        try:
            account = BillingAccount.objects.get(name=self.cleaned_data['customer_account'])
            invoice_start, invoice_end = self.get_invoice_dates(account, year, month)
            self.clean_previous_invoices(invoice_start, invoice_end, account)
            invoice_factory = CustomerAccountInvoiceFactory(
                date_start=invoice_start,
                date_end=invoice_end,
                account=account,
                recipients=[settings.ACCOUNTS_EMAIL]
            )
            invoice_factory.create_invoice()
        except BillingAccount.DoesNotExist:
            raise InvoiceError(
                "There is no Billing Account associated with %s" % self.cleaned_data['customer_account']
            )

    @staticmethod
    def clean_previous_invoices(invoice_start, invoice_end, account):
        """Raise InvoiceError if customer invoices overlapping the period exist."""
        prev_invoices = CustomerInvoice.objects.filter(
            date_start__lte=invoice_end,
            date_end__gte=invoice_start,
            account=account
        )
        if prev_invoices:
            # Imported here to avoid a circular import with the views module.
            from corehq.apps.accounting.views import CustomerInvoiceSummaryView
            raise InvoiceError(
                "Invoices exist that were already generated with this same "
                "criteria. You must manually suppress these invoices: "
                "{invoice_list}".format(
                    invoice_list=', '.join(
                        ['<a href="{edit_url}">{name}</a>'.format(
                            edit_url=reverse(CustomerInvoiceSummaryView.urlname, args=(x.id,)),
                            name=x.invoice_number
                        ) for x in prev_invoices]
                    ),
                )
            )

    def clean(self):
        """Reject statement periods that are not strictly in the past.

        Bug fix: read month/year via ``cleaned_data.get`` so an invalid
        choice no longer raises KeyError before form validation finishes.
        """
        cleaned_data = super(TriggerCustomerInvoiceForm, self).clean()
        year = cleaned_data.get('year')
        month = cleaned_data.get('month')
        if year and month:
            today = datetime.date.today()
            if (int(year), int(month)) >= (today.year, today.month):
                raise ValidationError('Statement period must be in the past')
        return cleaned_data

    def get_invoice_dates(self, account, year, month):
        """Return (start, end) dates for the account's invoicing cadence.

        Yearly accounts may only be invoiced in December; quarterly
        accounts only in the last month of a quarter. Monthly (default)
        accounts use the calendar month.
        """
        if account.invoicing_plan == InvoicingPlan.YEARLY:
            if month == 12:
                # Invoice covers the whole calendar year.
                return datetime.date(year, 1, 1), datetime.date(year, 12, 31)
            raise InvoiceError(
                "%s is set to be invoiced yearly, and you may not invoice in this month. "
                "You must select December in the year for which you are triggering an annual invoice."
                % self.cleaned_data['customer_account']
            )
        if account.invoicing_plan == InvoicingPlan.QUARTERLY:
            quarters = {
                3: (datetime.date(year, 1, 1), datetime.date(year, 3, 31)),
                6: (datetime.date(year, 4, 1), datetime.date(year, 6, 30)),
                9: (datetime.date(year, 7, 1), datetime.date(year, 9, 30)),
                12: (datetime.date(year, 10, 1), datetime.date(year, 12, 31)),
            }
            if month in quarters:
                return quarters[month]
            raise InvoiceError(
                "%s is set to be invoiced quarterly, and you may not invoice in this month. "
                "You must select the last month of a quarter to trigger a quarterly invoice."
                % self.cleaned_data['customer_account']
            )
        # Monthly cadence: the selected calendar month.
        return get_first_last_days(year, month)
class TriggerBookkeeperEmailForm(forms.Form):
month = forms.ChoiceField(label="Invoice Month")
year = forms.ChoiceField(label="Invoice Year")
emails = forms.CharField(label="Email To", widget=forms.SelectMultiple(choices=[]),)
def __init__(self, *args, **kwargs):
super(TriggerBookkeeperEmailForm, self).__init__(*args, **kwargs)
today = datetime.date.today()
self.fields['month'].initial = today.month
self.fields['month'].choices = list(MONTHS.items())
self.fields['year'].initial = today.year
self.fields['year'].choices = [
(y, y) for y in range(today.year, 2012, -1)
]
self.helper = FormHelper()
self.helper.label_class = 'col-sm-3 col-md-2'
self.helper.field_class = 'col-sm-9 col-md-8 col-lg-6'
self.helper.form_class = 'form form-horizontal'
self.helper.layout | |
self._support_selection_criteria()
class BinningProcess(Base, BaseEstimator, BaseBinningProcess):
"""Binning process to compute optimal binning of variables in a dataset,
given a binary, continuous or multiclass target dtype.
Parameters
----------
variable_names : array-like
List of variable names.
max_n_prebins : int (default=20)
The maximum number of bins after pre-binning (prebins).
    min_prebin_size : float (default=0.05)
        The fraction of minimum number of records for each prebin.
min_n_bins : int or None, optional (default=None)
The minimum number of bins. If None, then ``min_n_bins`` is
a value in ``[0, max_n_prebins]``.
max_n_bins : int or None, optional (default=None)
The maximum number of bins. If None, then ``max_n_bins`` is
a value in ``[0, max_n_prebins]``.
min_bin_size : float or None, optional (default=None)
The fraction of minimum number of records for each bin. If None,
``min_bin_size = min_prebin_size``.
max_bin_size : float or None, optional (default=None)
The fraction of maximum number of records for each bin. If None,
``max_bin_size = 1.0``.
max_pvalue : float or None, optional (default=0.05)
The maximum p-value among bins.
max_pvalue_policy : str, optional (default="consecutive")
The method to determine bins not satisfying the p-value constraint.
Supported methods are "consecutive" to compare consecutive bins and
"all" to compare all bins.
selection_criteria : dict or None (default=None)
Variable selection criteria. See notes.
.. versionadded:: 0.6.0
fixed_variables : array-like or None
List of variables to be fixed. The binning process will retain these
variables if the selection criteria is not satisfied.
.. versionadded:: 0.12.1
special_codes : array-like or None, optional (default=None)
List of special codes. Use special codes to specify the data values
that must be treated separately.
split_digits : int or None, optional (default=None)
The significant digits of the split points. If ``split_digits`` is set
to 0, the split points are integers. If None, then all significant
digits in the split points are considered.
    categorical_variables : array-like or None, optional (default=None)
        List of numerical variables to be considered categorical.
        These are nominal variables. Not applicable when target type is
        multiclass.
binning_fit_params : dict or None, optional (default=None)
Dictionary with optimal binning fitting options for specific variables.
Example: ``{"variable_1": {"max_n_bins": 4}}``.
binning_transform_params : dict or None, optional (default=None)
Dictionary with optimal binning transform options for specific
variables. Example ``{"variable_1": {"metric": "event_rate"}}``.
n_jobs : int or None, optional (default=None)
Number of cores to run in parallel while binning variables.
``None`` means 1 core. ``-1`` means using all processors.
.. versionadded:: 0.7.1
verbose : bool (default=False)
Enable verbose output.
Notes
-----
Parameter ``selection_criteria`` allows to specify criteria for
variable selection. The input is a dictionary as follows
.. code::
selection_criteria = {
"metric_1":
{
"min": 0, "max": 1, "strategy": "highest", "top": 0.25
},
"metric_2":
{
"min": 0.02
}
}
where several metrics can be combined. For example, above dictionary
indicates that top 25% variables with "metric_1" in [0, 1] and "metric_2"
greater or equal than 0.02 are selected. Supported key values are:
* keys ``min`` and ``max`` support numerical values.
* key ``strategy`` supports options "highest" and "lowest".
* key ``top`` supports an integer or decimal (percentage).
.. warning::
If the binning process instance is going to be saved, do not pass the
option ``"solver": "mip"`` via the ``binning_fit_params`` parameter.
"""
    def __init__(self, variable_names, max_n_prebins=20, min_prebin_size=0.05,
                 min_n_bins=None, max_n_bins=None, min_bin_size=None,
                 max_bin_size=None, max_pvalue=None,
                 max_pvalue_policy="consecutive", selection_criteria=None,
                 fixed_variables=None, categorical_variables=None,
                 special_codes=None, split_digits=None,
                 binning_fit_params=None, binning_transform_params=None,
                 n_jobs=None, verbose=False):
        # scikit-learn convention: __init__ only stores the constructor
        # parameters verbatim; validation and any processing happen at fit
        # time so that get_params()/set_params() round-trip cleanly.
        self.variable_names = variable_names
        self.max_n_prebins = max_n_prebins
        self.min_prebin_size = min_prebin_size
        self.min_n_bins = min_n_bins
        self.max_n_bins = max_n_bins
        self.min_bin_size = min_bin_size
        self.max_bin_size = max_bin_size
        self.max_pvalue = max_pvalue
        self.max_pvalue_policy = max_pvalue_policy
        self.selection_criteria = selection_criteria
        self.fixed_variables = fixed_variables
        self.binning_fit_params = binning_fit_params
        self.binning_transform_params = binning_transform_params
        self.special_codes = special_codes
        self.split_digits = split_digits
        self.categorical_variables = categorical_variables
        self.n_jobs = n_jobs
        self.verbose = verbose
        # Auxiliary state, populated during fit()/fit_disk()/fit_from_dict().
        self._n_samples = None        # number of training samples
        self._n_variables = None      # total number of variables processed
        self._target_dtype = None     # binary/continuous/multiclass (see class docstring)
        self._n_numerical = None      # count of numerical variables
        self._n_categorical = None    # count of categorical variables
        self._n_selected = None       # count of variables passing the selection criteria
        self._binned_variables = {}   # variable name -> fitted OptimalBinning object
        self._variable_dtypes = {}    # variable name -> detected dtype
        self._variable_stats = {}     # variable name -> summary statistics
        self._support = None          # presumably a selected-variable mask -- confirm in _fit
        # timing
        self._time_total = None       # total fit wall time
        # Lifecycle flags.
        self._is_updated = False
        self._is_fitted = False
def fit(self, X, y, sample_weight=None, check_input=False):
"""Fit the binning process. Fit the optimal binning to all variables
according to the given training data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where n_samples is the number of samples.
.. versionchanged:: 0.4.0
X supports ``numpy.ndarray`` and ``pandas.DataFrame``.
y : array-like of shape (n_samples,)
Target vector relative to x.
sample_weight : array-like of shape (n_samples,) (default=None)
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Only applied if ``prebinning_method="cart"``. This option is only
available for a binary target.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
self : BinningProcess
Fitted binning process.
"""
return self._fit(X, y, sample_weight, check_input)
def fit_disk(self, input_path, target, **kwargs):
"""Fit the binning process according to the given training data on
disk.
Parameters
----------
input_path : str
Any valid string path to a file with extension .csv or .parquet.
target : str
Target column.
**kwargs : keyword arguments
Keyword arguments for ``pandas.read_csv`` or
``pandas.read_parquet``.
Returns
-------
self : BinningProcess
Fitted binning process.
"""
return self._fit_disk(input_path, target, **kwargs)
def fit_from_dict(self, dict_optb):
"""Fit the binning process from a dict of OptimalBinning objects
already fitted.
Parameters
----------
dict_optb : dict
Dictionary with OptimalBinning objects for binary, continuous
or multiclass target. All objects must share the same class.
Returns
-------
self : BinningProcess
Fitted binning process.
"""
return self._fit_from_dict(dict_optb)
def fit_transform(self, X, y, sample_weight=None, metric=None,
metric_special=0, metric_missing=0, show_digits=2,
check_input=False):
"""Fit the binning process according to the given training data, then
transform it.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where n_samples is the number of samples.
y : array-like of shape (n_samples,)
Target vector relative to x.
sample_weight : array-like of shape (n_samples,) (default=None)
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Only applied if ``prebinning_method="cart"``. This option is only
available for a binary target.
metric : str or None, (default=None)
The metric used to transform the input vector. If None, the default
transformation metric for each target type is applied. For binary
target options are: "woe" (default), "event_rate", "indices" and
"bins". For continuous target options are: "mean" (default),
"indices" and "bins". For multiclass target options are:
"mean_woe" (default), "weighted_mean_woe", "indices" and "bins".
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate for a binary target, and any numerical value for other
targets.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate for a binary target, and any numerical value for other
targets.
show_digits : int, optional (default=2)
The number of significant digits of the bin column. Applies when
``metric="bins"``.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
X_new : numpy array, shape = (n_samples, n_features_new)
Transformed array.
"""
return self.fit(X, y, sample_weight, check_input).transform(
X, metric, metric_special, metric_missing, show_digits,
check_input)
def fit_transform_disk(self, input_path, output_path, target, chunksize,
metric=None, metric_special=0, metric_missing=0,
show_digits=2, **kwargs):
"""Fit the binning process according to the given training data on
disk, then transform it and save to comma-separated values (csv) file.
Parameters
----------
input_path : str
Any valid string path to a file with extension .csv.
output_path : str
Any valid string path to a file with extension .csv.
target : str
Target column.
chunksize :
Rows to read, transform and write at a time.
metric : str or None, (default=None)
The metric used to transform the input vector. If None, the default
transformation metric for each target type is applied. For binary
target options are: "woe" (default), "event_rate", "indices" and
"bins". For continuous target options are: "mean" (default),
"indices" and "bins". For multiclass target options are:
"mean_woe" (default), "weighted_mean_woe", "indices" | |
<filename>google-cloud-sdk/lib/googlecloudsdk/third_party/appengine/api/yaml_builder.py<gh_stars>1-10
#
# Copyright 2007 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyYAML event builder handler
Receives events from YAML listener and forwards them to a builder
object so that it can construct a properly structured object.
"""
# WARNING: This file is externally viewable by our users. All comments from
# this file will be stripped. The docstrings will NOT. Do not put sensitive
# information in docstrings. If you must communicate internal information in
# this source file, please place them in comments only.
from googlecloudsdk.third_party.appengine.api import yaml_errors
from googlecloudsdk.third_party.appengine.api import yaml_listener
import yaml
# Token constants used by the handler to track which kind of structural
# scope is currently open on its stack.
_TOKEN_DOCUMENT = 'document'
_TOKEN_SEQUENCE = 'sequence'
_TOKEN_MAPPING = 'mapping'
# _TOKEN_KEY is transient: it only exists between reading a mapping key
# and reading that key's value.
_TOKEN_KEY = 'key'
# The set of all valid structural tokens, used for sanity checking.
_TOKEN_VALUES = frozenset((
    _TOKEN_DOCUMENT,
    _TOKEN_SEQUENCE,
    _TOKEN_MAPPING,
    _TOKEN_KEY))
class Builder(object):
  """Interface for building documents and types from YAML events.

  Implement this interface to create a new builder.  Builders are
  passed to the BuilderHandler and used as a factory and assembler
  for creating concrete representations of YAML files.
  """

  def BuildDocument(self):
    """Build a new document.

    The object built by this method becomes the top level entity
    that the builder handler constructs.  The actual type is
    determined by the sub-class of the Builder class and can essentially
    be any type at all.  This method is always called when the parser
    encounters the start of a new document.

    Returns:
      New object instance representing a concrete document which is
      returned to the user via BuilderHandler.GetResults().
    """

  def InitializeDocument(self, document, value):
    """Initialize a document with the value from the top level of a document.

    This method is called when the root document element is encountered at
    the top level of a YAML document.  It should get called immediately
    after BuildDocument.

    Receiving the None value indicates the empty document.

    Args:
      document: Document as constructed in BuildDocument.
      value: Scalar value to initialize the document with.
    """

  def BuildMapping(self, top_value):
    """Build a new mapping representation.

    Called when a StartMapping event is received.  The type of the object
    is determined by the Builder sub-class.

    Args:
      top_value: Object which will be the new mapping's parent.  Will be the
        object returned from a previous call to BuildMapping or BuildSequence.

    Returns:
      Instance of a new object that represents a mapping type in the target
      model.
    """

  def EndMapping(self, top_value, mapping):
    """A previously constructed mapping scope is at an end.

    Called when the end of a mapping block is encountered.  Useful for
    additional clean up or end of scope validation.

    Args:
      top_value: Value which is the parent of the mapping.
      mapping: Mapping which is at the end of its scope.
    """

  def BuildSequence(self, top_value):
    """Build a new sequence representation.

    Called when a StartSequence event is received.  The type of the object
    is determined by the Builder sub-class.

    Args:
      top_value: Object which will be the new sequence's parent.  Will be the
        object returned from a previous call to BuildMapping or BuildSequence.

    Returns:
      Instance of a new object that represents a sequence type in the target
      model.
    """

  def EndSequence(self, top_value, sequence):
    """A previously constructed sequence scope is at an end.

    Called when the end of a sequence block is encountered.  Useful for
    additional clean up or end of scope validation.

    Args:
      top_value: Value which is the parent of the sequence.
      sequence: Sequence which is at the end of its scope.
    """

  def MapTo(self, subject, key, value):
    """Map a value into a mapping representation.

    Implementation is defined by the sub-class of Builder.

    Args:
      subject: Object that represents the mapping.  Value returned from
        BuildMapping.
      key: Key used to map value to subject.  Can be any scalar value.
      value: Value which is mapped to subject.  Can be any kind of value.
    """

  def AppendTo(self, subject, value):
    """Append a value to a sequence representation.

    Implementation is defined by the sub-class of Builder.

    Args:
      subject: Object that represents the sequence.  Value returned from
        BuildSequence.
      value: Value to be appended to subject.  Can be any kind of value.
    """
class BuilderHandler(yaml_listener.EventHandler):
"""PyYAML event handler used to build objects.
Maintains state information as it receives parse events so that object
nesting is maintained. Uses provided builder object to construct and
assemble objects as it goes.
As it receives events from the YAML parser, it builds a stack of data
representing structural tokens. As the scope of documents, mappings
and sequences end, those token, value pairs are popped from the top of
the stack so that the original scope can resume processing.
A special case is made for the _KEY token. It represents a temporary
value which only occurs inside mappings. It is immediately popped off
the stack when it's associated value is encountered in the parse stream.
It is necessary to do this because the YAML parser does not combine
key and value information in to a single event.
"""
def __init__(self, builder):
"""Initialization for builder handler.
Args:
builder: Instance of Builder class.
Raises:
ListenerConfigurationError when builder is not a Builder class.
"""
if not isinstance(builder, Builder):
raise yaml_errors.ListenerConfigurationError(
'Must provide builder of type yaml_listener.Builder')
self._builder = builder
self._stack = None
self._top = None
self._results = []
def _Push(self, token, value):
"""Push values to stack at start of nesting.
When a new object scope is beginning, will push the token (type of scope)
along with the new objects value, the latter of which is provided through
the various build methods of the builder.
Args:
token: Token indicating the type of scope which is being created; must
belong to _TOKEN_VALUES.
value: Value to associate with given token. Construction of value is
determined by the builder provided to this handler at construction.
"""
# _top is an easy to use reference to the top of the handler stack.
self._top = (token, value)
self._stack.append(self._top)
def _Pop(self):
"""Pop values from stack at end of nesting.
Called to indicate the end of a nested scope.
Returns:
Previously pushed value at the top of the stack.
"""
assert self._stack != [] and self._stack is not None
token, value = self._stack.pop()
# Restore _top variable with previous values.
if self._stack:
self._top = self._stack[-1]
else:
self._top = None
return value
def _HandleAnchor(self, event):
"""Handle anchor attached to event.
Currently will raise an error if anchor is used. Anchors are used to
define a document wide tag to a given value (scalar, mapping or sequence).
Args:
event: Event which may have anchor property set.
Raises:
NotImplementedError if event attempts to use an anchor.
"""
# TODO(user): Implement anchors and aliases.
# If there is an anchor raise an error.
if hasattr(event, 'anchor') and event.anchor is not None:
raise NotImplementedError('Anchors not supported in this handler')
def _HandleValue(self, value):
"""Handle given value based on state of parser
This method handles the various values that are created by the builder
at the beginning of scope events (such as mappings and sequences) or
when a scalar value is received.
Method is called when handler receives a parser, MappingStart or
SequenceStart.
Args:
value: Value received as scalar value or newly constructed mapping or
sequence instance.
Raises:
InternalError if the building process encounters an unexpected token.
This is an indication of an implementation error in BuilderHandler.
"""
token, top_value = self._top
# If the last token was a key, it means that it is necessary
# to insert the value in to a map.
if token == _TOKEN_KEY:
# Fetch the key (removing from the stack)
key = self._Pop()
# New values at top of stack
mapping_token, mapping = self._top
assert _TOKEN_MAPPING == mapping_token
# Forward to builder for assembly
self._builder.MapTo(mapping, key, value)
# Parent object for new value is a mapping. It means that
# this value that is passed in is a scalar and should
# get | |
== other.__dict__
    def __ne__(self, other):
        # Defined explicitly for Python 2 compatibility, where __ne__ is
        # not derived automatically from __eq__.
        return not (self == other)
class similarModels_args(object):
    """Thrift-generated request arguments for the similarModels RPC.

    Attributes:
     - modelId: id of the reference model
     - compMetrics: list of metric ids used for the comparison
     - numModels: maximum number of similar models requested
    """

    # Wire metadata per field slot: (field id, thrift type, name, type args,
    # default). Slot 0 is unused by Thrift convention.
    thrift_spec = (
        None,  # 0
        (1, TType.I32, 'modelId', None, None, ),  # 1
        (2, TType.LIST, 'compMetrics', (TType.I32, None, False), None, ),  # 2
        (3, TType.I32, 'numModels', None, None, ),  # 3
    )

    def __init__(self, modelId=None, compMetrics=None, numModels=None,):
        self.modelId = modelId
        self.compMetrics = compMetrics
        self.numModels = numModels

    def read(self, iprot):
        # Deserialize this struct from the given input protocol.
        # Fast path: accelerated C decoder, when the transport supports it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
            return
        # Slow path: field-by-field decode. Unknown or mistyped fields are
        # skipped so the decoder stays forward-compatible.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.I32:
                    self.modelId = iprot.readI32()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.LIST:
                    self.compMetrics = []
                    (_etype393, _size390) = iprot.readListBegin()
                    for _i394 in range(_size390):
                        _elem395 = iprot.readI32()
                        self.compMetrics.append(_elem395)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.I32:
                    self.numModels = iprot.readI32()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Serialize this struct; None-valued fields are omitted from the wire.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('similarModels_args')
        if self.modelId is not None:
            oprot.writeFieldBegin('modelId', TType.I32, 1)
            oprot.writeI32(self.modelId)
            oprot.writeFieldEnd()
        if self.compMetrics is not None:
            oprot.writeFieldBegin('compMetrics', TType.LIST, 2)
            oprot.writeListBegin(TType.I32, len(self.compMetrics))
            for iter396 in self.compMetrics:
                oprot.writeI32(iter396)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        if self.numModels is not None:
            oprot.writeFieldBegin('numModels', TType.I32, 3)
            oprot.writeI32(self.numModels)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Generated structs perform no validation.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class similarModels_result(object):
    """Thrift-generated response envelope for the similarModels RPC.

    Exactly one attribute is expected to be set: the return value or one
    of the declared exceptions.

    Attributes:
     - success: list of similar model ids on success
     - rnfEx: ResourceNotFoundException raised by the server
     - brEx: BadRequestException raised by the server
     - svEx: ServerLogicException raised by the server
    """

    # Wire metadata per field slot: (field id, thrift type, name, type args,
    # default). Slot 0 is the return value by Thrift convention.
    thrift_spec = (
        (0, TType.LIST, 'success', (TType.I32, None, False), None, ),  # 0
        (1, TType.STRUCT, 'rnfEx', (ResourceNotFoundException, ResourceNotFoundException.thrift_spec), None, ),  # 1
        (2, TType.STRUCT, 'brEx', (BadRequestException, BadRequestException.thrift_spec), None, ),  # 2
        (3, TType.STRUCT, 'svEx', (ServerLogicException, ServerLogicException.thrift_spec), None, ),  # 3
    )

    def __init__(self, success=None, rnfEx=None, brEx=None, svEx=None,):
        self.success = success
        self.rnfEx = rnfEx
        self.brEx = brEx
        self.svEx = svEx

    def read(self, iprot):
        # Deserialize this struct from the given input protocol.
        # Fast path: accelerated C decoder, when the transport supports it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
            return
        # Slow path: field-by-field decode; unknown fields are skipped.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.LIST:
                    self.success = []
                    (_etype400, _size397) = iprot.readListBegin()
                    for _i401 in range(_size397):
                        _elem402 = iprot.readI32()
                        self.success.append(_elem402)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.rnfEx = ResourceNotFoundException()
                    self.rnfEx.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRUCT:
                    self.brEx = BadRequestException()
                    self.brEx.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.STRUCT:
                    self.svEx = ServerLogicException()
                    self.svEx.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Serialize this struct; None-valued fields are omitted from the wire.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('similarModels_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.LIST, 0)
            oprot.writeListBegin(TType.I32, len(self.success))
            for iter403 in self.success:
                oprot.writeI32(iter403)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        if self.rnfEx is not None:
            oprot.writeFieldBegin('rnfEx', TType.STRUCT, 1)
            self.rnfEx.write(oprot)
            oprot.writeFieldEnd()
        if self.brEx is not None:
            oprot.writeFieldBegin('brEx', TType.STRUCT, 2)
            self.brEx.write(oprot)
            oprot.writeFieldEnd()
        if self.svEx is not None:
            oprot.writeFieldBegin('svEx', TType.STRUCT, 3)
            self.svEx.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Generated structs perform no validation.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class linearModelFeatureImportances_args(object):
    """Thrift-generated request arguments for the
    linearModelFeatureImportances RPC.

    Attributes:
     - modelId: id of the linear model whose feature importances are wanted
    """

    # Wire metadata: (field id, thrift type, name, type args, default);
    # slot 0 is unused by Thrift convention.
    thrift_spec = (
        None,  # 0
        (1, TType.I32, 'modelId', None, None, ),  # 1
    )

    def __init__(self, modelId=None,):
        self.modelId = modelId

    def read(self, iprot):
        # Deserialize this struct from the given input protocol.
        # Fast path: accelerated C decoder, when the transport supports it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
            return
        # Slow path: field-by-field decode; unknown fields are skipped.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.I32:
                    self.modelId = iprot.readI32()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Serialize this struct; None-valued fields are omitted from the wire.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('linearModelFeatureImportances_args')
        if self.modelId is not None:
            oprot.writeFieldBegin('modelId', TType.I32, 1)
            oprot.writeI32(self.modelId)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Generated structs perform no validation.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class linearModelFeatureImportances_result(object):
    """Thrift-generated response envelope for the
    linearModelFeatureImportances RPC.

    Exactly one attribute is expected to be set: the return value or one
    of the declared exceptions.

    Attributes:
     - success: list of feature names (strings) on success
     - rnfEx: ResourceNotFoundException raised by the server
     - ioEx: IllegalOperationException raised by the server
     - svEx: ServerLogicException raised by the server
    """

    # Wire metadata per field slot; slot 0 is the return value by convention.
    thrift_spec = (
        (0, TType.LIST, 'success', (TType.STRING, 'UTF8', False), None, ),  # 0
        (1, TType.STRUCT, 'rnfEx', (ResourceNotFoundException, ResourceNotFoundException.thrift_spec), None, ),  # 1
        (2, TType.STRUCT, 'ioEx', (IllegalOperationException, IllegalOperationException.thrift_spec), None, ),  # 2
        (3, TType.STRUCT, 'svEx', (ServerLogicException, ServerLogicException.thrift_spec), None, ),  # 3
    )

    def __init__(self, success=None, rnfEx=None, ioEx=None, svEx=None,):
        self.success = success
        self.rnfEx = rnfEx
        self.ioEx = ioEx
        self.svEx = svEx

    def read(self, iprot):
        # Deserialize this struct from the given input protocol.
        # Fast path: accelerated C decoder, when the transport supports it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
            return
        # Slow path: field-by-field decode; unknown fields are skipped.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.LIST:
                    self.success = []
                    (_etype407, _size404) = iprot.readListBegin()
                    for _i408 in range(_size404):
                        # On Python 2 strings arrive as bytes and must be
                        # decoded; Python 3 already yields str.
                        _elem409 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                        self.success.append(_elem409)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.rnfEx = ResourceNotFoundException()
                    self.rnfEx.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRUCT:
                    self.ioEx = IllegalOperationException()
                    self.ioEx.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.STRUCT:
                    self.svEx = ServerLogicException()
                    self.svEx.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Serialize this struct; None-valued fields are omitted from the wire.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('linearModelFeatureImportances_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.LIST, 0)
            oprot.writeListBegin(TType.STRING, len(self.success))
            for iter410 in self.success:
                # Mirror of read(): encode to bytes on Python 2 only.
                oprot.writeString(iter410.encode('utf-8') if sys.version_info[0] == 2 else iter410)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        if self.rnfEx is not None:
            oprot.writeFieldBegin('rnfEx', TType.STRUCT, 1)
            self.rnfEx.write(oprot)
            oprot.writeFieldEnd()
        if self.ioEx is not None:
            oprot.writeFieldBegin('ioEx', TType.STRUCT, 2)
            self.ioEx.write(oprot)
            oprot.writeFieldEnd()
        if self.svEx is not None:
            oprot.writeFieldBegin('svEx', TType.STRUCT, 3)
            self.svEx.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Generated structs perform no validation.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class compareLinearModelFeatureImportances_args(object):
    """Thrift-generated request arguments for the
    compareLinearModelFeatureImportances RPC.

    Attributes:
     - model1Id: id of the first model to compare
     - model2Id: id of the second model to compare
    """

    # Wire metadata: (field id, thrift type, name, type args, default);
    # slot 0 is unused by Thrift convention.
    thrift_spec = (
        None,  # 0
        (1, TType.I32, 'model1Id', None, None, ),  # 1
        (2, TType.I32, 'model2Id', None, None, ),  # 2
    )

    def __init__(self, model1Id=None, model2Id=None,):
        self.model1Id = model1Id
        self.model2Id = model2Id

    def read(self, iprot):
        # Deserialize this struct from the given input protocol.
        # Fast path: accelerated C decoder, when the transport supports it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
            return
        # Slow path: field-by-field decode; unknown fields are skipped.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.I32:
                    self.model1Id = iprot.readI32()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.I32:
                    self.model2Id = iprot.readI32()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Serialize this struct; None-valued fields are omitted from the wire.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('compareLinearModelFeatureImportances_args')
        if self.model1Id is not None:
            oprot.writeFieldBegin('model1Id', TType.I32, 1)
            oprot.writeI32(self.model1Id)
            oprot.writeFieldEnd()
        if self.model2Id is not None:
            oprot.writeFieldBegin('model2Id', TType.I32, 2)
            oprot.writeI32(self.model2Id)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Generated structs perform no validation.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class compareLinearModelFeatureImportances_result(object):
"""
Attributes:
- success
- rnfEx
- ioEx
- svEx
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT, (FeatureImportanceComparison, FeatureImportanceComparison.thrift_spec), False), None, ), # 0
(1, TType.STRUCT, 'rnfEx', (ResourceNotFoundException, ResourceNotFoundException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ioEx', (IllegalOperationException, IllegalOperationException.thrift_spec), None, ), # 2
(3, TType.STRUCT, 'svEx', (ServerLogicException, ServerLogicException.thrift_spec), None, ), # 3
)
def __init__(self, success=None, rnfEx=None, ioEx=None, svEx=None,):
self.success = success
self.rnfEx = rnfEx
self.ioEx = ioEx
self.svEx = svEx
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == | |
of the full circuit to be cut
auto_cutter (Union[bool, Callable]): Toggle for enabling automatic cutting with the default
:func:`~.kahypar_cut` partition method. Can also pass a graph partitioning function that
takes an input graph and returns a list of edges to be cut based on a given set of
constraints and objective. The default :func:`~.kahypar_cut` function requires KaHyPar to
be installed using ``pip install kahypar`` for Linux and Mac users or visiting the
instructions `here <https://kahypar.org>`__ to compile from source for Windows users.
use_opt_einsum (bool): Determines whether to use the
`opt_einsum <https://dgasmith.github.io/opt_einsum/>`__ package. This package is useful
for faster tensor contractions of large networks but must be installed separately using,
e.g., ``pip install opt_einsum``. Both settings for ``use_opt_einsum`` result in a
differentiable contraction.
device_wires (Wires): Wires of the device that the cut circuits are to be run on.
When transforming a QNode, this argument is optional and will be set to the
QNode's device wires. Required when transforming a tape.
max_depth (int): The maximum depth used to expand the circuit while searching for wire cuts.
Only applicable when transforming a QNode.
kwargs: Additional keyword arguments to be passed to a callable ``auto_cutter`` argument.
For the default KaHyPar cutter, please refer to the docstring of functions
:func:`~.find_and_place_cuts` and :func:`~.kahypar_cut` for the available arguments.
Returns:
Callable: Function which accepts the same arguments as the QNode.
When called, this function will perform a process tomography of the
partitioned circuit fragments and combine the results via tensor
contractions.
**Example**
The following :math:`3`-qubit circuit contains a :class:`~.WireCut` operation. When decorated
with ``@qml.cut_circuit``, we can cut the circuit into two :math:`2`-qubit fragments:
.. code-block:: python
dev = qml.device("default.qubit", wires=2)
@qml.cut_circuit
@qml.qnode(dev)
def circuit(x):
qml.RX(x, wires=0)
qml.RY(0.9, wires=1)
qml.RX(0.3, wires=2)
qml.CZ(wires=[0, 1])
qml.RY(-0.4, wires=0)
qml.WireCut(wires=1)
qml.CZ(wires=[1, 2])
return qml.expval(qml.grouping.string_to_pauli_word("ZZZ"))
Executing ``circuit`` will run multiple configurations of the :math:`2`-qubit fragments which
are then postprocessed to give the result of the original circuit:
>>> x = np.array(0.531, requires_grad=True)
>>> circuit(x)
0.47165198882111165
Furthermore, the output of the cut circuit is also differentiable:
>>> qml.grad(circuit)(x)
-0.276982865449393
Alternatively, if the optimal wire-cut placement is unknown for an arbitrary circuit, the
``auto_cutter`` option can be enabled to make attempts in finding such an optimal cut. The
following example shows this capability on the same circuit as above but with the
:class:`~.WireCut` removed:
.. code-block:: python
@qml.cut_circuit(auto_cutter=True)
@qml.qnode(dev)
def circuit(x):
qml.RX(x, wires=0)
qml.RY(0.9, wires=1)
qml.RX(0.3, wires=2)
qml.CZ(wires=[0, 1])
qml.RY(-0.4, wires=0)
qml.CZ(wires=[1, 2])
return qml.expval(qml.grouping.string_to_pauli_word("ZZZ"))
>>> x = np.array(0.531, requires_grad=True)
>>> circuit(x)
0.47165198882111165
>>> qml.grad(circuit)(x)
-0.276982865449393
.. UsageDetails::
Manually placing :class:`~.WireCut` operations and decorating the QNode with the
``cut_circuit()`` batch transform is the suggested entrypoint into circuit cutting. However,
advanced users also have the option to work directly with a :class:`~.QuantumTape` and
manipulate the tape to perform circuit cutting using the below functionality:
.. autosummary::
:toctree:
~transforms.qcut.tape_to_graph
~transforms.qcut.find_and_place_cuts
~transforms.qcut.replace_wire_cut_nodes
~transforms.qcut.fragment_graph
~transforms.qcut.graph_to_tape
~transforms.qcut.remap_tape_wires
~transforms.qcut.expand_fragment_tape
~transforms.qcut.qcut_processing_fn
~transforms.qcut.CutStrategy
The following shows how these elementary steps are combined as part of the
``cut_circuit()`` transform.
Consider the circuit below:
.. code-block:: python
with qml.tape.QuantumTape() as tape:
qml.RX(0.531, wires=0)
qml.RY(0.9, wires=1)
qml.RX(0.3, wires=2)
qml.CZ(wires=[0, 1])
qml.RY(-0.4, wires=0)
qml.WireCut(wires=1)
qml.CZ(wires=[1, 2])
qml.expval(qml.grouping.string_to_pauli_word("ZZZ"))
>>> print(tape.draw())
0: ──RX(0.531)──╭C──RY(-0.4)──────╭┤ ⟨Z ⊗ Z ⊗ Z⟩
1: ──RY(0.9)────╰Z──//────────╭C──├┤ ⟨Z ⊗ Z ⊗ Z⟩
2: ──RX(0.3)──────────────────╰Z──╰┤ ⟨Z ⊗ Z ⊗ Z⟩
To cut the circuit, we first convert it to its graph representation:
>>> graph = qml.transforms.qcut.tape_to_graph(tape)
.. figure:: ../../_static/qcut_graph.svg
:align: center
:width: 60%
:target: javascript:void(0);
If, however, the optimal location of the :class:`~.WireCut` is unknown, we can use
:func:`~.find_and_place_cuts` to make attempts in automatically finding such a cut
given the device constraints. Using the same circuit as above but with the
:class:`~.WireCut` removed, the same (optimal) cut can be recovered with automatic
cutting:
.. code-block:: python
with qml.tape.QuantumTape() as uncut_tape:
qml.RX(0.531, wires=0)
qml.RY(0.9, wires=1)
qml.RX(0.3, wires=2)
qml.CZ(wires=[0, 1])
qml.RY(-0.4, wires=0)
qml.CZ(wires=[1, 2])
qml.expval(qml.grouping.string_to_pauli_word("ZZZ"))
>>> cut_graph = qml.transforms.qcut.find_and_place_cuts(
graph = qml.transforms.qcut.tape_to_graph(uncut_tape),
cut_strategy = qml.transforms.qcut.CutStrategy(max_free_wires=2),
)
>>> print(qml.transforms.qcut.graph_to_tape(cut_graph).draw())
0: ──RX─╭C──RY────┤ ╭<Z@Z@Z>
1: ──RY─╰Z──//─╭C─┤ ├<Z@Z@Z>
2: ──RX────────╰Z─┤ ╰<Z@Z@Z>
Our next step is to remove the :class:`~.WireCut` nodes in the graph and replace with
:class:`~.MeasureNode` and :class:`~.PrepareNode` pairs.
>>> qml.transforms.qcut.replace_wire_cut_nodes(graph)
The :class:`~.MeasureNode` and :class:`~.PrepareNode` pairs are placeholder operations that
allow us to cut the circuit graph and then iterate over measurement and preparation
configurations at cut locations. First, the :func:`~.fragment_graph` function pulls apart
the graph into disconnected components as well as returning the
`communication_graph <https://en.wikipedia.org/wiki/Quotient_graph>`__
detailing the connectivity between the components.
>>> fragments, communication_graph = qml.transforms.qcut.fragment_graph(graph)
We now convert the ``fragments`` back to :class:`~.QuantumTape` objects
>>> fragment_tapes = [qml.transforms.qcut.graph_to_tape(f) for f in fragments]
The circuit fragments can now be visualized:
>>> print(fragment_tapes[0].draw())
0: ──RX(0.531)──╭C──RY(-0.4)─────┤ ⟨Z⟩
1: ──RY(0.9)────╰Z──MeasureNode──┤
>>> print(fragment_tapes[1].draw())
2: ──RX(0.3)──────╭Z──╭┤ ⟨Z ⊗ Z⟩
1: ──PrepareNode──╰C──╰┤ ⟨Z ⊗ Z⟩
Additionally, we must remap the tape wires to match those available on our device.
>>> dev = qml.device("default.qubit", wires=2)
>>> fragment_tapes = [
... qml.transforms.qcut.remap_tape_wires(t, dev.wires) for t in fragment_tapes
... ]
Next, each circuit fragment is expanded over :class:`~.MeasureNode` and
:class:`~.PrepareNode` configurations and a flat list of tapes is created:
.. code-block::
expanded = [qml.transforms.qcut.expand_fragment_tape(t) for t in fragment_tapes]
configurations = []
prepare_nodes = []
measure_nodes = []
for tapes, p, m in expanded:
configurations.append(tapes)
prepare_nodes.append(p)
measure_nodes.append(m)
tapes = tuple(tape for c in configurations for tape in c)
Each configuration is drawn below:
>>> for t in tapes:
... print(t.draw())
.. code-block::
0: ──RX(0.531)──╭C──RY(-0.4)──╭┤ ⟨Z ⊗ I⟩ ╭┤ ⟨Z ⊗ Z⟩
1: ──RY(0.9)────╰Z────────────╰┤ ⟨Z ⊗ I⟩ ╰┤ ⟨Z ⊗ Z⟩
0: ──RX(0.531)──╭C──RY(-0.4)──╭┤ ⟨Z ⊗ X⟩
1: ──RY(0.9)────╰Z────────────╰┤ ⟨Z ⊗ X⟩
0: ──RX(0.531)──╭C──RY(-0.4)──╭┤ ⟨Z ⊗ Y⟩
1: ──RY(0.9)────╰Z────────────╰┤ ⟨Z ⊗ Y⟩
0: ──RX(0.3)──╭Z──╭┤ ⟨Z ⊗ Z⟩
1: ──I────────╰C──╰┤ ⟨Z ⊗ Z⟩
0: ──RX(0.3)──╭Z──╭┤ ⟨Z ⊗ Z⟩
1: ──X────────╰C──╰┤ ⟨Z ⊗ Z⟩
0: ──RX(0.3)──╭Z──╭┤ ⟨Z ⊗ Z⟩
1: ──H────────╰C──╰┤ ⟨Z ⊗ Z⟩
0: ──RX(0.3)─────╭Z──╭┤ ⟨Z ⊗ Z⟩
1: ──H────────S──╰C──╰┤ ⟨Z ⊗ Z⟩
The last step is to execute the tapes and postprocess the results using
:func:`~.qcut_processing_fn`, which processes the results to the original full circuit
output via a tensor network contraction
>>> results = qml.execute(tapes, dev, gradient_fn=None)
>>> qml.transforms.qcut.qcut_processing_fn(
... results,
... communication_graph,
... prepare_nodes,
... measure_nodes,
... )
0.47165198882111165
"""
# pylint: disable=unused-argument
if len(tape.measurements) != 1:
raise ValueError(
"The circuit cutting workflow only supports circuits with a single output "
"measurement"
)
if not all(m.return_type is Expectation for m in tape.measurements):
raise ValueError(
"The circuit cutting workflow only supports circuits with expectation "
"value measurements"
)
if use_opt_einsum:
try:
import opt_einsum # pylint: disable=import-outside-toplevel,unused-import
except ImportError as e:
raise ImportError(
"The opt_einsum package is required when use_opt_einsum is set to "
"True in the cut_circuit function. This package can be "
"installed using:\npip install opt_einsum"
) from e
g = tape_to_graph(tape)
if auto_cutter is True or callable(auto_cutter):
cut_strategy = kwargs.pop("cut_strategy", None) or CutStrategy(
max_free_wires=len(device_wires)
)
g = find_and_place_cuts(
graph=g,
cut_method=auto_cutter if callable(auto_cutter) else kahypar_cut,
cut_strategy=cut_strategy,
**kwargs,
)
replace_wire_cut_nodes(g)
fragments, communication_graph = fragment_graph(g)
fragment_tapes = [graph_to_tape(f) for f in fragments]
fragment_tapes = [remap_tape_wires(t, device_wires) for t in fragment_tapes]
expanded = [expand_fragment_tape(t) for t in fragment_tapes]
configurations = []
prepare_nodes = []
measure_nodes = []
for tapes, p, m in expanded:
configurations.append(tapes)
prepare_nodes.append(p)
measure_nodes.append(m)
tapes = tuple(tape for c in configurations for tape in c)
return tapes, partial(
qcut_processing_fn,
communication_graph=communication_graph,
prepare_nodes=prepare_nodes,
measure_nodes=measure_nodes,
use_opt_einsum=use_opt_einsum,
)
@cut_circuit.custom_qnode_wrapper
def qnode_execution_wrapper(self, qnode, targs, tkwargs):
    """Overridden QNode execution wrapper that injects the device wires.

    The transform needs to know the device wires at tape-processing time;
    a QNode knows its device, so default ``device_wires`` to
    ``qnode.device.wires`` before delegating to the standard wrapper.
    """
    # pylint: disable=function-redefined
    if "device_wires" not in tkwargs:
        tkwargs["device_wires"] = qnode.device.wires
    return self.default_qnode_wrapper(qnode, targs, tkwargs)
def _qcut_expand_fn(
tape: QuantumTape,
max_depth: int = 1,
auto_cutter: Union[bool, Callable] = False,
):
"""Expansion function for circuit cutting.
Expands operations until reaching a depth that includes :class:`~.WireCut` operations.
"""
for op in tape.operations:
if isinstance(op, WireCut):
return tape
if max_depth > 0:
return _qcut_expand_fn(tape.expand(), max_depth=max_depth - 1, auto_cutter=auto_cutter)
if not (auto_cutter is True or callable(auto_cutter)):
raise ValueError(
"No WireCut operations found in the circuit. Consider increasing the max_depth value if"
" operations or nested | |
<filename>run_model.py
import argparse
import math
import os
import time
import dill
import logger
import numpy as np
import scipy.stats as stats
import tensorflow as tf
import tensorflow.contrib.seq2seq as seq2seq
from tensorflow.contrib.seq2seq.python.ops import beam_search_decoder
from tensorflow.python.layers.core import Dense
from tensorflow.python.ops.rnn_cell import LSTMCell, MultiRNNCell
from tensorflow.python.ops.rnn_cell import LSTMStateTuple
from tensorflow.python.util import nest
import dataloader
def ranks(predictions, dataset, true_inds, sqrt=True):
    """Rank of each ground-truth item when the dataset is sorted by distance.

    :param predictions: [batch_size, image_feats] array of predicted embeddings
    :param dataset: [dataset_size, image_feats] array of candidate embeddings
    :param true_inds: [batch_size] 1-D integer array; index of the true item
        for each prediction (a 2-D [batch_size, 1] array would break the
        broadcast below)
    :param sqrt: Euclidean distance if True, otherwise squared Euclidean
        (the monotone transform does not change the ranks)
    :return: list of int ranks; 0 means the true item is the nearest neighbour
    """
    predictions = np.asarray(predictions, dtype=np.float64)
    dataset = np.asarray(dataset, dtype=np.float64)
    true_inds = np.asarray(true_inds)
    # Pairwise squared distances via ||p - x||^2 = ||p||^2 + ||x||^2 - 2 p.x
    d = (predictions ** 2).sum(axis=-1)[:, np.newaxis] + (dataset ** 2).sum(axis=-1)
    d -= 2 * predictions.dot(dataset.T)
    # Floating-point cancellation can leave tiny negative values; clamp them
    # so the square root below cannot produce NaN.
    np.maximum(d, 0.0, out=d)
    if sqrt:
        d **= 0.5
    sorted_norms = np.argsort(d, axis=-1).astype(np.uint32)
    # Position of the true index within each row's distance-sorted order.
    positions = np.where(sorted_norms == true_inds[:, np.newaxis])[1]
    return positions.tolist()
class ABOT(object):
    """Answer bot (A-BOT) for a visual-dialog task.

    A TF1 graph-mode seq2seq model: the image features, the dialog fact
    (previous QA pair) and the current question are encoded with LSTM stacks
    into a recurrent dialog-history state, from which an answer token
    sequence is decoded.  All sequence tensors are time-major: [time, batch].

    NOTE(review): several placeholder names below are prefixed ``qbot_`` even
    though this is the answer bot - presumably copy-paste from QBOT; harmless
    at runtime but confusing in TensorBoard.
    """

    def __init__(self,
                 session,
                 config,
                 mode):
        # ``mode`` selects which subgraphs are built: 'train' adds loss and
        # optimizer ops, 'decode'/'rank' add a greedy or beam-search decoder.
        assert mode.lower() in ['train', 'decode', 'rank', 'test']
        self.config = config
        self.mode = mode.lower()
        self.session = session
        # Model dimensions, all taken from the config object.
        self.embed_dim = config.embed_dim
        self.vocab_dim = config.vocab_dim
        self.fact_dim = config.fact_dim
        self.history_dim = config.history_dim
        self.decod_dim = config.decoder_dim
        self.img_feature_dim = config.img_feature_size
        # Special vocabulary ids.
        self.start_token, self.end_token = config.start_token, config.end_token
        self.pad_token = config.pad_token
        self.batch_size = config.batch_size
        self.save_each_epoch = False
        with tf.variable_scope("t_op"):
            # Global step; also advanced by apply_gradients in build_training.
            self.t_op = tf.Variable(0, trainable=False)
            self.t_add_op = self.t_op.assign_add(1)
        self.use_beamsearch = False
        if self.mode in ['decode', 'rank']:
            self.beam_width = config.beam_width
            self.use_beamsearch = True if self.beam_width > 1 else False
            self.max_decode_step = config.max_decode_step
        self.build_model()

    def build_model(self):
        """Assemble the full graph under the ``abot`` variable scope."""
        with tf.variable_scope("abot"):
            self.init_placeholders()
            self.build_encoder()
            self.build_decoder()
            self.build_training()
            self.summary_op = tf.summary.merge_all()

    def init_placeholders(self):
        """Create input placeholders, teacher-forcing targets and the
        recurrent history-state feed-in placeholders."""
        # Precomputed image features, one vector per batch element.
        self.imfeat_ph = tf.placeholder(dtype=tf.float32,
                                        shape=(None, self.img_feature_dim),
                                        name='im_feats')
        # Fact (previous QA pair) token ids, time-major [T, B].
        self.fact_encoder_inputs = tf.placeholder(dtype=tf.int32,
                                                  shape=(None, None),
                                                  name='fact_encoder_inputs')
        self.fact_encoder_inputs_length = tf.placeholder(dtype=tf.int32,
                                                         shape=(None,),
                                                         name='fact_encoder_inputs_length')
        # Current question token ids, time-major [T, B].
        self.ques_encoder_inputs = tf.placeholder(dtype=tf.int32,
                                                  shape=(None, None),
                                                  name='ques_encoder_inputs')
        self.ques_encoder_inputs_length = tf.placeholder(dtype=tf.int32,
                                                         shape=(None,),
                                                         name='ques_encoder_inputs_length')
        # Ground-truth answer token ids (decoder teacher forcing), [T, B].
        self.decoder_inputs = tf.placeholder(dtype=tf.int32,
                                             shape=(None, None),
                                             name='decoder_inputs')
        self.decoder_inputs_length = tf.placeholder(dtype=tf.int32,
                                                    shape=(None,),
                                                    name='decoder_inputs_length')
        decoder_start_token = tf.ones(shape=(1, self.batch_size),
                                      dtype=tf.int32) * self.start_token
        decoder_pad_token = tf.ones(shape=(1, self.batch_size),
                                    dtype=tf.int32) * self.pad_token
        # Decoder inputs are the answer prefixed with <start>.
        self.decoder_inputs_train = tf.concat(
            [decoder_start_token, self.decoder_inputs], axis=0
        )
        self.decoder_inputs_length_train = self.decoder_inputs_length + 1
        # Targets are the answer suffixed with padding; the one-hot mask then
        # overwrites the position right after each sequence with <end>.
        # NOTE(review): the tf.add trick assumes pad_token acts as the
        # additive identity (i.e. pad_token == 0) - confirm in config.
        decoder_train_targets = tf.concat([self.decoder_inputs, decoder_pad_token],
                                          axis=0)
        decoder_train_targets_seq_len, _ = tf.unstack(tf.shape(decoder_train_targets))
        decoder_train_targets_eos_mask = tf.one_hot(self.decoder_inputs_length_train - 1,
                                                    decoder_train_targets_seq_len,
                                                    on_value=self.end_token,
                                                    off_value=self.pad_token,
                                                    dtype=tf.int32)
        decoder_train_targets_eos_mask = tf.transpose(decoder_train_targets_eos_mask,
                                                      [1, 0])
        decoder_train_targets = tf.add(decoder_train_targets,
                                       decoder_train_targets_eos_mask)
        self.decoder_targets_train = decoder_train_targets
        # Recurrent dialog-history LSTM state, fed back in across rounds
        # (two layers, each with c and h).
        self.c_state_ph = tf.placeholder(dtype=tf.float32,
                                         shape=(self.batch_size, self.history_dim),
                                         name='qbot_cell_c1')
        self.h_state_ph = tf.placeholder(dtype=tf.float32,
                                         shape=(self.batch_size, self.history_dim),
                                         name='qbot_cell_h1')
        self.c2_state_ph = tf.placeholder(dtype=tf.float32,
                                          shape=(self.batch_size, self.history_dim),
                                          name='qbot_cell_c2')
        self.h2_state_ph = tf.placeholder(dtype=tf.float32,
                                          shape=(self.batch_size, self.history_dim),
                                          name='qbot_cell_h2')
        self.abot_history_state = tuple([LSTMStateTuple(c=self.c_state_ph, h=self.h_state_ph),
                                         LSTMStateTuple(c=self.c2_state_ph, h=self.h2_state_ph)])
        # Word embeddings, uniform init in [-sqrt(3), sqrt(3)].
        sqrt3 = math.sqrt(3)
        initializer = tf.random_uniform_initializer(-sqrt3, sqrt3, dtype=tf.float32)
        self.embedding_matrix = tf.get_variable(name='embedding_matrix',
                                                shape=[self.vocab_dim, self.embed_dim],
                                                initializer=initializer,
                                                dtype=tf.float32)

    def build_encoder(self):
        """Encode fact and question separately, then fold their final states
        plus the image features into the recurrent history encoder."""
        print('Building encoder..')
        with tf.variable_scope("encoder"):
            self.fact_encoder_inputs_embedded = tf.nn.embedding_lookup(
                params=self.embedding_matrix, ids=self.fact_encoder_inputs,
                name='fact_embedding_inputs')
            self.ques_encoder_inputs_embedded = tf.nn.embedding_lookup(
                params=self.embedding_matrix, ids=self.ques_encoder_inputs,
                name='ques_embedding_inputs'
            )
            with tf.variable_scope("fact_encoder"):
                self.fact_encoder_cell = MultiRNNCell(
                    [LSTMCell(self.fact_dim), LSTMCell(self.fact_dim)])
                self.fact_enc_out, self.fact_enc_state = tf.nn.dynamic_rnn(
                    cell=self.fact_encoder_cell, inputs=self.fact_encoder_inputs_embedded,
                    sequence_length=self.fact_encoder_inputs_length, dtype=tf.float32,
                    time_major=True
                )
            with tf.variable_scope("ques_encoder"):
                self.ques_encoder_cell = MultiRNNCell(
                    [LSTMCell(self.fact_dim), LSTMCell(self.fact_dim)])
                self.ques_enc_out, self.ques_enc_state = tf.nn.dynamic_rnn(
                    cell=self.ques_encoder_cell, inputs=self.ques_encoder_inputs_embedded,
                    sequence_length=self.ques_encoder_inputs_length, dtype=tf.float32,
                    time_major=True
                )
            with tf.variable_scope("history_encoder"):
                self.history_encoder_cell = MultiRNNCell(
                    [LSTMCell(self.history_dim), LSTMCell(self.history_dim)])
                # Top-layer hidden states of the two sequence encoders.
                fact_state = self.fact_enc_state[-1].h
                ques_state = self.ques_enc_state[-1].h
                history_input = tf.concat(values=[fact_state, ques_state, self.imfeat_ph],
                                          axis=1,
                                          name="history_input")
                # One time step per dialog round; the previous round's state
                # comes in through abot_history_state.
                history_input = tf.expand_dims(history_input, axis=0)
                self.hist_enc_out, self.hist_enc_state = tf.nn.dynamic_rnn(
                    cell=self.history_encoder_cell, inputs=history_input,
                    initial_state=self.abot_history_state,
                    dtype=tf.float32, time_major=True
                )

    def build_decoder(self):
        """Build the answer decoder: teacher-forced in 'train' mode, greedy or
        beam search in 'decode'/'rank' mode."""
        print('Buidling decoder...')
        with tf.variable_scope("decoder"):
            # Get decoder cell and initial state
            self.decoder_cell, self.decoder_initial_state = self.build_decoder_cell()
            # Output projection layer
            output_layer = Dense(self.vocab_dim, name='output_projection')
            if self.mode == 'train':
                # Construct inputs
                self.decoder_inputs_embedded = tf.nn.embedding_lookup(
                    self.embedding_matrix,
                    self.decoder_inputs_train)
                training_helper = seq2seq.TrainingHelper(
                    inputs=self.decoder_inputs_embedded,
                    sequence_length=self.decoder_inputs_length_train,
                    time_major=True,
                    name='training_helper')
                training_decoder = seq2seq.BasicDecoder(cell=self.decoder_cell,
                                                        helper=training_helper,
                                                        initial_state=self.decoder_initial_state,
                                                        output_layer=output_layer)
                # Maximum decoder time_steps in current batch
                max_decoder_length = tf.reduce_max(self.decoder_inputs_length_train)
                (self.decoder_outputs_train, self.decoder_last_state_train,
                 self.decoder_outputs_length_train) = (seq2seq.dynamic_decode(
                    decoder=training_decoder,
                    output_time_major=True,
                    impute_finished=True,
                    maximum_iterations=max_decoder_length))
                self.decoder_logits_train = tf.identity(
                    self.decoder_outputs_train.rnn_output)
                self.decoder_pred_train = tf.argmax(self.decoder_logits_train, axis=-1,
                                                    name='decoder_pred_train')
                # Mask so the loss ignores positions past each sequence end.
                self.masks = tf.sequence_mask(lengths=self.decoder_inputs_length_train,
                                              maxlen=max_decoder_length, dtype=tf.float32,
                                              name='masks')
            elif self.mode in ['decode', 'rank']:
                start_tokens = tf.ones([self.batch_size, ],
                                       tf.int32) * self.start_token
                end_token = self.end_token
                if not self.use_beamsearch:
                    # Greedy decoder
                    decoder_helper = seq2seq.GreedyEmbeddingHelper(
                        start_tokens=start_tokens,
                        end_token=end_token,
                        embedding=self.embedding_matrix)
                    print('building greedy decoder...')
                    inference_decoder = seq2seq.BasicDecoder(cell=self.decoder_cell,
                                                             helper=decoder_helper,
                                                             initial_state=self.decoder_initial_state,
                                                             output_layer=output_layer)
                else:
                    print('building beam search decoder...')
                    inference_decoder = beam_search_decoder.BeamSearchDecoder(
                        cell=self.decoder_cell,
                        embedding=self.embedding_matrix,
                        start_tokens=start_tokens,
                        end_token=end_token,
                        initial_state=self.decoder_initial_state,
                        beam_width=self.beam_width,
                        output_layer=output_layer)
                (self.decoder_outputs_decode, self.decoder_last_state_decode,
                 self.decoder_outputs_length_decode) = (seq2seq.dynamic_decode(
                    decoder=inference_decoder,
                    output_time_major=True,
                    maximum_iterations=self.max_decode_step))
                if not self.use_beamsearch:
                    # shape is [max_steps, batch_size]; add a trailing axis so
                    # greedy output matches the beam-search layout below.
                    self.decoder_pred_decode = tf.expand_dims(
                        self.decoder_outputs_decode.sample_id, axis=-1)
                    self.decoder_outputs_length_decode = tf.expand_dims(
                        self.decoder_outputs_length_decode, axis=-1
                    )
                else:
                    # shape is [max_steps, batch_size, beam_width]
                    self.decoder_pred_decode = self.decoder_outputs_decode.predicted_ids

    def build_training(self):
        """Cross-entropy sequence loss + Adam with gradient clipping at +/-5."""
        if self.mode == 'train':
            print('Building training ops...')
            # Seq2Seq training; sequence_loss wants batch-major tensors.
            self.loss = seq2seq.sequence_loss(
                logits=tf.transpose(self.decoder_logits_train, [1, 0, 2]),
                targets=tf.transpose(self.decoder_targets_train, [1, 0]),
                weights=self.masks,
                average_across_batch=True,
                average_across_timesteps=True)
            tf.summary.scalar('loss', self.loss)
            self.optimizer = tf.train.AdamOptimizer()
            grads_vars = self.optimizer.compute_gradients(self.loss)
            cliped_gradients = [(tf.clip_by_value(grad, -5., 5.), tvar) for grad, tvar in
                                grads_vars if grad is not None]
            # Passing self.t_op makes apply_gradients increment the step.
            self.update_op = self.optimizer.apply_gradients(cliped_gradients, self.t_op)

    def build_decoder_cell(self):
        """Return the decoder cell and its initial state (the final history
        encoder state, tiled across beams for beam search).

        NOTE(review): the history state is used directly as the decoder
        state, so this implicitly requires decoder_dim == history_dim.
        """
        encoder_last_state = self.hist_enc_state
        if self.use_beamsearch:
            print("use beam search decoding..")
            encoder_last_state = nest.map_structure(
                lambda s: seq2seq.tile_batch(s, self.beam_width), encoder_last_state
            )
        decoder_initial_state = encoder_last_state
        decoder_cell = MultiRNNCell([LSTMCell(self.decod_dim), LSTMCell(self.decod_dim)])
        return decoder_cell, decoder_initial_state

    def save(self, path, var_list=None, global_step=None):
        """Checkpoint the session's variables to ``path``."""
        # var_list = None returns the list of all saveable variables
        sess = self.session
        saver = tf.train.Saver(var_list)
        # temporary code
        save_path = saver.save(sess, save_path=path, global_step=global_step)
        print('model saved at %s' % save_path)

    def restore(self, sess, path, var_list=None):
        """Restore variables from a checkpoint.

        NOTE(review): runs global_variables_initializer first - presumably to
        cover variables missing from var_list; redundant for full restores.
        """
        # var_list = None returns the list of all saveable variables
        self.session.run(tf.global_variables_initializer())
        saver = tf.train.Saver(var_list)
        saver.restore(sess, save_path=path)
        print('model restored from %s' % path)

    def get_batch_inputs(self, batch, round):
        """Slice one dialog round out of a batch dict, trimming each sequence
        tensor to the round's longest sequence."""
        q_len = batch['question_lengths'][:, round]
        h_len = batch['history_lengths'][:, round]
        a_len = batch['answer_lengths'][:, round]
        q = batch['question'][0:int(np.max(q_len)), round, :]
        h = batch['history'][0:int(np.max(h_len)), round, :]
        a = batch['answer'][0:int(np.max(a_len)), round, :]
        return q, h, a, q_len, h_len, a_len, batch['img_feats'], batch['img_inds']

    def make_train_feed(self, data, c1, h1, c2, h2):
        """Feed dict for one training step (includes decoder targets)."""
        question, history, answer, q_len, h_len, a_len, img_feats, img_inds = data
        return {
            self.fact_encoder_inputs: history,
            self.fact_encoder_inputs_length: h_len,
            self.ques_encoder_inputs: question,
            self.ques_encoder_inputs_length: q_len,
            self.decoder_inputs: answer,
            self.decoder_inputs_length: a_len,
            self.c_state_ph: c1,
            self.h_state_ph: h1,
            self.c2_state_ph: c2,
            self.h2_state_ph: h2,
            self.imfeat_ph: img_feats
        }

    def make_decode_feed(self, data, c1, h1, c2, h2):
        """Feed dict for decoding; the answer entries are unpacked but unused
        (note the pre-existing 'aanswer' typo - it is a throwaway local)."""
        question, history, aanswer, q_len, h_len, a_len, img_feats, img_inds = data
        return {
            self.fact_encoder_inputs: history,
            self.fact_encoder_inputs_length: h_len,
            self.ques_encoder_inputs: question,
            self.ques_encoder_inputs_length: q_len,
            self.c_state_ph: c1,
            self.h_state_ph: h1,
            self.c2_state_ph: c2,
            self.h2_state_ph: h2,
            self.imfeat_ph: img_feats
        }

    def make_true_decode_feed(self, history, history_length, question, question_length, img_feats,
                              c1, h1, c2, h2):
        """Feed dict for decoding when the caller already holds the individual
        tensors (e.g. when interacting with a live QBOT)."""
        return {
            self.fact_encoder_inputs: history,
            self.fact_encoder_inputs_length: history_length,
            self.ques_encoder_inputs: question,
            self.ques_encoder_inputs_length: question_length,
            self.c_state_ph: c1,
            self.h_state_ph: h1,
            self.c2_state_ph: c2,
            self.h2_state_ph: h2,
            self.imfeat_ph: img_feats
        }

    def train(self, data, epochs):
        """Supervised training loop: for each batch, unroll the 10 dialog
        rounds, threading the history LSTM state between rounds."""
        start_time = time.time()
        num_batches = int(np.ceil(data.num_train_threads / self.batch_size))
        self.log_writer = tf.summary.FileWriter(self.config.logs_path,
                                                graph=self.session.graph)
        self.session.run(tf.global_variables_initializer())
        for cur_epoch in range(epochs):
            for cur_batch in range(num_batches):
                batch, _ = data.get_train_batch(self.batch_size, time_major=True)
                # Zero history state at the start of every dialog.
                c1 = np.zeros((self.batch_size, self.history_dim))
                h1 = np.zeros((self.batch_size, self.history_dim))
                c2 = np.zeros((self.batch_size, self.history_dim))
                h2 = np.zeros((self.batch_size, self.history_dim))
                batch_loss = 0.
                batch_start_time = time.time()
                for cur_round in range(10):
                    feed_dict = self.make_train_feed(
                        data=self.get_batch_inputs(batch, cur_round),
                        c1=c1,
                        h1=h1,
                        c2=c2,
                        h2=h2)
                    fetches = [self.hist_enc_state, self.loss, self.update_op]
                    # Only write summaries occasionally to keep logs small.
                    # NOTE(review): t_op.eval() relies on a default session
                    # being set - confirm the caller arranges this.
                    if cur_round % 5 == 0 and cur_batch % 50 == 0:
                        fetches += [self.summary_op]
                        states, round_loss, _, summ = self.session.run(fetches, feed_dict)
                        self.log_writer.add_summary(summ, self.t_op.eval())
                    else:
                        states, round_loss, _ = self.session.run(fetches, feed_dict)
                    # Carry the updated history state into the next round.
                    c1, h1 = states[0].c, states[0].h
                    c2, h2 = states[1].c, states[1].h
                    batch_loss += round_loss
                batch_duration = time.time() - batch_start_time
                logger.record_tabular('Time elapsed', time.time() - start_time)
                logger.record_tabular('Batch duration', batch_duration)
                logger.record_tabular('(Batch, Total)', (cur_batch, num_batches))
                logger.record_tabular('Epoch ', cur_epoch)
                logger.record_tabular('Batch loss ', batch_loss / 10.)
                logger.dump_tabular()
            if self.save_each_epoch:
                save_path = os.path.join(self.config.save_path,
                                         'epoch_{}'.format(cur_epoch), 'model.ckpt')
                self.save(save_path)
            logger.log('Finished epoch {}/{}'.format(cur_epoch, epochs))
        self.log_writer.close()
        save_path = os.path.join(self.config.save_path, self.config.model_name,
                                 'model.ckpt')
        self.save(save_path)

    def decode(self, data):
        """Decode answers for a random test batch, printing each round and
        dumping round 5's raw decoding to .npy files for inspection."""
        vocabulary = data.data['ind2word']
        batch, _, _ = data.get_test_batch(np.random.randint(0, 40000), self.batch_size,
                                          time_major=True)
        c1 = np.zeros((self.batch_size, self.history_dim))
        h1 = np.zeros((self.batch_size, self.history_dim))
        c2 = np.zeros((self.batch_size, self.history_dim))
        h2 = np.zeros((self.batch_size, self.history_dim))
        # Round 0's history holds the image caption for the first example.
        print("caption: {}".format(" ".join(list(
            vocabulary[token] for token in batch['history'][:, 0, 0] if
            token in vocabulary))))
        for cur_round in range(10):
            feed_dict = self.make_decode_feed(
                data=self.get_batch_inputs(batch, cur_round),
                c1=c1,
                h1=h1,
                c2=c2,
                h2=h2
            )
            fetches = [self.hist_enc_state, self.decoder_pred_decode,
                       self.decoder_outputs_length_decode]
            states, decoding, decodings_length = self.session.run(fetches, feed_dict=feed_dict)
            c1, h1 = states[0].c, states[0].h
            c2, h2 = states[1].c, states[1].h
            # [:, :, 0] selects the top beam (or the greedy sequence).
            self.print_greedy_dround(decoding[:, :, 0], decodings_length, vocabulary,
                                     batch['question'][:, cur_round, 0])
            if cur_round == 5:
                np.save('abot_decoding.npy', decoding)
                np.save('abot_decoding_length.npy', decodings_length)

    def print_greedy_dround(self, decoding, decoding_length, vocabulary, question):
        """Debug-print one decoded round for the first batch element."""
        print('Decoding for all batches is {}'.format(decoding))
        # decoding to [batch_size, time_steps]
        decoding = np.transpose(decoding)[0]
        print('Decoding shape is {}, question shape is {}'.format(decoding.shape, question.shape))
        print('Decoding raw is {}'.format(decoding))
        print('Question raw is {}'.format(question))
        print('Decoding length is {}'.format(decoding_length))
        print('Decoding length shape is {}'.format(decoding_length.shape))
        print('Question is')
        print(' '.join(
            list(vocabulary[token] for token in question if token in vocabulary)))
        to_print = list(vocabulary[token] for token in decoding if token in vocabulary)
        print('List to be printed is length {}'.format(len(to_print)))
        print(" ".join(to_print))
        print("----------")
class QBOT(object):
def __init__(self,
             session,
             config,
             mode):
    """Configure the question bot and build its TF1 graph.

    ``mode`` selects which subgraphs are built: 'train' adds loss and
    optimizer ops, 'decode'/'rank' add a greedy or beam-search decoder.
    """
    assert mode.lower() in ['train', 'decode', 'rank', 'test']
    self.config = config
    self.mode = mode.lower()
    self.session = session
    # Model dimensions, all taken from the config object.
    self.embed_dim = config.embed_dim
    self.vocab_dim = config.vocab_dim
    self.fact_dim = config.fact_dim
    self.history_dim = config.history_dim
    self.decod_dim = config.decoder_dim
    self.img_feature_dim = config.img_feature_size
    # Special vocabulary ids.
    self.start_token, self.end_token = config.start_token, config.end_token
    self.pad_token = config.pad_token
    self.batch_size = config.batch_size
    self.save_each_epoch = False
    with tf.variable_scope("t_op"):
        # Global step counter and its explicit increment op.
        self.t_op = tf.Variable(0, trainable=False)
        self.t_add_op = self.t_op.assign_add(1)
    self.use_beamsearch = False
    if self.mode in ['decode', 'rank']:
        self.beam_width = config.beam_width
        self.use_beamsearch = True if self.beam_width > 1 else False
        self.max_decode_step = config.max_decode_step
    self.build_model()
def build_model(self):
    """Assemble the full graph: placeholders, encoders, the image-feature
    regression head, the question decoder and the training ops.

    NOTE(review): unlike ABOT.build_model, this is not wrapped in a named
    variable scope - presumably intentional, but worth confirming if both
    bots share one graph.
    """
    self.init_placeholders()
    self.build_encoder()
    self.build_regression()
    self.build_decoder()
    self.build_training()
    self.summary_op = tf.summary.merge_all()
def init_placeholders(self):
    """Create input placeholders, teacher-forcing targets and the recurrent
    history-state feed-in placeholders.  Sequences are time-major [T, B]."""
    print('Building placeholders...')
    # Regression placeholders
    self.imfeat_ph = tf.placeholder(dtype=tf.float32,
                                    shape=(None, self.img_feature_dim),
                                    name='im_feats')
    # Seq2Seq placeholders
    self.encoder_inputs = tf.placeholder(dtype=tf.int32,
                                         shape=(None, None),
                                         name='encoder_inputs')
    self.encoder_inputs_length = tf.placeholder(dtype=tf.int32,
                                                shape=(None,),
                                                name='encoder_inputs_length')
    # Ground-truth question tokens for teacher-forced decoding.
    self.decoder_inputs = tf.placeholder(dtype=tf.int32,
                                         shape=(None, None),
                                         name='decoder_inputs')
    self.decoder_inputs_length = tf.placeholder(dtype=tf.int32,
                                                shape=(None,),
                                                name='decoder_inputs_length')
    decoder_start_token = tf.ones(shape=(1, self.batch_size),
                                  dtype=tf.int32) * self.start_token
    decoder_pad_token = tf.ones(shape=(1, self.batch_size),
                                dtype=tf.int32) * self.pad_token
    # Decoder inputs are the target sequence prefixed with <start>.
    self.decoder_inputs_train = tf.concat(
        [decoder_start_token, self.decoder_inputs], axis=0
    )
    self.decoder_inputs_length_train = self.decoder_inputs_length + 1
    # Targets are the sequence suffixed with padding; the one-hot mask then
    # overwrites the position right after each sequence with <end>.
    # NOTE(review): the tf.add trick assumes pad_token == 0 - confirm.
    decoder_train_targets = tf.concat([self.decoder_inputs, decoder_pad_token],
                                      axis=0)
    decoder_train_targets_seq_len, _ = tf.unstack(tf.shape(decoder_train_targets))
    decoder_train_targets_eos_mask = tf.one_hot(self.decoder_inputs_length_train - 1,
                                                decoder_train_targets_seq_len,
                                                on_value=self.end_token,
                                                off_value=self.pad_token,
                                                dtype=tf.int32)
    decoder_train_targets_eos_mask = tf.transpose(decoder_train_targets_eos_mask,
                                                  [1, 0])
    decoder_train_targets = tf.add(decoder_train_targets,
                                   decoder_train_targets_eos_mask)
    self.decoder_targets_train = decoder_train_targets
    # Recurrent dialog-history LSTM state, fed back in across rounds
    # (two layers, each with c and h).
    self.c_state_ph = tf.placeholder(dtype=tf.float32,
                                     shape=(self.batch_size, self.history_dim),
                                     name='qbot_cell_c1')
    self.h_state_ph = tf.placeholder(dtype=tf.float32,
                                     shape=(self.batch_size, self.history_dim),
                                     name='qbot_cell_h1')
    self.c2_state_ph = tf.placeholder(dtype=tf.float32,
                                      shape=(self.batch_size, self.history_dim),
                                      name='qbot_cell_c2')
    self.h2_state_ph = tf.placeholder(dtype=tf.float32,
                                      shape=(self.batch_size, self.history_dim),
                                      name='qbot_cell_h2')
    self.qbot_history_state = tuple([LSTMStateTuple(c=self.c_state_ph, h=self.h_state_ph),
                                     LSTMStateTuple(c=self.c2_state_ph, h=self.h2_state_ph)])
    # Word embeddings, uniform init in [-sqrt(3), sqrt(3)].
    sqrt3 = math.sqrt(3)
    initializer = tf.random_uniform_initializer(-sqrt3, sqrt3, dtype=tf.float32)
    self.embedding_matrix = tf.get_variable(name='embedding_matrix',
                                            shape=[self.vocab_dim, self.embed_dim],
                                            initializer=initializer,
                                            dtype=tf.float32)
def build_encoder(self):
    """Build the fact encoder and the history encoder.

    The fact encoder is a 2-layer LSTM run (time-major) over the embedded
    encoder inputs.  Its final top-layer hidden state is fed, as a
    length-1 sequence, into a 2-layer history LSTM whose initial state is
    the externally supplied placeholder state self.qbot_history_state.

    Exposes: self.encoder_inputs_embedded, self.fact_enc_out/state,
    self.hist_enc_out/state.
    """
    print('Building encoder..')
    with tf.variable_scope("encoder"):
        # Look up word embeddings for the (time-major) encoder inputs.
        self.encoder_inputs_embedded = tf.nn.embedding_lookup(
            params=self.embedding_matrix, ids=self.encoder_inputs,
            name='encoder_embedding_inputs')
        with tf.variable_scope("fact_encoder"):
            # Two stacked LSTM layers of size fact_dim.
            self.fact_encoder_cell = MultiRNNCell(
                [LSTMCell(self.fact_dim), LSTMCell(self.fact_dim)])
            self.fact_enc_out, self.fact_enc_state = tf.nn.dynamic_rnn(
                cell=self.fact_encoder_cell, inputs=self.encoder_inputs_embedded,
                sequence_length=self.encoder_inputs_length, dtype=tf.float32,
                time_major=True
            )
        with tf.variable_scope("history_encoder"):
            # Two stacked LSTM layers of size history_dim.
            self.history_encoder_cell = MultiRNNCell(
                [LSTMCell(self.history_dim), LSTMCell(self.history_dim)])
            # Top fact-encoder layer's final hidden state becomes a
            # single-timestep input; expand_dims adds the time axis.
            history_input = tf.expand_dims(self.fact_enc_state[-1].h, axis=0)
            self.hist_enc_out, self.hist_enc_state = tf.nn.dynamic_rnn(
                cell=self.history_encoder_cell, inputs=history_input,
                initial_state=self.qbot_history_state,
                dtype=tf.float32, time_major=True
            )
def build_regression(self):
    """Build the linear head that regresses an image-feature vector
    from the history encoder's final top-layer hidden state:
    y_t = h . W + b.
    """
    print('Building regression...')
    hist_h = self.hist_enc_state[-1].h
    in_dim = hist_h.get_shape()[-1].value
    self.rw = tf.get_variable("prediction_w",
                              shape=(in_dim, self.img_feature_dim))
    self.rb = tf.get_variable("prediction_b",
                              shape=(self.img_feature_dim,))
    # Predicted image feature vector, one per batch element.
    self.y_t = tf.matmul(hist_h, self.rw) + self.rb
def build_decoder(self):
    """Build the decoder RNN.

    In 'train' mode, a TrainingHelper feeds the ground-truth inputs and
    the logits / argmax predictions / padding masks needed by the
    sequence loss are exposed.  In 'decode'/'rank' mode, either a greedy
    decoder or a beam-search decoder is built, depending on
    self.use_beamsearch.
    """
    print('Buidling decoder...')
    with tf.variable_scope("decoder"):
        # Get decoder cell and initial state
        self.decoder_cell, self.decoder_initial_state = self.build_decoder_cell()
        # Output projection layer
        output_layer = Dense(self.vocab_dim, name='output_projection')
        if self.mode == 'train':
            # Construct inputs
            self.decoder_inputs_embedded = tf.nn.embedding_lookup(
                self.embedding_matrix,
                self.decoder_inputs_train)
            # Teacher forcing: ground-truth tokens are fed at each step.
            training_helper = seq2seq.TrainingHelper(
                inputs=self.decoder_inputs_embedded,
                sequence_length=self.decoder_inputs_length_train,
                time_major=True,
                name='training_helper')
            training_decoder = seq2seq.BasicDecoder(cell=self.decoder_cell,
                                                    helper=training_helper,
                                                    initial_state=self.decoder_initial_state,
                                                    output_layer=output_layer)
            # Maximum decoder time_steps in current batch
            max_decoder_length = tf.reduce_max(self.decoder_inputs_length_train)
            (self.decoder_outputs_train, self.decoder_last_state_train,
             self.decoder_outputs_length_train) = (seq2seq.dynamic_decode(
                 decoder=training_decoder,
                 output_time_major=True,
                 impute_finished=True,
                 maximum_iterations=max_decoder_length))
            # tf.identity gives the logits a stable, nameable op.
            self.decoder_logits_train = tf.identity(
                self.decoder_outputs_train.rnn_output)
            self.decoder_pred_train = tf.argmax(self.decoder_logits_train, axis=-1,
                                                name='decoder_pred_train')
            # Mask out padding positions when averaging the sequence loss.
            self.masks = tf.sequence_mask(lengths=self.decoder_inputs_length_train,
                                          maxlen=max_decoder_length, dtype=tf.float32,
                                          name='masks')
        elif self.mode in ['decode', 'rank']:
            # Every sequence in the batch starts from the start token.
            start_tokens = tf.ones([self.batch_size, ],
                                   tf.int32) * self.start_token
            end_token = self.end_token
            if not self.use_beamsearch:
                # Greedy decoder
                decoder_helper = seq2seq.GreedyEmbeddingHelper(
                    start_tokens=start_tokens,
                    end_token=end_token,
                    embedding=self.embedding_matrix)
                print('building greedy decoder...')
                inference_decoder = seq2seq.BasicDecoder(cell=self.decoder_cell,
                                                         helper=decoder_helper,
                                                         initial_state=self.decoder_initial_state,
                                                         output_layer=output_layer)
            else:
                print('building beam search decoder...')
                inference_decoder = beam_search_decoder.BeamSearchDecoder(
                    cell=self.decoder_cell,
                    embedding=self.embedding_matrix,
                    start_tokens=start_tokens,
                    end_token=end_token,
                    initial_state=self.decoder_initial_state,
                    beam_width=self.beam_width,
                    output_layer=output_layer)
            (self.decoder_outputs_decode, self.decoder_last_state_decode,
             self.decoder_outputs_length_decode) = (seq2seq.dynamic_decode(
                 decoder=inference_decoder,
                 output_time_major=True,
                 maximum_iterations=self.max_decode_step))
            if not self.use_beamsearch:
                # shape is [max_steps, batch_size]
                # Add a trailing axis so the greedy outputs line up with the
                # beam-search outputs' extra beam dimension -- presumably so
                # downstream code can treat both uniformly (confirm).
                self.decoder_pred_decode = tf.expand_dims(
                    self.decoder_outputs_decode.sample_id, axis=-1)
                self.decoder_outputs_length_decode = tf.expand_dims(
                    self.decoder_outputs_length_decode, axis=-1
                )
            else:
                self.decoder_pred_decode = self.decoder_outputs_decode.predicted_ids
def build_training(self):
self.optimizer = tf.train.AdamOptimizer()
if self.mode == 'train':
print('Building training ops...')
# Seq2Seq training
self.loss = seq2seq.sequence_loss(
logits=tf.transpose(self.decoder_logits_train, [1, 0, 2]),
targets=tf.transpose(self.decoder_targets_train, [1, 0]),
weights=self.masks,
average_across_batch=True,
average_across_timesteps=True)
tf.summary.scalar('loss', self.loss)
grads_vars = self.optimizer.compute_gradients(self.loss)
cliped_gradients = [(tf.clip_by_value(grad, -5., 5.), tvar) for grad, tvar in
grads_vars if grad is not None]
self.update_op = self.optimizer.apply_gradients(cliped_gradients, self.t_op)
self.l2_dist_sq = tf.sqrt(
tf.reduce_sum(tf.square(self.y_t - self.imfeat_ph), axis=1))
self.batch_l2_loss = tf.reduce_mean(self.l2_dist_sq)
mse_grads_vars = self.optimizer.compute_gradients(self.batch_l2_loss)
clipped_gradients_regression = [(tf.clip_by_value(grad, -5., 5.), tvar) for
grad, tvar in
mse_grads_vars if grad is not None]
tf.summary.scalar('l2_dist_batch', self.batch_l2_loss)
self.update_pred_op = self.optimizer.apply_gradients(clipped_gradients_regression,
| |
<filename>test/python/quantum_info/test_synthesis.py
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Tests for quantum synthesis methods."""
import unittest
from test import combine
from ddt import ddt
import numpy as np
import scipy.linalg as la
from qiskit import execute
from qiskit.circuit import QuantumCircuit, QuantumRegister
from qiskit.extensions import UnitaryGate
from qiskit.circuit.library import (HGate, IGate, SdgGate, SGate, U3Gate,
XGate, YGate, ZGate, CXGate, CZGate,
iSwapGate, RXXGate)
from qiskit.providers.basicaer import UnitarySimulatorPy
from qiskit.quantum_info.operators import Operator
from qiskit.quantum_info.random import random_unitary
from qiskit.quantum_info.synthesis.one_qubit_decompose import OneQubitEulerDecomposer
from qiskit.quantum_info.synthesis.two_qubit_decompose import (TwoQubitWeylDecomposition,
two_qubit_cnot_decompose,
TwoQubitBasisDecomposer,
Ud)
from qiskit.quantum_info.synthesis.ion_decompose import cnot_rxx_decompose
from qiskit.test import QiskitTestCase
def make_oneq_cliffords():
    """Return the list of 24 single-qubit Clifford operators.

    Each Clifford is built as a product of one element from each of three
    generating families (4 x 2 x 3 = 24 combinations).
    """
    ixyz = [g().to_matrix() for g in (IGate, XGate, YGate, ZGate)]
    ih = [g().to_matrix() for g in (IGate, HGate)]
    irs = [IGate().to_matrix(),
           SdgGate().to_matrix() @ HGate().to_matrix(),
           HGate().to_matrix() @ SGate().to_matrix()]
    cliffords = []
    for u in ixyz:
        for v in ih:
            for w in irs:
                cliffords.append(Operator(u @ v @ w))
    return cliffords


ONEQ_CLIFFORDS = make_oneq_cliffords()
def make_hard_thetas_oneq(smallest=1e-18, factor=3.2, steps=22, phi=0.7, lam=0.9):
    """Make 1q gates with theta/2 close to 0, pi/2, pi, 3pi/2"""
    gates = []
    # For each numerically delicate base angle, approach it from above and
    # then below along a geometric grid of offsets.
    for base in (0.0, np.pi / 2, np.pi, 3 * np.pi / 2):
        for sign in (1.0, -1.0):
            gates.extend(U3Gate(base + sign * (smallest * factor ** i), phi, lam)
                         for i in range(steps))
    return gates


HARD_THETA_ONEQS = make_hard_thetas_oneq()
# It's too slow to use all 24**4 Clifford combos. If we can make it faster, use a larger set
# Each entry is a (K1l, K1r, K2l, K2r) quadruple of single-qubit operators; the
# Weyl-decomposition tests below combine them as kron(K1l, K1r) @ Ud(a, b, c)
# @ kron(K2l, K2r).  The last entry uses generic (non-Clifford) U3 rotations.
K1K2S = [(ONEQ_CLIFFORDS[3], ONEQ_CLIFFORDS[5], ONEQ_CLIFFORDS[2], ONEQ_CLIFFORDS[21]),
         (ONEQ_CLIFFORDS[5], ONEQ_CLIFFORDS[6], ONEQ_CLIFFORDS[9], ONEQ_CLIFFORDS[7]),
         (ONEQ_CLIFFORDS[2], ONEQ_CLIFFORDS[1], ONEQ_CLIFFORDS[0], ONEQ_CLIFFORDS[4]),
         [Operator(U3Gate(x, y, z)) for x, y, z in
          [(0.2, 0.3, 0.1), (0.7, 0.15, 0.22), (0.001, 0.97, 2.2), (3.14, 2.1, 0.9)]]]
class CheckDecompositions(QiskitTestCase):
    """Implements decomposition checkers."""

    def check_one_qubit_euler_angles(self, operator, basis='U3', tolerance=1e-12,
                                     phase_equal=False):
        """Check OneQubitEulerDecomposer works for the given unitary"""
        target_unitary = operator.data
        if basis is None:
            # Only check the raw Euler angles, reconstructed via U3.
            angles = OneQubitEulerDecomposer().angles(target_unitary)
            decomp_unitary = U3Gate(*angles).to_matrix()
        else:
            decomposer = OneQubitEulerDecomposer(basis)
            decomp_unitary = Operator(decomposer(target_unitary)).data
        # Add global phase to make special unitary
        target_unitary *= la.det(target_unitary) ** (-0.5)
        decomp_unitary *= la.det(decomp_unitary) ** (-0.5)
        maxdist = np.max(np.abs(target_unitary - decomp_unitary))
        # The special-unitary normalization is only unique up to a sign,
        # so retry with the opposite sign unless phase equality is required.
        if not phase_equal and maxdist > 0.1:
            maxdist = np.max(np.abs(target_unitary + decomp_unitary))
        self.assertTrue(np.abs(maxdist) < tolerance,
                        "Operator {}: Worst distance {}".format(operator, maxdist))

    def _check_2q_decomp_distance(self, target_unitary, decomp_unitary, tolerance):
        """Assert two 2q unitaries agree up to a global phase in {1, i, -1, -i}.

        Shared helper: this normalization-and-compare logic was previously
        duplicated verbatim in check_two_qubit_weyl_decomposition and
        check_exact_decomposition.
        """
        # Normalize both to special unitaries (4x4, so det ** (-1/4)).
        target_unitary *= la.det(target_unitary) ** (-0.25)
        decomp_unitary *= la.det(decomp_unitary) ** (-0.25)
        # det only fixes the global phase up to a 4th root of unity.
        maxdists = [np.max(np.abs(target_unitary + phase * decomp_unitary))
                    for phase in [1, 1j, -1, -1j]]
        maxdist = np.min(maxdists)
        self.assertTrue(np.abs(maxdist) < tolerance,
                        "Unitary {}: Worst distance {}".format(target_unitary, maxdist))

    # FIXME: should be possible to set this tolerance tighter after improving the function
    def check_two_qubit_weyl_decomposition(self, target_unitary, tolerance=1.e-7):
        """Check TwoQubitWeylDecomposition() works for a given operator"""
        # pylint: disable=invalid-name
        decomp = TwoQubitWeylDecomposition(target_unitary)
        op = Operator(np.eye(4))
        # Rebuild the unitary as K1 . Ud(a, b, c) . K2 from the parts.
        for u, qs in (
                (decomp.K2r, [0]),
                (decomp.K2l, [1]),
                (Ud(decomp.a, decomp.b, decomp.c), [0, 1]),
                (decomp.K1r, [0]),
                (decomp.K1l, [1]),
        ):
            op = op.compose(u, qs)
        self._check_2q_decomp_distance(target_unitary, op.data, tolerance)

    def check_exact_decomposition(self, target_unitary, decomposer, tolerance=1.e-7):
        """Check exact decomposition for a particular target"""
        decomp_circuit = decomposer(target_unitary)
        result = execute(decomp_circuit, UnitarySimulatorPy()).result()
        self._check_2q_decomp_distance(target_unitary, result.get_unitary(),
                                       tolerance)
@ddt
class TestEulerAngles1Q(CheckDecompositions):
    """Test euler_angles_1q()"""

    # NOTE: the {placeholder}-style text in the method docstrings and the
    # name= templates suggest that `combine` formats them per generated
    # test case -- keep the docstrings intact.

    @combine(clifford=ONEQ_CLIFFORDS)
    def test_euler_angles_1q_clifford(self, clifford):
        """Verify euler_angles_1q produces correct Euler angles for all Cliffords."""
        self.check_one_qubit_euler_angles(clifford)

    @combine(gate=HARD_THETA_ONEQS)
    def test_euler_angles_1q_hard_thetas(self, gate):
        """Verify euler_angles_1q for close-to-degenerate theta"""
        self.check_one_qubit_euler_angles(Operator(gate))

    @combine(seed=range(5), name='test_euler_angles_1q_random_{seed}')
    def test_euler_angles_1q_random(self, seed):
        """Verify euler_angles_1q produces correct Euler angles for random_unitary (seed={seed}).
        """
        unitary = random_unitary(2, seed=seed)
        self.check_one_qubit_euler_angles(unitary)
@ddt
class TestOneQubitEulerDecomposer(CheckDecompositions):
    """Test OneQubitEulerDecomposer"""

    # Overrides the CheckDecompositions version: same distance check, but
    # wrapped in subTest (so failures report which operator), and without
    # the basis=None angles-only branch.
    def check_one_qubit_euler_angles(self, operator, basis='U3',
                                     tolerance=1e-12,
                                     phase_equal=False):
        """Check euler_angles_1q works for the given unitary"""
        decomposer = OneQubitEulerDecomposer(basis)
        with self.subTest(operator=operator):
            target_unitary = operator.data
            decomp_unitary = Operator(decomposer(target_unitary)).data
            # Add global phase to make special unitary
            target_unitary *= la.det(target_unitary) ** (-0.5)
            decomp_unitary *= la.det(decomp_unitary) ** (-0.5)
            maxdist = np.max(np.abs(target_unitary - decomp_unitary))
            # Normalization is only unique up to a sign; retry with the
            # opposite sign unless exact phase agreement was requested.
            if not phase_equal and maxdist > 0.1:
                maxdist = np.max(np.abs(target_unitary + decomp_unitary))
            self.assertTrue(np.abs(maxdist) < tolerance, "Worst distance {}".format(maxdist))

    @combine(basis=['U3', 'U1X', 'ZYZ', 'ZXZ', 'XYX', 'RR'],
             name='test_one_qubit_clifford_{basis}_basis')
    def test_one_qubit_clifford_all_basis(self, basis):
        """Verify for {basis} basis and all Cliffords."""
        for clifford in ONEQ_CLIFFORDS:
            self.check_one_qubit_euler_angles(clifford, basis)

    # Looser tolerance for the U1X test since its decomposition is less
    # numerically accurate: it involves 5 matrix multiplications and the
    # X90 gates.
    @combine(basis_tolerance=[('U3', 1e-12),
                              ('XYX', 1e-12),
                              ('ZXZ', 1e-12),
                              ('ZYZ', 1e-12),
                              ('U1X', 1e-7),
                              ('RR', 1e-12)],
             name='test_one_qubit_hard_thetas_{basis_tolerance[0]}_basis')
    def test_one_qubit_hard_thetas_all_basis(self, basis_tolerance):
        """Verify for {basis_tolerance[0]} basis and close-to-degenerate theta."""
        for gate in HARD_THETA_ONEQS:
            # basis_tolerance is a (basis, tolerance) pair.
            self.check_one_qubit_euler_angles(Operator(gate), basis_tolerance[0],
                                              basis_tolerance[1])

    @combine(basis=['U3', 'U1X', 'ZYZ', 'ZXZ', 'XYX', 'RR'], seed=range(50),
             name='test_one_qubit_random_{basis}_basis_{seed}')
    def test_one_qubit_random_all_basis(self, basis, seed):
        """Verify for {basis} basis and random_unitary (seed={seed})."""
        unitary = random_unitary(2, seed=seed)
        self.check_one_qubit_euler_angles(unitary, basis)
# FIXME: streamline the set of test cases
class TestTwoQubitWeylDecomposition(CheckDecompositions):
"""Test TwoQubitWeylDecomposition()
"""
# pylint: disable=invalid-name
def test_two_qubit_weyl_decomposition_cnot(self):
    """Verify Weyl KAK decomposition for U~CNOT"""
    for k1l, k1r, k2l, k2r in K1K2S:
        left = np.kron(k1l.data, k1r.data)
        right = np.kron(k2l.data, k2r.data)
        # CNOT is locally equivalent to Ud(pi/4, 0, 0).
        core = Ud(np.pi / 4, 0, 0)
        self.check_two_qubit_weyl_decomposition(left @ core @ right)
def test_two_qubit_weyl_decomposition_iswap(self):
    """Verify Weyl KAK decomposition for U~iswap"""
    for k1l, k1r, k2l, k2r in K1K2S:
        left = np.kron(k1l.data, k1r.data)
        right = np.kron(k2l.data, k2r.data)
        # iSWAP is locally equivalent to Ud(pi/4, pi/4, 0).
        core = Ud(np.pi / 4, np.pi / 4, 0)
        self.check_two_qubit_weyl_decomposition(left @ core @ right)
def test_two_qubit_weyl_decomposition_swap(self):
    """Verify Weyl KAK decomposition for U~swap"""
    for k1l, k1r, k2l, k2r in K1K2S:
        left = np.kron(k1l.data, k1r.data)
        right = np.kron(k2l.data, k2r.data)
        # SWAP is locally equivalent to Ud(pi/4, pi/4, pi/4).
        core = Ud(np.pi / 4, np.pi / 4, np.pi / 4)
        self.check_two_qubit_weyl_decomposition(left @ core @ right)
def test_two_qubit_weyl_decomposition_bgate(self):
    """Verify Weyl KAK decomposition for U~B"""
    for k1l, k1r, k2l, k2r in K1K2S:
        left = np.kron(k1l.data, k1r.data)
        right = np.kron(k2l.data, k2r.data)
        # The B gate is locally equivalent to Ud(pi/4, pi/8, 0).
        core = Ud(np.pi / 4, np.pi / 8, 0)
        self.check_two_qubit_weyl_decomposition(left @ core @ right)
def test_two_qubit_weyl_decomposition_a00(self, smallest=1e-18, factor=9.8, steps=11):
    """Verify Weyl KAK decomposition for U~Ud(a,0,0)"""
    # Weyl parameters close to 0, close to pi/4, plus a few generic ones.
    thetas = [smallest * factor ** i for i in range(steps)]
    thetas += [np.pi / 4 - smallest * factor ** i for i in range(steps)]
    thetas += [np.pi / 8, 0.113 * np.pi, 0.1972 * np.pi]
    for aaa in thetas:
        for k1l, k1r, k2l, k2r in K1K2S:
            left = np.kron(k1l.data, k1r.data)
            right = np.kron(k2l.data, k2r.data)
            self.check_two_qubit_weyl_decomposition(left @ Ud(aaa, 0, 0) @ right)
def test_two_qubit_weyl_decomposition_aa0(self, smallest=1e-18, factor=9.8, steps=11):
    """Verify Weyl KAK decomposition for U~Ud(a,a,0)"""
    # Weyl parameters close to 0, close to pi/4, plus a few generic ones.
    thetas = [smallest * factor ** i for i in range(steps)]
    thetas += [np.pi / 4 - smallest * factor ** i for i in range(steps)]
    thetas += [np.pi / 8, 0.113 * np.pi, 0.1972 * np.pi]
    for aaa in thetas:
        for k1l, k1r, k2l, k2r in K1K2S:
            left = np.kron(k1l.data, k1r.data)
            right = np.kron(k2l.data, k2r.data)
            self.check_two_qubit_weyl_decomposition(left @ Ud(aaa, aaa, 0) @ right)
def test_two_qubit_weyl_decomposition_aaa(self, smallest=1e-18, factor=9.8, steps=11):
    """Verify Weyl KAK decomposition for U~Ud(a,a,a)"""
    # Weyl parameters close to 0, close to pi/4, plus a few generic ones.
    thetas = [smallest * factor ** i for i in range(steps)]
    thetas += [np.pi / 4 - smallest * factor ** i for i in range(steps)]
    thetas += [np.pi / 8, 0.113 * np.pi, 0.1972 * np.pi]
    for aaa in thetas:
        for k1l, k1r, k2l, k2r in K1K2S:
            left = np.kron(k1l.data, k1r.data)
            right = np.kron(k2l.data, k2r.data)
            self.check_two_qubit_weyl_decomposition(left @ Ud(aaa, aaa, aaa) @ right)
def test_two_qubit_weyl_decomposition_aama(self, smallest=1e-18, factor=9.8, steps=11):
"""Verify Weyl KAK decomposition for U~Ud(a,a,-a)"""
for aaa in ([smallest * factor ** i for i in range(steps)] +
[np.pi / 4 - smallest * factor ** i for i in range(steps)] +
[np.pi / 8, 0.113 * np.pi, 0.1972 * np.pi]):
for k1l, k1r, k2l, k2r in K1K2S:
k1 = np.kron(k1l.data, k1r.data)
| |
skyval = self.mean_clip
else:
raise ValueError('method must be one of "sigclip" or "median" ')
self.data -= skyval
if verbose:
if self.infile is not None:
descript = self.infile
else:
descript = 'the data'
print(' Subtracted value of %f from %s' % (skyval, descript))
return skyval
# -----------------------------------------------------------------------
def make_bpm(self, type, nsig=10., goodval=1, outfile=None):
    """
    Makes a bad pixel mask (bpm) based on the data in this WcsHDU object.
    Pixels that deviate from the sigma-clipped mean by more than nsig
    clipped sigma are flagged as bad.

    NOTE: For now only the type='dark' option is supported

    Inputs:
      type    - type of data.  Right now only type='dark' is supported
      nsig    - number of sigma deviation from the clipped mean to indicate
                a bad pixel.  Default=10.
      goodval - The value (1 or 0) that indicates a good pixel in the
                pixel mask.  Bad pixels will be indicated by the opposite
                value.  Default=1
      outfile - if not None, write the mask to this FITS file (and return
                nothing); otherwise return the mask array.
    """
    # Only dark/bias-style frames are handled so far.
    if type.lower() != 'dark':
        raise TypeError('Currently only "dark" is supported')

    # Sigma clipping refreshes self.mean_clip / self.rms_clip (used below).
    self.sigma_clip()

    # Start with every pixel marked good; bad pixels get the complement.
    if goodval not in (0, 1):
        raise ValueError('goodval must be either 1 or 0')
    bpm = np.full(self.data.shape, float(goodval))
    badval = 1 - goodval

    # Flag pixels more than nsig clipped-sigma away from the clipped mean.
    deviation = np.fabs(self.data - self.mean_clip)
    bpm[deviation > nsig * self.rms_clip] = badval

    if outfile is not None:
        pf.PrimaryHDU(bpm).writeto(outfile, overwrite=True)
    else:
        return bpm
# -----------------------------------------------------------------------
def make_objmask(self, nsig=0.7, init_kernel=3, bpm=None, flat=None):
    """
    Makes a mask that is intended to indicate regions of the image
    that contain flux from objects, as opposed to being blank sky.
    The mask is set so that pixels containing object emission are
    indicated by 1, while blank sky pixels are indicated by 0.

    Inputs:
      nsig        - number of sigma above the noise level a smoothed image
                    must have to be considered object emission.  Default=0.7
                    (the previous docstring said 1.5, which did not match
                    the actual default in the signature)
      init_kernel - size in pixels of the median-smoothing kernel used for
                    the initial detection.  Default=3
      bpm         - bad pixel mask to apply.  Default=None
      flat        - currently unused by this method.  Default=None

    Output:
      objmask - object mask (integer array of 1s and 0s)
    """
    """ Compute the clipped rms in the image """
    self.sigma_clip(mask=bpm)

    """ Median smooth the image and set initial object mask """
    med = self.smooth(init_kernel, smtype='median')
    # Pixels whose smoothed value exceeds the clipped mean by more than
    # nsig clipped-sigma are flagged as object (1); the rest are sky (0).
    objmask = \
        np.where((med - self.mean_clip) / self.rms_clip > nsig, 1, 0)

    """ Reject isolated cosmic rays via a minimum filter """
    objmask = ndimage.minimum_filter(objmask, (init_kernel+2))

    """ Grow the mask regions to encompass low SNR regions """
    growkernel = int(init_kernel * 3 + 2)
    # Applied twice: each pass dilates the surviving object regions by
    # roughly one kernel width.
    objmask = ndimage.maximum_filter(objmask, growkernel)
    objmask = ndimage.maximum_filter(objmask, growkernel)
    return objmask
# -----------------------------------------------------------------------
def apply_pixmask(self, mask, badval=0, maskval=np.nan):
    """
    Applies a mask to the data by setting the pixels identified by
    the mask to the requested value (default value is NaN).

    If the mask is boolean, then the pixels to be masked are identified
    by True.
    If the mask is integer- or float-valued, then the pixels to be masked
    are identified as those that are set to badval.  For example, many
    bad pixel masks have goodval=1 and badval=0.

    Inputs:
      mask    - mask array (bool, int, or float dtype)
      badval  - value that marks bad pixels in an int/float mask. Default=0
      maskval - value written into the masked pixels.  Default=np.nan
    """
    """ Check the mask type """
    # Bug fix: the original float branch tested 'mask.dytpe' (a typo that
    # raised AttributeError) and called int(mask), which raises on arrays.
    # Boolean masks also crashed, since dtype == int is False for bool and
    # they fell through into the typo'd branch.  Check bool first and use
    # astype(int) for the float case.
    if mask.dtype == bool:
        pixmask = mask.copy()
    elif mask.dtype == int:
        pixmask = mask == badval
    elif mask.dtype == float:
        pixmask = mask.astype(int) == badval
    else:
        raise TypeError('Mask elements must be either int or bool')

    """ Apply the mask """
    self.data[pixmask] = maskval
# -----------------------------------------------------------------------
def subim_bounds_xy(self, centpos, imsize):
    """
    Takes a requested image center (xcent, ycent) and requested image size
    (all in pixels) and returns the coordinates of the lower left corner
    (x1, y1) and the upper right corner (x2, y2).

    Inputs:
      centpos - (x, y) coordinates of cutout center, in pixels.
                May be a 2-element numpy array, list, or tuple, or None
                (meaning the center of the full image).  Either element
                may also individually be None.
      imsize  - size of cutout (postage stamp) image, in pixels.
                May be a single number (square image), a 2-element numpy
                array / list / tuple (xsize, ysize), or None (full image).
    """
    # Full image dimensions, from the header
    hdr = self.header
    nx = hdr['naxis1']
    ny = hdr['naxis2']

    # Default to the center of the full data set; either coordinate can
    # be overridden by a non-None entry in centpos.
    xcent = int((nx + 1.) / 2.)
    ycent = int((ny + 1.) / 2.)
    if centpos is not None:
        if centpos[0] is not None:
            xcent = centpos[0]
        if centpos[1] is not None:
            ycent = centpos[1]

    # No requested size means use the full image.
    if imsize is None:
        return 0, 0, nx, ny

    # Accept a scalar (square cutout) or a 2-element size.
    # For now this does not deal with regions partially outside the image.
    dims = np.atleast_1d(imsize)
    xsize = dims[0]
    ysize = dims[1] if dims.size > 1 else dims[0]
    x1 = int(xcent - int(xsize / 2.0))
    y1 = int(ycent - int(ysize / 2.0))
    return x1, y1, int(x1 + xsize), int(y1 + ysize)
# -----------------------------------------------------------------------
def cutout_xy(self, x1, y1, x2, y2, nanval=0., fixnans=False,
              verbose=True):
    """
    Selects the data in the subimage defined by the bounds x1, x2, y1, y2.
    These were either set directly (e.g., by a call to imcopy) or by the
    subim_bounds_xy function (which takes a subimage center and size)

    Inputs:
      x1, y1  - lower-left corner of the cutout, in pixels
      x2, y2  - upper-right corner (exclusive slice bounds), in pixels
      nanval  - value used to replace non-finite pixels when fixnans is
                True.  The special string 'max' replaces them with 10x the
                largest finite value.  Default=0.
      fixnans - replace non-finite pixels with nanval if True. Default=False
      verbose - Print out useful information if True (the default)

    Output:
      subim - the cutout, wrapped in a new WcsHDU
    """
    # NEED TO ADD A CHECK FOR VALUES OF X1, X2, ETC. ##

    """
    Cut out the subimage based on the bounds.
    Note that radio images often have 4 dimensions (x, y, freq, stokes)
    so for those just take the x and y data
    """
    inhdr = self.header
    if inhdr['naxis'] == 4:
        data = self.data[0, 0, y1:y2, x1:x2].copy()
    else:
        data = self.data[y1:y2, x1:x2].copy()

    """ Fix NaNs """
    if fixnans:
        if nanval == 'max':
            # Use a value safely above the largest finite pixel value.
            goodmask = np.isfinite(data)
            dmax = data[goodmask].max()
            nanval = 10. * dmax
        data[~np.isfinite(data)] = nanval

    """ Set output image properties """
    nx = x2 - x1
    ny = y2 - y1
    subcentx = 0.5 * (x1 + x2)
    subcenty = 0.5 * (y1 + y2)

    """ Set up new WCS information if the image has WCS """
    hdr = inhdr.copy()
    hdr['naxis'] = 2
    hdr['naxis1'] = nx
    hdr['naxis2'] = ny
    # Drop any higher (freq/stokes) axes from the output header.
    for key in ['naxis3', 'naxis4']:
        if key in hdr:
            del hdr[key]
    if self.wcsinfo is not None:
        # xy = np.array([[subcentx, subcenty]])
        # radec = self.wcsinfo.wcs_pix2world(xy, 0)[0]
        # hdr['crpix1'] = nx / 2.
        # hdr['crpix2'] = ny / 2.
        # Shift the WCS reference pixel into the cutout's pixel frame.
        # NOTE(review): this assigns crpix on *self*, not on the new subim
        # -- presumably WcsHDU propagates it via a property setter; confirm.
        self.crpix = [hdr['crpix1'] - x1, hdr['crpix2'] - y1]

    """ Put the new data and header into a WcsHDU format """
    subim = WcsHDU(data, hdr, verbose=False, wcsverb=False)

    """ Print out useful information """
    if verbose:
        print(' Cutout data in section [xrange,yrange]: '
              '[%d:%d,%d:%d]' % (x1, x2, y1, y2))
        print(' Cutout image center (x, y): (%d, %d)' %
              (subcentx, subcenty))
        print(' Cutout image size (x y): %dx%d' % (nx, ny))

    """
    Update the header info, including updating the CRPIXn values if they
    are present.
    """
    if self.infile is not None:
        subim.header['ORIG_IM'] = 'Copied from %s' % self.infile
    subim.header['trim'] = \
        'Region in original image [xrange, yrange]: [%d:%d,%d:%d]' % \
        (x1, x2, y1, y2)

    """ Return the new HDU """
    return subim
# -----------------------------------------------------------------------
def cutout_radec(self, imcent, imsize, outscale=None, fixnans=False,
nanval=0., theta_tol=1.e-5, verbose=True, debug=True):
"""
Makes a cutout of the data based on a image center in (ra, dec)
and image size in arcseconds.
The vast majority of the code is | |
new_prof_ids = endpoint_data.profile_ids
new_tags = set()
for profile_id in new_prof_ids:
for tag in self.tags_by_prof_id.get(profile_id, []):
new_tags.add((profile_id, tag))
if new_prof_ids != old_prof_ids:
# Profile ID changed, or an add/delete. the _xxx_profile_index
# methods ignore profile_id == None so we'll do the right thing.
_log.debug("Profile IDs changed from %s to %s",
old_prof_ids, new_prof_ids)
self._remove_profile_index(old_prof_ids, endpoint_id)
self._add_profile_index(new_prof_ids, endpoint_id)
# Since we've defaulted new/old_tags to set() if needed, we can
# use set operations to calculate the tag changes.
added_tags = new_tags - old_tags
unchanged_tags = new_tags & old_tags
removed_tags = old_tags - new_tags
# These default to set() if there are no IPs.
old_ips = old_endpoint.ip_addresses
new_ips = endpoint_data.ip_addresses
# Add *new* IPs to new tags. On a deletion, added_tags will be empty.
# Do this first to avoid marking ipsets as dirty if an endpoint moves
# from one profile to another but keeps the same tag.
for profile_id, tag in added_tags:
for ip in new_ips:
self._add_mapping(tag, profile_id, endpoint_id, ip)
# Change IPs in unchanged tags.
added_ips = new_ips - old_ips
removed_ips = old_ips - new_ips
for profile_id, tag in unchanged_tags:
for ip in removed_ips:
self._remove_mapping(tag, profile_id, endpoint_id, ip)
for ip in added_ips:
self._add_mapping(tag, profile_id, endpoint_id, ip)
# Remove *all* *old* IPs from removed tags. For a deletion, only this
# loop will fire.
for profile_id, tag in removed_tags:
for ip in old_ips:
self._remove_mapping(tag, profile_id, endpoint_id, ip)
def _add_mapping(self, tag_id, profile_id, endpoint_id, ip_address):
    """
    Adds the given tag->IP->profile->endpoint mapping to the index.
    Marks the tag as dirty if the update resulted in the IP being
    newly added.

    :param str tag_id: Tag ID
    :param str profile_id: Profile ID
    :param EndpointId endpoint_id: ID of the endpoint
    :param str ip_address: IP address to add
    """
    profiles_for_ip = self.ip_owners_by_tag[tag_id][ip_address]
    # Record emptiness before we touch it: an empty dict here means this
    # IP was not yet a member of the tag's ipset.
    newly_added = not profiles_for_ip
    profiles_for_ip[profile_id].add(endpoint_id)
    if newly_added:
        self._dirty_tags.add(tag_id)
def _remove_mapping(self, tag_id, profile_id, endpoint_id, ip_address):
    """
    Removes the tag->IP->profile->endpoint mapping from index.
    Marks the tag as dirty if the update resulted in the IP being
    removed.

    :param str tag_id: Tag ID
    :param str profile_id: Profile ID
    :param EndpointId endpoint_id: ID of the endpoint
    :param str ip_address: IP address to remove
    """
    ips_for_tag = self.ip_owners_by_tag[tag_id]
    profiles_for_ip = ips_for_tag[ip_address]
    ep_ids = profiles_for_ip[profile_id]
    ep_ids.discard(endpoint_id)
    # Prune empty levels of the index as they drain; the tag only
    # becomes dirty once the IP itself has no owners left.
    if not ep_ids:
        del profiles_for_ip[profile_id]
        if not profiles_for_ip:
            del ips_for_tag[ip_address]
            self._dirty_tags.add(tag_id)
            if not ips_for_tag:
                del self.ip_owners_by_tag[tag_id]
def _add_profile_index(self, prof_ids, endpoint_id):
    """
    Notes in the index that an endpoint uses the given profiles.

    :param set[str] prof_ids: set of profile IDs that the endpoint is in.
    :param EndpointId endpoint_id: ID of the endpoint
    """
    index = self.endpoint_ids_by_profile_id
    for prof_id in prof_ids:
        index[prof_id].add(endpoint_id)
def _remove_profile_index(self, prof_ids, endpoint_id):
    """
    Notes in the index that an endpoint no longer uses any of the
    given profiles.

    :param set[str] prof_ids: set of profile IDs to remove the endpoint
        from.
    :param EndpointId endpoint_id: ID of the endpoint
    """
    for prof_id in prof_ids:
        remaining = self.endpoint_ids_by_profile_id[prof_id]
        remaining.discard(endpoint_id)
        # Drop the whole entry once its last endpoint is gone.
        if not remaining:
            _log.debug("No more endpoints use profile %s", prof_id)
            del self.endpoint_ids_by_profile_id[prof_id]
def _finish_msg_batch(self, batch, results):
    """
    Called after a batch of messages is finished, processes any
    pending TagIpset member updates.

    Doing that here allows us to batch lots of updates into one replace
    operation.  It also avoids wasted effort if tags are flapping.
    """
    super(IpsetManager, self)._finish_msg_batch(batch, results)
    _log.info("Finishing batch, sending updates to any dirty tags..")
    self._update_dirty_active_ipsets()
    # NOTE(review): assumes _update_dirty_active_ipsets has now honoured
    # any forced reprogram requested during this batch -- confirm.
    self._force_reprogram = False
    _log.info("Finished sending updates to dirty tags.")
class EndpointData(object):
    """
    Space-efficient read-only 'struct' to hold only the endpoint data
    that we need.
    """
    __slots__ = ["_profile_ids", "_ip_addresses"]

    def __init__(self, profile_ids, ip_addresses):
        """
        :param sequence profile_ids: The profile IDs for the endpoint.
        :param sequence ip_addresses: IP addresses for the endpoint.
        """
        # The data model orders profile IDs, but the ipsets code is
        # order-insensitive, so sorted tuples give canonical values that
        # are cheap to compare and hash.
        self._profile_ids = tuple(sorted(profile_ids))
        self._ip_addresses = tuple(sorted(ip_addresses))

    @property
    def profile_ids(self):
        """:returns set[str]: profile IDs."""
        # Built on demand to keep occupancy down: a set costs ~250B of
        # overhead vs 64B for the stored tuple.
        return set(self._profile_ids)

    @property
    def ip_addresses(self):
        """:returns set[str]: IP addresses."""
        # Built on demand to keep occupancy down: a set costs ~250B of
        # overhead vs 64B for the stored tuple.
        return set(self._ip_addresses)

    def __repr__(self):
        return self.__class__.__name__ + "(%s,%s)" % (self._profile_ids,
                                                      self._ip_addresses)

    def __eq__(self, other):
        if self is other:
            return True
        return (isinstance(other, EndpointData) and
                (self._profile_ids, self._ip_addresses) ==
                (other._profile_ids, other._ip_addresses))

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return hash(self._profile_ids) + hash(self._ip_addresses)


EMPTY_ENDPOINT_DATA = EndpointData([], [])
class IpsetActor(Actor):
    """
    Actor managing a single ipset.

    Batches up updates to minimise the number of actual dataplane updates.
    """

    def __init__(self, ipset, qualifier=None):
        """
        :param Ipset ipset: Ipset object to wrap.
        :param str qualifier: Actor qualifier string for logging.
        """
        super(IpsetActor, self).__init__(qualifier=qualifier)
        self._ipset = ipset
        # Desired membership of the ipset.
        self.members = set()
        # Membership actually written to the dataplane; None until the
        # first sync.
        self.programmed_members = None
        self._force_reprogram = False
        self.stopped = False

    @property
    def ipset_name(self):
        """
        The name of the primary ipset.  Safe to access from another
        greenlet; only accesses immutable state.
        """
        return self._ipset.set_name

    def owned_ipset_names(self):
        """
        This method is safe to call from another greenlet; it only accesses
        immutable state.

        :return: set of name of ipsets that this Actor owns and manages.
            the sets may or may not be present.
        """
        return {self._ipset.set_name, self._ipset.temp_set_name}

    @actor_message()
    def replace_members(self, members, force_reprogram=False):
        """
        Replace the members of this ipset with the supplied set.

        :param set[str]|list[str] members: IP address strings.
        """
        _log.info("Replacing members of ipset %s", self.name)
        self.members.clear()
        self.members.update(members)
        self._force_reprogram |= force_reprogram

    def _finish_msg_batch(self, batch, results):
        _log.debug("IpsetActor._finish_msg_batch() called")
        # Guard clauses: nothing to do once stopped, or when the
        # dataplane already matches and no reprogram was forced.
        if self.stopped:
            return
        if not self._force_reprogram and self.members == self.programmed_members:
            return
        _log.debug("IpsetActor not in sync, updating dataplane.")
        self._sync_to_ipset()
        self._force_reprogram = False

    def _sync_to_ipset(self):
        _log.info("Rewriting %s with %d members.", self, len(self.members))
        _log.debug("Setting ipset %s to %s", self, self.members)
        # Defer to our helper.
        self._ipset.replace_members(self.members)
        # We have got the set into the correct state.
        self.programmed_members = self.members.copy()

    def __str__(self):
        details = (self._event_queue.qsize(), bool(self.greenlet),
                   self._current_msg, self.name)
        return (self.__class__.__name__ +
                "<queue_len=%s,live=%s,msg=%s,name=%s>" % details)
class TagIpset(IpsetActor, RefCountedActor):
    """
    Specialised, RefCountedActor managing a single tag's ipset.
    """

    def __init__(self, tag, ip_type):
        """
        :param str tag: Name of tag that this ipset represents. Note: not
            the name of the ipset itself. The name of the ipset is derived
            from this value.
        :param ip_type: One of the constants, futils.IPV4 or futils.IPV6
        """
        self.tag = tag
        name = tag_to_ipset_name(ip_type, tag)
        tmpname = tag_to_ipset_name(ip_type, tag, tmp=True)
        family = "inet" if ip_type == IPV4 else "inet6"
        # Helper class, used to do atomic rewrites of ipsets.
        ipset = Ipset(name, tmpname, family, "hash:ip")
        super(TagIpset, self).__init__(ipset, qualifier=tag)
        # Notified ready?  Set to True after our first batch, once the set
        # has been created in the dataplane.
        self.notified_ready = False

    @actor_message()
    def on_unreferenced(self):
        """Last reference dropped: delete the ipset and report cleanup."""
        # Mark the object as stopped so that we don't accidentally recreate
        # the ipset in _finish_msg_batch.
        self.stopped = True
        try:
            self._ipset.delete()
        finally:
            # Always report cleanup, even if the delete itself failed.
            self._notify_cleanup_complete()

    def _finish_msg_batch(self, batch, results):
        """Syncs via the superclass, then signals readiness once."""
        _log.debug("_finish_msg_batch on TagIpset")
        super(TagIpset, self)._finish_msg_batch(batch, results)
        if not self.notified_ready:
            # We have created the set, so we are now ready.
            _log.debug("TagIpset _finish_msg_batch notifying ready")
            self.notified_ready = True
            self._notify_ready()

    def __str__(self):
        """Debug representation: queue length, liveness, current msg, id."""
        return (
            self.__class__.__name__ + "<queue_len=%s,live=%s,msg=%s,"
                                      "name=%s,id=%s>" %
            (
                self._event_queue.qsize(),
                bool(self.greenlet),
                self._current_msg,
                self.name,
                self._id,
            )
        )
class Ipset(object):
"""
(Synchronous) wrapper around an ipset, supporting atomic rewrites.
"""
    def __init__(self, ipset_name, temp_ipset_name, ip_family,
                 ipset_type="hash:ip"):
        """
        :param str ipset_name: name of the primary ipset. Must be less than
            32 chars.
        :param str temp_ipset_name: name of a secondary, temporary ipset to
            use when doing an atomic rewrite. Must be less than 32 chars.
        :param str ip_family: "inet" (IPv4) or "inet6" (IPv6).
        :param str ipset_type: the ipset type string, e.g. "hash:ip".
        """
        # Enforce the documented 32-character name limit up front.
        assert len(ipset_name) < 32
        assert len(temp_ipset_name) < 32
        self.set_name = ipset_name
        self.temp_set_name = temp_ipset_name
        self.type = ipset_type
        assert ip_family in ("inet", "inet6")
        self.family = ip_family
def exists(self):
try:
futils.check_call(["ipset", "list", self.set_name])
except FailedSystemCall as e:
if e.retcode == 1 and "does not exist" in e.stderr:
return False
else:
_log.exception("Failed to check if ipset exists")
raise
else:
return True
def ensure_exists(self):
"""
Creates the ipset iff it does not exist.
Leaves the set and its contents untouched if it already exists.
"""
input_lines = [self._create_cmd(self.set_name)]
self._exec_and_commit(input_lines)
def replace_members(self, members):
"""
| |
# Copyright 2020 Toyota Research Institute. All rights reserved.
import argparse
import numpy as np
import os
import torch
from glob import glob
import sys
from packnet_sfm.models.model_wrapper import ModelWrapper
from packnet_sfm.datasets.augmentations import resize_image, to_tensor
from packnet_sfm.utils.horovod import hvd_init, rank, print0
from packnet_sfm.utils.image import load_image
from packnet_sfm.utils.config import parse_test_file
from packnet_sfm.utils.load import set_debug
from packnet_sfm.utils.depth import inv2depth, depth2inv
from packnet_sfm.geometry.camera_multifocal_valeo import CameraMultifocal
from packnet_sfm.datasets.kitti_based_valeo_dataset_utils import \
read_raw_calib_files_camera_valeo_with_suffix, transform_from_rot_trans
from packnet_sfm.geometry.pose import Pose
from packnet_sfm.losses.multiview_photometric_loss import SSIM
import torch.nn.functional as funct
import torch.nn as nn
import torch.optim as optim
import matplotlib.pyplot as plt
import cv2
# Autograd anomaly detection: makes backward errors report the op that
# produced them (debugging aid; slows down backward passes).
torch.autograd.set_detect_anomaly(True)

# Pairs of adjacent cameras
# If 4 cameras are included, adjacent pairs are front-right, right-rear, rear-left, left-front.
# If 5 cameras are included, must add pairs with long range (LR):
# LR-left, LR-front, LR-right (LR is not adjacent to the rear camera)
CAMERA_CONTEXT_PAIRS = {
    4: [[0, 1], [1, 2], [2, 3], [3, 0]],
    5: [[0, 1], [1, 2], [2, 3], [3, 0], [4, 0], [4, 3], [4, 1]]
}
def parse_args():
    """Parse command-line arguments for the recalibration tool.

    Returns
    -------
    args : argparse.Namespace
        Parsed options.
    N : int
        Number of cameras (one checkpoint per camera); must be 4 or 5.
    """
    parser = argparse.ArgumentParser(description='Recalibration tool, for a specific sequence from the Valeo dataset')
    parser.add_argument('--checkpoints', type=str, nargs='+', help='Checkpoint files (.ckpt)')
    parser.add_argument('--input_folder', type=str, default=None, help='Input base folder')
    parser.add_argument('--input_imgs', type=str, nargs='+', default=None, help='Input images')
    parser.add_argument('--image_shape', type=int, nargs='+', default=None,
                        help='Input and output image shape '
                             '(default: checkpoint\'s config.datasets.augmentation.image_shape)')
    parser.add_argument('--n_epochs', type=int, default=1, help='Number of epochs')
    parser.add_argument('--every_n_files', type=int, default=1, help='Step in files if folders are used')
    parser.add_argument('--lr', type=float, default=0.05, help='Learning rate')
    parser.add_argument('--scheduler_step_size', type=int, default=20, help='How many epochs before reducing lr')
    parser.add_argument('--scheduler_gamma', type=float, default=1.0, help='Factor for lr reduction (<=1)')
    parser.add_argument('--regul_weight_trans', type=float, default=5.0, help='Regularization weight for position correction')
    parser.add_argument('--regul_weight_rot', type=float, default=0.001, help='Regularization weight for rotation correction')
    parser.add_argument('--regul_weight_overlap', type=float, default=0.01, help='Regularization weight for the overlap between cameras')
    parser.add_argument('--save_pictures', action='store_true', default=False)
    parser.add_argument('--save_plots', action='store_true', default=False)
    parser.add_argument('--save_rot_tab', action='store_true', default=False)
    parser.add_argument('--show_plots', action='store_true', default=False)
    parser.add_argument('--save_folder', type=str, default='/home/vbelissen/Downloads', help='Where to save pictures')
    parser.add_argument('--frozen_cams_trans', type=int, nargs='+', default=None, help='List of frozen cameras in translation')
    parser.add_argument('--frozen_cams_rot', type=int, nargs='+', default=None, help='List of frozen cameras in rotation')
    parser.add_argument('--calibrations_suffix', type=str, default='', help='If you want another calibration folder')
    parser.add_argument('--depth_suffix', type=str, default='', help='If you want another folder for depth maps (according to calibration)')
    parser.add_argument('--use_lidar', action='store_true', default=False)
    parser.add_argument('--lidar_weight', type=float, default=1., help='Weight in lidar loss')
    args = parser.parse_args()
    # One checkpoint per camera; each must be a Lightning .ckpt file.
    checkpoints = args.checkpoints
    N = len(checkpoints)
    for i in range(N):
        assert checkpoints[i].endswith('.ckpt')
    assert args.image_shape is None or len(args.image_shape) == 2, \
        'You need to provide a 2-dimensional tuple as shape (H,W)'
    # Exactly one of --input_folder / --input_imgs must be supplied.
    assert (args.input_folder is None and args.input_imgs is not None) or (args.input_folder is not None and args.input_imgs is None), \
        'You need to provide either a list of input base folders for images or a list of input images, one for each camera'
    assert N == 4 or N == 5, 'You should have 4 or 5 cameras in the setup'
    return args, N
def get_base_folder(image_file):
    """The base folder"""
    path_parts = image_file.split('/')
    # Drop the last 6 components (split/sequence/camera/file levels).
    return '/'.join(path_parts[:-6])
def get_camera_name(image_file):
    """Returns 'cam_i', i between 0 and 4"""
    # The camera folder is the parent directory of the image file.
    return image_file.rsplit('/', 2)[-2]
def get_sequence_name(image_file):
    """Returns a sequence name like '20180227_185324'."""
    # The sequence folder sits two levels above the image file.
    return image_file.rsplit('/', 3)[-3]
def get_split_type(image_file):
    """Returns 'train', 'test' or 'test_sync'."""
    # The split folder sits three levels above the image file.
    return image_file.rsplit('/', 4)[-4]
def get_path_to_theta_lut(image_file):
    """Path to the per-camera theta look-up table derived from image_file."""
    sequence = get_sequence_name(image_file)
    lut_name = sequence + '_' + get_camera_name(image_file) + '_1280_800.npy'
    return os.path.join(get_base_folder(image_file),
                        'calibrations_theta_lut',
                        'fisheye',
                        get_split_type(image_file),
                        sequence,
                        lut_name)
def get_path_to_ego_mask(image_file):
    """Path to the per-camera ego (semantic) mask derived from image_file."""
    sequence = get_sequence_name(image_file)
    mask_name = sequence + '_' + get_camera_name(image_file) + '.npy'
    return os.path.join(get_base_folder(image_file),
                        'semantic_masks',
                        'fisheye',
                        get_split_type(image_file),
                        sequence,
                        mask_name)
def get_camera_type(image_file, calib_data):
    """Returns the camera type."""
    camera_type = calib_data[get_camera_name(image_file)]['type']
    assert camera_type in ('fisheye', 'perspective'), \
        'Only fisheye and perspective cameras supported'
    return camera_type
def get_camera_type_int(camera_type):
    """Returns an int based on the camera type."""
    # 0 = fisheye, 1 = perspective, 2 = anything else (unknown).
    type_codes = {'fisheye': 0, 'perspective': 1}
    return type_codes.get(camera_type, 2)
def get_intrinsics_fisheye(image_file, calib_data):
    """Get intrinsics from the calib_data dictionary (fisheye cam)."""
    cam_entry = calib_data[get_camera_name(image_file)]
    base_intr = cam_entry['base_intrinsics']
    intr = cam_entry['intrinsics']
    # Polynomial distortion coefficients c1..c4.
    poly_coeffs = np.array([float(intr[k]) for k in ('c1', 'c2', 'c3', 'c4')],
                           dtype='float32')
    principal_point = np.array([float(base_intr['cx_offset_px']),
                                float(base_intr['cy_offset_px'])],
                               dtype='float32')
    scale_factors = np.array([1., float(intr['pixel_aspect_ratio'])],
                             dtype='float32')
    return poly_coeffs, principal_point, scale_factors
def get_null_intrinsics_fisheye():
    """Get null intrinsics (fisheye cam)."""
    # Zero placeholders for (poly_coeffs, principal_point, scale_factors).
    return tuple(np.zeros(n, dtype='float32') for n in (4, 2, 2))
def get_intrinsics_distorted(image_file, calib_data):
    """Get intrinsics from the calib_data dictionary (distorted perspective cam)."""
    cam_entry = calib_data[get_camera_name(image_file)]
    base_intr = cam_entry['base_intrinsics']
    intr = cam_entry['intrinsics']
    cx = float(base_intr['cx_px'])
    cy = float(base_intr['cy_px'])
    fx = float(intr['f_x_px'])
    fy = float(intr['f_y_px'])
    # Pinhole matrix plus radial (k1..k3) and tangential (p1, p2) terms.
    K = np.array([[fx, 0, cx],
                  [0, fy, cy],
                  [0, 0, 1]], dtype='float32')
    radial = np.array([float(intr['dist_k1']),
                       float(intr['dist_k2']),
                       float(intr['dist_k3'])], dtype='float32')
    tangential = np.array([float(intr['dist_p1']),
                           float(intr['dist_p2'])], dtype='float32')
    return K, radial, tangential
def get_null_intrinsics_distorted():
    """Get null intrinsics (distorted perspective cam)."""
    # Zero placeholders for (K, radial k1..k3, tangential p1/p2).
    return tuple(np.zeros(shape, dtype='float32') for shape in ((3, 3), 3, 2))
def get_full_intrinsics(image_file, calib_data):
    """Get intrinsics from the calib_data dictionary (fisheye or distorted perspective cam)."""
    camera_type = get_camera_type(image_file, calib_data)
    if camera_type == 'fisheye':
        fisheye_params = get_intrinsics_fisheye(image_file, calib_data)
        distorted_params = get_null_intrinsics_distorted()
    elif camera_type == 'perspective':
        fisheye_params = get_null_intrinsics_fisheye()
        distorted_params = get_intrinsics_distorted(image_file, calib_data)
    else:
        sys.exit('Wrong camera type')
    # (poly_coeffs, principal_point, scale_factors, K, k, p)
    return fisheye_params + distorted_params
def get_depth_file(image_file, depth_suffix):
    """
    Get the corresponding depth file from an image file.

    Parameters
    ----------
    image_file: string
        The image filename
    depth_suffix: string
        Can be empty ('') or like '_1' if you want to use another depth map folder
        (typically for recalibrated depth maps)
    """
    stem, _ = os.path.splitext(os.path.basename(image_file))
    # Depth maps mirror the image tree but with 'cam' renamed to 'velodyne'.
    velodyne_file = stem.replace('cam', 'velodyne') + '.npz'
    return os.path.join(get_base_folder(image_file),
                        'depth_maps' + depth_suffix,
                        'fisheye',
                        get_split_type(image_file),
                        get_sequence_name(image_file),
                        get_camera_name(image_file).replace('cam', 'velodyne'),
                        velodyne_file)
def transform_from_rot_trans_torch(R, t):
    """
    Transformation matrix from rotation matrix and translation vector.

    Parameters
    ----------
    R : torch.Tensor [3,3]
        Rotation matrix
    t : torch.Tensor [3]
        translation vector

    Returns
    -------
    matrix : torch.Tensor [4,4]
        Transformation matrix
    """
    R = R.view(3, 3)
    t = t.view(3, 1)
    # Build the homogeneous bottom row on the same device and dtype as the
    # inputs instead of hard-coding float32 + .cuda(): this keeps the old
    # behaviour for CUDA float tensors while also working on CPU and with
    # other float dtypes (torch.cat requires matching dtype/device).
    bottom_row = torch.tensor([0., 0., 0., 1.],
                              dtype=R.dtype, device=R.device).view(1, 4)
    return torch.cat([torch.cat([R, t], dim=1), bottom_row], dim=0)
def get_extrinsics_pose_matrix(image_file, calib_data):
    """Get extrinsics from the calib_data dictionary (fisheye or distorted perspective cam)."""
    # Dispatch on camera type; get_camera_type already validates it, but we
    # keep the original hard exit for anything unexpected.
    handlers = {
        'fisheye': get_extrinsics_pose_matrix_fisheye,
        'perspective': get_extrinsics_pose_matrix_distorted,
    }
    camera_type = get_camera_type(image_file, calib_data)
    if camera_type not in handlers:
        sys.exit('Wrong camera type')
    return handlers[camera_type](image_file, calib_data)
def get_extrinsics_pose_matrix_fisheye(image_file, calib_data):
    """Get extrinsics from the calib_data dictionary (fisheye cam)."""
    cam = get_camera_name(image_file)
    extr = calib_data[cam]['extrinsics']
    # Camera position (metres), read from the calibration file.
    t = np.array([float(extr['pos_x_m']), float(extr['pos_y_m']), float(extr['pos_z_m'])])
    # Orientation is encoded as three angles (degrees): one rotation about
    # x sandwiched between two rotations about z.
    x_rad = np.pi / 180. * float(extr['rot_x_deg'])
    z1_rad = np.pi / 180. * float(extr['rot_z1_deg'])
    z2_rad = np.pi / 180. * float(extr['rot_z2_deg'])
    # Extra half-turn about x; presumably converts to the "gcam" camera
    # convention — TODO confirm against the dataset documentation.
    x_rad += np.pi  # gcam
    cosx, sinx = np.cos(x_rad), np.sin(x_rad)
    cosz1, sinz1 = np.cos(z1_rad), np.sin(z1_rad)
    cosz2, sinz2 = np.cos(z2_rad), np.sin(z2_rad)
    Rx = np.array([[ 1,     0,    0],
                   [ 0,  cosx, sinx],
                   [ 0, -sinx, cosx]])
    Rz1 = np.array([[ cosz1, sinz1, 0],
                    [-sinz1, cosz1, 0],
                    [     0,     0, 1]])
    Rz2 = np.array([[cosz2, -sinz2, 0],
                    [sinz2,  cosz2, 0],
                    [    0,      0, 1]])
    # Compose: R = Rz2 · Rx · Rz1 (note Rz2 uses the opposite sign
    # convention from Rz1).
    R = np.matmul(Rz2, np.matmul(Rx, Rz1))
    # Translation re-expressed in the rotated frame: -R @ t.
    T_other_convention = -np.dot(R,t)
    pose_matrix = transform_from_rot_trans(R, T_other_convention).astype(np.float32)
    return pose_matrix
def get_extrinsics_pose_matrix_distorted(image_file, calib_data):
    """Get extrinsics from the calib_data dictionary (distorted perspective cam)."""
    extr = calib_data[get_camera_name(image_file)]['extrinsics']
    # Perspective cams store the translation directly plus a full rotation
    # matrix (no angle encoding as with the fisheye cams).
    translation = np.array([float(extr[k]) for k in ('t_x_m', 't_y_m', 't_z_m')])
    rotation = np.array(extr['R'])
    return transform_from_rot_trans(rotation, translation).astype(np.float32)
def get_extrinsics_pose_matrix_extra_trans_rot_torch(image_file, calib_data, extra_xyz_m, extra_xyz_deg):
"""Get extrinsics from the calib_data dictionary, with extra translation and rotation."""
cam = get_camera_name(image_file)
extr = calib_data[cam]['extrinsics']
camera_type = get_camera_type(image_file, calib_data)
# May be subject to modifications:
# At the time of coding,
# fisheye cams are encoded with 3 rotation values and the position of COP
# perspective cams are encoded with a rotation matrix and the position of the origin in the cam reference
if camera_type == 'perspective':
T_other_convention = torch.from_numpy(np.array([float(extr['t_x_m']),
float(extr['t_y_m']),
float(extr['t_z_m'])])).float().cuda() + extra_xyz_m
R_ini = torch.from_numpy(np.array(extr['R'])).float().cuda()
x_rad = np.pi / 180. * extra_xyz_deg[0]
z1_rad = np.pi / 180. * extra_xyz_deg[1]
z2_rad = np.pi / 180. * extra_xyz_deg[2]
elif camera_type == 'fisheye':
t = torch.from_numpy(np.array([float(extr['pos_x_m']),
float(extr['pos_y_m']),
float(extr['pos_z_m'])])).float().cuda() + extra_xyz_m
x_rad = np.pi / 180. * (float(extr['rot_x_deg']) + extra_xyz_deg[0])
z1_rad = np.pi / 180. * (float(extr['rot_z1_deg']) + extra_xyz_deg[1])
z2_rad = np.pi / 180. * (float(extr['rot_z2_deg']) + extra_xyz_deg[2])
x_rad += np.pi # gcam
else:
sys.exit('Wrong camera type')
cosx = torch.cos(x_rad)
sinx = torch.sin(x_rad)
cosz1 = torch.cos(z1_rad)
sinz1 = torch.sin(z1_rad)
cosz2 = torch.cos(z2_rad)
sinz2 = torch.sin(z2_rad)
Rx = torch.zeros((3, 3), dtype=cosx.dtype).cuda()
Rz1 = torch.zeros((3, 3), dtype=cosx.dtype).cuda()
Rz2 = torch.zeros((3, 3), dtype=cosx.dtype).cuda()
Rx[0, 0], Rx[1, 1], Rx[2, 2], Rx[1, 2], Rx[2, 1] = 1, cosx, cosx, sinx, -sinx
Rz1[0, 0], Rz1[1, 1], Rz1[0, 1], Rz1[1, 0], Rz1[2, 2] = cosz1, cosz1, sinz1, -sinz1, 1
Rz2[0, 0], Rz2[1, 1], Rz2[0, 1], Rz2[1, 0], Rz2[2, 2] = cosz2, cosz2, -sinz2, sinz2, 1
if camera_type == 'fisheye':
R = Rz2 @ (Rx @ Rz1)
T_other_convention = -R @ t
elif camera_type == 'perspective':
R = (Rz2 @ (Rx @ Rz1)) @ R_ini
pose_matrix = transform_from_rot_trans_torch(R, | |
<reponame>shiningsunnyday/ssd_keras<gh_stars>0
# -*- coding: iso-8859-1 -*-
from shutil import copy2
import shutil
import os
import xml.etree.ElementTree
import cv2
import argparse
# Source/destination image extensions and optional image-name postfix.
ext = '.jpg'
dstext = '.jpg'
postfix = ''
# Brand class names that also appear in the FlickrLogos-32 dataset;
# presumably consulted when --wofl32 is given to exclude these classes —
# verify against the main script body.
fl32_intersection = ["adidas-text", "adidas1", "adidas2", "adidas3", "aldi", "aldi-text", "aldinord", "apple",
                     "becks", "becks-symbol", "bmw", "carlsberg", "coca-cola", "coke", "corona-text", "corona-symbol",
                     "dhl", "esso", "esso-text", "federalexpress", "fedex", "ferrari", "ford-symbol", "google-text",
                     "google+", "google-symbol", "guinness", "heineken", "hp", "milka", "nvidia", "paulaner",
                     "pepsi-text", "pepsi-symbol", "shell-symbol", "shell-text", "starbucks-text", "starbucks-symbol",
                     "stellaartois", "tsingtao", "ups"]
def parse_args():
    """
    Parse input arguments.

    Returns the argparse namespace with:
      inpath / outpath -- source and destination dataset folders (required)
      wofl32           -- drop classes shared with FlickrLogos-32
      roi              -- write the ROIs out per brand
      commonformat     -- also emit a common Faster R-CNN layout
    """
    # The description previously read 'Test a Fast R-CNN network', copied
    # from another tool; this script cleans and copies a logo dataset.
    parser = argparse.ArgumentParser(description='Clean a logo dataset and copy it into a new folder structure')
    parser.add_argument('--in', dest='inpath', help='Path of the original dataset\'s data folder to be cleaned. It won\'t be modified.',
                        default=None, type=str, required=True)
    parser.add_argument('--out', dest='outpath',
                        help='Path where the cleaned dataset will be copied.',
                        default=None, type=str, required=True)
    parser.add_argument('--wofl32', dest='wofl32',
                        help='Generate the dataset without the classes of FlickrLogos32.',
                        action='store_true', default=False)
    parser.add_argument('--roi', dest='roi',
                        help='Writes the rois out for each brands separately.',
                        action='store_true', default=False)
    parser.add_argument('--commonformat', dest='commonformat',
                        help='Writes the dataset also to a common Faster R-CNN format out.',
                        action='store_true', default=False)
    args = parser.parse_args()

    return args
if __name__ == '__main__':
args = parse_args()
imglist = ''
brandlist = list()
print('Copying dataset')
if os.path.exists(args.outpath):
shutil.rmtree(args.outpath)
xmlpath = os.path.join(args.outpath, 'voc_format')
shutil.copytree(args.inpath, xmlpath)
if args.commonformat:
commonoutpath = os.path.join(args.outpath, 'commonformat')
annotationspath = os.path.join(commonoutpath, 'Annotations')
imagespath = os.path.join(commonoutpath, 'Images')
imagesetspath = os.path.join(commonoutpath, 'ImageSets')
os.makedirs(annotationspath)
os.makedirs(imagespath)
os.makedirs(imagesetspath)
i = 0
unavailableCounter = 0
imageCounter = 0
totalRoiCounter = 0
print('Processing dataset files')
for r, subdirs, files in os.walk(xmlpath):
for filename in files:
i = i + 1
if not filename.endswith('.xml'):
continue
if i % 1000 == 0:
print('Processed: ' + str(i) + ' images.')
imagename = filename.split('.')[0]
imgpath = os.path.join(r, imagename + ext)
filewithpath = os.path.join(r, filename)
if not os.path.isfile(imgpath):
os.remove(filewithpath)
unavailableCounter += 1
print('Deleted xml for unavailable image file {:s}'.format(imgpath))
continue
im = cv2.imread(os.path.join(r, imagename + ext))
if im is None:
continue
imageCounter += 1
try:
parent = filewithpath.split('/')[-2]
except IndexError:
parent = filewithpath.split('\\')[-2]
parent = parent.replace(' ', '')
parser = xml.etree.ElementTree.XMLParser(encoding="utf-8")
tree = xml.etree.ElementTree.parse(filewithpath, parser = parser)
root = tree.getroot()
imglist += parent + imagename + postfix + '\n'
roiCounter = 0
im = cv2.imread(os.path.join(r, imagename + ext))
assert im is not None
imagebrands = []
intersection = False
for obj in root.findall('object'):
brand = str(obj.find('name').text.encode('utf-8').lower())
if brand == "1.fcköln":
brand = "fckoeln"
if brand == "adidas3":
brand = "adidas-text"
if "adidas4" in str(brand):
brand = brand.replace("adidas4", "adidas3")
if brand == "aluratek":
brand = "aluratek-symbol"
if brand == "apecase":
brand = "apecase-symbol"
if brand == "apecase-teilsichtbar":
brand = "apecase-symbol-teilsichtbar"
if brand == "armitron1":
brand = "armitron"
if brand == "audi":
brand = "audi-symbol"
if brand == "b":
brand = "bridgestone"
if brand == "basf-symbol":
brand = "basf"
if "bertha1" in brand:
brand = brand.replace("bertha1", "bertha")
if "boing" in brand:
brand = brand.replace("boing", "boeing")
if "budweiser1" in brand:
brand = brand.replace("budweiser1", "budweiser")
if "budweiser2" in brand:
brand = brand.replace("budweiser2", "budweiser")
if brand == "budweiser-b":
brand = "budweiser-b-symbol"
if brand == "budweiser-teilsichtbar":
brand = "budweiser-symbol-teilsichtbar"
if "bundweiser" in brand:
brand = brand.replace("bundweiser", "budweiser")
if brand == "burgerking":
brand = "burgerking-symbol"
if brand == "burgerking-teilsichtbar":
brand = "burgerking-symbol-teilsichtbar"
if "canon1" in brand:
brand = brand.replace("canon1", "canon")
if "canon2" in brand:
brand = brand.replace("canon2", "canon")
if "cartier1" in brand:
brand = brand.replace("cartier1", "cartier")
if "caterpillar1" in brand:
brand = brand.replace("caterpillar1", "caterpillar")
if brand == "chevrolet1":
brand = brand.replace("chevrolet1", "chevrolet")
if brand == "citroen":
brand == "citroen-symbol"
if brand == "colgate1":
brand = "colgate"
if "dadone" in brand:
brand = brand.replace("dadone", "danone")
if brand == "cvs-symbol" or brand == "cvs-logo":
brand = "cvspharmacy"
if brand == "danone1":
brand = "danone"
if "fils" in brand:
brand = brand.replace("fils", "fila")
if brand == "google":
brand = "google-symbol"
if brand == "gucci1":
brand = "gucci"
if brand == "gucci logo":
brand = "gucci-symbol"
if "heineke" in brand:
brand = brand.replace("heineke", "heineken")
if brand == "hersheys1":
brand = "hersheys"
if brand == "hungry jacks logo":
brand = "hungry jacks-symbol"
if "hyundri" in brand:
brand = brand.replace("hyundri", "hyundai")
if "kellogg`s-k" in brand:
brand = brand.replace("kellogg`s-k", "kellogg`s-symbol")
if "kia-logo" in brand:
brand = brand.replace("kia-logo", "kia")
if brand == "lego":
brand = "lego-symbol"
if brand == "lego-teilsichtbar":
brand = "lego-symbol-teilsichtbar"
if "louis vuitton2" in brand:
brand = brand.replace("louis vuitton2", "louisvuitton")
if brand == "mastercard1":
brand = "mastercard"
if brand == "mcdonalds":
brand = "mcdonalds-symbol"
if brand == "mcdonalds-teilsichtbar":
brand = "mcdonalds-symbol-teilsichtbar"
if brand == "mercedes" or brand == "mercedes-logo":
brand = "mercedesbenz-symbol"
if brand == "mercedes-schrift" or brand == "mercedes-schriftzug":
brand = "mercedesbenz-schriftzug" # !!!!!
if brand == "mercedes-teilsichtbar":
brand = "mercedesbenz-symbol-teilsichtbar"
if brand == "nestle1" or brand == "nestle2":
brand = "nestle"
if brand == "nike":
brand = "nike-symbol"
if "nikelogo" in brand:
brand = brand.replace("nikelogo", "nike")
if brand == "lego1":
brand = "lego"
if brand == "nivea1":
brand = "nivea"
if brand == "olympia":
brand = "olympicgames"
if brand == "pizzahut-logo":
brand = "pizzahut"
if "ruffels" in brand:
brand = brand.replace("ruffels", "ruffles")
if brand == "the home depot1" or brand == "the home depot-logo":
brand = "thehomedepot"
if "vl" in brand:
brand = brand.replace("vl", "louisvuitton")
if brand == "volksbank":
brand = "volksbank-symbol"
if brand == "ströker":
brand = "stroeer"
if brand == "görtz":
brand = "goertz"
if "schriftzug" in brand:
brand = brand.replace("-schriftzug", "-text") # !!!!
if "schrift" in brand:
brand = brand.replace("-schrift", "-text") # !!!!
if "teilsichtbar" in brand:
brand = brand.replace("-teilsichtbar", "")
obj.find('truncated').text = str(1)
if "logo" in brand:
brand = brand.replace('logo', 'symbol')
if "`" in brand:
brand = brand.replace("`", "")
if "." in brand:
brand = brand.replace(".", "")
brand = brand.replace(" ", "")
if brand == "chanel":
brand = "chanel-text"
if brand == "chanel-symbol":
brand = "chanel"
if brand == "citroen":
brand = "citroen-text"
if brand == "citroen-symbol":
brand = "citroen"
if brand == "mcdonalds":
brand = "mcdonalds-text"
if brand == "mcdonalds-symbol":
brand = "mcdonalds"
if brand == "mercedesbenz":
brand = "mercedes-text"
if brand == "mercedesbenz-symbol":
brand = "mercedes"
if brand == "nike-symbol":
brand = "nike"
if brand == "porsche":
brand = "porsche-text"
if brand == "porsche-symbol":
brand = "porsche"
if brand == "unicef-symbol":
brand = "unicef"
if brand == "vodafone-symbol":
brand = "vodafone"
roiname = parent + "_" +imagename + '_' + str(roiCounter)
if roiname == "H&M_img000198_0" or roiname == "H&M_img000252_4":
brand = "adidas3"
if (
roiname == "nivea_img000135_0" or roiname == "nivea_img000180_5" or
roiname == "red bull_img000292_2" or roiname == "red bull_img000292_9" or
roiname == "red bull_img000323_3" or roiname == "red bull_img000563_2" or
roiname == "red bull_img000563_4"
):
brand = "adidas-text"
if brand == "adidas" or brand == "adidas-symbol" or roiname == "adidas_img000419_0":
brand = "adidas1"
if roiname == "adidas_img000023_0":
brand = "adidas3"
if brand == "amazon-text":
brand = "amazon"
if roiname == "boeing_img000039_2" or roiname == "boeing_img000043_1":
brand = "amazon"
if brand == "americanexpress1":
brand = "americanexpress"
if roiname == "BMW_img000103_2":
brand = "audi-symbol"
if brand == "basf-symbol":
brand = "basf"
if roiname == "volkswagen_img000419_2":
brand = "beatsaudio"
if roiname == "bionade_img000097_2":
brand = "bionade-symbol"
if roiname == "bosch_img000070_0" or brand == "bosch":
brand = "bosch-text"
if roiname == "airhawk_img000030_0":
brand = "airhawk"
if brand == "bud" or brand == "budweiser":
brand = "budweiser-text"
if (roiname == "budweiser_img000008_5" or roiname == "budweiser_img000008_6" or roiname == "budweiser_img000008_7" or
roiname == "budweiser_img000008_8" or roiname == "budweiser_img000009_0" or roiname == "budweiser_img000177_5" or
roiname == "budweiser_img000177_6" or roiname == "budweiser_img000177_7" or roiname == "budweiser_img000202_0" or
roiname == "budweiser_img000210_3" or roiname == "budweiser_img000210_4" or roiname == "budweiser_img000376_4" or
roiname == "budweiser_img000376_5" or roiname == "budweiser_img000376_7"):
brand = "budweiser-text"
if roiname == "burger king_img000172_0" or roiname == "McDonalds_img000594_1":
brand = "burgerking-text"
if roiname == "burger king_img000131_2" or roiname == "burger king_img000420_1" or roiname == "burger king_img000166_3":
brand = "burgerking-symbol"
if brand == "burkler":
brand = "buckler"
if roiname == "FedEx_img000230_0":
brand = "fedex"
if roiname == "heineken_img000045_51":
brand = "heineken"
if roiname == "intel_img000073_0":
brand = "intel"
if roiname == "netflix_img000115_0":
brand = "netflix"
if roiname == "nivea_img000128_3":
brand = "nivea"
if roiname == "Pampers_img000016_1" or roiname == "Pampers_img000144_2":
brand = | |
rowSums( countsTable[groups == '%s'] ) ''' % g1)
r('''b = rowSums( countsTable[groups == '%s'] ) ''' % g2)
if first:
r('''plot(cumsum(sort(a - b)), type = 'l') ''')
first = False
else:
r('''lines(cumsum(sort(a - b))) ''')
r['dev.off']()
r('''suppressMessages(library('ggplot2'))''')
r('''suppressMessages(library('reshape'))''')
# output difference between pairs within groups
first = True
legend = []
for pair in pairs:
for g1, g2 in itertools.combinations(groups, 2):
key = re.sub("-", "_", "pair_%s_%s_vs_%s" % (pair, g1, g2))
legend.append(key)
# print r('''colnames( countsTable) ''')
# print r(''' pairs=='%s' ''' % pair)
# print r(''' groups=='%s' ''' % g1)
r('''a = rowSums( countsTable[pairs == '%s' & groups == '%s'])''' % (
pair, g1))
r('''b = rowSums( countsTable[pairs == '%s' & groups == '%s'])''' % (
pair, g2))
r('''c = cumsum( sort(a - b) )''')
r('''c = c - min(c)''')
if first:
data = r('''d = data.frame( %s = c)''' % key)
first = False
else:
r('''d$%s = c''' % key)
# remove row names (gene idenitifiers)
r('''row.names(d) = NULL''')
# add numbers of genes (x-axis)
r('''d$genes=1:nrow(d)''')
# merge data for ggplot
r('''d = melt( d, 'genes', variable_name = 'comparison' )''')
# plot
r('''gp = ggplot(d)''')
r('''pp = gp + \
geom_line(aes(x=genes,y=value,group=comparison,color=comparison))''')
try:
R.ggsave('''%(outfile_prefix)sbalance_pairs.png''' % locals())
r['dev.off']()
except RRuntimeError:
E.warn("could not plot")
# build DGEList object
# ignore message: "Calculating library sizes from column totals"
r('''countsTable = suppressMessages(DGEList(countsTable, group=groups))''')
# Relevel groups to make the results predictable - IMS
if ref_group is not None:
r('''countsTable$samples$group <- relevel(countsTable$samples$group,
ref = "%s")''' % ref_group)
else:
# if no ref_group provided use first group in groups
r('''countsTable$sample$group <- relevel(countsTable$samples$group,
ref = "%s")''' % groups[0])
# calculate normalisation factors
E.info("calculating normalization factors")
r('''countsTable = calcNormFactors( countsTable )''')
E.info("output")
# output MDS plot
R.png('''%(outfile_prefix)smds.png''' % locals())
try:
r('''plotMDS( countsTable )''')
except RRuntimeError:
E.warn("can not plot mds")
r['dev.off']()
# build design matrix
if has_pairs:
r('''design = model.matrix(~pairs + countsTable$samples$group)''')
else:
r('''design = model.matrix(~countsTable$samples$group)''')
# r('''rownames(design) = rownames( countsTable$samples )''')
# r('''colnames(design)[length(colnames(design))] = "CD4" ''' )
# fitting model to each tag
if has_replicates:
# estimate common dispersion
r('''countsTable = estimateGLMCommonDisp(countsTable, design)''')
# estimate tagwise dispersion
r('''countsTable = estimateGLMTagwiseDisp(countsTable, design)''')
# fitting model to each tag
r('''fit = glmFit(countsTable, design)''')
else:
# fitting model to each tag
if dispersion is None:
raise ValueError("no replicates and no dispersion")
E.warn("no replicates - using a fixed dispersion value")
r('''fit = glmFit(countsTable, design, dispersion=%f)''' %
dispersion)
# perform LR test
r('''lrt = glmLRT(fit)''')
E.info("Generating output")
# output cpm table
r('''suppressMessages(library(reshape2))''')
r('''countsTable.cpm <- cpm(countsTable, normalized.lib.sizes=TRUE)''')
r('''melted <- melt(countsTable.cpm)''')
r('''names(melted) <- c("test_id", "sample", "ncpm")''')
# melt columns are factors - convert to string for sorting
r('''melted$test_id = levels(melted$test_id)[as.numeric(melted$test_id)]''')
r('''melted$sample = levels(melted$sample)[as.numeric(melted$sample)]''')
# sort cpm table by test_id and sample
r('''sorted = melted[with(melted, order(test_id, sample)),]''')
r('''gz = gzfile("%(outfile_prefix)scpm.tsv.gz", "w" )''' % locals())
r('''write.table(sorted, file=gz, sep = "\t",
row.names=FALSE, quote=FALSE)''')
r('''close(gz)''')
# compute adjusted P-Values
r('''padj = p.adjust(lrt$table$PValue, 'BH')''')
rtype = collections.namedtuple("rtype", "lfold logCPM LR pvalue")
# output differences between pairs
if len(groups) == 2:
R.png('''%(outfile_prefix)smaplot.png''' % locals())
r('''plotSmear(countsTable, pair=c('%s'))''' % "','".join(groups))
r('''abline(h=c(-2, 2), col='dodgerblue') ''')
r['dev.off']()
# I am assuming that logFC is the base 2 logarithm foldchange.
# Parse results and parse to file
results = []
counts = E.Counter()
df = r('''lrt$table''')
df["padj"] = r('''padj''')
counts.significant = sum(df.padj <= fdr)
counts.insignificant = sum(df.padj > fdr)
counts.significant_over = sum((df.padj <= fdr) & (df.logFC > 0))
counts.significant_under = sum((df.padj <= fdr) & (df.logFC < 0))
counts.input = len(df)
counts.all_over = sum(df.logFC > 0)
counts.all_under = sum(df.logFC < 0)
counts.fail = sum(df.PValue.isnull())
counts.ok = counts.input - counts.fail
df["fold"] = df.logFC.pow(2.0)
df["significant"] = df.padj <= fdr
# TODO: use pandas throughout
for interval, d in df.iterrows():
# fold change is determined by the alphabetical order of the factors.
# Is the following correct?
results.append(GeneExpressionResult._make((
interval,
groups[1],
d.logCPM,
0,
groups[0],
d.logCPM,
0,
d.PValue,
d.padj,
d.logFC,
d.fold,
d.logFC, # no transform of lfold
str(int(d.significant)),
"OK")))
writeExpressionResults(outfile, results)
outf = iotools.open_file("%(outfile_prefix)ssummary.tsv" % locals(), "w")
outf.write("category\tcounts\n%s\n" % counts.asTable())
outf.close()
# TODO: refactor the following deseq* helpers into a class.
##
def deseqPlotSizeFactors(outfile):
    '''Render a barplot of the DESeq size factors to `outfile` (PNG).

    Requires the ``cds`` object to exist in the R namespace.
    '''
    R.png(outfile)
    # widen the bottom margin so the rotated sample labels fit, then plot
    for r_command in (
            '''par(mar=c(8,4,4,2))''',
            '''barplot( sizeFactors( cds ), main="size factors", las=2)'''):
        r(r_command)
    r['dev.off']()
def deseqOutputSizeFactors(outfile):
    '''Write the DESeq size factors to `outfile` as a tab-separated table.

    Requires the ``cds`` object to exist in the R namespace.
    '''
    factors = r('''sizeFactors( cds )''')
    sample_names = r('''names(sizeFactors(cds))''')
    with iotools.open_file(outfile, "w") as outf:
        outf.write("sample\tfactor\n")
        rows = ["%s\t%s\n" % (sample, str(factor))
                for sample, factor in zip(sample_names, factors)]
        outf.writelines(rows)
def deseqPlotCorrelationHeatmap(outfile, vsd):
    '''Plot a sample-to-sample distance heatmap to `outfile`.

    Use variance stabilized data in object vsd.
    Should be 'blind', as then the transform is
    not informed by the experimental design.
    '''
    # rpy2.4.2 - passing of arrays seems to be broken - do it in R
    # dists = r['as.matrix'](R.dist(R.t(R.exprs(vsd))))
    distance_matrix = r('''as.matrix(dist(t(exprs(vsd))))''')
    R.png(outfile)
    heatmap2 = r['heatmap.2']
    heatmap2(distance_matrix,
             trace='none',
             margin=ro.IntVector((10, 10)))
    r['dev.off']()
def deseqPlotGeneHeatmap(outfile, data, Rowv=False, Colv=False):
    '''Plot a heatmap of all genes to `outfile`.

    Use variance stabilized data in object vsd.
    Should be 'blind', as then the transform is
    not informed by the experimental design.
    '''
    # nothing to plot
    if len(data) == 0:
        return
    # do not print if not enough values in one
    # direction (single row or column)
    if min(R.dim(data)) < 2:
        return
    R.png(outfile, width=500, height=2000)
    palette = R.colorRampPalette(r['brewer.pal'](9, "GnBu"))(100)
    heatmap_options = dict(
        col=palette,
        trace="none",
        dendrogram="none",
        Rowv=Rowv,
        Colv=Colv,
        labRow=False,
        margin=ro.IntVector((5, 5)),
        lhei=ro.IntVector((1, 10)),
        key=False)
    r['heatmap.2'](data, **heatmap_options)
    r['dev.off']()
def deseqPlotPCA(outfile, vsd, max_genes=500):
    '''plot a PCA of the samples to `outfile`.

    Use variance stabilized data in object vsd.
    Should be 'blind', as then the transform is
    not informed by the experimental design.

    :param outfile: output PNG filename.
    :param vsd: NOTE(review): not used on the Python side - the R code
        references the R-namespace object ``vsd``; confirm callers keep
        the two in sync.
    :param max_genes: cap on the number of (most variable) genes used.
    '''
    R.png(outfile)
    # if there are more than `max_genes` genes (after filtering)
    # use the `max_genes` most variable in the PCA
    # else use the number of genes
    r('''ntop = ifelse(as.integer(dim(vsd))[1] >= %(max_genes)i,
                          %(max_genes)i,
                          as.integer(dim(vsd))[1])''' % locals())
    try:
        r('''plotPCA(vsd)''')
    except RRuntimeError as msg:
        # plotPCA can fail e.g. with too few samples; warn but still
        # close the device so later plots are not corrupted.
        E.warn("can not plot PCA: %s" % msg)
    r['dev.off']()
def deseqPlotPairs(outfile):
    '''plot an all-against-all scatter ("pairs") plot to `outfile`.

    Requires the counts table to be loaded; `plotPairs` is defined
    elsewhere in this module.
    '''
    # Plot pairs
    R.png(outfile, width=960, height=960)
    plotPairs()
    r['dev.off']()
def deseqPlotPvaluesAgainstRowsums(outfile):
    '''plot pvalues against row sum rank.

    This plot is useful to see if quantile filtering could
    be applied.

    Requires the ``countsTable`` and ``res`` objects in the R namespace.
    '''
    r('''counts_sum = rowSums( countsTable )''')
    R.png(outfile)
    r('''plot( rank( counts_sum)/length(counts_sum),
    -log10( res$pval),
    pch = 16,
    cex= 0.1)''')
    # horizontal reference line at -log10(p) == 3, i.e. p == 1e-3
    r('''abline( a=3, b=0, col='red')''')
    r['dev.off']()
def deseqParseResults(control_name, treatment_name, fdr, vsd=False):
'''parse deseq output.
retrieve deseq results from object 'res' in R namespace.
The 'res' object is a dataframe with the following columns (from the
DESeq manual):
id: The ID of the observable, taken from the row names of the
counts slots.
baseMean: The base mean (i.e., mean of the counts divided by the size
factors) for the counts for both conditions
baseMeanA: The base mean (i.e., mean of the counts divided by the size
factors) for the counts for condition A
baseMeanB: The base mean for condition B
foldChange: The ratio meanB/meanA
log2FoldChange: The log2 of the fold change
pval: The p value for rejecting the null hypothesis 'meanA==meanB'
padj: The adjusted p values (adjusted with 'p.adjust( pval,
method="BH")')
vsd_log2FoldChange: The log2 fold change after variance stabilization.
This data field is not part of DESeq proper, but has been added
in this module in the runDESeq() method.
Here, 'conditionA' is 'control' and 'conditionB' is 'treatment'
such that a foldChange of 2 means that treatment is twice
upregulated compared to control.
Returns a list of results.
If vsd is True, the log fold change will be computed from the variance
stabilized data.
'''
results = []
counts = E.Counter()
res_df = pandas2ri.ri2py(r["res"])
for index, data in res_df.iterrows():
counts.input += 1
# set significant flag
if data['padj'] <= fdr:
signif = 1
counts.significant += 1
if data['log2FoldChange'] > 0:
counts.significant_over += 1
else:
counts.significant_under += 1
else:
signif = 0
counts.insignificant += 1
if data['log2FoldChange'] > 0:
counts.all_over += 1
else:
counts.all_under += 1
# set lfold change to 0 if both are not expressed
if data['baseMeanA'] == 0.0 and data['baseMeanB'] == 0.0:
data['foldChange'] = 0
data['log2FoldChange'] = 0
if data['pval'] != r('''NA'''):
status = "OK"
else:
status = "FAIL"
counts[status] += 1
counts.output += 1
# check if our assumptions about the direction of fold change
# are correct
assert (data['foldChange'] > 1) == (data['baseMeanB'] > data['baseMeanA'])
| |
<gh_stars>1-10
import os
from time import sleep

from gtts import gTTS
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.common.by import By
# Text to Audio Converter using gTTs
def txt_to_audio_converter(msg: str, file_name: str, audio_saving_dir: str = './',
                           lang: str = 'en') -> None:
    """Convert *msg* to speech and save it as ``<audio_saving_dir>/<file_name>.mp3``.

    This can be used to send the typed text as audio to the recipient.

    :param msg: Message that is to be converted into audio format.
    :param file_name: Name of the output file, without the ``.mp3`` extension.
    :param audio_saving_dir: Directory in which the audio file is saved.
    :param lang: IETF language tag passed to gTTS (defaults to English,
        preserving the previous hard-coded behaviour).
    :return: None
    """
    output = gTTS(text=msg, lang=lang, slow=False)
    # os.path.join avoids doubled separators such as ".//name.mp3" that the
    # previous string concatenation produced with the default directory.
    output.save(os.path.join(audio_saving_dir, file_name + ".mp3"))
class Whatsapp:
    def __init__(self, driver_address: str, usage_data_directory: str):
        """Initialise the Chrome driver and log in to WhatsApp Web.

        The usage data directory stores the QR-code/session information so
        the QR code does not have to be scanned on every run.

        :param driver_address: Path to the Chrome driver executable.
        :param usage_data_directory: Chrome user-data directory used to keep
            the WhatsApp Web session between runs.
        """
        self.__driver_address = driver_address
        self.__usage_data_directory = usage_data_directory
        # __whatsapp_initializer() (defined elsewhere in this class) opens
        # WhatsApp Web and returns the ready-to-use webdriver instance.
        self.__driver = self.__whatsapp_initializer()
def send_document(self, name: list, files_address: list, select: bool = True) -> None:
"""
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
| |
| Description: |
| ¯¯¯¯¯¯¯¯¯¯¯ |
| This method is used to Automatically send files as documents to the recipient. |
| Multiple Documents can also be sent to the recipient by adding the files address in `file_address` list. |
| This same set of documents can be even sent to multiple people by adding names into the `name` list. |
| Accepts all formats. |
| |
| Parameters: |
| ¯¯¯¯¯¯¯¯¯ |
| :param files_address: list, Contains strings of address of files to be sent stored in list. |
| :param name: list, List of all contacts to which it should be send in string format. |
| :param select: bool, Confirm if a particular command should be there on not. |
| |
| Return: |
| ¯¯¯¯¯¯¯ |
| :return: None |
| |
¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯
"""
if select:
for j in range(len(name)):
# Selects the chat from the given list of names
self.__chat_selector(name[j])
for i in files_address:
self.__sender(xpath='//input[@accept="*"]', file_name=i)
def send_image(self, name: list, files_address: list, select: bool = True) -> None:
"""
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
| |
| Description: |
| ¯¯¯¯¯¯¯¯¯¯¯ |
| This method is used to Automatically send image or video to the recipient. |
| Multiple Images, Videos can be sent by adding the file address with extension in the `file_address` list |
| The same set of images, videos can be even sent to multiple people by adding names into the `name` list |
| Accepts all formats of image and videos. |
| |
| Parameters: |
| ¯¯¯¯¯¯¯¯¯¯ |
| :param name: list, List of all contacts to which it should be send in string format. |
| :param files_address: list, Contains strings of address of files to be sent stored in list. |
| :param select: bool, Confirm if a particular command should be there on not. |
| |
| Return: |
| ¯¯¯¯¯¯¯ |
| :return: None |
| |
¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯
"""
if select:
for j in range(len(name)):
# Selects the chat from the given list of names
self.__chat_selector(name[j])
for i in files_address:
self.__sender(xpath='//input[@accept="image/*,video/mp4,video/3gpp,video/quicktime"]', file_name=i)
def send_text(self, name: list, msg: list, select: bool = True) -> None:
"""
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
| |
| Description: |
| ¯¯¯¯¯¯¯¯¯¯¯ |
| This method is used to Automatically send messages to the recipient. |
| Multiple messages can be sent by adding messages into the `msg` list. |
| The same set of images, videos can be even sent to multiple people by adding names into the `name` list |
| |
| Parameters: |
| ¯¯¯¯¯¯¯¯¯¯ |
| :param name: list, List of all contacts to which it should be send in string format. |
| :param msg: str, Message to be sent. |
| :param select: bool, Confirm if a particular command should be there on not. |
| |
| Return: |
| ¯¯¯¯¯¯¯ |
| :return: None |
| |
¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯
"""
for j in range(len(name)):
self.__chat_selector(name[j])
for i in range(len(msg)):
self.__message_writer(msg[i], select=select)
def send_audio(self, name: list, msg: str, file_name: str, file_dir: str, select: bool = True) -> | |
<reponame>jon85p/pyENL
#!/usr/bin/env python3
'''
Traducciones para pyENL
'''
def translations(lang='en'):
'''
Devuelve un diccionario con las traducciones de cada string.
'''
dicc_gen = {}
# Por cada opción de texto a mostrar al usuario agregar una entrada al
# diccionario general; cada valor de diccionario será otro diccionario donde
# las claves son los códigos de los idiomas y los valores son las
# correspondientes traducciones.
# TODO: Traducción de excepciones
idiomas = ['es', 'en', 'pt', 'fr']
if lang not in idiomas:
raise Exception('Idioma no listado, verificar opciones.')
dicc_gen['Resolver'] = {'es': 'Resolver', 'en': 'Solve', 'pt': 'Resolver',
'fr': 'Resolver'}
dicc_gen['Ecuaciones'] = {'es': 'Ecuaciones', 'en': 'Equations',
'pt': 'Ecuaciones', 'fr': 'Ecuaciones'}
dicc_gen['Actualizar'] = {'es': 'Actualizar', 'en': 'Update',
'pt': 'Actualizar', 'fr': 'Actualizar'}
dicc_gen['Limpiar'] = {'es': 'Limpiar', 'en': 'Clear', 'pt': 'Limpiar',
'fr': 'Limpiar'}
dicc_gen['Variables'] = {'es': 'Variables', 'en': 'Variables',
'pt': 'Variables', 'fr': 'Variables'}
dicc_gen['Información'] = {'es': 'Información', 'en': 'Information',
'pt': 'Información', 'fr': 'Información'}
dicc_gen['Soluciones'] = {'es': 'Soluciones', 'en': 'Solutions',
'pt': 'Soluciones', 'fr': 'Soluciones'}
dicc_gen['Residuos'] = {'es': 'Residuos', 'en': 'Residual',
'pt': 'Residuos', 'fr': 'Residuos'}
dicc_gen['x Ecuaciones/y Variables'] = {'es': 'x Ecuaciones/y Variables', 'en': 'x Ecuaciones/y Variables',
'pt': 'x Ecuaciones/y Variables', 'fr': 'x Ecuaciones/y Variables'}
dicc_gen['Archivo'] = {'es': 'Archivo', 'en': 'File',
'pt': 'Archivo', 'fr': 'Archivo'}
dicc_gen['Exportar reporte'] = {'es': 'Exportar reporte', 'en': 'Export report',
'pt': 'Exportar reporte', 'fr': 'Exportar reporte'}
dicc_gen['Importar'] = {'es': 'Importar', 'en': 'Import',
'pt': 'Importar', 'fr': 'Importar'}
dicc_gen['Editar'] = {'es': 'Editar', 'en': 'Edit',
'pt': 'Editar', 'fr': 'Editar'}
dicc_gen['Opciones'] = {'es': 'Opciones', 'en': 'Options',
'pt': 'Opciones', 'fr': 'Opciones'}
dicc_gen['Herramientas'] = {'es': 'Herramientas', 'en': 'Tools',
'pt': 'Herramientas', 'fr': 'Herramientas'}
dicc_gen['Funciones Ingeniería'] = {'es': 'Funciones Ingeniería', 'en': 'Engineering Functions',
'pt': 'Funciones Ingeniería', 'fr': 'Funciones Ingeniería'}
dicc_gen['Funciones de usuario'] = {'es': 'Funciones de usuario', 'en': 'User functions',
'pt': 'Funciones de usuario', 'fr': 'Funciones de usuario'}
dicc_gen['Ayuda'] = {'es': 'Ayuda', 'en': 'Help',
'pt': 'Ayuda', 'fr': 'Ayuda'}
dicc_gen['Abrir'] = {'es': 'Abrir', 'en': 'Open',
'pt': 'Abrir', 'fr': 'Abrir'}
dicc_gen['Guardar'] = {'es': 'Guardar', 'en': 'Save',
'pt': 'Guardar', 'fr': 'Guardar'}
dicc_gen['Guardar Como...'] = {'es': 'Guardar Como...', 'en': 'Save as...',
'pt': 'Guardar Como...', 'fr': 'Guardar Como...'}
dicc_gen['Cerrar'] = {'es': 'Cerrar', 'en': 'Close',
'pt': 'Cerrar', 'fr': 'Cerrar'}
dicc_gen['Salir'] = {'es': 'Salir', 'en': 'Exit',
'pt': 'Salir', 'fr': 'Salir'}
dicc_gen['Seleccionar todo'] = {'es': 'Seleccionar todo', 'en': 'Select all',
'pt': 'Seleccionar todo', 'fr': 'Seleccionar todo'}
dicc_gen['Deshacer'] = {'es': 'Deshacer', 'en': 'Undo',
'pt': 'Deshacer', 'fr': 'Deshacer'}
dicc_gen['Rehacer'] = {'es': 'Rehacer', 'en': 'Redo',
'pt': 'Rehacer', 'fr': 'Rehacer'}
dicc_gen['Copiar'] = {'es': 'Copiar', 'en': 'Copy',
'pt': 'Copiar', 'fr': 'Copiar'}
dicc_gen['Cortar'] = {'es': 'Cortar', 'en': 'Cut',
'pt': 'Cortar', 'fr': 'Cortar'}
dicc_gen['Pegar'] = {'es': 'Pegar', 'en': 'Paste',
'pt': 'Pegar', 'fr': 'Pegar'}
dicc_gen['Ayuda pyENL'] = {'es': 'Ayuda pyENL', 'en': 'pyENL Help',
'pt': 'Ayuda pyENL', 'fr': 'Ayuda pyENL'}
dicc_gen['Ayuda NumPy'] = {'es': 'Ayuda NumPy', 'en': 'NumPy Help',
'pt': 'Ayuda NumPy', 'fr': 'Ayuda NumPy'}
dicc_gen['Ayuda CoolProp'] = {'es': 'Ayuda CoolProp', 'en': 'CoolProp Help',
'pt': 'Ayuda CoolProp', 'fr': 'Ayuda CoolProp'}
dicc_gen['Sobre pyENL'] = {'es': 'Sobre pyENL', 'en': 'About pyENL',
'pt': 'Sobre pyENL', 'fr': 'Sobre pyENL'}
dicc_gen['Licencias'] = {'es': 'Licencias', 'en': 'Licences',
'pt': 'Licencias', 'fr': 'Licencias'}
dicc_gen['Termodinámicas'] = {'es': 'Termodinámicas', 'en': 'Thermodynamical',
'pt': 'Termodinámicas', 'fr': 'Termodinámicas'}
dicc_gen['Por agregar...'] = {'es': 'Por agregar...', 'en': 'TODO',
'pt': 'Por agregar...', 'fr': 'Por agregar...'}
dicc_gen['Disponibles'] = {'es': 'Disponibles', 'en': 'Availables',
'pt': 'Disponibles', 'fr': 'Disponibles'}
dicc_gen['Agregar...'] = {'es': 'Agregar...', 'en': 'TODO...',
'pt': 'Agregar...', 'fr': 'Agregar...'}
dicc_gen['Comentario'] = {'es': 'Comentario', 'en': 'Comment',
'pt': 'Comentario', 'fr': 'Comentario'}
dicc_gen['Unidades'] = {'es': 'Unidades', 'en': 'Units',
'pt': 'Unidades', 'fr': 'Unidades'}
dicc_gen['Configuración'] = {'es': 'Configuración', 'en': 'Settings',
'pt': 'Configuración', 'fr': 'Configuración'}
dicc_gen['Imprimir'] = {'es': 'Imprimir', 'en': 'Print',
'pt': 'Imprimir', 'fr': 'Imprimir'}
dicc_gen['Open Document Text'] = {'es': 'Open Document Text', 'en': 'Open Document Text',
'pt': 'Open Document Text', 'fr': 'Open Document Text'}
dicc_gen['Archivo LaTeX'] = {'es': 'Archivo LaTeX', 'en': 'LaTeX file',
'pt': 'Archivo LaTeX', 'fr': 'Archivo LaTeX'}
dicc_gen['Archivo EES'] = {'es': 'Archivo EES', 'en': 'EES file',
'pt': 'Archivo EES', 'fr': 'Archivo EES'}
dicc_gen['Información'] = {'es': 'Información', 'en': 'Information',
'pt': 'Información', 'fr': 'Información'}
dicc_gen['Solucionado en '] = {'es': 'Solucionado en ', 'en': 'Solved in ',
'pt': 'Información', 'fr': 'Información'}
dicc_gen[' segundos.\nMayor desviación de '] = {'es': ' segundos.\nMayor desviación de ', 'en': ' seconds.\nGreater Desviation: ',
'pt': 'Información', 'fr': 'Información'}
dicc_gen['Ecuación'] = {'es': 'Ecuación', 'en': 'Equation',
'pt': 'Información', 'fr': 'Información'}
dicc_gen['Residuo'] = {'es': 'Residuo', 'en': 'Residual',
'pt': 'Información', 'fr': 'Información'}
dicc_gen['Solución'] = {'es': 'Solución', 'en': 'Solution',
'pt': 'Información', 'fr': 'Información'}
dicc_gen['No hubo convergencia a solución...'] = {'es': 'No hubo convergencia a solución...',
'en': 'No convergence to solution...',
'pt': 'No hubo convergencia a solución...',
'fr': 'No hubo convergencia a solución...'}
dicc_gen['Problema'] = {'es': 'Problema', 'en': 'Problem',
'pt': 'Problema', 'fr': 'Problema'}
dicc_gen['Variable'] = {'es': 'Variable', 'en': 'Variable',
'pt': 'Variable', 'fr': 'Variable'}
dicc_gen['Valor Inicial'] = {'es': 'Valor Inicial', 'en': 'Initial Guess',
'pt': 'Valor Inicial', 'fr': 'Valor Inicial'}
dicc_gen['Inferior'] = {'es': 'Inferior', 'en': 'Lower',
'pt': 'Inferior', 'fr': 'Inferior'}
dicc_gen['Superior'] = {'es': 'Superior', 'en': 'Upper',
'pt': 'Superior', 'fr': 'Superior'}
dicc_gen['El número '] = {'es': 'El número ', 'en': 'The number ',
'pt': 'El número ', 'fr': 'El número '}
dicc_gen[' es mayor a '] = {'es': ' es mayor a ', 'en': 'is greater than ',
'pt': ' es mayor a ', 'fr': ' es mayor a '}
dicc_gen[' en la variable '] = {'es': ' en la variable ', 'en': ' in variable ',
'pt': ' en la variable ', 'fr': ' en la variable '}
dicc_gen['El valor inicial de '] = {'es': 'El valor inicial de ', 'en': 'The initial guess of ',
'pt': 'El valor inicial de ', 'fr': 'El valor inicial de '}
dicc_gen[' debe estar entre los dos límites.'] = {'es': ' debe estar entre los dos límites.',
'en': ' must is between the limits.',
'pt': ' debe estar entre los dos límites.', 'fr': ' debe estar entre los dos límites.'}
dicc_gen[' ecuaciones / '] = {'es': ' ecuaciones / ', 'en': ' equations /',
'pt': ' ecuaciones / ', 'fr': ' ecuaciones / '}
dicc_gen[' variables'] = {'es': ' variables', 'en': ' variables',
'pt': ' variables', 'fr': ' variables'}
dicc_gen['Error encontrando cantidad de variables y de ecuaciones'] = {'es': 'Error encontrando cantidad de variables y de ecuaciones',
'en': 'Error finding variable lenght and equations',
'pt': 'Error encontrando cantidad de variables y de ecuaciones',
'fr': 'Error encontrando cantidad de variables y de ecuaciones'}
dicc_gen["x Ecuaciones/y Variables"] = {'es': "x Ecuaciones/y Variables", 'en': 'x Equations/y Variables',
'pt': "x Ecuaciones/y Variables", 'fr': "x Ecuaciones/y Variables"}
dicc_gen['Información'] = {'es': 'Información', 'en': 'Information',
'pt': 'Información', 'fr': 'Información'}
dicc_gen['Información'] = {'es': 'Información', 'en': 'Information',
'pt': 'Información', 'fr': 'Información'}
dicc_gen['Información'] = {'es': 'Información', 'en': 'Information',
'pt': 'Información', 'fr': 'Información'}
dicc_gen['Acá va el comentario'] = {'es': 'Acá va el comentario', 'en': 'Comment goes here',
'pt': 'Acá va el comentario', 'fr': 'Acá va el comentario'}
dicc_gen['El documento se ha modificado'] = {'es' : 'El documento se ha modificado',
'en': 'The file was modified',
'pt': 'El archivo ha sido modificado',
'fr': 'El archivo ha sido modificado'}
dicc_gen["¿Desea guardar los cambios?"] = {'es' : '¿Desea guardar los cambios?',
'en': 'Save changes?',
'pt': '¿Desea guardar los cambios?',
'fr': '¿Desea guardar los cambios?'}
dicc_gen["Idioma (requiere reiniciar pyENL)"] = {'es' : "Idioma (requiere reiniciar pyENL)",
'en': 'Language (pyENL restart)',
'pt': '"Idioma (requiere reiniciar pyENL)"',
'fr': '"Idioma (requiere reiniciar pyENL)"'}
dicc_gen['Spanish'] = {'es' : 'Español', 'en': 'Spanish',
'pt': 'Espanhol', 'fr': 'Español'}
dicc_gen['English'] = {'es' : 'Inglés', 'en': 'English',
'pt': 'Inglês', 'fr': 'Anglais'}
dicc_gen['French'] = {'es' : 'Francés', 'en': 'French',
'pt': 'Francês', 'fr': 'Français'}
dicc_gen['Portuguese'] = {'es' : 'Portugués', 'en': 'Portiguese',
'pt': 'Portugues', 'fr': 'Portugais'}
dicc_gen['Formato'] = {'es' : 'Formato', 'en': 'Format',
'pt': 'Format', 'fr': 'Format'}
dicc_gen['Interfaz'] = {'es' : 'Interfaz', 'en': 'Interface',
'pt': 'Interface', 'fr': 'Interface'}
dicc_gen['Método'] = {'es' : 'Método', 'en': 'Method',
'pt': 'Method', 'fr': 'Method'}
dicc_gen['Formato'] = {'es' : 'Formato', 'en': 'Format',
'pt': 'Format', 'fr': 'Format'}
dicc_gen['Tolerancia'] = {'es' : 'Tolerancia', 'en': 'Tolerance',
'pt': 'Tolerance', 'fr': 'Tolerance'}
dicc_gen['Tiempo máximo de espera en segundos'] = {'es' : | |
<gh_stars>1-10
###############################################################################
# Copyright (c) 2018-2021 Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory
#
# Written by <NAME>, <NAME>, <NAME>, <NAME>, <NAME>.
# <EMAIL>.
#
# LLNL-CODE-819515
#
# All rights reserved.
#
# This file is part of RASE.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED,INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
###############################################################################
"""
This module provides plotting support for RASE analysis
"""
import glob
import json
from itertools import cycle
import os
import numpy as np
import matplotlib
from sqlalchemy.orm import Session
import pandas as pd
matplotlib.use("Qt5Agg")
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.backends.backend_qt5agg import FigureCanvas
from matplotlib.figure import Figure
from matplotlib import rcParams
import matplotlib.patheffects as peff
rcParams.update({'figure.autolayout': True})
import seaborn as sns
sns.set(font_scale=1.5)
sns.set_style("whitegrid")
from PyQt5.QtCore import pyqtSlot, Qt, QUrl
from PyQt5.QtWidgets import QDialog, QVBoxLayout, QMessageBox
from PyQt5.QtGui import QStandardItemModel, QStandardItem
from PyQt5.QtWebEngineWidgets import QWebEngineView
from .ui_generated import ui_view_spectra_dialog, ui_results_plotting_dialog, ui_results_plotting_dialog_3d
from src.rase_functions import calc_result_uncertainty, get_sample_dir, readSpectrumFile
import src.s_curve_functions as sCurve
from src.utils import get_bundle_dir, natural_keys
from src.base_spectra_dialog import SharedObject, ReadFileObject
from src.rase_settings import RaseSettings
class BaseSpectraViewerDialog(ui_view_spectra_dialog.Ui_Dialog, QDialog):
    """Dialog that pages through base spectra in the embedded d3 web viewer.

    The current spectrum is serialised to a JSON file that the d3 page
    (loaded by :class:`WebSpectraView`) reads on reload.
    """
    def __init__(self, parent, base_spectra, detector, selected):
        # :param base_spectra: sequence of base-spectrum objects with a
        #     ``material.name`` attribute and an ``as_json(detector)`` method.
        # :param selected: material name whose spectrum is shown first.
        QDialog.__init__(self, parent)
        self.setupUi(self)
        self.baseSpectra = base_spectra
        self.detector = detector
        self.selected = selected
        self.session = Session()
        self.json_file = os.path.join(get_bundle_dir(), "d3_resources", "spectrum.json")
        self.index = 0
        self.browser = WebSpectraView(self)
        # Find the spectrum matching the selected material name.
        # NOTE(review): no break - if several spectra share the material
        # name, the LAST match wins; confirm this is intended.
        for i, baseSpectrum in enumerate(self.baseSpectra):
            if baseSpectrum.material.name == self.selected:
                self.index = i
        self.plot_spectrum()
        self.plot_layout = QVBoxLayout(self.widget)
        self.plot_layout.addWidget(self.browser)
        self.widget.setFocus()
    def plot_spectrum(self):
        # Write the current spectrum's JSON for the d3 page, then reload it.
        baseSpectrum = self.baseSpectra[self.index]
        with open(self.json_file, "w") as json_file:
            print(baseSpectrum.as_json(self.detector), file=json_file)
        self.browser.reload()
    # Slots below are auto-connected by Qt via their on_<object>_<signal>
    # naming convention (connectSlotsByName from setupUi).
    @pyqtSlot(bool)
    def on_prevMaterialButton_clicked(self, checked):
        # Step back one spectrum, stopping at the first.
        if self.index > 0:
            self.index = self.index - 1
            self.plot_spectrum()
    @pyqtSlot(bool)
    def on_nextMaterialButton_clicked(self, checked):
        # Step forward one spectrum, stopping at the last.
        if self.index < len(self.baseSpectra)-1:
            self.index = self.index + 1
            self.plot_spectrum()
class SampleSpectraViewerDialog(ui_view_spectra_dialog.Ui_Dialog, QDialog):
    """Dialog that pages through sampled .n42 spectra in the d3 web viewer.

    Each spectrum file is parsed and serialised to a JSON file that the d3
    page (loaded by :class:`WebSpectraView`) reads on reload.
    """
    def __init__(self, parent, scenario, detector, selected, file_list=None):
        # :param selected: index of the sample file to show first.
        # :param file_list: optional explicit list of spectrum files; when
        #     omitted, the scenario's sample directory is globbed.
        QDialog.__init__(self, parent)
        self.setupUi(self)
        self.scenario = scenario
        self.detector = detector
        self.selected = selected
        self.session = Session()
        self.json_file = os.path.join(get_bundle_dir(), "d3_resources", "spectrum.json")
        self.index = selected
        self.file_list = file_list
        if not self.file_list:
            sample_path = get_sample_dir(RaseSettings().getSampleDirectory(), self.detector, self.scenario.id)
            self.file_list = glob.glob(os.path.join(sample_path, "*.n42"))
            # natural sort so e.g. file10 follows file9, not file1
            self.file_list.sort(key=natural_keys)
        self.browser = WebSpectraView(self)
        self.plot_spectrum()
        self.plot_layout = QVBoxLayout(self.widget)
        self.plot_layout.addWidget(self.browser)
        self.widget.setFocus()
    def plot_spectrum(self):
        # Parse the current sample file and write its JSON for the d3 page.
        filepath = self.file_list[self.index]
        status = []
        sharedObject = SharedObject(True)
        # FIXME: add error handling
        v = readSpectrumFile(filepath, sharedObject, status, requireRASESen=False)
        data = ReadFileObject(*v)
        with open(self.json_file, "w") as json_file:
            json_str = json.dumps([{"title": os.path.basename(filepath),
                                    "livetime": data.livetime,
                                    "realtime": data.realtime,
                                    "xeqn": [data.ecal[0], data.ecal[1], data.ecal[2]],
                                    "y": [float(c) for c in data.counts.split(',')],
                                    "yScaleFactor": 1,
                                    }])
            print(json_str, file=json_file)
        self.browser.reload()
    # Slots below are auto-connected by Qt via their on_<object>_<signal>
    # naming convention (connectSlotsByName from setupUi).
    @pyqtSlot(bool)
    def on_prevMaterialButton_clicked(self, checked):
        # BUG FIX: was `if self.index >= 0`, which let the index reach -1 so
        # Python's negative indexing silently displayed the LAST file, then
        # walked backwards until an IndexError.  Stop at the first file, as
        # BaseSpectraViewerDialog does.
        if self.index > 0:
            self.index = self.index - 1
            self.plot_spectrum()
    @pyqtSlot(bool)
    def on_nextMaterialButton_clicked(self, checked):
        # Step forward one file, stopping at the last.
        if self.index < len(self.file_list)-1:
            self.index = self.index + 1
            self.plot_spectrum()
class WebSpectraView(QWebEngineView):
    """Browser widget preloaded with the bundled d3 spectrum-viewer page."""
    def __init__(self, parent):
        super(WebSpectraView, self).__init__(parent)
        spectrum_page = os.path.join(get_bundle_dir(), "d3_resources", "spectrum.html")
        self.load(QUrl.fromLocalFile(spectrum_page))
class Result3DPlottingDialog(ui_results_plotting_dialog_3d.Ui_Dialog, QDialog):
    """Dialog showing a 2-D result matrix as a colour map with pass/fail marks.

    Cells with value >= 0.75 are marked with an open circle, cells below
    0.75 with a cross.
    """
    def __init__(self, parent, df, titles):
        # :param df: pandas DataFrame of results; index -> x axis,
        #     columns -> y axis, values in [0, 1].
        # :param titles: (x label, y label, plot title).
        QDialog.__init__(self)
        self.setupUi(self)
        self.df = df
        self.titles = titles
        self.colormap = matplotlib.pyplot.get_cmap('RdYlGn')
        self.fig = Figure()
        self.canvas = FigureCanvas(self.fig)
        self.canvas.setParent(self.widget)
        self.ax = self.fig.add_subplot(111)
        self.navi_toolbar = NavigationToolbar(self.canvas, self.widget)
        self.window_layout = QVBoxLayout(self.widget)
        self.window_layout.addWidget(self.canvas)
        self.window_layout.addWidget(self.navi_toolbar)
        # Transposed so DataFrame index runs along x and columns along y.
        matplotlib.pyplot.colorbar(mappable=self.ax.imshow(
            np.array(df.T, 'float'),
            cmap=self.colormap,
            vmin=df.min().min(),
            vmax=df.max().max(),
            interpolation='bicubic',
            aspect='auto'
        ), ax=self.ax)
        self.ax.xaxis.set_ticks(np.arange(df.shape[0]))
        self.ax.xaxis.set_ticklabels(df.index)
        self.ax.tick_params(axis='x', labelsize=16)
        self.ax.yaxis.set_ticks(np.arange(df.shape[1]))
        self.ax.yaxis.set_ticklabels(df.columns)
        self.ax.tick_params(axis='y', labelsize=16)
        for x_val in range(df.shape[1]):
            vals = np.array(df.iloc[:, x_val])
            x = np.arange(df.shape[0])
            y = x_val * np.ones_like(x)
            # BUG FIX: the x and y masks were `vals >= .75` and `vals > .75`
            # respectively; for a value of exactly 0.75 the two index arrays
            # had different lengths and the plot call failed.  Use >= for
            # both, the complement of the `< .75` branch below.
            self.ax.plot(x[vals >= .75], y[vals >= .75], 'wo', markerfacecolor='none', markersize='16')
            self.ax.plot(x[vals < .75], y[vals < .75], 'wx', markersize='16')
        self.ax.invert_yaxis()
        self.ax.set_xlabel(titles[0], size='16')
        self.ax.set_ylabel(titles[1], size='16')
        self.ax.set_title(titles[2], size='20')
        self.canvas.draw()
        self.widget.setFocus()
class ResultPlottingDialog(ui_results_plotting_dialog.Ui_Dialog, QDialog):
    def __init__(self, parent, x, y, titles, labels, replications, x_err=None, y_err=None):
        """Build the result-plotting dialog and draw the initial data plot.

        :param x: list of x-value sequences, one per curve.
        :param y: list of y-value sequences, one per curve; empty/falsy to
            plot histograms of x instead.
        :param titles: (x label, y label) used for axes and log-scale checks.
        :param labels: per-curve labels; empty hides the curve list.
        :param replications: per-curve replication counts used for the
            s-curve confidence computation.
        :param x_err, y_err: optional per-curve error sequences.
        """
        QDialog.__init__(self)
        self.setupUi(self)
        self.x = x
        self.y = y
        self.x_err = x_err
        self.y_err = y_err
        self.titles = titles
        self.labels = labels
        self.replications = replications
        self.palette = None
        # seaborn not compatible with lmfit
        # (point colour, confidence-band colour) pairs, cycled by index % 6.
        self.color_array = [['b', "#E5E7E9"], ['r', "#D5DbDb"], ['g', "#CCD1D1"],
                            ['m', "#CACFD2"], ['c', "#AAB7B8"], ['k', "#99A3A4"]]
        self.fig = Figure()
        self.canvas = FigureCanvas(self.fig)
        self.canvas.setParent(self.widget)
        self.ax = self.fig.add_subplot(111)
        self.navi_toolbar = NavigationToolbar(self.canvas, self.widget)
        # Confidence-level choices; itemData holds the numeric alpha.
        self.alphaCBox.addItem('\u03B1 = 0.01 (99%)', 0.01)
        self.alphaCBox.addItem('\u03B1 = 0.05 (95%)', 0.05)
        self.alphaCBox.addItem('\u03B1 = 0.1 (90%)', 0.1)
        self.alphaCBox.addItem('\u03B1 = 0.32 (68%)', 0.32)
        self.alphaCBox.setCurrentIndex(1)
        self.window_layout = QVBoxLayout(self.widget)
        self.window_layout.addWidget(self.canvas)
        self.window_layout.addWidget(self.navi_toolbar)
        # Checkable list of curves; checked curves get s-curve fits.
        model = QStandardItemModel()
        for label in self.labels:
            item = QStandardItem(str(label))
            item.setCheckState(Qt.Checked)
            item.setCheckable(True)
            model.appendRow(item)
        self.lstCurves.setModel(model)
        if not self.labels:
            self.lstCurves.hide()
            self.lblSelectCurves.hide()
        # S-curve fitting needs at least four points per curve.
        if len(self.x[0]) < 4 and self.y:
            self.groupBox.setEnabled(False)
            self.groupBox.setToolTip('Cannot plot s-curve with less than four data points.\n'
                                     'Please choose four or more data points to enable s-curve plotting.')
        if not self.y:
            self.groupBox.setEnabled(False)
        self.btnPlotData.clicked.connect(self.plotSelection)
        self.draw()
        self.widget.setFocus()
def get_selected_labels(self):
model = self.lstCurves.model()
selected = [model.item(index).text() for index in range(model.rowCount()) if model.item(index).checkState()]
return selected
    def draw(self):
        """
        Draws the initial plot of all the data.

        With y data present, plots each curve as error-bar points (one
        colour per curve); without y data, plots histograms of the x
        sequences.  Axes switch to log scale for dose/flux titles.
        """
        self.palette = cycle(sns.color_palette())
        self.ax.clear()
        if self.y:
            self.ax.set_ylabel(self.titles[1])
            if self.labels:
                # One labelled error-bar series per curve.
                for i, (x, y, label) in enumerate(zip(self.x, self.y, self.labels)):
                    # Errors are optional per curve; transpose to the
                    # (2, n) lower/upper layout errorbar expects.
                    y_err = np.transpose(self.y_err[i]) if i < len(self.y_err) else None
                    x_err = np.transpose(self.x_err[i]) if i < len(self.x_err) else None
                    color = next(self.palette)
                    self.ax.errorbar(x, y, yerr=y_err, xerr=x_err,
                                     color=color, ecolor=color, fmt='o', capsize=3, label=label)
                self.ax.legend(bbox_to_anchor=(1.04, 1), loc="upper left", fontsize='xx-small')
            else:
                # Single unlabelled series.
                y_err = np.transpose(self.y_err[0]) if self.y_err else None
                x_err = np.transpose(self.x_err[0]) if self.x_err else None
                color = next(self.palette)
                self.ax.errorbar(self.x[0], self.y[0], yerr=y_err, xerr=x_err,
                                 color=color, ecolor=color, fmt='o', capsize=3)
        else:
            # min_n_entries = min([len(k) for k in self.x])
            # n_bins = 10 if min_n_entries <= 10 else int(np.sqrt(min_n_entries))
            self.ax.hist(self.x, bins=10, label=self.labels)
            self.ax.legend(fontsize='xx-small')
        self.ax.set_xlabel(self.titles[0])
        # Dose/flux quantities are plotted on a log scale.
        if (self.titles[0].startswith('Dose') or self.titles[0].startswith('BkgDose') or
                self.titles[0].startswith('Flux') or self.titles[0].startswith('BkgFlux')):
            self.ax.set_xscale('log')
        # NOTE(review): assumes titles always has a second entry even in
        # histogram mode - confirm against callers.
        if (self.titles[1].startswith('Dose') or self.titles[1].startswith('BkgDose') or
                self.titles[1].startswith('Flux') or self.titles[1].startswith('BkgFlux')):
            self.ax.set_yscale('log')
        self.canvas.draw()
    @pyqtSlot(bool)
    def plotSelection(self, checked):
        """
        Executes after the "plot S-curve(s)" button has been pressed and checkboxes have been checked
        (if there are any to be checked).

        Fits and draws an s-curve for each checked curve (or for the single
        unlabelled data set when there are no labels).
        """
        self.ax_text = []
        # to ensure zip and input data to s-curve function work as intended even if there is no data in the error vals
        if not self.x_err:
            x_error = [[None]] * len(self.x)
        else:
            x_error = self.x_err
        if not self.y_err:
            y_error = [[None]] * len(self.y)
        else:
            y_error = self.y_err
        if self.labels:
            # Redraw the raw data first, then overlay fits for checked curves.
            self.ax.clear()
            self.draw()
            for index, (lab, x_vals, y_vals, err_x, err_y, repl) in \
                    enumerate(zip(self.labels, self.x, self.y, x_error, y_error, self.replications)):
                if str(lab) in self.get_selected_labels():
                    # colour pairs cycle every 6 curves (see color_array)
                    color, color_hash = self.color_array[index % 6]
                    self.sCurveGen(lab, x_vals, y_vals, err_x, err_y, repl, color, color_hash)
        else:
            self.ax.clear()
            self.sCurveGen('Data', self.x[0], self.y[0], x_error[0], y_error[0], self.replications[0], 'b', '#e5e7e9')
def sCurveGen(self, label, x, y, x_err, y_err, repl, color, color_hash):
# TODO: Automatically trim off all (x, y) pairs that are above the first 1 and below the last 0 to prevent
# fits from failing to converge or giving wonky results
if x_err[0] is None:
x_err = [None] * len(x)
if y_err[0] is None:
y_err = [None] * len(y)
id_mark = self.idThreshBox.value() / 100
alpha = self.alphaCBox.itemData(self.alphaCBox.currentIndex())
B = 1
Q = 1
if self.combo_fitspace.currentText() == 'Logarithmic':
logfit = True
else:
logfit = False
errorbars = np.absolute([np.subtract((p, p), calc_result_uncertainty(p, n, alpha))
for (p, n) in zip(y, repl)])
weights = np.array([1. / ((h + l) / 2) for (h, l) in errorbars]) # weights by half total error bar length
if np.isnan(np.sum(weights)):
weights = None
r = sCurve.s_fit(x, y, weights, [B, Q], logfit)
p = [r.params[u].value for u in r.var_names]
try:
(a1, a2, B, Q) = sCurve.correlated_values(p, r.covar)
except:
(a1, a2, B, Q) = (0, 0, 0, 0)
self.txtFitResults.append("------------\n" + str(label) + "\n")
self.txtFitResults.append(r.fit_report(show_correl=False))
self.ax_text.append(label)
self.ax.plot(x, y, 'o', color=color, label='_nolegend_')
if r.covar is not None:
r.plot_fit(datafmt=' ', fitfmt=color + '-', yerr=[0] * (len(y)), ax=self.ax, numpoints=150000)
x_dense = np.linspace(min(x), max(x), 150000)
delta_y = r.eval_uncertainty(x=x_dense)
r_best = r.model.eval(r.params, x=x_dense)
if self.check_confint.isChecked():
self.ax.fill_between(x_dense, r_best - delta_y, r_best + delta_y, color=color_hash)
else:
# no confidence band to plot, so don't make fit
self.ax.plot(x, y, color + 'o')
if not y_err[0] is None:
self.ax.errorbar(x, y, | |
#!/usr/bin/env python
# coding: utf-8
# # Mask R-CNN Demo
#
# A quick intro to using the pre-trained model to detect and segment objects.
import os
import keras  # NOTE(review): unused below -- presumably imported to initialise the Keras backend early; confirm
import os  # NOTE(review): duplicate of the `import os` above; harmless but could be removed
import sys
import random
import math
import numpy as np
import skimage.io
import matplotlib
import matplotlib.pyplot as plt
# Root directory of the project
ROOT_DIR = os.path.abspath("../")
# Import Mask RCNN
sys.path.append(ROOT_DIR)  # To find local version of the library
from mrcnn import utils
import mrcnn.model as modellib
from mrcnn import visualize
# Import COCO config
# A set PYTHONPATH can shadow the local pycocotools; warn rather than fail.
if 'PYTHONPATH' in os.environ:
    print("Please unset the environment variable PYTHONPATH if you got errors with pycocotools!")
sys.path.append(os.path.join(ROOT_DIR, "samples/coco/"))  # To find local version
import coco
#get_ipython().run_line_magic('matplotlib', 'inline')
# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
# Local path to trained weights file
COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# Download COCO trained weights from Releases if needed
if not os.path.exists(COCO_MODEL_PATH):
    utils.download_trained_weights(COCO_MODEL_PATH)
# Directory of images to run detection on
IMAGE_DIR = os.path.join(ROOT_DIR, "images")
# Directory of videos to be saved as detection results
VIDEO_OUTPUT_DIR = os.path.join(ROOT_DIR, "videos")
# ## Configurations
#
# We'll be using a model trained on the MS-COCO dataset. The configurations of this model are in the ```CocoConfig``` class in ```coco.py```.
#
# For inferencing, modify the configurations a bit to fit the task. To do so, sub-class the ```CocoConfig``` class and override the attributes you need to change.
class InferenceConfig(coco.CocoConfig):
    """Inference-time overrides of the COCO training configuration."""
    # Set batch size to 1 since we'll be running inference on
    # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1
    # Optionally override the image resizing limits from the environment.
    if os.getenv('IMAGE_MAX_DIM'):
        IMAGE_MAX_DIM = int(os.getenv('IMAGE_MAX_DIM'))
    if os.getenv('IMAGE_MIN_DIM'):
        IMAGE_MIN_DIM = int(os.getenv('IMAGE_MIN_DIM'))
# Build the inference config and print it for the record.
config = InferenceConfig()
config.display()
# ## Create Model and Load Trained Weights
# Create model object in inference mode.
model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=config)
# Load weights trained on MS-COCO
model.load_weights(COCO_MODEL_PATH, by_name=True)
# ## Class Names
#
# The model classifies objects and returns class IDs, which are integer value that identify each class. Some datasets assign integer values to their classes and some don't. For example, in the MS-COCO dataset, the 'person' class is 1 and 'teddy bear' is 88. The IDs are often sequential, but not always. The COCO dataset, for example, has classes associated with class IDs 70 and 72, but not 71.
#
# To improve consistency, and to support training on data from multiple sources at the same time, our ```Dataset``` class assigns it's own sequential integer IDs to each class. For example, if you load the COCO dataset using our ```Dataset``` class, the 'person' class would get class ID = 1 (just like COCO) and the 'teddy bear' class is 78 (different from COCO). Keep that in mind when mapping class IDs to class names.
#
# To get the list of class names, you'd load the dataset and then use the ```class_names``` property like this.
# ```
# # Load COCO dataset
# dataset = coco.CocoDataset()
# dataset.load_coco(COCO_DIR, "train")
# dataset.prepare()
#
# # Print class names
# print(dataset.class_names)
# ```
#
# We don't want to require you to download the COCO dataset just to run this demo, so we're including the list of class names below. The index of the class name in the list represent its ID (first class is 0, second is 1, third is 2, ...etc.)
# COCO Class names
# Index of the class in the list is its ID. For example, to get ID of
# the teddy bear class, use: class_names.index('teddy bear')
class_names = ['BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
               'bus', 'train', 'truck', 'boat', 'traffic light',
               'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird',
               'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',
               'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',
               'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
               'kite', 'baseball bat', 'baseball glove', 'skateboard',
               'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',
               'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
               'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
               'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',
               'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
               'keyboard', 'cell phone', 'microwave', 'oven', 'toaster',
               'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',
               'teddy bear', 'hair drier', 'toothbrush']
# ## Run Object Detection
from skimage.measure import find_contours
import matplotlib.pyplot as plt  # NOTE(review): re-import; pyplot was already imported above
from matplotlib import patches, lines
from matplotlib.patches import Polygon
import time
class MaskRCNNTracker():
"""Implements tracker based segmentation ouputs.
Params:
-
Inputs:
-
Output:
A dictionay that maps the current frame's instance indexes to
the unique instance IDs that identify individual objects
"""
def __init__(self):
    """Initialise an empty tracker state."""
    self.instance_id_manager = 0      # counter used to mint unique instance IDs
    self.dict_instance_history = {}   # per-instance detection history
    self.dict_trajectories = {}       # unique instance ID -> list of past locations
    self.instance_memory_length = 2
    self.frame_number = 0  # the current frame number
    self.image_size = None  # the image size (x, y) of the current frame
    self.dict_location_prediction = {}
    self.dict_trajectory_timestamp = {}  # frame number corresponding to the last location
    # if an instance disappears (i.e., no correspondence found), how long do we keep the
    # records?
    self.time_keep_records_frames = 50  # in frames
def fill_polygons_in_bounding_map(self, poly_vertices):
    """Compute a tight bounding box around the given polygons and rasterize them.

    Given one or multiple polygons, each consisting of a sequence of vertices,
    determine a box (map) that encloses them, fill the polygon(s) within the
    map and calculate the filled area.

    Input:
    - poly_vertices: a list of polygons; each item is an N-by-2 array of [x, y] points.

    Returns:
    - tuple (left, top, right, bottom, filled_map, area, frame_number) where
      `filled_map` is a 2-D uint8 array covering the bounding box with the
      polygon interiors set to 255 and `area` is the count of filled pixels.
    """
    # NOTE(review): cv2 is used below but never imported in this file --
    # confirm `import cv2` exists at module level, or add it.
    left = 10000  # sufficiently large coordinate in x
    right = 0     # the minimum possible coordinate in x
    top = 10000   # sufficiently large coordinate in y
    bottom = 0    # the minimum possible coordinate in y
    # poly_vertices: a list of N-by-2 arrays
    for poly in poly_vertices:
        left = min(left, np.amin(poly[:, 0]))
        right = max(right, np.amax(poly[:, 0]))
        top = min(top, np.amin(poly[:, 1]))
        bottom = max(bottom, np.amax(poly[:, 1]))
    # shift every polygon so its coordinates are relative to the box origin
    pts = [poly - np.array([left, top]) for poly in poly_vertices]
    # `filled_map` (renamed from `map`, which shadowed the builtin) is a 2-D array
    filled_map = np.zeros((bottom - top + 1, right - left + 1), dtype=np.uint8)
    # mask the area
    cv2.fillPoly(filled_map, pts, color=(255))
    poly_area = np.count_nonzero(filled_map)
    return (left, top, right, bottom, filled_map, poly_area, self.frame_number)
def compute_intersection_polygons(self, tuplePolygonA, tuplePolygonB):
    """Return the Intersection over Union (IoU) of two polygon regions.

    Inputs:
    - tuplePolygonA, tuplePolygonB: tuples as produced by
      `fill_polygons_in_bounding_map`:
      (left, top, right, bottom, filled_map, area, frame_number).

    Return: IoU in the range from 0 to 1.0 (0 when the bounding boxes
    do not overlap at all).
    """
    a_left, a_top, a_right, a_bottom = tuplePolygonA[:4]
    b_left, b_top, b_right, b_bottom = tuplePolygonB[:4]
    # the bounding boxes must overlap for the silhouettes to intersect
    if b_left >= a_right or b_top >= a_bottom:
        return 0
    if a_left >= b_right or a_top >= b_bottom:
        return 0
    # overlapping rectangle of the two bounding maps
    o_left = max(a_left, b_left)
    o_right = min(a_right, b_right)
    o_top = max(a_top, b_top)
    o_bottom = min(a_bottom, b_bottom)
    # crop each filled map down to the overlap rectangle (map-local coordinates)
    crop_a = tuplePolygonA[4][(o_top - a_top):(min(a_bottom, o_bottom) - a_top + 1),
                              (o_left - a_left):(min(a_right, o_right) - a_left + 1)]
    crop_b = tuplePolygonB[4][(o_top - b_top):(min(b_bottom, o_bottom) - b_top + 1),
                              (o_left - b_left):(min(b_right, o_right) - b_left + 1)]
    # silhouette intersection within the overlap rectangle
    inter_count = np.count_nonzero(np.logical_and(crop_a, crop_b))
    union_count = tuplePolygonA[5] + tuplePolygonB[5] - inter_count
    return inter_count / union_count
def update_buffers(self):
# Update the buffers (dictionaries) for the past detection results
for uid in self.dict_trajectories:
if (len(self.dict_trajectories[uid]) > 80):
self.dict_trajectories[uid].pop(0)
uid_list = list(self.dict_trajectories.keys())
for uid in uid_list:
if (self.frame_number - self.dict_trajectory_timestamp[uid]) > self.time_keep_records_frames:
self.dict_trajectories.pop(uid)
self.dict_trajectory_timestamp.pop(uid)
def receive_first_segmentation_output(self, results, class_names, image_size):
"""
This method is called when the segmentation results for the very first frame received
Input:
- results: segmentation results as output of Mask R-CNN
- class_names: list of class names of the dataset
- image_size: image size in format (x, y)
Output:
- Tuple:
item 0: the current instance ID to assigned unique ID (dict)
item 1: Contours for current instances (dict)
"""
boxes = results['rois']
masks = results['masks']
class_ids = results['class_ids']
scores = results['scores']
self.image_size = image_size
# Number of instances
N = boxes.shape[0]
if not N:
return None
else:
assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0]
# increment the frame counter
self.frame_number = 1
# Find the instances of interest, e.g., persons
instances_of_interest = []
for i in range(N):
class_id = class_ids[i]
if class_id == class_names.index('person') and scores[i] >= 0.75:
instances_of_interest.append(i)
# Find the contours that cover detected instances
dict_contours | |
# repo: elifesciences/builder
"""`trop.py` is a module that uses the Troposphere library to build up
an AWS CloudFormation template dynamically using values from the
'context', a dictionary of data built up in `cfngen.py` derived from
the project file (`projects/elife.yaml`):
projects file -> build context -> trop.py -> cloudformation json
The non-AWS pipeline is similar:
-> terraform.py -> terraform json
see also `terraform.py`."""
import json, os
from collections import OrderedDict
from os.path import join
from . import config, utils, bvars, aws
from .config import ConfigurationError
from troposphere import GetAtt, Output, Ref, Template, ec2, rds, sns, sqs, Base64, route53, Parameter, Tags, docdb, wafv2
from troposphere import s3, cloudfront, elasticloadbalancing as elb, elasticloadbalancingv2 as alb, elasticache
from functools import partial
from .utils import ensure, subdict, lmap, isstr, deepcopy, lookup
import logging
# todo: remove on upgrade to python 3
# backports a fix we need to py2-compatible troposphere 2.7.1
# see:
# - https://github.com/cloudtools/troposphere/issues/1888
# - https://github.com/cloudtools/troposphere/commit/15478380cc0775c1cb915b74c031d68ca988b1c5
# backported fix (see links above): allow ProtocolVersion on TargetGroup
alb.TargetGroup.props["ProtocolVersion"] = (str, False)
LOG = logging.getLogger(__name__)
# CloudFormation logical resource titles used throughout this module.
SECURITY_GROUP_TITLE = "StackSecurityGroup"
SECURITY_GROUP_ELB_TITLE = "ELBSecurityGroup"
EC2_TITLE = 'EC2Instance1'
EC2_TITLE_NODE = 'EC2Instance%d'  # per-node title, e.g. 'EC2Instance2'
ELB_TITLE = 'ElasticLoadBalancer'
RDS_TITLE = "AttachedDB"
RDS_SG_ID = "DBSecurityGroup"
RDS_DB_PG = "RDSDBParameterGroup"
DBSUBNETGROUP_TITLE = 'AttachedDBSubnet'
EXT_TITLE = "ExtraStorage%s"   # per-node extra EBS volume
EXT_MP_TITLE = "MountPoint%s"  # per-node volume attachment
R53_EXT_TITLE = "ExtDNS"
R53_EXT_TITLE_NODE = "ExtDNS%s"
R53_INT_TITLE = "IntDNS"
R53_INT_TITLE_NODE = "IntDNS%s"
R53_CDN_TITLE = "CloudFrontCDNDNS%s"
R53_CNAME_TITLE = "CnameDNS%s"
R53_FASTLY_TITLE = "FastlyDNS%s"
CLOUDFRONT_TITLE = 'CloudFrontCDN'
# from http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-route53-aliastarget.html
CLOUDFRONT_HOSTED_ZONE_ID = 'Z2FDTNDATAQYW2'
CLOUDFRONT_ERROR_ORIGIN_ID = 'ErrorsOrigin'
ELASTICACHE_TITLE = 'ElastiCache%s'
ELASTICACHE_SECURITY_GROUP_TITLE = 'ElastiCacheSecurityGroup'
ELASTICACHE_SUBNET_GROUP_TITLE = 'ElastiCacheSubnetGroup'
ELASTICACHE_PARAMETER_GROUP_TITLE = 'ElastiCacheParameterGroup'
ALB_TITLE = 'ELBv2'
KEYPAIR = "KeyName"  # template parameter name for the SSH keypair
# --- utils, used by by more than one resource
def _remove_if_none(data, key_list):
"""deletes a list of keys from given `data` if keyed value is `None`.
Cloudformation may not accept `null` for a value but will accept the absence of it's key."""
for key in key_list:
if data[key] is None:
del data[key]
def _sanitize_title(string):
"""a form of slugification.
foo = Foo
foo-bar => FooBar
foo-bar-baz => FooBarBaz
FOO-BAR-BAZ => FooBarBaz"""
return "".join(map(str.capitalize, string.split("-")))
def _convert_ports_to_dictionary(ports):
if isinstance(ports, list):
ports_map = OrderedDict()
for p in ports:
if isinstance(p, int):
ports_map[p] = {}
elif isinstance(p, dict):
ensure(len(p) == 1, "Single port definition cannot contain more than one value")
from_port = list(p.keys())[0]
configuration = list(p.values())[0]
ports_map[from_port] = configuration
else:
raise ValueError("Invalid port definition: %s" % (p,))
elif isinstance(ports, dict):
ports_map = OrderedDict()
for p, configuration in ports.items():
if isinstance(configuration, bool):
# temporary
ports_map[p] = {}
elif isinstance(configuration, int):
ports_map[p] = {'guest': configuration}
elif isinstance(configuration, OrderedDict):
ports_map[p] = configuration
else:
raise ValueError("Invalid port definition: %s => %s" % (p, configuration))
else:
raise ValueError("Invalid ports definition: %s" % ports)
return ports_map
def merge_ports(ports, another):
    """Return a new OrderedDict with `another`'s entries merged over `ports`."""
    merged = OrderedDict()
    merged.update(ports)
    merged.update(another)
    return merged
def convert_ports_dict_to_troposphere(ports):
    """Convert a ports mapping into a list of `ec2.SecurityGroupRule` objects."""
    rules = []
    for port, configuration in ports.items():
        rules.append(ec2.SecurityGroupRule(
            FromPort=port,
            # guest port defaults to the host port when not remapped
            ToPort=configuration.get('guest', port),
            IpProtocol=configuration.get('protocol', 'tcp'),
            CidrIp=configuration.get('cidr-ip', '0.0.0.0/0'),
        ))
    return rules
def security_group(group_id, vpc_id, ingress_data, description=""):
    """Build an `ec2.SecurityGroup` for `vpc_id` with the given ingress rules."""
    return ec2.SecurityGroup(
        group_id,
        GroupDescription=description or 'security group',
        VpcId=vpc_id,
        SecurityGroupIngress=convert_ports_dict_to_troposphere(ingress_data),
    )
def _instance_tags(context, node=None):
    """Return a dictionary of common tags for an instance.

    Passing in the node's number adds the node-specific tags too."""
    tags = aws.generic_tags(context)
    if node:
        # this instance is part of a cluster
        tags['Name'] = '%s--%d' % (context['stackname'], node)  # "journal--prod--1"
        tags['Node'] = node  # "1"
    return tags
def instance_tags(context, node=None, single_tag_obj=False):
    """Same as `_instance_tags`, but as troposphere tag objects.

    When `single_tag_obj` is `True`, a single `Tags` (plural) object is
    returned, as newer troposphere resources use `troposphere.Tags` to model
    a collection of `Tag` objects; otherwise a list of `ec2.Tag`."""
    data = _instance_tags(context, node)
    if not single_tag_obj:
        return [ec2.Tag(k, str(v)) for k, v in data.items()]
    return Tags(data)
def mkoutput(title, desc, val):
    """Build a template Output; a (resource, attribute) tuple becomes a GetAtt."""
    value = GetAtt(val[0], val[1]) if isinstance(val, tuple) else val
    return Output(title, Description=desc, Value=value)
def overridden_component(context, component, index, allowed, interesting=None):
    """Two-level merging of overrides into one of context's components."""
    interesting = interesting or allowed
    overrides = context[component].get('overrides', {}).get(index, {})
    # every override key must be explicitly whitelisted
    for element in overrides:
        ensure(element in allowed, "`%s` override is not allowed for `%s` clusters" % (element, component))
    new_context = deepcopy(context)
    new_context[component].pop('overrides', None)
    merged = new_context[component]
    for key, value in overrides.items():
        if key not in interesting:
            continue
        assert key in merged, "Can't override `%s` as it's not already a key in `%s`" % (key, merged.keys())
        # dict values are merged shallowly, everything else is replaced
        if isinstance(merged[key], dict):
            merged[key].update(value)
        else:
            merged[key] = value
    return merged
def _is_domain_2nd_level(hostname):
"""returns True if hostname is a 2nd level TLD.
e.g. the 'elifesciences' in 'journal.elifesciences.org'.
'.org' would be the first-level domain name, and 'journal' would be the third-level or 'sub' domain name."""
return hostname.count(".") == 1
def using_elb(context):
    """Return True when traffic should go to the classic ELB, False for the ALB."""
    # two load balancers present: the explicit primary wins
    if context.get('primary_lb') == 'alb':
        return False
    # otherwise, use the ELB whenever one is configured
    return True if context.get('elb') else False
def cnames(context):
    """additional CNAME DNS entries pointing to full_hostname

    2nd-level hostnames (e.g. 'elifesciences.net') cannot be CNAMEs at the
    zone apex and are emitted as Route53 A-record aliases against the
    stack's load balancer; every other subdomain becomes a plain CNAME to
    `context['full_hostname']`."""
    ensure(isstr(context['domain']), "A 'domain' must be specified for CNAMEs to be built")

    def entry(hostname, i):
        if _is_domain_2nd_level(hostname):
            # must be an alias as it is a 2nd-level domain like elifesciences.net
            # NOTE(review): indexes context['elb'] and context['alb'] directly --
            # presumably both keys always exist in a built context; confirm,
            # otherwise this raises KeyError rather than the ensure message.
            ensure(context['elb'] or context['alb'], "2nd-level domain aliases are only supported for ELBs and ALBs")
            hostedzone = hostname + "."  # "elifesciences.org."
            # ELBs take precendence.
            # disabling the ELB during migration will replace the ELB DNS entries with ALB DNS entries.
            target = ELB_TITLE if using_elb(context) else ALB_TITLE
            return route53.RecordSetType(
                R53_CNAME_TITLE % (i + 1),
                HostedZoneName=hostedzone,
                Name=hostname,
                Type="A",
                AliasTarget=route53.AliasTarget(
                    GetAtt(target, "CanonicalHostedZoneNameID" if using_elb(context) else "CanonicalHostedZoneID"),
                    GetAtt(target, "DNSName")
                )
            )
        hostedzone = context['domain'] + "."
        return route53.RecordSetType(
            R53_CNAME_TITLE % (i + 1),
            HostedZoneName=hostedzone,
            Name=hostname,
            Type="CNAME",
            TTL="60",
            ResourceRecords=[context['full_hostname']],
        )
    return [entry(hostname, i) for i, hostname in enumerate(context['subdomains'])]
#
# render_* functions
#
# --- ec2
def build_vars(context, node):
    """Return a subset of the context data plus node information.

    The result is encoded and stored on the ec2 instance at
    /etc/build-vars.json.b64."""
    buildvars = deepcopy(context)
    # preserve only the useful parts of the project data -- all of it is too much
    keepers = ['formula-repo', 'formula-dependencies']
    buildvars['project'] = subdict(buildvars['project'], keepers)
    buildvars['node'] = node
    buildvars['nodename'] = "%s--%s" % (context['stackname'], node)  # "journal--prod--1"
    return buildvars
def ec2instance(context, node):
    """Build the `ec2.Instance` resource for one node of the stack.

    Encodes the build vars onto the instance via UserData and places the
    node in the primary or redundant subnet depending on its number.
    """
    lu = partial(utils.lu, context)
    buildvars = build_vars(context, node)
    buildvars_serialization = bvars.encode_bvars(buildvars)
    # odd nodes go in the primary subnet, even nodes in the redundant one
    odd = node % 2 == 1
    subnet_id = lu('aws.subnet-id') if odd else lu('aws.redundant-subnet-id')
    # bugfix: read the fragment with a context manager so the file handle
    # is closed instead of leaked
    with open(join(config.SCRIPTS_PATH, '.clean-server.sh.fragment'), 'r') as fragment:
        clean_server_script = fragment.read()
    project_ec2 = {
        "ImageId": lu('ec2.ami'),
        "InstanceType": lu('ec2.type'),  # "t2.small", "m1.medium", etc
        "KeyName": Ref(KEYPAIR),
        "SecurityGroupIds": [Ref(SECURITY_GROUP_TITLE)],
        "SubnetId": subnet_id,  # "subnet-1d4eb46a"
        "Tags": instance_tags(context, node),
        # send script output to AWS EC2 console, syslog and /var/log/user-data.log
        # - https://alestic.com/2010/12/ec2-user-data-output/
        "UserData": Base64("""#!/bin/bash
set -x
exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1
echo %s > /etc/build-vars.json.b64
%s""" % (buildvars_serialization, clean_server_script)),
    }
    if lu('ec2.cpu-credits') != 'standard':
        project_ec2["CreditSpecification"] = ec2.CreditSpecification(
            CPUCredits=lu('ec2.cpu-credits'),
        )
    if context['ec2'].get('root'):
        project_ec2['BlockDeviceMappings'] = [{
            'DeviceName': '/dev/sda1',
            'Ebs': {
                'VolumeSize': context['ec2']['root']['size'],
                'VolumeType': context['ec2']['root'].get('type', 'standard'),
                # unfortunately root volumes do not support Tags:
                # https://blog.cloudability.com/two-solutions-for-tagging-elusive-aws-ebs-volumes/
            }
        }]
    return ec2.Instance(EC2_TITLE_NODE % node, **project_ec2)
def render_ext_volume(context, context_ext, template, actual_ec2_instances, node=1):
    """Add an `ec2.Volume` for one node's extra storage to `template`, and a
    `VolumeAttachment` when the node's instance actually exists."""
    vtype = context_ext.get('type', 'standard')  # todo: no default values here, push this into cfngen.py
    # place the volume in the instance's AZ; when the instance is suppressed,
    # fall back to the AZ the node number maps to (odd = primary).
    if node in actual_ec2_instances:
        availability_zone = GetAtt(EC2_TITLE_NODE % node, "AvailabilityZone")
    else:
        availability_zone = context['aws']['availability-zone'] if node % 2 == 1 else context['aws']['redundant-availability-zone']
    # 2021-10-05: iiif--prod--2 died and the MountPoint failed to attach to the ext Volume during re-creation.
    # I suspected a bad ext Volume and needed CloudFormation to delete it for me.
    # preventing it's creation here when the given `node` was being suppressed, successfully allowed me to recover.
    # if node not in actual_ec2_instances:
    #     return
    args = {
        "Size": str(context_ext['size']),
        "AvailabilityZone": availability_zone,
        "VolumeType": vtype,
        "Tags": instance_tags(context, node),
    }
    ec2v = ec2.Volume(EXT_TITLE % node, **args)
    template.add_resource(ec2v)
    # only attach the volume when the instance exists in the template
    if node in actual_ec2_instances:
        args = {
            "InstanceId": Ref(EC2_TITLE_NODE % node),
            "VolumeId": Ref(ec2v),
            "Device": context_ext.get('device'),
        }
        template.add_resource(ec2.VolumeAttachment(EXT_MP_TITLE % node, **args))
def render_ext(context, template, cluster_size, actual_ec2_instances):
    """Add an external EBS volume (and attachment) for every node in the cluster."""
    # backward compatibility: ext is still specified outside of ec2 rather than as a sub-key
    context['ec2']['ext'] = context['ext']
    for node in range(1, cluster_size + 1):
        node_overrides = context['ec2'].get('overrides', {}).get(node, {})
        per_node_context = deepcopy(context)
        per_node_context['ext'].update(node_overrides.get('ext', {}))
        # TODO: extract `allowed` variable
        ec2_component = overridden_component(context, 'ec2', index=node, allowed=['type', 'ext'])
        render_ext_volume(per_node_context, ec2_component.get('ext', {}), template, actual_ec2_instances, node)
def external_dns_ec2_single(context):
    """Route53 A-record pointing the stack's public hostname at the EC2 instance's public IP."""
    # The DNS name of an existing Amazon Route 53 hosted zone -- TRAILING DOT IS IMPORTANT!
    hostedzone = context['domain'] + "."
    return route53.RecordSetType(
        R53_EXT_TITLE,
        HostedZoneName=hostedzone,
        Comment="External DNS record for EC2",
        Name=context['full_hostname'] + '.',
        Type="A",
        TTL="60",
        ResourceRecords=[GetAtt(EC2_TITLE, "PublicIp")],
    )
def internal_dns_ec2_single(context):
# The DNS name of an existing Amazon Route 53 hosted zone
hostedzone = context['int_domain'] + "." # TRAILING DOT IS IMPORTANT!
dns_record = route53.RecordSetType(
R53_INT_TITLE,
| |
# ross/disk_element.py
import bokeh.palettes as bp
from bokeh.models import ColumnDataSource, HoverTool
import matplotlib.patches as mpatches
import numpy as np
import toml
from ross.utils import read_table_file  # NOTE(review): unused in the visible part of this file; confirm
from ross.element import Element
__all__ = ["DiskElement"]
# palette used for element colors (index 9 is the default disk color)
bokeh_colors = bp.RdGy[11]
class DiskElement(Element):
"""A disk element.
This class will create a disk element from input data of inertia and mass.
Parameters
----------
n: int
Node in which the disk will be inserted.
m : float
Mass of the disk element.
Id : float
Diametral moment of inertia.
Ip : float
Polar moment of inertia
tag : str, optional
A tag to name the element
Default is None
Examples
--------
>>> disk = DiskElement(n=0, m=32, Id=0.2, Ip=0.3)
>>> disk.Ip
0.3
"""
def __init__(self, n, m, Id, Ip, tag=None):
    # node number is coerced to int; n_l/n_r keep the raw value -- presumably
    # for compatibility with elements that span two nodes (TODO confirm)
    self.n = int(n)
    self.n_l = n
    self.n_r = n
    self.m = m    # disk mass
    self.Id = Id  # diametral moment of inertia
    self.Ip = Ip  # polar moment of inertia
    self.tag = tag  # optional element name
    self.color = bokeh_colors[9]  # default draw color
def __eq__(self, other):
    """Allow disk elements to be compared attribute by attribute.

    Parameters
    ----------
    other: object
        The second object to be compared with.

    Returns
    -------
    bool
        True if the comparison is true; False otherwise.

    Examples
    --------
    >>> disk1 = disk_example()
    >>> disk2 = disk_example()
    >>> disk1 == disk2
    True
    """
    for attr, value in self.__dict__.items():
        try:
            # numeric attributes: compare with a float tolerance
            if not np.allclose(value, other.__dict__[attr]):
                return False
        except TypeError:
            # non-numeric attributes (e.g. tag/color strings): exact compare
            if value != other.__dict__[attr]:
                return False
    return True
def __repr__(self):
    """Return a string representation of this disk element.

    Examples
    --------
    >>> disk = disk_example()
    >>> disk # doctest: +ELLIPSIS
    DiskElement(Id=0.17809, Ip=0.32956...
    """
    cls_name = self.__class__.__name__
    spec = "0.5"  # 5 significant digits, same spec as the original f-string
    return "{}(Id={}, Ip={}, m={}, color={!r}, n={}, tag={!r})".format(
        cls_name,
        format(self.Id, spec),
        format(self.Ip, spec),
        format(self.m, spec),
        self.color,
        self.n,
        self.tag,
    )
def __hash__(self):
    # hash on the tag only; equal elements share equal tags (see __eq__),
    # so the eq/hash contract holds
    return hash(self.tag)
def save(self, file_name):
    """Save this disk element into a toml file.

    Works as an auxiliary function of the save function in the Rotor class.

    Parameters
    ----------
    file_name: string
        The name of the file the disk element will be saved in.

    Returns
    -------
    None

    Examples
    --------
    >>> disk = disk_example()
    >>> disk.save('DiskElement.toml')
    """
    data = self.load_data(file_name)
    entry = {
        "n": self.n,
        "m": self.m,
        "Id": self.Id,
        "Ip": self.Ip,
        "tag": self.tag,
    }
    # keyed by node number so repeated saves overwrite the same slot
    data["DiskElement"][str(self.n)] = entry
    self.dump_data(data, file_name)
@staticmethod
def load(file_name="DiskElement"):
    """Load a list of disk elements saved in toml format.

    Parameters
    ----------
    file_name: str
        The name of the file of the disk element to be loaded.

    Returns
    -------
    disk_elements: list
        A list of disk elements.

    Examples
    --------
    >>> disk1 = disk_example()
    >>> disk1.save('DiskElement.toml')
    >>> list_of_disks = DiskElement.load('DiskElement.toml')
    >>> disk1 == list_of_disks[0]
    True
    """
    disk_elements = []
    # bugfix: honour the `file_name` argument; the path was hard-coded to
    # "DiskElement.toml" and the parameter was silently ignored.
    with open(file_name, "r") as f:
        disk_elements_dict = toml.load(f)
    for element in disk_elements_dict["DiskElement"]:
        disk_elements.append(
            DiskElement(**disk_elements_dict["DiskElement"][element])
        )
    return disk_elements
def dof_mapping(self):
    """Map each degree of freedom to its local index.

    Returns
    -------
    dof_mapping: dict
        A dictionary containing the degrees of freedom and their indexes.

    Examples
    --------
    >>> disk = disk_example()
    >>> disk.dof_mapping()
    {'x_0': 0, 'y_0': 1, 'alpha_0': 2, 'beta_0': 3}
    """
    return {"x_0": 0, "y_0": 1, "alpha_0": 2, "beta_0": 3}
def M(self):
    """Mass matrix (4x4) of the disk element.

    Translational dofs carry the mass, rotational dofs the diametral inertia.

    Returns
    -------
    Mass matrix for the disk element.

    Examples
    --------
    >>> disk = DiskElement(0, 32.58972765, 0.17808928, 0.32956362)
    >>> disk.M()
    array([[32.58972765,  0.        ,  0.        ,  0.        ],
           [ 0.        , 32.58972765,  0.        ,  0.        ],
           [ 0.        ,  0.        ,  0.17808928,  0.        ],
           [ 0.        ,  0.        ,  0.        ,  0.17808928]])
    """
    mass, inertia = self.m, self.Id
    return np.diag([mass, mass, inertia, inertia])
def K(self):
    """Stiffness matrix of the disk element.

    A rigid disk contributes no stiffness, so this is all zeros.

    Returns
    -------
    K: np.ndarray
        A matrix of floats containing the values of the stiffness matrix.

    Examples
    --------
    >>> disk = disk_example()
    >>> disk.K()
    array([[0., 0., 0., 0.],
           [0., 0., 0., 0.],
           [0., 0., 0., 0.],
           [0., 0., 0., 0.]])
    """
    return np.zeros((4, 4))
def C(self):
    """Damping matrix of the disk element.

    A rigid disk contributes no damping, so this is all zeros.

    Returns
    -------
    C: np.ndarray
        A matrix of floats containing the values of the damping matrix.

    Examples
    --------
    >>> disk = disk_example()
    >>> disk.C()
    array([[0., 0., 0., 0.],
           [0., 0., 0., 0.],
           [0., 0., 0., 0.],
           [0., 0., 0., 0.]])
    """
    return np.zeros((4, 4))
def G(self):
    """Gyroscopic matrix (4x4) of the disk element.

    Only the rotational dofs couple, through the polar moment of inertia.

    Returns
    -------
    G: np.ndarray
        Gyroscopic matrix for the disk element.

    Examples
    --------
    >>> disk = DiskElement(0, 32.58972765, 0.17808928, 0.32956362)
    >>> disk.G()
    array([[ 0.        ,  0.        ,  0.        ,  0.        ],
           [ 0.        ,  0.        ,  0.        ,  0.        ],
           [ 0.        ,  0.        ,  0.        ,  0.32956362],
           [ 0.        ,  0.        , -0.32956362,  0.        ]])
    """
    Ip = self.Ip
    return np.array([
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 0, Ip],
        [0, 0, -Ip, 0],
    ])
def patch(self, position, ax):
    """Draw the disk element on a matplotlib axes.

    Parameters
    ----------
    position : tuple
        (z, y) position at which the patch will be drawn.
    ax : matplotlib axes
        Axes in which the plot will be drawn.

    Returns
    -------
    """
    zpos, ypos = position
    step = ypos / 5
    # matplotlib node (x pos), outer diam. (y pos):
    # two triangles (upper/lower stems) capped by two circles
    upper_stem = [
        [zpos, ypos],
        [zpos + step, ypos * 4],
        [zpos - step, ypos * 4],
        [zpos, ypos],
    ]
    lower_stem = [
        [zpos, -ypos],
        [zpos + step, -ypos * 4],
        [zpos - step, -ypos * 4],
        [zpos, -ypos],
    ]
    for stem in (upper_stem, lower_stem):
        ax.add_patch(mpatches.Polygon(stem, facecolor=self.color))
    for y_center in (ypos * 4, -ypos * 4):
        ax.add_patch(
            mpatches.Circle(xy=(zpos, y_center), radius=step, color=self.color)
        )
def bokeh_patch(self, position, bk_ax):
    """Draw the disk element on a bokeh figure.

    Parameters
    ----------
    bk_ax : bokeh plotting axes
        Axes in which the plot will be drawn.
    position : float
        Position in which the patch will be drawn.

    Returns
    -------
    bk_ax : bokeh plotting axes
        Returns the axes object with the plot.
    """
    zpos, ypos = position
    step = ypos / 5
    # bokeh plot - coordinates to plot disks elements
    z_upper = [zpos, zpos + step, zpos - step]
    y_upper = [ypos, ypos * 4, ypos * 4]
    z_lower = [zpos, zpos + step, zpos - step]
    y_lower = [-ypos, -ypos * 4, -ypos * 4]
    # hover data for the triangular stems
    source = ColumnDataSource(
        dict(
            z_l=[z_lower],
            y_l=[y_lower],
            z_u=[z_upper],
            y_u=[y_upper],
            elnum=[self.n],
            IP=[self.Ip],
            ID=[self.Id],
            mass=[self.m],
            tag=[self.tag],
        )
    )
    # hover data for the circular caps
    source_c = ColumnDataSource(
        dict(
            z_circle=[z_upper[0]],
            yu_circle=[y_upper[1]],
            yl_circle=[-y_upper[1]],
            radius=[step],
            elnum=[self.n],
            IP=[self.Ip],
            ID=[self.Id],
            mass=[self.m],
            tag=[self.tag],
        )
    )
    bk_ax.patches(
        xs="z_u",
        ys="y_u",
        source=source,
        alpha=1,
        line_width=2,
        color=self.color,
        legend="Disk",
        name="ub_disk",
    )
    bk_ax.patches(
        xs="z_l",
        ys="y_l",
        source=source,
        alpha=1,
        line_width=2,
        color=self.color,
        # bugfix: was "ub_disk", so the hover tool (which looks for
        # "lb_disk") never attached to the lower patch
        name="lb_disk",
    )
    bk_ax.circle(
        x="z_circle",
        y="yu_circle",
        radius="radius",
        source=source_c,
        fill_alpha=1,
        color=self.color,
        name="uc_disk",
    )
    bk_ax.circle(
        x="z_circle",
        y="yl_circle",
        radius="radius",
        source=source_c,
        fill_alpha=1,
        color=self.color,
        name="lc_disk",
    )
    hover = HoverTool(names=["uc_disk", "lc_disk", "ub_disk", "lb_disk"])
    hover.tooltips = [
        ("Disk Node :", "@elnum"),
        ("Polar Moment of Inertia :", "@IP"),
        ("Diametral Moment of Inertia :", "@ID"),
        ("Disk mass :", "@mass"),
        ("Tag :", "@tag"),
    ]
    hover.mode = "mouse"
    return hover
@classmethod
def from_geometry(cls, n, material, width, i_d, o_d, tag=None):
"""A disk element.
This class method will create a disk element from geometry data.
Parameters
----------
n: int
Node in which the disk will be inserted.
material: ross.Material
Shaft material.
width: float
The disk width.
i_d: float
Inner diameter.
o_d: float
Outer diameter.
Attributes
----------
m : float
Mass of the disk element.
Id : float
Diametral moment of inertia.
Ip : float
Polar moment of inertia
tag : str, optional
A tag to name the element
Default is None
Examples
--------
>>> from ross.materials import steel
>>> disk = DiskElement.from_geometry(0, steel, 0.07, 0.05, 0.28)
>>> disk.Ip
0.32956362089137037
"""
m = 0.25 * material.rho * np.pi * width * (o_d ** 2 - i_d ** 2)
# fmt: off
Id = (
0.015625 * material.rho * np.pi * width | |
# coding: utf-8
# # Text Data
#
# ## Pre-introduction
#
# We'll be spending a lot of time today manipulating text. Make sure you remember how to split, join, and search strings.
# ## Introduction
#
# We've spent a lot of time in python dealing with text data, and that's because text data is everywhere. It is the primary form of communication between persons and persons, persons and computers, and computers and computers. The kind of inferential methods that we apply to text data, however, are different from those applied to tabular data.
#
# This is partly because documents are typically specified in a way that expresses both structure and content using text (i.e. the document object model).
#
# Largely, however, it's because text is difficult to turn into numbers in a way that preserves the information in the document. Today, we'll talk about the dominant language model in NLP and the basics of how to implement it in Python.
#
# ### The term-document model
#
# This is also sometimes referred to as "bag-of-words" by those who don't think very highly of it. The term document model looks at language as individual communicative efforts that contain one or more tokens. The kind and number of the tokens in a document tells you something about what is attempting to be communicated, and the order of those tokens is ignored.
#
# To start with, let's load a document.
# In[1]:
import nltk
#nltk.download('webtext')
document = nltk.corpus.webtext.open('grail.txt').read()
# Let's see what's in this document
# In[2]:
len(document.split('\n'))
# In[3]:
document.split('\n')[0:10]
# It looks like we've gotten ourselves a bit of the script from Monty Python and the Holy Grail. Note that when we are looking at the text, part of the structure of the document is written in tokens. For example, stage directions have been placed in brackets, and the names of the person speaking are in all caps.
#
# ## Regular expressions
#
# If we wanted to read out all of the stage directions for analysis, or just King Arthur's lines, doing so in base python string processing will be very difficult. Instead, we are going to use regular expressions. Regular expressions are a method for string manipulation that match patterns instead of bytes.
# In[4]:
import re
snippet = "I fart in your general direction! Your mother was a hamster, and your father smelt of elderberries!"
re.search(r'mother', snippet)
# Just like with `str.find`, we can search for plain text. But `re` also gives us the option for searching for patterns of bytes - like only alphabetic characters.
# In[5]:
re.search(r'[a-z]', snippet)
# In this case, we've told re to search for the first sequence of bytes that is only composed of lowercase letters between `a` and `z`. We could get the letters at the end of each sentence by including a bang at the end of the pattern.
# In[6]:
re.search(r'[a-z]!', snippet)
# If we wanted to pull out just the stage directions from the screenplay, we might try a pattern like this:
# In[7]:
re.findall(r'[a-zA-Z]', document)[0:10]
# So that's obviously no good. There are two things happening here:
#
# 1. `[` and `]` do not mean 'bracket'; they are special characters which mean 'any thing of this class'
# 2. we've only matched one letter each
#
# A better regular expression, then, would wrap this in escaped brackets, and include a command saying more than one letter.
#
# Re is flexible about how you specify numbers - you can match none, some, a range, or all repetitions of a sequence or character class.
#
# character | meaning
# ----------|--------
# `{x}` | exactly x repetitions
# `{x,y}` | between x and y repetitions
# `?` | 0 or 1 repetition
# `*` | 0 or many repetitions
# `+` | 1 or many repetitions
# In[8]:
re.findall(r'\[[a-zA-Z]+\]', document)[0:10]
# This is better, but it's missing that `[clop clop clop]` we saw above. This is because we told the regex engine to match any alphabetic character, but we did not specify whitespaces, commas, etc. To match these, we'll use the dot operator, which will match anything except a newline.
#
# Part of the power of regular expressions are their special characters. Common ones that you'll see are:
#
# character | meaning
# ----------|--------
# `.` | match anything except a newline
# `^` | match the start of a line
# `$` | match the end of a line
# `\s` | matches any whitespace or newline
#
# Finally, we need to fix this `+` character. It is a 'greedy' operator, which means it will match as much of the string as possible. To see why this is a problem, try:
# In[9]:
snippet = 'This is [cough cough] and example of a [really] greedy operator'
re.findall(r'\[.+\]', snippet)
# Since the operator is greedy, it is matching everything in between the first open and the last close bracket. To make `+` consume the least possible amount of string, we'll add a `?`.
# In[10]:
p = re.compile(r'\[.+?\]')
re.findall(p, document)[0:10]
# What if we wanted to grab all of Arthur's speech? This one is a little trickier, since:
#
# 1. It is not conveniently bracketed; and,
# 2. We want to match on ARTHUR, but not to capture it
#
# If we wanted to do this using base string manipulation, we would need to do something like:
#
# ```
# split the document into lines
# create a new list of just lines that start with ARTHUR
# create a newer list with ARTHUR removed from the front of each element
# ```
#
# Regex gives us a way of doing this in one line, by using something called groups. Groups are pieces of a pattern that can be ignored, negated, or given names for later retrieval.
#
# character | meaning
# ----------|--------
# `(x)` | match x
# `(?:x)` | match x but don't capture it
# `(?P<x>)` | match something and give it name x
# `(?=x)` | match only if string is followed by x
# `(?!x)` | match only if string is not followed by x
# In[11]:
p = re.compile(r'(?:ARTHUR: )(.+)')
re.findall(p, document)[0:10]
# Because we are using `findall`, the regex engine is capturing and returning the normal groups, but not the non-capturing group. For complicated, multi-piece regular expressions, you may need to pull groups out separately. You can do this with names.
# In[12]:
p = re.compile(r'(?P<name>[A-Z ]+)(?::)(?P<line>.+)')
match = re.search(p, document)
match
# In[13]:
match.group('name'), match.group('line')
# #### Now let's try a small challenge!
#
# To check that you've understood something about regular expressions, we're going to have you do a small test challenge. Partner up with the person next to you - we're going to do this as a pair coding exercise - and choose which computer you are going to use.
#
# Then, navigate to `challenges/03_analysis/` and read through challenge A. When you think you've completed it successfully, run `py.test test_A.py` .
# ## Tokenizing
#
# Let's grab Arthur's speech from above, and see what we can learn about Arthur from it.
# In[14]:
p = re.compile(r'(?:ARTHUR: )(.+)')
arthur = ' '.join(re.findall(p, document))
arthur[0:100]
# In our model for natural language, we're interested in words. The document is currently a continuous string of bytes, which isn't ideal. You might be tempted to separate this into words using your newfound regex knowledge:
# In[15]:
p = re.compile(r'\w+', flags=re.I)
re.findall(p, arthur)[0:10]
# But this is problematic for languages that make extensive use of punctuation. For example, see what happens with:
# In[16]:
re.findall(p, "It isn't Dav's cheesecake that I'm worried about")
# The practice of pulling apart a continuous string into units is called "tokenizing", and it creates "tokens". NLTK, the canonical library for NLP in Python, has a couple of implementations for tokenizing a string into words.
# In[17]:
from nltk import word_tokenize
word_tokenize("It isn't Dav's cheesecake that I'm worried about")
# The distinction here is subtle, but look at what happened to "isn't". It's been separated into "IS" and "N'T", which is more in keeping with the way contractions work in English.
# In[18]:
tokens = word_tokenize(arthur)
tokens[0:10]
# At this point, we can start asking questions like what are the most common words, and what | |
#------------------------------------------------------------------------------
# Copyright (c) 2013, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#------------------------------------------------------------------------------
from enaml.qt.QtCore import QMargins, QSize, QEvent
from enaml.qt.QtGui import (
QFrame, QLayout, QTabWidget, QGridLayout, QStackedLayout, QVBoxLayout,
QWidget, QStyle, QStyleOption
)
from .q_dock_bar import QDockBarManager
class QDockAreaLayout(QStackedLayout):
    """ A custom stacked layout for the QDockArea.

    Unlike the default QStackedLayout behavior, which aggregates the
    size hints of all managed widgets, this layout reports the hints
    of the currently visible widget only.

    """
    def sizeHint(self):
        """ Get the size hint for the layout.

        The hint is that of the current widget, or a fixed fallback
        size when no widget is current.

        """
        current = self.currentWidget()
        if current is None:
            return QSize(256, 192)
        return current.sizeHint()

    def minimumSize(self):
        """ Get the minimum size for the layout.

        The minimum is that of the current widget, or a fixed fallback
        size when no widget is current.

        """
        current = self.currentWidget()
        if current is None:
            return QSize(256, 192)
        return current.minimumSizeHint()
class QDockArea(QFrame):
""" A custom QFrame which provides an area for docking QDockItems.
A dock area is used by creating QDockItem instances using the dock
area as the parent. A DockLayout instance can then be created and
applied to the dock area with the 'setDockLayout' method. The names
in the DockLayoutItem objects are used to find the matching dock
item widget child.
"""
    def __init__(self, parent=None):
        """ Initialize a QDockArea.

        Parameters
        ----------
        parent : QWidget
            The parent of the dock area.

        """
        super(QDockArea, self).__init__(parent)
        # The dock bar manager owns the pinned dock bars on the edges.
        self._dock_bar_manager = QDockBarManager(self)
        # The primary pane fills the area; the central pane sits in the
        # middle cell of the primary pane's grid, surrounded by the
        # cells reserved for the dock bars.
        self._primary_pane = primary_pane = QWidget(self)
        self._central_pane = central_pane = QWidget(primary_pane)
        self._dock_events_enabled = False
        self._opaque_resize = None
        # None means "inherit from ancestor dock area" (see tabPosition).
        self._tab_position = None

        # The central pane holds the central dock widget in a simple
        # vertical box layout.
        central_layout = QVBoxLayout()
        central_layout.setContentsMargins(QMargins(0, 0, 0, 0))
        central_layout.setSizeConstraint(QLayout.SetMinimumSize)
        central_pane.setLayout(central_layout)

        # 3x3 grid: only the middle cell (1, 1) stretches; the border
        # rows/columns are used by the dock bars.
        grid_layout = QGridLayout()
        grid_layout.setRowStretch(0, 0)
        grid_layout.setRowStretch(1, 1)
        grid_layout.setRowStretch(2, 0)
        grid_layout.setColumnStretch(0, 0)
        grid_layout.setColumnStretch(1, 1)
        grid_layout.setColumnStretch(2, 0)
        grid_layout.setContentsMargins(QMargins(0, 0, 0, 0))
        grid_layout.setSizeConstraint(QLayout.SetMinimumSize)
        grid_layout.addWidget(central_pane, 1, 1)
        primary_pane.setLayout(grid_layout)

        # Stacked layout: index 0 is the primary pane; index 1 is
        # reserved for a maximized widget (see setMaximizedWidget).
        area_layout = QDockAreaLayout()
        area_layout.setContentsMargins(QMargins(0, 0, 0, 0))
        area_layout.setSizeConstraint(QLayout.SetMinimumSize)
        area_layout.insertWidget(0, primary_pane)
        self.setLayout(area_layout)

        # Pick up any spacing declared in the current style sheet.
        self.updateSpacing()
#--------------------------------------------------------------------------
# Protected API
#--------------------------------------------------------------------------
def event(self, event):
""" A generic event handler for the dock area.
"""
if event.type() == QEvent.StyleChange:
self.updateSpacing()
return super(QDockArea, self).event(event)
#--------------------------------------------------------------------------
# Public API
#--------------------------------------------------------------------------
def updateSpacing(self):
""" Update the primary layout spacing for the dock area.
This method will extract spacing value defined in the style
sheet for the dock area and apply it to the spacing between
the dock bars and the central widget.
"""
opt = QStyleOption()
opt.initFrom(self)
style = self.style()
# hack to get the style sheet 'spacing' property.
spacing = style.pixelMetric(QStyle.PM_ToolBarItemSpacing, opt, self)
grid_layout = self._primary_pane.layout()
grid_layout.setVerticalSpacing(spacing)
grid_layout.setHorizontalSpacing(spacing)
    def centralPane(self):
        """ Get the central pane for the dock area.

        This method is used by the dock bar manager to access the
        central layout pane. It should not normally be called by user
        code.

        Returns
        -------
        result : QWidget
            The central pane for the dock area.

        """
        return self._central_pane
    def primaryPane(self):
        """ Get the primary pane for the dock area.

        This method is used by the dock bar manager to access the
        primary layout pane. It should not normally be called by user
        code.

        Returns
        -------
        result : QWidget
            The primary pane for the dock area.

        """
        return self._primary_pane
def centralWidget(self):
""" Get the central dock widget for the area.
This method is called by the dock manager which handles the
dock area. It should not normally be called by user code.
Returns
-------
result : QWidget or None
The central dock widget for the area, or None if no widget
is installed.
"""
item = self._central_pane.layout().itemAt(0)
if item is not None:
return item.widget()
def setCentralWidget(self, widget):
""" Set the central widget for the dock area.
This method is called by the dock manager which handles the
dock area. It should not normally be called by user code.
Parameters
----------
widget : QWidget
The central widget for the dock area.
"""
layout = self._central_pane.layout()
item = layout.itemAt(0)
if item is not None:
old = item.widget()
if old is widget:
return
old.hide()
old.setParent(None)
if widget is not None:
layout.addWidget(widget)
# lower the widget to keep it stacked behind any pinned
# containers which are in the slide-out position.
widget.lower()
widget.show()
    def maximizedWidget(self):
        """ Get the widget that is set as the maximized widget.

        Returns
        -------
        result : QWidget or None
            The widget which is maximized over the dock area.

        """
        # Index 1 of the stacked area layout is reserved for the
        # maximized widget; index 0 holds the primary pane.
        return self.layout().widget(1)
def setMaximizedWidget(self, widget):
""" Set the widget to maximize over the dock area.
Returns
-------
result : QWidget or None
The widget to maximize over the dock area.
"""
old = self.maximizedWidget()
if old is not None:
if old is widget:
return
old.hide()
old.setParent(None)
if widget is not None:
layout = self.layout()
layout.insertWidget(1, widget)
layout.setCurrentIndex(1)
widget.show()
    def addToDockBar(self, container, position, index=-1):
        """ Add a container to the dock bar at the given position.

        Parameters
        ----------
        container : QDockContainer
            The dock container to add to the dock bar. The container
            should be unplugged from any other layout before calling
            this method.

        position : QDockBar.Position
            The enum value specifying the dock bar to which the
            container should be added.

        index : int, optional
            The index at which to insert the item. The default is -1
            and will append the item to the dock bar.

        """
        # Dock bar bookkeeping is delegated entirely to the manager.
        self._dock_bar_manager.addContainer(container, position, index)
    def removeFromDockBar(self, container):
        """ Remove a container previously added to a dock bar.

        Parameters
        ----------
        container : QDockContainer
            The dock container to remove from the dock bar.

        """
        # Dock bar bookkeeping is delegated entirely to the manager.
        self._dock_bar_manager.removeContainer(container)
    def dockBarGeometry(self, position):
        """ Get the geometry of the dock bar at the given position.

        Parameters
        ----------
        position : QDockBar.Position
            The enum value specifying the dock bar of interest.

        Returns
        -------
        result : QRect
            The geometry of the given dock bar expressed in area
            coordinates. If no dock bar exists at the given position,
            an invalid QRect will be returned.

        """
        # Geometry bookkeeping lives in the dock bar manager.
        return self._dock_bar_manager.dockBarGeometry(position)
    def dockBarContainers(self):
        """ Get the containers held in the dock bars.

        Returns
        -------
        result : list
            A list of tuples of the form (container, position).

        """
        return self._dock_bar_manager.dockBarContainers()
    def dockBarPosition(self, container):
        """ Get the dock bar position of the given container.

        Parameters
        ----------
        container : QDockContainer
            The dock container of interest.

        Returns
        -------
        result : QDockBar.Position or None
            The position of the container, or None if the container
            does not exist in the manager.

        """
        return self._dock_bar_manager.dockBarPosition(container)
    def extendFromDockBar(self, container):
        """ Extend the given container from its dock bar.

        If the container does not exist in a dock bar, this is a no-op.

        Parameters
        ----------
        container : QDockContainer
            The dock container of interest.

        """
        self._dock_bar_manager.extendContainer(container)
    def retractToDockBar(self, container):
        """ Retract the given container into its dock bar.

        If the container does not exist in a dock bar, this is a no-op.

        Parameters
        ----------
        container : QDockContainer
            The dock container of interest.

        """
        self._dock_bar_manager.retractContainer(container)
    def clearDockBars(self):
        """ Clear the dock bars from the dock area.

        This method is called by the dock manager when the dock area
        is reset. It should not be called directly by user code.

        """
        self._dock_bar_manager.clearDockBars()
def isEmpty(self):
""" Get whether or not the dock area is empty.
Returns
-------
result : bool
True if the dock area is empty, False otherwise.
"""
if self.centralWidget() is not None:
return False
if self.maximizedWidget() is not None:
return False
return self._dock_bar_manager.isEmpty()
def tabPosition(self):
""" Get the default position for newly created tab widgets.
The tab position is inherited from an ancestor dock area unless
it is explicitly set by the user.
Returns
-------
result : QTabWidget.TabPosition
The position for dock area tabs. If the value has not been
set by the user and there is no ancestor dock area, the
default is QTabWidget.North.
"""
pos = self._tab_position
if pos is not None:
return pos
p = self.parent()
while p is not None:
if isinstance(p, QDockArea):
return p.tabPosition()
p = p.parent()
return QTabWidget.North
    def setTabPosition(self, position):
        """ Set the default position for newly created tab widgets.

        Parameters
        ----------
        position : QTabWidget.TabPosition
            The position for the tabs of newly created tab widgets.
            Setting an explicit value overrides inheritance from any
            ancestor dock area (see tabPosition).

        """
        self._tab_position = position
def dockEventsEnabled(self):
""" Get whether dock events are enabled for the area.
Returns
-------
result : bool
| |
dryrun for details of the dryrun mode.''')
runmode.add_argument(
'-s',
choices=['default', 'ignore', 'force', 'build', 'assert'],
default='default',
metavar='SIGMODE',
dest='__sig_mode__',
help='''How runtime signature would be handled, which can be "default"
(save and use signature, default mode in batch mode), "ignore"
(ignore runtime signature, default mode in interactive mode),
"force" (ignore existing signature and overwrite them while
executing the workflow), "build" (build new or overwrite
existing signature from existing environment and output files), and
"assert" for validating existing files against their signatures.
Please refer to online documentation for details about the
use of runtime signatures.''')
runmode.add_argument(
'-T',
action='store_true',
dest='trace_existing',
help='''Trace existing targets and re-execute the steps that generate
them to make sure that the targets are current.''')
# run in tapping mode etc
runmode.add_argument(
'-m', nargs='+', dest='exec_mode', help=argparse.SUPPRESS)
output = parser.add_argument_group(
title='Output options', description='''Output of workflow''')
output.add_argument(
'-d',
nargs='?',
default='',
metavar='DAG',
dest='__dag__',
help='''Output Direct Acyclic Graph (DAGs) in graphiviz .dot format.
Because DAG and status of nodes will change during the execution of
workflow, multiple DAGs will be written to the specified file with
names {workflow}_1, {workflow}_2 etc. The dot file would be named
{script_name}_{timestamp}.dot unless a separate filename is specified.'''
)
output.add_argument(
'-p',
nargs='?',
default='',
metavar='REPORT',
dest='__report__',
help='''Output a report that summarizes the execution of the
workflow after the completion of the execution. This includes command line,
steps executed, tasks executed, CPU/memory of tasks, and DAG if option -d
is also specified. The report will by be named {script_name}_{timestamp}.html
unless a separate filename is specified.''')
output.add_argument(
'-v',
dest='verbosity',
type=int,
choices=range(5),
default=2,
help='''Output error (0), warning (1), info (2) and debug (3)
information to standard output (default to 2). More debug information could be
generated by setting environmental variable SOS_DEBUG to comma separated topics
of GENERAL, WORKER, CONTROLLER, STEP, VARIABLE, EXECUTOR, TARGET, ZERONQ, TASK,
DAG, and ACTION, or ALL for all debug information''')
parser.set_defaults(func=cmd_run)
return parser
def cmd_run(args, workflow_args):
    """Execute a SoS workflow (implementation of subcommand ``run``).

    Parameters
    ----------
    args :
        Parsed argument namespace produced by the ``run`` subparser.
    workflow_args : list
        Remaining ``--option value`` style arguments that are passed
        to the workflow itself.

    The function exits the process (``sys.exit``) on remote execution
    and on error; it does not return a value.
    """
    #import multiprocessing as mp
    # #562, #558, #493
    #
    # if sys.platform != 'win32':
    #     mp.set_start_method('forkserver')

    from .utils import env, get_traceback, load_config_files
    from .parser import SoS_Script
    if args.__remote__ is not None:
        # if executing on a remote host...
        from .hosts import Host
        load_config_files(args.__config__)
        host = Host(args.__remote__)
        from .utils import remove_arg
        #
        # copy script to remote host...
        host.send_to_host(args.script)
        # Rebuild the local command line for the remote side, stripping
        # the options that only make sense locally.
        argv = remove_arg(sys.argv, '-r')
        # -c only point to local config file.
        argv = remove_arg(argv, '-c')
        # remove --slave mode because the master cannot reach remote slave
        argv = remove_arg(argv, '-m')
        # replace absolute path with relative one because remote sos might have
        # a different path.
        if os.path.basename(argv[0]) == 'sos':
            argv[0] = 'sos'
        elif os.path.basename(argv[0]) == 'sos-runner':
            argv[0] = 'sos-runner'
        # execute the command on remote host and exit with its status
        sys.exit(host._host_agent.check_call(argv, under_workdir=True))
    # Resolve -d / -p defaults: None means the flag was given without a
    # value (use a timestamped default name); '' means no flag at all.
    dt = datetime.datetime.now().strftime('%m%d%y_%H%M')
    if args.__dag__ is None:
        args.__dag__ = f'{os.path.splitext(args.script)[0]}_{dt}.dot'
    elif args.__dag__ == '':
        args.__dag__ = None
    if args.__report__ is None:
        args.__report__ = f'{os.path.splitext(args.script)[0]}_{dt}.html'
    elif args.__report__ == '':
        args.__report__ = None
    env.verbosity = args.verbosity
    if args.__report__ and args.__dag__:
        # DAG animation in the report needs optional third-party
        # packages and the graphviz 'dot' executable; fail early with
        # an explicit message if any is missing.
        try:
            import graphviz
            import PIL
            import imageio
            assert graphviz
            assert PIL
            assert imageio
        except ImportError as e:
            raise RuntimeError(
                f'Python packages graphviz, pillow, and imageio are required for the generation of DAG animation in workflow report (options -p with -d): {e}'
            )
        import shutil
        if not shutil.which('dot'):
            raise RuntimeError(
                f'Command dot from package graphviz is required for the generation of DAG animation in workflow report (options -p with -d)'
            )
    from .workflow_executor import Base_Executor

    if args.__bin_dirs__:
        # ~/.sos/bin is created on demand; any other missing directory
        # is treated as an error. All bin dirs are prepended to PATH.
        for d in args.__bin_dirs__:
            if d == '~/.sos/bin' and not os.path.isdir(os.path.expanduser(d)):
                os.makedirs(os.path.expanduser(d), exist_ok=True)
            elif not os.path.isdir(os.path.expanduser(d)):
                raise ValueError('directory does not exist: {}'.format(d))
        os.environ['PATH'] = os.pathsep.join([
            os.path.expanduser(x) for x in args.__bin_dirs__
        ]) + os.pathsep + os.environ['PATH']

    try:
        # workflow args has to be in the format of --arg, not positional, not -a
        if workflow_args and not workflow_args[0].startswith('--'):
            raise ValueError("Unrecognized command line option {}".format(
                ' '.join(workflow_args)))
        script = SoS_Script(filename=args.script)
        workflow = script.workflow(
            args.workflow, use_default=not args.__targets__)
        config = {
            'config_file': args.__config__,
            'output_dag': args.__dag__,
            'output_report': args.__report__,
            'default_queue': args.__queue__,
            'max_procs': args.__max_procs__,
            'max_running_jobs': args.__max_running_jobs__,
            'sig_mode': 'ignore' if args.dryrun else args.__sig_mode__,
            'run_mode': 'dryrun' if args.dryrun else 'run',
            'verbosity': args.verbosity,
            # for information only
            'workdir': os.getcwd(),
            'script': args.script,
            'workflow': args.workflow,
            'targets': args.__targets__,
            'bin_dirs': args.__bin_dirs__,
            'workflow_args': workflow_args,
            'trace_existing': args.trace_existing,
            # tapping etc
            'exec_mode': args.exec_mode
        }
        if args.exec_mode:
            # Hidden option -m implements the internal "tapping"
            # protocol. The only accepted form is:
            #   -m tapping slave SLAVE_ID LOG_PORT LISTENER_PORT CTRL_PORT
            if args.exec_mode[0] != 'tapping' or len(args.exec_mode) == 1:
                raise ValueError(
                    f'Unsupported exec_mode (option -m). {args.exec_mode} provided'
                )
            if args.exec_mode[1] != 'slave':
                raise ValueError(
                    f'Unsupported exec_mode (option -m). {args.exec_mode} provided'
                )
            if len(args.exec_mode) != 6:
                raise ValueError(
                    f'Unsupported exec_mode (option -m). {args.exec_mode} provided'
                )
            try:
                config['slave_id'] = args.exec_mode[2]
                config['sockets'] = {
                    'tapping_logging': int(args.exec_mode[3]),
                    'tapping_listener': int(args.exec_mode[4]),
                    'tapping_controller': int(args.exec_mode[5]),
                }
            except Exception as e:
                raise ValueError(
                    f'Unsupported exec_mode (option -m). {args.exec_mode} provided: {e}'
                )
            #env.logger.debug(f'Process being tapped as slave {config["slave_id"]} at {config["sockets"]["tapping_logging"]} (logger) and {config["sockets"]["tapping_controller"]} (controller)')
            config['exec_mode'] = args.exec_mode[1]
        executor = Base_Executor(workflow, args=workflow_args, config=config)
        # start controller
        executor.run(args.__targets__, mode='dryrun' if args.dryrun else 'run')
    except Exception as e:
        # Print the full traceback only at high verbosity; otherwise
        # log a single error line, then exit with a non-zero status.
        if args.verbosity and args.verbosity > 2:
            sys.stderr.write(get_traceback())
        env.logger.error(e)
        sys.exit(1)
#
# subcommand dryrun
#
def get_dryrun_parser(desc_only=False):
    """Build the argument parser for the ``dryrun`` subcommand.

    Parameters
    ----------
    desc_only : bool
        If True, return a bare parser with only the description set,
        used when assembling the top-level help message.

    Returns
    -------
    argparse.ArgumentParser
        Parser whose ``func`` default dispatches to ``cmd_dryrun``.
    """
    parser = argparse.ArgumentParser(
        'dryrun',
        # Typos fixed throughout the user-facing help text below
        # ("behavior differently", "scirpt", "exntesion", "graphiviz",
        # "outputed", "wrritten", "will by be named", "One of more").
        description='''Execute workflow in dryrun mode. This mode is identical
        to run mode except that 1). Actions might behave differently. In
        particular, script-running steps would print instead of execute script.
        2). Steps will generate empty output files if specified output do not
        exist after execution. 3). Signature mode is set to ignore. 4). Option
        -q is ignored so all tasks are executed locally. 5). Tasks are generated
        but not executed.''',
        epilog=workflow_options)
    parser.short_description = '''Execute workflow in dryrun mode'''
    if desc_only:
        return parser
    parser.add_argument('script', metavar='SCRIPT', help=script_help)
    parser.add_argument(
        'workflow', metavar='WORKFLOW', nargs='?', help=workflow_spec)
    parser.add_argument(
        '-c',
        dest='__config__',
        metavar='CONFIG_FILE',
        help='''A configuration file in the format of YAML/JSON. The content
            of the configuration file will be available as a dictionary
            CONF in the SoS script being executed.''')
    parser.add_argument(
        '-t',
        dest='__targets__',
        metavar='FILES',
        default=[],
        nargs='+',
        help='''One or more files or alias of other targets that
            will be the target of execution. If specified, SoS will execute
            only part of a workflow or multiple workflows or auxiliary steps
            to generate specified targets. ''')
    parser.add_argument(
        '-q',
        dest='__queue__',
        default='*',
        metavar='QUEUE',
        help='''host (server) or job queues to execute all tasks in the
            workflow. The queue can be defined in global or local sos
            configuration file, or a file specified by option --config. A host is
            assumed to be a remote machine with process type if no configuration
            is found. ''')
    runmode = parser.add_argument_group(
        title='Run mode options',
        description='''Control how the sos script is executed.''')
    runmode.add_argument(
        '-T',
        action='store_true',
        dest='trace_existing',
        help='''Trace existing targets and re-execute the steps that generate
            them to make sure that the targets are current.''')
    output = parser.add_argument_group(
        title='Output options', description='''Output of workflow''')
    output.add_argument(
        '-d',
        nargs='?',
        default='',
        metavar='DAG',
        dest='__dag__',
        help='''Output Direct Acyclic Graph (DAGs) in graphviz .dot format. An
            extension of ".dot" would be added automatically. Because DAG could
            change during the execution of workflow, multiple DAGs could be
            output with names $FILE_1.dot, $FILE_2.dot. If this option is
            specified without a name, the DAG would be written to the standard
            output.''')
    output.add_argument(
        '-p',
        nargs='?',
        default='',
        metavar='REPORT',
        dest='__report__',
        help='''Output a report that summarizes the execution of the
            workflow after the completion of the execution. This includes command line,
            steps executed, tasks executed, CPU/memory of tasks, and DAG if option -d
            is also specified. The report will be named {script_name}_{timestamp}.html
            unless a separate filename is specified.''')
    output.add_argument(
        '-v',
        dest='verbosity',
        type=int,
        choices=range(5),
        default=2,
        help='''Output error (0), warning (1), info (2), debug (3) and trace (4)
            information to standard output (default to 2).''')
    parser.set_defaults(func=cmd_dryrun)
    return parser
def cmd_dryrun(args, workflow_args):
    """Execute a workflow in dryrun mode (subcommand ``dryrun``).

    Forces the argument settings that define dryrun semantics onto
    *args*, then delegates to the regular ``run`` command.
    """
    # Dryrun is simply "run" with signatures ignored, a single local
    # process, no bin dirs, no remote host, and no tapping mode.
    overrides = {
        '__sig_mode__': 'ignore',
        '__max_procs__': 1,
        '__max_running_jobs__': 1,
        'dryrun': True,
        '__bin_dirs__': [],
        '__remote__': None,
        'exec_mode': None,
    }
    for name, value in overrides.items():
        setattr(args, name, value)
    cmd_run(args, workflow_args)
#
# subcommand push
#
def get_push_parser(desc_only=False):
parser = argparse.ArgumentParser(
'push',
description='''Push local files or directory to a remote host''')
if desc_only:
return parser
parser.add_argument(
'items',
nargs='+',
help='''Files or directories to be sent
to remote host. The location of remote files are determined by "path_map"
determined by "paths" definitions of local and remote hosts.''')
parser.add_argument(
'-t',
'--to',
dest='host',
help='''Remote host to | |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import future
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation_async # type: ignore
from google.api_core import operations_v1
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.tpu_v2alpha1.services.tpu import TpuAsyncClient
from google.cloud.tpu_v2alpha1.services.tpu import TpuClient
from google.cloud.tpu_v2alpha1.services.tpu import pagers
from google.cloud.tpu_v2alpha1.services.tpu import transports
from google.cloud.tpu_v2alpha1.types import cloud_tpu
from google.longrunning import operations_pb2
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
    """Return a dummy (cert, key) byte pair used as an mTLS cert source."""
    cert_bytes = b"cert bytes"
    key_bytes = b"key bytes"
    return cert_bytes, key_bytes
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
    """Return a substitute endpoint when the client targets localhost.

    This lets the test suite force a distinct mTLS endpoint for
    endpoint-selection tests: if the client's DEFAULT_ENDPOINT refers
    to localhost, a fixed sentinel hostname is returned instead;
    otherwise the client's own default endpoint is returned unchanged.
    """
    endpoint = client.DEFAULT_ENDPOINT
    if "localhost" in endpoint:
        return "foo.googleapis.com"
    return endpoint
def test__get_default_mtls_endpoint():
    """_get_default_mtls_endpoint maps *.googleapis.com hosts to mtls variants
    and leaves None, already-mtls, and non-googleapis hosts untouched."""
    cases = [
        (None, None),
        ("example.googleapis.com", "example.mtls.googleapis.com"),
        ("example.mtls.googleapis.com", "example.mtls.googleapis.com"),
        ("example.sandbox.googleapis.com", "example.mtls.sandbox.googleapis.com"),
        ("example.mtls.sandbox.googleapis.com", "example.mtls.sandbox.googleapis.com"),
        ("api.example.com", "api.example.com"),
    ]
    for endpoint, expected in cases:
        assert TpuClient._get_default_mtls_endpoint(endpoint) == expected
@pytest.mark.parametrize("client_class", [TpuClient, TpuAsyncClient,])
def test_tpu_client_from_service_account_info(client_class):
    """from_service_account_info() should build a client around the creds the
    factory parses, without touching the default host."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_info"
    ) as factory:
        factory.return_value = creds
        info = {"valid": True}
        client = client_class.from_service_account_info(info)
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        assert client.transport._host == "tpu.googleapis.com:443"
@pytest.mark.parametrize(
    "transport_class,transport_name",
    [
        (transports.TpuGrpcTransport, "grpc"),
        (transports.TpuGrpcAsyncIOTransport, "grpc_asyncio"),
    ],
)
def test_tpu_client_service_account_always_use_jwt(transport_class, transport_name):
    """Transports should convert service-account creds to self-signed JWT creds
    exactly when always_use_jwt_access=True."""
    # Flag on: the credentials must be swapped for JWT-access credentials.
    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=True)
        use_jwt.assert_called_once_with(True)
    # Flag off: the credentials are used as-is.
    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=False)
        use_jwt.assert_not_called()
@pytest.mark.parametrize("client_class", [TpuClient, TpuAsyncClient,])
def test_tpu_client_from_service_account_file(client_class):
    """from_service_account_file() and its _json alias should build clients
    around the creds the factory loads from the path."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_file"
    ) as factory:
        factory.return_value = creds
        client = client_class.from_service_account_file("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        # from_service_account_json is an alias of from_service_account_file.
        client = client_class.from_service_account_json("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        assert client.transport._host == "tpu.googleapis.com:443"
def test_tpu_client_get_transport_class():
    """get_transport_class() should return a registered transport; with no
    argument the default, with "grpc" the gRPC transport specifically."""
    transport = TpuClient.get_transport_class()
    available_transports = [
        transports.TpuGrpcTransport,
    ]
    assert transport in available_transports
    transport = TpuClient.get_transport_class("grpc")
    assert transport == transports.TpuGrpcTransport
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (TpuClient, transports.TpuGrpcTransport, "grpc"),
        (TpuAsyncClient, transports.TpuGrpcAsyncIOTransport, "grpc_asyncio"),
    ],
)
@mock.patch.object(TpuClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TpuClient))
@mock.patch.object(
    TpuAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TpuAsyncClient)
)
def test_tpu_client_client_options(client_class, transport_class, transport_name):
    """Client construction should honor ClientOptions (api_endpoint,
    quota_project_id) and the GOOGLE_API_USE_MTLS_ENDPOINT /
    GOOGLE_API_USE_CLIENT_CERTIFICATE environment variables."""
    # Check that if channel is provided we won't create a new one.
    with mock.patch.object(TpuClient, "get_transport_class") as gtc:
        transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
        client = client_class(transport=transport)
        gtc.assert_not_called()
    # Check that if channel is provided via str we will create a new one.
    with mock.patch.object(TpuClient, "get_transport_class") as gtc:
        client = client_class(transport=transport_name)
        gtc.assert_called()
    # Check the case api_endpoint is provided.
    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(transport=transport_name, client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_MTLS_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
    # unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
        with pytest.raises(MutualTLSChannelError):
            client = client_class(transport=transport_name)
    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
    ):
        with pytest.raises(ValueError):
            client = client_class(transport=transport_name)
    # Check the case quota_project_id is provided
    options = client_options.ClientOptions(quota_project_id="octopus")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id="octopus",
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,use_client_cert_env",
    [
        (TpuClient, transports.TpuGrpcTransport, "grpc", "true"),
        (TpuAsyncClient, transports.TpuGrpcAsyncIOTransport, "grpc_asyncio", "true"),
        (TpuClient, transports.TpuGrpcTransport, "grpc", "false"),
        (TpuAsyncClient, transports.TpuGrpcAsyncIOTransport, "grpc_asyncio", "false"),
    ],
)
@mock.patch.object(TpuClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TpuClient))
@mock.patch.object(
    TpuAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TpuAsyncClient)
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_tpu_client_mtls_env_auto(
    client_class, transport_class, transport_name, use_client_cert_env
):
    """With GOOGLE_API_USE_MTLS_ENDPOINT=auto, the endpoint and client cert
    passed to the transport depend on GOOGLE_API_USE_CLIENT_CERTIFICATE and on
    whether a cert source (explicit or ADC) is available."""
    # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
    # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
    # Check the case client_cert_source is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        options = client_options.ClientOptions(
            client_cert_source=client_cert_source_callback
        )
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options, transport=transport_name)
            if use_client_cert_env == "false":
                expected_client_cert_source = None
                expected_host = client.DEFAULT_ENDPOINT
            else:
                expected_client_cert_source = client_cert_source_callback
                expected_host = client.DEFAULT_MTLS_ENDPOINT
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=expected_host,
                scopes=None,
                client_cert_source_for_mtls=expected_client_cert_source,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case ADC client cert is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=True,
            ):
                with mock.patch(
                    "google.auth.transport.mtls.default_client_cert_source",
                    return_value=client_cert_source_callback,
                ):
                    if use_client_cert_env == "false":
                        # NOTE(review): `client` here is still the instance bound in
                        # the previous with-block; DEFAULT_ENDPOINT is a class-level
                        # attribute, so the lookup works, but this relies on that
                        # leak — confirm against the generator's intent.
                        expected_host = client.DEFAULT_ENDPOINT
                        expected_client_cert_source = None
                    else:
                        expected_host = client.DEFAULT_MTLS_ENDPOINT
                        expected_client_cert_source = client_cert_source_callback
                    patched.return_value = None
                    client = client_class(transport=transport_name)
                    patched.assert_called_once_with(
                        credentials=None,
                        credentials_file=None,
                        host=expected_host,
                        scopes=None,
                        client_cert_source_for_mtls=expected_client_cert_source,
                        quota_project_id=None,
                        client_info=transports.base.DEFAULT_CLIENT_INFO,
                        always_use_jwt_access=True,
                    )
    # Check the case client_cert_source and ADC client cert are not provided.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=False,
            ):
                patched.return_value = None
                client = client_class(transport=transport_name)
                patched.assert_called_once_with(
                    credentials=None,
                    credentials_file=None,
                    host=client.DEFAULT_ENDPOINT,
                    scopes=None,
                    client_cert_source_for_mtls=None,
                    quota_project_id=None,
                    client_info=transports.base.DEFAULT_CLIENT_INFO,
                    always_use_jwt_access=True,
                )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (TpuClient, transports.TpuGrpcTransport, "grpc"),
        (TpuAsyncClient, transports.TpuGrpcAsyncIOTransport, "grpc_asyncio"),
    ],
)
def test_tpu_client_client_options_scopes(
    client_class, transport_class, transport_name
):
    """Scopes given in ClientOptions should be forwarded to the transport."""
    # Check the case scopes are provided.
    options = client_options.ClientOptions(scopes=["1", "2"],)
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=["1", "2"],
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (TpuClient, transports.TpuGrpcTransport, "grpc"),
        (TpuAsyncClient, transports.TpuGrpcAsyncIOTransport, "grpc_asyncio"),
    ],
)
def test_tpu_client_client_options_credentials_file(
    client_class, transport_class, transport_name
):
    """A credentials-file path in ClientOptions should reach the transport."""
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(credentials_file="credentials.json")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
def test_tpu_client_client_options_from_dict():
    """A plain dict should be accepted in place of a ClientOptions instance."""
    with mock.patch(
        "google.cloud.tpu_v2alpha1.services.tpu.transports.TpuGrpcTransport.__init__"
    ) as grpc_transport:
        grpc_transport.return_value = None
        client = TpuClient(client_options={"api_endpoint": "squid.clam.whelk"})
        grpc_transport.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize("request_type", [cloud_tpu.ListNodesRequest, dict,])
def test_list_nodes(request_type, transport: str = "grpc"):
    """list_nodes() should issue exactly one ListNodesRequest and wrap the RPC
    response in a ListNodesPager exposing its fields.

    Fix: the canonical generated placeholder "next_page_token_value" had been
    mangled into "<PASSWORD>token_<PASSWORD>" scrubber artifacts; restored so
    the mocked response and the assertion agree on a meaningful value.
    """
    client = TpuClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_nodes), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = cloud_tpu.ListNodesResponse(
            next_page_token="next_page_token_value", unreachable=["unreachable_value"],
        )
        response = client.list_nodes(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_tpu.ListNodesRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListNodesPager)
    assert response.next_page_token == "next_page_token_value"
    assert response.unreachable == ["unreachable_value"]
def test_list_nodes_empty_call():
    """Calling list_nodes() with no arguments still sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = TpuClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_nodes), "__call__") as call:
        client.list_nodes()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_tpu.ListNodesRequest()
@pytest.mark.asyncio
async def test_list_nodes_async(
transport: str = "grpc_asyncio", request_type=cloud_tpu.ListNodesRequest
):
client = TpuAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_nodes), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cloud_tpu.ListNodesResponse(
next_page_token="<PASSWORD>token_<PASSWORD>",
unreachable=["unreachable_value"],
)
)
response = await client.list_nodes(request)
# Establish | |
# src/data/augmentation.py
import random
import math
from functools import partial, wraps
import numpy as np
import cv2
__all__ = [
'Compose', 'Choose',
'Scale', 'DiscreteScale',
'Flip', 'HorizontalFlip', 'VerticalFlip', 'Rotate',
'Crop', 'MSCrop',
'Shift', 'XShift', 'YShift',
'HueShift', 'SaturationShift', 'RGBShift', 'RShift', 'GShift', 'BShift',
'PCAJitter',
'ContraBrightScale', 'ContrastScale', 'BrightnessScale',
'AddGaussNoise'
]
rand = random.random
randi = random.randint
choice = random.choice
uniform = random.uniform
# gauss = random.gauss
gauss = random.normalvariate # This one is thread-safe
# The transformations treat 2-D or 3-D numpy ndarrays only, with the optional 3rd dim as the channel dim
def _istuple(x): return isinstance(x, (tuple, list))
class Transform:
    """Base class for array transforms.

    Subclasses implement ``_transform`` (and ``_set_rand_param`` when
    ``random_state`` is True, in which case fresh random parameters are drawn
    once per call). Calling the object applies the transform to every
    positional argument: one argument yields one result, several arguments
    yield a tuple of results (all sharing the same random parameters).
    """

    def __init__(self, random_state=False):
        self.random_state = random_state

    def _transform(self, x):
        raise NotImplementedError

    def __call__(self, *args):
        if self.random_state:
            self._set_rand_param()
        assert len(args) > 0
        if len(args) == 1:
            return self._transform(args[0])
        return tuple(self._transform(a) for a in args)

    def _set_rand_param(self):
        raise NotImplementedError
class Compose:
    """Chain several transforms, feeding each one's output to the next."""

    def __init__(self, *tf):
        assert len(tf) > 0
        self.tfs = tf

    def __call__(self, *x):
        if len(x) > 1:
            # Multiple inputs: each transform consumes and returns a tuple.
            for fn in self.tfs:
                x = fn(*x)
        else:
            # Single input: unwrap it and thread the value through the chain.
            x = x[0]
            for fn in self.tfs:
                x = fn(x)
        return x
class Choose:
    """Apply exactly one of the given transforms, chosen uniformly per call."""
    def __init__(self, *tf):
        assert len(tf) > 1
        self.tfs = tf
    def __call__(self, *x):
        # randi is module-level random.randint: both endpoints inclusive.
        idx = randi(0, len(self.tfs)-1)
        return self.tfs[idx](*x)
class Scale(Transform):
    """Resize an array by a scale factor.

    *scale* may be a single float (deterministic) or a (lo, hi) pair from
    which a factor is drawn uniformly on every call.

    Fix: ``cv2.resize`` takes ``dsize`` as (width, height), i.e. (cols, rows),
    but the original passed (h*scale, w*scale) — transposed dimensions for any
    non-square input. The target size is now built in (w, h) order.
    """

    def __init__(self, scale=(0.5, 1.0)):
        if _istuple(scale):
            assert len(scale) == 2
            self.scale_range = tuple(scale)  # sorted(scale)
            self.scale = float(scale[0])
            super(Scale, self).__init__(random_state=True)
        else:
            super(Scale, self).__init__(random_state=False)
            self.scale = float(scale)

    def _transform(self, x):
        # assert x.ndim == 3
        h, w = x.shape[:2]
        # cv2.resize expects dsize=(width, height).
        size = (int(w*self.scale), int(h*self.scale))
        if size == (w, h):
            return x
        # Nearest-neighbour for integer (label/mask) arrays, bilinear for floats.
        interp = cv2.INTER_LINEAR if np.issubdtype(x.dtype, np.floating) else cv2.INTER_NEAREST
        return cv2.resize(x, size, interpolation=interp)

    def _set_rand_param(self):
        self.scale = uniform(*self.scale_range)
class DiscreteScale(Scale):
    """Random scaling restricted to a fixed set of factors.

    With probability *keep_prob* the image is left at scale 1.0; otherwise a
    factor is drawn uniformly from *bins*.
    """

    def __init__(self, bins=(0.5, 0.75), keep_prob=0.5):
        super(DiscreteScale, self).__init__(scale=(min(bins), 1.0))
        self.bins = tuple(bins)
        self.keep_prob = float(keep_prob)

    def _set_rand_param(self):
        if rand() < self.keep_prob:
            self.scale = 1.0
        else:
            self.scale = choice(self.bins)
class Flip(Transform):
    # Flip or rotate
    # 'ud'/'lr' mirror vertically/horizontally, '90'/'180'/'270' rotate
    # clockwise, 'no' is the identity. direction=None -> random per call.
    _directions = ('ud', 'lr', 'no', '90', '180', '270')
    def __init__(self, direction=None):
        super(Flip, self).__init__(random_state=(direction is None))
        self.direction = direction
        if direction is not None: assert direction in self._directions
    def _transform(self, x):
        if self.direction == 'ud':
            ## Current torch version doesn't support negative stride of numpy arrays
            return np.ascontiguousarray(x[::-1])
        elif self.direction == 'lr':
            return np.ascontiguousarray(x[:,::-1])
        elif self.direction == 'no':
            return x
        elif self.direction == '90':
            # Clockwise
            return np.ascontiguousarray(self._T(x)[:,::-1])
        elif self.direction == '180':
            return np.ascontiguousarray(x[::-1,::-1])
        elif self.direction == '270':
            return np.ascontiguousarray(self._T(x)[::-1])
        else:
            raise ValueError('invalid flipping direction')
    def _set_rand_param(self):
        self.direction = choice(self._directions)
    @staticmethod
    def _T(x):
        # Swap the two spatial axes; a trailing channel axis (if any) is kept.
        return np.swapaxes(x, 0, 1)
class HorizontalFlip(Flip):
    # Restrict the parent to left-right flip or no-op.
    _directions = ('lr', 'no')
    def __init__(self, flip=None):
        # Bool-to-direction via bitwise-not indexing: ~True == -2 -> 'lr',
        # ~False == -1 -> 'no'. flip=None keeps the random behavior.
        if flip is not None: flip = self._directions[~flip]
        super(HorizontalFlip, self).__init__(direction=flip)
class VerticalFlip(Flip):
    # Restrict the parent to up-down flip or no-op.
    _directions = ('ud', 'no')
    def __init__(self, flip=None):
        # Same ~flip indexing trick as HorizontalFlip: True -> 'ud', False -> 'no'.
        if flip is not None: flip = self._directions[~flip]
        super(VerticalFlip, self).__init__(direction=flip)
class Rotate(Flip):
    # Rotation-only variant of Flip: clockwise 90/180/270 degrees or identity.
    _directions = ('90', '180', '270', 'no')
class Crop(Transform):
    """Crop a patch out of an array.

    *bounds* may be: a named inner region string from _inner_bounds, a 4-tuple
    of (left, top, right, lower) pixel bounds, or None for random mode, where
    *crop_size* fixes the patch size and two relative offsets are drawn per
    call by _set_rand_param.
    """
    _inner_bounds = ('bl', 'br', 'tl', 'tr', 't', 'b', 'l', 'r')
    def __init__(self, crop_size=None, bounds=None):
        # NOTE: '__no_bounds' is name-mangled inside the class body; it is a
        # plain local here, used consistently, so behavior is unaffected.
        __no_bounds = (bounds is None)
        super(Crop, self).__init__(random_state=__no_bounds)
        if __no_bounds:
            assert crop_size is not None
        else:
            if not((_istuple(bounds) and len(bounds)==4) or (isinstance(bounds, str) and bounds in self._inner_bounds)):
                raise ValueError('invalid bounds')
        self.bounds = bounds
        self.crop_size = crop_size if _istuple(crop_size) else (crop_size, crop_size)
    def _transform(self, x):
        h, w = x.shape[:2]
        if self.bounds == 'bl':
            return x[h//2:,:w//2]
        elif self.bounds == 'br':
            return x[h//2:,w//2:]
        elif self.bounds == 'tl':
            return x[:h//2,:w//2]
        elif self.bounds == 'tr':
            return x[:h//2,w//2:]
        elif self.bounds == 't':
            return x[:h//2]
        elif self.bounds == 'b':
            return x[h//2:]
        elif self.bounds == 'l':
            return x[:,:w//2]
        elif self.bounds == 'r':
            return x[:,w//2:]
        elif len(self.bounds) == 2:
            # Random mode: bounds holds two relative offsets in [0, 1).
            # NOTE(review): this tuple comparison is lexicographic, not
            # element-wise — confirm crop_size never exceeds the image size
            # in only the second dimension.
            assert self.crop_size <= (h, w)
            ch, cw = self.crop_size
            if (ch,cw) == (h,w):
                return x
            cx, cy = int((w-cw+1)*self.bounds[0]), int((h-ch+1)*self.bounds[1])
            return x[cy:cy+ch, cx:cx+cw]
        else:
            # Explicit 4-tuple mode.
            left, top, right, lower = self.bounds
            return x[top:lower, left:right]
    def _set_rand_param(self):
        self.bounds = (rand(), rand())
class MSCrop(Crop):
    """Paired crop for a low-res/high-res image pair related by *scale*:
    the same window (scaled by *scale* for the hr image) is cut from both."""
    def __init__(self, scale, crop_size=None):
        super(MSCrop, self).__init__(crop_size)
        self.scale = scale # Scale factor
    def __call__(self, lr, hr):
        if self.random_state:
            self._set_rand_param()
        # I've noticed that random scaling bounds may cause pixel misalignment
        # between the lr-hr pair, which significantly damages the training
        # effect, thus the quadruple mode is desired
        left, top, cw, ch = self._get_quad(*lr.shape[:2])
        self._set_quad(left, top, cw, ch)
        lr_crop = self._transform(lr)
        # Scale the window so the hr crop covers the same image region.
        left, top, cw, ch = [int(it*self.scale) for it in (left, top, cw, ch)]
        self._set_quad(left, top, cw, ch)
        hr_crop = self._transform(hr)
        return lr_crop, hr_crop
    def _get_quad(self, h, w):
        # Convert the relative random offsets in self.bounds into a
        # (left, top, width, height) pixel quadruple.
        ch, cw = self.crop_size
        cx, cy = int((w-cw+1)*self.bounds[0]), int((h-ch+1)*self.bounds[1])
        return cx, cy, cw, ch
    def _set_quad(self, left, top, cw, ch):
        # Switch the parent into explicit (left, top, right, lower) mode.
        self.bounds = (left, top, left+cw, top+ch)
class Shift(Transform):
    """Translate an image by a fraction of its width/height.

    x_shift / y_shift are relative offsets; a (lo, hi) tuple enables random
    sampling per call. circular=True wraps pixels around; otherwise the
    vacated area is zero-filled.
    """
    def __init__(self, x_shift=(-0.0625, 0.0625), y_shift=(-0.0625, 0.0625), circular=True):
        super(Shift, self).__init__(random_state=_istuple(x_shift) or _istuple(y_shift))
        if _istuple(x_shift):
            self.xshift_range = tuple(x_shift)
            self.xshift = float(x_shift[0])
        else:
            self.xshift = float(x_shift)
            self.xshift_range = (self.xshift, self.xshift)
        if _istuple(y_shift):
            self.yshift_range = tuple(y_shift)
            self.yshift = float(y_shift[0])
        else:
            self.yshift = float(y_shift)
            self.yshift_range = (self.yshift, self.yshift)
        self.circular = circular
    def _transform(self, im):
        h, w = im.shape[:2]
        # Negated so that a positive relative shift moves content toward +x/+y.
        xsh = -int(self.xshift*w)
        ysh = -int(self.yshift*h)
        if self.circular:
            # Shift along the x-axis
            im_shifted = np.concatenate((im[:, xsh:], im[:, :xsh]), axis=1)
            # Shift along the y-axis
            im_shifted = np.concatenate((im_shifted[ysh:], im_shifted[:ysh]), axis=0)
        else:
            # Non-circular: pair the image with a zero canvas so the wrapped
            # part is replaced by zeros instead.
            zeros = np.zeros(im.shape)
            im1, im2 = (zeros, im) if xsh < 0 else (im, zeros)
            im_shifted = np.concatenate((im1[:, xsh:], im2[:, :xsh]), axis=1)
            im1, im2 = (zeros, im_shifted) if ysh < 0 else (im_shifted, zeros)
            im_shifted = np.concatenate((im1[ysh:], im2[:ysh]), axis=0)
        return im_shifted
    def _set_rand_param(self):
        self.xshift = uniform(*self.xshift_range)
        self.yshift = uniform(*self.yshift_range)
class XShift(Shift):
    """Horizontal-only shift (vertical offset fixed at 0)."""
    def __init__(self, x_shift=(-0.0625, 0.0625), circular=True):
        super(XShift, self).__init__(x_shift, 0.0, circular)
class YShift(Shift):
    """Vertical-only shift (horizontal offset fixed at 0)."""
    def __init__(self, y_shift=(-0.0625, 0.0625), circular=True):
        super(YShift, self).__init__(0.0, y_shift, circular)
# Color jittering and transformation
# The followings partially refer to https://github.com/albu/albumentations/
class _ValueTransform(Transform):
    """Base for intensity transforms whose output must stay inside *limit*."""
    def __init__(self, rs, limit=(0, 255)):
        super().__init__(rs)
        self.limit = limit
        self.limit_range = limit[1] - limit[0]
    @staticmethod
    def keep_range(tf):
        # Decorator: run the wrapped transform on a float32, range-clipped
        # copy and clip the result back into obj.limit with the input dtype.
        @wraps(tf)
        def wrapper(obj, x):
            # # Make a copy
            # x = x.copy()
            dtype = x.dtype
            # The calculations are done with floating type in case of overflow
            # This is a stupid yet simple way
            x = tf(obj, np.clip(x.astype(np.float32), *obj.limit))
            # Convert back to the original type
            return np.clip(x, *obj.limit).astype(dtype)
        return wrapper
class ColorJitter(_ValueTransform):
    """Additive per-channel intensity jitter.

    *shift* is a fixed value or a (lo, hi) range, given per touched channel;
    ranges switch the transform into random mode. Subclasses narrow _channel
    and may plug in color-space conversions via convert_to / convert_back.
    """
    # Channel indices this jitter touches; subclasses override (e.g. (0,)).
    _channel = (0,1,2)
    def __init__(self, shift=((-20,20), (-20,20), (-20,20)), limit=(0,255)):
        super().__init__(False, limit)
        _nc = len(self._channel)
        if _nc == 1:
            if _istuple(shift):
                rs = True
                self.shift = self.range = shift
            else:
                rs = False
                self.shift = (shift,)
                self.range = (shift, shift)
        else:
            if _istuple(shift):
                if len(shift) != _nc:
                    raise ValueError("please specify the shift value (or range) for every channel.")
                # Random only when every channel got a (lo, hi) range.
                rs = all(_istuple(s) for s in shift)
                self.shift = self.range = shift
            else:
                rs = False
                self.shift = [shift for _ in range(_nc)]
                self.range = [(shift, shift) for _ in range(_nc)]
        self.random_state = rs
        # Default color conversion is the identity; subclasses replace these.
        def _(x):
            return x
        self.convert_to = _
        self.convert_back = _
    @_ValueTransform.keep_range
    def _transform(self, x):
        x = self.convert_to(x)
        for i, c in enumerate(self._channel):
            x[...,c] = self._clip(x[...,c]+float(self.shift[i]))
        x = self.convert_back(x)
        return x
    def _clip(self, x):
        # Identity by default; subclasses clip/wrap to their channel's range.
        return x
    def _set_rand_param(self):
        if len(self._channel) == 1:
            self.shift = [uniform(*self.range)]
        else:
            self.shift = [uniform(*r) for r in self.range]
class HSVShift(ColorJitter):
    """Jitter applied in HSV space: RGB input is normalized, converted to HSV,
    shifted, and converted back."""
    def __init__(self, shift, limit):
        super().__init__(shift, limit)
        # Closures capture self; they replace the identity conversions that
        # ColorJitter.__init__ installed.
        def _convert_to(x):
            x = x.astype(np.float32)
            # Normalize to [0,1]
            x -= self.limit[0]
            x /= self.limit_range
            x = cv2.cvtColor(x, code=cv2.COLOR_RGB2HSV)
            return x
        def _convert_back(x):
            x = cv2.cvtColor(x.astype(np.float32), code=cv2.COLOR_HSV2RGB)
            return x * self.limit_range + self.limit[0]
        # Pack conversion methods
        self.convert_to = _convert_to
        self.convert_back = _convert_back
    def _clip(self, x):
        # Abstract: each HSV channel has its own valid range/wrapping rule.
        raise NotImplementedError
class HueShift(HSVShift):
    # Hue channel only.
    _channel = (0,)
    def __init__(self, shift=(-20, 20), limit=(0, 255)):
        super().__init__(shift, limit)
    def _clip(self, x):
        # Circular
        # Note that this works in Opencv 3.4.3, not yet tested under other versions
        # (wraps hue into the float32 HSV range used by OpenCV).
        x[x<0] += 360
        x[x>360] -= 360
        return x
class SaturationShift(HSVShift):
    # Saturation channel only.
    _channel = (1,)
    def __init__(self, shift=(-30, 30), limit=(0, 255)):
        super().__init__(shift, limit)
        # Saturation in float HSV lies in [0,1]; rescale the sampling range.
        # NOTE(review): only self.range is rescaled — with a scalar
        # (non-random) shift, self.shift keeps its unscaled value; confirm
        # scalar shifts are never used with this class.
        self.range = tuple(r / self.limit_range for r in self.range)
    def _clip(self, x):
        return np.clip(x, 0, 1.0)
class RGBShift(ColorJitter):
    """Additive jitter on all three RGB channels (identity color conversion)."""
    def __init__(self, shift=((-20,20), (-20,20), (-20,20)), limit=(0, 255)):
        super().__init__(shift, limit)
class RShift(RGBShift):
    # Red channel only.
    _channel = (0,)
    def __init__(self, shift=(-20,20), limit=(0, 255)):
        super().__init__(shift, limit)
class GShift(RGBShift):
    # Green channel only.
    _channel = (1,)
    def __init__(self, shift=(-20,20), limit=(0, 255)):
        super().__init__(shift, limit)
class BShift(RGBShift):
    # Blue channel only.
    _channel = (2,)
    def __init__(self, shift=(-20,20), limit=(0, 255)):
        super().__init__(shift, limit)
class PCAJitter(_ValueTransform):
def __init__(self, sigma=0.3, limit=(0, 255)):
# For RGB only
super().__init__(True, limit)
self.sigma = sigma
@_ValueTransform.keep_range
def _transform(self, x):
old_shape = x.shape
x = np.reshape(x, (-1,3), order='F') # For RGB
x_mean = np.mean(x, 0)
x = x - x_mean
cov_x = np.cov(x, rowvar=False)
eig_vals, | |
# -*- coding: utf-8 -*-
"""
Diagnostic Record Read/Write
------------------------------
These need to be tied into a the current server context
or linked to the appropriate data
"""
import struct
from pymodbus3.constants import ModbusStatus, ModbusPlusOperation
from pymodbus3.pdu import ModbusRequest
from pymodbus3.pdu import ModbusResponse
from pymodbus3.device import ModbusControlBlock
from pymodbus3.utilities import pack_bitstring
_MCB = ModbusControlBlock()
# Diagnostic Function Codes Base Classes
# diagnostic 08, 00-18,20
# TODO Make sure all the data is decoded from the response
class DiagnosticStatusRequest(ModbusRequest):
    """
    This is a base class for all of the diagnostic request functions
    """
    function_code = 0x08
    _rtu_frame_size = 8

    def __init__(self, **kwargs):
        """
        Base initializer for a diagnostic request
        """
        ModbusRequest.__init__(self, **kwargs)
        self.message = None

    def encode(self):
        """
        Base encoder for a diagnostic response
        we encode the data set in self.message

        :returns: The encoded packet
        """
        packet = struct.pack('>H', self.sub_function_code)
        # message may be a str, bytes, list of 16-bit words, or a single int.
        if self.message is not None:
            if isinstance(self.message, str):
                packet += self.message.encode()
            elif isinstance(self.message, bytes):
                packet += self.message
            elif isinstance(self.message, list):
                for piece in self.message:
                    packet += struct.pack('>H', piece)
            elif isinstance(self.message, int):
                packet += struct.pack('>H', self.message)
        return packet

    def decode(self, data):
        """ Base decoder for a diagnostic request

        :param data: The data to decode into the function code
        """
        # NOTE(review): '>HH' assumes exactly one 16-bit data word after the
        # sub-function code, while encode() can emit variable-length payloads —
        # confirm longer frames are rejected/handled by the framer upstream.
        self.sub_function_code, self.message = struct.unpack('>HH', data)
class DiagnosticStatusResponse(ModbusResponse):
    """
    This is a base class for all of the diagnostic response functions

    It works by performing all of the encoding and decoding of variable
    data and lets the higher classes define what extra data to append
    and how to execute a request
    """
    function_code = 0x08
    _rtu_frame_size = 8

    def __init__(self, **kwargs):
        """
        Base initializer for a diagnostic response
        """
        # Set before the base init so the attribute exists even when decode()
        # has not run yet.
        self.sub_function_code = None
        ModbusResponse.__init__(self, **kwargs)
        self.message = None

    def encode(self):
        """
        Base encoder for a diagnostic response
        we encode the data set in self.message

        :returns: The encoded packet
        """
        # NOTE: mirrors DiagnosticStatusRequest.encode byte-for-byte.
        packet = struct.pack('>H', self.sub_function_code)
        if self.message is not None:
            if isinstance(self.message, str):
                packet += self.message.encode()
            elif isinstance(self.message, bytes):
                packet += self.message
            elif isinstance(self.message, list):
                for piece in self.message:
                    packet += struct.pack('>H', piece)
            elif isinstance(self.message, int):
                packet += struct.pack('>H', self.message)
        return packet

    def decode(self, data):
        """ Base decoder for a diagnostic response

        :param data: The data to decode into the function code
        """
        # NOTE(review): like the request base class, assumes exactly one
        # 16-bit payload word — confirm for multi-word responses.
        self.sub_function_code, self.message = struct.unpack('>HH', data)
class DiagnosticStatusSimpleRequest(DiagnosticStatusRequest):
    """
    A large majority of the diagnostic functions are simple
    status request functions. They work by sending 0x0000
    as data and their function code and they are returned
    2 bytes of data.

    If a function inherits this, they only need to implement
    the execute method
    """

    def __init__(self, data=0x0000, **kwargs):
        """
        General initializer for a simple diagnostic request

        The data defaults to 0x0000 if not provided as over half
        of the functions require it.

        :param data: The data to send along with the request
        """
        DiagnosticStatusRequest.__init__(self, **kwargs)
        # message is a single 16-bit word here (encode() packs ints directly).
        self.message = data

    def execute(self, *args):
        """ Base function to raise if not implemented """
        raise NotImplementedError('Diagnostic Message Has No Execute Method')
class DiagnosticStatusSimpleResponse(DiagnosticStatusResponse):
    """
    A large majority of the diagnostic functions are simple
    status request functions. They work by sending 0x0000
    as data and their function code and they are returned
    2 bytes of data.
    """

    def __init__(self, data=0x0000, **kwargs):
        """ General initializer for a simple diagnostic response

        :param data: The resulting data to return to the client
        """
        DiagnosticStatusResponse.__init__(self, **kwargs)
        # message is a single 16-bit word, mirroring the simple request.
        self.message = data
# region Diagnostic Sub Code 00
class ReturnQueryDataRequest(DiagnosticStatusRequest):
    """Loopback request (diagnostic sub-function 0x00).

    The data passed in the request data field is returned (looped back) in
    the response, so the entire response message mirrors the request.
    """

    sub_function_code = 0x0000

    def __init__(self, message=0x0000, **kwargs):
        """Initialize a new loopback request.

        :param message: The message (word or list of words) to send to loopback
        """
        DiagnosticStatusRequest.__init__(self, **kwargs)
        # Normalize to a list of 16-bit words for encoding.
        self.message = message if isinstance(message, list) else [message]

    def execute(self, *args):
        """Execute the loopback request (builds the response).

        :returns: The populated loopback response message
        """
        return ReturnQueryDataResponse(self.message)
class ReturnQueryDataResponse(DiagnosticStatusResponse):
    """Loopback response (diagnostic sub-function 0x00).

    Echoes the request's data field back verbatim; the response message is
    identical to the request message.
    """

    sub_function_code = 0x0000

    def __init__(self, message=0x0000, **kwargs):
        """Initialize a new loopback response.

        :param message: The message (word or list of words) to loopback
        """
        DiagnosticStatusResponse.__init__(self, **kwargs)
        # Normalize to a list of 16-bit words for encoding.
        self.message = message if isinstance(message, list) else [message]
# endregion
# region Diagnostic Sub Code 01
class RestartCommunicationsOptionRequest(DiagnosticStatusRequest):
    """Restart-communications request (diagnostic sub-function 0x01).

    The remote device's serial line port must be initialized and restarted,
    and all of its communications event counters are cleared. If the port is
    currently in Listen Only Mode, no response is returned; this is the only
    function that brings the port out of Listen Only Mode. Otherwise a normal
    response is returned before the restart is executed.
    """

    sub_function_code = 0x0001

    def __init__(self, toggle=False, **kwargs):
        """Initialize a new request.

        :param toggle: Set to True to toggle, False otherwise
        """
        DiagnosticStatusRequest.__init__(self, **kwargs)
        # Payload is a single On/Off status word.
        self.message = [ModbusStatus.On if toggle else ModbusStatus.Off]

    def execute(self, *args):
        """Clear the event log and restart.

        :returns: The initialized response message
        """
        # if _MCB.ListenOnly:
        return RestartCommunicationsOptionResponse(self.message)
class RestartCommunicationsOptionResponse(DiagnosticStatusResponse):
    """ Restart-communications diagnostic response (sub-function 0x01).

    Acknowledges a request to re-initialize the serial line port and clear
    all communications event counters. Sent before the restart happens,
    and only when the port is not in Listen Only Mode.
    """
    sub_function_code = 0x0001

    def __init__(self, toggle=False, **kwargs):
        """ Build a restart-communications response.

        :param toggle: Set to True if we toggled, False otherwise
        """
        DiagnosticStatusResponse.__init__(self, **kwargs)
        # Encode the toggle flag as a single-word payload.
        self.message = [ModbusStatus.On if toggle else ModbusStatus.Off]
# endregion
# region Diagnostic Sub Code 02
class ReturnDiagnosticRegisterRequest(DiagnosticStatusSimpleRequest):
    """ Diagnostic request 0x02: read the diagnostic register.

    The remote device replies with the current contents of its 16-bit
    diagnostic register.
    """
    sub_function_code = 0x0002

    def execute(self, *args):
        """ Run the request against the device control block.

        :returns: The response holding the packed diagnostic register
        """
        # Pack the register's bit flags into the response payload.
        return ReturnDiagnosticRegisterResponse(
            pack_bitstring(_MCB.get_diagnostic_register()))
class ReturnDiagnosticRegisterResponse(DiagnosticStatusSimpleResponse):
    """ Diagnostic response 0x02: carries the remote device's 16-bit
    diagnostic register contents back to the client.
    """
    sub_function_code = 0x0002
# endregion
# region Diagnostic Sub Code 03
class ChangeAsciiInputDelimiterRequest(DiagnosticStatusSimpleRequest):
    """ Diagnostic request 0x03: change the ASCII input delimiter.

    The 'CHAR' byte passed in the request data field replaces the default
    LF character as the end-of-message delimiter for future ASCII
    messages, useful when no Line Feed is wanted at the end of a message.
    """
    sub_function_code = 0x0003

    def execute(self, *args):
        """ Apply the new delimiter to the device control block.

        :returns: The response echoing the request payload
        """
        # The delimiter character travels in the high byte of the payload.
        _MCB.Delimiter = (self.message & 0xff00) >> 8
        return ChangeAsciiInputDelimiterResponse(self.message)
class ChangeAsciiInputDelimiterResponse(DiagnosticStatusSimpleResponse):
    """ Diagnostic response 0x03: change the ASCII input delimiter.

    Confirms that the 'CHAR' byte from the request data field is now the
    end-of-message delimiter for future ASCII messages, replacing the
    default LF character.
    """
    sub_function_code = 0x0003
# endregion
# region Diagnostic Sub Code 04
class ForceListenOnlyModeRequest(DiagnosticStatusSimpleRequest):
    """ Diagnostic request 0x04: force Listen Only Mode.

    Puts the addressed remote device into Listen Only Mode, isolating it
    from the other devices on the network so they can keep communicating
    without interruption. No response is transmitted.
    """
    sub_function_code = 0x0004

    def execute(self, *args):
        """ Switch the device control block into listen-only mode.

        :returns: The (never transmitted) response message
        """
        _MCB.ListenOnly = True
        return ForceListenOnlyModeResponse()
class ForceListenOnlyModeResponse(DiagnosticStatusResponse):
    """ Diagnostic response 0x04: force Listen Only Mode.

    Exists only so the framework has a response object to hand back;
    ``should_respond`` is False, so nothing is ever sent on the wire.
    """
    sub_function_code = 0x0004
    should_respond = False

    def __init__(self, **kwargs):
        """ Build an empty response that blocks any return transmission. """
        DiagnosticStatusResponse.__init__(self, **kwargs)
        # Empty payload: there is nothing to transmit.
        self.message = []
# endregion
# region Diagnostic Sub Code 10
class ClearCountersRequest(DiagnosticStatusSimpleRequest):
"""
The goal is to clear ll counters and the diagnostic register.
Also, counters are cleared | |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
import random
from odoo import api, models, fields, tools, _
from odoo.http import request
from odoo.exceptions import UserError, ValidationError
_logger = logging.getLogger(__name__)
class SaleOrder(models.Model):
    _inherit = "sale.order"
    # Lines mirrored for the website frontend; display-only (see help text).
    website_order_line = fields.One2many(
        'sale.order.line', 'order_id',
        string='Order Lines displayed on Website', readonly=True,
        help='Order Lines to be displayed on the website. They should not be used for computation purpose.',
    )
    cart_quantity = fields.Integer(compute='_compute_cart_info', string='Cart Quantity')
    payment_acquirer_id = fields.Many2one('payment.acquirer', string='Payment Acquirer', copy=False)
    payment_tx_id = fields.Many2one('payment.transaction', string='Transaction', copy=False)
    only_services = fields.Boolean(compute='_compute_cart_info', string='Only Services')
    @api.multi
    @api.depends('website_order_line.product_uom_qty', 'website_order_line.product_id')
    def _compute_cart_info(self):
        """Compute the total cart quantity and whether the cart holds only services."""
        for order in self:
            order.cart_quantity = int(sum(order.mapped('website_order_line.product_uom_qty')))
            order.only_services = all(l.product_id.type in ('service', 'digital') for l in order.website_order_line)
    @api.model
    def _get_errors(self, order):
        """Hook: return a list of checkout errors for `order` (none by default)."""
        return []
    @api.model
    def _get_website_data(self, order):
        """Hook: values handed to the website checkout templates."""
        return {
            'partner': order.partner_id.id,
            'order': order
        }
    @api.multi
    def _cart_find_product_line(self, product_id=None, line_id=None, **kwargs):
        """Return the order line(s) of this order matching `product_id`
        (restricted to `line_id` when given); empty recordset when the
        product carries untracked attributes and no line is targeted."""
        self.ensure_one()
        product = self.env['product.product'].browse(product_id)
        # split lines with the same product if it has untracked attributes
        if product and product.mapped('attribute_line_ids').filtered(lambda r: not r.attribute_id.create_variant) and not line_id:
            return self.env['sale.order.line']
        domain = [('order_id', '=', self.id), ('product_id', '=', product_id)]
        if line_id:
            domain += [('id', '=', line_id)]
        return self.env['sale.order.line'].sudo().search(domain)
    @api.multi
    def _website_product_id_change(self, order_id, product_id, qty=0):
        """Build the sale.order.line values for `product_id` at quantity `qty`,
        priced with the order's pricelist/partner context."""
        order = self.sudo().browse(order_id)
        product_context = dict(self.env.context)
        product_context.setdefault('lang', order.partner_id.lang)
        product_context.update({
            'partner': order.partner_id.id,
            'quantity': qty,
            'date': order.date_order,
            'pricelist': order.pricelist_id.id,
        })
        product = self.env['product.product'].with_context(product_context).browse(product_id)
        pu = product.price
        if order.pricelist_id and order.partner_id:
            order_line = order._cart_find_product_line(product.id)
            if order_line:
                # Re-apply tax-included corrections using the existing line's taxes.
                pu = self.env['account.tax']._fix_tax_included_price(pu, product.taxes_id, order_line[0].tax_id)
        return {
            'product_id': product_id,
            'product_uom_qty': qty,
            'order_id': order_id,
            'product_uom': product.uom_id.id,
            'price_unit': pu,
        }
    @api.multi
    def _get_line_description(self, order_id, product_id, attributes=None):
        """Build the line description: product display name, untracked
        attribute values, then the sale description, newline-separated."""
        if not attributes:
            attributes = {}
        order = self.sudo().browse(order_id)
        product_context = dict(self.env.context)
        product_context.setdefault('lang', order.partner_id.lang)
        product = self.env['product.product'].with_context(product_context).browse(product_id)
        name = product.display_name
        # add untracked attributes in the name
        untracked_attributes = []
        for k, v in attributes.items():
            # attribute should be like 'attribute-48-1' where 48 is the product_id, 1 is the attribute_id and v is the attribute value
            attribute_value = self.env['product.attribute.value'].sudo().browse(int(v))
            if attribute_value and not attribute_value.attribute_id.create_variant:
                untracked_attributes.append(attribute_value.name)
        if untracked_attributes:
            name += '\n%s' % (', '.join(untracked_attributes))
        if product.description_sale:
            name += '\n%s' % (product.description_sale)
        return name
    @api.multi
    def _cart_update(self, product_id=None, line_id=None, add_qty=0, set_qty=0, attributes=None, **kwargs):
        """ Add or set product quantity, add_qty can be negative """
        self.ensure_one()
        SaleOrderLineSudo = self.env['sale.order.line'].sudo()
        quantity = 0
        order_line = False
        if self.state != 'draft':
            # Drop the stale cart reference from the session before refusing.
            request.session['sale_order_id'] = None
            raise UserError(_('It is forbidden to modify a sale order which is not in draft status'))
        if line_id is not False:
            order_lines = self._cart_find_product_line(product_id, line_id, **kwargs)
            order_line = order_lines and order_lines[0]
        # Create line if no line with product_id can be located
        if not order_line:
            values = self._website_product_id_change(self.id, product_id, qty=1)
            values['name'] = self._get_line_description(self.id, product_id, attributes=attributes)
            order_line = SaleOrderLineSudo.create(values)
            try:
                order_line._compute_tax_id()
            except ValidationError as e:
                # The validation may occur in backend (eg: taxcloud) but should fail silently in frontend
                _logger.debug("ValidationError occurs during tax compute. %s" % (e))
            if add_qty:
                # The line was created with qty 1, so one unit is already counted.
                add_qty -= 1
        # compute new quantity
        if set_qty:
            quantity = set_qty
        elif add_qty is not None:
            quantity = order_line.product_uom_qty + (add_qty or 0)
        # Remove zero of negative lines
        if quantity <= 0:
            order_line.unlink()
        else:
            # update line
            values = self._website_product_id_change(self.id, product_id, qty=quantity)
            if self.pricelist_id.discount_policy == 'with_discount' and not self.env.context.get('fixed_price'):
                order = self.sudo().browse(self.id)
                product_context = dict(self.env.context)
                product_context.setdefault('lang', order.partner_id.lang)
                product_context.update({
                    'partner': order.partner_id.id,
                    'quantity': quantity,
                    'date': order.date_order,
                    'pricelist': order.pricelist_id.id,
                })
                product = self.env['product.product'].with_context(product_context).browse(product_id)
                # Recompute the unit price from the display price so pricelist
                # discounts are reflected in the shown price.
                values['price_unit'] = self.env['account.tax']._fix_tax_included_price(
                    order_line._get_display_price(product),
                    order_line.product_id.taxes_id,
                    order_line.tax_id
                )
            order_line.write(values)
        return {'line_id': order_line.id, 'quantity': quantity}
    def _cart_accessories(self):
        """ Suggest accessories based on 'Accessory Products' of products in cart """
        for order in self:
            accessory_products = order.website_order_line.mapped('product_id.accessory_product_ids').filtered(lambda product: product.website_published)
            accessory_products -= order.website_order_line.mapped('product_id')
            # NOTE(review): random.sample over a recordset relies on it acting as
            # a sequence; returns a shuffled list. Returns on the first record.
            return random.sample(accessory_products, len(accessory_products))
class Website(models.Model):
    _inherit = 'website'
    # Pricelist shown to the current visitor (depends on session/partner; see
    # _compute_pricelist_id below).
    pricelist_id = fields.Many2one('product.pricelist', compute='_compute_pricelist_id', string='Default Pricelist')
    currency_id = fields.Many2one('res.currency', related='pricelist_id.currency_id', string='Default Currency')
    salesperson_id = fields.Many2one('res.users', string='Salesperson')
    salesteam_id = fields.Many2one('crm.team', string='Sales Team')
    # All pricelists whose website_id points at this website record.
    pricelist_ids = fields.One2many('product.pricelist', compute="_compute_pricelist_ids",
                                    string='Price list available for this Ecommerce/Website')
    @api.one
    def _compute_pricelist_ids(self):
        # All pricelists bound to this website.
        # NOTE(review): @api.one is deprecated in later Odoo versions; kept as-is.
        self.pricelist_ids = self.env["product.pricelist"].search([("website_id", "=", self.id)])
    @api.multi
    def _compute_pricelist_id(self):
        """Compute the visitor-facing default pricelist for each website."""
        for website in self:
            # Force the resolution to run in this website's own context.
            if website._context.get('website_id') != website.id:
                website = website.with_context(website_id=website.id)
            website.pricelist_id = website.get_current_pricelist()
    # This method is cached, must not return records! See also #8795
    @tools.ormcache('self.env.uid', 'country_code', 'show_visible', 'website_pl', 'current_pl', 'all_pl', 'partner_pl', 'order_pl')
    def _get_pl_partner_order(self, country_code, show_visible, website_pl, current_pl, all_pl, partner_pl=False, order_pl=False):
        """ Return the list of pricelists that can be used on website for the current user.

        :param str country_code: code iso or False, If set, we search only price list available for this country
        :param bool show_visible: if True, we don't display pricelist where selectable is False (Eg: Code promo)
        :param int website_pl: The default pricelist used on this website
        :param int current_pl: The current pricelist used on the website
                               (If not selectable but the current pricelist we had this pricelist anyway)
        :param list all_pl: List of all pricelist available for this website
        :param int partner_pl: the partner pricelist
        :param int order_pl: the current cart pricelist
        :returns: list of pricelist ids
        """
        pricelists = self.env['product.pricelist']
        if country_code:
            # Country-restricted pricelists first (matched from the GeoIP code).
            for cgroup in self.env['res.country.group'].search([('country_ids.code', '=', country_code)]):
                for group_pricelists in cgroup.pricelist_ids:
                    # Keep non-selectable pricelists only when already in use.
                    if not show_visible or group_pricelists.selectable or group_pricelists.id in (current_pl, order_pl):
                        pricelists |= group_pricelists
        partner = self.env.user.partner_id
        # presumably self.user_id is the website's public user — a visitor is
        # "public" when browsing as that user. TODO confirm against the model.
        is_public = self.user_id.id == self.env.user.id
        if not is_public and (not pricelists or (partner_pl or partner.property_product_pricelist.id) != website_pl):
            if partner.property_product_pricelist.website_id:
                pricelists |= partner.property_product_pricelist
        if not pricelists:  # no pricelist for this country, or no GeoIP
            pricelists |= all_pl.filtered(lambda pl: not show_visible or pl.selectable or pl.id in (current_pl, order_pl))
        else:
            # NOTE(review): only coded (promo) pricelists are added here, and only
            # when show_visible is False — confirm this is the intended behavior.
            pricelists |= all_pl.filtered(lambda pl: not show_visible and pl.sudo().code)
        # This method is cached, must not return records! See also #8795
        return pricelists.ids
def _get_pl(self, country_code, show_visible, website_pl, current_pl, all_pl):
pl_ids = self._get_pl_partner_order(country_code, show_visible, website_pl, current_pl, all_pl)
return self.env['product.pricelist'].browse(pl_ids)
    def get_pricelist_available(self, show_visible=False):
        """ Return the list of pricelists that can be used on website for the current user.
        Country restrictions will be detected with GeoIP (if installed).

        :param bool show_visible: if True, we don't display pricelist where selectable is False (Eg: Code promo)
        :returns: pricelist recordset
        """
        website = request and request.website or None
        if not website:
            # Outside an HTTP request: fall back to the context website, then any.
            if self.env.context.get('website_id'):
                website = self.browse(self.env.context['website_id'])
            else:
                website = self.search([], limit=1)
        isocountry = request and request.session.geoip and request.session.geoip.get('country_code') or False
        partner = self.env.user.partner_id
        # Only consider the last cart's pricelist while that cart is still draft.
        order_pl = partner.last_website_so_id and partner.last_website_so_id.state == 'draft' and partner.last_website_so_id.pricelist_id
        partner_pl = partner.property_product_pricelist
        pricelists = website._get_pl_partner_order(isocountry, show_visible,
                                                   website.user_id.sudo().partner_id.property_product_pricelist.id,
                                                   request and request.session.get('website_sale_current_pl') or None,
                                                   website.pricelist_ids,
                                                   partner_pl=partner_pl and partner_pl.id or None,
                                                   order_pl=order_pl and order_pl.id or None)
        return self.env['product.pricelist'].browse(pricelists)
    def is_pricelist_available(self, pl_id):
        """ Return a boolean to specify if a specific pricelist can be manually set on the website.
        Warning: it checks only whether the pricelist is in the 'selectable' pricelists or is the current pricelist.

        :param int pl_id: The pricelist id to check
        :returns: Boolean, True if valid / available
        """
        return pl_id in self.get_pricelist_available(show_visible=False).ids
def get_current_pricelist(self):
"""
:returns: The current pricelist record
"""
# The list of available pricelists for this user.
# If the user is signed in, and has a pricelist set different than the public user pricelist
# then this pricelist will always be considered as available
available_pricelists = self.get_pricelist_available()
pl = None
partner = self.env.user.partner_id
if request and request.session.get('website_sale_current_pl'):
# `website_sale_current_pl` is set only if the user specifically chose it:
# - Either, he chose it from the pricelist selection
# - Either, he entered a coupon code
pl = self.env['product.pricelist'].browse(request.session['website_sale_current_pl'])
if pl not in available_pricelists:
pl = None
request.session.pop('website_sale_current_pl')
if not pl:
# If the user has a saved cart, it take the pricelist of this cart, except if
# the order is no longer draft (It has already been confirmed, or cancelled, ...)
pl = partner.last_website_so_id.state == 'draft' and partner.last_website_so_id.pricelist_id
if not pl:
# The pricelist of the user set on its partner form.
# If the user is not signed in, it's the public user pricelist
pl = partner.property_product_pricelist
if available_pricelists and pl not in available_pricelists:
# If there is at least one pricelist in the available pricelists
# and the chosen pricelist is not within them
# it then choose the first available pricelist.
# This can only happen when the pricelist is the public user pricelist and this pricelist is not in the available pricelist | |
# Part of Proxymiity/discord-storage-bot
from discord import File
from os import getcwd
from pathlib import Path
from discord.ext import commands
from utils.dataIO import dataIO
from utils import checks, tools, pool, file, verify, log
storage_settings = dataIO.load_json("data/storage.json")
class Storage(commands.Cog):
    def __init__(self, bot):
        self.bot = bot
        # Global transfer lock: True while a store/retrieve/delete is in flight,
        # used by check_locked() to reject concurrent destructive commands.
        self.locked = False
async def check_locked(self, ctx):
if self.locked:
await ctx.send("*AYAYA!!* Database is locked. Perhaps a transfer is still going?\n"
"Please wait before executing this command, or check "
f"<#{storage_settings['storage']['logs']}> for info.")
return self.locked
@checks.authorized()
@commands.command(name="help")
async def _help(self, ctx):
p = Path(getcwd() + "/help.md")
h = File(str(p))
await ctx.send(file=h)
    @checks.authorized()
    @commands.group(aliases=["p"])
    async def pool(self, ctx):
        # Bare command group: pool management lives in the subcommands below.
        pass
@checks.authorized()
@pool.command()
async def create(self, ctx, name: str):
if await self.check_locked(ctx):
return
name = name.lower()
if not await verify.valid_name(ctx, name):
return
await pool.new(self.bot, name)
await ctx.send(f"Created new pool named `{name}`")
await log.send(self.bot, ctx, "create_pool", name)
@checks.authorized()
@pool.command()
async def recycle(self, ctx, name: str):
if await self.check_locked(ctx):
return
name = name.lower()
if not await verify.valid_name(ctx, name):
return
p = pool.load()
if name not in p:
await ctx.send("*A-Ayaya..?* Pool does not exist.")
return
pool_id = pool.id_by_name(name)
await pool.recycle(self.bot, pool_id)
await ctx.send("Sent the pool to the Recycle Bin.")
await log.send(self.bot, ctx, "categorize_pool", name)
@checks.authorized()
@pool.command()
async def restore(self, ctx, name: str):
if await self.check_locked(ctx):
return
name = name.lower()
if not await verify.valid_name(ctx, name):
return
p = pool.load()
if name not in p:
await ctx.send("*A-Ayaya..? Pool does not exist.*")
return
if p[name]["ct"] == storage_settings["storage"]["categories"]["main"]:
await ctx.send("*A-Ayaya..? Pool isn't in the Recycle Bin.*")
return
pool_id = pool.id_by_name(name)
await pool.categorize(self.bot, pool_id, storage_settings["storage"]["categories"]["main"])
await ctx.send("Restored the pool!")
await log.send(self.bot, ctx, "categorize_pool", name)
@checks.authorized()
@pool.command(name="delete")
async def _del(self, ctx, name: str):
name = name.lower()
if not await verify.valid_name(ctx, name):
return
p = pool.load()
if name not in p:
await ctx.send("*A-Ayaya..? Pool does not exist.*")
return
pool_id = pool.id_by_name(name)
await pool.delete(self.bot, pool_id)
await ctx.send("Deleted the pool!")
await log.send(self.bot, ctx, "delete_pool", name)
    @checks.authorized()
    @pool.command()
    async def empty_recycle(self, ctx):
        """Permanently delete every pool sitting in the Recycle Bin."""
        if await self.check_locked(ctx):
            return
        await pool.empty_recycle_bin(self.bot)
        await ctx.send("Emptied the Recycle Bin.")
        await log.send(self.bot, ctx, "recycle_pool")
    @checks.authorized()
    @commands.group(aliases=["f"])
    async def file(self, ctx):
        # Bare command group: file operations live in the subcommands below.
        pass
@checks.authorized()
@file.command(aliases=["s"])
async def store(self, ctx, ch: str, path: str, *, name: str = None):
if await self.check_locked(ctx):
return
p = pool.load()
ch = ch.lower()
local_file = Path(path)
pl = pool.id_by_name(ch)
if not await verify.valid_name(ctx, ch) or not await verify.valid_path(ctx, path):
return
if not pl:
await ctx.send("*AYAYA!* There's no storage pool with that name.")
return
if await self.is_ro(ctx, p, ch):
return
if local_file.name in p[ch]["files"]:
await ctx.send("*A-Ayaya..?* There's already a file with that name.")
return
if not local_file.is_file():
await ctx.send("*A-Ayaya..?* There's no file at the provided location.")
return
name = name or local_file.stem
msg = await ctx.send("*yawn* Encrypting/Splitting/Uploading that file...")
self.locked = True
await log.send(self.bot, ctx, "store_file", local_file.name, ch)
if p[ch]["ct"] == storage_settings["storage"]["categories"]["recycle"]:
return
await file.store(self.bot, local_file.name, name, str(local_file), pl)
self.locked = False
await msg.delete()
await ctx.message.reply(f"Successfully uploaded `{local_file.name}` to `{ch}`.\n"
f"Get it back using `/file retrieve {ch} {local_file.name} <dl path>`.")
await log.send(self.bot, ctx, "store_file", local_file.name, ch, done=True)
@checks.authorized()
@file.command(aliases=["r"])
async def retrieve(self, ctx, ch: str, name: str, path: str):
p = pool.load()
ch = ch.lower()
local_file = Path(path)
pl = pool.id_by_name(ch)
if await self.check_locked(ctx) or not await verify.valid_name(ctx, ch):
return
if not await verify.valid_path(ctx, path) or not await verify.valid_name_extended(ctx, name):
return
if not pl:
await ctx.send("*AYAYA!* There's no storage pool with that name.")
return
if await self.is_ro(ctx, p, ch):
return
if name not in p[ch]["files"]:
await ctx.send("*A-Ayaya..?* There's no file to retrieve with that name.")
return
if local_file.exists():
await ctx.send("*A-Ayaya..?* There's already a file at the provided location.")
return
msg = await ctx.send("*yawn* Downloading/Merging/Decrypting that file...")
self.locked = True
await log.send(self.bot, ctx, "retrieve_file", name, ch)
await file.retrieve(self.bot, name, path, pl)
self.locked = False
await msg.delete()
await ctx.message.reply(f"Successfully downloaded `{local_file.name}` to `{path}`.")
await log.send(self.bot, ctx, "retrieve_file", name, ch, done=True)
@checks.authorized()
@file.command(aliases=["dl"])
async def download(self, ctx, ch: str, url: str, *, name: str = None):
if await self.check_locked(ctx):
return
p = pool.load()
ch = ch.lower()
f = verify.sanitize_name(url.rsplit("/", 1)[1])
pl = pool.id_by_name(ch)
if not await verify.valid_name(ctx, ch):
return
if not pl:
await ctx.send("*AYAYA!* There's no storage pool with that name.")
return
if await self.is_ro(ctx, p, ch):
return
if f in p[ch]["files"]:
await ctx.send("*A-Ayaya..?* There's already a file with that name.")
return
name = name or f.rsplit(".", 1)[0]
msg = await ctx.send("*yawn* Encrypting/Splitting/Transferring that file...")
self.locked = True
await log.send(self.bot, ctx, "store_url", url, ch, f)
await file.download(self.bot, url, name, pl)
self.locked = False
await msg.delete()
await ctx.message.reply(f"Successfully transferred `{f}` to `{ch}`.\n"
f"Get it back using `/file retrieve {ch} {f} <dl path>`.")
await log.send(self.bot, ctx, "store_url", url, ch, f, done=True)
@checks.authorized()
@file.command(aliases=["d"])
async def delete(self, ctx, ch: str, name: str):
p = pool.load()
if await self.check_locked(ctx):
return
ch = ch.lower()
pl = pool.id_by_name(ch)
if not await verify.valid_name(ctx, ch) or not await verify.valid_name_extended(ctx, name):
return
if not pl:
await ctx.send("*AYAYA!* There's no storage pool with that name.")
return
if await self.is_ro(ctx, p, ch):
return
if not await self.pre_del_check(ctx, ch, name):
return
msg = await ctx.send("*yawn* Deleting that file...")
self.locked = True
await log.send(self.bot, ctx, "delete_file", name, ch)
await pool.yank(self.bot, name, pl)
self.locked = False
await msg.delete()
await ctx.message.reply(f"Successfully deleted `{name}` from `{ch}`.")
await log.send(self.bot, ctx, "delete_file", name, ch, done=True)
@checks.authorized()
@file.command(aliases=["y"])
async def yank(self, ctx, ch: str, name: str):
p = pool.load()
ch = ch.lower()
pl = pool.id_by_name(ch)
if await self.check_locked(ctx) or not await verify.valid_name(ctx, ch):
return
if not pl:
await ctx.send("*AYAYA!* There's no storage pool with that name.")
return
if await self.is_ro(ctx, p, ch):
return
if not await verify.valid_name_extended(ctx, name) or not await self.pre_del_check(ctx, ch, name):
return
msg = await ctx.send("*yawn* Yanking that file...")
pool.db_yank(name, pl)
await msg.delete()
await ctx.message.reply(f"Successfully yanked `{name}` from `{ch}`.")
await log.send(self.bot, ctx, "yank_file", name, ch)
@staticmethod
async def pre_del_check(ctx, ch: str, name: str):
p = pool.load()
pl = pool.id_by_name(ch)
if not pl:
await ctx.send("*AYAYA!* There's no storage pool with that name.")
return False
if name not in p[ch]["files"]:
await ctx.send("*A-Ayaya..?* There's no file with that name.")
return False
return True
    @checks.authorized()
    @commands.group(aliases=["s"])
    async def search(self, ctx):
        # Bare command group: per-pool searches live in the subcommands below.
        pass
@checks.authorized()
@search.command(aliases=["f"], name="file")
async def search_file(self, ctx, ch: str, q: str):
p = pool.load()
ch = ch.lower()
pl = pool.id_by_name(ch)
if not pl:
await ctx.send("*AYAYA!* There's no storage pool with that name.")
return False
files = p[ch]["files"]
found = []
for x in files:
if q in x:
found.append([f"`{x}`", f"custom name: `{files[x]['custom_name']}`, splits: {len(files[x]['parts'])}"])
if found:
to_send = tools.paginate_text(found, "", "", f"Found {len(found)} file(s):", mid_sep=", ")
for x in to_send:
await ctx.send(x)
else:
await ctx.send("*Ayaya...* No items matched your query.")
@checks.authorized()
@search.command(aliases=["n"], name="name")
async def search_name(self, ctx, ch: str, *, q: str):
p = pool.load()
ch = ch.lower()
pl = pool.id_by_name(ch)
if not pl:
await ctx.send("*AYAYA!* There's no storage pool with that name.")
return False
names = {}
for f in p[ch]["files"]:
names[f] = p[ch]["files"][f]["custom_name"]
files = p[ch]["files"]
found = []
for x in names:
if q in names[x]:
found.append([f"`{x}`", f"custom name: `{files[x]['custom_name']}`, splits: {len(files[x]['parts'])}"])
if found:
to_send = tools.paginate_text(found, "", "", f"Found {len(found)} file(s):", mid_sep=", ")
for x in to_send:
await ctx.send(x)
else:
await ctx.send("*Ayaya...* No items matched your query.")
    @checks.authorized()
    @commands.group(aliases=["gs"])
    async def gsearch(self, ctx):
        # Bare command group: global (all-pool) searches live in the subcommands.
        pass
@checks.authorized()
@gsearch.command(aliases=["f"], name="file")
async def search_file(self, ctx, q: str):
p = pool.load()
found = []
for x in p:
files = p[x]["files"]
for y in files:
if q in y:
found.append([f"`{x}`/`{y}`", f"custom name: `{files[y]['custom_name']}`, splits: "
f"{len(files[y]['parts'])}"])
if found:
to_send = tools.paginate_text(found, "", "", f"Found {len(found)} file(s):", mid_sep=", ")
for x in to_send:
await ctx.send(x)
else:
await ctx.send("*Ayaya...* No items matched your query.")
@checks.authorized()
@gsearch.command(aliases=["n"], name="name")
async def search_name(self, ctx, *, q: str):
p = pool.load()
found = []
for x in p:
names = {}
for f in p[x]["files"]:
names[f] = p[x]["files"][f]["custom_name"]
files = p[x]["files"]
for y in names:
if q in names[y]:
found.append([f"`{x}`/`{y}`", f"custom name: `{files[y]['custom_name']}`, splits: "
f"{len(files[y]['parts'])}"])
if found:
to_send = tools.paginate_text(found, "", "", f"Found {len(found)} file(s):", mid_sep=", ")
for x in to_send:
await ctx.send(x)
else:
await ctx.send("*Ayaya...* No items matched your query.")
@checks.authorized()
@commands.command(aliases=["l"], name="list")
async def list(self, | |
#!/usr/bin/env python
# noinspection PyUnresolvedReferences
import vtkmodules.vtkInteractionStyle
# noinspection PyUnresolvedReferences
import vtkmodules.vtkRenderingOpenGL2
from vtkmodules.vtkCommonColor import vtkNamedColors
from vtkmodules.vtkCommonCore import (
vtkFloatArray,
vtkIdList,
vtkPoints
)
from vtkmodules.vtkCommonDataModel import vtkUnstructuredGrid
from vtkmodules.vtkCommonTransforms import vtkTransform
from vtkmodules.vtkFiltersCore import (
vtkContourFilter,
vtkGlyph3D,
vtkThresholdPoints,
vtkTubeFilter
)
from vtkmodules.vtkFiltersExtraction import vtkExtractEdges
from vtkmodules.vtkFiltersGeneral import (
vtkShrinkPolyData,
vtkTransformPolyDataFilter
)
from vtkmodules.vtkFiltersSources import (
vtkCubeSource,
vtkSphereSource
)
from vtkmodules.vtkRenderingCore import (
vtkActor,
vtkPolyDataMapper,
vtkRenderWindow,
vtkRenderWindowInteractor,
vtkRenderer
)
from vtkmodules.vtkRenderingFreeType import vtkVectorText
def main():
    """Entry point: parse the CLI, validate the case numbers, render them."""
    mc_cases, rotation, label = get_program_parameters()
    if not mc_cases:
        # Default to a single representative case.
        mc_cases = [7]
    else:
        # Ensure that they are unique.
        mc_cases = list(set(mc_cases))
        # Check that they lie in the correct range (cases 0..14, sign allowed).
        # FIX: was `abs(int(item) > 14)`, which takes abs() of a *boolean* and
        # is truthy for any item > 14 but never for items < -14, instead of
        # testing abs(int(item)) > 14.
        bad_cases = [item for item in mc_cases if abs(int(item)) > 14]
        if bad_cases:
            print('Bad case number(s)', ','.join(map(str, bad_cases)))
            for item in bad_cases:
                mc_cases.remove(item)
            if not mc_cases:
                print('No cases.')
                return
    marching_cubes(mc_cases, rotation, label)
def get_program_parameters():
    """Parse the command line.

    :returns: (cases, rotation, label) — a list of ints, an int quarter-turn
              count, and a bool controlling the on-screen label.
    """
    import argparse
    description = 'Marching cubes cases for 3D isosurface generation.'
    epilogue = '''
    Marching cubes cases for 3D isosurface generation.
    The 256 possible cases have been reduced to 15 cases using symmetry.
    Dark vertices are greater than the selected isosurface value.
    For the cases, enter them as integers separated by a space e.g: 1 2 3
    '''
    parser = argparse.ArgumentParser(description=description, epilog=epilogue,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    # FIX: the valid range is 0 <= abs(i) <= 14 (15 cases), not < 14.
    parser.add_argument('cases', nargs='*', type=int, default=[],
                        help='A list of integers i such that 0 <= abs(i) <= 14, corresponding to the cases desired.')
    parser.add_argument('-r', '--rotation', type=int, default=0,
                        help='Rotate camera around the cube, for i such that 0 <= abs(i) < 4,\
                        corresponding to 0, 90, 180, 270 degrees.')
    # -l and -n are mutually exclusive; label defaults to True.
    label_parser = parser.add_mutually_exclusive_group(required=False)
    label_parser.add_argument('-l', '--label', action='store_true', dest='label',
                              help='Display a label, true by default.')
    # FIX: typos in the user-facing help ("Supress diaplaying").
    label_parser.add_argument('-n', '--no_label', action='store_false', dest='label',
                              help='Suppress displaying a label.')
    parser.set_defaults(label=True)
    args = parser.parse_args()
    return args.cases, args.rotation, args.label
def marching_cubes(mcCases, rotation=0, label=True):
    """Render each requested marching-cubes case in its own viewport.

    :param mcCases: list of case numbers; a negative value selects the
        complementary case (inside/outside vertex classification swapped).
    :param rotation: 0-3, camera rotation in multiples of 90 degrees.
    :param label: when True, draw a 3D text label naming each case.

    Relies on `cases`, expected to be a module-level dispatch table mapping
    case number -> caseN() function, defined elsewhere in this file.
    """
    color = vtkNamedColors()
    # Rotate the final figure 0, 90, 180, 270 degrees.
    rotation = abs(int(rotation))
    if rotation > 3:
        rotation = 0
    if len(mcCases) > 1:
        print('Cases', ', '.join(map(str, mcCases)))
    else:
        print('Cases', ','.join(map(str, mcCases)))
    print('Rotated', rotation * 90, 'degrees.')
    renWin = vtkRenderWindow()
    renWin.SetSize(640, 480)
    iren = vtkRenderWindowInteractor()
    iren.SetRenderWindow(renWin)
    # Always use a grid of four columns unless number of cases < 4.
    renderers = list()
    gridSize = ((len(mcCases) + 3) // 4) * 4
    if len(mcCases) < 4:
        gridSize = len(mcCases)
    for i in range(0, gridSize):
        # Create the Renderer
        renderer = vtkRenderer()
        renderers.append(renderer)
        # Set the background color.
        renderers[i].SetBackground(color.GetColor3d('slate_grey'))
        renWin.AddRenderer(renderer)
    for i in range(0, len(mcCases)):
        # Define a Single Cube: one scalar per cube vertex. These initial
        # values are placeholders; the caseN() dispatch below overwrites them.
        Scalars = vtkFloatArray()
        Scalars.InsertNextValue(1.0)
        Scalars.InsertNextValue(0.0)
        Scalars.InsertNextValue(0.0)
        Scalars.InsertNextValue(1.0)
        Scalars.InsertNextValue(0.0)
        Scalars.InsertNextValue(0.0)
        Scalars.InsertNextValue(0.0)
        Scalars.InsertNextValue(0.0)
        # The 8 corners of a unit cube.
        Points = vtkPoints()
        Points.InsertNextPoint(0, 0, 0)
        Points.InsertNextPoint(1, 0, 0)
        Points.InsertNextPoint(1, 1, 0)
        Points.InsertNextPoint(0, 1, 0)
        Points.InsertNextPoint(0, 0, 1)
        Points.InsertNextPoint(1, 0, 1)
        Points.InsertNextPoint(1, 1, 1)
        Points.InsertNextPoint(0, 1, 1)
        Ids = vtkIdList()
        Ids.InsertNextId(0)
        Ids.InsertNextId(1)
        Ids.InsertNextId(2)
        Ids.InsertNextId(3)
        Ids.InsertNextId(4)
        Ids.InsertNextId(5)
        Ids.InsertNextId(6)
        Ids.InsertNextId(7)
        Grid = vtkUnstructuredGrid()
        Grid.Allocate(10, 10)
        # Cell type 12 is VTK_HEXAHEDRON.
        Grid.InsertNextCell(12, Ids)
        Grid.SetPoints(Points)
        Grid.GetPointData().SetScalars(Scalars)
        # Find the triangles that lie along the 0.5 contour in this cube.
        Marching = vtkContourFilter()
        Marching.SetInputData(Grid)
        Marching.SetValue(0, 0.5)
        Marching.Update()
        # Extract the edges of the triangles just found.
        triangleEdges = vtkExtractEdges()
        triangleEdges.SetInputConnection(Marching.GetOutputPort())
        # Draw the edges as tubes instead of lines. Also create the associated
        # mapper and actor to display the tubes.
        triangleEdgeTubes = vtkTubeFilter()
        triangleEdgeTubes.SetInputConnection(triangleEdges.GetOutputPort())
        triangleEdgeTubes.SetRadius(.005)
        triangleEdgeTubes.SetNumberOfSides(6)
        triangleEdgeTubes.UseDefaultNormalOn()
        triangleEdgeTubes.SetDefaultNormal(.577, .577, .577)
        triangleEdgeMapper = vtkPolyDataMapper()
        triangleEdgeMapper.SetInputConnection(triangleEdgeTubes.GetOutputPort())
        triangleEdgeMapper.ScalarVisibilityOff()
        triangleEdgeActor = vtkActor()
        triangleEdgeActor.SetMapper(triangleEdgeMapper)
        triangleEdgeActor.GetProperty().SetDiffuseColor(
            color.GetColor3d('lamp_black'))
        triangleEdgeActor.GetProperty().SetSpecular(.4)
        triangleEdgeActor.GetProperty().SetSpecularPower(10)
        # Shrink the triangles we found earlier. Create the associated mapper
        # and actor. Set the opacity of the shrunken triangles.
        # Note: shrink factor 1 means no actual shrinking is applied.
        aShrinker = vtkShrinkPolyData()
        aShrinker.SetShrinkFactor(1)
        aShrinker.SetInputConnection(Marching.GetOutputPort())
        aMapper = vtkPolyDataMapper()
        aMapper.ScalarVisibilityOff()
        aMapper.SetInputConnection(aShrinker.GetOutputPort())
        Triangles = vtkActor()
        Triangles.SetMapper(aMapper)
        Triangles.GetProperty().SetDiffuseColor(
            color.GetColor3d('banana'))
        Triangles.GetProperty().SetOpacity(.6)
        # Draw a cube the same size and at the same position as the one
        # created previously. Extract the edges because we only want to see
        # the outline of the cube. Pass the edges through a vtkTubeFilter so
        # they are displayed as tubes rather than lines.
        CubeModel = vtkCubeSource()
        CubeModel.SetCenter(.5, .5, .5)
        Edges = vtkExtractEdges()
        Edges.SetInputConnection(CubeModel.GetOutputPort())
        Tubes = vtkTubeFilter()
        Tubes.SetInputConnection(Edges.GetOutputPort())
        Tubes.SetRadius(.01)
        Tubes.SetNumberOfSides(6)
        Tubes.UseDefaultNormalOn()
        Tubes.SetDefaultNormal(.577, .577, .577)
        # Create the mapper and actor to display the cube edges.
        TubeMapper = vtkPolyDataMapper()
        TubeMapper.SetInputConnection(Tubes.GetOutputPort())
        CubeEdges = vtkActor()
        CubeEdges.SetMapper(TubeMapper)
        CubeEdges.GetProperty().SetDiffuseColor(
            color.GetColor3d('khaki'))
        CubeEdges.GetProperty().SetSpecular(.4)
        CubeEdges.GetProperty().SetSpecularPower(10)
        # Create a sphere to use as a glyph source for vtkGlyph3D.
        Sphere = vtkSphereSource()
        Sphere.SetRadius(0.04)
        Sphere.SetPhiResolution(20)
        Sphere.SetThetaResolution(20)
        # Remove the part of the cube with data values below 0.5.
        ThresholdIn = vtkThresholdPoints()
        ThresholdIn.SetInputData(Grid)
        ThresholdIn.ThresholdByUpper(.5)
        # Display spheres at the vertices remaining in the cube data set after
        # it was passed through vtkThresholdPoints.
        Vertices = vtkGlyph3D()
        Vertices.SetInputConnection(ThresholdIn.GetOutputPort())
        Vertices.SetSourceConnection(Sphere.GetOutputPort())
        # Create a mapper and actor to display the glyphs.
        SphereMapper = vtkPolyDataMapper()
        SphereMapper.SetInputConnection(Vertices.GetOutputPort())
        SphereMapper.ScalarVisibilityOff()
        CubeVertices = vtkActor()
        CubeVertices.SetMapper(SphereMapper)
        CubeVertices.GetProperty().SetDiffuseColor(
            color.GetColor3d('tomato'))
        # Define the text for the label; the caseN() dispatch below replaces
        # this placeholder with the actual case name.
        caseLabel = vtkVectorText()
        caseLabel.SetText('Case 1')
        if label:
            # Set up a transform to move the label to a new position.
            aLabelTransform = vtkTransform()
            aLabelTransform.Identity()
            # Position the label according to the rotation of the figure.
            if rotation == 0:
                aLabelTransform.Translate(-0.2, 0, 1.25)
                aLabelTransform.Scale(.05, .05, .05)
            elif rotation == 1:
                aLabelTransform.RotateY(90)
                aLabelTransform.Translate(-1.25, 0, 1.25)
                aLabelTransform.Scale(.05, .05, .05)
            elif rotation == 2:
                aLabelTransform.RotateY(180)
                aLabelTransform.Translate(-1.25, 0, 0.2)
                aLabelTransform.Scale(.05, .05, .05)
            else:
                aLabelTransform.RotateY(270)
                aLabelTransform.Translate(-0.2, 0, 0.2)
                aLabelTransform.Scale(.05, .05, .05)
            # Move the label to a new position.
            labelTransform = vtkTransformPolyDataFilter()
            labelTransform.SetTransform(aLabelTransform)
            labelTransform.SetInputConnection(caseLabel.GetOutputPort())
            # Create a mapper and actor to display the text.
            labelMapper = vtkPolyDataMapper()
            labelMapper.SetInputConnection(labelTransform.GetOutputPort())
            labelActor = vtkActor()
            labelActor.SetMapper(labelMapper)
        # Define the base that the cube sits on. Create its associated mapper
        # and actor. Set the position of the actor.
        baseModel = vtkCubeSource()
        baseModel.SetXLength(1.5)
        baseModel.SetYLength(.01)
        baseModel.SetZLength(1.5)
        baseMapper = vtkPolyDataMapper()
        baseMapper.SetInputConnection(baseModel.GetOutputPort())
        base = vtkActor()
        base.SetMapper(baseMapper)
        base.SetPosition(.5, -0.09, .5)
        # Set the scalar values for this case of marching cubes.
        # A negative case number will generate a complementary case
        mcCase = mcCases[i]
        if mcCase < 0:
            cases[-mcCase](Scalars, caseLabel, 0, 1)
        else:
            cases[mcCase](Scalars, caseLabel, 1, 0)
        # Force the grid to update.
        Grid.Modified()
        # Add the actors to the renderer
        renderers[i].AddActor(triangleEdgeActor)
        renderers[i].AddActor(base)
        if label:
            renderers[i].AddActor(labelActor)
        renderers[i].AddActor(CubeEdges)
        renderers[i].AddActor(CubeVertices)
        renderers[i].AddActor(Triangles)
        # Position the camera.
        renderers[i].GetActiveCamera().Dolly(1.2)
        # Rotate the camera an extra 30 degrees so the cube is not face on.
        if rotation == 0:
            renderers[i].GetActiveCamera().Azimuth(30)
        elif rotation == 1:
            renderers[i].GetActiveCamera().Azimuth(30 + 90)
        elif rotation == 2:
            renderers[i].GetActiveCamera().Azimuth(30 + 180)
        else:
            renderers[i].GetActiveCamera().Azimuth(30 + 270)
        renderers[i].GetActiveCamera().Elevation(20)
        renderers[i].ResetCamera()
        renderers[i].ResetCameraClippingRange()
        # Share renderer 0's camera so all viewports move together.
        if i > 0:
            renderers[i].SetActiveCamera(renderers[0].GetActiveCamera())
    # Setup viewports for the renderers
    rendererSize = 300
    xGridDimensions = 4
    if len(mcCases) < 4:
        xGridDimensions = len(mcCases)
    yGridDimensions = (len(mcCases) - 1) // 4 + 1
    print('Grid dimensions, (x, y): ({:d}, {:d})'.format(xGridDimensions, yGridDimensions))
    renWin.SetSize(
        rendererSize * xGridDimensions, rendererSize * yGridDimensions)
    renWin.SetWindowName('MarchingCases')
    for row in range(0, yGridDimensions):
        for col in range(0, xGridDimensions):
            index = row * xGridDimensions + col
            # (xmin, ymin, xmax, ymax)
            viewport = [
                float(col) / xGridDimensions,
                float(yGridDimensions - (row + 1)) / yGridDimensions,
                float(col + 1) / xGridDimensions,
                float(yGridDimensions - row) / yGridDimensions]
            renderers[index].SetViewport(viewport)
    iren.Initialize()
    renWin.Render()
    iren.Start()
def case0(scalars, caseLabel, IN, OUT):
    """Marching-cubes case 0: no cube vertex lies inside the isosurface."""
    for vertex, value in enumerate((OUT,) * 8):
        scalars.InsertValue(vertex, value)
    text = 'Case 0 - 00000000' if IN == 1 else 'Case 0c - 11111111'
    caseLabel.SetText(text)
def case1(scalars, caseLabel, IN, OUT):
    """Marching-cubes case 1: only vertex 0 lies inside the isosurface."""
    values = (IN, OUT, OUT, OUT, OUT, OUT, OUT, OUT)
    for vertex, value in enumerate(values):
        scalars.InsertValue(vertex, value)
    text = 'Case 1 - 00000001' if IN == 1 else 'Case 1c - 11111110'
    caseLabel.SetText(text)
def case2(scalars, caseLabel, IN, OUT):
    """Marching-cubes case 2: adjacent vertices 0 and 1 lie inside."""
    values = (IN, IN, OUT, OUT, OUT, OUT, OUT, OUT)
    for vertex, value in enumerate(values):
        scalars.InsertValue(vertex, value)
    text = 'Case 2 - 00000011' if IN == 1 else 'Case 2c - 11111100'
    caseLabel.SetText(text)
def case3(scalars, caseLabel, IN, OUT):
    """Marching-cubes case 3: diagonal face vertices 0 and 2 lie inside."""
    values = (IN, OUT, IN, OUT, OUT, OUT, OUT, OUT)
    for vertex, value in enumerate(values):
        scalars.InsertValue(vertex, value)
    text = 'Case 3 - 00000101' if IN == 1 else 'Case 3c - 11111010'
    caseLabel.SetText(text)
def case4(scalars, caseLabel, IN, OUT):
    """Marching-cubes case 4: body-diagonal vertices 0 and 6 lie inside."""
    values = (IN, OUT, OUT, OUT, OUT, OUT, IN, OUT)
    for vertex, value in enumerate(values):
        scalars.InsertValue(vertex, value)
    text = 'Case 4 - 01000001' if IN == 1 else 'Case 4c - 10111110'
    caseLabel.SetText(text)
def case5(scalars, caseLabel, IN, OUT):
    """Marching-cubes case 5: vertices 1, 4 and 5 lie inside."""
    values = (OUT, IN, OUT, OUT, IN, IN, OUT, OUT)
    for vertex, value in enumerate(values):
        scalars.InsertValue(vertex, value)
    text = 'Case 5 - 00110010' if IN == 1 else 'Case 5c - 11001101'
    caseLabel.SetText(text)
def case6(scalars, caseLabel, IN, | |
all_products.get('products'):
return next_clear
for product in all_products['products']:
id_product = product['id']
res = self.api('products/' + to_str(id_product) + '.json', None, 'Delete')
all_products = self.api('products.json?limit=100')
self.sleep_time(0.1)
except Exception:
self.log_traceback()
return next_clear
return next_clear
def clear_target_customers(self):
    """Delete every customer on the target Shopify store.

    Pages through customers 100 at a time using since_id-based pagination
    and deletes each one via the REST API. Any exception is logged and the
    step is treated as finished. Returns the next clear-step descriptor
    (chains to clear_target_pages).
    """
    next_clear = {
        'result': 'process',
        'function': 'clear_target_pages',
        'msg': ''
    }
    self._notice['target']['clear'] = next_clear
    # Skip entirely when customers are not part of this migration's config.
    if not self._notice['config']['customers']:
        return next_clear
    list_customer_skip = list()
    try:
        all_customers = self.api('customers.json?limit=100')
        id_customer = 0
        while all_customers:
            if not all_customers:
                return next_clear
            all_customers = json_decode(all_customers)
            if not all_customers.get('customers'):
                return next_clear
            for customer in all_customers['customers']:
                id_customer = to_str(customer['id'])
                res = self.api('customers/' + to_str(id_customer) + '.json', None, 'Delete')
                # res = json_decode(res)
                # if res and res.get('errors'):
                # if isinstance(res['errors'], str):
                # if res['errors'] == 'Error deleting customer':
                # list_customer_skip.append(id_customer)
                # else:
                # if res['errors'].get('base', list()):
                # if res['errors']['base'][0] == 'Cannot delete orders brokered by Shopify':
                # list_customer_skip.append(id_customer)
                # if list_customer_skip and all_customers['customers'] and to_len(list_customer_skip) == to_len(all_customers['customers']):
                # break
            # A short page means we have reached the last page.
            if to_len(all_customers['customers']) < 100:
                break
            all_customers = self.api('customers.json?limit=100&since_id={}'.format(id_customer))
            # Small delay to stay under Shopify's API rate limit.
            self.sleep_time(0.1)
    except Exception:
        self.log_traceback()
        return next_clear
    return next_clear
def clear_target_orders(self):
    """Delete all orders (any status) on the target Shopify store.

    Orders Shopify refuses to delete ("brokered by Shopify") are remembered
    in a skip list; when a page consists solely of skipped orders the loop
    breaks so it cannot spin forever. Returns the next clear-step descriptor
    (chains to clear_target_customers).
    """
    next_clear = {
        'result': 'process',
        'function': 'clear_target_customers',
        'msg': ''
    }
    self._notice['target']['clear'] = next_clear
    if not self._notice['config']['orders']:
        return next_clear
    list_order_skip = list()
    try:
        all_orders = self.api('orders.json?status=any&limit=100')
        while all_orders:
            if not all_orders:
                return next_clear
            all_orders = json_decode(all_orders)
            if not all_orders.get('orders'):
                return next_clear
            for order in all_orders['orders']:
                id_order = to_str(order['id'])
                if id_order in list_order_skip:
                    continue
                res = self.api('orders/' + id_order + '.json', None, 'Delete')
                res = json_decode(res)
                if res and res.get('errors', dict()).get('base', list()):
                    if res['errors']['base'][0] == 'Cannot delete orders brokered by Shopify':
                        list_order_skip.append(id_order)
            # Stop once every order on the page is undeletable.
            if list_order_skip and all_orders['orders'] and len(list_order_skip) == len(all_orders['orders']):
                break
            # Re-fetch the first page; deleted orders drop out of it, so this
            # drains unless only skipped orders remain (handled above).
            all_orders = self.api('orders.json?status=any&limit=100')
            self.sleep_time(0.1)
    except Exception:
        self.log_traceback()
        return next_clear
    return next_clear
def clear_target_reviews(self):
    """Remove migrated review rows and their media files for this migration.

    Deletes rows from the review staging table for the current migration id
    and removes the migration's media directory. Terminal step of the clear
    chain (result 'success', no follow-up function).
    """
    next_clear = {'result': 'success', 'function': '', 'msg': ''}
    if not self._notice['config']['reviews']:
        return next_clear
    deleted = self.delete_obj(self.TABLE_SHOPIFY_REVIEW, {
        'migration_id': self._migration_id,
    })
    media_dir = get_pub_path() + '/media/' + to_str(self._migration_id)
    if os.path.isdir(media_dir):
        shutil.rmtree(media_dir)
    return next_clear if deleted else error_database()
def clear_target_blogs(self):
    """Delete all blogs on the target store plus the helper theme.

    First removes the 'Litextension image blog description' helper theme if
    installed, then deletes blogs in batches of 100 until none remain.
    Returns the next clear-step descriptor (chains to clear_target_coupons).
    """
    next_clear = {
        'result': 'process',
        'function': 'clear_target_coupons',
        'msg': ''
    }
    self._notice['target']['clear'] = next_clear
    if not self._notice['config']['blogs']:
        return next_clear
    try:
        themes = self.api('themes.json')
        themes = json_decode(themes)
        if themes and themes.get('themes'):
            for theme in themes['themes']:
                if 'Litextension image blog description' in theme['name']:
                    res = self.api('themes/' + to_str(theme['id']) + '.json', None, 'DELETE')
        all_blogs = self.api('blogs.json?limit=100')
        while all_blogs:
            if not all_blogs:
                return next_clear
            all_blogs = json_decode(all_blogs)
            if not all_blogs.get('blogs'):
                return next_clear
            for blog in all_blogs['blogs']:
                id_blog = blog['id']
                res = self.api('blogs/' + to_str(id_blog) + '.json', None, 'Delete')
            # Re-fetch the first page after deleting its contents.
            all_blogs = self.api('blogs.json?limit=100')
            # Small delay to stay under Shopify's API rate limit.
            self.sleep_time(0.1)
    except Exception:
        self.log_traceback()
        return next_clear
    return next_clear
def clear_target_coupons(self):
    """Delete all price rules (coupons) on the target store.

    Deletes price rules in batches of 100 until none remain. Returns the
    next clear-step descriptor (chains to clear_target_reviews).
    """
    next_clear = {
        'result': 'process',
        'function': 'clear_target_reviews',
        'msg': ''
    }
    self._notice['target']['clear'] = next_clear
    if not self._notice['config']['coupons']:
        return next_clear
    try:
        all_price_rules = self.api('price_rules.json?limit=100')
        while all_price_rules:
            if not all_price_rules:
                return next_clear
            all_price_rules = json_decode(all_price_rules)
            if not all_price_rules.get('price_rules'):
                return next_clear
            for price_rule in all_price_rules['price_rules']:
                id_price = price_rule['id']
                res = self.api('price_rules/' + to_str(id_price) + '.json', None, 'Delete')
            # Re-fetch the first page after deleting its contents.
            all_price_rules = self.api('price_rules.json?limit=100')
            # Small delay to stay under Shopify's API rate limit.
            self.sleep_time(0.1)
    except Exception:
        self.log_traceback()
        return next_clear
    return next_clear
def clear_target_pages(self):
    """Delete all pages on the target store plus the helper theme.

    Removes the 'Litextension image page description' helper theme if
    installed, then deletes pages in batches of 100 until none remain.
    Returns the next clear-step descriptor (chains to clear_target_blogs).
    """
    next_clear = {
        'result': 'process',
        'function': 'clear_target_blogs',
        'msg': ''
    }
    self._notice['target']['clear'] = next_clear
    if not self._notice['config']['pages']:
        return next_clear
    try:
        themes = self.api('themes.json')
        themes = json_decode(themes)
        if themes and themes.get('themes'):
            for theme in themes['themes']:
                if 'Litextension image page description' in theme['name']:
                    res = self.api('themes/' + to_str(theme['id']) + '.json', None, 'DELETE')
        all_pages = self.api('pages.json?limit=100')
        while all_pages:
            if not all_pages:
                return next_clear
            all_pages = json_decode(all_pages)
            if not all_pages.get('pages'):
                return next_clear
            for page in all_pages['pages']:
                id_page = page['id']
                res = self.api('pages/' + to_str(id_page) + '.json', None, 'Delete')
            all_pages = self.api('pages.json?limit=100')
            # Consistency fix: every sibling clear_* method throttles with the
            # class helper self.sleep_time(); this one called time.sleep()
            # directly.
            self.sleep_time(0.1)
    except Exception:
        self.log_traceback()
        return next_clear
    return next_clear
# TODO: clear demo
def clear_target_taxes_demo(self):
    """Demo-mode tax clear: nothing to delete, just advance the chain."""
    next_clear = dict(result='process', function='clear_target_manufacturers_demo')
    self._notice['target']['clear_demo'] = next_clear
    return next_clear
def clear_target_manufacturers_demo(self):
    """Demo-mode manufacturer clear: nothing to delete, advance to categories."""
    next_clear = dict(result='process', function='clear_target_categories_demo')
    self._notice['target']['clear_demo'] = next_clear
    # Same object was just stored, so returning it directly is equivalent to
    # the original's return of self._notice['target']['clear_demo'].
    return next_clear
def clear_target_categories_demo(self):
    """Demo-mode clear: delete only the collections created by this migration.

    Collection ids come from the migration map table (TYPE_CATEGORY rows).
    NOTE(review): unlike clear_target_products_demo, the map rows are not
    deleted afterwards — confirm whether that is intentional.
    """
    next_clear = {
        'result': 'process',
        'function': 'clear_target_products_demo',
    }
    self._notice['target']['clear_demo'] = next_clear
    if not self._notice['config']['categories']:
        return next_clear
    where = {
        'migration_id': self._migration_id,
        'type': self.TYPE_CATEGORY
    }
    categories = self.select_obj(TABLE_MAP, where)
    category_ids = list()
    if categories['result'] == 'success':
        category_ids = duplicate_field_value_from_list(categories['data'], 'id_desc')
    if not category_ids:
        return next_clear
    for category_id in category_ids:
        res = self.api('custom_collections/' + to_str(category_id) + '.json', None, 'Delete')
    return next_clear
def clear_target_products_demo(self):
    """Demo-mode clear: delete only the products created by this migration.

    Product ids come from the migration map table (TYPE_PRODUCT rows); the
    map rows themselves are removed after the API deletions.
    """
    next_clear = {
        'result': 'process',
        'function': 'clear_target_orders_demo',
    }
    self._notice['target']['clear_demo'] = next_clear
    if not self._notice['config']['products']:
        return next_clear
    where = {
        'migration_id': self._migration_id,
        'type': self.TYPE_PRODUCT
    }
    products = self.select_obj(TABLE_MAP, where)
    product_ids = list()
    if products['result'] == 'success':
        product_ids = duplicate_field_value_from_list(products['data'], 'id_desc')
    if not product_ids:
        return next_clear
    for product_id in product_ids:
        res = self.api('products/' + to_str(product_id) + '.json', None, 'Delete')
    self.delete_obj(TABLE_MAP, where)
    return next_clear
def clear_target_customers_demo(self):
    """Demo-mode clear: delete only the customers created by this migration.

    Customer ids come from the migration map table (TYPE_CUSTOMER rows).
    NOTE(review): the map rows are not deleted afterwards, unlike the
    products/orders demo clears — confirm whether that is intentional.
    """
    next_clear = {
        'result': 'process',
        'function': 'clear_target_reviews_demo',
    }
    self._notice['target']['clear_demo'] = next_clear
    if not self._notice['config']['customers']:
        return next_clear
    where = {
        'migration_id': self._migration_id,
        'type': self.TYPE_CUSTOMER
    }
    customers = self.select_obj(TABLE_MAP, where)
    customer_ids = list()
    if customers['result'] == 'success':
        customer_ids = duplicate_field_value_from_list(customers['data'], 'id_desc')
    if not customer_ids:
        return next_clear
    for customer_id in customer_ids:
        res = self.api('customers/' + to_str(customer_id) + '.json', None, 'Delete')
    return next_clear
def clear_target_orders_demo(self):
    """Demo-mode clear: delete only the orders created by this migration.

    NOTE(review): 'result' is 'success' here even though 'function' still
    points at clear_target_customers_demo. If the runner stops the chain on
    'success', the customer and review demo clears never execute — sibling
    steps use 'process'; confirm against the framework's clear loop.
    """
    next_clear = {
        'result': 'success',
        'function': 'clear_target_customers_demo',
    }
    self._notice['target']['clear_demo'] = next_clear
    if not self._notice['config']['orders']:
        return next_clear
    where = {
        'migration_id': self._migration_id,
        'type': self.TYPE_ORDER
    }
    orders = self.select_obj(TABLE_MAP, where)
    order_ids = list()
    if orders['result'] == 'success':
        order_id_map = duplicate_field_value_from_list(orders['data'], 'id_desc')
        # order_ids is empty at this point, so this just de-duplicates the
        # mapped ids.
        order_ids = list(set(order_ids + order_id_map))
    if not order_ids:
        return next_clear
    for order_id in order_ids:
        res = self.api('orders/' + to_str(order_id) + '.json', None, 'Delete')
    self.delete_obj(TABLE_MAP, where)
    return next_clear
def clear_target_reviews_demo(self):
    """Demo-mode clear for reviews: drop staged review rows and media files.

    Mirrors clear_target_reviews; terminal step of the demo clear chain.
    """
    next_clear = {'result': 'success', 'function': '', 'msg': ''}
    if not self._notice['config']['reviews']:
        return next_clear
    deleted = self.delete_obj(self.TABLE_SHOPIFY_REVIEW, {
        'migration_id': self._migration_id,
    })
    media_dir = get_pub_path() + '/media/' + to_str(self._migration_id)
    if os.path.isdir(media_dir):
        shutil.rmtree(media_dir)
    return next_clear if deleted else error_database()
def prepare_import_target(self):
    """Pre-import hook for the target cart.

    The Shopify API credential check below is commented out, so this
    currently returns success unconditionally.
    """
    # api_shop = self.api('shop.json')
    # if not api_shop:
    # response = response_error('Shopify API Password is not correct(target)!')
    # response['elm'] = '#error-api'
    # return response
    # try:
    # shop = json_decode(api_shop)
    # if 'errors' in shop:
    # response = response_error('Shopify API Password is not correct(target)!')
    # response['elm'] = '#error-api'
    # return response
    # except:
    # response = response_error('Shopify API Password is not correct(target)!')
    # response['elm'] = '#error-api'
    # return response
    # shopify_plan = shop.get('shop', dict()).get('plan_name', 'affiliate')
    # self._notice['target']['shopify_plan_name'] = shopify_plan
    return response_success()
# TODO: TAX
def prepare_taxes_import(self):
    """Hook before tax import; no setup needed for Shopify. Returns self for chaining."""
    return self
def prepare_taxes_export(self):
    """Hook before tax export; no setup needed for Shopify. Returns self for chaining."""
    return self
def get_taxes_main_export(self):
    """Shopify exposes a single implicit tax rule; export it as one record."""
    single_rule = {
        'id': '1',
        'code': 'Tax Rule Shopify'
    }
    return response_success([single_rule])
def get_taxes_ext_export(self, taxes):
    """Fetch the country/province tax-rate data used to expand the tax rule.

    :param taxes: main-export payload (unused here).
    :return: success response wrapping the decoded countries payload, or an
        error response when the API call fails.
    """
    tax_rates = self.api('countries.json')
    if not tax_rates:
        return response_error(self.console_error("Could not get tax rate data from Shopify"))
    tax_rates_data = json_decode(tax_rates)
    return response_success(tax_rates_data)
def convert_tax_export(self, tax, taxes_ext):
    """Expand the single Shopify tax rule into the framework's tax structure.

    Builds one product tax class plus one tax zone per country — and one per
    province where provinces exist — from the country data fetched by
    get_taxes_ext_export.
    """
    tax_product = list()
    tax_customer = list()  # NOTE: populated nowhere; kept for parity with other carts.
    tax_zone = list()
    tax_product_data = self.construct_tax_product()
    tax_product_data['id'] = 1
    tax_product_data['code'] = None
    tax_product_data['name'] = 'Product Tax Class Shopify'
    tax_product.append(tax_product_data)
    for country in taxes_ext['data']['countries']:
        if 'provinces' in country and country['provinces']:
            # One zone per province, carrying the province-level rate.
            for province in country['provinces']:
                tax_zone_state = self.construct_tax_zone_state()
                tax_zone_state['id'] = province['id']
                tax_zone_state['name'] = province['name']
                tax_zone_state['state_code'] = province['code']
                tax_zone_country = self.construct_tax_zone_country()
                tax_zone_country['id'] = country['id']
                tax_zone_country['name'] = country['name']
                tax_zone_country['country_code'] = country['code']
                tax_zone_rate = self.construct_tax_zone_rate()
                tax_zone_rate['id'] = None
                tax_zone_rate['name'] = province['tax_name']
                tax_zone_rate['rate'] = province['tax']
                tax_zone_data = self.construct_tax_zone()
                tax_zone_data['id'] = None
                # Conditional expression: "<country tax> - <province tax name>"
                # when the province has a tax name, otherwise fall back to the
                # province code.
                tax_zone_data['name'] = country['tax_name'] + ' - ' + province['tax_name'] if province[
                    'tax_name'] else country['tax_name'] + ' - ' + province['code']
                tax_zone_data['country'] = tax_zone_country
                tax_zone_data['state'] = tax_zone_state
                tax_zone_data['rate'] = tax_zone_rate
                tax_zone.append(tax_zone_data)
        else:
            # Country-level rate only; the state part stays at its defaults.
            tax_zone_state = self.construct_tax_zone_state()
            tax_zone_country = self.construct_tax_zone_country()
            tax_zone_country['id'] = country['id']
            tax_zone_country['name'] = country['name']
            tax_zone_country['country_code'] = country['code']
            tax_zone_rate = self.construct_tax_zone_rate()
            tax_zone_rate['id'] = None
            tax_zone_rate['name'] = country['tax_name']
            tax_zone_rate['rate'] = country['tax']
            tax_zone_data = self.construct_tax_zone()
            tax_zone_data['id'] = None
            tax_zone_data['name'] = country['tax_name'] if country['tax_name'] else country['code']
            tax_zone_data['country'] = tax_zone_country
            tax_zone_data['state'] = tax_zone_state
            tax_zone_data['rate'] = tax_zone_rate
            tax_zone.append(tax_zone_data)
    tax_data = self.construct_tax()
    tax_data['id'] = tax['id']
    tax_data['name'] = tax['code']
    tax_data['tax_products'] = tax_product
    tax_data['tax_zones'] = tax_zone
    return response_success(tax_data)
def get_tax_id_import(self, convert, tax, taxes_ext):
    """Shopify keeps the source tax id; return it unchanged."""
    return tax['id']
def check_tax_import(self, convert, tax, taxes_ext):
    """Return the previously-migrated tax mapping for this source id/code, if any."""
    return self.get_map_field_by_src(self.TYPE_TAX, convert['id'], convert['code'])
def router_tax_import(self, convert, tax, taxes_ext):
    """Route every tax record to the standard tax_import handler."""
    return response_success('tax_import')
def before_tax_import(self, convert, tax, taxes_ext):
    """Hook before each tax import; nothing to prepare for Shopify."""
    return response_success()
def tax_import(self, convert, tax, taxes_ext):
    """Apply the converted tax zones to the Shopify target store.

    For each tax zone: ensure the country exists (creating it if missing),
    then set either the country-level tax rate (no state code) or the
    matching province's rate via PUT.

    :return: success(0) when every zone is applied; an error/warning
        response on the first failure.
    """
    all_countries = self.api('countries.json')
    if not all_countries:
        return response_error(self.console_error("Could not get countries data from Shopify"))
    all_countries = json_decode(all_countries)
    if not convert['tax_zones']:
        return response_warning(self.console_error(
            "Tax id " + to_str(convert['id']) + " import failed. Error: Tax zones are not existed!"))
    tax_zones = convert['tax_zones']
    for tax_zone in tax_zones:
        country_code = tax_zone['country']['country_code']
        state_code = tax_zone['state']['state_code']
        # Shopify expects a fraction (e.g. 0.08), source rates are percent.
        rate = to_decimal(tax_zone['rate']['rate']) / 100
        check_country = False
        id_country = 0
        for country in all_countries['countries']:
            if country['code'] == country_code:
                check_country = True
                id_country = country['id']
        if not check_country:
            # Country not present on the store yet; create it.
            post_data = {
                'country': {
                    'code': country_code,
                }
            }
            response = self.api('countries.json', post_data, 'Post')
            response = json_decode(response)
            check_response = self.check_response_import(response, {'id': country_code, 'code': country_code},
                                                        'country')
            if check_response['result'] != 'success':
                return check_response
            id_country = response['country']['id']
        if not state_code and id_country:
            # Country-level rate. Bug fix: id_country is an int taken from the
            # API payload; the original concatenated it to a str URL without
            # to_str(), raising TypeError.
            put_data = {
                'country': {
                    'id': id_country,
                    'tax': rate
                }
            }
            response = self.api('countries/' + to_str(id_country) + '.json', put_data, 'Put')
            response = json_decode(response)
            check_response = self.check_response_update(response, country_code, 'country')
            if check_response['result'] != 'success':
                return check_response
            continue
        country_detail = self.api('countries/' + to_str(id_country) + '.json')
        if not country_detail:
            return response_warning('Could not get data country: ' + country_code)
        country_detail = json_decode(country_detail)
        check_state = False
        id_state = 0
        for province in country_detail['country']['provinces']:
            if province['code'] == state_code:
                check_state = True
                id_state = province['id']
        if check_state:
            put_data = {
                'province': {
                    'id': id_state,
                    'tax': rate
                }
            }
            # Same int-concatenation fix for id_country and id_state.
            response = self.api('countries/' + to_str(id_country) + '/provinces/' + to_str(id_state) + '.json',
                                put_data, 'Put')
            response = json_decode(response)
            check_response = self.check_response_update(response, country_code + ':' + state_code, 'province')
            if check_response['result'] != 'success':
                return check_response
    return response_success(0)
def after_tax_import(self, tax_id, convert, tax, taxes_ext):
    """Hook after each tax import; nothing to finalize for Shopify."""
    return response_success()
def addition_tax_import(self, convert, tax, taxes_ext):
    """No additional tax data to import for Shopify."""
    return response_success()
# TODO: MANUFACTURER
def prepare_manufacturers_import(self):
    """Hook before manufacturer import; no setup needed. Returns self for chaining."""
    return self
def prepare_manufacturers_export(self):
    """Hook before manufacturer export; no setup needed. Returns self for chaining."""
    return self
def get_manufacturers_main_export(self):
    """Fetch the next batch of manufacturers from the local staging table.

    Pagination is driven by the process cursor id_src; batch size comes from
    the per-entity limit in settings.
    """
    id_src = self._notice['process']['manufacturers']['id_src']
    limit = self._notice['setting']['manufacturers']
    # NOTE(review): query built by string concatenation; id_src/limit come
    # from internal state, but a parameterized query would be safer.
    query = "SELECT * FROM " + self.SP_MANU + " WHERE id > " + to_str(id_src) + " ORDER BY id ASC LIMIT " + to_str(limit)
    manufacturers = self.select_raw(query)
    if manufacturers['result'] != 'success':
        return error_database()
    return manufacturers
def get_manufacturers_ext_export(self, manufacturers):
    """No extra manufacturer data is needed; return an empty success."""
    return response_success()
def convert_manufacturer_export(self, manufacturer, manufacturers_ext):
    """Map a staged manufacturer row onto the framework's manufacturer struct."""
    converted = self.construct_manufacturer()
    converted['id'] = manufacturer['id']
    converted['name'] = manufacturer['name']
    return response_success(converted)
def get_manufacturer_id_import(self, convert, manufacturer, manufacturers_ext):
    """Shopify keeps the source manufacturer id; return it unchanged."""
    return manufacturer['id']
def check_manufacturer_import(self, convert, manufacturer, manufacturers_ext):
    """Manufacturers are never pre-existing on Shopify; always report not found."""
    return False
def router_manufacturer_import(self, convert, manufacturer, manufacturers_ext):
    """Route every manufacturer record to the standard manufacturer_import handler."""
    return response_success('manufacturer_import')
def before_manufacturer_import(self, convert, manufacturer, manufacturers_ext):
    """Hook before each manufacturer import; nothing to prepare."""
    return response_success()
def manufacturer_import(self, convert, manufacturer, manufacturers_ext):
    """Shopify has no manufacturer entity; report success with placeholder id 0."""
    return response_success(0)
def after_manufacturer_import(self, manufacturer_id, convert, manufacturer, manufacturers_ext):
    """Hook after each manufacturer import; nothing to finalize."""
    return response_success()
def addition_manufacturer_import(self, convert, manufacturer, manufacturers_ext):
    """No additional manufacturer data to import for Shopify."""
    return response_success()
# TODO: CATEGORY
def prepare_categories_import(self):
    """Hook before category import; no setup needed. Returns self for chaining."""
    return self
def prepare_categories_export(self):
    """Start category export from custom collections; smart collections follow later."""
    self._notice['process']['categories']['type'] = 'custom'
    return self
def get_categories_main_export(self):
    """Fetch the next batch of collections (categories) to export.

    Custom collections are exported first; once exhausted the method falls
    through to smart collections, each type using its own since_id cursor.
    The returned response carries the collection type in response['type'].
    """
    collection_type = self._notice['process']['categories'].get('type', 'custom')
    limit = self._notice['setting']['categories']
    categories_data = list()
    if collection_type == 'custom':
        id_src = self._notice['process']['categories']['id_src']
        collections = self.api('custom_collections.json?since_id=' + to_str(id_src) + '&limit=' + to_str(limit))
        if not collections:
            return response_error(self.console_error("Could not get category data from Shopify"))
        categories_page = json_decode(collections)
        # if 'custom_collections' in categories_page and not categories_page['custom_collections']:
        # return create_response('pass')
        categories_data = categories_page.get('custom_collections')
        if not categories_data:
            # Custom collections exhausted; switch to smart collections.
            collection_type = 'smart'
    if collection_type == 'smart':
        id_src = self._notice['process']['categories'].get('id_src_smart')
        if not id_src:
            id_src = 0
        collections = self.api('smart_collections.json?since_id=' + to_str(id_src) + '&limit=' + to_str(limit))
        if not collections:
            return response_error(self.console_error("Could not get category data from Shopify"))
        categories_page = json_decode(collections)
        if 'smart_collections' in categories_page and not categories_page['smart_collections']:
            return create_response('pass')
        categories_data = categories_page['smart_collections']
    response = response_success(categories_data)
    response['type'] = collection_type
    return response
def get_categories_ext_export(self, categories):
    """Fetch metafields for each exported collection.

    Smart collections are recognized by the presence of a 'rules' key and
    fetched from the smart_collections endpoint; everything else uses
    custom_collections. Returns {collection_id: {'meta': [...]}} (the
    'meta' key is absent when the metafield request fails).
    """
    extend = dict()
    for category in categories['data']:
        extend[category['id']] = dict()
        meta = False
        if 'rules' in category:
            meta = self.api('smart_collections/' + to_str(category['id']) + '/metafields.json')
        else:
            meta = self.api('custom_collections/' + to_str(category['id']) + '/metafields.json')
        if meta:
            category_meta = json_decode(meta)
            extend[category['id']]['meta'] = category_meta.get('metafields')
    return response_success(extend)
def convert_category_export(self, category, categories_ext):
category_data = self.construct_category()
parent = self.construct_category_parent()
parent['id'] = 0
category_data['parent'] = parent
category_data['id'] = category['id']
category_data['active'] = True if category['published_at'] else False
if 'image' in category:
if 'src' in category['image']:
real_path = re.sub("/\?.+/", "", to_str(category['image']['src']))
real_path = real_path[:real_path.find('?')]
category_data['thumb_image']['url'] = real_path
category_data['thumb_image']['path'] = ''
category_data['name'] = category['title']
category_data['description'] = category['body_html']
category_data['created_at'] = convert_format_time(category['published_at'], self.FORMAT_DATETIME)
category_data['updated_at'] = convert_format_time(category['updated_at'], self.FORMAT_DATETIME)
category_data['category'] = category
category_data['categories_ext'] = | |
#! /usr/bin/env python3
import sys
#1 DONE!!! Passed 9/10
def translate_sequence(rna_sequence, genetic_code):
    """Translate an RNA sequence into a protein string.

    Translation starts at the first base (no start-codon search), proceeds
    codon by codon, and stops at the first stop codon ('*').

    Args:
        rna_sequence: RNA string (any case).
        genetic_code: dict mapping 3-base codons to one-letter amino acids,
            with '*' marking stop codons.

    Returns:
        The translated protein string; '' when the sequence is shorter than
        one codon.
    """
    rna = rna_sequence.upper()
    amino_acids = []
    # Iterate over complete codons only; a trailing 1-2 base remainder is
    # ignored. (The original sliced past the end and raised KeyError on
    # partial codons, and also contained a dead `pass`, debug prints, and
    # unreachable code after the return.)
    for i in range(0, len(rna) - 2, 3):
        amino_acid = genetic_code[rna[i:i + 3]]
        if amino_acid == '*':
            break
        amino_acids.append(amino_acid)
    return ''.join(amino_acids)
#2: previously passed 2/4 tests — it produced proteins but returned them with the wrong structure.
def get_all_translations(rna_sequence, genetic_code):
    """Return the peptide translated from every AUG start codon.

    Every position of `rna_sequence` is scanned for an 'AUG' start
    codon (this covers all three reading frames); each start is
    translated codon by codon until a stop codon ('*') or the end of
    the sequence, ignoring a trailing partial codon.

    Parameters
    ----------
    rna_sequence : str
        RNA sequence (upper- or lower-case).
    genetic_code : dict
        Maps each 3-base codon to a one-letter amino acid; stop codons
        map to '*'.

    Returns
    -------
    list of str
        One peptide per AUG start codon, in order of start position;
        empty list if the sequence contains no AUG.
    """
    rna = rna_sequence.upper()
    peptides = []
    # The original returned from inside the translation loop after the
    # first amino acid and never reached frames 2 and 3; this version
    # translates every start site to completion.
    for start in range(len(rna) - 2):
        if rna[start:start + 3] != 'AUG':
            continue
        amino_acids = []
        for i in range(start, len(rna) - 2, 3):
            amino_acid = genetic_code[rna[i:i + 3]]
            if amino_acid == '*':
                break
            amino_acids.append(amino_acid)
        if amino_acids:
            peptides.append(''.join(amino_acids))
    return peptides
#3 DONE Passed All
def get_reverse(sequence):
    """Return the reverse of a sequence, upper-cased.

    Parameters
    ----------
    sequence : str
        Sequence (upper- or lower-case).

    Returns
    -------
    str
        The upper-cased sequence in reverse order.
    """
    # Slicing with a -1 step reverses in one pass; the original built a
    # list by manual index arithmetic and shadowed the `re` module name.
    return sequence.upper()[::-1]
#4 DONE Passed All
def get_complement(sequence):
    """Return the complement of an RNA sequence, upper-cased.

    Each base is replaced by its Watson-Crick partner (A<->U, G<->C).
    Characters other than A, U, G, C are dropped, matching the
    original's behavior.

    Parameters
    ----------
    sequence : str
        RNA sequence (upper- or lower-case).

    Returns
    -------
    str
        The complemented sequence.
    """
    pairs = {'A': 'U', 'U': 'A', 'G': 'C', 'C': 'G'}
    # Mapping lookup replaces the original chain of if-statements; the
    # `if base in pairs` filter preserves the silent drop of non-bases.
    return ''.join(pairs[base] for base in sequence.upper() if base in pairs)
#5 DONE Passed All
def reverse_and_complement(sequence):
    """Return the reverse complement of an RNA sequence, upper-cased.

    The sequence is reversed, then each base is replaced by its
    Watson-Crick partner (A<->U, G<->C).  Characters other than
    A, U, G, C are dropped, matching the original's behavior.

    Parameters
    ----------
    sequence : str
        RNA sequence (upper- or lower-case).

    Returns
    -------
    str
        The reverse-complemented sequence.
    """
    pairs = {'A': 'U', 'U': 'A', 'G': 'C', 'C': 'G'}
    reversed_seq = sequence.upper()[::-1]
    # Same idiomatic form as get_complement; the original duplicated
    # both manual loops inline with a dead `pass` at the top.
    return ''.join(pairs[base] for base in reversed_seq if base in pairs)
#6
def get_longest_peptide(rna_sequence, genetic_code):
"""Get the longest peptide encoded by an RNA sequence.
Explore six reading frames of `rna_sequence` (the three reading frames of
`rna_sequence`, and the three reading frames of the reverse and complement
of `rna_sequence`) and return (as a string) the longest sequence of amino
acids that it encodes, according to the `genetic_code`.
If no amino acids can be translated from `rna_sequence` nor its reverse and
complement, an empty string is returned.
Parameters
----------
rna_sequence : str
A string representing an RNA sequence (upper or lower-case).
genetic_code : dict
A dictionary mapping all 64 codons (strings of three RNA bases) to
amino acids (string of single-letter amino acid abbreviation). Stop
codons should be represented with asterisks ('*').
Returns
-------
str
A string of the longest sequence of amino acids encoded by
`rna_sequence`.
"""
pass
# Read and get the RNA string
DNA = rna_sequence.upper()
print ("\n \n RNA String1: ", DNA)
if (DNA.find('AUG') != -1):
pass
else:
return ""
DNA2= DNA[1:]
# print ("\n \n RNA2 String: ", DNA2)
if (DNA2.find('AUG') != -1):
pass
else:
return ""
DNA3= DNA[2:]
# print ("\n \n RNA3 String: ", DNA3)
if (DNA3.find('AUG') != -1):
pass
else:
return ""
sequence = DNA[1:]
# Find orf 2
# Find all AUG indexs
start_position = 1
start_indexs = []
stop_indexs = []
for i in range(1, len(sequence), 3):
if sequence[i:i+3] == "AUG":
start_indexs.append(i)
# Find all stop codon indexs
for i in range(1, len(sequence), 3):
stops =["UAA", "UGA", "UAG"]
if sequence[i:i+3] in stops:
stop_indexs.append(i)
orf = []
mark = 0
for i in range(0,len(start_indexs)):
for j in range(0, len(stop_indexs)):
if start_indexs[i] < stop_indexs[j] and start_indexs[i] > mark:
orf.append(sequence[start_indexs[i]:stop_indexs[j]+3])
mark = stop_indexs[j]+3
break
# return orf
orf2 = orf
print(orf2)
pass
sequence = DNA[2:]
# Find all ATG indexs
start_position = 2
start_indexs = []
stop_indexs = []
for i in range(2, len(sequence), 3):
if sequence[i:i+3] == "AUG":
start_indexs.append(i)
# Find all stop codon indexs
for i in range(2, len(sequence), 3):
stops =["UAA", "UGA", "UAG"]
if sequence[i:i+3] in stops:
stop_indexs.append(i)
orf = []
mark = 0
start_position = {}
for i in range(0,len(start_indexs)):
for j in range(0, len(stop_indexs)):
if start_indexs[i] < stop_indexs[j] and start_indexs[i] > mark:
orf.append(sequence[start_indexs[i]:stop_indexs[j]+3])
start_position[len(sequence[start_indexs[i]:stop_indexs[j]+3])] = start_indexs[i]
mark = stop_indexs[j]+3
break
# return orf
orf3 = orf
print(orf3)
pass
sequence = DNA[0:]
start_position = 0
start_indexs = []
stop_indexs = []
for i in range(0, len(sequence), 3):
if sequence[i:i+3] == "AUG":
start_indexs.append(i)
# Find all stop codon indexs
for i in range(0, len(sequence), 3):
stops =["UAA", "UGA", "UAG"]
if sequence[i:i+3] in stops:
stop_indexs.append(i)
orf = []
mark = 0
start_position = {}
for i in range(0,len(start_indexs)):
for j in range(0, len(stop_indexs)):
if start_indexs[i] < stop_indexs[j] and start_indexs[i] > mark:
orf.append(sequence[start_indexs[i]:stop_indexs[j]+3])
start_position[len(sequence[start_indexs[i]:stop_indexs[j]+3])] = start_indexs[i]
mark = stop_indexs[j]+3
break
# return orf
orf1 = orf
print(orf1)
pass
print ("\n \n RNA1 String: ", orf2)
#list2str
orf2_str =''.join([str(elem) for elem in orf2])
print("\n orf2_str", orf2_str)
start = orf2_str.find('AUG')
# print(start)
protein_string = ""
if start!= -1:
while start+2 < len(orf2_str):
codon = orf2_str[start:start+3]
if genetic_code[codon] == "*":
break
protein_string += genetic_code[codon]
# return protein_string
# print ("\n \n Protein String: ", protein_string)
start+=3
orf2_protein=protein_string
print ("\n longest Forward peptide:", orf2_protein)
###Reverse sequence longest peptide
DNA = rna_sequence.upper()
sequence = DNA
re = []
x = len(sequence)
for i in sequence:
x = x - 1
re.append(sequence[x])
# return ''.join(re)
com = []
for i in re:
if i == "U":
com.append("A")
if i == "A":
com.append("U")
if i == "G":
com.append("C")
if i == "C":
com.append("G")
# return ''.join(com)
rDNA=''.join(com)
print ("\n \n RNA String_Reverse1: ", rDNA)
if (rDNA.find('AUG') != -1):
pass
else:
pass
# return ""
rDNA2= rDNA[1:]
print ("\n \n RNA String_reverse2: ", rDNA2)
if (rDNA2.find('AUG') != -1):
pass
else:
# return ""
pass
rDNA3= rDNA[2:]
print ("\n \n RNA3 String_reverse3: ", rDNA3)
if (rDNA3.find('AUG') != -1):
pass
else:
# return ""
pass
sequence = rDNA[1:]
# Find orf 2
# Find all AUG indexs
start_position = 1
start_indexs = []
stop_indexs = []
for i in range(1, len(sequence), 3):
if sequence[i:i+3] == "AUG":
start_indexs.append(i)
# Find all stop codon indexs
for i in range(1, len(sequence), 3):
stops =["UAA", "UGA", "UAG"]
if sequence[i:i+3] in stops:
stop_indexs.append(i)
orf = []
mark = 0
for i in range(0,len(start_indexs)):
for j in range(0, len(stop_indexs)):
if start_indexs[i] < stop_indexs[j] and start_indexs[i] > mark:
orf.append(sequence[start_indexs[i]:stop_indexs[j]+3])
mark = stop_indexs[j]+3
break
# return orf
reverse_orf2 = orf
print("\nORF2reverse: ",reverse_orf2)
pass
sequence = rDNA[2:]
# Find all ATG indexs
start_position = 2
start_indexs = []
stop_indexs = []
for i in range(2, len(sequence), 3):
if sequence[i:i+3] == "AUG":
start_indexs.append(i)
# Find all stop codon indexs
for i in range(2, len(sequence), 3):
stops =["UAA", "UGA", "UAG"]
if sequence[i:i+3] in stops:
stop_indexs.append(i)
orf = []
mark = 0
start_position = {}
for i in range(0,len(start_indexs)):
for j in range(0, len(stop_indexs)):
if start_indexs[i] < stop_indexs[j] and start_indexs[i] > mark:
orf.append(sequence[start_indexs[i]:stop_indexs[j]+3])
start_position[len(sequence[start_indexs[i]:stop_indexs[j]+3])] = start_indexs[i]
mark = stop_indexs[j]+3
break
# return orf
reverse_orf3 = orf
print("\nORF3reverse: ",reverse_orf3)
pass
sequence = rDNA[0:]
start_position = 0
start_indexs = []
stop_indexs = []
for i in range(0, len(sequence), 3):
if sequence[i:i+3] == "AUG":
start_indexs.append(i)
# Find all stop codon indexs
for i in range(0, len(sequence), 3):
stops =["UAA", "UGA", "UAG"]
if sequence[i:i+3] in stops:
stop_indexs.append(i)
orf = []
mark = 0
start_position = {}
for i in range(0,len(start_indexs)):
for j in range(0, len(stop_indexs)):
if start_indexs[i] < stop_indexs[j] and start_indexs[i] > mark:
orf.append(sequence[start_indexs[i]:stop_indexs[j]+3])
start_position[len(sequence[start_indexs[i]:stop_indexs[j]+3])] = start_indexs[i]
mark = stop_indexs[j]+3
break
# | |
instance of
`collections.namedtuple` with attributes `low` and `high`.
standard_error : float or ndarray
The bootstrap standard error, that is, the sample standard
deviation of the bootstrap distribution
Notes
-----
Elements of the confidence interval may be NaN for ``method='BCa'`` if
the bootstrap distribution is degenerate (e.g. all elements are identical).
In this case, consider using another `method` or inspecting `data` for
indications that other analysis may be more appropriate (e.g. all
observations are identical).
References
----------
    .. [1] B. Efron and R. J. Tibshirani, An Introduction to the Bootstrap,
Chapman & Hall/CRC, Boca Raton, FL, USA (1993)
    .. [2] Nathaniel E. Helwig, "Bootstrap Confidence Intervals",
http://users.stat.umn.edu/~helwig/notes/bootci-Notes.pdf
.. [3] Bootstrapping (statistics), Wikipedia,
https://en.wikipedia.org/wiki/Bootstrapping_%28statistics%29
Examples
--------
Suppose we have sampled data from an unknown distribution.
>>> import numpy as np
>>> rng = np.random.default_rng()
>>> from scipy.stats import norm
>>> dist = norm(loc=2, scale=4) # our "unknown" distribution
>>> data = dist.rvs(size=100, random_state=rng)
    We are interested in the standard deviation of the distribution.
>>> std_true = dist.std() # the true value of the statistic
>>> print(std_true)
4.0
>>> std_sample = np.std(data) # the sample statistic
>>> print(std_sample)
3.9460644295563863
We can calculate a 90% confidence interval of the statistic using
`bootstrap`.
>>> from scipy.stats import bootstrap
>>> data = (data,) # samples must be in a sequence
>>> res = bootstrap(data, np.std, confidence_level=0.9,
... random_state=rng)
>>> print(res.confidence_interval)
ConfidenceInterval(low=3.57655333533867, high=4.382043696342881)
If we sample from the distribution 1000 times and form a bootstrap
confidence interval for each sample, the confidence interval
contains the true value of the statistic approximately 900 times.
>>> n_trials = 1000
>>> ci_contains_true_std = 0
>>> for i in range(n_trials):
... data = (dist.rvs(size=100, random_state=rng),)
... ci = bootstrap(data, np.std, confidence_level=0.9, n_resamples=1000,
... random_state=rng).confidence_interval
... if ci[0] < std_true < ci[1]:
... ci_contains_true_std += 1
>>> print(ci_contains_true_std)
875
Rather than writing a loop, we can also determine the confidence intervals
for all 1000 samples at once.
>>> data = (dist.rvs(size=(n_trials, 100), random_state=rng),)
>>> res = bootstrap(data, np.std, axis=-1, confidence_level=0.9,
... n_resamples=1000, random_state=rng)
>>> ci_l, ci_u = res.confidence_interval
Here, `ci_l` and `ci_u` contain the confidence interval for each of the
``n_trials = 1000`` samples.
>>> print(ci_l[995:])
[3.77729695 3.75090233 3.45829131 3.34078217 3.48072829]
>>> print(ci_u[995:])
[4.88316666 4.86924034 4.32032996 4.2822427 4.59360598]
And again, approximately 90% contain the true value, ``std_true = 4``.
>>> print(np.sum((ci_l < std_true) & (std_true < ci_u)))
900
`bootstrap` can also be used to estimate confidence intervals of
multi-sample statistics, including those calculated by hypothesis
    tests. `scipy.stats.mood` performs Mood's test for equal scale parameters,
and it returns two outputs: a statistic, and a p-value. To get a
confidence interval for the test statistic, we first wrap
`scipy.stats.mood` in a function that accepts two sample arguments,
accepts an `axis` keyword argument, and returns only the statistic.
>>> from scipy.stats import mood
>>> def my_statistic(sample1, sample2, axis):
... statistic, _ = mood(sample1, sample2, axis=-1)
... return statistic
Here, we use the 'percentile' method with the default 95% confidence level.
>>> sample1 = norm.rvs(scale=1, size=100, random_state=rng)
>>> sample2 = norm.rvs(scale=2, size=100, random_state=rng)
>>> data = (sample1, sample2)
>>> res = bootstrap(data, my_statistic, method='basic', random_state=rng)
>>> print(mood(sample1, sample2)[0]) # element 0 is the statistic
-5.521109549096542
>>> print(res.confidence_interval)
ConfidenceInterval(low=-7.255994487314675, high=-4.016202624747605)
The bootstrap estimate of the standard error is also available.
>>> print(res.standard_error)
0.8344963846318795
Paired-sample statistics work, too. For example, consider the Pearson
correlation coefficient.
>>> from scipy.stats import pearsonr
>>> n = 100
>>> x = np.linspace(0, 10, n)
>>> y = x + rng.uniform(size=n)
>>> print(pearsonr(x, y)[0]) # element 0 is the statistic
0.9962357936065914
We wrap `pearsonr` so that it returns only the statistic.
>>> def my_statistic(x, y):
... return pearsonr(x, y)[0]
We call `bootstrap` using ``paired=True``.
Also, since ``my_statistic`` isn't vectorized to calculate the statistic
along a given axis, we pass in ``vectorized=False``.
>>> res = bootstrap((x, y), my_statistic, vectorized=False, paired=True,
... random_state=rng)
>>> print(res.confidence_interval)
ConfidenceInterval(low=0.9950085825848624, high=0.9971212407917498)
"""
# Input validation
args = _bootstrap_iv(data, statistic, vectorized, paired, axis,
confidence_level, n_resamples, batch, method,
random_state)
data, statistic, vectorized, paired, axis = args[:5]
confidence_level, n_resamples, batch, method, random_state = args[5:]
theta_hat_b = []
batch_nominal = batch or n_resamples
for k in range(0, n_resamples, batch_nominal):
batch_actual = min(batch_nominal, n_resamples-k)
# Generate resamples
resampled_data = []
for sample in data:
resample = _bootstrap_resample(sample, n_resamples=batch_actual,
random_state=random_state)
resampled_data.append(resample)
# Compute bootstrap distribution of statistic
theta_hat_b.append(statistic(*resampled_data, axis=-1))
theta_hat_b = np.concatenate(theta_hat_b, axis=-1)
# Calculate percentile interval
alpha = (1 - confidence_level)/2
if method == 'bca':
interval = _bca_interval(data, statistic, axis=-1, alpha=alpha,
theta_hat_b=theta_hat_b, batch=batch)
percentile_fun = _percentile_along_axis
else:
interval = alpha, 1-alpha
def percentile_fun(a, q):
return np.percentile(a=a, q=q, axis=-1)
# Calculate confidence interval of statistic
ci_l = percentile_fun(theta_hat_b, interval[0]*100)
ci_u = percentile_fun(theta_hat_b, interval[1]*100)
if method == 'basic': # see [3]
theta_hat = statistic(*data, axis=-1)
ci_l, ci_u = 2*theta_hat - ci_u, 2*theta_hat - ci_l
return BootstrapResult(confidence_interval=ConfidenceInterval(ci_l, ci_u),
standard_error=np.std(theta_hat_b, ddof=1, axis=-1))
# we'll move permutation_test in here, too, and rename this `_resampling.py`
def _monte_carlo_test_iv(sample, rvs, statistic, vectorized, n_resamples,
batch, alternative, axis):
"""Input validation for `monte_carlo_test`."""
axis_int = int(axis)
if axis != axis_int:
raise ValueError("`axis` must be an integer.")
if vectorized not in {True, False}:
raise ValueError("`vectorized` must be `True` or `False`.")
if not callable(rvs):
raise TypeError("`rvs` must be callable.")
if not callable(statistic):
raise TypeError("`statistic` must be callable.")
if not vectorized:
statistic_vectorized = _vectorize_statistic(statistic)
else:
statistic_vectorized = statistic
sample = np.atleast_1d(sample)
sample = np.moveaxis(sample, axis, -1)
n_resamples_int = int(n_resamples)
if n_resamples != n_resamples_int or n_resamples_int <= 0:
raise ValueError("`n_resamples` must be a positive integer.")
if batch is None:
batch_iv = batch
else:
batch_iv = int(batch)
if batch != batch_iv or batch_iv <= 0:
raise ValueError("`batch` must be a positive integer or None.")
alternatives = {'two-sided', 'greater', 'less'}
alternative = alternative.lower()
if alternative not in alternatives:
raise ValueError(f"`alternative` must be in {alternatives}")
return (sample, rvs, statistic_vectorized, vectorized, n_resamples_int,
batch_iv, alternative, axis_int)
# Result container for `monte_carlo_test`: the observed statistic, the
# p-value computed against the Monte Carlo null, and the sampled null
# distribution itself.
fields = ['statistic', 'pvalue', 'null_distribution']
MonteCarloTestResult = make_dataclass("MonteCarloTestResult", fields)
def monte_carlo_test(sample, rvs, statistic, *, vectorized=False,
n_resamples=9999, batch=None, alternative="two-sided",
axis=0):
r"""
Monte Carlo test that a sample is drawn from a given distribution.
The null hypothesis is that the provided `sample` was drawn at random from
the distribution for which `rvs` generates random variates. The value of
the `statistic` for the given sample is compared against a Monte Carlo null
distribution: the value of the statistic for each of `n_resamples`
samples generated by `rvs`. This gives the p-value, the probability of
observing such an extreme value of the test statistic under the null
hypothesis.
Parameters
----------
sample : array-like
An array of observations.
rvs : callable
Generates random variates from the distribution against which `sample`
will be tested. `rvs` must be a callable that accepts keyword argument
``size`` (e.g. ``rvs(size=(m, n))``) and returns an N-d array sample
of that shape.
statistic : callable
Statistic for which the p-value of the hypothesis test is to be
calculated. `statistic` must be a callable that accepts a sample
(e.g. ``statistic(sample)``) and returns the resulting statistic.
If `vectorized` is set ``True``, `statistic` must also accept a keyword
argument `axis` and be vectorized to compute the statistic along the
provided `axis` of the sample array.
vectorized : bool, default: ``False``
By default, `statistic` is assumed to calculate the statistic only for
a 1D arrays `sample`. If `vectorized` is set ``True``, `statistic` must
also accept a keyword argument `axis` and be vectorized to compute the
statistic along the provided `axis` of an ND sample array. Use of a
vectorized statistic can reduce computation time.
n_resamples : int, default: 9999
Number of random permutations used to approximate the Monte Carlo null
distribution.
batch : int, optional
The number of permutations to process in each call to `statistic`.
Memory usage is O(`batch`*``sample.size[axis]``). Default is
``None``, in which case `batch` equals `n_resamples`.
alternative : {'two-sided', 'less', 'greater'}
The alternative hypothesis for which the p-value is calculated.
For each alternative, the p-value is defined as follows.
- ``'greater'`` : the percentage of the null distribution that is
greater than | |
import copy
import math
from functools import partial
from typing import Any, Callable, List, Optional, Sequence
import torch
from torch import nn, Tensor
from torchvision.ops import StochasticDepth
from .._internally_replaced_utils import load_state_dict_from_url
from ..ops.misc import ConvNormActivation, SqueezeExcitation
from ._utils import _make_divisible
# Public API of this module.
__all__ = [
    "EfficientNet",
    "efficientnet_b0",
    "efficientnet_b1",
    "efficientnet_b2",
    "efficientnet_b3",
    "efficientnet_b4",
    "efficientnet_b5",
    "efficientnet_b6",
    "efficientnet_b7",
]

# Download URLs for pretrained checkpoints, keyed by architecture name;
# consumed by `_efficientnet_model` when ``pretrained=True``.
model_urls = {
    # Weights ported from https://github.com/rwightman/pytorch-image-models/
    "efficientnet_b0": "https://download.pytorch.org/models/efficientnet_b0_rwightman-3dd342df.pth",
    "efficientnet_b1": "https://download.pytorch.org/models/efficientnet_b1_rwightman-533bc792.pth",
    "efficientnet_b2": "https://download.pytorch.org/models/efficientnet_b2_rwightman-bcdf34b7.pth",
    "efficientnet_b3": "https://download.pytorch.org/models/efficientnet_b3_rwightman-cf984f9c.pth",
    "efficientnet_b4": "https://download.pytorch.org/models/efficientnet_b4_rwightman-7eb33cd5.pth",
    # Weights ported from https://github.com/lukemelas/EfficientNet-PyTorch/
    "efficientnet_b5": "https://download.pytorch.org/models/efficientnet_b5_lukemelas-b6417697.pth",
    "efficientnet_b6": "https://download.pytorch.org/models/efficientnet_b6_lukemelas-c76e70fd.pth",
    "efficientnet_b7": "https://download.pytorch.org/models/efficientnet_b7_lukemelas-dcc49843.pth",
}
class MBConvConfig:
    """Configuration of one MBConv stage (Table 1 of the EfficientNet paper).

    Channel counts are scaled by ``width_mult`` and the layer count by
    ``depth_mult`` at construction time.
    """

    def __init__(
        self,
        expand_ratio: float,
        kernel: int,
        stride: int,
        input_channels: int,
        out_channels: int,
        num_layers: int,
        width_mult: float,
        depth_mult: float,
    ) -> None:
        self.expand_ratio = expand_ratio
        self.kernel = kernel
        self.stride = stride
        self.input_channels = self.adjust_channels(input_channels, width_mult)
        self.out_channels = self.adjust_channels(out_channels, width_mult)
        self.num_layers = self.adjust_depth(num_layers, depth_mult)

    def __repr__(self) -> str:
        # Mirrors the constructor arguments (post width/depth scaling).
        return (
            f"{self.__class__.__name__}("
            f"expand_ratio={self.expand_ratio}"
            f", kernel={self.kernel}"
            f", stride={self.stride}"
            f", input_channels={self.input_channels}"
            f", out_channels={self.out_channels}"
            f", num_layers={self.num_layers}"
            ")"
        )

    @staticmethod
    def adjust_channels(channels: int, width_mult: float, min_value: Optional[int] = None) -> int:
        # Scale, then round to a multiple of 8 via the shared helper.
        return _make_divisible(channels * width_mult, 8, min_value)

    @staticmethod
    def adjust_depth(num_layers: int, depth_mult: float):
        # Depth is rounded up so no stage ever loses all of its layers.
        scaled = num_layers * depth_mult
        return int(math.ceil(scaled))
class MBConv(nn.Module):
    """Mobile inverted bottleneck block with squeeze-excitation and
    stochastic depth, as used by EfficientNet."""

    def __init__(
        self,
        cnf: MBConvConfig,
        stochastic_depth_prob: float,
        norm_layer: Callable[..., nn.Module],
        se_layer: Callable[..., nn.Module] = SqueezeExcitation,
    ) -> None:
        super().__init__()

        if not (1 <= cnf.stride <= 2):
            raise ValueError("illegal stride value")

        # The residual shortcut is only valid when spatial size and
        # channel count are both preserved.
        self.use_res_connect = cnf.stride == 1 and cnf.input_channels == cnf.out_channels

        act = nn.SiLU
        expanded_channels = cnf.adjust_channels(cnf.input_channels, cnf.expand_ratio)

        ops: List[nn.Module] = []

        # 1x1 expansion conv (skipped when expand_ratio == 1, i.e. no
        # channel change).
        if expanded_channels != cnf.input_channels:
            ops.append(
                ConvNormActivation(
                    cnf.input_channels,
                    expanded_channels,
                    kernel_size=1,
                    norm_layer=norm_layer,
                    activation_layer=act,
                )
            )

        # Depthwise conv: groups equals the channel count.
        ops.append(
            ConvNormActivation(
                expanded_channels,
                expanded_channels,
                kernel_size=cnf.kernel,
                stride=cnf.stride,
                groups=expanded_channels,
                norm_layer=norm_layer,
                activation_layer=act,
            )
        )

        # Squeeze-and-excitation; the squeeze width derives from the
        # block's input channels, not the expanded width.
        squeeze_channels = max(1, cnf.input_channels // 4)
        ops.append(se_layer(expanded_channels, squeeze_channels, activation=partial(nn.SiLU, inplace=True)))

        # 1x1 linear projection back down (no activation).
        ops.append(
            ConvNormActivation(
                expanded_channels, cnf.out_channels, kernel_size=1, norm_layer=norm_layer, activation_layer=None
            )
        )

        self.block = nn.Sequential(*ops)
        self.stochastic_depth = StochasticDepth(stochastic_depth_prob, "row")
        self.out_channels = cnf.out_channels

    def forward(self, input: Tensor) -> Tensor:
        out = self.block(input)
        if self.use_res_connect:
            # Randomly drop the residual branch, then add the shortcut.
            out = self.stochastic_depth(out)
            out += input
        return out
class EfficientNet(nn.Module):
    """EfficientNet backbone: stem conv, a sequence of MBConv stages,
    a 1x1 head conv, global average pooling, and a linear classifier."""

    def __init__(
        self,
        inverted_residual_setting: List[MBConvConfig],
        dropout: float,
        stochastic_depth_prob: float = 0.2,
        num_classes: int = 1000,
        block: Optional[Callable[..., nn.Module]] = None,
        norm_layer: Optional[Callable[..., nn.Module]] = None,
        **kwargs: Any,
    ) -> None:
        """
        EfficientNet main class
        Args:
            inverted_residual_setting (List[MBConvConfig]): Network structure
            dropout (float): The dropout probability
            stochastic_depth_prob (float): The stochastic depth probability
            num_classes (int): Number of classes
            block (Optional[Callable[..., nn.Module]]): Module specifying inverted residual building block for mobilenet
            norm_layer (Optional[Callable[..., nn.Module]]): Module specifying the normalization layer to use
        """
        super().__init__()
        # Validate the stage configuration before building anything.
        if not inverted_residual_setting:
            raise ValueError("The inverted_residual_setting should not be empty")
        elif not (
            isinstance(inverted_residual_setting, Sequence)
            and all([isinstance(s, MBConvConfig) for s in inverted_residual_setting])
        ):
            raise TypeError("The inverted_residual_setting should be List[MBConvConfig]")
        if block is None:
            block = MBConv
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        layers: List[nn.Module] = []
        # building first layer (stem: 3x3 stride-2 conv from RGB input)
        firstconv_output_channels = inverted_residual_setting[0].input_channels
        layers.append(
            ConvNormActivation(
                3, firstconv_output_channels, kernel_size=3, stride=2, norm_layer=norm_layer, activation_layer=nn.SiLU
            )
        )
        # building inverted residual blocks
        total_stage_blocks = sum([cnf.num_layers for cnf in inverted_residual_setting])
        stage_block_id = 0
        for cnf in inverted_residual_setting:
            stage: List[nn.Module] = []
            for _ in range(cnf.num_layers):
                # copy to avoid modifications. shallow copy is enough
                block_cnf = copy.copy(cnf)
                # overwrite info if not the first conv in the stage
                if stage:
                    block_cnf.input_channels = block_cnf.out_channels
                    block_cnf.stride = 1
                # adjust stochastic depth probability based on the depth of the
                # stage block: drop probability grows linearly with block index
                sd_prob = stochastic_depth_prob * float(stage_block_id) / total_stage_blocks
                stage.append(block(block_cnf, sd_prob, norm_layer))
                stage_block_id += 1
            layers.append(nn.Sequential(*stage))
        # building last several layers (1x1 head conv, 4x channel widening)
        lastconv_input_channels = inverted_residual_setting[-1].out_channels
        lastconv_output_channels = 4 * lastconv_input_channels
        layers.append(
            ConvNormActivation(
                lastconv_input_channels,
                lastconv_output_channels,
                kernel_size=1,
                norm_layer=norm_layer,
                activation_layer=nn.SiLU,
            )
        )
        self.features = nn.Sequential(*layers)
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Sequential(
            nn.Dropout(p=dropout, inplace=True),
            nn.Linear(lastconv_output_channels, num_classes),
        )
        # Weight initialization: Kaiming for convs, unit/zero for norm
        # layers, uniform for the linear head.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode="fan_out")
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                init_range = 1.0 / math.sqrt(m.out_features)
                nn.init.uniform_(m.weight, -init_range, init_range)
                nn.init.zeros_(m.bias)

    def _forward_impl(self, x: Tensor) -> Tensor:
        # features -> global average pool -> flatten -> classifier
        x = self.features(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.classifier(x)
        return x

    def forward(self, x: Tensor) -> Tensor:
        return self._forward_impl(x)
def _efficientnet_conf(width_mult: float, depth_mult: float, **kwargs: Any) -> List[MBConvConfig]:
    """Build the MBConv stage settings shared by every EfficientNet
    variant, scaled by the given width and depth multipliers."""
    bneck_conf = partial(MBConvConfig, width_mult=width_mult, depth_mult=depth_mult)
    # Baseline (B0) stage table; the multipliers rescale it per variant.
    # Columns: expand_ratio, kernel, stride, input_channels, out_channels, num_layers
    stage_settings = [
        (1, 3, 1, 32, 16, 1),
        (6, 3, 2, 16, 24, 2),
        (6, 5, 2, 24, 40, 2),
        (6, 3, 2, 40, 80, 3),
        (6, 5, 1, 80, 112, 3),
        (6, 5, 2, 112, 192, 4),
        (6, 3, 1, 192, 320, 1),
    ]
    return [bneck_conf(*setting) for setting in stage_settings]
def _efficientnet_model(
    arch: str,
    inverted_residual_setting: List[MBConvConfig],
    dropout: float,
    pretrained: bool,
    progress: bool,
    **kwargs: Any,
) -> EfficientNet:
    """Instantiate an EfficientNet and optionally load pretrained weights.

    Raises:
        ValueError: if ``pretrained`` is True but ``arch`` has no
            registered checkpoint URL in ``model_urls``.
    """
    model = EfficientNet(inverted_residual_setting, dropout, **kwargs)
    if not pretrained:
        return model
    url = model_urls.get(arch, None)
    if url is None:
        raise ValueError("No checkpoint is available for model type {}".format(arch))
    state_dict = load_state_dict_from_url(url, progress=progress)
    model.load_state_dict(state_dict)
    return model
def efficientnet_b0(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> EfficientNet:
    """
    Constructs an EfficientNet B0 architecture from
    `"EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks" <https://arxiv.org/abs/1905.11946>`_.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    settings = _efficientnet_conf(width_mult=1.0, depth_mult=1.0, **kwargs)
    return _efficientnet_model("efficientnet_b0", settings, 0.2, pretrained, progress, **kwargs)
def efficientnet_b1(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> EfficientNet:
    """
    Constructs an EfficientNet B1 architecture from
    `"EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks" <https://arxiv.org/abs/1905.11946>`_.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    settings = _efficientnet_conf(width_mult=1.0, depth_mult=1.1, **kwargs)
    return _efficientnet_model("efficientnet_b1", settings, 0.2, pretrained, progress, **kwargs)
def efficientnet_b2(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> EfficientNet:
    """
    Constructs an EfficientNet B2 architecture from
    `"EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks" <https://arxiv.org/abs/1905.11946>`_.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    settings = _efficientnet_conf(width_mult=1.1, depth_mult=1.2, **kwargs)
    return _efficientnet_model("efficientnet_b2", settings, 0.3, pretrained, progress, **kwargs)
def efficientnet_b3(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> EfficientNet:
    """
    Constructs an EfficientNet B3 architecture from
    `"EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks" <https://arxiv.org/abs/1905.11946>`_.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    settings = _efficientnet_conf(width_mult=1.2, depth_mult=1.4, **kwargs)
    return _efficientnet_model("efficientnet_b3", settings, 0.3, pretrained, progress, **kwargs)
def efficientnet_b4(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> EfficientNet:
    """
    Constructs an EfficientNet B4 architecture from
    `"EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks" <https://arxiv.org/abs/1905.11946>`_.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    settings = _efficientnet_conf(width_mult=1.4, depth_mult=1.8, **kwargs)
    return _efficientnet_model("efficientnet_b4", settings, 0.4, pretrained, progress, **kwargs)
def efficientnet_b5(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> EfficientNet:
    """
    Constructs an EfficientNet B5 architecture from
    `"EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks" <https://arxiv.org/abs/1905.11946>`_.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    settings = _efficientnet_conf(width_mult=1.6, depth_mult=2.2, **kwargs)
    # B5 overrides BatchNorm hyper-parameters (eps=0.001, momentum=0.01),
    # unlike the smaller variants which use the defaults.
    return _efficientnet_model(
        "efficientnet_b5",
        settings,
        0.4,
        pretrained,
        progress,
        norm_layer=partial(nn.BatchNorm2d, eps=0.001, momentum=0.01),
        **kwargs,
    )
def efficientnet_b6(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> EfficientNet:
    """
    Constructs an EfficientNet B6 architecture from
    `"EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks" <https://arxiv.org/abs/1905.11946>`_.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    settings = _efficientnet_conf(width_mult=1.8, depth_mult=2.6, **kwargs)
    # B6 overrides BatchNorm hyper-parameters (eps=0.001, momentum=0.01),
    # unlike the smaller variants which use the defaults.
    return _efficientnet_model(
        "efficientnet_b6",
        settings,
        0.5,
        pretrained,
        progress,
        norm_layer=partial(nn.BatchNorm2d, eps=0.001, momentum=0.01),
        **kwargs,
    )
def efficientnet_b7(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> EfficientNet:
"""
Constructs a EfficientNet B7 architecture from
`"EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks" <https://arxiv.org/abs/1905.11946>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
inverted_residual_setting = _efficientnet_conf(width_mult=2.0, depth_mult=3.1, **kwargs)
return _efficientnet_model(
"efficientnet_b7",
inverted_residual_setting,
| |
# -*- coding: utf-8 -*-
"""Developer convenience functions for ibs (detections).
TODO: need to split up into sub modules:
consistency_checks
feasibility_fixes
move the export stuff to dbio
then there are also convenience functions that need to be ordered at least
within this file
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from six.moves import zip, range
from os.path import expanduser, join, abspath
import numpy as np
import vtool as vt
import utool as ut
import cv2
from ibeis.control import controller_inject
import tqdm
# Inject utool functions
(print, rrr, profile) = ut.inject2(__name__, '[other.detectfuncs]')
SAMPLES = 1000
AP_SAMPLE_POINTS = [_ / float(SAMPLES) for _ in range(0, SAMPLES + 1)]
# Must import class before injection
CLASS_INJECT_KEY, register_ibs_method = (
controller_inject.make_ibs_register_decorator(__name__))
def _resize(image, t_width=None, t_height=None, verbose=False):
if verbose:
print('RESIZING WITH t_width = %r and t_height = %r' % (t_width, t_height, ))
height, width = image.shape[:2]
if t_width is None and t_height is None:
return image
elif t_width is not None and t_height is not None:
pass
elif t_width is None:
t_width = (width / height) * float(t_height)
elif t_height is None:
t_height = (height / width) * float(t_width)
t_width, t_height = float(t_width), float(t_height)
t_width, t_height = int(np.around(t_width)), int(np.around(t_height))
assert t_width > 0 and t_height > 0, 'target size too small'
assert t_width <= width * 10 and t_height <= height * 10, 'target size too large (capped at 1000%)'
# interpolation = cv2.INTER_LANCZOS4
interpolation = cv2.INTER_LINEAR
return cv2.resize(image, (t_width, t_height), interpolation=interpolation)
def simple_code(label):
    """Compress a species/viewpoint label into a short code.

    Species substrings are replaced by their codes from SPECIES_MAPPING
    (following nice-name aliases until a concrete code is found), then yaw
    names are replaced via YAWALIAS, longest keys first.
    """
    from ibeis.constants import YAWALIAS, SPECIES_MAPPING
    if label == 'ignore':
        return 'IGNORE'
    for key in SPECIES_MAPPING:
        if key not in label:
            continue
        species_code, species_nice = SPECIES_MAPPING[key]
        # A None code aliases to another entry keyed by the nice name.
        while species_code is None:
            species_code, species_nice = SPECIES_MAPPING[species_nice]
        assert species_code is not None
        label = label.replace(key, species_code)
    # Longest yaw names first so substrings do not clobber longer matches.
    for key in sorted(YAWALIAS.keys(), key=len, reverse=True):
        label = label.replace(key, YAWALIAS[key])
    return label
##########################################################################################
def general_precision_recall_algo(ibs, label_list, confidence_list, category='positive', samples=SAMPLES, **kwargs):
    """Sweep the confidence threshold and collect precision/recall/ROC points.

    For ``samples + 1`` evenly spaced thresholds in [0, 1], counts TP/TN/FP/FN
    of predicting ``category`` when ``threshold <= confidence``, and appends
    one (conf, precision, recall, TPR, FPR) point per threshold (skipping any
    threshold that triggers a zero division).  The curves are seeded with
    their trivial endpoints (conf = -1.0 sentinels).
    """
    def _count_errors(pairs, threshold):
        # Confusion counts at a single threshold.
        tp = tn = fp = fn = 0.0
        for label, confidence in pairs:
            is_positive = (label == category)
            predicted_positive = (threshold <= confidence)
            if is_positive and predicted_positive:
                tp += 1
            elif is_positive:
                fn += 1
            elif predicted_positive:
                fp += 1
            else:
                tn += 1
        return tp, tn, fp, fn

    pairs = list(zip(label_list, confidence_list))
    threshold_list = [step / float(samples) for step in range(0, int(samples) + 1)]
    conf_dict = {threshold: _count_errors(pairs, threshold) for threshold in threshold_list}
    # Trivial endpoints so the plotted curves span the full axes.
    conf_list_ = [-1.0, -1.0]
    pr_list = [1.0, 0.0]
    re_list = [0.0, 1.0]
    tpr_list = [0.0, 1.0]
    fpr_list = [0.0, 1.0]
    for threshold in sorted(conf_dict, reverse=True):
        tp, tn, fp, fn = conf_dict[threshold]
        try:
            pr = tp / (tp + fp)
            re = tp / (tp + fn)
            tpr = tp / (tp + fn)
            fpr = fp / (fp + tn)
        except ZeroDivisionError:
            print('Zero division error (%r) - tp: %r tn: %r fp: %r fn: %r' % (threshold, tp, tn, fp, fn, ))
            continue
        conf_list_.append(threshold)
        pr_list.append(pr)
        re_list.append(re)
        tpr_list.append(tpr)
        fpr_list.append(fpr)
    return conf_list_, pr_list, re_list, tpr_list, fpr_list
def general_interpolate_precision_recall(conf_list, re_list, pr_list):
    """Interpolate a precision-recall curve to be monotonically non-increasing.

    Walking from high recall to low recall, each point's precision is raised
    to the running maximum; whenever the running maximum increases, an extra
    "step corner" point with conf = NaN is inserted so the plotted curve is a
    staircase.  Returns the new (conf, recall, precision) parallel lists.
    """
    out_conf, out_re, out_pr = [], [], []
    best_pr = None
    for recall, conf, precision in sorted(zip(re_list, conf_list, pr_list), reverse=True):
        if best_pr is None or precision > best_pr:
            if best_pr is not None:
                # Insert the vertical segment of the staircase.
                out_conf.append(np.nan)
                out_re.append(recall)
                out_pr.append(best_pr)
            best_pr = precision
        elif precision < best_pr:
            # Clamp to the best precision seen at higher recall.
            precision = best_pr
        out_conf.append(conf)
        out_re.append(recall)
        out_pr.append(precision)
    return out_conf, out_re, out_pr
def general_identify_operating_point(conf_list, x_list, y_list, target=(1.0, 1.0)):
    """Find the curve point(s) closest (Euclidean) to ``target``.

    Ties in distance are kept only when the tying confidence differs from
    every already-kept confidence by more than 0.01.  Returns the parallel
    lists of best confidences, x values, y values, plus the best distance.
    """
    tx, ty = target
    best_dist = np.inf
    best_confs = []
    best_xs = []
    best_ys = []
    for conf, x, y in sorted(zip(conf_list, x_list, y_list)):
        dx = x - tx
        dy = y - ty
        dist = np.sqrt(dx * dx + dy * dy)
        if dist < best_dist:
            best_dist = dist
            best_confs = [conf]
            best_xs = [x]
            best_ys = [y]
        elif dist == best_dist:
            # Keep ties only if their confidence is meaningfully different.
            if all(abs(kept - conf) > 0.01 for kept in best_confs):
                best_confs.append(conf)
                best_xs.append(x)
                best_ys.append(y)
    return best_confs, best_xs, best_ys, best_dist
def general_area_best_conf(conf_list, x_list, y_list, label='Unknown', color='b',
                           marker='o', plot_point=True, interpolate=True,
                           target=(1.0, 1.0), target_recall=None, **kwargs):
    """Plot a P/R (or ROC) curve, compute its area, and pick an operating point.

    With ``interpolate=True`` the curve is staircase-interpolated and the area
    is a sampled average precision (AP); otherwise it is a trapezoidal AUC.
    Plots onto the current matplotlib axes as a side effect.

    Returns (ap, best_conf, tup1, tup2) where tup1 is the operating point
    closest to ``target`` and tup2 is the first point at or beyond
    ``target_recall`` (or None).
    """
    import matplotlib.pyplot as plt
    # Sort the three parallel lists together, keyed by x (recall).
    zipped = list(sorted(zip(x_list, y_list, conf_list)))
    x_list = [_[0] for _ in zipped]
    y_list = [_[1] for _ in zipped]
    conf_list = [_[2] for _ in zipped]
    if interpolate:
        conf_list, x_list, y_list = general_interpolate_precision_recall(
            conf_list,
            x_list,
            y_list
        )
    if interpolate:
        # Sampled AP: for each recall sample point, use the precision of the
        # first curve point at or beyond it.
        # NOTE(review): if x_list is empty, ap_list stays empty and the
        # division below raises ZeroDivisionError — confirm callers never
        # pass empty curves.
        ap_list = []
        for AP_POINT in AP_SAMPLE_POINTS:
            for re, pr in sorted(zip(x_list, y_list)):
                if AP_POINT <= re:
                    ap_list.append(pr)
                    break
        ap = sum(ap_list) / len(ap_list)
    else:
        ap = np.trapz(y_list, x=x_list)
    tup1 = general_identify_operating_point(conf_list, x_list, y_list, target=target)
    best_conf_list, best_x_list, best_y_list, best_length = tup1
    tup2 = None
    if target_recall is not None:
        # First (lowest-recall) real point meeting the recall target;
        # NaN confs are interpolation corners, not real thresholds.
        for x, y, conf in sorted(zip(x_list, y_list, conf_list)):
            if target_recall <= x and not np.isnan(conf):
                tup2 = [conf], [x], [y], None
                break
    if len(best_conf_list) > 1:
        print('WARNING: Multiple best operating points found %r' % (best_conf_list, ))
    assert len(best_conf_list) > 0
    best_conf = best_conf_list[0]
    if interpolate:
        # label = '%s [AP = %0.02f, OP = %0.02f]' % (label, ap * 100.0, best_conf)
        label = '%s [AP = %0.02f]' % (label, ap * 100.0)
    else:
        label = '%s [AUC = %0.02f]' % (label, ap * 100.0, )
    linestyle = '--' if kwargs.get('line_dotted', False) else '-'
    plt.plot(x_list, y_list, color=color, linestyle=linestyle, label=label)
    if plot_point:
        plt.plot(best_x_list, best_y_list, color=color, marker=marker)
    return ap, best_conf, tup1, tup2
def general_confusion_matrix_algo(label_correct_list, label_predict_list,
                                  category_list, category_mapping,
                                  fig_, axes_, fuzzy_dict=None, conf=None,
                                  conf_list=None, size=10, **kwargs):
    """Draw a row-normalized confusion matrix and return accuracy rates.

    When ``conf`` is given, predictions with confidence below it are rerouted
    into a synthetic 'SUP' (suppressed) category.  ``fuzzy_dict`` maps a
    category index to the set of indices considered "close enough" to count
    as fuzzy-correct.  Returns (correct_rate, fuzzy_rate).

    NOTE(review): mutates the caller's ``category_list``, ``category_mapping``
    and ``fuzzy_dict`` in place when ``conf`` is not None.
    """
    # import matplotlib.colors as colors
    import matplotlib.pyplot as plt
    suppressed_label = 'SUP'
    if conf is not None:
        assert conf_list is not None
        category_list.append(suppressed_label)
        index = len(category_list) - 1
        category_mapping[suppressed_label] = index
        if fuzzy_dict is not None:
            fuzzy_dict[index] = set([])
    if category_mapping is not None:
        # Re-order categories by their mapped index so axes are consistent.
        index_list = [category_mapping[category] for category in category_list]
        zipped = list(sorted(zip(index_list, category_list)))
        category_list = [_[1] for _ in zipped]
    # Get the number of categories
    num_categories = len(category_list)
    # Build the confusion matrix
    confusion_matrix = np.zeros((num_categories, num_categories))
    zipped = zip(label_correct_list, label_predict_list)
    suppressed = 0.0
    suppressed_correct = 0.0
    suppressed_fuzzy = 0.0
    for index, (label_correct, label_predict) in enumerate(zipped):
        if conf is not None:
            conf_ = conf_list[index]
            if conf_ < conf:
                # NOTE(review): 'suppressed_correct' counts suppressed
                # MISmatches (label_correct != label_predict) and is later
                # added to the 'correct' tally — presumably crediting
                # suppression of would-be errors; confirm this is intended.
                if label_correct != label_predict:
                    suppressed_correct += 1
                if fuzzy_dict is not None:
                    x = category_mapping[label_correct]
                    y = category_mapping[label_predict]
                    if not (y in fuzzy_dict[x] or x in fuzzy_dict[y]):
                        suppressed_fuzzy += 1
                label_predict = suppressed_label
                suppressed += 1
        # Perform any mapping that needs to be done
        correct_ = category_mapping[label_correct]
        predict_ = category_mapping[label_predict]
        # Add to the confidence matrix
        confusion_matrix[correct_][predict_] += 1
    # Normalize the confusion matrix using the rows
    row_normalizer = np.sum(confusion_matrix, axis=1)
    confusion_normalized = np.array((confusion_matrix.T / row_normalizer).T)
    # Draw the confusion matrix
    res = axes_.imshow(confusion_normalized, cmap=plt.cm.jet,
                       interpolation='nearest')
    correct = suppressed_correct
    fuzzy = suppressed_fuzzy
    total = 0.0
    for x in range(num_categories):
        for y in range(num_categories):
            number = int(confusion_matrix[x][y])
            if x == y:
                correct += number
            if fuzzy_dict is not None and (y in fuzzy_dict[x] or x in fuzzy_dict[y]):
                fuzzy += number
            total += number
            # Write the raw count into each cell of the rendered matrix.
            axes_.annotate(
                str(number), xy=(y, x),
                horizontalalignment='center',
                verticalalignment='center',
                size=size,
            )
    cb = fig_.colorbar(res)  # NOQA
    cb.set_clim(0.0, 1.0)
    plt.xticks(np.arange(num_categories), category_list, rotation=90)
    plt.yticks(np.arange(num_categories), category_list)
    margin_small = 0.1
    margin_large = 0.9
    plt.subplots_adjust(
        left=margin_small,
        right=margin_large,
        bottom=margin_small,
        top=margin_large
    )
    correct_rate = correct / total
    fuzzy_rate = fuzzy / total
    return correct_rate, fuzzy_rate
def general_intersection_over_union(bbox1, bbox2):
    """Return the IoU of two axis-aligned boxes.

    Boxes are dicts with 'xtl', 'ytl', 'xbr', 'ybr' corner coordinates plus
    'width' and 'height' (used for the union areas).  Returns 0.0 when the
    boxes do not overlap.
    """
    overlap_w = min(bbox1['xbr'], bbox2['xbr']) - max(bbox1['xtl'], bbox2['xtl'])
    overlap_h = min(bbox1['ybr'], bbox2['ybr']) - max(bbox1['ytl'], bbox2['ytl'])
    if overlap_w <= 0 or overlap_h <= 0:
        return 0.0
    intersection = overlap_w * overlap_h
    area1 = bbox1['width'] * bbox1['height']
    area2 = bbox2['width'] * bbox2['height']
    return intersection / (area1 + area2 - intersection)
def general_overlap(gt_list, pred_list):
    """Return the (num_gt, num_pred) float32 matrix of pairwise IoU values
    between ground-truth and predicted boxes."""
    overlap = np.zeros((len(gt_list), len(pred_list)), dtype=np.float32)
    for row, gt in enumerate(gt_list):
        for col, pred in enumerate(pred_list):
            overlap[row, col] = general_intersection_over_union(gt, pred)
    return overlap
def general_tp_fp_fn(gt_list, pred_list, min_overlap, **kwargs):
overlap = general_overlap(gt_list, pred_list)
num_gt, num_pred = overlap.shape
if num_gt == 0:
tp = 0.0
fp = num_pred
fn = 0.0
elif num_pred == 0:
tp = 0.0
fp = 0.0
fn = num_gt
else:
pred_index_list = range(num_pred)
gt_index_list = np.argmax(overlap, axis=0)
| |
from typing import Tuple
from uuid import uuid4
from freezegun import freeze_time
from ee.clickhouse.materialized_columns.columns import materialize
from ee.clickhouse.models.event import create_event
from ee.clickhouse.queries import ClickhousePaths
from ee.clickhouse.queries.paths.path_event_query import PathEventQuery
from ee.clickhouse.util import ClickhouseTestMixin
from posthog.constants import (
FUNNEL_PATH_AFTER_STEP,
FUNNEL_PATH_BEFORE_STEP,
FUNNEL_PATH_BETWEEN_STEPS,
INSIGHT_FUNNELS,
PAGEVIEW_EVENT,
SCREEN_EVENT,
)
from posthog.models.filters import Filter, PathFilter
from posthog.models.person import Person
from posthog.queries.test.test_paths import paths_test_factory
from posthog.test.base import test_with_materialized_columns
def _create_event(**kwargs):
    """Insert a ClickHouse event, stamping a fresh UUID for event_uuid."""
    kwargs["event_uuid"] = uuid4()
    create_event(**kwargs)


ONE_MINUTE = 60_000  # 1 minute in milliseconds
class TestClickhousePaths(ClickhouseTestMixin, paths_test_factory(ClickhousePaths, _create_event, Person.objects.create)): # type: ignore
def test_denormalized_properties(self):
materialize("events", "$current_url")
materialize("events", "$screen_name")
query = ClickhousePaths(team=self.team, filter=PathFilter(data={"path_type": PAGEVIEW_EVENT})).get_query()
self.assertNotIn("json", query.lower())
query = ClickhousePaths(team=self.team, filter=PathFilter(data={"path_type": SCREEN_EVENT})).get_query()
self.assertNotIn("json", query.lower())
self.test_current_url_paths_and_logic()
    def test_step_limit(self):
        """``step_limit`` should cap how many steps of each person's path count.

        One person pageviews /1 -> /2 -> /3 -> /4 with gaps of 1, 2 and 3
        minutes; limits of 2, 3 and 4 should yield 1, 2 and 3 edges with the
        matching conversion times.
        """
        with freeze_time("2012-01-01T03:21:34.000Z"):
            Person.objects.create(team_id=self.team.pk, distinct_ids=["fake"])
            _create_event(
                properties={"$current_url": "/1"}, distinct_id="fake", event="$pageview", team=self.team,
            )
        with freeze_time("2012-01-01T03:22:34.000Z"):
            _create_event(
                properties={"$current_url": "/2"}, distinct_id="fake", event="$pageview", team=self.team,
            )
        with freeze_time("2012-01-01T03:24:34.000Z"):
            _create_event(
                properties={"$current_url": "/3"}, distinct_id="fake", event="$pageview", team=self.team,
            )
        with freeze_time("2012-01-01T03:27:34.000Z"):
            _create_event(
                properties={"$current_url": "/4"}, distinct_id="fake", event="$pageview", team=self.team,
            )
        # NOTE(review): the frozen day below is not zero-padded ("2012-01-7");
        # freezegun accepts it, but "2012-01-07" would be more consistent.
        with freeze_time("2012-01-7T03:21:34.000Z"):
            filter = PathFilter(data={"step_limit": 2})
            response = ClickhousePaths(team=self.team, filter=filter).run(team=self.team, filter=filter)
            self.assertEqual(
                response, [{"source": "1_/1", "target": "2_/2", "value": 1, "average_conversion_time": ONE_MINUTE}]
            )
        with freeze_time("2012-01-7T03:21:34.000Z"):
            filter = PathFilter(data={"step_limit": 3})
            response = ClickhousePaths(team=self.team, filter=filter).run(team=self.team, filter=filter)
            self.assertEqual(
                response,
                [
                    {"source": "1_/1", "target": "2_/2", "value": 1, "average_conversion_time": ONE_MINUTE},
                    {"source": "2_/2", "target": "3_/3", "value": 1, "average_conversion_time": 2 * ONE_MINUTE},
                ],
            )
        with freeze_time("2012-01-7T03:21:34.000Z"):
            filter = PathFilter(data={"step_limit": 4})
            response = ClickhousePaths(team=self.team, filter=filter).run(team=self.team, filter=filter)
            self.assertEqual(
                response,
                [
                    {"source": "1_/1", "target": "2_/2", "value": 1, "average_conversion_time": ONE_MINUTE},
                    {"source": "2_/2", "target": "3_/3", "value": 1, "average_conversion_time": 2 * ONE_MINUTE},
                    {"source": "3_/3", "target": "4_/4", "value": 1, "average_conversion_time": 3 * ONE_MINUTE},
                ],
            )
def test_step_conversion_times(self):
Person.objects.create(team_id=self.team.pk, distinct_ids=["fake"])
_create_event(
properties={"$current_url": "/1"},
distinct_id="fake",
event="$pageview",
team=self.team,
timestamp="2012-01-01T03:21:34.000Z",
)
_create_event(
properties={"$current_url": "/2"},
distinct_id="fake",
event="$pageview",
team=self.team,
timestamp="2012-01-01T03:22:34.000Z",
)
_create_event(
properties={"$current_url": "/3"},
distinct_id="fake",
event="$pageview",
team=self.team,
timestamp="2012-01-01T03:24:34.000Z",
)
_create_event(
properties={"$current_url": "/4"},
distinct_id="fake",
event="$pageview",
team=self.team,
timestamp="2012-01-01T03:27:34.000Z",
)
Person.objects.create(team_id=self.team.pk, distinct_ids=["fake2"])
_create_event(
properties={"$current_url": "/1"},
distinct_id="fake2",
event="$pageview",
team=self.team,
timestamp="2012-01-01T03:21:34.000Z",
)
_create_event(
properties={"$current_url": "/2"},
distinct_id="fake2",
event="$pageview",
team=self.team,
timestamp="2012-01-01T03:23:34.000Z",
)
_create_event(
properties={"$current_url": "/3"},
distinct_id="fake2",
event="$pageview",
team=self.team,
timestamp="2012-01-01T03:27:34.000Z",
)
filter = PathFilter(data={"step_limit": 4, "date_from": "2012-01-01", "include_event_types": ["$pageview"]})
response = ClickhousePaths(team=self.team, filter=filter).run(team=self.team, filter=filter)
self.assertEqual(
response,
[
{"source": "1_/1", "target": "2_/2", "value": 2, "average_conversion_time": 1.5 * ONE_MINUTE},
{"source": "2_/2", "target": "3_/3", "value": 2, "average_conversion_time": 3 * ONE_MINUTE},
{"source": "3_/3", "target": "4_/4", "value": 1, "average_conversion_time": 3 * ONE_MINUTE},
],
)
    # this tests to make sure that paths don't get scrambled when there are several similar variations
    def test_path_event_ordering(self):
        """50 users fire step one/two/three in order (every second user also
        fires "step branch"); the aggregated edges must preserve step order
        and per-edge counts (50, 50, 25)."""
        for i in range(50):
            Person.objects.create(distinct_ids=[f"user_{i}"], team=self.team)
            _create_event(event="step one", distinct_id=f"user_{i}", team=self.team, timestamp="2021-05-01 00:00:00")
            _create_event(event="step two", distinct_id=f"user_{i}", team=self.team, timestamp="2021-05-01 00:01:00")
            _create_event(event="step three", distinct_id=f"user_{i}", team=self.team, timestamp="2021-05-01 00:02:00")
            if i % 2 == 0:
                _create_event(
                    event="step branch", distinct_id=f"user_{i}", team=self.team, timestamp="2021-05-01 00:03:00"
                )
        filter = PathFilter(
            data={"date_from": "2021-05-01", "date_to": "2021-05-03", "include_event_types": ["custom_event"]}
        )
        response = ClickhousePaths(team=self.team, filter=filter).run(team=self.team, filter=filter)
        self.assertEqual(
            response,
            [
                {"source": "1_step one", "target": "2_step two", "value": 50, "average_conversion_time": 60000.0},
                {"source": "2_step two", "target": "3_step three", "value": 50, "average_conversion_time": 60000.0},
                {"source": "3_step three", "target": "4_step branch", "value": 25, "average_conversion_time": 60000.0},
            ],
        )
    def _create_sample_data_multiple_dropoffs(self):
        """Fixture: three cohorts of users for the funnel-path tests.

        - users 0-4 (5 people): complete the whole funnel
          (step one -> between_step_1_a/b/c -> step two -> step three).
        - users 5-14 (10 people): reach step two, then fire
          between_step_2_a/b but never step three.
        - users 15-34 (20 people): drop off after step one
          (step dropoff1/2; every second user also fires "step branch").
        """
        for i in range(5):
            Person.objects.create(distinct_ids=[f"user_{i}"], team=self.team)
            _create_event(event="step one", distinct_id=f"user_{i}", team=self.team, timestamp="2021-05-01 00:00:00")
            _create_event(
                event="between_step_1_a", distinct_id=f"user_{i}", team=self.team, timestamp="2021-05-01 00:01:00"
            )
            _create_event(
                event="between_step_1_b", distinct_id=f"user_{i}", team=self.team, timestamp="2021-05-01 00:02:00"
            )
            _create_event(
                event="between_step_1_c", distinct_id=f"user_{i}", team=self.team, timestamp="2021-05-01 00:03:00"
            )
            _create_event(event="step two", distinct_id=f"user_{i}", team=self.team, timestamp="2021-05-01 00:04:00")
            _create_event(event="step three", distinct_id=f"user_{i}", team=self.team, timestamp="2021-05-01 00:05:00")
        for i in range(5, 15):
            Person.objects.create(distinct_ids=[f"user_{i}"], team=self.team)
            _create_event(event="step one", distinct_id=f"user_{i}", team=self.team, timestamp="2021-05-01 00:00:00")
            _create_event(
                event="between_step_1_a", distinct_id=f"user_{i}", team=self.team, timestamp="2021-05-01 00:01:00"
            )
            _create_event(
                event="between_step_1_b", distinct_id=f"user_{i}", team=self.team, timestamp="2021-05-01 00:02:00"
            )
            _create_event(event="step two", distinct_id=f"user_{i}", team=self.team, timestamp="2021-05-01 00:03:00")
            _create_event(
                event="between_step_2_a", distinct_id=f"user_{i}", team=self.team, timestamp="2021-05-01 00:04:20"
            )
            _create_event(
                event="between_step_2_b", distinct_id=f"user_{i}", team=self.team, timestamp="2021-05-01 00:05:40"
            )
        for i in range(15, 35):
            Person.objects.create(distinct_ids=[f"user_{i}"], team=self.team)
            _create_event(event="step one", distinct_id=f"user_{i}", team=self.team, timestamp="2021-05-01 00:00:00")
            _create_event(
                event="step dropoff1", distinct_id=f"user_{i}", team=self.team, timestamp="2021-05-01 00:01:00"
            )
            _create_event(
                event="step dropoff2", distinct_id=f"user_{i}", team=self.team, timestamp="2021-05-01 00:02:00"
            )
            if i % 2 == 0:
                _create_event(
                    event="step branch", distinct_id=f"user_{i}", team=self.team, timestamp="2021-05-01 00:03:00"
                )
    def test_path_by_funnel_after_dropoff(self):
        """Paths taken after dropping off at funnel step 2 (funnel_step=-2).

        The 20 drop-off users (see _create_sample_data_multiple_dropoffs) go
        step one -> dropoff1 -> dropoff2, half continuing to "step branch".
        """
        self._create_sample_data_multiple_dropoffs()
        data = {
            "insight": INSIGHT_FUNNELS,
            "funnel_paths": FUNNEL_PATH_AFTER_STEP,
            "interval": "day",
            "date_from": "2021-05-01 00:00:00",
            "date_to": "2021-05-07 00:00:00",
            "funnel_window_interval": 7,
            "funnel_window_interval_unit": "day",
            # Negative step — presumably selects users who DROPPED at step 2;
            # confirm against PathFilter/funnel documentation.
            "funnel_step": -2,
            "events": [
                {"id": "step one", "order": 0},
                {"id": "step two", "order": 1},
                {"id": "step three", "order": 2},
            ],
        }
        funnel_filter = Filter(data=data)
        path_filter = PathFilter(data=data)
        response = ClickhousePaths(team=self.team, filter=path_filter, funnel_filter=funnel_filter).run()
        self.assertEqual(
            response,
            [
                {"source": "1_step one", "target": "2_step dropoff1", "value": 20, "average_conversion_time": 60000.0},
                {
                    "source": "2_step dropoff1",
                    "target": "3_step dropoff2",
                    "value": 20,
                    "average_conversion_time": 60000.0,
                },
                {
                    "source": "3_step dropoff2",
                    "target": "4_step branch",
                    "value": 10,
                    "average_conversion_time": 60000.0,
                },
            ],
        )
    def test_path_by_funnel_after_step(self):
        """Paths taken after completing funnel step 2 (funnel_step=2).

        Of the 15 users who reach "step two", 10 wander off into
        between_step_2_a/b and 5 convert to "step three".
        """
        self._create_sample_data_multiple_dropoffs()
        data = {
            "insight": INSIGHT_FUNNELS,
            "funnel_paths": FUNNEL_PATH_AFTER_STEP,
            "interval": "day",
            "date_from": "2021-05-01 00:00:00",
            "date_to": "2021-05-07 00:00:00",
            "funnel_window_interval": 7,
            "funnel_window_interval_unit": "day",
            "funnel_step": 2,
            "events": [
                {"id": "step one", "order": 0},
                {"id": "step two", "order": 1},
                {"id": "step three", "order": 2},
            ],
        }
        funnel_filter = Filter(data=data)
        path_filter = PathFilter(data=data)
        response = ClickhousePaths(team=self.team, filter=path_filter, funnel_filter=funnel_filter).run()
        self.assertEqual(
            response,
            [
                {
                    "source": "1_step two",
                    "target": "2_between_step_2_a",
                    "value": 10,
                    "average_conversion_time": 80000.0,
                },
                {
                    "source": "2_between_step_2_a",
                    "target": "3_between_step_2_b",
                    "value": 10,
                    "average_conversion_time": 80000.0,
                },
                {"source": "1_step two", "target": "2_step three", "value": 5, "average_conversion_time": 60000.0},
            ],
        )
def test_path_by_funneL_before_dropoff(self):
self._create_sample_data_multiple_dropoffs()
data = {
"insight": INSIGHT_FUNNELS,
"funnel_paths": FUNNEL_PATH_BEFORE_STEP,
"interval": "day",
"date_from": "2021-05-01 00:00:00",
"date_to": "2021-05-07 00:00:00",
"funnel_window_interval": 7,
"funnel_window_interval_unit": "day",
"funnel_step": -3,
"events": [
{"id": "step one", "order": 0},
{"id": "step two", "order": 1},
{"id": "step three", "order": 2},
],
}
funnel_filter = Filter(data=data)
path_filter = PathFilter(data=data)
response = ClickhousePaths(team=self.team, filter=path_filter, funnel_filter=funnel_filter).run()
self.assertEqual(
response,
[
{
"source": "1_step one",
"target": "2_between_step_1_a",
"value": 10,
"average_conversion_time": 60000.0,
},
{
"source": "2_between_step_1_a",
"target": "3_between_step_1_b",
"value": 10,
"average_conversion_time": 60000.0,
},
{
"source": "3_between_step_1_b",
"target": "4_step two",
"value": 10,
"average_conversion_time": 60000.0,
},
{
"source": "4_step two",
"target": "5_between_step_2_a",
"value": 10,
"average_conversion_time": 80000.0,
},
],
)
    def test_path_by_funnel_before_step(self):
        """Paths leading up to the completion of funnel step 2 (funnel_step=2).

        All 15 users who reach step two go through between_step_1_a/b; 5 of
        them (the full converters) also pass between_step_1_c on the way.
        """
        self._create_sample_data_multiple_dropoffs()
        data = {
            "insight": INSIGHT_FUNNELS,
            "funnel_paths": FUNNEL_PATH_BEFORE_STEP,
            "interval": "day",
            "date_from": "2021-05-01 00:00:00",
            "date_to": "2021-05-07 00:00:00",
            "funnel_window_interval": 7,
            "funnel_window_interval_unit": "day",
            "funnel_step": 2,
            "events": [
                {"id": "step one", "order": 0},
                {"id": "step two", "order": 1},
                {"id": "step three", "order": 2},
            ],
        }
        funnel_filter = Filter(data=data)
        path_filter = PathFilter(data=data)
        response = ClickhousePaths(team=self.team, filter=path_filter, funnel_filter=funnel_filter).run()
        self.assertEqual(
            response,
            [
                {
                    "source": "1_step one",
                    "target": "2_between_step_1_a",
                    "value": 15,
                    "average_conversion_time": 60000.0,
                },
                {
                    "source": "2_between_step_1_a",
                    "target": "3_between_step_1_b",
                    "value": 15,
                    "average_conversion_time": 60000.0,
                },
                {
                    "source": "3_between_step_1_b",
                    "target": "4_step two",
                    "value": 10,
                    "average_conversion_time": 60000.0,
                },
                {
                    "source": "3_between_step_1_b",
                    "target": "4_between_step_1_c",
                    "value": 5,
                    "average_conversion_time": 60000.0,
                },
                {
                    "source": "4_between_step_1_c",
                    "target": "5_step two",
                    "value": 5,
                    "average_conversion_time": 60000.0,
                },
            ],
        )
    def test_path_by_funnel_between_step(self):
        """Paths between funnel steps (FUNNEL_PATH_BETWEEN_STEPS, step 2).

        NOTE(review): the expected edges are byte-identical to
        test_path_by_funnel_before_step — confirm BETWEEN_STEPS is genuinely
        meant to yield the same result for this fixture, or whether this test
        should assert something distinct.
        """
        self._create_sample_data_multiple_dropoffs()
        data = {
            "insight": INSIGHT_FUNNELS,
            "funnel_paths": FUNNEL_PATH_BETWEEN_STEPS,
            "interval": "day",
            "date_from": "2021-05-01 00:00:00",
            "date_to": "2021-05-07 00:00:00",
            "funnel_window_interval": 7,
            "funnel_window_interval_unit": "day",
            "funnel_step": 2,
            "events": [
                {"id": "step one", "order": 0},
                {"id": "step two", "order": 1},
                {"id": "step three", "order": 2},
            ],
        }
        funnel_filter = Filter(data=data)
        path_filter = PathFilter(data=data)
        response = ClickhousePaths(team=self.team, filter=path_filter, funnel_filter=funnel_filter).run()
        self.assertEqual(
            response,
            [
                {
                    "source": "1_step one",
                    "target": "2_between_step_1_a",
                    "value": 15,
                    "average_conversion_time": 60000.0,
                },
                {
                    "source": "2_between_step_1_a",
                    "target": "3_between_step_1_b",
                    "value": 15,
                    "average_conversion_time": 60000.0,
                },
                {
                    "source": "3_between_step_1_b",
                    "target": "4_step two",
                    "value": 10,
                    "average_conversion_time": 60000.0,
                },
                {
                    "source": "3_between_step_1_b",
                    "target": "4_between_step_1_c",
                    "value": 5,
                    "average_conversion_time": 60000.0,
                },
                {
                    "source": "4_between_step_1_c",
                    "target": "5_step two",
                    "value": 5,
                    "average_conversion_time": 60000.0,
                },
            ],
        )
@test_with_materialized_columns(["$current_url"])
def test_paths_end(self):
Person.objects.create(team_id=self.team.pk, distinct_ids=["person_1"])
_create_event(
properties={"$current_url": "/1"},
distinct_id="person_1",
event="$pageview",
team=self.team,
timestamp="2021-05-01 00:01:00",
)
_create_event(
properties={"$current_url": "/2"},
distinct_id="person_1",
event="$pageview",
team=self.team,
timestamp="2021-05-01 00:02:00",
)
_create_event(
properties={"$current_url": "/3"},
distinct_id="person_1",
event="$pageview",
team=self.team,
timestamp="2021-05-01 00:03:00",
)
_create_event(
properties={"$current_url": "/4"},
distinct_id="person_1",
event="$pageview",
team=self.team,
timestamp="2021-05-01 00:04:00",
)
_create_event(
properties={"$current_url": "/5"},
distinct_id="person_1",
event="$pageview",
team=self.team,
timestamp="2021-05-01 00:05:00",
)
_create_event(
properties={"$current_url": "/about"},
distinct_id="person_1",
event="$pageview",
team=self.team,
timestamp="2021-05-01 00:06:00",
)
_create_event(
properties={"$current_url": "/after"},
distinct_id="person_1",
event="$pageview",
team=self.team,
timestamp="2021-05-01 00:07:00",
)
Person.objects.create(team_id=self.team.pk, distinct_ids=["person_2"])
_create_event(
properties={"$current_url": "/5"},
distinct_id="person_2",
event="$pageview",
team=self.team,
timestamp="2021-05-01 00:01:00",
)
_create_event(
properties={"$current_url": "/about"},
distinct_id="person_2",
event="$pageview",
team=self.team,
timestamp="2021-05-01 00:02:00",
)
Person.objects.create(team_id=self.team.pk, distinct_ids=["person_3"])
_create_event(
properties={"$current_url": "/3"},
distinct_id="person_3",
event="$pageview",
team=self.team,
timestamp="2021-05-01 00:01:00",
)
_create_event(
properties={"$current_url": "/4"},
distinct_id="person_3",
event="$pageview",
team=self.team,
timestamp="2021-05-01 00:02:00",
)
_create_event(
properties={"$current_url": "/about"},
distinct_id="person_3",
event="$pageview",
team=self.team,
timestamp="2021-05-01 00:03:00",
)
_create_event(
properties={"$current_url": "/after"},
distinct_id="person_3",
event="$pageview",
team=self.team,
timestamp="2021-05-01 00:04:00",
)
filter = PathFilter(
data={
"path_type": "$pageview",
"end_point": "/about",
"date_from": "2021-05-01 00:00:00",
"date_to": "2021-05-07 00:00:00",
}
)
response = ClickhousePaths(team=self.team, filter=filter).run(team=self.team, filter=filter,)
self.assertEqual(
response,
[
{"source": "1_/2", "target": "2_/3", "value": 1, "average_conversion_time": 60000.0},
{"source": "1_/3", "target": "2_/4", "value": 1, "average_conversion_time": 60000.0},
{"source": "1_/5", "target": "2_/about", "value": 1, "average_conversion_time": 60000.0},
{"source": "2_/3", "target": "3_/4", "value": 1, "average_conversion_time": 60000.0},
{"source": "2_/4", "target": "3_/about", "value": 1, "average_conversion_time": 60000.0},
{"source": | |
string
tals = bytearray()
for chan in tal_channel_data:
for s in chan:
i = int(s)
tals.extend(np.uint8([i % 256, i // 256]))
regex_tal = '([+-]\d+\.?\d*)(\x15(\d+\.?\d*))?(\x14.*?)\x14\x00'
# use of latin-1 because characters are only encoded for the first 256
# code points and utf-8 can triggers an "invalid continuation byte" error
tal_list = re.findall(regex_tal, tals.decode('latin-1'))
events = []
for ev in tal_list:
onset = float(ev[0])
duration = float(ev[2]) if ev[2] else 0
for annotation in ev[3].split('\x14')[1:]:
if annotation:
events.append([onset, duration, annotation])
return events
def _get_edf_info(fname, stim_channel, annot, annotmap, eog, misc, exclude,
                  preload):
    """Extract all the information from the EDF+,BDF file.

    Parses the fixed-layout EDF/BDF header (dates, record counts, per-channel
    names/units/ranges/sample counts), classifies channels (EEG/EOG/MISC/STIM
    and the EDF+ 'EDF Annotations' TAL channel), and assembles both a
    measurement ``info`` (via ``_empty_info``) and an ``edf_info`` dict of
    EDF-specific bookkeeping (data offset, scaling, sample counts, etc.).

    Returns
    -------
    info : instance of Info
        Measurement info (sfreq, channel dicts, filter settings, meas_date).
    edf_info : dict
        EDF-specific fields used later when reading the data records.
    """
    if eog is None:
        eog = []
    if misc is None:
        misc = []
    edf_info = dict()
    edf_info['annot'] = annot
    edf_info['annotmap'] = annotmap
    edf_info['events'] = []

    with open(fname, 'rb') as fid:
        assert(fid.tell() == 0)
        fid.seek(168)  # Seek 8 + 80 bytes for Subject id + 80 bytes for rec id

        # Recording date (dd.mm.yy) and time (hh.mm.ss), 8 ASCII bytes each.
        day, month, year = [int(x) for x in re.findall('(\d+)',
                                                       fid.read(8).decode())]
        hour, minute, sec = [int(x) for x in re.findall('(\d+)',
                                                        fid.read(8).decode())]
        # EDF stores a 2-digit year; <50 is interpreted as 20xx, else 19xx.
        century = 2000 if year < 50 else 1900
        date = datetime.datetime(year + century, month, day, hour, minute, sec)

        edf_info['data_offset'] = header_nbytes = int(fid.read(8).decode())
        subtype = fid.read(44).strip().decode()[:5]
        if len(subtype) > 0:
            edf_info['subtype'] = subtype
        else:
            # Empty reserved field: fall back to the file extension.
            edf_info['subtype'] = os.path.splitext(fname)[1][1:].lower()

        edf_info['n_records'] = n_records = int(fid.read(8).decode())
        # record length in seconds
        record_length = float(fid.read(8).decode())
        if record_length == 0:
            edf_info['record_length'] = record_length = 1.
            warn('Header information is incorrect for record length. Default '
                 'record length set to 1.')
        else:
            edf_info['record_length'] = record_length
        nchan = int(fid.read(4).decode())
        channels = list(range(nchan))
        ch_names = [fid.read(16).strip().decode() for ch in channels]
        # Convert excluded channel names to indices.
        exclude = [ch_names.index(idx) for idx in exclude]
        for ch in channels:
            fid.read(80)  # transducer
        units = [fid.read(8).strip().decode() for ch in channels]
        edf_info['units'] = list()
        edf_info['exclude'] = exclude
        include = list()
        for i, unit in enumerate(units):
            if i in exclude:
                continue
            # uV data is scaled to Volts; anything else passes through as-is.
            if unit == 'uV':
                edf_info['units'].append(1e-6)
            else:
                edf_info['units'].append(1)
            include.append(i)
        ch_names = [ch_names[idx] for idx in include]

        # Per-channel calibration ranges; all read for every channel, then
        # subset to the included ones.
        physical_min = np.array([float(fid.read(8).decode())
                                 for ch in channels])[include]
        edf_info['physical_min'] = physical_min
        physical_max = np.array([float(fid.read(8).decode())
                                 for ch in channels])[include]
        digital_min = np.array([float(fid.read(8).decode())
                                for ch in channels])[include]
        edf_info['digital_min'] = digital_min
        digital_max = np.array([float(fid.read(8).decode())
                                for ch in channels])[include]
        # NOTE(review): the last prefiltering entry is dropped ([:-1]) —
        # presumably to skip the annotation channel; confirm for files
        # without a TAL channel.
        prefiltering = [fid.read(80).strip().decode() for ch in channels][:-1]
        highpass = np.ravel([re.findall('HP:\s+(\w+)', filt)
                             for filt in prefiltering])
        lowpass = np.ravel([re.findall('LP:\s+(\w+)', filt)
                            for filt in prefiltering])

        # number of samples per record
        n_samps = np.array([int(fid.read(8).decode()) for ch
                            in channels])
        edf_info['n_samps'] = n_samps
        n_samps = n_samps[include]

        fid.read(32 * nchan).decode()  # reserved
        assert fid.tell() == header_nbytes

    physical_ranges = physical_max - physical_min
    cals = digital_max - digital_min

    if edf_info['subtype'] in ('24BIT', 'bdf'):
        edf_info['data_size'] = 3  # 24-bit (3 byte) integers
    else:
        edf_info['data_size'] = 2  # 16-bit (2 byte) integers

    # Creates a list of dicts of eeg channels for raw.info
    logger.info('Setting channel info structure...')
    chs = list()

    # Locate the EDF+ Time-stamped Annotation (TAL) channel(s) by name.
    tal_ch_name = 'EDF Annotations'
    tal_chs = np.where(np.array(ch_names) == tal_ch_name)[0]
    if len(tal_chs) > 0:
        if len(tal_chs) > 1:
            warn('Channel names are not unique, found duplicates for: %s. '
                 'Adding running numbers to duplicate channel names.'
                 % tal_ch_name)
        for idx, tal_ch in enumerate(tal_chs, 1):
            ch_names[tal_ch] = ch_names[tal_ch] + '-%s' % idx
        tal_channel = tal_chs
    else:
        tal_channel = None
    edf_info['tal_channel'] = tal_channel

    if tal_channel is not None and stim_channel is not None and not preload:
        raise RuntimeError('%s' % ('EDF+ Annotations (TAL) channel needs to be'
                                   ' parsed completely on loading.'
                                   ' You must set preload parameter to True.'))
    # -1 means "last included channel".
    if stim_channel == -1:
        stim_channel = len(include) - 1
    pick_mask = np.ones(len(ch_names))
    for idx, ch_info in enumerate(zip(ch_names, physical_ranges, cals)):
        ch_name, physical_range, cal = ch_info
        chan_info = {}
        chan_info['cal'] = cal
        chan_info['logno'] = idx + 1
        chan_info['scanno'] = idx + 1
        chan_info['range'] = physical_range
        chan_info['unit_mul'] = 0.
        chan_info['ch_name'] = ch_name
        chan_info['unit'] = FIFF.FIFF_UNIT_V
        chan_info['coord_frame'] = FIFF.FIFFV_COORD_HEAD
        chan_info['coil_type'] = FIFF.FIFFV_COIL_EEG
        chan_info['kind'] = FIFF.FIFFV_EEG_CH
        chan_info['loc'] = np.zeros(12)
        # eog/misc may contain names, positive indices, or negative indices
        # (hence the idx - nchan check).
        if ch_name in eog or idx in eog or idx - nchan in eog:
            chan_info['coil_type'] = FIFF.FIFFV_COIL_NONE
            chan_info['kind'] = FIFF.FIFFV_EOG_CH
            pick_mask[idx] = False
        if ch_name in misc or idx in misc or idx - nchan in misc:
            chan_info['coil_type'] = FIFF.FIFFV_COIL_NONE
            chan_info['kind'] = FIFF.FIFFV_MISC_CH
            pick_mask[idx] = False
        check1 = stim_channel == ch_name
        check2 = stim_channel == idx
        check3 = nchan > 1
        stim_check = np.logical_and(np.logical_or(check1, check2), check3)
        if stim_check:
            # Rename the matched stim channel to the FIF convention.
            chan_info['coil_type'] = FIFF.FIFFV_COIL_NONE
            chan_info['unit'] = FIFF.FIFF_UNIT_NONE
            chan_info['kind'] = FIFF.FIFFV_STIM_CH
            pick_mask[idx] = False
            chan_info['ch_name'] = 'STI 014'
            ch_names[idx] = chan_info['ch_name']
            edf_info['units'][idx] = 1
            if isinstance(stim_channel, str):
                stim_channel = idx
        if tal_channel is not None and idx in tal_channel:
            # The TAL channel carries annotations, not data: no scaling.
            chan_info['range'] = 1
            chan_info['cal'] = 1
            chan_info['coil_type'] = FIFF.FIFFV_COIL_NONE
            chan_info['unit'] = FIFF.FIFF_UNIT_NONE
            chan_info['kind'] = FIFF.FIFFV_MISC_CH
            pick_mask[idx] = False
        chs.append(chan_info)
    edf_info['stim_channel'] = stim_channel

    if any(pick_mask):
        picks = [item for item, mask in zip(range(nchan), pick_mask) if mask]
        edf_info['max_samp'] = max_samp = n_samps[picks].max()
    else:
        edf_info['max_samp'] = max_samp = n_samps.max()

    # sfreq defined as the max sampling rate of eeg
    sfreq = n_samps.max() / record_length
    info = _empty_info(sfreq)
    info['meas_date'] = calendar.timegm(date.utctimetuple())
    info['chs'] = chs

    # Filter settings: used only when every channel reports one and they
    # agree; otherwise the most conservative value is kept with a warning.
    if highpass.size == 0:
        pass
    elif all(highpass):
        if highpass[0] == 'NaN':
            pass  # Placeholder for future use. Highpass set in _empty_info.
        elif highpass[0] == 'DC':
            info['highpass'] = 0.
        else:
            info['highpass'] = float(highpass[0])
    else:
        info['highpass'] = float(np.max(highpass))
        warn('Channels contain different highpass filters. Highest filter '
             'setting will be stored.')

    if lowpass.size == 0:
        pass
    elif all(lowpass):
        if lowpass[0] == 'NaN':
            pass  # Placeholder for future use. Lowpass set in _empty_info.
        else:
            info['lowpass'] = float(lowpass[0])
    else:
        info['lowpass'] = float(np.min(lowpass))
        warn('Channels contain different lowpass filters. Lowest filter '
             'setting will be stored.')

    # Some keys to be consistent with FIF measurement info
    info['description'] = None
    info['buffer_size_sec'] = 1.

    edf_info['nsamples'] = int(n_records * max_samp)

    # These are the conditions under which a stim channel will be interpolated
    if stim_channel is not None and not (annot and annotmap) and \
            tal_channel is None and n_samps[stim_channel] != int(max_samp):
        warn('Interpolating stim channel. Events may jitter.')
    info._update_redundant()
    return info, edf_info
def _read_annot(annot, annotmap, sfreq, data_length):
"""Annotation File Reader.
Parameters
----------
annot : str
Path to annotation file.
annotmap : str
Path to annotation map file containing mapping from label to trigger.
sfreq : float
Sampling frequency.
data_length : int
Length of the data file.
Returns
-------
stim_channel : ndarray
An array containing stimulus trigger events.
"""
pat = '([+/-]\d+.\d+),(\w+)'
annot = open(annot).read()
triggers = re.findall(pat, annot)
times, values = zip(*triggers)
times = [float(time) * sfreq for time in times]
pat = '(\w+):(\d+)'
annotmap = open(annotmap).read()
mappings = re.findall(pat, annotmap)
maps = {}
for mapping in mappings:
maps[mapping[0]] = mapping[1]
triggers = [int(maps[value]) for value in values]
stim_channel = np.zeros(data_length)
for time, trigger in zip(times, triggers):
stim_channel[time] = trigger
return stim_channel
def read_raw_edf(input_fname, montage=None, eog=None, misc=None,
stim_channel=-1, annot=None, annotmap=None, exclude=(),
preload=False, verbose=None):
"""Reader function for EDF+, BDF conversion to FIF.
Parameters
----------
input_fname : str
Path to the EDF+,BDF file.
montage : str | None | instance of Montage
Path or instance of montage containing electrode positions.
If None, sensor locations are (0,0,0). See the documentation of
:func:`mne.channels.read_montage` for more information.
eog : list or tuple
Names of channels or list of indices that should be designated
EOG channels. Values should correspond to the electrodes in the
edf file. Default is None.
misc : list or tuple
Names of channels or list of indices that should be designated
MISC channels. Values should correspond to the electrodes in the
edf file. Default is None.
stim_channel : str | int | None
The channel name or channel index (starting at 0).
-1 corresponds to the last channel (default).
If None, there will be no stim channel added.
annot : str | None
Path to annotation file.
If None, no derived stim channel will be added (for files requiring
annotation file to interpret stim channel).
annotmap : str | None
Path to annotation map file containing mapping from label to trigger.
Must be specified if annot is not None.
exclude : list of str
Channel names to exclude. This can help when reading data with
different sampling rates to avoid unnecessary resampling.
| |
assert InvoiceCollection(session, period_id=p1.id)\
.outstanding_amount == 137.5
assert InvoiceCollection(session, period_id=p2.id)\
.outstanding_amount == 33
assert InvoiceCollection(session).unpaid_count() == 2
assert InvoiceCollection(session).unpaid_count(
excluded_period_ids=(p1.id, )) == 1
# pay all of the second invoice
i2.items[0].paid = True
assert i2.total_amount == 33
assert i2.outstanding_amount == 0
assert i2.paid_amount == 33
assert i2.paid == True
assert InvoiceCollection(session).unpaid_count() == 1
def test_invoice_reference(session, owner, prebooking_period):
    """Each linked payment schema adds exactly one reference to an invoice,
    and re-linking an unchanged schema/config is idempotent."""
    invoices = InvoiceCollection(
        session, user_id=owner.id, period_id=prebooking_period.id)
    # DEFAULT SCHEMA
    # --------------
    i1 = invoices.add()
    # a freshly added invoice already carries one feriennet-v1 reference
    assert len(i1.references) == 1
    assert i1.references[0].reference.startswith('q')
    assert i1.references[0].readable.startswith('Q')
    assert i1.references[0].schema == 'feriennet-v1'
    assert i1.references[0].bucket == 'feriennet-v1'
    # make sure linking to the same schema is idempotent
    invoices.schema.link(session, i1)
    session.refresh(i1)
    assert len(i1.references) == 1
    # ESR
    # ---
    invoices = invoices.for_schema('esr-v1')
    invoices.schema.link(session, i1)
    session.refresh(i1)
    assert len(i1.references) == 2
    # ESR references are 27 digits; the readable form is space-grouped
    assert len(i1.references[1].reference) == 27
    assert ' ' in i1.references[1].readable
    assert i1.references[1].schema == 'esr-v1'
    assert i1.references[1].bucket == 'esr-v1'
    # make sure linking to the same schema is idempotent
    invoices.schema.link(session, i1)
    session.refresh(i1)
    assert len(i1.references) == 2
    # Raiffeisen
    # ----------
    invoices = invoices.for_schema('raiffeisen-v1', schema_config=dict(
        esr_identification_number='123123'
    ))
    invoices.schema.link(session, i1)
    session.refresh(i1)
    assert len(i1.references) == 3
    assert len(i1.references[2].reference) == 27
    # the configured identification number prefixes the reference
    assert i1.references[2].reference.startswith('123123')
    assert i1.references[2].schema == 'raiffeisen-v1'
    assert i1.references[2].bucket.startswith('raiffeisen-v1-')
    # make sure linking the same schema is idempotent
    invoices.schema.link(session, i1)
    session.refresh(i1)
    assert len(i1.references) == 3
    # unless we change the config
    invoices = invoices.for_schema('raiffeisen-v1', schema_config=dict(
        esr_identification_number='321321'
    ))
    invoices.schema.link(session, i1)
    session.refresh(i1)
    assert len(i1.references) == 4
    assert i1.references[3].reference.startswith('321321')
    assert i1.references[3].schema == 'raiffeisen-v1'
    assert i1.references[3].bucket.startswith('raiffeisen-v1-')
    # a differently configured raiffeisen schema gets its own bucket
    assert i1.references[2].bucket != i1.references[3].bucket
def test_invoice_reference_format_esr():
    """ESR references format into space-separated digit groups; checksum
    digits are computed as expected."""
    schema = ESRSchema()
    formatted = '1271 31108 14160 10115 02061'
    # leading zeroes do not affect the formatted representation
    for raw in ('000127131108141601011502061', '127131108141601011502061'):
        assert schema.format(raw) == formatted
    expected_checksums = {
        '96111690000000660000000928': '4',
        '12000000000023447894321689': '9',
    }
    for number, digit in expected_checksums.items():
        assert schema.checksum(number) == digit
def test_invoice_reference_format_feriennet():
    """Feriennet codes render as uppercase, dash-separated blocks."""
    raw_code = 'qeb3afd0e43'
    readable = 'Q-EB3AF-D0E43'
    assert FeriennetSchema().format(raw_code) == readable
def test_invoice_reference_extract_feriennet_schema():
    """Extraction normalises feriennet codes found in free-form text.

    Codes are case-insensitive, tolerate the letter 'o' in place of '0' and
    ignore whitespace/dashes; text without a valid 'Q' code yields None.

    Fix: the original asserted the 'Q-7o171-292FA' case twice verbatim; the
    duplicate was folded into a single table-driven loop.
    """
    extract_code = FeriennetSchema().extract
    # no code present
    assert extract_code('') is None
    assert extract_code('\n asdf') is None
    # wrong prefix is rejected
    assert extract_code('B-70171-292FA') is None
    # all of these normalise to the same code
    cases = (
        'Q-70171-292FA',
        'Q-7o171-292FA',
        'Q- 7o171292FA',
        'Q- 7o171 29 ---- 2FA',
        'Q\n7o171\n292FA',
        'Code: q-70171-292fa',
    )
    for text in cases:
        assert extract_code(text) == 'q70171292fa'
def test_invoice_reference_uniqueness(session, owner, prebooking_period):
    """The same reference may not be linked to an invoice twice."""
    invoices = InvoiceCollection(
        session, user_id=owner.id, period_id=prebooking_period.id)
    # adding the invoice already links the default schema once
    i1 = invoices.add()
    with pytest.raises(IntegrityError):
        # NOTE(review): optimistic=True presumably skips the duplicate check
        # and attempts the insert directly — confirm against schema.link
        invoices.schema.link(session, i1, optimistic=True)
def test_confirm_period(session, owner):
    """Confirming a period resolves booking states and prices bookings:
    occasion cost plus per-booking cost unless the period is all-inclusive."""
    activities = ActivityCollection(session)
    attendees = AttendeeCollection(session)
    periods = PeriodCollection(session)
    occasions = OccasionCollection(session)
    bookings = BookingCollection(session)
    period = periods.add(
        title="Autumn 2016",
        prebooking=(datetime(2016, 9, 1), datetime(2016, 9, 30)),
        booking=(datetime(2016, 9, 30), datetime(2016, 9, 30)),
        execution=(datetime(2016, 10, 1), datetime(2016, 10, 31)),
        active=True)
    period.all_inclusive = False
    period.booking_cost = 10
    sport = activities.add("Sport", username=owner.username)
    o = occasions.add(
        start=datetime(2016, 10, 4, 10),
        end=datetime(2016, 10, 4, 12),
        timezone="Europe/Zurich",
        activity=sport,
        period=period,
        spots=(0, 2))
    o.cost = 20
    a1 = attendees.add(
        user=owner,
        name="<NAME>",
        birth_date=date(2000, 1, 1),
        gender='male')
    a2 = attendees.add(
        user=owner,
        name="<NAME>",
        birth_date=date(2000, 1, 1),
        gender='male')
    transaction.commit()
    b1 = bookings.add(owner, a1, o)
    b2 = bookings.add(owner, a2, o)
    b1.state = 'open'
    b2.state = 'accepted'
    period = periods.query().one()
    period.confirm()
    # occasion cost (20) + per-booking cost (10) = 30
    assert bookings.query().all()[0].cost == 30.0
    assert bookings.query().all()[1].cost == 30.0
    # NOTE(review): confirmation leaves one booking accepted and denies the
    # other — presumably the open one loses; confirm against Period.confirm
    assert sorted([b.state for b in bookings.query()]) == [
        'accepted',
        'denied',
    ]
    transaction.abort()
    # an all-inclusive period does not add the booking cost on top
    period = periods.query().one()
    period.all_inclusive = True
    period.booking_cost = 10
    b1 = bookings.add(owner, a1, o)
    period.confirm()
    assert bookings.query().one().cost == 20.0
def test_cancel_occasion(session, owner):
    """Cancelling an occasion cancels its bookings; once the period is
    confirmed, only accepted bookings are cancelled while blocked ones
    keep their state."""
    activities = ActivityCollection(session)
    attendees = AttendeeCollection(session)
    periods = PeriodCollection(session)
    occasions = OccasionCollection(session)
    bookings = BookingCollection(session)
    period = periods.add(
        title="Autumn 2016",
        prebooking=(datetime(2016, 9, 1), datetime(2016, 9, 30)),
        booking=(datetime(2016, 9, 30), datetime(2016, 9, 30)),
        execution=(datetime(2016, 10, 1), datetime(2016, 10, 31)),
        active=True)
    o1 = occasions.add(
        start=datetime(2016, 10, 4, 10),
        end=datetime(2016, 10, 4, 12),
        timezone="Europe/Zurich",
        activity=activities.add("Sport", username=owner.username),
        period=period,
        spots=(0, 2))
    o2 = occasions.add(
        start=datetime(2016, 10, 4, 10),
        end=datetime(2016, 10, 4, 12),
        timezone="Europe/Zurich",
        activity=activities.add("Science", username=owner.username),
        period=period,
        spots=(0, 2))
    a1 = attendees.add(
        user=owner,
        name="<NAME>",
        birth_date=date(2008, 1, 1),
        gender='male')
    a2 = attendees.add(
        user=owner,
        name="<NAME>",
        birth_date=date(2008, 1, 1),
        gender='male')
    transaction.commit()
    # before confirmation: cancelling wipes all bookings of the occasion
    o1, o2 = occasions.query().all()
    b1 = bookings.add(owner, a1, o1)
    b2 = bookings.add(owner, a2, o1)
    o1.cancel()
    assert b1.state == 'cancelled'
    assert b2.state == 'cancelled'
    assert o1.cancelled
    assert not o2.cancelled
    transaction.abort()
    # after confirmation: only 'accepted' bookings get cancelled
    periods.active().confirmed = True
    o1, o2 = occasions.query().all()
    b1 = bookings.add(owner, a1, o1)
    b2 = bookings.add(owner, a1, o2)
    b1.state = 'accepted'
    b2.state = 'blocked'
    o1.cancel()
    assert b1.state == 'cancelled'
    assert b2.state == 'blocked'
    assert o1.cancelled
    assert not o2.cancelled
def test_no_overlapping_dates(session, collections, prebooking_period, owner):
    """An occasion rejects a second date that overlaps an existing one."""
    period = prebooking_period
    o = collections.occasions.add(
        start=period.execution_start,
        end=period.execution_start + timedelta(hours=2),
        timezone="Europe/Zurich",
        activity=collections.activities.add("Sport", username=owner.username),
        period=period
    )
    # the new date starts inside the existing two hour window
    with pytest.raises(AssertionError):
        collections.occasions.add_date(
            occasion=o,
            start=period.execution_start + timedelta(hours=1),
            end=period.execution_start + timedelta(hours=3),
            timezone="Europe/Zurich"
        )
def test_no_occasion_orphans(session, collections, prebooking_period, owner):
    """Deleting an occasion also deletes its dates (no orphaned rows)."""
    period = prebooking_period
    collections.occasions.add(
        start=period.execution_start,
        end=period.execution_start + timedelta(hours=2),
        timezone="Europe/Zurich",
        activity=collections.activities.add("Sport", username=owner.username),
        period=period
    )
    transaction.commit()
    assert collections.occasions.query().count() == 1
    assert session.query(OccasionDate).count() == 1
    collections.occasions.delete(collections.occasions.query().first())
    transaction.commit()
    # the occasion's date row must have been cascaded away
    assert collections.occasions.query().count() == 0
    assert session.query(OccasionDate).count() == 0
def test_date_changes(session, collections, prebooking_period, owner):
    """The duration classification of an occasion follows its dates, but is
    only recomputed once the session is flushed."""
    # Fixme: Is a flaky test
    period = prebooking_period
    collections.occasions.add(
        start=period.execution_start,
        end=period.execution_start + timedelta(hours=2),
        timezone="Europe/Zurich",
        activity=collections.activities.add("Sport", username=owner.username),
        period=period
    )
    transaction.commit()
    o = collections.occasions.query().first()
    # a two hour occasion counts as half a day
    assert DAYS.has(o.duration, DAYS.half)
    assert not DAYS.has(o.duration, DAYS.full)
    assert not DAYS.has(o.duration, DAYS.many)
    # extending the date has no effect until the change is flushed
    o.dates[0].end += timedelta(hours=6)
    assert DAYS.has(o.duration, DAYS.half)
    assert not DAYS.has(o.duration, DAYS.full)
    assert not DAYS.has(o.duration, DAYS.many)
    session.flush()
    # after the flush the eight hour occasion counts as a full day
    assert not DAYS.has(o.duration, DAYS.half)
    assert DAYS.has(o.duration, DAYS.full)
    assert not DAYS.has(o.duration, DAYS.many)
def test_age_barriers(prebooking_period):
    """Age barriers: 'exact' compares full birth dates against the first
    occasion date, 'year' only compares birth years."""
    period = prebooking_period
    # occasion for 6 to 9 year olds, first date on 2017-07-26
    o = Occasion(age=NumericRange(6, 9), period=period, dates=[
        OccasionDate(
            start=datetime(2017, 7, 26, 10),
            end=datetime(2017, 7, 26, 16),
            timezone='Europe/Zurich'
        ),
        OccasionDate(
            start=datetime(2017, 7, 26, 17),
            end=datetime(2017, 7, 26, 20),
            timezone='Europe/Zurich'
        ),
    ])
    period.age_barrier_type = 'exact'
    # born on/before 2011-07-26 -> at least 6 on the start date
    assert not o.is_too_young(date(2011, 7, 25))
    assert not o.is_too_young(date(2011, 7, 26))
    assert o.is_too_young(date(2011, 7, 27))
    assert o.is_too_young(date(2011, 7, 28))
    # born on/before 2008-07-26 -> already 9 on the start date
    assert o.is_too_old(date(2008, 7, 25))
    assert o.is_too_old(date(2008, 7, 26))
    assert not o.is_too_old(date(2008, 7, 27))
    assert not o.is_too_old(date(2008, 7, 28))
    period.age_barrier_type = 'year'
    # with the 'year' barrier only the birth year matters
    assert not o.is_too_young(date(2011, 1, 1))
    assert not o.is_too_young(date(2011, 12, 31))
    assert o.is_too_young(date(2012, 1, 1))
    assert o.is_too_young(date(2012, 12, 31))
    assert not o.is_too_young(date(2008, 1, 1))
    assert not o.is_too_young(date(2008, 12, 31))
    assert o.is_too_old(date(2007, 1, 1))
    assert o.is_too_old(date(2007, 12, 31))
def test_deadline(session, collections, prebooking_period, owner):
    """By default the period's booking end is the occasion deadline; setting
    deadline_days derives it from the occasion start instead."""
    period = prebooking_period
    start, end = period.execution_start,\
        period.execution_start + timedelta(hours=2)
    occasion = collections.occasions.add(
        start=start,
        end=end,
        timezone="Europe/Zurich",
        activity=collections.activities.add("Sport", username=owner.username),
        period=period
    )
    assert occasion.deadline == period.booking_end.date()
    # NOTE(review): deadline_days=1 yields two days before the start —
    # presumably counted from the day preceding the occasion; confirm
    # against Occasion.deadline
    period.deadline_days = 1
    assert occasion.deadline == (start - timedelta(days=2)).date()
def test_cancellation_deadline(session, collections, prebooking_period, owner):
    """The cancellation deadline is unset by default, derived from
    cancellation_days when given, and a fixed cancellation_date wins."""
    period = prebooking_period
    start, end = period.execution_start,\
        period.execution_start + timedelta(hours=2)
    occasion = collections.occasions.add(
        start=start,
        end=end,
        timezone="Europe/Zurich",
        activity=collections.activities.add("Sport", username=owner.username),
        period=period
    )
    assert occasion.cancellation_deadline is None
    # NOTE(review): like deadline_days, one day yields two days before the
    # start — confirm against Occasion.cancellation_deadline
    period.cancellation_days = 1
    assert occasion.cancellation_deadline == (start - timedelta(days=2)).date()
    # an explicit date takes precedence over cancellation_days
    period.cancellation_date = date(2017, 2, 23)
    assert occasion.cancellation_deadline == date(2017, 2, 23)
def test_prebooking_phases():
    """Prebooking is future/current/past depending on wall time, requires an
    active period, and ends early once the period is confirmed."""
    period = Period()
    period.prebooking_start = date(2017, 5, 1)
    period.prebooking_end = date(2017, 5, 2)
    period.booking_start = date(2017, 5, 3)
    def standardize(string):
        # interpret the literal as Europe/Zurich local time
        dt = datetime.strptime(string, '%Y-%m-%d %H:%M:%S')
        return standardize_date(dt, 'Europe/Zurich')
    with freeze_time(standardize('2017-04-30 23:59:59')):
        # one second before the prebooking window opens
        assert period.is_prebooking_in_future
    with freeze_time(standardize('2017-05-01 00:00:00')):
        assert not period.is_prebooking_in_future
        # only active periods are currently prebooking
        period.active = False
        assert not period.is_currently_prebooking
        period.active = True
        assert period.is_currently_prebooking
    with freeze_time(standardize('2017-05-05 00:00:00')):
        assert not period.is_prebooking_in_future
        assert not period.is_currently_prebooking
        assert period.is_prebooking_in_past
    with freeze_time(standardize('2017-05-02 23:59:59')):
        assert period.is_currently_prebooking
        # confirming the period ends prebooking immediately
        period.confirmed = True
        assert not period.is_currently_prebooking
        assert period.is_prebooking_in_past
def test_publication_request(session, owner):
    """A publication request ties an activity to a period."""
    activities = ActivityCollection(session)
    requests = PublicationRequestCollection(session)
    periods = PeriodCollection(session)
    period = periods.add(
        title="Autumn 2016",
        prebooking=(datetime(2016, 9, 1), datetime(2016, 9, 30)),
        booking=(datetime(2016, 9, 30), datetime(2016, 9, 30)),
        execution=(datetime(2016, 10, 1), datetime(2016, 10, 31)),
        active=True
    )
    activity = activities.add(
        title="Sport",
        username=owner.username,
    )
    request = requests.add(activity=activity, period=period)
    session.flush()
    # reload and check that both relations survived the flush
    request = requests.query().first()
    assert request.activity.title == "Sport"
    assert request.period.title == "Autumn 2016"
def test_archive_period(session, owner):
    """Archiving a period archives activities left without future occasions.

    ``sport`` keeps an occasion in the (unarchived) future period and must
    stay accepted; ``games`` only runs in the archived period and ``empty``
    has no occasions at all, so both get archived.

    Fix: compare the archived flag by truthiness instead of ``== True``
    (flake8 E712).
    """
    activities = ActivityCollection(session)
    occasions = OccasionCollection(session)
    periods = PeriodCollection(session)
    current_period = periods.add(
        title="Autumn 2017",
        prebooking=(datetime(2017, 9, 1), datetime(2017, 9, 30)),
        booking=(datetime(2017, 9, 30), datetime(2017, 9, 30)),
        execution=(datetime(2017, 10, 1), datetime(2017, 10, 31)),
        active=True
    )
    future_period = periods.add(
        title="Winter 2017",
        prebooking=(datetime(2017, 11, 1), datetime(2017, 11, 30)),
        booking=(datetime(2017, 11, 30), datetime(2017, 11, 30)),
        execution=(datetime(2017, 12, 1), datetime(2017, 12, 31)),
        active=False
    )
    sport = activities.add("Sport", username=owner.username)
    games = activities.add("Games", username=owner.username)
    empty = activities.add("Empty", username=owner.username)
    sport.propose().accept()
    games.propose().accept()
    empty.propose().accept()
    occasions.add(
        start=datetime(2017, 10, 4, 13),
        end=datetime(2017, 10, 4, 14),
        timezone="Europe/Zurich",
        meeting_point="Lucerne",
        age=(6, 9),
        spots=(2, 10),
        note="Bring game-face",
        activity=sport,
        period=current_period
    )
    occasions.add(
        start=datetime(2017, 12, 4, 13),
        end=datetime(2017, 12, 4, 14),
        timezone="Europe/Zurich",
        meeting_point="Lucerne",
        age=(6, 9),
        spots=(2, 10),
        note="Bring game-face",
        activity=sport,
        period=future_period
    )
    occasions.add(
        start=datetime(2017, 12, 4, 13),
        end=datetime(2017, 12, 4, 14),
        timezone="Europe/Zurich",
        meeting_point="Lucerne",
        age=(6, 9),
        spots=(2, 10),
        note="Bring game-face",
        activity=games,
        period=current_period
    )
    current_period.confirmed = True
    current_period.finalized = True
    current_period.archive()
    assert current_period.archived
    # sport still has an occasion in the future period, so it stays accepted
    assert sport.state == 'accepted'
    assert games.state == 'archived'
    assert empty.state == 'archived'
def test_activity_filter_toggle():
    """Toggling removes an active tag and adds a missing one."""
    tag_filter = ActivityFilter(tags=['Foo'])
    # toggling the only active tag clears the set
    assert not tag_filter.toggled(tag='Foo').tags
    # toggling a new tag adds it alongside the existing one
    assert tag_filter.toggled(tag='Bar').tags == {'Foo', 'Bar'}
def test_activity_period_filter(scenario):
# an activity fully booked in the previous period...
scenario.add_period(title="Prev", active=False, confirmed=True)
| |
mass - 7 ")
print("Atomic (proton) number - 3")
print("Discovered in 1817")
print("Solid at room temp")
print("")
input("Press enter to return to the main menu ->")
os.system('clear')
MainMenu()
## All element pages share the same flow: clear the screen, print the
## element's fact lines, wait for the user, then return to the main menu.
## The per-element functions below keep their public names so existing
## menu dispatch keeps working; every printed string is unchanged except
## for two data fixes (Boron's proton number, Phosphorus' element number).
def _element_page(*facts):
    """Clear the screen, print one fact per line, wait for enter, then
    return to the main menu."""
    os.system('clear')
    for fact in facts:
        print(fact)
    print("")
    input("Press enter to return to the main menu ->")
    os.system('clear')
    MainMenu()


def Beryllium():
    """Show the Beryllium fact page."""
    _element_page(
        "Be - Beryllium - Element number 4 ",
        "Relative atomic mass - 9 ",
        "Atomic (proton) number - 4 ",
        "Discovered in 1798",
        "Solid at room temperature")


def Boron():
    """Show the Boron fact page."""
    _element_page(
        "B - Boron - Element number 5 ",
        "Relative atomic mass - 11 ",
        # fixed: boron has 5 protons (the header line already said 5)
        "Atomic (proton) number - 5 ",
        "Discovered in 1808",
        "solid at room temperature")


def Carbon():
    """Show the Carbon fact page."""
    _element_page(
        "C - Carbon - Element number 6 ",
        "Relative atomic mass - 12 ",
        "Atomic (proton) number - 6 ",
        "Discovered in 3750 BCE",
        "Solid at room temperature")


def Nitrogen():
    """Show the Nitrogen fact page."""
    _element_page(
        "N - Nitrogen - Element number 7 ",
        "Relative atomic mass - 14 ",
        "Atomic (proton) number - 7 ",
        "Discovered in 1772",
        "Gas at room temperature")


def Oxygen():
    """Show the Oxygen fact page."""
    _element_page(
        "O - Oxygen - Element number 8 ",
        "Relative atomic mass - 16 ",
        "Atomic (proton) number - 8 ",
        "Discovered in 1771",
        "Gas at room temperature")


def Fluorine():
    """Show the Fluorine fact page."""
    _element_page(
        "F - Fluorine - Element number 9 ",
        "Relative atomic mass - 19 ",
        "Atomic (proton) number - 9 ",
        "Discovered in 1810",
        "Gas at room temperature")


def Neon():
    """Show the Neon fact page."""
    _element_page(
        "Ne - Neon - Element number 10 ",
        "Relative atomic mass - 20 ",
        "Atomic (proton) number - 10 ",
        "Discovered in 1897",
        "Gas at room temperature")


def Sodium():
    """Show the Sodium fact page."""
    _element_page(
        "Na - Sodium - Element number 11 ",
        "Relative atomic mass - 23 ",
        "Atomic (proton) number - 11 ",
        "Discovered in 1807",
        "Solid at room temperature")


def Magnesium():
    """Show the Magnesium fact page."""
    _element_page(
        "Mg - Magnesium - Element number 12 ",
        "Relative atomic mass - 24 ",
        "Atomic (proton) number - 12 ",
        "Discovered in 1755",
        "Solid at room temperature")


def Aluminium():
    """Show the Aluminium fact page."""
    _element_page(
        "Al - Aluminium - Element number 13 ",
        "Relative atomic mass - 27 ",
        "Atomic (proton) number - 13 ",
        "Discovered in 1824",
        "Solid at room temperature")


def Silicon():
    """Show the Silicon fact page."""
    _element_page(
        "Si - Silicon - Element number 14 ",
        "Relative atomic mass - 28 ",
        "Atomic (proton) number - 14 ",
        "Discovered in 1824",
        "Solid at room temperature")


def Phosphorus():
    """Show the Phosphorus fact page."""
    _element_page(
        # fixed: phosphorus is element 15 (matches its proton number below)
        "P - Phosphorus - Element number 15 ",
        "Relative atomic mass - 31 ",
        "Atomic (proton) number - 15 ",
        "Discovered in 1669",
        "Solid at room temperature")


def Sulfur():
    """Show the Sulfur fact page."""
    _element_page(
        "S - Sulfur - Element number 16 ",
        "Relative atomic mass - 32 ",
        "Atomic (proton) number - 16 ",
        "Discovered before 2000 BCE",
        "Solid at room temperature")


def Chlorine():
    """Show the Chlorine fact page."""
    _element_page(
        "Cl - Chlorine - Element number 17 ",
        "Relative atomic mass - 35.5 ",
        "Atomic (proton) number - 17 ",
        "Discovered in 1774",
        "Gas at room temperature")


def Argon():
    """Show the Argon fact page."""
    _element_page(
        "Ar - Argon - Element number 18 ",
        "Relative atomic mass - 40 ",
        "Atomic (proton) number - 18 ",
        "Discovered in 1894",
        "Gas at room temperature")


def Potassium():
    """Show the Potassium fact page."""
    _element_page(
        "K - Potassium - Element number 19 ",
        "Relative atomic mass - 39 ",
        "Atomic (proton) number - 19 ",
        "Discovered in 1807",
        "Solid at room temperature")


def Calcium():
    """Show the Calcium fact page."""
    _element_page(
        "Ca - Calcium - Element number 20 ",
        "Relative atomic mass - 40 ",
        "Atomic (proton) number - 20 ",
        "Discovered in 1808",
        "Solid at room temperature")


def Scandium():
    """Show the Scandium fact page."""
    _element_page(
        "Sc - Scandium - Element number 21 ",
        "Relative atomic mass - 45 ",
        "Atomic (proton) number - 21 ",
        "Discovered in 1879",
        "Solid at room temperature")


def Titanium():
    """Show the Titanium fact page."""
    _element_page(
        "Ti - Titanium - Element number 22 ",
        "Relative atomic mass - 48 ",
        "Atomic (proton) number - 22 ",
        "Discovered in 1791",
        "Solid at room temperature")


def Vanadium():
    """Show the Vanadium fact page."""
    _element_page(
        "V - Vanadium - Element number 23 ",
        "Relative atomic mass - 51 ",
        "Atomic (proton) number - 23 ",
        "Discovered in 1801",
        "Solid at room temperature")


def Chromium():
    """Show the Chromium fact page."""
    _element_page(
        "Cr - Chromium - Element number 24 ",
        "Relative atomic mass - 52 ",
        "Atomic (proton) number - 24 ",
        "Discovered in 1798",
        "Solid at room temperature")


def Manganese():
    """Show the Manganese fact page."""
    _element_page(
        "Mn - Manganese - Element number 25 ",
        "Relative atomic mass - 55 ",
        "Atomic (proton) number - 25 ",
        "Discovered in 1774",
        "Solid at room temperature")


def Iron():
    """Show the Iron fact page."""
    _element_page(
        "Fe - Iron - Element number 26 ",
        "Relative atomic mass - 56 ",
        "Atomic (proton) number - 26 ",
        "Discovered approximately in 3500BC",
        "Solid at room temperature")


def Cobalt():
    """Show the Cobalt fact page."""
    _element_page(
        "Co - Cobalt - Element number 27 ",
        "Relative atomic mass - 59 ",
        "Atomic (proton) number - 27 ",
        "Discovered in 1739",
        "Solid at room temperature")


def Nickel():
    """Show the Nickel fact page."""
    _element_page(
        "Ni - Nickel - Element number 28 ",
        "Relative atomic mass - 59 ",
        "Atomic (proton) number - 28 ",
        "Discovered in 1739",
        "Solid at room temperature")


def Copper():
    """Show the Copper fact page."""
    _element_page(
        "Cu - Copper - Element number 29 ",
        "Relative atomic mass - 63.5 ",
        "Atomic (proton) number - 29 ",
        "Discovered in 9000 BC",
        "Solid at room temperature")


def Zinc():
    """Show the Zinc fact page."""
    _element_page(
        "Zn - Zinc - Element number 30 ",
        "Relative atomic mass - 65 ",
        "Atomic (proton) number - 30 ",
        "Discovered in 1746",
        "Solid at room temperature")


def Gallium():
    """Show the Gallium fact page."""
    _element_page(
        "Ga - Gallium - Element number 31 ",
        "Relative atomic mass - 70 ",
        "Atomic (proton) number - 31 ",
        "Discovered in 1875",
        "Solid at room temperature")


def Germanium():
    """Show the Germanium fact page."""
    _element_page(
        "Ge - Germanium - Element number 32 ",
        "Relative atomic mass - 73 ",
        "Atomic (proton) number - 32 ",
        "Discovered in 1886",
        "Solid at room temperature")


def Arsenic():
    """Show the Arsenic fact page."""
    _element_page(
        "As - Arsenic - Element number 33 ",
        "Relative atomic mass - 75 ",
        "Atomic (proton) number - 33 ",
        "Discovered approximately 1250",
        "Solid at room temperature")
def Selenium():
os.system('clear')
print("Se - Selenium - Element number 34 ")
print("Relative atomic mass - 79 ")
print("Atomic (proton) | |
#! /usr/bin/env python
#-------------------------------------------------------------------------------
# Copyright 2006-2012 UT-Battelle, LLC. See LICENSE for more information.
#-------------------------------------------------------------------------------
import sys
import BeautifulSoup
import urllib2
from math import floor, ceil
PLOT = True
PLOT_MULTIPLE_INSTANCES = False # Separate concurrent instances of the same component
PLOT_END_EDGE = False # Plot an edge whenever a task finishes
try:
from pylab import *
except:
PLOT = False
def plot_exec_time(plot_data, used_procs, used_proc_map, task_procs):
    """Render matplotlib figures summarising an IPS run (Python 2 script).

    Figure 1: serial execution periods over wall time (``plot_data``,
    a {wall_time: serial_exec_time} mapping with string keys).
    Figure 2: processor utilisation over time plus its overall average
    (``used_procs``, {wall_time: core_count}).
    Figure 3: stacked per-component core usage (``used_proc_map``,
    {component: {wall_time: core_count}}); ``task_procs`` maps component
    name to cores-per-task and is only read when PLOT_MULTIPLE_INSTANCES
    is enabled.

    NOTE(review): the mapping shapes above are inferred from usage here —
    confirm against get_task_times().
    """
    import numpy as np
    # --- figure 1: serial execution periods ------------------------------
    figure()
    # keys are strings, so sort them numerically
    x = [float(k) for k in sorted(plot_data.keys(), key = float)]
    y = np.array([plot_data[k] for k in sorted(plot_data.keys(), key = float)])
    #plot(x, y)
    vlines(x, [0], y, colors = 'b')
    # l = legend()
    xlabel('Wall Time')
    ylabel('Serial Execution Time')
    title('Serial Execution Periods for IPS Simulation')
    grid(True)
    # --- figure 2: processor utilisation ---------------------------------
    figure()
    sorted_proc_times = sorted(used_procs.keys(), key = float)
    x = [float(k) for k in sorted_proc_times]
    y = [used_procs[k] for k in sorted_proc_times]
    plot(x, y)
    # time-weighted average utilisation (step-function integral / duration)
    area = 0.0
    for k in range(len(sorted_proc_times) - 1):
        area += used_procs[sorted_proc_times[k]] * \
            (float(sorted_proc_times[k+1]) - float(sorted_proc_times[k]))
    average_util = area / (float(sorted_proc_times[-1]) - float(sorted_proc_times[0]))
    # l = legend()
    xlabel('Wall Time')
    ylabel('Processor Count')
    title('Processor Utilization')
    print '==========================='
    # resample the utilisation step function onto a one-second grid
    all_util = {}
    start_point = int(floor(x[0]))
    for index in range(1, len(x)):
        end_point = int(floor(x[index]))
        value = y[index]
        for t in range(start_point, end_point):
            all_util[t] = value
            # print t, value
        start_point = end_point
    print '==========================='
    # moving average over a 3600-sample window (one hour at 1 Hz)
    values = [all_util[k] for k in sorted(all_util.keys(), key = float)]
    window = 3600
    moving_sum = sum(values[0:window])
    moving_ave = {window/2:moving_sum/float(window)}
    for index in range (window/2 + 1, len(values) - window/2):
        # print index, index-window/2
        index_in = index+int(floor(window/2))
        index_out = index-int(floor(window/2)) - 1
        # print index_in, values[index_in], index_out, values[index_out]
        moving_sum += (values[index_in] - values[index_out])
        moving_ave[index] = moving_sum /float(window)
    #for k in sorted(moving_ave.keys(), key = float):
    #    print k, moving_ave[k]
    x2 = [float(k) for k in sorted(moving_ave.keys(), key = float)]
    y2 = [moving_ave[k] for k in sorted(moving_ave.keys(), key = float)]
    plot_label = '%.1fH Moving Ave.' % (float(window/3600.))
    #plot(x2, y2, linewidth=2, label = plot_label)
    grid(True)
    plot([sorted_proc_times[0], sorted_proc_times[-1]], [average_util, average_util],
        linewidth=2, label= 'Average')
    l = plt.legend()
    # --- figure 3: stacked per-component core usage ----------------------
    fig = figure()
    comp_names = used_proc_map.keys()
    comp_name = comp_names[0]
    comp_util = used_proc_map[comp_name]
    all_times = sorted(comp_util.keys())
    # dump a table of per-component core usage over time to stdout
    print '%15s' % ('Time'),
    for comp in comp_names:
        print '%15s' % (comp),
    print
    for t in all_times:
        print '%15.5f' % (t),
        for comp in comp_names:
            print '%15d' % (used_proc_map[comp][t]),
        print
    print task_procs
    x = np.array(all_times)
    y_sum = np.array([0.0 for wall_time in all_times])
    y = {}
    ax1 = fig.add_subplot(111)
    # x0 = np.array([0.0, all_times[-1]])
    # y0 = np.array([total_procs, total_procs])
    # ax1.plot(x0, y0, linestyle = '--', linewidth = 2)
    colors = ["#CC6666", "#1DACD6", "#6E5160"]
    comp_color = {}
    for comp in comp_names:
        print 'plotting for ', comp
        data = used_proc_map[comp]
        y[comp] = np.array([data[wall_time] for wall_time in all_times])
        comp_color[comp] = colors.pop(0)
        y_plot_old = y_sum.copy()
        if (PLOT_MULTIPLE_INSTANCES):
            # draw each concurrent instance of the component as its own band
            print max(y[comp]), task_procs[comp], max(y[comp]) / task_procs[comp]
            max_num_comp_sims = max(y[comp]) / task_procs[comp]
            print max_num_comp_sims + 1
            y_inc = [0] * len(y[comp])
            for i in range(1, max_num_comp_sims + 1):
                # clamp this instance's band to task_procs[comp] cores
                for t in range(len(y_inc)):
                    y_inc[t] = min(y_inc[t] + task_procs[comp], y[comp][t])
                y_inc_array = np.array(y_inc)
                y_plot = y_inc + y_sum
                if(i == 1):
                    # print 'First ', comp
                    ax1.plot(x, y_plot, label = comp, markeredgecolor='c',
                        markerfacecolor=comp_color[comp], color = 'k', linewidth=0.5)
                else:
                    # print 'Next i = ', i, comp
                    ax1.plot(x, y_plot, markeredgecolor='c',
                        markerfacecolor=comp_color[comp], color = 'k', linewidth=0.5)
                plt.fill_between(x, y_plot, y_plot_old, color = comp_color[comp], alpha = 0.5)
                y_plot_old = y_plot
        else:
            # single stacked band per component
            y_plot = y[comp] + y_sum
            ax1.plot(x, y_plot, label = comp, markeredgecolor='c',
                markerfacecolor=comp_color[comp], color = 'k', linewidth=0.5)
            plt.fill_between(x, y_plot, y_plot_old, color = comp_color[comp], alpha = 0.5)
        y_sum = y_plot
    # restyle legend entries as filled squares matching the band colours
    lgd = ax1.legend(numpoints = 2, handletextpad = -1, ncol = 3, loc = 'upper center', fancybox = False,
        mode = None) # , prop = {'size':10})
    lines = lgd.get_lines()
    lgd_texts = lgd.get_texts()
    for i in range(len(lines)):
        l = lines[i]
        comp_name = lgd_texts[i]
        fill_color = comp_color[comp_name.get_text()]
        l.set_linestyle('')
        l.set_marker('s')
        l.set_markersize(12)
        l.set_markevery(2)
        l.set_markerfacecolor(fill_color)
        l.set_alpha(0.5)
        l.set_markeredgecolor('k')
        l.set_markeredgewidth(1.5)
    plt.xlabel('Wall Time (Sec.)')
    plt.ylabel('Cores Used')
    show()
def get_task_times(url_list):
task_data = {}
active_tasks = {}
used_procs = {}
task_usage = {}
task_map = {}
task_start_map = {}
task_end_map = {}
all_task_times = []
all_comp_names = []
task_procs = {}
for url in url_list:
try:
page = urllib2.urlopen(url)
except:
print 'Error retreiving URL ', url
raise
parsed_page = BeautifulSoup.BeautifulSoup(page)
events_table = parsed_page('table')[3]
events = events_table('tr')[1:]
for event in events:
fields = event('td')
field_values = [field.contents[0].strip() for field in fields]
if (field_values[2] == u'IPS_TASK_END'):
#print ' '.join(field_values)
comment = field_values[-1]
comp = field_values[3]
if comp not in all_comp_names:
all_comp_names.append(comp)
comment_fields = comment.split()
raw_task_id = comment_fields[comment_fields.index('task_id') + 2]
phys_stamp = field_values[-2]
wall_time = field_values[-3]
all_task_times.append((wall_time, 'END'))
task_data[wall_time] = 'END'
task_id = url + '|' + raw_task_id
try:
new_task = task_map[task_id]
except KeyError:
new_task = Task(task_id = task_id,
end_time = float(wall_time),
phys_time = float(phys_stamp),
comp_name = comp)
task_map[task_id] = new_task
else:
new_task.end_time = float(wall_time)
try:
task_end_map[wall_time].append(task_id)
except:
task_end_map[wall_time] = [task_id]
#print phys_stamp, comp_task, exec_time
elif (field_values[2] in [u'IPS_LAUNCH_TASK_POOL', u'IPS_LAUNCH_TASK']):
comment = field_values[-1]
comp = field_values[3]
wall_time = field_values[-3]
all_task_times.append((wall_time, 'START'))
comment_fields = comment.split()
raw_task_id = comment_fields[comment_fields.index('task_id') + 2]
task_id = url + '|' + raw_task_id
phys_stamp = field_values[-2]
print comment_fields
if 'mpiexec' in comment_fields:
dash_n_idx = comment_fields.index('-n')
nproc = int(comment_fields[dash_n_idx + 1 ])
else:
try:
aprun = comment_fields.index('aprun')
nproc = int(comment_fields[aprun + 2 ])
except:
raise
try:
new_task = task_map[task_id]
except KeyError:
new_task = Task(task_id = task_id,
nproc = nproc,
start_time = float(wall_time),
phys_time = float(phys_stamp),
comp_name = comp)
task_map[task_id] = new_task
else:
new_task.nproc = nproc
new_task.start_time = wall_time
new_task.phys_time = phys_stamp
try:
task_start_map[wall_time].append(task_id)
except:
task_start_map[wall_time] = [task_id]
if comp not in all_comp_names:
all_comp_names.append(comp)
if comp not in task_procs.keys():
task_procs[comp] = nproc
print comp, task_procs[comp]
elif(field_values[2] == u'IPS_START'):
wall_time = field_values[-3]
all_task_times.append((wall_time, 'IPS_START'))
active_tasks[wall_time] = 0
all_task_times = sorted(all_task_times, key = lambda x: float(x[0]))
print 'wall_time, nproc_started'
for wall_time in sorted(task_start_map.keys(), key = float):
tid_list = task_start_map[wall_time]
print wall_time, [task_map[tid].nproc for tid in tid_list]
print '======================================================'
print 'wall_time, nproc_ended'
for wall_time in sorted(task_end_map.keys(), key = float):
tid_list = task_end_map[wall_time]
print wall_time, [task_map[tid].nproc for tid in tid_list]
print '======================================================'
current_used_procs = 0
active_tasks_count = 0
used_proc_map = {}
cur_util_map = {}
for comp in all_comp_names:
used_proc_map[comp] = {}
cur_util_map[comp] = 0
while True:
try:
(event_time, event) = all_task_times.pop(0)
except IndexError:
break
if (event == 'START'):
tid = task_start_map[event_time].pop(0)
prior_walltime = '%f' % (float(event_time) - 0.00001)
active_tasks[prior_walltime] = active_tasks_count
used_procs[prior_walltime] = current_used_procs
task = task_map[tid]
active_tasks_count += 1
current_used_procs += task.nproc
comp_name = task.comp_name
used_proc_per_comp = used_proc_map[comp_name]
if float(event_time) - 0.00001 not in used_proc_per_comp.keys():
if (PLOT_END_EDGE):
used_proc_per_comp[float(event_time) - 0.00003] = cur_util_map[comp_name]
used_proc_per_comp[float(event_time) - 0.00002] = cur_util_map[comp_name]
used_proc_per_comp[float(event_time) - 0.00001] = cur_util_map[comp_name]
cur_util_map[comp_name] += task.nproc
used_proc_per_comp[float(event_time)] = cur_util_map[comp_name]
for other_comp in all_comp_names:
if comp_name == other_comp:
continue
used_proc_per_comp = used_proc_map[other_comp]
used_proc_per_comp[float(event_time)] = cur_util_map[other_comp]
if (PLOT_END_EDGE):
used_proc_per_comp[float(event_time) - 0.00003] = cur_util_map[other_comp]
used_proc_per_comp[float(event_time) - 0.00002] = cur_util_map[other_comp]
used_proc_per_comp[float(event_time) - 0.00001] = cur_util_map[other_comp]
elif (event == 'END'):
prior_walltime = '%f' % (float(event_time) - 0.00001)
active_tasks[prior_walltime] = active_tasks_count
used_procs[prior_walltime] = current_used_procs
tid = task_end_map[event_time].pop(0)
task = task_map[tid]
active_tasks_count -= 1
current_used_procs -= task.nproc
comp_name = task.comp_name
used_proc_per_comp = used_proc_map[comp_name]
if float(event_time) - 0.00001 not in used_proc_per_comp.keys():
if (PLOT_END_EDGE):
used_proc_per_comp[float(event_time) - 0.00003] = cur_util_map[comp_name]
used_proc_per_comp[float(event_time) - 0.00002] = 0
used_proc_per_comp[float(event_time) - 0.00001] = 0
else:
used_proc_per_comp[float(event_time) - 0.00001] = cur_util_map[comp_name]
cur_util_map[comp_name] -= task.nproc
used_proc_per_comp[float(event_time)] = cur_util_map[comp_name]
for other_comp in all_comp_names:
if comp_name == other_comp:
continue
used_proc_per_comp = used_proc_map[other_comp]
used_proc_per_comp[float(event_time)] = cur_util_map[other_comp]
if (PLOT_END_EDGE):
used_proc_per_comp[float(event_time) - 0.00003] = cur_util_map[other_comp]
used_proc_per_comp[float(event_time) - 0.00002] = cur_util_map[other_comp]
used_proc_per_comp[float(event_time) - 0.00001] = cur_util_map[other_comp]
elif (event == 'IPS_START'):
current_used_procs = 0
active_tasks_count = 0
active_tasks[event_time] = active_tasks_count
used_procs[event_time] = current_used_procs
# print 'Wall Time, Active Tasks, Used Procs'
# for wall_time in sorted(active_tasks.keys(), key = float):
# print wall_time, active_tasks[wall_time], used_procs[wall_time]
print '======================================================'
print ' Task ID, Start time, End time'
for tid in sorted(task_map.keys(), key = lambda x: float(x.split('|')[1])):
task = task_map[tid]
print '%10s %10s %10s' % (tid.split('|')[1], str(task.start_time), str(task.end_time))
index = 0
serial_times = {}
print '=============================================='
print 'Serial Times'
print ' Start Stop Interval'
sorted_walltime = sorted(active_tasks.keys(), key = float)
for i in range(len(sorted_walltime)):
if active_tasks[sorted_walltime[i]] == 0:
try:
index = sorted_walltime[i]
interval = float(sorted_walltime[i+1]) - float(sorted_walltime[i])
if (interval > 0.1):
serial_times[index] = interval
print '%12.3f %12.3f %12.3f' % \
(float(sorted_walltime[i]),
| |
= [
tablename_new + '_' + suffix for suffix in METADATA_TABLE_COLUMN_NAMES
]
id_iter = [key for key in key_old_list]
val_iter = [(key,) for key in key_new_list]
colnames = ('metadata_key',)
self.set(
METADATA_TABLE_NAME, colnames, val_iter, id_iter, id_colname='metadata_key'
)
if invalidate_cache:
self.invalidate_tables_cache()
def drop_table(self, tablename, invalidate_cache=True):
    """
    Drop ``tablename`` from the database and remove its metadata rows.

    Args:
        tablename (str): name of the table to drop.
        invalidate_cache (bool): when True, refresh the cached set of table
            names afterwards; pass False when dropping many tables in a
            batch and invalidate once at the end.
    """
    # BUGFIX: use lazy %-style logging arguments instead of eager string
    # interpolation, so formatting only happens when the record is emitted.
    logger.info('[sql] schema dropping tablename=%r', tablename)
    # Technically insecure call, but all entries are statically inputted by
    # the database's owner, who could delete or alter the entire database
    # anyway.
    operation = f'DROP TABLE IF EXISTS {tablename}'
    if self.uri.startswith('postgresql'):
        # PostgreSQL needs CASCADE to also drop dependent objects (views,
        # foreign-key constraints).
        operation = f'{operation} CASCADE'
    self.executeone(text(operation), [])
    # Delete table's metadata
    key_list = [tablename + '_' + suffix for suffix in METADATA_TABLE_COLUMN_NAMES]
    self.delete(METADATA_TABLE_NAME, key_list, id_colname='metadata_key')
    if invalidate_cache:
        self.invalidate_tables_cache()
def drop_all_tables(self):
    """
    Drop every table in the database except the reserved ``metadata`` table.

    DELETES ALL INFO IN TABLE
    """
    # Force a fresh read of the table list before iterating.
    self._tablenames = None
    for name in self.get_table_names():
        if name == 'metadata':
            # The metadata bookkeeping table itself is preserved.
            continue
        self.drop_table(name, invalidate_cache=False)
    # Rebuild the cached table information once, after all drops.
    self.invalidate_tables_cache()
# ==============
# CONVENIENCE
# ==============
def dump_tables_to_csv(self, dump_dir=None):
    """
    Convenience: write one CSV file per table into ``dump_dir``.

    Defaults to ``<db dir>/CSV_DUMP`` when no directory is given.
    """
    if dump_dir is None:
        dump_dir = join(self.dir_, 'CSV_DUMP')
    ut.ensuredir(dump_dir)
    for name in self.get_table_names():
        csv_text = self.get_table_csv(name)
        ut.writeto(join(dump_dir, name + '.csv'), csv_text)
def get_schema_current_autogeneration_str(self, autogen_cmd=''):
    """Convenience: Autogenerates the most up-to-date database schema
    CommandLine:
        python -m dtool.sql_control --exec-get_schema_current_autogeneration_str
    Example:
        >>> # ENABLE_DOCTEST
        >>> from wbia.dtool.sql_control import *  # NOQA
        >>> from wbia.dtool.example_depcache import testdata_depc
        >>> depc = testdata_depc()
        >>> tablename = 'keypoint'
        >>> db = depc[tablename].db
        >>> result = db.get_schema_current_autogeneration_str('')
        >>> print(result)
    """
    db_version_current = self.get_db_version()
    # Define what tab space we want to save
    tab1 = ' ' * 4
    line_list = []
    # autogen_cmd = 'python -m dtool.DB_SCHEMA --test-test_dbschema
    # --force-incremental-db-update --dump-autogen-schema'
    # File Header
    line_list.append(ut.TRIPLE_DOUBLE_QUOTE)
    line_list.append('AUTOGENERATED ON ' + ut.timestamp('printable'))
    line_list.append('AutogenCommandLine:')
    # TODO: Fix autogen command
    line_list.append(ut.indent(autogen_cmd, tab1))
    line_list.append(ut.TRIPLE_DOUBLE_QUOTE)
    line_list.append('# -*- coding: utf-8 -*-')
    # line_list.append('from wbia import constants as const')
    line_list.append('\n')
    line_list.append('# =======================')
    line_list.append('# Schema Version Current')
    line_list.append('# =======================')
    line_list.append('\n')
    line_list.append('VERSION_CURRENT = %s' % ut.repr2(db_version_current))
    line_list.append('\n')
    line_list.append('def update_current(db, ibs=None):')
    # Function content: emit each table's add_table call, separated by a
    # blank line (no separator before the first table).
    first = True
    for tablename in sorted(self.get_table_names()):
        if first:
            first = False
        else:
            # CLEANUP: was the no-op "line_list.append('%s' % '')"
            line_list.append('')
        line_list += self.get_table_autogen_str(tablename)
        # CLEANUP: removed a dead ``pass`` statement that followed here.
    line_list.append('')
    return '\n'.join(line_list)
def get_table_constraints(self, tablename):
    """
    Return the table's constraint strings as a list, or None when unset.

    TODO: use coldef_list with table_autogen_dict instead
    """
    raw = self.metadata[tablename].constraint
    if raw is None:
        return None
    return raw.split(';')
def get_coldef_list(self, tablename):
    """
    Build SQL column definition pairs for ``tablename``.

    Returns:
        list of (str, str): each tuple is (col_name, col_type) where
        col_type also carries PRIMARY KEY / NOT NULL / DEFAULT clauses.
    """
    coldef_list = []
    for column in self.get_columns(tablename):
        # Column record layout: (cid, name, type, notnull, dflt_value, pk)
        col_name = column.name
        col_type = str(column[2])
        if column[5] == 1:
            col_type += ' PRIMARY KEY'
        elif column[3] == 1:
            col_type += ' NOT NULL'
        if column[4] is not None:
            # CLEANUP: the file is Python-3 only (it uses f-strings), so
            # ``six.text_type`` is just ``str``.
            default_value = str(column[4])
            # HACK (LEOPARD_TURK): only add parens around the default when
            # the value already contains parens; in the future all default
            # values should contain parens.
            if '(' not in default_value:
                col_type += ' DEFAULT %s' % default_value
            else:
                col_type += ' DEFAULT (%s)' % default_value
        coldef_list.append((col_name, col_type))
    return coldef_list
@profile
def get_table_autogen_dict(self, tablename):
    r"""
    Gather everything needed to regenerate ``tablename``'s add_table call.

    Args:
        tablename (str):
    Returns:
        dict: autogen_dict
    CommandLine:
        python -m dtool.sql_control get_table_autogen_dict
    Example:
        >>> # ENABLE_DOCTEST
        >>> from wbia.dtool.sql_control import *  # NOQA
        >>> db = SQLDatabaseController('sqlite:///', 'testing')
        >>> tablename = 'dummy_table'
        >>> db.add_table(tablename, (
        >>>     ('rowid', 'INTEGER PRIMARY KEY'),
        >>>     ('value1', 'TEXT'),
        >>>     ('value2', 'TEXT NOT NULL'),
        >>>     ('value3', 'TEXT DEFAULT 1'),
        >>>     ('time_added', "INTEGER DEFAULT (CAST(STRFTIME('%s', 'NOW', 'UTC') AS INTEGER))")
        >>> ))
        >>> autogen_dict = db.get_table_autogen_dict(tablename)
        >>> result = ut.repr2(autogen_dict, nl=2)
        >>> print(result)
    """
    # Evaluate the entries in a fixed order so the resulting ordered dict
    # is reproducible.
    entries = (
        ('tablename', tablename),
        ('coldef_list', self.get_coldef_list(tablename)),
        ('docstr', self.get_table_docstr(tablename)),
        ('superkeys', self.get_table_superkey_colnames(tablename)),
        ('dependson', self.metadata[tablename].dependson),
    )
    autogen_dict = ut.odict()
    for key, value in entries:
        autogen_dict[key] = value
    return autogen_dict
def get_table_autogen_str(self, tablename):
    r"""
    Render source lines for a ``db.add_table(...)`` call recreating ``tablename``.

    Args:
        tablename (str):
    Returns:
        list: source-code lines (note: a list of str, not one string)
    CommandLine:
        python -m dtool.sql_control get_table_autogen_str
    Example:
        >>> # ENABLE_DOCTEST
        >>> from wbia.dtool.sql_control import *  # NOQA
        >>> db = SQLDatabaseController('sqlite:///', 'testing')
        >>> tablename = 'dummy_table'
        >>> db.add_table(tablename, (
        >>>     ('rowid', 'INTEGER PRIMARY KEY'),
        >>>     ('value', 'TEXT'),
        >>>     ('time_added', "INTEGER DEFAULT (CAST(STRFTIME('%s', 'NOW', 'UTC') AS INTEGER))")
        >>> ))
        >>> result = '\n'.join(db.get_table_autogen_str(tablename))
        >>> print(result)
    """
    line_list = []
    # Generated code is indented one level for the call, two for arguments.
    tab1 = ' ' * 4
    tab2 = ' ' * 8
    line_list.append(tab1 + 'db.add_table(%s, [' % (ut.repr2(tablename),))
    # column_list = db.get_columns(tablename)
    # colnamerepr_list = [ut.repr2(six.text_type(column[1]))
    #                     for column in column_list]
    autogen_dict = self.get_table_autogen_dict(tablename)
    coldef_list = autogen_dict['coldef_list']
    # Pad column names so the generated tuples line up (minimum width 32).
    max_colsize = max(32, 2 + max(map(len, ut.take_column(coldef_list, 0))))
    # for column, colname_repr in zip(column_list, colnamerepr_list):
    for col_name, col_type in coldef_list:
        name_part = ('%s,' % ut.repr2(col_name)).ljust(max_colsize)
        type_part = ut.repr2(col_type)
        line_list.append(tab2 + '(%s%s),' % (name_part, type_part))
    line_list.append(tab1 + '],')
    superkeys = self.get_table_superkey_colnames(tablename)
    docstr = self.get_table_docstr(tablename)
    # Append metadata values
    # These metadata keys get custom formatting below instead of the
    # generic suffix loop.
    specially_handled_table_metakeys = [
        'docstr',
        'superkeys',
        # 'constraint',
        'dependsmap',
    ]

    def quote_docstr(docstr):
        # Wrap, indent, and surround the docstring with triple single quotes.
        if docstr is None:
            return None
        import textwrap
        wraped_docstr = '\n'.join(textwrap.wrap(ut.textblock(docstr)))
        indented_docstr = ut.indent(wraped_docstr.strip(), tab2)
        _TSQ = ut.TRIPLE_SINGLE_QUOTE
        quoted_docstr = _TSQ + '\n' + indented_docstr + '\n' + tab2 + _TSQ
        return quoted_docstr

    line_list.append(tab2 + 'docstr=%s,' % quote_docstr(docstr))
    line_list.append(tab2 + 'superkeys=%s,' % (ut.repr2(superkeys),))
    # Hack out docstr and superkeys for now
    for suffix in METADATA_TABLE_COLUMN_NAMES:
        if suffix in specially_handled_table_metakeys:
            continue
        key = tablename + '_' + suffix
        val = getattr(self.metadata[tablename], suffix)
        logger.info(key)
        if val is not None:
            line_list.append(tab2 + '%s=%s,' % (suffix, ut.repr2(val)))
    dependsmap = self.metadata[tablename].dependsmap
    if dependsmap is not None:
        # Render the dependsmap dict aligned on ':' for readability.
        _dictstr = ut.indent(ut.repr2(dependsmap, nl=1), tab2)
        depends_map_dictstr = ut.align(_dictstr.lstrip(' '), ':')
        # hack for formatting
        depends_map_dictstr = depends_map_dictstr.replace(tab1 + '}', '}')
        line_list.append(tab2 + 'dependsmap=%s,' % (depends_map_dictstr,))
    line_list.append(tab1 + ')')
    return line_list
def dump_schema(self):
    """
    Convenience: write a human-readable schema summary to the app resource
    directory, then open that directory in a file browser.

    NOTE: This function is semi-obsolete because of the auto-generated
    current schema file. Use dump_schema_current_autogeneration instead for
    all purposes except for parsing out the database schema or for concise
    visual representation.
    """
    app_resource_dir = ut.get_app_resource_dir('wbia')
    dump_fpath = join(app_resource_dir, 'schema.txt')
    with open(dump_fpath, 'w') as file_:
        for tablename in sorted(self.get_table_names()):
            file_.write(tablename + '\n')
            for column in self.get_columns(tablename):
                # Column record layout: (cid, name, type, notnull, dflt, pk)
                fields = (
                    str(column[1]).ljust(30),
                    str(column[2]).ljust(10),
                    ('ALLOW NULL' if column[3] == 1 else 'NOT NULL').ljust(12),
                    str(column[4]).ljust(10),
                    'KEY' if column[5] == 1 else '',
                )
                file_.write('\t%s%s%s%s%s\n' % fields)
    ut.view_directory(app_resource_dir)
def invalidate_tables_cache(self):
    """Invalidates the controller's cache of table names and objects
    Resets the caches and/or repopulates them.
    """
    # Drop the cached name set and the SQLAlchemy reflection metadata ...
    self._tablenames = None
    self._sa_metadata = sqlalchemy.MetaData()
    # ... then eagerly repopulate the table-name cache.
    self.get_table_names()
def get_table_names(self, lazy=False):
    """Convenience: return the set of table names in the database.

    When ``lazy`` is True a previously cached result is reused; otherwise
    the catalog is re-queried and the cache refreshed.
    """
    if not lazy or self._tablenames is None:
        # Each supported backend exposes its catalog differently.
        dialect = self._engine.dialect.name
        if dialect == 'sqlite':
            stmt = "SELECT name FROM sqlite_master WHERE type='table'"
            params = {}
        elif dialect == 'postgresql':
            stmt = text(
                """\
                SELECT table_name FROM information_schema.tables
                WHERE table_type='BASE TABLE'
                AND table_schema = :schema"""
            )
            params = {'schema': self.schema_name}
        else:
            raise RuntimeError(f'Unknown dialect {dialect}')
        # NOTE(review): passing params as **kwargs matches SQLAlchemy 1.x
        # Connection.execute; 2.0 expects a single parameter dict -- confirm
        # the pinned SQLAlchemy version.
        with self.connect() as conn:
            result = conn.execute(stmt, **params)
            tablename_list = result.fetchall()
            self._tablenames = {str(tablename[0]) for tablename in tablename_list}
    return self._tablenames
@property
def tablenames(self):
    """Set of table names currently in the database (may be cached)."""
    names = self.get_table_names()
    return names
def has_table(self, tablename, colnames=None, lazy=True):
    """
    Check whether ``tablename`` exists in the database.

    ``colnames`` is accepted for backward compatibility but is not used.
    """
    known_tables = self.get_table_names(lazy=lazy)
    return tablename in known_tables
@profile
def get_table_superkey_colnames(self, tablename):
    """
    Return the superkeys registered for ``tablename``.

    Actually returns a list of tuples; the name should eventually change to
    get_table_superkey_colnames_list.

    Args:
        tablename (str):
    Returns:
        list: superkeys
    CommandLine:
        python -m dtool.sql_control --test-get_table_superkey_colnames
        python -m wbia --tf get_table_superkey_colnames --tablename=contributors
        python -m wbia --tf get_table_superkey_colnames --db PZ_Master0 --tablename=annotations
        python -m wbia --tf get_table_superkey_colnames --db PZ_Master0 --tablename=contributors  # NOQA
    Example0:
        >>> # ENABLE_DOCTEST
        >>> from wbia.dtool.sql_control import *  # NOQA
        >>> from wbia.dtool.example_depcache import testdata_depc
        >>> depc = testdata_depc()
        >>> db = depc['chip'].db
        >>> superkeys = db.get_table_superkey_colnames('chip')
        >>> result = ut.repr2(superkeys, nl=False)
        >>> print(result)
        [('dummy_annot_rowid', 'config_rowid')]
    """
    known_tables = self.get_table_names(lazy=True)
    assert tablename in known_tables, (
        'tablename=%r is not a part of this database' % (tablename,)
    )
    superkeys = self.metadata[tablename].superkeys
    return [] if superkeys is None else superkeys
def get_table_primarykey_colnames(self, tablename):
    """Return a tuple with the names of the table's primary-key columns."""
    pk_names = []
    for column in self.get_columns(tablename):
        # Column record layout: (cid, name, type, notnull, dflt_value, pk)
        if column[5]:
            pk_names.append(column[1])
    return tuple(pk_names)
def get_table_docstr(self, tablename):
r"""
CommandLine:
python -m dtool.sql_control --exec-get_table_docstr
Example0:
>>> # ENABLE_DOCTEST
>>> from wbia.dtool.sql_control import * # NOQA
>>> from wbia.dtool.example_depcache import testdata_depc
>>> depc = testdata_depc()
>>> tablename = 'keypoint'
>>> db = depc[tablename].db
>>> result = db.get_table_docstr(tablename)
>>> print(result)
Used to store | |
To ease the presentation
some names are abbreivated:
.. seqdiag::
diagram {
activation = none;
=== Schedule "block_write" ===
demote --> executor [ label = "schedule(block_write)" ];
demote <-- executor;
=== Execute "block_write" and schedule "wait_slaves" ===
executor -> block_write [ label = "execute(block_write)" ];
block_write --> executor [ label = "schedule(wait_slaves)" ];
block_write <-- executor;
executor <- block_write;
=== Execute "wait_slaves" ===
executor -> wait_slaves [ label = "execute(wait_slaves)" ];
wait_slaves --> executor;
wait_slaves <-- executor;
executor <- wait_slaves;
}
"""
procedures = _events.trigger(
BLOCK_WRITE_DEMOTE, self.get_lockable_objects(), group_id,
update_only
)
return self.wait_for_procedures(procedures, synchronous)
@_events.on_event(FIND_CANDIDATE_SWITCH)
def _find_candidate_switch(group_id):
    """Find the best slave to replace the current master.

    Chains into the CHECK_CANDIDATE_SWITCH step with the chosen slave.
    """
    candidate_uuid = _do_find_candidate(group_id, FIND_CANDIDATE_SWITCH)
    _events.trigger_within_procedure(
        CHECK_CANDIDATE_SWITCH, group_id, candidate_uuid
    )
def _do_find_candidate(group_id, event):
    """Find the best candidate in a group that may be used to replace the
    current master if there is any.

    It chooses the slave that has processed more transactions and may become a
    master, e.g. has the binary log enabled. This function does not consider
    purged transactions and delays in the slave while picking up a slave.

    :param group_id: Group's id from where a candidate will be chosen.
    :param event: FIND_CANDIDATE_SWITCH or FIND_CANDIDATE_FAIL; only the
        switch path additionally requires the candidate to pass slave checks.
    :return: Return the uuid of the best candidate to become a master in the
             group.
    """
    # Servers in these states are never considered.
    forbidden_status = (_server.MySQLServer.FAULTY, _server.MySQLServer.SPARE)
    group = _server.Group.fetch(group_id)
    master_uuid = None
    if group.master:
        master_uuid = str(group.master)
    chosen_uuid = None
    chosen_gtid_status = None
    for candidate in group.servers():
        # Skip the current master and servers in a forbidden state.
        if master_uuid != str(candidate.uuid) and \
            candidate.status not in forbidden_status:
            try:
                candidate.connect()
                gtid_status = candidate.get_gtid_status()
                master_issues, why_master_issues = \
                    _replication.check_master_issues(candidate)
                slave_issues = False
                why_slave_issues = {}
                # Slave prerequisites only matter for a planned switchover.
                if event == FIND_CANDIDATE_SWITCH:
                    slave_issues, why_slave_issues = \
                        _replication.check_slave_issues(candidate)
                # The candidate must replicate from the current master,
                # unless the group has no master registered.
                has_valid_master = (master_uuid is None or \
                    _replication.slave_has_master(candidate) == master_uuid)
                can_become_master = False
                if chosen_gtid_status:
                    # A candidate is already chosen: replace it only when
                    # this server is not behind it (0 GTIDs behind) and
                    # passes all the checks above.
                    n_trans = 0
                    try:
                        n_trans = _replication.get_slave_num_gtid_behind(
                            candidate, chosen_gtid_status
                        )
                    except _errors.InvalidGtidError:
                        # Incomparable GTID sets: n_trans stays 0, so the
                        # candidate is treated as not behind.
                        pass
                    if n_trans == 0 and not master_issues and \
                        has_valid_master and not slave_issues:
                        chosen_gtid_status = gtid_status
                        chosen_uuid = str(candidate.uuid)
                        can_become_master = True
                elif not master_issues and has_valid_master and \
                    not slave_issues:
                    # First qualifying candidate found.
                    chosen_gtid_status = gtid_status
                    chosen_uuid = str(candidate.uuid)
                    can_become_master = True
                if not can_become_master:
                    _LOGGER.warning(
                        "Candidate (%s) cannot become a master due to the "
                        "following reasons: issues to become a "
                        "master (%s), prerequistes as a slave (%s), valid "
                        "master (%s).", candidate.uuid, why_master_issues,
                        why_slave_issues, has_valid_master
                    )
            except _errors.DatabaseError as error:
                # Unreachable candidates are skipped with a warning.
                _LOGGER.warning(
                    "Error accessing candidate (%s): %s.", candidate.uuid,
                    error
                )
    if not chosen_uuid:
        raise _errors.GroupError(
            "There is no valid candidate that can be automatically "
            "chosen in group (%s). Please, choose one manually." %
            (group_id, )
        )
    return chosen_uuid
@_events.on_event(CHECK_CANDIDATE_SWITCH)
def _check_candidate_switch(group_id, slave_id):
    """Check if the candidate has all the features to become the new
    master.

    Validations, in order: the group must have a master; the candidate must
    not already be the master; it must pass the master prerequisites and the
    slave prerequisites; it must replicate from the group's current master;
    and it must not be faulty. On success, chains into BLOCK_WRITE_SWITCH.
    """
    allowed_status = (_server.MySQLServer.SECONDARY, _server.MySQLServer.SPARE)
    group = _server.Group.fetch(group_id)
    if not group.master:
        raise _errors.GroupError(
            "Group (%s) does not contain a valid "
            "master. Please, run a promote or failover." % (group_id, )
        )
    slave = _retrieve_server(slave_id, group_id)
    slave.connect()
    if group.master == slave.uuid:
        raise _errors.ServerError(
            "Candidate slave (%s) is already master." % (slave_id, )
        )
    master_issues, why_master_issues = _replication.check_master_issues(slave)
    if master_issues:
        raise _errors.ServerError(
            "Server (%s) is not a valid candidate slave "
            "due to the following reason(s): (%s)." %
            (slave.uuid, why_master_issues)
        )
    slave_issues, why_slave_issues = _replication.check_slave_issues(slave)
    if slave_issues:
        raise _errors.ServerError(
            "Server (%s) is not a valid candidate slave "
            "due to the following reason: (%s)." %
            (slave.uuid, why_slave_issues)
        )
    # The candidate must be replicating from the group's registered master.
    master_uuid = _replication.slave_has_master(slave)
    if master_uuid is None or group.master != _uuid.UUID(master_uuid):
        raise _errors.GroupError(
            "The group's master (%s) is different from the candidate's "
            "master (%s)." % (group.master, master_uuid)
        )
    if slave.status not in allowed_status:
        raise _errors.ServerError("Server (%s) is faulty." % (slave_id, ))
    # master_uuid is the uuid string read from the candidate, which was just
    # verified above to match group.master.
    _events.trigger_within_procedure(
        BLOCK_WRITE_SWITCH, group_id, master_uuid, str(slave.uuid)
    )
@_events.on_event(BLOCK_WRITE_SWITCH)
def _block_write_switch(group_id, master_uuid, slave_uuid):
    """Block and disable write access to the current master.

    After demoting the master, chains into WAIT_SLAVES_SWITCH so the
    candidate slave can catch up before promotion.
    """
    _do_block_write_master(group_id, master_uuid)
    _events.trigger_within_procedure(
        WAIT_SLAVES_SWITCH, group_id, master_uuid, slave_uuid
    )
def _do_block_write_master(group_id, master_uuid, update_only=False):
    """Block and disable write access to the current master.
    Note that connections are not killed and blocking the master
    may take some time.
    """
    master = _server.MySQLServer.fetch(_uuid.UUID(master_uuid))
    assert(master.status == _server.MySQLServer.PRIMARY)
    # Demote the server in the state store first ...
    master.mode = _server.MySQLServer.READ_ONLY
    master.status = _server.MySQLServer.SECONDARY
    if not update_only:
        # ... then actually make the running server read-only.
        master.connect()
        _utils.set_read_only(master, True)
    # Temporarily unset the master in this group.
    group = _server.Group.fetch(group_id)
    # NOTE(review): unlike _block_write_demote, ``update_only`` is not
    # forwarded here -- confirm that is intentional.
    _set_group_master_replication(group, None)
    # At the end, we notify that a server was demoted.
    _events.trigger("SERVER_DEMOTED", set([group_id]),
        group_id, str(master.uuid)
    )
@_events.on_event(WAIT_SLAVES_SWITCH)
def _wait_slaves_switch(group_id, master_uuid, slave_uuid):
    """Synchronize candidate with master and also all the other slaves.

    Note that this can be optimized as one may determine the set of
    slaves that must be synchronized with the master.
    """
    old_master = _server.MySQLServer.fetch(_uuid.UUID(master_uuid))
    old_master.connect()
    candidate = _server.MySQLServer.fetch(_uuid.UUID(slave_uuid))
    candidate.connect()
    # Bring the candidate up to date first, then the remaining slaves
    # (the candidate itself is skipped in the catch-up pass).
    _utils.synchronize(candidate, old_master)
    _do_wait_slaves_catch(group_id, old_master, [slave_uuid])
    _events.trigger_within_procedure(CHANGE_TO_CANDIDATE, group_id, slave_uuid)
def _do_wait_slaves_catch(group_id, master, skip_servers=None):
    """Synchronize slaves with master.

    :param group_id: Group whose slaves will be synchronized.
    :param master: Master server the slaves must catch up with.
    :param skip_servers: Optional list of server uuid strings to skip.
    """
    # BUGFIX: work on a copy so the caller's list is not mutated when the
    # master's own uuid is added to the skip set.
    skip_servers = list(skip_servers) if skip_servers else []
    skip_servers.append(str(master.uuid))
    group = _server.Group.fetch(group_id)
    for server in group.servers():
        if str(server.uuid) in skip_servers:
            continue
        try:
            server.connect()
            used_master_uuid = _replication.slave_has_master(server)
            if str(master.uuid) == used_master_uuid:
                _utils.synchronize(server, master)
            else:
                _LOGGER.debug("Slave (%s) has a different master "
                              "from group (%s).", server.uuid, group_id)
        except _errors.DatabaseError as error:
            # Best-effort: log and keep synchronizing the other slaves.
            _LOGGER.debug(
                "Error synchronizing slave (%s): %s.", server.uuid,
                error
            )
@_events.on_event(CHANGE_TO_CANDIDATE)
def _change_to_candidate(group_id, master_uuid, update_only=False):
    """Switch to candidate slave.

    Promotes the server identified by ``master_uuid`` to primary, points the
    remaining slaves at it (unless ``update_only``), and emits the
    SERVER_PROMOTED notification.
    """
    forbidden_status = (_server.MySQLServer.FAULTY, )
    master = _server.MySQLServer.fetch(_uuid.UUID(master_uuid))
    master.mode = _server.MySQLServer.READ_WRITE
    master.status = _server.MySQLServer.PRIMARY
    if not update_only:
        # Prepare the server to be the master
        master.connect()
        _utils.reset_slave(master)
        _utils.set_read_only(master, False)
    group = _server.Group.fetch(group_id)
    _set_group_master_replication(group, master.uuid, update_only)
    if not update_only:
        # Make slaves point to the master.
        for server in group.servers():
            if server.uuid != _uuid.UUID(master_uuid) and \
                server.status not in forbidden_status:
                try:
                    server.connect()
                    _utils.switch_master(server, master)
                except _errors.DatabaseError as error:
                    # Best-effort: a slave that cannot be reconfigured is
                    # only logged; the promotion itself still proceeds.
                    _LOGGER.debug(
                        "Error configuring slave (%s): %s.", server.uuid, error
                    )
    # At the end, we notify that a server was promoted.
    _events.trigger("SERVER_PROMOTED", set([group_id]),
        group_id, master_uuid
    )
@_events.on_event(FIND_CANDIDATE_FAIL)
def _find_candidate_fail(group_id):
    """Find the best candidate to replace the failed master.

    Chains into CHECK_CANDIDATE_FAIL with the chosen slave's uuid.
    """
    candidate_uuid = _do_find_candidate(group_id, FIND_CANDIDATE_FAIL)
    _events.trigger_within_procedure(
        CHECK_CANDIDATE_FAIL, group_id, candidate_uuid
    )
@_events.on_event(CHECK_CANDIDATE_FAIL)
def _check_candidate_fail(group_id, slave_id):
    """Check if the candidate has all the prerequisites to become the new
    master.

    Unlike the switchover path, this does not require the candidate to be
    replicating from the group's (failed) master or to pass the slave
    checks. On success, chains into WAIT_SLAVE_FAIL.
    """
    allowed_status = (_server.MySQLServer.SECONDARY, _server.MySQLServer.SPARE)
    group = _server.Group.fetch(group_id)
    slave = _retrieve_server(slave_id, group_id)
    slave.connect()
    if group.master == slave.uuid:
        raise _errors.ServerError(
            "Candidate slave (%s) is already master." % (slave_id, )
        )
    master_issues, why_master_issues = _replication.check_master_issues(slave)
    if master_issues:
        raise _errors.ServerError(
            "Server (%s) is not a valid candidate slave "
            "due to the following reason(s): (%s)." %
            (slave.uuid, why_master_issues)
        )
    if slave.status not in allowed_status:
        raise _errors.ServerError("Server (%s) is faulty." % (slave_id, ))
    _events.trigger_within_procedure(WAIT_SLAVE_FAIL, group_id, str(slave.uuid))
@_events.on_event(WAIT_SLAVE_FAIL)
def _wait_slave_fail(group_id, slave_uuid):
    """Wait until a slave processes its backlog.

    Failures while draining the relay log are logged but do not abort the
    failover; the promotion step is triggered regardless.
    """
    candidate = _server.MySQLServer.fetch(_uuid.UUID(slave_uuid))
    candidate.connect()
    try:
        _utils.process_slave_backlog(candidate)
    except _errors.DatabaseError as exc:
        _LOGGER.warning(
            "Error trying to process transactions in the relay log "
            "for candidate (%s): %s.", candidate, exc
        )
    _events.trigger_within_procedure(
        CHANGE_TO_CANDIDATE, group_id, slave_uuid
    )
@_events.on_event(BLOCK_WRITE_DEMOTE)
def _block_write_demote(group_id, update_only):
    """Block and disable write access to the current master.

    Demotes the group's current master to a read-only secondary and, unless
    ``update_only``, chains into WAIT_SLAVES_DEMOTE so the slaves can drain
    their backlog before replication bookkeeping is reset.
    """
    group = _server.Group.fetch(group_id)
    if not group:
        raise _errors.GroupError("Group (%s) does not exist." % (group_id, ))
    if not group.master:
        raise _errors.GroupError("Group (%s) does not have a master." %
                                 (group_id, ))
    master = _server.MySQLServer.fetch(group.master)
    # A demote is only meaningful for a primary; a faulty master is allowed
    # through so its group bookkeeping can still be cleared below.
    assert(master.status in \
        (_server.MySQLServer.PRIMARY, _server.MySQLServer.FAULTY)
    )
    if master.status == _server.MySQLServer.PRIMARY:
        master.connect()
        master.mode = _server.MySQLServer.READ_ONLY
        master.status = _server.MySQLServer.SECONDARY
        _utils.set_read_only(master, True)
    if not update_only:
        _events.trigger_within_procedure(
            WAIT_SLAVES_DEMOTE, group_id, str(master.uuid)
        )
    # Unset the group's master and reset replication bookkeeping.
    _set_group_master_replication(group, None, update_only)
@_events.on_event(WAIT_SLAVES_DEMOTE)
def _wait_slaves_demote(group_id, master_uuid):
    """Synchronize slaves with the demoted master, then stop replication.
    """
    demoted = _server.MySQLServer.fetch(_uuid.UUID(master_uuid))
    demoted.connect()
    # Let every slave catch up with the demoted master first.
    _do_wait_slaves_catch(group_id, demoted)
    # Then stop the replication threads on all servers in the group.
    group = _server.Group.fetch(group_id)
    for member in group.servers():
        try:
            member.connect()
            _utils.stop_slave(member)
        except _errors.DatabaseError as exc:
            # Best-effort: log and continue with the remaining servers.
            _LOGGER.debug(
                "Error waiting for slave (%s) to stop: %s.", member.uuid,
                exc
            )
def _set_group_master_replication(group, server_id, update_only=False):
"""Set the master for the given group and also reset the
replication with the other group masters. Any change of master
for a group will be initiated through this method. The method also
takes care of resetting the master and the slaves that are registered
with this group to connect with the new master.
The idea is that operations like switchover, failover, promote all are
finally master changing operations. Hence the right place to handle
these operations is at the place where the master is being changed.
The following operations need to be done
- Stop the slave on the old master
- Stop the slaves replicating from the old master
- Start the slave on the new master
- Start the slaves with the new master
| |
# repo: RichardScottOZ/geoapps
import numpy as np
from scipy.interpolate import LinearNDInterpolator
import matplotlib.pyplot as plt
from scipy.interpolate import griddata
from scipy.spatial import cKDTree
from scipy.interpolate.interpnd import _ndim_coords_from_arrays
from matplotlib.colors import LightSource, Normalize
from .matutils import mkvc
def plot2Ddata(
    xyz,
    data,
    vec=False,
    nx=100,
    ny=100,
    ax=None,
    mask=None,
    level=None,
    figname=None,
    ncontour=10,
    dataloc=False,
    contourOpts=None,
    scale="linear",
    clim=None,
):
    """
    Take unstructured xy points, interpolate, then plot in 2D

    :param numpy.array xyz: data locations; only the first two columns are used
    :param numpy.array data: data values, (n,) or (n, 2) when vec is True
    :param bool vec: plot streamplot of a two-component field?
    :param float nx: number of x grid locations
    :param float ny: number of y grid locations
    :param matplotlib.axes ax: axes (a new figure/axes is created when None)
    :param numpy.array mask: mask for the array (accepted but currently unused)
    :param float level: level at which to draw a contour
    :param string figname: figure name; when given, the figure is saved
    :param float ncontour: number of :meth:`matplotlib.pyplot.contourf`
        contours
    :param bool dataloc: plot the data locations
    :param dict contourOpts: :meth:`matplotlib.pyplot.contourf` options
    :param string scale: "linear" or "log"; "log" contours log10(|data|)
    :param numpy.array clim: colorbar limits (accepted but currently unused —
        TODO wire into contourf via vmin/vmax)
    :return: (contour set, axes) or (contour set, axes, line contour set)
        when *level* is given
    """
    # Use None instead of a mutable {} default so calls never share state.
    if contourOpts is None:
        contourOpts = {}
    if ax is None:
        plt.figure()
        ax = plt.subplot(111)
    xmin, xmax = xyz[:, 0].min(), xyz[:, 0].max()
    ymin, ymax = xyz[:, 1].min(), xyz[:, 1].max()
    x = np.linspace(xmin, xmax, nx)
    y = np.linspace(ymin, ymax, ny)
    X, Y = np.meshgrid(x, y)
    xy = np.c_[X.flatten(), Y.flatten()]
    if vec is False:
        F = LinearNDInterpolator(xyz[:, :2], data)
        DATA = F(xy)
        DATA = DATA.reshape(X.shape)
        if scale == "log":
            DATA = np.log10(abs(DATA))
        cont = ax.contourf(X, Y, DATA, ncontour, **contourOpts)
        if level is not None:
            if scale == "log":
                level = np.log10(level)
            CS = ax.contour(X, Y, DATA, level, colors="k", linewidths=2)
    else:
        # Assume size of data is (N, 2): interpolate each component, plot
        # the amplitude as filled contours and the direction as streamlines.
        datax = data[:, 0]
        datay = data[:, 1]
        Fx = LinearNDInterpolator(xyz[:, :2], datax)
        Fy = LinearNDInterpolator(xyz[:, :2], datay)
        DATAx = Fx(xy)
        DATAy = Fy(xy)
        DATA = np.sqrt(DATAx ** 2 + DATAy ** 2).reshape(X.shape)
        DATAx = DATAx.reshape(X.shape)
        DATAy = DATAy.reshape(X.shape)
        if scale == "log":
            DATA = np.log10(abs(DATA))
        cont = ax.contourf(X, Y, DATA, ncontour, **contourOpts)
        ax.streamplot(X, Y, DATAx, DATAy, color="w")
        if level is not None:
            CS = ax.contour(X, Y, DATA, level, colors="k", linewidths=2)
    if dataloc:
        ax.plot(xyz[:, 0], xyz[:, 1], "k.", ms=2)
    plt.gca().set_aspect("equal", adjustable="box")
    if figname:
        plt.axis("off")
        # Save via the axes' parent figure: the old code referenced a local
        # `fig` that only existed when ax was None, so saving raised
        # NameError whenever the caller passed in their own axes.
        ax.figure.savefig(figname, dpi=200)
    if level is None:
        return cont, ax
    else:
        return cont, ax, CS
def plotLayer(
    sig, LocSigZ, xscale="log", ax=None, showlayers=False, xlim=None, **kwargs
):
    """Plot a layered earth model.

    :param numpy.array sig: conductivity per layer (S/m)
    :param numpy.array LocSigZ: layer interface depths (m); one more entry
        than the number of layers
    :param str xscale: "log" or "linear" conductivity axis
    :param matplotlib.axes ax: axes; the pyplot state machine is used if None
    :param bool showlayers: draw dashed horizontal lines at the interfaces
    :param tuple xlim: conductivity axis limits; derived from *sig* if None
    :param kwargs: forwarded to the plot call
    :return: list of matplotlib lines from ``plot``
    """
    # Duplicate each value so the profile renders as a step function.
    sigma = np.repeat(sig, 2, axis=0)
    z = np.repeat(LocSigZ[1:], 2, axis=0)
    z = np.r_[LocSigZ[0], z, LocSigZ[-1]]
    if xlim is None:
        sig_min = sig.min() * 0.5
        sig_max = sig.max() * 2
    else:
        sig_min, sig_max = xlim
    # A zero conductivity cannot be shown on a log axis; widen the linear
    # limits instead so the zero layer stays visible.
    if xscale == "linear" and sig.min() == 0.0:
        if xlim is None:
            sig_min = -sig.max() * 0.5
            sig_max = sig.max() * 2
    if ax is None:
        plt.xscale(xscale)
        plt.xlim(sig_min, sig_max)
        plt.ylim(z.min(), z.max())
        plt.xlabel("Conductivity (S/m)", fontsize=14)
        # (the original called plt.ylabel twice; once is enough)
        plt.ylabel("Depth (m)", fontsize=14)
        if showlayers is True:
            for locz in LocSigZ:
                plt.plot(
                    np.linspace(sig_min, sig_max, 100),
                    np.ones(100) * locz,
                    "b--",
                    lw=0.5,
                )
        return plt.plot(sigma, z, "k-", **kwargs)
    else:
        ax.set_xscale(xscale)
        ax.set_xlim(sig_min, sig_max)
        ax.set_ylim(z.min(), z.max())
        ax.set_xlabel("Conductivity (S/m)", fontsize=14)
        ax.set_ylabel("Depth (m)", fontsize=14)
        if showlayers is True:
            for locz in LocSigZ:
                ax.plot(
                    np.linspace(sig_min, sig_max, 100),
                    np.ones(100) * locz,
                    "b--",
                    lw=0.5,
                )
        return ax.plot(sigma, z, "k-", **kwargs)
def plotDataHillside(
    x,
    y,
    z,
    axs=None,
    fill=True,
    contour=0,
    vmin=None,
    vmax=None,
    clabel=True,
    cmap="RdBu_r",
    ve=1.0,
    alpha=1.0,
    alphaHS=1.0,
    distMax=1000,
    midpoint=None,
    azdeg=315,
    altdeg=45,
):
    """Plot gridded data as filled contours with a hillshade overlay.

    :param x, y, z: 1D scattered coordinates/values (gridded here via cubic
        interpolation) or pre-gridded 2D arrays
    :param axs: matplotlib axes; a new subplot is created when None
    :param fill: draw the filled contours + hillshade layers
    :param contour: number of black contour lines (0 = none)
    :param vmin, vmax: color limits for the filled contours
    :param clabel: label the contour lines
    :param cmap: colormap name
    :param ve: vertical exaggeration for the hillshade
    :param alpha, alphaHS: transparency of the fill / hillshade layers
    :param distMax: mask grid nodes farther than this from any data point
    :param midpoint: center value of the diverging color normalization
    :param azdeg, altdeg: light-source azimuth/altitude in degrees
    :return: (filled-contour set or [], contour-line set or [])
    """
    ls = LightSource(azdeg=azdeg, altdeg=altdeg)
    if x.ndim == 1:
        # Create grid of points
        vectorX = np.linspace(x.min(), x.max(), 1000)
        vectorY = np.linspace(y.min(), y.max(), 1000)
        X, Y = np.meshgrid(vectorX, vectorY)
        # Interpolate
        d_grid = griddata(np.c_[x, y], z, (X, Y), method="cubic")
        # Remove points beyond threshold
        tree = cKDTree(np.c_[x, y])
        xi = _ndim_coords_from_arrays((X, Y), ndim=2)
        dists, indexes = tree.query(xi)
        # Copy original result but mask missing values with NaNs
        d_grid[dists > distMax] = np.nan
    else:
        X, Y, d_grid = x, y, z

    class MidPointNorm(Normalize):
        """Normalize variant that pins *midpoint* to the colormap center."""

        def __init__(self, midpoint=None, vmin=None, vmax=None, clip=False):
            Normalize.__init__(self, vmin, vmax, clip)
            self.midpoint = midpoint

        def __call__(self, value, clip=None):
            if clip is None:
                clip = self.clip
            result, is_scalar = self.process_value(value)
            self.autoscale_None(result)
            if self.midpoint is None:
                self.midpoint = np.mean(value)
            vmin, vmax, midpoint = self.vmin, self.vmax, self.midpoint
            # NOTE(review): the vmin == vmax / vmin > vmax branches below are
            # unreachable because the range check raises first; kept as-is to
            # preserve the original behavior.
            if not (vmin < midpoint < vmax):
                raise ValueError("midpoint must be between maxvalue and minvalue.")
            elif vmin == vmax:
                result.fill(0)  # Or should it be all masked? Or 0.5?
            elif vmin > vmax:
                raise ValueError("maxvalue must be bigger than minvalue")
            else:
                vmin = float(vmin)
                vmax = float(vmax)
                if clip:
                    mask = np.ma.getmask(result)
                    result = np.ma.array(
                        np.clip(result.filled(vmax), vmin, vmax), mask=mask
                    )
                # ma division is very slow; we can take a shortcut
                resdat = result.data
                # First scale to -1 to 1 range, than to from 0 to 1.
                resdat -= midpoint
                resdat[resdat > 0] /= abs(vmax - midpoint)
                resdat[resdat < 0] /= abs(vmin - midpoint)
                resdat /= 2.0
                resdat += 0.5
                result = np.ma.array(resdat, mask=result.mask, copy=False)
            if is_scalar:
                result = result[0]
            return result

        def inverse(self, value):
            if not self.scaled():
                raise ValueError("Not invertible until scaled")
            vmin, vmax, midpoint = self.vmin, self.vmax, self.midpoint
            # Fixed: use np.iterable / np.ma — the original referenced
            # `cbook` and `ma`, names this module never imports.
            if np.iterable(value):
                val = np.ma.asarray(value)
                val = 2 * (val - 0.5)
                val[val > 0] *= abs(vmax - midpoint)
                val[val < 0] *= abs(vmin - midpoint)
                val += midpoint
                return val
            else:
                # Fixed: the scalar branch read `val` before assignment.
                val = 2 * (value - 0.5)
                if val < 0:
                    return val * abs(vmin - midpoint) + midpoint
                else:
                    return val * abs(vmax - midpoint) + midpoint

    im, CS = [], []
    if axs is None:
        axs = plt.subplot()
    if fill:
        extent = x.min(), x.max(), y.min(), y.max()
        im = axs.contourf(
            X,
            Y,
            d_grid,
            50,
            vmin=vmin,
            vmax=vmax,
            cmap=cmap,
            norm=MidPointNorm(midpoint=midpoint),
            alpha=alpha,
        )
        axs.imshow(
            ls.hillshade(d_grid, vert_exag=ve, dx=1.0, dy=1.0),
            cmap="gray",
            alpha=alphaHS,
            extent=extent,
            origin="lower",
        )
    if contour > 0:
        CS = axs.contour(X, Y, d_grid, int(contour), colors="k", linewidths=0.5)
        if clabel:
            plt.clabel(CS, inline=1, fontsize=10, fmt="%i")
    return im, CS
def plotModelSections(
mesh,
m,
normal="x",
ind=0,
vmin=None,
vmax=None,
subFact=2,
scale=1.0,
xlim=None,
ylim=None,
vec="k",
title=None,
axs=None,
actv=None,
contours=None,
fill=True,
orientation="vertical",
cmap="pink_r",
contourf=False,
colorbar=False,
):
"""
Plot section through a 3D tensor model
"""
# plot recovered model
nC = mesh.nC
if vmin is None:
vmin = m[np.isnan(m) != True].min()
if vmax is None:
vmax = m[np.isnan(m) != True].max()
if len(m) == 3 * nC:
m_lpx = m[0:nC]
m_lpy = m[nC : 2 * nC]
m_lpz = m[2 * nC :]
if actv is not None:
m_lpx[actv != True] = np.nan
m_lpy[actv != True] = np.nan
m_lpz[actv != True] = np.nan
amp = np.sqrt(m_lpx ** 2.0 + m_lpy ** 2.0 + m_lpz ** 2.0)
m_lpx = (m_lpx).reshape(mesh.vnC, order="F")
m_lpy = (m_lpy).reshape(mesh.vnC, order="F")
m_lpz = (m_lpz).reshape(mesh.vnC, order="F")
amp = amp.reshape(mesh.vnC, order="F")
else:
if actv is not None:
m[actv != True] = np.nan
amp = m.reshape(mesh.vnC, order="F")
xx = mesh.gridCC[:, 0].reshape(mesh.vnC, order="F")
zz = mesh.gridCC[:, 2].reshape(mesh.vnC, order="F")
yy = mesh.gridCC[:, 1].reshape(mesh.vnC, order="F")
if axs is None:
fig, axs = plt.figure(), plt.subplot()
if normal == "x":
xx = yy[ind, :, :].T
yy = zz[ind, :, :].T
model = amp[ind, :, :].T
if len(m) == 3 * nC:
mx = m_lpy[ind, ::subFact, ::subFact].T
my = m_lpz[ind, ::subFact, ::subFact].T
elif normal == "y":
xx = xx[:, ind, :].T
yy = zz[:, ind, :].T
model = amp[:, ind, :].T
if len(m) == 3 * nC:
mx = m_lpx[::subFact, ind, ::subFact].T
my = m_lpz[::subFact, ind, ::subFact].T
elif normal == "z":
if actv is not None:
actIndFull = np.zeros(mesh.nC, dtype=bool)
actIndFull[actv] = True
else:
actIndFull = np.ones(mesh.nC, dtype=bool)
actIndFull = actIndFull.reshape(mesh.vnC, order="F")
model = np.zeros((mesh.nCx, mesh.nCy))
mx = np.zeros((mesh.nCx, mesh.nCy))
my = np.zeros((mesh.nCx, mesh.nCy))
for ii in range(mesh.nCx):
for jj in range(mesh.nCy):
zcol = actIndFull[ii, jj, :]
model[ii, jj] = amp[ii, jj, np.where(zcol)[0][-ind]]
if len(m) == 3 * nC:
mx[ii, jj] = m_lpx[ii, jj, np.where(zcol)[0][-ind]]
my[ii, jj] = m_lpy[ii, jj, np.where(zcol)[0][-ind]]
xx = xx[:, :, ind].T
yy = yy[:, :, ind].T
model = model.T
if len(m) == 3 * nC:
mx = mx[::subFact, ::subFact].T
my = my[::subFact, ::subFact].T
im2, cbar = [], []
if fill:
if contourf:
im2 = axs.contourf(xx, yy, amp, 10, vmin=vmin, vmax=vmax, cmap=cmap)
else:
if mesh.dim == 3:
im2 = mesh.plotSlice(
mkvc(amp),
ind=ind,
normal=normal.upper(),
ax=axs,
clim=[vmin, vmax],
pcolorOpts={"clim": [vmin, vmax], "cmap": cmap},
)[0]
else:
im2 = mesh.plotImage(
mkvc(amp),
ax=axs,
clim=[vmin, vmax],
pcolorOpts={"clim": [vmin, vmax], "cmap": cmap, "alpha": alpha},
)[0]
if colorbar:
cbar = plt.colorbar(
im2,
orientation=orientation,
| |
import logging
import os
import numpy as np
import pandas as pd
import sqlalchemy
from cached_property import cached_property
from scipy.interpolate import interp1d
from aqueduct.errors import Error
class RiskService(object):
    def __init__(self, user_selections):
        """Initialize the risk service from a dict of user selections.

        Connects to the Postgres database named by the POSTGRES_URL
        environment variable, reflects its schema, records the constant
        lookup tables plus the raw user inputs, and finally resolves the
        derived values (geounit, scenario codes, precalculated data, ...)
        via self.user_selections().

        :param user_selections: dict with keys "flood", "exposure",
            "geogunit_unique_name", "sub_scenario", "existing_prot",
            "scenario" (see user_selections() for their meaning)
        """
        # DB Connection
        self.engine = sqlalchemy.create_engine(os.getenv('POSTGRES_URL'))
        self.metadata = sqlalchemy.MetaData(bind=self.engine)
        self.metadata.reflect(self.engine)
        # BACKGROUND INFO
        self.flood_types = ["riverine", "coastal"]
        self.exposures = ["gdpexp", "popexp", "urban_damage_v2"]
        self.geogunits = ["geogunit_103", "geogunit_108"]
        # scenario name -> [climate scenario, socioeconomic scenario, abbreviation]
        self.scenarios = {"business as usual": ['rcp8p5', 'ssp2', "bau"],
                          "pessimistic": ['rcp8p5', 'ssp3', "pes"],
                          "optimistic": ['rcp4p5', 'ssp2', "opt"],
                          "rcp8p5": ['rcp8p5', 'ssp3', "pes"],
                          "rcp4p5": ['rcp8p5', 'ssp2', "bau"]}
        # Climate models per flood type (coastal entries are percentile labels).
        self.models = {"riverine": ["gf", "ha", "ip", "mi", "nr"],
                       # "coastal": ["wt"]}
                       "coastal": ["95", "50", "05"]}
        self.years = [2010., 2030., 2050., 2080.]
        # String forms of the years, e.g. "2010" (used in column names).
        self.ys = [str(x)[0:4] for x in self.years]
        # Return periods and their column-name forms ("rp00002", ...).
        self.rps = [2, 5, 10, 25, 50, 100, 250, 500, 1000]
        self.rps_names = ["rp" + str(x).zfill(5) for x in self.rps]
        # MANDATORY USER INPUTS
        self.flood = user_selections.get("flood")  # Flood type
        self.exposure = user_selections.get("exposure")  # Exposure type
        self.geogunit_unique_name = user_selections.get("geogunit_unique_name")  # Unique geographical unit name
        self.sub_scenario = user_selections.get(
            "sub_scenario")  # Subsidence option (Will always be no for Riverine floods)
        self.existing_prot = user_selections.get(
            "existing_prot")  # User input for protection standard (triggers on-the-fly calculation)
        self.scenario = user_selections.get("scenario")
        # NOTE(review): user_selections() runs DB queries and reads the
        # attributes set above, so this call must stay last.
        self.geogunit, self.geogunit_name, self.geogunit_type, self.clim, self.socio, self.scen_abb, self.sub_abb, self.df_precalc, self.prot_pres, self.risk_analysis = self.user_selections()
        # Climate models for the selected flood type.
        self.mods = self.models.get(self.flood)
def user_selections(self):
"""
Purpose: Gather all necessary inputs to run any analysis
Input:
flood: Riverine of Coastal (User must select)
Geogunit_unique_name: geographical unit name from website. (User must select)
Website should use list of unique names to avoid selecting more than one unit
Scenario: Business as usual, Pessimistic, Optimistic
sub_scenario: Yes (defaul(t), No does the user want to consider subsidence? Only relevant for coastal)
existing_prot: Default protection standard. User can input their own or, which will trigger on-the-fly calculations
Output:
geogunit unit - (geogunit_103 for cities, geogunit_108 for everything else)
geogunit_name - original (ie non-unique) name
geogunit_type - City, State, Country, Basin
clim - rcp4p5, rcp8p4 (climate scenario associated with overall scenario)
socio - base, ssp2, ssp3 (socioeconomic scenario associated with overall scenario)
sub_scenario- Yes, No (Is subsidence included?)
sub_abb - wtsub or nosub (code name for subsidence. wtsub = with sub)
prot_pres - default protection standard for unit as a whole
risk_analysis - can we use precalculated risk data, or do we need to calculate on-the-fly?
"""
# GEOGUNIT INFO
fids, geogunit_name, geogunit_type = pd.read_sql_query(
"SELECT fids, name, type FROM lookup_master where uniqueName = '{0}' ".format(self.geogunit_unique_name),
self.engine).values[0]
geogunit = "geogunit_103" if geogunit_type.lower() == "city" else "geogunit_108"
# IMPACT DRIVER INFO (climate and socioeconomc scenarios
clim, socio, scen_abb = self.scenarios.get(self.scenario)
# SUBSIDENCE INFO
# Make sure subsidence is turned off for river floods
sub_abb = "wtsub" if self.sub_scenario else "nosub"
# DEFAULT DATA
defaultfn = "precalc_agg_{0}_{1}_{2}".format(self.flood, geogunit_type.lower(), sub_abb)
logging.info(f'[RISK - user_selection]: {str(defaultfn)}')
df_precalc = pd.read_sql_query("SELECT * FROM {0} where id like '{1}'".format(defaultfn, geogunit_name),
self.engine, index_col='id')
# PROTECTION STANDARDS and RISK ANALYSIS TYPE
if not self.existing_prot:
risk_analysis = "precalc"
# Hardwire in the protection standards for the Netherlands or Average prot standard for a whole unit (i.e. country)
# here self.exposure should be allways urban_damage_v2
prot_pres = (1000 if geogunit_name in ['Noord-Brabant, Netherlands', 'Zeeland, Netherlands',
'Zeeuwse meren, Netherlands', 'Zuid-Holland, Netherlands',
'Drenthe, Netherlands', 'Flevoland, Netherlands',
'Friesland, Netherlands', 'Gelderland, Netherlands',
'Groningen, Netherlands', 'IJsselmeer, Netherlands',
'Limburg, Netherlands', 'Noord-Holland, Netherlands',
'Overijssel, Netherlands', 'Utrecht, Netherlands',
'Netherlands'] else df_precalc[
["_".join(['urban_damage_v2', '2010', scen_abb, "prot_avg"])]])
else:
risk_analysis = "calc"
prot_pres = self.existing_prot
return geogunit, geogunit_name, geogunit_type.lower(), clim, socio, scen_abb, sub_abb, df_precalc, prot_pres, risk_analysis
def lp_data(self):
inFormat = 'raw_agg_{:s}_{:s}_{:s}'.format(self.flood, self.geogunit_type, self.exposure)
cols = [
'{0} as {1}'.format(col, col.replace(self.clim, 'lp').replace(self.socio + "_" + self.sub_abb + "_", ''))
for col in sqlalchemy.Table(inFormat, self.metadata).columns.keys() if
(self.clim in col) and (self.socio in col) and (self.sub_abb in col)]
df_temp = pd.read_sql_query(
"SELECT {0} FROM {1} where id like '{2}'".format(', '.join(cols), inFormat, self.geogunit_name),
self.engine)
df_lpcurve = df_temp.T
df1 = df_lpcurve.reset_index().rename(columns={"index": "index", 0: "y"})
df2 = df_lpcurve.reset_index()['index'].str.split('_', expand=True).rename(
columns={0: "lp", 1: "c", 2: "year", 3: "x"})
logging.info('[RISK]: lp_curve')
#logging.info(df1)
#logging.info(df2)
return pd.concat([df1, df2], axis=1).reindex(df1.index)[['c', 'year', 'y', 'x']].replace(self.rps_names, self.rps)
#return pd.concat([df1, df2], axis=1, join_axes=[df1.index])[['c', 'year', 'y', 'x']].replace(self.rps_names, self.rps)
def bench(self):
defaultfn = "precalc_agg_{0}_{1}_{2}".format(self.flood, self.geogunit_type, self.sub_abb)
print(defaultfn)
# cols = ['{0} as {1}'.format(col, col.replace(self.exposure, 'bench').replace('urban_damage_v2', 'bench').replace("_"+ self.scen_abb, '')) for col in sqlalchemy.Table(defaultfn, self.metadata).columns.keys() if ((self.exposure in col) or ('urban_damage_v2' in col)) and (self.scen_abb in col) and ("cc" not in col) and ("soc" not in col) and ("sub" not in col) and ("avg" in col)]
cols = ['{0} as {1}'.format(col,
col.replace(self.exposure, 'bench').replace('urban_damage_v2', 'bench').replace(
"_" + self.scen_abb, '')) for col in
sqlalchemy.Table(defaultfn, self.metadata).columns.keys() if
((self.exposure in col) or ('prot' in col)) and (self.scen_abb in col) and ("cc" not in col) and (
"soc" not in col) and ("sub" not in col) and ("avg" in col)]
benchData = pd.read_sql_query("SELECT id, {0} FROM {1}".format(', '.join(cols), defaultfn), self.engine,
index_col='id')
return benchData
def format_risk(self, dataframe):
datalist = ["tot_avg", "tot_min", "tot_max",
"ast", "prot_avg",
"per_avg", "per_min", "per_max",
"cc_avg", "cc_min", "cc_max",
"soc_avg", "sub_avg"]
colNames = ["Annual_Damage_Avg", "Annual_Damage_Min", "Annual_Damage_Max",
"Asset_Value", "Flood_Protection",
"Percent_Damage_Avg", "Percent_Damage_Min", "Percent_Damage_Max",
"CC_Driver_Avg", "CC_Driver_Min", "CC_Driver_Max",
"Soc_Driver", "Sub_Driver"]
df_final = pd.DataFrame(index=self.ys, columns=colNames)
for d in range(0, len(datalist)):
selData = dataframe[[col for col in dataframe.columns.tolist() if (datalist[d] in col)]]
if len(selData.values[0]) == 3:
df_final[colNames[d]][1:] = selData.values[0]
else:
df_final[colNames[d]] = selData.values[0]
return df_final
def find_assets(self):
"""
Purpose: Find total asset value
Output:
df_aggregate = Annual impacts for each year for user-selected geographical unit
"""
# Create term to filter out unnecessary results. Drop SSP2 data if scenario
# is pessemistic. Else, drop SSP3
dropex = "ssp2" if self.scen_abb == "pes" else "ssp3"
assts = self.df_precalc[[col for col in self.df_precalc.columns.tolist() if
(self.exposure in col) and (self.scen_abb in col) and ("ast" in col) and (
dropex not in col)]]
return assts.reset_index(drop=True)
def run_stats(self, dataframe):
"""
Purpose: Finds the average, min, and max impact for all impact types
Input:
dataframe: Data associated with flood, geography, exposure type for all climate models
Output:
Dataframe with average impact data for each year for each impact type. Also includes min and max (uncertainity)
"""
# Create dataframe to hold final data
df_final = pd.DataFrame(index=dataframe.index)
# Define column field name structure
colFormat = '{:s}_{:s}_{:s}_{:s}_{:s}'.format
# Run following analysis for each year and impact type
for y in self.ys:
for t in ["cc", "soc", "sub", "tot", "prot"]:
df_filt = dataframe[[col for col in dataframe.columns if (t in col) and (y in col)]]
df_final[colFormat(self.exposure, y, self.scen_abb, t, "avg")] = df_filt.mean(axis=1)
if y != '2010' and t == "tot" or y != '2010' and t == 'cc':
df_final[colFormat(self.exposure, y, self.scen_abb, t, "min")] = df_filt.min(axis=1)
df_final[colFormat(self.exposure, y, self.scen_abb, t, "max")] = df_filt.max(axis=1)
df_final.replace(np.nan, 0, inplace=True)
return df_final
def ratio_to_total(self, dataframe):
"""
Purpose: Finds the impact attributed to climate change only, socioecon only, and subsidence only
Input:
inData: Annual expected impact data (found using default_risk function)
mods: All possible climate models
Output:
Dataframe with final impact data for each year for each impact type. Column name also specifies given model
"""
# Create dataframe to hold final data
df_final = pd.DataFrame(index=dataframe.index)
# Run analysis for each climate model and each year past 2010
colFormat = '{:s}_{:s}_{:s}_{:s}_{:s}'.format
df_final[colFormat(self.exposure, "2010", self.scen_abb, "prot", "avg")] = dataframe[
colFormat(self.exposure, "2010", self.scen_abb, "prot", "avg")]
tot2010 = dataframe[colFormat(self.exposure, "2010", self.scen_abb, "tot", "avg")]
df_final[colFormat(self.exposure, "2010", self.scen_abb, "tot", "avg")] = tot2010
for y in self.ys[1:]:
# Filter data year
df_filt = dataframe[[col for col in dataframe.columns if (y in col)]]
# Total impact for selected year is already calculated
df_final[colFormat(self.exposure, y, self.scen_abb, "tot", "avg")] = dataframe[
colFormat(self.exposure, y, self.scen_abb, "tot", "avg")]
df_final[colFormat(self.exposure, y, self.scen_abb, "tot", "min")] = dataframe[
colFormat(self.exposure, y, self.scen_abb, "tot", "min")]
df_final[colFormat(self.exposure, y, self.scen_abb, "tot", "max")] = dataframe[
colFormat(self.exposure, y, self.scen_abb, "tot", "max")]
# Find the difference from each impact to the 2010 baseline data
df_filt['tot_diff'] = dataframe[colFormat(self.exposure, y, self.scen_abb, "tot",
"avg")] - tot2010 # Total impact
df_filt['cc_diff_avg'] = dataframe[colFormat(self.exposure, y, self.scen_abb, "cc",
"avg")] - tot2010 # Total impact
df_filt['cc_diff_min'] = dataframe[colFormat(self.exposure, y, self.scen_abb, "cc",
"min")] - tot2010 # Total impact
df_filt['cc_diff_max'] = dataframe[colFormat(self.exposure, y, self.scen_abb, "cc",
"max")] - tot2010 # Total impact
df_filt['soc_diff'] = | |
list of 3 ints
[x, y, z] bounding box padding beyond box spanned by coordinates
:param remesh_preview: bool
:param return_new_lvl2_nodes: bool
:param root_ids: list of uint64s
:param n_tries: int
:return: list of uint64s or None if no split was performed
"""
if source_ids is not None and sink_ids is not None:
if not (isinstance(source_ids, list) or isinstance(source_ids,
np.ndarray)):
source_ids = [source_ids]
if not (isinstance(sink_ids, list) or isinstance(sink_ids,
np.ndarray)):
sink_ids = [sink_ids]
# Sanity Checks
if np.any(np.in1d(sink_ids, source_ids)):
raise cg_exceptions.PreconditionError(
f"One or more supervoxel exists as both, sink and source."
)
for source_id in source_ids:
layer = self.get_chunk_layer(source_id)
if layer != 1:
raise cg_exceptions.PreconditionError(
f"Supervoxel expected, but {source_id} is a layer {layer} node."
)
for sink_id in sink_ids:
layer = self.get_chunk_layer(sink_id)
if layer != 1:
raise cg_exceptions.PreconditionError(
f"Supervoxel expected, but {sink_id} is a layer {layer} node."
)
root_ids = set()
for source_id in source_ids:
root_ids.add(self.get_root(source_id))
for sink_id in sink_ids:
root_ids.add(self.get_root(sink_id))
if mincut:
assert source_coords is not None
assert sink_coords is not None
assert sink_ids is not None
assert source_ids is not None
root_ids = set()
for source_id in source_ids:
root_ids.add(self.get_root(source_id))
for sink_id in sink_ids:
root_ids.add(self.get_root(sink_id))
else:
if atomic_edges is None:
assert source_ids is not None
assert sink_ids is not None
atomic_edges = np.array(list(itertools.product(source_ids,
sink_ids)))
root_ids = set()
for atomic_edge in atomic_edges:
root_ids.add(self.get_root(atomic_edge[0]))
root_ids.add(self.get_root(atomic_edge[1]))
if len(root_ids) > 1:
raise cg_exceptions.PreconditionError(
f"All supervoxel must belong to the same object. Already split?"
)
root_ids = list(root_ids)
# Get a unique id for this operation
operation_id = self.get_unique_operation_id()
i_try = 0
while i_try < n_tries:
# Try to acquire lock and only continue if successful
lock_root_ids = np.unique(root_ids)
lock_acquired, lock_root_ids = \
self.lock_root_loop(root_ids=lock_root_ids,
operation_id=operation_id)
if lock_acquired:
# (run mincut) and remove edges + update hierarchy
if mincut:
success, result = \
self._remove_edges_mincut(operation_id=operation_id,
source_ids=source_ids,
sink_ids=sink_ids,
source_coords=source_coords,
sink_coords=sink_coords,
bb_offset=bb_offset)
if success:
new_root_ids, rows, removed_edges, time_stamp, \
lvl2_node_mapping = result
else:
for lock_root_id in lock_root_ids:
self.unlock_root(lock_root_id,
operation_id=operation_id)
return None
else:
success, result = \
self._remove_edges(operation_id=operation_id,
atomic_edges=atomic_edges)
if success:
new_root_ids, rows, time_stamp, \
lvl2_node_mapping = result
removed_edges = atomic_edges
else:
for lock_root_id in lock_root_ids:
self.unlock_root(lock_root_id,
operation_id=operation_id)
return None
# Add a row to the log
log_row = self._create_split_log_row(operation_id,
user_id,
new_root_ids,
source_ids,
sink_ids,
source_coords,
sink_coords,
removed_edges,
bb_offset,
time_stamp)
# Put log row first!
rows = [log_row] + rows
# Execute write (makes sure that we are still owning the lock)
# if len(sink_ids) > 1 or len(source_ids) > 1:
# self.logger.debug(removed_edges)
# else:
if self.bulk_write(rows, lock_root_ids,
operation_id=operation_id, slow_retry=False):
if remesh_preview:
meshgen.mesh_lvl2_previews(self, list(
lvl2_node_mapping.keys()))
self.logger.debug(f"new root ids: {new_root_ids}")
if return_new_lvl2_nodes:
return new_root_ids, list(lvl2_node_mapping.keys())
else:
return new_root_ids
for lock_root_id in lock_root_ids:
self.unlock_root(lock_root_id, operation_id=operation_id)
i_try += 1
self.logger.debug(f"Waiting - {i_try}")
time.sleep(1)
self.logger.warning("Could not acquire root object lock.")
raise cg_exceptions.LockingError(f"Could not acquire root object lock.")
def _remove_edges_mincut(self, operation_id: np.uint64,
source_ids: Sequence[np.uint64],
sink_ids: Sequence[np.uint64],
source_coords: Sequence[Sequence[int]],
sink_coords: Sequence[Sequence[int]],
bb_offset: Tuple[int, int, int] = (120, 120, 12)
) -> Tuple[
bool, # success
Optional[Tuple[
List[np.uint64], # new_roots
List[bigtable.row.Row], # rows
np.ndarray, # removed_edges
datetime.datetime,
dict]]]: # timestamp
""" Computes mincut and removes edges accordingly
:param operation_id: uint64
:param source_ids: uint64
:param sink_ids: uint64
:param source_coords: list of 3 ints
[x, y, z] coordinate of source supervoxel
:param sink_coords: list of 3 ints
[x, y, z] coordinate of sink supervoxel
:param bb_offset: list of 3 ints
[x, y, z] bounding box padding beyond box spanned by coordinates
:return: list of uint64s if successful, or None if no valid split
new root ids
"""
time_start = time.time()
bb_offset = np.array(list(bb_offset))
source_coords = np.array(source_coords)
sink_coords = np.array(sink_coords)
# Decide a reasonable bounding box (NOT guaranteed to be successful!)
coords = np.concatenate([source_coords, sink_coords])
bounding_box = [np.min(coords, axis=0), np.max(coords, axis=0)]
bounding_box[0] -= bb_offset
bounding_box[1] += bb_offset
# Verify that sink and source are from the same root object
root_ids = set()
for source_id in source_ids:
root_ids.add(self.get_root(source_id))
for sink_id in sink_ids:
root_ids.add(self.get_root(sink_id))
if len(root_ids) > 1:
raise cg_exceptions.PreconditionError(
f"All supervoxel must belong to the same object. Already split?"
)
self.logger.debug("Get roots and check: %.3fms" %
((time.time() - time_start) * 1000))
time_start = time.time() # ------------------------------------------
root_id = root_ids.pop()
# Get edges between local supervoxels
n_chunks_affected = np.product((np.ceil(bounding_box[1] / self.chunk_size)).astype(np.int) -
(np.floor(bounding_box[0] / self.chunk_size)).astype(np.int))
self.logger.debug("Number of affected chunks: %d" % n_chunks_affected)
self.logger.debug(f"Bounding box: {bounding_box}")
self.logger.debug(f"Bounding box padding: {bb_offset}")
self.logger.debug(f"Source ids: {source_ids}")
self.logger.debug(f"Sink ids: {sink_ids}")
self.logger.debug(f"Root id: {root_id}")
edges, affs, areas = self.get_subgraph_edges(root_id,
bounding_box=bounding_box,
bb_is_coordinate=True)
self.logger.debug("Get edges and affs: %.3fms" %
((time.time() - time_start) * 1000))
time_start = time.time() # ------------------------------------------
# Compute mincut
atomic_edges = cutting.mincut(edges, affs, source_ids, sink_ids)
self.logger.debug("Mincut: %.3fms" % ((time.time() - time_start) * 1000))
time_start = time.time() # ------------------------------------------
if len(atomic_edges) == 0:
self.logger.warning("Mincut failed. Try again...")
return False, None
# Check if any edge in the cutset is infinite (== between chunks)
# We would prevent such a cut
atomic_edges_flattened_view = atomic_edges.view(dtype='u8,u8')
edges_flattened_view = edges.view(dtype='u8,u8')
cutset_mask = np.in1d(edges_flattened_view, atomic_edges_flattened_view)
if np.any(np.isinf(affs[cutset_mask])):
self.logger.error("inf in cutset")
return False, None
# Remove edgesc
success, result = self._remove_edges(operation_id, atomic_edges)
if not success:
self.logger.error("remove edges failed")
return False, None
new_roots, rows, time_stamp, lvl2_node_mapping = result
self.logger.debug("Remove edges: %.3fms" % ((time.time() - time_start) * 1000))
time_start = time.time() # ------------------------------------------
return True, (new_roots, rows, atomic_edges, time_stamp, lvl2_node_mapping)
def _remove_edges(self, operation_id: np.uint64,
atomic_edges: Sequence[Tuple[np.uint64, np.uint64]]
) -> Tuple[bool, # success
Optional[Tuple[
List[np.uint64], # new_roots
List[bigtable.row.Row], # rows
datetime.datetime,
dict]]]: # timestamp
""" Removes atomic edges from the ChunkedGraph
:param operation_id: uint64
:param atomic_edges: list of two uint64s
:return: list of uint64s
new root ids
"""
time_stamp = datetime.datetime.utcnow()
# Comply to resolution of BigTables TimeRange
time_stamp = get_google_compatible_time_stamp(time_stamp,
round_up=False)
# Make sure that we have a list of edges
if isinstance(atomic_edges[0], np.uint64):
atomic_edges = [atomic_edges]
atomic_edges = np.array(atomic_edges)
u_atomic_ids = np.unique(atomic_edges)
# Get number of layers and the original root
original_parent_ids = self.get_all_parents(atomic_edges[0, 0])
original_root = original_parent_ids[-1]
# Find lowest level chunks that might have changed
chunk_ids = self.get_chunk_ids_from_node_ids(u_atomic_ids)
u_chunk_ids, u_chunk_ids_idx = np.unique(chunk_ids,
return_index=True)
involved_chunk_id_dict = dict(zip(u_chunk_ids,
u_atomic_ids[u_chunk_ids_idx]))
# Note: After removing the atomic edges, we basically need to build the
# ChunkedGraph for these chunks from the ground up.
# involved_chunk_id_dict stores a representative for each chunk that we
# can use to acquire the parent that knows about all atomic nodes in the
# chunk.
rows = []
# Remove atomic edges
# Removing edges nodewise. We cannot remove edges edgewise because that
# would add up multiple changes to each node (row). Unfortunately,
# the batch write (mutate_rows) from BigTable cannot handle multiple
# changes to the same row-col within a batch write and only executes
# one of them.
for u_atomic_id in np.unique(atomic_edges):
atomic_node_info = self.get_atomic_node_info(u_atomic_id)
partners = np.concatenate([atomic_edges[atomic_edges[:, 0] == u_atomic_id][:, 1],
atomic_edges[atomic_edges[:, 1] == u_atomic_id][:, 0]])
partner_ids = np.where(
np.in1d(atomic_node_info[column_keys.Connectivity.Partner], partners))[0]
partner_ids = \
np.array(partner_ids, dtype=column_keys.Connectivity.Connected.basetype)
val_dict = {column_keys.Connectivity.Connected: partner_ids}
rows.append(self.mutate_row(serializers.serialize_uint64(u_atomic_id),
val_dict, time_stamp=time_stamp))
# Dictionaries keeping temporary information about the ChunkedGraph
# while updates are not written to BigTable yet
new_layer_parent_dict = {}
cross_edge_dict = {}
old_id_dict = collections.defaultdict(list)
# This view of the to be removed edges helps us to compute the mask
# of the retained edges in each chunk
double_atomic_edges = np.concatenate([atomic_edges,
atomic_edges[:, ::-1]],
axis=0)
double_atomic_edges_view = double_atomic_edges.view(dtype='u8,u8')
n_edges = double_atomic_edges.shape[0]
double_atomic_edges_view = double_atomic_edges_view.reshape(n_edges)
nodes_in_removed_edges = np.unique(atomic_edges)
lvl2_node_mapping = {} # Needed for instant remeshing
# For each involved chunk we need to compute connected components
for chunk_id in involved_chunk_id_dict.keys():
# Get the local subgraph
node_id = involved_chunk_id_dict[chunk_id]
old_parent_id = self.get_parent(node_id)
chunk_edges, _, _ = self.get_subgraph_chunk(old_parent_id,
make_unique=False)
# These edges still contain the removed edges.
# For consistency reasons we can only write to BigTable one time.
# Hence, we have to evict the to be removed "atomic_edges" from the
# queried edges.
retained_edges_mask =\
~np.in1d(chunk_edges.view(dtype='u8,u8').reshape(chunk_edges.shape[0]),
double_atomic_edges_view)
chunk_edges = chunk_edges[retained_edges_mask]
# The cross chunk edges are passed on to the parents to compute
# connected components in higher layers.
terminal_chunk_ids = self.get_chunk_ids_from_node_ids(np.ascontiguousarray(chunk_edges[:, 1]))
cross_edge_mask = terminal_chunk_ids != chunk_id
cross_edges = chunk_edges[cross_edge_mask]
chunk_edges = chunk_edges[~cross_edge_mask]
isolated_nodes = list(filter(
| |
# Copyright (c) 2016 SUSE. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from azure.common import AzureMissingResourceHttpError
from azure.storage.blob.pageblobservice import PageBlobService
from datetime import datetime
from builtins import bytes
from uuid import uuid4
# project
from azurectl.defaults import Defaults
from azurectl.storage.storage import Storage
from azurectl.azurectl_exceptions import (
AzureDataDiskAttachError,
AzureDataDiskCreateError,
AzureDataDiskShowError,
AzureDataDiskDeleteError,
AzureDataDiskNoAvailableLun
)
class DataDisk(object):
"""
Implements virtual machine data disk (non-root/boot disk) management.
"""
    def __init__(self, account):
        """
        Bind to an Azure account and its service management endpoint.
        """
        self.account = account
        self.service = account.get_management_service()
        # name of the most recently generated vhd file, set by create()
        self.data_disk_name = None
def create(self, identifier, disk_size_in_gb, label=None):
"""
Create new data disk
"""
footer = self.__generate_vhd_footer(disk_size_in_gb)
disk_name = self.__generate_filename(identifier)
size_in_bytes = int(disk_size_in_gb) * 1073741824 + 512
try:
storage = Storage(
self.account, self.account.storage_container()
)
storage.upload_empty_image(size_in_bytes, footer, disk_name)
except Exception as e:
raise AzureDataDiskCreateError(
'%s: %s' % (type(e).__name__, format(e))
)
disk_url = self.__data_disk_url(disk_name)
args = {
'media_link': disk_url,
'name': self.__strip_platform_extension(disk_name),
'has_operating_system': False,
'os': 'Linux'
}
args['label'] = label if label else identifier
try:
self.service.add_disk(**args)
except Exception as e:
raise AzureDataDiskCreateError(
'%s: %s' % (type(e).__name__, format(e))
)
def show(self, disk_name):
"""
Show details of the specified disk
"""
try:
disk = self.service.get_disk(disk_name)
except Exception as e:
raise AzureDataDiskShowError(
'%s: %s' % (type(e).__name__, format(e))
)
return self.__decorate_disk(disk)
def delete(self, disk_name):
"""
Delete data disk and the underlying vhd disk image
Note the deletion will fail if the disk is still
in use, meaning attached to an instance
"""
try:
self.service.delete_disk(disk_name, delete_vhd=True)
except Exception as e:
raise AzureDataDiskDeleteError(
'%s: %s' % (type(e).__name__, format(e))
)
def list(self):
"""
List disk(s) from your image repository
"""
disks = []
try:
disks = self.service.list_disks()
except Exception:
pass
return [
self.__decorate_disk_list(disk) for disk in disks
]
def show_attached(
self, cloud_service_name, instance_name=None, at_lun=None
):
"""
Show details of the data disks attached to the virtual
machine. If a lun is specified show only details for the disk
at the specified lun
"""
if not instance_name:
instance_name = cloud_service_name
disks = []
luns = [at_lun] if at_lun is not None else list(range(Defaults.max_vm_luns()))
for lun in luns:
try:
disks.append(self.service.get_data_disk(
cloud_service_name, cloud_service_name, instance_name, lun
))
except Exception as e:
if at_lun is not None:
# only if a disk information is requested for a specific
# lun but does not exist, an exception is raised
raise AzureDataDiskShowError(
'%s: %s' % (type(e).__name__, format(e))
)
return [self.__decorate_attached_disk(disk) for disk in disks]
def attach(
self, disk_name, cloud_service_name, instance_name=None,
label=None, lun=None, host_caching=None, blob_name=None
):
"""
Attach existing data disk to the instance
"""
if not instance_name:
instance_name = cloud_service_name
if lun not in list(range(Defaults.max_vm_luns())):
lun = self.__get_first_available_lun(
cloud_service_name, instance_name
)
if disk_name and not blob_name:
# assume existing data-disk
args = {
'disk_name': disk_name
}
elif disk_name and blob_name:
# create new data-disk based using disk_name
args = {
'disk_name': disk_name,
'source_media_link': self.__data_disk_url(blob_name)
}
elif not disk_name and blob_name:
# find data-disk name for blob_name,
# or create a new data-disk for blob_name
disk_name = self.__find_existing_disk_name_for_blob_name(
blob_name,
self.service.list_disks()
)
if disk_name:
args = {
'disk_name': disk_name
}
else:
args = {
'disk_name': self.__strip_platform_extension(blob_name),
'source_media_link': self.__data_disk_url(blob_name)
}
else:
raise AzureDataDiskAttachError(
"Neither disk_name nor blob_name was supplied"
)
if host_caching:
args['host_caching'] = host_caching
if label:
args['disk_label'] = label
try:
result = self.service.add_data_disk(
cloud_service_name, cloud_service_name, instance_name, lun,
**args
)
self.attached_lun = lun
except Exception as e:
raise AzureDataDiskAttachError(
'%s: %s' % (type(e).__name__, format(e))
)
return Defaults.unify_id(result.request_id)
def detach(self, lun, cloud_service_name, instance_name=None):
"""
Delete data disk from the instance, retaining underlying vhd blob
"""
if not instance_name:
instance_name = cloud_service_name
try:
result = self.service.delete_data_disk(
cloud_service_name, cloud_service_name, instance_name, lun,
delete_vhd=False
)
except Exception as e:
raise AzureDataDiskDeleteError(
'%s: %s' % (type(e).__name__, format(e))
)
return Defaults.unify_id(result.request_id)
def __get_first_available_lun(self, cloud_service_name, instance_name):
lun = 0
while lun < Defaults.max_vm_luns():
try:
self.service.get_data_disk(
cloud_service_name, cloud_service_name, instance_name, lun
)
except AzureMissingResourceHttpError:
return lun
else:
lun += 1
raise AzureDataDiskNoAvailableLun(
"All LUNs on this VM are occupied."
)
def __generate_filename(self, identifier):
"""
Generate vhd disk name with respect to the Azure naming
conventions for data disks
"""
self.data_disk_name = '%s-data-disk-%s.vhd' % (
identifier, datetime.isoformat(datetime.utcnow()).replace(':', '_')
)
return self.data_disk_name
def __data_disk_url(self, filename):
blob_service = PageBlobService(
self.account.storage_name(),
self.account.storage_key(),
endpoint_suffix=self.account.get_blob_service_host_base()
)
return blob_service.make_blob_url(
self.account.storage_container(),
filename
)
def __strip_platform_extension(self, name):
extensions = ['.vhd', '.vhdfixed']
for extension in extensions:
if name.endswith(extension):
return name[:-len(extension)]
def __find_existing_disk_name_for_blob_name(self, blob_name, disks):
"""
if a data-disk object exists for the given blob name, find it;
if not, return None as a signal to generate one.
"""
disk_url = self.__data_disk_url(blob_name)
for disk in disks:
if disk.media_link == disk_url:
return disk.name
return None
def __decorate_attached_disk(self, data_virtual_hard_disk):
return {
'label': data_virtual_hard_disk.disk_label,
'host-caching': data_virtual_hard_disk.host_caching,
'disk-url': data_virtual_hard_disk.media_link,
'source-image-url': data_virtual_hard_disk.source_media_link,
'lun': data_virtual_hard_disk.lun,
'size': '%d GB' % data_virtual_hard_disk.logical_disk_size_in_gb
}
def __decorate_disk(self, disk):
attach_info = {}
if disk.attached_to:
attach_info = {
'hosted_service_name': disk.attached_to.hosted_service_name,
'deployment_name': disk.attached_to.deployment_name,
'role_name': disk.attached_to.role_name
}
return {
'affinity_group': disk.affinity_group,
'attached_to': attach_info,
'has_operating_system': disk.has_operating_system,
'is_corrupted': disk.is_corrupted,
'location': disk.location,
'logical_disk_size_in_gb': '%d GB' % disk.logical_disk_size_in_gb,
'label': disk.label,
'media_link': disk.media_link,
'name': disk.name,
'os': disk.os,
'source_image_name': disk.source_image_name
}
def __decorate_disk_list(self, disk):
attached = True if disk.attached_to else False
return {
'is_attached': attached,
'name': disk.name,
}
def __generate_vhd_footer(self, disk_size_in_gb):
"""
Kudos to <NAME>: https://gist.github.com/sedouard
who provided the following:
Generate an empty vhd fixed disk of the specified size.
The file must be conform to the VHD Footer Format Specification at
https://technet.microsoft.com/en-us/virtualization/bb676673.aspx#E3B
which specifies the data structure as follows:
* Field Size (bytes)
* Cookie 8
* Features 4
* Version 4
* Data Offset 4
* TimeStamp 4
* Creator App 4
* Creator Ver 4
* CreatorHostOS 4
* Original Size 8
* Current Size 8
* Disk Geo 4
* Disk Type 4
* Checksum 4
* Unique ID 16
* Saved State 1
* Reserved 427
"""
# disk size in bytes
byte_size = int(disk_size_in_gb) * 1073741824
# the ascii string 'conectix'
cookie = bytearray(
[0x63, 0x6f, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x78]
)
# no features enabled
features = bytearray(
[0x00, 0x00, 0x00, 0x02]
)
# current file version
version = bytearray(
[0x00, 0x01, 0x00, 0x00]
)
# in the case of a fixed disk, this is set to -1
data_offset = bytearray(
[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff]
)
# hex representation of seconds since january 1st 2000
timestamp = bytearray.fromhex(
hex(int(datetime.now().strftime('%s')) - 946684800).replace(
'L', ''
).replace('0x', '').zfill(8))
# ascii code for 'wa' = windowsazure
creator_app = bytearray(
[0x77, 0x61, 0x00, 0x00]
)
# ascii code for version of creator application
creator_version = bytearray(
[0x00, 0x07, 0x00, 0x00]
)
# creator host os. windows or mac, ascii for 'wi2k'
creator_os = bytearray(
[0x57, 0x69, 0x32, 0x6b]
)
original_size = bytearray.fromhex(
hex(byte_size).replace('0x', '').zfill(16)
)
current_size = bytearray.fromhex(
hex(byte_size).replace('0x', '').zfill(16)
)
# 0x820=2080 cylenders, 0x10=16 heads, 0x3f=63 sectors/track
disk_geometry = bytearray(
[0x08, 0x20, 0x10, 0x3f]
)
# 0x2 = fixed hard disk
disk_type = bytearray(
[0x00, 0x00, 0x00, 0x02]
)
# a uuid
unique_id = bytearray.fromhex(uuid4().hex)
# saved state and reserved
saved_reserved = bytearray(428)
# Compute Checksum with Checksum = ones compliment of sum of
# all fields excluding the checksum field
to_checksum_array = \
cookie + features + version + data_offset + \
timestamp + creator_app + creator_version + \
creator_os + original_size + current_size + \
disk_geometry + disk_type + unique_id + saved_reserved
total = 0
for b in to_checksum_array:
total += b
total = ~total
# handle two's compliment
def tohex(val, nbits):
return hex((val + (1 << nbits)) % (1 << nbits))
checksum = bytearray.fromhex(
tohex(total, 32).replace('0x', '')
)
# vhd disk blob
blob_data = \
cookie + features + version + data_offset + \
timestamp + creator_app + creator_version + \
creator_os + original_size | |
<reponame>crdietrich/meerkat
"""MPU-6050 Gyroscope/Accelerometer I2C Driver for Raspberry PI & MicroPython
Made by: MrTijn/Tijndagamer
Forked 01/02/2019 from https://github.com/Tijndagamer/mpu6050
and merged into meerkat by: <NAME> / crdietrich
The MIT License (MIT)
Copyright (c) 2015, 2016, 2017, 2018 Martijn (MrTijn), 2019 <NAME>
and contributors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from meerkat.base import I2C
from meerkat.data import Meta, CSVWriter, JSONWriter
class mpu6050:
# Global Variables
GRAVITIY_MS2 = 9.80665
# State Variables
accel_range = None
gyro_range = None
# Scale Modifiers
ACCEL_SCALE_MODIFIER_2G = 16384.0
ACCEL_SCALE_MODIFIER_4G = 8192.0
ACCEL_SCALE_MODIFIER_8G = 4096.0
ACCEL_SCALE_MODIFIER_16G = 2048.0
GYRO_SCALE_MODIFIER_250DEG = 131.0
GYRO_SCALE_MODIFIER_500DEG = 65.5
GYRO_SCALE_MODIFIER_1000DEG = 32.8
GYRO_SCALE_MODIFIER_2000DEG = 16.4
# Pre-defined ranges
ACCEL_RANGE_2G = 0x00
ACCEL_RANGE_4G = 0x08
ACCEL_RANGE_8G = 0x10
ACCEL_RANGE_16G = 0x18
GYRO_RANGE_250DEG = 0x00
GYRO_RANGE_500DEG = 0x08
GYRO_RANGE_1000DEG = 0x10
GYRO_RANGE_2000DEG = 0x18
# MPU-6050 Registers
PWR_MGMT_1 = 0x6B
PWR_MGMT_2 = 0x6C
ACCEL_XOUT0 = 0x3B
ACCEL_YOUT0 = 0x3D
ACCEL_ZOUT0 = 0x3F
TEMP_OUT0 = 0x41
GYRO_XOUT0 = 0x43
GYRO_YOUT0 = 0x45
GYRO_ZOUT0 = 0x47
ACCEL_CONFIG = 0x1C
GYRO_CONFIG = 0x1B
    def __init__(self, bus_n, bus_addr=0x68, output='csv', name='MPU6050'):
        """Open the I2C device and wake it from its power-on sleep mode.

        bus_n -- I2C bus number to open
        bus_addr -- I2C device address, 0x68 by default
        output -- recorded in writer_output (presumably selects csv vs
            json output downstream -- verify against callers)
        name -- device name used in the metadata record
        """
        # i2c bus
        self.bus = I2C(bus_n=bus_n, bus_addr=bus_addr)
        # Wake up the MPU-6050 since it starts in sleep mode
        # by toggling bit6 from 1 to 0, see pg 40 of RM-MPU-6000A-00 v4.2
        self.bus.write_register_8bit(self.PWR_MGMT_1, 0x00)
        # information about this device
        self.metadata = Meta(name=name)
        self.metadata.description = 'TDK InvenSense Gyro & Accelerometer'
        self.metadata.urls = 'https://www.invensense.com/products/motion-tracking/6-axis/mpu-6050/'
        self.metadata.manufacturer = 'Adafruit Industries & TDK'
        # note: accuracy in datasheet is relative to scale factor - LSB/(deg/s) +/-3%
        # is there a better way to describe this? +/-3% below implies relative to deg/s output...
        self.metadata.header = ['description', 'sample_n', 'ax', 'ay', 'az', 'gx', 'gy', 'gz']
        self.metadata.dtype = ['str', 'int', 'float', 'float', 'float', 'float', 'float', 'float']
        self.metadata.units = [None, 'count', 'g', 'g', 'g', 'deg/s', 'deg/s', 'deg/s']
        self.metadata.accuracy = [None, 1, '+/-3%', '+/-3%', '+/-3%', '+/-3%', '+/-3%', '+/-3%']
        self.metadata.accuracy_precision_note = 'See datasheet for scale factor dependent accuracy & LSB precision'
        self.metadata.precision = None
        # specific specifications
        self.metadata.gyro_accuracy = '+/-3%, +/-2% cross axis'
        self.metadata.gyro_precision = '16bit'
        self.metadata.gyro_noise = '0.05 deg/s-rms'
        self.metadata.accel_accuracy = '+/-0.5%, +/-2 cross axis'
        self.metadata.accel_precision = '16bit'
        self.metadata.accel_noise = 'PSD 400 ug / Hz**1/2'
        self.metadata.bus_n = bus_n
        self.metadata.bus_addr = hex(bus_addr)
        # data recording classes
        self.writer_output = output
        self.csv_writer = CSVWriter(metadata=self.metadata, time_format='std_time_ms')
        self.json_writer = JSONWriter(metadata=self.metadata, time_format='std_time_ms')
# I2C communication methods
def read_i2c_word(self, register):
"""Read two i2c registers and combine them.
register -- the first register to read from.
Returns the combined read results.
"""
value = self.bus.read_register_16bit(register)
if value >= 0x8000:
return -((65535 - value) + 1)
else:
return value
# MPU-6050 Methods
def get_temp(self):
"""Reads the temperature from the onboard temperature sensor of the MPU-6050.
Returns the temperature in degrees Celcius.
"""
raw_temp = self.read_i2c_word(self.TEMP_OUT0)
# Get the actual temperature using the formule given in the
# MPU-6050 Register Map and Descriptions revision 4.2, page 30
actual_temp = (raw_temp / 340.0) + 36.53
return actual_temp
    def set_accel_range(self, accel_range):
        """Sets the range of the accelerometer to range.
        accel_range -- the range to set the accelerometer to. Using a
        pre-defined range is advised.
        """
        self.accel_range = accel_range
        # First change it to 0x00 to make sure we write the correct value later
        self.bus.write_register_16bit(self.ACCEL_CONFIG, 0x00)
        # Write the new range to the ACCEL_CONFIG register
        # NOTE(review): ACCEL_CONFIG is a single-byte register on the
        # MPU-6050 and __init__ uses write_register_8bit for PWR_MGMT_1 --
        # confirm a 16 bit write is intended here
        self.bus.write_register_16bit(self.ACCEL_CONFIG, accel_range)
def read_accel_range(self, raw = False):
"""Reads the range the accelerometer is set to.
If raw is True, it will return the raw value from the ACCEL_CONFIG
register
If raw is False, it will return an integer: -1, 2, 4, 8 or 16. When it
returns -1 something went wrong.
"""
raw_data = self.bus.read_register_16bit(self.ACCEL_CONFIG)
if raw is True:
return raw_data
elif raw is False:
if raw_data == self.ACCEL_RANGE_2G:
return 2
elif raw_data == self.ACCEL_RANGE_4G:
return 4
elif raw_data == self.ACCEL_RANGE_8G:
return 8
elif raw_data == self.ACCEL_RANGE_16G:
return 16
else:
return -1
def get_accel(self, g = False):
"""Gets and returns the X, Y and Z values from the accelerometer.
If g is True, it will return the data in g
If g is False, it will return the data in m/s^2
Returns a dictionary with the measurement results.
"""
x = self.bus.read_register_16bit(self.ACCEL_XOUT0)
y = self.bus.read_register_16bit(self.ACCEL_YOUT0)
z = self.bus.read_register_16bit(self.ACCEL_ZOUT0)
accel_scale_modifier = None
accel_range = self.read_accel_range(True)
if accel_range == self.ACCEL_RANGE_2G:
accel_scale_modifier = self.ACCEL_SCALE_MODIFIER_2G
elif accel_range == self.ACCEL_RANGE_4G:
accel_scale_modifier = self.ACCEL_SCALE_MODIFIER_4G
elif accel_range == self.ACCEL_RANGE_8G:
accel_scale_modifier = self.ACCEL_SCALE_MODIFIER_8G
elif accel_range == self.ACCEL_RANGE_16G:
accel_scale_modifier = self.ACCEL_SCALE_MODIFIER_16G
else:
print("Unkown range - accel_scale_modifier set to self.ACCEL_SCALE_MODIFIER_2G")
accel_scale_modifier = self.ACCEL_SCALE_MODIFIER_2G
x = x / accel_scale_modifier
y = y / accel_scale_modifier
z = z / accel_scale_modifier
if g is True:
return {'x': x, 'y': y, 'z': z}
elif g is False:
x = x * self.GRAVITIY_MS2
y = y * self.GRAVITIY_MS2
z = z * self.GRAVITIY_MS2
return x, y, z
    def set_gyro_range(self, gyro_range):
        """Sets the range of the gyroscope to range.
        gyro_range -- the range to set the gyroscope to. Using a pre-defined
        range is advised.
        """
        self.gyro_range = gyro_range
        # First change it to 0x00 to make sure we write the correct value later
        self.bus.write_register_16bit(self.GYRO_CONFIG, 0x00)
        # Write the new range to the GYRO_CONFIG register
        # NOTE(review): GYRO_CONFIG is a single-byte register on the
        # MPU-6050 -- confirm a 16 bit write is intended here (see
        # the matching note on set_accel_range)
        self.bus.write_register_16bit(self.GYRO_CONFIG, gyro_range)
def read_gyro_range(self, raw = False):
"""Reads the range the gyroscope is set to.
If raw is True, it will return the raw value from the GYRO_CONFIG
register.
If raw is False, it will return 250, 500, 1000, 2000 or -1. If the
returned value is equal to -1 something went wrong.
"""
raw_data = self.bus.read_register_16bit(self.GYRO_CONFIG)
if raw is True:
return raw_data
elif raw is False:
if raw_data == self.GYRO_RANGE_250DEG:
return 250
elif raw_data == self.GYRO_RANGE_500DEG:
return 500
elif raw_data == self.GYRO_RANGE_1000DEG:
return 1000
elif raw_data == self.GYRO_RANGE_2000DEG:
return 2000
else:
return -1
def get_gyro(self):
"""Gets and returns the X, Y and Z values from the gyroscope.
Returns the read values in a dictionary.
"""
x = self.read_i2c_word(self.GYRO_XOUT0)
y = self.read_i2c_word(self.GYRO_YOUT0)
z = self.read_i2c_word(self.GYRO_ZOUT0)
gyro_scale_modifier = None
gyro_range = self.read_gyro_range(True)
if gyro_range == self.GYRO_RANGE_250DEG:
gyro_scale_modifier = self.GYRO_SCALE_MODIFIER_250DEG
elif gyro_range == self.GYRO_RANGE_500DEG:
gyro_scale_modifier = self.GYRO_SCALE_MODIFIER_500DEG
elif gyro_range == self.GYRO_RANGE_1000DEG:
gyro_scale_modifier = self.GYRO_SCALE_MODIFIER_1000DEG
elif gyro_range == self.GYRO_RANGE_2000DEG:
gyro_scale_modifier = self.GYRO_SCALE_MODIFIER_2000DEG
else:
print("Unkown range - gyro_scale_modifier set to self.GYRO_SCALE_MODIFIER_250DEG")
gyro_scale_modifier = self.GYRO_SCALE_MODIFIER_250DEG
x = x / gyro_scale_modifier
y = y / gyro_scale_modifier
z = z / gyro_scale_modifier
return x, y, z
def get_all(self):
"""Reads and returns all the available data."""
temp = self.get_temp()
accel = self.get_accel()
gyro = self.get_gyro()
return [temp] + list(accel) + list(gyro)
def get(self, description='NA', n=1, delay=None):
"""Get formatted output.
Parameters
----------
description : char, description of data sample collected
n : int, number of samples to record in this burst
delay : float, seconds to delay between samples if n > 1
Returns
-------
data : list, data containing:
description: str, description of sample under test
temperature : float, temperature in degrees Celcius
delay : float, seconds to delay between samples if n > 1
"""
data_list = []
for m in range(1, n+1):
data_list.append([description, m] +
list(self.get_accel()) +
list(self.get_gyro()))
if n == 1:
return data_list[0]
if delay is not None:
time.sleep(delay)
return data_list
def publish(self, description='NA', n=1, delay=None):
"""Output | |
<filename>base_engine/lib/yael_v260_modif/yael/yael.py<gh_stars>10-100
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.10
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info
if _swig_python_version_info >= (2, 7, 0):
def swig_import_helper():
import importlib
pkg = __name__.rpartition('.')[0]
mname = '.'.join((pkg, '_yael')).lstrip('.')
try:
return importlib.import_module(mname)
except ImportError:
return importlib.import_module('_yael')
_yael = swig_import_helper()
del swig_import_helper
elif _swig_python_version_info >= (2, 6, 0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_yael', [dirname(__file__)])
except ImportError:
import _yael
return _yael
if fp is not None:
try:
_mod = imp.load_module('_yael', fp, pathname, description)
finally:
fp.close()
return _mod
_yael = swig_import_helper()
del swig_import_helper
else:
import _yael
del _swig_python_version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
try:
import builtins as __builtin__
except ImportError:
import __builtin__
# SWIG-generated attribute dispatch helpers: proxy classes route
# __setattr__/__getattr__ through the __swig_setmethods__ /
# __swig_getmethods__ tables populated by the generated classes below.
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
    if (name == "thisown"):
        return self.this.own(value)
    if (name == "this"):
        if type(value).__name__ == 'SwigPyObject':
            self.__dict__[name] = value
            return
    method = class_type.__swig_setmethods__.get(name, None)
    if method:
        return method(self, value)
    # static=1 forbids adding attributes not known to the C wrapper
    if (not static):
        if _newclass:
            object.__setattr__(self, name, value)
        else:
            self.__dict__[name] = value
    else:
        raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
    return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr(self, class_type, name):
    if (name == "thisown"):
        return self.this.own()
    method = class_type.__swig_getmethods__.get(name, None)
    if method:
        return method(self)
    raise AttributeError("'%s' object has no attribute '%s'" % (class_type.__name__, name))
def _swig_repr(self):
    # repr showing the wrapped C pointer when available
    try:
        strthis = "proxy of " + self.this.__repr__()
    except __builtin__.Exception:
        strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
# old-class/new-class compatibility shim for ancient Python versions
try:
    _object = object
    _newclass = 1
except __builtin__.Exception:
    class _object:
        pass
    _newclass = 0
def _frompointer_and_acquire(aclass,ptr):
r=aclass.frompointer(ptr)
if r: r.this.acquire()
return r
# SWIG-generated conversions between integer pointer values and
# float/double vector objects; each def is immediately replaced by the
# raw C function for speed.
def fvec_from_pointer_long(ptr):
    return _yael.fvec_from_pointer_long(ptr)
fvec_from_pointer_long = _yael.fvec_from_pointer_long
def fvec_to_pointer_long(fv):
    return _yael.fvec_to_pointer_long(fv)
fvec_to_pointer_long = _yael.fvec_to_pointer_long
def dvec_from_pointer_long(ptr):
    return _yael.dvec_from_pointer_long(ptr)
dvec_from_pointer_long = _yael.dvec_from_pointer_long
def dvec_to_pointer_long(fv):
    return _yael.dvec_to_pointer_long(fv)
dvec_to_pointer_long = _yael.dvec_to_pointer_long
class DoubleArray(_object):
    """SWIG-generated proxy for a C array of double from the _yael extension."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, DoubleArray, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, DoubleArray, name)
    __repr__ = _swig_repr
    def __init__(self, nelements):
        # allocate a new C array of nelements doubles
        this = _yael.new_DoubleArray(nelements)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_destroy__ = _yael.delete_DoubleArray
    __del__ = lambda self: None
    def __getitem__(self, index):
        return _yael.DoubleArray___getitem__(self, index)
    def __setitem__(self, index, value):
        return _yael.DoubleArray___setitem__(self, index, value)
    def cast(self):
        # presumably yields the raw C pointer -- see the SWIG interface
        return _yael.DoubleArray_cast(self)
    if _newclass:
        frompointer = staticmethod(_yael.DoubleArray_frompointer)
    else:
        frompointer = _yael.DoubleArray_frompointer
    def plus(self, i):
        return _yael.DoubleArray_plus(self, i)
    def clear(self, n):
        return _yael.DoubleArray_clear(self, n)
    def copyfrom(self, src, dest_ofs, n):
        return _yael.DoubleArray_copyfrom(self, src, dest_ofs, n)
    def tostring(self, n):
        return _yael.DoubleArray_tostring(self, n)
    def fromstring(self, obj):
        return _yael.DoubleArray_fromstring(self, obj)
# register the proxy class with the C extension runtime
DoubleArray_swigregister = _yael.DoubleArray_swigregister
DoubleArray_swigregister(DoubleArray)
# module level C variables exported by the extension
cvar = _yael.cvar
def DoubleArray_frompointer(t):
    return _yael.DoubleArray_frompointer(t)
DoubleArray_frompointer = _yael.DoubleArray_frompointer
# aptr/acquirepointer wrap a raw pointer and take ownership of it
DoubleArray.aptr=DoubleArray.acquirepointer=staticmethod(lambda ptr: _frompointer_and_acquire(DoubleArray,ptr))
class FloatArray(_object):
    """SWIG-generated proxy for a C array of float from the _yael extension."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, FloatArray, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, FloatArray, name)
    __repr__ = _swig_repr
    __swig_destroy__ = _yael.delete_FloatArray
    __del__ = lambda self: None
    def __getitem__(self, index):
        return _yael.FloatArray___getitem__(self, index)
    def __setitem__(self, index, value):
        return _yael.FloatArray___setitem__(self, index, value)
    def cast(self):
        return _yael.FloatArray_cast(self)
    if _newclass:
        frompointer = staticmethod(_yael.FloatArray_frompointer)
    else:
        frompointer = _yael.FloatArray_frompointer
    def plus(self, i):
        return _yael.FloatArray_plus(self, i)
    def clear(self, n):
        return _yael.FloatArray_clear(self, n)
    def copyfrom(self, src, dest_ofs, n):
        return _yael.FloatArray_copyfrom(self, src, dest_ofs, n)
    def tostring(self, n):
        return _yael.FloatArray_tostring(self, n)
    def fromstring(self, obj):
        return _yael.FloatArray_fromstring(self, obj)
    def __init__(self, *args):
        # constructor overloads are resolved by the C wrapper
        this = _yael.new_FloatArray(*args)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
# register the proxy class with the C extension runtime
FloatArray_swigregister = _yael.FloatArray_swigregister
FloatArray_swigregister(FloatArray)
def FloatArray_frompointer(t):
    return _yael.FloatArray_frompointer(t)
FloatArray_frompointer = _yael.FloatArray_frompointer
# aptr/acquirepointer wrap a raw pointer and take ownership of it
FloatArray.aptr=FloatArray.acquirepointer=staticmethod(lambda ptr: _frompointer_and_acquire(FloatArray,ptr))
class IntArray(_object):
    """SWIG-generated proxy for a C array of int from the _yael extension."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, IntArray, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, IntArray, name)
    __repr__ = _swig_repr
    __swig_destroy__ = _yael.delete_IntArray
    __del__ = lambda self: None
    def __getitem__(self, index):
        return _yael.IntArray___getitem__(self, index)
    def __setitem__(self, index, value):
        return _yael.IntArray___setitem__(self, index, value)
    def cast(self):
        return _yael.IntArray_cast(self)
    if _newclass:
        frompointer = staticmethod(_yael.IntArray_frompointer)
    else:
        frompointer = _yael.IntArray_frompointer
    def plus(self, i):
        return _yael.IntArray_plus(self, i)
    def clear(self, n):
        return _yael.IntArray_clear(self, n)
    def copyfrom(self, src, dest_ofs, n):
        return _yael.IntArray_copyfrom(self, src, dest_ofs, n)
    def tostring(self, n):
        return _yael.IntArray_tostring(self, n)
    def fromstring(self, obj):
        return _yael.IntArray_fromstring(self, obj)
    def __init__(self, *args):
        # constructor overloads are resolved by the C wrapper
        this = _yael.new_IntArray(*args)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
# register the proxy class with the C extension runtime
IntArray_swigregister = _yael.IntArray_swigregister
IntArray_swigregister(IntArray)
def IntArray_frompointer(t):
    return _yael.IntArray_frompointer(t)
IntArray_frompointer = _yael.IntArray_frompointer
# aptr/acquirepointer wrap a raw pointer and take ownership of it
IntArray.aptr=IntArray.acquirepointer=staticmethod(lambda ptr: _frompointer_and_acquire(IntArray,ptr))
class bvec(_object):
    """SWIG-generated proxy for a C 'bvec' array from _yael (presumably byte elements -- see the SWIG interface)."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, bvec, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, bvec, name)
    __repr__ = _swig_repr
    def __init__(self, nelements):
        # allocate a new C array of nelements elements
        this = _yael.new_bvec(nelements)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_destroy__ = _yael.delete_bvec
    __del__ = lambda self: None
    def __getitem__(self, index):
        return _yael.bvec___getitem__(self, index)
    def __setitem__(self, index, value):
        return _yael.bvec___setitem__(self, index, value)
    def cast(self):
        return _yael.bvec_cast(self)
    if _newclass:
        frompointer = staticmethod(_yael.bvec_frompointer)
    else:
        frompointer = _yael.bvec_frompointer
    def plus(self, i):
        return _yael.bvec_plus(self, i)
    def clear(self, n):
        return _yael.bvec_clear(self, n)
    def copyfrom(self, src, dest_ofs, n):
        return _yael.bvec_copyfrom(self, src, dest_ofs, n)
    def tostring(self, n):
        return _yael.bvec_tostring(self, n)
    def fromstring(self, obj):
        return _yael.bvec_fromstring(self, obj)
# register the proxy class with the C extension runtime
bvec_swigregister = _yael.bvec_swigregister
bvec_swigregister(bvec)
def bvec_frompointer(t):
    return _yael.bvec_frompointer(t)
bvec_frompointer = _yael.bvec_frompointer
# aptr/acquirepointer wrap a raw pointer and take ownership of it
bvec.aptr=bvec.acquirepointer=staticmethod(lambda ptr: _frompointer_and_acquire(bvec,ptr))
class lvec(_object):
    """SWIG-generated proxy for a C 'lvec' array from _yael (presumably long integer elements -- see the SWIG interface)."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, lvec, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, lvec, name)
    __repr__ = _swig_repr
    def __init__(self, nelements):
        # allocate a new C array of nelements elements
        this = _yael.new_lvec(nelements)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_destroy__ = _yael.delete_lvec
    __del__ = lambda self: None
    def __getitem__(self, index):
        return _yael.lvec___getitem__(self, index)
    def __setitem__(self, index, value):
        return _yael.lvec___setitem__(self, index, value)
    def cast(self):
        return _yael.lvec_cast(self)
    if _newclass:
        frompointer = staticmethod(_yael.lvec_frompointer)
    else:
        frompointer = _yael.lvec_frompointer
    def plus(self, i):
        return _yael.lvec_plus(self, i)
    def clear(self, n):
        return _yael.lvec_clear(self, n)
    def copyfrom(self, src, dest_ofs, n):
        return _yael.lvec_copyfrom(self, src, dest_ofs, n)
    def tostring(self, n):
        return _yael.lvec_tostring(self, n)
    def fromstring(self, obj):
        return _yael.lvec_fromstring(self, obj)
# register the proxy class with the C extension runtime
lvec_swigregister = _yael.lvec_swigregister
lvec_swigregister(lvec)
def lvec_frompointer(t):
    return _yael.lvec_frompointer(t)
lvec_frompointer = _yael.lvec_frompointer
# aptr/acquirepointer wrap a raw pointer and take ownership of it
lvec.aptr=lvec.acquirepointer=staticmethod(lambda ptr: _frompointer_and_acquire(lvec,ptr))
class UInt64Array(_object):
    """SWIG-generated proxy for a C array of 64 bit unsigned integers from the _yael extension."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, UInt64Array, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, UInt64Array, name)
    __repr__ = _swig_repr
    def __init__(self, nelements):
        # allocate a new C array of nelements elements
        this = _yael.new_UInt64Array(nelements)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_destroy__ = _yael.delete_UInt64Array
    __del__ = lambda self: None
    def __getitem__(self, index):
        return _yael.UInt64Array___getitem__(self, index)
    def __setitem__(self, index, value):
        return _yael.UInt64Array___setitem__(self, index, value)
    def cast(self):
        return _yael.UInt64Array_cast(self)
    if _newclass:
        frompointer = staticmethod(_yael.UInt64Array_frompointer)
    else:
        frompointer = _yael.UInt64Array_frompointer
    def plus(self, i):
        return _yael.UInt64Array_plus(self, i)
    def clear(self, n):
        return _yael.UInt64Array_clear(self, n)
    def copyfrom(self, src, dest_ofs, n):
        return _yael.UInt64Array_copyfrom(self, src, dest_ofs, n)
    def tostring(self, n):
        return _yael.UInt64Array_tostring(self, n)
    def fromstring(self, obj):
        return _yael.UInt64Array_fromstring(self, obj)
# register the proxy class with the C extension runtime
UInt64Array_swigregister = _yael.UInt64Array_swigregister
UInt64Array_swigregister(UInt64Array)
def UInt64Array_frompointer(t):
    return _yael.UInt64Array_frompointer(t)
UInt64Array_frompointer = _yael.UInt64Array_frompointer
# aptr/acquirepointer wrap a raw pointer and take ownership of it
UInt64Array.aptr=UInt64Array.acquirepointer=staticmethod(lambda ptr: _frompointer_and_acquire(UInt64Array,ptr))
class IntPtrArray(_object):
    """SWIG-generated proxy for a yael C array type (int* elements, judging
    by the name — confirm against the yael typemaps). Same generated shape
    as UInt64Array; every method delegates to _yael.
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, IntPtrArray, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, IntPtrArray, name)
    __repr__ = _swig_repr
    def __init__(self, nelements):
        # Allocate the underlying C array with `nelements` slots.
        this = _yael.new_IntPtrArray(nelements)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_destroy__ = _yael.delete_IntPtrArray
    __del__ = lambda self: None
    def __getitem__(self, index):
        return _yael.IntPtrArray___getitem__(self, index)
    def __setitem__(self, index, value):
        return _yael.IntPtrArray___setitem__(self, index, value)
    def cast(self):
        return _yael.IntPtrArray_cast(self)
    if _newclass:
        frompointer = staticmethod(_yael.IntPtrArray_frompointer)
    else:
        frompointer = _yael.IntPtrArray_frompointer
    def plus(self, i):
        return _yael.IntPtrArray_plus(self, i)
    def clear(self, n):
        return _yael.IntPtrArray_clear(self, n)
    def copyfrom(self, src, dest_ofs, n):
        return _yael.IntPtrArray_copyfrom(self, src, dest_ofs, n)
    def tostring(self, n):
        return _yael.IntPtrArray_tostring(self, n)
    def fromstring(self, obj):
        return _yael.IntPtrArray_fromstring(self, obj)
# Register the proxy class with the SWIG runtime.
IntPtrArray_swigregister = _yael.IntPtrArray_swigregister
IntPtrArray_swigregister(IntPtrArray)
def IntPtrArray_frompointer(t):
    return _yael.IntPtrArray_frompointer(t)
# The Python def above is immediately shadowed by the C function (generated).
IntPtrArray_frompointer = _yael.IntPtrArray_frompointer
IntPtrArray.aptr=IntPtrArray.acquirepointer=staticmethod(lambda ptr: _frompointer_and_acquire(IntPtrArray,ptr))
class FloatPtrArray(_object):
    """SWIG-generated proxy for a yael C array type (float* elements,
    judging by the name — confirm against the yael typemaps). Same
    generated shape as UInt64Array; every method delegates to _yael.
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, FloatPtrArray, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, FloatPtrArray, name)
    __repr__ = _swig_repr
    def __init__(self, nelements):
        # Allocate the underlying C array with `nelements` slots.
        this = _yael.new_FloatPtrArray(nelements)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_destroy__ = _yael.delete_FloatPtrArray
    __del__ = lambda self: None
    def __getitem__(self, index):
        return _yael.FloatPtrArray___getitem__(self, index)
    def __setitem__(self, index, value):
        return _yael.FloatPtrArray___setitem__(self, index, value)
    def cast(self):
        return _yael.FloatPtrArray_cast(self)
    if _newclass:
        frompointer = staticmethod(_yael.FloatPtrArray_frompointer)
    else:
        frompointer = _yael.FloatPtrArray_frompointer
    def plus(self, i):
        return _yael.FloatPtrArray_plus(self, i)
    def clear(self, n):
        return _yael.FloatPtrArray_clear(self, n)
    def copyfrom(self, src, dest_ofs, n):
        return _yael.FloatPtrArray_copyfrom(self, src, dest_ofs, n)
    def tostring(self, n):
        return _yael.FloatPtrArray_tostring(self, n)
    def fromstring(self, obj):
        return _yael.FloatPtrArray_fromstring(self, obj)
# Register the proxy class with the SWIG runtime.
FloatPtrArray_swigregister = _yael.FloatPtrArray_swigregister
FloatPtrArray_swigregister(FloatPtrArray)
def FloatPtrArray_frompointer(t):
    return _yael.FloatPtrArray_frompointer(t)
# The Python def above is immediately shadowed by the C function (generated).
FloatPtrArray_frompointer = _yael.FloatPtrArray_frompointer
FloatPtrArray.aptr=FloatPtrArray.acquirepointer=staticmethod(lambda ptr: _frompointer_and_acquire(FloatPtrArray,ptr))
class BytePtrArray(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, BytePtrArray, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, BytePtrArray, name)
__repr__ = _swig_repr
def __init__(self, nelements):
this = _yael.new_BytePtrArray(nelements)
try:
self.this.append(this)
except __builtin__.Exception:
| |
shortcut to change CSS Styles without having to update the main framework.
Those changes will only impact the current report.
:tip:
If you create a module CssOvr.py in the root of your environment, all the CSS Classes will be automatically
loaded and if some already existing in the framework, they will be overridden.
"""
for name in dir(pyMod):
if name.startswith("Css") and name != 'CssBase':
self.cssBespoke[str(name)] = getattr(pyMod, name)
    def addPy(self, pyCssCls):
        """
        :category: Css Classes
        :rubric: PY
        :type: Framework Extension
        :dsc:
          Add a bespoke class to the CSS Style Factory. This class is added on the fly and it cannot override an existing one.
        """
        # Mix the user's class with CssCls so it gets the factory behaviour.
        cssCls = type(pyCssCls.__name__, (pyCssCls, CssCls), {})
        # Register only when the name is free: bespoke classes must not
        # shadow framework classes already in the factory.
        if not pyCssCls.__name__ in self.__factory:
            self.__factory[pyCssCls.__name__] = {'class': cssCls, 'file': 'external (%s)' % self.aresObj.run.report_name}
        self.add(pyCssCls.__name__)
    def addCls(self, clsName, params):
        """
        :category: CSS / Python Collection
        :rubric: PY
        :dsc:
          Function to define a CSS class on the fly from the Python layer.
          `params` maps CSS attribute names to values.
        :return: The Python CssCls object (an instance, not a class)
        """
        styles = [{'attr': key, 'value': val} for key, val in params.items()]
        # NOTE(review): the '__style' keyword below is name-mangled by Python
        # to '_<EnclosingClass>__style', so CssCls.__init__'s lookup of
        # '_<clsName>__style' will miss it and fall back to its default; the
        # explicit .style assignment just after is what actually takes effect.
        self.cssBespoke[clsName] = type(clsName, (CssCls, ), dict(__style=[]))()
        self.cssBespoke[clsName].style = styles
        self.cssStyles.update(self.cssBespoke[clsName].getStyles())
        return self.cssBespoke[clsName]
    def change(self, cssCls, name, value):
        """
        :category: CSS / Python Overrides
        :rubric: PY
        :dsc:
          Store the attributes to be changed / overridden for a given class.
          The override is applied later, when add() instantiates the class.
        """
        # NOTE(review): assumes __bespokeAttr supports nested assignment
        # (e.g. a defaultdict(dict)) — confirm in the constructor.
        self.__bespokeAttr[cssCls][name] = value
    def reload(self):
        """
        :category: CSS function
        :rubric: PY
        :dsc:
          Force the CSS cache to be refreshed by rebuilding the factory.
          This should never be used locally as a simple change in the code will refresh all the caches as Flask will automatically restart
        """
        self.__factory = load(forceReload=True)
    def get(self, clsName):
        """
        :category: CSS function
        :rubric: PY
        :dsc:
          Returns the CSS object for a given Python class name; bespoke
          (report-defined) entries take precedence over the factory.
        :return: The Python CSS object, or None when the name is unknown
        """
        # NOTE(review): cssBespoke holds instances (see addCls) while the
        # factory holds classes — callers must cope with both.
        pyCss = self.cssBespoke[clsName] if clsName in self.cssBespoke else self.__factory.get(clsName, {}).get('class', None)
        return pyCss
    def add(self, clsName, htmlId=None, htmlTag=None, htmlType=None, cssRef=None):
        """
        :category: CSS function
        :rubric: PY
        :dsc:
          Add the Python Class to the report CSS objects. The bespoke style overrides will be applied first. The default are the
          standard styles defined in the root of the CSS module
        :return: The Python CSS Id (defined from the method setId in CssCls), or None when the class is unknown
        """
        # Bespoke (report-level) classes take precedence over the factory.
        cssMod = self.__factory.get(clsName, {}).get('class', None) if not clsName in self.cssBespoke else self.cssBespoke[clsName]
        if cssMod is None:
            return None
        pyCss = cssMod(htmlId=htmlId, htmlTag=htmlTag, htmlType=htmlType, cssRef=cssRef)
        pyCss.colorCharts = self.colorCharts
        # Apply any attribute overrides registered through change().
        if clsName in self.__bespokeAttr:
            for name, value in self.__bespokeAttr[clsName].items():
                pyCss.update(name, value)
        # Merge the rendered rules into the page-level style map.
        self.cssStyles.update(pyCss.getStyles())
        return pyCss.cssId
def __str__(self):
"""
:category: CSS function
:rubric: PY
:dsc:
This function will be in charge of producing the best CSS content according to the need.
If minify is set to true it will have to try to create groups and to aggregate the data before writing a one liner
:return: The String with all the CSS classes and definition
"""
if self.minify:
return "".join([ "%s %s" % (key, val) for key, val in self.cssStyles.items() if val != '{}'])
# no need for performance in the web report, certainly an investigation
return "\n".join(["%s %s" % (key, val) for key, val in self.cssStyles.items() if val != '{}'])
def getStyle(self, clsName):
"""
:category: CSS function
:rubric: PY
:dsc:
Get the CSS Attributes for a given Python CSS Class Name
:return: Return a String representing the CSS Attributes for a given Python CSS Class Name
"""
if clsName in self.cssBespoke:
return self.cssBespoke[clsName](None).getStyles().values()[0][1:-1]
return self.__factory[clsName]['class'](None).getStyles().values()[0][1:-1]
def pyRef(self, clsName):
"""
:category: CSS function
:rubric: PY
:dsc:
Convert the CSS Class Name to a standardized Class Name within this Python Framework
:return: A string with the CSS converted name
"""
return 'py_%s' % clsName.lower()
def getClsTag(self, clsNames):
"""
:category: HTML function
:rubric: PY
:dsc:
Create the CSS Tag to be added to the HTML Element to consider the different classes.
This will only add a class tag with the list of class names defined.
:return: A string with the HTML Class information to add to the element
"""
return 'class="%s"' % " ".join([self.pyRef(clsName) for clsName in clsNames])
class CssCls(object):
""" CSS Base class of all the derived styles
:category: CSS Class
:rubric: CSS
:dsc:
Main class to create from the Python CSS Framework well defined CSS Fragment which will be added to the page.
Each CSS Class create will produce a Class Name and it will be the one used in all the AReS components to set the Style.
This module will only consider the Static CSS classes and all the bespoke CSS Style used to defined more specifically a component will
be defined either in the string method of the component (old way) or in the jsStyle variable of the component (new way)
:TODO:
work on a way to optimize the CSS String generated in the header
example: http://www.cssportal.com/css-optimize/
"""
# This is a private function and it is not supposed to be updated
# please use the variable style in the class for any change
# It should be transformed ONLY in this class
# The structure of the dictionaries is using attr and value to be able to add some special
# keys in the future.
__style = None
reqCss = None # List of CSS Configurations required
preceedTag, parentTag, childrenTag, directChildrenTag, htmlTag = None, None, None, None, None
cssId = None # CSS Id
# Default values for the style in the web portal
colors10 = ['#5dd45d'] # the different colors used as reference in the framework
fontSize, headerFontSize = '14px', '18px'
# State variables, should have the same structure than __style
# Those variables are the ones used directly so please do not change then
# we usse static variables to nake it easier to retrieve in the editor
# target is not implemented and this feature is done in the javascript
hover, active, checked, disabled, empty, enabled, focus, link, visited = 9 * [None]
# Item CSS selector, should also have the sa,e structure than __style
before, after = None, None
childKinds = None
    def __init__(self, htmlId=None, htmlTag=None, htmlType=None, cssRef=None):
        """ Instantiate a CSS object with the different possible classes to be used in the style of the components
        """
        # A class-level htmlTag wins over the one passed by the caller.
        if self.htmlTag is not None:
            htmlTag = self.htmlTag
        self.setId(htmlId=htmlId, htmlTag=htmlTag, htmlType=htmlType, cssRef=cssRef)
        self.style = CssStyle()
        # "_%s__style" undoes Python's private-name mangling to read the
        # subclass's own __style definition; each record is copied so the
        # class-level template is never mutated.
        for l in getattr(self, "_%s__style" % self.__class__.__name__, {}):
            self.style.append(dict(l))
        # To add some special features required for this component.
        # This is to avoid having to put multiple times the same line of CSS in each class
        # This will simplify a lot the testing
        if self.reqCss is not None:
            for css in self.reqCss:
                for l in getattr(css, "_%s__style" % css.__name__, []):
                    self.style.append(dict(l))
        # Store the different CSS Styles defined in the python layer to dictionaries
        # This will allow the fact that some bespoke configuration can inherit from the main configuration
        # but some special attributes might be overridden.
        # It is not possible to change this differently from the components as it is supposed to be
        # static and it will be used as a text file in the future
        # If more overrides are needed please use the function .css() available in the components
        # or talk to your IT team in charge of this framework
        self.eventsStyles = {}
        for state in ['hover', 'active', 'checked', 'disabled', 'empty', 'enabled', 'focus', 'link', 'visited', 'after', 'before']:
            if getattr(self, state, None) is not None:
                self.eventsStyles[state] = CssStyle()
                for rec in getattr(self, state):
                    self.eventsStyles[state].append(dict(rec))
        # To add CSS Style link tr:nth-child(even)
        if self.childKinds is not None:
            if not isinstance(self.childKinds, list):
                self.childKinds = [self.childKinds]
            for childKind in self.childKinds:
                # e.g. type=':nth-child', value='(even)' -> ':nth-child(even)'
                childValue = "%(type)s%(value)s" % childKind
                self.eventsStyles[childValue] = CssStyle()
                for rec in childKind['style']:
                    self.eventsStyles[childValue].append(dict(rec))
def customize(self, style, eventsStyles):
"""
:category: CSS Class override
:rubric: CSS
:dsc:
Function defined to override or define the static CSS parameters when an CSS Style python object is instanciated.
This will allow for example to define the color according to the standard ones without hard coding | |
<reponame>vertica/vertica_ml_python
# (c) Copyright [2018-2022] Micro Focus or one of its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# |_ |~) _ _| _ /~\ _ |.
# |_)\/ |_)(_|(_|| \_/|_|(_|||
# /
# ____________ ______
# / __ `\ / /
# | \/ / / /
# |______ / / /
# |____/ / /
# _____________ / /
# \ / / /
# \ / / /
# \_______/ / /
# ______ / /
# \ / / /
# \ / / /
# \/ / /
# / /
# / /
# \ /
# \ /
# \/
# _
# \ / _ __|_. _ _ |_)
# \/ (/_| | |(_(_|| \/
# /
# VerticaPy is a Python library with scikit-like functionality for conducting
# data science projects on data stored in Vertica, taking advantage Vertica’s
# speed and built-in analytics and machine learning features. It supports the
# entire data science life cycle, uses a ‘pipeline’ mechanism to sequentialize
# data transformation operations, and offers beautiful graphical options.
#
# VerticaPy aims to do all of the above. The idea is simple: instead of moving
# data around for processing, VerticaPy brings the logic to the data.
#
#
# Modules
#
# Standard Python Modules
import os, math, shutil, re, time, decimal, warnings, datetime
from typing import Union
# VerticaPy Modules
import vertica_python
import verticapy
from verticapy.toolbox import *
from verticapy.errors import *
# Other Modules
import pandas as pd
try:
from IPython.core.display import display
except:
pass
#
# ---#
def create_schema(
    schema: str, raise_error: bool = False,
):
    """
    ---------------------------------------------------------------------------
    Creates a new schema.
    Parameters
    ----------
    schema: str
        Schema name.
    raise_error: bool, optional
        If the schema couldn't be created, the function raises an error.
    Returns
    -------
    bool
        True if the schema was successfully created, False otherwise.
    """
    try:
        executeSQL(f"CREATE SCHEMA {schema};", title="Creating the new schema.")
        return True
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit are
        # no longer swallowed when raise_error is False.
        if raise_error:
            raise
        return False
# ---#
def create_table(
    table_name: str,
    dtype: dict,
    schema: str = "",
    temporary_table: bool = False,
    temporary_local_table: bool = True,
    genSQL: bool = False,
    raise_error: bool = False,
):
    """
    ---------------------------------------------------------------------------
    Creates a new table using the input columns' names and data types.
    Parameters
    ----------
    table_name: str, optional
        The final table name.
    dtype: dict
        Dictionary of the user types. Each key represents a column name and each
        value represents its data type.
        Example: {"age": "int", "name": "varchar"}
    schema: str, optional
        Schema name.
    temporary_table: bool, optional
        If set to True, a temporary table will be created.
    temporary_local_table: bool, optional
        If set to True, a temporary local table will be created. The parameter
        'schema' must be empty, otherwise this parameter is ignored.
    genSQL: bool, optional
        If set to True, the SQL code for creating the final table will be
        generated but not executed.
    raise_error: bool, optional
        If the relation couldn't be created, raises the entire error.
    Returns
    -------
    bool
        True if the table was successfully created, False otherwise.
    """
    check_types(
        [
            ("table_name", table_name, [str]),
            ("schema", schema, [str]),
            ("dtype", dtype, [dict]),
            ("genSQL", genSQL, [bool]),
            ("temporary_table", temporary_table, [bool]),
            ("temporary_local_table", temporary_local_table, [bool]),
            ("raise_error", raise_error, [bool]),
        ]
    )
    # 'v_temp_schema' is the alias for a local temporary relation.
    if schema.lower() == "v_temp_schema":
        schema = ""
        temporary_local_table = True
    input_relation = (
        quote_ident(schema) + "." + quote_ident(table_name)
        if schema
        else quote_ident(table_name)
    )
    temp = "TEMPORARY " if temporary_table else ""
    if not schema:
        # LOCAL TEMPORARY only applies when no schema was given.
        temp = "LOCAL TEMPORARY " if temporary_local_table else ""
    query = "CREATE {}TABLE {}({}){};".format(
        temp,
        input_relation,
        ", ".join(
            ["{} {}".format(quote_ident(column), dtype[column]) for column in dtype]
        ),
        " ON COMMIT PRESERVE ROWS" if temp else "",
    )
    if genSQL:
        return query
    try:
        executeSQL(query, title="Creating the new table.")
        return True
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit are
        # no longer swallowed when raise_error is False.
        if raise_error:
            raise
        return False
# ---#
def create_verticapy_schema():
    """
    ---------------------------------------------------------------------------
    Creates a schema named 'verticapy' used to store VerticaPy extended models.
    """
    sql = "CREATE SCHEMA IF NOT EXISTS verticapy;"
    executeSQL(sql, title="Creating VerticaPy schema.")
    # Catalogue of saved models: one row per model.
    sql = """CREATE TABLE IF NOT EXISTS verticapy.models (model_name VARCHAR(128),
                                                          category VARCHAR(128),
                                                          model_type VARCHAR(128),
                                                          create_time TIMESTAMP,
                                                          size INT);"""
    executeSQL(sql, title="Creating the models table.")
    # Key/value attribute store, keyed by model name.
    sql = """CREATE TABLE IF NOT EXISTS verticapy.attr (model_name VARCHAR(128),
                                                        attr_name VARCHAR(128),
                                                        value VARCHAR(65000));"""
    executeSQL(sql, title="Creating the attr table.")
# ---#
def drop(name: str = "", method: str = "auto", raise_error: bool = False, **kwds):
"""
---------------------------------------------------------------------------
Drops the input relation. This can be a model, view, table, text index,
schema, or geo index.
Parameters
----------
name: str, optional
Relation name. If empty, it will drop all VerticaPy temporary
elements.
method / relation_type: str, optional
Method used to drop.
auto : identifies the table/view/index/model to drop.
It will never drop an entire schema unless the
method is set to 'schema'.
model : drops the input model.
table : drops the input table.
view : drops the input view.
geo : drops the input geo index.
text : drops the input text index.
schema : drops the input schema.
raise_error: bool, optional
If the object couldn't be dropped, this function raises an error.
Returns
-------
bool
True if the relation was dropped, False otherwise.
"""
if "relation_type" in kwds and method == "auto":
method = kwds["relation_type"]
if isinstance(method, str):
method = method.lower()
check_types(
[
("name", name, [str]),
(
"method",
method,
["table", "view", "model", "geo", "text", "auto", "schema"],
),
("raise_error", raise_error, [bool]),
]
)
schema, relation = schema_relation(name)
schema, relation = schema[1:-1], relation[1:-1]
if not (name):
method = "temp"
if method == "auto":
fail, end_conditions = False, False
query = (
f"SELECT * FROM columns WHERE table_schema = '{schema}'"
f" AND table_name = '{relation}'"
)
result = executeSQL(query, print_time_sql=False, method="fetchrow")
if not (result):
query = (
f"SELECT * FROM view_columns WHERE table_schema = '{schema}'"
f" AND table_name = '{relation}'"
)
result = executeSQL(query, print_time_sql=False, method="fetchrow")
elif not (end_conditions):
method = "table"
end_conditions = True
if not (result):
try:
query = (
"SELECT model_type FROM verticapy.models WHERE "
"LOWER(model_name) = '{0}'"
).format(quote_ident(name).lower())
result = executeSQL(query, print_time_sql=False, method="fetchrow")
except:
result = []
elif not (end_conditions):
method = "view"
end_conditions = True
if not (result):
query = f"SELECT * FROM models WHERE schema_name = '{schema}' AND model_name = '{relation}'"
result = executeSQL(query, print_time_sql=False, method="fetchrow")
elif not (end_conditions):
method = "model"
end_conditions = True
if not (result):
query = (
"SELECT * FROM (SELECT STV_Describe_Index () OVER ()) x WHERE name IN "
f"('{schema}.{relation}', '{relation}', '\"{schema}\".\"{relation}\"', "
f"'\"{relation}\"', '{schema}.\"{relation}\"', '\"{schema}\".{relation}')"
)
result = executeSQL(query, print_time_sql=False, method="fetchrow")
elif not (end_conditions):
method = "model"
end_conditions = True
if not (result):
try:
query = f'SELECT * FROM "{schema}"."{relation}" LIMIT 0;'
executeSQL(query, print_time_sql=False)
method = "text"
except:
fail = True
elif not (end_conditions):
method = "geo"
end_conditions = True
if fail:
if raise_error:
raise MissingRelation(
f"No relation / index / view / model named '{name}' was detected."
)
return False
query = ""
if method == "model":
model_type = kwds["model_type"] if "model_type" in kwds else None
try:
query = "SELECT model_type FROM verticapy.models WHERE LOWER(model_name) = '{}'".format(
quote_ident(name).lower()
)
result = executeSQL(query, print_time_sql=False, method="fetchfirstelem")
is_in_verticapy_schema = True
if not (model_type):
model_type = result
except:
is_in_verticapy_schema = False
if (
model_type
in (
"DBSCAN",
"LocalOutlierFactor",
"CountVectorizer",
"KernelDensity",
"AutoDataPrep",
"KNeighborsRegressor",
"KNeighborsClassifier",
"NearestCentroid",
)
or is_in_verticapy_schema
):
if model_type in ("DBSCAN", "LocalOutlierFactor"):
drop(name, method="table")
elif model_type == "CountVectorizer":
drop(name, method="text")
query = (
"SELECT value FROM verticapy.attr WHERE LOWER(model_name) = '{0}' "
"AND attr_name = 'countvectorizer_table'"
).format(quote_ident(name).lower())
res = executeSQL(query, print_time_sql=False, method="fetchrow")
if res and res[0]:
drop(res[0], method="table")
elif model_type == "KernelDensity":
drop(name.replace('"', "") + "_KernelDensity_Map", method="table")
drop(
"{}_KernelDensity_Tree".format(name.replace('"', "")),
method="model",
)
elif model_type == "AutoDataPrep":
drop(name, method="table")
if is_in_verticapy_schema:
sql = "DELETE FROM verticapy.models WHERE LOWER(model_name) = '{}';".format(
quote_ident(name).lower()
)
executeSQL(sql, title="Deleting vModel.")
executeSQL("COMMIT;", title="Commit.")
sql = "DELETE FROM verticapy.attr WHERE LOWER(model_name) = '{}';".format(
quote_ident(name).lower()
)
executeSQL(sql, title="Deleting vModel attributes.")
executeSQL("COMMIT;", title="Commit.")
else:
query = f"DROP MODEL {name};"
elif method == "table":
query = f"DROP TABLE {name};"
elif method == "view":
query = f"DROP VIEW {name};"
elif method == "geo":
query = f"SELECT | |
# This file is generated automatically through:
# d2lbook build lib
# Don't edit it directly
import sys
# Let the module refer to itself as `d2l`, matching how the book's code
# calls its own helpers (d2l.plt, d2l.set_figsize, ...).
d2l = sys.modules[__name__]
# Defined in file: ./chapter_preface/preface.md
from IPython import display
import collections
import os
import sys
import numpy as np
import math
from matplotlib import pyplot as plt
from mxnet import nd, autograd, gluon, init, context, image
from mxnet.gluon import nn, rnn
import random
import re
import time
import tarfile
import zipfile
# Defined in file: ./chapter_crashcourse/probability.md
def use_svg_display():
    """Render matplotlib figures as SVG in Jupyter."""
    fmt = 'svg'
    display.set_matplotlib_formats(fmt)
# Defined in file: ./chapter_crashcourse/probability.md
def set_figsize(figsize=(3.5, 2.5)):
    """Set the default matplotlib figure size and switch to SVG output."""
    use_svg_display()
    plt.rcParams.update({'figure.figsize': figsize})
# Defined in file: ./chapter_crashcourse/naive-bayes.md
def show_images(imgs, num_rows, num_cols, titles=None, scale=1.5):
    """Plot a list of images on a num_rows x num_cols grid; return the axes."""
    width, height = num_cols * scale, num_rows * scale
    _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=(width, height))
    axes = axes.flatten()
    for idx, (ax, img) in enumerate(zip(axes, imgs)):
        ax.imshow(img.asnumpy())
        # Hide both axes: these are image tiles, not charts.
        ax.axes.get_xaxis().set_visible(False)
        ax.axes.get_yaxis().set_visible(False)
        if titles:
            ax.set_title(titles[idx])
    return axes
# Defined in file: ./chapter_linear-networks/linear-regression.md
class Timer(object):
    """Record and aggregate multiple running times.

    The constructor starts the first interval immediately; stop() closes
    the current interval and appends its duration to self.times.
    """
    def __init__(self):
        self.times = []
        self.start()
    def start(self):
        """Begin timing a new interval."""
        self.start_time = time.time()
    def stop(self):
        """Close the current interval, record it, and return its duration."""
        elapsed = time.time() - self.start_time
        self.times.append(elapsed)
        return elapsed
    def avg(self):
        """Return the mean of the recorded times."""
        return sum(self.times) / len(self.times)
    def sum(self):
        """Return the total of the recorded times."""
        return sum(self.times)
    def cumsum(self):
        """Return the running total of the recorded times as a list."""
        return np.array(self.times).cumsum().tolist()
# Defined in file: ./chapter_linear-networks/linear-regression.md
def plot(X, Y=None, xlabel=None, ylabel=None, legend=None, xlim=None,
         ylim=None, xscale='linear', yscale='linear', fmts=None,
         figsize=(3.5, 2.5), axes=None):
    """Plot multiple lines.

    X and Y may be single sequences or lists of sequences; NDArray inputs
    are converted to numpy. If Y is None, each X series is plotted against
    its index. A single X is broadcast across all Y series.
    """
    if legend is None:
        # Avoid the mutable-default-argument pitfall ([] as default).
        legend = []
    d2l.set_figsize(figsize)
    axes = axes if axes else d2l.plt.gca()
    if isinstance(X, nd.NDArray): X = X.asnumpy()
    if isinstance(Y, nd.NDArray): Y = Y.asnumpy()
    # Normalize X/Y into equal-length lists of series.
    if not hasattr(X[0], "__len__"): X = [X]
    if Y is None: X, Y = [[]]*len(X), X
    if not hasattr(Y[0], "__len__"): Y = [Y]
    if len(X) != len(Y): X = X * len(Y)
    if not fmts: fmts = ['-']*len(X)
    axes.cla()
    for x, y, fmt in zip(X, Y, fmts):
        if isinstance(x, nd.NDArray): x = x.asnumpy()
        if isinstance(y, nd.NDArray): y = y.asnumpy()
        if len(x):
            axes.plot(x, y, fmt)
        else:
            # Empty x: plot y against its index.
            axes.plot(y, fmt)
    set_axes(axes, xlabel, ylabel, xlim, ylim, xscale, yscale, legend)
# Defined in file: ./chapter_linear-networks/linear-regression.md
def set_axes(axes, xlabel, ylabel, xlim, ylim, xscale, yscale, legend):
    """Configure labels, scales, limits, legend and grid on a matplotlib axes."""
    setters = (('set_xlabel', xlabel), ('set_ylabel', ylabel),
               ('set_xscale', xscale), ('set_yscale', yscale),
               ('set_xlim', xlim), ('set_ylim', ylim))
    for method_name, value in setters:
        getattr(axes, method_name)(value)
    if legend:
        axes.legend(legend)
    axes.grid()
# Defined in file: ./chapter_linear-networks/linear-regression-scratch.md
def synthetic_data(w, b, num_examples):
    """Generate features X and labels y = Xw + b + gaussian noise."""
    features = nd.random.normal(scale=1, shape=(num_examples, len(w)))
    labels = nd.dot(features, w) + b
    labels = labels + nd.random.normal(scale=0.01, shape=labels.shape)
    return features, labels
# Defined in file: ./chapter_linear-networks/linear-regression-scratch.md
def linreg(X, w, b):
    """Linear regression model: return Xw + b."""
    prediction = nd.dot(X, w)
    return prediction + b
# Defined in file: ./chapter_linear-networks/linear-regression-scratch.md
def squared_loss(y_hat, y):
    """Elementwise halved squared error: (y_hat - y)**2 / 2."""
    residual = y_hat - y.reshape(y_hat.shape)
    return residual ** 2 / 2
# Defined in file: ./chapter_linear-networks/linear-regression-scratch.md
def sgd(params, lr, batch_size):
    """Minibatch SGD: update each parameter in place by -lr * grad / batch_size."""
    for weight in params:
        step = lr * weight.grad / batch_size
        weight[:] = weight - step
# Defined in file: ./chapter_linear-networks/linear-regression-gluon.md
def load_array(data_arrays, batch_size, is_train=True):
    """Construct a Gluon data loader over in-memory arrays.

    Shuffling is enabled only for training data.
    """
    dataset = gluon.data.ArrayDataset(*data_arrays)
    loader = gluon.data.DataLoader(dataset, batch_size, shuffle=is_train)
    return loader
# Defined in file: ./chapter_linear-networks/fashion-mnist.md
def get_fashion_mnist_labels(labels):
    """Map numeric Fashion-MNIST class ids to their text labels."""
    text_labels = ('t-shirt', 'trouser', 'pullover', 'dress', 'coat',
                   'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot')
    named = []
    for label in labels:
        named.append(text_labels[int(label)])
    return named
# Defined in file: ./chapter_linear-networks/fashion-mnist.md
def get_dataloader_workers(num_workers=4):
    """Return the worker-process count for data loading.

    Windows gets 0 because extra loader processes are not supported there.
    """
    return 0 if sys.platform.startswith('win') else num_workers
# Defined in file: ./chapter_linear-networks/fashion-mnist.md
def load_data_fashion_mnist(batch_size, resize=None):
    """Download the Fashion-MNIST dataset and return (train, test) loaders."""
    dataset = gluon.data.vision
    transformers = []
    if resize:
        transformers.append(dataset.transforms.Resize(resize))
    transformers.append(dataset.transforms.ToTensor())
    trans = dataset.transforms.Compose(transformers)
    mnist_train = dataset.FashionMNIST(train=True).transform_first(trans)
    mnist_test = dataset.FashionMNIST(train=False).transform_first(trans)
    train_iter = gluon.data.DataLoader(mnist_train, batch_size, shuffle=True,
                                       num_workers=get_dataloader_workers())
    test_iter = gluon.data.DataLoader(mnist_test, batch_size, shuffle=False,
                                      num_workers=get_dataloader_workers())
    return (train_iter, test_iter)
# Defined in file: ./chapter_linear-networks/softmax-regression-scratch.md
def accuracy(y_hat, y):
    """Count correct predictions (argmax over axis 1 vs labels)."""
    predictions = y_hat.argmax(axis=1)
    correct = (predictions == y.astype('float32')).sum()
    return correct.asscalar()
# Defined in file: ./chapter_linear-networks/softmax-regression-scratch.md
def evaluate_accuracy(net, data_iter):
    """Compute the accuracy of net over all batches of data_iter."""
    metric = Accumulator(2)  # correct predictions, total examples
    for features, labels in data_iter:
        labels = labels.astype('float32')
        metric.add(accuracy(net(features), labels), labels.size)
    return metric[0] / metric[1]
# Defined in file: ./chapter_linear-networks/softmax-regression-scratch.md
class Accumulator(object):
    """Accumulate sums of n quantities over repeated add() calls."""
    def __init__(self, n):
        # One running total per tracked quantity.
        self.data = [0.0] * n
    def add(self, *args):
        # NOTE: zip truncates, so passing fewer args than slots shrinks data
        # (preserved behaviour of the original implementation).
        new_totals = []
        for total, increment in zip(self.data, args):
            new_totals.append(total + increment)
        self.data = new_totals
    def reset(self):
        """Zero all totals, keeping the slot count."""
        self.data = [0] * len(self.data)
    def __getitem__(self, i):
        return self.data[i]
# Defined in file: ./chapter_linear-networks/softmax-regression-scratch.md
def train_epoch_ch3(net, train_iter, loss, updater):
    """Train net for one epoch; return (mean loss, accuracy) over the data."""
    metric = Accumulator(3)  # train_loss_sum, train_acc_sum, num_examples
    if isinstance(updater, gluon.Trainer):
        # Normalize the two updater flavours (gluon.Trainer vs plain callable)
        # to a single callable taking the batch size.
        updater = updater.step
    for X, y in train_iter:
        # compute gradients and update parameters
        with autograd.record():
            y_hat = net(X)
            l = loss(y_hat, y)
        l.backward()
        updater(X.shape[0])
        metric.add(l.sum().asscalar(), accuracy(y_hat, y), y.size)
    # Return training loss and training accuracy
    return metric[0]/metric[2], metric[1]/metric[2]
# Defined in file: ./chapter_linear-networks/softmax-regression-scratch.md
class Animator(object):
    """Incrementally plot multiple lines, redrawing the figure on each add()."""
    # NOTE(review): legend=[] is a mutable default, but it is only read
    # (captured by the config_axes lambda), never mutated — harmless here.
    def __init__(self, xlabel=None, ylabel=None, legend=[], xlim=None,
                 ylim=None, xscale='linear', yscale='linear', fmts=None,
                 nrows=1, ncols=1, figsize=(3.5, 2.5)):
        """Incrementally plot multiple lines."""
        d2l.use_svg_display()
        self.fig, self.axes = d2l.plt.subplots(nrows, ncols, figsize=figsize)
        # Normalize to a list so self.axes[0] works for a single subplot too.
        if nrows * ncols == 1: self.axes = [self.axes,]
        # use a lambda to capture arguments
        self.config_axes = lambda : d2l.set_axes(
            self.axes[0], xlabel, ylabel, xlim, ylim, xscale, yscale, legend)
        # Lazily initialized in add(): one list per plotted line.
        self.X, self.Y, self.fmts = None, None, fmts
    def add(self, x, y):
        """Add multiple data points into the figure."""
        # Broadcast a scalar x across all n series in y.
        if not hasattr(y, "__len__"): y = [y]
        n = len(y)
        if not hasattr(x, "__len__"): x = [x] * n
        if not self.X: self.X = [[] for _ in range(n)]
        if not self.Y: self.Y = [[] for _ in range(n)]
        if not self.fmts: self.fmts = ['-'] * n
        for i, (a, b) in enumerate(zip(x, y)):
            # None points are skipped (used to update only some curves).
            if a is not None and b is not None:
                self.X[i].append(a)
                self.Y[i].append(b)
        # Full redraw: clear, replot every line, reapply axis config.
        self.axes[0].cla()
        for x, y, fmt in zip(self.X, self.Y, self.fmts):
            self.axes[0].plot(x, y, fmt)
        self.config_axes()
        display.display(self.fig)
        display.clear_output(wait=True)
# Defined in file: ./chapter_linear-networks/softmax-regression-scratch.md
def train_ch3(net, train_iter, test_iter, loss, num_epochs, updater):
    """Train net for num_epochs, animating train loss/acc and test accuracy.

    train_epoch_ch3 returns a (train_loss, train_acc) tuple; the test
    accuracy is appended to feed the animator's third curve.
    """
    # Removed the unused locals `trains, test_accs` from the original.
    animator = Animator(xlabel='epoch', xlim=[1, num_epochs],
                        ylim=[0.3, 0.9],
                        legend=['train loss', 'train acc', 'test acc'])
    for epoch in range(num_epochs):
        train_metrics = train_epoch_ch3(net, train_iter, loss, updater)
        test_acc = evaluate_accuracy(net, test_iter)
        animator.add(epoch + 1, train_metrics + (test_acc,))
# Defined in file: ./chapter_linear-networks/softmax-regression-scratch.md
def predict_ch3(net, test_iter, n=6):
    """Predict labels for the first test batch and plot n labelled examples."""
    # Grab only the first batch from the iterator.
    for X, y in test_iter:
        break
    trues = d2l.get_fashion_mnist_labels(y.asnumpy())
    preds = d2l.get_fashion_mnist_labels(net(X).argmax(axis=1).asnumpy())
    titles = [actual + '\n' + guessed for actual, guessed in zip(trues, preds)]
    d2l.show_images(X[0:n].reshape((n, 28, 28)), 1, n, titles=titles[0:n])
# Defined in file: ./chapter_multilayer-perceptrons/underfit-overfit.md
def evaluate_loss(net, data_iter, loss):
    """Evaluate the average loss of a model on the given dataset."""
    metric = d2l.Accumulator(2)  # accumulated loss, number of examples
    for features, labels in data_iter:
        batch_loss = loss(net(features), labels)
        metric.add(batch_loss.sum().asscalar(), labels.size)
    return metric[0] / metric[1]
# Defined in file: ./chapter_deep-learning-computation/use-gpu.md
def try_gpu(i=0):
    """Return gpu(i) if it exists, otherwise return cpu()."""
    if context.num_gpus() >= i + 1:
        return context.gpu(i)
    return context.cpu()
# Defined in file: ./chapter_deep-learning-computation/use-gpu.md
def try_all_gpus():
    """Return all available GPUs, or [cpu()] if no GPU exists."""
    devices = [context.gpu(i) for i in range(context.num_gpus())]
    if not devices:
        devices = [context.cpu()]
    return devices
# Defined in file: ./chapter_convolutional-neural-networks/conv-layer.md
def corr2d(X, K):
    """Compute 2D cross-correlation.

    Slides kernel K over X (no padding, stride 1) and returns the
    resulting (H - h + 1, W - w + 1) output.
    """
    kh, kw = K.shape
    out_h = X.shape[0] - kh + 1
    out_w = X.shape[1] - kw + 1
    Y = nd.zeros((out_h, out_w))
    for r in range(out_h):
        for c in range(out_w):
            # Elementwise product of the current window with K, summed.
            Y[r, c] = (X[r: r + kh, c: c + kw] * K).sum()
    return Y
# Defined in file: ./chapter_convolutional-neural-networks/lenet.md
def evaluate_accuracy_gpu(net, data_iter, ctx=None):
    """Compute classification accuracy of *net* over *data_iter* on *ctx*."""
    if not ctx:
        # Default to the first device the first parameter lives on.
        ctx = list(net.collect_params().values())[0].list_ctx()[0]
    # Accumulator slots: (correct predictions, total examples).
    stats = d2l.Accumulator(2)
    for features, labels in data_iter:
        features = features.as_in_context(ctx)
        labels = labels.as_in_context(ctx)
        stats.add(d2l.accuracy(net(features), labels), labels.size)
    return stats[0] / stats[1]
# Defined in file: ./chapter_convolutional-neural-networks/lenet.md
def train_ch5(net, train_iter, test_iter, num_epochs, lr, ctx=d2l.try_gpu()):
    """Train a Gluon model on the given device (Chapter 5 style).

    Re-initializes the network, trains with SGD + softmax cross-entropy,
    animates train loss / train accuracy every 50 batches, evaluates
    test accuracy per epoch, and prints final metrics and throughput.

    NOTE: the default ``ctx`` is evaluated once at import time — the
    device is chosen when the module loads, not per call.

    :param net: a Gluon network (will be force re-initialized).
    :param train_iter: training data iterator yielding (X, y) batches.
    :param test_iter: test data iterator for per-epoch evaluation.
    :param num_epochs: number of training epochs.
    :param lr: SGD learning rate.
    :param ctx: MXNet device context to train on.
    """
    net.initialize(force_reinit=True, ctx=ctx, init=init.Xavier())
    loss = gluon.loss.SoftmaxCrossEntropyLoss()
    trainer = gluon.Trainer(net.collect_params(),
                            'sgd', {'learning_rate': lr})
    animator = d2l.Animator(xlabel='epoch', xlim=[0, num_epochs],
                            legend=['train loss', 'train acc', 'test acc'])
    timer = d2l.Timer()
    for epoch in range(num_epochs):
        metric = d2l.Accumulator(3)  # train_loss, train_acc, num_examples
        for i, (X, y) in enumerate(train_iter):
            timer.start()
            # Moving data to ctx is the only difference vs train_epoch_ch3.
            X, y = X.as_in_context(ctx), y.as_in_context(ctx)
            with autograd.record():
                y_hat = net(X)
                l = loss(y_hat, y)
            l.backward()
            trainer.step(X.shape[0])
            metric.add(l.sum().asscalar(), d2l.accuracy(y_hat, y), X.shape[0])
            timer.stop()
            train_loss, train_acc = metric[0]/metric[2], metric[1]/metric[2]
            if (i+1) % 50 == 0:
                animator.add(epoch + i/len(train_iter),
                             (train_loss, train_acc, None))
        test_acc = evaluate_accuracy_gpu(net, test_iter)
        animator.add(epoch+1, (None, None, test_acc))
    print('loss %.3f, train acc %.3f, test acc %.3f' % (
        train_loss, train_acc, test_acc))
    # Fix: corrected typo 'exampes/sec' -> 'examples/sec' in the
    # throughput message.
    print('%.1f examples/sec on %s' % (metric[2]*num_epochs/timer.sum(), ctx))
# Defined in file: ./chapter_convolutional-modern/resnet.md
class Residual(nn.Block):
    """A ResNet residual block.

    Two 3x3 convolutions with batch normalization on the main path,
    plus a skip connection that is optionally passed through a strided
    1x1 convolution to match the output shape.
    """
    def __init__(self, num_channels, use_1x1conv=False, strides=1, **kwargs):
        super(Residual, self).__init__(**kwargs)
        # Main path: two 3x3 convs; only the first may downsample.
        self.conv1 = nn.Conv2D(num_channels, kernel_size=3, padding=1,
                               strides=strides)
        self.conv2 = nn.Conv2D(num_channels, kernel_size=3, padding=1)
        # Shortcut path: 1x1 conv to adapt shape, or identity (None).
        self.conv3 = (nn.Conv2D(num_channels, kernel_size=1,
                                strides=strides)
                      if use_1x1conv else None)
        self.bn1 = nn.BatchNorm()
        self.bn2 = nn.BatchNorm()
    def forward(self, X):
        out = nd.relu(self.bn1(self.conv1(X)))
        out = self.bn2(self.conv2(out))
        shortcut = self.conv3(X) if self.conv3 else X
        return nd.relu(out + shortcut)
# Defined in file: ./chapter_recurrent-neural-networks/text-preprocessing.md
def read_time_machine():
"""Load the | |
"""
Copyright (c) 2015 SONATA-NFV
ALL RIGHTS RESERVED.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.
This work has been performed in the framework of the SONATA project,
funded by the European Commission under Grant number 671517 through
the Horizon 2020 and 5G-PPP programmes. The authors would like to
acknowledge the contributions of their colleagues of the SONATA
partner consortium (www.sonata-nfv.eu).
"""
import logging
import yaml
import requests
import time
import configparser
from sonsmbase.smbase import sonSMbase
from .ssh import Client
import netaddr
def reverse(ip):
    """Return a dotted address string with its component order reversed.

    Example: '10.30.0.112' -> '112.0.30.10'. Strings of length <= 1
    are returned unchanged.
    """
    if len(ip) <= 1:
        return ip
    octets = ip.split('.')
    octets.reverse()
    return '.'.join(octets)
# Module-wide logging: INFO by default, this FSM's own logger raised to
# DEBUG, and the messaging layer pinned to INFO to cut broker noise.
logging.basicConfig(level=logging.INFO)
LOG = logging.getLogger("fsm-start-stop-configure")
LOG.setLevel(logging.DEBUG)
logging.getLogger("son-mano-base:messaging").setLevel(logging.INFO)
class CssFSM(sonSMbase):
    """Start/stop/configure FSM for the vTC VNF of the vCDN service.

    Subscribes to the generic FSM topic of its service instance and, on
    life-cycle events, drives the vTC's "pfbridge" REST API (port 8080)
    and configures a monitoring probe on the VM over SSH.
    """
    # Management IP of the vTC VM, cached by start_event() so that later
    # stop/configure events can reach the same host; 'none' until set.
    hostIp = 'none'
    def __init__(self):
        """
        :param specific_manager_type: specifies the type of specific manager
        that could be either fsm or ssm.
        :param service_name: the name of the service that this specific manager
        belongs to.
        :param function_name: the name of the function that this specific
        manager belongs to, will be null in SSM case
        :param specific_manager_name: the actual name of specific manager
        (e.g., scaling, placement)
        :param id_number: the specific manager id number which is used to
        distinguish between multiple SSM/FSM that are created for the same
        objective (e.g., scaling with algorithm 1 and 2)
        :param version: version
        :param description: description
        """
        self.specific_manager_type = 'fsm'
        self.service_name = 'vcdn'
        self.function_name = 'vtc'
        self.specific_manager_name = 'css'
        self.id_number = '1'
        self.version = 'v0.1'
        self.description = "An FSM that subscribes to start, stop and configuration topic"
        super(self.__class__, self).__init__(specific_manager_type=self.specific_manager_type,
                                             service_name=self.service_name,
                                             function_name=self.function_name,
                                             specific_manager_name=self.specific_manager_name,
                                             id_number=self.id_number,
                                             version=self.version,
                                             description=self.description)
    def on_registration_ok(self):
        """Report successful registration to the SMR and subscribe to the
        generic FSM topic for this service instance."""
        # The fsm registration was successful
        LOG.debug("Received registration ok event.")
        # send the status to the SMR
        status = 'Subscribed, waiting for alert message'
        message = {'name': self.specific_manager_id,
                   'status': status}
        self.manoconn.publish(topic='specific.manager.registry.ssm.status',
                              message=yaml.dump(message))
        # Subscribing to the topics that the fsm needs to listen on
        topic = "generic.fsm." + str(self.sfuuid)
        self.manoconn.subscribe(self.message_received, topic)
        LOG.info("Subscribed to " + topic + " topic.")
    def message_received(self, ch, method, props, payload):
        """
        This method handles received messages
        """
        # Decode the content of the message
        # NOTE(review): yaml.load() without an explicit Loader is unsafe on
        # untrusted input (arbitrary object construction) — consider
        # yaml.safe_load(payload). Left unchanged here; verify the payload
        # source is trusted.
        request = yaml.load(payload)
        # Don't trigger on non-request messages
        if "fsm_type" not in request.keys():
            LOG.info("Received a non-request message, ignoring...")
            return
        # Create the response
        response = None
        # the 'fsm_type' field in the content indicates for which type of
        # fsm this message is intended. In this case, this FSM functions as
        # start, stop and configure FSM
        if str(request["fsm_type"]) == "start":
            LOG.info("Start event received: " + str(request["content"]))
            response = self.start_event(request["content"])
        if str(request["fsm_type"]) == "stop":
            LOG.info("Stop event received: " + str(request["content"]))
            response = self.stop_event(request["content"])
        if str(request["fsm_type"]) == "configure":
            LOG.info("Config event received: " + str(request["content"]))
            response = self.configure_event(request["content"])
        if str(request["fsm_type"]) == "scale":
            # NOTE(review): scale_event is not defined in the visible part of
            # this file — presumably provided elsewhere (e.g. base class);
            # confirm before relying on scale handling.
            LOG.info("Scale event received: " + str(request["content"]))
            response = self.scale_event(request["content"])
        # If a response message was generated, send it back to the FLM
        if response is not None:
            # Generated response for the FLM
            LOG.info("Response to request generated:" + str(response))
            topic = "generic.fsm." + str(self.sfuuid)
            corr_id = props.correlation_id
            self.manoconn.notify(topic,
                                 yaml.dump(response),
                                 correlation_id=corr_id)
            return
        # If response is None:
        LOG.info("Request received for other type of FSM, ignoring...")
    def start_event(self, content):
        """
        This method handles a start event.

        Starts the pfbridge on the vTC VM (with retries), then configures
        the monitoring probe and Grafana datasource over SSH, and finally
        restarts the pfbridge. Returns a COMPLETED status dict for the FLM.
        """
        LOG.info("Performing life cycle start event")
        LOG.info("content: " + str(content.keys()))
        # TODO: Add the start logic. The content is a dictionary that contains
        # the required data
        # TODO = check vm_image if correct
        vm_image = "vtc-vnf"
        vnfr = content["vnfr"]
        if (content['vnfd']['name']) == vm_image:
            # First connection point of the first VNFC instance is taken as
            # the management address — presumably the VNFD orders CPs that
            # way; verify against the descriptor.
            mgmt_ip = content['vnfr']['virtual_deployment_units'][0]['vnfc_instance'] [0]['connection_points'][0]['interface']['address']
            if not mgmt_ip:
                LOG.error("Couldn't obtain IP address from VNFR")
                return
            self.hostIp = mgmt_ip
            # Post request
            url = "http://"+mgmt_ip+":8080/startPFbridge"
            querystring = {"jsonIn":"{\"netIN\":\"eth1\",\"netOUT\":\"eth2\", \"trans\": \"\" }"}
            LOG.info(querystring)
            headers = {
                'content-type': "application/x-www-form-urlencoded",
                'accept': "application/json",
                }
            # Retry the start request until the VM's REST API answers.
            number_of_retries = 20
            for i in range(number_of_retries):
                LOG.info("Attempting new post request: attempt " + str(i + 1))
                try:
                    response = requests.request("POST", url, headers=headers, params=querystring, timeout=5.0)
                    LOG.info("Response on post request: " + str(response.text))
                    LOG.info("Status code of response " + str(response.status_code))
                    break
                # NOTE(review): bare except swallows all errors (not only
                # timeouts) — consider catching requests.RequestException.
                except:
                    LOG.info("Request timed out, retrying")
                    time.sleep(15)
            # Configure monitoring probe
            #if sp_ip:
            # NOTE(review): SSH credentials are hard-coded here; consider
            # injecting them via configuration.
            ssh_client = Client(mgmt_ip,'ubuntu','randompassword',LOG)
            # Derive the SP's IP from the SSH session's client address.
            sp_ip = ssh_client.sendCommand("echo $SSH_CLIENT | awk '{ print $1}'")
            # NOTE(review): validIP() and createConf() are not defined in the
            # visible part of this file — confirm they exist on this class.
            if not self.validIP(sp_ip):
                LOG.error("Couldn't obtain SP IP address from ssh_client. Monitoring configuration aborted")
                # Fallback SP address used when detection fails.
                sp_ip = '10.30.0.112'
            LOG.info('Mon Config: Create new conf file')
            self.createConf(sp_ip, 4, 'vtc-vnf')
            ssh_client.sendFile('node.conf')
            ssh_client.sendCommand('ls /tmp/')
            ssh_client.sendCommand('sudo mv /tmp/node.conf /opt/Monitoring/node.conf')
            ssh_client.sendCommand('sudo service mon-probe restart')
            LOG.info('Mon Config: Completed')
            LOG.info("Configuring vTC pfbridge and datasources")
            #ssh_client.sendCommand('sudo /root/gowork/src/pfring_web_api/vtc/PF_RING/userland/examples/pfbridge -a eth1 -b eth2 -d http://'+mgmt_ip+':8086 &')
            #LOG.info("Started pfbridge (if it was not)")
            # Point the Grafana datasource template at this VM's address.
            ssh_client.sendCommand("sudo sed -i 's/10.100.32.231/"+mgmt_ip+"/g' /root/gowork/src/vtc_dashboard/static/json/grafana_init_datasources.json")
            LOG.info("Updating datasource")
            ssh_client.sendCommand("sudo curl -X PUT --connect-timeout 60 --data-binary @/root/gowork/src/vtc_dashboard/static/json/grafana_init_datasources.json -H 'Content-Type:application/json' -H 'Accept: application/json' http://admin:admin@"+mgmt_ip+":3000/api/datasources/15")
            ssh_client.close()
            time.sleep(10)
            # Stopping PFBridge
            url = "http://"+self.hostIp+":8080/stopPFbridge"
            headers = {
                'accept': "application/json",
                }
            response = requests.request("POST", url, headers=headers)
            LOG.info("Response on post request: " + str(response.text))
            LOG.info("Status code of response " + str(response.status_code))
            # Starting PFBridge again
            url = "http://"+self.hostIp+":8080/startPFbridge"
            querystring = {"jsonIn":"{\"netIN\":\"eth1\",\"netOUT\":\"eth2\",\"trans\":\"\"}"}
            LOG.info(" Data to send to "+url+" is : ")
            LOG.info(querystring)
            headers = {
                'content-type': "application/x-www-form-urlencoded",
                'accept': "application/json",
                }
            response = requests.request("POST", url, headers=headers, params=querystring, timeout=5.0)
            LOG.info("Response on post request: " + str(response.text))
            LOG.info("Status code of response " + str(response.status_code))
            LOG.info('Configurations completed')
            #else:
            #    LOG.error("Couldn't obtain SP IP address. Monitoring configuration aborted")
        # Create a response for the FLM
        response = {}
        response['status'] = 'COMPLETED'
        # TODO: complete the response
        return response
    def stop_event(self, content):
        """
        This method handles a stop event.

        Stops the pfbridge on the vTC VM and returns a COMPLETED status
        dict for the FLM.
        """
        LOG.info("Performing life cycle stop event")
        LOG.info("content: " + str(content.keys()))
        # TODO: Add the stop logic. The content is a dictionary that contains
        # the required data
        # TODO = check vm_image if correct
        vm_image = "vtc-vnf"
        vnfr = content["vnfr"]
        if (content['vnfd']['name']) == vm_image:
            mgmt_ip = content['vnfr']['virtual_deployment_units'][0]['vnfc_instance'] [0]['connection_points'][0]['interface']['address']
            if not mgmt_ip:
                LOG.error("Couldn't obtain IP address from VNFR")
                return
            url = "http://"+mgmt_ip+":8080/stopPFbridge"
            headers = {
                'accept': "application/json",
                }
            response = requests.request("POST", url, headers=headers)
            LOG.info(response.text)
        # Create a response for the FLM
        response = {}
        response['status'] = 'COMPLETED'
        # TODO: complete the response
        return response
    def configure_event(self, content):
        """
        This method handles a configure event.

        Looks up the vTU VNF among the service's VNFRs, converts its
        management IP (octet-reversed) to an integer, and restarts the
        pfbridge on the cached vTC host with that value as the 'trans'
        parameter. Returns a COMPLETED status dict for the FLM.
        """
        LOG.info("Performing life cycle configure event")
        LOG.info("content: " + str(content.keys()))
        # TODO: Add the configure logic. The content is a dictionary that
        # contains the required data
        nsr = content['nsr']
        vnfrs = content['vnfrs']
        for vnfr in vnfrs:
            # Identify the vTU VNF by its exact image URL.
            if (vnfr['virtual_deployment_units'][0]['vm_image']) == 'http://files.sonata-nfv.eu/son-vcdn-pilot/vtu-vnf/sonata-vtu.qcow2':
                mgmt_ip = vnfr['virtual_deployment_units'][0]['vnfc_instance'] [0]['connection_points'][0]['interface']['address']
                LOG.info("vTU's management IP retrieved: "+mgmt_ip)
                try:
                    # reverse() flips the octet order so that the netaddr
                    # integer conversion yields the wire representation the
                    # pfbridge expects — TODO confirm endianness assumption.
                    iprev = reverse(mgmt_ip)
                    LOG.info("Got the reverse IP to be turned to integer: "+iprev)
                    ipInt = int(netaddr.IPAddress(iprev))
                    LOG.info("Got the Integer from the IP: "+str(ipInt))
                except Exception as err:
                    LOG.error("Got an exception: "+str(err))
                    return
                #LOG.info("Sending ssh command to alter line in vTC with vTU IP as integer")
                #ssh_client = Client(self.hostIp,'ubuntu','randompassword',LOG)
                #ssh_client.sendCommand("sudo sed -i '1515s/.*/\tip_hdr->daddr = %s;/' /root/gowork/src/pfring_web_api/vtc/PF_RING/userland/examples/pfbridge.c" %ipInt)
                #ssh_client.sendCommand("sudo make -C /root/gowork/src/pfring_web_api/vtc/PF_RING/userland/examples")
                #ssh_client.close()
                # Stopping PFBridge (uses hostIp cached by start_event).
                url = "http://"+self.hostIp+":8080/stopPFbridge"
                headers = {
                    'accept': "application/json",
                    }
                response = requests.request("POST", url, headers=headers)
                LOG.info("Response on post request: " + str(response.text))
                LOG.info("Status code of response " + str(response.status_code))
                """
                #Starting PFBridge again with command line
                ssh_client = Client(self.hostIp,'ubuntu','randompassword',LOG)
                ssh_client.sendCommand('sudo /root/gowork/src/pfring_web_api/vtc/PF_RING/userland/examples/pfbridge -a eth1 -b eth2 -d http://'+mgmt_ip+':8086 -i '+str(ipInt)+' &')
                LOG.info("Started pfbridge again with new configuration")
                ssh_client.close()
                """
                LOG.info("Putting to sleep for 5 seconds")
                time.sleep(5)
                # Starting PFBridge again, now passing the vTU IP as 'trans'.
                url = "http://"+self.hostIp+":8080/startPFbridge"
                querystring = {"jsonIn":"{\"netIN\":\"eth1\",\"netOUT\":\"eth2\",\"trans\":\""+str(ipInt)+"\"}"}
                LOG.info(" Data to send to "+url+" is : ")
                LOG.info(querystring)
                headers = {
                    'content-type': "application/x-www-form-urlencoded",
                    'accept': "application/json",
                    }
                response = requests.request("POST", url, headers=headers, params=querystring, timeout=5.0)
                LOG.info("Response on post request: " + str(response.text))
                LOG.info("Status code of response " + str(response.status_code))
        # Create a response for the FLM
        response = {}
        response['status'] = 'COMPLETED'
        # TODO: complete the response
        return response
| |
3.125e-08,
'attempts': ' 2U', 'timeIncrement': 7.8125e-09, 'increment': 2,
'stepTime': 3.125e-08, 'step': 1, 'jobName': 'OneTaper3D', 'severe': 0,
'iterations': 4, 'phase': STANDARD_PHASE, 'equilibrium': 4})
# Abaqus/CAE replay journal: tail of a failed 'OneTaper3D' run. The
# solver repeatedly cut back the time increment (attempts marked 'U')
# until it fell below the specified minimum, and the analysis aborted.
mdb.jobs['OneTaper3D']._Message(ODB_FRAME, {'phase': STANDARD_PHASE, 'step': 0,
    'frame': 2, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 3.3203125e-08,
    'attempts': 3, 'timeIncrement': 1.953125e-09, 'increment': 2,
    'stepTime': 3.3203125e-08, 'step': 1, 'jobName': 'OneTaper3D', 'severe': 0,
    'iterations': 2, 'phase': STANDARD_PHASE, 'equilibrium': 2})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 3.3203125e-08,
    'attempts': ' 1U', 'timeIncrement': 2.9296875e-09, 'increment': 3,
    'stepTime': 3.3203125e-08, 'step': 1, 'jobName': 'OneTaper3D', 'severe': 0,
    'iterations': 4, 'phase': STANDARD_PHASE, 'equilibrium': 4})
mdb.jobs['OneTaper3D']._Message(ERROR, {'phase': STANDARD_PHASE,
    'message': 'Time increment required is less than the minimum specified',
    'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 3.3203125e-08,
    'attempts': ' 2U', 'timeIncrement': 1e-09, 'increment': 3,
    'stepTime': 3.3203125e-08, 'step': 1, 'jobName': 'OneTaper3D', 'severe': 0,
    'iterations': 5, 'phase': STANDARD_PHASE, 'equilibrium': 5})
mdb.jobs['OneTaper3D']._Message(ABORTED, {'phase': STANDARD_PHASE,
    'message': 'Analysis phase failed due to errors', 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(ERROR, {
    'message': 'Abaqus/Standard Analysis exited with an error - Please see the message file for possible error messages if the file exists.',
    'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(JOB_ABORTED, {
    'message': 'Abaqus/Standard Analysis exited with an error - Please see the message file for possible error messages if the file exists.',
    'jobName': 'OneTaper3D'})
# Model edits before resubmission: remove the poor-element set and its
# material orientation, regenerate the assembly, and enable automatic
# stabilization (dissipated-energy fraction) on Step-1, then resubmit.
del mdb.models['Model-1'].parts['Part-2'].sets['PoorElements-1']
del mdb.models['Model-1'].parts['Part-2'].materialOrientations[2]
mdb.models['Model-1'].rootAssembly.regenerate()
mdb.models['Model-1'].steps['Step-1'].setValues(continueDampingFactors=False,
    initialInc=0.001, stabilizationMagnitude=0.002, stabilizationMethod=
    DISSIPATED_ENERGY_FRACTION)
mdb.jobs['OneTaper3D'].submit(consistencyChecking=OFF)
# Replay log of the resubmitted 'OneTaper3D' run: preprocessing warnings,
# then Standard-phase increments that converge with occasional cutbacks.
mdb.jobs['OneTaper3D']._Message(STARTED, {'phase': BATCHPRE_PHASE,
    'clientHost': 'kirchhoff', 'handle': 0, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(WARNING, {'phase': BATCHPRE_PHASE,
    'message': 'DEGREE OF FREEDOM 4 IS NOT ACTIVE IN THIS MODEL AND CAN NOT BE RESTRAINED',
    'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(WARNING, {'phase': BATCHPRE_PHASE,
    'message': 'DEGREE OF FREEDOM 5 IS NOT ACTIVE IN THIS MODEL AND CAN NOT BE RESTRAINED',
    'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(WARNING, {'phase': BATCHPRE_PHASE,
    'message': '36 elements are distorted. Either the isoparametric angles are out of the suggested limits or the triangular or tetrahedral quality measure is bad. The elements have been identified in element set WarnElemDistorted.',
    'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(ODB_FILE, {'phase': BATCHPRE_PHASE,
    'file': '/home2/banerjee/Abaqus/AdvComp/OneTaper3DCZM/OneTaper3D.odb',
    'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(COMPLETED, {'phase': BATCHPRE_PHASE,
    'message': 'Analysis phase complete', 'jobName': 'OneTaper3D'})
# Standard (solver) phase begins.
mdb.jobs['OneTaper3D']._Message(STARTED, {'phase': STANDARD_PHASE,
    'clientHost': 'kirchhoff', 'handle': 0, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STEP, {'phase': STANDARD_PHASE, 'stepId': 1,
    'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(WARNING, {'phase': STANDARD_PHASE,
    'message': 'The 3-direction at one or more points in one or more layers in 228 elements as defined in *ORIENTATION are in the opposite direction to the element normals. Either the 1 or 2 and the 3-direction defined in *ORIENTATION will be reversed. The elements have been identified in element set WarnElem3DirOppElemNormalStep1Inc1.',
    'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(ODB_FRAME, {'phase': STANDARD_PHASE, 'step': 0,
    'frame': 0, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.0, 'attempts': 0,
    'timeIncrement': 0.001, 'increment': 0, 'stepTime': 0.0, 'step': 1,
    'jobName': 'OneTaper3D', 'severe': 0, 'iterations': 0,
    'phase': STANDARD_PHASE, 'equilibrium': 0})
mdb.jobs['OneTaper3D']._Message(MEMORY_ESTIMATE, {'phase': STANDARD_PHASE,
    'jobName': 'OneTaper3D', 'memory': 423.493077278137})
mdb.jobs['OneTaper3D']._Message(ODB_FRAME, {'phase': STANDARD_PHASE, 'step': 0,
    'frame': 1, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.001, 'attempts': 1,
    'timeIncrement': 0.001, 'increment': 1, 'stepTime': 0.001, 'step': 1,
    'jobName': 'OneTaper3D', 'severe': 0, 'iterations': 6,
    'phase': STANDARD_PHASE, 'equilibrium': 6})
mdb.jobs['OneTaper3D']._Message(ODB_FRAME, {'phase': STANDARD_PHASE, 'step': 0,
    'frame': 2, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.002, 'attempts': 1,
    'timeIncrement': 0.001, 'increment': 2, 'stepTime': 0.002, 'step': 1,
    'jobName': 'OneTaper3D', 'severe': 0, 'iterations': 5,
    'phase': STANDARD_PHASE, 'equilibrium': 5})
mdb.jobs['OneTaper3D']._Message(ODB_FRAME, {'phase': STANDARD_PHASE, 'step': 0,
    'frame': 3, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.003, 'attempts': 1,
    'timeIncrement': 0.001, 'increment': 3, 'stepTime': 0.003, 'step': 1,
    'jobName': 'OneTaper3D', 'severe': 0, 'iterations': 5,
    'phase': STANDARD_PHASE, 'equilibrium': 5})
mdb.jobs['OneTaper3D']._Message(ODB_FRAME, {'phase': STANDARD_PHASE, 'step': 0,
    'frame': 4, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.004, 'attempts': 1,
    'timeIncrement': 0.001, 'increment': 4, 'stepTime': 0.004, 'step': 1,
    'jobName': 'OneTaper3D', 'severe': 0, 'iterations': 4,
    'phase': STANDARD_PHASE, 'equilibrium': 4})
mdb.jobs['OneTaper3D']._Message(ODB_FRAME, {'phase': STANDARD_PHASE, 'step': 0,
    'frame': 5, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.005, 'attempts': 1,
    'timeIncrement': 0.001, 'increment': 5, 'stepTime': 0.005, 'step': 1,
    'jobName': 'OneTaper3D', 'severe': 0, 'iterations': 6,
    'phase': STANDARD_PHASE, 'equilibrium': 6})
# First distortion warning triggers increment cutbacks ('U' attempts).
mdb.jobs['OneTaper3D']._Message(WARNING, {'phase': STANDARD_PHASE,
    'message': 'EXCESSIVE DISTORTION AT A TOTAL OF 408 INTEGRATION POINTS IN SOLID (CONTINUUM) ELEMENTS',
    'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.005, 'attempts': ' 1U',
    'timeIncrement': 0.001, 'increment': 6, 'stepTime': 0.005, 'step': 1,
    'jobName': 'OneTaper3D', 'severe': 0, 'iterations': 4,
    'phase': STANDARD_PHASE, 'equilibrium': 4})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.005, 'attempts': ' 2U',
    'timeIncrement': 0.00025, 'increment': 6, 'stepTime': 0.005, 'step': 1,
    'jobName': 'OneTaper3D', 'severe': 0, 'iterations': 4,
    'phase': STANDARD_PHASE, 'equilibrium': 4})
mdb.jobs['OneTaper3D']._Message(ODB_FRAME, {'phase': STANDARD_PHASE, 'step': 0,
    'frame': 6, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.0050625, 'attempts': 3,
    'timeIncrement': 6.25e-05, 'increment': 6, 'stepTime': 0.0050625,
    'step': 1, 'jobName': 'OneTaper3D', 'severe': 0, 'iterations': 8,
    'phase': STANDARD_PHASE, 'equilibrium': 8})
mdb.jobs['OneTaper3D']._Message(ODB_FRAME, {'phase': STANDARD_PHASE, 'step': 0,
    'frame': 7, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.005125, 'attempts': 1,
    'timeIncrement': 6.25e-05, 'increment': 7, 'stepTime': 0.005125, 'step': 1,
    'jobName': 'OneTaper3D', 'severe': 0, 'iterations': 5,
    'phase': STANDARD_PHASE, 'equilibrium': 5})
mdb.jobs['OneTaper3D']._Message(ODB_FRAME, {'phase': STANDARD_PHASE, 'step': 0,
    'frame': 8, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.0051875, 'attempts': 1,
    'timeIncrement': 6.25e-05, 'increment': 8, 'stepTime': 0.0051875,
    'step': 1, 'jobName': 'OneTaper3D', 'severe': 0, 'iterations': 4,
    'phase': STANDARD_PHASE, 'equilibrium': 4})
mdb.jobs['OneTaper3D']._Message(ODB_FRAME, {'phase': STANDARD_PHASE, 'step': 0,
    'frame': 9, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.00525, 'attempts': 1,
    'timeIncrement': 6.25e-05, 'increment': 9, 'stepTime': 0.00525, 'step': 1,
    'jobName': 'OneTaper3D', 'severe': 0, 'iterations': 3,
    'phase': STANDARD_PHASE, 'equilibrium': 3})
mdb.jobs['OneTaper3D']._Message(ODB_FRAME, {'phase': STANDARD_PHASE, 'step': 0,
    'frame': 10, 'jobName': 'OneTaper3D'})
# Increment size recovers steadily from here.
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.00534375,
    'attempts': 1, 'timeIncrement': 9.375e-05, 'increment': 10,
    'stepTime': 0.00534375, 'step': 1, 'jobName': 'OneTaper3D', 'severe': 0,
    'iterations': 4, 'phase': STANDARD_PHASE, 'equilibrium': 4})
mdb.jobs['OneTaper3D']._Message(ODB_FRAME, {'phase': STANDARD_PHASE, 'step': 0,
    'frame': 11, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.005484375,
    'attempts': 1, 'timeIncrement': 0.000140625, 'increment': 11,
    'stepTime': 0.005484375, 'step': 1, 'jobName': 'OneTaper3D', 'severe': 0,
    'iterations': 4, 'phase': STANDARD_PHASE, 'equilibrium': 4})
mdb.jobs['OneTaper3D']._Message(ODB_FRAME, {'phase': STANDARD_PHASE, 'step': 0,
    'frame': 12, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.0056953125,
    'attempts': 1, 'timeIncrement': 0.0002109375, 'increment': 12,
    'stepTime': 0.0056953125, 'step': 1, 'jobName': 'OneTaper3D', 'severe': 0,
    'iterations': 4, 'phase': STANDARD_PHASE, 'equilibrium': 4})
mdb.jobs['OneTaper3D']._Message(ODB_FRAME, {'phase': STANDARD_PHASE, 'step': 0,
    'frame': 13, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.00601171875,
    'attempts': 1, 'timeIncrement': 0.00031640625, 'increment': 13,
    'stepTime': 0.00601171875, 'step': 1, 'jobName': 'OneTaper3D', 'severe': 0,
    'iterations': 4, 'phase': STANDARD_PHASE, 'equilibrium': 4})
mdb.jobs['OneTaper3D']._Message(ODB_FRAME, {'phase': STANDARD_PHASE, 'step': 0,
    'frame': 14, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.006486328125,
    'attempts': 1, 'timeIncrement': 0.000474609375, 'increment': 14,
    'stepTime': 0.006486328125, 'step': 1, 'jobName': 'OneTaper3D',
    'severe': 0, 'iterations': 4, 'phase': STANDARD_PHASE, 'equilibrium': 4})
mdb.jobs['OneTaper3D']._Message(ODB_FRAME, {'phase': STANDARD_PHASE, 'step': 0,
    'frame': 15, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.0071982421875,
    'attempts': 1, 'timeIncrement': 0.0007119140625, 'increment': 15,
    'stepTime': 0.0071982421875, 'step': 1, 'jobName': 'OneTaper3D',
    'severe': 0, 'iterations': 4, 'phase': STANDARD_PHASE, 'equilibrium': 4})
mdb.jobs['OneTaper3D']._Message(ODB_FRAME, {'phase': STANDARD_PHASE, 'step': 0,
    'frame': 16, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.00826611328125,
    'attempts': 1, 'timeIncrement': 0.00106787109375, 'increment': 16,
    'stepTime': 0.00826611328125, 'step': 1, 'jobName': 'OneTaper3D',
    'severe': 0, 'iterations': 4, 'phase': STANDARD_PHASE, 'equilibrium': 4})
mdb.jobs['OneTaper3D']._Message(ODB_FRAME, {'phase': STANDARD_PHASE, 'step': 0,
    'frame': 17, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.009867919921875,
    'attempts': 1, 'timeIncrement': 0.001601806640625, 'increment': 17,
    'stepTime': 0.009867919921875, 'step': 1, 'jobName': 'OneTaper3D',
    'severe': 0, 'iterations': 4, 'phase': STANDARD_PHASE, 'equilibrium': 4})
mdb.jobs['OneTaper3D']._Message(ODB_FRAME, {'phase': STANDARD_PHASE, 'step': 0,
    'frame': 18, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.0122706298828125,
    'attempts': 1, 'timeIncrement': 0.0024027099609375, 'increment': 18,
    'stepTime': 0.0122706298828125, 'step': 1, 'jobName': 'OneTaper3D',
    'severe': 0, 'iterations': 4, 'phase': STANDARD_PHASE, 'equilibrium': 4})
# Second, smaller distortion warning and one cutback before recovery.
mdb.jobs['OneTaper3D']._Message(WARNING, {'phase': STANDARD_PHASE,
    'message': 'EXCESSIVE DISTORTION AT A TOTAL OF 26 INTEGRATION POINTS IN SOLID (CONTINUUM) ELEMENTS',
    'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.0122706298828125,
    'attempts': ' 1U', 'timeIncrement': 0.00360406494140625, 'increment': 19,
    'stepTime': 0.0122706298828125, 'step': 1, 'jobName': 'OneTaper3D',
    'severe': 0, 'iterations': 3, 'phase': STANDARD_PHASE, 'equilibrium': 3})
mdb.jobs['OneTaper3D']._Message(ODB_FRAME, {'phase': STANDARD_PHASE, 'step': 0,
    'frame': 19, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.0131716461181641,
    'attempts': 2, 'timeIncrement': 0.000901016235351563, 'increment': 19,
    'stepTime': 0.0131716461181641, 'step': 1, 'jobName': 'OneTaper3D',
    'severe': 0, 'iterations': 2, 'phase': STANDARD_PHASE, 'equilibrium': 2})
mdb.jobs['OneTaper3D']._Message(ODB_FRAME, {'phase': STANDARD_PHASE, 'step': 0,
    'frame': 20, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.0145231704711914,
    'attempts': 1, 'timeIncrement': 0.00135152435302734, 'increment': 20,
    'stepTime': 0.0145231704711914, 'step': 1, 'jobName': 'OneTaper3D',
    'severe': 0, 'iterations': 2, 'phase': STANDARD_PHASE, 'equilibrium': 2})
mdb.jobs['OneTaper3D']._Message(ODB_FRAME, {'phase': STANDARD_PHASE, 'step': 0,
    'frame': 21, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.0165504570007324,
    'attempts': 1, 'timeIncrement': 0.00202728652954102, 'increment': 21,
    'stepTime': 0.0165504570007324, 'step': 1, 'jobName': 'OneTaper3D',
    'severe': 0, 'iterations': 2, 'phase': STANDARD_PHASE, 'equilibrium': 2})
mdb.jobs['OneTaper3D']._Message(ODB_FRAME, {'phase': STANDARD_PHASE, 'step': 0,
    'frame': 22, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.0195913867950439,
'attempts': 1, 'timeIncrement': 0.00304092979431152, 'increment': 22,
'stepTime': 0.0195913867950439, 'step': 1, 'jobName': 'OneTaper3D',
'severe': 0, | |
import numpy as np
import matplotlib.pylab as plt
import neuroglancer
from analysis_script.utils_format_convert import read_image_vol_from_h5
from ffn.inference.storage import subvolume_path
from neuroglancer_segment_visualize import neuroglancer_visualize
from run_consensus import run_save_consensus
import networkx
import ffn.inference.storage as storage
from ffn.inference.segmentation import relabel_volume
import os
#%%
# seg_dir = "/home/morganlab/Documents/ixP11LGN/p11_1_exp10" # "/Users/binxu/Connectomics_Code/results/LGN/p11_1_exp8" # "/home/morganlab/Documents/ixP11LGN/p11_1_exp8"
# f = np.load(subvolume_path(seg_dir, (0, 0, 0), 'npz'))
# v1 = f['segmentation']
# f.close()
# f = np.load(subvolume_path(seg_dir, (0, 448, 0), 'npz'))
# v2 = f['segmentation']
# f.close()
# f = np.load(subvolume_path(seg_dir, (0, 0, 448), 'npz'))
# v3 = f['segmentation']
# f.close()
# f = np.load(subvolume_path(seg_dir, (0, 448, 448), 'npz'))
# v4 = f['segmentation']
# f.close()
#
# #%%
# seg_dict = {"seg_dir": "/home/morganlab/Documents/ixP11LGN/p11_1_exp10",
# "seg_1": {"corner": (0, 0, 0)},
# "seg_2": {"corner": (0, 0, 448)},
# "seg_3": {"corner": (0, 448, 0)},
# "seg_4": {"corner": (0, 448, 448)}
# }
# image_dir = "/home/morganlab/Documents/ixP11LGN/grayscale_ixP11_1_norm.h5"
# neuroglancer_visualize(seg_dict, "/home/morganlab/Documents/ixP11LGN/grayscale_ixP11_1_norm.h5")
# #%%
# config = """
# segmentation1 {
# directory: "/home/morganlab/Documents/ixP11LGN/p11_1_exp10"
# threshold: 0.6
# split_cc: 1
# min_size: 5000
# }
# segmentation2 {
# directory: "/home/morganlab/Documents/ixP11LGN/p11_1_exp10_rev"
# threshold: 0.6
# split_cc: 1
# min_size: 5000
# }
# segmentation_output_dir: "/home/morganlab/Documents/ixP11LGN/p11_1_exp10_consensus_rev/"
# type: CONSENSUS_SPLIT
# split_min_size: 5000
# """
# run_save_consensus(config, [(0, 0, 0), (0, 0, 448), (0, 448, 0), (0, 448, 448)])
# #%%
# seg_dict = {"seg_dir": "/home/morganlab/Documents/ixP11LGN/p11_1_exp10_consensus_rev/",
# "seg_1": {"corner": (0, 0, 0)},
# "seg_2": {"corner": (0, 0, 448)},
# "seg_3": {"corner": (0, 448, 0)},
# "seg_4": {"corner": (0, 448, 448)}
# }
# image_dir = "/home/morganlab/Documents/ixP11LGN/grayscale_ixP11_1_norm.h5"
# neuroglancer_visualize(seg_dict, "/home/morganlab/Documents/ixP11LGN/grayscale_ixP11_1_norm.h5")
# #%%
#
# plt.imshow(v1[:,-33:-30,:])
# plt.show()
# plt.imshow(v2[:,31:34,:])
# plt.show()
# #%% volume slicing function
# corner1 = (0, 0, 0)
# corner2 = (0, 448, 0)
# size = (152, 512, 512)
# size2 = (152, 512, 512)
# overlap_d = 3
def _overlap_selection(corner1, corner2, size, size2=None, overlap_d=3):
'''Return the middle of overlap subvolume to do next overlap analysis
:parameter overlap_d : it's actually the overlap_r not d
:return : sel1 sel2 2 slice object that can send into v1 v2
'''
if size2==None:
size2 = size
if corner1[0] == corner2[0] and corner1[2] == corner2[2]: # junction in y axis
if corner2[1] > corner1[1] and corner1[1] + size[1] > corner2[1]:
assert ( corner1[1] + size[1] - corner2[1] )%2 == 0
halfwid = ( corner1[1] + size[1] - corner2[1] )//2
sel1 = (slice(None), slice(-halfwid - overlap_d, -halfwid + overlap_d + 1), slice(None))
sel2 = (slice(None), slice(halfwid - overlap_d, halfwid + overlap_d + 1), slice(None))
elif corner1[1] > corner2[1] and corner2[1] + size[1] > corner1[1]:
assert (corner2[1] + size[1] - corner1[1]) % 2 == 0
halfwid = (corner2[1] + size[1] - corner1[1]) // 2
sel1 = (slice(None), slice(halfwid - overlap_d, halfwid + overlap_d + 1), slice(None))
sel2 = (slice(None), slice(-halfwid - overlap_d, -halfwid + overlap_d+ 1), slice(None))
else:
return ([],[],[]), ([],[],[])
elif corner1[0] == corner2[0] and corner1[1] == corner2[1]: # junction in x axis
if corner2[2] > corner1[2] and corner1[2] + size[2] > corner2[2]:
assert ( corner1[2] + size[2] - corner2[2] )%2 == 0
halfwid = ( corner1[2] + size[2] - corner2[2] )//2
sel1 = (slice(None), slice(None), slice(-halfwid - overlap_d, -halfwid + overlap_d + 1))
sel2 = (slice(None), slice(None), slice(halfwid - overlap_d, halfwid + overlap_d + 1))
elif corner1[2] > corner2[2] and corner2[2] + size[2] > corner1[2]:
assert (corner2[2] + size[2] - corner1[2]) % 2 == 0
halfwid = (corner2[2] + size[2] - corner1[2]) // 2
sel1 = (slice(None), slice(None), slice(halfwid - overlap_d, halfwid + overlap_d + 1))
sel2 = (slice(None), slice(None), slice(-halfwid - overlap_d, -halfwid + overlap_d + 1))
else:
return ([],[],[]), ([],[],[])
elif corner1[1] == corner2[1] and corner1[2] == corner2[2]: # junction in z axis
if corner2[0] > corner1[0] and corner1[0] + size[0] > corner2[0]:
assert ( corner1[0] + size[0] - corner2[0] )%2 == 0
halfwid = ( corner1[0] + size[0] - corner2[0] )//2
sel1 = (slice(-halfwid - overlap_d, -halfwid + overlap_d + 1), slice(None), slice(None))
sel2 = (slice(halfwid - overlap_d, halfwid + overlap_d + 1), slice(None), slice(None))
elif corner1[0] > corner2[0] and corner2[0] + size[0] > corner1[0]:
assert (corner2[0] + size[0] - corner1[0]) % 2 == 0
halfwid = (corner2[0] + size[0] - corner1[0]) // 2
sel1 = (slice(halfwid - overlap_d, halfwid + overlap_d + 1), slice(None), slice(None))
sel2 = (slice(-halfwid - overlap_d, -halfwid + overlap_d + 1), slice(None), slice(None))
else:
return ([],[],[]), ([],[],[])
else:
return ([],[],[]), ([],[],[])
return sel1, sel2
#%%
def merge_segment(v1, v2, corner1, corner2, size, size2=None, overlap_d=3, threshold=100):
    """Find segment ids to merge across the seam between two subvolumes.

    Compares the overlapping slabs of ``v1`` and ``v2`` and keeps a pair
    ``(id1, id2)`` only when each label is the other's largest-overlap partner
    (mutual best match) and both overlap sizes exceed ``threshold`` voxels.

    :return: ``(merge_array_filt, overlap_size_array, v2_new)`` where
        ``v2_new`` is ``v2`` shifted into a fresh label range with merged ids
        rewritten to their ``v1`` counterparts, or ``(None, None, None)`` when
        the subvolumes are not adjacent.
    """
    # uint64 gives enough headroom for the composite pair-encoded labels.
    vol1 = np.uint64(v1)
    vol2 = np.uint64(v2)
    sel1, sel2 = _overlap_selection(corner1, corner2, size, size2=size2, overlap_d=overlap_d)
    base = int(vol1.max() + 1)
    # Encode each voxel's (label1, label2) pair as label1 + label2 * base.
    pair_code = vol1[sel1] + vol2[sel2] * base
    if pair_code.size == 0:
        print("Not adjacent, not mergeable!")
        return None, None, None
    codes, counts = np.unique(pair_code, return_counts=True)
    idx2, idx1 = np.divmod(codes, base)

    def _best_overlap(key_labels, partner_labels, as_pair):
        # For each distinct key label, keep its largest-overlap partner.
        pairs, sizes = [], []
        for key in set(key_labels):
            mask = key_labels == key
            local_cnt = counts[mask]
            local_partner = partner_labels[mask]
            best = local_cnt.argmax()
            pairs.append(as_pair(key, local_partner[best]))
            sizes.append(local_cnt[best])
        return pairs, sizes

    # Best v1 partner for every v2 label, then best v2 partner for every v1 label.
    merge_list_2, size_list_2 = _best_overlap(idx2, idx1, lambda k, p: (p, k))
    merge_list_1, size_list_1 = _best_overlap(idx1, idx2, lambda k, p: (k, p))
    # Consensus: pairs that are each other's best match.
    consensus_merge = list(set(merge_list_1) & set(merge_list_2))
    consensus_size_list = [
        (size_list_1[merge_list_1.index(pair)], size_list_2[merge_list_2.index(pair)])
        for pair in consensus_merge
    ]
    keep = np.array(
        [1 if (s1 > threshold and s0 > threshold) else 0 for s0, s1 in consensus_size_list],
        dtype=bool,
    )
    # %% merge and remap index
    merge_array_filt = np.array(consensus_merge)[keep, :]
    overlap_size_array = np.array(consensus_size_list)[keep][:, 1]  # to 1d array
    offset = base
    v2_new = vol2 + offset  # remap by offset
    for id1, id2 in merge_array_filt:
        v2_new[v2_new == id2 + offset] = id1  # merge. (background is merged in this step)
    return merge_array_filt, overlap_size_array, v2_new
#%%
# merge_array, overlap_size_array, v2_new = merge_segment(v1, v2, (0,0,0),(0,448,0),size=(152,512,512))
#%% Generate segment list and merge_pair list!
def stitich_subvolume_grid(seg_dir, x_step, y_step, x_num, y_num, size, start_corner = (0,0,0), output_dir=None,
overlap_d=3, overlap_thr=100):
"""Update on Feb.23rd allow non-exist patch"""
x_margin = (size[2] - x_step) // 2
y_margin = (size[1] - y_step) // 2
seg_id_dict = []
merge_pair_list = []
exist_mat = np.zeros((y_num + 2, x_num + 2), dtype=np.bool) # shape of the grid with 1 pad
exist_mat[1:-1, 1:-1] = 1
for i in range(x_num):
for j in range(y_num):
shift = (0, j*y_step, i*x_step)
corner = tuple([shift[i] + start_corner[i] for i in range(3)])
if os.path.exists(subvolume_path(seg_dir, corner, 'npz')):
f = np.load(subvolume_path(seg_dir, corner, 'npz'))
vol = f['segmentation']
f.close()
idx_list = np.unique(vol)
seg_id_dict.extend([(i, j, label) for label in idx_list])
else:
exist_mat[j+1, i+1] = False
vol = np.zeros(size, dtype=np.uint16)
print("Warn: Subvolume at %s not exists." % str(corner))
seg_id_dict.extend([(i, j, 0)])
if i == 0:
pass
elif not exist_mat[j+1, i] or not exist_mat[j+1, i+1]:
merge_pair_list.extend(
[[seg_id_dict.index((i - 1, j, 0)), seg_id_dict.index((i, j, 0))]]) # equalize the 0 background
else:
shift1 = (0, j * y_step, (i - 1) * x_step)
corner1 = tuple([shift1[i] + start_corner[i] for i in range(3)])
f = np.load(subvolume_path(seg_dir, corner1, 'npz'))
v1 = f['segmentation']
f.close()
merge_array, overlap_size_array, _ = merge_segment(v1, vol, corner1, corner, size, overlap_d=overlap_d, threshold=overlap_thr)
merge_pair_list.extend(
[[seg_id_dict.index((i - 1, j, id1)), seg_id_dict.index((i, j, id2))] for id1, id2 in merge_array])
if j == 0:
pass
elif not exist_mat[j, i+1] or not exist_mat[j+1, i+1]:
merge_pair_list.extend(
[[seg_id_dict.index((i, j - 1, 0)), seg_id_dict.index((i, j, 0))]])
else:
shift1 = (0, (j - 1)*y_step, i*x_step)
corner1 = tuple([shift1[i] + start_corner[i] for i in range(3)])
f = np.load(subvolume_path(seg_dir, corner1, 'npz'))
v1 = f['segmentation']
f.close()
merge_array, overlap_size_array, _ = merge_segment(v1, vol, corner1, corner, size, overlap_d=overlap_d, threshold=overlap_thr)
merge_pair_list.extend(
[[seg_id_dict.index((i, j - 1, id1)), seg_id_dict.index((i, j, id2))] for id1, id2 in merge_array])
# full_segment[:, global_y_sel(j), global_x_sel(i)] = vol[:, local_y_sel(j), local_x_sel(i)]
#%% find the network component in this global network!
segment_graph = networkx.Graph()
segment_graph.add_edges_from(merge_pair_list)
segment_graph.add_nodes_from(range(len(seg_id_dict)))
final_idx = []
for component in networkx.connected_components(segment_graph):
final_idx.append(min(component))
#%%
def global_x_sel(j, i):
start = i * x_step + x_margin
end = (i + 1) * x_step + x_margin
if not exist_mat[j+1, i]:
start -= x_margin
if not exist_mat[j+1, i+2]:
end += x_margin
return slice(start, end)
# if i == 0 and x_num == 1:
# return slice(None)
# elif i == 0:
# return slice(0, x_step + x_margin)
# elif i == x_num - 1:
# return slice((x_num - 1) * x_step + x_margin, x_num * x_step + 2 * x_margin)
# else:
def |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.