input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
<filename>formats/blast.py
"""
parses tabular BLAST -m8 (-format 6 in BLAST+) format
"""
import os.path as op
import sys
import logging
from itertools import groupby
from collections import defaultdict
from jcvi.formats.base import LineFile, BaseFile, must_open
from jcvi.formats.bed import Bed
from jcvi.formats.coords import print_stats
from jcvi.formats.sizes import Sizes
from jcvi.utils.grouper import Grouper
from jcvi.utils.orderedcollections import OrderedDict
from jcvi.utils.range import range_distance
from jcvi.apps.base import OptionParser, ActionDispatcher, sh, popen
class BlastLine(object):
    """A single record of tabular BLAST output (-m8 / BLAST+ outfmt 6).

    Subject coordinates are normalized so that sstart <= sstop; the
    original strand is remembered in `orientation` ('+' or '-').
    """
    __slots__ = ('query', 'subject', 'pctid', 'hitlen', 'nmismatch', 'ngaps',
                 'qstart', 'qstop', 'sstart', 'sstop', 'evalue', 'score',
                 'qseqid', 'sseqid', 'qi', 'si', 'orientation')

    def __init__(self, sline):
        cols = sline.split("\t")
        self.query = cols[0]
        self.subject = cols[1]
        self.pctid = float(cols[2])
        self.hitlen = int(cols[3])
        self.nmismatch = int(cols[4])
        self.ngaps = int(cols[5])
        self.qstart = int(cols[6])
        self.qstop = int(cols[7])
        self.sstart = int(cols[8])
        self.sstop = int(cols[9])
        self.evalue = float(cols[10])
        self.score = float(cols[11])
        # A minus-strand hit is reported with sstart > sstop: normalize
        # the interval and keep the strand in `orientation`.
        if self.sstart <= self.sstop:
            self.orientation = '+'
        else:
            self.sstart, self.sstop = self.sstop, self.sstart
            self.orientation = '-'

    def __repr__(self):
        return "BlastLine('%s' to '%s', eval=%.3f, score=%.1f)" % \
            (self.query, self.subject, self.evalue, self.score)

    def __str__(self):
        # Emit the twelve canonical columns, restoring the original
        # (possibly reversed) subject coordinates.
        fields = [getattr(self, name) for name in BlastLine.__slots__[:12]]
        if self.orientation == '-':
            fields[8], fields[9] = fields[9], fields[8]
        return "\t".join(str(f) for f in fields)

    @property
    def swapped(self):
        """
        Swap query and subject.
        """
        fields = [getattr(self, name) for name in BlastLine.__slots__[:12]]
        fields[0:2] = [self.subject, self.query]
        fields[6:10] = [self.sstart, self.sstop, self.qstart, self.qstop]
        if self.orientation == '-':
            fields[8], fields[9] = fields[9], fields[8]
        return BlastLine("\t".join(str(f) for f in fields))

    @property
    def bedline(self):
        """Six-column BED line anchored on the subject (0-based start)."""
        bed = (self.subject, self.sstart - 1, self.sstop, self.query,
               self.score, self.orientation)
        return "\t".join(str(f) for f in bed)
class BlastSlow(LineFile):
    """In-memory list of BlastLine records loaded from an entire file.

    Unless the caller promises the file is already query-sorted
    (sorted=True), records are sorted by query name on load.
    """
    def __init__(self, filename, sorted=False):
        super(BlastSlow, self).__init__(filename)
        fp = must_open(filename)
        for line in fp:
            self.append(BlastLine(line))
        self.sorted = sorted
        if not sorted:
            self.sort(key=lambda b: b.query)

    def iter_hits(self):
        """Yield (query, iterator-of-BlastLine) groups."""
        for query, group in groupby(self, key=lambda b: b.query):
            yield query, group

    def iter_hits_pair(self):
        """Yield ((query, subject), iterator-of-BlastLine) groups."""
        pair = lambda b: (b.query, b.subject)
        if not self.sorted:
            self.sort(key=pair)
        for qs, group in groupby(self, key=pair):
            yield qs, group

    def to_dict(self):
        # for multiple HSPs pick the one with highest score
        best = OrderedDict()
        for b in self:
            key = (b.query, b.subject)
            if key not in best or b.score > best[key].score:
                best[key] = b
        return best
class Blast (BaseFile):
    """
    We can have a Blast class that loads entire file into memory, this is
    not very efficient for big files (BlastSlow); when the BLAST file is
    generated by BLAST/BLAT, the file is already sorted
    """
    def __init__(self, filename):
        # Keep an open handle; __iter__ rewinds it each time, but the
        # iter_* generators consume it from its current position.
        super(Blast, self).__init__(filename)
        self.fp = must_open(filename)

    def __iter__(self):
        # Rewind so the file can be iterated repeatedly; '#' comment
        # lines are skipped.
        self.fp.seek(0)
        for row in self.fp:
            if row[0] == '#':
                continue
            yield BlastLine(row)

    def iter_hits(self):
        # Group consecutive rows by query (relies on the file already
        # being query-sorted, as noted in the class docstring) and yield
        # each query's hits sorted best-score-first.
        for query, blines in groupby(self.fp,
                key=lambda x: BlastLine(x).query):
            blines = [BlastLine(x) for x in blines]
            blines.sort(key=lambda x: -x.score)  # descending score
            yield query, blines

    def iter_best_hit(self, N=1, hsps=False, ref="query"):
        # Yield (ref_id, BlastLine) for the top-N hits of each reference
        # sequence.  With hsps=True, every HSP belonging to the top-N
        # distinct partners is yielded instead of just N lines.
        if ref == "query":
            ref, hit = "query", "subject"
        elif ref == "subject":
            ref, hit = "subject", "query"
        else:
            sys.exit("`ref` must be either `query` or `subject`.")
        for bref, blines in groupby(self.fp,
                key=lambda x: getattr(BlastLine(x), ref)):
            blines = [BlastLine(x) for x in blines]
            blines.sort(key=lambda x: -x.score)
            counter = 0
            selected = set()
            for b in blines:
                if hsps:
                    # Track distinct partners; an HSP whose partner would
                    # be the (N+1)-th distinct one is dropped.
                    selected.add(getattr(b, hit))
                    counter = len(selected)
                    if counter > N:
                        selected.remove(getattr(b, hit))
                        continue
                else:
                    counter += 1
                    if counter > N:
                        break
                yield bref, b

    @property
    def hits(self):
        """
        returns a dict with query => blastline
        """
        return dict(self.iter_hits())

    @property
    def best_hits(self):
        """
        returns a dict with query => best blasthit
        """
        return dict(self.iter_best_hit())
class BlastLineByConversion (BlastLine):
    """
    make BlastLine object from tab delimited line objects with
    BlastLine-like up to 12 fields formats
    """
    def __init__(self, sline, mode="1"*12):
        # `mode` is a 12-character bitmask, one flag per BLAST column:
        # "1" means the column is present in `sline`, "0" that it is
        # absent and should be filled with the placeholder "-1".
        if int(mode, 2) == 4095:
            # 4095 == 0b111111111111: all twelve columns present.
            super(BlastLineByConversion, self).__init__(sline)
        elif 3072 <= int(mode, 2) < 4095:
            # 3072 == 0b110000000000: query and subject must both be
            # present; any subset of the other ten columns may be missing.
            args = sline.split("\t")
            atoms = args[:2]
            mode = list(mode)
            if len(args) == 12:
                for i in range(2, 12):
                    if mode[i] == "1":
                        atoms.append(args[i])
                    else:
                        atoms.append("-1")
            if len(args) < 12:
                # Columns are packed left: shift the index by the number
                # of missing columns seen so far.
                for i in range(2, 12):
                    if mode[i] == "1":
                        atoms.append(args[i-mode[:i].count("0")])
                    else:
                        atoms.append("-1")
            sline = "\t".join(atoms)
            super(BlastLineByConversion, self).__init__(sline)
        else:
            m = "mode can only contain 0 or 1 \n"
            m += "first two fields (query, subject) cannot be empty"
            sys.exit(m)
def get_stats(blastfile):
    """Compute coverage and identity statistics for a BLAST file.

    Returns (qrycovered, refcovered, id_pct): the union of aligned
    intervals on the query and reference sides, and the overall
    length-weighted identity percentage.
    """
    from jcvi.utils.range import range_union

    logging.debug("report stats on `%s`" % blastfile)
    qry_ivs = []
    ref_ivs = []
    identicals = 0
    alignlen = 0
    for line in open(blastfile):
        b = BlastLine(line)
        # Orient both intervals low-to-high before taking the union.
        qlo, qhi = min(b.qstart, b.qstop), max(b.qstart, b.qstop)
        qry_ivs.append((b.query, qlo, qhi))
        slo, shi = min(b.sstart, b.sstop), max(b.sstart, b.sstop)
        ref_ivs.append((b.subject, slo, shi))
        span = shi - slo
        alignlen += span
        identicals += b.pctid / 100. * span
    qrycovered = range_union(qry_ivs)
    refcovered = range_union(ref_ivs)
    id_pct = identicals * 100. / alignlen
    return qrycovered, refcovered, id_pct
def filter(args):
    """
    %prog filter test.blast
    Produce a new blast file and filter based on:
    - score: >= cutoff
    - pctid: >= cutoff
    - hitlen: >= cutoff
    - evalue: <= cutoff
    - ids: valid ids
    Use --inverse to obtain the complementary records for the criteria above.
    - noself: remove self-self hits
    """
    p = OptionParser(filter.__doc__)
    p.add_option("--score", dest="score", default=0, type="int",
                 help="Score cutoff")
    p.set_align(pctid=95, hitlen=100, evalue=.01)
    p.add_option("--noself", default=False, action="store_true",
                 help="Remove self-self hits")
    p.add_option("--ids", help="Path to file with ids to retain")
    p.add_option("--inverse", default=False, action="store_true",
                 help="Similar to grep -v, inverse")
    p.set_outfile(outfile=None)
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())
    if opts.ids:
        # Load the whitelist of ids; '#' lines are comments, commas are
        # treated as separators.
        ids = set()
        for row in must_open(opts.ids):
            if row[0] == "#":
                continue
            row = row.replace(",", "\t")
            ids.update(row.split())
    else:
        ids = None
    blastfile, = args
    inverse = opts.inverse
    outfile = opts.outfile
    fp = must_open(blastfile)
    score, pctid, hitlen, evalue, noself = \
        opts.score, opts.pctid, opts.hitlen, opts.evalue, opts.noself
    # Default output name encodes the pctid/hitlen cutoffs, e.g. x.P95L100.
    newblastfile = blastfile + ".P{0}L{1}".format(int(pctid), hitlen) if \
        outfile is None else outfile
    if inverse:
        newblastfile += ".inverse"
    fw = must_open(newblastfile, "w")
    for row in fp:
        if row[0] == '#':
            continue
        c = BlastLine(row)
        if ids:
            # noids marks lines whose query or subject is outside the
            # whitelist; None when no whitelist was given.
            if c.query in ids and c.subject in ids:
                noids = False
            else:
                noids = True
        else:
            noids = None
        remove = c.score < score or \
            c.pctid < pctid or \
            c.hitlen < hitlen or \
            c.evalue > evalue or \
            noids
        if inverse:
            remove = not remove
        # --noself is applied after inversion, so self-self hits are
        # removed regardless of --inverse.
        remove = remove or (noself and c.query == c.subject)
        if not remove:
            print >> fw, row.rstrip()
    return newblastfile
def main():
    """Dispatch the sub-command named on the command line to the
    matching function defined in this module."""
    # (sub-command, one-line description) pairs shown in the usage text.
    actions = (
        ('summary', 'provide summary on id% and cov%'),
        ('completeness', 'print completeness statistics for each query'),
        ('annotation', 'create tabular file with the annotations'),
        ('top10', 'count the most frequent 10 hits'),
        ('filter', 'filter BLAST file (based on score, id%, alignlen)'),
        ('covfilter', 'filter BLAST file (based on id% and cov%)'),
        ('cscore', 'calculate C-score for BLAST pairs'),
        ('best', 'get best BLAST hit per query'),
        ('pairs', 'print paired-end reads of BLAST tabular file'),
        ('bed', 'get bed file from BLAST tabular file'),
        ('condense', 'group HSPs together for same query-subject pair'),
        ('chain', 'chain adjacent HSPs together'),
        ('swap', 'swap query and subjects in BLAST tabular file'),
        ('sort', 'sort lines so that query grouped together and scores desc'),
        ('subset', 'extract hits from some query and subject chrs'),
        ('mismatches', 'print out histogram of mismatches of HSPs'),
        ('annotate', 'annotate overlap types in BLAST tabular file'),
        ('score', 'add up the scores for each query seq'),
        ('rbbh', 'find reciprocal-best blast hits'),
    )
    p = ActionDispatcher(actions)
    p.dispatch(globals())
def rbbh(args):
    """
    %prog rbbh A_vs_B.blast B_vs_A.blast
    Identify the reciprocal best blast hit for each query sequence in set A
    when compared to set B.
    This program assumes that the BLAST results have already been filtered
    based on a combination of %id, %cov, e-value cutoffs. BLAST output should
    be in tabular `-m 8` format.
    """
    p = OptionParser(rbbh.__doc__)
    opts, args = p.parse_args(args)
    if len(args) != 2:
        sys.exit(not p.print_help())
    abfile, bafile, = args
    ab = Blast(abfile)
    ba = Blast(bafile)
    ab_hits = ab.best_hits
    ba_hits = ba.best_hits
    # A pair (a, b) is reciprocal when a's best hit is b AND b's best
    # hit is a; one tab-separated pair is printed per line.
    for aquery in ab_hits:
        ahit = ab_hits[aquery].subject
        ba_bline = ba_hits.get(ahit)
        if ba_bline:
            bhit = ba_bline.subject
            if bhit == aquery:
                print "\t".join(str(x) for x in (aquery, ahit))
def score(args):
"""
%prog score blastfile query.fasta A.ids
Add up the scores for each query seq. Go through the lines and for each
query sequence, add | |
""" serial_connection.get_device_id_from_device_packet(packet)
Return the device id from the device packet.
It assumes the first entry in the packet tuple is the device id.
"""
return packet[0]
def build_packet(self, data_block):
    """ serial_connection.build_packet(self, data_block)
    Return the packet for `data_block`.

    The default behaviour is that the packet *is* the data block,
    unchanged; subclasses with more complicated packet formats are
    expected to override this.
    """
    return data_block
def queue_handler(self, packets_to_handle=None):
    """ serial_connection.queue_handler()
    Handle the incoming packet queue.

    Blocks waiting for data blocks on self.packet_q, inspects each
    packet, ignores malformed ones, and dispatches the rest to the
    registered handlers.

    packets_to_handle: if None, loop until self.should_exit is set;
        otherwise handle at most that many packets and return.
    """
    # Fixed: identity comparison with None, `in` instead of the removed
    # dict.has_key(), and the unreachable duplicate `return None` dropped.
    block = packets_to_handle is None
    try:
        while block or packets_to_handle > 0:
            try:
                if self.should_exit:
                    break
                # Drop out of the queue check every half a second to check
                # we shouldn't be exiting
                data_block = self.packet_q.get(True, 0.5)
                if not block:
                    packets_to_handle = packets_to_handle - 1
            except Empty:
                if self.should_exit:
                    break
                else:
                    continue
            packet = self.build_packet(data_block)
            packet_details = self.inspect_packet(data_block)
            if packet_details[0] == GENERAL:
                # We seem to have a general packet
                destination = packet_details[1]
                source = packet_details[2]
                self.dispatch_packets(destination, packet, source)
            elif packet_details[0] == DEVICE:
                # We seem to have a device packet
                device_id = packet_details[1]
                # Call the device handler functions
                if device_id in self.device_list:
                    destination = self.device_list[device_id]
                    self.dispatch_packets(destination, packet)
                else:
                    warn('Data returned from unregistered device ' \
                        + str(device_id) + ': ' + str(packet))
            else:
                # We seem to have a malformed packet
                warn('Malformed packet received. Ignoring it...')
    except KeyboardInterrupt:
        self.close()
        # Pass on the interrupt to the calling function
        raise KeyboardInterrupt
    return None
def dispatch_packets(self, destination, packet, source=None):
    """ serial_connection.dispatch_packets(destination, packet)
    Attempt to dispatch the packet to the destination (a handler ID).

    A handler may be a Queue (the packet is enqueued) or a callable
    (it is invoked with the packet).  When the destination is unknown
    and a source handler was supplied, the source is notified with a
    ('destination_not_registered', packet) tuple.
    """
    # Fixed: `in` instead of the removed dict.has_key(), and an identity
    # test against None for `source`.
    if destination in self.handler_list:
        # See whether we have a queue of a function on which to act
        # This uses the big evil that is 'isinstance' to negate the
        # need for a separate queue handler class. Perhaps there is
        # a better way to do this...?
        if isinstance(self.handler_list[destination], Queue):
            # we have a handler queue
            self.handler_list[destination].put(packet)
        else:
            # We have something else
            self.handler_list[destination](packet)
    else:
        warn('No handler ID: %s is registered.' % (str(destination)))
        if source is not None:
            self.handler_list[source](('destination_not_registered', packet))
def open(self):
    """ serial_connection.open()
    Method to open the connection
    """
    # NOTE(review): queue_handler() loops until should_exit is set, so
    # running.set() is only reached after the handler loop ends --
    # confirm whether open() is meant to be called from its own thread
    # or whether these two lines are in the wrong order.
    self.queue_handler()
    self.running.set()
def close(self):
    """ serial_connection.close()
    Shutdown the connection IO connection
    """
    # Ask the handler loop to stop, close the underlying IO object and
    # mark the connection as no longer running.
    self.should_exit = True
    self.io.close()
    self.running.clear()
    print '\nGoodbye from the serial connection!'
class async_serial(Thread):
    """Threaded serial reader.

    Reads fixed-size packets from a serial port in a background thread,
    unpacks them with `struct` using `data_block_format`, and pushes the
    resulting tuples onto `read_q`.
    NOTE(review): written for Python 2 (str byte buffer, apply(),
    StandardError, bare except).
    """
    def __init__(self,
            port = None,            #Number of device, numbering starts at
                                    # zero.
            data_block_format = 'b',
            read_q = None,          #The queue on which to place the packets
                                    # as they are read in. No argument implies
                                    # that we need to initialise a new queue
            packet_timeout=1,       #Timeout waiting for packets to arrive.
                                    # This is so we don't block permanently
                                    # while nothing ever arrives.
            baudrate=9600,          #baudrate
            bytesize=EIGHTBITS,     #number of databits
            parity=PARITY_NONE,     #enable parity checking
            stopbits=STOPBITS_ONE,  #number of stopbits
            xonxoff=0,              #enable software flow control
            rtscts=0,               #enable RTS/CTS flow control
            writeTimeout=None,      #set a timeout for writes
            dsrdtr=None             #None: use rtscts setting, dsrdtr override if true or false
            ):
        '''Initialise the asynchronous serial object
        '''
        Thread.__init__(self)
        # NOTE(review): packet_timeout is passed positionally into
        # serial.Serial's timeout slot -- confirm the positional order
        # matches the installed pyserial version.
        self.serial = serial.Serial( port,
                                     baudrate,
                                     bytesize,
                                     parity,
                                     stopbits,
                                     packet_timeout,
                                     xonxoff,
                                     rtscts,
                                     writeTimeout,
                                     dsrdtr)
        self.running = Event()  # set while the reader thread should run
        self.buffer = ''        # partial packet accumulated so far
        try:
            self.struct = struct.Struct(data_block_format)
        except:
            raise StandardError('Problem encountered loading struct with ' +data_block_format)
        # Number of bytes in one complete packet.
        self.packet_size = self.struct.size
        if read_q == None:
            self.read_q = Queue()
        else:
            self.read_q = read_q

    def open(self):
        '''Open the serial serial bus to be read. This starts the listening
        thread.
        '''
        self.serial.flushInput()
        self.running.set()
        self.start()

    def write(self, data):
        '''Write a packet to the serial bus.

        `data` is a tuple of values matching the configured struct format.
        '''
        self.serial.write(apply(self.struct.pack, data))

    def close(self):
        '''Close the listening thread.
        '''
        self.running.clear()

    def run(self):
        '''Run is the function that runs in the new thread and is called by
        start(), inherited from the Thread class
        '''
        try:
            while(self.running.isSet()):
                # Read only what is still missing from the current packet;
                # the serial timeout keeps this from blocking forever.
                new_data = self.serial.read(self.packet_size-len(self.buffer))
                self.buffer = self.buffer + new_data
                if (len(self.buffer) == self.packet_size):
                    # Put the unpacked data onto the read queue
                    self.read_q.put(self.struct.unpack(self.buffer))
                    # Clear the buffer
                    self.buffer = ''
        except KeyboardInterrupt:
            self.interrupt_main()
            self.close()
        return None
# Device run modes: CONTINUOUS drains the whole command queue as commands
# arrive; STEP executes one command per explicit step() call.
CONTINUOUS, STEP = (0,1)
# Command format:
# 'command_name': command
# Housekeeping / query command numbers.
base_commands = {
    'reset': 0,
    'home': 1,
    'renumber': 2 ,
    'store_current_position': 16,
    'return_stored_position': 17,
    'read_or_write_memory': 35,
    'restore_settings': 36,
    'return_setting': 53,
    'echo_data': 55,
    'return_current_position': 60,
}
# Movement command numbers, exposed as device.move_<name>(...).
move_commands = {
    'stored_position': 18,
    'absolute': 20,
    'relative': 21,
    'constant_speed': 22,
    'stop': 23,
}
# Setting numbers, exposed as device.set_<name>()/get_<name>().
setting_commands = {
    'microstep_resolution': 37,
    'running_current': 38,
    'hold_current': 39,
    'device_mode': 40,
    'target_speed': 42,
    'acceleration': 43,
    'maximum_range': 44,
    'current_position': 45,
    'max_relative_move': 46,
    'home_offset': 47,
    'alias_number': 48,
    'lock_state': 49,
}
# Additional error codes the device may report, beyond command numbers.
extra_error_codes = {
    'busy': 255,
    'save_position_invalid': 1600,
    'save_position_not_homed': 1601,
    'return_position_invalid': 1700,
    'move_position_invalid': 1800,
    'move_position_not_homed': 1801,
    'relative_position_limited':2146,
    'settings_locked': 3600,
    'disable_auto_home_invalid':4008,
    'bit_10_invalid': 4010,
    'home_switch_invalid': 4012,
    'bit_13_invalid': 4013,
}
# Named command sequences; empty by default, filled in by users/subclasses.
meta_commands = {}
class device_base():
'''
device_base(connection, id, run_mode = CONTINUOUS, verbose = False)
Implements the device base class. It doesn't do very much by itself.
connection: Should be an instance of the serial_connection class.
id: A user-defined string for this device. If no id is passed then
a string representation of the hash of this instance is used
run_mode: Defines whether the device should run through every queue that gets placed
on the command queue, or whether it should do one at a time and wait for step()
to be called before running the next. The options are CONTINUOUS (the former mode)
and STEP (the latter mode).
verbose: Boolean representing whether to be verbose or not.
'''
def __init__(self, connection, id = None, run_mode = CONTINUOUS, verbose = False):
    """Initialise the device and register it with `connection`.

    connection: serial_connection instance to register with.
    id: user-defined string; defaults to str(hash(self)).
    run_mode: CONTINUOUS or STEP (see class docstring).
    verbose: enable chatty output.
    """
    # These have to be initialised immediately to prevent a potential infinite
    # recursion when the attribute handler can't find them.
    self.base_commands = {}
    self.meta_commands = {}
    self.user_meta_commands = {}
    self.setting_commands = {}
    self.move_commands = {}
    if id is None:
        id = str(hash(self))
    self.id = id
    self.connection = connection
    self.run_mode = run_mode
    self.awaiting_action = False
    self.last_packet_received = None
    self.last_packet_sent = None
    self.meta_command_depth = 0
    self.meta_command_pause_after = False
    self.pause_after = True
    # Register with the connection
    self.connection.register(self.packet_handler, id)
    # Fixed: removed a second, redundant re-initialisation of the five
    # command dictionaries that duplicated the assignments above.
    self.action_state = False
    self.pending_responses = 0
    self.verbose = verbose
    # Create data structures to store the response lookups
    self.settings_lookup = {}
    self.command_lookup = {}
    self.move_lookup = {}
    self.extra_error_codes_lookup = {}
def __getattr__(self, attr):
    """Resolve unknown attributes into command wrapper functions.

    Lookup order: bare base commands, 'move_*', 'set_*', 'get_*',
    meta commands, then user meta commands.  Raises AttributeError
    when no table matches.
    """
    # Fixed: dict.has_key() replaced with `in`, and the unreachable
    # `return None` after the raise removed.
    if attr in self.base_commands:
        def base_function(data = 0):
            return self.enqueue_base_command(attr, data)
        return base_function
    elif attr[0:5] == 'move_' and attr[5:] in self.move_commands:
        def move_function(data = 0):
            return self.move(attr[5:], data)
        return move_function
    elif attr[0:4] == 'set_' and attr[4:] in self.setting_commands:
        def set_function(data = 0):
            return self.set(attr[4:], data)
        return set_function
    elif attr[0:4] == 'get_' and attr[4:] in self.setting_commands:
        def get_function(blocking = False):
            return self.get(attr[4:], blocking = blocking)
        return get_function
    elif attr in self.meta_commands:
        def do_function(data = 0):
            return self.meta(attr, self.meta_commands[attr])
        return do_function
    elif attr in self.user_meta_commands:
        def do_function(data = 0):
            return self.meta(attr, self.user_meta_commands[attr])
        return do_function
    else:
        raise AttributeError
def get_id(self):
    '''
    device_base.get_id()
    Return the identifier string assigned to this device.
    '''
    return self.id
def step(self):
'''
device_base.step()
This function executes the next command in the command queue.
It blocks if the previous command has not finished and returns
when it has and the next command has been sent.
This function operates by pulling packets off the queue until
we are in a position to execute the next command.
'''
self.awaiting_action = False
if len(self.command_queue) == 0:
# Nothing to do here cos there's nothing on the queue.
if self.verbose:
print 'Command queue empty for receive step command'
elif self.in_action() and self.run_mode == STEP \
and not self.connection.running.isSet():
# Device is in the action state and the queue handler isn't running,
# so we need to manually pull a packet off the queue
# (which will block until something is there)
self.connection.queue_handler(1)
elif self.in_action() and self.run_mode == STEP:
# We need to do something when we're no longer in action
| |
<filename>notebooks/drive-download-20190731T104457Z-001/20190711/data.py<gh_stars>0
from common import *
# https://www.kaggle.com/adkarhe/dicom-images
# https://www.kaggle.com/c/siim-acr-pneumothorax-segmentation/overview/resources
# https://www.kaggle.com/abhishek/train-your-own-mask-rcnn
# ------
# component
# components
# mask
# ------
import pydicom
# initial version of kaggle data processing
# https://www.kaggle.com/c/siim-acr-pneumothorax-segmentation/discussion/98478#latest-568453
# Dataset Update: Non-annotated instances/images
def run_fix_kaggle_data_error():
    """Cross-check ids between the RLE csv, the dicom files on disk and
    the list of non-diagnostic images, printing the sizes of the various
    id-set intersections.  The expected counts are recorded in the
    string literal at the bottom."""
    csv_file = '/root/share/project/kaggle/2019/chest/data/__download__/train-rle_old.csv'
    remove_file = '/root/share/project/kaggle/2019/chest/data/__download__/non-diagnostic-train'
    dicom_dir = '/root/share/project/kaggle/2019/chest/data/dicom/dicom-images-train'
    # csv_file = '/root/share/project/kaggle/2019/chest/data/__download__/test-rle_old.csv'
    # remove_file = '/root/share/project/kaggle/2019/chest/data/__download__/non-diagnostic-test'
    # dicom_dir = '/root/share/project/kaggle/2019/chest/data/dicom/dicom-images-test'
    # ---
    dicom_file = get_dicom_file(dicom_dir)
    dicom_id = set(dicom_file.keys())
    df = pd.read_csv(csv_file)
    df_id = set(df.ImageId.values)
    # Collect ids whose dicom path contains one of the non-diagnostic
    # markers read from remove_file.
    remove_id = []
    non_diagnostic = read_list_from_file(remove_file)
    for k, v in dicom_file.items():
        # print(k,v)
        for s in non_diagnostic:
            if s in v:
                print(v)
                remove_id.append(k)
    remove_id = set(remove_id)
    # ----
    print('remove_id :', len(remove_id))
    print('df_id :', len(df_id))
    print('dicom_id :', len(dicom_id))
    print('')
    print('dicom_id ∩ df_id :', len(set(dicom_id).intersection(df_id)))
    print('dicom_id ∩ remove_id :', len(
        set(dicom_id).intersection(remove_id)))
    print('df_id ∩ remove_id :', len(set(df_id).intersection(remove_id)))
    # Deliberate early stop: this is a one-off check script.
    exit(0)
    '''
    You should be expecting 10712 images in the train set
    and 1377 images in the public test set.
    for test *.dcm files:
    remove_id : 4
    df_id : 1372
    dicom_id : 1377
    dicom_id ∩ df_id : 1372
    dicom_id ∩ remove_id : 4
    df_id ∩ remove_id : 0
    for train *.dcm files:
    remove_id : 33
    df_id : 10675
    dicom_id : 10712
    dicom_id ∩ df_id : 10675
    dicom_id ∩ remove_id : 33
    df_id ∩ remove_id : 0
    '''
# ----
def get_dicom_file(folder):
    """Map image id -> file path for every *.dcm two levels under folder."""
    paths = sorted(glob.glob(folder + '/**/**/*.dcm'))
    # The image id is the file name without its '.dcm' extension.
    ids = [p.split('/')[-1][:-4] for p in paths]
    return dict(zip(ids, paths))
# ----
# https://www.kaggle.com/mnpinto/pneumothorax-fastai-starter-u-net-128x128
def run_length_decode(rle, height=1024, width=1024, fill_value=1):
    """Decode a paired (offset, length) RLE string into a 2-D float32 mask.

    The flat indices refer to the transposed (column-major) image, the
    inverse of run_length_encode(): each pair gives the gap from the end
    of the previous run, then the run length.
    """
    flat = np.zeros(height * width, np.float32)
    pairs = np.array([int(tok) for tok in rle.split(' ')]).reshape(-1, 2)
    pos = 0
    for offset, length in pairs:
        pos += offset
        flat[pos: pos + length] = fill_value
        pos += length
    return flat.reshape(width, height).T
# 1.2.276.0.7230010.3.1.4.8323329.10005.1517875220.958951
# 209126 1 1019 6 1015 10 1012 13 1010 14 1008 16 1007 16 1006 18 1004 20 1003 20 1002 22
def run_length_encode(component):
    """Encode a binary mask as a paired (offset, length) RLE string.

    Inverse of run_length_decode(): the mask is flattened column-major
    (via transpose); each pair is the gap from the end of the previous
    run followed by the run length.  Returns '' for an all-zero mask.
    """
    component = component.T.flatten()
    # Fixed: pad with a zero on both sides so runs touching the first or
    # last pixel are detected -- the unpadded comparison missed those
    # transitions and produced mismatched start/end arrays (ValueError).
    padded = np.concatenate([[0], component, [0]])
    start = np.where(padded[1:] > padded[:-1])[0]
    end = np.where(padded[:-1] > padded[1:])[0]
    length = end - start
    rle = []
    for i in range(len(length)):
        if i == 0:
            rle.extend([start[0], length[0]])
        else:
            rle.extend([start[i] - end[i - 1], length[i]])
    rle = ' '.join([str(r) for r in rle])
    return rle
def gb_to_component(df, height=1024, width=1024):
    """Decode every RLE row of one ImageId group into a component stack.

    Returns (component, num_component); a '-1' annotation (no finding)
    yields a single all-zero plane with a count of 0.
    """
    rle = df['EncodedPixels'].values
    if rle[0] == '-1':
        return np.zeros((1, height, width), np.float32), 0
    stack = np.array([run_length_decode(r, height, width, 1) for r in rle])
    return stack, len(stack)
def component_to_mask(component):
    """Collapse a stack of component masks into one binary float32 mask."""
    combined = component.sum(0) > 0.5
    return combined.astype(np.float32)
def mask_to_component(mask, threshold=0.5):
    """Split a probability mask into its connected components.

    Returns (component, num_component): a float32 stack with one plane
    per connected component of mask > threshold (background excluded).
    """
    H, W = mask.shape
    binary = cv2.threshold(mask, threshold, 1, cv2.THRESH_BINARY)[1]
    num_labels, label = cv2.connectedComponents(binary.astype(np.uint8))
    num_component = num_labels - 1  # label 0 is the background
    component = np.zeros((num_component, H, W), np.float32)
    for idx in range(num_component):
        component[idx] = label == (idx + 1)
    return component, num_component
### draw ############################################
def draw_input_overlay(image):
    """Render a grayscale image as a BGR overlay via the BONE colormap."""
    return cv2.applyColorMap(image, cv2.COLORMAP_BONE)
def draw_mask_overlay(mask):
    """Paint mask pixels red (BGR) on a black canvas of the same size."""
    height, width = mask.shape
    canvas = np.zeros((height, width, 3), np.uint8)
    canvas[mask > 0] = (0, 0, 255)
    return canvas
def draw_truth_overlay(image, component, alpha=0.5):
    """Blend the truth mask into the red (BGR index 2) channel of image."""
    scaled = component * 255
    blended = image.astype(np.float32)
    blended[:, :, 2] += scaled * alpha
    blended = np.clip(blended, 0, 255)
    return blended.astype(np.uint8)
def draw_predict_overlay(image, component, alpha=0.5):
    """Blend the prediction mask into the green (BGR index 1) channel."""
    scaled = component * 255
    blended = image.astype(np.float32)
    blended[:, :, 1] += scaled * alpha
    blended = np.clip(blended, 0, 255)
    return blended.astype(np.uint8)
# ------
def mask_to_inner_contour(component):
    """Boolean mask of component pixels that border a background pixel.

    A pixel is on the inner contour when any of its four neighbours
    (reflect-padded at the image border) differs from it.
    """
    binary = component > 0.5
    pad = np.lib.pad(binary, ((1, 1), (1, 1)), 'reflect')
    centre = pad[1:-1, 1:-1]
    neighbour_differs = (
        (centre != pad[:-2, 1:-1])
        | (centre != pad[2:, 1:-1])
        | (centre != pad[1:-1, :-2])
        | (centre != pad[1:-1, 2:])
    )
    return binary & neighbour_differs
def draw_contour_overlay(image, component, thickness=1):
    """Draw the inner contour of component on image in red (BGR), in place."""
    contour = mask_to_inner_contour(component)
    if thickness == 1:
        # Thin contour: colour the contour pixels directly.
        image[contour] = (0, 0, 255)
    else:
        # Thicker contour: stamp a circle at each contour pixel.
        for y, x in np.stack(np.where(contour)).T:
            cv2.circle(image, (x, y), thickness,
                       (0, 0, 255), lineType=cv2.LINE_4)
    return image
###- combined results --#
def draw_result_overlay(input, truth, probability):
    """Compose a diagnostic panel: the full-size colorized input next to
    a 2x2 grid of half-size views (truth overlay, prediction overlay, a
    truth-vs-predict composite, and an empty post-process slot)."""
    H, W = input.shape
    h, w = H//2, W//2
    input = draw_input_overlay(input)
    # NOTE(review): cv2.resize takes dsize as (width, height); passing
    # (h, w) here is only correct for square inputs -- confirm.
    input1 = cv2.resize(input, dsize=(h, w))
    if truth.shape != (h, w):
        truth1 = cv2.resize(truth, dsize=(h, w))
        probability1 = cv2.resize(probability, dsize=(h, w))
    else:
        truth1 = truth
        probability1 = probability
    # ---
    overlay1 = draw_truth_overlay(input1.copy(), truth1, 0.5)
    overlay2 = draw_predict_overlay(input1.copy(), probability1, 0.5)
    # Composite on black: truth in red, prediction in green.
    overlay3 = np.zeros((h, w, 3), np.uint8)
    overlay3 = draw_truth_overlay(overlay3, truth1, 1.0)
    overlay3 = draw_predict_overlay(overlay3, probability1, 1.0)
    draw_shadow_text(overlay3, 'truth', (2, 12), 0.5, (0, 0, 255), 1)
    draw_shadow_text(overlay3, 'predict', (2, 24), 0.5, (0, 255, 0), 1)
    # <todo> results afer post process ...
    overlay4 = np.zeros((h, w, 3), np.uint8)
    overlay = np.hstack([
        input,
        np.hstack([
            np.vstack([overlay1, overlay2]),
            np.vstack([overlay4, overlay3]),
        ])
    ])
    return overlay
### check #######################################################################################
def run_check_length_encode0():
    """Visual sanity check: decode the RLE annotations of each training
    image and display the image, mask and per-component overlays."""
    dicom_file = get_dicom_file(
        '/root/share/project/kaggle/2019/chest/data/dicom/dicom-images-train')
    #df = pd.read_csv('/root/share/project/kaggle/2019/chest/data/debug-rle.csv')
    df = pd.read_csv(
        '/root/share/project/kaggle/2019/chest/data/train-rle.csv')
    gb = df.groupby('ImageId')
    uid = list(gb.groups.keys())
    for i in uid:
        data = pydicom.read_file(dicom_file[i])
        image = data.pixel_array
        df = gb.get_group(i)
        # NOTE(review): gb_to_components / components_to_mask /
        # draw_overlay are not defined in this file (the local helpers
        # are gb_to_component, component_to_mask and draw_input_overlay)
        # -- confirm `common` provides these names or fix the calls.
        components, num_component = gb_to_components(df)
        mask = components_to_mask(components)
        if num_component < 1:
            continue
        image = draw_overlay(image)
        for c in range(num_component):
            components[c] = draw_truth_overlay(image, components[c], alpha=0.2)
        mask = draw_mask_overlay(mask)
        #print('%d, %s'%(num_component,i))
        # image_show('overlay',overlay,0.25)
        image_show('image', image, 0.25)
        # image_show_norm('mask',mask,resize=0.25)
        image_show('mask', mask, resize=0.25)
        image_show_norm('components', np.hstack(components), resize=0.25)
        # component = split_mask_to_component(mask)
        # for t,c in enumerate(component):
        #     image_show('c-%d'%t,c,0.25)
        #     run_length_encode(c)
        cv2.waitKey(0)
def run_check_length_encode1():
    """Round-trip check: decode each group's RLE into components,
    collapse to a mask, re-split and re-encode, then compare the
    rebuilt frame with the original csv via DataFrame.equals()."""
    df0 = pd.read_csv(
        '/root/share/project/kaggle/2019/chest/data/debug-rle.more.csv')
    gb0 = df0.groupby('ImageId')
    image_id = []
    encoded_pixel = []
    uid = list(gb0.groups.keys())
    for i in uid:
        df = gb0.get_group(i)
        components0, num_component0 = gb_to_component(df)
        # NOTE(review): components_to_mask / mask_to_components are not
        # defined in this file (the local helpers are component_to_mask
        # and mask_to_component) -- confirm `common` provides them or
        # fix the calls.
        mask = components_to_mask(components0)
        # -----
        components1, num_component1 = mask_to_components(mask)
        if num_component1 == 0:
            image_id.append(i)
            encoded_pixel.append('-1')
        else:
            for component in components1:
                r = run_length_encode(component)
                image_id.append(i)
                encoded_pixel.append(r)
        print(i)
        print(num_component0, num_component1)
        print(df['EncodedPixels'].values[-1])
        print(encoded_pixel[-1])
        print('')
    df1 = pd.DataFrame(list(zip(image_id, encoded_pixel)),
                       columns=['ImageId', 'EncodedPixels'])
    # Sort both frames the same way so equals() compares content only.
    df0 = df0.sort_values(
        by=['ImageId', 'EncodedPixels'], ascending=[True, True])
    df1 = df1.sort_values(
        by=['ImageId', 'EncodedPixels'], ascending=[True, True])
    df0.reset_index(drop=True, inplace=True)
    df1.reset_index(drop=True, inplace=True)
    print('df0\n', df0.head(20))
    print('df1\n', df1.head(20))
    print('')
    print(df0.equals(df1))
    # print(df0.values[14])
    # print(df1.values[14])
# lstrip
def run_process_0():
    """Normalise train-rle.csv in place: rename the ' EncodedPixels'
    header (note the leading space) and strip leading whitespace from
    the RLE strings."""
    df = pd.read_csv(
        '/root/share/project/kaggle/2019/chest/data/train-rle.csv')
    df.rename(columns={' EncodedPixels': 'EncodedPixels', }, inplace=True)
    df['EncodedPixels'] = df['EncodedPixels'].str.lstrip(to_strip=None)
    df.to_csv('/root/share/project/kaggle/2019/chest/data/train-rle.csv',
              columns=['ImageId', 'EncodedPixels'], index=False)
    zz = 0
def run_process_1():
    """Add a per-image annotation count column to train-rle.csv and
    write train-rle.more.csv plus a 1000-row debug subset.

    Rows marked '-1' (no annotation) get a count of 0; all other rows
    get the number of RLE rows sharing their ImageId.
    """
    #df = pd.read_csv('/root/share/project/kaggle/2019/chest/data/debug-rle.csv')
    df = pd.read_csv(
        '/root/share/project/kaggle/2019/chest/data/train-rle.csv')
    gb = df.groupby('ImageId')
    count = gb.agg('count')
    num = []
    for image_id, encoded_pixel in df.values:
        if encoded_pixel == '-1':
            num.append(0)
        else:
            num.append(count.loc[image_id].values[0])
    # Fixed: the column was stored as 'num' but written out as 'count',
    # which made to_csv raise KeyError; store it under 'count'.
    df['count'] = num
    df.to_csv('/root/share/project/kaggle/2019/chest/data/train-rle.more.csv',
              columns=['ImageId', 'count', 'EncodedPixels'], index=False)
    df[:1000].to_csv('/root/share/project/kaggle/2019/chest/data/debug-rle.more.csv',
                     columns=['ImageId', 'count', 'EncodedPixels'], index=False)
def run_process_2():
    """Augment train-rle.csv with per-component bounding boxes
    (x0, y0, x1, y1; x1/y1 exclusive) and per-image annotation counts,
    writing train-rle.more.csv and a 1000-row debug subset."""
    dicom_file = get_dicom_file(
        '/root/share/project/kaggle/2019/chest/data/dicom/dicom-images-train')
    #df = pd.read_csv('/root/share/project/kaggle/2019/chest/data/debug-rle.csv')
    df = pd.read_csv(
        '/root/share/project/kaggle/2019/chest/data/train-rle.csv')
    gb = df.groupby('ImageId')
    gb_count = gb.agg('count')
    # Zero defaults for unannotated rows.
    df['x0'] = 0
    df['y0'] = 0
    df['x1'] = 0
    df['y1'] = 0
    df['count'] = 0
    for t, v in enumerate(df.values):
        image_id, encoded_pixel = v[:2]
        if encoded_pixel == '-1':
            pass
        else:
            df.iloc[t, df.columns.get_loc(
                'count')] = gb_count.loc[image_id].values[0]
            rle = encoded_pixel
            component = run_length_decode(
                rle, height=1024, width=1024, fill_value=1)
            # Tight bounding box of the decoded component.
            cc = (component > 0.5).astype(np.float32)
            yy = np.any(cc > 0.5, axis=1)
            xx = np.any(cc > 0.5, axis=0)
            x0, x1 = np.where(xx)[0][[0, -1]]
            y0, y1 = np.where(yy)[0][[0, -1]]
            x1 += 1
            y1 += 1
            print(x0, x1, y0, y1)
            df.iloc[t, df.columns.get_loc('x0')] = x0
            df.iloc[t, df.columns.get_loc('y0')] = y0
            df.iloc[t, df.columns.get_loc('x1')] = x1
            df.iloc[t, df.columns.get_loc('y1')] = y1
            # Mark the top/bottom box edges for the visual check below.
            cc[y0, x0:x1-1] = 0.5
            cc[y1-1, x0:x1-1] = 0.5
            image_show_norm('cc', cc, resize=1)
            cv2.waitKey(1)
            print(image_id)
    # ----
    column = ['ImageId', 'count', 'x0', 'y0', 'x1', 'y1', 'EncodedPixels']
    df.to_csv('/root/share/project/kaggle/2019/chest/data/train-rle.more.csv',
              columns=column, index=False)
    df[:1000].to_csv('/root/share/project/kaggle/2019/chest/data/debug-rle.more.csv',
                     columns=column, index=False)
    zz = 0
def run_split_dataset():
    """Split the image ids into train/valid .npy id lists, holding out
    300 negatives and 300 positives for validation."""
    df = pd.read_csv(
        '/root/share/project/kaggle/2019/chest/data/train-rle.csv')
    gb = df.groupby('ImageId')
    uid = list(gb.groups.keys())
    num_component = []
    for i in uid:
        df = gb.get_group(i)
        # NOTE(review): this reads a 'count' column, which plain
        # train-rle.csv does not appear to contain (it is produced in
        # train-rle.more.csv by run_process_1/2) -- confirm the
        # intended input file.
        num_component.append(df['count'].values[0])
    num_component = np.array(num_component, np.int32)
    neg_index = np.where(num_component == 0)[0]
    pos_index = np.where(num_component >= 1)[0]
    print('num_component==0 : %d' % (len(neg_index)))
    print('num_component>=1 : %d' % (len(pos_index)))
    print('len(uid) : %d' % (len(uid)))
    np.random.shuffle(neg_index)
    np.random.shuffle(pos_index)
    # First 300 shuffled ids of each class form the validation split.
    train_split = np.concatenate([neg_index[300:], pos_index[300:], ])
    valid_split = np.concatenate([neg_index[:300], pos_index[:300], ])
    uid = np.array(uid, np.object)
    train_split = uid[train_split]
    valid_split = uid[valid_split]
    np.save('/root/share/project/kaggle/2019/chest/data/split/train_%d' %
            len(train_split), train_split)
    np.save('/root/share/project/kaggle/2019/chest/data/split/valid_%d' %
            len(valid_split), valid_split)
    zz = 0
def run_split_dataset1():
df = pd.read_csv(
'/root/share/project/kaggle/2019/chest/data/train-rle.csv')
gb = df.groupby('ImageId')
df['count'] = gb['ImageId'].transform('count')
df.loc[df['EncodedPixels'] == '-1', 'count'] = 0
image_id = list(gb.groups.keys())
num_component = []
for i in image_id:
d = gb.get_group(i)
num_component.append(d['count'].values[0])
num_component = np.array(num_component, np.int32)
neg_index = np.where(num_component == 0)[0]
pos_index = np.where(num_component >= 1)[0]
print('num_component==0 : %d' % (len(neg_index)))
print('num_component>=1 : %d' % (len(pos_index)))
print('len(image_id) : %d' % (len(image_id)))
np.random.shuffle(neg_index)
np.random.shuffle(pos_index)
#neg_split = np.array_split(neg_index,8)
#pos_split = np.array_split(pos_index,8)
# >>> 8296/8
# 1037.0
# >>> 2379/8
# 297.375
neg_split = []
pos_split = []
S = 6
for s in range(S):
if s == S-1:
neg_split.append(neg_index[s*300:])
pos_split.append(pos_index[s*300:])
else:
neg_split.append(neg_index[s*300:(s+1)*300])
pos_split.append(pos_index[s*300:(s+1)*300])
image_id = np.array(image_id, np.object)
for s in range(S):
valid_split = np.concatenate([neg_split[s], pos_split[s], ])
train_split = []
for t in range(S):
if t != s:
| |
'project': projPart,
'msg': msg
} )
if len(recs):
coll.insert_many( recs, ordered=True )
#return recs
def collectBoincStatus( db, dataDirPath, statusType ):
    '''run "boinccmd --<statusType>" on all checked instances and store the
    per-instance results in a new time-stamped mongo collection.

    Returns the new collection. The intermediate .jlog file is removed after
    ingestion.
    '''
    # will collect data only from "checked" instances
    wereChecked = db['checkedInstances'].find( {'state': 'checked' } )
    reportables = []
    for inst in wereChecked:
        if 'ssh' not in inst:
            logger.warning( 'no ssh info from checkedInstances for %s', inst )
        else:
            if 'instanceId' not in inst:
                inst['instanceId'] = inst['_id']
            reportables.append( inst )
    startDateTime = datetime.datetime.now( datetime.timezone.utc )
    dateTimeTagFormat = '%Y-%m-%d_%H%M%S'  # cant use iso format dates in filenames because colons
    dateTimeTag = startDateTime.strftime( dateTimeTagFormat )
    resultsLogFilePath = dataDirPath+'/%s_%s.jlog' % (statusType, dateTimeTag )
    collName = '%s_%s' % (statusType, dateTimeTag )

    # retry once after a short delay in case the boinc client is briefly busy
    workerCmd = "boinccmd --%s || (sleep 5 && boinccmd --%s)" % (statusType, statusType)
    tellInstances.tellInstances( reportables, workerCmd,
        resultsLogFilePath=resultsLogFilePath,
        download=None, downloadDestDir=None, jsonOut=None, sshAgent=args.sshAgent,
        timeLimit=args.timeLimit, upload=None, stopOnSigterm=True,  # was min(t, t), which is redundant
        knownHostsOnly=False
        )
    (eventsByInstance, _badIidSet) = demuxResults( resultsLogFilePath )

    # create a list of cleaned-up records to insert
    insertables = []
    for iid, events in eventsByInstance.items():
        for event in events:
            if 'instanceId' in event:
                del event['instanceId']  # redundant inside a per-instance record
        insertables.append( {'instanceId': iid, 'events': events,
            'dateTime': events[0]['dateTime'] } )
    logger.info( 'inserting %d records into %s', len(insertables), collName )
    if insertables:  # insert_many raises InvalidOperation on an empty list
        db[ collName ].insert_many( insertables )
    db[ collName ].create_index( 'instanceId' )
    db[ collName ].create_index( 'dateTime' )
    os.remove( resultsLogFilePath )
    return db[ collName ]
def report_cc_status( db, dataDirPath ):
    '''run "boinccmd --get_cc_status" on all checked instances and log which
    of them report running on battery power.
    '''
    # will report only on "checked" instances
    wereChecked = db['checkedInstances'].find( {'state': 'checked' } )
    reportables = []
    for inst in wereChecked:
        if 'ssh' not in inst:
            logger.warning( 'no ssh info from checkedInstances for %s', inst )
        else:
            inst['instanceId'] = inst['_id']
            reportables.append( inst )
    resultsLogFilePath = dataDirPath+'/report_cc_status.jlog'
    workerCmd = "boinccmd --get_cc_status"
    logger.info( 'calling tellInstances to get cc_status report on %d instances', len(reportables))
    stepStatuses = tellInstances.tellInstances( reportables, workerCmd,
        resultsLogFilePath=resultsLogFilePath,
        download=None, downloadDestDir=None, jsonOut=None, sshAgent=args.sshAgent,
        timeLimit=args.timeLimit, upload=None, stopOnSigterm=True,  # was min(t, t), which is redundant
        knownHostsOnly=False
        )
    # triage the statuses: 0 = success, other int = failure, non-int = exception
    goodIids = []
    failedIids = []
    exceptedIids = []
    for statusRec in stepStatuses:
        iid = statusRec['instanceId']
        status = statusRec['status']
        if isinstance( status, int) and status == 0:
            goodIids.append( iid )
        elif isinstance( status, int ):
            failedIids.append( iid )
        else:
            exceptedIids.append( iid )
    logger.info( '%d completed, %d failed, %d exceptions',
        len( goodIids ), len( failedIids ), len( exceptedIids ) )
    # read back the results and count instances that report battery power
    (eventsByInstance, _badIidSet) = demuxResults( resultsLogFilePath )
    nCounted = 0
    for iid in goodIids:
        abbrevIid = iid[0:16]
        onBatteries = False
        # guard with .get in case a good iid produced no parsed events
        for event in eventsByInstance.get( iid, [] ):
            if 'stdout' in event:
                stdoutStr = event['stdout']
                if 'batteries' in stdoutStr:
                    logger.info( "%s", stdoutStr )
                    onBatteries = True
        if onBatteries:
            nCounted += 1
            logger.warning( 'instance %s on batteries', abbrevIid )
    logger.info( 'nOnBatteries: %d', nCounted )
def parseProjectLines( lines ):
    '''parse "boinccmd --get_project_status"-style output lines into a dict
    of key: value properties for the first listed project.

    Returns an empty dict when the header line is missing or no properties
    are found. Numeric-looking values are converted to int or float.
    '''
    props = {}
    # verbose or uninteresting fields to skip
    ignorables = [' name: ', ' description: ', ' URL: ' ]
    firstLine = True
    for line in lines:
        line = line.rstrip()
        if not line:
            continue
        if firstLine:
            firstLine = False
            if '== Projects ==' in line:
                continue
            else:
                logger.warning( 'improper first line')
                break
        if line[0] != ' ':
            # unindented lines delimit projects; only the first one is parsed
            if line.startswith( '2)' ):
                logger.info( 'found a second project; exiting')
                break
            continue
        if anyFound( ignorables, line ):
            continue
        if ':' in line:
            stripped = line.strip()
            parts = stripped.split( ':', 1 )  # only the first colon will be significant
            # convert to a numeric type, if appropriate; val is always a str
            # here, so the old "is None" check was dead code
            val = parts[1].strip()
            if val.isnumeric():
                val = int( val )
            elif isNumber( val ):
                val = float( val )
            props[ parts[0] ] = val
            continue
        logger.info( '> %s', line )
    return props
def mergeProjectData( srcColl, destColl ):
    '''copy per-instance project properties parsed from a raw status
    collection into destColl, upserting one record per instance.
    '''
    # iterate over records, each containing output for an instance
    for rawRec in srcColl.find():
        iid = rawRec['instanceId']
        if iid == '<master>':
            continue  # master records carry no per-instance project output
        checkedTime = rawRec['dateTime']
        stdoutLines = [ev['stdout'] for ev in rawRec['events'] if 'stdout' in ev]
        props = parseProjectLines( stdoutLines )
        if not props:
            continue  # nothing parseable for this instance
        props['instanceId'] = iid
        props['checkedDateTime'] = checkedTime
        destColl.replace_one( {'_id': iid }, props, upsert=True )
def parseTaskLines( lines ):
    '''parse "boinccmd --get_tasks"-style output lines into a list of task
    dicts; each task carries a 'num' key plus the key: value fields indented
    under its "<n>) ..." boundary line.
    '''
    tasks = []
    curTask = {}
    for line in lines:
        line = line.rstrip()
        if not line:
            continue
        # skip the section header wherever it occurs; the original
        # "firstLine" flag was never cleared, so every header line was
        # skipped anyway -- the flag was dead code
        if '== Tasks ==' in line:
            continue
        if line[0] != ' ':
            # an unindented "<n>) ..." line starts a new task
            numPart = line.split( ')' )[0]
            taskNum = int(numPart)
            curTask = { 'num': taskNum }
            tasks.append( curTask )
            continue
        if ':' in line:
            # extract a key:value pair from this line
            stripped = line.strip()
            parts = stripped.split( ':', 1 )  # only the first colon will be significant
            # convert to a numeric type, if appropriate; val is always a str
            # here, so the old "is None" check was dead code
            val = parts[1].strip()
            if val.isnumeric():
                val = int( val )
            elif isNumber( val ):
                val = float( val )
            # store the value
            curTask[ parts[0] ] = val
            continue
        logger.info( '> %s', line )
    return tasks
def mergeTaskData( srcColl, destColl ):
    '''merge task records parsed from a raw status collection into destColl,
    one upserted record per task name; returns the merged task dict keyed by
    task name.
    '''
    # task names already present in destColl keep their original
    # startDateTimeApprox; only brand-new names get one assigned here
    knownTasks = { rec['name']: rec
                   for rec in destColl.find( {}, {'instanceId':1, 'name':1} ) }
    mergedTasks = {}
    # iterate over records, each containing output for an instance
    for rawRec in srcColl.find():
        iid = rawRec['instanceId']
        if iid == '<master>':
            continue  # master records carry no per-instance task output
        checkedTime = rawRec['dateTime']
        stdoutLines = [ev['stdout'] for ev in rawRec['events'] if 'stdout' in ev]
        for task in parseTaskLines( stdoutLines ):
            task['checkedDateTime'] = checkedTime
            if task['name'] not in knownTasks:
                # never seen before: approximate its start time
                task['startDateTimeApprox'] = checkedTime
            if task['name'] in mergedTasks:
                mergedTasks[ task['name'] ].update( task )
            else:
                task['instanceId'] = iid
                mergedTasks[ task['name'] ] = task
    for task in mergedTasks.values():
        taskName = task['name']
        task['_id'] = taskName
        destColl.replace_one( {'_id': taskName }, task, upsert=True )
    return mergedTasks
def reportAll( db, dataDirPath ):
collNames = sorted( db.list_collection_names(
filter={ 'name': {'$regex': r'^get_cc_status_.*'} } ) )
logger.info( 'get_cc_status_ collections: %s', collNames )
for collName in collNames:
logger.info( 'getting data from %s', collName )
coll = db[collName]
# iterate over records, each containing output for an instance
for inRec in coll.find():
iid = inRec['instanceId']
eventDateTime = inRec['dateTime']
abbrevIid = iid[0:16]
if iid == '<master>':
#logger.info( 'found <master> record' )
pass
else:
#logger.info( 'iid: %s', iid )
events = inRec['events']
for event in events:
if 'stdout' in event:
stdoutStr = event['stdout']
#logger.info( '%s: %s', abbrevIid, stdoutStr )
#TODO extract more info, depending on context
if 'batteries' in stdoutStr:
logger.info( "%s: %s, %s", abbrevIid, eventDateTime, stdoutStr.strip() )
collNames = sorted( db.list_collection_names(
filter={ 'name': {'$regex': r'^get_project_status_.*'} } ) )
logger.info( 'get_project_status_ collections: %s', collNames )
for collName in collNames:
logger.info( 'getting data from %s', collName )
coll = db[collName]
# iterate over records, each containing output for an instance
for inRec in coll.find():
iid = inRec['instanceId']
eventDateTime = inRec['dateTime']
abbrevIid = iid[0:16]
if iid == '<master>':
#logger.info( 'found <master> record' )
pass
else:
#logger.info( 'iid: %s', iid )
events = inRec['events']
for event in events:
if 'stdout' in event:
stdoutStr = event['stdout']
#logger.info( '%s: %s', abbrevIid, stdoutStr )
#TODO extract more info, depending on context
#if 'downloaded' in stdoutStr or 'jobs succeeded' in stdoutStr:
#logger.info( "%s: %s, %s", abbrevIid, eventDateTime[0:19], stdoutStr.strip() )
allTasks = {}
collNames = sorted( db.list_collection_names(
filter={ 'name': {'$regex': r'^get_tasks_.*'} | |
# -*- coding: utf-8 -*-
"""
.. invisible:
_ _ _____ _ _____ _____
| | | | ___| | | ___/ ___|
| | | | |__ | | | |__ \ `--.
| | | | __|| | | __| `--. \
\ \_/ / |___| |___| |___/\__/ /
\___/\____/\_____|____/\____/
Created on Dec 4, 2013
Unit test for pooling layer forward propagation.
███████████████████████████████████████████████████████████████████████████████
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
███████████████████████████████████████████████████████████████████████████████
"""
import numpy
from veles.backends import NumpyDevice
from veles.memory import Array
import veles.prng as prng
from veles.prng.uniform import Uniform
from veles.tests import AcceleratedTest, assign_backend
import veles.znicz.gd_pooling as gd_pooling
import veles.znicz.pooling as pooling
import veles.znicz.depooling as depooling
from veles.dummy import DummyUnit
from veles.znicz.tests.unit.gd_numdiff import GDNumDiff
class TestMaxPooling(AcceleratedTest):
    """Forward-propagation test for MaxAbsPooling (kx=2, ky=2) with a fixed
    input minibatch and precomputed expected output and offsets."""
    # abstract base: concrete backend subclasses run the tests
    ABSTRACT = True
    def setUp(self):
        super(TestMaxPooling, self).setUp()
        # input minibatch reshaped to (3, 5, 7, 2): 3 samples, 5x7 spatial
        # grid, 2 channels; self._dtype is presumably provided by a
        # subclass / the AcceleratedTest machinery -- TODO confirm
        self._input = Array()
        self._input.mem = numpy.array(
            [3, 4, 3, 1, -1, -2, 1, 3, 2, 3, 3, 0, 4, 1,
             (-2), 0, 4, 4, -2, 1, 3, -3, -3, 4, 1, -3, -2, -4,
             (-3), 2, -1, 4, 2, 0, -3, 3, 1, -3, -4, -3, 0, -3,
             (-1), 0, -2, 2, 2, -4, -1, -1, 0, -2, 1, 3, 1, 2,
             2, -2, 4, 0, -1, 0, 1, 0, 0, 3, -3, 3, -1, 1,
             4, 0, -1, -2, 3, 4, -4, -2, -4, 3, -2, -3, -1, -1,
             (-1), -3, 3, 3, -2, -1, 3, 2, -1, -2, 4, -1, 2, 4,
             (-2), -1, 1, 3, -2, -2, 0, -2, 0, 4, -1, -2, -2, -3,
             3, 2, -2, 3, 1, -3, -2, -1, 4, -2, 0, -3, -1, 2,
             2, -3, -1, -1, -3, -2, 2, 3, 0, -2, 1, 2, 0, -3,
             (-4), 1, -1, 2, -1, 0, 3, -2, 4, -3, 4, 4, 1, -4,
             0, -1, 1, 3, 0, 1, 3, 4, -3, 2, 4, 3, -1, 0,
             (-1), 0, 1, -2, -4, 0, -4, -4, 2, 3, 2, -3, 1, 1,
             1, -1, -4, 3, 1, -1, -3, -4, -4, 3, -1, -4, -1, 0,
             (-1), -3, 4, 1, 2, -1, -2, -3, 3, 1, 3, -3, 4, -2],
            dtype=self._dtype).reshape(3, 5, 7, 2)
        # expected pooled output, compared against c.output in _do_test
        self._gold_output = numpy.array(
            [[[[4, 4], [3, 3], [3, 4], [4, -4]],
              [[-3, 4], [-3, -4], [-4, -3], [1, -3]],
              [[4, -2], [-1, 0], [-3, 3], [-1, 1]]],
             [[[4, -3], [-4, 4], [-4, 3], [2, 4]],
              [[3, 3], [-2, -3], [4, 4], [-2, -3]],
              [[2, -3], [-3, 3], [1, -2], [0, -3]]],
             [[[-4, 3], [3, 4], [4, 4], [1, -4]],
              [[-4, 3], [-4, -4], [-4, -4], [1, 1]],
              [[4, -3], [2, -3], [3, -3], [4, -2]]]], dtype=self._dtype)
        # expected input_offset values, compared element-wise in _do_test;
        # these appear to be flat indices into the input -- verify against
        # the pooling unit's documentation
        self._gold_offs = numpy.array(
            [[[[16, 1], [20, 7], [10, 23], [12, 27]],
              [[28, 31], [34, 47], [38, 37], [54, 41]],
              [[58, 57], [60, 61], [66, 65], [68, 69]]],
             [[[70, 85], [76, 75], [78, 79], [96, 97]],
              [[112, 101], [102, 117], [120, 107], [110, 111]],
              [[126, 127], [130, 133], [136, 135], [138, 139]]],
             [[[140, 157], [146, 161], [148, 151], [152, 153]],
              [[184, 185], [172, 175], [190, 193], [180, 181]],
              [[198, 197], [200, 203], [204, 207], [208, 209]]]],
            dtype=numpy.int32)
    def tearDown(self):
        # release the input array before the base class tears down
        del self._input
        super(TestMaxPooling, self).tearDown()
    def test_device(self):
        # run the check on the accelerated device under test
        self._do_test(self.device)
    def test_cpu(self):
        # run the same check on the NumPy (CPU) backend
        self._do_test(NumpyDevice())
    def _do_test(self, device):
        """Run MaxAbsPooling forward on `device` and compare output and
        input offsets against the precomputed gold values."""
        c = pooling.MaxAbsPooling(self.parent, kx=2, ky=2)
        c.input = self._input
        c.initialize(device=device)
        c.run()
        c.output.map_read()
        max_diff = numpy.fabs(self._gold_output.ravel() -
                              c.output.mem.ravel()).max()
        self.assertLess(max_diff, 0.0001, "max difference in output matrix"
                        " is %.6f" % (max_diff))
        c.input_offset.map_read()
        self.assertTrue((c.input_offset.mem == self._gold_offs).all())
class TestStochasticPooling(AcceleratedTest):
    """Checks that stochastic pooling yields bit-identical results on the
    accelerated device and on the NumPy backend."""
    # abstract base: concrete backend subclasses run the tests
    ABSTRACT = True
    def setUp(self):
        super(TestStochasticPooling, self).setUp()
        # random input plus a saved PRNG state so both backends see the
        # same random draws
        self.input = numpy.zeros([3, 17, 17, 7], dtype=self.dtype)
        prng.get().fill(self.input)
        self.random_state = prng.get().state
    def _do_test(self, device, Unit):
        """Run one pooling Unit on `device`; return (output, offsets)."""
        # restore the PRNG state so every run is reproducible
        prng.get().state = self.random_state
        uniform = Uniform(self.parent, output_bytes=315)
        unit = Unit(self.parent, kx=3, ky=3, sliding=(3, 3),
                    uniform=uniform)
        unit.input = Array(self.input.copy())
        unit.initialize(device=device)
        unit.run()
        unit.output.map_read()
        unit.input_offset.map_read()
        return unit.output.mem.copy(), unit.input_offset.mem.copy()
    def _test_gpu_cpu(self, Unit):
        """Assert device and CPU runs of `Unit` agree exactly."""
        cpu_out, cpu_offs = self._do_test(NumpyDevice(), Unit)
        dev_out, dev_offs = self._do_test(self.device, Unit)
        self.assertEqual(numpy.count_nonzero(dev_out - cpu_out), 0)
        self.assertEqual(numpy.count_nonzero(dev_offs - cpu_offs), 0)
    def test_max(self):
        self._test_gpu_cpu(pooling.StochasticPooling)
    def test_maxabs(self):
        self._test_gpu_cpu(pooling.StochasticAbsPooling)
class TestGDMaxPooling(AcceleratedTest):
    """Backward-propagation (gradient descent) test for max pooling: checks
    that err_output values are routed back to the recorded input offsets."""
    # abstract base: concrete backend subclasses run the tests
    ABSTRACT = True
    def setUp(self):
        super(TestGDMaxPooling, self).setUp()
        # forward-pass input, reshaped below to (3, 5, 7, 1); self._dtype is
        # presumably provided by a subclass / test machinery -- TODO confirm
        self._input = numpy.array(
            [[[3, 3, -1, 1, 2, 3, 4],
              [-2, 4, -2, 3, -3, 1, -2],
              [-3, -1, 2, -3, 1, -4, 0],
              [-1, -2, 2, -1, 0, 1, 1],
              [2, 4, -1, 1, 0, -3, -1]],
             [[4, -1, 3, -4, -4, -2, -1],
              [-1, 3, -2, 3, -1, 4, 2],
              [-2, 1, -2, 0, 0, -1, -2],
              [3, -2, 1, -2, 4, 0, -1],
              [2, -1, -3, 2, 0, 1, 0]],
             [[-4, -1, -1, 3, 4, 4, 1],
              [0, 1, 0, 3, -3, 4, -1],
              [-1, 1, -4, -4, 2, 2, 1],
              [1, -4, 1, -3, -4, -1, -1],
              [-1, 4, 2, -2, 3, 3, 4]]], dtype=self._dtype)
        self._input.shape = (3, 5, 7, 1)
        # offsets of the forward pass's selected maxima; these appear to be
        # flat indices into the input -- verify against the pooling unit
        self._input_offset = numpy.array(
            [8, 10, 5, 6, 14, 17, 19, 27, 29, 30, 33, 34,
             35, 38, 39, 48, 56, 51, 60, 55, 63, 65, 68, 69,
             70, 73, 74, 76, 92, 86, 95, 90, 99, 100,
             102, 104], dtype=numpy.int32)
        # gradient arriving from the layer above (one value per pooled cell)
        self._err_output = numpy.array(
            [1, 3, 0.5, -4, 1, -2, -3, -1, -1, 3, -3, -0.5,
             4, -4, -0.3, -3, -1, -3, 2, -2, -4, 2, -1, -3,
             (-4), 2, 3, 2, -1, -1, -3, 4, -2, 2, 0.3, -4], dtype=self._dtype)
        # expected back-propagated gradient: err_output values scattered to
        # the offset positions, zero elsewhere
        self._gold_err_input = numpy.array(
            [[[0, 0, 0, 0, 0, 0.5, -4],
              [0, 1, 0, 3, 0, 0, 0],
              [1, 0, 0, -2, 0, -3, 0],
              [0, 0, 0, 0, 0, 0, -1],
              [0, -1, 3, 0, 0, -3, -0.5]],
             [[4, 0, 0, -4, -0.3, 0, 0],
              [0, 0, 0, 0, 0, 0, -3],
              [0, 0, -3, 0, 0, 0, -2],
              [-1, 0, 0, 0, 2, 0, 0],
              [-4, 0, 2, 0, 0, -1, -3]],
             [[-4, 0, 0, 2, 3, 0, 2],
              [0, 0, 0, 0, 0, 0, 0],
              [0, 0, -1, 0, 0, 0, 4],
              [0, -1, 0, 0, -3, 0, 0],
              [0, -2, 2, 0, 0.3, 0, -4]]], dtype=self._dtype)
    def test_fixed_gpu(self):
        # run the fixed-data check on the accelerated device under test
        return self._test_fixed(self.device)
    def test_fixed_cpu(self):
        # run the same check on the NumPy (CPU) backend
        return self._test_fixed(NumpyDevice())
    def _test_fixed(self, device):
        """Run GDMaxPooling backward on `device` with the fixed fixtures and
        compare err_input against the precomputed gold gradient."""
        self.info('starting OpenCL max pooling layer gradient descent '
                  'test...')
        c = gd_pooling.GDMaxPooling(self.parent)
        c.link_pool_attrs(DummyUnit(kx=2, ky=2, sliding=(2, 2)))
        c.input = Array()
        c.input.mem = self._input.copy()
        c.input_offset = Array()
        c.input_offset.mem = self._input_offset.copy()
        c.err_output = Array()
        c.err_output.mem = self._err_output.copy()
        c.initialize(device=device)
        # poison err_input so untouched cells would be detected
        c.err_input.map_invalidate()
        c.err_input.mem[:] = 1.0e30
        c.run()
        c.err_input.map_read()  # get results back
        max_diff = numpy.fabs(self._gold_err_input.ravel() -
                              c.err_input.mem.ravel()).max()
        self.assertLess(max_diff, 0.0001,
                        "max difference in err_input is %.6f" % (max_diff))
        self.info("test passed")
    # We cannot check by numeric differentiation here
    # 'cause of the non-differentiable function "max".
class TestAvgPooling(AcceleratedTest):
ABSTRACT = True
def setUp(self):
super(TestAvgPooling, self).setUp()
self._input = Array()
self._input.mem = numpy.array(
[3, 4, 3, 1, -1, -2, 1, 3, 2, 3, 3, 0, 4, 1,
(-2), 0, 4, 4, -2, 1, 3, -3, -3, 4, 1, -3, -2, -4,
(-3), 2, -1, 4, 2, 0, -3, 3, 1, -3, -4, -3, 0, -3,
(-1), 0, -2, 2, 2, -4, -1, -1, 0, -2, 1, 3, 1, 2,
2, -2, 4, 0, -1, 0, 1, 0, 0, 3, -3, 3, -1, 1,
4, 0, -1, -2, 3, 4, -4, -2, -4, 3, -2, -3, -1, -1,
(-1), -3, 3, 3, -2, -1, 3, 2, -1, -2, 4, -1, 2, 4,
(-2), -1, 1, 3, -2, -2, 0, -2, 0, 4, -1, -2, -2, -3,
3, 2, -2, 3, 1, -3, -2, -1, 4, -2, 0, -3, -1, 2,
2, -3, -1, -1, -3, -2, 2, 3, 0, -2, 1, 2, 0, -3,
(-4), 1, -1, 2, -1, 0, 3, -2, 4, -3, 4, 4, 1, -4,
0, -1, 1, 3, 0, 1, 3, 4, -3, 2, 4, 3, -1, 0,
(-1), 0, 1, | |
# Copyright 2014 ARM Limited
#
# Licensed under the Apache License, Version 2.0
# See LICENSE file for details.
# standard library modules, , ,
import os
import logging
import re
import itertools
from collections import defaultdict
from collections import OrderedDict
# bsd licensed - pip install jinja2
from jinja2 import Environment, FileSystemLoader
# fsutils, , misc filesystem utils, internal
from yotta.lib import fsutils
# directory holding the jinja templates used for CMakeLists generation
Template_Dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'templates')
logger = logging.getLogger('cmakegen')
# subdirectory names that are never treated as component source directories
Ignore_Subdirs = set(('build','yotta_modules', 'yotta_targets', 'CMake'))
# trim/lstrip blocks keep the generated CMake files readable
jinja_environment = Environment(loader=FileSystemLoader(Template_Dir), trim_blocks=True, lstrip_blocks=True)
def replaceBackslashes(s):
    '''Jinja filter: convert Windows path separators to forward slashes.'''
    return '/'.join(s.split('\\'))
def sanitizePreprocessorSymbol(sym):
    '''Map an arbitrary value to an uppercase, preprocessor-safe symbol by
    replacing every non-alphanumeric character with "_".'''
    cleaned = re.sub(r'[^a-zA-Z0-9]', '_', str(sym))
    return cleaned.upper()
def sanitizeSymbol(sym):
    '''Replace every non-alphanumeric character in str(sym) with "_",
    preserving case.'''
    text = str(sym)
    return re.sub(r'[^a-zA-Z0-9]', '_', text)
# register helpers so templates can use them as filters/globals directly
jinja_environment.filters['replaceBackslashes'] = replaceBackslashes
jinja_environment.filters['sanitizePreprocessorSymbol'] = sanitizePreprocessorSymbol
jinja_environment.globals['list'] = list
jinja_environment.globals['pathJoin'] = os.path.join
class SourceFile(object):
    '''A single source file discovered in a component directory.'''
    def __init__(self, fullpath, relpath, lang):
        '''fullpath: full path to the file; relpath: path relative to the
        component root; lang: language key used by the build templates.'''
        super(SourceFile, self).__init__()
        self.lang = lang
        self.relpath = relpath
        self.fullpath = fullpath
    def __repr__(self):
        # the full path is the most useful one-line identification
        return self.fullpath
class CMakeGen(object):
    def __init__(self, directory, target):
        ''' Create a CMake generator that writes build files under
            `directory` for the given build `target`.
        '''
        super(CMakeGen, self).__init__()
        self.buildroot = directory
        logger.info("generate for target: %s" % target)
        self.target = target
        # set by configure(); generation asserts that it ran first
        self.configured = False
        # config/build-info artefacts, filled in lazily by configure()
        self.config_include_file = None
        self.config_json_file = None
        self.build_info_include_file = None
        self.build_uuid = None
def _writeFile(self, path, contents):
dirname = os.path.dirname(path)
fsutils.mkDirP(dirname)
self.writeIfDifferent(path, contents)
    def configure(self, component, all_dependencies):
        ''' Ensure all config-time files have been generated. Return a
            dictionary of generated items.
        '''
        r = {}  # NOTE(review): unused; the literal below is returned instead
        builddir = self.buildroot
        # only dependencies which are actually valid can contribute to the
        # config data (which includes the versions of all dependencies in its
        # build info) if the dependencies aren't available we can't tell what
        # version they are. Anything missing here should always be a test
        # dependency that isn't going to be used, otherwise the yotta build
        # command will fail before we get here
        available_dependencies = OrderedDict((k, v) for k, v in all_dependencies.items() if v)
        self.set_toplevel_definitions = ''
        # the build-info and config include files are generated at most once
        # and cached on self for subsequent configure() calls
        if self.build_info_include_file is None:
            self.build_info_include_file, build_info_definitions = self.getBuildInfo(component.path, builddir)
            self.set_toplevel_definitions += build_info_definitions
        if self.config_include_file is None:
            self.config_include_file, config_definitions, self.config_json_file = self._getConfigData(available_dependencies, component, builddir, self.build_info_include_file)
            self.set_toplevel_definitions += config_definitions
        self.configured = True
        return {
            'merged_config_include': self.config_include_file,
            'merged_config_json': self.config_json_file,
            'build_info_include': self.build_info_include_file
        }
    def generateRecursive(self, component, all_components, builddir=None, modbuilddir=None, processed_components=None, application=None):
        ''' generate top-level CMakeLists for this component and its
            dependencies: the CMakeLists are all generated in self.buildroot,
            which MUST be out-of-source
            !!! NOTE: experimenting with a slightly different way of doing
            things here, this function is a generator that yields any errors
            produced, so the correct use is:
            for error in gen.generateRecursive(...):
                print(error)
        '''
        assert(self.configured)
        if builddir is None:
            builddir = self.buildroot
        if modbuilddir is None:
            modbuilddir = os.path.join(builddir, 'ym')
        if processed_components is None:
            processed_components = dict()
        if not self.target:
            yield 'Target "%s" is not a valid build target' % self.target
        # the very first call (empty processed_components) is the top level
        toplevel = not len(processed_components)
        logger.debug('generate build files: %s (target=%s)' % (component, self.target))
        # because of the way c-family language includes work we need to put the
        # public header directories of all components that this component
        # depends on (directly OR indirectly) into the search path, which means
        # we need to first enumerate all the direct and indirect dependencies
        recursive_deps = component.getDependenciesRecursive(
            available_components = all_components,
            target = self.target,
            available_only = True,
            test = True
        )
        dependencies = component.getDependencies(
            all_components,
            target = self.target,
            available_only = True,
            test = True
        )
        for name, dep in dependencies.items():
            # if dep is a test dependency, then it might not be required (if
            # we're not building tests). We don't actually know at this point
            if not dep:
                # NOTE(review): dep is falsy here yet isTestDependency() is
                # called on it -- this only works if a missing dependency is a
                # falsy component object rather than None; confirm upstream
                if dep.isTestDependency():
                    logger.debug('Test dependency "%s" of "%s" is not installed.' % (name, component))
                else:
                    yield 'Required dependency "%s" of "%s" is not installed.' % (name, component)
        # ensure this component is assumed to have been installed before we
        # check for its dependencies, in case it has a circular dependency on
        # itself
        processed_components[component.getName()] = component
        new_dependencies = OrderedDict([(name,c) for name,c in dependencies.items() if c and not name in processed_components])
        self.generate(builddir, modbuilddir, component, new_dependencies, dependencies, recursive_deps, application, toplevel)
        logger.debug('recursive deps of %s:' % component)
        for d in recursive_deps.values():
            logger.debug('    %s' % d)
        # recurse into the newly discovered dependencies, each building in its
        # own subdirectory of the module build dir
        processed_components.update(new_dependencies)
        for name, c in new_dependencies.items():
            for error in self.generateRecursive(
                c, all_components, os.path.join(modbuilddir, name), modbuilddir, processed_components, application=application
            ):
                yield error
def checkStandardSourceDir(self, dirname, component):
# validate, , validate various things, internal
from yotta.lib import validate
err = validate.sourceDirValidationError(dirname, component.getName())
if err:
logger.warning(err)
def _validateListedSubdirsExist(self, component):
''' Return true if all the subdirectories which this component lists in
its module.json file exist (although their validity is otherwise
not checked).
If they don't, warning messages are printed.
'''
lib_subdirs = component.getLibs(explicit_only=True)
bin_subdirs = component.getBinaries()
ok = True
for d in lib_subdirs:
if not os.path.exists(os.path.join(component.path, d)):
logger.warning(
"lib directory \"%s\" doesn't exist but is listed in the module.json file of %s", d, component
)
ok = False
for d in bin_subdirs:
if not os.path.exists(os.path.join(component.path, d)):
logger.warning(
"bin directory \"%s\" doesn't exist but is listed in the module.json file of %s", d, component
)
ok = False
return ok
def _listSubDirectories(self, component, toplevel):
''' return: {
manual: [list of subdirectories with manual CMakeLists],
auto: [list of pairs: (subdirectories name to autogenerate, a list of source files in that dir)],
bin: {dictionary of subdirectory name to binary name},
lib: {dictionary of subdirectory name to binary name},
test: [list of directories that build tests],
resource: [list of directories that contain resources]
}
'''
manual_subdirs = []
auto_subdirs = []
header_subdirs = []
lib_subdirs = component.getLibs()
bin_subdirs = component.getBinaries()
test_subdirs = []
resource_subdirs = []
# if the application or library is set to get the sources from top level ("."),
# they'll be acumulated into a single array (top_sources below).
top_sources = []
start_on_top = "." in [os.path.normpath(x) for x in list(lib_subdirs.keys()) + list(bin_subdirs.keys())]
for f in sorted(os.listdir(component.path)):
if f in Ignore_Subdirs or f.startswith('.') or f.startswith('_'):
continue
check_cmakefile_path = os.path.join(f, 'CMakeLists.txt')
if os.path.isfile(os.path.join(component.path, check_cmakefile_path)) and not \
component.ignores(check_cmakefile_path):
self.checkStandardSourceDir(f, component)
# if the subdirectory has a CMakeLists.txt in it (and it isn't
# ignored), then delegate to that:
manual_subdirs.append(f)
# tests only supported in the `test` directory for now
if f in ('test',):
test_subdirs.append(f)
else:
if os.path.isfile(os.path.join(component.path, f)):
# top level source: check if it should be included
if not component.ignores(f) and start_on_top:
sf = self.createSourceFile(f, os.path.join(component.path, f), ".")
if sf is not None:
top_sources.append(sf)
else:
# otherwise, if the directory has source files, and is listed
# as a source/test directory, generate a CMakeLists in the
# corresponding temporary directory, and add that.
sources = self.containsSourceFiles(os.path.join(component.path, f), component)
if sources:
if f in ('test',):
auto_subdirs.append((f, sources))
test_subdirs.append(f)
elif start_on_top:
# include the sources in this directory only if it's not
# a potential test directory
from yotta.lib import validate
if not validate.isPotentialTestDir(f):
top_sources.extend(sources)
if f == component.getName():
header_subdirs.append((f, sources))
elif os.path.normpath(f) in [fsutils.fullySplitPath(x)[0] for x in lib_subdirs] or \
os.path.normpath(f) in [fsutils.fullySplitPath(x)[0] for x in bin_subdirs]:
for full_subpath in list(lib_subdirs.keys()) + list(bin_subdirs.keys()):
if fsutils.fullySplitPath(full_subpath)[0] == os.path.normpath(f):
# this might be a sub-sub directory, in which
# case we need to re-calculate the sources just
# for the part we care about:
sources = self.containsSourceFiles(os.path.join(component.path, full_subpath), component)
auto_subdirs.append((full_subpath, sources))
elif f == component.getName():
header_subdirs.append((f, sources))
elif toplevel and \
((f in ('test',)) or \
(os.path.normpath(f) in lib_subdirs or start_on_top) or \
(os.path.normpath(f) in bin_subdirs or start_on_top) and not \
component.ignores(f)):
# (if there aren't any source files then do nothing)
# !!! FIXME: ensure this warning is covered in tests
logger.warning("subdirectory \"%s\" of %s was ignored because it doesn't appear to contain any source files", f, component)
# 'resource' directory also has special meaning, but there's no
# pattern for the files which might be in here:
if f in ('resource',):
resource_subdirs.append(os.path.join(component.path, f))
# issue a warning if a differently cased or common misspelling of a
# standard directory name was encountered:
check_directory_name_cases = list(lib_subdirs.keys()) + list(bin_subdirs.keys()) + ['test', 'resource']
if f.lower() in check_directory_name_cases + ['src'] and not \
f in check_directory_name_cases and not \
component.ignores(f):
self.checkStandardSourceDir(f, component)
if top_sources:
# all the top level sources are grouped into a single cmake-generated directory
# which is given the same name as the component
auto_subdirs.append((component.getName(), top_sources))
return {
"manual": manual_subdirs,
"auto": auto_subdirs,
"headers": header_subdirs,
"bin": {component.getName(): | |
from typing import List
import matplotlib.pyplot as plt
import numbers
import numpy as np
import pandas as pd
from scipy import stats
from sklearn.metrics import auc, plot_roc_curve, roc_curve, RocCurveDisplay
from sklearn.model_selection import KFold, LeaveOneOut, GroupKFold, LeaveOneGroupOut
from sklearn.preprocessing import label_binarize
from matplotlib import colors
import copy
import decimal
from .multiclass_fitparams import OneVsRestClassifierPatched
from ._cv_eval_set import init_eval_set, _make_transformer, _eval_set_selection
class classplot():
# 散布図カラーリスト
_SCATTER_COLORS = ['green', 'red', 'mediumblue', 'brown', 'darkmagenta', 'darkorange', 'gold', 'grey']
# クラス確率図カラーマップ
_PROB_CMAP = ['Greens', 'Reds', 'Blues', 'YlOrBr', 'Purples', 'OrRd', 'Wistia', 'Greys']
# デフォルトでの決定境界図の透明度(alpha)
_DEFAULT_SEPARATOR_ALPHA = 0.3
# デフォルトでのクラス確率図等高線モードの透明度(alpha)
_DEFAULT_PROBA_CONTOURF_ALPHA = 0.5
# デフォルトでのクラス確率図透明度補正シグモイド関数のゲイン
_DEFAULT_PROBA_CONTOURF_SIG_GAIN = 0.5
# デフォルトでのクラス確率図の等高線段階数
_DEFAULT_PROBA_CONTOURF_LEVELS = 10
# デフォルトでのクラス確率図RGB画像モードの透明度(alpha)
_DEFAULT_PROBA_RGB_ALPHA = 0.45
def _round_digits(src: float, rounddigit: int = None, method='decimal'):
"""
指定桁数で小数を丸める
Parameters
----------
src : float
丸め対象の数値
rounddigit : int
フィッティング線の表示範囲(標準偏差の何倍まで表示するか指定)
method : int
桁数決定手法('decimal':小数点以下, 'sig':有効数字(Decimal指定), 'format':formatで有効桁数指定)
"""
if method == 'decimal':
return round(src, rounddigit)
elif method == 'sig':
with decimal.localcontext() as ctx:
ctx.prec = rounddigit
return ctx.create_decimal(src)
elif method == 'format':
return '{:.{width}g}'.format(src, width=rounddigit)
def _reshape_input_data(x, y, data, x_colnames, cv_group):
"""
入力データの形式統一(pd.DataFrame or np.ndarray)
"""
# dataがpd.DataFrameのとき
if isinstance(data, pd.DataFrame):
if not isinstance(x, list):
raise Exception('`x` argument should be list[str] if `data` is pd.DataFrame')
if not isinstance(y, str):
raise Exception('`y` argument should be str if `data` is pd.DataFrame')
if x_colnames is not None:
raise Exception('`x_colnames` argument should be None if `data` is pd.DataFrame')
X = data[x].values
y_true = data[y].values
x_colnames = x
y_colname = y
cv_group_colname = cv_group
# dataがNoneのとき(x, y, cv_groupがnp.ndarray)
elif data is None:
if not isinstance(x, np.ndarray):
raise Exception('`x` argument should be np.ndarray if `data` is None')
if not isinstance(y, np.ndarray):
raise Exception('`y` argument should be np.ndarray if `data` is None')
X = x if len(x.shape) == 2 else x.reshape([x.shape[0], 1])
y_true = y.ravel()
# x_colnameとXの整合性確認
if x_colnames is None:
x_colnames = list(range(X.shape[1]))
elif X.shape[1] != len(x_colnames):
raise Exception('width of X must be equal to length of x_colnames')
else:
x_colnames = x_colnames
y_colname = 'objective_variable'
if cv_group is not None: # cv_group指定時
cv_group_colname = 'group'
data = pd.DataFrame(np.column_stack((X, y_true, cv_group)),
columns=x_colnames + [y_colname] + [cv_group_colname])
else:
cv_group_colname = None
data = pd.DataFrame(np.column_stack((X, y)),
columns=x_colnames + [y_colname])
else:
raise Exception('`data` argument should be pd.DataFrame or None')
return X, y_true, data, x_colnames, y_colname, cv_group_colname
    @classmethod
    def _chart_plot_2d(cls, trained_clf, x_chart, y_true_col, y_pred_col, data, x_chart_indices,
                       x1_start, x1_end, x2_start, x2_end, other_x, chart_scale,
                       proba_pred_col, proba_class_indices, ax, plot_border, plot_scatter,
                       scatter_color_dict, scatter_marker_dict, proba_cmap_dict, proba_type,
                       contourf_kws=None, imshow_kws=None, scatter_kws=None, legend_kws=None):
        """
        Draw a classification chart (decision boundary or class-probability
        chart) plus the observation scatter plots.

        This is the drawing part of the ``class_separator_plot`` /
        ``class_prob_plot`` methods. A prediction grid spanning
        ``[x1_start, x1_end] x [x2_start, x2_end]`` is built for the two
        charted features, while all non-chart features are held at the fixed
        values in ``other_x``. When ``proba_pred_col`` is None a decision
        boundary is drawn; otherwise class probabilities are rendered with
        the mode selected by ``proba_type`` ('contourf', 'contour' or
        'imshow').

        NOTE(review): ``contourf_kws``/``scatter_kws`` are mutated in place
        ('alpha' is temporarily rewritten, 'edgecolors' may be added) and a
        new 'error' column is added to ``data`` — callers share these objects.
        """
        # Fall back to matplotlib.pyplot.gca() when no axes object is given
        if ax is None:
            ax=plt.gca()
        # Derive the grid resolution from the current figure size in pixels
        xnum, ynum = plt.gcf().dpi * plt.gcf().get_size_inches()
        # Build the chart grid; chart_scale coarsens the grid to speed up inference
        xx = np.linspace(x1_start, x1_end, num=int(xnum/chart_scale))
        yy = np.linspace(x2_start, x2_end, num=int(ynum/chart_scale))
        X1, X2 = np.meshgrid(xx, yy)
        X_grid = np.c_[X1.ravel(), X2.ravel()]
        # Assemble a full feature matrix for prediction: non-chart features
        # are appended as the fixed values given in other_x
        n_rows = X_grid.shape[0]
        X_all = []
        other_add_flg = False
        for i in range(2 + len(other_x)):
            if i == x_chart_indices[0]:  # first charted feature
                X_all.append(X_grid[:, 0].reshape(n_rows, 1))
            elif i == x_chart_indices[1]:  # second charted feature
                X_all.append(X_grid[:, 1].reshape(n_rows, 1))
            elif len(other_x) >= 1 and not other_add_flg:  # first non-chart feature (fixed value)
                X_all.append(np.full((n_rows, 1), other_x[0]))
                other_add_flg = True
            elif len(other_x) == 2:  # second non-chart feature (fixed value)
                X_all.append(np.full((n_rows, 1), other_x[1]))
        X_all = np.hstack(X_all)
        # Predict the class of every grid point
        y_pred_grid = trained_clf.predict(X_all)
        # Map class labels to integer codes (contourf needs numeric levels)
        class_int_dict = dict(zip(scatter_color_dict.keys(), range(len(scatter_color_dict))))
        y_pred_grid_int = np.vectorize(lambda x: class_int_dict[x])(y_pred_grid)
        # Pivot the flat predictions back onto the 2D grid
        y_pred_pivot = y_pred_grid_int.reshape(X1.shape)
        # Decision-boundary chart
        if proba_pred_col is None:
            # Filled contour plot: one color band per predicted class
            ax.contourf(X1, X2, y_pred_pivot,
                        levels=np.arange(y_pred_pivot.max() + 2) - 0.5,
                        **contourf_kws)
        # Class-probability chart
        else:
            # Number of classes to draw
            nclass = len(proba_class_indices)
            # Class probabilities of every grid point
            y_proba_grid = trained_clf.predict_proba(X_all)[:, proba_class_indices]
            # 'contourf': one filled contour layer per class
            if proba_type == 'contourf':
                # Remember the caller's alpha so it can be restored afterwards
                src_alpha = contourf_kws['alpha']
                # Correct alpha with a sigmoid (tends to `alpha` for 1 class, to 1 for many classes)
                contourf_kws['alpha'] = 2*(1-src_alpha)/(1+np.exp(-cls._DEFAULT_PROBA_CONTOURF_SIG_GAIN*(nclass-1)))+2*src_alpha-1
                # Draw each class's probability layer
                for i in range(nclass):
                    # Pivot this class's probabilities onto the grid
                    y_proba_pivot = y_proba_grid[:, i].reshape(X1.shape)
                    # Colormap of this class, taken from proba_cmap_dict
                    cmap = list(proba_cmap_dict.values())[i]
                    # Draw the probability contour
                    ax.contourf(X1, X2, y_proba_pivot,
                                cmap=cmap,
                                **contourf_kws)
                    # Fade successive layers: alpha -> alpha / (1 + alpha)
                    old_alpha = contourf_kws['alpha']
                    contourf_kws['alpha'] = old_alpha / (1 + old_alpha)
                # Restore the caller's alpha (contourf_kws was mutated in place)
                contourf_kws['alpha'] = src_alpha
            # 'contour': contour lines without fill
            elif proba_type == 'contour':
                # Draw each class's probability contour
                for i in range(nclass):
                    # Pivot this class's probabilities onto the grid
                    y_proba_pivot = y_proba_grid[:, i].reshape(X1.shape)
                    # Colormap of this class, taken from proba_cmap_dict
                    cmap = list(proba_cmap_dict.values())[i]
                    ax.contour(X1, X2, y_proba_pivot,
                               cmap=cmap,
                               **contourf_kws)
            # 'imshow': render the probabilities as an RGB image
            elif proba_type == 'imshow':
                # Start from zero in every channel
                proba_g = np.zeros(X1.shape)  # green
                proba_r = np.zeros(X1.shape)  # red
                proba_b = np.zeros(X1.shape)  # blue
                # Store each class's probability in its matching RGB channel
                for i, cmap in enumerate(proba_cmap_dict.values()):
                    if cmap == 'Greens':
                        proba_g = y_proba_grid[:, i].reshape(X1.shape)
                    elif cmap == 'Reds':
                        proba_r = y_proba_grid[:, i].reshape(X1.shape)
                    elif cmap == 'Blues':
                        proba_b = y_proba_grid[:, i].reshape(X1.shape)
                    else:
                        # imshow mode supports the Greens/Reds/Blues colormaps only
                        # (four or more classes cannot be drawn this way)
                        raise Exception('only "Greens, Reds, Blues" cmap are allowd if the "proba_type" argument is "imshow"')
                # Stack the channels and flip vertically (image origin is top-left)
                im_grid = np.flip(np.stack([proba_r, proba_g, proba_b], 2), axis=0)
                # Draw the RGB image
                ax.imshow(im_grid,
                          aspect="auto", extent=(x1_start, x1_end, x2_start, x2_end),
                          **imshow_kws)
            else:
                raise Exception('the "proba_type" argument must be "contourf", "contour" or "imshow"')
        # Draw the class boundary lines
        if plot_border:
            ax.contour(X1, X2, y_pred_pivot,
                       levels=np.arange(y_pred_pivot.max() + 2) - 0.5,
                       colors='k',
                       linewidths=0.5,
                       antialiased=True)
        # Scatter plot of the observations
        if plot_scatter is not None:
            # Default the marker edge color to dimgrey when not specified
            if 'edgecolors' not in scatter_kws.keys():
                scatter_kws['edgecolors'] = 'dimgrey'
            # True when the prediction matches the actual class
            data['error'] = (data[y_true_col] == data[y_pred_col])
            # Color coding
            if plot_scatter == 'error':  # color by correct/incorrect
                cdict = {True:'blue', False:'red'}
                for name, group in data.groupby('error'):
                    ax.scatter(group[x_chart[0]].values, group[x_chart[1]].values,
                               label=name, c=cdict[name],
                               marker=scatter_marker_dict[name],
                               **scatter_kws)
            elif plot_scatter == 'class':  # color by true class
                for name, group in data.groupby(y_true_col):
                    ax.scatter(group[x_chart[0]].values, group[x_chart[1]].values,
                               label=name, c=scatter_color_dict[name],
                               **scatter_kws)
            elif plot_scatter == 'class_error':  # color by class, marker by correctness
                for name, group in data.groupby([y_true_col, 'error']):
                    ax.scatter(group[x_chart[0]].values, group[x_chart[1]].values,
                               label=f'{name[0]} {name[1]}', c=scatter_color_dict[name[0]],
                               marker=scatter_marker_dict[name[1]],
                               **scatter_kws)
        # Legend
        ax.legend(**legend_kws)
        # Axis labels
        ax.set_xlabel(x_chart[0])
        ax.set_ylabel(x_chart[1])
@classmethod
def _class_chart_plot(cls, trained_clf, X, y_pred, y_true, x_chart, x_not_chart, x_chart_indices,
pair_sigmarange=2.0, pair_sigmainterval=0.5, chart_extendsigma=0.5, chart_scale=1,
proba_pred = None, proba_class_indices = None, plot_border=True, plot_scatter='class',
scatter_color_dict=None, scatter_marker_dict=None, proba_cmap_dict=None, proba_type=None,
rounddigit_x3=None, cv_index=None,
subplot_kws=None, contourf_kws=None, imshow_kws=None, scatter_kws=None, legend_kws=None):
"""
分類チャート(決定境界図 or クラス確率図)表示の、説明変数の数に応じた分岐処理
(class_separator_plotあるいはclass_prob_plotメソッド処理のうち、説明変数の数に応じたデータ分割等を行う)
"""
# 説明変数の数
x_num = X.shape[1]
# チャート(決定境界図 or クラス確率図)使用DataFrame
df_chart = pd.DataFrame(X[:, x_chart_indices], columns=x_chart)
# チャート非使用DataFrame
X_not_chart = X[:, [i for i in range(X.shape[1]) if i not in x_chart_indices]]
df_not_chart = pd.DataFrame(X_not_chart, columns=x_not_chart)
# 結合&目的変数実測値と予測値追加
df_all = df_chart.join(df_not_chart)
df_all = df_all.join(pd.DataFrame(y_true, columns=['y_true']))
df_all = df_all.join(pd.DataFrame(y_pred, columns=['y_pred']))
# クラス確率追加(クラス確率図プロット時のみ)
if proba_pred is not None:
proba_pred_col = list(proba_cmap_dict.keys())
df_all = df_all.join(pd.DataFrame(proba_pred, columns=[proba_pred_col]))
else:
proba_pred_col = None
# チャート非使用変数を標準化してDataFrameに追加
if x_num >= 3:
X_not_chart_norm = stats.zscore(df_not_chart)
if isinstance(X_not_chart_norm, pd.DataFrame): # X_not_chart_normがDataFrameの時
not_chart_rename_dir = {c: f'normalize_{c}' for c in df_not_chart}
df_all = df_all.join(X_not_chart_norm.rename(columns=not_chart_rename_dir))
else: # X_not_chart_normがndarrayの時(古いscipyのバージョン時)
df_all = df_all.join(pd.DataFrame(X_not_chart_norm, columns=[f'normalize_{c}' for c in df_not_chart]))
# チャートのX1軸およびX2軸の表示範囲(最大最小値 + extendsigma)
x1_min = np.min(X[:, x_chart_indices[0]])
x1_max = np.max(X[:, x_chart_indices[0]])
x1_std = np.std(X[:, x_chart_indices[0]])
x1_start = x1_min - x1_std * chart_extendsigma
x1_end = x1_max + x1_std * chart_extendsigma
x2_min = np.min(X[:, x_chart_indices[1]])
x2_max = np.max(X[:, x_chart_indices[1]])
x2_std = np.std(X[:, x_chart_indices[1]])
x2_start = x2_min - x2_std * chart_extendsigma
x2_end = x2_max + x2_std * chart_extendsigma
# プロットする図の数(sigmarange外「2枚」 + sigmarange内「int(pair_sigmarange / pair_sigmainterval) * 2枚」)
pair_n = int(pair_sigmarange / pair_sigmainterval) * 2 + 2
# チャート非使用変数をプロットする範囲の下限(標準化後)
pair_min = -(pair_n - 2) / 2 * pair_sigmainterval
# 説明変数が2次元のとき (図は1枚のみ)
if x_num == 2:
pair_w = 1
pair_h = 1
# 説明変数が3次元のとき (図はpair_n × 1枚)
elif x_num == 3:
pair_w = 1
pair_h = pair_n
# 説明変数が4次元のとき (図はpair_n × pair_n枚)
elif x_num == 4:
pair_w = pair_n
pair_h = pair_n
# figsize (全ての図全体のサイズ)指定
if 'figsize' not in subplot_kws.keys():
subplot_kws['figsize'] = (pair_w * 6, pair_h * 5)
# プロット用のaxes作成
fig, axes = plt.subplots(pair_h, pair_w, **subplot_kws)
if cv_index is not None:
fig.suptitle(f'CV {cv_index}')
# 図ごとにプロット
for i in range(pair_h):
for j in range(pair_w):
# pair縦軸変数(標準化後)の最小値
if i == 0:
h_min = -float('inf')
h_mean = pair_min - pair_sigmainterval / 2 # チャート非使用変数指定用の平均値
else:
h_min = pair_min + (i - 1) * pair_sigmainterval
h_mean = pair_min + (i - 0.5) * pair_sigmainterval # チャート非使用変数指定用の平均値
# pair縦軸変数(標準化後)の最大値
if i == pair_h - 1:
h_max = float('inf')
else:
h_max = pair_min + i * pair_sigmainterval
# pair横軸変数(標準化後)の最小値
if j == 0:
w_min = -float('inf')
w_mean = pair_min - pair_sigmainterval / 2 # チャート非使用変数指定用の平均値
else:
w_min = pair_min + (j - 1) * pair_sigmainterval
w_mean = pair_min + (j - 0.5) * pair_sigmainterval # チャート非使用変数指定用の平均値
# pair横軸変数(標準化後)の最大値
if j == pair_w - 1:
w_max = float('inf')
else:
w_max = pair_min + j * pair_sigmainterval
# 説明変数が2次元のとき (図は1枚のみ)
if x_num == 2:
ax = axes
df_pair = df_all.copy()
other_x = []
# 説明変数が3次元のとき (図はpair_n × 1枚)
elif x_num == 3:
| |
import json
import textwrap
from datetime import datetime, date
import pytest
import transaction
from freezegun import freeze_time
from libres.db.models import Reservation
from libres.modules.errors import AffectedReservationError
from onegov.form import FormSubmission
from onegov.reservation import ResourceCollection
from onegov.ticket import TicketCollection
from tests.onegov.org.common import get_mail
def test_resource_slots(client):
    """The resource /slots JSON endpoint reports availability per allocation."""
    resources = ResourceCollection(client.app.libres_context)
    resource = resources.add("Foo", 'Europe/Zurich')
    scheduler = resource.get_scheduler(client.app.libres_context)
    # two whole-day allocations (Aug 4 and Aug 5) ...
    scheduler.allocate(
        dates=[
            (datetime(2015, 8, 4), datetime(2015, 8, 4)),
            (datetime(2015, 8, 5), datetime(2015, 8, 5))
        ],
        whole_day=True
    )
    # ... and one partial-day allocation (Aug 6, 12:00-16:00)
    scheduler.allocate(
        dates=[
            (datetime(2015, 8, 6, 12, 0), datetime(2015, 8, 6, 16, 0)),
        ],
        whole_day=False
    )
    # fully book the partial-day allocation
    scheduler.approve_reservations(
        scheduler.reserve(
            '<EMAIL>',
            (datetime(2015, 8, 6, 12, 0), datetime(2015, 8, 6, 16, 0)),
        )
    )
    transaction.commit()
    # without a date range no slots are returned
    url = '/resource/foo/slots'
    assert client.get(url).json == []
    # the whole-day allocations are reported as available
    url = '/resource/foo/slots?start=2015-08-04&end=2015-08-05'
    result = client.get(url).json
    assert len(result) == 2
    assert result[0]['start'] == '2015-08-04T00:00:00+02:00'
    assert result[0]['end'] == '2015-08-05T00:00:00+02:00'
    assert result[0]['className'] == 'event-in-past event-available'
    assert result[0]['title'] == "Ganztägig \nVerfügbar"
    assert result[1]['start'] == '2015-08-05T00:00:00+02:00'
    assert result[1]['end'] == '2015-08-06T00:00:00+02:00'
    assert result[1]['className'] == 'event-in-past event-available'
    assert result[1]['title'] == "Ganztägig \nVerfügbar"
    # the reserved allocation is reported as unavailable
    url = '/resource/foo/slots?start=2015-08-06&end=2015-08-06'
    result = client.get(url).json
    assert len(result) == 1
    assert result[0]['className'] == 'event-in-past event-unavailable'
    assert result[0]['title'] == "12:00 - 16:00 \nBesetzt"
def test_resources(client):
    """Resources can be created, renamed, duplicate-checked and deleted."""
    client.login_admin()
    resources = client.get('/resources')
    # create and rename an item resource
    new_item = resources.click('Gegenstand')
    new_item.form['title'] = 'Beamer'
    resource = new_item.form.submit().follow()
    assert 'Beamer' in resource
    edit = resource.click('Bearbeiten')
    edit.form['title'] = 'Beamers'
    edit.form.submit().follow()
    # create and rename a room resource
    new = resources.click('Raum')
    new.form['title'] = 'Meeting Room'
    resource = new.form.submit().follow()
    assert 'calendar' in resource
    assert 'Meeting Room' in resource
    edit = resource.click('Bearbeiten')
    edit.form['title'] = 'Besprechungsraum'
    edit.form.submit()
    resources = client.get('/resources')
    assert 'Besprechungsraum' in resources
    assert 'Beamers' in resources
    # Check warning duplicate: submit the freshly filled duplicate form
    # (previously this re-submitted the stale `new` form, leaving the
    # duplicate form unused)
    duplicate = resources.click('Raum')
    duplicate.form['title'] = 'Meeting Room'
    page = duplicate.form.submit()
    assert "Eine Resource mit diesem Namen existiert bereits" in page
    # the room can be deleted via its delete link
    resource = client.get('/resource/meeting-room')
    delete_link = resource.pyquery('a.delete-link').attr('ic-delete-from')
    assert client.delete(delete_link, status=200)
def add_reservation(
        resource,
        client,
        start,
        end,
        email=None,
        partly_available=True,
        reserve=True,
        approve=True,
        add_ticket=True
):
    """Test helper: allocate *start*-*end* on *resource*, optionally reserve
    the slot, approve the reservation and open a matching 'RSV' ticket.

    Returns the resource for chaining.
    """
    if not email:
        # NOTE(review): this line was mangled by anonymisation and is not
        # valid Python as written; it presumably built a unique test address.
        email = f'{<EMAIL>'
    allocation = resource.scheduler.allocate(
        (start, end),
        partly_available=partly_available,
    )[0]
    if reserve:
        resource_token = resource.scheduler.reserve(
            email,
            (allocation.start, allocation.end),
        )
    if reserve and approve:
        resource.scheduler.approve_reservations(resource_token)
    if add_ticket:
        # NOTE(review): relies on `resource_token`, which is only bound when
        # reserve=True — reserve=False with add_ticket=True would raise
        # NameError. The visible call site uses the defaults.
        with client.app.session().no_autoflush:
            tickets = TicketCollection(client.app.session())
            tickets.open_ticket(
                handler_code='RSV', handler_id=resource_token.hex
            )
    return resource
def test_resource_room_deletion(client):
    """Deleting a room closes the tickets tied to its reservations."""
    resources = ResourceCollection(client.app.libres_context)
    foyer = resources.add('Foyer', 'Europe/Zurich', type='room')
    # Adds allocations and reservations in the past (plus an open ticket)
    add_reservation(
        foyer, client, datetime(2017, 1, 6, 12), datetime(2017, 1, 6, 16))
    assert foyer.deletable
    transaction.commit()
    client.login_admin()
    page = client.get('/resources').click('Foyer')
    delete_link = page.pyquery('a.delete-link').attr('ic-delete-from')
    assert delete_link
    assert client.delete(delete_link, status=200)
    # check if the tickets have been closed by the deletion
    tickets = TicketCollection(client.app.session())
    ticket = tickets.query().one()
    assert ticket.state == 'closed'
def test_reserved_resources_fields(client):
    """'Email' is a reserved field name unless namespaced by a fieldset."""
    client.login_admin()
    room = client.get('/resources').click('Raum')
    room.form['title'] = 'Meeting Room'
    # a top-level 'Email' field collides with the built-in reservation field
    room.form['definition'] = "Email *= @@@"
    room = room.form.submit()
    assert "'Email' ist ein reservierter Name" in room
    # fieldsets act as a namespace for field names
    room.form['definition'] = "# Title\nEmail *= @@@"
    room = room.form.submit().follow()
    assert "calendar" in room
    assert "Meeting Room" in room
def test_allocations(client):
    """Allocations can be created, edited, moved and deleted via the UI."""
    client.login_admin()
    items = client.get('/resources').click('Gegenstand')
    items.form['title'] = 'Beamer'
    items.form.submit().follow()
    # create new beamer allocation
    new = client.get((
        '/resource/beamer/new-allocation'
        '?start=2015-08-04&end=2015-08-05'
    ))
    new.form['items'] = 1
    new.form['item_limit'] = 1
    new.form.submit()
    # view the beamers
    slots = client.get((
        '/resource/beamer/slots'
        '?start=2015-08-04&end=2015-08-05'
    ))
    assert len(slots.json) == 2
    assert slots.json[0]['title'] == "Ganztägig \nVerfügbar"
    # change the beamers (raise the quota to 2)
    edit = client.get(client.extract_href(slots.json[0]['actions'][0]))
    edit.form['items'] = 2
    edit.form.submit()
    slots = client.get((
        '/resource/beamer/slots'
        '?start=2015-08-04&end=2015-08-04'
    ))
    assert len(slots.json) == 1
    assert slots.json[0]['title'] == "Ganztägig \n2 Verfügbar"
    # create a new daypass allocation
    new = client.get((
        '/resource/tageskarte/new-allocation'
        '?start=2015-08-04&end=2015-08-05'
    ))
    new.form['daypasses'] = 1
    new.form['daypasses_limit'] = 1
    new.form.submit()
    # view the daypasses
    slots = client.get((
        '/resource/tageskarte/slots'
        '?start=2015-08-04&end=2015-08-05'
    ))
    assert len(slots.json) == 2
    assert slots.json[0]['title'] == "Ganztägig \nVerfügbar"
    # change the daypasses (raise the quota to 2)
    edit = client.get(client.extract_href(slots.json[0]['actions'][0]))
    edit.form['daypasses'] = 2
    edit.form.submit()
    slots = client.get((
        '/resource/tageskarte/slots'
        '?start=2015-08-04&end=2015-08-04'
    ))
    assert len(slots.json) == 1
    assert slots.json[0]['title'] == "Ganztägig \n2 Verfügbar"
    # try to create a new allocation over an existing one -> rejected
    new = client.get((
        '/resource/tageskarte/new-allocation'
        '?start=2015-08-04&end=2015-08-04'
    ))
    new.form['daypasses'] = 1
    new.form['daypasses_limit'] = 1
    new = new.form.submit()
    assert "Es besteht bereits eine Einteilung im gewünschten Zeitraum" in new
    # move the existing allocations to Aug 6 / Aug 7
    slots = client.get((
        '/resource/tageskarte/slots'
        '?start=2015-08-04&end=2015-08-05'
    ))
    edit = client.get(client.extract_href(slots.json[0]['actions'][0]))
    edit.form['date'] = '2015-08-06'
    edit.form.submit()
    edit = client.get(client.extract_href(slots.json[1]['actions'][0]))
    edit.form['date'] = '2015-08-07'
    edit.form.submit()
    # get the new slots
    slots = client.get((
        '/resource/tageskarte/slots'
        '?start=2015-08-06&end=2015-08-07'
    ))
    assert len(slots.json) == 2
    # delete an allocation
    client.delete(client.extract_href(slots.json[0]['actions'][2]))
    # get the new slots
    slots = client.get((
        '/resource/tageskarte/slots'
        '?start=2015-08-06&end=2015-08-07'
    ))
    assert len(slots.json) == 1
    # delete the remaining allocation
    client.delete(client.extract_href(slots.json[0]['actions'][2]))
    # get the new slots
    slots = client.get((
        '/resource/tageskarte/slots'
        '?start=2015-08-06&end=2015-08-07'
    ))
    assert len(slots.json) == 0
def test_allocation_times(client):
    """An end time of 00:00 means midnight at the end of the given day."""
    client.login_admin()
    new = client.get('/resources').click('Raum')
    new.form['title'] = 'Meeting Room'
    new.form.submit()
    # 12:00 - 00:00 (ends at midnight of the following day)
    new = client.get('/resource/meeting-room/new-allocation')
    new.form['start'] = '2015-08-20'
    new.form['end'] = '2015-08-20'
    new.form['start_time'] = '12:00'
    new.form['end_time'] = '00:00'
    new.form['as_whole_day'] = 'no'
    new.form.submit()
    slots = client.get(
        '/resource/meeting-room/slots?start=2015-08-20&end=2015-08-20'
    )
    assert len(slots.json) == 1
    assert slots.json[0]['start'] == '2015-08-20T12:00:00+02:00'
    assert slots.json[0]['end'] == '2015-08-21T00:00:00+02:00'
    # 00:00 - 02:00 (starts at midnight of the same day)
    new = client.get('/resource/meeting-room/new-allocation')
    new.form['start'] = '2015-08-22'
    new.form['end'] = '2015-08-22'
    new.form['start_time'] = '00:00'
    new.form['end_time'] = '02:00'
    new.form['as_whole_day'] = 'no'
    new.form.submit()
    slots = client.get(
        '/resource/meeting-room/slots?start=2015-08-22&end=2015-08-22'
    )
    assert len(slots.json) == 1
    assert slots.json[0]['start'] == '2015-08-22T00:00:00+02:00'
    assert slots.json[0]['end'] == '2015-08-22T02:00:00+02:00'
    # 12:00 - 00:00 over two days creates one slot per day
    new = client.get('/resource/meeting-room/new-allocation')
    new.form['start'] = '2015-08-24'
    new.form['end'] = '2015-08-25'
    new.form['start_time'] = '12:00'
    new.form['end_time'] = '00:00'
    new.form['as_whole_day'] = 'no'
    new.form.submit()
    slots = client.get(
        '/resource/meeting-room/slots?start=2015-08-24&end=2015-08-25'
    )
    assert len(slots.json) == 2
    assert slots.json[0]['start'] == '2015-08-24T12:00:00+02:00'
    assert slots.json[0]['end'] == '2015-08-25T00:00:00+02:00'
    assert slots.json[1]['start'] == '2015-08-25T12:00:00+02:00'
    assert slots.json[1]['end'] == '2015-08-26T00:00:00+02:00'
def test_allocation_holidays(client):
    """The 'on_holidays' flag controls whether slots are created on holidays."""
    client.login_admin()
    # enable the cantonal holidays of Zürich
    page = client.get('/holiday-settings')
    page.select_checkbox('cantonal_holidays', "Zürich")
    page.form.submit()
    # allocations that are made during holidays
    page = client.get('/resources').click('Raum')
    page.form['title'] = 'Foo'
    page.form.submit()
    new = client.get('/resource/foo/new-allocation')
    new.form['start'] = '2019-07-30'
    new.form['end'] = '2019-08-02'
    new.form['start_time'] = '07:00'
    new.form['end_time'] = '12:00'
    new.form['on_holidays'] = 'yes'
    new.form['as_whole_day'] = 'no'
    new.form.submit()
    # all four days get a slot, including the Aug 1 holiday
    slots = client.get('/resource/foo/slots?start=2019-07-29&end=2019-08-03')
    assert len(slots.json) == 4
    assert slots.json[0]['start'].startswith('2019-07-30')
    assert slots.json[1]['start'].startswith('2019-07-31')
    assert slots.json[2]['start'].startswith('2019-08-01')
    assert slots.json[3]['start'].startswith('2019-08-02')
    # allocations that are not made during holidays
    page = client.get('/resources').click('Raum')
    page.form['title'] = 'Bar'
    page.form.submit()
    new = client.get('/resource/bar/new-allocation')
    new.form['start'] = '2019-07-30'
    new.form['end'] = '2019-08-02'
    new.form['start_time'] = '07:00'
    new.form['end_time'] = '12:00'
    new.form['on_holidays'] = 'no'
    new.form['as_whole_day'] = 'no'
    new.form.submit()
    # the Aug 1 holiday is skipped
    slots = client.get('/resource/bar/slots?start=2019-07-29&end=2019-08-03')
    assert len(slots.json) == 3
    assert slots.json[0]['start'].startswith('2019-07-30')
    assert slots.json[1]['start'].startswith('2019-07-31')
    assert slots.json[2]['start'].startswith('2019-08-02')
@freeze_time("2015-08-28")
def test_auto_accept_reservations(client):
    """With 'RSV' in ticket_auto_accepts, a reservation is accepted and its
    ticket closed automatically, with exactly one acceptance mail sent."""
    # prepare the required data
    resources = ResourceCollection(client.app.libres_context)
    resource = resources.by_name('tageskarte')
    resource.definition = 'Note = ___'
    resource.pick_up = 'You can pick it up at the counter'
    scheduler = resource.get_scheduler(client.app.libres_context)
    allocations = scheduler.allocate(
        dates=(datetime(2015, 8, 28), datetime(2015, 8, 28)),
        whole_day=True,
        quota=4,
        quota_limit=4
    )
    reserve = client.bound_reserve(allocations[0])
    transaction.commit()
    # enable auto-accepting of reservation tickets
    admin_client = client
    admin_client.login_admin()
    settings = admin_client.get('/ticket-settings')
    settings.form['ticket_auto_accepts'] = ['RSV']
    settings.form.submit()
    # create a reservation
    result = reserve(quota=4, whole_day=True)
    assert result.json == {'success': True}
    # and fill out the form
    formular = client.get('/resource/tageskarte/form')
    formular.form['email'] = '<EMAIL>'
    formular.form['note'] = 'Foobar'
    page = formular.form.submit().follow().form.submit().follow()
    assert 'Ihr Anliegen wurde abgeschlossen' in page
    assert 'Die Reservationen wurden angenommen' in page
    # exactly one acceptance mail was sent
    assert len(client.app.smtp.outbox) == 1
    message = get_mail(client.app.smtp.outbox, 0)
    assert 'Ihre Reservationen wurden angenommen' in message['subject']
    assert 'Foobar' in message['text']
    # the auto-accepted ticket already appears in the closed list
    tickets = client.get('/tickets/ALL/closed')
    assert 'RSV-' in tickets
    # Test display of status page of ticket:
    # the generic postal-documents note is absent, the pick-up text is shown
    assert 'Falls Sie Dokumente über den Postweg' not in page
    assert 'You can pick it up at the counter' in page
@freeze_time("2015-08-28")
def test_reserve_allocation(client):
# prepate the required data
resources = ResourceCollection(client.app.libres_context)
resource = resources.by_name('tageskarte')
resource.definition = 'Note = ___'
scheduler = resource.get_scheduler(client.app.libres_context)
allocations = scheduler.allocate(
dates=(datetime(2015, 8, 28), datetime(2015, 8, 28)),
whole_day=True,
quota=4,
quota_limit=4
)
reserve = client.bound_reserve(allocations[0])
transaction.commit()
# create a reservation
result = reserve(quota=4, whole_day=True)
assert result.json == {'success': True}
assert result.headers['X-IC-Trigger'] == 'rc-reservations-changed'
# and fill out the form
formular = client.get('/resource/tageskarte/form')
formular.form['email'] = '<EMAIL>'
formular.form['note'] = 'Foobar'
ticket = formular.form.submit().follow().form.submit().follow()
assert 'RSV-' in ticket.text
assert len(client.app.smtp.outbox) == 1
# make sure the resulting reservation has no session_id set
ids = [r.session_id for r in scheduler.managed_reservations()]
assert not any(ids)
# try to create another reservation the same time
result = reserve(quota=4, whole_day=True)
assert result.json == {
'success': False,
'message': 'Der gewünschte Zeitraum ist nicht mehr verfügbar.'
}
assert result.headers['X-IC-Trigger'] == 'rc-reservation-error'
assert json.loads(result.headers['X-IC-Trigger-Data']) == result.json
# try deleting the allocation with the existing reservation
client.login_admin()
slots = client.get((
'/resource/tageskarte/slots'
'?start=2015-08-28&end=2015-08-28'
))
assert len(slots.json) == 1
with pytest.raises(AffectedReservationError):
client.delete(client.extract_href(slots.json[0]['actions'][2]))
# open the created ticket
ticket = client.get('/tickets/ALL/open').click('Annehmen').follow()
assert 'Foobar' in ticket
| |
+ y, z + x), tvm.min(y, z) + x)
ck.verify(tvm.min(x + y, x + z), tvm.min(y, z) + x)
ck.verify(tvm.min(x - y, x - z), x - tvm.max(y, z))
ck.verify(tvm.min(y - x, z - x), tvm.min(y, z) - x)
ck.verify(tvm.min(tvm.min(x, 1), 10), tvm.min(x, 1))
ck.verify(tvm.min(tvm.min(x, 11), 10), tvm.min(x, 10))
ck.verify(tvm.min(x / 10, y / 10), tvm.min(x, y) / 10)
ck.verify(tvm.min(x / (-10), y / (-10)), tvm.max(x, y) / (-10))
ck.verify(tvm.min(x * 3, 9), tvm.min(x, 3) * 3)
ck.verify(tvm.min(3 - x, 2), 3 - tvm.max(x, 1))
def test_max_index_simplify():
    """Rewrite-simplifier checks for tvm.max expressions.

    Each ck.verify(expr, expected) asserts that the simplifier rewrites
    `expr` into exactly `expected`.
    """
    ck = RewriteChecker()
    x, y, z = tvm.var("x"), tvm.var("y"), tvm.var("z")

    # const int bound
    ck.verify(tvm.max(x % 2, y % 2 + 10), y % 2 + 10)
    # constant-offset comparisons fold to the larger branch
    ck.verify(tvm.max(x + 1, x + 10), x + 10)
    ck.verify(tvm.max(x + 111, x + 10), x + 111)
    ck.verify(tvm.max(x + 1, x), x + 1)
    ck.verify(tvm.max(x, x + 2), x + 2)
    ck.verify(tvm.max(1 - x, 2 - x), 2 - x)
    ck.verify(tvm.max(3 - x, 2 - x), 3 - x)
    ck.verify(tvm.max((x + 3) / 4 * 4, x), (x + 3) / 4 * 4)
    # nested min/max cancellation and absorption
    ck.verify(tvm.max(tvm.min(x, y), tvm.max(x, y)), tvm.max(x, y))
    ck.verify(tvm.max(tvm.min(x, y), tvm.max(y, x)), tvm.max(x, y))
    ck.verify(tvm.max(tvm.min(x, y), x), x)
    ck.verify(tvm.max(tvm.min(y, x), x), x)
    ck.verify(tvm.max(tvm.max(x, y), x), tvm.max(x, y))
    ck.verify(tvm.max(tvm.max(x, y), y), tvm.max(x, y))
    ck.verify(tvm.max(x, tvm.min(x, y)), x)
    ck.verify(tvm.max(x, tvm.min(y, x)), x)
    ck.verify(tvm.max(x, tvm.max(x, y)), tvm.max(x, y))
    ck.verify(tvm.max(y, tvm.max(x, y)), tvm.max(x, y))
    # deeply nested duplicates are deduplicated
    ck.verify(tvm.max(tvm.max(tvm.max(x, y), z), y),
              tvm.max(tvm.max(x, y), z))
    ck.verify(tvm.max(tvm.max(tvm.max(tvm.max(x, y), z), x * 2), y),
              tvm.max(tvm.max(tvm.max(x, y), z), x * 2))
    ck.verify(tvm.max(tvm.max(tvm.max(tvm.max(tvm.max(x, y), z), x * 2), z * 2), y),
              tvm.max(tvm.max(tvm.max(tvm.max(x, y), z), x * 2), z * 2))
    # max of mins sharing a common operand
    ck.verify(tvm.max(tvm.min(x, y), tvm.min(x, z)), tvm.min(tvm.max(y, z), x))
    ck.verify(tvm.max(tvm.min(x, y), tvm.min(z, x)), tvm.min(tvm.max(y, z), x))
    ck.verify(tvm.max(tvm.min(y, x), tvm.min(x, z)), tvm.min(tvm.max(y, z), x))
    ck.verify(tvm.max(tvm.min(y, x), tvm.min(z, x)), tvm.min(tvm.max(y, z), x))
    # common add/sub terms are factored out
    ck.verify(tvm.max(y + x, z + x), tvm.max(y, z) + x)
    ck.verify(tvm.max(y + x, x + z), tvm.max(y, z) + x)
    ck.verify(tvm.max(x + y, z + x), tvm.max(y, z) + x)
    ck.verify(tvm.max(x + y, x + z), tvm.max(y, z) + x)
    ck.verify(tvm.max(x - y, x - z), x - tvm.min(y, z))
    ck.verify(tvm.max(y - x, z - x), tvm.max(y, z) - x)
    # constant folding inside nested max
    ck.verify(tvm.max(tvm.max(x, 1), 10), tvm.max(x, 10))
    ck.verify(tvm.max(tvm.max(x, 11), 10), tvm.max(x, 11))
    # common div/mul factors; a negative factor flips max to min
    ck.verify(tvm.max(x / 10, y / 10), tvm.max(x, y) / 10)
    ck.verify(tvm.max(x / (-10), y / (-10)), tvm.min(x, y) / (-10))
    ck.verify(tvm.max(x * 3, 9), tvm.max(x, 3) * 3)
    ck.verify(tvm.max(3 - x, 1), 3 - tvm.min(x, 2))
def test_cmp_simplify():
    """Rewrite-simplifier checks for comparison expressions.

    Each ck.verify(expr, expected) asserts that the simplifier rewrites
    `expr` into exactly `expected`.
    """
    ck = RewriteChecker()
    x, y, z = tvm.var("x"), tvm.var("y"), tvm.var("z")

    # const int bound
    ck.verify((x % 2 + 10).equal(0), tvm.const(0, "bool"))
    ck.verify(tvm.expr.NE(x % 2 + 10, 0), tvm.const(1, "bool"))
    ck.verify(x % 2 + 10 > 1, tvm.const(1, "bool"))
    ck.verify(x % 2 + 10 <= 1, tvm.const(0, "bool"))
    ck.verify(x * 3 + 10 == 0, tvm.const(0, "bool"))
    ck.verify(x * 3 + 10 != 0, tvm.const(1, "bool"))

    # canonicalization
    ck.verify((x - 10).equal(0), x.equal(10))
    ck.verify((10 - x).equal(0), x.equal(10))
    ck.verify((x * y).equal(0), tvm.expr.Or(x.equal(0), y.equal(0)))

    # cmp bound
    # common terms cancel on both sides of a comparison
    ck.verify(x + y < x + z, y < z)
    ck.verify(x + y < z + x, y < z)
    ck.verify(y + x < x + z, y < z)
    ck.verify(y + x < z + x, y < z)
    ck.verify(y - x < z - x, y < z)
    ck.verify(x - y < x - z, z < y)

    ck.verify(x < z + x, tvm.expr.LT(0, z))
    ck.verify(x < x + z, tvm.expr.LT(0, z))
    ck.verify(100 < x + 1, tvm.expr.LT(99, x))
    ck.verify(1 < 100 - x, tvm.expr.LT(x, 99))
    # scaling: a negative factor flips the comparison direction
    ck.verify(x * 3 < y * 3, x < y)
    ck.verify(x * (-3) < y * (-3), y < x)
    ck.verify(x * 3 >= y * 3, y <= x)

    # comparisons with scaled variables tighten to integer bounds
    ck.verify(x * 4 >= 2, tvm.expr.LE(1, x))
    ck.verify(x * 2 >= 50, tvm.expr.LE(25, x))
    ck.verify(x / 2 < 3, x < 6)
    ck.verify(x * 4 <= 2, x <= 0)
    ck.verify(3 < x / 2, tvm.expr.LT(7, x))
    ck.verify(x / 3 >= 0, tvm.expr.LE(-2, x))
    ck.verify((0 - x * 3) <= 0, tvm.expr.LE(0, x))
    ck.verify((0 - x * 3) >= 0, tvm.expr.LE(x, 0))
    ck.verify(2 * x <= 0, x <= 0)

    # exhaustive small-constant cases for x * 2 and x * (-2)
    ck.verify(x * 2 >= 3, tvm.expr.LE(2, x))
    ck.verify(x * 2 >= 2, tvm.expr.LE(1, x))
    ck.verify(x * 2 >= 1, tvm.expr.LE(1, x))
    ck.verify(x * 2 >= 0, tvm.expr.LE(0, x))
    ck.verify(x * 2 >= -1, tvm.expr.LE(0, x))
    ck.verify(x * 2 >= -2, tvm.expr.LE(-1, x))
    ck.verify(x * 2 >= -3, tvm.expr.LE(-1, x))

    ck.verify(x * 2 <= 3, tvm.expr.LE(x, 1))
    ck.verify(x * 2 <= 2, tvm.expr.LE(x, 1))
    ck.verify(x * 2 <= 1, tvm.expr.LE(x, 0))
    ck.verify(x * 2 <= 0, tvm.expr.LE(x, 0))
    ck.verify(x * 2 <= -1, tvm.expr.LE(x, -1))
    ck.verify(x * 2 <= -2, tvm.expr.LE(x, -1))
    ck.verify(x * 2 <= -3, tvm.expr.LE(x, -2))

    ck.verify(x * (-2) >= 3, tvm.expr.LE(x, -2))
    ck.verify(x * (-2) >= 2, tvm.expr.LE(x, -1))
    ck.verify(x * (-2) >= 1, tvm.expr.LE(x, -1))
    ck.verify(x * (-2) >= 0, tvm.expr.LE(x, 0))
    ck.verify(x * (-2) >= -1, tvm.expr.LE(x, 0))
    ck.verify(x * (-2) >= -2, tvm.expr.LE(x, 1))
    ck.verify(x * (-2) >= -3, tvm.expr.LE(x, 1))

    ck.verify(x * (-2) <= 3, tvm.expr.LE(-1, x))
    ck.verify(x * (-2) <= 2, tvm.expr.LE(-1, x))
    ck.verify(x * (-2) <= 1, tvm.expr.LE(0, x))
    ck.verify(x * (-2) <= 0, tvm.expr.LE(0, x))
    ck.verify(x * (-2) <= -1, tvm.expr.LE(1, x))
    ck.verify(x * (-2) <= -2, tvm.expr.LE(1, x))
    ck.verify(x * (-2) <= -3, tvm.expr.LE(2, x))

    # division comparisons
    ck.verify(x / 2 >= 1, tvm.expr.LE(2, x))
    ck.verify(x / 2 >= 0, tvm.expr.LE(-1, x))
    ck.verify(x / 2 >= -1, tvm.expr.LE(-3, x))

    ck.verify(x / 2 <= 1, tvm.expr.LE(x, 3))
    ck.verify(x / 2 <= 0, tvm.expr.LE(x, 1))
    ck.verify(x / 2 <= -1, tvm.expr.LE(x, -2))

    # (x / c) * c round-down comparisons become modulo conditions
    ck.verify(x / 4 * 4 < x, tvm.expr.LT(0, x % 4))
    ck.verify(x / 4 * 4 >= x, tvm.expr.LE(x % 4, 0))

    ck.verify(x / 4 * 4 < x + y, tvm.expr.LT(0, x % 4 + y))
    ck.verify(x / 4 * 4 < x - y, tvm.expr.LT(y, x % 4))

    ck.verify((x + 2) / 4 * 4 >= x, tvm.expr.LE((x + 2) % 4, 2))
    ck.verify((x + 2) / 4 * 4 >= x + y, tvm.expr.LE((x + 2) % 4 + y, 2))
    ck.verify((x + 2) / 4 * 4 >= x - y, tvm.expr.LE((x + 2) % 4 + (-2), y))

    # comparisons against min/max fold where the bound decides
    ck.verify(tvm.min(x, 11) < 10, x < 10)
    ck.verify(tvm.min(x, 8) < 10, tvm.const(1, "bool"))
    ck.verify(tvm.max(8, x) > 10, tvm.expr.LT(10, x))
    ck.verify(x + 1 < tvm.max(8, x), x < 7)

    # with known const-int bounds on x, y, z many comparisons fold to constants
    ck.analyzer.update(x, tvm.arith.ConstIntBound(0, 10), override=True)
    ck.analyzer.update(y, tvm.arith.ConstIntBound(-10, 0), override=True)
    ck.analyzer.update(z, tvm.arith.ConstIntBound(-5, 5), override=True)

    ck.verify(x < 11, tvm.const(1, "bool"))
    ck.verify(x <= 10, tvm.const(1, "bool"))
    ck.verify(z <= 5, tvm.const(1, "bool"))
    ck.verify(x + y <= 10, tvm.const(1, "bool"))
    ck.verify(x + y >= -10, tvm.const(1, "bool"))
    ck.verify(z - 5 <= y + 10, tvm.const(1, "bool"))
    ck.verify(tvm.all(x > -1, z <= x + 5), tvm.const(1, "bool"))
    ck.verify(x*y <= 0, tvm.const(1, "bool"))
    ck.verify((x + 1)*(y - 1) < 0, tvm.const(1, "bool"))
    ck.verify(y*y >= 0, tvm.const(1, "bool"))
    ck.verify(x*6 <= -3, tvm.const(0, "bool"))
    ck.verify((y - 1) % 3 == 0, (y + (-1)) % 3 == 0)
def test_logical_simplify():
ck = RewriteChecker()
x, y, z = tvm.var("x"), tvm.var("y"), tvm.var("z")
ck.verify(tvm.expr.And(tvm.expr.EQ(x, y), tvm.expr.NE(x, y)),
tvm.const(False, "bool"))
ck.verify(tvm.expr.And(tvm.expr.NE(x, y), tvm.expr.EQ(x, y)),
tvm.const(False, "bool"))
ck.verify(tvm.expr.And(x > 1, tvm.expr.Not(x > 1)), tvm.const(False, "bool"))
ck.verify(tvm.expr.And(x <= y, y < x), tvm.const(False, "bool"))
ck.verify(tvm.expr.And(y < x, y <= x), tvm.const(False, "bool"))
ck.verify(tvm.expr.And(x < 1, 0 < x), tvm.const(False, "bool"))
ck.verify(tvm.expr.And(x < 0, 1 < x), tvm.const(False, "bool"))
ck.verify(tvm.expr.And(x < 1, 1 <= x), tvm.const(False, "bool"))
ck.verify(tvm.expr.And(x <= 1, 1 < x), tvm.const(False, "bool"))
ck.verify(tvm.expr.And(1 <= x, x < 1), tvm.const(False, "bool"))
ck.verify(tvm.expr.And(1 < x, x <= 1), tvm.const(False, "bool"))
ck.verify(tvm.expr.And(x <= 1, 2 <= x), tvm.const(False, "bool"))
ck.verify(tvm.expr.And(2 <= x, x <= 1), tvm.const(False, "bool"))
ck.verify(tvm.expr.And(x == 1, x != 2), x == 1)
ck.verify(tvm.expr.Or(tvm.expr.EQ(x, y), tvm.expr.NE(x, y)),
tvm.const(True, "bool"))
ck.verify(tvm.expr.Or(tvm.expr.NE(x, y), tvm.expr.EQ(x, y)),
tvm.const(True, "bool"))
ck.verify(tvm.expr.Or(x > y, tvm.expr.Not(x > y)), tvm.const(True, "bool"))
ck.verify(tvm.expr.Or(x <= | |
"""
MIT License
Copyright (c) 2020 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from collections import namedtuple
from ctypes import c_uint32
from functools import partial, partialmethod
import struct
# Map of RISC-V register names to their numeric indices (x0-x31). Both the
# raw xN names and the standard ABI mnemonics are accepted, so several keys
# alias the same register number (e.g. 'x8', 's0' and 'fp').
REGISTERS = {
    'x0': 0, 'zero': 0,
    'x1': 1, 'ra': 1,
    'x2': 2, 'sp': 2,
    'x3': 3, 'gp': 3,
    'x4': 4, 'tp': 4,
    'x5': 5, 't0': 5,
    'x6': 6, 't1': 6,
    'x7': 7, 't2': 7,
    'x8': 8, 's0': 8, 'fp': 8,
    'x9': 9, 's1': 9,
    'x10': 10, 'a0': 10,
    'x11': 11, 'a1': 11,
    'x12': 12, 'a2': 12,
    'x13': 13, 'a3': 13,
    'x14': 14, 'a4': 14,
    'x15': 15, 'a5': 15,
    'x16': 16, 'a6': 16,
    'x17': 17, 'a7': 17,
    'x18': 18, 's2': 18,
    'x19': 19, 's3': 19,
    'x20': 20, 's4': 20,
    'x21': 21, 's5': 21,
    'x22': 22, 's6': 22,
    'x23': 23, 's7': 23,
    'x24': 24, 's8': 24,
    'x25': 25, 's9': 25,
    'x26': 26, 's10': 26,
    'x27': 27, 's11': 27,
    'x28': 28, 't3': 28,
    'x29': 29, 't4': 29,
    'x30': 30, 't5': 30,
    'x31': 31, 't6': 31,
}
def resolve_register(reg):
    """Resolve a register name or number to its index (0-31).

    :param reg: an ABI/xN register name, an int, or anything int() accepts
    :return: the register number as an int
    :raises ValueError: if the name is unknown or the index is out of range
    """
    # Translate a symbolic name first; unknown strings fall through to int().
    if reg in REGISTERS:
        reg = REGISTERS[reg]
    try:
        reg = int(reg)
    except ValueError:
        # Suppress the int() context ('from None'): the implicit chained
        # traceback adds no information to this error.
        raise ValueError('Register is not a number or valid name: {}'.format(reg)) from None
    if reg < 0 or reg > 31:
        raise ValueError('Register must be between 0 and 31: {}'.format(reg))
    return reg
def sign_extend(value, bits):
    """Interpret the low *bits* bits of *value* as a two's-complement int."""
    mask = 1 << (bits - 1)
    low = value & ((1 << bits) - 1)
    # xor-and-subtract trick: flips the sign bit, then shifts the range
    # down so values with the sign bit set come out negative.
    return (low ^ mask) - mask
def relocate_hi(imm):
    """Compute the %hi relocation value: the upper 20 bits, sign-extended.

    When bit 11 of imm is set, the corresponding %lo value is negative,
    so the upper part is bumped by one unit (2**12) to compensate.
    """
    carry = 0x1000 if imm & 0x800 else 0
    return sign_extend(((imm + carry) >> 12) & 0x000fffff, 20)
def relocate_lo(imm):
    """Compute the %lo relocation value: the low 12 bits, sign-extended."""
    low_bits = imm & 0x00000fff
    return sign_extend(low_bits, 12)
def r_type(rd, rs1, rs2, opcode, funct3, funct7):
    """Encode an R-type (register-register) instruction.

    :return: the 32-bit instruction word as 4 little-endian bytes
    :raises ValueError: if any register operand is invalid
    """
    # (field value, bit position) pairs in R-format layout order.
    fields = (
        (opcode, 0),
        (resolve_register(rd), 7),
        (funct3, 12),
        (resolve_register(rs1), 15),
        (resolve_register(rs2), 20),
        (funct7, 25),
    )
    word = 0
    for value, shift in fields:
        word |= value << shift
    return struct.pack('<I', word)
def i_type(rd, rs1, imm, opcode, funct3):
    """Encode an I-type (register + 12-bit signed immediate) instruction.

    :return: the 32-bit instruction word as 4 little-endian bytes
    :raises ValueError: on a bad register or out-of-range immediate
    """
    rd = resolve_register(rd)
    rs1 = resolve_register(rs1)
    if not -0x800 <= imm <= 0x7ff:
        raise ValueError('12-bit immediate must be between -0x800 (-2048) and 0x7ff (2047): {}'.format(imm))
    # Two's-complement truncation of the immediate to 12 bits.
    imm12 = c_uint32(imm).value & 0xfff
    word = opcode
    word |= rd << 7
    word |= funct3 << 12
    word |= rs1 << 15
    word |= imm12 << 20
    return struct.pack('<I', word)
def s_type(rs1, rs2, imm, opcode, funct3):
    """Encode an S-type (store) instruction.

    The 12-bit immediate is split: imm[4:0] occupies bits 11:7 and
    imm[11:5] occupies bits 31:25.

    :return: the 32-bit instruction word as 4 little-endian bytes
    :raises ValueError: on a bad register or out-of-range immediate
    """
    rs1 = resolve_register(rs1)
    rs2 = resolve_register(rs2)
    if not -0x800 <= imm <= 0x7ff:
        raise ValueError('12-bit immediate must be between -0x800 (-2048) and 0x7ff (2047): {}'.format(imm))
    # Two's-complement truncation of the immediate to 12 bits.
    imm12 = c_uint32(imm).value & 0xfff
    word = opcode
    word |= (imm12 & 0b11111) << 7
    word |= funct3 << 12
    word |= rs1 << 15
    word |= rs2 << 20
    word |= ((imm12 >> 5) & 0b1111111) << 25
    return struct.pack('<I', word)
def b_type(rs1, rs2, imm, opcode, funct3):
    """Encode a B-type (conditional branch) instruction.

    :param rs1: first source register (name or number)
    :param rs2: second source register (name or number)
    :param imm: branch offset in bytes; must be even and within
        [-0x1000, 0x0fff]
    :param opcode: 7-bit opcode
    :param funct3: 3-bit function code
    :return: the 32-bit instruction word as 4 little-endian bytes
    :raises ValueError: on a bad register, an out-of-range immediate,
        or an odd immediate
    """
    rs1 = resolve_register(rs1)
    rs2 = resolve_register(rs2)
    if imm < -0x1000 or imm > 0x0fff:
        raise ValueError('12-bit multiple of 2 immediate must be between -0x1000 (-4096) and 0x0fff (4095): {}'.format(imm))
    if imm % 2 == 1:
        # Fixed message typo: 'muliple' -> 'multiple'.
        raise ValueError('12-bit multiple of 2 immediate must be a multiple of 2: {}'.format(imm))
    # Bit 0 of the offset is implicit (always zero): store imm/2 and
    # scatter its bits into the B-format slots.
    imm = imm // 2
    imm = c_uint32(imm).value & 0b111111111111
    imm_12 = (imm >> 11) & 0b1
    imm_11 = (imm >> 10) & 0b1
    imm_10_5 = (imm >> 4) & 0b111111
    imm_4_1 = imm & 0b1111
    code = 0
    code |= opcode
    code |= imm_11 << 7
    code |= imm_4_1 << 8
    code |= funct3 << 12
    code |= rs1 << 15
    code |= rs2 << 20
    code |= imm_10_5 << 25
    code |= imm_12 << 31
    return struct.pack('<I', code)
def u_type(rd, imm, opcode):
    """Encode a U-type (20-bit upper immediate) instruction.

    :return: the 32-bit instruction word as 4 little-endian bytes
    :raises ValueError: on a bad register or out-of-range immediate
    """
    rd = resolve_register(rd)
    if not -0x80000 <= imm <= 0x7ffff:
        raise ValueError('20-bit immediate must be between -0x80000 (-524288) and 0x7ffff (524287): {}'.format(imm))
    # Two's-complement truncation of the immediate to 20 bits.
    imm20 = c_uint32(imm).value & 0xfffff
    word = opcode | (rd << 7) | (imm20 << 12)
    return struct.pack('<I', word)
def j_type(rd, imm, opcode):
    """Encode a J-type (jump) instruction.

    :param rd: destination register (name or number)
    :param imm: jump offset in bytes; must be even and within
        [-0x100000, 0x0fffff]
    :param opcode: 7-bit opcode
    :return: the 32-bit instruction word as 4 little-endian bytes
    :raises ValueError: on a bad register, an out-of-range immediate,
        or an odd immediate
    """
    rd = resolve_register(rd)
    if imm < -0x100000 or imm > 0x0fffff:
        raise ValueError('20-bit multiple of 2 immediate must be between -0x100000 (-1048576) and 0x0fffff (1048575): {}'.format(imm))
    if imm % 2 == 1:
        # Fixed message typo: 'muliple' -> 'multiple'.
        raise ValueError('20-bit multiple of 2 immediate must be a multiple of 2: {}'.format(imm))
    # Bit 0 of the offset is implicit (always zero): store imm/2 and
    # scatter its bits into the J-format slots.
    imm = imm // 2
    imm = c_uint32(imm).value & 0b11111111111111111111
    imm_20 = (imm >> 19) & 0b1
    imm_19_12 = (imm >> 11) & 0b11111111
    imm_11 = (imm >> 10) & 0b1
    imm_10_1 = imm & 0b1111111111
    code = 0
    code |= opcode
    code |= rd << 7
    code |= imm_19_12 << 12
    code |= imm_11 << 20
    code |= imm_10_1 << 21
    code |= imm_20 << 31
    return struct.pack('<I', code)
# RV32I instruction constructors. Each one is a format helper with its
# fixed opcode/funct fields pre-bound via functools.partial, leaving only
# the operands (registers and/or immediate) for the caller to supply.

# Upper-immediate and jump instructions.
LUI = partial(u_type, opcode=0b0110111)
AUIPC = partial(u_type, opcode=0b0010111)
JAL = partial(j_type, opcode=0b1101111)
JALR = partial(i_type, opcode=0b1100111, funct3=0b000)
# Conditional branches (B-format, funct3 selects the comparison).
BEQ = partial(b_type, opcode=0b1100011, funct3=0b000)
BNE = partial(b_type, opcode=0b1100011, funct3=0b001)
BLT = partial(b_type, opcode=0b1100011, funct3=0b100)
BGE = partial(b_type, opcode=0b1100011, funct3=0b101)
BLTU = partial(b_type, opcode=0b1100011, funct3=0b110)
BGEU = partial(b_type, opcode=0b1100011, funct3=0b111)
# Loads (I-format, funct3 selects width/signedness).
LB = partial(i_type, opcode=0b0000011, funct3=0b000)
LH = partial(i_type, opcode=0b0000011, funct3=0b001)
LW = partial(i_type, opcode=0b0000011, funct3=0b010)
LBU = partial(i_type, opcode=0b0000011, funct3=0b100)
LHU = partial(i_type, opcode=0b0000011, funct3=0b101)
# Stores (S-format).
SB = partial(s_type, opcode=0b0100011, funct3=0b000)
SH = partial(s_type, opcode=0b0100011, funct3=0b001)
SW = partial(s_type, opcode=0b0100011, funct3=0b010)
# Register-immediate ALU operations.
ADDI = partial(i_type, opcode=0b0010011, funct3=0b000)
SLTI = partial(i_type, opcode=0b0010011, funct3=0b010)
SLTIU = partial(i_type, opcode=0b0010011, funct3=0b011)
XORI = partial(i_type, opcode=0b0010011, funct3=0b100)
ORI = partial(i_type, opcode=0b0010011, funct3=0b110)
ANDI = partial(i_type, opcode=0b0010011, funct3=0b111)
# Immediate shifts reuse the R-type encoder: the shift amount is the
# value the caller passes in the rs2 position.
SLLI = partial(r_type, opcode=0b0010011, funct3=0b001, funct7=0b0000000)
SRLI = partial(r_type, opcode=0b0010011, funct3=0b101, funct7=0b0000000)
SRAI = partial(r_type, opcode=0b0010011, funct3=0b101, funct7=0b0100000)
# Register-register ALU operations.
ADD = partial(r_type, opcode=0b0110011, funct3=0b000, funct7=0b0000000)
SUB = partial(r_type, opcode=0b0110011, funct3=0b000, funct7=0b0100000)
SLL = partial(r_type, opcode=0b0110011, funct3=0b001, funct7=0b0000000)
SLT = partial(r_type, opcode=0b0110011, funct3=0b010, funct7=0b0000000)
SLTU = partial(r_type, opcode=0b0110011, funct3=0b011, funct7=0b0000000)
XOR = partial(r_type, opcode=0b0110011, funct3=0b100, funct7=0b0000000)
SRL = partial(r_type, opcode=0b0110011, funct3=0b101, funct7=0b0000000)
SRA = partial(r_type, opcode=0b0110011, funct3=0b101, funct7=0b0100000)
OR = partial(r_type, opcode=0b0110011, funct3=0b110, funct7=0b0000000)
AND = partial(r_type, opcode=0b0110011, funct3=0b111, funct7=0b0000000)
# System instructions. NOTE(review): ECALL and EBREAK are bound to the same
# opcode/funct3 here, so they are distinguished only by the immediate the
# caller supplies (0 vs 1 in the RISC-V spec) — confirm call sites.
FENCE = partial(i_type, opcode=0b0001111, funct3=0b000)
ECALL = partial(i_type, opcode=0b1110011, funct3=0b000)
EBREAK = partial(i_type, opcode=0b1110011, funct3=0b000)
class Program:
RTypeInstruction = namedtuple('RTypeInstruction', 'inst location rd rs1 rs2')
ITypeInstruction = namedtuple('ITypeInstruction', 'inst location rd rs1 imm')
STypeInstruction = namedtuple('STypeInstruction', 'inst location rs1 rs2 imm')
BTypeInstruction = namedtuple('BTypeInstruction', 'inst location rs1 rs2 imm')
UTypeInstruction = namedtuple('UTypeInstruction', 'inst location rd imm')
JTypeInstruction = namedtuple('JTypeInstruction', 'inst location rd imm')
Blob = namedtuple('Blob', 'data')
Align = namedtuple('Align', 'boundary')
def __init__(self):
self.instructions = []
self.labels = {}
self.location = 0
@property
def machine_code(self):
code = bytearray()
for instruction in self.instructions:
if isinstance(instruction, Program.RTypeInstruction):
inst, location, rd, rs1, rs2 = instruction
code.extend(inst(rd, rs1, rs2))
elif isinstance(instruction, Program.ITypeInstruction):
inst, location, rd, rs1, imm = instruction
imm = self._resolve_immediate(imm, location, inst)
code.extend(inst(rd, rs1, imm))
elif isinstance(instruction, Program.STypeInstruction):
inst, location, rs1, rs2, imm = instruction
imm = self._resolve_immediate(imm, location, inst)
code.extend(inst(rs1, rs2, imm))
elif isinstance(instruction, Program.BTypeInstruction):
inst, location, rs1, rs2, imm = instruction
imm = self._resolve_immediate(imm, location, inst)
code.extend(inst(rs1, rs2, imm))
elif isinstance(instruction, Program.UTypeInstruction):
inst, location, rd, imm = instruction
imm = self._resolve_immediate(imm, location, inst)
code.extend(inst(rd, imm))
elif isinstance(instruction, Program.JTypeInstruction):
inst, location, rd, imm = instruction
imm = self._resolve_immediate(imm, location, inst)
code.extend(inst(rd, imm))
elif isinstance(instruction, Program.Blob):
code.extend(instruction.data)
elif isinstance(instruction, Program.Align):
while len(code) % instruction.boundary != 0:
code.append(0)
else:
raise ValueError('Invalid instruction type')
return bytes(code)
def __enter__(self):
pass
def __exit__(self, *args):
pass
def _resolve_immediate(self, imm, location, instruction):
# check if immediate | |
#!/usr/bin/env python3
"""
Comments in PyCharm style.
References
- Tag sorter by Zack
/common-samples/blob/master/tools/net/tag_sorter/tag_sorter.py
- README standard format
/common-samples/wiki/Standard-sample-documentation-template-%28README.md%29
"""
import os
import re
import typing
import argparse
# region Global sets
# A set of words that get omitted during letter-case checks: product and
# class names (and a few full sample titles) that legitimately break the
# sentence-case rules.
exception_proper_nouns = {
    'WmsLayer',
    'ArcGIS Online',
    'OAuth',
    'Web Mercator',
    'ArcGIS Pro',
    'GeoPackage',
    'loadStatus',
    'Integrated Windows Authentication',
    'GeoElement',
    'Network Link',
    'Network Link Control',
    'Open Street Map',
    'OpenStreetMap',
    'Play a KML Tour'
}
# A set of category folder names in the current sample viewer.
categories = {
    'Maps',
    'Layers',
    'Features',
    'Display information',
    'Search',
    'Edit data',
    'Geometry',
    'Route and directions',
    'Analysis',
    'Cloud and portal',
    'Scenes',
    'Utility network',
    'Augmented reality'
}
# endregion
# region Static functions
def get_folder_name_from_path(path: str, index: int = -1) -> str:
    """
    Return one component of a full path (by default the trailing folder).
    :param path: A string of a full/absolute path to a folder.
    :param index: The index of path parts. Default to -1 to get the most
    trailing folder in the path; set to certain index to get other parts.
    :return: The folder name.
    """
    components = os.path.normpath(path).split(os.path.sep)
    return components[index]
def parse_head(head_string: str) -> (str, str):
    """
    Parse the head of README and get title and description.
    :param head_string: A string containing title, description and images.
    :return: Stripped title and description strings.
    """
    # Drop empty lines; the first three remaining parts should be the
    # title, the description and the image line.
    non_empty_lines = [line for line in head_string.splitlines() if line]
    if len(non_empty_lines) < 3:
        raise Exception('README should contain title, description and image.')
    title = non_empty_lines[0].lstrip('# ').rstrip()
    description = non_empty_lines[1].strip()
    return title, description
def check_apis(apis_string: str) -> typing.Set[str]:
    """
    Check the format for `Relevant API` section.
    :param apis_string: A multiline string containing all APIs.
    :return: A set of APIs. Throws if format is wrong.
    """
    raw_lines = apis_string.strip().splitlines()
    if not raw_lines:
        raise Exception('Empty Relevant APIs.')
    cleaned = []
    for line in raw_lines:
        # Bullet is checked by the linter, no need to check here.
        name = line.lstrip('*- ').rstrip()
        cleaned.append(name)
        if '`' in name:
            raise Exception('API should not include backticks.')
    unique = set(cleaned)
    if '' in unique:
        raise Exception('Empty line in APIs.')
    if len(raw_lines) > len(unique):
        raise Exception('Duplicate APIs.')
    # Case-insensitive alphabetical order is required.
    if cleaned != sorted(cleaned, key=str.casefold):
        raise Exception('APIs are not sorted.')
    return unique
def check_tags(tags_string: str) -> typing.Set[str]:
    """
    Check the format for `Tags` section.
    :param tags_string: A string containing all tags, with comma as delimiter.
    :return: A set of tags. Throws if format is wrong.
    """
    raw_tags = tags_string.split(',')
    if not raw_tags:
        raise Exception('Empty tags.')
    cleaned = []
    for raw_tag in raw_tags:
        tag = raw_tag.strip()
        cleaned.append(tag)
        # A tag may be lowercase, UPPERCASE, Capitalized, or a known
        # proper noun; anything else is a letter-case violation.
        if tag.lower() != tag and tag.upper() != tag \
                and tag.capitalize() != tag \
                and tag not in exception_proper_nouns:
            raise Exception(f'Wrong letter case for tag: "{tag}".')
    unique = set(cleaned)
    if '' in unique:
        raise Exception('Empty char in tags.')
    if ', '.join(cleaned) != tags_string.strip():
        raise Exception('Extra whitespaces in tags.')
    if len(raw_tags) > len(unique):
        raise Exception('Duplicate tags.')
    # Case-insensitive alphabetical order is required.
    if cleaned != sorted(cleaned, key=str.casefold):
        raise Exception('Tags are not sorted.')
    return unique
def check_sentence_case(string: str) -> None:
    """
    Check if a sentence follows 'sentence case'. A few examples below.
    Hello world! -> YES
    I'm a good guy. -> YES
    a man and a gun. -> NO
    A WMS layer -> YES, as it's allowed to include proper nouns
    :param string: Input sentence, typically the title string.
    :return: None. Throws if is not sentence case.
    """
    if not string:
        raise Exception('Empty title string.')
    # A title that is itself a known proper noun is accepted as-is.
    if string in exception_proper_nouns:
        return
    words = string.split()
    first_word = words[0]
    # The first word must be capitalized, an acronym, or a proper noun.
    if first_word[0].upper() != first_word[0] \
            and first_word.upper() != first_word \
            and first_word not in exception_proper_nouns:
        raise Exception('Wrong letter case for the first word in title.')
    # Every other word must be lowercase, UPPERCASE, or a proper noun.
    for word in words[1:]:
        word = word.strip('()')
        if word.lower() != word and word.upper() != word \
                and word not in exception_proper_nouns:
            raise Exception(f'Wrong letter case for word: "{word}" in title.')
def check_is_subsequence(list_a: typing.List[str],
                         list_b: typing.List[str]) -> int:
    """
    Check if list A is a subsequence of list B.
    E.g.
    list_a = ['a', 'b', 'c']
    list_b = ['a', 'h', 'b', 'g', 'd', 'c']
    -> returns 0, which means all elements in list_a are also in list_b
    :param list_a: A list of strings, presumably the section titles of a README.
    :param list_b: A list of strings, presumably all valid titles in order.
    :return: 0 if list_a is a subsequence of list_b, a truthy positive int
        (count of unmatched leading elements) otherwise.
    """
    # An empty list is always a subsequence. Return 0 (not True, as
    # before) so the result is consistently an int and falsy on success.
    if not list_a:
        return 0
    # Scan both lists from the end, consuming a list_a element on match.
    pa = len(list_a)
    for pb in range(len(list_b), 0, -1):
        # Guard pa > 0: without it, list_a[pa - 1] wraps around to the
        # last element once fully matched, and pa could underflow below
        # zero (e.g. ['c'] vs ['c', 'c'] used to return -1, a truthy
        # "not a subsequence" for a valid subsequence).
        if pa and list_b[pb - 1] == list_a[pa - 1]:
            pa -= 1
    return pa
# endregion
class ReadmeStyleChecker:
    """Checks one sample folder's README.md against the style rules."""

    # Section headers every README must contain.
    essential_headers = {
        'Use case',
        'How to use the sample',
        'How it works',
        'Relevant API',
        'Tags'
    }
    # All permitted section headers, in the order they must appear.
    possible_headers = [
        'Use case',
        'How to use the sample',
        'How it works',
        'Relevant API',
        'Offline data',
        'About the data',
        'Additional information',
        'Tags'
    ]

    def __init__(self, folder_path: str):
        self.folder_path = folder_path
        self.folder_name = get_folder_name_from_path(folder_path)
        self.readme_path = os.path.join(folder_path, 'README.md')
        # Populated by populate_from_readme().
        self.readme_contents = None
        self.readme_parts = None
        self.readme_headers = None

    def populate_from_readme(self) -> None:
        """
        Read and parse the sections from README.
        :return: None. Throws if exception occurs.
        """
        try:
            # 'with' guarantees the handle is closed even when reading or
            # parsing fails (the previous explicit close() only ran on the
            # success path and leaked the file on error).
            with open(self.readme_path, 'r') as readme_file:
                contents = readme_file.read()
            # A regular expression that matches exactly 2 pound marks, and
            # captures the trailing string.
            pattern = re.compile(r'^#{2}(?!#)\s(.*)', re.MULTILINE)
            self.readme_contents = contents
            # Use regex to split the README by section headers, so that they
            # are separated into paragraphs.
            self.readme_parts = re.split(pattern, contents)
            # Capture the section headers.
            self.readme_headers = re.findall(pattern, contents)
        except Exception as err:
            raise Exception(f'Error loading file - {self.readme_path} - {err}.')

    def check_format_heading(self) -> None:
        """
        Check if
        1. essential section headers present.
        2. all sections are valid.
        3. section headers are in correct order.
        :return: None. Throws if exception occurs.
        """
        header_set = set(self.readme_headers)
        possible_header_set = set(self.possible_headers)
        # Check if all sections are valid.
        sets_diff = header_set - possible_header_set
        if sets_diff:
            raise Exception(
                f'Error header - Unexpected header or extra whitespace'
                f' - "{sets_diff}".')
        # Check if all essential section headers present.
        sets_diff = self.essential_headers - header_set
        if sets_diff:
            raise Exception(
                f'Error header - Missing essential header(s) - "{sets_diff}".')
        # Check if all sections are in correct order.
        index = check_is_subsequence(self.readme_headers, self.possible_headers)
        if index:
            raise Exception(
                f'Error header - Wrong order at - '
                f'"{self.readme_headers[index-1]}".')

    def check_format_title_section(self) -> None:
        """
        Check if
        1. the head has at least 3 parts (title, description and image URLs).
        2. the title string uses sentence case.
        :return: None. Throws if exception occurs.
        """
        try:
            title, _ = parse_head(self.readme_parts[0])
            check_sentence_case(title)
        except Exception as err:
            raise Exception(f'Error title - {err}')

    def check_format_apis(self) -> None:
        """
        Check if APIs
        1. do not have backticks.
        2. are sorted.
        3. do not have duplicate entries.
        :return: None. Throws if exception occurs.
        """
        try:
            api_section_index = self.readme_parts.index('Relevant API') + 1
            check_apis(self.readme_parts[api_section_index])
        except Exception as err:
            raise Exception(f'Error APIs - {err}')

    def check_format_tags(self) -> None:
        """
        Check if tags
        1. are in correct case.
        2. are sorted.
        3. do not have duplicate entries.
        :return: None. Throws if exception occurs.
        """
        try:
            tags_section_index = self.readme_parts.index('Tags') + 1
            check_tags(self.readme_parts[tags_section_index])
        except Exception as err:
            raise Exception(f'Error tags - {err}')

    def check_redundant_apis_in_tags(self) -> None:
        """
        Check that the API set and the tag set do not intersect.
        :return: None. Throws if exception occurs.
        """
        try:
            tags_section_index = self.readme_parts.index('Tags') + 1
            api_section_index = self.readme_parts.index('Relevant API') + 1
            api_set = check_apis(self.readme_parts[api_section_index])
            tag_set = check_tags(self.readme_parts[tags_section_index])
        except Exception as err:
            # Chain the underlying parse error instead of discarding it.
            raise Exception('Error checking extra tags due to previous error') from err
        # Raised outside the try: previously this raise was caught by the
        # generic handler above, which masked the meaningful message.
        if not api_set.isdisjoint(tag_set):
            raise Exception('Error tags - API should not be in tags')
# region Main wrapper functions
def run_check(path: str, count: int) -> int:
checker = ReadmeStyleChecker(path)
# 1. Populate from README.
try:
checker.populate_from_readme()
except Exception as err:
count += 1
print(f'{count}. {checker.folder_path} - {err}')
# 2. Check format of headings, e.g. 'Use case', 'How it works', etc.
try:
checker.check_format_heading()
except Exception as err:
count += 1
| |
<gh_stars>1-10
#!/usr/bin/env python3
#"""
#Implements the Dragonfly (SAE) handshake.
#Instead of using a client (STA) and a access point (AP), we
#just programmatically create a peer to peer network of two participiants.
#Either party may initiate the SAE protocol, either party can be the client and server.
#In a mesh scenario, where two APs (two equals) are trying to establish a connection
#between each other and each one could have the role of supplicant or authenticator.
#SAE is build upon the Dragonfly Key Exchange, which is described in https://tools.ietf.org/html/rfc7664.
#https://stackoverflow.com/questions/31074172/elliptic-curve-point-addition-over-a-finite-field-in-python
#"""
import time
import hashlib
import random
import logging
import socket
import re, uuid
import base64
import os
import subprocess
from collections import namedtuple
from Cryptodome.Cipher import AES
from Cryptodome import Random
import asn1tools
import sys
# Compile the ASN.1 schema used to (de)serialize the secret key material.
asn1_file = asn1tools.compile_files('declaration.asn')
# Create a TCP/IP socket.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Retrieve the local hostname.
local_hostname = socket.gethostname()
# Get the fully qualified hostname.
local_fqdn = socket.getfqdn()
# Get the corresponding IP address.
ip_address = socket.gethostbyname(local_hostname)
# Address of the remote peer to connect to (this side is the client).
# NOTE(review): hard-coded LAN address/port — presumably a lab setup; confirm.
server_address = ('192.168.0.3', 4380)
# Retry until the peer is reachable.
while True:
    try:
        sock.connect(server_address)
        break
    except ConnectionRefusedError as conn_error:
        print("Attempting to connect to server...")
        time.sleep(5)
    except:
        # NOTE(review): bare except silently swallows every other error
        # (including KeyboardInterrupt) and retries forever — consider
        # narrowing to OSError.
        # print("Unexpected error", sys.exc_info()[0])
        continue
print ("Connecting to %s (%s) with %s" % (local_hostname, local_fqdn, ip_address))
# Log to both the console and 'dragonfly.log'; the file handler captures
# DEBUG and above, the logger itself is set to INFO.
logger = logging.getLogger('dragonfly')
logger.setLevel(logging.INFO)
# create file handler which logs even debug messages
fh = logging.FileHandler('dragonfly.log')
fh.setLevel(logging.DEBUG)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
fh.setFormatter(formatter)
# add the handlers to logger
logger.addHandler(ch)
logger.addHandler(fh)
# An affine curve point; the group identity is represented by the string O.
Point = namedtuple("Point", "x y")
# The point at infinity (origin for the group law).
O = 'Origin'
def lsb(x):
    """Return the leading binary digit of x as a '0'/'1' string.

    NOTE(review): despite the name, for any positive x this is the MOST
    significant bit (always '1'), not the least significant one — confirm
    whether binary[-1] was intended at the call sites.
    """
    # bin(x)[2:] drops the '0b' prefix safely. The previous
    # bin(x).lstrip('0b') stripped leading '0' digits as well and
    # crashed with IndexError on x == 0.
    binary = bin(x)[2:]
    return binary[0]
def legendre(a, p):
    """Euler's criterion: a**((p-1)/2) mod p; 1 iff a is a nonzero square."""
    exponent = (p - 1) // 2
    return pow(a, exponent, p)
def tonelli_shanks(n, p):
    """
    Find r with r*r == n (mod p), for odd prime p and quadratic residue n.

    Adapted from:
    https://rosettacode.org/wiki/Tonelli-Shanks_algorithm#Python
    """
    assert legendre(n, p) == 1, "not a square (mod p)"
    # Factor p - 1 as q * 2**s with q odd.
    q = p - 1
    s = 0
    while q % 2 == 0:
        q //= 2
        s += 1
    if s == 1:
        # p % 4 == 3: the square root has a direct closed form.
        return pow(n, (p + 1) // 4, p)
    # Find some quadratic non-residue z (legendre == p - 1 means -1 mod p).
    for z in range(2, p):
        if p - 1 == legendre(z, p):
            break
    # z (leaked from the loop above) seeds the correction factor c.
    c = pow(z, q, p)
    r = pow(n, (q + 1) // 2, p)
    t = pow(n, q, p)
    m = s
    t2 = 0
    # Loop invariant: r*r == n * t (mod p); terminates when t == 1.
    while (t - 1) % p != 0:
        t2 = (t * t) % p
        # Find the least i (0 < i < m) with t**(2**i) == 1.
        for i in range(1, m):
            if (t2 - 1) % p == 0:
                break
            t2 = (t2 * t2) % p
        # NOTE: i intentionally leaks out of the for-loop; the updates
        # below depend on it, so the statement order here is delicate.
        b = pow(c, 1 << (m - i - 1), p)
        r = (r * b) % p
        c = (b * b) % p
        t = (t * c) % p
        m = i
    return r
class Curve():
    """
    Mathematical operations on an elliptic curve y^2 = x^3 + ax + b over GF(p).
    A lot of code taken from:
    https://stackoverflow.com/questions/31074172/elliptic-curve-point-addition-over-a-finite-field-in-python
    """
    def __init__(self, a, b, p):
        # Short Weierstrass parameters: y^2 = x^3 + a*x + b (mod p).
        self.a = a
        self.b = b
        self.p = p
    def curve_equation(self, x):
        """
        Return the right-hand side of the curve equation,
        x^3 + a*x + b (mod p), for a candidate x coordinate.
        (The actual curve parameters are whatever was passed to __init__.)
        """
        return (pow(x, 3) + (self.a * x) + self.b) % self.p
    def is_quadratic_residue(self, x):
        """
        https://en.wikipedia.org/wiki/Euler%27s_criterion
        Computes the Legendre symbol; True iff x is a square mod p.
        """
        return pow(x, (self.p-1) // 2, self.p) == 1
    def valid(self, P):
        """
        Determine whether we have a valid representation of a point
        on our curve. We assume that the x and y coordinates
        are always reduced modulo p, so that we can compare
        two points for equality with a simple ==.
        """
        if P == O:
            return True
        else:
            return (
                (P.y**2 - (P.x**3 + self.a*P.x + self.b)) % self.p == 0 and
                0 <= P.x < self.p and 0 <= P.y < self.p)
    def inv_mod_p(self, x):
        """
        Compute an inverse for x modulo p, assuming that x
        is not divisible by p.
        """
        if x % self.p == 0:
            raise ZeroDivisionError("Impossible inverse")
        # Fermat's little theorem: x**(p-2) is the inverse for prime p.
        return pow(x, self.p-2, self.p)
    def ec_inv(self, P):
        """
        Inverse of the point P on the elliptic curve y^2 = x^3 + ax + b.
        """
        if P == O:
            return P
        return Point(P.x, (-P.y) % self.p)
    def ec_add(self, P, Q):
        """
        Sum of the points P and Q on the elliptic curve y^2 = x^3 + ax + b.
        https://stackoverflow.com/questions/31074172/elliptic-curve-point-addition-over-a-finite-field-in-python
        """
        if not (self.valid(P) and self.valid(Q)):
            raise ValueError("Invalid inputs")
        # Deal with the special cases where either P, Q, or P + Q is
        # the origin.
        if P == O:
            result = Q
        elif Q == O:
            result = P
        elif Q == self.ec_inv(P):
            result = O
        else:
            # Cases not involving the origin: tangent slope for doubling,
            # chord slope otherwise.
            if P == Q:
                dydx = (3 * P.x**2 + self.a) * self.inv_mod_p(2 * P.y)
            else:
                dydx = (Q.y - P.y) * self.inv_mod_p(Q.x - P.x)
            x = (dydx**2 - P.x - Q.x) % self.p
            y = (dydx * (P.x - x) - P.y) % self.p
            result = Point(x, y)
        # The above computations *should* have given us another point
        # on the curve.
        assert self.valid(result)
        return result
    def double_add_algorithm(self, scalar, P):
        """
        Double-and-Add Algorithm for Point Multiplication
        Input: A scalar in the range 0-p and a point on the elliptic curve P
        https://stackoverflow.com/questions/31074172/elliptic-curve-point-addition-over-a-finite-field-in-python
        """
        assert self.valid(P)
        # bin(scalar)[2:] strips exactly the '0b' prefix. The previous
        # lstrip('0b') also stripped leading '0' digits (a classic str.lstrip
        # footgun); results were unaffected for scalar >= 1 only because a
        # positive number's leading binary digit is always '1'.
        b = bin(scalar)[2:]
        T = P
        for i in b[1:]:
            T = self.ec_add(T, T)
            if i == '1':
                T = self.ec_add(T, P)
        assert self.valid(T)
        return T
class Peer:
"""
Implements https://wlan1nde.wordpress.com/2018/09/14/wpa3-improving-your-wlan-security/
Take a ECC curve from here: https://safecurves.cr.yp.to/
Example: NIST P-384
y^2 = x^3-3x+27580193559959705877849011840389048093056905856361568521428707301988689241309860865136260764883745107765439761230575
modulo p = 2^384 - 2^128 - 2^96 + 2^32 - 1
2000 NIST; also in SEC 2 and NSA Suite B
See here: https://www.rfc-editor.org/rfc/rfc5639.txt
Curve-ID: brainpoolP256r1
p =
A9FB57DBA1EEA9BC3E660A909D838D726E3BF623D52620282013481D1F6E5377
A =
7D5A0975FC2C3057EEF67530417AFFE7FB8055C126DC5C6CE94A4B44F330B5D9
B =
26DC5C6CE94A4B44F330B5D9BBD77CBF958416295CF7E1CE6BCCDC18FF8C07B6
x =
8BD2AEB9CB7E57CB2C4B482FFC81B7AFB9DE27E1E3BD23C23A4453BD9ACE3262
y =
547EF835C3DAC4FD97F8461A14611DC9C27745132DED8E545C1D54C72F046997
q =
A9FB57DBA1EEA9BC3E660A909D838D718C397AA3B561A6F7901E0E82974856A7
h = 1
"""
def __init__(self, password, mac_address, name):
self.name = name
self.password = password
self.mac_address = mac_address
# Try out Curve-ID: brainpoolP256t1
self.p = int('A9FB57DBA1EEA9BC3E660A909D838D726E3BF623D52620282013481D1F6E5377', 16)
self.a = int('7D5A0975FC2C3057EEF67530417AFFE7FB8055C126DC5C6CE94A4B44F330B5D9', 16)
self.b = int('<KEY>', 16)
self.q = int('A9FB57DBA1EEA9BC3E660A909D838D718C397AA3B561A6F7901E0E82974856A7', 16)
self.curve = Curve(self.a, self.b, self.p)
# A toy curve
# self.a, self.b, self.p = 2, 2, 17
# self.q = 19
# self.curve = Curve(self.a, self.b, self.p)
def initiate(self, other_mac, k=40):
"""
See algorithm in https://tools.ietf.org/html/rfc7664
in section 3.2.1
"""
self.other_mac = other_mac
found = 0
num_valid_points = 0
counter = 1
n = self.p.bit_length() + 64
while counter <= k:
base = self.compute_hashed_password(counter)
temp = self.key_derivation_function(n, base, 'Dragonfly Hunting And Pecking')
seed = (temp % (self.p - 1)) + 1
val = self.curve.curve_equation(seed)
if self.curve.is_quadratic_residue(val):
if num_valid_points < 5:
x = seed
save = base
found = 1
num_valid_points += 1
logger.debug('Got point after {} iterations'.format(counter))
counter = counter + 1
if found == 0:
logger.error('No valid point found after {} iterations'.format(k))
elif found == 1:
# https://crypto.stackexchange.com/questions/6777/how-to-calculate-y-value-from-yy-mod-prime-efficiently
# https://rosettacode.org/wiki/Tonelli-Shanks_algorithm
y = tonelli_shanks(self.curve.curve_equation(x), self.p)
PE = Point(x, y)
# check valid point
assert self.curve.curve_equation(x) == pow(y, 2, self.p)
logger.info('[{}] Using {}-th valid Point={}'.format(self.name, num_valid_points, PE))
logger.info('[{}] Point is on curve: {}'.format(self.name, self.curve.valid(PE)))
self.PE = PE
assert self.curve.valid(self.PE)
def commit_exchange(self):
"""
This is basically Diffie Hellman Key Exchange (or in our case ECCDH)
In the Commit Exchange, both sides commit to a single guess of the
password. The peers generate a scalar and an element, exchange them
with each other, and process the other's scalar and element to
generate a common and shared secret.
If we go back to elliptic curves over the real numbers, there is a nice geometric
interpretation for the ECDLP: given a starting point P, we compute 2P, 3P, . . .,
d P = T , effectively hopping back and forth on the elliptic curve. We then publish
the starting point P (a public parameter) and the final point T (the public key). In
order to break the cryptosystem, an attacker has to figure out how often we “jumped”
on the elliptic curve. The number of hops is the secret d, the private key.
"""
# seed the PBG before picking a new random number
# random.seed(time.process_time())
# None or no argument seeds from current time or from an operating
# system specific randomness source if available.
random.seed()
# Otherwise, each party chooses two random numbers, private and mask
self.private = random.randrange(1, self.p)
self.mask = random.randrange(1, self.p)
logger.debug('[{}] private={}'.format(self.name, self.private))
logger.debug('[{}] mask={}'.format(self.name, self.mask))
# These two secrets and the Password Element are then used to construct
# the scalar and element:
# what is q?
# o A point, G, on the elliptic curve, which serves as a generator for
# the ECC group. G is chosen such that its order, with respect to
# elliptic curve addition, is a sufficiently large prime.
#
# o A prime, q, which is the order of G, and thus is also the size of
# the cryptographic subgroup that is generated by G.
# https://math.stackexchange.com/questions/331329/is-it-possible-to-compute-order-of-a-point-over-elliptic-curve
# In the elliptic Curve cryptography, it is said that the order of base point
# should be a prime number, and order of a point P is defined as k, where kP=O.
# Theorem 9.2.1 The points on an elliptic curve together with O
# have cyclic subgroups. Under certain conditions all points on an
# elliptic curve form a cyclic group.
# For this specific curve the group order is a prime and, according to Theo-
# rem 8.2.4, every element is primitive.
# Question: What is the order of our PE?
# the order must be p, since p is a prime
self.scalar = (self.private + self.mask) % self.q
# If the scalar is less than two (2), the private and mask MUST be
# thrown away and new values generated. Once a valid scalar and
# Element are generated, | |
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from twisted.python import failure, log
from twisted.application import service
from twisted.internet import defer
from buildbot import util
from buildbot.process.properties import Properties
from buildbot.util import ComparableMixin
from buildbot.changes import changes
def isScheduler(sch):
    """Return True when *sch* is a scheduler instance.

    Used by configuration checks to validate user-supplied objects.
    """
    return isinstance(sch, BaseScheduler)
class BaseScheduler(service.MultiService, ComparableMixin):
"""
Base class for all schedulers; this provides the equipment to manage
reconfigurations and to handle basic scheduler state. It also provides
utility methods to begin various sorts of builds.
Subclasses should add any configuration-derived attributes to
C{base.Scheduler.compare_attrs}.
"""
compare_attrs = ('name', 'builderNames', 'properties')
def __init__(self, name, builderNames, properties):
"""
Initialize a Scheduler.
@param name: name of this scheduler (used as a key for state)
@type name: unicode
@param builderNames: list of builders this scheduler may start
@type builderNames: list of unicode
@param properties: properties to add to builds triggered by this
scheduler
@type properties: dictionary
@param consumeChanges: true if this scheduler wishes to be informed
about the addition of new changes. Defaults to False. This should
be passed explicitly from subclasses to indicate their interest in
consuming changes.
@type consumeChanges: boolean
"""
service.MultiService.__init__(self)
self.name = name
"name of this scheduler; used to identify replacements on reconfig"
errmsg = ("The builderNames argument to a scheduler must be a list "
"of Builder names.")
assert isinstance(builderNames, (list, tuple)), errmsg
for b in builderNames:
assert isinstance(b, str), errmsg
self.builderNames = builderNames
"list of builder names to start in each buildset"
self.properties = Properties()
"properties that are contributed to each buildset"
self.properties.update(properties, "Scheduler")
self.properties.setProperty("scheduler", name, "Scheduler")
self.schedulerid = None
"""ID of this scheduler; set just before the scheduler starts, and set
to None after stopService is complete."""
self.master = None
"""BuildMaster instance; set just before the scheduler starts, and set
to None after stopService is complete."""
# internal variables
self._change_subscription = None
self._state_lock = defer.DeferredLock()
self._change_consumption_lock = defer.DeferredLock()
## service handling
def _setUpScheduler(self, schedulerid, master, manager):
# this is called by SchedulerManager *before* startService
self.schedulerid = schedulerid
self.master = master
    def startService(self):
        # Standard Twisted service startup; subclasses extend this to begin
        # consuming changes or to schedule timed builds.
        service.MultiService.startService(self)
def stopService(self):
d = defer.maybeDeferred(self._stopConsumingChanges)
d.addCallback(lambda _ : service.MultiService.stopService(self))
return d
    def _shutDownScheduler(self):
        # Invoked by the SchedulerManager *after* stopService has completed;
        # drops the references recorded by _setUpScheduler.
        self.schedulerid = None
        self.master = None
## state management
class Thunk: pass
def getState(self, key, default=Thunk):
"""
For use by subclasses; get a named state value from the scheduler's
state, defaulting to DEFAULT; raises C{KeyError} if default is not
given and no value exists. Scheduler must be started. Returns the
value via a deferred.
"""
d = self.master.db.schedulers.getState(self.schedulerid)
def get_value(state_dict):
if key in state_dict:
return state_dict[key]
if default is BaseScheduler.Thunk:
raise KeyError("state key '%s' not found" % (key,))
return default
d.addCallback(get_value)
return d
@util.deferredLocked('_state_lock')
def setState(self, key, value):
"""
For use by subclasses; set a named state value in the scheduler's
persistent state. Note that value must be json-able. Returns a
Deferred.
Note that this method is safe if called simultaneously in the same
process, although it is not safe between processes.
"""
d = self.master.db.schedulers.getState(self.schedulerid)
def set_value_and_store(state_dict):
state_dict[key] = value
return self.master.db.schedulers.setState(self.schedulerid, state_dict)
d.addCallback(set_value_and_store)
## status queries
# TODO: these aren't compatible with distributed schedulers
def listBuilderNames(self):
"Returns the list of builder names"
return self.builderNames
def getPendingBuildTimes(self):
"Returns a list of the next times that builds are scheduled, if known."
return []
## change handling
    def startConsumingChanges(self, fileIsImportant=None, change_filter=None):
        """
        Subclasses should call this method from startService to register to
        receive changes. The BaseScheduler class will take care of filtering
        the changes (using change_filter) and (if fileIsImportant is not None)
        classifying them. See L{gotChange}. Returns a Deferred.

        @param fileIsImportant: a callable provided by the user to distinguish
        important and unimportant changes
        @type fileIsImportant: callable

        @param change_filter: a filter to determine which changes are even
        considered by this scheduler, or C{None} to consider all changes
        @type change_filter: L{buildbot.changes.filter.ChangeFilter} instance
        """
        assert fileIsImportant is None or callable(fileIsImportant)

        # register for changes with master
        assert not self._change_subscription
        def changeCallback(change):
            # ignore changes delivered while we're not running
            if not self._change_subscription:
                return

            # drop changes that the configured filter rejects
            if change_filter and not change_filter.filter_change(change):
                return
            if fileIsImportant:
                try:
                    important = fileIsImportant(change)
                except:
                    # NOTE: bare except is deliberate best-effort -- a broken
                    # user-supplied fileIsImportant must not kill the change
                    # consumer; the change is logged and dropped.
                    log.err(failure.Failure(),
                            'in fileIsImportant check for %s' % change)
                    return
            else:
                important = True

            # use change_consumption_lock to ensure the service does not stop
            # while this change is being processed
            d = self._change_consumption_lock.acquire()
            d.addCallback(lambda _ : self.gotChange(change, important))
            def release(x):
                self._change_consumption_lock.release()
            d.addBoth(release)
            d.addErrback(log.err, 'while processing change')
        self._change_subscription = self.master.subscribeToChanges(changeCallback)

        return defer.succeed(None)
def _stopConsumingChanges(self):
# (note: called automatically in stopService)
# acquire the lock change consumption lock to ensure that any change
# consumption is complete before we are done stopping consumption
d = self._change_consumption_lock.acquire()
def stop(x):
if self._change_subscription:
self._change_subscription.unsubscribe()
self._change_subscription = None
self._change_consumption_lock.release()
d.addBoth(stop)
return d
    def gotChange(self, change, important):
        """
        Called when a change is received; returns a Deferred.  Subclasses
        must override this method.  If the C{fileIsImportant} parameter to
        C{startConsumingChanges} was C{None}, then all changes are considered
        important.

        @param change: the new change object
        @type change: L{buildbot.changes.changes.Change} instance
        @param important: true if this is an important change, according to
        C{fileIsImportant}.
        @type important: boolean
        @returns: Deferred
        """
        raise NotImplementedError
    ## starting builds
def addBuildsetForLatest(self, reason='', external_idstring=None,
branch=None, repository='', project='',
builderNames=None, properties=None):
"""
Add a buildset for the 'latest' source in the given branch,
repository, and project. This will create a relative sourcestamp for
the buildset.
This method will add any properties provided to the scheduler
constructor to the buildset, and will call the master's addBuildset
method with the appropriate parameters.
@param reason: reason for this buildset
@type reason: unicode string
@param external_idstring: external identifier for this buildset, or None
@param branch: branch to build (note that None often has a special meaning)
@param repository: repository name for sourcestamp
@param project: project name for sourcestamp
@param builderNames: builders to name in the buildset (defaults to
C{self.builderNames})
@param properties: a properties object containing initial properties for
the buildset
@type properties: L{buildbot.process.properties.Properties}
@returns: (buildset ID, buildrequest IDs) via Deferred
"""
d = self.master.db.sourcestamps.addSourceStamp(
branch=branch, revision=None, repository=repository,
project=project)
d.addCallback(self.addBuildsetForSourceStamp, reason=reason,
external_idstring=external_idstring,
builderNames=builderNames,
properties=properties)
return d
def addBuildsetForChanges(self, reason='', external_idstring=None,
changeids=[], builderNames=None, properties=None):
"""
Add a buildset for the combination of the given changesets, creating
a sourcestamp based on those changes. The sourcestamp for the buildset
will reference all of the indicated changes.
This method will add any properties provided to the scheduler
constructor to the buildset, and will call the master's addBuildset
method with the appropriate parameters.
@param reason: reason for this buildset
@type reason: unicode string
@param external_idstring: external identifier for this buildset, or None
@param changeids: nonempty list of changes to include in this buildset
@param builderNames: builders to name in the buildset (defaults to
C{self.builderNames})
@param properties: a properties object containing initial properties for
the buildset
@type properties: L{buildbot.process.properties.Properties}
@returns: (buildset ID, buildrequest IDs) via Deferred
"""
assert changeids is not []
# attributes for this sourcestamp will be based on the most recent
# change, so fetch the change with the highest id
d = self.master.db.changes.getChange(max(changeids))
def chdict2change(chdict):
if not chdict:
return None
return changes.Change.fromChdict(self.master, chdict)
d.addCallback(chdict2change)
def create_sourcestamp(change):
return self.master.db.sourcestamps.addSourceStamp(
branch=change.branch,
revision=change.revision,
repository=change.repository,
project=change.project,
changeids=changeids)
d.addCallback(create_sourcestamp)
d.addCallback(self.addBuildsetForSourceStamp, reason=reason,
external_idstring=external_idstring,
builderNames=builderNames,
properties=properties)
return d
def addBuildsetForSourceStamp(self, ssid, reason='', external_idstring=None,
properties=None, builderNames=None):
"""
Add a buildset for the given, already-existing sourcestamp.
This method will add any properties provided to the scheduler
constructor | |
file types in this logical file group - subclass needs to override this
return [".*"]
@classmethod
def type_name(cls):
return cls.__name__
@classmethod
def check_files_for_aggregation_type(cls, files):
"""Checks if the specified files can be used to set this aggregation type. Sub classes that
support aggregation creation from a folder must override this.
:param files: a list of ResourceFile objects
:return If the files meet the requirements of this aggregation type, then returns this
aggregation class name, otherwise empty string.
"""
return ""
@classmethod
def set_file_type(cls, resource, user, file_id=None, folder_path=''):
"""Sub classes must implement this method to create specific logical file (aggregation) type
:param resource: an instance of resource type CompositeResource
:param file_id: (optional) id of the resource file to be set as an aggregation type -
if this is missing then folder_path must be specified
:param folder_path: (optional) path of the folder which needs to be set to an aggregation
type - if this is missing then file_id must be specified
:param user: user who is setting the file type
:return:
"""
raise NotImplementedError()
    @classmethod
    def _validate_set_file_type_inputs(cls, resource, file_id=None, folder_path=''):
        """Validate *file_id* and *folder_path* for creating a file type (aggregation).

        Exactly one of *file_id* and *folder_path* must be given (folder-only
        for filesets; file-only for non-folder-based types).

        :param resource: an instance of resource type CompositeResource
        :param file_id: (optional) id of the resource file to be set as an aggregation type -
        if this is missing then folder_path must be specified
        :param folder_path: (optional) path of the folder which needs to be set to a FileSet
        aggregation type - if this is missing then file_id must be specified. If specified a
        path relative to the resource.file_path will be returned
        :raise ValidationError if validation fails
        :return a (res_file, folder_path) tuple: res_file is a ResourceFile (or
        None in the folder case) and folder_path is relative to resource.file_path
        """
        # exactly one of file_id / folder_path must be supplied
        if file_id is None and not folder_path:
            raise ValueError("Must specify id of the file or path of the folder to set as an "
                             "aggregation type")

        if file_id is not None and folder_path:
            raise ValueError("Must specify either id of the file or path of the folder to set as an "
                             "aggregation type, but not both.")

        # fileset aggregations are folder-based only
        if cls.__name__ == 'FileSetLogicalFile' and not folder_path:
            raise ValueError("Must specify path of the folder to set as a "
                             "fileset aggregation type")

        # every type other than the three folder-based ones requires a file id
        if cls.__name__ not in ['FileSetLogicalFile', 'ModelProgramLogicalFile', 'ModelInstanceLogicalFile'] \
                and file_id is None:
            raise ValueError("Must specify id of the file to set as an "
                             "aggregation type")

        res_file = None
        if file_id is not None:
            # user selected a file to set aggregation
            res_file = get_resource_file_by_id(resource, file_id)
            if res_file is None or not res_file.exists:
                raise ValidationError("File not found.")
            logical_file = None
            if res_file.has_logical_file:
                logical_file = res_file.logical_file
            if logical_file is not None:
                # files already inside a fileset/model-instance may be promoted;
                # anything else is already claimed by another aggregation
                if not logical_file.is_fileset and not logical_file.is_model_instance:
                    msg = "Selected file {} is already part of an aggregation.".format(res_file.file_name)
                    raise ValidationError(msg)
                elif cls.__name__ == 'ModelProgramLogicalFile':
                    if logical_file.is_model_instance:
                        msg = "Model program aggregation is not allowed within a model instance aggregation"
                        raise ValidationError(msg)
        else:
            # user selected a folder to set aggregation - check if the specified folder exists
            storage = resource.get_irods_storage()
            # normalize to a path relative to resource.file_path
            if folder_path.startswith("data/contents/"):
                folder_path = folder_path[len("data/contents/"):]
            path_to_check = os.path.join(resource.file_path, folder_path)

            if not storage.exists(path_to_check):
                msg = "Specified folder {} path does not exist in irods."
                msg = msg.format(path_to_check)
                raise ValidationError(msg)
            # folder-based types additionally impose their own folder rules
            if cls.__name__ in ("FileSetLogicalFile", "ModelInstanceLogicalFile", "ModelProgramLogicalFile"):
                if not cls.can_set_folder_to_aggregation(resource=resource, dir_path=path_to_check):
                    msg = "{} aggregation can't be created from the specified folder:{}"
                    if cls.__name__ == "FileSetLogicalFile":
                        msg = msg.format("Fileset", path_to_check)
                    elif cls.__name__ == "ModelProgramLogicalFile":
                        msg = msg.format("Model program", path_to_check)
                    else:
                        msg = msg.format("Model instance", path_to_check)
                    raise ValidationError(msg)

        return res_file, folder_path
@classmethod
def get_primary_resouce_file(cls, resource_files):
"""Returns one specific file as the primary file from the list of resource
files *resource_files*. A file is a primary file which can be used for creating a
file type (aggregation). Subclasses must implement this.
:param resource_files: a list of resource files - instances of ResourceFile
:return a resource file (instance of ResourceFile) if found, otherwise, None
"""
raise NotImplementedError
@staticmethod
def get_aggregation_display_name():
"""Sub classes must implement this method to return a name for this
logical (aggregation) type used in UI"""
raise NotImplementedError()
@staticmethod
def get_aggregation_term_label():
"""Sub classes must implement this method to return the label for this
logical (aggregation) term used in aggregation xml metadata and resource map files"""
raise NotImplementedError()
def get_aggregation_class_name(self):
"""Return the class name of the logical type (aggregation type)"""
return self.__class__.__name__
@property
def is_fileset(self):
"""Return True if this aggregation is a fileset aggregation, otherwise False"""
return self.get_aggregation_class_name() == 'FileSetLogicalFile'
@property
def is_model_program(self):
"""Return True if this aggregation is a model program aggregation, otherwise False"""
return self.get_aggregation_class_name() == 'ModelProgramLogicalFile'
@property
def is_model_instance(self):
"""Return True if this aggregation is a model instance aggregation, otherwise False"""
return self.get_aggregation_class_name() == 'ModelInstanceLogicalFile'
@property
def is_dangling(self):
"""Checks if this aggregation is a dangling aggregation or not"""
resource = self.resource
istorage = resource.get_irods_storage()
if self.files.count() == 0:
if any([self.is_fileset, self.is_model_instance, self.is_model_program]):
# check folder exist in irods
if self.folder:
path = os.path.join(resource.file_path, self.folder)
if not istorage.exists(path):
return True
else:
return True
else:
return True
return False
@staticmethod
def get_aggregation_type_name():
"""Return the appropriate aggregation name needed for aggregation xml metadata and
map document. Subclasses must implement this method.
"""
raise NotImplementedError
# used in discovery faceting to aggregate native and composite content types
@staticmethod
def get_discovery_content_type():
"""Return a human-readable content type for discovery.
This must agree between Composite Types and native types.
Subclasses must implement this method.
"""
raise NotImplementedError
@property
def has_metadata(self):
return hasattr(self, 'metadata')
@property
def size(self):
# get total size (in bytes) of all files in this file type
return sum([f.size for f in self.files.all()])
@property
def supports_resource_file_move(self):
"""allows a resource file that is part of this logical file type to be moved"""
return True
@property
def supports_resource_file_add(self):
"""allows a resource file to be added"""
return True
@property
def supports_resource_file_rename(self):
"""allows a resource file that is part of this logical file type to be renamed"""
return True
@property
def supports_zip(self):
"""a folder containing resource file(s) that are part of this logical file type
is not allowed to be zipped"""
return False
@property
def supports_delete_folder_on_zip(self):
"""allows the original folder to be deleted upon zipping of that folder"""
return True
@property
def supports_unzip(self):
"""allows a zip file that is part of this logical file type to get unzipped"""
return True
@property
def aggregation_name(self):
"""Returns aggregation name as per the aggregation naming rule defined in issue#2568"""
primary_file = self.get_primary_resouce_file(self.files.all())
if not primary_file:
return ""
return primary_file.short_path
@property
def aggregation_path(self):
"""Returns the full path of the aggregation (self) that starts with resource id
example: 0e917683abae48988bf3fc1f9df5803f/data/contents/netcdf-aggr
"""
aggr_path = os.path.join(self.resource.file_path, self.aggregation_name)
return aggr_path
@property
def metadata_short_file_path(self):
"""File path of the aggregation metadata xml file relative to {resource_id}/data/contents/
"""
return self.xml_file_short_path(resmap=False)
@property
def metadata_file_path(self):
"""Full path of the aggregation metadata xml file starting with {resource_id}/data/contents/
"""
return os.path.join(self.resource.file_path, self.metadata_short_file_path)
@property
def map_short_file_path(self):
"""File path of the aggregation map xml file relative to {resource_id}/data/contents/
"""
return self.xml_file_short_path()
@property
def map_file_path(self):
"""Full file path of the aggregation map xml file starting with {resource_id}/data/contents/
"""
return os.path.join(self.resource.file_path, self.map_short_file_path)
@property
def is_single_file_aggregation(self):
"""
Returns True if the aggregation consists of only one file, otherwise, False.
Subclasses that support only single file must override this property
:return: True or False
"""
return False
def add_resource_file(self, res_file):
"""Makes a ResourceFile (res_file) object part of this logical file object. If res_file
is already associated with any other logical file object, this function does not do
anything to that logical object. The caller needs to take necessary action for the
previously associated logical file object. If res_file is already part of this
logical file, it raise ValidationError.
:param res_file an instance of ResourceFile
"""
if res_file in self.files.all():
raise ValidationError("Resource file is already part of this logical file.")
res_file.logical_file_content_object = self
res_file.save()
def add_files_to_resource(self, resource, files_to_add, upload_folder):
"""A helper for adding any new files to resource as part of creating an aggregation
:param resource: an instance of CompositeResource
:param files_to_add: a list of file paths for files that need to | |
<filename>Experiments_Synthetic/run_method.py
import gc
from keras import backend as K
import tensorflow as tf
from sklearn.metrics import log_loss
from binnings import *
from kde import KDE_estimator
from data_generation import generate_data
from pycalib.models import IsotonicCalibration, SigmoidCalibration
from betacal import BetaCalibration
from piecewise_linear import node_scores_xy_with_crossvalidation
import pwlf
from pwlf_cv import node_scores_pwlf_with_crossvalidation
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LinearRegression
def construct_data_row(n_folds=None, binning_name=None, n_bins=None,
                       c_hat_distance_p=None, c_hat_distance_p_debiased=None,
                       c_hat_distance_p_square=None, c_hat_distance_p_square_debiased=None,
                       c_hat_distance_c=None, c_hat_distance_c_square=None,
                       test_c_hat_distance_p=None, test_c_hat_distance_p_debiased=None,
                       test_c_hat_distance_p_square=None, test_c_hat_distance_p_square_debiased=None,
                       test_c_hat_distance_c=None, test_c_hat_distance_c_square=None):
    """Assemble one result row (a dict) of calibration evaluation metrics.

    Every argument is stored verbatim; ``binning_name`` lands under the
    shorter key ``"binning"``.
    """
    return dict(n_folds=n_folds,
                binning=binning_name,
                n_bins=n_bins,
                c_hat_distance_p=c_hat_distance_p,
                c_hat_distance_p_debiased=c_hat_distance_p_debiased,
                c_hat_distance_p_square=c_hat_distance_p_square,
                c_hat_distance_p_square_debiased=c_hat_distance_p_square_debiased,
                c_hat_distance_c=c_hat_distance_c,
                c_hat_distance_c_square=c_hat_distance_c_square,
                test_c_hat_distance_p=test_c_hat_distance_p,
                test_c_hat_distance_p_debiased=test_c_hat_distance_p_debiased,
                test_c_hat_distance_p_square=test_c_hat_distance_p_square,
                test_c_hat_distance_p_square_debiased=test_c_hat_distance_p_square_debiased,
                test_c_hat_distance_c=test_c_hat_distance_c,
                test_c_hat_distance_c_square=test_c_hat_distance_c_square)
def construct_data_row_from_raw_data(n_folds=None, binning_name=None, n_bins=None,
                                     c_hat=None, p=None, c=None,
                                     c_hat_test=None, p_test=None, c_test=None):
    """Build a result row directly from raw calibration estimates.

    No debiasing is available here, so the *_debiased entries simply repeat
    the plain mean absolute / squared distances.
    """
    def _abs_mean(diff):
        return np.mean(np.abs(diff))

    def _sq_mean(diff):
        return np.mean(np.square(diff))

    train_diff_p = c_hat - p
    train_diff_c = c_hat - c
    test_diff_p = c_hat_test - p_test
    test_diff_c = c_hat_test - c_test
    return construct_data_row(n_folds=n_folds, binning_name=binning_name, n_bins=n_bins,
                              c_hat_distance_p=_abs_mean(train_diff_p),
                              c_hat_distance_p_debiased=_abs_mean(train_diff_p),
                              c_hat_distance_p_square=_sq_mean(train_diff_p),
                              c_hat_distance_p_square_debiased=_sq_mean(train_diff_p),
                              c_hat_distance_c=_abs_mean(train_diff_c),
                              c_hat_distance_c_square=_sq_mean(train_diff_c),
                              test_c_hat_distance_p=_abs_mean(test_diff_p),
                              test_c_hat_distance_p_debiased=_abs_mean(test_diff_p),
                              test_c_hat_distance_p_square=_sq_mean(test_diff_p),
                              test_c_hat_distance_p_square_debiased=_sq_mean(test_diff_p),
                              test_c_hat_distance_c=_abs_mean(test_diff_c),
                              test_c_hat_distance_c_square=_sq_mean(test_diff_c))
def construct_data_row_from_binning(binning, n_folds, p_test, c_test, name_addition=""):
    """Build a result row from a fitted binning object.

    Train-set p-distances come from the binning's own (possibly debiased) ECE
    fields; c-distances and all test-set metrics are recomputed by evaluating
    the binning's slope-1 map.
    """
    train_c_hat = binning.eval_slope_1(binning.p)
    test_c_hat = binning.eval_slope_1(p_test)
    train_diff_c = train_c_hat - binning.c
    test_diff_p = test_c_hat - p_test
    test_diff_c = test_c_hat - c_test
    return construct_data_row(n_folds=n_folds,
                              binning_name=binning.binning_name + name_addition,
                              n_bins=binning.n_bins,
                              c_hat_distance_p=binning.ECE_abs,
                              c_hat_distance_p_debiased=binning.ECE_abs_debiased,
                              c_hat_distance_p_square=binning.ECE_square,
                              c_hat_distance_p_square_debiased=binning.ECE_square_debiased,
                              c_hat_distance_c=np.mean(np.abs(train_diff_c)),
                              c_hat_distance_c_square=np.mean(np.square(train_diff_c)),
                              test_c_hat_distance_p=np.mean(np.abs(test_diff_p)),
                              test_c_hat_distance_p_debiased=np.mean(np.abs(test_diff_p)),
                              test_c_hat_distance_p_square=np.mean(np.square(test_diff_p)),
                              test_c_hat_distance_p_square_debiased=np.mean(np.square(test_diff_p)),
                              test_c_hat_distance_c=np.mean(np.abs(test_diff_c)),
                              test_c_hat_distance_c_square=np.mean(np.square(test_diff_c)))
def construct_metadata_row_from_fit_model_data(model_name, n_nodes, n_nodes_trick, cv_folds, node_scores,
                                               node_loss, node_ECEs_abs, node_ECEs_square, all_weights,
                                               weights_final, weights_trick, all_cv_scores, last_epoch):
    """Assemble a metadata row (a dict) describing one fitted model.

    ``cv_score`` is derived as the minimum of ``node_scores``; everything
    else is stored verbatim under (partially renamed) keys.
    """
    return dict(binning=model_name,
                n_bins=n_nodes,
                n_bins_trick=n_nodes_trick,
                cv_folds=cv_folds,
                node_scores=node_scores,
                node_train_loss=node_loss,
                cv_score=np.min(node_scores),
                node_ECEs_abs=node_ECEs_abs,
                node_ECEs_square=node_ECEs_square,
                model_weights=all_weights,
                model_weights_final=weights_final,
                model_weights_trick=weights_trick,
                all_cv_scores=all_cv_scores,
                last_epoch=last_epoch)
def cv_trick(bin_scores):
    """Prefer a simpler model whose CV score is near-optimal.

    Scans from the argmin of ``bin_scores`` down towards index 0 and returns
    the smallest index (at or below the argmin) whose score lies within
    ``min_score * 0.001`` of the minimum; with no such index, the argmin
    itself is returned.
    """
    best_index = np.argmin(bin_scores)
    best_score = np.min(bin_scores)
    tolerance = best_score * 0.001
    chosen = best_index
    for candidate in range(best_index - 1, -1, -1):
        if bin_scores[candidate] <= best_score + tolerance:
            chosen = candidate
    return chosen
def gc_model(model):
    # Free the Keras/TensorFlow resources held by a fitted model wrapper:
    # drop the underlying Keras model and the wrapper itself, force a GC
    # pass, then clear the Keras session and reset the TF graph so repeated
    # fits in one process do not accumulate graph state and memory.
    del model.model
    del model
    gc.collect()
    K.clear_session()
    tf.compat.v1.reset_default_graph()
def run_PW_NN_nonCV_method(method, p, y, c, p_test, c_test):
    """Fit one piecewise-linear NN calibration model (no cross-validation).

    The ``method`` dict supplies the model constructor (``fn_method``) and
    all hyper-parameters; the number of internal nodes is ``n_bins - 1``.

    Returns (data_row, last_epoch, weights_final).
    """
    n_cv_folds = method["n_cv_folds"]
    monotonic = method["monotonic"]
    fn_method = method["fn_method"]
    seed = method["seed"]
    equal_size = method["equal_size"]
    use_ce_loss = method["use_ce_loss"]
    logit_scale = method["logit_scale"]
    logistic_out = method["logistic_out"]
    nn_number = method["nn_number"]
    n_bins = method["n_bins"]
    lr = method["lr"]
    patience = method["patience"]
    n_nodes = n_bins - 1
    n_data = len(p)
    name_addition = method["name_addition"]
    # the model name encodes every hyper-parameter so result rows are
    # self-describing
    add_to_name_mono = "_monotonic" if monotonic else ""
    add_to_name_logit = "_logit" if logit_scale else ""
    add_to_name_loss = "_ce" if use_ce_loss else "_bs"
    add_to_name_lr = f"_lr{lr}"
    add_to_name_patience = f"_p{patience}"
    model_name = "PW_NN" + str(nn_number) + add_to_name_mono + name_addition + add_to_name_logit + add_to_name_loss + add_to_name_lr + add_to_name_patience
    model = fn_method(k=n_nodes, max_epochs=1500, random_state=seed, equal_size=equal_size, monotonic=monotonic,
                      use_ce_loss=use_ce_loss, logit_scale=logit_scale, logistic_out=logistic_out, lr=lr, patience=patience)
    h = model.fit(p, y, verbose=False, batch_size=min(n_data // 4, 512))
    last_epoch = len(h.history['loss'])
    weights_final = model.model.get_weights()
    c_hat = model.predict(p)
    c_hat_test = model.predict(p_test)
    data_row = construct_data_row_from_raw_data(n_folds=n_cv_folds, binning_name=model_name,
                                                n_bins=n_bins, c_hat=c_hat, p=p, c=c,
                                                c_hat_test=c_hat_test, p_test=p_test, c_test=c_test)
    gc_model(model)  # Garbage collection
    return data_row, last_epoch, weights_final
def run_PW_NN_CV_method(method, p, y, c, p_test, c_test):
    """Select a PW-NN node count via cross-validation, then refit and report.

    Runs K-fold CV over candidate node counts, refits on all data with the
    CV-optimal count, and additionally with the 'CV trick' count (simplest
    model whose CV score is within tolerance of the best; see cv_trick).

    Returns (data_row, data_row_tr, metadata_row).
    """
    n_cv_folds = method["n_cv_folds"]
    monotonic = method["monotonic"]
    max_nodes = method["max_nodes"]
    fn_method = method["fn_method"]
    seed = method["seed"]
    equal_size = method["equal_size"]
    use_ce_loss = method["use_ce_loss"]
    logit_scale = method["logit_scale"]
    logistic_out = method["logistic_out"]
    nn_number = method["nn_number"]
    lr = method["lr"]
    patience = method["patience"]
    n_data = len(p)
    add_to_name_mono = "_monotonic" if monotonic else ""
    add_to_name_logit = "_logit" if logit_scale else ""
    add_to_name_loss = "_ce" if use_ce_loss else "_bs"
    add_to_name_lr = f"_lr{lr}"
    add_to_name_patience = f"_p{patience}"
    model_name = "PW_NN" + str(nn_number) + add_to_name_mono + add_to_name_logit + add_to_name_loss + add_to_name_lr + add_to_name_patience
    # NOTE(review): `time` is not among this module's visible imports --
    # presumably supplied by `from binnings import *`; confirm.
    start_cv = time()
    # candidate node counts are capped so there are at least ~200 data
    # points per node
    node_scores, all_weights, all_cv_scores, node_ECEs_square, node_ECEs_abs, node_loss = node_scores_xy_with_crossvalidation(
        method=fn_method, p_hat=p, y=y,
        n_splits=n_cv_folds, seed=seed,
        max_nodes=min(n_data // 200, max_nodes), equal_size=equal_size,
        monotonic=monotonic,
        use_ce_loss=use_ce_loss,
        logit_scale=logit_scale,
        logistic_out=logistic_out,
        lr=lr,
        patience=patience
    )
    print("Cross-validation took %f seconds" % (time() - start_cv))
    n_nodes = np.argmin(node_scores)
    n_bins = n_nodes + 1
    # `method` is mutated in place to drive the non-CV refits below
    method["n_bins"] = n_bins
    method["name_addition"] = ""
    data_row, last_epoch, weights_final = run_PW_NN_nonCV_method(method=method, p=p, y=y, c=c, p_test=p_test,
                                                                 c_test=c_test)
    n_nodes_tr = cv_trick(node_scores)
    n_bins_tr = n_nodes_tr + 1
    method["n_bins"] = n_bins_tr
    method["name_addition"] = "tr"
    data_row_tr, last_epoch_tr, weights_final_tr = run_PW_NN_nonCV_method(method=method, p=p, y=y, c=c, p_test=p_test,
                                                                          c_test=c_test)
    metadata_row = construct_metadata_row_from_fit_model_data(model_name=model_name, n_nodes=n_nodes,
                                                              n_nodes_trick=n_nodes_tr, cv_folds=n_cv_folds,
                                                              node_scores=node_scores,
                                                              node_loss=node_loss, node_ECEs_abs=node_ECEs_abs,
                                                              node_ECEs_square=node_ECEs_square,
                                                              all_weights=all_weights,
                                                              weights_final=weights_final,
                                                              weights_trick=weights_final_tr,
                                                              all_cv_scores=all_cv_scores, last_epoch=last_epoch)
    return data_row, data_row_tr, metadata_row
def run_PW_NN_sweep_method(method, p, y, c, p_test, c_test):
    """Fit PW-NN models with increasing node counts until monotonicity breaks.

    Starting from 0 nodes, fits an (unconstrained) piecewise-linear NN for
    each node count and stops at the first fit whose learned y-weights are
    not strictly increasing; the last monotonic fit is evaluated and
    reported.

    Returns (data_row, metadata_row).
    """
    max_nodes = method["max_nodes"]
    fn_method = method["fn_method"]
    seed = method["seed"]
    equal_size = method["equal_size"]
    nn_number = method["nn_number"]
    use_ce_loss = method["use_ce_loss"]
    logit_scale = method["logit_scale"]
    logistic_out = method["logistic_out"]
    lr = method["lr"]
    patience = method["patience"]
    n_data = len(p)
    add_to_name_logit = "_logit" if logit_scale else ""
    add_to_name_loss = "_ce" if use_ce_loss else "_bs"
    add_to_name_lr = f"_lr{lr}"
    add_to_name_patience = f"_p{patience}"
    model_name = "PW_NN" + str(nn_number) + "_sweep" + add_to_name_logit + add_to_name_loss + add_to_name_lr + add_to_name_patience
    assert (not method["monotonic"]), "Trying to sweep monotonic PW_NN method"
    all_weights = []
    for n_nodes in range(max_nodes + 1):
        model = fn_method(k=n_nodes, max_epochs=1500, random_state=seed, equal_size=equal_size, monotonic=False,
                          use_ce_loss=use_ce_loss, logit_scale=logit_scale, logistic_out=logistic_out, lr=lr, patience=patience)
        h = model.fit(p, y, verbose=False, batch_size=min(n_data // 4, 512))
        last_epoch = len(h.history['loss'])
        print("Last epoch", last_epoch)
        weights = model.model.get_weights()
        all_weights.append(weights)
        # NN architecture 5 keeps the y-weights third from the end; other
        # architectures keep them last.
        if nn_number == 5:
            y_w = np.array(weights[-3])  # weights for y; weights for first and last breakpoint are not used
        else:
            y_w = np.array(weights[-1])  # weights for y
        # stop as soon as the fitted y-weights are no longer strictly
        # increasing (the calibration map stopped being monotonic)
        if not np.all((y_w[1:] - y_w[:-1]) > 0):
            print("N_nodes %i is not monotonic" % n_nodes)
            break
        else:
            model_last = model
            last_last_epoch = last_epoch
    # NOTE(review): if the very first fit (n_nodes == 0) is already
    # non-monotonic, `model_last` is unbound and the predict below raises
    # UnboundLocalError; also, gc_model at the end only collects the last
    # fitted model -- confirm both are acceptable in practice.
    print("Get predictions for n_nodes %i!" % (n_nodes - 1))
    c_hat = model_last.predict(p)
    c_hat_test = model_last.predict(p_test)
    data_row = construct_data_row_from_raw_data(n_folds=None, binning_name=model_name,
                                                n_bins=model_last.k + 1, c_hat=c_hat, p=p, c=c,
                                                c_hat_test=c_hat_test, p_test=p_test, c_test=c_test)
    node_loss = np.mean(np.square(c_hat - y))
    weights_final = model_last.model.get_weights()
    metadata_row = construct_metadata_row_from_fit_model_data(model_name=model_name, n_nodes=model_last.k,
                                                              n_nodes_trick=None, cv_folds=None, node_scores=None,
                                                              node_loss=node_loss, node_ECEs_abs=None,
                                                              node_ECEs_square=None, all_weights=all_weights,
                                                              weights_final=weights_final, weights_trick=None,
                                                              all_cv_scores=None, last_epoch=last_last_epoch)
    gc_model(model)
    return data_row, metadata_row
def run_PW_NN_method(method, p, y, c, p_test, c_test):
    """Dispatch a piecewise-linear NN calibration run.

    Chooses between the sweep, non-CV, and cross-validated variants based
    on ``method``.  Returns (data_rows, metadata_rows), each a list.
    """
    if method["use_sweep"]:
        data_row, metadata_row = run_PW_NN_sweep_method(method=method, p=p, y=y, c=c,
                                                        p_test=p_test, c_test=c_test)
        return [data_row], [metadata_row]
    if method["n_cv_folds"] is None:
        data_row, _, _ = run_PW_NN_nonCV_method(method=method, p=p, y=y, c=c,
                                                p_test=p_test, c_test=c_test)
        return [data_row], []
    data_row, data_row_tr, metadata_row = run_PW_NN_CV_method(method=method, p=p, y=y, c=c,
                                                              p_test=p_test, c_test=c_test)
    return [data_row, data_row_tr], [metadata_row]
def run_binning_CV_method(method, p, y, c, p_test, c_test):
    """Evaluate equal-width/equal-size binning with a cross-validated bin count.

    Returns two data rows: one for the argmin-selected bin count and one for
    the bin count chosen by cv_trick.
    """
    method_name = method["method_name"]
    n_cv_folds = method["n_cv_folds"]
    name_addition = f"_CV{n_cv_folds}"
    assert method_name in ("eq_width", "eq_size"), "Method name incorrect in CV binning!"
    use_eq_width = method_name == "eq_width"
    binning_cls = EqualWidthBinning if use_eq_width else EqualSizeBinning
    bin_scores, all_cv_scores = binning_n_bins_with_crossvalidation(p=p, y=y, use_eq_width=use_eq_width,
                                                                   n_splits=n_cv_folds)
    # Bin count with the best raw CV score, and the alternative pick from cv_trick.
    n_bins_best = np.argmin(bin_scores)
    n_bins_trick = cv_trick(bin_scores)
    binning_best = binning_cls(p, y, c, n_bins_best)
    binning_trick = binning_cls(p, y, c, n_bins_trick)
    row = construct_data_row_from_binning(binning=binning_best, n_folds=n_cv_folds, p_test=p_test, c_test=c_test,
                                          name_addition=name_addition)
    row_trick = construct_data_row_from_binning(binning=binning_trick, n_folds=n_cv_folds, p_test=p_test,
                                                c_test=c_test,
                                                name_addition=name_addition + "tr")
    return row, row_trick
def run_binning_nonCV_method(method, p, y, c, p_test, c_test):
    """Evaluate a single (non-cross-validated) binning scheme.

    :param method: dict with "method_name" in {"eq_size", "eq_width",
        "monotonic_eq_size"} and, for the first two, "n_bins"
    :param p, y, c: training predictions, labels and true probabilities
    :param p_test, c_test: test predictions and true probabilities
    :return: a data row built via construct_data_row_from_binning
    :raises ValueError: if ``method["method_name"]`` is not a supported scheme
    """
    method_name = method["method_name"]
    n_bins = method["n_bins"]
    if method_name == "eq_size":
        binning = EqualSizeBinning(p, y, c, n_bins)
        name_addition = f"_{n_bins}"
    elif method_name == "eq_width":
        binning = EqualWidthBinning(p, y, c, n_bins)
        name_addition = f"_{n_bins}"
    elif method_name == "monotonic_eq_size":
        # Monotonic binning derives its own bin count; no n_bins suffix.
        binning = MonotonicEqualSizeBinning(p, y, c)
        name_addition = ""
    else:
        # Previously an unknown name fell through and surfaced as an opaque
        # NameError on the unbound local `binning`; fail loudly instead.
        raise ValueError("Unknown binning method_name: %r" % (method_name,))
    return construct_data_row_from_binning(binning, n_folds=None, p_test=p_test, c_test=c_test,
                                           name_addition=name_addition)
def run_binning_method(method, p, y, c, p_test, c_test):
    """Dispatch between CV and non-CV binning evaluation.

    Returns (data_rows, metadata_rows); binning methods emit no metadata.
    """
    if method["n_cv_folds"] is None:
        row = run_binning_nonCV_method(method=method, p=p, y=y, c=c, p_test=p_test, c_test=c_test)
        return [row], []
    row_cv, row_cv_tr = run_binning_CV_method(method=method, p=p, y=y, c=c, p_test=p_test, c_test=c_test)
    return [row_cv, row_cv_tr], []
def run_kde_method(p, y, c, calibration_function, p_test, c_test):
    """Evaluate KDE-based calibration estimates.

    Produces two data rows: the pointwise estimates (with test-set
    counterparts) and the integral estimates (no test-set counterparts).
    """
    est = KDE_estimator(p=p, y=y, c=c, calibration_function=calibration_function, p_test=p_test,
                        c_test=c_test)
    pointwise_row = construct_data_row(
        n_folds=None, binning_name="kde_pointwise", n_bins=None,
        c_hat_distance_p=est.pointwise_ece_abs,
        c_hat_distance_p_debiased=est.pointwise_ece_abs,
        c_hat_distance_p_square=est.pointwise_ece_sq,
        c_hat_distance_p_square_debiased=est.pointwise_ece_sq,
        c_hat_distance_c=est.pointwise_c_hat_dist_c_abs,
        c_hat_distance_c_square=est.pointwise_c_hat_dist_c_sq,
        test_c_hat_distance_p=est.pointwise_ece_abs_test,
        test_c_hat_distance_p_debiased=est.pointwise_ece_abs_test,
        test_c_hat_distance_p_square=est.pointwise_ece_sq_test,
        test_c_hat_distance_p_square_debiased=est.pointwise_ece_sq_test,
        test_c_hat_distance_c=est.pointwise_c_hat_dist_c_abs_test,
        test_c_hat_distance_c_square=est.pointwise_c_hat_dist_c_sq_test,
    )
    # The integral variant carries no test-set metrics.
    integral_row = construct_data_row(
        n_folds=None, binning_name="kde_integral", n_bins=None,
        c_hat_distance_p=est.integral_ece_abs,
        c_hat_distance_p_debiased=est.integral_ece_abs,
        c_hat_distance_p_square=est.integral_ece_sq,
        c_hat_distance_p_square_debiased=est.integral_ece_sq,
        c_hat_distance_c=est.integral_c_hat_dist_c_abs,
        c_hat_distance_c_square=est.integral_c_hat_dist_c_sq,
        test_c_hat_distance_p=None,
        test_c_hat_distance_p_debiased=None,
        test_c_hat_distance_p_square=None,
        test_c_hat_distance_p_square_debiased=None,
        test_c_hat_distance_c=None,
        test_c_hat_distance_c_square=None,
    )
    return [pointwise_row, integral_row], []
def run_isotonic(p, y, c, p_test, c_test):
    """Fit isotonic calibration on (p, y) and evaluate on train and test."""
    calibrator = IsotonicCalibration()
    calibrator.fit(p, y)
    # One fewer bin than there are threshold points in the isotonic fit.
    n_bins = len(calibrator.X_thresholds_) - 1
    row = construct_data_row_from_raw_data(n_folds=None, binning_name="isotonic",
                                           n_bins=n_bins,
                                           c_hat=calibrator.predict(p), p=p, c=c,
                                           c_hat_test=calibrator.predict(p_test), p_test=p_test, c_test=c_test)
    return [row], []
def run_platt(p, y, c, p_test, c_test):
    """Fit Platt scaling (sigmoid on log-odds) and evaluate on train and test."""
    eps = 1e-8

    def to_logit_column(probs):
        # Clip away exact 0/1 so the log-odds stay finite, then shape (n, 1).
        clipped = np.clip(probs, eps, 1 - eps)
        return np.log(clipped / (1 - clipped)).reshape(-1, 1)

    logits = to_logit_column(p)
    logits_test = to_logit_column(p_test)
    calibrator = SigmoidCalibration()
    calibrator.fit(logits, y)
    row = construct_data_row_from_raw_data(n_folds=None, binning_name="platt", n_bins=None,
                                           c_hat=calibrator.predict_proba(logits), p=p, c=c,
                                           c_hat_test=calibrator.predict_proba(logits_test),
                                           p_test=p_test, c_test=c_test)
    return [row], []
def run_beta(p, y, c, p_test, c_test):
    """Fit beta calibration (full "abm" parameterization) and evaluate."""
    calibrator = BetaCalibration(parameters="abm")
    calibrator.fit(p.reshape(-1, 1), y)
    row = construct_data_row_from_raw_data(n_folds=None, binning_name="beta", n_bins=None,
                                           c_hat=calibrator.predict(p.reshape(-1, 1)), p=p, c=c,
                                           c_hat_test=calibrator.predict(p_test.reshape(-1, 1)),
                                           p_test=p_test, c_test=c_test)
    return [row], []
def run_brier_score(p, y):
brier_score = np.mean(np.square(p - y))
data_row = construct_data_row(n_folds=None, binning_name="brier_score", n_bins=None,
c_hat_distance_p=brier_score,
c_hat_distance_p_debiased=brier_score,
c_hat_distance_p_square=brier_score,
c_hat_distance_p_square_debiased=brier_score,
c_hat_distance_c=brier_score,
c_hat_distance_c_square=brier_score)
return | |
<gh_stars>1000+
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import print_function
import sys
sys.path.insert(1, "../../../") # allow us to run this standalone
import h2o
from h2o.estimators.random_forest import H2ORandomForestEstimator
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
from h2o.estimators.gbm import H2OGradientBoostingEstimator
from h2o.estimators.stackedensemble import H2OStackedEnsembleEstimator
from h2o.estimators.naive_bayes import H2ONaiveBayesEstimator
from tests import pyunit_utils
def infer_distribution_helper(dist, expected_dist, kwargs1=None, kwargs2=None):
    """Train two GBM base models with distribution ``dist`` and assert the
    stacked ensemble's GBM metalearner infers ``expected_dist``.

    :param dist: distribution passed to both base GBMs
    :param expected_dist: distribution the metalearner should report
    :param kwargs1: extra constructor kwargs for the first base GBM
    :param kwargs2: extra constructor kwargs for the second base GBM
    """
    # None-defaults instead of mutable {} defaults (shared across calls);
    # this also matches the pattern used by infer_family_helper.
    kwargs1 = dict() if kwargs1 is None else kwargs1
    kwargs2 = dict() if kwargs2 is None else kwargs2
    train = h2o.import_file(pyunit_utils.locate("smalldata/iris/iris_train.csv"))
    test = h2o.import_file(pyunit_utils.locate("smalldata/iris/iris_test.csv"))
    # Build a response column appropriate for the requested distribution.
    if dist == "multinomial":
        y = "species"
    elif dist == "bernoulli":
        train["response"] = (train["species"] == "Iris-versicolor").asfactor()
        test["response"] = (test["species"] == "Iris-versicolor").asfactor()
        y = "response"
    elif dist == "quasibinomial":
        train["response"] = (train["species"] == "Iris-versicolor")
        test["response"] = (test["species"] == "Iris-versicolor")
        y = "response"
    else:
        # Regression distributions predict a numeric column.
        y = "petal_wid"
    x = train.columns
    x.remove(y)
    nfolds = 2
    gbm = H2OGradientBoostingEstimator(nfolds=nfolds,
                                       fold_assignment="Modulo",
                                       keep_cross_validation_predictions=True,
                                       distribution=dist, **kwargs1)
    gbm.train(x=x, y=y, training_frame=train)
    gbm2 = H2OGradientBoostingEstimator(nfolds=nfolds,
                                        fold_assignment="Modulo",
                                        keep_cross_validation_predictions=True,
                                        distribution=dist, **kwargs2)
    gbm2.train(x=x, y=y, training_frame=train)
    se = H2OStackedEnsembleEstimator(training_frame=train,
                                     validation_frame=test,
                                     base_models=[gbm, gbm2],
                                     metalearner_algorithm="gbm")
    se.train(x, y, train)
    assert se.metalearner().actual_params.get("distribution") == expected_dist, \
        "Expected distribution {} but got {}".format(expected_dist, se.metalearner().actual_params.get("distribution"))
def infer_distribution_test():
    """Distribution inference for the GBM metalearner: the distribution is
    inherited when both base models agree, reverts to the default (gaussian)
    when their distribution parameters differ, and is unaffected by parameters
    belonging to a different distribution."""
    from h2o.utils.distributions import CustomDistributionGeneric, CustomDistributionGaussian
    class CustomDistributionGaussian2(CustomDistributionGeneric):
        # A second Gaussian-like custom distribution so the two uploads differ.
        def link(self):
            return "identity"
        def init(self, w, o, y):
            return [w * (y - o), w]
        def gradient(self, y, f):
            return y - f
        def gamma(self, w, y, z, f):
            return [w * z, w]
    custom_dist1 = h2o.upload_custom_distribution(CustomDistributionGaussian)
    custom_dist2 = h2o.upload_custom_distribution(CustomDistributionGaussian2)
    # Matching base models: the distribution is inherited as-is.
    for dist in ["poisson", "laplace", "tweedie", "gaussian", "huber", "gamma",
                 "quantile", "bernoulli", "quasibinomial", "multinomial"]:
        infer_distribution_helper(dist, dist)
    # custom distribution: inherited when both base models share the same upload
    infer_distribution_helper("custom", "custom",
                              dict(custom_distribution_func=custom_dist1),
                              dict(custom_distribution_func=custom_dist1))
    # revert to default when the base models' distribution parameters differ
    infer_distribution_helper("tweedie", "gaussian", dict(tweedie_power=1.2))
    infer_distribution_helper("huber", "gaussian", dict(huber_alpha=0.2))
    infer_distribution_helper("quantile", "gaussian", dict(quantile_alpha=0.2))
    infer_distribution_helper("custom", "gaussian",
                              dict(custom_distribution_func=custom_dist1),
                              dict(custom_distribution_func=custom_dist2))
    # unaffected by a parameter that belongs to a different distribution
    infer_distribution_helper("quantile", "quantile", dict(tweedie_power=1.2))
    infer_distribution_helper("tweedie", "tweedie", dict(huber_alpha=0.2))
    infer_distribution_helper("huber", "huber", dict(quantile_alpha=0.2))
    infer_distribution_helper("custom", "custom",
                              dict(custom_distribution_func=custom_dist1),
                              dict(custom_distribution_func=custom_dist1,
                                   tweedie_power=1.2))
def infer_family_helper(family, expected_family, link, expected_link, kwargs1=None, kwargs2=None):
    """Train two GLM base models and assert that both the "glm" and "auto"
    metalearners of a stacked ensemble infer ``expected_family`` (and
    ``expected_link`` when a link is given).

    Note: mutates kwargs1/kwargs2 in place by filling in "family"/"link".

    :param family: family for both base GLMs (unless overridden via kwargs)
    :param expected_family: family the metalearner is expected to report
    :param link: link for both base GLMs; falsy means "do not set a link"
    :param expected_link: link the metalearner is expected to report
    :param kwargs1: extra constructor kwargs for the first base GLM
    :param kwargs2: extra constructor kwargs for the second base GLM
    """
    kwargs1 = dict() if kwargs1 is None else kwargs1
    kwargs2 = dict() if kwargs2 is None else kwargs2
    train = h2o.import_file(pyunit_utils.locate("smalldata/iris/iris_train.csv"))
    test = h2o.import_file(pyunit_utils.locate("smalldata/iris/iris_test.csv"))
    # Build a response column appropriate for the requested family.
    if family == "multinomial":
        y = "species"
    elif family == "binomial":
        train["response"] = (train["species"] == "Iris-versicolor").asfactor()
        test["response"] = (test["species"] == "Iris-versicolor").asfactor()
        y = "response"
    elif family == "quasibinomial" or family == "fractionalbinomial":
        # Fractional target: values 0 or 0.5.
        train["response"] = (train["species"] == "Iris-versicolor") / 2
        test["response"] = (test["species"] == "Iris-versicolor") / 2
        y = "response"
    elif family == "ordinal":
        # Three-level ordered factor: 0, 1 and 2 (setosa forced to 2).
        y = "response"
        train[y] = (train["species"] == "Iris-versicolor")
        test[y] = (test["species"] == "Iris-versicolor")
        train[(train["species"] == "Iris-setosa"), y] = 2
        test[(test["species"] == "Iris-setosa"), y] = 2
        train[y] = train[y].asfactor()
        test[y] = test[y].asfactor()
    else:
        # Regression families predict a numeric column.
        y = "petal_wid"
    x = train.columns
    x.remove(y)
    # Fill in family/link unless the caller already set them explicitly.
    if "link" not in kwargs1 and link:
        kwargs1["link"] = link
    if "family" not in kwargs1:
        kwargs1["family"] = family
    if "link" not in kwargs2 and link:
        kwargs2["link"] = link
    if "family" not in kwargs2:
        kwargs2["family"] = family
    nfolds = 2
    glm = H2OGeneralizedLinearEstimator(nfolds=nfolds,
                                        fold_assignment="Modulo",
                                        keep_cross_validation_predictions=True,
                                        **kwargs1)
    glm.train(x=x, y=y, training_frame=train)
    glm2 = H2OGeneralizedLinearEstimator(nfolds=nfolds,
                                         fold_assignment="Modulo",
                                         keep_cross_validation_predictions=True,
                                         **kwargs2)
    glm2.train(x=x, y=y, training_frame=train)
    # Explicit GLM metalearner.
    se = H2OStackedEnsembleEstimator(training_frame=train,
                                     validation_frame=test,
                                     base_models=[glm, glm2],
                                     metalearner_algorithm="glm")
    se.train(x, y, train)
    assert se.metalearner().actual_params.get("family") == expected_family, \
        "Expected family {} but got {}".format(expected_family, se.metalearner().actual_params.get("family"))
    if link:
        assert se.metalearner().actual_params.get("link") == expected_link, \
            "Expected link {} but got {}".format(expected_link, se.metalearner().actual_params.get("link"))
    # The "auto" metalearner must infer the same family/link.
    se_auto = H2OStackedEnsembleEstimator(training_frame=train,
                                          validation_frame=test,
                                          base_models=[glm, glm2],
                                          metalearner_algorithm="auto")
    se_auto.train(x, y, train)
    assert se_auto.metalearner().actual_params.get("family") == expected_family, \
        "Expected family {} but got {}".format(expected_family, se_auto.metalearner().actual_params.get("family"))
    if link:
        assert se_auto.metalearner().actual_params.get("link") == expected_link, \
            "Expected link {} but got {}".format(expected_link, se_auto.metalearner().actual_params.get("link"))
def infer_family_test():
    """Family/link inference for the GLM metalearner: inherited when the base
    models agree, reverted to the defaults when they disagree."""
    # family -> links to exercise (None means "no explicit link").
    supported = {
        "gaussian": ["identity", "log", "inverse"],
        "binomial": ["logit"],
        # "fractionalbinomial": ["logit"],  # fractional binomial distribution does not exists
        "multinomial": [None],
        # "ordinal": ["ologit"],  # ordinal distribution does not exists
        "quasibinomial": ["logit"],
        "poisson": ["identity", "log"],
        # "negativebinomial": ["identity", "log"],  # negative binomial distribution is not implemented
        "gamma": ["identity", "log", "inverse"],
        "tweedie": ["tweedie"],
    }
    for fam, links in supported.items():
        for lnk in links:
            infer_family_helper(fam, fam, lnk, lnk)
    # Disagreeing base models revert the metalearner to the defaults.
    infer_family_helper("gamma", "gaussian", "log", "identity", kwargs2=dict(link="inverse"))
    infer_family_helper("gamma", "gaussian", "log", "identity", kwargs2=dict(family="tweedie", link="tweedie"))
def infer_mixed_family_and_dist_helper(family, expected_family, first_glm, expected_link=None, kwargs_glm=None,
                                       kwargs_gbm=None, metalearner_params=None):
    """Train one GLM and one GBM base model and check metalearner inference.

    Three metalearners are checked: "glm" and "auto" must report
    ``expected_family`` (and ``expected_link`` when given); "gbm" must report
    the corresponding distribution.

    :param first_glm: when True the GLM is the first base model, else the GBM
    :param metalearner_params: explicit metalearner params; the "distribution"
        key is stripped for the GLM/auto metalearners and "family"/"link" for
        the GBM metalearner
    """
    kwargs_glm = dict() if kwargs_glm is None else kwargs_glm
    kwargs_gbm = dict() if kwargs_gbm is None else kwargs_gbm
    metalearner_params = dict() if metalearner_params is None else metalearner_params
    # GBM uses "bernoulli" where GLM uses "binomial"; other names coincide.
    distribution = family if not family == "binomial" else "bernoulli"
    expected_distribution = expected_family if not expected_family == "binomial" else "bernoulli"
    train = h2o.import_file(pyunit_utils.locate("smalldata/iris/iris_train.csv"))
    test = h2o.import_file(pyunit_utils.locate("smalldata/iris/iris_test.csv"))
    # Build a response column appropriate for the requested family.
    if family == "multinomial":
        y = "species"
    elif family == "binomial":
        train["response"] = (train["species"] == "Iris-versicolor").asfactor()
        test["response"] = (test["species"] == "Iris-versicolor").asfactor()
        y = "response"
    elif family == "quasibinomial" or family == "fractionalbinomial":
        # Fractional target: values 0 or 0.5.
        train["response"] = (train["species"] == "Iris-versicolor") / 2
        test["response"] = (test["species"] == "Iris-versicolor") / 2
        y = "response"
    elif family == "ordinal":
        # Three-level ordered factor: 0, 1 and 2 (setosa forced to 2).
        y = "response"
        train[y] = (train["species"] == "Iris-versicolor")
        test[y] = (test["species"] == "Iris-versicolor")
        train[(train["species"] == "Iris-setosa"), y] = 2
        test[(test["species"] == "Iris-setosa"), y] = 2
        train[y] = train[y].asfactor()
        test[y] = test[y].asfactor()
    else:
        # Regression families predict a numeric column.
        y = "petal_wid"
    x = train.columns
    x.remove(y)
    # Fill in family/distribution unless the caller set them explicitly.
    if "family" not in kwargs_glm:
        kwargs_glm["family"] = family
    if "distribution" not in kwargs_gbm:
        kwargs_gbm["distribution"] = distribution
    nfolds = 2
    glm = H2OGeneralizedLinearEstimator(nfolds=nfolds,
                                        fold_assignment="Modulo",
                                        keep_cross_validation_predictions=True,
                                        **kwargs_glm)
    glm.train(x=x, y=y, training_frame=train)
    gbm = H2OGradientBoostingEstimator(nfolds=nfolds,
                                       fold_assignment="Modulo",
                                       keep_cross_validation_predictions=True,
                                       **kwargs_gbm)
    gbm.train(x=x, y=y, training_frame=train)
    # GLM metalearner: "distribution" is not a GLM param, so strip it.
    se = H2OStackedEnsembleEstimator(training_frame=train,
                                     validation_frame=test,
                                     base_models=[glm, gbm] if first_glm else [gbm, glm],
                                     metalearner_algorithm="glm",
                                     metalearner_params={k: v for k, v in metalearner_params.items() if
                                                         k != "distribution"})
    se.train(x, y, train)
    assert se.metalearner().actual_params.get("family") == expected_family, \
        "Expected family {} but got {}".format(expected_family, se.metalearner().actual_params.get("family"))
    if expected_link:
        assert se.metalearner().actual_params.get("link") == expected_link, \
            "Expected link {} but got {}".format(expected_link, se.metalearner().actual_params.get("link"))
    # "auto" metalearner must behave like the GLM one here.
    se_auto = H2OStackedEnsembleEstimator(training_frame=train,
                                          validation_frame=test,
                                          base_models=[glm, gbm] if first_glm else [gbm, glm],
                                          metalearner_algorithm="auto",
                                          metalearner_params={k: v for k, v in metalearner_params.items() if
                                                              k != "distribution"})
    se_auto.train(x, y, train)
    assert se_auto.metalearner().actual_params.get("family") == expected_family, \
        "Expected family {} but got {}".format(expected_family, se_auto.metalearner().actual_params.get("family"))
    if expected_link:
        assert se_auto.metalearner().actual_params.get("link") == expected_link, \
            "Expected link {} but got {}".format(expected_link, se_auto.metalearner().actual_params.get("link"))
    # GBM metalearner: "family"/"link" are not GBM params, so strip them.
    se_gbm = H2OStackedEnsembleEstimator(training_frame=train,
                                         validation_frame=test,
                                         base_models=[glm, gbm] if first_glm else [gbm, glm],
                                         metalearner_algorithm="gbm",
                                         metalearner_params={k: v for k, v in metalearner_params.items() if
                                                             k != "family" and k != "link"})
    se_gbm.train(x, y, train)
    assert se_gbm.metalearner().actual_params.get("distribution") == expected_distribution, \
        "Expected distribution {} but got {}".format(expected_distribution,
                                                     se_gbm.metalearner().actual_params.get("distribution"))
def infer_mixed_family_and_dist_test():
    """Family/distribution inference with mixed GLM+GBM base models, for both
    base-model orderings."""
    # family -> links (links unused here; kept for parity with infer_family_test).
    family_links = {
        "gaussian": ["identity", "log", "inverse"],
        "binomial": ["logit"],
        # "fractionalbinomial": ["logit"],  # fractional binomial distribution does not exists
        "multinomial": [None],
        # "ordinal": ["ologit"],  # ordinal distribution does not exists
        "quasibinomial": ["logit"],
        "poisson": ["identity", "log"],
        # "negativebinomial": ["identity", "log"],  # negative binomial distribution is not implemented
        "gamma": ["identity", "log", "inverse"],
        "tweedie": ["tweedie"],
    }
    for fam in family_links:
        for glm_first in (False, True):
            infer_mixed_family_and_dist_helper(fam, fam, glm_first)
    # Disagreeing base models revert the metalearner to the defaults.
    for glm_first in (False, True):
        infer_mixed_family_and_dist_helper("gamma", "gaussian", glm_first, kwargs_glm=dict(family="tweedie"))
    for glm_first in (False, True):
        infer_mixed_family_and_dist_helper("gamma", "gaussian", glm_first, kwargs_gbm=dict(distribution="tweedie"))
    # The link is inherited when every GLM base model shares it — the first
    # GLM base model is considered, not just the first base model.
    infer_mixed_family_and_dist_helper("gamma", "gamma", True, expected_link="log", kwargs_glm=dict(link="log"))
    infer_mixed_family_and_dist_helper("gamma", "gamma", False, expected_link="log", kwargs_glm=dict(link="log"))
    # Specifying the default link explicitly changes nothing.
    for glm_first in (False, True):
        infer_mixed_family_and_dist_helper("tweedie", "tweedie", glm_first, kwargs_glm=dict(link="tweedie"))
def metalearner_obeys_metalearner_params_test():
    """Explicit metalearner_params must override any inferred family/distribution."""
    poisson_params = dict(distribution="poisson", family="poisson")
    for fam in ("gaussian", "tweedie"):
        for glm_first in (False, True):
            infer_mixed_family_and_dist_helper(fam, "poisson", glm_first, metalearner_params=poisson_params)
    # Without metalearner_params these would revert to the defaults.
    for glm_first in (False, True):
        infer_mixed_family_and_dist_helper("gamma", "poisson", glm_first, kwargs_glm=dict(family="tweedie"),
                                           metalearner_params=poisson_params)
    for glm_first in (False, True):
        infer_mixed_family_and_dist_helper("gamma", "poisson", glm_first, kwargs_gbm=dict(distribution="tweedie"),
                                           metalearner_params=poisson_params)
    # Without metalearner_params the link could be inherited from the GLMs.
    gamma_params = dict(family="gamma", link="identity", distribution="gamma")
    for glm_first in (False, True):
        infer_mixed_family_and_dist_helper("gamma", "gamma", glm_first, expected_link="identity",
                                           kwargs_glm=dict(link="log"), metalearner_params=gamma_params)
    # A link from a different family must not be propagated.
    for params in (dict(family="gaussian", distribution="gaussian"),
                   dict(family="gaussian", link="identity", distribution="gaussian")):
        for glm_first in (False, True):
            infer_mixed_family_and_dist_helper("gamma", "gaussian", glm_first, expected_link="identity",
                                               kwargs_glm=dict(link="log"), metalearner_params=params)
def infer_uses_defaults_when_base_model_doesnt_support_distributions_test():
    """When a base model without a distribution parameter (DRF) is present,
    inference must fall back to the default distribution (gaussian here),
    regardless of where the DRF sits in the base-model list."""
    train = h2o.import_file(pyunit_utils.locate("smalldata/iris/iris_train.csv"))
    test = h2o.import_file(pyunit_utils.locate("smalldata/iris/iris_test.csv"))
    # Regression setup: predict a numeric column.
    x_reg = train.columns
    y_reg = "petal_wid"
    x_reg.remove(y_reg)
    nfolds = 2
    glm_reg = H2OGeneralizedLinearEstimator(nfolds=nfolds,
                                            fold_assignment="Modulo",
                                            keep_cross_validation_predictions=True,
                                            family="tweedie"
                                            )
    glm_reg.train(x=x_reg, y=y_reg, training_frame=train)
    gbm_reg = H2OGradientBoostingEstimator(nfolds=nfolds,
                                           fold_assignment="Modulo",
                                           keep_cross_validation_predictions=True,
                                           distribution="tweedie"
                                           )
    gbm_reg.train(x=x_reg, y=y_reg, training_frame=train)
    # DRF has no distribution parameter.
    drf_reg = H2ORandomForestEstimator(nfolds=nfolds,
                                       fold_assignment="Modulo",
                                       keep_cross_validation_predictions=True
                                       )
    drf_reg.train(x=x_reg, y=y_reg, training_frame=train)
    # Only distribution-aware base models: tweedie is inherited.
    se_reg_0 = H2OStackedEnsembleEstimator(training_frame=train,
                                           validation_frame=test,
                                           base_models=[glm_reg, gbm_reg],
                                           metalearner_algorithm="gbm")
    se_reg_0.train(x_reg, y_reg, train)
    assert se_reg_0.metalearner().actual_params.get("distribution") == "tweedie", \
        "Expected distribution {} but got {}".format("tweedie",
                                                     se_reg_0.metalearner().actual_params.get("distribution"))
    # DRF last in the list: falls back to gaussian.
    se_reg_1 = H2OStackedEnsembleEstimator(training_frame=train,
                                           validation_frame=test,
                                           base_models=[glm_reg, gbm_reg, drf_reg],
                                           metalearner_algorithm="gbm")
    se_reg_1.train(x_reg, y_reg, train)
    assert se_reg_1.metalearner().actual_params.get("distribution") == "gaussian", \
        "Expected distribution {} but got {}".format("gaussian",
                                                     se_reg_1.metalearner().actual_params.get("distribution"))
    # DRF first in the list: same fallback.
    se_reg_2 = H2OStackedEnsembleEstimator(training_frame=train,
                                           validation_frame=test,
                                           base_models=[drf_reg, glm_reg, gbm_reg],
                                           metalearner_algorithm="gbm")
    se_reg_2.train(x_reg, y_reg, train)
    assert se_reg_2.metalearner().actual_params.get("distribution") == "gaussian", \
        "Expected distribution {} but got {}".format("gaussian",
                                                     se_reg_2.metalearner().actual_params.get("distribution"))
def basic_inference_works_for_DRF_and_NB_test():
train | |
import argparse
import asyncio
import json
import logging
import urllib.parse
import atexit
import time
import functools
import webbrowser
import multiprocessing
import socket
import sys
import typing
from random import randrange
from Utils import get_item_name_from_id, get_location_name_from_address, ReceivedItem
exit_func = atexit.register(input, "Press enter to close.")
import ModuleUpdate
ModuleUpdate.update()
import colorama
import websockets
import prompt_toolkit
from prompt_toolkit.patch_stdout import patch_stdout
from NetUtils import Endpoint
import WebUI
import Regions
import Utils
def create_named_task(coro, *args, name=None):
    """Create an asyncio task, attaching a name where the runtime supports it.

    The ``name`` keyword of :func:`asyncio.create_task` was added in
    Python 3.8; on older interpreters the name is silently dropped.

    :param coro: coroutine to schedule
    :param args: extra positional arguments forwarded to ``asyncio.create_task``
    :param name: task name; defaults to the coroutine's ``__name__``
    """
    if not name:
        name = coro.__name__
    # Compare the full version tuple: the old check
    # (major > 2 and minor > 7) breaks for any release whose minor
    # version is <= 7 (e.g. a hypothetical 4.0) and misreads 2.8-style
    # numbering; tuple comparison is the canonical idiom.
    if sys.version_info >= (3, 8):
        return asyncio.create_task(coro, *args, name=name)
    else:
        return asyncio.create_task(coro, *args)
class Context():
    """Mutable client-session state shared by this client's async tasks:
    SNES-side connection state, multiworld-server connection state and the
    WebUI bridge."""
    def __init__(self, snes_address, server_address, password, found_items, port: int):
        # Endpoint addresses this client talks to.
        self.snes_address = snes_address
        self.server_address = server_address
        # WebUI Stuff
        self.ui_node = WebUI.WebUiClient()
        self.custom_address = None
        self.webui_socket_port: typing.Optional[int] = port
        # Server-announced settings; presumably filled in after connecting — TODO confirm at call sites.
        self.hint_cost = 0
        self.check_points = 0
        self.forfeit_mode = ''
        self.remaining_mode = ''
        self.hint_points = 0
        # End WebUI Stuff
        # Async coordination primitives for the client's background tasks.
        self.exit_event = asyncio.Event()
        self.watcher_event = asyncio.Event()
        self.input_queue = asyncio.Queue()
        self.input_requests = 0
        # SNES connection state.
        self.snes_socket = None
        self.snes_state = SNES_DISCONNECTED
        self.snes_attached_device = None
        self.snes_reconnect_address = None
        self.snes_recv_queue = asyncio.Queue()
        self.snes_request_lock = asyncio.Lock()
        self.is_sd2snes = False
        self.snes_write_buffer = []
        # Multiworld server connection state.
        self.server_task = None
        self.server: typing.Optional[Endpoint] = None
        self.password = password
        self.server_version = (0, 0, 0)
        self.team = None
        self.slot = None
        # Fixed annotation: was typing.Dict[int: str] (a slice, not a type pair).
        self.player_names: typing.Dict[int, str] = {}
        # Game-progress tracking.
        self.locations_checked = set()
        self.locations_scouted = set()
        self.items_received = []
        self.locations_info = {}
        self.awaiting_rom = False
        self.rom = None
        self.prev_rom = None
        self.auth = None
        self.found_items = found_items
        self.finished_game = False
        self.slow_mode = False
    @property
    def endpoints(self):
        """Connected server endpoints as a list (empty when not connected)."""
        if self.server:
            return [self.server]
        else:
            return []
    async def disconnect(self):
        """Close the server socket (if open), notify the WebUI, and wait for
        the server task to finish."""
        if self.server and not self.server.socket.closed:
            await self.server.socket.close()
        self.ui_node.send_connection_status(self)
        if self.server_task is not None:
            await self.server_task
    async def send_msgs(self, msgs):
        """JSON-encode and send msgs to the server; silently drops them when
        there is no open connection."""
        if not self.server or not self.server.socket.open or self.server.socket.closed:
            return
        await self.server.socket.send(json.dumps(msgs))
# ANSI SGR attribute codes by human-readable name.
color_codes = {'reset': 0, 'bold': 1, 'underline': 4, 'black': 30, 'red': 31, 'green': 32, 'yellow': 33, 'blue': 34,
               'magenta': 35, 'cyan': 36, 'white': 37, 'black_bg': 40, 'red_bg': 41, 'green_bg': 42, 'yellow_bg': 43,
               'blue_bg': 44, 'purple_bg': 45, 'cyan_bg': 46, 'white_bg': 47}


def color_code(*args):
    """Build the ANSI escape sequence selecting the named SGR attributes."""
    codes = ';'.join(str(color_codes[name]) for name in args)
    return '\033[' + codes + 'm'


def color(text, *args):
    """Wrap *text* in the requested ANSI attributes, resetting afterwards."""
    return color_code(*args) + text + color_code('reset')
# Delay between reconnect attempts; consumed by connection logic elsewhere in this file.
RECONNECT_DELAY = 5
# Base addresses of the console memory regions as exposed by the SNES bridge.
# NOTE(review): region meanings inferred from the names — verify against the
# usb2snes/sd2snes address-space documentation.
ROM_START = 0x000000
WRAM_START = 0xF50000
WRAM_SIZE = 0x20000
SRAM_START = 0xE00000
# Location of the ROM name string inside SRAM.
ROMNAME_START = SRAM_START + 0x2000
ROMNAME_SIZE = 0x15
# Game-mode byte values; names suggest "actively playing" vs "end sequence" —
# NOTE(review): confirm against the game-mode table used by the watcher loop.
INGAME_MODES = {0x07, 0x09, 0x0b}
ENDGAME_MODES = {0x19, 0x1a}
# Save-data region inside WRAM, and the individual fields within it used for
# item receiving and location scouting.
SAVEDATA_START = WRAM_START + 0xF000
SAVEDATA_SIZE = 0x500
RECV_PROGRESS_ADDR = SAVEDATA_START + 0x4D0  # 2 bytes
RECV_ITEM_ADDR = SAVEDATA_START + 0x4D2  # 1 byte
RECV_ITEM_PLAYER_ADDR = SAVEDATA_START + 0x4D3  # 1 byte
ROOMID_ADDR = SAVEDATA_START + 0x4D4  # 2 bytes
ROOMDATA_ADDR = SAVEDATA_START + 0x4D6  # 1 byte
SCOUT_LOCATION_ADDR = SAVEDATA_START + 0x4D7  # 1 byte
SCOUTREPLY_LOCATION_ADDR = SAVEDATA_START + 0x4D8  # 1 byte
SCOUTREPLY_ITEM_ADDR = SAVEDATA_START + 0x4D9  # 1 byte
SCOUTREPLY_PLAYER_ADDR = SAVEDATA_START + 0x4DA  # 1 byte
location_table_uw = {"Blind's Hideout - Top": (0x11d, 0x10),
"Blind's Hideout - Left": (0x11d, 0x20),
"Blind's Hideout - Right": (0x11d, 0x40),
"Blind's Hideout - Far Left": (0x11d, 0x80),
"Blind's Hideout - Far Right": (0x11d, 0x100),
'Secret Passage': (0x55, 0x10),
'Waterfall Fairy - Left': (0x114, 0x10),
'Waterfall Fairy - Right': (0x114, 0x20),
"King's Tomb": (0x113, 0x10),
'Floodgate Chest': (0x10b, 0x10),
"Link's House": (0x104, 0x10),
'Kakariko Tavern': (0x103, 0x10),
'Chicken House': (0x108, 0x10),
"Aginah's Cave": (0x10a, 0x10),
"Sahasrahla's Hut - Left": (0x105, 0x10),
"Sahasrahla's Hut - Middle": (0x105, 0x20),
"Sahasrahla's Hut - Right": (0x105, 0x40),
'Kakariko Well - Top': (0x2f, 0x10),
'Kakariko Well - Left': (0x2f, 0x20),
'Kakariko Well - Middle': (0x2f, 0x40),
'Kakariko Well - Right': (0x2f, 0x80),
'Kakariko Well - Bottom': (0x2f, 0x100),
'Lost Woods Hideout': (0xe1, 0x200),
'Lumberjack Tree': (0xe2, 0x200),
'Cave 45': (0x11b, 0x400),
'Graveyard Cave': (0x11b, 0x200),
'Checkerboard Cave': (0x126, 0x200),
'Mini Moldorm Cave - Far Left': (0x123, 0x10),
'Mini Moldorm Cave - Left': (0x123, 0x20),
'Mini Moldorm Cave - Right': (0x123, 0x40),
'Mini Moldorm Cave - Far Right': (0x123, 0x80),
'Mini Moldorm Cave - Generous Guy': (0x123, 0x400),
'Ice Rod Cave': (0x120, 0x10),
'Bonk Rock Cave': (0x124, 0x10),
'Desert Palace - Big Chest': (0x73, 0x10),
'Desert Palace - Torch': (0x73, 0x400),
'Desert Palace - Map Chest': (0x74, 0x10),
'Desert Palace - Compass Chest': (0x85, 0x10),
'Desert Palace - Big Key Chest': (0x75, 0x10),
'Desert Palace - Boss': (0x33, 0x800),
'Eastern Palace - Compass Chest': (0xa8, 0x10),
'Eastern Palace - Big Chest': (0xa9, 0x10),
'Eastern Palace - Cannonball Chest': (0xb9, 0x10),
'Eastern Palace - Big Key Chest': (0xb8, 0x10),
'Eastern Palace - Map Chest': (0xaa, 0x10),
'Eastern Palace - Boss': (0xc8, 0x800),
'Hyrule Castle - Boomerang Chest': (0x71, 0x10),
'Hyrule Castle - Map Chest': (0x72, 0x10),
"Hyrule Castle - Zelda's Chest": (0x80, 0x10),
'Sewers - Dark Cross': (0x32, 0x10),
'Sewers - Secret Room - Left': (0x11, 0x10),
'Sewers - Secret Room - Middle': (0x11, 0x20),
'Sewers - Secret Room - Right': (0x11, 0x40),
'Sanctuary': (0x12, 0x10),
'Castle Tower - Room 03': (0xe0, 0x10),
'Castle Tower - Dark Maze': (0xd0, 0x10),
'Spectacle Rock Cave': (0xea, 0x400),
'Paradox Cave Lower - Far Left': (0xef, 0x10),
'Paradox Cave Lower - Left': (0xef, 0x20),
'Paradox Cave Lower - Right': (0xef, 0x40),
'Paradox Cave Lower - Far Right': (0xef, 0x80),
'Paradox Cave Lower - Middle': (0xef, 0x100),
'Paradox Cave Upper - Left': (0xff, 0x10),
'Paradox Cave Upper - Right': (0xff, 0x20),
'Spiral Cave': (0xfe, 0x10),
'Tower of Hera - Basement Cage': (0x87, 0x400),
'Tower of Hera - Map Chest': (0x77, 0x10),
'Tower of Hera - Big Key Chest': (0x87, 0x10),
'Tower of Hera - Compass Chest': (0x27, 0x20),
'Tower of Hera - Big Chest': (0x27, 0x10),
'Tower of Hera - Boss': (0x7, 0x800),
'Hype Cave - Top': (0x11e, 0x10),
'Hype Cave - Middle Right': (0x11e, 0x20),
'Hype Cave - Middle Left': (0x11e, 0x40),
'Hype Cave - Bottom': (0x11e, 0x80),
'Hype Cave - Generous Guy': (0x11e, 0x400),
'Peg Cave': (0x127, 0x400),
'Pyramid Fairy - Left': (0x116, 0x10),
'Pyramid Fairy - Right': (0x116, 0x20),
'Brewery': (0x106, 0x10),
'C-Shaped House': (0x11c, 0x10),
'Chest Game': (0x106, 0x400),
'Mire Shed - Left': (0x10d, 0x10),
'Mire Shed - Right': (0x10d, 0x20),
'Superbunny Cave - Top': (0xf8, 0x10),
'Superbunny Cave - Bottom': (0xf8, 0x20),
'Spike Cave': (0x117, 0x10),
'Hookshot Cave - Top Right': (0x3c, 0x10),
'Hookshot Cave - Top Left': (0x3c, 0x20),
'Hookshot Cave - Bottom Right': (0x3c, 0x80),
'Hookshot Cave - Bottom Left': (0x3c, 0x40),
'Mimic Cave': (0x10c, 0x10),
'Swamp Palace - Entrance': (0x28, 0x10),
'Swamp Palace - Map Chest': (0x37, 0x10),
'Swamp Palace - Big Chest': (0x36, 0x10),
'Swamp Palace - Compass Chest': (0x46, 0x10),
'Swamp Palace - Big Key Chest': (0x35, 0x10),
'Swamp Palace - West Chest': (0x34, 0x10),
'Swamp Palace - Flooded Room - Left': (0x76, 0x10),
'Swamp Palace - Flooded Room - Right': (0x76, 0x20),
'Swamp Palace - Waterfall Room': (0x66, 0x10),
'Swamp Palace - Boss': (0x6, 0x800),
"Thieves' Town - Big Key Chest": (0xdb, 0x20),
"Thieves' Town - Map Chest": (0xdb, 0x10),
"Thieves' Town - Compass Chest": (0xdc, 0x10),
"Thieves' Town - Ambush Chest": (0xcb, 0x10),
"Thieves' Town - Attic": (0x65, 0x10),
"Thieves' Town - Big Chest": (0x44, 0x10),
"Thieves' Town - Blind's Cell": (0x45, 0x10),
"Thieves' Town - Boss": (0xac, 0x800),
'Skull Woods - Compass Chest': (0x67, 0x10),
'Skull Woods - Map Chest': (0x58, 0x20),
'Skull Woods - Big Chest': (0x58, 0x10),
'Skull Woods - Pot Prison': (0x57, 0x20),
'Skull Woods - Pinball Room': (0x68, 0x10),
'Skull Woods - Big Key Chest': (0x57, 0x10),
'Skull Woods - Bridge Room': (0x59, 0x10),
'Skull Woods - Boss': (0x29, 0x800),
'Ice Palace - Compass Chest': (0x2e, 0x10),
'Ice Palace - Freezor Chest': (0x7e, 0x10),
'Ice Palace - Big Chest': (0x9e, 0x10),
'Ice Palace - Iced T Room': (0xae, 0x10),
'Ice Palace - Spike Room': (0x5f, 0x10),
'Ice Palace - Big Key Chest': (0x1f, 0x10),
'Ice Palace - Map Chest': (0x3f, 0x10),
'Ice Palace - Boss': (0xde, 0x800),
'Misery Mire - Big Chest': (0xc3, 0x10),
'Misery Mire - Map Chest': (0xc3, 0x20),
'Misery Mire - Main Lobby': (0xc2, 0x10),
'Misery Mire - Bridge Chest': (0xa2, 0x10),
'Misery Mire - Spike Chest': (0xb3, 0x10),
'Misery Mire - Compass Chest': (0xc1, 0x10),
'Misery Mire - Big Key Chest': (0xd1, 0x10),
'Misery Mire - Boss': (0x90, 0x800),
'Turtle Rock - Compass Chest': (0xd6, 0x10),
'Turtle Rock - Roller Room - | |
<gh_stars>0
"""
short term archiving
case_st_archive, restore_from_archive, archive_last_restarts
are members of class Case from file case.py
"""
import shutil, glob, re, os
from CIME.XML.standard_module_setup import *
from CIME.utils import run_and_log_case_status, ls_sorted_by_mtime, symlink_force, safe_copy, find_files
from CIME.date import get_file_date
from CIME.XML.archive import Archive
from CIME.XML.files import Files
from os.path import isdir, join
logger = logging.getLogger(__name__)
###############################################################################
def _get_archive_file_fn(copy_only):
###############################################################################
    """
    Select the callable used to archive files.

    Returns safe_copy when copy_only is truthy (the original stays in the
    run directory), otherwise shutil.move (the file is relocated).
    """
    if copy_only:
        return safe_copy
    return shutil.move
###############################################################################
def _get_datenames(casename, rundir):
###############################################################################
    """
    Returns the date objects specifying the times of each file
    Note we are assuming that the coupler restart files exist and are consistent with other component datenames
    Not doc-testable due to filesystem dependence
    """
    expect(isdir(rundir), 'Cannot open directory {} '.format(rundir))
    # coupler restart files define the set of restart dates; fall back to
    # multi-instance naming (cpl_0001) when single-instance files are absent
    files = sorted(glob.glob(os.path.join(rundir, casename + '.cpl.r.*.nc')))
    if not files:
        files = sorted(glob.glob(os.path.join(rundir, casename + '.cpl_0001.r.*.nc')))
    logger.debug(" cpl files : {} ".format(files))
    if not files:
        logger.warning('Cannot find a {}.cpl*.r.*.nc file in directory {} '.format(casename, rundir))
    datenames = []
    for filename in files:
        # get_file_date parses the date stamp embedded in the filename
        file_date = get_file_date(filename)
        datenames.append(file_date)
    return datenames
def _datetime_str(_date):
    """
    Render a date object in the standard filename format.

    >>> _datetime_str(date(5, 8, 22))
    '0005-08-22-00000'
    >>> _datetime_str(get_file_date("0011-12-09-00435"))
    '0011-12-09-00435'
    """
    return "{:04d}-{:02d}-{:02d}-{:05d}".format(
        _date.year(), _date.month(), _date.day(), _date.second_of_day())
def _datetime_str_mpas(_date):
    """
    Render a date object in the MPAS filename format.

    >>> _datetime_str_mpas(date(5, 8, 22))
    '0005-08-22_00:00:00'
    >>> _datetime_str_mpas(get_file_date("0011-12-09-00435"))
    '0011-12-09_00:07:15'
    """
    return "{:04d}-{:02d}-{:02d}_{:02d}:{:02d}:{:02d}".format(
        _date.year(), _date.month(), _date.day(),
        _date.hour(), _date.minute(), _date.second())
###############################################################################
def _get_ninst_info(case, compclass):
###############################################################################
    """
    Look up the number of instances of a component and the filename suffixes.

    Returns (ninst, ninst_strings): ninst_strings is empty for a
    single-instance component, otherwise '_0001', '_0002', ...
    Not doc-testable due to case dependence
    """
    ninst = case.get_value('NINST_' + compclass.upper())
    if ninst is None:
        ninst = 1
    if ninst > 1:
        ninst_strings = ['_{:04d}'.format(i) for i in range(1, ninst + 1)]
    else:
        ninst_strings = []
    logger.debug("ninst and ninst_strings are: {} and {} for {}".format(ninst, ninst_strings, compclass))
    return ninst, ninst_strings
###############################################################################
def _get_component_archive_entries(components, archive):
###############################################################################
    """
    Generator yielding one (archive_entry, compname, compclass) tuple per
    component in this case's compset components.

    archive_entry and compclass are None when the archive XML has no entry
    for the component.
    """
    for compname in components:
        logger.debug("compname is {} ".format(compname))
        entry = archive.get_entry(compname)
        if entry is None:
            logger.debug("No entry found for {}".format(compname))
            yield (None, compname, None)
        else:
            yield (entry, compname, archive.get(entry, "compclass"))
###############################################################################
def _archive_rpointer_files(casename, ninst_strings, rundir, save_interim_restart_files, archive,
                            archive_entry, archive_restdir, datename, datename_is_last):
###############################################################################
    """
    Archive the rpointer files associated with one restart date.

    For the latest restart date the existing rpointer files in rundir are
    copied into archive_restdir unchanged.  For interim dates (only when
    save_interim_restart_files is True) rpointer files are generated from
    the templates in env_archive.xml, substituting the case name, date
    strings, and instance suffix.
    """
    if datename_is_last:
        # Copy of all rpointer files for latest restart date
        rpointers = glob.glob(os.path.join(rundir, 'rpointer.*'))
        for rpointer in rpointers:
            safe_copy(rpointer, os.path.join(archive_restdir, os.path.basename(rpointer)))
        return

    if not save_interim_restart_files:
        return

    # parse env_archive.xml to determine the rpointer files
    # and contents for the given archive_entry tag
    rpointer_items = archive.get_rpointer_contents(archive_entry)

    # loop through the possible rpointer files and contents
    for rpointer_file, rpointer_content in rpointer_items:
        if rpointer_content == 'unset':
            logger.info("rpointer_content unset, not creating rpointer file {}".format(rpointer_file))
            continue

        # use a one-element placeholder suffix so the substitution loop
        # below runs exactly once for single-instance components
        strings = ninst_strings if ninst_strings else [""]
        for ninst_string in strings:
            fname = rpointer_file
            content = rpointer_content
            for key, value in [('$CASE', casename),
                               ('$DATENAME', _datetime_str(datename)),
                               ('$MPAS_DATENAME', _datetime_str_mpas(datename)),
                               ('$NINST_STRING', ninst_string)]:
                fname = fname.replace(key, value)
                content = content.replace(key, value)

            # write out the respective files with the correct contents
            fname = os.path.join(archive_restdir, fname)
            logger.info("writing rpointer_file {}".format(fname))
            # BUG FIX: use a context manager so the file handle is closed
            # even if a write fails (previously open/close with no guard)
            with open(fname, 'w') as fd:
                for output in content.split(','):
                    fd.write("{} \n".format(output))
###############################################################################
def _archive_log_files(dout_s_root, rundir, archive_incomplete, archive_file_fn):
###############################################################################
    """
    Find all completed log files, or all log files if archive_incomplete is True,
    and archive them into DOUT_S_ROOT/logs using archive_file_fn (copy or move).

    Each log file is required to have ".log." in its name, and completed ones will end with ".gz"
    Not doc-testable due to file system dependence
    """
    archive_logdir = os.path.join(dout_s_root, 'logs')
    if not os.path.exists(archive_logdir):
        os.makedirs(archive_logdir)
        logger.debug("created directory {} ".format(archive_logdir))

    # completed logs have been gzipped; incomplete ones have no .gz suffix
    # (FIX: replaced non-idiomatic "== False" comparison)
    log_search = '*.log.*' if archive_incomplete else '*.log.*.gz'

    for logfile in glob.glob(os.path.join(rundir, log_search)):
        srcfile = join(rundir, os.path.basename(logfile))
        destfile = join(archive_logdir, os.path.basename(logfile))
        # log before acting so a failed transfer is still traceable
        logger.info("moving {} to {}".format(srcfile, destfile))
        archive_file_fn(srcfile, destfile)
###############################################################################
def _archive_history_files(archive, archive_entry,
                           compclass, compname, histfiles_savein_rundir,
                           last_date, archive_file_fn, dout_s_root, casename, rundir):
###############################################################################
    """
    perform short term archiving on history files in rundir

    Files matching the archive entry's history-file extensions are archived
    into DOUT_S_ROOT/<compclass>/hist with archive_file_fn (copy or move).
    Files listed in histfiles_savein_rundir are always copied so restarts
    keep working; files dated after last_date (when given) are left alone.
    Not doc-testable due to case and file system dependence
    """
    # determine history archive directory (create if it does not exist)
    archive_histdir = os.path.join(dout_s_root, compclass, 'hist')
    if not os.path.exists(archive_histdir):
        os.makedirs(archive_histdir)
        logger.debug("created directory {}".format(archive_histdir))
    # the compname is drv but the files are named cpl
    if compname == 'drv':
        compname = 'cpl'
    # clm history files may carry the historical "clm2" name
    if compname == 'clm':
        compname = r'clm2?'
    # determine ninst and ninst_string
    # archive history files - the only history files that kept in the
    # run directory are those that are needed for restarts
    for suffix in archive.get_hist_file_extensions(archive_entry):
        # build a regex for candidate filenames: mpas/mali files are named
        # by component only, everything else casename.compname[_NNNN].suffix
        if compname.find('mpas') == 0 or compname == 'mali':
            newsuffix = compname + r'\d*'
        else:
            newsuffix = casename + r'\.' + compname + r'_?' + r'\d*'
        newsuffix += r'\.' + suffix
        if not suffix.endswith('$'):
            newsuffix += r'\.'
        logger.debug("short term archiving suffix is {} ".format(newsuffix))
        pfile = re.compile(newsuffix)
        histfiles = [f for f in os.listdir(rundir) if pfile.search(f)]
        logger.debug("histfiles = {} ".format(histfiles))
        if histfiles:
            for histfile in histfiles:
                file_date = get_file_date(os.path.basename(histfile))
                # archive only files at or before last_date; undated files
                # and a None last_date always qualify
                if last_date is None or file_date is None or file_date <= last_date:
                    srcfile = join(rundir, histfile)
                    expect(os.path.isfile(srcfile),
                           "history file {} does not exist ".format(srcfile))
                    destfile = join(archive_histdir, histfile)
                    if histfile in histfiles_savein_rundir:
                        # needed for a restart: keep the original in rundir
                        logger.info("copying {} to {} ".format(srcfile, destfile))
                        safe_copy(srcfile, destfile)
                    else:
                        logger.info("moving {} to {} ".format(srcfile, destfile))
                        archive_file_fn(srcfile, destfile)
###############################################################################
def get_histfiles_for_restarts(rundir, archive, archive_entry, restfile, testonly=False):
###############################################################################
    """
    query restart files to determine history files that are needed for restarts

    Reads the variable named by rest_history_varname (when set in the
    archive entry) out of the restart file via ncdump, and returns the set
    of referenced history filenames that actually exist in rundir.
    testonly skips the ncdump call (used by unit tests).
    Not doc-testable due to filesystem dependence
    """
    # Make certain histfiles is a set so we don't repeat
    histfiles = set()
    rest_hist_varname = archive.get_entry_value('rest_history_varname', archive_entry)
    if rest_hist_varname != 'unset':
        cmd = "ncdump -v {} {} ".format(rest_hist_varname, os.path.join(rundir, restfile))
        if testonly:
            # fabricate minimal ncdump output so the parse below is a no-op
            out = "{} =".format(rest_hist_varname)
        else:
            rc, out, error = run_cmd(cmd)
            if rc != 0:
                # best effort: a failed ncdump just yields no history files
                logger.info(" WARNING: {} failed rc={:d}\n out={}\n err={}".format(cmd, rc, out, error))
        logger.debug(" get_histfiles_for_restarts: \n out={}".format(out))
        searchname = "{} =".format(rest_hist_varname)
        if searchname in out:
            offset = out.index(searchname)
            items = out[offset:].split(",")
            for item in items:
                # the following match has an option of having any number of '.'s and '/'s
                # at the beginning of the history filename
                matchobj = re.search(r"\"\S+\s*\"", item)
                if matchobj:
                    histfile = matchobj.group(0).strip('" ')
                    histfile = os.path.basename(histfile)
                    # append histfile to the list ONLY if it exists in rundir before the archiving
                    if histfile in histfiles:
                        logger.warning("WARNING, tried to add a duplicate file to histfiles")
                    if os.path.isfile(os.path.join(rundir,histfile)):
                        histfiles.add(histfile)
                    else:
                        logger.debug(" get_histfiles_for_restarts: histfile {} does not exist ".format(histfile))
    return histfiles
###############################################################################
def _archive_restarts_date(case, casename, rundir, archive,
                           datename, datename_is_last, last_date,
                           archive_restdir, archive_file_fn, components=None,
                           link_to_last_restart_files=False, testonly=False):
###############################################################################
    """
    Archive restart files for a single date

    Returns a dictionary of histfiles that need saving in the run
    directory, indexed by compname
    """
    logger.info('-------------------------------------------')
    logger.info('Archiving restarts for date {}'.format(datename))
    logger.info('-------------------------------------------')
    logger.debug("last date: {}".format(last_date))
    if components is None:
        components = case.get_compset_components()
        # the driver and (optional) data assimilation components are not in
        # the compset list but still have restart files to archive
        # NOTE(review): this appends to the list returned by
        # get_compset_components() -- confirm it is a fresh list each call
        components.append('drv')
        components.append('dart')
    histfiles_savein_rundir_by_compname = {}
    for (archive_entry, compname, compclass) in _get_component_archive_entries(components, archive):
        # compclass is None (falsy) when the archive XML has no entry
        if compclass:
            logger.info('Archiving restarts for {} ({})'.format(compname, compclass))
            # archive restarts
            histfiles_savein_rundir = _archive_restarts_date_comp(case, casename, rundir,
                                                                  archive, archive_entry,
                                                                  compclass, compname,
                                                                  datename, datename_is_last,
                                                                  last_date, archive_restdir,
                                                                  archive_file_fn,
                                                                  link_to_last_restart_files=
                                                                  link_to_last_restart_files,
                                                                  testonly=testonly)
            histfiles_savein_rundir_by_compname[compname] = histfiles_savein_rundir
    return histfiles_savein_rundir_by_compname
###############################################################################
def _archive_restarts_date_comp(case, casename, rundir, archive, archive_entry,
compclass, compname, datename, datename_is_last,
last_date, archive_restdir, archive_file_fn,
link_to_last_restart_files=False, testonly=False):
###############################################################################
"""
Archive restart files for a single date and single component
If link_to_last_restart_files is True, then make a symlink to the
last set of restart files (i.e., the set with datename_is_last
True); if False (the default), copy them. (This has no effect on the
history files that are associated with these restart files.)
"""
datename_str = _datetime_str(datename)
if datename_is_last or case.get_value('DOUT_S_SAVE_INTERIM_RESTART_FILES'):
if not os.path.exists(archive_restdir):
os.makedirs(archive_restdir)
# archive the rpointer file(s) for this datename and all possible ninst_strings
_archive_rpointer_files(casename, _get_ninst_info(case, compclass)[1], rundir,
case.get_value('DOUT_S_SAVE_INTERIM_RESTART_FILES'),
archive, archive_entry, archive_restdir, datename, datename_is_last)
# move all but latest restart files into the archive restart directory
# copy latest restart files to archive restart directory
histfiles_savein_rundir = []
# determine function to use for last set of restart files
if link_to_last_restart_files:
| |
failobj
    def __getitem__(self, id):
        """__getitem__(self, id) -> object

        Return a Prosite entry.  id is either the id or accession
        for the entry.  Raises a KeyError if there's an error.
        Returns a parsed object when a parser is configured, otherwise
        the raw record text.
        """
        from Bio import ExPASy
        # First, check to see if enough time has passed since my
        # last query.
        self.limiter.wait()
        # Fetch the raw entry; network failures surface as KeyError so the
        # object keeps dict-like semantics.
        try:
            handle = ExPASy.get_prosite_entry(id)
        except IOError:
            raise KeyError(id)
        # a page that is not a valid record makes _extract_record raise
        # ValueError -> also reported as KeyError
        try:
            handle = File.StringHandle(_extract_record(handle))
        except ValueError:
            raise KeyError(id)
        if self.parser is not None:
            return self.parser.parse(handle)
        return handle.read()
class RecordParser(AbstractParser):
    """Parse Prosite-formatted data into a Record object."""

    def __init__(self):
        self._scanner = _Scanner()
        self._consumer = _RecordConsumer()

    def parse(self, handle):
        """Scan one record from handle and return the populated Record."""
        scanner = self._scanner
        consumer = self._consumer
        scanner.feed(handle, consumer)
        return consumer.data
class _Scanner:
    """Scans Prosite-formatted data.

    Tested with:
    Release 15.0, July 1998
    """
    def feed(self, handle, consumer):
        """feed(self, handle, consumer)

        Feed in Prosite data for scanning.  handle is a file-like
        object that contains prosite data.  consumer is a
        Consumer object that will receive events as the report is scanned.
        """
        # wrap in an UndoHandle so lines can be peeked without consuming them
        if isinstance(handle, File.UndoHandle):
            uhandle = handle
        else:
            uhandle = File.UndoHandle(handle)

        consumer.finished = False
        while not consumer.finished:
            line = uhandle.peekline()
            if not line:
                break
            elif is_blank_line(line):
                # Skip blank lines between records
                uhandle.readline()
                continue
            elif line[:2] == 'ID':
                self._scan_record(uhandle, consumer)
            elif line[:2] == 'CC':
                # a bare CC block at top level is the file's copyright notice
                self._scan_copyrights(uhandle, consumer)
            else:
                raise ValueError("There doesn't appear to be a record")

    def _scan_copyrights(self, uhandle, consumer):
        # copyright notice: a run of CC lines closed by a '//' terminator
        consumer.start_copyrights()
        self._scan_line('CC', uhandle, consumer.copyright, any_number=1)
        self._scan_terminator(uhandle, consumer)
        consumer.end_copyrights()

    def _scan_record(self, uhandle, consumer):
        consumer.start_record()
        for fn in self._scan_fns:
            fn(self, uhandle, consumer)

            # In Release 15.0, C_TYPE_LECTIN_1 has the DO line before
            # the 3D lines, instead of the other way around.
            # Thus, I'll give the 3D lines another chance after the DO lines
            # are finished.
            # BUG FIX: compare against __func__ instead of the Python-2-only
            # im_func attribute (AttributeError on Python 3).  _scan_fns holds
            # plain functions, so the identity test needs the underlying
            # function of the bound method; __func__ works on Python 2.6+ and 3.
            if fn is self._scan_do.__func__:
                self._scan_3d(uhandle, consumer)
        consumer.end_record()

    def _scan_line(self, line_type, uhandle, event_fn,
                   exactly_one=None, one_or_more=None, any_number=None,
                   up_to_one=None):
        """Consume line_type lines from uhandle, firing event_fn for each.

        Callers must set exactly one of exactly_one, one_or_more,
        any_number, or up_to_one to a true value.  I do not explicitly
        check to make sure this function is called correctly; this does
        not guarantee any parameter safety, but I like the readability.
        The other strategy I tried was have parameters min_lines,
        max_lines.
        """
        if exactly_one or one_or_more:
            # the first occurrence is mandatory
            read_and_call(uhandle, event_fn, start=line_type)
        if one_or_more or any_number:
            # consume repeats until the line type changes
            while True:
                if not attempt_read_and_call(uhandle, event_fn,
                                             start=line_type):
                    break
        if up_to_one:
            attempt_read_and_call(uhandle, event_fn, start=line_type)

    def _scan_id(self, uhandle, consumer):
        self._scan_line('ID', uhandle, consumer.identification, exactly_one=1)

    def _scan_ac(self, uhandle, consumer):
        self._scan_line('AC', uhandle, consumer.accession, exactly_one=1)

    def _scan_dt(self, uhandle, consumer):
        self._scan_line('DT', uhandle, consumer.date, exactly_one=1)

    def _scan_de(self, uhandle, consumer):
        self._scan_line('DE', uhandle, consumer.description, exactly_one=1)

    def _scan_pa(self, uhandle, consumer):
        self._scan_line('PA', uhandle, consumer.pattern, any_number=1)

    def _scan_ma(self, uhandle, consumer):
        # NOTE: ZN2_CY6_FUNGAL_2 and DNAJ_2 in Release 15 contain a CC line
        # buried within the MA lines; the rescan entries in _scan_fns below
        # compensate for that.
        self._scan_line('MA', uhandle, consumer.matrix, any_number=1)

    def _scan_pp(self, uhandle, consumer):
        # New PP line, PostProcessing, just after the MA line
        self._scan_line('PP', uhandle, consumer.postprocessing, any_number=1)

    def _scan_ru(self, uhandle, consumer):
        self._scan_line('RU', uhandle, consumer.rule, any_number=1)

    def _scan_nr(self, uhandle, consumer):
        self._scan_line('NR', uhandle, consumer.numerical_results,
                        any_number=1)

    def _scan_cc(self, uhandle, consumer):
        self._scan_line('CC', uhandle, consumer.comment, any_number=1)

    def _scan_dr(self, uhandle, consumer):
        self._scan_line('DR', uhandle, consumer.database_reference,
                        any_number=1)

    def _scan_3d(self, uhandle, consumer):
        self._scan_line('3D', uhandle, consumer.pdb_reference,
                        any_number=1)

    def _scan_pr(self, uhandle, consumer):
        # New PR line, ProRule, between 3D and DO lines
        self._scan_line('PR', uhandle, consumer.prorule, any_number=1)

    def _scan_do(self, uhandle, consumer):
        self._scan_line('DO', uhandle, consumer.documentation, exactly_one=1)

    def _scan_terminator(self, uhandle, consumer):
        self._scan_line('//', uhandle, consumer.terminator, exactly_one=1)

    # This is a list of scan functions in the order expected in the file.
    # The function definitions define how many times each line type is
    # expected (or if optional):
    _scan_fns = [
        _scan_id,
        _scan_ac,
        _scan_dt,
        _scan_de,
        _scan_pa,
        _scan_ma,
        _scan_pp,
        _scan_ru,
        _scan_nr,
        _scan_cc,

        # This is a really dirty hack, and should be fixed properly at
        # some point.  ZN2_CY6_FUNGAL_2, DNAJ_2 in Rel 15 and PS50309
        # in Rel 17 have lines out of order.  Thus, I have to rescan
        # these, which decreases performance.
        _scan_ma,
        _scan_nr,
        _scan_cc,

        _scan_dr,
        _scan_3d,
        _scan_pr,
        _scan_do,
        _scan_terminator
    ]
class _RecordConsumer(AbstractConsumer):
    """Consumer that converts a Prosite record to a Record object.

    Members:
    data    Record with Prosite data.

    Each method below handles one Prosite line type and is fired as an
    event by the scanner.
    """
    def __init__(self):
        self.data = None

    def start_record(self):
        self.data = Record()

    def end_record(self):
        # _clean_record is not defined in this class; presumably provided by
        # AbstractConsumer or elsewhere in the module -- TODO confirm.
        self._clean_record(self.data)

    def identification(self, line):
        """Handle the ID line: record name and type."""
        cols = line.split()
        if len(cols) != 3:
            raise ValueError("I don't understand identification line\n%s" \
                             % line)
        self.data.name = self._chomp(cols[1])    # don't want ';'
        self.data.type = self._chomp(cols[2])    # don't want '.'

    def accession(self, line):
        """Handle the AC line: accession number."""
        cols = line.split()
        if len(cols) != 2:
            raise ValueError("I don't understand accession line\n%s" % line)
        self.data.accession = self._chomp(cols[1])

    def date(self, line):
        """Handle the DT line: created, data-update and info-update dates."""
        uprline = line.upper()
        cols = uprline.split()
        # Release 15.0 contains both 'INFO UPDATE' and 'INF UPDATE'
        if cols[2] != '(CREATED);' or \
           cols[4] != '(DATA' or cols[5] != 'UPDATE);' or \
           cols[7][:4] != '(INF' or cols[8] != 'UPDATE).':
            raise ValueError("I don't understand date line\n%s" % line)
        self.data.created = cols[1]
        self.data.data_update = cols[3]
        self.data.info_update = cols[6]

    def description(self, line):
        self.data.description = self._clean(line)

    def pattern(self, line):
        # PA lines may continue over several lines; concatenate them
        self.data.pattern = self.data.pattern + self._clean(line)

    def matrix(self, line):
        self.data.matrix.append(self._clean(line))

    def postprocessing(self, line):
        # PP line: ';'-separated post-processing directives
        postprocessing = self._clean(line).split(";")
        self.data.postprocessing.extend(postprocessing)

    def rule(self, line):
        self.data.rules.append(self._clean(line))

    def numerical_results(self, line):
        """Handle an NR line: ';'-separated '/QUAL=value' statistics."""
        cols = self._clean(line).split(";")
        for col in cols:
            if not col:
                continue
            qual, data = [word.lstrip() for word in col.split("=")]
            if qual == '/RELEASE':
                release, seqs = data.split(",")
                self.data.nr_sp_release = release
                self.data.nr_sp_seqs = int(seqs)
            elif qual == '/FALSE_NEG':
                self.data.nr_false_neg = int(data)
            elif qual == '/PARTIAL':
                self.data.nr_partial = int(data)
            elif qual in ['/TOTAL', '/POSITIVE', '/UNKNOWN', '/FALSE_POS']:
                # values look like 'hits(sequences)'
                m = re.match(r'(\d+)\((\d+)\)', data)
                if not m:
                    raise Exception("Broken data %s in comment line\n%s" \
                                    % (repr(data), line))
                hits = tuple(map(int, m.groups()))
                if(qual == "/TOTAL"):
                    self.data.nr_total = hits
                elif(qual == "/POSITIVE"):
                    self.data.nr_positive = hits
                elif(qual == "/UNKNOWN"):
                    self.data.nr_unknown = hits
                elif(qual == "/FALSE_POS"):
                    self.data.nr_false_pos = hits
            else:
                raise ValueError("Unknown qual %s in comment line\n%s" \
                                 % (repr(qual), line))

    def comment(self, line):
        #Expect CC lines like this:
        #CC   /TAXO-RANGE=??EPV; /MAX-REPEAT=2;
        #Can (normally) split on ";" and then on "="
        cols = self._clean(line).split(";")
        for col in cols:
            if not col or col[:17] == 'Automatic scaling':
                # DNAJ_2 in Release 15 has a non-standard comment line:
                # CC   Automatic scaling using reversed database
                # Throw it away.  (Should I keep it?)
                continue
            if col.count("=") == 0 :
                #Missing qualifier!  Can we recover gracefully?
                #For example, from Bug 2403, in PS50293 have:
                #CC /AUTHOR=K_Hofmann; N_Hulo
                continue
            qual, data = [word.lstrip() for word in col.split("=")]
            if qual == '/TAXO-RANGE':
                self.data.cc_taxo_range = data
            elif qual == '/MAX-REPEAT':
                self.data.cc_max_repeat = data
            elif qual == '/SITE':
                # '/SITE=pos,description'
                pos, desc = data.split(",")
                self.data.cc_site.append((int(pos), desc))
            elif qual == '/SKIP-FLAG':
                self.data.cc_skip_flag = data
            elif qual == '/MATRIX_TYPE':
                self.data.cc_matrix_type = data
            elif qual == '/SCALING_DB':
                self.data.cc_scaling_db = data
            elif qual == '/AUTHOR':
                self.data.cc_author = data
            elif qual == '/FT_KEY':
                self.data.cc_ft_key = data
            elif qual == '/FT_DESC':
                self.data.cc_ft_desc = data
            elif qual == '/VERSION':
                self.data.cc_version = data
            else:
                raise ValueError("Unknown qual %s in comment line\n%s" \
                                 % (repr(qual), line))

    def database_reference(self, line):
        """Handle a DR line: ';'-separated 'acc, name, type' references."""
        refs = self._clean(line).split(";")
        for ref in refs:
            if not ref:
                continue
            acc, name, type = [word.strip() for word in ref.split(",")]
            if type == 'T':
                self.data.dr_positive.append((acc, name))
            elif type == 'F':
                self.data.dr_false_pos.append((acc, name))
            elif type == 'N':
                self.data.dr_false_neg.append((acc, name))
            elif type == 'P':
                self.data.dr_potential.append((acc, name))
            elif type == '?':
                self.data.dr_unknown.append((acc, name))
            else:
                raise ValueError("I don't understand type flag %s" % type)

    def pdb_reference(self, line):
        cols = line.split()
        for id in cols[1:]:  # get all but the '3D' col
            self.data.pdb_structs.append(self._chomp(id))

    def prorule(self, line):
        #Assume that each PR line can contain multiple ";" separated rules
        rules = self._clean(line).split(";")
        self.data.prorules.extend(rules)

    def documentation(self, line):
        self.data.pdoc = self._chomp(self._clean(line))

    def terminator(self, line):
        # '//' line: tells the scanner to stop feeding events
        self.finished = True

    def _chomp(self, word, to_chomp='.,;'):
        # Remove the punctuation at the end of a word.
        if word[-1] in to_chomp:
            return word[:-1]
        return word

    def _clean(self, line, rstrip=1):
        # Clean up a line: drop the 5-character line-type prefix.
        if rstrip:
            return line[5:].rstrip()
        return line[5:]
def scan_sequence_expasy(seq=None, id=None, exclude_frequent=None):
"""scan_sequence_expasy(seq=None, | |
# reponame: oneflyingfish/tvm -- filename: tests/python/contrib/test_ethosu/infra.py (scrape metadata)
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module provides infrastructure to verify the correctness of
the command stream produced.
Currently it will invoke vela to generate a vela-optimized tflite
in which the command stream is contained as a custom operator.
This class include methods to parse the custom operator to extract
the command stream and perform an equivalency check for single operator
test cases.
"""
from typing import List
import os
import struct
import numpy
import math
from enum import IntEnum
import tensorflow as tf
from ethosu.vela.register_command_stream_generator import CmdMode
from ethosu.vela.register_command_stream_generator import cmd0
from ethosu.vela.register_command_stream_generator import cmd1
import tvm
from tvm import relay
import tvm.relay.backend.contrib.ethosu.op as ethosu_ops
from tvm.topi.nn.utils import get_pad_tuple
from tests.python.relay.aot.aot_test_utils import (
AOTCompiledTestModel,
AOTDataLinkage,
AOTTestModel,
AOTTestRunner,
compile_models,
run_and_check,
)
class AttachType(IntEnum):
    """Integer codes naming schedule attach types.

    NOTE(review): these values appear to mirror TVM's internal te.schedule
    AttachType codes -- confirm against the TVM headers before relying on
    the numeric values.
    """
    kGroupRoot = 1
    kInline = 2
    kInlinedAlready = 3
    kScope = 4
    kScanUpdate = 5
class VelaArtifacts:
    """Mutable container collecting artifacts produced by a Vela run."""

    def __init__(self):
        # per-operator command streams, flash usage, and SRAM usage
        self.cs = {}
        self.flash = {}
        self.sram = {}
        # the set of NPU operators encountered
        self.npu_ops = set()
def print_payload(payload):
    """Pretty-print a serialized command stream, one command per line."""
    for cmd_val in deserialize_command_stream(payload):
        cmd, val = parse_cmd(cmd_val)
        # left-align the command name so values line up in a column
        print(str(cmd).ljust(40) + str(val))
def parse_cmd(binary_cmd):
    """Decode one binary command into a (command, value) pair.

    A Payload32 command takes its value from the second word; otherwise
    the value is the parameter packed into the upper 16 bits.
    """
    word = binary_cmd[0]
    code = word & 0x0000FFFF  # lower 16 bits
    param = word >> 16  # higher 16 bits
    if CmdMode(code & CmdMode.Mask) == CmdMode.Payload32:
        return cmd1(code & CmdMode.CmdOpMask), binary_cmd[1]
    return cmd0(code & CmdMode.CmdOpMask), param
def check_cmms_equivalency(vela_cmd, vela_value, tvm_value, ignore_cmds=None):
    """Raise RuntimeError when vela and tvm disagree on a command's value.

    Commands listed in ignore_cmds are exempt from the comparison.
    """
    ignored = [] if ignore_cmds is None else ignore_cmds
    if vela_cmd in ignored:
        return
    if vela_value != tvm_value:
        raise RuntimeError(
            "ValueMismatch :: vela={}, tvm={} for command:{}".format(
                vela_value, tvm_value, vela_cmd
            )
        )
def verify_cmms(cmms_tvm_blob, cmms_vela_blob):
    """Check that a TVM-generated command stream matches the Vela reference.

    Commands are compared pairwise.  Base-address commands are ignored
    (their values depend on memory layout), the first IFM region/base and
    the last OFM region/base are allowed to differ in a fixed, known way,
    and any other mismatch raises RuntimeError.
    """
    vela_cmm = deserialize_command_stream(cmms_vela_blob)
    tvm_cmm = deserialize_command_stream(cmms_tvm_blob)
    cmms_zip = zip(vela_cmm, tvm_cmm)
    first_ifm_found = False
    last_ofm_found = False
    # base addresses depend on allocation, so their values are not compared
    ignore_commands = (
        cmd1.NPU_SET_DMA0_SRC,
        cmd1.NPU_SET_DMA0_DST,
        cmd1.NPU_SET_WEIGHT_BASE,
        cmd1.NPU_SET_OFM_BASE0,
        cmd1.NPU_SET_IFM_BASE0,
        cmd1.NPU_SET_SCALE_BASE,
    )
    ofm_region_params = []
    ofm_bases = []
    # NOTE(review): the loop variables shadow the vela_cmm/tvm_cmm lists
    # created above; harmless here, but worth renaming.
    for vela_cmm, tvm_cmm in cmms_zip:
        vela_cmd, vela_value = parse_cmd(vela_cmm)
        tvm_cmd, tvm_value = parse_cmd(tvm_cmm)
        assert vela_cmd == tvm_cmd
        # The first IFM region could be different, but it needs to be 1 and 3.
        if vela_cmd == cmd0.NPU_SET_IFM_REGION and not first_ifm_found:
            if vela_value == 1 and tvm_value == 3:
                first_ifm_found = True
                continue
        if vela_cmd == cmd1.NPU_SET_IFM_BASE0 and not first_ifm_found:
            if tvm_value != 0:
                raise RuntimeError("ValueError :: tvm primary ifm base should be zero")
            continue
        # OFM regions should be cached to be checked later
        if vela_cmd == cmd0.NPU_SET_OFM_REGION:
            ofm_region_params.append((vela_value, tvm_value))
            continue
        # OFM bases should be cached to be checked later
        if vela_cmd == cmd1.NPU_SET_OFM_BASE0:
            ofm_bases.append((vela_value, tvm_value))
            continue
        check_cmms_equivalency(vela_cmd, vela_value, tvm_value, ignore_commands)
    # The last OFM region could be different but it should be 1 and 4.
    last_vela_ofm_region, last_tvm_ofm_region = ofm_region_params.pop(-1)
    if not (last_vela_ofm_region == 1 and last_tvm_ofm_region == 4):
        raise RuntimeError(
            "ValueMismatch :: vela={}, tvm={} for last ofm region it should be 1 and 4 respectively".format(
                last_vela_ofm_region, last_tvm_ofm_region
            )
        )
    # The rest of the OFM regions should be the same.
    # NOTE(review): vela_cmd below is left over from the final loop
    # iteration, so the command reported on a mismatch may be wrong --
    # confirm whether NPU_SET_OFM_REGION was intended here.
    for vela_value, tvm_value in ofm_region_params:
        check_cmms_equivalency(vela_cmd, vela_value, tvm_value, ignore_commands)
    # The last OFM base should be zero for tvm
    _, last_tvm_ofm_base = ofm_bases.pop(-1)
    if not last_tvm_ofm_base == 0:
        raise RuntimeError("ValueError :: tvm primary ofm base should be zero")
def deserialize_command_stream(blob):
    """Unpack a little-endian byte blob into a list of commands.

    Each command is [code] or [code, value] when the code's payload mode
    carries an extra 32-bit payload word.  The first 8 words are a header
    and are dropped.
    """
    assert isinstance(blob, bytes)
    words = struct.unpack("<{0}I".format(len(blob) // 4), blob)
    words = words[8:]  # remove_header
    cmms = []
    idx = 0
    total = len(words)
    while idx < total:
        code = words[idx]
        idx += 1
        entry = [code]
        if CmdMode(code & CmdMode.Mask) == CmdMode.Payload32:
            entry.append(words[idx])
            idx += 1
        cmms.append(entry)
    return cmms
def create_test_runner(accel="ethos-u55-256"):
    """Return an AOTTestRunner configured for the corstone300 reference system.

    accel encodes the accelerator as "<family>-<variant>-<macs>",
    e.g. "ethos-u55-256"; variant and MAC count are forwarded to the
    makefile, and the full string becomes the relay accelerator_config.
    """
    file_dir = os.path.dirname(os.path.abspath(__file__))
    test_root = os.path.join(file_dir, "reference_system")
    # split e.g. "ethos-u55-256" -> variant "U55", macs "256"
    _, ethosu_variant, ethosu_macs = accel.split("-")
    ethosu_variant = ethosu_variant.upper()
    return AOTTestRunner(
        makefile="corstone300",
        prologue="""
        uart_init();
        EthosuInit();
        struct ethosu_driver* ethos_u = ethosu_reserve_driver();
        """,
        epilogue="""
        ethosu_release_driver(ethos_u);
        """,
        includes=["uart.h", "ethosu_55.h", "ethosu_mod.h", "hard_fault.h"],
        parameters={
            "ETHOSU_TEST_ROOT": test_root,
            "NPU_MACS": ethosu_macs,
            "NPU_VARIANT": ethosu_variant,
        },
        pass_config={
            "relay.ext.ethos-u.options": {
                "accelerator_config": accel,
            },
        },
    )
def build_source(module, inputs, outputs, accel="ethos-u55-256", output_tolerance=0):
    """Compile a module for the given NPU config and return the compiled models."""
    runner = create_test_runner(accel)
    model = AOTTestModel(
        module=module,
        inputs=inputs,
        outputs=outputs,
        output_tolerance=output_tolerance,
        extra_memory_in_bytes=0,
    )
    return compile_models(
        models=model,
        interface_api="c",
        use_unpacked_api=True,
        workspace_byte_alignment=16,
        pass_config=runner.pass_config,
    )
def verify_source(
    models: List[AOTCompiledTestModel],
    accel="ethos-u55-256",
):
    """
    This method verifies the generated source from an NPU module by building it and running on an FVP.
    """
    runner = create_test_runner(accel)
    run_and_check(
        models,
        runner,
        "c",  # interface API
        workspace_byte_alignment=16,
        data_linkage=AOTDataLinkage(section="ethosu_scratch", alignment=16),
    )
def flatten_numpy_data(data):
    """Flatten the numpy tensor to be single dimensional"""
    element_count = data.size
    return numpy.reshape(data, [element_count])
class InputGenerator:
    """Deterministic random-input generator for driving model inference."""

    def __init__(self, random_state):
        # numpy RandomState seeded by the caller for reproducibility
        self._random_state = random_state

    def generate(self, size, dtype):
        """Return a random tensor of the given shape and dtype.

        float32 values are drawn uniformly from [-1, 1); integer dtypes
        cover their full representable range.
        """
        if dtype == numpy.float32:
            print("random float32")
            return self._random_state.uniform(-1, 1, size).astype(dtype)
        info = numpy.iinfo(dtype)
        # BUG FIX: the message was passed logger-style to print(), which
        # printed the raw "%d" format string; format it properly instead.
        print("random (u)int min=%d max=%d" % (info.min, info.max))
        low = info.min
        high = info.max + 1
        return self._random_state.randint(low, high, size, dtype)
def generate_ref_data_tflite(model):
    """
    Generate reference data by running *model* under TFLite with random input.

    Returns a tuple ``(input_data, expected_output_data)``: a dict mapping
    input tensor names to the generated arrays, and the list of output
    tensors produced by the TFLite interpreter.
    """
    interpreter = tf.lite.Interpreter(model_content=model)
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()
    # Initialize random generators with a fixed seed to get deterministic results
    random_state = numpy.random.RandomState(0)
    inputgen = InputGenerator(random_state)
    # Generate one random tensor per model input, keyed by tensor name.
    input_data = {
        input_detail["name"]: inputgen.generate(
            input_detail["shape"],
            input_detail["dtype"],
        )
        for input_detail in input_details
    }
    # Bug fix: the original used the enumeration position as the tensor
    # index, which is only correct when the input tensor indices happen to
    # be 0..n-1.  Use the interpreter-reported "index" of each input detail.
    # (dict preserves insertion order, so values() aligns with input_details)
    for input_detail, value in zip(input_details, input_data.values()):
        interpreter.set_tensor(input_detail["index"], value)
    interpreter.invoke()
    expected_output_data = [
        interpreter.get_tensor(output_detail["index"]) for output_detail in output_details
    ]
    return input_data, expected_output_data
def make_partitioned_function(relay_op):
    """Wrap *relay_op* into an IRModule whose main calls an ethos-u
    partitioned global function."""
    free_vars = relay.analysis.free_vars(relay_op)
    annotation = free_vars[0].type_annotation
    ifm = relay.var("ifm", shape=annotation.shape, dtype=annotation.dtype)
    gvar = relay.GlobalVar("tvmgen_default_ethosu_main_0")
    # Mark the function so the ethos-u codegen picks it up as a partition.
    partitioned = (
        relay.Function(free_vars, relay_op)
        .with_attr("Inline", 1)
        .with_attr("Compiler", "ethos-u")
        .with_attr("global_symbol", "tvmgen_default_ethosu_main_0")
        .with_attr("Primitive", 1)
    )
    mod = tvm.IRModule()
    mod[gvar] = partitioned
    mod = relay.transform.InferType()(mod)
    mod["main"] = relay.Function([ifm], relay.Call(gvar, [ifm]))
    return relay.transform.InferType()(mod)
def generate_weights_data(shape, dtype):
    """Return a deterministic weight tensor of the given shape/dtype.

    Values cycle through 0..254 in row-major order.
    """
    # Idiom: replace the manual multiply loop with numpy.prod
    # (empty shape yields 1, matching the original loop).
    size = int(numpy.prod(shape))
    return (numpy.arange(size) % 255).reshape(shape).astype(dtype)
def get_convolutional_args(call, include_buffers=False, remove_constants=False):
    """A method to extract the arguments from conv2d or depthwise_conv2d extern call."""
    skip_indices = {0}
    if remove_constants:
        skip_indices.update((41, 42, 44, 45))
    extracted = []
    for position, arg in enumerate(call.args):
        if position in skip_indices:
            continue
        if isinstance(arg, (tvm.tir.expr.IntImm, tvm.tir.expr.FloatImm)):
            extracted.append(arg.value)
        elif isinstance(arg, tvm.tir.expr.Load) and not include_buffers:
            extracted.append(arg.index)
        else:
            extracted.append(arg)
    return extracted
def compute_ofm_shape(ifm_shape, padding, kernel_shape, strides, dilation=(1, 1)):
    """Compute the NHWC output feature-map shape of a convolution.

    :param ifm_shape: input shape as [batch, height, width, channels]
    :param padding: "VALID" or "SAME" (case-insensitive)
    :param kernel_shape: [kernel_h, kernel_w]
    :param strides: [stride_h, stride_w]
    :param dilation: [dilation_h, dilation_w]
    :raises ValueError: for an unrecognized padding mode
    """
    assert len(strides) == 2
    assert len(dilation) == 2
    assert len(kernel_shape) == 2
    # Note: dilation default changed from a mutable list to a tuple
    # (best practice; callers only index it, so behavior is unchanged).
    mode = padding.lower()
    if mode == "valid":
        h = math.ceil((ifm_shape[1] - (kernel_shape[0] - 1) * dilation[0]) / strides[0])
        w = math.ceil((ifm_shape[2] - (kernel_shape[1] - 1) * dilation[1]) / strides[1])
    elif mode == "same":
        h = math.ceil(ifm_shape[1] / strides[0])
        w = math.ceil(ifm_shape[2] / strides[1])
    else:
        # Bug fix: the original fell through with h/w undefined and raised a
        # confusing NameError for any other padding string.
        raise ValueError(f"Unsupported padding mode: {padding}")
    return [ifm_shape[0], h, w, ifm_shape[3]]
def compute_padding_shape(ifm_shape, ofm_shape, padding, kernel_shape, strides, dilation=(1, 1)):
    """Compute [top, left, bottom, right] padding for an NHWC convolution.

    :param ifm_shape: input shape as [batch, height, width, channels]
    :param ofm_shape: output shape as [batch, height, width, channels]
    :param padding: "VALID" or "SAME" (case-insensitive)
    :param kernel_shape: [kernel_h, kernel_w]
    :param strides: [stride_h, stride_w]
    :param dilation: [dilation_h, dilation_w]
    :raises ValueError: for an unrecognized padding mode
    """
    assert len(strides) == 2
    assert len(dilation) == 2
    assert len(kernel_shape) == 2
    # Note: dilation default changed from a mutable list to a tuple
    # (best practice; callers only index it, so behavior is unchanged).
    mode = padding.lower()
    if mode == "valid":
        return [0, 0, 0, 0]
    if mode == "same":
        # Effective kernel extent once dilation is taken into account.
        effective_kernel_shape = [
            dilation[0] * (kernel_shape[0] - 1) + 1,
            dilation[1] * (kernel_shape[1] - 1) + 1,
        ]
        pad_along_height = max(
            (ofm_shape[1] - 1) * strides[0] + effective_kernel_shape[0] - ifm_shape[1], 0
        )
        pad_along_width = max(
            (ofm_shape[2] - 1) * strides[1] + effective_kernel_shape[1] - ifm_shape[2], 0
        )
        # Odd padding is placed on the bottom/right.
        pad_top = pad_along_height // 2
        pad_bottom = pad_along_height - pad_top
        pad_left = pad_along_width // 2
        pad_right = pad_along_width - pad_left
        return [pad_top, pad_left, pad_bottom, pad_right]
    # Bug fix: the original silently returned None for any other padding mode.
    raise ValueError(f"Unsupported padding mode: {padding}")
def make_ethosu_conv2d(
ifm,
ifm_channels,
ofm_channels,
kernel_shape,
padding,
strides,
dilation,
lut=relay.const([], dtype="int8"),
activation="NONE",
ifm_layout="NHWC",
ofm_layout="NHWC",
weight_dtype="int8",
scale_bias_dtype="uint8",
rounding_mode="TFL",
upscale="NONE",
):
# conv params
weight_shape = (ofm_channels, kernel_shape[0], kernel_shape[1], ifm_channels)
padding = get_pad_tuple(padding, kernel_shape)
scale_bias_data = generate_weights_data((weight_shape[0], 10), scale_bias_dtype)
scale_bias = relay.const(scale_bias_data, dtype=scale_bias_dtype)
weight_data = generate_weights_data(weight_shape, weight_dtype)
weight = relay.const(weight_data, dtype=weight_dtype)
conv = ethosu_ops.ethosu_conv2d(
ifm,
weight,
scale_bias,
lut=lut,
ifm_scale=0.5,
ifm_zero_point=10,
weight_zero_point=12,
ofm_scale=0.25,
ofm_zero_point=14,
kernel_shape=kernel_shape,
ofm_channels=ofm_channels,
strides=strides,
padding=padding,
dilation=dilation,
activation=activation,
clip_min=10 if activation == "CLIP" else 0,
clip_max=100 if | |
<reponame>hanya/pyuno3ext<gh_stars>1-10
#**************************************************************
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#**************************************************************
import sys
import pyuno
import socket # since on Windows sal3.dll no longer calls WSAStartup
import types
import traceback
# all functions and variables starting with a underscore (_) must be considered private
# and can be changed at any time. Don't use them
# The component context is obtained once at import time and shared
# process-wide; getComponentContext() simply hands out this cached instance.
_g_ctx = pyuno.getComponentContext()
def getComponentContext():
    """ Returns the UNO component context, that was used to initialize the PyUNO. """
    return _g_ctx
def getConstantByName(constant):
    """Look up the value of an IDL constant or enum member by its
    fully qualified name."""
    return pyuno.getConstantByName(constant)
def getTypeByName(typeName):
    """Return a ``uno.Type`` instance for the type named *typeName*.

    A ``com.sun.star.uno.RuntimeException`` is raised when no such type
    exists.
    """
    return pyuno.getTypeByName(typeName)
def createUnoStruct(typeName, *args):
    """Create a UNO struct or exception specified by *typeName*.

    ``args`` may be:

    1) empty -- a default constructed instance is returned, e.g.
       ``createUnoStruct("com.sun.star.uno.Exception")``
    2) exactly one element holding an instance of *typeName* -- a copy
       constructed instance is returned, e.g.
       ``createUnoStruct("com.sun.star.uno.Exception", e)``
    3) one value per struct element, e.g.
       ``createUnoStruct("com.sun.star.uno.Exception", "foo error", self)``;
       each value must match the type of the corresponding struct element,
       otherwise an exception is thrown.
    """
    return getClass(typeName)(*args)
def getClass(typeName):
    """Return the Python class for the concrete UNO exception, struct or
    interface named *typeName*."""
    return pyuno.getClass(typeName)
def isInterface(obj):
    """Return ``True`` when *obj* is a class representing a UNO interface."""
    return pyuno.isInterface(obj)
def generateUuid():
    """Return a 16 byte sequence containing a newly generated UUID/GUID
    (see rtl/uuid.h)."""
    return pyuno.generateUuid()
def systemPathToFileUrl(systemPath):
    """Convert *systemPath* to a ``file:`` URL."""
    return pyuno.systemPathToFileUrl(systemPath)
def fileUrlToSystemPath(url):
    """Convert a ``file:`` URL to a system path (in the convention of the
    system the Python interpreter runs on)."""
    return pyuno.fileUrlToSystemPath(url)
def absolutize(path, relativeUrl):
    """Return an absolute file URL built from the given URLs."""
    return pyuno.absolutize(path, relativeUrl)
def getCurrentContext():
    """Return the currently valid current context.

    See http://udk.openoffice.org/common/man/concept/uno_contexts.html#current_context
    for an explanation of the current context concept.
    """
    return pyuno.getCurrentContext()
def setCurrentContext(newContext):
    """Install *newContext* as the new UNO current context.

    *newContext* must implement the ``XCurrentContext`` interface.  The
    implementation should handle the properties it knows about and delegate
    unknown properties to the previous context; make sure to restore the old
    context when leaving your stack.

    See http://udk.openoffice.org/common/man/concept/uno_contexts.html#current_context
    """
    return pyuno.setCurrentContext(newContext)
class Enum:
    """Represents a UNO IDL enum value.

    ``typeName`` is the fully qualified name of the enum type, and ``value``
    is the name of the enum member -- both plain strings.  The pair is
    validated against the type system via ``pyuno.checkEnum``.
    """

    def __init__(self, typeName, value):
        self.typeName = typeName
        self.value = value
        pyuno.checkEnum(self)

    def __repr__(self):
        return "<uno.Enum %s (%r)>" % (self.typeName, self.value)

    def __eq__(self, that):
        return (
            isinstance(that, Enum)
            and self.typeName == that.typeName
            and self.value == that.value
        )

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # Hash the canonical repr so hashing stays consistent with __eq__.
        return self.__repr__().__hash__()
class Type:
    """Represents a UNO type; use an instance of this class to explicitly
    pass a type to UNO.

    ``typeName`` is the name of the UNO type and ``typeClass`` the matching
    Python enum of ``com.sun.star.uno.TypeClass`` (see TypeClass.idl).  The
    pair is validated via ``pyuno.checkType``.
    """

    def __init__(self, typeName, typeClass):
        self.typeName = typeName
        self.typeClass = typeClass
        pyuno.checkType(self)

    def __repr__(self):
        return "<Type instance %s (%r)>" % (self.typeName, self.typeClass)

    def __eq__(self, that):
        return (
            isinstance(that, Type)
            and self.typeClass == that.typeClass
            and self.typeName == that.typeName
        )

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return self.typeName.__hash__()
class Bool(object):
    """ Represents a UNO boolean, use an instance of this class to explicitly
        pass a boolean to UNO.
        Note: This class is deprecated. Use python's True and False directly instead
    """
    def __new__(cls, value):
        # Bug fix: the original tested isinstance(value, (str, unicode)),
        # which raises NameError on Python 3 where the ``unicode`` builtin
        # no longer exists (this module targets Python 3 / pyuno3ext).
        if isinstance(value, str):
            if value == "true":
                return True
            if value == "false":
                return False
        # Any other value falls back to ordinary truthiness.
        return bool(value)
class Char:
    """ Represents an UNO char, use an instance of this class to explicitly pass a char to UNO """
    def __init__(self, value):
        # Bug fix: the original asserted isinstance(value, unicode), which
        # raises NameError on Python 3; every str is unicode there.
        assert isinstance(value, str)
        assert len(value) == 1
        self.value = value

    def __repr__(self):
        return "<Char instance %s>" % (self.value,)

    def __eq__(self, that):
        if isinstance(that, str):
            # Bug fix: the original only rejected len(that) > 1, so comparing
            # against the empty string raised IndexError on that[0].
            return len(that) == 1 and self.value == that[0]
        if isinstance(that, Char):
            return self.value == that.value
        return False

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # Bug fix: the original hashed the bound method object
        # (self.__repr__.__hash__()), which varies per instance and is
        # inconsistent with __eq__; hash the repr string instead.
        return self.__repr__().__hash__()
class ByteSequence:
    """Represents a UNO byte sequence; wraps a ``bytes``/``bytearray`` value."""

    def __init__(self, value):
        if isinstance(value, ByteSequence):
            self.value = value.value
        elif isinstance(value, (bytes, bytearray)):
            self.value = value
        else:
            raise TypeError("expected byte, bytearray or ByteSequence")

    def __repr__(self):
        return "<ByteSequence instance '%s'>" % (self.value,)

    def __eq__(self, that):
        # Unwrap a ByteSequence operand, then compare raw byte values.
        if isinstance(that, ByteSequence):
            that = that.value
        if isinstance(that, (bytes, bytearray)):
            return self.value == that
        return False

    def __ne__(self, other):
        return not self.__eq__(other)

    def __len__(self):
        return len(self.value)

    def __getitem__(self, index):
        return self.value[index]

    def __iter__(self):
        return iter(self.value)

    def __add__(self, b):
        if isinstance(b, ByteSequence):
            return ByteSequence(self.value + b.value)
        if isinstance(b, (bytes, bytearray)):
            return ByteSequence(self.value + b)
        raise TypeError("expected byte, bytearray or ByteSequence as operand")

    def __hash__(self):
        return hash(self.value)
class Any:
    """Carries an explicitly typed any; only useful together with
    ``uno.invoke()``."""

    def __init__(self, type, value):
        # Accept either a ready-made Type instance or a type name string.
        if isinstance(type, Type):
            self.type = type
        else:
            self.type = getTypeByName(type)
        self.value = value
def invoke(object, methodname, argTuple):
    """Invoke *methodname* on *object*; use this to pass exactly typed anys
    (see :class:`Any`) to the callee."""
    return pyuno.invoke(object, methodname, argTuple)
#---------------------------------------------------------------------------------------
# don't use any functions beyond this point, private section, likely to change
#---------------------------------------------------------------------------------------
# private, referenced from the pyuno shared library
def _uno_struct__init__(self, *args):
    # A single argument of the exact same struct class means "copy construct":
    # the wrapped value is shared; otherwise the arguments are forwarded to
    # the C-level struct factory.
    if len(args) == 1 and hasattr(args[0], "__class__") and args[0].__class__ == self.__class__ :
        self.__dict__["value"] = args[0]
    else:
        self.__dict__["value"] = pyuno._createUnoStructHelper(self.__class__.__pyunostruct__, args)
# private, referenced from the pyuno shared library
def _uno_struct__getattr__(self, name):
    # Delegate attribute reads to the wrapped C-level struct stored in "value".
    return getattr(self.__dict__["value"], name)
# private, referenced from the pyuno shared library
def _uno_struct__setattr__(self, name, value):
    # Delegate attribute writes to the wrapped C-level struct stored in "value".
    return setattr(self.__dict__["value"], name, value)
# private, referenced from the pyuno shared library
def _uno_struct__repr__(self):
    # repr of the wrapped struct value.
    return repr(self.__dict__["value"])
def _uno_struct__str__(self):
    # str of the wrapped struct value.
    return str(self.__dict__["value"])
# private, referenced from the pyuno shared library
def _uno_struct__eq__(self, cmp):
    # Structs compare equal when their wrapped values compare equal.
    # NOTE(review): hasattr(cmp, "value") does not guarantee that
    # cmp.__dict__ contains "value" (e.g. a property); such objects would
    # raise KeyError here rather than return False -- confirm intended.
    if hasattr(cmp, "value"):
        return self.__dict__["value"] == cmp.__dict__["value"]
    return False
def _uno_struct__ne__(self, other):
    # Inequality is defined as the negation of _uno_struct__eq__.
    return not self.__eq__(other)
def _uno_struct__dir__(self):
    # Merge the wrapped struct's attributes with the wrapper's own
    # instance and class attributes so introspection sees everything.
    return dir(self.__dict__["value"]) + list(self.__dict__.keys()) + \
                list(self.__class__.__dict__.keys())
# referenced from pyuno shared lib and pythonscript.py
def _uno_extract_printable_stacktrace(trace):
    """Format a traceback object as an indented string, innermost frame first."""
    frames = traceback.extract_tb(trace)
    formatted = ["  {}:{} in function {}() [{}]".format(*frame)
                 for frame in reversed(frames)]
    return "\n".join(formatted)
class _UNOModule(types.ModuleType):
""" Extended module class for UNO based modules.
Real value is not taken from pyuno until first request.
After first request of the value, it is kept as an attribute.
"""
def __init__(self, fullname, loader):
types.ModuleType.__init__(self, fullname)
self.__file__ = "<" + fullname + ">"
self.__loader__ = loader
self.__path__ = fullname
self.__package__ = ""
self.__initializing__ = False
def __str__(self):
return "<UNOModule '" + self.__path__ + "'>"
def __dir__(self):
try:
if hasattr(self, "__all__"):
module_names = self.__all__
else:
module_names = pyuno.getModuleElementNames(self.__path__)
self.__all__ = list(module_names)
except:
module_names = []
return list(self.__class__.__dict__.keys()) + list(self.__dict__.keys()) + list(module_names)
def __getattr__(self, name):
try:
value = pyuno.importValue(self.__path__, name)
except:
if pyuno.hasModule(self.__path__ + "." + name):
value = _UNOModuleLoader().load_module(self.__path__ + "." + name)
elif name == "__all__" or name == "*":
try:
module_names = pyuno.getModuleElementNames(self.__path__)
self.__all__ = module_names
return module_names
except:
raise AttributeError(name)
elif name.startswith("typeOf"):
try:
value = pyuno.getTypeByName(self.__path__ + "." + name[6:])
except:
raise AttributeError(
"type {}.{} is unknown".format(self.__path__, name))
else:
raise AttributeError(name)
setattr(self, name, value)
| |
if
``True``, it returns the coefficients with respect to
the basis for the total degree of this element
OUTPUT:
A list of elements of the base field.
EXAMPLES::
sage: A.<x,y,z,t> = GradedCommutativeAlgebra(QQ, degrees=(1, 2, 2, 3))
sage: A.basis(3)
[t, x*z, x*y]
sage: (t + 3*x*y).basis_coefficients()
[1, 0, 3]
sage: (t + x).basis_coefficients()
Traceback (most recent call last):
...
ValueError: This element is not homogeneous
sage: B.<c,d> = GradedCommutativeAlgebra(QQ, degrees=((2,0), (0,4)))
sage: B.basis(4)
[d, c^2]
sage: (c^2 - 1/2 * d).basis_coefficients(total=True)
[-1/2, 1]
sage: (c^2 - 1/2 * d).basis_coefficients()
Traceback (most recent call last):
...
ValueError: This element is not homogeneous
"""
if not self.is_homogeneous(total):
raise ValueError('This element is not homogeneous')
basis = self.parent().basis(self.degree(total))
F = self.parent().base()
lift = self.lift()
monos = self.monomials()
c = [lift.monomial_coefficient(x.lift()) for x in basis]
return c
class GCAlgebra_multigraded(GCAlgebra):
    """
    A multi-graded commutative algebra.

    INPUT:

    - ``base`` -- the base field

    - ``degrees`` -- a tuple or list specifying the degrees of the
      generators

    - ``names`` -- (optional) names of the generators: a list of
      strings or a single string with the names separated by
      commas; if not specified, the generators are named ``x0``,
      ``x1``, ...

    - ``R`` -- (optional) the ring over which the algebra is defined

    - ``I`` -- (optional) an ideal in ``R``; it should include, among
      other relations, the squares of the generators of odd degree

    When defining such an algebra, each entry of ``degrees`` should be
    a list, tuple, or element of an additive (free) abelian
    group. Regardless of how the user specifies the degrees, Sage
    converts them to group elements.

    The arguments ``R`` and ``I`` are primarily for use by the
    :meth:`GCAlgebra.quotient` method.

    EXAMPLES::

        sage: A.<a,b,c> = GradedCommutativeAlgebra(QQ, degrees=((1,0), (0,1), (1,1)))
        sage: A
        Graded Commutative Algebra with generators ('a', 'b', 'c') in degrees ((1, 0), (0, 1), (1, 1)) over Rational Field
        sage: a**2
        0
        sage: c.degree(total=True)
        2
        sage: c**2
        c^2
        sage: c.degree()
        (1, 1)

    Although the degree of ``c`` was defined using a Python tuple, it
    is returned as an element of an additive abelian group, and so it
    can be manipulated via arithmetic operations::

        sage: type(c.degree())
        <class 'sage.groups.additive_abelian.additive_abelian_group.AdditiveAbelianGroup_fixed_gens_with_category.element_class'>
        sage: 2 * c.degree()
        (2, 2)
        sage: (a*b).degree() == a.degree() + b.degree()
        True

    The :meth:`basis` method and the :meth:`Element.degree` method both accept
    the boolean keyword ``total``. If ``True``, use the total degree::

        sage: A.basis(2, total=True)
        [a*b, c]
        sage: c.degree(total=True)
        2
    """
    def __init__(self, base, degrees, names=None, R=None, I=None):
        """
        Initialize ``self``.

        EXAMPLES::

            sage: A.<a,b,c> = GradedCommutativeAlgebra(QQ, degrees=((1,0), (0,1), (1,1)))
            sage: TestSuite(A).run()
        """
        # The single-graded parent is built from the total degrees; the
        # multi-degrees are kept separately on this instance.
        total_degs = [total_degree(d) for d in degrees]
        GCAlgebra.__init__(self, base, R=R, I=I, names=names, degrees=total_degs)
        self._degrees_multi = degrees
        self._grading_rank = len(list(degrees[0]))

    def _repr_(self):
        """
        Print representation.

        EXAMPLES::

            sage: GradedCommutativeAlgebra(QQ, degrees=((1,0,0), (0,0,1), (1,1,1)))
            Graded Commutative Algebra with generators ('x0', 'x1', 'x2') in degrees ((1, 0, 0), (0, 0, 1), (1, 1, 1)) over Rational Field
        """
        s = GCAlgebra._repr_(self)
        # Replace the total-degree string produced by the parent class with
        # the multi-degree tuples actually used here.
        old = '{}'.format(self._degrees)
        new = '{}'.format(self._degrees_multi)
        return s.replace(old, new)

    _base_repr = _repr_

    def quotient(self, I, check=True):
        """
        Create the quotient of this algebra by a two-sided ideal ``I``.

        INPUT:

        - ``I`` -- a two-sided homogeneous ideal of this algebra

        - ``check`` -- (default: ``True``) if ``True``, check whether
          ``I`` is generated by homogeneous elements

        EXAMPLES::

            sage: A.<x,y,z,t> = GradedCommutativeAlgebra(GF(5), degrees=(2, 3, 2, 4))
            sage: I = A.ideal([x*t+y^2, x*z - t])
            sage: B = A.quotient(I)
            sage: B
            Graded Commutative Algebra with generators ('x', 'y', 'z', 't') in degrees (2, 3, 2, 4) with relations [x*t, x*z - t] over Finite Field of size 5
            sage: B(x*t)
            0
            sage: B(x*z)
            t
            sage: A.basis(7)
            [y*t, y*z^2, x*y*z, x^2*y]
            sage: B.basis(7)
            [y*t, y*z^2, x^2*y]
        """
        if check and any(not i.is_homogeneous() for i in I.gens()):
            raise ValueError("The ideal must be homogeneous")
        NCR = self.cover_ring()
        gens1 = list(self.defining_ideal().gens())
        gens2 = [i.lift() for i in I.gens()]
        gens = [g for g in gens1 + gens2 if g != NCR.zero()]
        J = NCR.ideal(gens, side='twosided')
        # Bug fix: ``__init__`` takes ``(base, degrees, names=None, R=None,
        # I=None)``, but names and degrees were previously passed in swapped
        # positional order.  Pass keywords to make the intent explicit.
        return GCAlgebra_multigraded(self.base_ring(), self._degrees_multi,
                                     names=self._names, R=NCR, I=J)

    def _coerce_map_from_(self, other):
        r"""
        Returns ``True`` if there is a coercion map from ``R`` to ``self``.

        EXAMPLES::

            sage: A.<a,b,c> = GradedCommutativeAlgebra(QQ, degrees=((1,0), (0, 1), (0,2)))
            sage: B = A.cdg_algebra({a: c})
            sage: B._coerce_map_from_(A)
            True
            sage: B._coerce_map_from_(QQ)
            True
            sage: B._coerce_map_from_(GF(3))
            False
        """
        if isinstance(other, GCAlgebra_multigraded):
            # Multi-graded algebras only coerce when the multi-degrees agree;
            # remaining checks are deferred to the parent class below.
            if self._degrees_multi != other._degrees_multi:
                return False
        elif isinstance(other, GCAlgebra):  # Not multigraded
            return False
        return super(GCAlgebra_multigraded, self)._coerce_map_from_(other)

    def basis(self, n, total=False):
        """
        Basis in degree ``n``.

        - ``n`` -- degree or integer

        - ``total`` (optional, default False) -- if True, return the
          basis in total degree ``n``.

        If ``n`` is an integer rather than a multi-index, then the
        total degree is used in that case as well.

        EXAMPLES::

            sage: A.<a,b,c> = GradedCommutativeAlgebra(GF(2), degrees=((1,0), (0,1), (1,1)))
            sage: A.basis((1,1))
            [c, a*b]
            sage: A.basis(2, total=True)
            [c, b^2, a*b, a^2]

        Since 2 is a not a multi-index, we don't need to specify ``total=True``::

            sage: A.basis(2)
            [c, b^2, a*b, a^2]

        If ``total==True``, then ``n`` can still be a tuple, list,
        etc., and its total degree is used instead::

            sage: A.basis((1,1), total=True)
            [c, b^2, a*b, a^2]
        """
        tot_basis = GCAlgebra.basis(self, total_degree(n))
        if total or n in ZZ:
            return tot_basis
        # Filter the total-degree basis down to the requested multi-degree.
        G = AdditiveAbelianGroup([0] * self._grading_rank)
        n = G(vector(n))
        return [b for b in tot_basis if b.degree() == n]

    def differential(self, diff):
        """
        Construct a differential on ``self``.

        INPUT:

        - ``diff`` -- a dictionary defining a differential

        The keys of the dictionary are generators of the algebra, and
        the associated values are their targets under the
        differential. Any generators which are not specified are
        assumed to have zero differential.

        EXAMPLES::

            sage: A.<a,b,c> = GradedCommutativeAlgebra(QQ, degrees=((1,0), (0, 1), (0,2)))
            sage: A.differential({a: c})
            Differential of Graded Commutative Algebra with generators ('a', 'b', 'c') in degrees ((1, 0), (0, 1), (0, 2)) over Rational Field
              Defn: a --> c
                    b --> 0
                    c --> 0
        """
        return Differential_multigraded(self, diff)

    def cdg_algebra(self, differential):
        r"""
        Construct a differential graded commutative algebra from ``self``
        by specifying a differential.

        INPUT:

        - ``differential`` -- a dictionary defining a differential or
          a map defining a valid differential

        The keys of the dictionary are generators of the algebra, and
        the associated values are their targets under the
        differential. Any generators which are not specified are
        assumed to have zero differential. Alternatively, the
        differential can be defined using the :meth:`differential`
        method; see below for an example.

        .. SEEALSO::

            :meth:`differential`

        EXAMPLES::

            sage: A.<a,b,c> = GradedCommutativeAlgebra(QQ, degrees=((1,0), (0, 1), (0,2)))
            sage: A.cdg_algebra({a: c})
            Commutative Differential Graded Algebra with generators ('a', 'b', 'c') in degrees ((1, 0), (0, 1), (0, 2)) over Rational Field with differential:
               a --> c
               b --> 0
               c --> 0
            sage: d = A.differential({a: c})
            sage: A.cdg_algebra(d)
            Commutative Differential Graded Algebra with generators ('a', 'b', 'c') in degrees ((1, 0), (0, 1), (0, 2)) over Rational Field with differential:
               a --> c
               b --> 0
               c --> 0
        """
        return DifferentialGCAlgebra_multigraded(self, differential)

    class Element(GCAlgebra.Element):
        def degree(self, total=False):
            """
            Return the degree of this element.

            INPUT:

            - ``total`` -- if ``True``, return the total degree, an
              integer; otherwise, return the degree as an element of
              an additive free abelian group

            If not requesting the total degree, raise an error if the
            element is not homogeneous.

            EXAMPLES::

                sage: A.<a,b,c> = GradedCommutativeAlgebra(GF(2), degrees=((1,0), (0,1), (1,1)))
                sage: (a**2*b).degree()
                (2, 1)
                sage: (a**2*b).degree(total=True)
                3
                sage: (a**2*b + c).degree()
                Traceback (most recent call last):
                ...
                ValueError: This element is not homogeneous
                sage: (a**2*b + c).degree(total=True)
                3
                sage: A(0).degree()
                Traceback (most recent call last):
                ...
                ValueError: The zero element does not have a well-defined degree
            """
            if total:
                return GCAlgebra.Element.degree(self)
            if self.is_zero():
                raise ValueError("The zero element does not have a well-defined degree")
            degrees = self.parent()._degrees_multi
            n = self.parent().ngens()
            # Multi-degree of each monomial is the exponent-weighted sum of
            # the generator degrees; all monomials must agree for the element
            # to be homogeneous.
            exps = self.lift().dict().keys()
            l = [sum(exp[i] * degrees[i] for i in range(n)) for exp in exps]
            if len(set(l)) == 1:
                return l[0]
            else:
                raise ValueError('This element is not homogeneous')
###########################################################
## Differential algebras
class DifferentialGCAlgebra(GCAlgebra):
"""
A commutative differential graded algebra.
INPUT:
- ``A`` -- a graded | |
{'key': 'properties.customDomainVerificationTest', 'type': 'str'},
'custom_domain_verification_failure_info': {'key': 'properties.customDomainVerificationFailureInfo', 'type': 'ErrorEntity'},
'has_conflict_on_scale_unit': {'key': 'properties.hasConflictOnScaleUnit', 'type': 'bool'},
'has_conflict_across_subscription': {'key': 'properties.hasConflictAcrossSubscription', 'type': 'bool'},
'conflicting_app_resource_id': {'key': 'properties.conflictingAppResourceId', 'type': 'str'},
'c_name_records': {'key': 'properties.cNameRecords', 'type': '[str]'},
'txt_records': {'key': 'properties.txtRecords', 'type': '[str]'},
'a_records': {'key': 'properties.aRecords', 'type': '[str]'},
'alternate_c_name_records': {'key': 'properties.alternateCNameRecords', 'type': '[str]'},
'alternate_txt_records': {'key': 'properties.alternateTxtRecords', 'type': '[str]'},
}
    def __init__(
        self,
        *,
        kind: Optional[str] = None,
        c_name_records: Optional[List[str]] = None,
        txt_records: Optional[List[str]] = None,
        a_records: Optional[List[str]] = None,
        alternate_c_name_records: Optional[List[str]] = None,
        alternate_txt_records: Optional[List[str]] = None,
        **kwargs
    ):
        """Only the DNS record lists (and ``kind``) are caller-settable; the
        remaining analysis fields are populated by the server and are
        initialized to ``None`` here.
        """
        super(CustomHostnameAnalysisResult, self).__init__(kind=kind, **kwargs)
        # Server-populated (read-only) diagnostic fields.
        self.is_hostname_already_verified = None
        self.custom_domain_verification_test = None
        self.custom_domain_verification_failure_info = None
        self.has_conflict_on_scale_unit = None
        self.has_conflict_across_subscription = None
        self.conflicting_app_resource_id = None
        # Caller-supplied DNS records for the hostname analysis.
        self.c_name_records = c_name_records
        self.txt_records = txt_records
        self.a_records = a_records
        self.alternate_c_name_records = alternate_c_name_records
        self.alternate_txt_records = alternate_txt_records
class CustomOpenIdConnectProvider(ProxyOnlyResource):
    """CustomOpenIdConnectProvider.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource Name.
    :vartype name: str
    :param kind: Kind of resource.
    :type kind: str
    :ivar type: Resource type.
    :vartype type: str
    :param enabled:
    :type enabled: bool
    :param registration:
    :type registration: ~azure.mgmt.web.v2020_06_01.models.OpenIdConnectRegistration
    :param login:
    :type login: ~azure.mgmt.web.v2020_06_01.models.OpenIdConnectLogin
    """

    # Attributes marked readonly are server-populated and rejected on input.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    # Maps Python attribute names to (JSON key path, msrest type) pairs.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'kind': {'key': 'kind', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'enabled': {'key': 'properties.enabled', 'type': 'bool'},
        'registration': {'key': 'properties.registration', 'type': 'OpenIdConnectRegistration'},
        'login': {'key': 'properties.login', 'type': 'OpenIdConnectLogin'},
    }

    def __init__(
        self,
        *,
        kind: Optional[str] = None,
        enabled: Optional[bool] = None,
        registration: Optional["OpenIdConnectRegistration"] = None,
        login: Optional["OpenIdConnectLogin"] = None,
        **kwargs
    ):
        """See the class docstring for parameter descriptions."""
        super(CustomOpenIdConnectProvider, self).__init__(kind=kind, **kwargs)
        self.enabled = enabled
        self.registration = registration
        self.login = login
class DatabaseBackupSetting(msrest.serialization.Model):
    """Database backup settings.

    All required parameters must be populated in order to send to Azure.

    :param database_type: Required. Database type (e.g. SqlAzure / MySql). Possible values include:
     "SqlAzure", "MySql", "LocalMySql", "PostgreSql".
    :type database_type: str or ~azure.mgmt.web.v2020_06_01.models.DatabaseType
    :param name:
    :type name: str
    :param connection_string_name: Contains a connection string name that is linked to the
     SiteConfig.ConnectionStrings.
     This is used during restore with overwrite connection strings options.
    :type connection_string_name: str
    :param connection_string: Contains a connection string to a database which is being backed up
     or restored. If the restore should happen to a new database, the database name inside is the
     new one.
    :type connection_string: str
    """

    # database_type is the only field Azure requires on input.
    _validation = {
        'database_type': {'required': True},
    }

    # Maps Python attribute names to (JSON key, msrest type) pairs.
    _attribute_map = {
        'database_type': {'key': 'databaseType', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'connection_string_name': {'key': 'connectionStringName', 'type': 'str'},
        'connection_string': {'key': 'connectionString', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        database_type: Union[str, "DatabaseType"],
        name: Optional[str] = None,
        connection_string_name: Optional[str] = None,
        connection_string: Optional[str] = None,
        **kwargs
    ):
        """See the class docstring for parameter descriptions."""
        super(DatabaseBackupSetting, self).__init__(**kwargs)
        self.database_type = database_type
        self.name = name
        self.connection_string_name = connection_string_name
        self.connection_string = connection_string
class DataSource(msrest.serialization.Model):
    """Class representing data source used by the detectors.

    :param instructions: Instructions if any for the data source.
    :type instructions: list[str]
    :param data_source_uri: Datasource Uri Links.
    :type data_source_uri: list[~azure.mgmt.web.v2020_06_01.models.NameValuePair]
    """

    # Maps Python attribute names to (JSON key, msrest type) pairs.
    _attribute_map = {
        'instructions': {'key': 'instructions', 'type': '[str]'},
        'data_source_uri': {'key': 'dataSourceUri', 'type': '[NameValuePair]'},
    }

    def __init__(
        self,
        *,
        instructions: Optional[List[str]] = None,
        data_source_uri: Optional[List["NameValuePair"]] = None,
        **kwargs
    ):
        """See the class docstring for parameter descriptions."""
        super(DataSource, self).__init__(**kwargs)
        self.instructions = instructions
        self.data_source_uri = data_source_uri
class DataTableResponseColumn(msrest.serialization.Model):
    """Column definition.

    :param column_name: Name of the column.
    :type column_name: str
    :param data_type: Data type which looks like 'String' or 'Int32'.
    :type data_type: str
    :param column_type: Column Type.
    :type column_type: str
    """

    # Maps Python attribute names to (JSON key, msrest type) pairs.
    _attribute_map = {
        'column_name': {'key': 'columnName', 'type': 'str'},
        'data_type': {'key': 'dataType', 'type': 'str'},
        'column_type': {'key': 'columnType', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        column_name: Optional[str] = None,
        data_type: Optional[str] = None,
        column_type: Optional[str] = None,
        **kwargs
    ):
        """See the class docstring for parameter descriptions."""
        super(DataTableResponseColumn, self).__init__(**kwargs)
        self.column_name = column_name
        self.data_type = data_type
        self.column_type = column_type
class DataTableResponseObject(msrest.serialization.Model):
    """Data Table which defines columns and raw row values.

    :param table_name: Name of the table.
    :type table_name: str
    :param columns: List of columns with data types.
    :type columns: list[~azure.mgmt.web.v2020_06_01.models.DataTableResponseColumn]
    :param rows: Raw row values.
    :type rows: list[list[str]]
    """

    # Maps Python attribute names to (JSON key, msrest type) pairs.
    _attribute_map = {
        'table_name': {'key': 'tableName', 'type': 'str'},
        'columns': {'key': 'columns', 'type': '[DataTableResponseColumn]'},
        'rows': {'key': 'rows', 'type': '[[str]]'},
    }

    def __init__(
        self,
        *,
        table_name: Optional[str] = None,
        columns: Optional[List["DataTableResponseColumn"]] = None,
        rows: Optional[List[List[str]]] = None,
        **kwargs
    ):
        """See the class docstring for parameter descriptions."""
        super(DataTableResponseObject, self).__init__(**kwargs)
        self.table_name = table_name
        self.columns = columns
        self.rows = rows
class DefaultErrorResponse(msrest.serialization.Model):
    """App Service error response.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar error: Error model.
    :vartype error: ~azure.mgmt.web.v2020_06_01.models.DefaultErrorResponseError
    """

    # The error payload is read-only: clients never send it.
    _validation = {
        'error': {'readonly': True},
    }

    _attribute_map = {
        'error': {'key': 'error', 'type': 'DefaultErrorResponseError'},
    }

    def __init__(
        self,
        **kwargs
    ):
        """The error attribute starts empty and is filled in by deserialization."""
        super().__init__(**kwargs)
        self.error = None
class DefaultErrorResponseError(msrest.serialization.Model):
    """Error model.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar code: Standardized string to programmatically identify the error.
    :vartype code: str
    :ivar message: Detailed error description and debugging information.
    :vartype message: str
    :ivar target: Detailed error description and debugging information.
    :vartype target: str
    :param details: Detailed error items.
    :type details: list[~azure.mgmt.web.v2020_06_01.models.DefaultErrorResponseErrorDetailsItem]
    :ivar innererror: More information to debug error.
    :vartype innererror: str
    """

    # Everything except `details` is populated exclusively by the service.
    _validation = {
        'code': {'readonly': True},
        'message': {'readonly': True},
        'target': {'readonly': True},
        'innererror': {'readonly': True},
    }

    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'target': {'key': 'target', 'type': 'str'},
        'details': {'key': 'details', 'type': '[DefaultErrorResponseErrorDetailsItem]'},
        'innererror': {'key': 'innererror', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        details: Optional[List["DefaultErrorResponseErrorDetailsItem"]] = None,
        **kwargs
    ):
        """Only `details` is caller-settable; read-only fields start as None."""
        super().__init__(**kwargs)
        self.code = None
        self.message = None
        self.target = None
        self.details = details
        self.innererror = None
class DefaultErrorResponseErrorDetailsItem(msrest.serialization.Model):
    """A single detailed error entry.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar code: Standardized string to programmatically identify the error.
    :vartype code: str
    :ivar message: Detailed error description and debugging information.
    :vartype message: str
    :ivar target: Detailed error description and debugging information.
    :vartype target: str
    """

    # All fields are server-populated and read-only.
    _validation = {
        'code': {'readonly': True},
        'message': {'readonly': True},
        'target': {'readonly': True},
    }

    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'target': {'key': 'target', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        """Initialize every read-only field to None; deserialization fills them in."""
        super().__init__(**kwargs)
        self.code = None
        self.message = None
        self.target = None
class DeletedAppRestoreRequest(ProxyOnlyResource):
    """Details about restoring a deleted app.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource Name.
    :vartype name: str
    :param kind: Kind of resource.
    :type kind: str
    :ivar type: Resource type.
    :vartype type: str
    :param deleted_site_id: ARM resource ID of the deleted app. Example:
     /subscriptions/{subId}/providers/Microsoft.Web/deletedSites/{deletedSiteId}.
    :type deleted_site_id: str
    :param recover_configuration: If true, deleted site configuration, in addition to content, will
     be restored.
    :type recover_configuration: bool
    :param snapshot_time: Point in time to restore the deleted app from, formatted as a DateTime
     string. If unspecified, default value is the time that the app was deleted.
    :type snapshot_time: str
    :param use_dr_secondary: If true, the snapshot is retrieved from DRSecondary endpoint.
    :type use_dr_secondary: bool
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    # Request properties are nested under the ARM 'properties' envelope on the wire.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'kind': {'key': 'kind', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'deleted_site_id': {'key': 'properties.deletedSiteId', 'type': 'str'},
        'recover_configuration': {'key': 'properties.recoverConfiguration', 'type': 'bool'},
        'snapshot_time': {'key': 'properties.snapshotTime', 'type': 'str'},
        'use_dr_secondary': {'key': 'properties.useDRSecondary', 'type': 'bool'},
    }

    def __init__(
        self,
        *,
        kind: Optional[str] = None,
        deleted_site_id: Optional[str] = None,
        recover_configuration: Optional[bool] = None,
        snapshot_time: Optional[str] = None,
        use_dr_secondary: Optional[bool] = None,
        **kwargs
    ):
        """Forward `kind` to the ProxyOnlyResource base and store the restore options."""
        super().__init__(kind=kind, **kwargs)
        self.deleted_site_id = deleted_site_id
        self.recover_configuration = recover_configuration
        self.snapshot_time = snapshot_time
        self.use_dr_secondary = use_dr_secondary
class DeletedSite(ProxyOnlyResource):
"""A deleted app.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:ivar deleted_site_id: Numeric id for the deleted site.
:vartype deleted_site_id: int
:ivar deleted_timestamp: Time in UTC when the app was deleted.
:vartype deleted_timestamp: str
:ivar subscription: Subscription containing the deleted site.
:vartype subscription: str
:ivar resource_group: ResourceGroup that contained the deleted site.
| |
<reponame>scott-techart/flottitools
from contextlib import contextmanager
import maya.OpenMaya as OpenMaya
import maya.OpenMayaAnim as OpenMayaAnim
import pymel.core as pm
import flottitools.utils.namespaceutils as nsutils
import flottitools.utils.selectionutils as selutils
import flottitools.utils.skeletonutils as skelutils
# Default pm.skinCluster arguments shared by the bind_* helpers below:
# weights normalized, at most 4 influences per vertex (enforced), and
# unused influences are kept in the cluster (removeUnusedInfluence=False).
DEFAULT_SKINCLUSTER_KWARGS = {'bindMethod': 0,
                              'normalizeWeights': True,
                              'weightDistribution': 0,
                              'maximumInfluences': 4,
                              'obeyMaxInfluences': True,
                              'dropoffRate': 4,
                              'removeUnusedInfluence': False}
def get_skincluster(pynode):
    """Find the skinCluster connected to the given node.

    :param pynode: a transform or shape pynode; transforms are resolved
        to their shape first.
    :return: the nt.SkinCluster, or None when the node is unskinned.
    """
    try:
        shape = pynode.getShape()
    except AttributeError:
        # Shape nodes have no .getShape(); assume the input already is a shape.
        shape = pynode
    if not shape:
        return None
    clusters = shape.listHistory(type='skinCluster', future=False)
    return clusters[0] if clusters else None
@contextmanager
def max_influences_normalize_weights_disabled(skin_cluster, normalize_on_exit=False):
    """Temporarily turn off maintain-max-influences and weight normalization.

    Some Maya commands like skinPercent can produce slightly unpredictable
    results while the skinCluster normalizes weights, so both settings are
    switched off on entry and restored on exit.  When normalize_on_exit is
    True the weights are force-normalized once after restoring.
    """
    saved_maintain = skin_cluster.maintainMaxInfluences.get()
    saved_normalize = skin_cluster.setNormalizeWeights(q=True)
    skin_cluster.maintainMaxInfluences.set(False)
    skin_cluster.setNormalizeWeights(0)
    try:
        yield
    finally:
        # Restore the original settings before any forced normalization pass.
        skin_cluster.maintainMaxInfluences.set(saved_maintain)
        skin_cluster.setNormalizeWeights(saved_normalize)
        if normalize_on_exit:
            skin_cluster.forceNormalizeWeights()
def bind_mesh_to_joints(mesh, joints, **skincluster_kwargs):
    """Bind mesh to joints using DEFAULT_SKINCLUSTER_KWARGS plus any overrides.

    :return: the new nt.SkinCluster.
    """
    merged_kwargs = dict(DEFAULT_SKINCLUSTER_KWARGS, **skincluster_kwargs)
    with selutils.preserve_selection():
        pm.select(joints, r=True)
        return pm.skinCluster(joints, mesh, toSelectedBones=True, **merged_kwargs)
def bind_mesh_like_mesh(source_mesh, target_mesh, source_skincluster=None):
    """Bind target_mesh to the same influences that deform source_mesh."""
    skincl = source_skincluster or get_skincluster(source_mesh)
    return bind_mesh_to_joints(target_mesh, skincl.influenceObjects())
def bind_mesh_geodesic_voxel(mesh, joints, resolution=None, falloff=1.0, **skinCluster_kwargs):
    """Bind mesh to joints, then refine the weights with Maya's geodesic voxel bind.

    This cannot be run in Maya standalone or batch mode!
    geomBind() requires a GPU.
    """
    resolution = resolution or (64, 64)
    kwargs = dict(DEFAULT_SKINCLUSTER_KWARGS, **skinCluster_kwargs)
    # NOTE(review): kwargs is only consulted for maximumInfluences below; the
    # initial bind ignores the caller's overrides -- confirm this is intended.
    skin_cluster = bind_mesh_to_joints(mesh, joints)
    try:
        # according to the command reference 3 is the only valid bindMethod value for geomBind()
        pm.geomBind(skin_cluster, bindMethod=3, geodesicVoxelParams=resolution,
                    maxInfluences=kwargs.get('maximumInfluences'), falloff=falloff)
    except RuntimeError:
        # geomBind failed (e.g. no GPU); fall through and return the plain bind.
        pass
    return skin_cluster
def bind_mesh_delta_mush(mesh, joints, cleanup=False, **skinCluster_kwargs):
    """Bind mesh to joints, then replace the skinning with baked delta-mush weights."""
    initial_cluster = bind_mesh_to_joints(mesh, joints, **skinCluster_kwargs)
    max_infs = skinCluster_kwargs.get('maximumInfluences', 4)
    return apply_delta_mush_skinning(mesh, initial_cluster, max_influences=max_infs,
                                     sample_existing_skinning=False, cleanup=cleanup)
def get_vert_indexes_with_exceeding_influences(skinned_mesh, skin_cluster=None, max_influences=4):
    """Map vert index -> {influence: weight} for verts with more weighted
    influences than max_influences allows."""
    skin_cluster = skin_cluster or get_skincluster(skinned_mesh)
    all_weights = get_vert_indexes_to_weighted_influences(skin_cluster)
    return {vert: infs_to_wts
            for vert, infs_to_wts in all_weights.items()
            if len(infs_to_wts) > max_influences}
def get_skinned_meshes_from_scene():
    """Return the transforms of every skinned mesh in the current scene."""
    return _get_skinned_meshes()
def get_skinnned_meshes_in_list(nodes):
    """Return the members of nodes that are skinned mesh transforms."""
    skinned = _get_skinned_meshes(nodes)
    return [node for node in skinned if node in nodes]
def _get_skinned_meshes(nodes=None):
    """Collect the unique mesh transforms deformed by skinClusters.

    :param nodes: when None, search the whole scene; otherwise only
        skinClusters found among nodes are considered.
    :return: list of transforms (unordered, duplicate-free).
    """
    if nodes is None:
        clusters = pm.ls(typ='skinCluster')
    else:
        clusters = pm.ls(nodes, typ='skinCluster')
    unique_meshes = set()
    for cluster in clusters:
        geo_shapes = cluster.getGeometry()
        for mesh in pm.ls(geo_shapes, type='mesh'):
            unique_meshes.add(mesh.getParent())
    # set membership already guarantees there are no duplicates
    return list(unique_meshes)
def prune_exceeding_influences(vertex, skin_cluster=None, influences_to_weights=None, max_influences=4):
    """Zero out the smallest weights on vertex so it obeys max_influences."""
    skin_cluster = skin_cluster or get_skincluster(vertex)
    weights = influences_to_weights or get_weighted_influences(vertex, skin_cluster)
    pruned = get_pruned_influences_to_weights(weights, max_influences=max_influences)
    pm.skinPercent(skin_cluster, vertex, transformValue=pruned.items())
def prune_exceeding_skinned_mesh(skinned_mesh, vert_indexes_to_infs_and_wts=None, skincluster=None, max_influences=4):
    """Prune every vertex on skinned_mesh that exceeds max_influences."""
    skincluster = skincluster or get_skincluster(skinned_mesh)
    exceeding = vert_indexes_to_infs_and_wts or get_vert_indexes_with_exceeding_influences(skinned_mesh)
    # Suspend normalization while editing so skinPercent results stay exact;
    # weights get one forced normalization pass on exit.
    with max_influences_normalize_weights_disabled(skincluster, normalize_on_exit=True):
        for vert_index, infs_to_wts in exceeding.items():
            prune_exceeding_influences(skinned_mesh.vtx[vert_index],
                                       skin_cluster=skincluster,
                                       influences_to_weights=infs_to_wts,
                                       max_influences=max_influences)
def get_pruned_influences_to_weights(influences_to_weights, max_influences=4, divisor=1.0):
    """Return a copy of influences_to_weights with the smallest weights zeroed.

    Weights strictly below the cutoff (the max_influences-th largest value)
    become 0.0; every surviving weight is divided by divisor.  When there are
    no more entries than max_influences, nothing is pruned.
    """
    ordered = sorted(influences_to_weights.values(), reverse=True)
    cutoff_index = max_influences - 1
    try:
        cutoff = ordered[cutoff_index]
        if ordered[cutoff_index] == ordered[max_influences]:
            # A tie straddles the cap: fall back to a higher cutoff value.
            cutoff = ordered[max_influences - 2]
    except IndexError:
        # Fewer entries than the cap (or exactly at it) -- keep everything.
        cutoff = -1.0
    result = {}
    survivors = []
    for influence, weight in influences_to_weights.items():
        if weight < cutoff:
            result[influence] = 0.0
        else:
            result[influence] = weight / divisor
            survivors.append(influence)
    # very rare edge case where all influences have equal weight and exceed max
    if len(survivors) > max_influences:
        survivors.sort()
        for extra in survivors[max_influences:]:
            result[extra] = 0.0
    return result
def get_non_normalized_vert_indexes(vertices, skin_cluster=None, tolerance=.000001):
    """Map vert index -> total weight for verts whose weights do not sum to 1.0."""
    skin_cluster = skin_cluster or get_skincluster(vertices[0])
    vert_weights = get_vert_indexes_to_weighted_influences(skin_cluster, vertices)
    flagged = {}
    for vert_index, infs_wts in vert_weights.items():
        total = sum(infs_wts.values())
        if abs(total - 1.0) > tolerance:
            flagged[vert_index] = total
    return flagged
def move_weight_to_parent_and_remove_influence(influence_origin, skin_cluster):
    """Shift influence_origin's weights onto its parent joint, then remove it."""
    parent = influence_origin.getParent()
    return move_weight_and_remove_influence(influence_origin, parent, skin_cluster)
def move_weight_and_remove_influence(influence_origin, influence_destination, skin_cluster):
    """Transfer every weight held by influence_origin onto influence_destination,
    then remove influence_origin from skin_cluster.

    :return: the vertices whose weights were moved.
    """
    pm.select(clear=True)
    skin_cluster.selectInfluenceVerts(influence_origin)
    weighted_verts = [v for v in pm.selected(fl=True) if isinstance(v, pm.MeshVertex)]
    # the transformMoveWeights flag in skinPercent does not quite work like you
    # would expect, so the transfer is done per-vertex with move_weights instead.
    for vert in weighted_verts:
        move_weights(skin_cluster, vert, influence_origin, influence_destination)
    skin_cluster.removeInfluence(influence_origin)
    return weighted_verts
def remove_influences_from_skincluster(skin_cluster, influences):
    """Remove influences from skin_cluster.

    Normalization and max-influence maintenance are suspended for the
    duration of the removal (see max_influences_normalize_weights_disabled).
    """
    with max_influences_normalize_weights_disabled(skin_cluster):
        skin_cluster.removeInfluence(influences)
def move_weights(skin_cluster, vertex, origin_inf, destination_inf):
    """Sets origin_inf weight to 0.0 and adds its original weight to destination_inf."""
    infs_to_weights = get_weighted_influences(vertex, skin_cluster)
    moved = infs_to_weights.get(origin_inf, 0.0)
    combined = moved + infs_to_weights.get(destination_inf, 0.0)
    infs_to_weights.update({origin_inf: 0.0, destination_inf: combined})
    pm.skinPercent(skin_cluster, vertex, transformValue=infs_to_weights.items())
def normalize_skinned_meshes(skinned_meshes):
    """Normalize the skin weights of each mesh in skinned_meshes."""
    for skinned_mesh in skinned_meshes:
        normalize_skinned_mesh(skinned_mesh)
def normalize_skinned_mesh(skinned_mesh):
    """Force-normalize skinned_mesh's weights, then re-enable normalization."""
    skin_cluster = get_skincluster(skinned_mesh)
    skin_cluster.forceNormalizeWeights()
    # turn weight normalization back on (mode 1) after the forced pass
    skin_cluster.setNormalizeWeights(1)
def apply_delta_mush_skinning(skinned_mesh, skin_cluster=None, max_influences=4,
                              sample_existing_skinning=True, cleanup=False):
    """Re-skin skinned_mesh from a delta-mushed duplicate via bakeDeformer.

    A temporary duplicate gets a deltaMush deformer; its smoothed deformation
    is baked back onto skinned_mesh as plain skin weights, then the duplicate
    and the deformer node are deleted.
    """
    mush_mesh, _ = duplicate_skinned_mesh(skinned_mesh, skin_cluster, sample_existing_skinning)
    mush_node = apply_delta_mush(mush_mesh)
    baked_cluster = bake_deformer_to_skin(mush_mesh, skinned_mesh,
                                          max_influences=max_influences, cleanup=cleanup)
    pm.delete([mush_mesh, mush_node])
    return baked_cluster
def duplicate_skinned_mesh(skinned_mesh, skin_cluster=None, copy_skinning=True):
    """Duplicate skinned_mesh bound to the same influences.

    :return: (duplicate mesh, duplicate skinCluster)
    """
    skin_cluster = skin_cluster or get_skincluster(skinned_mesh)
    return duplicate_skinned_mesh_to_influences(skinned_mesh, skin_cluster.getInfluence(),
                                                copy_skinning=copy_skinning)
def duplicate_skinned_mesh_to_influences(skinned_mesh, influences, copy_skinning=True, bind_method=bind_mesh_to_joints, dup_namespace=None, dup_parent=None):
    """Duplicate skinned_mesh and bind the copy to influences.

    :param bind_method: callable(mesh, influences) -> skinCluster used for the bind.
    :param dup_namespace: namespace for the duplicate; defaults to the current one.
    :return: (duplicate mesh, duplicate skinCluster)
    """
    namespace = dup_namespace or pm.namespaceInfo(currentNamespace=True)
    with nsutils.preserve_namespace(namespace):
        mesh_copy = nsutils.duplicate_to_namespace(
            skinned_mesh, dup_namespace=namespace, dup_parent=dup_parent)[0]
        cluster_copy = bind_method(mesh_copy, influences)
        if copy_skinning:
            copy_weights(skinned_mesh, mesh_copy)
    return mesh_copy, cluster_copy
def duplicate_skinned_mesh_and_skeleton(skinned_mesh, dup_namespace=None, copy_skinning=True, bind_method=bind_mesh_to_joints, dup_parent=None):
    """Duplicate a skinned mesh together with its entire skeleton.

    :return: (duplicate mesh, duplicate skeleton root joint, duplicate skinCluster)
    """
    source_cluster = get_skincluster(skinned_mesh)
    first_influence = source_cluster.influenceObjects()[0]
    source_root = skelutils.get_root_joint_from_child(first_influence)
    if dup_namespace:
        nsutils.add_namespace_to_root(dup_namespace)
    dup_root = skelutils.duplicate_skeleton(source_root, dup_namespace=dup_namespace, dup_parent=dup_parent)
    dup_joints = skelutils.get_hierarchy_from_root(dup_root, joints_only=True)
    dup_mesh, dup_cluster = duplicate_skinned_mesh_to_influences(
        skinned_mesh, dup_joints, copy_skinning=copy_skinning,
        bind_method=bind_method, dup_namespace=dup_namespace, dup_parent=dup_parent)
    return dup_mesh, dup_root, dup_cluster
def apply_delta_mush(mesh, distanceWeight=1.0, displacement=1.0, **deltaMush_kwargs):
    """Apply a deltaMush deformer to mesh and return the new deformer node."""
    kwargs = dict({'smoothingIterations': 20,
                   'smoothingStep': 1.0,
                   'pinBorderVertices': False,
                   'envelope': 1.0,
                   'inwardConstraint': 0.0,
                   'outwardConstraint': 0.0},
                  **deltaMush_kwargs)
    mush_node = pm.deltaMush(mesh, **kwargs)
    # these are arguments that the Delta Mush UI has but are not valid arguments
    # for the deltaMush() command, so they are set directly on the node.
    mush_node.distanceWeight.set(distanceWeight)
    mush_node.displacement.set(displacement)
    return mush_node
def bake_deformer_to_skin(source_mesh, target_mesh, source_skeleton=None, target_skeleton=None,
                          max_influences=4, cleanup=False):
    """Bake source_mesh's deformation onto target_mesh as plain skin weights.

    Runs pm.bakeDeformer between the first joints of the two skeletons.
    bakeDeformer deletes and re-creates target_mesh's skinCluster, so the
    cluster is re-queried before the optional cleanup pass runs.
    """
    source_skeleton = source_skeleton or get_skincluster(source_mesh).getInfluence()
    target_skeleton = target_skeleton or get_skincluster(target_mesh).getInfluence()
    pm.bakeDeformer(srcMeshName=source_mesh,
                    srcSkeletonName=source_skeleton[0],
                    dstMeshName=target_mesh,
                    dstSkeletonName=target_skeleton[0],
                    maxInfluences=max_influences)
    # bakeDeformer re-created the destination skinCluster; fetch the new one.
    target_skin_cluster = get_skincluster(target_mesh)
    if cleanup:
        # bakeDeformer does not seem to respect max influences or normalize
        # weights; these cleanup operations are opt-in because they are very
        # slow, but effective.
        prune_exceeding_skinned_mesh(target_mesh, skincluster=target_skin_cluster, max_influences=max_influences)
        target_skin_cluster.forceNormalizeWeights()
    pm.warning("Bake Deformer process complete. You should probably save your work and restart Maya now. This process takes tons of memory and does not give it back when it's done.")
    return target_skin_cluster
def copy_weights(source_mesh, target_nodes, **copySkinWeights_kwargs):
    """Copies weights using pm.copySkinWeights with the most commonly used default kwargs.

    target_nodes can be a single skinned mesh or vertex or it can be a list of vertices.
    """
    kwargs = dict(noMirror=True,
                  surfaceAssociation='closestPoint',
                  influenceAssociation=('label', 'name', 'closestJoint'),
                  normalize=True)
    kwargs.update(copySkinWeights_kwargs)
    pm.copySkinWeights(source_mesh, target_nodes, **kwargs)
def get_root_joint_from_skinned_mesh(skinned_mesh):
    """Return the root joint of the skeleton deforming skinned_mesh."""
    first_influence = get_skincluster(skinned_mesh).getInfluence()[0]
    return skelutils.get_root_joint_from_child(first_influence)
def get_vert_indexes_to_weighted_influences(skin_cluster, vertices=None):
"""
Return a dictionary of vertex indices as keys and influence to weights dictionaries as values.
Only returns influences that have greater-than zero weights.
Original code from <NAME> https://www.charactersetup.com/tutorial_skinWeights.html
Tweaked to use PyNode influences and only check the vertices passed in (or all vertices if None).
:param skin_cluster: PyNode SkinCluster
:param vertices: PyNode MeshVertex Return weights for only the vertices provided.
If None return values for all vertices.
:returns: {vert_index: {influence: weight_value}}
"""
# poly mesh and skinCluster name
clusterName = skin_cluster.name()
# get the MFnSkinCluster for clusterName
selList = OpenMaya.MSelectionList()
selList.add(clusterName)
clusterNode = OpenMaya.MObject()
selList.getDependNode(0, clusterNode)
skinFn = OpenMayaAnim.MFnSkinCluster(clusterNode)
# get the MDagPath for all influence
infDags = OpenMaya.MDagPathArray()
skinFn.influenceObjects(infDags)
# Get PyNodes for influences. They are returned in the same order as the OpenMaya commands return influence indices.
# They have the advantage of containing more information like the influence name.
# Their index can be derived from the skinCluster using skin_cluster.indexForInfluenceObject(influence)
influences = skin_cluster.influenceObjects()
# need a influence index to influences mapping because influence indices can diverge from the order they come from
# skinCluster.influenceObjects() if methods like removeInfluence() have been used on the skinCluster before.
inf_index_to_infs = {}
for influence in influences:
inf_index = get_influence_index(influence, skin_cluster)
inf_index_to_infs[inf_index] = influence
# get the MPlug for the weightList and weights attributes
weight_list_plug = skinFn.findPlug('weightList')
weights_plug = skinFn.findPlug('weights')
wlAttr = weight_list_plug.attribute()
wAttr = weights_plug.attribute()
wInfIds = OpenMaya.MIntArray()
# the weights are stored in dictionary, the key is the vertId,
# the value is another dictionary whose key is the influence id and
# value is the weight for that influence
weights = {}
if vertices:
# get influences and weights for only the vertices passed in
vert_indices = [v.index() for v | |
"""
Created on March 14, 2017
Originally written by <NAME> in 2015
@author: <NAME>
"""
import os
from datetime import datetime
import numpy as np
import pandas as pd
import pytz
def storms(precipitation, perc_snow, mass=1, time=4,
           stormDays=None, stormPrecip=None, ps_thresh=0.5):
    """
    Calculate the decimal days since the last storm given a precip time series,
    percent snow, mass threshold, and time threshold

    - Will look for pixels where perc_snow > 50% as storm locations
    - A new storm will start if the mass at the pixel has exceeded the mass
      limit, this ensures that the enough has accumulated

    Args:
        precipitation: Precipitation values
        perc_snow: Percent of precipitation that was snow
        mass: Threshold for the mass to start a new storm
        time: Threshold for the time to start a new storm
        stormDays: If specified, this is the output from a previous run of storms
        stormPrecip: Keeps track of the total storm precip
        ps_thresh: Percent-snow threshold for a pixel to count as storming

    Returns:
        tuple:
        - **stormDays** - Array representing the days since the last storm at
          a pixel
        - **stormPrecip** - Array representing the precip accumulated during
          the most recent storm

    Created April 17, 2015
    @author: <NAME>
    """
    # either preallocate or use the input
    if stormDays is None:
        stormDays = np.zeros(precipitation.shape)
    if stormPrecip is None:
        stormPrecip = np.zeros(precipitation.shape)

    # if there is no snow, don't reset the counter
    # This ensures that the albedo won't be reset
    stormDays += 1
    if np.sum(perc_snow) == 0:
        stormPrecip = np.zeros(precipitation.shape)
        return stormDays, stormPrecip

    # determine locations where it has snowed
    idx = perc_snow >= ps_thresh

    # determine locations where the time threshold has passed;
    # these areas' stormPrecip will be set back to zero
    idx_time = stormDays >= time
    stormPrecip[idx_time] = 0

    # accumulate the precipitation into the running storm total
    # (bug fix: was `=+`, which overwrote the total instead of adding to it)
    stormPrecip[idx] += precipitation[idx]

    # see if the mass threshold has been passed
    idx_mass = stormPrecip >= mass

    # reset the stormDays to zero where the storm is present
    stormDays[idx_mass] = 0

    return stormDays, stormPrecip
def time_since_storm(precipitation, perc_snow, time_step=1/24, mass=1.0, time=4,
                     stormDays=None, stormPrecip=None, ps_thresh=0.5):
    """
    Calculate the decimal days since the last storm given a precip time series,
    percent snow, mass threshold, and time threshold

    - Will look for pixels where perc_snow > 50% as storm locations
    - A new storm will start if the mass at the pixel has exceeded the mass
      limit, this ensures that the enough has accumulated

    Args:
        precipitation: Precipitation values
        perc_snow: Percent of precipitation that was snow
        time_step: Step in days of the model run
        mass: Threshold for the mass to start a new storm
        time: Threshold for the time to start a new storm
        stormDays: If specified, this is the output from a previous run of storms
            else it will be set to the date_time value
        stormPrecip: Keeps track of the total storm precip
        ps_thresh: Percent-snow threshold for a pixel to count as storming

    Returns:
        tuple:
        - **stormDays** - Array representing the days since the last storm at
          a pixel
        - **stormPrecip** - Array representing the precip accumulated during
          the most recent storm

    Created Janurary 5, 2016
    @author: <NAME>
    """
    # either preallocate or use the input
    if stormDays is None:
        stormDays = np.zeros(precipitation.shape)
    if stormPrecip is None:
        stormPrecip = np.zeros(precipitation.shape)

    # if there is no snow, don't reset the counter
    # This ensures that the albedo won't be reset
    stormDays += time_step
    if np.sum(perc_snow) == 0:
        stormPrecip = np.zeros(precipitation.shape)
        return stormDays, stormPrecip

    # determine locations where it has snowed
    idx = perc_snow >= ps_thresh

    # determine locations where the time threshold has passed;
    # these areas' stormPrecip will be set back to zero
    idx_time = stormDays >= time
    stormPrecip[idx_time] = 0

    # accumulate the precipitation into the running storm total
    # (bug fix: was `=+`, which overwrote the total instead of adding to it)
    stormPrecip[idx] += precipitation[idx]

    # see if the mass threshold has been passed
    idx_mass = stormPrecip >= mass

    # reset the stormDays to zero where the storm is present
    stormDays[idx_mass] = 0

    return stormDays, stormPrecip
def time_since_storm_basin(precipitation, storm, stormid, storming, time, time_step=1/24, stormDays=None):
    """Track a uniform, basin-wide days-since-last-storm value per pixel.

    The counter grows by time_step before the first storm and after the
    current storm has ended, and is held at zero for the duration of the
    storm itself.

    Args:
        precipitation: Precipitation values (used only for its shape)
        storm: dict with 'start' and 'end' of the current/most recent storm
        stormid: ID of the current storm
        storming: whether it is currently storming
        time: current time
        time_step: step in days of the model run
        stormDays: uniform days since last storm on a pixel basis

    Returns:
        stormDays: uniform days since last storm on a pixel basis
    """
    if stormDays is None:
        stormDays = np.zeros(precipitation.shape)

    before_first_storm = (not storming) and time <= storm['start'] and stormid == 0
    if before_first_storm:
        # before the first storm: just accumulate the time step
        stormDays += time_step
    elif time <= storm['end']:
        # mid-storm: the counter is reset to zero everywhere
        stormDays = np.zeros(precipitation.shape)
    else:
        # after the storm: accumulate uniformly across the basin
        stormDays += time_step
    return stormDays
def time_since_storm_pixel(precipitation, dpt, perc_snow, storming, time_step=1/24, stormDays=None, mass=1.0, ps_thresh=0.5):
    """Per-pixel decimal days since the last storm.

    The counter grows by time_step everywhere each call, and resets to zero
    only on pixels that received at least `mass` precipitation with
    perc_snow above ps_thresh, while it is storming and the minimum dew
    point is below 2.0 (i.e. the storm is not overly warm).

    Args:
        precipitation: Precipitation values
        dpt: dew point values
        perc_snow: percent_snow values
        storming: whether it is currently storming
        time_step: step in days of the model run
        stormDays: days since last storm on a pixel basis
        mass: Threshold for the mass to start a new storm
        ps_thresh: Threshold for percent_snow

    Returns:
        stormDays: days since last storm on pixel basis
    """
    if stormDays is None:
        stormDays = np.zeros(precipitation.shape)

    # advance the counter everywhere
    stormDays += time_step

    # only reset where it is storming and the storm is cold enough
    if storming and dpt.min() < 2.0:
        enough_mass = precipitation >= mass
        snowy = perc_snow >= ps_thresh
        stormDays[enough_mass & snowy] = 0
    return stormDays
def tracking_by_station(precip, mass_thresh = 0.01, steps_thresh = 3):
"""
Processes the vector station data prior to the data being distributed
Args:
precipitation: precipitation values
time: Time step that smrf is on
time_steps_since_precip: time steps since the last precipitation
storm_lst: list that store the storm cycles in order. A storm is recorded by
its start and its end. The list
is passed by reference and modified internally.
Each storm entry should be in the format of:
[{start:Storm Start, end:Storm End}]
e.g.
[
{start:date_time1,end:date_time2,'BOG1':100, 'ATL1':85},
{start:date_time3,end:date_time4,'BOG1':50, 'ATL1':45},
]
#would be a two storms at stations BOG1 and ATL1
mass_thresh: mass amount that constitutes a real precip event,
default = 0.01.
steps_thresh: Number of time steps that constitutes the end of a precip
event, default = 2 steps (typically 2 hours)
Returns:
tuple:
- **storms** - A list of dictionaries containing storm start,stop,
mass accumulated, of given storm.
- **storm_count** - A total number of storms found
Created April 24, 2017
@author: <NAME>
"""
storm_columns = ['start','end']
stations = list(precip)
storm_columns+=stations
storms = []
stations = list(precip)
is_storming = False
time_steps_since_precip= 0
tzinfo = pytz.timezone('UTC')
for i,row in precip.iterrows():
time = pd.Timestamp(i)
#Storm Idenificiation
if row.max() > mass_thresh:
#Start a new storm
if not is_storming:
new_storm = {}
new_storm['start'] = time
for sta,p in row.iteritems():
new_storm[sta] = 0
#Create a new row
is_storming = True
#print "=="*10 + "> New storm!"
time_steps_since_precip = 0
#Always add the latest end date to avoid unclosed storms
new_storm['end'] = time
#Accumulate precip for storm total
for sta,mass in row.iteritems():
new_storm[sta] += mass
elif is_storming and time_steps_since_precip < steps_thresh:
#storm_lst[-1]['end'] = time
new_storm['end'] = time
time_steps_since_precip+=1
#print "=="*10 +"> Hours since precip = {0}".format(time_steps_since_precip)
#print "=="*10 + "> still storming but no precip!"
if time_steps_since_precip >= steps_thresh and is_storming:
is_storming = False
storms.append(new_storm)
#print "=="*10 + "> not storming!"
#Append the last storm if we ended during a storm
if | |
fov):
self.fov = fov
# this is vertical fov
P = perspective(self.fov, float(self.width) /
float(self.height), 0.01, 100)
self.P = np.ascontiguousarray(P, np.float32)
def set_projection_matrix(self, w, h, fu, fv, u0, v0, znear, zfar):
    """Build an OpenGL projection matrix from pinhole-camera intrinsics.

    The result is stored in self.P as a 4x4 float32 array (the layout the
    renderer later uploads with glUniformMatrix4fv).  Behavior matches the
    original, with the C-style semicolons removed and the frustum bounds
    given readable names.

    :param w, h: image width and height in pixels
    :param fu, fv: focal lengths in pixels
    :param u0, v0: principal point in pixels
    :param znear, zfar: near/far clip distances
    """
    # Frustum bounds at the near plane, derived from the intrinsics.
    left = -u0 * znear / fu
    right = (w - u0) * znear / fu
    top = -v0 * znear / fv
    bottom = (h - v0) * znear / fv

    P = np.zeros((4, 4), dtype=np.float32)
    P[0, 0] = 2 * znear / (right - left)
    P[1, 1] = 2 * znear / (top - bottom)
    P[2, 0] = (right + left) / (left - right)
    P[2, 1] = (top + bottom) / (bottom - top)
    P[2, 2] = (zfar + znear) / (zfar - znear)
    P[2, 3] = 1.0
    P[3, 2] = (2 * zfar * znear) / (znear - zfar)
    self.P = P
def set_light_color(self, color):
    """Store the light color; it is consumed by render() when setting shader uniforms."""
    self.lightcolor = color
    def render(self, cls_indexes, image_tensor, seg_tensor, normal_tensor=None, pc1_tensor=None, pc2_tensor=None):
        """Render the instances selected by cls_indexes into CUDA tensors.

        Draws an optional marker grid, then each materialed instance with the
        shader matching its texture/material configuration, and finally copies
        the GL color attachments into the provided torch CUDA tensors via
        self.r.map_tensor (image, segmentation, and optionally normals and two
        point-cloud buffers).  Poses must have been set beforehand with
        set_poses()/set_allocentric_poses().
        """
        frame = 0
        GL.glClearColor(0, 0, 0, 1)
        GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
        GL.glEnable(GL.GL_DEPTH_TEST)
        #GL.glLightModeli(GL.GL_LIGHT_MODEL_TWO_SIDE, GL.GL_TRUE)
        if self.render_marker:
            # render some grid and directions
            GL.glUseProgram(self.shaderProgram_simple)
            GL.glBindVertexArray(self.grid)
            # V is transposed on upload (GL_TRUE), P is not (GL_FALSE).
            GL.glUniformMatrix4fv(GL.glGetUniformLocation(
                self.shaderProgram_simple, 'V'), 1, GL.GL_TRUE, self.V)
            GL.glUniformMatrix4fv(GL.glGetUniformLocation(
                self.shaderProgram_simple, 'P'), 1, GL.GL_FALSE, self.P)
            # NOTE(review): np.int is removed in NumPy >= 1.24; this should
            # become np.int32 (matching GL_UNSIGNED_INT) -- confirm and fix.
            GL.glDrawElements(GL.GL_LINES, 160,
                              GL.GL_UNSIGNED_INT, np.arange(160, dtype=np.int))
            GL.glBindVertexArray(0)
            GL.glUseProgram(0)
            # end rendering markers
        size = 0
        for i in range(len(cls_indexes)):
            index = cls_indexes[i]
            is_materialed = self.is_materialed[index]
            if is_materialed:
                num = len(self.materials[index])
                for idx in range(num):
                    # Pick the shader: textured > plain vertex color > material.
                    is_texture = self.is_textured[index][idx] #index
                    if is_texture:
                        shader = self.shaderProgram_textureMat
                    elif self.textures[index][idx] == 'color':
                        shader = self.shaderProgram_textureless
                    else:
                        shader = self.shaderProgram_material
                    GL.glUseProgram(shader)
                    # Camera, projection and per-instance pose uniforms.
                    GL.glUniformMatrix4fv(GL.glGetUniformLocation(shader, 'V'), 1, GL.GL_TRUE, self.V)
                    GL.glUniformMatrix4fv(GL.glGetUniformLocation(shader, 'P'), 1, GL.GL_FALSE, self.P)
                    GL.glUniformMatrix4fv(GL.glGetUniformLocation(shader, 'pose_trans'), 1, GL.GL_FALSE, self.poses_trans[i])
                    GL.glUniformMatrix4fv(GL.glGetUniformLocation(shader, 'pose_rot'), 1, GL.GL_TRUE, self.poses_rot[i])
                    # Lighting and material parameters; materials[index][idx]
                    # packs [diffuse(3), specular(3), ambient(3), ..., shininess].
                    GL.glUniform3f(GL.glGetUniformLocation(shader, 'light_position'), *self.lightpos)
                    GL.glUniform3f(GL.glGetUniformLocation(shader, 'instance_color'), *self.colors[index])
                    GL.glUniform3f(GL.glGetUniformLocation(shader, 'light_color'), *self.lightcolor)
                    GL.glUniform3f(GL.glGetUniformLocation(shader, 'mat_diffuse'), *self.materials[index][idx][:3])
                    GL.glUniform3f(GL.glGetUniformLocation(shader, 'mat_specular'), *self.materials[index][idx][3:6])
                    GL.glUniform3f(GL.glGetUniformLocation(shader, 'mat_ambient'), *self.materials[index][idx][6:9])
                    GL.glUniform1f(GL.glGetUniformLocation(shader, 'mat_shininess'), self.materials[index][idx][-1])
                    try:
                        if is_texture:
                            GL.glActiveTexture(GL.GL_TEXTURE0)
                            GL.glBindTexture(GL.GL_TEXTURE_2D, self.textures[index][idx]) #self.instances[index]
                            GL.glUniform1i(self.texUnitUniform_textureMat, 0)
                        GL.glBindVertexArray(self.VAOs[self.instances[index]+idx]) #
                        GL.glDrawElements(GL.GL_TRIANGLES, self.faces[self.instances[index]+idx].size,
                                          GL.GL_UNSIGNED_INT, self.faces[self.instances[index]+idx])
                    finally:
                        # Always unbind, even if the draw call raised.
                        GL.glBindVertexArray(0)
                        GL.glUseProgram(0)
        GL.glDisable(GL.GL_DEPTH_TEST)
        # mapping: copy GL color attachments into the caller's CUDA tensors.
        self.r.map_tensor(int(self.color_tex), int(self.width), int(self.height), image_tensor.data_ptr())
        self.r.map_tensor(int(self.color_tex_3), int(self.width), int(self.height), seg_tensor.data_ptr())
        if normal_tensor is not None:
            self.r.map_tensor(int(self.color_tex_2), int(self.width), int(self.height), normal_tensor.data_ptr())
        if pc1_tensor is not None:
            self.r.map_tensor(int(self.color_tex_4), int(self.width), int(self.height), pc1_tensor.data_ptr())
        if pc2_tensor is not None:
            self.r.map_tensor(int(self.color_tex_5), int(self.width), int(self.height), pc2_tensor.data_ptr())
        # NOTE(review): the string below is dead code kept as reference for the
        # old CPU readback path (glReadPixels); it is never executed.
        '''
        GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT0)
        frame = GL.glReadPixels(0, 0, self.width, self.height, GL.GL_BGRA, GL.GL_FLOAT)
        #frame = np.frombuffer(frame,dtype = np.float32).reshape(self.width, self.height, 4)
        frame = frame.reshape(self.height, self.width, 4)[::-1, :]
        # GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT1)
        #normal = GL.glReadPixels(0, 0, self.width, self.height, GL.GL_BGRA, GL.GL_FLOAT)
        #normal = np.frombuffer(frame, dtype=np.uint8).reshape(self.width, self.height, 4)
        #normal = normal[::-1, ]
        GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT2)
        seg = GL.glReadPixels(0, 0, self.width, self.height, GL.GL_BGRA, GL.GL_FLOAT)
        #seg = np.frombuffer(frame, dtype=np.uint8).reshape(self.width, self.height, 4)
        seg = seg.reshape(self.height, self.width, 4)[::-1, :]
        #pc = GL.glReadPixels(0, 0, self.width, self.height, GL.GL_DEPTH_COMPONENT, GL.GL_FLOAT)
        # seg = np.frombuffer(frame, dtype=np.uint8).reshape(self.width, self.height, 4)
        #pc = np.stack([pc,pc, pc, np.ones(pc.shape)], axis = -1)
        #pc = pc[::-1, ]
        #pc = (1-pc) * 10
        # points in object coordinate
        GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT3)
        pc2 = GL.glReadPixels(0, 0, self.width, self.height, GL.GL_RGBA, GL.GL_FLOAT)
        pc2 = pc2.reshape(self.height, self.width, 4)[::-1, :]
        pc2 = pc2[:,:,:3]
        # points in camera coordinate
        GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT4)
        pc3 = GL.glReadPixels(0, 0, self.width, self.height, GL.GL_RGBA, GL.GL_FLOAT)
        pc3 = pc3.reshape(self.height, self.width, 4)[::-1, :]
        pc3 = pc3[:,:,:3]
        return [frame, seg, pc2, pc3]
        '''
def set_light_pos(self, light):
self.lightpos = light
def get_num_objects(self):
return len(self.objects)
def set_poses(self, poses):
self.poses_rot = [np.ascontiguousarray(
quat2rotmat(item[3:])) for item in poses]
self.poses_trans = [np.ascontiguousarray(
xyz2mat(item[:3])) for item in poses]
def set_allocentric_poses(self, poses):
self.poses_rot = []
self.poses_trans = []
for pose in poses:
x, y, z = pose[:3]
quat_input = pose[3:]
dx = np.arctan2(x, -z)
dy = np.arctan2(y, -z)
# print(dx, dy)
quat = euler2quat(-dy, -dx, 0, axes='sxyz')
quat = qmult(quat, quat_input)
self.poses_rot.append(np.ascontiguousarray(quat2rotmat(quat)))
self.poses_trans.append(np.ascontiguousarray(xyz2mat(pose[:3])))
def release(self):
print(self.glstring)
self.clean()
self.r.release()
def clean(self):
GL.glDeleteTextures([self.color_tex, self.color_tex_2,
self.color_tex_3, self.color_tex_4, self.depth_tex])
self.color_tex = None
self.color_tex_2 = None
self.color_tex_3 = None
self.color_tex_4 = None
self.depth_tex = None
GL.glDeleteFramebuffers(1, [self.fbo])
self.fbo = None
GL.glDeleteBuffers(len(self.VAOs), self.VAOs)
self.VAOs = []
GL.glDeleteBuffers(len(self.VBOs), self.VBOs)
self.VBOs = []
GL.glDeleteTextures(self.textures)
self.textures = []
self.objects = [] # GC should free things here
self.faces = [] # GC should free things here
self.poses_trans = [] # GC should free things here
self.poses_rot = [] # GC should free things here
def transform_vector(self, vec):
vec = np.array(vec)
zeros = np.zeros_like(vec)
vec_t = self.transform_point(vec)
zero_t = self.transform_point(zeros)
v = vec_t - zero_t
return v
def transform_point(self, vec):
vec = np.array(vec)
if vec.shape[0] == 3:
v = self.V.dot(np.concatenate([vec, np.array([1])]))
return v[:3]/v[-1]
elif vec.shape[0] == 4:
v = self.V.dot(vec)
return v/v[-1]
else:
return None
def transform_pose(self, pose):
pose_rot = quat2rotmat(pose[3:])
pose_trans = xyz2mat(pose[:3])
pose_cam = self.V.dot(pose_trans.T).dot(pose_rot).T
return np.concatenate([mat2xyz(pose_cam), safemat2quat(pose_cam[:3, :3].T)])
def get_num_instances(self):
return len(self.instances)
def get_poses(self):
mat = [self.V.dot(self.poses_trans[i].T).dot(
self.poses_rot[i]).T for i in range(self.get_num_instances())]
poses = [np.concatenate(
[mat2xyz(item), safemat2quat(item[:3, :3].T)]) for item in mat]
return poses
def get_egocentric_poses(self):
return self.get_poses()
def get_allocentric_poses(self):
poses = self.get_poses()
poses_allocentric = []
for pose in poses:
dx = np.arctan2(pose[0], -pose[2])
dy = np.arctan2(pose[1], -pose[2])
quat = euler2quat(-dy, -dx, 0, axes='sxyz')
quat = qmult(qinverse(quat), pose[3:])
poses_allocentric.append(np.concatenate([pose[:3], quat]))
#print(quat, pose[3:], pose[:3])
return poses_allocentric
def get_centers(self):
centers = []
for i in range(len(self.poses_trans)):
pose_trans = self.poses_trans[i]
proj = (self.P.T.dot(self.V.dot(
pose_trans.T).dot(np.array([0, 0, 0, 1]))))
proj /= proj[-1]
centers.append(proj[:2])
centers = np.array(centers)
centers = (centers + 1) / 2.0
centers[:, 1] = 1 - centers[:, 1]
centers = centers[:, ::-1] # in y, x order
return centers
def vis(self, poses, cls_indexes, color_idx=None, color_list=None, cam_pos=[0, 0, 2.0], V=None,
distance=2.0, shifted_pose=None, interact=0, window_name='test'):
"""
a complicated visualization module
"""
theta = 0
cam_x, cam_y, cam_z = cam_pos
sample = []
new_poses = []
origin = np.linalg.inv(unpack_pose(poses[0]))
if shifted_pose is not None:
origin = np.linalg.inv(shifted_pose)
for pose in poses:
pose = unpack_pose(pose)
pose = origin.dot(pose)
new_poses.append(pack_pose(pose))
poses = new_poses
cam_pos = np.array([cam_x, cam_y, cam_z])
self.set_camera(cam_pos, cam_pos * 2 , [0, 1, 0])
if V is not None:
self.V = V
cam_pos = V[:3, 3]
self.set_light_pos(cam_pos)
self.set_poses(poses)
mouse_events = {
'view_dir': - self.V[:3, 3],
'view_origin': np.array([0, 0, 0.]), # anchor
'_mouse_ix': -1,
'_mouse_iy': -1,
'down': False,
'shift': False,
'trackball': Trackball(self.width, self.height, cam_pos=cam_pos)
}
image_tensor = torch.cuda.FloatTensor(self.height, self.width, 4).detach()
seg_tensor = torch.cuda.FloatTensor(self.height, self.width, 4).detach()
def update_dir():
view_dir = mouse_events['view_origin'] - self.V[:3, 3]
self.set_camera(self.V[:3, 3], self.V[:3, 3] - view_dir, [0, 1, 0]) # would shift along the sphere
self.V = self.V.dot(mouse_events['trackball'].property["model"].T)
def change_dir(event, x, y, flags, param): # fix later to be a finalized version
if event == cv2.EVENT_LBUTTONDOWN:
mouse_events['_mouse_ix'], mouse_events['_mouse_iy'] = x, y
mouse_events['down'] = True
if event == cv2.EVENT_MBUTTONDOWN:
mouse_events['_mouse_ix'], mouse_events['_mouse_iy'] = x, y
mouse_events['shift'] = True
if event == cv2.EVENT_MOUSEMOVE:
if mouse_events['down']:
dx = (x - mouse_events['_mouse_ix']) / -10.
dy = (y - mouse_events['_mouse_iy']) / -10.
mouse_events['trackball'].on_mouse_drag(x,y,dx,dy)
update_dir()
if mouse_events['shift']:
dx = (x - mouse_events['_mouse_ix']) / (-4000. / self.V[2, 3])
dy = (y - mouse_events['_mouse_iy']) / (-4000. / self.V[2, 3])
self.V[:3, 3] += 0.5 * np.array([dx, dy, 0])
mouse_events['view_origin'] += 0.5 * np.array([-dx, dy, 0]) # change
update_dir()
if event == cv2.EVENT_LBUTTONUP:
mouse_events['down'] = False
if event == cv2.EVENT_MBUTTONUP:
mouse_events['shift'] = False
if interact > 0:
cv2.namedWindow(window_name)
cv2.setMouseCallback(window_name, change_dir)
# update_dir()
img = np.zeros([self.height, self.width, 3])
while True:
new_cam_pos = -self.V[:3, :3].T.dot(self.V[:3, 3])
q = cv2.waitKey(3)
if interact > 0:
if q == ord('w'):
cam_z += 0.05
elif q == ord('s'):
cam_z -= 0.05
interact = 2
elif q == ord('u'):
interact = 1
elif q == ord('a'):
theta -= 0.1
elif q == ord('d'):
theta += 0.1
elif q == ord('x'):
self.V[:3, 3] += 0.02 * (self.V[:3, 3] - mouse_events['view_origin'])
update_dir()
elif q == ord('c'): # move closer
self.V[:3, 3] -= 0.02 * (self.V[:3, 3] - mouse_events['view_origin'])
update_dir()
elif q == ord('z'): # reset
self.set_camera(cam_pos, cam_pos * 2 , [0, 1, 0])
mouse_events['trackball'].reinit(cam_pos)
mouse_events['view_origin'] = np.zeros(3)
elif q == ord('i'):
for pose in poses:
pose[1] += 0.02
elif q == ord('k'):
for pose in poses:
pose[1] -= 0.02
elif q == ord('j'):
for pose in poses:
pose[0] -= 0.02
elif q == ord('l'):
for pose in poses:
pose[0] += 0.02
elif q == ord('n'):
print('camera V', self.V)
elif q == ord('p'):
cur_dir = os.path.dirname(os.path.abspath(__file__))
Image.fromarray(
(np.clip(frame[0][:, :, [2,1,0]] * 255, 0, 255)).astype(np.uint8)).save(cur_dir + '/test.png')
elif q == ord('q'): # wth
break
elif q == ord('r'): # rotate
for pose in poses:
pose[3:] = qmult(axangle2quat(
[0, 0, | |
points[1].name)
return super().name
def free_point(self, **kwargs):
if 'comment' not in kwargs:
kwargs = dict(kwargs)
kwargs['comment'] = Comment('point on line $%{line:line}$', {'line': self})
point = CoreScene.Point(self.scene, CoreScene.Point.Origin.line, line=self, **kwargs)
point.belongs_to(self)
return point
def intersection_point(self, obj, **kwargs):
"""
Creates an intersection point of the line and given object (line or circle).
Requires a constraint for determinate placement if the object a circle
"""
self.scene.assert_line_or_circle(obj)
assert self != obj, 'The line does not cross itself'
if 'comment' not in kwargs:
kwargs = dict(kwargs)
kwargs['comment'] = LazyComment('crossing point of %s and %s', self.label, obj.label)
if isinstance(obj, CoreScene.Circle):
crossing = CoreScene.Point(
self.scene,
CoreScene.Point.Origin.circle_x_line,
circle=obj, line=self, **kwargs
)
else:
existing = next((pt for pt in self.all_points if pt in obj), None)
if existing:
return existing.with_extra_args(**kwargs)
crossing = CoreScene.Point(
self.scene,
CoreScene.Point.Origin.line_x_line,
line0=self, line1=obj, **kwargs
)
crossing.belongs_to(self)
crossing.belongs_to(obj)
return crossing
def perpendicular_constraint(self, other, **kwargs):
"""
self ⟂ other
"""
self.point0.segment(self.point1).perpendicular_constraint(other.point0.segment(other.point1), **kwargs)
def __contains__(self, obj):
if obj is None:
return False
if isinstance(obj, CoreScene.Point):
return obj in self.all_points
if isinstance(obj, CoreScene.Vector):
return obj.start in self.all_points and obj.end in self.all_points
assert False, 'Operator not defined for %s and Line' % type(obj)
class Circle(Object):
prefix = 'Circ_'
def __init__(self, scene, **kwargs):
CoreScene.Object.__init__(self, scene, **kwargs)
self.all_points = []
if not scene.is_frozen:
if self.centre == self.radius.points[0]:
self.all_points.append(self.radius.points[1])
elif self.centre == self.radius.points[1]:
self.all_points.append(self.radius.points[0])
def centre_point(self, **kwargs):
return self.centre.with_extra_args(**kwargs)
def free_point(self, **kwargs):
if 'comment' not in kwargs:
kwargs = dict(kwargs)
kwargs['comment'] = LazyComment('point on circle %s', self.label)
point = CoreScene.Point(self.scene, CoreScene.Point.Origin.circle, circle=self, **kwargs)
point.belongs_to(self)
return point
def intersection_point(self, obj, **kwargs):
"""
Creates an intersection point of the circle and given object (line or circle).
Requires a constraint for determinate placement
"""
self.scene.assert_line_or_circle(obj)
assert self != obj, 'The circle does not cross itself'
if 'comment' not in kwargs:
kwargs = dict(kwargs)
kwargs['comment'] = LazyComment('crossing point of %s and %s', self.label, obj.label)
if isinstance(obj, CoreScene.Circle):
crossing = CoreScene.Point(
self.scene,
CoreScene.Point.Origin.circle_x_circle,
circle0=self, circle1=obj, **kwargs
)
else:
crossing = CoreScene.Point(
self.scene,
CoreScene.Point.Origin.circle_x_line,
circle=self, line=obj, **kwargs
)
crossing.belongs_to(self)
crossing.belongs_to(obj)
return crossing
def __contains__(self, obj):
if obj is None:
return False
if isinstance(obj, CoreScene.Point):
return obj in self.all_points
assert False, 'Operator not defined for %s and Circle' % type(obj)
    class Vector(Figure):
        """A directed segment from a start point to an end point."""
        def __init__(self, start, end):
            assert isinstance(start, CoreScene.Point)
            assert isinstance(end, CoreScene.Point)
            assert start.scene == end.scene
            self.start = start
            self.end = end
            self.points = (start, end)
            self.__segment = None  # lazily created undirected counterpart
        @property
        def as_segment(self):
            """The undirected segment with the same endpoints (cached)."""
            if self.__segment is None:
                self.__segment = self.start.segment(self.end)
            return self.__segment
        def angle(self, other):
            """Return the angle between this vector and *other*.

            Unless the scene is frozen, also ensures each side of the angle
            has non-zero length: a constraint is added for any side whose
            endpoints are not already covered by a not_equal constraint.
            """
            angle = self.scene._get_angle(self, other)
            if not self.scene.is_frozen:
                for vec in (self, other):
                    for cnstr in vec.scene.constraints(Constraint.Kind.not_equal):
                        if set(cnstr.params) == set(vec.points):
                            break
                    else:
                        # No existing not_equal constraint matched: add one.
                        vec.as_segment.non_zero_length_constraint(comment=Comment(
                            '$%{vector:side}$ is side of $%{angle:angle}$',
                            {'side': vec, 'angle': angle}
                        ))
            return angle
        @property
        def scene(self):
            """Scene that owns both endpoints."""
            return self.start.scene
        @property
        def reversed(self):
            """The same segment pointing the opposite way."""
            return self.end.vector(self.start)
        def parallel_constraint(self, vector, **kwargs):
            """
            Self and vector have the same direction.
            This constraint also fulfilled if at least one of the vectors has zero length.
            """
            assert isinstance(vector, CoreScene.Vector)
            assert self.scene == vector.scene
            return self.scene.constraint(Constraint.Kind.parallel_vectors, self, vector, **kwargs)
        def __str__(self):
            return '%s %s' % (self.start, self.end)
def _get_segment(self, point0, point1):
assert isinstance(point0, CoreScene.Point)
assert isinstance(point1, CoreScene.Point)
assert point0.scene == self
assert point1.scene == self
key = frozenset([point0, point1])
#key = (point0, point1)
segment = self.__segments.get(key)
if segment is None:
segment = CoreScene.Segment(point0, point1)
self.__segments[key] = segment
return segment
    class Segment(Figure):
        """An undirected segment between two points."""
        def __init__(self, pt0, pt1):
            self.points = (pt0, pt1)
            self.point_set = frozenset(self.points)
            self.__middle_point = None  # cached result of middle_point()
        @property
        def scene(self):
            """Scene that owns the endpoints."""
            return self.points[0].scene
        def middle_point(self, **kwargs):
            """
            Constructs middle point of the segment
            """
            if self.__middle_point:
                return self.__middle_point.with_extra_args(**kwargs)
            delta = self.points[0].vector(self.points[1])
            coef = divide(1, 2)
            # Reuse an existing translated point that is already the midpoint
            # (from either endpoint); the for/else creates one only when no
            # match is found.
            # NOTE(review): in the reuse path kwargs are discarded rather than
            # applied via with_extra_args -- confirm this is intended.
            for pt in self.scene.points():
                if pt.origin == CoreScene.Point.Origin.translated:
                    if pt.base == self.points[0] and pt.delta == delta and pt.coef == coef:
                        middle = pt
                        break
                    if pt.base == self.points[1] and pt.delta == delta.reversed and pt.coef == coef:
                        middle = pt
                        break
            else:
                middle = CoreScene.Point(
                    self.scene, CoreScene.Point.Origin.translated,
                    base=self.points[0], delta=delta, coef=coef, **kwargs
                )
                middle.collinear_constraint(*self.points, guaranteed=True)
                from .property import MiddleOfSegmentProperty
                self.scene.add_property(MiddleOfSegmentProperty(middle, self))
            self.__middle_point = middle
            return middle
        def free_point(self, **kwargs):
            """Construct a new point constrained to lie inside the segment."""
            if 'comment' not in kwargs:
                kwargs = dict(kwargs)
                kwargs['comment'] = Comment('point on segment $%{segment:seg}$', {'seg': self})
            point = self.line_through(layer='auxiliary').free_point(**kwargs)
            point.inside_constraint(self)
            return point
        def line_through(self, **kwargs):
            """The line through both endpoints of the segment."""
            return self.points[0].line_through(self.points[1], **kwargs)
        def perpendicular_bisector_line(self, **kwargs):
            """
            Perpendicular bisector
            """
            middle = self.middle_point(layer='auxiliary')
            line = self.line_through(layer='auxiliary')
            if kwargs.get('comment') is None:
                kwargs = dict(kwargs)
                kwargs['comment'] = Comment(
                    'perpendicular bisector of $%{segment:seg}$',
                    {'seg': self}
                )
            bisector = middle.perpendicular_line(line, **kwargs)
            comment=Comment(
                '$%{line:bisector}$ is the perpendicular bisector of $%{segment:seg}$',
                {'bisector': bisector, 'seg': self}
            )
            bisector.perpendicular_constraint(line, comment=comment)
            return bisector
        def perpendicular_constraint(self, other, **kwargs):
            """
            self ⟂ other
            """
            # If the constraint already exists, merge the kwargs instead of
            # registering a duplicate.
            for cnstr in self.scene.constraints(Constraint.Kind.perpendicular):
                if set(cnstr.params) == {self, other}:
                    cnstr.update(kwargs)
                    return
            self.scene.constraint(Constraint.Kind.perpendicular, self, other, **kwargs)
        def ratio_constraint(self, segment, coef, **kwargs):
            """
            |self| == |segment| * coef
            coef is a non-zero number
            """
            assert isinstance(segment, CoreScene.Segment)
            assert self.scene == segment.scene
            assert coef != 0
            # Merge into an existing identical length_ratio constraint if any.
            for cnstr in self.scene.constraints(Constraint.Kind.length_ratio):
                if set(cnstr.params) == {self, segment, coef}:
                    cnstr.update(kwargs)
                    return
            comment = kwargs.get('comment')
            if not comment:
                kwargs = dict(kwargs)
                if coef == 1:
                    pattern = '$|%{segment:seg0}| = |%{segment:seg1}|$'
                else:
                    pattern = '$|%{segment:seg0}| = %{multiplier:coef} |%{segment:seg1}|$'
                kwargs['comment'] = Comment(
                    pattern, {'seg0': self, 'seg1': segment, 'coef': coef}
                )
            return self.scene.constraint(Constraint.Kind.length_ratio, self, segment, coef, **kwargs)
        def congruent_constraint(self, segment, **kwargs):
            """
            |self| == |vector|
            """
            self.ratio_constraint(segment, 1, **kwargs)
        def non_zero_length_constraint(self, **kwargs):
            """
            |self| > 0
            """
            self.points[0].not_equal_constraint(self.points[1], **kwargs)
        def length_constraint(self, length, **kwargs):
            """
            |self| == length
            """
            if length > 0:
                self.non_zero_length_constraint(**kwargs)
            #TODO: equal_constraint otherwise?
            self.scene.constraint(Constraint.Kind.distance, self, length, **kwargs)
        def __str__(self):
            return '%s %s' % self.points
    def _get_angle(self, vector0, vector1):
        """Return the (cached) angle between two vectors of this scene.

        Angles are interned per unordered vector pair.  When a new angle is
        created that has a pseudo-vertex but no proper vertex (the vectors
        share an endpoint that is not both starts), the corresponding
        supplementary-angle property (sum == 180) is registered.
        """
        assert isinstance(vector0, CoreScene.Vector)
        assert isinstance(vector1, CoreScene.Vector)
        assert vector0.scene == self
        assert vector1.scene == self
        key = frozenset([vector0, vector1])
        angle = self.__angles.get(key)
        if angle is None:
            angle = CoreScene.Angle(vector0, vector1)
            if angle.vertex is None and angle.pseudo_vertex:
                if angle.vectors[0].end == angle.vectors[1].start:
                    from .property import SumOfTwoAnglesProperty
                    #TODO add comment
                    self.add_property(SumOfTwoAnglesProperty(
                        angle, angle.vectors[0].reversed.angle(angle.vectors[1]), 180
                    ))
                elif angle.vectors[0].start == angle.vectors[1].end:
                    from .property import SumOfTwoAnglesProperty
                    #TODO add comment
                    self.add_property(SumOfTwoAnglesProperty(
                        angle, angle.vectors[0].angle(angle.vectors[1].reversed), 180
                    ))
                elif angle.vectors[0].end == angle.vectors[1].end:
                    #TODO vertical angles
                    pass
            self.__angles[key] = angle
        return angle
    class Angle(Figure):
        """An angle formed by two vectors, possibly sharing a vertex."""
        def __init__(self, vector0, vector1):
            assert vector0 != vector1 and vector0 != vector1.reversed
            self.vectors = (vector0, vector1)
            # vertex is set only when both vectors start at the same point;
            # pseudo_vertex is any point shared by the two vectors.
            self.vertex = vector0.start if vector0.start == vector1.start else None
            if self.vertex:
                self.pseudo_vertex = self.vertex
            else:
                self.pseudo_vertex = next((p for p in vector0.points if p in vector1.points), None)
            self.point_set = frozenset([*vector0.points, *vector1.points])
            self.__bisector = None  # cached result of bisector_line()
        @property
        def scene(self):
            """Scene that owns the angle's vectors."""
            return self.vectors[0].scene
        @property
        def endpoints(self):
            """The two non-vertex endpoints (requires a proper vertex)."""
            assert self.vertex, 'Cannot locate endpoints of angle with no vertex'
            return (self.vectors[0].end, self.vectors[1].end)
        def bisector_line(self, **kwargs):
            """Construct (and cache) the bisector line of the angle.

            Geometric construction: mark e0/e1 on the two sides, intersect the
            circle through e0 with the ray toward e1 to get X (equidistant from
            the vertex), then translate X by the vector to e0 -- the resulting
            Y lies on the bisector by the rhombus-diagonal argument.
            """
            assert self.pseudo_vertex, 'Cannot construct bisector of angle %s with no vertex' % self
            if self.__bisector:
                return self.__bisector.with_extra_args(**kwargs)
            v = self.pseudo_vertex
            vec0 = self.vectors[0]
            e0 = vec0.end if v == vec0.start else v.translated_point(vec0, layer='invisible')
            vec1 = self.vectors[1]
            e1 = vec1.end if v == vec1.start else v.translated_point(vec1, layer='invisible')
            circle = v.circle_through(e0, layer='invisible')
            line = v.line_through(e1, layer='invisible')
            X = circle.intersection_point(line, layer='invisible')
            v.same_direction_constraint(X, e1)
            Y = X.translated_point(v.vector(e0), layer='invisible')
            self.point_on_bisector_constraint(Y, guaranteed=True)
            if kwargs.get('comment') is None:
                kwargs = dict(kwargs)
                kwargs['comment'] = Comment('bisector of $%{angle:angle}$', {'angle': self})
            self.__bisector = v.line_through(Y, **kwargs)
            return self.__bisector
        def point_on_bisector_constraint(self, point, **kwargs):
            """Constrain *point* to lie on the bisector of this angle."""
            bisector = self.pseudo_vertex.vector(point)
            if kwargs.get('comment') is None:
                kwargs = dict(kwargs)
                kwargs['comment'] = Comment(
                    '$%{ray:bisector}$ is the bisector of $%{angle:angle}$',
                    {'bisector': bisector, 'angle': self}
                )
            # Each half-angle is half of this angle, and both halves are equal.
            angle0 = self.vectors[0].angle(bisector)
            angle1 = self.vectors[1].angle(bisector)
            if self.vertex:
                point.inside_constraint(self, **kwargs)
            self.ratio_constraint(angle0, 2, **kwargs)
            self.ratio_constraint(angle1, 2, **kwargs)
            angle0.ratio_constraint(angle1, 1, **kwargs)
        def ratio_constraint(self, angle, ratio, **kwargs):
            # self = angle * ratio
            self.scene.assert_angle(angle)
            self.scene.constraint(Constraint.Kind.angles_ratio, self, angle, ratio, **kwargs)
        def value_constraint(self, degree, **kwargs):
            """Constrain the angle measure to the given degree value."""
            if kwargs.get('comment') is None:
                kwargs = dict(kwargs)
                kwargs['comment'] = Comment(
                    '$%{anglemeasure:angle} = %{degree:degree}$',
                    {'angle': self, 'degree': degree}
                )
            self.scene.constraint(Constraint.Kind.angle_value, self, degree, **kwargs)
        def is_acute_constraint(self, **kwargs):
            """Constrain the angle to be acute."""
            self.scene.constraint(Constraint.Kind.acute_angle, self, **kwargs)
        def is_obtuse_constraint(self, **kwargs):
            """Constrain the angle to be obtuse."""
            self.scene.constraint(Constraint.Kind.obtuse_angle, self, **kwargs)
        def is_right_constraint(self, **kwargs):
            """Constrain the angle to be right (sides perpendicular)."""
            self.vectors[0].as_segment.line_through().perpendicular_constraint(
                self.vectors[1].as_segment.line_through(),
                **kwargs
            )
        def __str__(self):
            if self.vertex:
                return '\\angle %s %s %s' % (self.vectors[0].end, self.vertex, self.vectors[1].end)
            return '\\angle(%s, %s)' % self.vectors
class Triangle(Figure):
def __init__(self, pt0, pt1, pt2):
self.points = (pt0, pt1, pt2)
self.__sides = None
self.__angles = None
self.__permutations = None
@property
def scene(self):
return self.points[0].scene
@property
def is_equilateral(self):
for cnstr in self.scene.constraints(Constraint.Kind.equilateral):
if set(cnstr.params[0].points) == | |
# Source repository: IBM/graph4nlp
import json
from stanfordcorenlp import StanfordCoreNLP
from graph4nlp.pytorch.data.data import GraphData
from graph4nlp.pytorch.modules.utils.vocab_utils import VocabModel
from .base import StaticGraphConstructionBase
import networkx as nx
class IEBasedGraphConstruction(StaticGraphConstructionBase):
"""
Information Extraction based graph construction class
Parameters
----------
embedding_style: dict
Specify embedding styles including ``word_emb_type``, ``node_edge_level_emb_type`` and ``graph_level_emb_type``.
vocab: VocabModel
Vocabulary including all words appeared in graphs.
"""
def __init__(self, embedding_style, vocab, hidden_size=300, fix_word_emb=True, dropout=None, use_cuda=True):
super(IEBasedGraphConstruction, self).__init__(word_vocab=vocab,
embedding_styles=embedding_style,
hidden_size=hidden_size,
fix_word_emb=fix_word_emb,
dropout=dropout, use_cuda=use_cuda)
self.vocab = vocab
self.verbase = 1
def add_vocab(self, g):
"""
Add node tokens appeared in graph g to vocabulary.
Parameters
----------
g: GraphData
Graph data-structure.
"""
for i in range(g.get_node_num()):
attr = g.get_node_attrs(i)[i]
self.vocab.word_vocab._add_words([attr["token"]])
    @classmethod
    def topology(cls, raw_text_data, nlp_processor, merge_strategy, edge_strategy):
        """
        Graph building method.

        Pipeline: (1) coreference resolution over the whole text, (2) replace
        pronouns with their resolved entities, (3) OpenIE triple extraction per
        sentence, (4) de-duplicate similar triples, (5) merge sub-graphs and
        assemble the final GraphData.

        Parameters
        ----------
        raw_text_data: str
            Raw text data, it can be multi-sentences.
        nlp_processor: StanfordCoreNLP
            NLP parsing tools
        merge_strategy: None or str, option=[None, "share_common_tokens", "user_define"]
            Strategy to merge sub-graphs into one graph
            ``None``: All subjects in extracted triples are connected by a "GLOBAL_NODE"
            using a "global" edge
            ``"share_common_tokens"``: The entity nodes share the same tokens are connected
            using a "COM" edge
            ``"user_define"``: We will give this option to the user. User can override this method to define your merge
            strategy.
        edge_strategy: None or str, option=[None, "homogeneous", "heterogeneous", "as_node"]
            Strategy to process edge.
            ``None``: It will be the default option. We will do as ``"homogeneous"``.
            ``"homogeneous"``: We will drop the edge type information.
            If there is a linkage among node ``i`` and node ``j``, we will add an edge whose weight
            is ``1.0``. Otherwise there is no edge.
            ``heterogeneous``: We will keep the edge type information.
            An edge will have type information like ``n_subj``.
            It is not implemented yet.
            ``as_node``: We will view the edge as a graph node.
            If there is an edge whose type is ``k`` between node ``i`` and node ``j``,
            we will insert a node ``k`` into the graph and link node (``i``, ``k``) and (``k``, ``j``).
            It is not implemented yet.

        Returns
        -------
        graph: GraphData
            The merged graph data-structure.
        """
        cls.verbase = 1
        # Do coreference resolution on the whole 'raw_text_data'
        props_coref = {
            'annotators': 'tokenize, ssplit, pos, lemma, ner, parse, coref',
            "tokenize.options":
                "splitHyphenated=true,normalizeParentheses=true,normalizeOtherBrackets=true",
            "tokenize.whitespace": False,
            'ssplit.isOneSentence': False,
            'outputFormat': 'json'
        }
        coref_json = nlp_processor.annotate(raw_text_data.strip(), properties=props_coref)
        coref_dict = json.loads(coref_json)
        # Extract and preserve necessary parsing results from coref_dict['sentences']
        # sent_dict['tokenWords']: list of tokens in a sentence
        sentences = []
        for sent in coref_dict['sentences']:
            sent_dict = {}
            sent_dict['sentNum'] = sent['index'] # start from 0
            sent_dict['tokens'] = sent['tokens']
            sent_dict['tokenWords'] = [token['word'] for token in sent['tokens']]
            sent_dict['sentText'] = ' '.join(sent_dict['tokenWords'])
            sentences.append(sent_dict)
        # Substitute each coreferent mention with its representative entity.
        # CoreNLP indices are 1-based; the "- 1" converts them to 0-based.
        for k, v in coref_dict['corefs'].items():
            # v is a list of dict, each dict contains a str
            # v[0] contains 'original entity str'
            # v[1:] contain 'pron strs' refers to 'original entity str'
            ent_text = v[0]['text'] # 'original entity str'
            if ',' in ent_text:
                # cut the 'original entity str' if it is too long
                ent_text = ent_text.split(',')[0].strip()
            ent_sentNum = v[0]['sentNum'] - 1 # the sentNum 'original entity str' appears in
            ent_startIndex = v[0]['startIndex'] - 1 # the startIndex 'original entity str' appears in
            ent_endIndex = v[0]['endIndex'] - 1 # the endIndex 'original entity str' appears in
            for pron in v[1:]:
                pron_text = pron['text'] # 'pron strs'
                if ent_text == pron_text or v[0]['text'] == pron_text:
                    continue
                pron_sentNum = pron['sentNum'] - 1 # the sentNum 'pron str' appears in
                pron_startIndex = pron['startIndex'] - 1
                pron_endIndex = pron['endIndex'] - 1
                # replace 'pron str' with 'original entity str'
                sentences[pron_sentNum]['tokenWords'][pron_startIndex] = ent_text
                # Blank out the remaining tokens of a multi-word mention.
                for rm_idx in range(pron_startIndex+1, pron_endIndex):
                    sentences[pron_sentNum]['tokenWords'][rm_idx] = ""
        # build resolved text
        for sent_id, _ in enumerate(sentences):
            sentences[sent_id]['tokenWords'] = list(filter(lambda a: a != "", sentences[sent_id]['tokenWords']))
            sentences[sent_id]['resolvedText'] = ' '.join(sentences[sent_id]['tokenWords'])
        # use OpenIE to extract triples from resolvedText
        props_openie = {
            'annotators': 'tokenize, ssplit, pos, ner, parse, openie',
            "tokenize.options":
                "splitHyphenated=true,normalizeParentheses=true,normalizeOtherBrackets=true",
            "tokenize.whitespace": False,
            'ssplit.isOneSentence': False,
            'outputFormat': 'json',
            "openie.triple.strict": "true"
        }
        all_sent_triples = {}
        for sent in sentences:
            resolved_sent = sent['resolvedText']
            openie_json = nlp_processor.annotate(resolved_sent.strip(), properties=props_openie)
            openie_dict = json.loads(openie_json)
            for triple_dict in openie_dict['sentences'][0]['openie']:
                sbj = triple_dict['subject']
                rel = triple_dict['relation']
                # Skip bare copula relations -- they carry no content.
                if rel in ['was', 'is', 'were', 'are']:
                    continue
                obj = triple_dict['object']
                # If two triples have the same subject and relation,
                # only preserve the one has longer object
                if sbj+'_'+rel not in all_sent_triples.keys():
                    all_sent_triples[sbj+'_'+rel] = [sbj, rel, obj]
                else:
                    if len(obj)>len(all_sent_triples[sbj+'_'+rel][2]):
                        all_sent_triples[sbj + '_' + rel] = [sbj, rel, obj]
        all_sent_triples_list = list(all_sent_triples.values()) # triples extracted from all sentences
        # remove similar triples
        # NOTE(review): a triple can be appended to triples_rm_list more than
        # once; list.remove below would then raise ValueError -- confirm input
        # data cannot trigger this.
        triples_rm_list = []
        for i, lst_i in enumerate(all_sent_triples_list[:-1]):
            for j, lst_j in enumerate(all_sent_triples_list[i+1:]):
                str_i = ' '.join(lst_i)
                str_j = ' '.join(lst_j)
                # Two triples are "similar" if one string contains the other,
                # or they agree on (subject, object) or (relation, object);
                # keep the one with the longer relation.
                if str_i in str_j or str_j in str_i or \
                   lst_i[0]+lst_i[2]==lst_j[0]+lst_j[2] or \
                   lst_i[1]+lst_i[2]==lst_j[1]+lst_j[2]:
                    if len(lst_i[1])>len(lst_j[1]):
                        triples_rm_list.append(lst_j)
                    else:
                        triples_rm_list.append(lst_i)
        for lst in triples_rm_list:
            all_sent_triples_list.remove(lst)
        # Connect the per-sentence sub-graphs according to merge_strategy.
        global_triples = cls._graph_connect(all_sent_triples_list, merge_strategy)
        all_sent_triples_list.extend(global_triples)
        parsed_results = {}
        parsed_results['graph_content'] = []
        graph_nodes = []
        # Assemble node list and edge records; node ids are positions in
        # graph_nodes (first-seen order).
        for triple in all_sent_triples_list:
            if edge_strategy is None or edge_strategy == "homogeneous":
                if triple[0] not in graph_nodes:
                    graph_nodes.append(triple[0])
                if triple[2] not in graph_nodes:
                    graph_nodes.append(triple[2])
                triple_info = {'edge_tokens': triple[1].split(),
                               'src': {
                                   'tokens': triple[0].split(),
                                   'id': graph_nodes.index(triple[0])
                               },
                               'tgt': {
                                   'tokens': triple[2].split(),
                                   'id': graph_nodes.index(triple[2])
                               }}
                parsed_results['graph_content'].append(triple_info)
            elif edge_strategy == "as_node":
                # The relation becomes its own node linked src -> rel -> tgt.
                if triple[0] not in graph_nodes:
                    graph_nodes.append(triple[0])
                if triple[1] not in graph_nodes:
                    graph_nodes.append(triple[1])
                if triple[2] not in graph_nodes:
                    graph_nodes.append(triple[2])
                triple_info_0_1 = {'edge_tokens': [],
                                   'src': {
                                       'tokens': triple[0].split(),
                                       'id': graph_nodes.index(triple[0]),
                                       'type': 'ent_node'
                                   },
                                   'tgt': {
                                       'tokens': triple[1].split(),
                                       'id': graph_nodes.index(triple[1]),
                                       'type': 'edge_node'
                                   }}
                triple_info_1_2 = {'edge_tokens': [],
                                   'src': {
                                       'tokens': triple[1].split(),
                                       'id': graph_nodes.index(triple[1]),
                                       'type': 'edge_node'
                                   },
                                   'tgt': {
                                       'tokens': triple[2].split(),
                                       'id': graph_nodes.index(triple[2]),
                                       'type': 'ent_node'
                                   }}
                parsed_results['graph_content'].append(triple_info_0_1)
                parsed_results['graph_content'].append(triple_info_1_2)
            else:
                raise NotImplementedError()
        parsed_results['node_num'] = len(graph_nodes)
        parsed_results['graph_nodes'] = graph_nodes
        graph = cls._construct_static_graph(parsed_results, edge_strategy=edge_strategy)
        if cls.verbase:
            for info in parsed_results['graph_content']:
                print(info)
            print("is_connected="+str(nx.is_connected(nx.Graph(graph.to_dgl().to_networkx()))))
        return graph
def embedding(self, node_attributes, edge_attributes):
node_emb, edge_emb = self.embedding_layer(
node_attributes, edge_attributes)
return node_emb, edge_emb
@classmethod
def _construct_static_graph(cls, parsed_object, edge_strategy=None):
"""
Build dependency-parsing-tree based graph for single sentence.
Parameters
----------
parsed_object: dict
``parsed_object`` contains all triples extracted from the raw_text_data.
edge_strategy: None or str, option=[None, "homogeneous", "heterogeneous", "as_node"]
Strategy to process edge.
``None``: It will be the default option. We will do as ``"homogeneous"``.
``"homogeneous"``: We will drop the edge type information.
If there is a linkage among node ``i`` and node ``j``, we will add an edge whose weight
is ``1.0``. Otherwise there is no edge.
``heterogeneous``: We will keep the edge type information.
An edge will have type information like ``n_subj``.
It is not implemented yet.
``as_node``: We will view the edge as a graph node.
If there is an edge whose type is ``k`` between node ``i`` and node ``j``,
we will insert a node ``k`` into the graph and link node (``i``, ``k``) and (``k``, ``j``).
It is not implemented yet.
Returns
-------
graph: GraphData
graph structure for single sentence
"""
ret_graph = GraphData()
node_num = parsed_object["node_num"]
ret_graph.add_nodes(node_num)
for triple_info in parsed_object["graph_content"]:
if edge_strategy is None or edge_strategy == "homogeneous":
ret_graph.add_edge(triple_info["src"]['id'], triple_info['tgt']['id'])
elif edge_strategy == 'as_node':
ret_graph.add_edge(triple_info["src"]['id'], triple_info['tgt']['id'])
else:
raise NotImplementedError()
ret_graph.node_attributes[triple_info["src"]['id']]['token'] = triple_info["src"]['tokens']
ret_graph.node_attributes[triple_info["tgt"]['id']]['token'] = triple_info['tgt']['tokens']
if edge_strategy == 'as_node':
ret_graph.node_attributes[triple_info["src"]['id']]['type'] = triple_info["src"]['type']
ret_graph.node_attributes[triple_info["tgt"]['id']]['type'] = triple_info['tgt']['type']
# TODO: add edge_attributes
return ret_graph
@classmethod
def _graph_connect(cls, triple_list, merge_strategy=None):
"""
This method will connect entities in the ``triple_list`` to ensure the graph
is connected.
Parameters
----------
triple_list: list of [subject, relation, object]
A list of all triples extracted from ``raw_text_data`` using coref and openie.
merge_strategy: None or str, option=[None, "tailhead", "sequential", "user_define"]
Strategy to merge sub-graphs into one graph
``None``: Do not add additional nodes and edges.
``global``: All subjects in extracted triples are connected by a "GLOBAL_NODE"
using a "global" edge
``"user_define"``: We will give this option to the user. User can override this method to define your merge
strategy.
Returns
-------
global_triples: list of [subject, relation, object]
The added triples using merge_strategy.
"""
if merge_strategy == 'global':
graph_nodes = []
global_triples = []
for triple in triple_list:
if triple[0] not in graph_nodes:
graph_nodes.append(triple[0])
global_triples.append([triple[0], 'global', 'GLOBAL_NODE'])
if triple[2] not in graph_nodes:
graph_nodes.append(triple[2])
return global_triples
elif merge_strategy == None:
return []
else:
raise NotImplementedError()
# ``"share_common_tokens"``: The entity nodes share the | |
is_training=self.is_training,
scope="cluster_bn")
else:
cluster_biases = tf.get_variable("cluster_biases",
[cluster_size],
initializer = tf.random_normal_initializer(stddev=1 / math.sqrt(self.feature_size)))
tf.summary.histogram("cluster_biases", cluster_biases)
activation += cluster_biases
activation = tf.nn.softmax(activation)
tf.summary.histogram("cluster_output", activation)
activation = tf.reshape(activation, [-1, self.max_frames, self.cluster_size])
a_sum = tf.reduce_sum(activation,-2,keep_dims=True)
cluster_weights2 = tf.get_variable("cluster_weights2",
[1,self.feature_size, self.cluster_size],
initializer = tf.random_normal_initializer(stddev=1 / math.sqrt(self.feature_size)))
a = tf.multiply(a_sum,cluster_weights2)
activation = tf.transpose(activation,perm=[0,2,1])
reshaped_input = tf.reshape(reshaped_input,[-1,self.max_frames,self.feature_size])
vlad = tf.matmul(activation,reshaped_input)
vlad = tf.transpose(vlad,perm=[0,2,1])
vlad = tf.subtract(vlad,a)
vlad = tf.sign(vlad)*tf.sqrt(tf.abs(vlad))
vlad = tf.nn.l2_normalize(vlad,1)
vlad = tf.reshape(vlad,[-1,self.cluster_size*self.feature_size])
vlad = tf.nn.l2_normalize(vlad,1)
return vlad
class AddNetVLAD():
    """NetVLAD variant that pools the residual matrix by summing over the
    cluster axis (instead of flattening), yielding a feature_size-dim vector.

    NOTE(review): TF1-style graph code; relies on module-level ``tf``,
    ``slim`` and ``math``.
    """

    def __init__(self, feature_size, max_frames, cluster_size, add_batch_norm, is_training):
        self.feature_size = feature_size      # per-frame feature dimensionality
        self.max_frames = max_frames          # frames per video
        self.is_training = is_training        # batch-norm mode switch
        self.add_batch_norm = add_batch_norm  # BN vs. learned biases after assignment
        self.cluster_size = cluster_size      # number of VLAD clusters

    def forward(self, reshaped_input):
        """Compute the summed VLAD descriptor.

        ``reshaped_input`` has shape [batch * max_frames, feature_size];
        returns an L2-normalized tensor of shape [batch, feature_size].
        """
        cluster_weights = tf.get_variable("cluster_weights",
            [self.feature_size, self.cluster_size],
            initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(self.feature_size)))
        tf.summary.histogram("cluster_weights", cluster_weights)
        activation = tf.matmul(reshaped_input, cluster_weights)
        if self.add_batch_norm:
            activation = slim.batch_norm(
                activation,
                center=True,
                scale=True,
                is_training=self.is_training,
                scope="cluster_bn")
        else:
            # BUG FIX: original referenced the undefined bare name
            # `cluster_size` (NameError at runtime); use the instance attribute.
            cluster_biases = tf.get_variable("cluster_biases",
                [self.cluster_size],
                initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(self.feature_size)))
            tf.summary.histogram("cluster_biases", cluster_biases)
            activation += cluster_biases
        activation = tf.nn.softmax(activation)
        tf.summary.histogram("cluster_output", activation)
        activation = tf.reshape(activation, [-1, self.max_frames, self.cluster_size])
        # Per-cluster soft-assignment mass, used to build the residual offset `a`.
        a_sum = tf.reduce_sum(activation, -2, keep_dims=True)
        cluster_weights2 = tf.get_variable("cluster_weights2",
            [1, self.feature_size, self.cluster_size],
            initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(self.feature_size)))
        a = tf.multiply(a_sum, cluster_weights2)
        activation = tf.transpose(activation, perm=[0, 2, 1])
        reshaped_input = tf.reshape(reshaped_input, [-1, self.max_frames, self.feature_size])
        vlad = tf.matmul(activation, reshaped_input)
        vlad = tf.transpose(vlad, perm=[0, 2, 1])
        vlad = tf.subtract(vlad, a)
        vlad = tf.nn.l2_normalize(vlad, 1)
        # Aggregate over clusters by summation instead of flattening.
        vlad = tf.reduce_sum(vlad, 2)
        vlad = tf.nn.l2_normalize(vlad, 1)
        return vlad
class GRUNetVLAD():
    """NetVLAD aggregation followed by a stacked GRU over the per-cluster
    descriptors; the final GRU state is the video representation.

    NOTE(review): TF1-style graph code; relies on module-level ``tf``,
    ``slim``, ``math`` and ``FLAGS`` (gru_cells, gru_layers,
    gru_random_sequence, iterations, afterNorm).
    """

    def __init__(self, feature_size, max_frames, cluster_size, add_batch_norm, is_training):
        self.feature_size = feature_size      # per-frame feature dimensionality
        self.max_frames = max_frames          # frames per video
        self.is_training = is_training        # batch-norm mode switch
        self.add_batch_norm = add_batch_norm  # BN vs. learned biases after assignment
        self.cluster_size = cluster_size      # number of VLAD clusters

    def forward(self, reshaped_input):
        """Compute the GRU-pooled NetVLAD descriptor.

        ``reshaped_input`` has shape [batch * max_frames, feature_size];
        returns the L2-normalized final GRU state.
        """
        gru_size = FLAGS.gru_cells
        number_of_layers = FLAGS.gru_layers
        random_frames = FLAGS.gru_random_sequence  # NOTE(review): unused in this method
        iterations = FLAGS.iterations              # NOTE(review): unused in this method
        cluster_weights = tf.get_variable("cluster_weights",
            [self.feature_size, self.cluster_size],
            initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(self.feature_size)))
        tf.summary.histogram("cluster_weights", cluster_weights)
        activation = tf.matmul(reshaped_input, cluster_weights)
        if self.add_batch_norm:
            activation = slim.batch_norm(
                activation,
                center=True,
                scale=True,
                is_training=self.is_training,
                scope="cluster_bn")
        else:
            # BUG FIX: original referenced the undefined bare name
            # `cluster_size` (NameError at runtime); use the instance attribute.
            cluster_biases = tf.get_variable("cluster_biases",
                [self.cluster_size],
                initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(self.feature_size)))
            tf.summary.histogram("cluster_biases", cluster_biases)
            activation += cluster_biases
        activation = tf.nn.softmax(activation)
        tf.summary.histogram("cluster_output", activation)
        activation = tf.reshape(activation, [-1, self.max_frames, self.cluster_size])
        # Per-cluster soft-assignment mass, used to build the residual offset `a`.
        a_sum = tf.reduce_sum(activation, -2, keep_dims=True)
        cluster_weights2 = tf.get_variable("cluster_weights2",
            [1, self.feature_size, self.cluster_size],
            initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(self.feature_size)))
        a = tf.multiply(a_sum, cluster_weights2)
        activation = tf.transpose(activation, perm=[0, 2, 1])
        reshaped_input = tf.reshape(reshaped_input, [-1, self.max_frames, self.feature_size])
        vlad = tf.matmul(activation, reshaped_input)
        vlad = tf.transpose(vlad, perm=[0, 2, 1])
        vlad = tf.subtract(vlad, a)
        if FLAGS.afterNorm:
            vlad = tf.nn.l2_normalize(vlad, 1)  # intra-normalization, shape [b, f, c]
        vlad = tf.transpose(vlad, perm=[0, 2, 1])  # [b, c, f]: one GRU step per cluster
        stacked_GRU = tf.contrib.rnn.MultiRNNCell(
            [
                tf.contrib.rnn.GRUCell(gru_size)
                for _ in range(number_of_layers)
            ], state_is_tuple=False)
        with tf.variable_scope("RNN"):
            outputs, state = tf.nn.dynamic_rnn(stacked_GRU, vlad,
                                               dtype=tf.float32)
        state = tf.nn.l2_normalize(state, 1)
        return state
class GRUthenNetVLAD():
    """Stacked GRU over raw frames first, then NetVLAD over the GRU outputs.

    NOTE(review): as written, the computed ``vlad`` tensor is discarded and
    the method returns the normalized GRU ``state`` — the NetVLAD stage has
    no effect on the output. This looks like a leftover bug (returning
    ``vlad`` was probably intended), but behavior is preserved here; confirm
    with the original author before changing it.

    NOTE(review): TF1-style graph code; relies on module-level ``tf``,
    ``slim``, ``math`` and ``FLAGS``.
    """

    def __init__(self, feature_size, max_frames, cluster_size, add_batch_norm, is_training):
        self.feature_size = feature_size      # per-frame feature dimensionality
        self.max_frames = max_frames          # frames per video
        self.is_training = is_training        # batch-norm mode switch
        self.add_batch_norm = add_batch_norm  # BN vs. learned biases after assignment
        self.cluster_size = cluster_size      # number of VLAD clusters

    def forward(self, reshaped_input):
        """Run GRU then NetVLAD; returns the L2-normalized GRU state
        (see class-level NOTE about the unused vlad tensor).
        """
        gru_size = FLAGS.gru_cells
        number_of_layers = FLAGS.gru_layers
        random_frames = FLAGS.gru_random_sequence  # NOTE(review): unused in this method
        iterations = FLAGS.iterations              # NOTE(review): unused in this method
        reshaped_input = tf.reshape(reshaped_input, [-1, self.max_frames, self.feature_size])
        stacked_GRU = tf.contrib.rnn.MultiRNNCell(
            [
                tf.contrib.rnn.GRUCell(gru_size)
                for _ in range(number_of_layers)
            ], state_is_tuple=False)
        with tf.variable_scope("RNN"):
            outputs, state = tf.nn.dynamic_rnn(stacked_GRU, reshaped_input,
                                               dtype=tf.float32)
        # NetVLAD now operates on the GRU outputs (dimensionality gru_size).
        reshaped_input = tf.reshape(outputs, [-1, gru_size])
        cluster_weights = tf.get_variable("cluster_weights",
            [gru_size, self.cluster_size],
            initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(gru_size)))
        tf.summary.histogram("cluster_weights", cluster_weights)
        activation = tf.matmul(reshaped_input, cluster_weights)
        if self.add_batch_norm:
            activation = slim.batch_norm(
                activation,
                center=True,
                scale=True,
                is_training=self.is_training,
                scope="cluster_bn")
        else:
            # BUG FIX: original referenced the undefined bare name
            # `cluster_size` (NameError at runtime); use the instance attribute.
            cluster_biases = tf.get_variable("cluster_biases",
                [self.cluster_size],
                initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(gru_size)))
            tf.summary.histogram("cluster_biases", cluster_biases)
            activation += cluster_biases
        activation = tf.nn.softmax(activation)
        tf.summary.histogram("cluster_output", activation)
        activation = tf.reshape(activation, [-1, self.max_frames, self.cluster_size])
        a_sum = tf.reduce_sum(activation, -2, keep_dims=True)
        cluster_weights2 = tf.get_variable("cluster_weights2",
            [1, gru_size, self.cluster_size],
            initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(gru_size)))
        a = tf.multiply(a_sum, cluster_weights2)
        activation = tf.transpose(activation, perm=[0, 2, 1])
        reshaped_input = tf.reshape(reshaped_input, [-1, self.max_frames, gru_size])
        vlad = tf.matmul(activation, reshaped_input)
        vlad = tf.transpose(vlad, perm=[0, 2, 1])
        vlad = tf.subtract(vlad, a)
        vlad = tf.nn.l2_normalize(vlad, 1)
        vlad = tf.transpose(vlad, perm=[0, 2, 1])  # [b, c, f]
        # NOTE(review): `vlad` is never used beyond this point.
        state = tf.nn.l2_normalize(state, 1)
        return state
class GraphicalNetVLAD():
    """NetVLAD followed by a graph-attention-style refinement: cluster
    descriptors attend over each other via a learned similarity matrix
    before the final flatten + normalize.

    NOTE(review): TF1-style graph code; relies on module-level ``tf``,
    ``slim`` and ``math``.
    """

    def __init__(self, feature_size, max_frames, cluster_size, add_batch_norm, is_training):
        self.feature_size = feature_size      # per-frame feature dimensionality
        self.max_frames = max_frames          # frames per video
        self.is_training = is_training        # batch-norm mode switch
        self.add_batch_norm = add_batch_norm  # BN vs. learned biases after assignment
        self.cluster_size = cluster_size      # number of VLAD clusters

    def forward(self, reshaped_input):
        """Compute the graph-refined VLAD descriptor.

        ``reshaped_input`` has shape [batch * max_frames, feature_size];
        returns an L2-normalized tensor of shape
        [batch, cluster_size * feature_size].
        """
        cluster_weights = tf.get_variable("cluster_weights",
            [self.feature_size, self.cluster_size],
            initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(self.feature_size)))
        tf.summary.histogram("cluster_weights", cluster_weights)
        activation = tf.matmul(reshaped_input, cluster_weights)
        if self.add_batch_norm:
            activation = slim.batch_norm(
                activation,
                center=True,
                scale=True,
                is_training=self.is_training,
                scope="cluster_bn")
        else:
            # BUG FIX: original referenced the undefined bare name
            # `cluster_size` (NameError at runtime); use the instance attribute.
            cluster_biases = tf.get_variable("cluster_biases",
                [self.cluster_size],
                initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(self.feature_size)))
            tf.summary.histogram("cluster_biases", cluster_biases)
            activation += cluster_biases
        activation = tf.nn.softmax(activation)
        tf.summary.histogram("cluster_output", activation)
        activation = tf.reshape(activation, [-1, self.max_frames, self.cluster_size])
        a_sum = tf.reduce_sum(activation, -2, keep_dims=True)
        cluster_weights2 = tf.get_variable("cluster_weights2",
            [1, self.feature_size, self.cluster_size],
            initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(self.feature_size)))
        a = tf.multiply(a_sum, cluster_weights2)
        activation = tf.transpose(activation, perm=[0, 2, 1])
        reshaped_input = tf.reshape(reshaped_input, [-1, self.max_frames, self.feature_size])
        vlad = tf.matmul(activation, reshaped_input)
        vlad = tf.transpose(vlad, perm=[0, 2, 1])
        vlad = tf.subtract(vlad, a)
        vlad = tf.nn.l2_normalize(vlad, 1)
        # Graph refinement: project clusters, form a cluster-to-cluster
        # attention matrix, and mix the cluster descriptors with it.
        graph_weights = tf.get_variable("graph_weights",
            [self.feature_size, self.feature_size],
            initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(self.feature_size)))
        vlad_proj = tf.transpose(vlad, perm=[0, 2, 1])
        vlad_proj = tf.reshape(vlad_proj, [-1, self.feature_size])
        vlad_proj = tf.matmul(vlad_proj, graph_weights)  # [batch, cluster, feature]
        vlad_proj = tf.reshape(vlad_proj, [-1, self.cluster_size, self.feature_size])
        G_vlad = tf.matmul(vlad_proj, tf.transpose(vlad_proj, perm=[0, 2, 1]))
        G_vlad = tf.nn.softmax(G_vlad, dim=2)
        graph_weights2 = tf.get_variable("graph_weights2",
            [self.feature_size, self.feature_size],
            initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(self.feature_size)))
        vlad = tf.matmul(G_vlad, tf.transpose(vlad, perm=[0, 2, 1]))
        vlad = tf.reshape(vlad, [-1, self.feature_size])
        vlad = tf.matmul(vlad, graph_weights2)
        vlad = tf.reshape(vlad, [-1, self.cluster_size, self.feature_size])
        vlad = tf.transpose(vlad, perm=[0, 2, 1])
        vlad = tf.nn.l2_normalize(vlad, 1)
        vlad = tf.reshape(vlad, [-1, self.cluster_size * self.feature_size])
        vlad = tf.nn.l2_normalize(vlad, 1)
        return vlad
class GraphicalNetVLAD_simple():
    """Simplified graph-refined NetVLAD: a single projection builds the
    cluster-to-cluster attention; no second projection after mixing.

    NOTE(review): TF1-style graph code; relies on module-level ``tf``,
    ``slim`` and ``math``.
    """

    def __init__(self, feature_size, max_frames, cluster_size, add_batch_norm, is_training):
        self.feature_size = feature_size      # per-frame feature dimensionality
        self.max_frames = max_frames          # frames per video
        self.is_training = is_training        # batch-norm mode switch
        self.add_batch_norm = add_batch_norm  # BN vs. learned biases after assignment
        self.cluster_size = cluster_size      # number of VLAD clusters

    def forward(self, reshaped_input):
        """Compute the simplified graph-refined VLAD descriptor.

        ``reshaped_input`` has shape [batch * max_frames, feature_size];
        returns an L2-normalized tensor of shape
        [batch, cluster_size * feature_size].
        """
        cluster_weights = tf.get_variable("cluster_weights",
            [self.feature_size, self.cluster_size],
            initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(self.feature_size)))
        tf.summary.histogram("cluster_weights", cluster_weights)
        activation = tf.matmul(reshaped_input, cluster_weights)
        if self.add_batch_norm:
            activation = slim.batch_norm(
                activation,
                center=True,
                scale=True,
                is_training=self.is_training,
                scope="cluster_bn")
        else:
            # BUG FIX: original referenced the undefined bare name
            # `cluster_size` (NameError at runtime); use the instance attribute.
            cluster_biases = tf.get_variable("cluster_biases",
                [self.cluster_size],
                initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(self.feature_size)))
            tf.summary.histogram("cluster_biases", cluster_biases)
            activation += cluster_biases
        activation = tf.nn.softmax(activation)
        tf.summary.histogram("cluster_output", activation)
        activation = tf.reshape(activation, [-1, self.max_frames, self.cluster_size])
        a_sum = tf.reduce_sum(activation, -2, keep_dims=True)
        cluster_weights2 = tf.get_variable("cluster_weights2",
            [1, self.feature_size, self.cluster_size],
            initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(self.feature_size)))
        a = tf.multiply(a_sum, cluster_weights2)
        activation = tf.transpose(activation, perm=[0, 2, 1])
        reshaped_input = tf.reshape(reshaped_input, [-1, self.max_frames, self.feature_size])
        vlad = tf.matmul(activation, reshaped_input)
        vlad = tf.transpose(vlad, perm=[0, 2, 1])
        vlad = tf.subtract(vlad, a)
        vlad = tf.nn.l2_normalize(vlad, 1)
        # Single-projection cluster-to-cluster attention.
        graph_weights = tf.get_variable("graph_weights",
            [self.feature_size, self.cluster_size],
            initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(self.feature_size)))
        vlad_proj = tf.transpose(vlad, perm=[0, 2, 1])
        vlad_proj = tf.reshape(vlad_proj, [-1, self.feature_size])
        vlad_proj = tf.matmul(vlad_proj, graph_weights)  # [batch, cluster, cluster]
        vlad_proj = tf.reshape(vlad_proj, [-1, self.cluster_size, self.cluster_size])
        G_vlad = tf.matmul(vlad_proj, tf.transpose(vlad_proj, perm=[0, 2, 1]))
        G_vlad = tf.nn.softmax(G_vlad, dim=2)
        # Mix the cluster descriptors with the attention matrix; no second
        # projection in this simplified variant.
        vlad = tf.matmul(G_vlad, tf.transpose(vlad, perm=[0, 2, 1]))
        vlad = tf.nn.l2_normalize(vlad, 1)
        vlad = tf.reshape(vlad, [-1, self.cluster_size * self.feature_size])
        vlad = tf.nn.l2_normalize(vlad, 1)
        return vlad
class NetVLAGD():
    """NetVLAD variant without residual subtraction (VLAGD): the assignment-
    weighted sum is gated per (cluster, feature) by a learned sigmoid gate.

    NOTE(review): TF1-style graph code; relies on module-level ``tf``,
    ``slim`` and ``math``.
    """

    def __init__(self, feature_size, max_frames, cluster_size, add_batch_norm, is_training):
        self.feature_size = feature_size      # per-frame feature dimensionality
        self.max_frames = max_frames          # frames per video
        self.is_training = is_training        # batch-norm mode switch
        self.add_batch_norm = add_batch_norm  # BN vs. learned biases after assignment
        self.cluster_size = cluster_size      # number of VLAD clusters

    def forward(self, reshaped_input):
        """Compute the gated descriptor.

        ``reshaped_input`` has shape [batch * max_frames, feature_size];
        returns an L2-normalized tensor of shape
        [batch, cluster_size * feature_size].
        """
        cluster_weights = tf.get_variable("cluster_weights",
            [self.feature_size, self.cluster_size],
            initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(self.feature_size)))
        activation = tf.matmul(reshaped_input, cluster_weights)
        if self.add_batch_norm:
            activation = slim.batch_norm(
                activation,
                center=True,
                scale=True,
                is_training=self.is_training,
                scope="cluster_bn")
        else:
            # BUG FIX: original referenced the undefined bare name
            # `cluster_size` (NameError at runtime); use the instance attribute.
            cluster_biases = tf.get_variable("cluster_biases",
                [self.cluster_size],
                initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(self.feature_size)))
            # BUG FIX: the biases were created but never applied (every
            # sibling class adds them here); without this the non-BN path
            # trains an unused variable.
            activation += cluster_biases
        activation = tf.nn.softmax(activation)
        activation = tf.reshape(activation, [-1, self.max_frames, self.cluster_size])
        gate_weights = tf.get_variable("gate_weights",
            [1, self.cluster_size, self.feature_size],
            initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(self.feature_size)))
        gate_weights = tf.sigmoid(gate_weights)
        activation = tf.transpose(activation, perm=[0, 2, 1])
        reshaped_input = tf.reshape(reshaped_input, [-1, self.max_frames, self.feature_size])
        vlagd = tf.matmul(activation, reshaped_input)
        vlagd = tf.multiply(vlagd, gate_weights)
        vlagd = tf.transpose(vlagd, perm=[0, 2, 1])
        vlagd = tf.nn.l2_normalize(vlagd, 1)
        vlagd = tf.reshape(vlagd, [-1, self.cluster_size * self.feature_size])
        vlagd = tf.nn.l2_normalize(vlagd, 1)
        return vlagd
class GatedDBoF():
    """Deep Bag-of-Frames pooling where the sum-pooled codes are gated by a
    sigmoid branch computed from the max-pooled codes.

    NOTE(review): TF1-style graph code; relies on module-level ``tf``,
    ``slim`` and ``math``. ``max_pool`` is stored but unused in forward().
    """

    def __init__(self, feature_size, max_frames, cluster_size, max_pool, add_batch_norm, is_training):
        self.feature_size = feature_size      # per-frame feature dimensionality
        self.max_frames = max_frames          # frames per video
        self.is_training = is_training        # batch-norm mode switch
        self.add_batch_norm = add_batch_norm  # BN vs. learned biases after coding
        self.cluster_size = cluster_size      # codebook size
        self.max_pool = max_pool              # NOTE(review): unused in forward()

    def forward(self, reshaped_input):
        """Compute the gated DBoF descriptor.

        ``reshaped_input`` has shape [batch * max_frames, feature_size];
        returns an L2-normalized tensor of shape [batch, cluster_size].
        """
        feature_size = self.feature_size
        cluster_size = self.cluster_size
        add_batch_norm = self.add_batch_norm
        max_frames = self.max_frames
        is_training = self.is_training
        max_pool = self.max_pool
        cluster_weights = tf.get_variable("cluster_weights",
            [feature_size, cluster_size],
            initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(feature_size)))
        tf.summary.histogram("cluster_weights", cluster_weights)
        activation = tf.matmul(reshaped_input, cluster_weights)
        if add_batch_norm:
            activation = slim.batch_norm(
                activation,
                center=True,
                scale=True,
                is_training=is_training,
                scope="cluster_bn")
        else:
            # BUG FIX: original passed `tf.random_normal(stddev=...)` (an op,
            # and an invalid call signature) as the initializer; use
            # tf.random_normal_initializer.
            cluster_biases = tf.get_variable("cluster_biases",
                [cluster_size],
                initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(feature_size)))
            tf.summary.histogram("cluster_biases", cluster_biases)
            activation += cluster_biases
        activation = tf.nn.softmax(activation)
        activation = tf.reshape(activation, [-1, max_frames, cluster_size])
        # Sum pooling carries the signal; max pooling drives the gate.
        activation_sum = tf.reduce_sum(activation, 1)
        activation_max = tf.reduce_max(activation, 1)
        activation_max = tf.nn.l2_normalize(activation_max, 1)
        dim_red = tf.get_variable("dim_red",
            [cluster_size, feature_size],
            initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(feature_size)))
        cluster_weights_2 = tf.get_variable("cluster_weights_2",
            [feature_size, cluster_size],
            initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(feature_size)))
        tf.summary.histogram("cluster_weights_2", cluster_weights_2)
        activation = tf.matmul(activation_max, dim_red)
        activation = tf.matmul(activation, cluster_weights_2)
        if add_batch_norm:
            activation = slim.batch_norm(
                activation,
                center=True,
                scale=True,
                is_training=is_training,
                scope="cluster_bn_2")
        else:
            # BUG FIX: same invalid initializer as above.
            cluster_biases = tf.get_variable("cluster_biases_2",
                [cluster_size],
                initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(feature_size)))
            tf.summary.histogram("cluster_biases_2", cluster_biases)
            activation += cluster_biases
        activation = tf.sigmoid(activation)
        activation = tf.multiply(activation, activation_sum)
        activation = tf.nn.l2_normalize(activation, 1)
        return activation
class SoftDBoF():
def __init__(self, feature_size,max_frames,cluster_size, max_pool, add_batch_norm, is_training):
self.feature_size = feature_size
self.max_frames = max_frames
self.is_training = is_training
self.add_batch_norm = add_batch_norm
self.cluster_size = cluster_size
self.max_pool = max_pool
def forward(self, reshaped_input):
feature_size = self.feature_size
cluster_size = self.cluster_size
| |
# <gh_stars>1-10
#!/usr/bin/python
#
# This source file is part of appleseed.
# Visit https://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2010-2013 <NAME>, Jupiter Jazz Limited
# Copyright (c) 2014-2018 <NAME>, The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from __future__ import print_function
from distutils import archive_util, dir_util
from xml.etree.ElementTree import ElementTree
import argparse
import fnmatch
import glob
import os
import platform
import re
import shutil
import stat
import subprocess
import sys
import time
import traceback
import zipfile
# -------------------------------------------------------------------------------------------------
# Constants.
# -------------------------------------------------------------------------------------------------
VERSION = "2.5.4"
SETTINGS_FILENAME = "appleseed.package.configuration.xml"
# -------------------------------------------------------------------------------------------------
# Utility functions.
# -------------------------------------------------------------------------------------------------
def info(message):
    """Print an informational line, indented under the current section."""
    print("  {0}".format(message))
def progress(message):
    """Print an in-progress step, indented and suffixed with an ellipsis."""
    print("  {0}...".format(message))
def fatal(message):
    """Report a fatal error (with traceback, if one is active) and exit(1)."""
    print("Fatal: {0}. Aborting.".format(message))
    if sys.exc_info()[0]:
        # An exception is currently being handled; show its traceback too.
        print(traceback.format_exc())
    sys.exit(1)
def exe(filepath):
    """Append ".exe" to `filepath` on Windows; return it unchanged elsewhere."""
    if os.name == "nt":
        return filepath + ".exe"
    return filepath
def safe_delete_file(path):
    """Remove the file at `path` if it exists; abort via fatal() on OS errors."""
    if not os.path.exists(path):
        return
    try:
        os.remove(path)
    except OSError:
        fatal("Failed to delete file '" + path + "'")
def on_rmtree_error(func, path, exc_info):
    """shutil.rmtree() onerror callback.

    `path` is the file that could not be removed. Assume the failure was a
    read-only flag: make the file writable and retry the unlink. Any further
    failure propagates to the rmtree() caller.
    """
    # path contains the path of the file that couldn't be removed.
    # Let's just assume that it's read-only and unlink it.
    os.chmod(path, stat.S_IWRITE)
    os.unlink(path)
def safe_delete_directory(path):
    """Delete the directory tree at `path`, retrying up to 10 times.

    Each failed attempt (except the last) sleeps 0.5 s before retrying —
    useful on Windows where file handles may linger. Aborts via fatal()
    once all attempts are exhausted.
    """
    MAX_ATTEMPTS = 10
    for attempt in range(1, MAX_ATTEMPTS + 1):
        try:
            if os.path.exists(path):
                shutil.rmtree(path, onerror=on_rmtree_error)
            return
        except OSError:
            if attempt == MAX_ATTEMPTS:
                fatal("Failed to delete directory '" + path + "'")
            time.sleep(0.5)
def safe_make_directory(path):
    """Create `path` (including parents) unless it already is a directory."""
    if os.path.isdir(path):
        return
    os.makedirs(path)
def pushd(path):
    """Change the working directory to `path`; return the previous one."""
    previous = os.getcwd()
    os.chdir(path)
    return previous
def extract_zip_file(zip_path, output_path):
    """Unpack every entry of the archive at `zip_path` into `output_path`."""
    with zipfile.ZipFile(zip_path) as archive:
        archive.extractall(output_path)
def copy_glob(input_pattern, output_path):
    """Copy every file matching the glob `input_pattern` into `output_path`."""
    for matching_file in glob.glob(input_pattern):
        shutil.copy(matching_file, output_path)
def make_writable(filepath):
    """Give the owning user read+write permission on `filepath`.

    Note: this *replaces* the permission bits (group/other lose access).
    """
    os.chmod(filepath, stat.S_IRUSR | stat.S_IWUSR)
def merge_tree(src, dst, symlinks=False, ignore=None):
    """Recursively copy `src` into `dst`, merging with existing content.

    Like shutil.copytree(), but tolerates `dst` (and subdirectories) already
    existing. Individual file failures are collected and raised at the end.

    Parameters
    ----------
    src, dst : str
        Source and destination directory paths.
    symlinks : bool
        If true, recreate symlinks instead of copying their targets.
    ignore : callable or None
        shutil.ignore_patterns-style callable: (dir, names) -> names to skip.

    Raises
    ------
    shutil.Error
        With a list of (src, dst, reason) tuples if any copy failed.
    """
    names = os.listdir(src)
    ignored_names = ignore(src, names) if ignore is not None else set()
    if not os.path.exists(dst):
        os.makedirs(dst)
    errors = []
    for name in names:
        if name in ignored_names:
            continue
        srcname = os.path.join(src, name)
        dstname = os.path.join(dst, name)
        try:
            if symlinks and os.path.islink(srcname):
                linkto = os.readlink(srcname)
                os.symlink(linkto, dstname)
            elif os.path.isdir(srcname):
                merge_tree(srcname, dstname, symlinks, ignore)
            else:
                # Will raise a SpecialFileError for unsupported file types.
                shutil.copy2(srcname, dstname)
        # BUG FIX: the original used bare `Error` (an undefined name — it is
        # shutil.Error) together with Python-2-only `except X, e` syntax;
        # `except ... as ...` works on Python 2.6+ and 3.
        except shutil.Error as err:
            # Merge errors from the recursive call so we continue with
            # the remaining files.
            errors.extend(err.args[0])
        except EnvironmentError as why:
            errors.append((srcname, dstname, str(why)))
    try:
        shutil.copystat(src, dst)
    except OSError as why:
        # BUG FIX: the original referenced bare `WindowsError`, which raises
        # NameError on non-Windows platforms; detect the Windows case via the
        # optional `winerror` attribute instead.
        if getattr(why, "winerror", None) is not None:
            # Copying file access times may fail on Windows.
            pass
        else:
            errors.append((src, dst, str(why)))
    if errors:
        raise shutil.Error(errors)
# -------------------------------------------------------------------------------------------------
# Settings.
# -------------------------------------------------------------------------------------------------
class Settings:
    """Packaging configuration loaded from SETTINGS_FILENAME (an XML file).

    After load(), exposes: platform, configuration, appleseed_path,
    appleseed_headers_path, qt_runtime_path, platform_runtime_path,
    python_path, package_output_path.
    """

    def load(self):
        """Parse the settings XML, populate attributes, and print a summary.

        Aborts the whole program (via fatal()) if the file cannot be read
        or a required key is missing.
        """
        print("Loading settings from " + SETTINGS_FILENAME + "...")
        tree = ElementTree()
        try:
            tree.parse(SETTINGS_FILENAME)
        except IOError:
            fatal("Failed to load configuration file '" + SETTINGS_FILENAME + "'")
        self.__load_values(tree)
        self.__print_summary()

    def __load_values(self, tree):
        # Every setting is mandatory; __get_required() aborts on a missing key.
        self.platform = self.__get_required(tree, "platform")
        self.configuration = self.__get_required(tree, "configuration")
        self.appleseed_path = self.__get_required(tree, "appleseed_path")
        self.appleseed_headers_path = self.__get_required(tree, "appleseed_headers_path")
        self.qt_runtime_path = self.__get_required(tree, "qt_runtime_path")
        self.platform_runtime_path = self.__get_required(tree, "platform_runtime_path")
        self.python_path = self.__get_required(tree, "python_path")
        self.package_output_path = self.__get_required(tree, "package_output_path")

    def __get_required(self, tree, key):
        """Return the text of element `key`, aborting if it is absent."""
        value = tree.findtext(key)
        if value is None:
            fatal("Missing value \"{0}\" in configuration file".format(key))
        return value

    def __print_summary(self):
        """Echo the loaded settings for operator verification."""
        print("")
        print("  Platform: " + self.platform)
        print("  Configuration: " + self.configuration)
        print("  Path to appleseed: " + self.appleseed_path)
        print("  Path to appleseed headers: " + self.appleseed_headers_path)
        print("  Path to Qt runtime: " + self.qt_runtime_path)
        if os.name == "nt":
            # The platform runtime only matters for Windows packages.
            print("  Path to platform runtime: " + self.platform_runtime_path)
        print("  Path to Python 2.7: " + self.python_path)
        print("  Output directory: " + self.package_output_path)
        print("")
# -------------------------------------------------------------------------------------------------
# Package information.
# -------------------------------------------------------------------------------------------------
class PackageInfo:
    """Derived package metadata: the version string (from git) and the
    destination path of the final archive.
    """

    def __init__(self, settings, no_zip):
        # When no_zip is true, the package is deployed as a plain directory
        # instead of a zip archive.
        self.no_zip = no_zip
        self.settings = settings

    def load(self):
        """Determine the version and package path, then print a summary."""
        print("Loading package information...")
        self.retrieve_git_tag()
        self.build_package_path()
        self.print_summary()

    def retrieve_git_tag(self):
        """Store the output of `git describe --long`, run inside the repo."""
        previous_dir = pushd(self.settings.appleseed_path)
        described = subprocess.Popen("git describe --long", stdout=subprocess.PIPE, shell=True).stdout.read()
        self.version = described.strip()
        os.chdir(previous_dir)

    def build_package_path(self):
        """Derive <output>/<version>/appleseed-<version>-<platform>.zip."""
        package_name = "-".join(["appleseed", self.version, self.settings.platform]) + ".zip"
        self.package_path = os.path.join(self.settings.package_output_path, self.version, package_name)

    def print_summary(self):
        """Echo the resolved version and output location."""
        print("")
        print("  Version: " + self.version)
        if self.no_zip:
            print("  Package directory: " + self.settings.package_output_path)
        else:
            print("  Package path: " + self.package_path)
        print("")
# -------------------------------------------------------------------------------------------------
# Base package builder.
# -------------------------------------------------------------------------------------------------
class PackageBuilder:
    def __init__(self, settings, package_info):
        # Configuration values loaded from the settings XML file.
        self.settings = settings
        # Version/package-path metadata (a PackageInfo instance).
        self.package_info = package_info
    def build_package(self):
        """Run the full packaging pipeline (see orchestrate()) and report
        completion. Any fatal error inside the pipeline exits the process.
        """
        print("Building package:")
        print("")
        self.orchestrate()
        print("")
        print("The package was successfully built.")
    def orchestrate(self):
        """Run the packaging steps in order.

        The sequence matters: the sandbox must be staged before binaries,
        libraries, headers, etc. are added, and the stage is only packaged
        (zip or directory) after platform-specific alterations
        (alter_stage(), defined in the platform subclasses).
        """
        self.remove_leftovers()
        self.retrieve_sandbox_from_git_repository()
        self.deploy_sandbox_to_stage()
        self.cleanup_stage()
        self.add_local_binaries_to_stage()
        self.add_local_libraries_to_stage()
        self.add_headers_to_stage()
        self.add_shaders_to_stage()
        self.add_scripts_to_stage()
        self.add_local_schema_files_to_stage()
        self.add_text_files_to_stage()
        self.add_dummy_files_into_empty_directories()
        self.disable_system_qt_plugins()
        self.alter_stage()
        if self.package_info.no_zip:
            self.deploy_stage_to_package_directory()
        else:
            self.build_final_zip_file()
        self.remove_stage()
def remove_leftovers(self):
progress("Removing leftovers from previous invocations")
safe_delete_directory("appleseed")
safe_delete_file("sandbox.zip")
safe_delete_file(self.package_info.package_path)
    def retrieve_sandbox_from_git_repository(self):
        """Export the repo's sandbox/ tree (at HEAD) into ./sandbox.zip.

        Uses `git archive` so only tracked files are included.
        NOTE(review): relies on self.run(), defined elsewhere in this class.
        """
        progress("Retrieving sandbox from Git repository")
        old_path = pushd(os.path.join(self.settings.appleseed_path, "sandbox"))
        self.run("git archive --format=zip --output=" + os.path.join(old_path, "sandbox.zip") + " --worktree-attributes HEAD")
        os.chdir(old_path)
def deploy_sandbox_to_stage(self):
progress("Deploying sandbox to staging directory")
extract_zip_file("sandbox.zip", "appleseed/")
safe_delete_file("sandbox.zip")
def cleanup_stage(self):
progress("Cleaning up staging directory")
# Remove API reference documentation.
safe_delete_directory("appleseed/documentation/apireference")
# Remove the test suite.
safe_delete_directory("appleseed/tests/test scenes")
# Remove voluminous unit tests/benchmarks data.
safe_delete_file("appleseed/tests/unit benchmarks/inputs/test_knn_particles.bin")
safe_delete_file("appleseed/tests/unit benchmarks/inputs/test_knn_photons.bin")
# Temporarily remove Alembic assembly C++ plugin.
safe_delete_directory("appleseed/samples/cpp/alembicassembly")
def add_local_binaries_to_stage(self):
progress("Adding local binaries to staging directory")
safe_make_directory("appleseed/bin")
dir_util.copy_tree(os.path.join(self.settings.appleseed_path, "sandbox/bin", self.settings.configuration), "appleseed/bin/")
shutil.copy(os.path.join(self.settings.appleseed_path, "sandbox/bin", exe("maketx")), "appleseed/bin/")
shutil.copy(os.path.join(self.settings.appleseed_path, "sandbox/bin", exe("oiiotool")), "appleseed/bin/")
shutil.copy(os.path.join(self.settings.appleseed_path, "sandbox/bin", exe("oslc")), "appleseed/bin/")
shutil.copy(os.path.join(self.settings.appleseed_path, "sandbox/bin", exe("oslinfo")), "appleseed/bin/")
    def add_local_libraries_to_stage(self):
        """Copy the built libraries (for the configured build configuration)
        into the staged lib/ directory."""
        progress("Adding local libraries to staging directory")
        safe_make_directory("appleseed/lib")
        dir_util.copy_tree(os.path.join(self.settings.appleseed_path, "sandbox/lib", self.settings.configuration), "appleseed/lib/")
#
# This method is used by the Mac and Linux package builders.
# It requires the following members to be defined:
#
#   self.shared_lib_ext
#   self.get_dependencies_for_file()
#
def add_unix_dependencies_to_stage(self):
    """Copy the shared libraries needed by the staged binaries into appleseed/lib.

    Collects dependencies of everything under appleseed/bin (except .py and
    .conf files) and of the appleseed.python extension module, then one extra
    level of dependencies of those libraries themselves, and copies each one
    into appleseed/lib unless a file of that name is already staged.
    """
    # Shared libs needed by binaries.
    bin_libs = set()
    for dirpath, dirnames, filenames in os.walk("appleseed/bin"):
        for filename in filenames:
            ext = os.path.splitext(filename)[1]
            if ext != ".py" and ext != ".conf":
                # Bug fix: join with the directory actually being walked
                # (dirpath), not the walk root, so binaries located in
                # subdirectories of appleseed/bin are resolved correctly.
                libs = self.get_dependencies_for_file(os.path.join(dirpath, filename))
                bin_libs |= libs

    # Shared libs needed by appleseed.python.
    for dirpath, dirnames, filenames in os.walk("appleseed/lib"):
        appleseedpython_shared_lib = "_appleseedpython" + self.shared_lib_ext
        if appleseedpython_shared_lib in filenames:
            libs = self.get_dependencies_for_file(os.path.join(dirpath, appleseedpython_shared_lib))
            bin_libs |= libs

    # Shared libs needed by the libraries themselves (one extra level).
    lib_libs = set()
    for lib in bin_libs:
        lib_libs |= self.get_dependencies_for_file(lib)

    all_libs = bin_libs | lib_libs

    # Copy needed libs to the lib directory.
    dest_dir = os.path.join("appleseed", "lib/")
    for lib in all_libs:
        lib_name = os.path.basename(lib)
        dest_path = os.path.join(dest_dir, lib_name)
        # The library might already exist, but without writing rights;
        # only copy when the destination is absent.
        if not os.path.exists(dest_path):
            progress("  Copying {0} to {1}".format(lib, dest_dir))
            try:
                shutil.copy(lib, dest_dir)
                make_writable(dest_path)
            except IOError:
                info("WARNING: could not copy {0} to {1}".format(lib, dest_dir))
def add_headers_to_stage(self):
    """Copy the public appleseed header trees into the staging directory."""
    progress("Adding headers to staging directory")
    safe_make_directory("appleseed/include")
    # Ship headers only: skip sources, schemas and template files.
    ignore_files = shutil.ignore_patterns("*.cpp", "*.c", "*.xsd", "snprintf", "version.h.in")
    for subtree in ("foundation", "main", "renderer"):
        shutil.copytree(os.path.join(self.settings.appleseed_headers_path, subtree),
                        "appleseed/include/" + subtree,
                        ignore=ignore_files)
def add_shaders_to_stage(self):
    """Replace the staged shaders with the sandbox shaders plus their sources."""
    progress("Adding shaders to staging directory")
    # Start from a clean slate so stale shaders never ship.
    safe_delete_directory("appleseed/shaders")
    source_root = self.settings.appleseed_path
    shutil.copytree(os.path.join(source_root, "sandbox/shaders"), "appleseed/shaders")
    shutil.copytree(os.path.join(source_root, "src/appleseed.shaders/src"), "appleseed/shaders/src")
def add_scripts_to_stage(self):
    """Copy the helper scripts shipped with the package into appleseed/bin."""
    progress("Adding scripts to staging directory")
    for script in ("cleanmany.py",
                   "convertmany.py",
                   "rendermany.py",
                   "rendernode.py",
                   "rendermanager.py"):
        shutil.copy(script, "appleseed/bin/")
def add_local_schema_files_to_stage(self):
    """Copy the XML schema files from the sandbox into the stage."""
    progress("Adding local schema files to staging directory")
    safe_make_directory("appleseed/schemas")
    schema_glob = os.path.join(self.settings.appleseed_path, "sandbox/schemas/*.xsd")
    copy_glob(schema_glob, "appleseed/schemas/")
def add_text_files_to_stage(self):
    """Copy the license/readme text files into the stage root."""
    progress("Adding text files")
    for text_file in ("LICENSE.txt", "README.md", "THIRDPARTIES.txt"):
        shutil.copy(os.path.join(self.settings.appleseed_path, text_file), "appleseed/")
def add_dummy_files_into_empty_directories(self):
    """Drop a placeholder file into every empty directory so archivers keep them."""
    progress("Adding dummy files to preserve empty directories")
    for dirpath, dirnames, filenames in os.walk("."):
        # A directory with neither subdirectories nor files is empty.
        if not dirnames and not filenames:
            self.create_preserve_file(dirpath)
def disable_system_qt_plugins(self):
    """Write an empty qt.conf so Qt ignores system-wide plugin directories."""
    progress("Disabling system's Qt plugins")
    # An empty qt.conf next to the binaries resets Qt's search paths.
    with open("appleseed/bin/qt.conf", "w"):
        pass
def create_preserve_file(self, path):
    """Create a preserve.txt placeholder file inside the given directory."""
    preserve_path = os.path.join(path, "preserve.txt")
    with open(preserve_path, "w") as preserve_file:
        preserve_file.write("This file allows to preserve this otherwise empty directory.\n")
# This method is overridden in the platform-specific builders below.
def alter_stage(self):
    """Hook for platform-specific stage tweaks; the base implementation is a no-op."""
    pass
def deploy_stage_to_package_directory(self):
    """Mirror the staging tree into a fresh package directory."""
    dest_dir = os.path.join(self.settings.package_output_path, "appleseed")
    progress("Removing existing package directory")
    # Wipe any previous deployment so the copy starts clean.
    safe_delete_directory(dest_dir)
    progress("Deploying staging directory to package directory")
    shutil.copytree("appleseed", dest_dir)
def build_final_zip_file(self):
progress("Building final zip file from staging directory")
| |
call end on PhoneA.
Returns:
True if pass; False if fail.
"""
call_ab_id, call_ac_id = self._test_volte_mo_mo_add_volte_swap_x(0)
if call_ab_id is None or call_ac_id is None:
return False
return self._test_ims_conference_merge_drop_second_call_no_cep(
call_ab_id, call_ac_id)
@TelephonyBaseTest.tel_test_wrap
def test_volte_mo_mo_add_volte_merge_drop_second_call_from_participant_cep(
        self):
    """VoLTE three-way conference among three phones, CEP enabled.

    1. PhoneA (VoLTE) calls PhoneB (VoLTE); PhoneB accepts.
    2. PhoneA (VoLTE) calls PhoneC (VoLTE); PhoneC accepts.
    3. PhoneA merges the calls into a VoLTE CEP conference.
    4. PhoneC hangs up; verify the call continues.
    5. PhoneB hangs up; verify the call ends on PhoneA.

    Returns:
        True if pass; False if fail.
    """
    ab_id, ac_id = self._test_volte_mo_mo_add_volte_swap_x(0)
    if ab_id is not None and ac_id is not None:
        return self._test_ims_conference_merge_drop_second_call_from_participant_cep(
            ab_id, ac_id)
    return False
@TelephonyBaseTest.tel_test_wrap
def test_volte_mo_mo_add_volte_merge_drop_second_call_from_host_cep(self):
    """VoLTE three-way conference among three phones, CEP enabled.

    1. PhoneA (VoLTE) calls PhoneB (VoLTE); PhoneB accepts.
    2. PhoneA (VoLTE) calls PhoneC (VoLTE); PhoneC accepts.
    3. PhoneA merges the calls into a VoLTE CEP conference.
    4. PhoneA (host) disconnects the A-C leg; verify the call continues.
    5. PhoneA (host) disconnects the A-B leg; verify the call continues.

    Returns:
        True if pass; False if fail.
    """
    ab_id, ac_id = self._test_volte_mo_mo_add_volte_swap_x(0)
    if ab_id is not None and ac_id is not None:
        return self._test_ims_conference_merge_drop_second_call_from_host_cep(
            ab_id, ac_id)
    return False
@TelephonyBaseTest.tel_test_wrap
def test_volte_mo_mo_add_volte_merge_drop_first_call_from_participant_cep(
        self):
    """VoLTE three-way conference among three phones, CEP enabled.

    1. PhoneA (VoLTE) calls PhoneB (VoLTE); PhoneB accepts.
    2. PhoneA (VoLTE) calls PhoneC (VoLTE); PhoneC accepts.
    3. PhoneA merges the calls into a VoLTE CEP conference.
    4. PhoneB hangs up; verify the call continues.
    5. PhoneC hangs up; verify the call ends on PhoneA.

    Returns:
        True if pass; False if fail.
    """
    ab_id, ac_id = self._test_volte_mo_mo_add_volte_swap_x(0)
    if ab_id is not None and ac_id is not None:
        return self._test_ims_conference_merge_drop_first_call_from_participant_cep(
            ab_id, ac_id)
    return False
@TelephonyBaseTest.tel_test_wrap
def test_volte_mo_mo_add_volte_merge_drop_first_call_from_host_cep(self):
    """VoLTE three-way conference among three phones, CEP enabled.

    1. PhoneA (VoLTE) calls PhoneB (VoLTE); PhoneB accepts.
    2. PhoneA (VoLTE) calls PhoneC (VoLTE); PhoneC accepts.
    3. PhoneA merges the calls into a VoLTE CEP conference.
    4. PhoneA (host) disconnects the A-B leg; verify the call continues.
    5. PhoneA (host) disconnects the A-C leg; verify the call continues.

    Returns:
        True if pass; False if fail.
    """
    ab_id, ac_id = self._test_volte_mo_mo_add_volte_swap_x(0)
    if ab_id is not None and ac_id is not None:
        return self._test_ims_conference_merge_drop_first_call_from_host_cep(
            ab_id, ac_id)
    return False
@TelephonyBaseTest.tel_test_wrap
def test_volte_mo_mt_add_volte_merge_drop_second_call_from_participant_no_cep(
        self):
    """VoLTE three-way conference among three phones, no CEP.

    1. PhoneA (VoLTE) calls PhoneB (VoLTE); PhoneB accepts.
    2. PhoneC (VoLTE) calls PhoneA (VoLTE); PhoneA accepts.
    3. PhoneA merges the calls into a conference (no CEP).
    4. PhoneC hangs up; verify the call continues.
    5. PhoneB hangs up; verify the call ends on PhoneA.

    Returns:
        True if pass; False if fail.
    """
    ab_id, ac_id = self._test_volte_mo_mt_add_volte_swap_x(0)
    if ab_id is not None and ac_id is not None:
        return self._test_ims_conference_merge_drop_second_call_no_cep(
            ab_id, ac_id)
    return False
@TelephonyBaseTest.tel_test_wrap
def test_volte_mo_mt_add_volte_merge_drop_second_call_from_participant_cep(
        self):
    """VoLTE three-way conference among three phones, CEP enabled.

    1. PhoneA (VoLTE) calls PhoneB (VoLTE); PhoneB accepts.
    2. PhoneC (VoLTE) calls PhoneA (VoLTE); PhoneA accepts.
    3. PhoneA merges the calls into a VoLTE CEP conference.
    4. PhoneC hangs up; verify the call continues.
    5. PhoneB hangs up; verify the call ends on PhoneA.

    Returns:
        True if pass; False if fail.
    """
    ab_id, ac_id = self._test_volte_mo_mt_add_volte_swap_x(0)
    if ab_id is not None and ac_id is not None:
        return self._test_ims_conference_merge_drop_second_call_from_participant_cep(
            ab_id, ac_id)
    return False
@TelephonyBaseTest.tel_test_wrap
def test_volte_mo_mt_add_volte_merge_drop_second_call_from_host_cep(self):
    """VoLTE three-way conference among three phones, CEP enabled.

    1. PhoneA (VoLTE) calls PhoneB (VoLTE); PhoneB accepts.
    2. PhoneC (VoLTE) calls PhoneA (VoLTE); PhoneA accepts.
    3. PhoneA merges the calls into a VoLTE CEP conference.
    4. PhoneA (host) disconnects the A-C leg; verify the call continues.
    5. PhoneA (host) disconnects the A-B leg; verify the call continues.

    Returns:
        True if pass; False if fail.
    """
    ab_id, ac_id = self._test_volte_mo_mt_add_volte_swap_x(0)
    if ab_id is not None and ac_id is not None:
        return self._test_ims_conference_merge_drop_second_call_from_host_cep(
            ab_id, ac_id)
    return False
@TelephonyBaseTest.tel_test_wrap
def test_volte_mo_mt_add_volte_merge_drop_first_call_from_participant_cep(
        self):
    """VoLTE three-way conference among three phones, CEP enabled.

    1. PhoneA (VoLTE) calls PhoneB (VoLTE); PhoneB accepts.
    2. PhoneC (VoLTE) calls PhoneA (VoLTE); PhoneA accepts.
    3. PhoneA merges the calls into a VoLTE CEP conference.
    4. PhoneB hangs up; verify the call continues.
    5. PhoneC hangs up; verify the call ends on PhoneA.

    Returns:
        True if pass; False if fail.
    """
    ab_id, ac_id = self._test_volte_mo_mt_add_volte_swap_x(0)
    if ab_id is not None and ac_id is not None:
        return self._test_ims_conference_merge_drop_first_call_from_participant_cep(
            ab_id, ac_id)
    return False
@TelephonyBaseTest.tel_test_wrap
def test_volte_mo_mt_add_volte_merge_drop_first_call_from_host_cep(self):
    """VoLTE three-way conference among three phones, CEP enabled.

    1. PhoneA (VoLTE) calls PhoneB (VoLTE); PhoneB accepts.
    2. PhoneC (VoLTE) calls PhoneA (VoLTE); PhoneA accepts.
    3. PhoneA merges the calls into a VoLTE CEP conference.
    4. PhoneA (host) disconnects the A-B leg; verify the call continues.
    5. PhoneA (host) disconnects the A-C leg; verify the call continues.

    Returns:
        True if pass; False if fail.
    """
    ab_id, ac_id = self._test_volte_mo_mt_add_volte_swap_x(0)
    if ab_id is not None and ac_id is not None:
        return self._test_ims_conference_merge_drop_first_call_from_host_cep(
            ab_id, ac_id)
    return False
@TelephonyBaseTest.tel_test_wrap
def test_volte_mt_mt_add_volte_merge_drop_second_call_from_participant_no_cep(
        self):
    """VoLTE three-way conference among three phones, no CEP.

    1. PhoneB (VoLTE) calls PhoneA (VoLTE); PhoneA accepts.
    2. PhoneC (VoLTE) calls PhoneA (VoLTE); PhoneA accepts.
    3. PhoneA merges the calls into a conference (no CEP).
    4. PhoneC hangs up; verify the call continues.
    5. PhoneB hangs up; verify the call ends on PhoneA.

    Returns:
        True if pass; False if fail.
    """
    ab_id, ac_id = self._test_volte_mt_mt_add_volte_swap_x(0)
    if ab_id is not None and ac_id is not None:
        return self._test_ims_conference_merge_drop_second_call_no_cep(
            ab_id, ac_id)
    return False
@TelephonyBaseTest.tel_test_wrap
def test_volte_mt_mt_add_volte_merge_drop_second_call_from_participant_cep(
        self):
    """VoLTE three-way conference among three phones, CEP enabled.

    1. PhoneB (VoLTE) calls PhoneA (VoLTE); PhoneA accepts.
    2. PhoneC (VoLTE) calls PhoneA (VoLTE); PhoneA accepts.
    3. PhoneA merges the calls into a VoLTE CEP conference.
    4. PhoneC hangs up; verify the call continues.
    5. PhoneB hangs up; verify the call ends on PhoneA.

    Returns:
        True if pass; False if fail.
    """
    ab_id, ac_id = self._test_volte_mt_mt_add_volte_swap_x(0)
    if ab_id is not None and ac_id is not None:
        return self._test_ims_conference_merge_drop_second_call_from_participant_cep(
            ab_id, ac_id)
    return False
@TelephonyBaseTest.tel_test_wrap
def test_volte_mt_mt_add_volte_merge_drop_second_call_from_host_cep(self):
    """VoLTE three-way conference among three phones, CEP enabled.

    1. PhoneB (VoLTE) calls PhoneA (VoLTE); PhoneA accepts.
    2. PhoneC (VoLTE) calls PhoneA (VoLTE); PhoneA accepts.
    3. PhoneA merges the calls into a VoLTE CEP conference.
    4. PhoneA (host) disconnects the A-C leg; verify the call continues.
    5. PhoneA (host) disconnects the A-B leg; verify the call continues.

    Returns:
        True if pass; False if fail.
    """
    ab_id, ac_id = self._test_volte_mt_mt_add_volte_swap_x(0)
    if ab_id is not None and ac_id is not None:
        return self._test_ims_conference_merge_drop_second_call_from_host_cep(
            ab_id, ac_id)
    return False
@TelephonyBaseTest.tel_test_wrap
def test_volte_mt_mt_add_volte_merge_drop_first_call_from_participant_cep(
        self):
    """VoLTE three-way conference among three phones, CEP enabled.

    1. PhoneB (VoLTE) calls PhoneA (VoLTE); PhoneA accepts.
    2. PhoneC (VoLTE) calls PhoneA (VoLTE); PhoneA accepts.
    3. PhoneA merges the calls into a VoLTE CEP conference.
    4. PhoneB hangs up; verify the call continues.
    5. PhoneC hangs up; verify the call ends on PhoneA.

    Returns:
        True if pass; False if fail.
    """
    ab_id, ac_id = self._test_volte_mt_mt_add_volte_swap_x(0)
    if ab_id is not None and ac_id is not None:
        return self._test_ims_conference_merge_drop_first_call_from_participant_cep(
            ab_id, ac_id)
    return False
@TelephonyBaseTest.tel_test_wrap
def test_volte_mt_mt_add_volte_merge_drop_first_call_from_host_cep(self):
""" Test VoLTE Conference Call among three phones. CEP enabled.
1. Call from PhoneB (VoLTE) to PhoneA (VoLTE), accept on PhoneA.
2. Call from PhoneC (VoLTE) to PhoneA (VoLTE), accept on PhoneA.
3. On PhoneA, merge to conference call (VoLTE CEP conference call).
4. On PhoneA disconnect call between A-B, verify call continues.
5. On PhoneA disconnect call between A-C, verify call continues.
Returns:
True if pass; False if fail.
"""
call_ab_id, call_ac_id | |
from os import add_dll_directory
from tkinter import *
import tkinter as tk
from tkinter import ttk
from PIL import ImageTk,Image
from PIL import ImageTk
import psycopg2
from tkinter_custom_button import TkinterCustomButton
import tkinter.messagebox
# Main application window.
root = tk.Tk()
root.configure(background='#ffff99')

# Session state shared across the login/profile pages.
employee_id = ""
password_employee = ""
manager_id = ""
password_manager = ""
iid = ""
eid = ""
did = ""
dept_no = ""

root.geometry("900x650")
canvas = tk.Canvas(root, width=900, height=650)
canvas.grid(columnspan=5, rowspan=5)

tk.Label(root, text='Some File').grid(row=0)
e1 = tk.Entry(root)
e1.grid(row=0, column=1)
#canvas.configure(background='#ffff99')
# Typo fix in the window title: "Compnay" -> "Company".
root.title('Pharmaceutical Company Database')
title_bar = Frame(root, bg='#2e2e2e', relief='raised', bd=2, highlightthickness=0)
def on_resize(event):
    """Rescale the background image to the label's new size.

    Bound to the background label's <Configure> event; reads the
    module-level `bg` image and updates label `l` in place.
    """
    # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter
    # under its current name, so the resize result is unchanged.
    image = bg.resize((event.width, event.height), Image.LANCZOS)
    # Keep a reference on the label so the PhotoImage is not garbage-collected.
    l.image = ImageTk.PhotoImage(image)
    l.config(image=l.image)
bg=Image.open("bg3.png")  # background image; rescaled on demand by on_resize()
l = tk.Label(root)
l.place(x=0, y=0, relwidth=1, relheight=1) # make label l fill the parent window at all times
l.bind('<Configure>', on_resize) # on_resize runs whenever label l is resized
#root.wm_attributes("-transparentcolor", 'white')
def PageOne():
def EmployeeLogin():
button1.destroy()
button2.destroy()
#logo_label.destroy()
label1=Label(root,text="Employee Login Portal",font=('Times_New_Roman',35))
label1.grid(column=1,row=0)
label1.configure(background='white')
def open_popup(msg):
top= Toplevel(root)
top.geometry("750x250")
top.title("OOPS")
Label(top, text= msg, font=('Times_New_Roman',15)).place(x=150,y=80)
def EmployeedetailsPage(*args):
print("username : " + username.get())
print("password : " + password.get())
employee_id = username.get()
password_employee = password.get()
connection=psycopg2.connect(user="postgres",
password="// <PASSWORD>",
host="127.0.0.1",
port="5432",
database="pharmacompany")
cur=connection.cursor()
cur.execute("SELECT Password from LOGININFO where " + "Employee_ID = " + "'{}'".format(employee_id))
output=cur.fetchall()
incorrect_password = False
if output == []:
open_popup("User name doesn't exist, please check again!")
else:
if(password_employee != str(output[0][0])):
open_popup("Password entered for " + employee_id + " is incorrect, please enter again!")
if(incorrect_password == False):
incorrect_password = True
usernameLabel.destroy()
usernameEntry.destroy()
IIDentry.destroy()
passwordLabel.destroy()
label1.destroy()
Backbutton1.destroy()
Loginbutton.destroy()
cur.execute("SELECT Fname, Lname, EID, Job_Position, Salary, Age, Start_Date, Dept_No from EMPLOYEE where " + "EID = " + "'{}'".format(employee_id))
output=cur.fetchall()
if output == [] or incorrect_password:
output = ["N/A", "N/A", "N/A", "N/A", "N/A", "N/A", "N/A", "N/A"]
else:
output = output[0]
label2=Label(root,text="Profile",font=('Times_New_Roman',35))
label2.grid(column=1,row=0)
label2.configure(background='white')
lbl1=Label(root, text="<NAME>", fg='red', font=("Helvetica", 16))
lbl1.place(x=80, y=250)
lfld1=Label(root, text= output[0], fg='black', font=("Helvetica", 16))
lfld1.configure(background='white')
lfld1.place(x=250, y=250)
lbl2=Label(root, text="<NAME>", fg='red', font=("Helvetica", 16))
lbl2.place(x=80, y=350)
lfld2=Label(root, text=output[1], fg='black', font=("Helvetica", 16))
lfld2.configure(background='white')
lfld2.place(x=250, y=350)
lbl3=Label(root, text="Employee ID", fg='red', font=("Helvetica", 16))
lbl3.place(x=80, y=450)
lfld3=Label(root, text=output[2], fg='black', font=("Helvetica", 16))
lfld3.configure(background='white')
lfld3.place(x=250, y=450)
lbl4=Label(root, text="Job Position", fg='red', font=("Helvetica", 16))
lbl4.place(x=80, y=550)
lfld4=Label(root, text=output[3], fg='black', font=("Helvetica", 16))
lfld4.configure(background='white')
lfld4.place(x=250, y=550)
lbl5=Label(root, text="Salary", fg='red', font=("Helvetica", 16))
lbl5.place(x=450, y=250)
lfld5=Label(root, text=output[4], fg='black', font=("Helvetica", 16))
lfld5.configure(background='white')
lfld5.place(x=650, y=250)
lbl6=Label(root, text="Age", fg='red', font=("Helvetica", 16))
lbl6.place(x=450, y=350)
lfld6=Label(root, text=output[5], fg='black', font=("Helvetica", 16))
lfld6.configure(background='white')
lfld6.place(x=650, y=350)
lbl7=Label(root, text="Start Date", fg='red', font=("Helvetica", 16))
lbl7.place(x=450, y=450)
lfld7=Label(root, text=output[6], fg='black', font=("Helvetica", 16))
lfld7.configure(background='white')
lfld7.place(x=650, y=450)
lbl8=Label(root, text="Department Number", fg='red', font=("Helvetica", 16))
lbl8.place(x=450, y=550)
lfld8=Label(root, text=output[7], fg='black', font=("Helvetica", 16))
lfld8.configure(background='white')
lfld8.place(x=650, y=550)
connection.commit()
connection.close()
def Logout():
usernameLabel.destroy()
passwordLabel.destroy()
usernameEntry.destroy()
IIDentry.destroy()
label1.destroy()
Loginbutton.destroy()
Backbutton1.destroy()
label2.destroy()
Loginbutton.destroy()
Logoutbutton.destroy()
lbl8.destroy()
lbl1.destroy()
lbl2.destroy()
lbl3.destroy()
lbl4.destroy()
lbl5.destroy()
lbl6.destroy()
lbl7.destroy()
lfld8.destroy()
lfld1.destroy()
lfld2.destroy()
lfld3.destroy()
lfld4.destroy()
lfld5.destroy()
lfld6.destroy()
lfld7.destroy()
PageOne()
Logoutbutton=Button(root,text="Logout ",font=('Times_New_Roman',15),command=Logout)
Logoutbutton.grid(column=0,row=0)
Logoutbutton.configure(background='white')
def backButton1():
usernameLabel.destroy()
passwordLabel.destroy()
usernameEntry.destroy()
IIDentry.destroy()
label1.destroy()
Loginbutton.destroy()
Backbutton1.destroy()
PageOne()
Backbutton1=Button(root,text="Back ",font=('Times_New_Roman',15),command=backButton1)
Backbutton1.grid(column=0,row=0)
Backbutton1.configure(background='white')
usernameLabel = Label(root, text="Employee ID",font=('Times_New_Roman',15))
usernameLabel.grid(row=1, column=0)
usernameLabel.configure(background='white')
username = StringVar()
usernameEntry = Entry(root, textvariable=username)
usernameEntry.grid(row=1, column=1,padx=5,pady=5,ipady=10)
usernameEntry.configure(background='white')
#password label and password entry box
passwordLabel = Label(root,text="Password",font=('Times_New_Roman',15))
passwordLabel.grid(row=2, column=0)
passwordLabel.configure(background='white')
password = StringVar()
IIDentry = Entry(root, textvariable=password, show='*')
IIDentry.grid(row=2, column=1,padx=5,pady=5,ipady=10)
IIDentry.configure(background='white')
Loginbutton=Button(root,text="Login ",font=('Times_New_Roman',25),command=EmployeedetailsPage)
Loginbutton.grid(column=2,row=2)
Loginbutton.configure(background='white')
def ManagerLogin():
button1.destroy()
button2.destroy()
#logo_label.destroy()
label1=Label(root,text="Manager Login Portal",font=('Times_New_Roman',35))
label1.grid(column=1,row=0)
label1.configure(background='white')
def backButton1():
usernameLabel.destroy()
passwordLabel.destroy()
usernameEntry.destroy()
IIDentry.destroy()
label1.destroy()
Loginbutton.destroy()
Backbutton1.destroy()
PageOne()
def ManagerPage2():
def open_popup(msg):
top= Toplevel(root)
top.geometry("750x250")
top.title("Child Window")
Label(top, text= msg, font=('Times_New_Roman',15)).place(x=150,y=80)
print("username : " + username.get())
print("password : " + password.<PASSWORD>())
employee_id = username.get()
password_employee = <PASSWORD>()
connection=psycopg2.connect(user="postgres",
password="//",
host="127.0.0.1",
port="5432",
database="pharmacompany")
cur=connection.cursor()
cur.execute("SELECT Password from LOGININFO where " + "Employee_ID = " + "'{}'".format(employee_id))
output=cur.fetchall()
connection.commit()
connection.close()
incorrect_password = False
if output == []:
open_popup("User name doesn't exist, please check again!")
else:
temp = employee_id[::-1]
if(not(temp[2] == "M" and len(employee_id) >= 4 or employee_id == "AA01")):
open_popup(employee_id + " is not a manager or admin!")
elif(password_employee != str(output[0][0])):
open_popup("Password entered for " + employee_id + " is incorrect, please enter again!")
if(incorrect_password == False):
incorrect_password = True
usernameLabel.destroy()
passwordLabel.destroy()
usernameEntry.destroy()
IIDentry.destroy()
label1.destroy()
Loginbutton.destroy()
Backbutton1.destroy()
def AddDrug():
Accounting.destroy()
button3.destroy()
button1.destroy()
Rawbutton1.destroy()
EmpDetails.destroy()
ViewDetails.destroy()
Logoutbutton.destroy()
def Insertdata():
connection=psycopg2.connect(user="postgres",
password="//",
host="127.0.0.1",
port="5432",
database="pharmacompany")
cur=connection.cursor()
cur.execute("INSERT INTO DRUG VALUES (" + str(drugid.get()) + "," + "'{}'".format(name.get()) + "," + str(price.get()) + "," + "'{}'".format(com.get()) + ")")
open_popup("Drug data added successfully!")
connection.commit()
connection.close()
def Back():
Label1.destroy()
lbl1.destroy()
lbl2.destroy()
lbl3.destroy()
lbl4.destroy()
lfld1.destroy()
lfld2.destroy()
lfld3.destroy()
lfld4.destroy()
Backbutt.destroy()
SubmitButton.destroy()
ManagerPage2()
Label1=Label(root, text="Add Drug Details", fg='Black', font=("Helvetica", 20))
Label1.grid(column=1,row=0)
lbl1=Label(root, text="Drug ID", fg='red', font=("Helvetica", 16))
lbl1.place(x=80, y=150)
drugid=IntVar()
lfld1=Entry(root, textvariable= drugid)
lfld1.configure(background='white')
lfld1.place(x=250, y=150)
lbl2=Label(root, text="Drug Name", fg='red', font=("Helvetica", 16))
lbl2.place(x=80, y=250)
name=StringVar()
lfld2=Entry(root, textvariable=name)
lfld2.configure(background='white')
lfld2.place(x=250, y=250)
lbl3=Label(root, text="Price", fg='red', font=("Helvetica", 16))
lbl3.place(x=80, y=350)
price=IntVar()
lfld3=Entry(root, textvariable=price)
lfld3.configure(background='white')
lfld3.place(x=250, y=350)
lbl4=Label(root, text="Composition", fg='red', font=("Helvetica", 16))
lbl4.place(x=80, y=450)
com=StringVar()
lfld4=Entry(root, textvariable=com)
lfld4.configure(background='white')
lfld4.place(x=250, y=450)
Backbutt=Button(root,text="Back",font=('Times_New_Roman',25),command=Back)
Backbutt.grid(column=1,row=4)
Backbutt.configure(background='white')
SubmitButton=Button(root,text="Submit",font=('Times_New_Roman',25),command=Insertdata)
SubmitButton.grid(column=2,row=4)
SubmitButton.configure(background='white')
button3=Button(root,text="Add Drug Data",font=('Times_New_Roman',25),command=AddDrug)
button3.grid(column=2,row=1)
button3.configure(background='white')
def Viewdrugstatus():
Accounting.destroy()
button3.destroy()
Logoutbutton.destroy()
ViewDetails.destroy()
button1.destroy()
EmpDetails.destroy()
Rawbutton1.destroy()
def Back():
bt2.destroy()
bt3.destroy()
bt4.destroy()
bt5.destroy()
bt7.destroy()
EmpDetails.destroy()
bt6.destroy()
DrugLabel.destroy()
drugEntry.destroy()
Backbutton.destroy()
ManagerPage2()
Backbutton=Button(root,text="Back ",font=('Times_New_Roman',15),command=Back)
Backbutton.grid(column=2,row=0)
Backbutton.configure(background='white')
DrugLabel = Label(root,text="Drug ID",font=('Times_New_Roman',15))
DrugLabel.grid(row=0, column=0)
DrugLabel.configure(background='white')
DID = StringVar()
drugEntry = Entry(root, textvariable=DID)
drugEntry.grid(row=0, column=1,padx=5,pady=5,ipady=10)
drugEntry.configure(background='white')
def formPage():
Backbutton.destroy()
bt2.destroy()
bt3.destroy()
bt4.destroy()
bt5.destroy()
bt6.destroy()
bt7.destroy()
DrugLabel.destroy()
drugEntry.destroy()
def Backform():
lbl1.destroy()
lbl2.destroy()
lbl3.destroy()
lbl4.destroy()
lbl5.destroy()
lfld1.destroy()
lfld2.destroy()
lfld3.destroy()
lfld4.destroy()
lfld5.destroy()
Backbuttonf.destroy()
Updatebuttonf.destroy()
Deletebuttonf.destroy()
insertbuttonf.destroy()
Viewdrugstatus()
def open_popup(msg):
top= Toplevel(root)
top.geometry("750x250")
top.title("OOPS")
Label(top, text= msg, font=('Times_New_Roman',15)).place(x=150,y=80)
print("did : " + DID.get())
did = DID.get()
connection=psycopg2.connect(user="postgres",
password="//",
host="127.0.0.1",
port="5432",
database="pharmacompany")
cur=connection.cursor()
cur.execute("SELECT DrugID from FORMULATION where " + "DrugID = " + "'{}'".format(did))
output=cur.fetchall()
if output == []:
open_popup("Drug ID doesn't exist, please check again!")
connection.commit()
connection.close()
connection=psycopg2.connect(user="postgres",
password="//",
host="127.0.0.1",
port="5432",
database="pharmacompany")
cur=connection.cursor()
cur.execute("SELECT PID, Testing, Conclusion, RID, DrugID from FORMULATION where " + "DrugID = " + "'{}'".format(did))
output=cur.fetchall()
connection.commit()
connection.close()
if(output == []):
output = ["N/A", "N/A", "N/A", "N/A", "N/A"]
elif (employee_id[0] != "F" ):
output = ["N/A", "N/A", "N/A", "N/A", "N/A"]
open_popup("Access denied")
else:
output = output[0]
lbl1=Label(root, text="Process ID", fg='red', font=("Helvetica", 16))
lbl1.place(x=80, y=250)
lfld1=Label(root, text= output[0], fg='black', font=("Helvetica", 16))
lfld1.configure(background='white')
lfld1.place(x=250, y=250)
lbl2=Label(root, text="Testing", fg='red', font=("Helvetica", 16))
lbl2.place(x=80, y=350)
lfld2=Label(root, text=output[1], fg='black', font=("Helvetica", 16))
lfld2.configure(background='white')
lfld2.place(x=250, y=350)
lbl3=Label(root, text="Conclusion", fg='red', font=("Helvetica", 16))
lbl3.place(x=80, y=450)
lfld3=Label(root, text=output[2], fg='black', font=("Helvetica", 16))
lfld3.configure(background='white')
lfld3.place(x=250, y=450)
lbl4=Label(root, text="Raw Material ID", fg='red', font=("Helvetica", 16))
lbl4.place(x=80, y=550)
lfld4=Label(root, text=output[3], fg='black', font=("Helvetica", 16))
lfld4.configure(background='white')
lfld4.place(x=250, y=550)
lbl5=Label(root, text="Drug ID", fg='red', font=("Helvetica", 16))
lbl5.place(x=450, y=250)
lfld5=Label(root, text=output[4], fg='black', font=("Helvetica", 16))
lfld5.configure(background='white')
lfld5.place(x=650, y=250)
def Deletef():
print("did : " + DID.get())
did = DID.get()
connection=psycopg2.connect(user="postgres",
password="//",
host="127.0.0.1",
port="5432",
database="pharmacompany")
cur=connection.cursor()
cur.execute("DELETE from FORMULATION where " + "DrugID = " + did)
cur.execute("SELECT * from FORMULATION")
output=cur.fetchall()
print(output)
if output == []:
open_popup("Drug ID doesn't exist, please check again!")
else:
open_popup("Deleted Successfully")
connection.commit()
connection.close()
def Updatef():
a=Label(root, text="Process ID", fg='red', font=("Helvetica", 16))
a.place(x=80, y=250)
ai=IntVar()
aent=Entry(root,textvariable=ai)
aent.place(x=250,y=250)
b=Label(root, text="Testing", fg='red', font=("Helvetica", 16))
b.place(x=80, y=350)
bi=StringVar()
bent=Entry(root,textvariable=bi)
bent.place(x=250,y=350)
c=Label(root, text="Conclusion", fg='red', font=("Helvetica", 16))
c.place(x=80, y=450)
ci=StringVar()
cent=Entry(root,textvariable=ci)
cent.place(x=250,y=450)
d=Label(root, text="Raw Material ID", fg='red', font=("Helvetica", 16))
d.place(x=80, y=550)
di=IntVar()
dent=Entry(root,textvariable=di)
dent.place(x=250,y=550)
e=Label(root, text="Drug ID", fg='red', font=("Helvetica", 16))
e.place(x=450, y=250)
ei=IntVar()
eent=Entry(root,textvariable=ei)
eent.place(x=650,y=250)
def update():
connection=psycopg2.connect(user="postgres",
password="//",
host="127.0.0.1",
port="5432",
database="pharmacompany")
cur=connection.cursor()
#UPDATE FORMULATION SET PID = 1, TESTING = "NO", CONCLUSION = "NO", RID = 1, DRUGID = 001
cur.execute("UPDATE FORMULATION SET PID = " + str(ai.get()) + "," + "Testing = " + "'{}'".format(bi.get()) + "," + "Conclusion = " + "'{}'".format(ci.get()) + "," + "RID = " + str(di.get()) + "," + "DrugID = " + str(ei.get()) + " where " + "DrugID = " + did)
cur.execute("SELECT * from FORMULATION")
output=cur.fetchall()
print(output)
if output == []:
open_popup("Drug ID doesn't exist, please check again!")
else:
open_popup("Updated Successfully")
connection.commit()
connection.close()
def Back():
a.destroy()
b.destroy()
c.destroy()
d.destroy()
e.destroy()
aent.destroy()
bent.destroy()
cent.destroy()
dent.destroy()
eent.destroy()
Backbutton12.destroy()
Savebutton.destroy()
formPage()
#UPDATE FOMULATION SET VAR = VALUE, WHERE DRUG ID = 001
Backbutton12=Button(root,text="Back ",font=('Times_New_Roman',15),command=Back)
Backbutton12.grid(column=2,row=0)
Backbutton12.configure(background='white')
Savebutton=Button(root,text="Save ",font=('Times_New_Roman',15),command=update)
Savebutton.grid(column=3,row=0)
Savebutton.configure(background='white')
Deletebuttonf.destroy()
Updatebuttonf.destroy()
Backbuttonf.destroy()
insertbuttonf.destroy()
| |
import __clrclasses__.System.Configuration as Configuration
import __clrclasses__.System.IO as IO
import __clrclasses__.System.Security as Security
import __clrclasses__.System.Resources as Resources
import __clrclasses__.System.Globalization as Globalization
import __clrclasses__.System.Diagnostics as Diagnostics
import __clrclasses__.System.Reflection as Reflection
import __clrclasses__.System.Deployment as Deployment
import __clrclasses__.System.Runtime as Runtime
import __clrclasses__.System.Threading as Threading
import __clrclasses__.System.Dynamic as Dynamic
import __clrclasses__.System.Linq as Linq
import __clrclasses__.System.Management as Management
import __clrclasses__.System.Web as Web
import __clrclasses__.System.Timers as Timers
import __clrclasses__.System.Net as Net
import __clrclasses__.System.Windows as Windows
import __clrclasses__.System.Media as Media
import __clrclasses__.System.Collections as Collections
import __clrclasses__.System.ComponentModel as ComponentModel
import __clrclasses__.System.CodeDom as CodeDom
import __clrclasses__.System.Text as Text
from __clrclasses__.System.Collections import IList as _n_0_t_0
from __clrclasses__.System.Collections import IStructuralComparable as _n_0_t_1
from __clrclasses__.System.Collections import IStructuralEquatable as _n_0_t_2
from __clrclasses__.System.Collections import IComparer as _n_0_t_3
from __clrclasses__.System.Collections import IEnumerator as _n_0_t_4
from __clrclasses__.System.Collections import IDictionary as _n_0_t_5
from __clrclasses__.System.Collections import IEqualityComparer as _n_0_t_6
from __clrclasses__.System.Collections.Generic import IEnumerable as _n_1_t_0
from __clrclasses__.System.Collections.Generic import IComparer as _n_1_t_1
from __clrclasses__.System.Collections.Generic import IList as _n_1_t_2
from __clrclasses__.System.Collections.Generic import IReadOnlyList as _n_1_t_3
from __clrclasses__.System.Collections.Generic import IEnumerator as _n_1_t_4
from __clrclasses__.System.Collections.Generic import IEqualityComparer as _n_1_t_5
from __clrclasses__.System.Collections.ObjectModel import ReadOnlyCollection as _n_2_t_0
from __clrclasses__.System.ComponentModel import TypeConverter as _n_3_t_0
from __clrclasses__.System.Configuration.Assemblies import AssemblyHashAlgorithm as _n_4_t_0
from __clrclasses__.System.Globalization import CultureInfo as _n_5_t_0
from __clrclasses__.System.Globalization import NumberStyles as _n_5_t_1
from __clrclasses__.System.Globalization import UnicodeCategory as _n_5_t_2
from __clrclasses__.System.Globalization import Calendar as _n_5_t_3
from __clrclasses__.System.Globalization import DateTimeStyles as _n_5_t_4
from __clrclasses__.System.Globalization import TimeSpanStyles as _n_5_t_5
from __clrclasses__.System.Globalization import DaylightTime as _n_5_t_6
from __clrclasses__.System.IO import TextWriter as _n_6_t_0
from __clrclasses__.System.IO import TextReader as _n_6_t_1
from __clrclasses__.System.IO import Stream as _n_6_t_2
from __clrclasses__.System.Reflection import BindingFlags as _n_7_t_0
from __clrclasses__.System.Reflection import Binder as _n_7_t_1
from __clrclasses__.System.Reflection import AssemblyName as _n_7_t_2
from __clrclasses__.System.Reflection import Assembly as _n_7_t_3
from __clrclasses__.System.Reflection import Module as _n_7_t_4
from __clrclasses__.System.Reflection import ParameterInfo as _n_7_t_5
from __clrclasses__.System.Reflection import MemberInfo as _n_7_t_6
from __clrclasses__.System.Reflection import MethodInfo as _n_7_t_7
from __clrclasses__.System.Reflection import ICustomAttributeProvider as _n_7_t_8
from __clrclasses__.System.Reflection import IReflect as _n_7_t_9
from __clrclasses__.System.Reflection import MethodBase as _n_7_t_10
from __clrclasses__.System.Reflection import GenericParameterAttributes as _n_7_t_11
from __clrclasses__.System.Reflection import FieldInfo as _n_7_t_12
from __clrclasses__.System.Reflection import EventInfo as _n_7_t_13
from __clrclasses__.System.Reflection import PropertyInfo as _n_7_t_14
from __clrclasses__.System.Reflection import TypeInfo as _n_7_t_15
from __clrclasses__.System.Reflection.Emit import AssemblyBuilder as _n_8_t_0
from __clrclasses__.System.Reflection.Emit import AssemblyBuilderAccess as _n_8_t_1
from __clrclasses__.System.Runtime.ExceptionServices import FirstChanceExceptionEventArgs as _n_9_t_0
from __clrclasses__.System.Runtime.Hosting import ApplicationActivator as _n_10_t_0
from __clrclasses__.System.Runtime.Hosting import ActivationArguments as _n_10_t_1
from __clrclasses__.System.Runtime.InteropServices import _Exception as _n_11_t_0
from __clrclasses__.System.Runtime.InteropServices import _Activator as _n_11_t_1
from __clrclasses__.System.Runtime.InteropServices import _Attribute as _n_11_t_2
from __clrclasses__.System.Runtime.InteropServices import _MemberInfo as _n_11_t_3
from __clrclasses__.System.Runtime.InteropServices import _Type as _n_11_t_4
from __clrclasses__.System.Runtime.InteropServices import StructLayoutAttribute as _n_11_t_5
from __clrclasses__.System.Runtime.Remoting import ObjectHandle as _n_12_t_0
from __clrclasses__.System.Runtime.Remoting import ObjRef as _n_12_t_1
from __clrclasses__.System.Runtime.Serialization import ISerializable as _n_13_t_0
from __clrclasses__.System.Runtime.Serialization import IDeserializationCallback as _n_13_t_1
from __clrclasses__.System.Security import PermissionSet as _n_14_t_0
from __clrclasses__.System.Security import IEvidenceFactory as _n_14_t_1
from __clrclasses__.System.Security import HostSecurityManager as _n_14_t_2
from __clrclasses__.System.Security import SecurityState as _n_14_t_3
from __clrclasses__.System.Security.Policy import Evidence as _n_15_t_0
from __clrclasses__.System.Security.Policy import PolicyLevel as _n_15_t_1
from __clrclasses__.System.Security.Policy import ApplicationTrust as _n_15_t_2
from __clrclasses__.System.Security.Policy import StrongName as _n_15_t_3
from __clrclasses__.System.Security.Principal import PrincipalPolicy as _n_16_t_0
from __clrclasses__.System.Security.Principal import IPrincipal as _n_16_t_1
from __clrclasses__.System.Text import Encoding as _n_17_t_0
from __clrclasses__.System.Text import NormalizationForm as _n_17_t_1
from __clrclasses__.System.Threading import HostExecutionContextManager as _n_18_t_0
from __clrclasses__.System.Threading import WaitHandle as _n_18_t_1
from __clrclasses__.System.Threading import LazyThreadSafetyMode as _n_18_t_2
from __clrclasses__.System.Threading import CancellationToken as _n_18_t_3
import typing
T = typing.TypeVar('T')
TInput = typing.TypeVar('TInput')
TOutput = typing.TypeVar('TOutput')
TEventArgs = typing.TypeVar('TEventArgs')
TResult = typing.TypeVar('TResult')
T1 = typing.TypeVar('T1')
class _AppDomain():
    # Auto-generated .NET interop stub (the System._AppDomain COM interface).
    # Only signatures, return annotations and getter docstrings are emitted;
    # bodies are intentionally empty.  Overloaded members appear as repeated
    # `def`s of the same name — meaningful to a type checker, not at runtime.
    @property
    def BaseDirectory(self) -> str:"""BaseDirectory { get; } -> str"""
    @property
    def DynamicDirectory(self) -> str:"""DynamicDirectory { get; } -> str"""
    @property
    def Evidence(self) -> _n_15_t_0:"""Evidence { get; } -> Evidence"""
    @property
    def FriendlyName(self) -> str:"""FriendlyName { get; } -> str"""
    @property
    def RelativeSearchPath(self) -> str:"""RelativeSearchPath { get; } -> str"""
    @property
    def ShadowCopyFiles(self) -> bool:"""ShadowCopyFiles { get; } -> bool"""
    # .NET events are surfaced as read-only properties of the handler type
    @property
    def AssemblyLoad(self) -> AssemblyLoadEventHandler:
        """AssemblyLoad Event: AssemblyLoadEventHandler"""
    @property
    def AssemblyResolve(self) -> ResolveEventHandler:
        """AssemblyResolve Event: ResolveEventHandler"""
    @property
    def DomainUnload(self) -> EventHandler:
        """DomainUnload Event: EventHandler"""
    @property
    def ProcessExit(self) -> EventHandler:
        """ProcessExit Event: EventHandler"""
    @property
    def ResourceResolve(self) -> ResolveEventHandler:
        """ResourceResolve Event: ResolveEventHandler"""
    @property
    def TypeResolve(self) -> ResolveEventHandler:
        """TypeResolve Event: ResolveEventHandler"""
    @property
    def UnhandledException(self) -> UnhandledExceptionEventHandler:
        """UnhandledException Event: UnhandledExceptionEventHandler"""
    def AppendPrivatePath(self, path: str):...
    def ClearPrivatePath(self):...
    def ClearShadowCopyPath(self):...
    def CreateInstance(self, assemblyName: str, typeName: str, ignoreCase: bool, bindingAttr: _n_7_t_0, binder: _n_7_t_1, args: Array[object], culture: _n_5_t_0, activationAttributes: Array[object], securityAttributes: _n_15_t_0) -> _n_12_t_0:...
    def CreateInstance(self, assemblyName: str, typeName: str, activationAttributes: Array[object]) -> _n_12_t_0:...
    def CreateInstance(self, assemblyName: str, typeName: str) -> _n_12_t_0:...
    def CreateInstanceFrom(self, assemblyFile: str, typeName: str, ignoreCase: bool, bindingAttr: _n_7_t_0, binder: _n_7_t_1, args: Array[object], culture: _n_5_t_0, activationAttributes: Array[object], securityAttributes: _n_15_t_0) -> _n_12_t_0:...
    def CreateInstanceFrom(self, assemblyFile: str, typeName: str, activationAttributes: Array[object]) -> _n_12_t_0:...
    def CreateInstanceFrom(self, assemblyFile: str, typeName: str) -> _n_12_t_0:...
    def DefineDynamicAssembly(self, name: _n_7_t_2, access: _n_8_t_1, dir: str, evidence: _n_15_t_0, requiredPermissions: _n_14_t_0, optionalPermissions: _n_14_t_0, refusedPermissions: _n_14_t_0, isSynchronized: bool) -> _n_8_t_0:...
    def DefineDynamicAssembly(self, name: _n_7_t_2, access: _n_8_t_1, dir: str, evidence: _n_15_t_0, requiredPermissions: _n_14_t_0, optionalPermissions: _n_14_t_0, refusedPermissions: _n_14_t_0) -> _n_8_t_0:...
    def DefineDynamicAssembly(self, name: _n_7_t_2, access: _n_8_t_1, evidence: _n_15_t_0, requiredPermissions: _n_14_t_0, optionalPermissions: _n_14_t_0, refusedPermissions: _n_14_t_0) -> _n_8_t_0:...
    def DefineDynamicAssembly(self, name: _n_7_t_2, access: _n_8_t_1, dir: str, requiredPermissions: _n_14_t_0, optionalPermissions: _n_14_t_0, refusedPermissions: _n_14_t_0) -> _n_8_t_0:...
    def DefineDynamicAssembly(self, name: _n_7_t_2, access: _n_8_t_1, dir: str, evidence: _n_15_t_0) -> _n_8_t_0:...
    def DefineDynamicAssembly(self, name: _n_7_t_2, access: _n_8_t_1, requiredPermissions: _n_14_t_0, optionalPermissions: _n_14_t_0, refusedPermissions: _n_14_t_0) -> _n_8_t_0:...
    def DefineDynamicAssembly(self, name: _n_7_t_2, access: _n_8_t_1, evidence: _n_15_t_0) -> _n_8_t_0:...
    def DefineDynamicAssembly(self, name: _n_7_t_2, access: _n_8_t_1, dir: str) -> _n_8_t_0:...
    def DefineDynamicAssembly(self, name: _n_7_t_2, access: _n_8_t_1) -> _n_8_t_0:...
    def DoCallBack(self, theDelegate: CrossAppDomainDelegate):...
    def Equals(self, other: object) -> bool:...
    def ExecuteAssembly(self, assemblyFile: str, assemblySecurity: _n_15_t_0, args: Array[str]) -> int:...
    def ExecuteAssembly(self, assemblyFile: str) -> int:...
    def ExecuteAssembly(self, assemblyFile: str, assemblySecurity: _n_15_t_0) -> int:...
    def GetAssemblies(self) -> Array[_n_7_t_3]:...
    def GetData(self, name: str) -> object:...
    def GetHashCode(self) -> int:...
    # raw IDispatch plumbing (COM late binding)
    def GetIDsOfNames(self, riid: Guid, rgszNames: IntPtr, cNames: UInt32, lcid: UInt32, rgDispId: IntPtr):...
    def GetLifetimeService(self) -> object:...
    def GetType(self) -> Type:...
    def GetTypeInfo(self, iTInfo: UInt32, lcid: UInt32, ppTInfo: IntPtr):...
    def GetTypeInfoCount(self, pcTInfo: UInt32):...
    def InitializeLifetimeService(self) -> object:...
    def Invoke(self, dispIdMember: UInt32, riid: Guid, lcid: UInt32, wFlags: int, pDispParams: IntPtr, pVarResult: IntPtr, pExcepInfo: IntPtr, puArgErr: IntPtr):...
    def Load(self, assemblyString: str, assemblySecurity: _n_15_t_0) -> _n_7_t_3:...
    def Load(self, assemblyRef: _n_7_t_2, assemblySecurity: _n_15_t_0) -> _n_7_t_3:...
    def Load(self, rawAssembly: Array[Byte], rawSymbolStore: Array[Byte], securityEvidence: _n_15_t_0) -> _n_7_t_3:...
    def Load(self, rawAssembly: Array[Byte], rawSymbolStore: Array[Byte]) -> _n_7_t_3:...
    def Load(self, rawAssembly: Array[Byte]) -> _n_7_t_3:...
    def Load(self, assemblyString: str) -> _n_7_t_3:...
    def Load(self, assemblyRef: _n_7_t_2) -> _n_7_t_3:...
    def SetAppDomainPolicy(self, domainPolicy: _n_15_t_1):...
    def SetCachePath(self, s: str):...
    def SetData(self, name: str, data: object):...
    def SetPrincipalPolicy(self, policy: _n_16_t_0):...
    def SetShadowCopyPath(self, s: str):...
    def SetThreadPrincipal(self, principal: _n_16_t_1):...
    def ToString(self) -> str:...
class AccessViolationException(SystemException, _n_13_t_0, _n_11_t_0):
    # Auto-generated stub: constructor overloads only.  The `->` annotation
    # on __init__ mirrors the C# constructor signature as emitted by the
    # stub generator (it is not idiomatic Python).
    def __init__(self, message: str, innerException: Exception) -> AccessViolationException:...
    def __init__(self, message: str) -> AccessViolationException:...
    def __init__(self) -> AccessViolationException:...
class Action(MulticastDelegate, ICloneable, _n_13_t_0, typing.Generic[T]):
    # Auto-generated stub for the .NET Action[T] delegate: synchronous Invoke
    # plus the Begin/EndInvoke async-delegate pattern.
    def __init__(self, object: object, method: IntPtr) -> Action:...
    def BeginInvoke(self, obj: T, callback: AsyncCallback, object: object) -> IAsyncResult:...
    def EndInvoke(self, result: IAsyncResult):...
    def Invoke(self, obj: T):...
class ActivationContext(IDisposable, _n_13_t_0):
    # Auto-generated stub for System.ActivationContext (ClickOnce manifest
    # activation data).  Properties and static factory methods only.
    @property
    def ApplicationManifestBytes(self) -> Array[Byte]:"""ApplicationManifestBytes { get; } -> Array"""
    @property
    def DeploymentManifestBytes(self) -> Array[Byte]:"""DeploymentManifestBytes { get; } -> Array"""
    @property
    def Form(self) -> ActivationContext.ContextForm:"""Form { get; } -> ActivationContext.ContextForm"""
    @property
    def Identity(self) -> ApplicationIdentity:"""Identity { get; } -> ApplicationIdentity"""
    @staticmethod
    def CreatePartialActivationContext(identity: ApplicationIdentity, manifestPaths: Array[str]) -> ActivationContext:...
    @staticmethod
    def CreatePartialActivationContext(identity: ApplicationIdentity) -> ActivationContext:...
    # nested .NET enum, rendered as a class with int members by the generator
    class ContextForm(Enum, IComparable, IFormattable, IConvertible):
        Loose: int
        StoreBounded: int
        value__: int
class Activator(_n_11_t_1):
    # Auto-generated stub for System.Activator: static factory overloads for
    # creating object instances by Type, assembly/type name, activation
    # context, or target AppDomain.
    @staticmethod
    def CreateComInstanceFrom(assemblyName: str, typeName: str, hashValue: Array[Byte], hashAlgorithm: _n_4_t_0) -> _n_12_t_0:...
    @staticmethod
    def CreateComInstanceFrom(assemblyName: str, typeName: str) -> _n_12_t_0:...
    @staticmethod
    def CreateInstance(activationContext: ActivationContext, activationCustomData: Array[str]) -> _n_12_t_0:...
    @staticmethod
    def CreateInstance(activationContext: ActivationContext) -> _n_12_t_0:...
    @staticmethod
    def CreateInstance(domain: AppDomain, assemblyName: str, typeName: str, ignoreCase: bool, bindingAttr: _n_7_t_0, binder: _n_7_t_1, args: Array[object], culture: _n_5_t_0, activationAttributes: Array[object]) -> _n_12_t_0:...
    @staticmethod
    def CreateInstance(domain: AppDomain, assemblyName: str, typeName: str, ignoreCase: bool, bindingAttr: _n_7_t_0, binder: _n_7_t_1, args: Array[object], culture: _n_5_t_0, activationAttributes: Array[object], securityAttributes: _n_15_t_0) -> _n_12_t_0:...
    @staticmethod
    def CreateInstance(domain: AppDomain, assemblyName: str, typeName: str) -> _n_12_t_0:...
    @staticmethod
    def CreateInstance(assemblyName: str, typeName: str, ignoreCase: bool, bindingAttr: _n_7_t_0, binder: _n_7_t_1, args: Array[object], culture: _n_5_t_0, activationAttributes: Array[object]) -> _n_12_t_0:...
    @staticmethod
    def CreateInstance(assemblyName: str, typeName: str, ignoreCase: bool, bindingAttr: _n_7_t_0, binder: _n_7_t_1, args: Array[object], culture: _n_5_t_0, activationAttributes: Array[object], securityInfo: _n_15_t_0) -> _n_12_t_0:...
    @staticmethod
    def CreateInstance() -> typing.Any:...
    @staticmethod
    def CreateInstance(type: Type, nonPublic: bool) -> object:...
    @staticmethod
    def CreateInstance(assemblyName: str, typeName: str, activationAttributes: Array[object]) -> _n_12_t_0:...
    @staticmethod
    def CreateInstance(assemblyName: str, typeName: str) -> _n_12_t_0:...
    @staticmethod
    def CreateInstance(type: Type) -> object:...
    @staticmethod
    def CreateInstance(type: Type, args: Array[object], activationAttributes: Array[object]) -> object:...
    @staticmethod
    def CreateInstance(type: Type, args: Array[object]) -> object:...
    @staticmethod
    def CreateInstance(type: Type, bindingAttr: _n_7_t_0, binder: _n_7_t_1, args: Array[object], culture: _n_5_t_0, activationAttributes: Array[object]) -> object:...
| |
<gh_stars>10-100
"""@file layer.py
Neural network layers """
import string

import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops.rnn import bidirectional_dynamic_rnn, dynamic_rnn

# local imports last so that `ops` names the nabu components ops module
# (used below as ops.squash); the tensorflow.python.framework `ops` import
# above would otherwise shadow it
from nabu.neuralnetworks.components import ops, rnn_cell, rnn, rnn_cell_impl
from ops import capsule_initializer
_alphabet_str = string.ascii_lowercase
class Capsule(tf.layers.Layer):
    """a capsule layer: maps input capsules to output capsules by first
    predicting the output capsules from each input capsule and then
    clustering the predictions with routing-by-agreement"""

    def __init__(
            self, num_capsules, capsule_dim,
            kernel_initializer=None,
            logits_initializer=None,
            logits_prior=False,
            routing_iters=3,
            activation_fn=None,
            probability_fn=None,
            activity_regularizer=None,
            trainable=True,
            name=None,
            **kwargs):
        """Capsule layer constructor

        args:
            num_capsules: number of output capsules
            capsule_dim: output capsule dimension
            kernel_initializer: an initializer for the prediction kernel
            logits_initializer: the initializer for the initial logits
            logits_prior: whether the initial routing logits are trainable
            routing_iters: the number of routing iterations (default: 3)
            activation_fn: a callable activation function (default: squash)
            probability_fn: a callable that takes in logits and returns weights
                (default: tf.nn.softmax)
            activity_regularizer: Regularizer instance for the output (callable)
            trainable: whether the layer is trainable
            name: the name of the layer
        """

        super(Capsule, self).__init__(
            trainable=trainable,
            name=name,
            activity_regularizer=activity_regularizer,
            **kwargs)

        self.num_capsules = num_capsules
        self.capsule_dim = capsule_dim
        # fall back to package defaults when no initializers/callables given
        self.kernel_initializer = kernel_initializer or capsule_initializer()
        self.logits_initializer = logits_initializer or tf.zeros_initializer()
        self.logits_prior = logits_prior
        self.routing_iters = routing_iters
        self.activation_fn = activation_fn or ops.squash
        self.probability_fn = probability_fn or tf.nn.softmax

    def build(self, input_shape):
        """creates the variables of this layer

        args:
            input_shape: the shape of the input
        """

        # pylint: disable=W0201

        # input dimensions: [..., num_capsules_in, capsule_dim_in]
        num_capsules_in = input_shape[-2].value
        capsule_dim_in = input_shape[-1].value

        if num_capsules_in is None:
            raise ValueError('number of input capsules must be defined')
        if capsule_dim_in is None:
            raise ValueError('input capsules dimension must be defined')

        # prediction kernel of shape [Nin, Din, Nout, Dout]
        self.kernel = self.add_variable(
            name='kernel',
            dtype=self.dtype,
            shape=[num_capsules_in, capsule_dim_in, self.num_capsules, self.capsule_dim],
            initializer=self.kernel_initializer)

        # initial routing logits of shape [Nin, Nout]; only trainable when
        # they are used as a learned prior
        self.logits = self.add_variable(
            name='init_logits',
            dtype=self.dtype,
            shape=[num_capsules_in, self.num_capsules],
            initializer=self.logits_initializer,
            trainable=self.logits_prior
        )

        super(Capsule, self).build(input_shape)

    # pylint: disable=W0221
    def call(self, inputs):
        """
        apply the layer

        args:
            inputs: the inputs to the layer. the final two dimensions are
                num_capsules_in and capsule_dim_in

        returns the output capsules with the last two dimensions
            num_capsules and capsule_dim
        """
        # compute the predictions
        predictions, logits = self.predict(inputs)

        # cluster the predictions
        outputs = self.cluster(predictions, logits)

        return outputs

    def predict(self, inputs):
        """
        compute the predictions for the output capsules and initialize the
        routing logits

        args:
            inputs: the inputs to the layer. the final two dimensions are
                num_capsules_in and capsule_dim_in

        returns: the output capsule predictions
        """

        with tf.name_scope('predict'):

            # number of shared dimensions (typically batch and time)
            rank = len(inputs.shape)
            shared = rank-2

            # put the input capsules as the first dimension so map_fn can
            # iterate over them in lockstep with the kernel
            # NOTE(review): `[shared] + range(shared) + [rank-1]` concatenates
            # a list with a range, which only works on Python 2 where range()
            # returns a list
            inputs = tf.transpose(inputs, [shared] + range(shared) + [rank-1])

            # compute the predictions, one input capsule at a time
            predictions = tf.map_fn(
                fn=lambda x: tf.tensordot(x[0], x[1], [[shared], [0]]),
                elems=(inputs, self.kernel),
                dtype=self.dtype or tf.float32)

            # transpose back so the shared dimensions lead again
            predictions = tf.transpose(
                predictions, range(1, shared+1)+[0]+[rank-1, rank])

            logits = self.logits

            # tile the [Nin, Nout] logits over the shared dimensions, using a
            # dynamic shape only where the static one is unknown
            for i in range(shared):
                if predictions.shape[shared-i-1].value is None:
                    shape = tf.shape(predictions)[shared-i-1]
                else:
                    shape = predictions.shape[shared-i-1].value
                tile = [shape] + [1]*len(logits.shape)
                logits = tf.tile(tf.expand_dims(logits, 0), tile)

        return predictions, logits

    def predict_slow(self, inputs):
        """
        compute the predictions for the output capsules and initialize the
        routing logits (einsum-based variant of predict)

        args:
            inputs: the inputs to the layer. the final two dimensions are
                num_capsules_in and capsule_dim_in

        returns: the output capsule predictions
        """

        with tf.name_scope('predict'):

            # number of shared dimensions
            rank = len(inputs.shape)
            shared = rank-2

            # 'wxyz' are reserved for the capsule axes below, so only 26-4
            # letters remain for the shared axes
            if shared > 26-4:
                raise ValueError('Not enough letters in the alphabet to use Einstein notation')

            # input_shape = [shared (typically batch_size,time),Nin,Din],
            # kernel_shape = [Nin, Din, Nout, Dout],
            # predictions_shape = [shared,Nin,Nout,Dout]
            shared_shape_str = _alphabet_str[0:shared]
            input_shape_str = shared_shape_str+'wx'
            kernel_shape_str = 'wxyz'
            output_shape_str = shared_shape_str+'wyz'
            ein_not = '%s,%s->%s' % (input_shape_str, kernel_shape_str, output_shape_str)

            predictions = tf.einsum(ein_not, inputs, self.kernel)

            logits = self.logits

            # tile the logits over the shared dimensions (see predict)
            for i in range(shared):
                if predictions.shape[shared-i-1].value is None:
                    shape = tf.shape(predictions)[shared-i-1]
                else:
                    shape = predictions.shape[shared-i-1].value
                tile = [shape] + [1]*len(logits.shape)
                logits = tf.tile(tf.expand_dims(logits, 0), tile)

        return predictions, logits

    def cluster(self, predictions, logits):
        """cluster the predictions into output capsules
        (routing-by-agreement)

        args:
            predictions: the predicted output capsules
            logits: the initial routing logits

        returns:
            the output capsules
        """

        with tf.name_scope('cluster'):

            # define m-step
            def m_step(l):
                """m step: capsule contents as the routing-weighted sum of
                the predictions over the input-capsule axis"""
                with tf.name_scope('m_step'):
                    # compute the capsule contents
                    w = self.probability_fn(l)
                    caps = tf.reduce_sum(
                        tf.expand_dims(w, -1)*predictions, -3)

                return caps, w

            # define body of the while loop
            def body(l):
                """body: one routing iteration, returns the updated logits"""
                caps, _ = m_step(l)
                caps = self.activation_fn(caps)

                # compare the capsule contents with the predictions; agreement
                # (dot product) reinforces the corresponding routing logits
                similarity = tf.reduce_sum(
                    predictions*tf.expand_dims(caps, -3), -1)

                return l + similarity

            # get the final logits with the while loop; the condition is
            # always True, so the iteration count is controlled solely by
            # maximum_iterations
            lo = tf.while_loop(
                lambda l: True,
                body, [logits],
                maximum_iterations=self.routing_iters)

            # get the final output capsules
            capsules, _ = m_step(lo)
            capsules = self.activation_fn(capsules)

        return capsules

    def compute_output_shape(self, input_shape):
        """compute the output shape: the two capsule axes are replaced by
        [num_capsules, capsule_dim]"""

        if input_shape[-2].value is None:
            raise ValueError(
                'The number of capsules must be defined, but saw: %s'
                % input_shape)
        if input_shape[-1].value is None:
            raise ValueError(
                'The capsule dimension must be defined, but saw: %s'
                % input_shape)

        return input_shape[:-2].concatenate(
            [self.num_capsules, self.capsule_dim])
class BRCapsuleLayer(object):
    """a Bidirectional recurrent capsule layer"""

    def __init__(
            self, num_capsules, capsule_dim, routing_iters=3, activation=None, input_probability_fn=None,
            recurrent_probability_fn=None, rec_only_vote=False, logits_prior=False, accumulate_input_logits=True,
            accumulate_state_logits=True):
        """
        BRCapsuleLayer constructor

        Args:
            num_capsules: number of output capsules
            capsule_dim: output capsule dimension
            routing_iters: number of routing iterations
            activation: activation callable handed to the recurrent cell
            input_probability_fn: probability callable for the input logits
            recurrent_probability_fn: probability callable for the recurrent
                logits
            rec_only_vote: when True use the RecOnlyVote cell variant
            logits_prior: whether the initial logits are trainable
            accumulate_input_logits: passed to the RecOnlyVote cell variant
            accumulate_state_logits: passed to the RecOnlyVote cell variant
        """

        self.num_capsules = num_capsules
        self.capsule_dim = capsule_dim
        self.routing_iters = routing_iters
        self._activation = activation
        self.input_probability_fn = input_probability_fn
        self.recurrent_probability_fn = recurrent_probability_fn
        self.rec_only_vote = rec_only_vote
        self.logits_prior = logits_prior
        self.accumulate_input_logits = accumulate_input_logits
        self.accumulate_state_logits = accumulate_state_logits

    def __call__(self, inputs, sequence_length, scope=None):
        """
        Create the variables and do the forward computation

        Args:
            inputs: the input to the layer as a
                [batch_size, max_length, dim] tensor
            sequence_length: the length of the input sequences as a
                [batch_size] tensor
            scope: The variable scope sets the namespace under which
                the variables created during this call will be stored.

        Returns:
            the output of the layer
        """

        with tf.variable_scope(scope or type(self).__name__):

            # constructor arguments shared by both directions
            cell_kwargs = dict(
                num_capsules=self.num_capsules,
                capsule_dim=self.capsule_dim,
                routing_iters=self.routing_iters,
                activation=self._activation,
                input_probability_fn=self.input_probability_fn,
                recurrent_probability_fn=self.recurrent_probability_fn,
                logits_prior=self.logits_prior,
                reuse=tf.get_variable_scope().reuse)

            # the RecOnlyVote variant takes two extra accumulation flags
            if self.rec_only_vote:
                cell_type = rnn_cell.RecCapsuleCellRecOnlyVote
                cell_kwargs['accumulate_input_logits'] = self.accumulate_input_logits
                cell_kwargs['accumulate_state_logits'] = self.accumulate_state_logits
            else:
                cell_type = rnn_cell.RecCapsuleCell

            cell_fw = cell_type(**cell_kwargs)
            cell_bw = cell_type(**cell_kwargs)

            # run both directions and stack their outputs feature-wise
            outputs_tupple, _ = bidirectional_dynamic_rnn(
                cell_fw, cell_bw, inputs, dtype=tf.float32,
                sequence_length=sequence_length)

            outputs = tf.concat(outputs_tupple, 2)

            return outputs
class BLSTMCapsuleLayer(object):
    """a Bidirectional lstm capsule layer"""

    def __init__(
            self, num_capsules, capsule_dim, routing_iters=3, activation=None, input_probability_fn=None,
            recurrent_probability_fn=None, logits_prior=False, accumulate_input_logits=True,
            accumulate_state_logits=True, gates_fc=False, use_output_matrix=False):
        """
        BLSTMCapsuleLayer constructor

        Args:
            num_capsules: number of output capsules
            capsule_dim: output capsule dimension
            routing_iters: number of routing iterations
            activation: activation callable handed to the lstm capsule cell
            input_probability_fn: probability callable for the input logits
            recurrent_probability_fn: probability callable for the recurrent
                logits
            logits_prior: whether the initial logits are trainable
            accumulate_input_logits: whether input logits are accumulated
            accumulate_state_logits: whether state logits are accumulated
            gates_fc: passed to the cell's gate configuration
            use_output_matrix: when True use the OutputMatrix cell variant
        """

        self.num_capsules = num_capsules
        self.capsule_dim = capsule_dim
        self.routing_iters = routing_iters
        self._activation = activation
        self.input_probability_fn = input_probability_fn
        self.recurrent_probability_fn = recurrent_probability_fn
        self.logits_prior = logits_prior
        self.accumulate_input_logits = accumulate_input_logits
        self.accumulate_state_logits = accumulate_state_logits
        self.gates_fc = gates_fc
        self.use_output_matrix = use_output_matrix

    def __call__(self, inputs, sequence_length, scope=None):
        """
        Create the variables and do the forward computation

        Args:
            inputs: the input to the layer as a
                [batch_size, max_length, dim] tensor
            sequence_length: the length of the input sequences as a
                [batch_size] tensor
            scope: The variable scope sets the namespace under which
                the variables created during this call will be stored.

        Returns:
            the output of the layer
        """

        with tf.variable_scope(scope or type(self).__name__):

            # choose the lstm capsule cell variant
            if self.use_output_matrix:
                cell_type = rnn_cell.LSTMCapsuleCellRecOnlyVoteOutputMatrix
            else:
                cell_type = rnn_cell.LSTMCapsuleCellRecOnlyVote

            def _make_cell():
                """build one direction's cell with the layer's settings"""
                return cell_type(
                    num_capsules=self.num_capsules,
                    capsule_dim=self.capsule_dim,
                    routing_iters=self.routing_iters,
                    activation=self._activation,
                    input_probability_fn=self.input_probability_fn,
                    recurrent_probability_fn=self.recurrent_probability_fn,
                    logits_prior=self.logits_prior,
                    accumulate_input_logits=self.accumulate_input_logits,
                    accumulate_state_logits=self.accumulate_state_logits,
                    gates_fc=self.gates_fc,
                    reuse=tf.get_variable_scope().reuse)

            cell_fw = _make_cell()
            cell_bw = _make_cell()

            # run both directions and stack their outputs feature-wise
            outputs_tupple, _ = bidirectional_dynamic_rnn(
                cell_fw, cell_bw, inputs, dtype=tf.float32,
                sequence_length=sequence_length)

            outputs = tf.concat(outputs_tupple, 2)

            return outputs
class BRNNLayer(object):
    """a BRNN layer"""

    def __init__(self, num_units, activation_fn=tf.nn.tanh, linear_out_flag=False):
        """
        BRNNLayer constructor

        Args:
            num_units: The number of units in the one directon
            activation_fn: activation function
            linear_out_flag: if set to True, activation function will only be
                applied to the recurrent output.
        """

        self.num_units = num_units
        self.activation_fn = activation_fn
        self.linear_out_flag = linear_out_flag

    def __call__(self, inputs, sequence_length, scope=None):
        """
        Create the variables and do the forward computation

        Args:
            inputs: the input to the layer as a
                [batch_size, max_length, dim] tensor
            sequence_length: the length of the input sequences as a
                [batch_size] tensor
            scope: The variable scope sets the namespace under which
                the variables created during this call will be stored.

        Returns:
            the output of the layer
        """

        with tf.variable_scope(scope or type(self).__name__):

            # pick the cell class; the LinearOut variant applies the
            # activation only to the recurrent output
            cell_type = (rnn_cell.RNNCellLinearOut if self.linear_out_flag
                         else tf.contrib.rnn.BasicRNNCell)
            reuse = tf.get_variable_scope().reuse

            fw_cell = cell_type(
                num_units=self.num_units,
                activation=self.activation_fn,
                reuse=reuse)
            bw_cell = cell_type(
                num_units=self.num_units,
                activation=self.activation_fn,
                reuse=reuse)

            # run both directions and stack their outputs feature-wise
            outputs_tupple, _ = bidirectional_dynamic_rnn(
                fw_cell, bw_cell, inputs, dtype=tf.float32,
                sequence_length=sequence_length)

            return tf.concat(outputs_tupple, 2)
class LSTMLayer(object):
    """a LSTM layer"""

    def __init__(self, num_units, layer_norm=False, recurrent_dropout=1.0, activation_fn=tf.nn.tanh):
        """
        LSTMLayer constructor

        Args:
            num_units: The number of units in the one directon
            layer_norm: whether layer normalization should be applied
            recurrent_dropout: the recurrent dropout keep probability
            activation_fn: activation function
        """

        self.num_units = num_units
        self.layer_norm = layer_norm
        self.recurrent_dropout = recurrent_dropout
        self.activation_fn = activation_fn

    def __call__(self, inputs, sequence_length, scope=None):
        """
        Create the variables and do the forward computation

        Args:
            inputs: the input to the layer as a
                [batch_size, max_length, dim] tensor
            sequence_length: the length of the input sequences as a
                [batch_size] tensor
            scope: The variable scope sets the namespace under which
                the variables created during this call will be stored.

        Returns:
            the output of the layer
        """

        with tf.variable_scope(scope or type(self).__name__):

            # a single unidirectional cell, optionally layer-normalized and
            # with recurrent dropout
            cell = tf.contrib.rnn.LayerNormBasicLSTMCell(
                num_units=self.num_units,
                activation=self.activation_fn,
                layer_norm=self.layer_norm,
                dropout_keep_prob=self.recurrent_dropout,
                reuse=tf.get_variable_scope().reuse)

            lstm_outputs, _ = dynamic_rnn(
                cell, inputs, dtype=tf.float32,
                sequence_length=sequence_length)

            return lstm_outputs
class BLSTMLayer(object):
    """a BLSTM layer"""

    def __init__(
            self, num_units, layer_norm=False, recurrent_dropout=1.0, activation_fn=tf.nn.tanh,
            separate_directions=False, linear_out_flag=False, fast_version=False):
        """
        BLSTMLayer constructor

        Args:
            num_units: The number of units in the one directon
            layer_norm: whether layer normalization should be applied
            recurrent_dropout: the recurrent dropout keep probability
            activation_fn: activation function
            separate_directions: whether the forward and backward directions
                should be separated for deep networks.
            linear_out_flag: if set to True, use the cell variant that only
                applies the activation to the recurrent output
            fast_version: deprecated
        """

        self.num_units = num_units
        self.layer_norm = layer_norm
        self.recurrent_dropout = recurrent_dropout
        self.activation_fn = activation_fn
        self.separate_directions = separate_directions
        self.linear_out_flag = linear_out_flag
        self.fast_version = fast_version

    def __call__(self, inputs, sequence_length, scope=None):
        """
        Create the variables and do the forward computation

        Args:
            inputs: the input to the layer as a
                [batch_size, max_length, dim] tensor
            sequence_length: the length of the input sequences as a
                [batch_size] tensor
            scope: The variable scope sets the namespace under which
                the variables created during this call will be stored.

        Returns:
            the output of the layer
        """

        with tf.variable_scope(scope or type(self).__name__):

            # choose the cell class for both directions
            cell_type = (rnn_cell.LayerNormBasicLSTMCellLineairOut
                         if self.linear_out_flag
                         else tf.contrib.rnn.LayerNormBasicLSTMCell)

            def _make_cell():
                """build one direction's lstm cell"""
                return cell_type(
                    num_units=self.num_units,
                    activation=self.activation_fn,
                    layer_norm=self.layer_norm,
                    dropout_keep_prob=self.recurrent_dropout,
                    reuse=tf.get_variable_scope().reuse)

            cell_fw = _make_cell()
            cell_bw = _make_cell()

            if self.separate_directions:
                # keep the two directions separate; inputs is then a pair of
                # tensors, one per direction
                outputs, _ = rnn.bidirectional_dynamic_rnn_2inputs(
                    cell_fw, cell_bw, inputs[0], inputs[1], dtype=tf.float32,
                    sequence_length=sequence_length)
            else:
                outputs_tupple, _ = bidirectional_dynamic_rnn(
                    cell_fw, cell_bw, inputs, dtype=tf.float32,
                    sequence_length=sequence_length)
                outputs = tf.concat(outputs_tupple, 2)

            return outputs
class LeakyLSTMLayer(object):
"""a leaky LSTM layer"""
    def __init__(self, num_units, layer_norm=False, recurrent_dropout=1.0, leak_factor=1.0):
        """
        LeakyLSTMLayer constructor.

        Args:
            num_units: The number of units in the one direction
            layer_norm: whether layer normalization should be applied
            recurrent_dropout: the recurrent dropout keep probability
            leak_factor: the leak factor (if 1, there is no leakage)
        """
        # only store the hyperparameters here; variables are created lazily
        # when the layer is called
        self.num_units = num_units
        self.layer_norm = layer_norm
        self.recurrent_dropout = recurrent_dropout
        self.leak_factor = leak_factor
def __call__(self, inputs, sequence_length, scope=None):
"""
Create the variables and do the forward computation
Args:
inputs: the input to the layer as a
[batch_size, max_length, dim] tensor
sequence_length: the length of the input sequences as a
[batch_size] tensor
scope: The variable scope sets the namespace under which
the variables created during this call will be stored.
Returns:
the output of the layer
"""
with tf.variable_scope(scope or type(self).__name__):
# create | |
<filename>main/settings.py
"""
Django settings for ocw_studio.
"""
import logging
import os
import platform
from urllib.parse import urlparse
import dj_database_url
from django.core.exceptions import ImproperlyConfigured
from mitol.common.envs import (
get_bool,
get_delimited_list,
get_features,
get_int,
get_site_name,
get_string,
import_settings_modules,
init_app_settings,
)
from main.sentry import init_sentry
# --- App version, site, environment and Sentry bootstrap ------------------
VERSION = "0.31.0"
SITE_ID = get_int(
    name="OCW_STUDIO_SITE_ID",
    default=1,
    description="The default site id for django sites framework",
)
# Sentry
ENVIRONMENT = get_string(
    name="OCW_STUDIO_ENVIRONMENT",
    default="dev",
    description="The execution environment that the app is in (e.g. dev, staging, prod)",
    required=True,
)
# this is only available to heroku review apps
HEROKU_APP_NAME = get_string(
    name="HEROKU_APP_NAME", default=None, description="The name of the review app"
)
# initialize Sentry before doing anything else so we capture any config errors
SENTRY_DSN = get_string(
    name="SENTRY_DSN", default="", description="The connection settings for Sentry"
)
SENTRY_LOG_LEVEL = get_string(
    name="SENTRY_LOG_LEVEL", default="ERROR", description="The log level for Sentry"
)
init_sentry(
    dsn=SENTRY_DSN,
    environment=ENVIRONMENT,
    version=VERSION,
    log_level=SENTRY_LOG_LEVEL,
    heroku_app_name=HEROKU_APP_NAME,
)
# register the app-wide settings namespace, then pull in shared settings
# modules from the mitol libraries (they mutate globals() directly)
init_app_settings(namespace="OCW_STUDIO", site_name="OCW Studio")
SITE_NAME = get_site_name()
import_settings_modules(
    globals(),
    "mitol.common.settings.base",
    "mitol.common.settings.webpack",
    "mitol.mail.settings.email",
    "mitol.authentication.settings.touchstone",
)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = get_string(
    name="SECRET_KEY", default=None, description="Django secret key.", required=True
)
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = get_bool(
    name="DEBUG",
    default=False,
    dev_only=True,
    description="Set to True to enable DEBUG mode. Don't turn on in production.",
)
ALLOWED_HOSTS = ["*"]
SECURE_SSL_REDIRECT = get_bool(
    name="OCW_STUDIO_SECURE_SSL_REDIRECT",
    default=True,
    description="Application-level SSL redirect setting.",
)
USE_X_FORWARDED_HOST = get_bool(
    name="USE_X_FORWARDED_HOST",
    default=False,
    description="Set HOST header to original domain accessed by user",
)
USE_X_FORWARDED_PORT = get_bool(
    name="USE_X_FORWARDED_PORT",
    default=False,
    description="Use the PORT from original url accessed by user",
)
# django-webpack-loader configuration; stats caching is disabled in DEBUG so
# bundle changes are picked up without restarting
WEBPACK_LOADER = {
    "DEFAULT": {
        "CACHE": not DEBUG,
        "BUNDLE_DIR_NAME": "bundles/",
        "STATS_FILE": os.path.join(BASE_DIR, "webpack-stats.json"),
        "POLL_INTERVAL": 0.1,
        "TIMEOUT": None,
        # NOTE(review): r".+\.hot-update\.+" looks like it intends
        # r".+\.hot-update\..+" — confirm against webpack output names
        "IGNORE": [r".+\.hot-update\.+", r".+\.js\.map"],
    }
}
# --- Installed apps, middleware, templates and database -------------------
# configure a custom user model
AUTH_USER_MODEL = "users.User"
# Application definition
INSTALLED_APPS = (
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.messages",
    "django.contrib.staticfiles",
    "django.contrib.sites",
    "compat",
    "guardian",
    "hijack",
    "hijack_admin",
    "server_status",
    "safedelete",
    # django-robots
    "rest_framework",
    "social_django",
    "robots",
    "anymail",
    # Put our apps after this point
    "main",
    "users",
    "websites",
    "ocw_import",
    "news",
    "content_sync",
    "gdrive_sync",
    "videos",
    # common apps, need to be after ocw-studio apps for template overriding
    "mitol.common.apps.CommonApp",
    "mitol.authentication.apps.AuthenticationApp",
    "mitol.mail.apps.MailApp",
)
# local-development helpers are excluded in production environments
if ENVIRONMENT not in {"prod", "production"}:
    INSTALLED_APPS += ("localdev",)
DISABLE_WEBPACK_LOADER_STATS = get_bool(
    name="DISABLE_WEBPACK_LOADER_STATS",
    default=False,
    description="Disabled webpack loader stats",
)
if not DISABLE_WEBPACK_LOADER_STATS:
    INSTALLED_APPS += ("webpack_loader",)
MIDDLEWARE = (
    "django.middleware.security.SecurityMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
    "django.contrib.sites.middleware.CurrentSiteMiddleware",
    "main.middleware.CachelessAPIMiddleware",
)
# enable the nplusone profiler only in debug mode
if DEBUG:
    INSTALLED_APPS += ("nplusone.ext.django",)
    MIDDLEWARE += ("nplusone.ext.django.NPlusOneMiddleware",)
# cookie-based sessions: no DB/cache round-trip, but sessions are visible
# (signed, not encrypted) to the client
SESSION_ENGINE = "django.contrib.sessions.backends.signed_cookies"
LOGIN_REDIRECT_URL = "/"
LOGIN_URL = "/"
LOGIN_ERROR_URL = "/"
ROOT_URLCONF = "main.urls"
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [f"{BASE_DIR}/templates"],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.debug",
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
            ],
        },
    },
]
WSGI_APPLICATION = "main.wsgi.application"
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DEFAULT_DATABASE_CONFIG = dj_database_url.parse(
    get_string(
        name="DATABASE_URL",
        default="sqlite:///{0}".format(os.path.join(BASE_DIR, "db.sqlite3")),
        description="The connection url to the Postgres database",
        required=True,
        write_app_json=False,
    )
)
DEFAULT_DATABASE_CONFIG["CONN_MAX_AGE"] = get_int(
    name="OCW_STUDIO_DB_CONN_MAX_AGE",
    default=0,
    description="Maximum age of connection to Postgres in seconds",
)
# If True, disables server-side database cursors to prevent invalid cursor errors when using pgbouncer
DEFAULT_DATABASE_CONFIG["DISABLE_SERVER_SIDE_CURSORS"] = get_bool(
    name="OCW_STUDIO_DB_DISABLE_SS_CURSORS",
    default=True,
    description="Disables Postgres server side cursors",
)
# SSL to Postgres is required unless explicitly disabled (e.g. local dev)
if get_bool(
    name="OCW_STUDIO_DB_DISABLE_SSL",
    default=False,
    description="Disables SSL to postgres if set to True",
):
    DEFAULT_DATABASE_CONFIG["OPTIONS"] = {}
else:
    DEFAULT_DATABASE_CONFIG["OPTIONS"] = {"sslmode": "require"}
DATABASES = {"default": DEFAULT_DATABASE_CONFIG}
# --- Internationalization, static files and logging -----------------------
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# django-robots
ROBOTS_USE_HOST = False
ROBOTS_CACHE_TIMEOUT = get_int(
    name="ROBOTS_CACHE_TIMEOUT",
    default=60 * 60 * 24,
    description="How long the robots.txt file should be cached",
)
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
# Serve static files with dj-static
STATIC_URL = "/static/"
STATIC_ROOT = "staticfiles"
STATICFILES_DIRS = (os.path.join(BASE_DIR, "static"),)
# Important to define this so DEBUG works properly
INTERNAL_IPS = (
    get_string(
        name="HOST_IP", default="127.0.0.1", description="This server's host IP"
    ),
)
# e-mail configurable admins
ADMIN_EMAIL = get_string(
    name="OCW_STUDIO_ADMIN_EMAIL",
    default="",
    description="E-mail to send 500 reports to.",
)
if ADMIN_EMAIL != "":
    ADMINS = (("Admins", ADMIN_EMAIL),)
else:
    ADMINS = ()
# Logging configuration
LOG_LEVEL = get_string(
    name="OCW_STUDIO_LOG_LEVEL", default="INFO", description="The log level default"
)
DJANGO_LOG_LEVEL = get_string(
    name="DJANGO_LOG_LEVEL", default="INFO", description="The log level for django"
)
# For logging to a remote syslog host
LOG_HOST = get_string(
    name="OCW_STUDIO_LOG_HOST",
    default="localhost",
    description="Remote syslog server hostname",
)
LOG_HOST_PORT = get_int(
    name="OCW_STUDIO_LOG_HOST_PORT",
    default=514,
    description="Remote syslog server port",
)
# short hostname, baked into every log line by the formatter below
HOSTNAME = platform.node().split(".")[0]
# nplusone profiler logger configuration
NPLUSONE_LOGGER = logging.getLogger("nplusone")
NPLUSONE_LOG_LEVEL = logging.ERROR
# all logs go to the console and remote syslog; django.request errors are
# additionally mailed to ADMINS when DEBUG is off
LOGGING = {
    "version": 1,
    "disable_existing_loggers": False,
    "filters": {
        "require_debug_false": {
            "()": "django.utils.log.RequireDebugFalse",
        }
    },
    "formatters": {
        "verbose": {
            "format": (
                "[%(asctime)s] %(levelname)s %(process)d [%(name)s] "
                "%(filename)s:%(lineno)d - "
                "[{hostname}] - %(message)s"
            ).format(hostname=HOSTNAME),
            "datefmt": "%Y-%m-%d %H:%M:%S",
        }
    },
    "handlers": {
        "console": {
            "level": "DEBUG",
            "class": "logging.StreamHandler",
            "formatter": "verbose",
        },
        "syslog": {
            "level": LOG_LEVEL,
            "class": "logging.handlers.SysLogHandler",
            "facility": "local7",
            "formatter": "verbose",
            "address": (LOG_HOST, LOG_HOST_PORT),
        },
        "mail_admins": {
            "level": "ERROR",
            "filters": ["require_debug_false"],
            "class": "django.utils.log.AdminEmailHandler",
        },
    },
    "loggers": {
        "django": {
            "propagate": True,
            "level": DJANGO_LOG_LEVEL,
            "handlers": ["console", "syslog"],
        },
        "django.request": {
            "handlers": ["mail_admins"],
            "level": DJANGO_LOG_LEVEL,
            "propagate": True,
        },
        "nplusone": {
            "handlers": ["console"],
            "level": "ERROR",
        },
    },
    "root": {
        "handlers": ["console", "syslog"],
        "level": LOG_LEVEL,
    },
}
# --- External integrations: status, GA, S3, Google Drive, YouTube, Celery --
# server-status
STATUS_TOKEN = get_string(
    name="STATUS_TOKEN", default="", description="Token to access the status API."
)
HEALTH_CHECK = ["CELERY", "REDIS", "POSTGRES"]
GA_TRACKING_ID = get_string(
    name="GA_TRACKING_ID", default="", description="Google analytics tracking ID"
)
REACT_GA_DEBUG = get_bool(
    name="REACT_GA_DEBUG",
    default=False,
    dev_only=True,
    description="Enable debug for react-ga, development only",
)
MEDIA_ROOT = get_string(
    name="MEDIA_ROOT",
    default="/var/media/",
    description="The root directory for locally stored media. Typically not used.",
)
MEDIA_URL = "/media/"
OCW_STUDIO_USE_S3 = get_bool(
    name="OCW_STUDIO_USE_S3",
    default=False,
    description="Use S3 for storage backend (required on Heroku)",
)
MAX_S3_GET_ITERATIONS = get_int(
    name="MAX_S3_GET_ITERATIONS",
    default=3,
    description="Max retry attempts to get an S3 object",
)
AWS_ACCESS_KEY_ID = get_string(
    name="AWS_ACCESS_KEY_ID", default=None, description="AWS Access Key for S3 storage."
)
AWS_SECRET_ACCESS_KEY = get_string(
    name="AWS_SECRET_ACCESS_KEY",
    default=None,
    description="AWS Secret Key for S3 storage.",
)
AWS_STORAGE_BUCKET_NAME = get_string(
    name="AWS_STORAGE_BUCKET_NAME", default=None, description="S3 Bucket name."
)
AWS_PREVIEW_BUCKET_NAME = get_string(
    name="AWS_PREVIEW_BUCKET_NAME", default=None, description="S3 preview bucket name."
)
AWS_PUBLISH_BUCKET_NAME = get_string(
    name="AWS_PUBLISH_BUCKET_NAME", default=None, description="S3 publish bucket name."
)
AWS_QUERYSTRING_AUTH = get_bool(
    name="AWS_QUERYSTRING_AUTH",
    default=False,
    description="Enables querystring auth for S3 urls",
)
AWS_DEFAULT_ACL = "public-read"
AWS_ACCOUNT_ID = get_string(name="AWS_ACCOUNT_ID", description="AWS Account ID")
AWS_REGION = get_string(
    name="AWS_REGION", default="us-east-1", description="AWS Region"
)
AWS_ROLE_NAME = get_string(
    name="AWS_ROLE_NAME",
    default=None,
    description="AWS role name to be used for MediaConvert jobs",
)
# Provide nice validation of the configuration
if OCW_STUDIO_USE_S3 and (
    not AWS_ACCESS_KEY_ID or not AWS_SECRET_ACCESS_KEY or not AWS_STORAGE_BUCKET_NAME
):
    raise ImproperlyConfigured(
        "You have enabled S3 support, but are missing one of "
        "AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, or "
        "AWS_STORAGE_BUCKET_NAME"
    )
if OCW_STUDIO_USE_S3:
    DEFAULT_FILE_STORAGE = "storages.backends.s3boto3.S3Boto3Storage"
# Google Drive and Video settings
DRIVE_SERVICE_ACCOUNT_CREDS = get_string(
    name="DRIVE_SERVICE_ACCOUNT_CREDS",
    default=None,
    description="The contents of the Service Account credentials JSON to use for Google API auth",
)
DRIVE_SHARED_ID = get_string(
    name="DRIVE_SHARED_ID",
    default=None,
    description="ID of the Shared Drive (a.k.a. Team Drive). This is equal to the top-level folder ID.",
)
DRIVE_IMPORT_RECENT_FILES_SECONDS = get_int(
    name="DRIVE_IMPORT_RECENT_FILES_SECONDS",
    default=3600,
    description=(
        "The frequency to check for new google drive files/videos, in seconds"
    ),
)
DRIVE_S3_UPLOAD_PREFIX = get_string(
    name="DRIVE_S3_UPLOAD_PREFIX",
    default="gdrive_uploads",
    description=("Prefix to be used for S3 keys of files uploaded from Google Drive"),
)
DRIVE_UPLOADS_PARENT_FOLDER_ID = get_string(
    name="DRIVE_UPLOADS_PARENT_FOLDER_ID",
    default=None,
    description="Gdrive folder for video uploads",
    required=False,
)
VIDEO_S3_TRANSCODE_PREFIX = get_string(
    name="VIDEO_S3_TRANSCODE_PREFIX",
    default="aws_mediaconvert_transcodes",
    description=(
        "Prefix to be used for S3 keys of files transcoded from AWS MediaConvert"
    ),
)
# YouTube API credentials and upload throttling
YT_ACCESS_TOKEN = get_string(
    name="YT_ACCESS_TOKEN", default="", description="Youtube access token"
)
YT_CATEGORY_ID = get_int(
    name="YT_CATEGORY_ID",
    default=27,
    description="Default video category ID for youtube",
)
YT_CLIENT_ID = get_string(
    name="YT_CLIENT_ID", default="", description="Youtube Client ID"
)
YT_CLIENT_SECRET = get_string(
    name="YT_CLIENT_SECRET", default="", description="Youtube client secret key"
)
YT_REFRESH_TOKEN = get_string(
    name="YT_REFRESH_TOKEN", default="", description="YT_REFRESH_TOKEN"
)
YT_PROJECT_ID = get_string(
    name="YT_PROJECT_ID", default="", description="Youtube project ID"
)
YT_STATUS_UPDATE_FREQUENCY = get_int(
    name="YT_STATUS_UPDATE_FREQUENCY",
    default=60,
    description="The frequency to check for status updates on uploaded youtube videos",
)
YT_UPLOAD_FREQUENCY = get_int(
    name="YT_UPLOAD_FREQUENCY",
    default=60,
    description="The frequency to check for videos to upload to Youtube",
)
YT_UPLOAD_LIMIT = get_int(
    name="YT_UPLOAD_LIMIT",
    default=50,
    description="Max Youtube uploads allowed per day",
)
# OCW metadata fields
FIELD_RESOURCETYPE = get_string(
    name="FIELD_RESOURCETYPE",
    default="resourcetype",
    description="The site config metadata field for the resource type",
)
# YouTube OCW metadata fields (dotted paths into site config metadata)
YT_FIELD_CAPTIONS = get_string(
    name="YT_FIELD_CAPTIONS",
    default="video_files.video_captions_file",
    description="The site config metadata field for the caption url",
)
YT_FIELD_ID = get_string(
    name="YT_FIELD_ID",
    default="video_metadata.youtube_id",
    description="The site config metadata field for YouTube ID",
)
YT_FIELD_DESCRIPTION = get_string(
    name="YT_FIELD_DESCRIPTION",
    default="description",
    description="The site config metadata field for YouTube description",
)
YT_FIELD_SPEAKERS = get_string(
    name="YT_FIELD_SPEAKERS",
    default="video_metadata.video_speakers",
    description="The site config metadata field for YouTube speakers",
)
YT_FIELD_TAGS = get_string(
    name="YT_FIELD_TAGS",
    default="video_metadata.video_tags",
    description="The site config metadata field for YouTube video tags",
)
YT_FIELD_THUMBNAIL = get_string(
    name="YT_FIELD_THUMBNAIL",
    default="video_files.video_thumbnail_file",
    description="The site config metadata field for YouTube thumbnail url",
)
YT_FIELD_TRANSCRIPT = get_string(
    name="YT_FIELD_TRANSCRIPT",
    default="video_files.video_transcript_file",
    description="The site config metadata field for the transcript url",
)
UPDATE_TAGGED_3PLAY_TRANSCRIPT_FREQUENCY = get_int(
    name="UPDATE_TAGGED_3PLAY_TRANSCRIPT_FREQUENCY",
    default=3600,
    description="The frequency to check for videos tagged as updated in 3play",
)
UPDATE_MISSING_TRANSCRIPT_FREQUENCY = get_int(
    name="UPDATE_MISSING_TRANSCRIPT_FREQUENCY",
    default=43200,
    description="The frequency to check for transcripts for published videos with blank transcripts",
)
# Celery
REDISCLOUD_URL = get_string(
    name="REDISCLOUD_URL", default=None, description="RedisCloud connection url"
)
# prefer the RedisCloud add-on URL (Heroku); fall back to a plain REDIS_URL
if REDISCLOUD_URL is not None:
    _redis_url = REDISCLOUD_URL
else:
    _redis_url = get_string(
        name="REDIS_URL", default=None, description="Redis URL for non-production use"
    )
CELERY_BROKER_URL = get_string(
    name="CELERY_BROKER_URL",
    default=_redis_url,
    description="Where celery should get tasks, default is Redis URL",
)
CELERY_RESULT_BACKEND = get_string(
    name="CELERY_RESULT_BACKEND",
    default=_redis_url,
    description="Where celery should put task results, default is Redis URL",
)
CELERY_TASK_ALWAYS_EAGER = get_bool(
name="CELERY_TASK_ALWAYS_EAGER",
default=False,
dev_only=True,
description="Enables eager execution | |
'%=' ) expression
pass
if self.input.LA(1) == 57 or (122 <= self.input.LA(1) <= 131):
self.input.consume()
self._state.errorRecovery = False
else:
if self._state.backtracking > 0:
raise BacktrackingFailed
mse = MismatchedSetException(None, self.input)
raise mse
self._state.following.append(self.FOLLOW_expression_in_synpred169_sol2235)
self.expression()
self._state.following.pop()
# $ANTLR end "synpred169_sol"
    # $ANTLR start "synpred185_sol"
    def synpred185_sol_fragment(self, ):
        # ANTLR-generated syntactic predicate fragment: speculatively matches
        # a single `identifier` rule; raises BacktrackingFailed on mismatch.
        # D:\\PycharmProjects\\SolidityObfuscator\\src\\solidityobf\\T\\sol.g:258:5: ( identifier )
        # D:\\PycharmProjects\\SolidityObfuscator\\src\\solidityobf\\T\\sol.g:258:5: identifier
        pass
        self._state.following.append(self.FOLLOW_identifier_in_synpred185_sol2419)
        self.identifier()
        self._state.following.pop()
    # $ANTLR end "synpred185_sol"
    # $ANTLR start "synpred187_sol"
    def synpred187_sol_fragment(self, ):
        # ANTLR-generated syntactic predicate fragment: speculatively matches
        # an `assemblyExpression`; raises BacktrackingFailed on mismatch.
        # D:\\PycharmProjects\\SolidityObfuscator\\src\\solidityobf\\T\\sol.g:260:5: ( assemblyExpression )
        # D:\\PycharmProjects\\SolidityObfuscator\\src\\solidityobf\\T\\sol.g:260:5: assemblyExpression
        pass
        self._state.following.append(self.FOLLOW_assemblyExpression_in_synpred187_sol2431)
        self.assemblyExpression()
        self._state.following.pop()
    # $ANTLR end "synpred187_sol"
    # $ANTLR start "synpred199_sol"
    def synpred199_sol_fragment(self, ):
        # ANTLR-generated syntactic predicate fragment: speculatively matches
        # a `numberLiteral`; raises BacktrackingFailed on mismatch.
        # D:\\PycharmProjects\\SolidityObfuscator\\src\\solidityobf\\T\\sol.g:272:5: ( numberLiteral )
        # D:\\PycharmProjects\\SolidityObfuscator\\src\\solidityobf\\T\\sol.g:272:5: numberLiteral
        pass
        self._state.following.append(self.FOLLOW_numberLiteral_in_synpred199_sol2503)
        self.numberLiteral()
        self._state.following.pop()
    # $ANTLR end "synpred199_sol"
    # $ANTLR start "synpred200_sol"
    def synpred200_sol_fragment(self, ):
        # ANTLR-generated syntactic predicate fragment: speculatively matches
        # a StringLiteral token; raises BacktrackingFailed on mismatch.
        # D:\\PycharmProjects\\SolidityObfuscator\\src\\solidityobf\\T\\sol.g:273:5: ( StringLiteral )
        # D:\\PycharmProjects\\SolidityObfuscator\\src\\solidityobf\\T\\sol.g:273:5: StringLiteral
        pass
        self.match(self.input, StringLiteral, self.FOLLOW_StringLiteral_in_synpred200_sol2509)
    # $ANTLR end "synpred200_sol"
    # $ANTLR start "synpred207_sol"
    def synpred207_sol_fragment(self, ):
        # ANTLR-generated syntactic predicate fragment: speculatively matches
        # a parenthesized, comma-separated assemblyExpression argument list
        #   '(' ( assemblyExpression )? ( ',' assemblyExpression )* ')'
        # Token numbers: 69 = '(', 63 = ',', 70 = ')'.
        # D:\\PycharmProjects\\SolidityObfuscator\\src\\solidityobf\\T\\sol.g:280:54: ( '(' ( assemblyExpression )? ( ',' assemblyExpression )* ')' )
        # D:\\PycharmProjects\\SolidityObfuscator\\src\\solidityobf\\T\\sol.g:280:54: '(' ( assemblyExpression )? ( ',' assemblyExpression )* ')'
        pass
        self.match(self.input, 69, self.FOLLOW_69_in_synpred207_sol2561)
        # optional first argument: decide via one token of lookahead
        # D:\\PycharmProjects\\SolidityObfuscator\\src\\solidityobf\\T\\sol.g:280:58: ( assemblyExpression )?
        alt118 = 2
        LA118_0 = self.input.LA(1)
        if (LA118_0 == StringLiteral or LA118_0 == HexLiteral or (DecimalNumber <= LA118_0 <= HexNumber) or LA118_0 == Identifier or LA118_0 == 61 or LA118_0 == 80 or LA118_0 == 88 or LA118_0 == 94 or LA118_0 == 100) :
            alt118 = 1
        if alt118 == 1:
            # D:\\PycharmProjects\\SolidityObfuscator\\src\\solidityobf\\T\\sol.g:0:0: assemblyExpression
            pass
            self._state.following.append(self.FOLLOW_assemblyExpression_in_synpred207_sol2563)
            self.assemblyExpression()
            self._state.following.pop()
        # zero or more ',' assemblyExpression pairs
        # D:\\PycharmProjects\\SolidityObfuscator\\src\\solidityobf\\T\\sol.g:280:78: ( ',' assemblyExpression )*
        while True: #loop119
            alt119 = 2
            LA119_0 = self.input.LA(1)
            if (LA119_0 == 63) :
                alt119 = 1
            if alt119 == 1:
                # D:\\PycharmProjects\\SolidityObfuscator\\src\\solidityobf\\T\\sol.g:280:80: ',' assemblyExpression
                pass
                self.match(self.input, 63, self.FOLLOW_63_in_synpred207_sol2568)
                self._state.following.append(self.FOLLOW_assemblyExpression_in_synpred207_sol2570)
                self.assemblyExpression()
                self._state.following.pop()
            else:
                break #loop119
        self.match(self.input, 70, self.FOLLOW_70_in_synpred207_sol2575)
    # $ANTLR end "synpred207_sol"
# Delegated rules
def synpred149_sol(self):
self._state.backtracking += 1
start = self.input.mark()
try:
self.synpred149_sol_fragment()
except BacktrackingFailed:
success = False
else:
success = True
self.input.rewind(start)
self._state.backtracking -= 1
return success
def synpred136_sol(self):
self._state.backtracking += 1
start = self.input.mark()
try:
self.synpred136_sol_fragment()
except BacktrackingFailed:
success = False
else:
success = True
self.input.rewind(start)
self._state.backtracking -= 1
return success
def synpred61_sol(self):
self._state.backtracking += 1
start = self.input.mark()
try:
self.synpred61_sol_fragment()
except BacktrackingFailed:
success = False
else:
success = True
self.input.rewind(start)
self._state.backtracking -= 1
return success
def synpred157_sol(self):
self._state.backtracking += 1
start = self.input.mark()
try:
self.synpred157_sol_fragment()
except BacktrackingFailed:
success = False
else:
success = True
self.input.rewind(start)
self._state.backtracking -= 1
return success
def synpred187_sol(self):
self._state.backtracking += 1
start = self.input.mark()
try:
self.synpred187_sol_fragment()
except BacktrackingFailed:
success = False
else:
success = True
self.input.rewind(start)
self._state.backtracking -= 1
return success
def synpred144_sol(self):
self._state.backtracking += 1
start = self.input.mark()
try:
self.synpred144_sol_fragment()
except BacktrackingFailed:
success = False
else:
success = True
self.input.rewind(start)
self._state.backtracking -= 1
return success
def synpred153_sol(self):
self._state.backtracking += 1
start = self.input.mark()
try:
self.synpred153_sol_fragment()
except BacktrackingFailed:
success = False
else:
success = True
self.input.rewind(start)
self._state.backtracking -= 1
return success
def synpred78_sol(self):
self._state.backtracking += 1
start = self.input.mark()
try:
self.synpred78_sol_fragment()
except BacktrackingFailed:
success = False
else:
success = True
self.input.rewind(start)
self._state.backtracking -= 1
return success
def synpred148_sol(self):
self._state.backtracking += 1
start = self.input.mark()
try:
self.synpred148_sol_fragment()
except BacktrackingFailed:
success = False
else:
success = True
self.input.rewind(start)
self._state.backtracking -= 1
return success
def synpred99_sol(self):
self._state.backtracking += 1
start = self.input.mark()
try:
self.synpred99_sol_fragment()
except BacktrackingFailed:
success = False
else:
success = True
self.input.rewind(start)
self._state.backtracking -= 1
return success
def synpred135_sol(self):
self._state.backtracking += 1
start = self.input.mark()
try:
self.synpred135_sol_fragment()
except BacktrackingFailed:
success = False
else:
success = True
self.input.rewind(start)
self._state.backtracking -= 1
return success
def synpred139_sol(self):
self._state.backtracking += 1
start = self.input.mark()
try:
self.synpred139_sol_fragment()
except BacktrackingFailed:
success = False
else:
success = True
self.input.rewind(start)
self._state.backtracking -= 1
return success
def synpred199_sol(self):
self._state.backtracking += 1
start = self.input.mark()
try:
self.synpred199_sol_fragment()
except BacktrackingFailed:
success = False
else:
success = True
self.input.rewind(start)
self._state.backtracking -= 1
return success
def synpred77_sol(self):
self._state.backtracking += 1
start = self.input.mark()
try:
self.synpred77_sol_fragment()
except BacktrackingFailed:
success = False
else:
success = True
self.input.rewind(start)
self._state.backtracking -= 1
return success
def synpred100_sol(self):
self._state.backtracking += 1
start = self.input.mark()
try:
self.synpred100_sol_fragment()
except BacktrackingFailed:
success = False
else:
success = True
self.input.rewind(start)
self._state.backtracking -= 1
return success
def synpred156_sol(self):
self._state.backtracking += 1
start = self.input.mark()
try:
self.synpred156_sol_fragment()
except BacktrackingFailed:
success = False
else:
success = True
self.input.rewind(start)
self._state.backtracking -= 1
return success
def synpred169_sol(self):
self._state.backtracking += 1
start = self.input.mark()
try:
self.synpred169_sol_fragment()
except BacktrackingFailed:
success = False
else:
success = True
self.input.rewind(start)
self._state.backtracking -= 1
return success
def synpred76_sol(self):
self._state.backtracking += 1
start = self.input.mark()
try:
self.synpred76_sol_fragment()
except BacktrackingFailed:
success = False
else:
success = True
self.input.rewind(start)
self._state.backtracking -= 1
return success
def synpred142_sol(self):
self._state.backtracking += 1
start = self.input.mark()
try:
self.synpred142_sol_fragment()
except BacktrackingFailed:
success = False
else:
success = True
self.input.rewind(start)
self._state.backtracking -= 1
return success
def synpred147_sol(self):
self._state.backtracking += 1
start = self.input.mark()
try:
self.synpred147_sol_fragment()
except BacktrackingFailed:
success = False
else:
success = True
self.input.rewind(start)
self._state.backtracking -= 1
return success
def synpred185_sol(self):
self._state.backtracking += 1
start = self.input.mark()
try:
self.synpred185_sol_fragment()
except BacktrackingFailed:
success = False
else:
success = True
self.input.rewind(start)
self._state.backtracking -= 1
return success
def synpred125_sol(self):
self._state.backtracking += 1
start = self.input.mark()
try:
self.synpred125_sol_fragment()
except BacktrackingFailed:
success = False
else:
success = True
self.input.rewind(start)
self._state.backtracking -= 1
return success
def synpred138_sol(self):
self._state.backtracking += 1
start = self.input.mark()
try:
self.synpred138_sol_fragment()
except BacktrackingFailed:
success = False
else:
success = True
self.input.rewind(start)
self._state.backtracking -= 1
return success
def synpred80_sol(self):
self._state.backtracking += 1
start = self.input.mark()
try:
self.synpred80_sol_fragment()
except BacktrackingFailed:
success = False
else:
success = True
self.input.rewind(start)
self._state.backtracking -= 1
return success
def synpred155_sol(self):
self._state.backtracking += 1
start = self.input.mark()
try:
self.synpred155_sol_fragment()
except BacktrackingFailed:
success = False
else:
success = True
self.input.rewind(start)
self._state.backtracking -= 1
return success
def synpred207_sol(self):
self._state.backtracking += 1
start = self.input.mark()
try:
self.synpred207_sol_fragment()
except BacktrackingFailed:
success = False
else:
success = True
self.input.rewind(start)
self._state.backtracking -= 1
return success
def synpred137_sol(self):
self._state.backtracking += 1
start = self.input.mark()
try:
self.synpred137_sol_fragment()
except BacktrackingFailed:
success = False
else:
success = True
self.input.rewind(start)
self._state.backtracking -= 1
return success
def synpred146_sol(self):
self._state.backtracking += 1
start = self.input.mark()
try:
self.synpred146_sol_fragment()
except BacktrackingFailed:
success = False
else:
success = True
self.input.rewind(start)
self._state.backtracking -= 1
return success
def synpred200_sol(self):
self._state.backtracking += 1
start = self.input.mark()
try:
self.synpred200_sol_fragment()
except BacktrackingFailed:
success = False
else:
success = True
self.input.rewind(start)
self._state.backtracking -= 1
return success
def synpred158_sol(self):
self._state.backtracking += 1
start = self.input.mark()
try:
self.synpred158_sol_fragment()
except BacktrackingFailed:
success = False
else:
success = True
self.input.rewind(start)
self._state.backtracking -= 1
return success
    # lookup tables for DFA #2
    # Machine-generated by ANTLR: the DFA transition/accept tables are encoded
    # as packed unicode strings (octal escapes) and decoded by DFA.unpack.
    # Do not edit these strings by hand.
    DFA2_eot = DFA.unpack(
        u"\40\uffff"
    )
    DFA2_eof = DFA.unpack(
        u"\40\uffff"
    )
    DFA2_min = DFA.unpack(
        u"\2\4\36\uffff"
    )
    DFA2_max = DFA.unpack(
        u"\2\154\36\uffff"
    )
    DFA2_accept = DFA.unpack(
        u"\2\uffff\1\1\1\2\34\uffff"
    )
    DFA2_special = DFA.unpack(
        u"\40\uffff"
    )
    DFA2_transition = [
        DFA.unpack(u"\1\2\1\3\13\uffff\5\3\1\uffff\2\3\2\uffff\2\3\1\uffff"
        u"\1\3\24\uffff\1\2\1\1\5\2\3\uffff\1\3\7\uffff\1\3\12\uffff\2\3"
        u"\6\uffff\1\3\10\uffff\14\3"),
        DFA.unpack(u"\1\2\1\3\13\uffff\5\3\1\uffff\2\3\2\uffff\2\3\1\uffff"
        u"\1\3\25\uffff\1\3\10\uffff\1\3\7\uffff\1\3\12\uffff\2\3\6\uffff"
        u"\1\3\10\uffff\14\3"),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u"")
    ]

    # class definition for DFA #2
    class DFA2(DFA):
        pass
    # lookup tables for DFA #13
    # Machine-generated by ANTLR (packed unicode DFA tables); do not edit.
    DFA13_eot = DFA.unpack(
        u"\15\uffff"
    )
    DFA13_eof = DFA.unpack(
        u"\15\uffff"
    )
    DFA13_min = DFA.unpack(
        u"\1\21\14\uffff"
    )
    DFA13_max = DFA.unpack(
        u"\1\144\14\uffff"
    )
    DFA13_accept = DFA.unpack(
        u"\1\uffff\1\2\1\1\12\uffff"
    )
    DFA13_special = DFA.unpack(
        u"\15\uffff"
    )
    DFA13_transition = [
        DFA.unpack(u"\5\2\10\uffff\1\2\36\uffff\1\2\2\uffff\1\1\6\uffff"
        u"\1\2\1\uffff\4\2\1\uffff\3\2\3\uffff\1\2\3\uffff\1\2\10\uffff\4"
        u"\2"),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u"")
    ]

    # class definition for DFA #13
    class DFA13(DFA):
        pass
    # lookup tables for DFA #16
    # Machine-generated by ANTLR (packed unicode DFA tables); do not edit.
    DFA16_eot = DFA.unpack(
        u"\17\uffff"
    )
    DFA16_eof = DFA.unpack(
        u"\17\uffff"
    )
    DFA16_min = DFA.unpack(
        u"\1\21\3\uffff\1\12\12\uffff"
    )
    DFA16_max = DFA.unpack(
        u"\1\144\3\uffff\1\130\12\uffff"
    )
    DFA16_accept = DFA.unpack(
        u"\1\uffff\1\1\4\uffff\1\2\1\3\1\4\1\5\1\7\1\10\1\6\2\uffff"
    )
    DFA16_special = DFA.unpack(
        u"\17\uffff"
    )
    DFA16_transition = [
        DFA.unpack(u"\5\1\10\uffff\1\1\36\uffff\1\1\11\uffff\1\6\1\uffff"
        u"\1\7\1\10\1\11\1\4\1\uffff\1\12\1\13\1\1\3\uffff\1\1\3\uffff\1"
        u"\1\10\uffff\4\1"),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u"\1\14\23\uffff\1\14\36\uffff\1\14\7\uffff\1\1\22\uffff"
        u"\1\14"),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u"")
    ]

    # class definition for DFA #16
    class DFA16(DFA):
        pass
# lookup tables for DFA #24
DFA24_eot = DFA.unpack(
u"\12\uffff"
)
DFA24_eof = DFA.unpack(
u"\1\2\11\uffff"
)
DFA24_min = DFA.unpack(
u"\1\6\11\uffff"
)
DFA24_max = | |
from __future__ import absolute_import, division
from psychopy import locale_setup, core, gui, data#, event#, logging#, visual
from psychopy.constants import (NOT_STARTED, STARTED, PLAYING, PAUSED,
STOPPED, FINISHED, PRESSED, RELEASED, FOREVER)
# Make sure that under Psychopy preferences, under audio library pygame is listed first.
# Make sure the bottem dialog bar auto hides
# DIALOG BOX RESOURCES
# http://www.blog.pythonlibrary.org/2010/07/10/the-dialogs-of-wxpython-part-2-of-2/
# Clock resources: psychopy-users/tFghyXkOx5U
#from psychopy.gui import wxgui
import os # handy system and path functions
import sys # to get file system encoding
import wx
import numpy as np
import glob
# sys.path.insert(0, '../DataHandlingScripts')
import CheckExistingNeuroPsychData
# Ensure that relative paths start from the same directory as this script.
_thisDir = os.path.dirname(os.path.abspath(__file__))#.decode(sys.getfilesystemencoding())
# NOTE: changes the process-wide working directory; every relative path
# in this script (and anything it launches) is resolved against _thisDir.
os.chdir(_thisDir)
# Make the sibling ConfigFiles directory importable so the experiment
# parameters (NCM_NeuroPsych_Config, NeuropsychDataFolder) can be imported.
sys.path.append(os.path.join(_thisDir, '..','ConfigFiles'))
from NCM_NeuroPsych_Config import *
# Check whether the output data folder has already been configured; if not
# (or if the saved folder has disappeared), ask the user and persist the choice.
try:
    # Try to load the previously saved config module.
    from NeuropsychDataFolder import *
    # Confirm the variable exists and still points at a real folder.
    print('Data being saved to: %s'%(NeuropsychDataFolder))
    if not os.path.exists(NeuropsychDataFolder):
        raise ValueError('Folder does not exist.')
except (ImportError, NameError, ValueError):
    # Was a bare `except:` — narrowed to the three expected failures:
    # config module missing, variable absent from it, or folder gone.
    app = wx.App()
    dlg = wx.DirDialog(None, "Choose data output directory", "",
                       wx.DD_DEFAULT_STYLE | wx.DD_DIR_MUST_EXIST)
    if dlg.ShowModal() != wx.ID_OK:
        # BUG FIX: a cancelled dialog previously left OutFolder undefined
        # and crashed with NameError below; exit cleanly instead.
        dlg.Destroy()
        raise SystemExit('No data output directory selected.')
    OutFolder = dlg.GetPath()
    print(OutFolder)
    dlg.Destroy()
    # Persist the choice so the next run skips the dialog.
    with open(os.path.join(_thisDir, '..','ConfigFiles','NeuropsychDataFolder.py'),'w') as fid:
        fid.write('NeuropsychDataFolder = \'%s\''%(OutFolder))
    NeuropsychDataFolder = OutFolder
# --- GUI layout geometry -------------------------------------------------
# All widget positions are derived from these constants; change NRows /
# NCols to grow the grid and everything below follows.
Top = 20
Left = 20
RowWidth = 50
ColWidth = 100
# Button height cannot be changed (wx uses -1 for "platform default").
ButtonHeight = -1
ButtonWidth = 80
LabelOffset = 10
BoxVerticalShift = 12

# Allow a flexible number of rows: RowPixel[i] is the y-origin of row i.
NRows = 12
RowPixel = [Top + row * RowWidth for row in range(NRows)]
# Window height leaves one row's worth of margin below the last row.
GUIHeight = max(RowPixel) + RowWidth

# Allow a flexible number of columns: ColPixel[i] is the x-origin of column i.
# (Offsets from Top, matching the original layout; Top == Left here.)
NCols = 7
ColPixel = [Top + col * ColWidth for col in range(NCols)]
# Window width leaves two columns' worth of margin after the last column.
GUIWidth = max(ColPixel) + 2 * ColWidth
NColForBox = NCols
class Mywin(wx.Frame):
def __init__(self, parent, title):
# size = (width, height)
# Create the GUI window
super(Mywin, self).__init__(parent, title = title,size = (GUIWidth,GUIHeight))
self.panel = wx.Panel(self)
vbox = wx.BoxSizer(wx.VERTICAL)
self.DataFolder = NeuropsychDataFolder
print(NeuropsychDataFolder)
if not os.path.exists(self.DataFolder):
# If my specified folder does not exist, then put the data up two folders.
self.DataFolder = "../../data"
if not os.path.exists(self.DataFolder):
os.mkdir(self.DataFolder)
self.VisitFolderPath = 'empty'
# Setup the Participant ID entry
self.PartIDLabel = wx.StaticText(self.panel, -1, label = "Participant ID:", pos = (ColPixel[0],RowPixel[0]))
self.PartID = wx.TextCtrl(self.panel,-1,'9999999',size=(ButtonWidth,-1),pos = (ColPixel[1],RowPixel[0]))
self.btnPartEntry = wx.Button(self.panel,-1,label = "Submit", pos = (ColPixel[2],RowPixel[0]), size = ((ButtonWidth, ButtonHeight)))
self.btnPartEntry.Bind(wx.EVT_BUTTON, self.OnCickPartEntry)
self.PartIDLabel = wx.StaticText(self.panel, -1, label = "Output folder:", pos = (ColPixel[3], RowPixel[0]))
# Create Default values for the load levels for the two tasks
self.FRTBlockLoadLevels = '0.0 0.125 0.25 0.375 0.5'
self.DMSBlockLoadLevels = '1 3 5 6 7'
self.VSTMBlockLoadLevels = '1 2 3 4 5'
self.DMSFontSize = '60'
self.DMSTag = 0
self.VSTMTag = 0
self.NBackTag = 0
self.NBackPracticeTag = 0
# #### Row
CurrentRow = RowPixel[1]
self.titleRMem = wx.StaticText(self.panel, -1, label = "Memory", pos = (ColPixel[0]+LabelOffset/2,CurrentRow+LabelOffset))
# Buttons
self.btnRMemC2 = wx.Button(self.panel,-1,"Immediate", pos = (ColPixel[1],CurrentRow), size = ((ButtonWidth, ButtonHeight)))
self.btnRMemC2.Bind(wx.EVT_BUTTON,self.OnClickedRMemC2)
self.btnRMemC5 = wx.Button(self.panel,-1,"-- D --", pos = (ColPixel[4],CurrentRow), size = ((ButtonWidth, ButtonHeight)))
self.btnRMemC5.Bind(wx.EVT_BUTTON,self.OnClickedRMemC5)
self.btnRMemC6 = wx.Button(self.panel,-1,"-- R --", pos = (ColPixel[5],CurrentRow), size = ((ButtonWidth, ButtonHeight)))
self.btnRMemC6.Bind(wx.EVT_BUTTON,self.OnClickedRMemC6)
# Box
RowMemBoxR2 = wx.StaticBox(self.panel, -1, size = ((ColWidth+5)*NColForBox, RowWidth-5), pos = (ColPixel[0],CurrentRow-BoxVerticalShift))
# Checkboxes
self.cbRMemC2 = wx.CheckBox(self.panel, -1, label = "", pos = (ColPixel[1] + ButtonWidth+5,CurrentRow))
self.cbRMemC5 = wx.CheckBox(self.panel, -1, label = "", pos = (ColPixel[4] + ButtonWidth+5,CurrentRow))
self.cbRMemC6 = wx.CheckBox(self.panel, -1, label = "", pos = (ColPixel[5] + ButtonWidth+5,CurrentRow))
# #### Row
# CurrentRow = RowPixel[2]
# self.titleR9 = wx.StaticText(self.panel, -1, label = "Fluid", pos = (ColPixel[0]+LabelOffset/2,CurrentRow+LabelOffset))
# # Buttons
# #self.btnR9C2 = wx.Button(self.panel,-1,"Paper Folding", pos = (ColPixel[1],CurrentRow), size = ((ButtonWidth, ButtonHeight)))
# #self.btnR9C2.Bind(wx.EVT_BUTTON,self.OnClickedR9C2)
# self.btnR9C2 = wx.Button(self.panel,-1,"Mat. Practice", pos = (ColPixel[1],CurrentRow), size = ((ButtonWidth, ButtonHeight)))
# self.btnR9C2.Bind(wx.EVT_BUTTON,self.OnClickedR9C2)
# self.btnR9C3 = wx.Button(self.panel,-1,"Matrices", pos = (ColPixel[2],CurrentRow), size = ((ButtonWidth, ButtonHeight)))
# self.btnR9C3.Bind(wx.EVT_BUTTON,self.OnClickedR9C3)
# # Box
# Row9BoxR2 = wx.StaticBox(self.panel, -1, size = ((ColWidth+5)*NColForBox,RowWidth-5), pos = (ColPixel[0],CurrentRow-BoxVerticalShift))
# # Checkboxes
# self.cbR9C2 = wx.CheckBox(self.panel, -1, label = "", pos = (ColPixel[1] + ButtonWidth+5,CurrentRow))
# self.cbR9C3 = wx.CheckBox(self.panel, -1, label = "", pos = (ColPixel[2] + ButtonWidth+5,CurrentRow))
# self.cbR9C2 = wx.CheckBox(self.panel, -1, label = "", pos = (ColPixel[1] + ButtonWidth+5,CurrentRow))
# self.cbR9C3 = wx.CheckBox(self.panel, -1, label = "", pos = (ColPixel[2] + ButtonWidth+5,CurrentRow))
# ### Row
# CurrentRow = RowPixel[3]
# self.titleR5 = wx.StaticText(self.panel, -1, label = "DMS/Letters", pos = (ColPixel[0]+LabelOffset/2,CurrentRow+LabelOffset))
# # Buttons
# self.btnR5C1 = wx.Button(self.panel,-1,"Instructions", pos = (ColPixel[1],CurrentRow), size = ((ButtonWidth, ButtonHeight)))
# self.btnR5C1.Bind(wx.EVT_BUTTON,self.OnClickedR5C1)
# self.btnR5C2 = wx.Button(self.panel,-1,"Practice", pos = (ColPixel[2],CurrentRow), size = ((ButtonWidth, ButtonHeight)))
# self.btnR5C2.Bind(wx.EVT_BUTTON,self.OnClickedR5C2)
# self.btnR5C3 = wx.Button(self.panel,-1,"Stair", pos = (ColPixel[3],CurrentRow), size = ((ButtonWidth, ButtonHeight)))
# self.btnR5C3.Bind(wx.EVT_BUTTON,self.OnClickedR5C3)
# self.btnR5C6 = wx.Button(self.panel,-1,"Block", pos = (ColPixel[6],CurrentRow), size = ((ButtonWidth, ButtonHeight)))
# self.btnR5C6.Bind(wx.EVT_BUTTON,self.OnClickedR5C6)
# # Text box for the capacity value
# self.txtR5C4 = wx.StaticText(self.panel, -1, label = "Cap =", pos = (ColPixel[4]+5,CurrentRow+LabelOffset))
# self.txtR5C5 = wx.StaticText(self.panel, -1, label = "000", pos = (ColPixel[5]-ColWidth/2+5,CurrentRow+LabelOffset))
# self.btnR5C5a = wx.Button(self.panel,-1,"Enter", pos = (ColPixel[5]-5,CurrentRow), size = ((ButtonWidth/2+5, ButtonHeight)))
# self.btnR5C5b = wx.Button(self.panel,-1,"Load", pos = (ColPixel[5]+40,CurrentRow), size = ((ButtonWidth/2+5, ButtonHeight)))
# self.btnR5C5a.Bind(wx.EVT_BUTTON, self.OnClickedDMSCapEnter)
# self.btnR5C5b.Bind(wx.EVT_BUTTON, self.LoadDMSCapacity)
# # Make a box around the Capacity text and entry buttons
# Row5BoxR5 = wx.StaticBox(self.panel, -1, size = ((ColWidth*2 - 12),RowWidth-5), pos = (ColPixel[4]+4,CurrentRow-BoxVerticalShift))
# # Box
# Row1BoxR5 = wx.StaticBox(self.panel, -1, size = ((ColWidth+5)*NColForBox,RowWidth-5), pos = (ColPixel[0],CurrentRow-BoxVerticalShift))
# # Checkboxes
# self.cbR5C2 = wx.CheckBox(self.panel, -1, label = "", pos = (ColPixel[2] + ButtonWidth+5,CurrentRow))
# self.cbR5C3 = wx.CheckBox(self.panel, -1, label = "", pos = (ColPixel[3] + ButtonWidth+5,CurrentRow))
# self.cbR5C6 = wx.CheckBox(self.panel, -1, label = "", pos = (ColPixel[6] + ButtonWidth+5,CurrentRow))
#
# #### Row
# CurrentRow = RowPixel[4]
# self.title1 = wx.StaticText(self.panel, -1, label = "Stroop", pos = (ColPixel[0]+LabelOffset/2,CurrentRow+LabelOffset))
# # Buttons
# self.btnR1C2 = wx.Button(self.panel,-1,"Color", pos = (ColPixel[1],CurrentRow), size = ((ButtonWidth, ButtonHeight)))
# self.btnR1C2.Bind(wx.EVT_BUTTON,self.OnClickedR1C2)
# self.btnR1C3 = wx.Button(self.panel,-1,"Word", pos = (ColPixel[2],CurrentRow), size = ((ButtonWidth, ButtonHeight)))
# self.btnR1C3.Bind(wx.EVT_BUTTON,self.OnClickedR1C3)
# self.btnR1C4 = wx.Button(self.panel,-1,"ColorWord", pos = (ColPixel[3],CurrentRow), size = ((ButtonWidth, ButtonHeight)))
# self.btnR1C4.Bind(wx.EVT_BUTTON,self.OnClickedR1C4)
# # Box
# Row1Box = wx.StaticBox(self.panel, -1, size = ((ColWidth+5)*NColForBox,RowWidth-5), pos = (ColPixel[0],CurrentRow-BoxVerticalShift))
# # Checkboxes
# self.cbR1C2 = wx.CheckBox(self.panel, -1, label = "", pos = (ColPixel[1] + ButtonWidth+5,CurrentRow))
# self.cbR1C3 = wx.CheckBox(self.panel, -1, label = "", pos = (ColPixel[2] + ButtonWidth+5,CurrentRow))
# self.cbR1C4 = wx.CheckBox(self.panel, -1, label = "", pos = (ColPixel[3] + ButtonWidth+5,CurrentRow))
#
# #### Row
# CurrentRow = RowPixel[5]
# self.titleR2 = wx.StaticText(self.panel, -1, label = "Card Sort", pos = (ColPixel[0]+LabelOffset/2,CurrentRow+LabelOffset))
# # Buttons
# self.btnR2C2 = wx.Button(self.panel,-1,"WCST", pos = (ColPixel[1],CurrentRow), size = ((ButtonWidth, ButtonHeight)))
# self.btnR2C2.Bind(wx.EVT_BUTTON,self.OnClickedR2C2)
# # Box
# Row1BoxR2 = wx.StaticBox(self.panel, -1, size = ((ColWidth+5)*NColForBox,RowWidth-5), pos = (ColPixel[0],CurrentRow-BoxVerticalShift))
# # Checkboxes
# self.cbR2C2 = wx.CheckBox(self.panel, -1, label = "", pos = (ColPixel[1] + ButtonWidth+5,CurrentRow))
# ### Row
# CurrentRow = RowPixel[6]
# self.titleR3 = wx.StaticText(self.panel, -1, label = "Spatial/Dots", pos = (ColPixel[0]+LabelOffset/2,CurrentRow+LabelOffset))
# # Buttons
# self.btnR3C1 = wx.Button(self.panel,-1,"Instructions", pos = (ColPixel[1],CurrentRow), size = ((ButtonWidth, ButtonHeight)))
# self.btnR3C1.Bind(wx.EVT_BUTTON,self.OnClickedR3C1)
# self.btnR3C2 = wx.Button(self.panel,-1,"Practice", pos = (ColPixel[2],CurrentRow), size = ((ButtonWidth, ButtonHeight)))
# self.btnR3C2.Bind(wx.EVT_BUTTON,self.OnClickedR3C2)
# self.btnR3C3 = wx.Button(self.panel,-1,"Stair", pos = (ColPixel[3],CurrentRow), size = ((ButtonWidth, ButtonHeight)))
# self.btnR3C3.Bind(wx.EVT_BUTTON,self.OnClickedR3C3)
# self.btnR3C4 = wx.Button(self.panel,-1,"Block", pos = (ColPixel[6],CurrentRow), size = ((ButtonWidth, ButtonHeight)))
# self.btnR3C4.Bind(wx.EVT_BUTTON,self.OnClickedR3C4)
# # Text box for the capacity value
# self.txtR3C4 = wx.StaticText(self.panel, -1, label = "Cap =", pos = (ColPixel[4]+5,CurrentRow+LabelOffset))
# self.txtR3C5 = wx.StaticText(self.panel, -1, label = "000", pos = (ColPixel[5]-ColWidth/2+5,CurrentRow+LabelOffset))
# self.btnR3C5a = wx.Button(self.panel,-1,"Enter", pos = (ColPixel[5]-5,CurrentRow), size = ((ButtonWidth/2+5, ButtonHeight)))
# self.btnR3C5b = wx.Button(self.panel,-1,"Load", pos = (ColPixel[5]+40,CurrentRow), size = ((ButtonWidth/2+5, ButtonHeight)))
# self.btnR3C5a.Bind(wx.EVT_BUTTON, self.OnClickedVSTMCapEnter)
# self.btnR3C5b.Bind(wx.EVT_BUTTON, self.LoadVSTMCapacity)
#
# # Make a box around the Capacity text and entry buttons
# Row3BoxR5 = wx.StaticBox(self.panel, -1, size = ((ColWidth*2 - 12),RowWidth-5), pos | |
import torch
import numpy as np
import torch_utils
from Models import base_model
import losses as my_losses
import torch_utils as my_utils
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR
import time
import os
from handlers import output_handler, mz_sampler
from Evaluation import mzEvaluator as my_evaluator
import datetime
import json
import matchzoo
import interactions
from handlers.output_handler import FileHandler
from handlers.tensorboard_writer import TensorboardWrapper
from matchzoo.preprocessors.tfidf_preprocessor import TFIDF
from setting_keywords import KeyWordSettings
from matchzoo.metrics import average_precision, discounted_cumulative_gain, \
mean_average_precision, mean_reciprocal_rank, normalized_discounted_cumulative_gain, precision
from Fitting.densebaseline_fit import DenseBaselineFitter
import torch.nn.functional as F
from tqdm import tqdm
class VisualFitter(DenseBaselineFitter):
def __init__(self, net: base_model.BaseModel,
loss = "bpr",
n_iter = 100,
testing_epochs = 5,
batch_size = 16,
reg_l2 = 1e-3,
learning_rate = 1e-4,
early_stopping = 0, # means no early stopping
decay_step = None,
decay_weight = None,
optimizer_func = None,
use_cuda = False,
num_negative_samples = 4,
logfolder = None,
curr_date = None,
**kargs):
super(VisualFitter, self).__init__(net, loss, n_iter, testing_epochs, batch_size, reg_l2, learning_rate,
early_stopping, decay_step, decay_weight, optimizer_func,
use_cuda, num_negative_samples, logfolder, curr_date, **kargs)
self.use_visual = kargs[KeyWordSettings.UseVisual]
self.image_loader = kargs[KeyWordSettings.ImageLoaderKey]
self.index2queries = kargs[KeyWordSettings.Index2Query]
self.index2docs = kargs[KeyWordSettings.Index2Doc]
    def fit(self, train_iteractions: interactions.MatchInteraction,
            verbose = True,  # for printing out evaluation during training
            topN = 10,
            val_interactions: interactions.MatchInteraction = None,
            test_interactions: interactions.MatchInteraction = None):
        """
        Train the network, resampling negatives each epoch and (when
        ``verbose``) validating after every epoch; the best checkpoint by
        validation hit@N (ties broken by ndcg@N) is saved to
        ``self.saved_model``.

        Parameters
        ----------
        train_iteractions: :class:`matchzoo.DataPack` The input sequence dataset.
        val_interactions: :class:`matchzoo.DataPack`
        test_interactions: :class:`matchzoo.DataPack`
        topN: rank cut-off for hit@N / ndcg@N reporting.
        """
        self._initialize(train_iteractions)
        best_hit, best_ndcg, best_epoch, test_ndcg, test_hit = 0, 0, 0, 0, 0
        test_results_dict = None
        iteration_counter = 0
        count_patience_epochs = 0

        for epoch_num in range(self._n_iter):
            # ------ Move to here ----------------------------------- #
            self._net.train(True)
            # Fresh negative samples every epoch; the sampler returns
            # parallel arrays for queries, positive docs and negative docs.
            query_ids, left_contents, left_lengths, left_imgs_indices, \
            doc_ids, right_contents, right_lengths, right_imgs_indices, \
            neg_doc_ids, neg_docs_contents, neg_docs_lens, neg_docs_imgs_indices = \
                self._sampler.get_train_instances_visual(train_iteractions, self._num_negative_samples)

            # Shuffle all parallel arrays together so the triples stay aligned.
            queries, query_content, query_lengths, query_imgs, \
            docs, doc_content, doc_lengths, doc_imgs, \
            neg_doc_ids, neg_docs_contents, neg_docs_lens, neg_docs_imgs_indices = my_utils.shuffle(query_ids, left_contents, left_lengths, left_imgs_indices,
                doc_ids, right_contents, right_lengths, right_imgs_indices,
                neg_doc_ids, neg_docs_contents, neg_docs_lens, neg_docs_imgs_indices)
            epoch_loss, total_pairs = 0.0, 0
            t1 = time.time()
            for (minibatch_num,
                 (batch_query, batch_query_content, batch_query_len, batch_query_imgs_indices,
                  batch_doc, batch_doc_content, batch_docs_lens, batch_doc_imgs_indices,
                  batch_neg_doc, batch_neg_doc_content, batch_neg_docs_lens, batch_neg_docs_imgs_indices)) \
                    in enumerate(my_utils.minibatch(queries, query_content, query_lengths, query_imgs,
                                                    docs, doc_content, doc_lengths, doc_imgs,
                                                    neg_doc_ids, neg_docs_contents, neg_docs_lens, neg_docs_imgs_indices,
                                                    batch_size = self._batch_size)):
                t10 = time.time()
                # Optional per-token idf weights for the query batch
                # (only when a TF-IDF table has been pre-computed).
                additional_data = {}
                if len(TFIDF.get_term_idf()) != 0:
                    query_idf_dict = TFIDF.get_term_idf()
                    query_idfs = [[query_idf_dict.get(int(word_idx), 0.0) for word_idx in row] for row in batch_query_content]
                    query_idfs = torch_utils.gpu(torch.from_numpy(np.array(query_idfs)).float(), self._use_cuda)
                    additional_data["query_idf"] = query_idfs

                # Move id/content tensors to GPU when configured.
                batch_query = my_utils.gpu(torch.from_numpy(batch_query), self._use_cuda)
                batch_query_content = my_utils.gpu(torch.from_numpy(batch_query_content), self._use_cuda)
                batch_doc = my_utils.gpu(torch.from_numpy(batch_doc), self._use_cuda)
                batch_doc_content = my_utils.gpu(torch.from_numpy(batch_doc_content), self._use_cuda)
                batch_neg_doc_content = my_utils.gpu(torch.from_numpy(batch_neg_doc_content), self._use_cuda)

                if self.use_visual:  # load images tensors
                    batch_query_imgs_indices = my_utils.gpu(torch.from_numpy(batch_query_imgs_indices), self._use_cuda)
                    batch_doc_imgs_indices = my_utils.gpu(torch.from_numpy(batch_doc_imgs_indices), self._use_cuda)
                    batch_neg_docs_imgs_indices = my_utils.gpu(torch.from_numpy(batch_neg_docs_imgs_indices), self._use_cuda)
                    additional_data[KeyWordSettings.QueryImagesIndices] = batch_query_imgs_indices.unsqueeze(1)  # (B, 1, M1)
                    additional_data[KeyWordSettings.DocImagesIndices] = batch_doc_imgs_indices.unsqueeze(1)  # (B, 1, M2)
                    additional_data[KeyWordSettings.NegDocImagesIndices] = batch_neg_docs_imgs_indices  # (B, n, M2)

                total_pairs += self._batch_size * self._num_negative_samples
                self._optimizer.zero_grad()
                # NOTE(review): if self._loss is not one of these, `loss`
                # is never bound and the next line raises NameError.
                if self._loss in ["bpr", "hinge", "pce", "bce", "cosine_max_margin_loss_dvsh"]:
                    loss = self._get_multiple_negative_predictions_normal(batch_query, batch_query_content,
                        batch_doc, batch_doc_content, batch_neg_doc, batch_neg_doc_content,
                        batch_query_len, batch_docs_lens, batch_neg_docs_lens, self._num_negative_samples,
                        **additional_data)
                epoch_loss += loss.item()
                iteration_counter += 1
                # if iteration_counter % 2 == 0: break
                TensorboardWrapper.mywriter().add_scalar("loss/minibatch_loss", loss.item(), iteration_counter)
                loss.backward()
                self._optimizer.step()
                t11 = time.time()
                # print("Running time for one mini-batch: ", t11 - t10, "seconds")
            # Average loss per sampled (positive, negative) pair.
            # NOTE(review): ZeroDivisionError if the epoch yields no minibatches.
            epoch_loss /= float(total_pairs)
            TensorboardWrapper.mywriter().add_scalar("loss/epoch_loss_avg", epoch_loss, epoch_num)
            # print("Number of Minibatches: ", minibatch_num, "Avg. loss of epoch: ", epoch_loss)
            t2 = time.time()
            epoch_train_time = t2 - t1
            if verbose:  # validation after each epoch
                t1 = time.time()
                # Sanity check: validation set has the expected query count.
                assert len(val_interactions.unique_queries_test) in KeyWordSettings.QueryCountVal, len(val_interactions.unique_queries_test)
                result_val = self.evaluate(val_interactions, topN)
                hits = result_val["hits"]
                ndcg = result_val["ndcg"]
                t2 = time.time()
                valiation_time = t2 - t1
                # Periodic (not final) test-set evaluation, for monitoring only.
                if epoch_num and epoch_num % self._testing_epochs == 0:
                    t1 = time.time()
                    assert len(test_interactions.unique_queries_test) in KeyWordSettings.QueryCountTest
                    result_test = self.evaluate(test_interactions, topN)
                    hits_test = result_test["hits"]
                    ndcg_test = result_test["ndcg"]
                    t2 = time.time()
                    testing_time = t2 - t1
                    TensorboardWrapper.mywriter().add_scalar("hit/hit_test", hits_test, epoch_num)
                    TensorboardWrapper.mywriter().add_scalar("ndcg/ndcg_test", ndcg_test, epoch_num)
                    FileHandler.myprint('|Epoch %03d | Test hits@%d = %.5f | Test ndcg@%d = %.5f | Testing time: %04.1f(s)'
                                        % (epoch_num, topN, hits_test, topN, ndcg_test, testing_time))
                TensorboardWrapper.mywriter().add_scalar("hit/hits_val", hits, epoch_num)
                TensorboardWrapper.mywriter().add_scalar("ndcg/ndcg_val", ndcg, epoch_num)
                FileHandler.myprint('|Epoch %03d | Train time: %04.1f(s) | Train loss: %.3f'
                                    '| Vad hits@%d = %.5f | Vad ndcg@%d = %.5f | Validation time: %04.1f(s)'
                                    % (epoch_num, epoch_train_time, epoch_loss, topN, hits, topN, ndcg, valiation_time))
                # Checkpoint when validation hit@N improves (ndcg breaks ties).
                if hits > best_hit or (hits == best_hit and ndcg > best_ndcg):
                    # if (hits + ndcg) > (best_hit + best_ndcg):
                    count_patience_epochs = 0
                    with open(self.saved_model, "wb") as f:
                        torch.save(self._net.state_dict(), f)
                    # test_results_dict = result_test
                    best_hit, best_ndcg, best_epoch = hits, ndcg, epoch_num
                    # test_hit, test_ndcg = hits_test, ndcg_test
                else: count_patience_epochs += 1
                if self._early_stopping_patience and count_patience_epochs > self._early_stopping_patience:
                    FileHandler.myprint("Early Stopped due to no better performance in %s epochs" % count_patience_epochs)
                    break
            # A NaN or exactly-zero epoch loss indicates a broken run.
            if np.isnan(epoch_loss) or epoch_loss == 0.0:
                raise ValueError('Degenerate epoch loss: {}'.format(epoch_loss))
        FileHandler.myprint("Closing tensorboard")
        TensorboardWrapper.mywriter().close()
        FileHandler.myprint('Best result: | vad hits@%d = %.5f | vad ndcg@%d = %.5f | epoch = %d' % (
            topN, best_hit, topN, best_ndcg, best_epoch))
        # NOTE(review): test_results_dict is never reassigned (the line is
        # commented out above), so this always prints "null".
        FileHandler.myprint_details(json.dumps(test_results_dict, sort_keys = True, indent = 2))
    def _get_multiple_negative_predictions_normal(self, query_ids: torch.Tensor,
                                                  query_contents: torch.Tensor,
                                                  doc_ids: torch.Tensor,
                                                  doc_contents: torch.Tensor,
                                                  negative_doc_ids: torch.Tensor,
                                                  negative_doc_contents: torch.Tensor,
                                                  query_lens: np.ndarray,
                                                  docs_lens: np.ndarray,
                                                  neg_docs_lens: np.ndarray, n: int, **kargs) -> torch.Tensor:
        """
        Score one positive and ``n`` negative docs per query and return the
        training loss. Since query_ids has shape (batch_size,) while
        negative_doc_ids has shape (batch_size, n), the query tensors are
        tiled n times so every (query, negative) pair can be scored in one
        forward pass.

        Parameters
        ----------
        query_ids: shape (B, )
        query_contents: (B, L1) where L1 is the number of words of each query
        doc_ids: shape (B, )
        doc_contents: (B, L2) where L2 is the number of words of each doc
        negative_doc_ids: (B, n)
        negative_doc_contents: shape (B, n, L2)
        query_lens: (B, )
        docs_lens: (B, )
        neg_docs_lens: (B, n)
        n: number of negs

        Note: Query and Doc have different lengths

        Returns
        -------
        torch.Tensor: scalar loss from ``self._loss_func``.
        """
        batch_size = query_ids.size(0)
        L2 = doc_contents.size(1)
        L1 = query_contents.size(1)
        assert negative_doc_contents.size() == (batch_size, n, L2)
        # kwargs forwarded to the negative-scoring forward pass.
        additional_data_negative = {}
        if self.use_visual:
            query_images = kargs[KeyWordSettings.QueryImagesIndices]  # (B, 1, M, 3, 224, 224)
            neg_doc_images = kargs[KeyWordSettings.NegDocImagesIndices]  # (B, n, M, 3, 224, 224)
            additional_data_negative[KeyWordSettings.QueryImagesIndices] = query_images  # query_images_tmp
            additional_data_negative[KeyWordSettings.DocImagesIndices] = neg_doc_images
        # needs to check
        # Tile each query's content n times: (B, L1) -> (B * n, L1),
        # keeping rows for the same query adjacent.
        query_contents_tmp = query_contents.view(batch_size * L1, 1).expand(batch_size * L1, n).reshape(batch_size, L1, n)
        query_contents_tmp = query_contents_tmp.permute(0, 2, 1).reshape(batch_size * n, L1)  # (B, n, L1)
        # Tile the query ids the same way: (B,) -> (B * n,).
        query_ids_tmp = query_ids.view(batch_size, 1).expand(batch_size, n).reshape(batch_size * n)
        additional_data_negative[KeyWordSettings.QueryIDs] = query_ids_tmp
        additional_data_negative[KeyWordSettings.DocIDs] = negative_doc_ids.reshape(batch_size * n)
        additional_data_negative[KeyWordSettings.UseCuda] = self._use_cuda
        assert query_contents_tmp.size() == (batch_size * n, L1)  # (B * n, L1)
        # Flatten negatives to one row per (query, negative) pair.
        batch_negatives_tmp = negative_doc_contents.reshape(batch_size * n, L2)  # (B * n, L2)
        kargs[KeyWordSettings.QueryIDs] = query_ids
        kargs[KeyWordSettings.DocIDs] = doc_ids
        kargs[KeyWordSettings.UseCuda] = self._use_cuda
        # why don't we combine all to 1???
        positive_prediction = self._net(query_contents, doc_contents, **kargs)  # (batch_size)
        negative_prediction = self._net(query_contents_tmp, batch_negatives_tmp, **additional_data_negative)  # (B * n)
        # NOTE(review): fit() also routes "cosine_max_margin_loss_dvsh" into
        # this method, but no branch below handles it, so `loss` would be
        # unbound (UnboundLocalError) for that setting — confirm intended.
        if self._loss == "bpr" or self._loss == "hinge":
            # Pairwise losses: tile positives so shapes match (B * n,).
            positive_prediction = positive_prediction.view(batch_size, 1).expand(batch_size, n).reshape(batch_size * n)
            assert positive_prediction.shape == negative_prediction.shape
            loss = self._loss_func(positive_prediction, negative_prediction)
        elif self._loss == "pce":
            negative_prediction = negative_prediction.view(batch_size, n)
            loss = self._loss_func(positive_prediction, negative_prediction)
        elif self._loss == "bce":
            # (B, ) vs. (B * n) shape.
            loss = self._loss_func(positive_prediction, negative_prediction)
        return loss
def evaluate(self, testRatings: interactions.MatchInteractionVisual, K: int, output_ranking = False, **kargs):
"""
I decided to move this function into Fitter class since different models have different ways to evaluate (i.e.
different data sources to use). Therefore, it is needed to have seperate evaluation methods in each Fitter
class. Furthermore, I notice that this function uses _use_cuda which is a property of Fitter class.
Parameters
----------
testRatings
K
output_ranking
kargs
Returns
-------
"""
ndcg_metric = normalized_discounted_cumulative_gain.NormalizedDiscountedCumulativeGain
hits, ndcgs = [], []
ndcgs_at_1 = []
list_error_analysis = []
for query, candidates in tqdm(testRatings.unique_queries_test.items()):
t3 = time.time()
docs, labels, doc_contents, _ = candidates
query_content = testRatings.dict_query_contents[query]
query_images_indices = testRatings.dict_query_imgages[query]
query_len = [testRatings.dict_query_lengths[query]] * len(labels)
doc_lens = [testRatings.dict_doc_lengths[d] for d in docs]
doc_images_indices = [testRatings.dict_doc_imgages[d] for d in docs]
additional_data = {}
additional_data[KeyWordSettings.Query_lens] = query_len
additional_data[KeyWordSettings.Doc_lens] = doc_lens
if len(TFIDF.get_term_idf()) > 0:
query_idf_dict = TFIDF.get_term_idf()
query_idfs = [query_idf_dict.get(int(word_idx), 0.0) for word_idx in query_content]
query_idfs = np.tile(query_idfs, (len(labels), 1))
query_idfs = my_utils.gpu(torch.from_numpy(np.array(query_idfs)).float(), self._use_cuda)
additional_data[KeyWordSettings.Query_Idf] = query_idfs
if self.use_visual:
t1 = time.time()
query_images_indices = np.array(query_images_indices)
assert query_images_indices.shape == (len(query_images_indices), )
query_images = query_images_indices.reshape(1, 1, len(query_images_indices))
doc_images = np.array(doc_images_indices)
query_images = torch_utils.gpu(torch.from_numpy(query_images), self._use_cuda)
doc_images = torch_utils.gpu(torch.from_numpy(doc_images), self._use_cuda)
additional_data[KeyWordSettings.QueryImagesIndices] = query_images # (1, 1, M1)
additional_data[KeyWordSettings.DocImagesIndices] = doc_images.unsqueeze(1) # (B, 1, M2)
t2 = time.time()
# print("Loading time images to gpu of validation: ", t2 - t1, "seconds")
| |
XOR
'wedge': '\u2227', # ∧ LOGICAL AND
'wr': '\u2240', # ≀ WREATH PRODUCT
}
# LaTeX \mathclose (closing delimiter) commands mapped to Unicode characters.
mathclose = {
    'Rbag': '\u27c6', # ⟆ RIGHT S-SHAPED BAG DELIMITER
    'lrcorner': '\u231f', # ⌟ BOTTOM RIGHT CORNER
    'rangle': '\u27e9', # ⟩ MATHEMATICAL RIGHT ANGLE BRACKET
    'rbag': '\u27c6', # ⟆ RIGHT S-SHAPED BAG DELIMITER
    'rbrace': '}', # } RIGHT CURLY BRACKET
    'rbrack': ']', # ] RIGHT SQUARE BRACKET
    'rceil': '\u2309', # ⌉ RIGHT CEILING
    'rfloor': '\u230b', # ⌋ RIGHT FLOOR
    'rgroup': '\u27ef', # ⟯ MATHEMATICAL RIGHT FLATTENED PARENTHESIS
    'rrbracket': '\u27e7', # ⟧ MATHEMATICAL RIGHT WHITE SQUARE BRACKET
    'rrparenthesis': '\u2988', # ⦈ Z NOTATION RIGHT IMAGE BRACKET
    'urcorner': '\u231d', # ⌝ TOP RIGHT CORNER
    '}': '}', # } RIGHT CURLY BRACKET
    }
# LaTeX \mathfence (vertical-bar fence) commands mapped to Unicode characters.
mathfence = {
    '|': '\u2016',     # ‖ DOUBLE VERTICAL LINE
    'Vert': '\u2016',  # ‖ DOUBLE VERTICAL LINE
    'vert': '|',       # | VERTICAL LINE
    }
# LaTeX \mathop (big operator) commands mapped to Unicode characters.
mathop = {
    'Join': '\u2a1d', # ⨝ JOIN
    'bigcap': '\u22c2', # ⋂ N-ARY INTERSECTION
    'bigcup': '\u22c3', # ⋃ N-ARY UNION
    'biginterleave': '\u2afc', # ⫼ LARGE TRIPLE VERTICAL BAR OPERATOR
    'bigodot': '\u2a00', # ⨀ N-ARY CIRCLED DOT OPERATOR
    'bigoplus': '\u2a01', # ⨁ N-ARY CIRCLED PLUS OPERATOR
    'bigotimes': '\u2a02', # ⨂ N-ARY CIRCLED TIMES OPERATOR
    'bigsqcup': '\u2a06', # ⨆ N-ARY SQUARE UNION OPERATOR
    'biguplus': '\u2a04', # ⨄ N-ARY UNION OPERATOR WITH PLUS
    'bigvee': '\u22c1', # ⋁ N-ARY LOGICAL OR
    'bigwedge': '\u22c0', # ⋀ N-ARY LOGICAL AND
    'coprod': '\u2210', # ∐ N-ARY COPRODUCT
    'fatsemi': '\u2a1f', # ⨟ Z NOTATION SCHEMA COMPOSITION
    'fint': '\u2a0f', # ⨏ INTEGRAL AVERAGE WITH SLASH
    'iiiint': '\u2a0c', # ⨌ QUADRUPLE INTEGRAL OPERATOR
    'iiint': '\u222d', # ∭ TRIPLE INTEGRAL
    'iint': '\u222c', # ∬ DOUBLE INTEGRAL
    'int': '\u222b', # ∫ INTEGRAL
    'oiint': '\u222f', # ∯ SURFACE INTEGRAL
    'oint': '\u222e', # ∮ CONTOUR INTEGRAL
    'ointctrclockwise': '\u2233', # ∳ ANTICLOCKWISE CONTOUR INTEGRAL
    'prod': '\u220f', # ∏ N-ARY PRODUCT
    'sqint': '\u2a16', # ⨖ QUATERNION INTEGRAL OPERATOR
    'sum': '\u2211', # ∑ N-ARY SUMMATION
    'varointclockwise': '\u2232', # ∲ CLOCKWISE CONTOUR INTEGRAL
    }
# LaTeX \mathopen (opening delimiter) commands mapped to Unicode characters.
mathopen = {
    'Lbag': '\u27c5', # ⟅ LEFT S-SHAPED BAG DELIMITER
    'langle': '\u27e8', # ⟨ MATHEMATICAL LEFT ANGLE BRACKET
    'lbag': '\u27c5', # ⟅ LEFT S-SHAPED BAG DELIMITER
    'lbrace': '{', # { LEFT CURLY BRACKET
    'lbrack': '[', # [ LEFT SQUARE BRACKET
    'lceil': '\u2308', # ⌈ LEFT CEILING
    'lfloor': '\u230a', # ⌊ LEFT FLOOR
    'lgroup': '\u27ee', # ⟮ MATHEMATICAL LEFT FLATTENED PARENTHESIS
    'llbracket': '\u27e6', # ⟦ MATHEMATICAL LEFT WHITE SQUARE BRACKET
    'llcorner': '\u231e', # ⌞ BOTTOM LEFT CORNER
    'llparenthesis': '\u2987', # ⦇ Z NOTATION LEFT IMAGE BRACKET
    'ulcorner': '\u231c', # ⌜ TOP LEFT CORNER
    '{': '{', # { LEFT CURLY BRACKET
    }
mathord = {
'#': '#', # # NUMBER SIGN
'$': '$', # $ DOLLAR SIGN
'%': '%', # % PERCENT SIGN
'&': '&', # & AMPERSAND
'AC': '\u223f', # ∿ SINE WAVE
'APLcomment': '\u235d', # ⍝ APL FUNCTIONAL SYMBOL UP SHOE JOT
'APLdownarrowbox': '\u2357', # ⍗ APL FUNCTIONAL SYMBOL QUAD DOWNWARDS ARROW
'APLinput': '\u235e', # ⍞ APL FUNCTIONAL SYMBOL QUOTE QUAD
'APLinv': '\u2339', # ⌹ APL FUNCTIONAL SYMBOL QUAD DIVIDE
'APLleftarrowbox': '\u2347', # ⍇ APL FUNCTIONAL SYMBOL QUAD LEFTWARDS ARROW
'APLlog': '\u235f', # ⍟ APL FUNCTIONAL SYMBOL CIRCLE STAR
'APLrightarrowbox': '\u2348', # ⍈ APL FUNCTIONAL SYMBOL QUAD RIGHTWARDS ARROW
'APLuparrowbox': '\u2350', # ⍐ APL FUNCTIONAL SYMBOL QUAD UPWARDS ARROW
'Aries': '\u2648', # ♈ ARIES
'CIRCLE': '\u25cf', # ● BLACK CIRCLE
'CheckedBox': '\u2611', # ☑ BALLOT BOX WITH CHECK
'Diamond': '\u25c7', # ◇ WHITE DIAMOND
'Finv': '\u2132', # Ⅎ TURNED CAPITAL F
'Game': '\u2141', # ⅁ TURNED SANS-SERIF CAPITAL G
'Gemini': '\u264a', # ♊ GEMINI
'Jupiter': '\u2643', # ♃ JUPITER
'LEFTCIRCLE': '\u25d6', # ◖ LEFT HALF BLACK CIRCLE
'LEFTcircle': '\u25d0', # ◐ CIRCLE WITH LEFT HALF BLACK
'Leo': '\u264c', # ♌ LEO
'Libra': '\u264e', # ♎ LIBRA
'Mars': '\u2642', # ♂ MALE SIGN
'Mercury': '\u263f', # ☿ MERCURY
'Neptune': '\u2646', # ♆ NEPTUNE
'Pluto': '\u2647', # ♇ PLUTO
'RIGHTCIRCLE': '\u25d7', # ◗ RIGHT HALF BLACK CIRCLE
'RIGHTcircle': '\u25d1', # ◑ CIRCLE WITH RIGHT HALF BLACK
'Saturn': '\u2644', # ♄ SATURN
'Scorpio': '\u264f', # ♏ SCORPIUS
'Square': '\u2610', # ☐ BALLOT BOX
'Sun': '\u2609', # ☉ SUN
'Taurus': '\u2649', # ♉ TAURUS
'Uranus': '\u2645', # ♅ URANUS
'Venus': '\u2640', # ♀ FEMALE SIGN
'XBox': '\u2612', # ☒ BALLOT BOX WITH X
'Yup': '\u2144', # ⅄ TURNED SANS-SERIF CAPITAL Y
'_': '_', # _ LOW LINE
'angle': '\u2220', # ∠ ANGLE
'aquarius': '\u2652', # ♒ AQUARIUS
'aries': '\u2648', # ♈ ARIES
'ast': '*', # * ASTERISK
'backepsilon': '\u03f6', # ϶ GREEK REVERSED LUNATE EPSILON SYMBOL
'backprime': '\u2035', # ‵ REVERSED PRIME
'backslash': '\\', # \ REVERSE SOLIDUS
'because': '\u2235', # ∵ BECAUSE
'bigstar': '\u2605', # ★ BLACK STAR
'binampersand': '&', # & AMPERSAND
'blacklozenge': '\u2b27', # ⬧ BLACK MEDIUM LOZENGE
'blacksmiley': '\u263b', # ☻ BLACK SMILING FACE
'blacksquare': '\u25fc', # ◼ BLACK MEDIUM SQUARE
'bot': '\u22a5', # ⊥ UP TACK
'boy': '\u2642', # ♂ MALE SIGN
'cancer': '\u264b', # ♋ CANCER
'capricornus': '\u2651', # ♑ CAPRICORN
'cdots': '\u22ef', # ⋯ MIDLINE HORIZONTAL ELLIPSIS
'cent': '\xa2', # ¢ CENT SIGN
'centerdot': '\u2b1d', # ⬝ BLACK VERY SMALL SQUARE
'checkmark': '\u2713', # ✓ CHECK MARK
'circlearrowleft': '\u21ba', # ↺ ANTICLOCKWISE OPEN CIRCLE ARROW
'circlearrowright': '\u21bb', # ↻ CLOCKWISE OPEN CIRCLE ARROW
'circledR': '\xae', # ® REGISTERED SIGN
'circledcirc': '\u25ce', # ◎ BULLSEYE
'clubsuit': '\u2663', # ♣ BLACK CLUB SUIT
'complement': '\u2201', # ∁ COMPLEMENT
'dasharrow': '\u21e2', # ⇢ RIGHTWARDS DASHED ARROW
'dashleftarrow': '\u21e0', # ⇠ LEFTWARDS DASHED ARROW
'dashrightarrow': '\u21e2', # ⇢ RIGHTWARDS DASHED ARROW
'diameter': '\u2300', # ⌀ DIAMETER SIGN
'diamondsuit': '\u2662', # ♢ WHITE DIAMOND SUIT
'earth': '\u2641', # ♁ EARTH
'exists': '\u2203', # ∃ THERE EXISTS
'female': '\u2640', # ♀ FEMALE SIGN
'flat': '\u266d', # ♭ MUSIC FLAT SIGN
'forall': '\u2200', # ∀ FOR ALL
'fourth': '\u2057', # ⁗ QUADRUPLE PRIME
'frownie': '\u2639', # ☹ WHITE FROWNING FACE
'gemini': '\u264a', # ♊ GEMINI
'girl': '\u2640', # ♀ FEMALE SIGN
'heartsuit': '\u2661', # ♡ WHITE HEART SUIT
'infty': '\u221e', # ∞ INFINITY
'invneg': '\u2310', # ⌐ REVERSED NOT SIGN
'jupiter': '\u2643', # ♃ JUPITER
'ldots': '\u2026', # … HORIZONTAL ELLIPSIS
'leftmoon': '\u263e', # ☾ LAST QUARTER MOON
'leftturn': '\u21ba', # ↺ ANTICLOCKWISE OPEN CIRCLE ARROW
'leo': '\u264c', # ♌ LEO
'libra': '\u264e', # ♎ LIBRA
'lnot': '\xac', # ¬ NOT SIGN
'lozenge': '\u25ca', # ◊ LOZENGE
'male': '\u2642', # ♂ MALE SIGN
'maltese': '\u2720', # ✠ MALTESE CROSS
'mathdollar': '$', # $ DOLLAR SIGN
'measuredangle': '\u2221', # ∡ MEASURED ANGLE
'mercury': '\u263f', # ☿ MERCURY
'mho': '\u2127', # ℧ INVERTED OHM SIGN
'nabla': '\u2207', # ∇ NABLA
'natural': '\u266e', # ♮ MUSIC NATURAL SIGN
'neg': '\xac', # ¬ NOT SIGN
'neptune': '\u2646', # ♆ NEPTUNE
'nexists': '\u2204', # ∄ THERE DOES NOT EXIST
'notbackslash': '\u2340', # ⍀ APL FUNCTIONAL SYMBOL BACKSLASH BAR
'partial': '\u2202', # ∂ PARTIAL DIFFERENTIAL
'pisces': '\u2653', # ♓ PISCES
'pluto': '\u2647', # ♇ PLUTO
'pounds': '\xa3', # £ POUND SIGN
'prime': '\u2032', # ′ PRIME
'quarternote': '\u2669', # ♩ QUARTER NOTE
'rightmoon': '\u263d', # ☽ FIRST QUARTER MOON
'rightturn': '\u21bb', # ↻ CLOCKWISE OPEN CIRCLE ARROW
'sagittarius': '\u2650', # ♐ SAGITTARIUS
'saturn': '\u2644', # ♄ SATURN
'scorpio': '\u264f', # ♏ SCORPIUS
'second': '\u2033', # ″ DOUBLE PRIME
'sharp': '\u266f', # ♯ MUSIC SHARP SIGN
'sim': '~', # ~ TILDE
'slash': '/', # / SOLIDUS
'smiley': '\u263a', # ☺ WHITE SMILING FACE
'spadesuit': '\u2660', # ♠ BLACK SPADE SUIT
'spddot': '\xa8', # ¨ DIAERESIS
'sphat': '^', # ^ CIRCUMFLEX ACCENT
'sphericalangle': '\u2222', # ∢ SPHERICAL ANGLE
'sptilde': '~', # ~ TILDE
'square': '\u25fb', # ◻ WHITE MEDIUM SQUARE
'sun': '\u263c', # ☼ WHITE SUN WITH RAYS
'taurus': '\u2649', # ♉ TAURUS
'therefore': '\u2234', # ∴ THEREFORE
'third': '\u2034', # ‴ TRIPLE PRIME
'top': '\u22a4', # ⊤ DOWN TACK
'triangleleft': '\u25c5', # ◅ WHITE LEFT-POINTING POINTER
'triangleright': '\u25bb', # ▻ WHITE RIGHT-POINTING POINTER
'twonotes': '\u266b', # ♫ BEAMED EIGHTH NOTES
'uranus': '\u2645', # ♅ URANUS
'varEarth': '\u2641', # ♁ EARTH
| |
and uniform
B-splines as special cases. Thus we will talk about non-uniform B-splines when we mean the general case, incorporating
both uniform and open uniform.
What can you do to control the shape of a B-spline?
- Move the control points.
- Add or remove control points.
- Use multiple control points.
- Change the order, k.
- Change the type of knot vector.
- Change the relative spacing of the knots.
- Use multiple knot values in the knot vector.
What should the defaults be?
If there are no pressing reasons for doing otherwise, your B-spline should be defined as follows:
- k=4 (cubic)
- no multiple control points
- uniform (for a closed curve) or open uniform (for an open curve) knot vector.
Rational B-splines
==================
https://www.cl.cam.ac.uk/teaching/2000/AGraphHCI/SMEG/node5.html:
Rational B-splines have all of the properties of non-rational B-splines plus the following two useful features:
They produce the correct results under projective transformations (while non-rational B-splines only produce the correct
results under affine transformations).
They can be used to represent lines, conics, non-rational B-splines; and, when generalised to patches, can represent
planes, quadrics, and tori.
The antonym of rational is non-rational. Non-rational B-splines are a special case of rational B-splines, just as
uniform B-splines are a special case of non-uniform B-splines. Thus, non-uniform rational B-splines encompass almost
every other possible 3D shape definition. Non-uniform rational B-spline is a bit of a mouthful and so it is generally
abbreviated to NURBS.
We have already learnt all about the B-spline bit of NURBS and about the non-uniform bit. So now all we need to
know is the meaning of the rational bit and we will fully(?) understand NURBS.
Rational B-splines are defined simply by applying the B-spline equation (Equation 87) to homogeneous coordinates,
rather than normal 3D coordinates.
"""
from typing import List, Iterable, Sequence, TYPE_CHECKING, Dict, Tuple, Optional
from .vector import Vector, distance
from .matrix import Matrix
from math import pow, isclose
from ezdxf.lldxf.const import DXFValueError
if TYPE_CHECKING:
from ezdxf.eztypes import Vertex
def open_uniform_knot_vector(n: int, order: int) -> List[float]:
    """
    Build an open (clamped) uniform knot vector for a B-spline of `order`
    with `n` control points.

    The first `order` knots are 0, interior knots increase by 1, and the
    last `order` knots repeat the final value; ``len(result) == n + order``.

    `order` = degree + 1

    Args:
        n: count of control points
        order: spline order

    """
    total = n + order
    upper = n + 2
    result = [0.]
    for index in range(2, total + 1):
        # Only strictly interior positions introduce a new knot value;
        # the clamped ends just repeat the previous knot.
        if order < index < upper:
            result.append(result[-1] + 1.)
        else:
            result.append(result[-1])
    return result
def is_uniform_knots(knots: Sequence[float], places: int = 4) -> bool:
    """
    Return ``True`` if all consecutive knot differences are equal when
    rounded to `places` decimal places (i.e. the knot vector is uniform).
    """
    unique_deltas = {round(b - a, ndigits=places) for a, b in zip(knots, knots[1:])}
    return len(unique_deltas) == 1
def uniform_knot_vector(n: int, order: int) -> List[float]:
    """
    Build a uniform knot vector for a B-spline of `order` with `n` control
    points: the floats 0, 1, ..., n + order - 1.

    `order` = degree + 1

    Args:
        n: count of control points
        order: spline order

    """
    return list(map(float, range(n + order)))
def required_knot_values(count: int, order: int) -> int:
    """
    Return the count of required knot values for a B-spline of `order`
    with `count` control points.

    Args:
        count: count of control points, in math papers often called `n` + 1
        order: order of B-spline, in math papers often called `k`

    Relationships:

    - `k` (order) = `p` (degree) + 1
    - 2 <= `k` (order) <= `n` + 1 (count)

    Raises:
        DXFValueError: for an invalid count/order combination

    """
    # With k = order and n = count - 1 the classic constraint
    # 2 <= k <= n + 1 is simply 2 <= order <= count.
    if not (2 <= order <= count):
        raise DXFValueError('Invalid count/order combination')
    # n + p + 2 == (count - 1) + (order - 1) + 2 == count + order
    return count + order
def uniform_t_vector(fit_points: Sequence) -> Iterable[float]:
    """Yield evenly spaced parameter values from 0 to 1, one per fit point."""
    divisor = float(len(fit_points) - 1)
    for step in range(len(fit_points)):
        yield step / divisor
def distance_t_vector(fit_points: Iterable['Vertex']):
    # Chord-length parametrization: the centripetal method with power=1 uses
    # the raw point-to-point distances, which is exactly the "distance" method.
    return centripetal_t_vector(fit_points, power=1)
def centripetal_t_vector(fit_points: Iterable['Vertex'], power: float = .5) -> Iterable[float]:
    """
    Yield parameter values in [0, 1] spaced proportional to the chord lengths
    between consecutive fit points raised to `power` (power=0.5 is the
    centripetal method, power=1 the chord-length/"distance" method).
    """
    chords = [pow(distance(a, b), power) for a, b in zip(fit_points, fit_points[1:])]
    full_length = sum(chords)
    accumulated = 0.
    yield accumulated
    for chord in chords:
        accumulated += chord
        yield accumulated / full_length
def bspline_basis(u: float, index: int, degree: int, knots: Sequence[float]) -> float:
    """
    B-spline basis function N_i,p(u), evaluated with the Cox-de Boor
    recursion and memoized per call.

    Simple recursive implementation for testing and comparison.

    Args:
        u: curve parameter in range [0 .. max(knots)]
        index: index of control point
        degree: degree of B-spline
        knots: knots vector

    Returns:
        float: basis value N_i,p(u)

    """
    memo = {}  # type: Dict[Tuple[int, int], float]
    u = float(u)

    def basis(i: int, p: int) -> float:
        key = (i, p)
        if key in memo:
            return memo[key]
        if p == 0:
            # Half-open support [knots[i], knots[i+1]); the very last
            # parameter value is handled by the caller if needed.
            value = 1 if knots[i] <= u < knots[i + 1] else 0.
        else:
            # Convention 0/0 == 0 for repeated knots (zero denominator).
            denom = knots[i + p] - knots[i]
            left = (u - knots[i]) / denom * basis(i, p - 1) if denom else 0.
            denom = knots[i + p + 1] - knots[i + 1]
            right = (knots[i + p + 1] - u) / denom * basis(i + 1, p - 1) if denom else 0.
            value = left + right
        memo[key] = value
        return value

    return basis(int(index), int(degree))
def bspline_basis_vector(u: float, count: int, degree: int, knots: Sequence[float]) -> List[float]:
    """
    Create the full basis vector at parameter u.

    Used with bspline_basis() for testing and comparison.

    Args:
        u: curve parameter in range [0 .. max(knots)]
        count: control point count (n + 1)
        degree: degree of B-spline (order = degree + 1)
        knots: knot vector

    Returns:
        List[float]: basis vector, len(basis) == count

    """
    assert len(knots) == (count + degree + 1)
    values = [bspline_basis(u, i, degree, knots) for i in range(count)]  # type: List[float]
    # The half-open support of the basis functions misses u == max(knots);
    # force the curve to end exactly at the last control point.
    if isclose(u, knots[-1]):
        values[-1] = 1.
    return values
def bspline_vertex(u: float, degree: int, control_points: Sequence['Vertex'], knots: Sequence[float]) -> Vector:
    """
    Calculate a B-spline vertex at parameter u as the basis-weighted sum of
    the control points.

    Used with bspline_basis_vector() for testing and comparison.

    Args:
        u: curve parameter in range [0 .. max(knots)]
        degree: degree of B-spline (order = degree + 1)
        control_points: control points as list of (x, y[,z]) tuples
        knots: knot vector as list of floats, len(knots) == (count + order)

    """
    weights = bspline_basis_vector(u, count=len(control_points), degree=degree, knots=knots)
    result = Vector()
    for weight, control_point in zip(weights, control_points):
        result += Vector(control_point) * weight
    return result
def bspline_control_frame(fit_points: Iterable['Vertex'], degree: int = 3, method: str = 'distance', power: float = .5):
    """
    Generate the control points for the `B-spline`_ control frame by
    `Curve Global Interpolation`_, given the fit points and the degree of
    the B-spline.

    Three methods are available to build the parameter vector t:

    =================== ============================================================
    Method              Description
    =================== ============================================================
    ``'uniform'``       t values from ``0`` to ``1`` evenly spaced; see `uniform`_ method
    ``'distance'``      t values proportional to the fit point distances; see `chord length`_ method
    ``'centripetal'``   t values proportional to the fit point distances ^ ``power``; see `centripetal`_ method
    =================== ============================================================

    Args:
        fit_points: fit points of B-spline, as list of :class:`Vector` compatible objects
        degree: degree of B-spline
        method: calculation method for parameter vector t
        power: power for centripetal method

    Returns:
        :class:`BSpline`

    Raises:
        DXFValueError: too few fit points for `degree` or unknown `method`

    """
    fit_points = Vector.list(fit_points)
    count = len(fit_points)
    order = degree + 1
    if order > count:
        raise DXFValueError('Need more fit points for degree {}'.format(degree))
    if method == 'uniform':
        t_vector = list(uniform_t_vector(fit_points))  # equally spaced 0 .. 1
    elif method == 'distance':
        t_vector = list(distance_t_vector(fit_points))
    elif method == 'centripetal':
        t_vector = list(centripetal_t_vector(fit_points, power=power))
    else:
        raise DXFValueError('Unknown method: {}'.format(method))
    knots = list(control_frame_knots(count - 1, degree, t_vector))
    control_points = global_curve_interpolation(fit_points, degree, t_vector, knots)
    spline = BSpline(control_points, order=order, knots=knots)
    spline.t_array = t_vector
    return spline
def bspline_control_frame_approx(fit_points: Iterable['Vertex'], count: int, degree: int = 3, method: str = 'distance',
power: float = .5):
"""
Approximate `B-spline`_ by a reduced count of control points, given are the fit points and the degree of
the B-spline.
Args:
fit_points: all fit points of B-spline as :class:`Vector` compatible objects
count: count of designated control points
degree: degree of B-spline
method: calculation method for parameter vector t, see :func:`bspline_control_frame`
power: power for centripetal method
Returns:
:class:`BSpline`
"""
def create_t_vector():
if method == 'uniform':
return uniform_t_vector(fit_points) # equally | |
# coding: utf-8
import pprint
import re
import six
class QueryJobResp:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'id': 'str',
'parent_id': 'str',
'name': 'str',
'status': 'str',
'description': 'str',
'create_time': 'str',
'task_type': 'str',
'source_endpoint': 'Endpoint',
'dmq_endpoint': 'Endpoint',
'source_sharding': 'list[Endpoint]',
'target_endpoint': 'Endpoint',
'net_type': 'str',
'failed_reason': 'str',
'inst_info': 'InstInfo',
'actual_start_time': 'str',
'full_transfer_complete_time': 'str',
'update_time': 'str',
'job_direction': 'str',
'db_use_type': 'str',
'need_restart': 'bool',
'is_target_readonly': 'bool',
'conflict_policy': 'str',
'filter_ddl_policy': 'str',
'speed_limit': 'list[SpeedLimitInfo]',
'schema_type': 'str',
'node_num': 'str',
'object_switch': 'bool',
'master_job_id': 'str',
'full_mode': 'str',
'struct_trans': 'bool',
'index_trans': 'bool',
'replace_definer': 'bool',
'migrate_user': 'bool',
'sync_database': 'bool',
'error_code': 'str',
'error_message': 'str',
'target_root_db': 'DefaultRootDb',
'az_code': 'str',
'vpc_id': 'str',
'subnet_id': 'str',
'security_group_id': 'str',
'multi_write': 'bool',
'support_ip_v6': 'bool',
'inherit_id': 'str',
'gtid': 'str',
'alarm_notify': 'str',
'incre_start_position': 'str'
}
attribute_map = {
'id': 'id',
'parent_id': 'parent_id',
'name': 'name',
'status': 'status',
'description': 'description',
'create_time': 'create_time',
'task_type': 'task_type',
'source_endpoint': 'source_endpoint',
'dmq_endpoint': 'dmq_endpoint',
'source_sharding': 'source_sharding',
'target_endpoint': 'target_endpoint',
'net_type': 'net_type',
'failed_reason': 'failed_reason',
'inst_info': 'inst_info',
'actual_start_time': 'actual_start_time',
'full_transfer_complete_time': 'full_transfer_complete_time',
'update_time': 'update_time',
'job_direction': 'job_direction',
'db_use_type': 'db_use_type',
'need_restart': 'need_restart',
'is_target_readonly': 'is_target_readonly',
'conflict_policy': 'conflict_policy',
'filter_ddl_policy': 'filter_ddl_policy',
'speed_limit': 'speed_limit',
'schema_type': 'schema_type',
'node_num': 'node_num',
'object_switch': 'object_switch',
'master_job_id': 'master_job_id',
'full_mode': 'full_mode',
'struct_trans': 'struct_trans',
'index_trans': 'index_trans',
'replace_definer': 'replace_definer',
'migrate_user': 'migrate_user',
'sync_database': 'sync_database',
'error_code': 'error_code',
'error_message': 'error_message',
'target_root_db': 'target_root_db',
'az_code': 'az_code',
'vpc_id': 'vpc_id',
'subnet_id': 'subnet_id',
'security_group_id': 'security_group_id',
'multi_write': 'multi_write',
'support_ip_v6': 'support_ip_v6',
'inherit_id': 'inherit_id',
'gtid': 'gtid',
'alarm_notify': 'alarm_notify',
'incre_start_position': 'incre_start_position'
}
def __init__(self, id=None, parent_id=None, name=None, status=None, description=None, create_time=None, task_type=None, source_endpoint=None, dmq_endpoint=None, source_sharding=None, target_endpoint=None, net_type=None, failed_reason=None, inst_info=None, actual_start_time=None, full_transfer_complete_time=None, update_time=None, job_direction=None, db_use_type=None, need_restart=None, is_target_readonly=None, conflict_policy=None, filter_ddl_policy=None, speed_limit=None, schema_type=None, node_num=None, object_switch=None, master_job_id=None, full_mode=None, struct_trans=None, index_trans=None, replace_definer=None, migrate_user=None, sync_database=None, error_code=None, error_message=None, target_root_db=None, az_code=None, vpc_id=None, subnet_id=None, security_group_id=None, multi_write=None, support_ip_v6=None, inherit_id=None, gtid=None, alarm_notify=None, incre_start_position=None):
"""QueryJobResp - a model defined in huaweicloud sdk"""
self._id = None
self._parent_id = None
self._name = None
self._status = None
self._description = None
self._create_time = None
self._task_type = None
self._source_endpoint = None
self._dmq_endpoint = None
self._source_sharding = None
self._target_endpoint = None
self._net_type = None
self._failed_reason = None
self._inst_info = None
self._actual_start_time = None
self._full_transfer_complete_time = None
self._update_time = None
self._job_direction = None
self._db_use_type = None
self._need_restart = None
self._is_target_readonly = None
self._conflict_policy = None
self._filter_ddl_policy = None
self._speed_limit = None
self._schema_type = None
self._node_num = None
self._object_switch = None
self._master_job_id = None
self._full_mode = None
self._struct_trans = None
self._index_trans = None
self._replace_definer = None
self._migrate_user = None
self._sync_database = None
self._error_code = None
self._error_message = None
self._target_root_db = None
self._az_code = None
self._vpc_id = None
self._subnet_id = None
self._security_group_id = None
self._multi_write = None
self._support_ip_v6 = None
self._inherit_id = None
self._gtid = None
self._alarm_notify = None
self._incre_start_position = None
self.discriminator = None
if id is not None:
self.id = id
if parent_id is not None:
self.parent_id = parent_id
if name is not None:
self.name = name
if status is not None:
self.status = status
if description is not None:
self.description = description
if create_time is not None:
self.create_time = create_time
if task_type is not None:
self.task_type = task_type
if source_endpoint is not None:
self.source_endpoint = source_endpoint
if dmq_endpoint is not None:
self.dmq_endpoint = dmq_endpoint
if source_sharding is not None:
self.source_sharding = source_sharding
if target_endpoint is not None:
self.target_endpoint = target_endpoint
if net_type is not None:
self.net_type = net_type
if failed_reason is not None:
self.failed_reason = failed_reason
if inst_info is not None:
self.inst_info = inst_info
if actual_start_time is not None:
self.actual_start_time = actual_start_time
if full_transfer_complete_time is not None:
self.full_transfer_complete_time = full_transfer_complete_time
if update_time is not None:
self.update_time = update_time
if job_direction is not None:
self.job_direction = job_direction
if db_use_type is not None:
self.db_use_type = db_use_type
if need_restart is not None:
self.need_restart = need_restart
if is_target_readonly is not None:
self.is_target_readonly = is_target_readonly
if conflict_policy is not None:
self.conflict_policy = conflict_policy
if filter_ddl_policy is not None:
self.filter_ddl_policy = filter_ddl_policy
if speed_limit is not None:
self.speed_limit = speed_limit
if schema_type is not None:
self.schema_type = schema_type
if node_num is not None:
self.node_num = node_num
if object_switch is not None:
self.object_switch = object_switch
if master_job_id is not None:
self.master_job_id = master_job_id
if full_mode is not None:
self.full_mode = full_mode
if struct_trans is not None:
self.struct_trans = struct_trans
if index_trans is not None:
self.index_trans = index_trans
if replace_definer is not None:
self.replace_definer = replace_definer
if migrate_user is not None:
self.migrate_user = migrate_user
if sync_database is not None:
self.sync_database = sync_database
if error_code is not None:
self.error_code = error_code
if error_message is not None:
self.error_message = error_message
if target_root_db is not None:
self.target_root_db = target_root_db
if az_code is not None:
self.az_code = az_code
if vpc_id is not None:
self.vpc_id = vpc_id
if subnet_id is not None:
self.subnet_id = subnet_id
if security_group_id is not None:
self.security_group_id = security_group_id
if multi_write is not None:
self.multi_write = multi_write
if support_ip_v6 is not None:
self.support_ip_v6 = support_ip_v6
if inherit_id is not None:
self.inherit_id = inherit_id
if gtid is not None:
self.gtid = gtid
if alarm_notify is not None:
self.alarm_notify = alarm_notify
if incre_start_position is not None:
self.incre_start_position = incre_start_position
    @property
    def id(self):
        """Gets the id of this QueryJobResp.
        Task ID.
        :return: The id of this QueryJobResp.
        :rtype: str
        """
        return self._id
    @id.setter
    def id(self, id):
        """Sets the id of this QueryJobResp.
        Task ID.
        :param id: The id of this QueryJobResp.
        :type: str
        """
        self._id = id
    @property
    def parent_id(self):
        """Gets the parent_id of this QueryJobResp.
        Parent task ID.
        :return: The parent_id of this QueryJobResp.
        :rtype: str
        """
        return self._parent_id
    @parent_id.setter
    def parent_id(self, parent_id):
        """Sets the parent_id of this QueryJobResp.
        Parent task ID.
        :param parent_id: The parent_id of this QueryJobResp.
        :type: str
        """
        self._parent_id = parent_id
    @property
    def name(self):
        """Gets the name of this QueryJobResp.
        Task name.
        :return: The name of this QueryJobResp.
        :rtype: str
        """
        return self._name
    @name.setter
    def name(self, name):
        """Sets the name of this QueryJobResp.
        Task name.
        :param name: The name of this QueryJobResp.
        :type: str
        """
        self._name = name
    @property
    def status(self):
        """Gets the status of this QueryJobResp.
        Task status.
        :return: The status of this QueryJobResp.
        :rtype: str
        """
        return self._status
    @status.setter
    def status(self, status):
        """Sets the status of this QueryJobResp.
        Task status.
        :param status: The status of this QueryJobResp.
        :type: str
        """
        self._status = status
    @property
    def description(self):
        """Gets the description of this QueryJobResp.
        Description.
        :return: The description of this QueryJobResp.
        :rtype: str
        """
        return self._description
    @description.setter
    def description(self, description):
        """Sets the description of this QueryJobResp.
        Description.
        :param description: The description of this QueryJobResp.
        :type: str
        """
        self._description = description
    @property
    def create_time(self):
        """Gets the create_time of this QueryJobResp.
        Creation time, in timestamp format.
        :return: The create_time of this QueryJobResp.
        :rtype: str
        """
        return self._create_time
    @create_time.setter
    def create_time(self, create_time):
        """Sets the create_time of this QueryJobResp.
        Creation time, in timestamp format.
        :param create_time: The create_time of this QueryJobResp.
        :type: str
        """
        self._create_time = create_time
    @property
    def task_type(self):
        """Gets the task_type of this QueryJobResp.
        Migration mode.
        :return: The task_type of this QueryJobResp.
        :rtype: str
        """
        return self._task_type
    @task_type.setter
    def task_type(self, task_type):
        """Sets the task_type of this QueryJobResp.
        Migration mode.
        :param task_type: The task_type of this QueryJobResp.
        :type: str
        """
        self._task_type = task_type
    @property
    def source_endpoint(self):
        """Gets the source_endpoint of this QueryJobResp.
        Source database endpoint.
        :return: The source_endpoint of this QueryJobResp.
        :rtype: Endpoint
        """
        return self._source_endpoint
    @source_endpoint.setter
    def source_endpoint(self, source_endpoint):
        """Sets the source_endpoint of this QueryJobResp.
        Source database endpoint.
        :param source_endpoint: The source_endpoint of this QueryJobResp.
        :type: Endpoint
        """
        self._source_endpoint = source_endpoint
    @property
    def dmq_endpoint(self):
        """Gets the dmq_endpoint of this QueryJobResp.
        DMQ endpoint (presumably a message-queue endpoint — confirm against API docs).
        :return: The dmq_endpoint of this QueryJobResp.
        :rtype: Endpoint
        """
        return self._dmq_endpoint
    @dmq_endpoint.setter
    def dmq_endpoint(self, dmq_endpoint):
        """Sets the dmq_endpoint of this QueryJobResp.
        :param dmq_endpoint: The dmq_endpoint of this QueryJobResp.
        :type: Endpoint
        """
        self._dmq_endpoint = dmq_endpoint
    @property
    def source_sharding(self):
        """Gets the source_sharding of this QueryJobResp.
        Physical source database information.
        :return: The source_sharding of this QueryJobResp.
        :rtype: list[Endpoint]
        """
        return self._source_sharding
    @source_sharding.setter
    def source_sharding(self, source_sharding):
        """Sets the source_sharding of this QueryJobResp.
        Physical source database information.
        :param source_sharding: The source_sharding of this QueryJobResp.
        :type: list[Endpoint]
        """
        self._source_sharding = source_sharding
@property
def target_endpoint(self):
"""Gets the target_endpoint of this QueryJobResp.
:return: The target_endpoint of this QueryJobResp.
:rtype: Endpoint
"""
| |
modeled astronomical tide as well as the highest and lowest observed tide for that time range, the total observation count and the maximum count of observations for any one pixel in the polygon, the polygon ID number (from 1 to 306), the polygon centroid in longitude and latitude and the count of tide stages attributed to every observation used in that polygon of the mosaic. For the count of tidal stage observations, e = ebbing tide, f = flowing tide, ph = peak high tide and pl = peak low tide.
The tide stages were calculated bycomparison to the modeled tide data for 15 minutes either side of the observation to determine the ebb, flow or peak movement of the tide.
Observations are filtered to remove poor quality observations including cloud, cloud shadow and band saturation (of any band).
For service status information, see https://status.dea.ga.gov.au""",
# Included as a keyword for the layer
"type": "Tidal Composite",
# Included as a keyword for the layer
"variant": "25m",
# The WMS name for the layer
"name": "low_tide_composite",
# The Datacube name for the associated data product
"product_name": "low_tide_comp_20p",
# The Datacube name for the associated pixel-quality product (optional)
# The name of the associated Datacube pixel-quality product
# "pq_dataset": "ls8_level1_usgs",
# The name of the measurement band for the pixel-quality product
# (Only required if pq_dataset is set)
# "pq_manual_data_merge": True,
# "data_manual_merge": True,
# "pq_band": "quality",
# "always_fetch_bands": [ "quality" ],
# Min zoom factor - sets the zoom level where the cutover from indicative polygons
# to actual imagery occurs.
"min_zoom_factor": 35.0,
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [150, 180, 200, 160],
# Time Zone. In hours added to UTC (maybe negative)
# Used for rounding off scene times to a date.
# 9 is good value for imagery of Australia.
"time_zone": 9,
# Extent mask function
# Determines what portions of dataset is potentially meaningful data.
"extent_mask_func": lambda data, band: data[band] != data[band].attrs['nodata'],
# Flags listed here are ignored in GetFeatureInfo requests.
# (defaults to empty list)
"ignore_info_flags": [],
"data_manual_merge": True,
"always_fetch_bands": [],
"apply_solar_corrections": False,
# Define layer wide legend graphic if no style is passed
# to GetLegendGraphic
"legend": {
# "url": ""
"styles": ["ndvi", "ndwi"]
},
"wcs_default_bands": ["red", "green", "blue"],
# A function that extracts the "sub-product" id (e.g. path number) from a dataset. Function should return a (small) integer
# If None or not specified, the product has no sub-layers.
# "sub_product_extractor": lambda ds: int(s3_path_pattern.search(ds.uris[0]).group("path")),
# A prefix used to describe the sub-layer in the GetCapabilities response.
# E.g. sub-layer 109 will be described as "Landsat Path 109"
# "sub_product_label": "Landsat Path",
# Bands to include in time-dimension "pixel drill".
# Don't activate in production unless you really know what you're doing.
# "band_drill": ["nir", "red", "green", "blue"],
# Styles.
#
# See band_mapper.py
#
# The various available spectral bands, and ways to combine them
# into a single rgb image.
# The examples here are ad hoc
#
"styles": [
# Examples of styles which are linear combinations of the available spectral bands.
#
{
"name": "simple_rgb",
"title": "Simple RGB",
"abstract": "Simple true-colour image, using the red, green and blue bands",
"components": {
"red": {
"red": 1.0
},
"green": {
"green": 1.0
},
"blue": {
"blue": 1.0
}
},
# The raw band value range to be compressed to an 8 bit range for the output image tiles.
# Band values outside this range are clipped to 0 or 255 as appropriate.
"scale_range": [0.0, 0.30]
},
{
"name": "infrared_green",
"title": "False colour - Green, SWIR, NIR",
"abstract": "False Colour image with SWIR1->Red, NIR->Green, and Green->Blue",
"components": {
"red": {
"swir1": 1.0
},
"green": {
"nir": 1.0
},
"blue": {
"green": 1.0
}
},
"scale_range": [0.0, 0.30]
},
#
# Examples of non-linear heat-mapped styles.
{
"name": "ndvi",
"title": "NDVI - Red, NIR",
"abstract": "Normalised Difference Vegetation Index - a derived index that correlates well with the existence of vegetation",
"index_function": lambda data: (data["nir"] - data["red"]) / (data["nir"] + data["red"]),
"needed_bands": ["red", "nir"],
"color_ramp": [
{
"value": -0.0,
"color": "#8F3F20",
"alpha": 0.0
},
{
"value": 0.0,
"color": "#8F3F20",
"alpha": 1.0
},
{
"value": 0.1,
"color": "#A35F18"
},
{
"value": 0.2,
"color": "#B88512"
},
{
"value": 0.3,
"color": "#CEAC0E"
},
{
"value": 0.4,
"color": "#E5D609"
},
{
"value": 0.5,
"color": "#FFFF0C"
},
{
"value": 0.6,
"color": "#C3DE09"
},
{
"value": 0.7,
"color": "#88B808"
},
{
"value": 0.8,
"color": "#529400"
},
{
"value": 0.9,
"color": "#237100"
},
{
"value": 1.0,
"color": "#114D04"
}
]
},
{
"name": "ndwi",
"title": "NDWI - Green, SWIR",
"abstract": "Normalised Difference Water Index - a derived index that correlates well with the existence of water",
"index_function": lambda data: (data["green"] - data["swir1"]) / (
data["swir1"] + data["green"]),
"needed_bands": ["green", "swir1"],
"color_ramp": [
{
"value": -0.0,
"color": "#8F3F20",
"alpha": 0.0
},
{
"value": 0.0,
"color": "#8F3F20",
"alpha": 1.0
},
{
"value": 1.0,
"color": "#0303FF",
},
]
},
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "simple_rgb",
},
]
},
{
# Name and title of the platform layer.
# Platform layers are not mappable. The name is for internal server use only.
"name": "ITEM",
"title": "Intertidal Extents Model",
"abstract": "The Intertidal Extents Model (ITEM) product is a national dataset of the exposed intertidal zone; "
"the land between the observed highest and lowest tide. ITEM provides the extent and topography of "
"the intertidal zone of Australia's coastline (excluding off-shore Territories). "
"This information was collated using observations in the Landsat archive since 1986. "
"ITEM can be a valuable complimentary dataset to both onshore LiDAR survey data and coarser offshore "
"bathymetry data, enabling a more realistic representation of the land and ocean interface.",
# Products available for this platform.
# For each product, the "name" is the Datacube name, and the label is used
# to describe the label to end-users.
"products": [
{
# Included as a keyword for the layer
"label": "Relative Layer",
"abstract": """
The Intertidal Extents Model (ITEM v2.0) product analyses GA’s historic archive of satellite imagery to derive a model of the spatial extents of the intertidal zone throughout the tidal cycle. The model can assist in understanding the relative elevation profile of the intertidal zone,
delineating exposed areas at differing tidal heights and stages.
The product differs from previous methods used to map the intertidal zone which have been predominately focused on analysing a small number of individual satellite images per location (e.g Ryu et al., 2002; Murray et al., 2012).
By utilising a full 30 year time series of observations and a global tidal model (Egbert and Erofeeva, 2002), the methodology enables us to overcome the requirement for clear, high quality observations acquired concurrent to the time of high and low tide.
*Accuracy and limitations*
Due the sun-synchronous nature of the various Landsat sensor observations; it is unlikely that the full physical extents of the tidal range in any cell will be observed. Hence, terminology has been adopted for the product to reflect the highest modelled tide observed in a given cell (HOT) and the lowest modelled tide observed (LOT) (see Sagar et al. 2017). These measures are relative to Mean Sea Level, and have no consistent relationship to Lowest (LAT) and Highest Astronomical Tide (HAT).
The inclusion of the lowest (LMT) and highest (HMT) modelled tide values for each tidal polygon indicates the highest and lowest tides modelled for that location across the full time series by the OTPS model. The | |
<filename>scripts/nasbench/train_cellss_pkl.py<gh_stars>100-1000
# -*- coding: utf-8 -*-
# pylint: disable-all
from io import StringIO
import os
import sys
import copy
import shutil
import logging
import argparse
import random
import pickle
import yaml
import setproctitle
from scipy.stats import stats
import torch
import torch.backends.cudnn as cudnn
import numpy as np
from torch.utils.data import Dataset, DataLoader
from aw_nas import utils
from aw_nas.common import get_search_space, rollout_from_genotype_str
from aw_nas.evaluator.arch_network import ArchNetwork
def _get_float_format(lst, fmt):
if isinstance(lst, (float, np.float32, np.float64)):
return fmt.format(lst)
if isinstance(lst, (list, tuple)):
return "[" + ", ".join([_get_float_format(item, fmt) for item in lst]) + "]"
return "{}".format(lst)
class CellSSDataset(Dataset):
    """Dataset of (arch, target) items with optional target normalization.

    When `minus`/`div` are given, the second element of each item is shifted
    and/or scaled on access; otherwise items are returned untouched.
    """

    def __init__(self, data, minus=None, div=None):
        self.data = data
        self._len = len(self.data)
        self.minus = minus
        self.div = div

    def __len__(self):
        return self._len

    def __getitem__(self, idx):
        item = self.data[idx]
        if self.minus is None and self.div is None:
            return item
        arch, target = item[0], item[1]
        if self.minus is not None:
            target = target - self.minus
        if self.div is not None:
            target = target / self.div
        return (arch, target)
def _cal_stage_probs(avg_stage_scores, stage_prob_power):
probs = []
prob = 1.
for i_decision, avg_score_stage in enumerate(avg_stage_scores):
power = stage_prob_power[i_decision] if isinstance(stage_prob_power, (tuple, list)) else stage_prob_power
avg_score_stage = (avg_score_stage[0] ** power, avg_score_stage[1] ** power)
cur_prob = prob * avg_score_stage[0] / (avg_score_stage[0] + avg_score_stage[1])
probs.append(cur_prob)
prob *= avg_score_stage[1] / (avg_score_stage[0] + avg_score_stage[1])
probs.append(prob)
return probs
def make_pair_pool(train_stages, args, stage_epochs):
    """Precompute comparison pairs between archs within and across stages.

    For every stage i, archs of stage i are compared against all archs of
    stage >= i (or >= i+2 for stage 0 when there are 4 stages) using the
    performance at `stage_epochs[i]`.  A pair is kept when the absolute perf
    gap exceeds `args.diff_threshold[i]`.  Returns the flattened item list
    and, per stage, a tuple (inds_1, inds_2, better) of global indices into
    that flattened list plus whether the second item is better.
    """
    num_stages = len(train_stages)
    stage_pairs_list = []
    diff_threshold = args.diff_threshold  # assumed indexable per stage -- TODO confirm
    all_train_stages = sum(train_stages, [])
    stage_lens = [len(stage_data) for stage_data in train_stages]
    # offset of each stage's items inside the flattened list
    stage_start_inds = [0] + list(np.cumsum(stage_lens[:-1]))
    for i_stage in range(num_stages):
        stage_data = train_stages[i_stage]
        if i_stage in {0} and num_stages == 4:  # try special handling for 4-stage
            # skip the immediately-following stage for stage 0
            beyond_stage_data = sum([train_stages[j_stage] for j_stage in range(i_stage + 2, num_stages)], [])
            other_start_ind = stage_start_inds[i_stage + 2]
        else:
            beyond_stage_data = sum([train_stages[j_stage] for j_stage in range(i_stage, num_stages)], [])
            other_start_ind = stage_start_inds[i_stage]
        epoch = stage_epochs[i_stage]
        # stage_perf: performance of every item at this stage's reference epoch
        stage_perf = np.array([d[1][epoch] for d in stage_data])
        beyond_stage_perf = np.array([d[1][epoch] for d in beyond_stage_data])
        # diff[r, c] = perf(beyond item c) - perf(stage item r)
        diff = beyond_stage_perf - stage_perf[:, None]
        # keep only the strict upper triangle (avoids self/duplicate pairs when
        # the beyond set starts at this stage); the lower part is forced to -1
        # so it can never pass the positive threshold.
        # NOTE(review): when beyond starts at i_stage+2 the matrix is not
        # square, so triu also drops some valid off-diagonal pairs -- confirm
        # this is intended.
        acc_abs_diff_matrix = np.triu(np.abs(diff), 1) - np.tril(np.ones(diff.shape), 0)
        indexes = np.where(acc_abs_diff_matrix > diff_threshold[i_stage])
        pairs = (stage_start_inds[i_stage] + indexes[0], other_start_ind + indexes[1], (diff > 0)[indexes])
        stage_pairs_list.append(pairs)
        logging.info("Num pairs using the perfs of stage {} (epoch {}): {}/{}".format(
            i_stage, epoch, len(pairs[0]),
            stage_lens[i_stage] * (stage_lens[i_stage] - 1) / 2 + stage_lens[i_stage] * sum(stage_lens[i_stage+1:], 0)))
    return all_train_stages, stage_pairs_list
def train_multi_stage_pair_pool(all_stages, pairs_list, model, i_epoch, args):
    """Run one comparison-training epoch over a pre-built pair pool.

    `all_stages` is the flattened (arch, perfs) list and `pairs_list` the
    per-stage (inds_1, inds_2, better) tuples from `make_pair_pool`.
    Returns the average comparison loss over the epoch.
    """
    objs = utils.AverageMeter()
    model.train()
    # try get through all the pairs:
    # concatenate the per-stage index/label arrays, then zip them into one
    # flat list of (ind_1, ind_2, better) triples and shuffle it.
    pairs_pool = list(zip(*[np.concatenate(items) for items in zip(*pairs_list)]))
    num_pairs = len(pairs_pool)
    logging.info("Number of pairs: {}".format(num_pairs))
    np.random.shuffle(pairs_pool)
    num_batch = num_pairs // args.batch_size  # trailing partial batch is dropped
    for i_batch in range(num_batch):
        archs_1_inds, archs_2_inds, better_lst = list(zip(
            *pairs_pool[i_batch * args.batch_size: (i_batch + 1) * args.batch_size]))
        loss = model.update_compare(np.array([all_stages[idx][0] for idx in archs_1_inds]),
                                    np.array([all_stages[idx][0] for idx in archs_2_inds]), better_lst)
        objs.update(loss, args.batch_size)
        if i_batch % args.report_freq == 0:
            logging.info("train {:03d} [{:03d}/{:03d}] {:.4f}".format(
                i_epoch, i_batch, num_batch, objs.avg))
    return objs.avg
def train_multi_stage(train_stages, model, epoch, args, avg_stage_scores, stage_epochs):
    """One epoch of pairwise training with pairs sampled on the fly.

    Two stages are drawn per candidate pair (with replacement, so they may
    coincide), one arch from each; the pair is kept when the perf gap at the
    lower stage's reference epoch exceeds that stage's threshold.  Returns
    the average comparison loss.
    """
    # TODO: multi stage
    objs = utils.AverageMeter()
    n_diff_pairs_meter = utils.AverageMeter()
    model.train()
    num_stages = len(train_stages)
    # must specify `stage_probs` or `stage_prob_power`
    stage_probs = getattr(args, "stage_probs", None)
    if stage_probs is None:
        stage_probs = _cal_stage_probs(avg_stage_scores, args.stage_prob_power)
    stage_accept_pair_probs = getattr(args, "stage_accept_pair_probs", [1.0] * num_stages)
    stage_lens = [len(stage_data) for stage_data in train_stages]
    for i, len_ in enumerate(stage_lens):
        if len_ == 0:
            # redistribute an empty stage's probability mass over later stages
            n_j = num_stages - i - 1
            for j in range(i + 1, num_stages):
                stage_probs[j] += stage_probs[i] / float(n_j)
            stage_probs[i] = 0
    # diff_threshold = getattr(args, "diff_threshold", [0.08, 0.04, 0.02, 0.0])
    stage_single_probs = getattr(args, "stage_single_probs", None)
    if stage_single_probs is not None:
        # overrides the probs above: stage prob ~ per-item prob * stage size
        stage_probs = np.array([single_prob * len_ for single_prob, len_ in zip(stage_single_probs, stage_lens)])
        stage_probs = stage_probs / stage_probs.sum()
    logging.info("Epoch {:d}: Stage probs {}".format(epoch, stage_probs))
    diff_threshold = args.diff_threshold  # per-stage gap threshold -- assumed indexable
    for step in range(args.num_batch_per_epoch):
        pair_batch = []
        i_pair = 0
        while 1:
            # rejection-sample pairs until a full batch is accepted
            stage_1, stage_2 = np.random.choice(np.arange(num_stages), size=2, p=stage_probs)
            d_1 = train_stages[stage_1][np.random.randint(0, stage_lens[stage_1])]
            d_2 = train_stages[stage_2][np.random.randint(0, stage_lens[stage_2])]
            min_stage = min(stage_2, stage_1)
            if np.random.rand() > stage_accept_pair_probs[min_stage]:
                continue
            # max_stage = stage_2 + stage_1 - min_stage
            # if max_stage - min_stage >= 2:
            #     better = stage_2 > stage_1
            # else:
            min_epoch = stage_epochs[min_stage]
            diff_21 = d_2[1][min_epoch] - d_1[1][min_epoch]
            # print(stage_1, stage_2, diff_21, diff_threshold)
            if np.abs(diff_21) > diff_threshold[min_stage]:
                # if the difference is larger than the threshold of the min stage, this pair counts
                better = diff_21 > 0
            else:
                continue
            pair_batch.append((d_1[0], d_2[0], better))
            i_pair += 1
            if i_pair == args.batch_size:
                break
        archs_1, archs_2, better_lst = zip(*pair_batch)
        n_diff_pairs = len(better_lst)
        n_diff_pairs_meter.update(float(n_diff_pairs))
        loss = model.update_compare(archs_1, archs_2, better_lst)
        objs.update(loss, n_diff_pairs)
        if step % args.report_freq == 0:
            logging.info("train {:03d} [{:03d}/{:03d}] {:.4f}".format(
                epoch, step, args.num_batch_per_epoch, objs.avg))
    return objs.avg
def train_multi_stage_listwise(train_stages, model, epoch, args, avg_stage_scores, stage_epochs, score_train_stages=None):
    """One epoch of listwise ranking training across budget stages.

    Per step, `args.list_length` slots are split over the stages by a
    multinomial draw over `stage_probs`; each stage contributes that many
    distinct archs per batch row, sorted by the stage-local perf at
    `stage_epochs[i]`, and the concatenated best-to-worst list is fed to
    `model.update_argsort` (or `model.update_compare` when
    `args.listwise_compare` is set and list_length == 2).

    `score_train_stages` is currently unused; kept for interface
    compatibility.  Returns the average loss over the epoch.

    Fix: `dtype=np.int` -- the `np.int` alias was removed in NumPy 1.24 and
    raises AttributeError there; the builtin `int` is its exact former value.
    """
    # TODO: multi stage
    objs = utils.AverageMeter()
    n_listlength_meter = utils.AverageMeter()
    model.train()
    num_stages = len(train_stages)
    stage_lens = [len(stage_data) for stage_data in train_stages]
    stage_sep_inds = [np.arange(stage_len) for stage_len in stage_lens]
    sample_acc_temp = getattr(args, "sample_acc_temp", None)
    if sample_acc_temp is not None:
        # within-stage sampling probs = softmax(perf / temperature)
        stage_sep_probs = []
        for i_stage, stage_data in enumerate(train_stages):
            perfs = np.array([item[1][stage_epochs[i_stage]] for item in train_stages[i_stage]])
            perfs = perfs / sample_acc_temp
            exp_perfs = np.exp(perfs - np.max(perfs))  # subtract max for numerical stability
            stage_sep_probs.append(exp_perfs / exp_perfs.sum())
    else:
        stage_sep_probs = None
    stage_single_probs = getattr(args, "stage_single_probs", None)
    assert stage_single_probs is not None
    if stage_single_probs is not None:
        # stage prob ~ per-item prob * stage size
        stage_probs = np.array([single_prob * len_ for single_prob, len_ in zip(stage_single_probs, stage_lens)])
        stage_probs = stage_probs / stage_probs.sum()
    logging.info("Epoch {:d}: Stage probs {}".format(epoch, stage_probs))
    num_stage_samples_avg = np.zeros(num_stages)
    # NOTE(review): with ragged stages this relies on implicit object-array
    # creation, which newer NumPy rejects -- confirm stages are equally sized
    # or pass dtype=object explicitly.
    train_stages = np.array(train_stages)
    listwise_compare = getattr(args, "listwise_compare", False)
    if listwise_compare:
        assert args.list_length == 2
    for step in range(args.num_batch_per_epoch):
        # how many list slots each stage contributes this step (capped by size)
        num_stage_samples = np.random.multinomial(args.list_length, stage_probs)
        num_stage_samples = np.minimum(num_stage_samples, stage_lens)
        true_ll = np.sum(num_stage_samples)
        n_listlength_meter.update(true_ll, args.batch_size)
        num_stage_samples_avg += num_stage_samples
        # per batch row, sample `sz` distinct archs from each stage
        stage_inds = [np.array([np.random.choice(
            stage_sep_inds[i_stage], size=(sz), replace=False,
            p=None if stage_sep_probs is None else stage_sep_probs[i_stage]) for _ in range(args.batch_size)])
            if sz > 0 else np.zeros((args.batch_size, 0), dtype=int) for i_stage, sz in enumerate(num_stage_samples)]
        # sort each stage's sampled indices by the stage-local performance
        sorted_stage_inds = [s_stage_inds[np.arange(args.batch_size)[:, None], np.argsort(np.array(np.array(train_stages[i_stage])[s_stage_inds][:, :, 1].tolist())[:, :, stage_epochs[i_stage]], axis=1)] if s_stage_inds.shape[1] > 1 else s_stage_inds for i_stage, s_stage_inds in enumerate(stage_inds)]
        archs = np.concatenate([np.array(train_stages[i_stage])[s_stage_inds][:, :, 0] for i_stage, s_stage_inds in enumerate(sorted_stage_inds) if s_stage_inds.size > 0], axis=1)
        archs = archs[:, ::-1]  # order: from best to worst
        assert archs.ndim == 2
        archs = np.array(archs.tolist())  # (batch_size, list_length, num_cell_groups, node_or_op, decisions)
        if listwise_compare:
            loss = model.update_compare(archs[:, 0], archs[:, 1], np.zeros(archs.shape[0]))
        else:
            loss = model.update_argsort(archs, idxes=None, first_n=getattr(args, "score_list_length", None), is_sorted=True)
        objs.update(loss, args.batch_size)
        if step % args.report_freq == 0:
            logging.info("train {:03d} [{:03d}/{:03d}] {:.4f} (mean ll: {:.1f}; {})".format(
                epoch, step, args.num_batch_per_epoch, objs.avg, n_listlength_meter.avg, (num_stage_samples_avg / (step + 1)).tolist()))
    return objs.avg
def train(train_loader, model, epoch, args):
objs = utils.AverageMeter()
n_diff_pairs_meter = utils.AverageMeter()
n_eq_pairs_meter = utils.AverageMeter()
model.train()
margin_diff_coeff = getattr(args, "margin_diff_coeff", None)
eq_threshold = getattr(args, "eq_threshold", None)
eq_pair_ratio = getattr(args, "eq_pair_ratio", 0)
if eq_threshold is not None:
assert eq_pair_ratio > 0
assert eq_threshold <= args.compare_threshold
for step, (archs, all_accs) in enumerate(train_loader):
archs = np.array(archs)
n = len(archs)
use_checkpoint = getattr(args, "use_checkpoint", 3)
accs = all_accs[:, use_checkpoint]
if args.compare:
if getattr(args, "compare_split", False):
n_pairs = len(archs) // 2
accs = np.array(accs)
acc_diff_lst = accs[n_pairs:2*n_pairs] - accs[:n_pairs]
keep_inds = np.where(np.abs(acc_diff_lst) > args.compare_threshold)[0]
better_lst = (np.array(accs[n_pairs:2*n_pairs] - accs[:n_pairs]) > 0)[keep_inds]
archs_1 = np.array(archs[:n_pairs])[keep_inds]
archs_2 = np.array(archs[n_pairs:2*n_pairs])[keep_inds]
else:
n_max_pairs = int(args.max_compare_ratio * n * (1 - eq_pair_ratio))
acc_diff = np.array(accs)[:, None] - np.array(accs)
acc_abs_diff_matrix = np.triu(np.abs(acc_diff), 1)
ex_thresh_inds = np.where(acc_abs_diff_matrix > args.compare_threshold)
ex_thresh_num = len(ex_thresh_inds[0])
if ex_thresh_num > n_max_pairs:
if args.choose_pair_criterion == "diff":
keep_inds = np.argpartition(acc_abs_diff_matrix[ex_thresh_inds], -n_max_pairs)[-n_max_pairs:]
elif args.choose_pair_criterion == "random":
keep_inds = np.random.choice(np.arange(ex_thresh_num), n_max_pairs, replace=False)
ex_thresh_inds = (ex_thresh_inds[0][keep_inds], ex_thresh_inds[1][keep_inds])
archs_1, archs_2, better_lst, acc_diff_lst = archs[ex_thresh_inds[1]], archs[ex_thresh_inds[0]], (acc_diff > 0)[ex_thresh_inds], acc_diff[ex_thresh_inds]
n_diff_pairs = len(better_lst)
n_diff_pairs_meter.update(float(n_diff_pairs))
if eq_threshold is None:
if margin_diff_coeff is not None:
margin = np.abs(acc_diff_lst) * margin_diff_coeff
loss = model.update_compare(archs_1, archs_2, better_lst, margin=margin)
else:
loss = model.update_compare(archs_1, archs_2, better_lst)
else:
# drag close the score of arch pairs whose true acc diffs are below args.eq_threshold
n_eq_pairs = int(args.max_compare_ratio * n * eq_pair_ratio)
below_eq_thresh_inds = np.where(acc_abs_diff_matrix < eq_threshold)
below_eq_thresh_num = len(below_eq_thresh_inds[0])
if below_eq_thresh_num > n_eq_pairs:
keep_inds = np.random.choice(np.arange(below_eq_thresh_num), n_eq_pairs, replace=False)
below_eq_thresh_inds = (below_eq_thresh_inds[0][keep_inds], below_eq_thresh_inds[1][keep_inds])
eq_archs_1, eq_archs_2, below_acc_diff_lst = \
archs[below_eq_thresh_inds[1]], archs[below_eq_thresh_inds[0]], acc_abs_diff_matrix[below_eq_thresh_inds]
if margin_diff_coeff is not None:
margin = np.concatenate((
np.abs(acc_diff_lst),
np.abs(below_acc_diff_lst))) * margin_diff_coeff
else:
margin = None
better_pm_lst = np.concatenate((2 * better_lst - 1, np.zeros(len(eq_archs_1))))
n_eq_pairs_meter.update(float(len(eq_archs_1)))
loss = model.update_compare_eq(np.concatenate((archs_1, eq_archs_1)),
np.concatenate((archs_2, eq_archs_2)),
better_pm_lst, margin=margin)
objs.update(loss, n_diff_pairs)
else:
loss = model.update_predict(archs, accs)
objs.update(loss, n)
if step % args.report_freq == 0:
n_pair_per_batch = (args.batch_size * (args.batch_size - 1)) // 2
logging.info("train {:03d} [{:03d}/{:03d}] {:.4f}; {}".format(
epoch, step, len(train_loader), objs.avg,
| |
<filename>database_manager.py<gh_stars>0
"""
Object that manages the database that stores the blockchain.
"""
import sqlite3
from transaction import Transaction
from block import Block
import os
class BlockchainDatabase:
def __init__(self, blockchain, p, name='/blockchain', ):
    """Open (or create) the sqlite file `p + name + '.db'` and set up tables.

    :param blockchain: Blockchain object that owns this database
    :param p: string -- directory path prefix for the database file
    :param name: string -- file name component (leading '/' joins it to p)
    """
    self.name = name
    # NOTE(review): sqlite3's connection context manager only wraps the body
    # in a transaction -- it does NOT close the connection, so self.db and
    # self.cursor stay usable after the `with` block.
    with sqlite3.connect(p + name + '.db', check_same_thread=False) as self.db:
        self.cursor = self.db.cursor()
        self.setup_database()
    self.blockchain = blockchain
def setup_database(self):
    """
    Runs SQL to setup the database.

    Creates (if absent) the tables for block headers, transactions and
    their inputs/outputs, the two token states, and the memory pool.
    :return: None
    """
    # Block headers; `height` is the block's position in the chain.
    self.cursor.execute("""
    CREATE TABLE IF NOT EXISTS Blocks(
        hash CHAR(64) PRIMARY KEY,
        previous_hash CHAR(64),
        timestamp INTEGER,
        difficulty INTEGER,
        nonce INTEGER,
        height INTEGER
    );
    """)
    # One row per transaction, linked to its containing block.
    self.cursor.execute("""
    CREATE TABLE IF NOT EXISTS Transactions(
        txid CHAR(32) PRIMARY KEY,
        block_hash CHAR(64),
        type INTEGER,
        value TEXT,
        from_address CHAR(64),
        timestamp INTEGER,
        FOREIGN KEY (block_hash) REFERENCES Blocks(hash)
    );
    """)
    # Inputs reference the output (output_txid, ind) they spend.
    self.cursor.execute("""
    CREATE TABLE IF NOT EXISTS Inputs(
        txid CHAR(32),
        output_txid CHAR(32),
        ind INTEGER,
        value TEXT,
        recipient CHAR(64),
        sig BLOB,
        type INTEGER,
        PRIMARY KEY (txid, output_txid, ind),
        FOREIGN KEY (txid) REFERENCES Transactions(txid)
    );
    """)
    # Outputs; `utxo` marks whether the output is still unspent.
    self.cursor.execute("""
    CREATE TABLE IF NOT EXISTS Outputs(
        txid CHAR(32),
        ind INTEGER,
        value TEXT,
        recipient CHAR(64),
        sig BLOB,
        utxo BOOLEAN,
        type INTEGER,
        PRIMARY KEY (txid, ind),
        FOREIGN KEY (txid) REFERENCES Transactions(txid)
    );
    """)
    # Voting tokens in their serialised (type 1) state; `locked` flips once
    # the token is consumed by a type-2 output.
    self.cursor.execute("""
    CREATE TABLE IF NOT EXISTS Serialised_Tokens(
        tkid CHAR(16) PRIMARY KEY,
        poll_address CHAR(64),
        voter_address CHAR(64),
        timestamp INTEGER,
        question VARCHAR,
        options VARCHAR,
        ans VARCHAR,
        sig BLOB,
        txid CHAR(32),
        ind INTEGER,
        locked BOOLEAN,
        FOREIGN KEY (txid) REFERENCES Outputs(txid),
        FOREIGN KEY (ind) REFERENCES Outputs(ind)
    );
    """)
    # The locked (type 2) snapshot of a token, including the recorded answer.
    self.cursor.execute("""
    CREATE TABLE IF NOT EXISTS Locked_Tokens(
        tkid CHAR(16) PRIMARY KEY,
        poll_address CHAR(64),
        voter_address CHAR(64),
        timestamp INTEGER,
        question VARCHAR,
        options VARCHAR,
        ans VARCHAR,
        sig BLOB,
        txid CHAR(32),
        ind INTEGER,
        FOREIGN KEY (txid) REFERENCES Outputs(txid),
        FOREIGN KEY (ind) REFERENCES Outputs(ind)
    );
    """)
    # Outputs referenced by not-yet-mined transactions.
    self.cursor.execute("""
    CREATE TABLE IF NOT EXISTS Memory_Pool(
        txid CHAR(32),
        ind INTEGER,
        FOREIGN KEY (txid) REFERENCES Outputs(txid),
        FOREIGN KEY (ind) REFERENCES Outputs(ind),
        PRIMARY KEY (txid, ind)
    );
    """)
def add_block(self, block):
"""
Adds a block to the database.
:param block: Block
:return: None
"""
sql = '''
INSERT INTO Blocks VALUES (?,?,?,?,?,?);
'''
hash = block.hash
p_hash = block.previous_hash
t = block.timestamp
d = block.difficulty
n = block.nonce
h = block.height
self.cursor.execute(sql, [hash, p_hash, t, d, n, h])
self.db.commit()
for transaction in block.transactions:
self.add_transaction(transaction, hash)
return
def add_transaction(self, tx, h):
"""
Adds a transaction to the database.
:param tx: Transaction
:param h: int
:return: None
"""
sql = '''
INSERT INTO Transactions VALUES (?,?,?,?,?,?);
'''
self.cursor.execute(sql, [tx.txid, h, tx.type, str(tx.value), tx.from_address, tx.timestamp])
self.db.commit()
for i in tx.inputs:
self.add_input(i, tx.txid)
for o in tx.outputs:
self.add_output(o)
return
def add_input(self, i, txid):
"""
Adds an input to the database.
:param i: dict
:param txid: string
:return:
"""
sql = '''
INSERT INTO Inputs VALUES (?,?,?,?,?,?,?);
'''
self.cursor.execute(sql, [txid, i['txid'], i['index'], str(i['value']), i['recipient'], i['sig'], i['type']])
self.db.commit()
return
def add_output(self, o):
"""
Adds an output to the database.
:param o: dict
:return: None
"""
sql = '''
INSERT INTO Outputs VALUES (?,?,?,?,?,?,?);
'''
self.cursor.execute(sql, [o['txid'], o['index'], str(o['value']), o['recipient'], o['sig'], True, o['type']])
self.db.commit()
if o['type'] == 1:
self.add_token(o['value'], o['txid'], o['index'])
elif o['type'] == 2:
self.update_token(o['value'], o['txid'], o['index'])
def update_utxo(self, utxo):
"""
Updates outputs in the database that have now been spent.
:param utxo:
:return: None
"""
sql = '''
UPDATE Outputs
SET utxo = FALSE
WHERE txid = ? AND ind = ?'''
self.cursor.execute(sql, [utxo['txid'], utxo['index']])
self.db.commit()
def get_utxos(self, addr, ty):
sql2 = '''
SELECT Outputs.txid, Outputs.ind, Outputs.value, Outputs.recipient, Outputs.sig, type FROM Outputs
WHERE utxo = TRUE AND recipient = ? and type = ?
'''
self.cursor.execute(sql2, [addr, ty])
results = self.cursor.fetchall()
return results
def get_tokens(self, addr, ty):
"""
Returns the number of tokens of a particular type that haven't been spent, as known by the Blockchain.
A user's funds can be found from just their UTXO's.
:param addr:
:param ty:
:return:
"""
sql = '''
SELECT value FROM Outputs
WHERE utxo = TRUE AND recipient = ? and type = ?
'''
self.cursor.execute(sql, [addr, ty])
results = self.cursor.fetchall()
t = 0
if ty == 0:
for value in results:
t += int(value[0])
else:
t = len(results)
return t
def create_transaction(self, txid):
    """
    Creates a Transaction object from relevant data in the database.

    Stored `value` columns were written with str(); they are parsed back
    with ast.literal_eval (safe for Python literals) and left as the raw
    string when parsing fails.

    Fix: replaced eval() with ast.literal_eval -- eval on database contents
    can execute arbitrary expressions.

    :param txid: string
    :return: Transaction
    """
    import ast  # local import: safe parsing of stored literal values

    inputs = []
    outputs = []
    sql = '''SELECT type, from_address, txid, timestamp FROM Transactions WHERE txid = ?'''
    self.cursor.execute(sql, [txid])
    results = self.cursor.fetchall()[0]
    tx = Transaction(results[0], None, results[1], None, blockchain=self.blockchain)
    tx.txid = results[2]
    tx.timestamp = results[3]
    sql = '''SELECT * FROM Inputs WHERE txid = ?'''
    self.cursor.execute(sql, [txid])
    results = self.cursor.fetchall()
    for i in results:
        try:
            value = ast.literal_eval(i[3])
        except Exception:
            # not a Python literal: keep the raw string
            value = i[3]
        inputs.append({'txid': i[1], 'index': i[2], 'value': value, 'recipient': i[4], 'sig': i[5], 'type': i[6]})
    sql = '''SELECT * FROM Outputs WHERE txid = ? ORDER BY ind '''
    self.cursor.execute(sql, [txid])
    results = self.cursor.fetchall()
    addresses = []
    for o in results:
        try:
            value = ast.literal_eval(o[2])
        except Exception:
            value = o[2]
        outputs.append({'txid': o[0], 'index': o[1], 'value': value, 'recipient': o[3], 'sig': o[4], 'type': o[6]})
        if o[3] not in addresses:
            addresses.append(o[3])
    tx.to_address = addresses
    # NOTE(review): tx.value is taken from the first output -- assumes every
    # transaction has at least one output; confirm.
    tx.value = outputs[0]['value']
    tx.inputs = inputs
    tx.outputs = outputs
    return tx
def create_block(self, ph):
    """
    Creates a block from the database, looked up by its parent hash.

    :param ph: string -- hash of the previous (parent) block
    :return: Block
    """
    txs = []
    # all transactions belonging to the block whose previous_hash is `ph`
    sql = '''
    SELECT Transactions.txid FROM Transactions
    INNER JOIN Blocks ON Blocks.hash = Transactions.block_hash
    WHERE previous_hash = ?
    '''
    self.cursor.execute(sql, [ph])
    results = self.cursor.fetchall()
    for txid in results:
        txs.append(self.create_transaction(txid[0]))
    sql = '''SELECT difficulty, nonce, hash, timestamp, height FROM Blocks WHERE previous_hash = ?'''
    self.cursor.execute(sql, [ph])
    results = self.cursor.fetchall()[0]
    # NOTE(review): the third Block() argument here is the timestamp, whereas
    # block_from_height() passes the difficulty in that position -- one of the
    # two looks wrong; confirm against Block's constructor signature.
    b = Block(ph, txs, results[3], results[4])
    b.nonce = results[1]
    b.hash = results[2]
    b.timestamp = results[3]
    return b
def create_recent_chain(self):  # Returns chain of 16 previous blocks
    """
    Creates the chain to be stored in memory by the Blockchain object.

    Rebuilds at most the 16 most recent blocks by walking forward from a
    starting parent hash (the genesis parent "0"*64 for short chains).
    :return: List of Blocks
    """
    ch = []
    height = self.get_block_height()
    if height >= 16:
        # start 16 blocks back from the tip and walk forward
        self.cursor.execute('SELECT previous_hash FROM Blocks WHERE height = ?', [height-15])
        ph = self.cursor.fetchall()[0][0]
        for n in range(16):  # Block-height is 0 only when genesis block is added
            b = self.create_block(ph)
            ch.append(b)
            ph = b.hash
    else:
        ph = "0" * 64
        for n in range(height+1):  # Block-height is 0 only when genesis block is added
            b = self.create_block(ph)
            ch.append(b)
            ph = b.hash
    return ch
def add_token(self, tk, txid, ind):
"""
Adds a token of type 1 to the database.
:param tk: dict
:param txid: string
:param ind: int
:return: None
"""
sql = '''
INSERT INTO Serialised_Tokens VALUES (?,?,?,?,?,?,?,?,?,?,?)
'''
self.cursor.execute(sql, [tk['tkid'], tk['poll_address'], tk['voter_address'], tk['timestamp'], tk['question'],
str(tk['options']), tk['ans'], tk['sig'], txid, ind, False])
self.db.commit()
def update_token(self, tk, txid, ind):
"""
Updates the state of a token of type 1 in the database and adds the version of the token when it is of type 2.
:param tk: dict
:param txid: string
:param ind: int
:return: None
"""
sql = '''
UPDATE Serialised_Tokens
SET locked = TRUE
WHERE tkid = ?
'''
self.cursor.execute(sql, [tk['tkid']])
self.db.commit()
sql = '''
INSERT INTO Locked_Tokens VALUES (?,?,?,?,?,?,?,?,?,?)
'''
self.cursor.execute(sql, [tk['tkid'], tk['poll_address'], tk['voter_address'], tk['timestamp'], tk['question'],
str(tk['options']), tk['ans'], tk['sig'], txid, ind])
self.db.commit()
def get_block_height(self):
"""
Gets the number of blocks that are stored in the database.
:return: int
"""
self.cursor.execute('SELECT hash FROM Blocks')
r = self.cursor.fetchall()
return len(r) - 1 # -1 is key because of genesis block
def block_from_height(self,h):
    """
    Creates a Block from the database based on its height.

    :param h: int
    :return: Block
    """
    txs = []
    sql = '''
    SELECT Transactions.txid FROM Transactions
    INNER JOIN Blocks ON Blocks.hash = Transactions.block_hash
    WHERE height = ?
    '''
    self.cursor.execute(sql, [h])
    results = self.cursor.fetchall()
    for txid in results:
        txs.append(self.create_transaction(txid[0]))
    sql = '''SELECT difficulty, nonce, hash, timestamp, previous_hash FROM Blocks WHERE height = ?'''
    self.cursor.execute(sql, [h])
    results = self.cursor.fetchall()[0]
    # NOTE(review): the third Block() argument here is the difficulty, but
    # create_block() passes the timestamp in that position -- one of the two
    # looks wrong; confirm against Block's constructor signature.
    b = Block(results[4], txs, results[0], h)
    b.nonce = results[1]
    b.hash = results[2]
    b.timestamp = results[3]
    return b
def get_serialized_votes(self,addr):
"""
Gets the number of votes that a poll has serialized.
:param addr: string
:return: Tuple
"""
sql = '''
SELECT txid From Transactions WHERE from_address = ? AND type = 1
'''
self.cursor.execute(sql, [addr])
r = | |
<gh_stars>10-100
"""Flexmock tests."""
# pylint: disable=missing-docstring,too-many-lines,disallowed-name,no-member,invalid-name,no-self-use
import functools
import os
import random
import re
import sys
import unittest
from contextlib import contextmanager
from typing import Type, Union
from flexmock._api import (
AT_LEAST,
AT_MOST,
EXACTLY,
RE_TYPE,
UPDATED_ATTRS,
CallOrderError,
ExceptionClassError,
ExceptionMessageError,
FlexmockContainer,
FlexmockError,
MethodCallError,
MethodSignatureError,
Mock,
MockBuiltinError,
ReturnValue,
StateError,
_format_args,
_is_class_method,
_is_static_method,
_isproperty,
flexmock,
flexmock_teardown,
)
from tests import some_module
from .proxy import Proxy
def module_level_function(some, args):
    """Join the two arguments with a comma (module-level mocking fixture)."""
    return "{}, {}".format(some, args)
# Module-level attribute fixture for tests that mock/inspect module attributes.
MODULE_LEVEL_ATTRIBUTE = "test"
class SomeClass:
    """Fixture exposing class, static and instance methods for mocking tests."""

    CLASS_VALUE = "class_method"

    def __init__(self):
        self.instance_value = "instance_method"

    @classmethod
    def class_method(cls):
        """Return the class-level marker value."""
        return cls.CLASS_VALUE

    @classmethod
    def class_method_with_args(cls, a):
        """Echo the single argument back."""
        return a

    @staticmethod
    def static_method():
        """Return a fixed marker string."""
        return "static_method"

    @staticmethod
    def static_method_with_args(a):
        """Echo the single argument back."""
        return a

    def instance_method(self):
        """Return the per-instance marker value."""
        return self.instance_value

    def instance_method_with_args(self, a):
        """Echo the single argument back."""
        return a
# Empty subclass used to test mocking through an inheritance chain.
class DerivedClass(SomeClass):
    pass
@contextmanager
def assert_raises(expected_exception: Type[BaseException], match: Union[RE_TYPE, str, None]):
    """Assert that code raises the correct exception with a correct error message.

    Args:
        expected_exception: Type of the expected exception.
        match: String or pattern to match the error message against. Use None
            to skip error message checking.
    """
    try:
        yield
    # NOTE(review): only Exception subclasses are caught, although the
    # annotation admits any BaseException (e.g. KeyboardInterrupt) -- confirm.
    except Exception as raised_exception:
        if not isinstance(raised_exception, expected_exception):
            raise AssertionError(
                f"Expected exception '{type(expected_exception)}' "
                f"but '{type(raised_exception)}' was raised"
            ) from raised_exception
        if match is not None:
            fail = False
            if isinstance(match, RE_TYPE):
                # compiled patterns are matched anywhere in the message
                fail = not match.search(str(raised_exception))
                match = match.pattern
            else:
                # plain strings must match the message exactly
                fail = str(raised_exception) != str(match)
            if fail:
                raise AssertionError(
                    f"Expected error message:\n{'-'*39}\n'{str(match)}'\n"
                    f"\nBut got:\n\n'{str(raised_exception)}'\n{'-'*39}\n"
                ) from raised_exception
    else:
        raise AssertionError(f"Exception '{expected_exception.__name__}' not raised")
def assert_equal(expected, received, msg=""):
    """Raise AssertionError (with a helpful message) unless the values match."""
    if expected == received:
        return
    message = msg or f"expected {expected}, received {received}"
    raise AssertionError(f"{expected} != {received} : {message}")
class RegularClass:
def _tear_down(self):
    # Delegate to flexmock's teardown so expectations are verified and cleared.
    return flexmock_teardown()
def test_print_expectation(self):
    """str() of an expectation renders 'name(args) -> (returns)'."""
    mock = flexmock()
    expectation = mock.should_receive("foo")
    assert str(expectation) == "foo() -> ()"
def test_flexmock_should_create_mock_object(self):
    """flexmock() with no args returns a Mock instance."""
    mock = flexmock()
    assert isinstance(mock, Mock)
def test_flexmock_should_create_mock_object_from_dict(self):
    """Keyword arguments become plain attributes on the created mock."""
    mock = flexmock(foo="foo", bar="bar")
    assert_equal("foo", mock.foo)
    assert_equal("bar", mock.bar)
def test_flexmock_should_add_expectations(self):
    """should_receive() registers the expectation in the global container."""
    mock = flexmock(name="temp")
    mock.should_receive("method_foo")
    assert "method_foo" in [x._name for x in FlexmockContainer.flexmock_objects[mock]]
def test_flexmock_should_return_value(self):
    """and_return() fixes the value each stubbed method yields."""
    mock = flexmock(name="temp")
    mock.should_receive("method_foo").and_return("value_bar")
    mock.should_receive("method_bar").and_return("value_baz")
    assert_equal("value_bar", mock.method_foo())
    assert_equal("value_baz", mock.method_bar())
def test_type_flexmock_with_unicode_string_in_should_receive(self):
    """Method names given as (unicode) strings are accepted for class mocks."""
    class Foo:
        def bar(self):
            return "bar"

    flexmock(Foo).should_receive("bar").and_return("mocked_bar")
    foo = Foo()
    assert_equal("mocked_bar", foo.bar())
def test_flexmock_should_accept_shortcuts_for_creating_mock_object(self):
    """Attribute values and callables can be attached directly at creation."""
    mock = flexmock(attr1="value 1", attr2=lambda: "returning 2")
    assert_equal("value 1", mock.attr1)
    assert_equal("returning 2", mock.attr2())
def test_flexmock_should_accept_shortcuts_for_creating_expectations(self):
    """flexmock(obj, method=value) stubs methods to return the given values."""
    class Foo:
        def method1(self):
            pass

        def method2(self):
            pass

    foo = Foo()
    flexmock(foo, method1="returning 1", method2="returning 2")
    assert_equal("returning 1", foo.method1())
    assert_equal("returning 2", foo.method2())
    # repeated calls keep returning the stubbed value
    assert_equal("returning 2", foo.method2())
def test_flexmock_expectations_returns_all(self):
    """Each should_receive() adds one entry to the container's list."""
    mock = flexmock(name="temp")
    assert mock not in FlexmockContainer.flexmock_objects
    mock.should_receive("method_foo")
    mock.should_receive("method_bar")
    assert_equal(2, len(FlexmockContainer.flexmock_objects[mock]))
def test_flexmock_expectations_returns_named_expectation(self):
    """get_flexmock_expectation() retrieves an expectation by method name."""
    mock = flexmock(name="temp")
    mock.should_receive("method_foo")
    assert_equal(
        "method_foo",
        FlexmockContainer.get_flexmock_expectation(mock, "method_foo")._name,
    )
def test_flexmock_expectations_returns_none_if_not_found(self):
    """Looking up an expectation that was never declared yields None."""
    mock = flexmock(name="temp")
    mock.should_receive("method_foo")
    assert FlexmockContainer.get_flexmock_expectation(mock, "method_bar") is None
def test_flexmock_should_check_parameters(self):
    """with_args() dispatches calls to the matching argument-specific stub."""
    mock = flexmock(name="temp")
    mock.should_receive("method_foo").with_args("bar").and_return(1)
    mock.should_receive("method_foo").with_args("baz").and_return(2)
    assert_equal(1, mock.method_foo("bar"))
    assert_equal(2, mock.method_foo("baz"))
def test_flexmock_should_keep_track_of_calls(self):
    """Call counts are tracked per (method, argument) expectation."""
    mock = flexmock(name="temp")
    mock.should_receive("method_foo").with_args("foo").and_return(0)
    mock.should_receive("method_foo").with_args("bar").and_return(1)
    mock.should_receive("method_foo").with_args("baz").and_return(2)
    mock.method_foo("bar")
    mock.method_foo("bar")
    mock.method_foo("baz")
    expectation = FlexmockContainer.get_flexmock_expectation(mock, "method_foo", ("foo",))
    assert_equal(0, expectation._times_called)
    expectation = FlexmockContainer.get_flexmock_expectation(mock, "method_foo", ("bar",))
    assert_equal(2, expectation._times_called)
    expectation = FlexmockContainer.get_flexmock_expectation(mock, "method_foo", ("baz",))
    assert_equal(1, expectation._times_called)
def test_flexmock_should_set_expectation_call_numbers(self):
    """times(1) makes verification fail until the method is called once."""
    mock = flexmock(name="temp")
    mock.should_receive("method_foo").times(1)
    expectation = FlexmockContainer.get_flexmock_expectation(mock, "method_foo")
    with assert_raises(
        MethodCallError, "method_foo() expected to be called exactly 1 time, called 0 times"
    ):
        expectation._verify()
    mock.method_foo()
    expectation._verify()
def test_flexmock_should_check_raised_exceptions(self):
    """and_raise(cls) makes the stub raise, and the call is still counted."""
    mock = flexmock(name="temp")

    class FakeException(Exception):
        pass

    mock.should_receive("method_foo").and_raise(FakeException)
    with assert_raises(FakeException, ""):
        mock.method_foo()
    assert_equal(
        1,
        FlexmockContainer.get_flexmock_expectation(mock, "method_foo")._times_called,
    )
def test_flexmock_should_check_raised_exceptions_instance_with_args(self):
    """and_raise() accepts a pre-built exception instance with ctor args."""
    mock = flexmock(name="temp")

    class FakeException(Exception):
        def __init__(self, arg, arg2):
            # pylint: disable=super-init-not-called
            pass

    mock.should_receive("method_foo").and_raise(FakeException(1, arg2=2))
    with assert_raises(FakeException, "1"):
        mock.method_foo()
    assert_equal(
        1,
        FlexmockContainer.get_flexmock_expectation(mock, "method_foo")._times_called,
    )
def test_flexmock_should_check_raised_exceptions_class_with_args(self):
    """and_raise() accepts an exception class plus constructor arguments."""
    mock = flexmock(name="temp")

    class FakeException(Exception):
        def __init__(self, arg, arg2):
            # pylint: disable=super-init-not-called
            pass

    mock.should_receive("method_foo").and_raise(FakeException, 1, arg2=2)
    with assert_raises(FakeException, "1"):
        mock.method_foo()
    assert_equal(
        1,
        FlexmockContainer.get_flexmock_expectation(mock, "method_foo")._times_called,
    )
def test_flexmock_should_match_any_args_by_default(self):
    """An expectation without with_args matches any call; specific args win."""
    mock = flexmock(name="temp")
    mock.should_receive("method_foo").and_return("bar")
    mock.should_receive("method_foo").with_args("baz").and_return("baz")
    assert_equal("bar", mock.method_foo())
    assert_equal("bar", mock.method_foo(1))
    assert_equal("bar", mock.method_foo("foo", "bar"))
    assert_equal("baz", mock.method_foo("baz"))
def test_spying_non_existent_mock_object_method_should_fail(self):
    """should_call on a missing attribute raises; it works once the attr exists."""
    mock = flexmock()
    with assert_raises(
        FlexmockError,
        "Mock object does not have attribute 'method_foo'. "
        'Did you mean to call should_receive("method_foo") instead?',
    ):
        mock.should_call("method_foo")
    mock = flexmock(method_foo=lambda: "ok")
    mock.should_call("method_foo")
def test_flexmock_should_fail_to_match_exactly_no_args_when_calling_with_args(self):
    """An empty with_args() expectation rejects calls that pass arguments."""
    mock = flexmock()
    mock.should_receive("method_foo").with_args()
    with assert_raises(
        MethodSignatureError,
        (
            "Arguments for call method_foo did not match expectations:\n"
            ' Received call:\tmethod_foo("baz")\n'
            " Expected call[1]:\tmethod_foo()"
        ),
    ):
        mock.method_foo("baz")
def test_flexmock_should_match_exactly_no_args(self):
    """An empty with_args() expectation accepts a call with no arguments."""
    class Foo:
        def bar(self):
            pass

    foo = Foo()
    flexmock(foo).should_receive("bar").with_args().and_return("baz")
    assert_equal("baz", foo.bar())
def test_expectation_dot_mock_should_return_mock(self):
    """The .mock attribute of an expectation links back to its mock."""
    mock = flexmock(name="temp")
    assert_equal(mock, mock.should_receive("method_foo").mock)
def test_flexmock_should_create_partial_new_style_object_mock(self):
    """Partial mocks stub one method while the rest of the object still works."""
    class User:
        def __init__(self, name=None):
            self.name = name

        def get_name(self):
            return self.name

        def set_name(self, name):
            self.name = name

    user = User()
    flexmock(user)
    user.should_receive("get_name").and_return("john")
    user.set_name("mike")
    assert_equal("john", user.get_name())
    def test_flexmock_should_create_partial_old_style_object_mock(self):
        """Old-style-class variant of the partial instance mock test
        (identical to the new-style case under Python 3)."""
        class User:
            def __init__(self, name=None):
                self.name = name
            def get_name(self):
                return self.name
            def set_name(self, name):
                self.name = name
        user = User()
        flexmock(user)
        user.should_receive("get_name").and_return("john")
        user.set_name("mike")
        assert_equal("john", user.get_name())
    def test_flexmock_should_create_partial_new_style_class_mock(self):
        """Partially mocking a class affects instances created afterwards."""
        class User:
            def __init__(self):
                pass
            def get_name(self):
                pass
        flexmock(User)
        User.should_receive("get_name").and_return("mike")
        user = User()
        assert_equal("mike", user.get_name())
    def test_flexmock_should_create_partial_old_style_class_mock(self):
        """Old-style-class variant of the partial class mock test
        (identical to the new-style case under Python 3)."""
        class User:
            def __init__(self):
                pass
            def get_name(self):
                pass
        flexmock(User)
        User.should_receive("get_name").and_return("mike")
        user = User()
        assert_equal("mike", user.get_name())
    def test_flexmock_should_match_expectations_against_builtin_classes(self):
        """Passing a builtin type to with_args() matches any instance of that
        type; an unmatched type (float) raises MethodSignatureError."""
        mock = flexmock(name="temp")
        mock.should_receive("method_foo").with_args(str).and_return("got a string")
        mock.should_receive("method_foo").with_args(int).and_return("got an int")
        assert_equal("got a string", mock.method_foo("string!"))
        assert_equal("got an int", mock.method_foo(23))
        with assert_raises(
            MethodSignatureError,
            (
                "Arguments for call method_foo did not match expectations:\n"
                "  Received call:\tmethod_foo(2.0)\n"
                "  Expected call[1]:\tmethod_foo(<class 'int'>)\n"
                "  Expected call[2]:\tmethod_foo(<class 'str'>)"
            ),
        ):
            mock.method_foo(2.0)
    def test_with_args_should_work_with_builtin_c_methods(self):
        """with_args works when stubbing a built-in C method (sys.stdout.write)."""
        flexmock(sys.stdout).should_call("write")  # set fall-through
        flexmock(sys.stdout).should_receive("write").with_args("flexmock_builtin_test")
        sys.stdout.write("flexmock_builtin_test")
    def test_with_args_should_work_with_builtin_c_functions(self):
        """with_args works when stubbing built-in C functions (sys.exit, os.remove)."""
        mocked = flexmock(sys)
        mocked.should_receive("exit").with_args(1).once()
        mocked.exit(1)
        # tear down mid-test so the os.remove stub starts from a clean slate
        self._tear_down()
        flexmock(os).should_receive("remove").with_args("path").once()
        os.remove("path")
    def test_with_args_should_work_with_builtin_python_methods(self):
        """with_args works when stubbing a pure-Python stdlib function."""
        flexmock(random).should_receive("randint").with_args(1, 10).once()
        random.randint(1, 10)
    def test_flexmock_should_match_expectations_against_user_defined_classes(self):
        """Passing a user-defined class to with_args() matches instances of
        that class only; other values raise MethodSignatureError."""
        mock = flexmock(name="temp")
        class Foo:
            pass
        mock.should_receive("method_foo").with_args(Foo).and_return("got a Foo")
        assert_equal("got a Foo", mock.method_foo(Foo()))
        with assert_raises(
            MethodSignatureError,
            (
                "Arguments for call method_foo did not match expectations:\n"
                "  Received call:\tmethod_foo(1)\n"
                "  Expected call[1]:\tmethod_foo(<class 'tests.flexmock_test.RegularClass.test"
                "_flexmock_should_match_expectations_against_user_defined_classes.<locals>.Foo'>)"
            ),
        ):
            mock.method_foo(1)
    def test_flexmock_configures_global_mocks_dict(self):
        """should_receive registers the mock in FlexmockContainer.flexmock_objects."""
        mock = flexmock(name="temp")
        assert mock not in FlexmockContainer.flexmock_objects
        mock.should_receive("method_foo")
        assert mock in FlexmockContainer.flexmock_objects
        assert_equal(len(FlexmockContainer.flexmock_objects[mock]), 1)
    def test_flexmock_teardown_verifies_mocks(self):
        """Teardown raises MethodCallError when a times() expectation was unmet."""
        mock = flexmock(name="temp")
        mock.should_receive("verify_expectations").times(1)
        with assert_raises(
            MethodCallError,
            "verify_expectations() expected to be called exactly 1 time, called 0 times",
        ):
            self._tear_down()
    def test_flexmock_teardown_does_not_verify_stubs(self):
        """Teardown does not verify plain stubs (no times() modifier attached)."""
        mock = flexmock(name="temp")
        mock.should_receive("verify_expectations")
        self._tear_down()
    def test_flexmock_preserves_stubbed_object_methods_between_tests(self):
        """Teardown restores a stubbed instance method to the original behavior."""
        class User:
            def get_name(self):
                return "mike"
        user = User()
        flexmock(user).should_receive("get_name").and_return("john")
        assert_equal("john", user.get_name())
        self._tear_down()
        assert_equal("mike", user.get_name())
    def test_flexmock_preserves_stubbed_class_methods_between_tests(self):
        """Teardown restores a stubbed class method to the original behavior."""
        class User:
            def get_name(self):
                return "mike"
        user = User()
        flexmock(User).should_receive("get_name").and_return("john")
        assert_equal("john", user.get_name())
        self._tear_down()
        assert_equal("mike", user.get_name())
    def test_flexmock_removes_new_stubs_from_objects_after_tests(self):
        """Stubbing replaces the bound instance method; teardown puts the
        original method object back."""
        class User:
            def get_name(self):
                pass
        user = User()
        saved = user.get_name
        flexmock(user).should_receive("get_name").and_return("john")
        assert saved != user.get_name
        assert_equal("john", user.get_name())
        self._tear_down()
        assert_equal(saved, user.get_name)
    def test_flexmock_removes_new_stubs_from_classes_after_tests(self):
        """Stubbing at class level replaces the method seen by instances;
        teardown puts the original method object back."""
        class User:
            def get_name(self):
                pass
        user = User()
        saved = user.get_name
        flexmock(User).should_receive("get_name").and_return("john")
        assert saved != user.get_name
        assert_equal("john", user.get_name())
        self._tear_down()
        assert_equal(saved, user.get_name)
    def test_flexmock_removes_stubs_from_multiple_objects_on_teardown(self):
        """Teardown restores original methods on every mocked instance, even
        when several objects were stubbed in the same test."""
        class User:
            def get_name(self):
                pass
        class Group:
            def get_name(self):
                pass
        user = User()
        group = Group()
        saved1 = user.get_name
        saved2 = group.get_name
        flexmock(user).should_receive("get_name").and_return("john").once()
        flexmock(group).should_receive("get_name").and_return("john").once()
        assert saved1 != user.get_name
        assert saved2 != group.get_name
        assert_equal("john", user.get_name())
        assert_equal("john", group.get_name())
        self._tear_down()
        assert_equal(saved1, user.get_name)
        assert_equal(saved2, group.get_name)
def test_flexmock_removes_stubs_from_multiple_classes_on_teardown(self):
class User:
def get_name(self):
pass
class Group:
def get_name(self):
pass
user = User()
group = User()
saved1 = user.get_name
saved2 = group.get_name
flexmock(User).should_receive("get_name").and_return("john")
flexmock(Group).should_receive("get_name").and_return("john")
assert saved1 != user.get_name
assert saved2 != group.get_name
assert_equal("john", user.get_name())
assert_equal("john", group.get_name())
self._tear_down()
assert_equal(saved1, user.get_name)
assert_equal(saved2, group.get_name)
    def test_flexmock_respects_at_least_when_called_less_than_requested(self):
        """at_least().twice() must fail teardown when called only once."""
        mock = flexmock(name="temp")
        mock.should_receive("method_foo").and_return("bar").at_least().twice()
        expectation = FlexmockContainer.get_flexmock_expectation(mock, "method_foo")
        assert_equal(AT_LEAST, expectation._modifier)
        mock.method_foo()
        with assert_raises(
            MethodCallError, "method_foo() expected to be called at least 2 times, called 1 time"
        ):
            self._tear_down()
    def test_flexmock_respects_at_least_when_called_requested_number(self):
        """at_least().once() passes teardown when called exactly once."""
        mock = flexmock(name="temp")
        mock.should_receive("method_foo").and_return("value_bar").at_least().once()
        expectation = FlexmockContainer.get_flexmock_expectation(mock, "method_foo")
        assert_equal(AT_LEAST, expectation._modifier)
        mock.method_foo()
        self._tear_down()
    def test_flexmock_respects_at_least_when_called_more_than_requested(self):
        """at_least().once() passes teardown when called more than once."""
        mock = flexmock(name="temp")
        mock.should_receive("method_foo").and_return("value_bar").at_least().once()
        expectation = FlexmockContainer.get_flexmock_expectation(mock, "method_foo")
        assert_equal(AT_LEAST, expectation._modifier)
        mock.method_foo()
        mock.method_foo()
        self._tear_down()
    def test_flexmock_respects_at_most_when_called_less_than_requested(self):
        """at_most().twice() passes teardown when called only once."""
        mock = flexmock(name="temp")
        mock.should_receive("method_foo").and_return("bar").at_most().twice()
        expectation = FlexmockContainer.get_flexmock_expectation(mock, "method_foo")
        assert_equal(AT_MOST, expectation._modifier)
        mock.method_foo()
        self._tear_down()
    def test_flexmock_respects_at_most_when_called_requested_number(self):
        """at_most().once() passes teardown when called exactly once."""
        mock = flexmock(name="temp")
        mock.should_receive("method_foo").and_return("value_bar").at_most().once()
        expectation = FlexmockContainer.get_flexmock_expectation(mock, "method_foo")
        assert_equal(AT_MOST, expectation._modifier)
        mock.method_foo()
        self._tear_down()
    def test_flexmock_respects_at_most_when_called_more_than_requested(self):
        """at_most().once() raises immediately on the second call."""
        mock = flexmock(name="temp")
        mock.should_receive("method_foo").and_return("value_bar").at_most().once()
        expectation = FlexmockContainer.get_flexmock_expectation(mock, "method_foo")
        assert_equal(AT_MOST, expectation._modifier)
        mock.method_foo()
        with assert_raises(
            MethodCallError, "method_foo() expected to be called at most 1 time, called 2 times"
        ):
            mock.method_foo()
    def test_flexmock_treats_once_as_times_one(self):
        """once() is equivalent to times(1) and is verified at teardown."""
        mock = flexmock(name="temp")
        mock.should_receive("method_foo").and_return("value_bar").once()
        expectation = FlexmockContainer.get_flexmock_expectation(mock, "method_foo")
        assert_equal(1, expectation._expected_calls[EXACTLY])
        with assert_raises(
            MethodCallError, "method_foo() expected to be called exactly 1 time, called 0 times"
        ):
            self._tear_down()
    def test_flexmock_treats_twice_as_times_two(self):
        """twice() is equivalent to times(2) and is verified at teardown."""
        mock = flexmock(name="temp")
        mock.should_receive("method_foo").twice().and_return("value_bar")
        expectation = FlexmockContainer.get_flexmock_expectation(mock, "method_foo")
        assert_equal(2, expectation._expected_calls[EXACTLY])
        with assert_raises(
            MethodCallError, "method_foo() expected to be called exactly 2 times, called 0 times"
        ):
            self._tear_down()
    def test_flexmock_works_with_never_when_true(self):
        """never() sets times(0) and teardown passes when the method is unused."""
        mock = flexmock(name="temp")
        mock.should_receive("method_foo").and_return("value_bar").never()
        expectation = FlexmockContainer.get_flexmock_expectation(mock, "method_foo")
        assert_equal(0, expectation._expected_calls[EXACTLY])
        self._tear_down()
    def test_flexmock_works_with_never_when_false(self):
        """never() raises immediately when the method is actually called."""
        mock = flexmock(name="temp")
        mock.should_receive("method_foo").and_return("value_bar").never()
        with assert_raises(
            MethodCallError, "method_foo() expected to be called exactly 0 times, called 1 time"
        ):
            mock.method_foo()
    def test_flexmock_get_flexmock_expectation_should_work_with_args(self):
        """get_flexmock_expectation can look up an expectation by its args."""
        mock = flexmock(name="temp")
        mock.should_receive("method_foo").with_args("value_bar")
        assert FlexmockContainer.get_flexmock_expectation(mock, "method_foo", "value_bar")
    def test_flexmock_function_should_return_previously_mocked_object(self):
        """Calling flexmock() twice on the same object returns that same object."""
        class User:
            pass
        user = User()
        foo = flexmock(user)
        assert foo == user
        assert foo == flexmock(user)
def test_flexmock_should_not_return_class_object_if_mocking_instance(self):
class User:
def method(self):
pass
user = User()
user2 = User()
class_mock = flexmock(User).should_receive("method").and_return("class").mock
user_mock = flexmock(user).should_receive("method").and_return("instance").mock
| |
# coding: utf-8
"""
EVE Swagger Interface
An OpenAPI for EVE Online # noqa: E501
OpenAPI spec version: 0.8.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from swagger_client.api_client import ApiClient
class ContactsApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
    def __init__(self, api_client=None):
        # Fall back to a default ApiClient when no client is injected.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client
def delete_characters_character_id_contacts(self, character_id, contact_ids, **kwargs): # noqa: E501
"""Delete contacts # noqa: E501
Bulk delete contacts --- # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_characters_character_id_contacts(character_id, contact_ids, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param list[int] contact_ids: A list of contacts to delete (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.delete_characters_character_id_contacts_with_http_info(character_id, contact_ids, **kwargs) # noqa: E501
else:
(data) = self.delete_characters_character_id_contacts_with_http_info(character_id, contact_ids, **kwargs) # noqa: E501
return data
def delete_characters_character_id_contacts_with_http_info(self, character_id, contact_ids, **kwargs): # noqa: E501
"""Delete contacts # noqa: E501
Bulk delete contacts --- # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_characters_character_id_contacts_with_http_info(character_id, contact_ids, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param list[int] contact_ids: A list of contacts to delete (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['character_id', 'contact_ids', 'datasource', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_characters_character_id_contacts" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'character_id' is set
if ('character_id' not in params or
params['character_id'] is None):
raise ValueError("Missing the required parameter `character_id` when calling `delete_characters_character_id_contacts`") # noqa: E501
# verify the required parameter 'contact_ids' is set
if ('contact_ids' not in params or
params['contact_ids'] is None):
raise ValueError("Missing the required parameter `contact_ids` when calling `delete_characters_character_id_contacts`") # noqa: E501
if 'character_id' in params and params['character_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `character_id` when calling `delete_characters_character_id_contacts`, must be a value greater than or equal to `1`") # noqa: E501
if ('contact_ids' in params and
len(params['contact_ids']) > 20):
raise ValueError("Invalid value for parameter `contact_ids` when calling `delete_characters_character_id_contacts`, number of items must be less than or equal to `20`") # noqa: E501
if ('contact_ids' in params and
len(params['contact_ids']) < 1):
raise ValueError("Invalid value for parameter `contact_ids` when calling `delete_characters_character_id_contacts`, number of items must be greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'character_id' in params:
path_params['character_id'] = params['character_id'] # noqa: E501
query_params = []
if 'contact_ids' in params:
query_params.append(('contact_ids', params['contact_ids'])) # noqa: E501
collection_formats['contact_ids'] = 'csv' # noqa: E501
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v2/characters/{character_id}/contacts/', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_alliances_alliance_id_contacts(self, alliance_id, **kwargs): # noqa: E501
"""Get alliance contacts # noqa: E501
Return contacts of an alliance --- This route is cached for up to 300 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_alliances_alliance_id_contacts(alliance_id, async=True)
>>> result = thread.get()
:param async bool
:param int alliance_id: An EVE alliance ID (required)
:param str datasource: The server name you would like data from
:param int page: Which page of results to return
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetAlliancesAllianceIdContacts200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_alliances_alliance_id_contacts_with_http_info(alliance_id, **kwargs) # noqa: E501
else:
(data) = self.get_alliances_alliance_id_contacts_with_http_info(alliance_id, **kwargs) # noqa: E501
return data
def get_alliances_alliance_id_contacts_with_http_info(self, alliance_id, **kwargs): # noqa: E501
"""Get alliance contacts # noqa: E501
Return contacts of an alliance --- This route is cached for up to 300 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_alliances_alliance_id_contacts_with_http_info(alliance_id, async=True)
>>> result = thread.get()
:param async bool
:param int alliance_id: An EVE alliance ID (required)
:param str datasource: The server name you would like data from
:param int page: Which page of results to return
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetAlliancesAllianceIdContacts200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['alliance_id', 'datasource', 'page', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_alliances_alliance_id_contacts" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'alliance_id' is set
if ('alliance_id' not in params or
params['alliance_id'] is None):
raise ValueError("Missing the required parameter `alliance_id` when calling `get_alliances_alliance_id_contacts`") # noqa: E501
if 'alliance_id' in params and params['alliance_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `alliance_id` when calling `get_alliances_alliance_id_contacts`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'alliance_id' in params:
path_params['alliance_id'] = params['alliance_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v1/alliances/{alliance_id}/contacts/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetAlliancesAllianceIdContacts200Ok]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_characters_character_id_contacts(self, character_id, **kwargs): # noqa: E501
"""Get contacts # noqa: E501
Return contacts of a character --- This route is cached for up to 300 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id_contacts(character_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param str datasource: The server name you would like data from
:param int page: Which page of results to return
:param str token: Access token to | |
# Reexec.py
import sublime, sublime_plugin
import os, sys, re
import threading
import subprocess
import functools
import time
import traceback
import posixpath
import difflib
def plugin_loaded():
    """Sanity-check the Reexec plugin settings at load time, failing fast
    with a descriptive ValueError on any bad configuration."""
    settings = sublime.load_settings("Reexec.sublime-settings")
    # The ssh/rsync executable paths may be customized but never empty.
    for tool_key, tool_default in (('ssh_path', 'ssh'), ('rsync_path', 'rsync')):
        if not settings.get(tool_key, tool_default):
            raise ValueError('reexec: {0} must not be empty. Please check the settings.'.format(tool_key))
    seen_names = set()
    for server in settings.get("servers", []):
        # Each server entry must declare these keys...
        for param in ('name', 'root_directory', 'host'):
            if param not in server:
                raise ValueError('reexec: "{0}" parameter is required. Please check the settings.'.format(param))
        # ...and none of them may be an empty string.
        for param in ('name', 'root_directory', 'host'):
            if param in server and len(server[param])==0:
                raise ValueError('reexec: "{0}" parameter must not be empty. Please check the settings.'.format(param))
        # Server names are used as lookup keys, so they must be unique.
        if server['name'] in seen_names:
            raise ValueError('reexec: duplicate "{0}" servers'.format(server['name']))
        seen_names.add(server['name'])
def getRelativePath(projectPath, projectName=None):
    """Return the path of *projectPath* relative to its ancestor directory
    named *projectName*. If *projectName* is falsy or never found among the
    path components, fall back to the last directory of *projectPath*."""
    # Trim a single trailing separator so os.path.split yields the last dir.
    if projectPath.endswith(os.path.sep):
        projectPath = projectPath[:-1]
    if not projectName:
        return os.path.split(projectPath)[1]
    accumulated = ''
    remainder = projectPath
    while True:
        remainder, component = os.path.split(remainder)
        if not component:
            # Walked off the top without meeting projectName: fall back.
            return os.path.split(projectPath)[1]
        # Build the relative path right-to-left, POSIX-style (remote side).
        accumulated = posixpath.join(component, accumulated)
        if component == projectName:
            return accumulated
def fullsplit(path, path_module):
    """Split *path* into the full list of its components using *path_module*
    (os.path, posixpath or ntpath); a single trailing separator is ignored."""
    if path.endswith(path_module.sep):
        path = path[:-1]
    parts = []
    while True:
        path, tail = path_module.split(path)
        if tail:
            parts.insert(0, tail)
        else:
            # No further component: keep any remaining root (e.g. '/').
            if path:
                parts.insert(0, path)
            return parts
def adjust_path(a, path_mod_a, b, path_mod_b):
    """Map path *a* (split via *path_mod_a*) onto path *b* (split via
    *path_mod_b*): the trailing components of *a* that are not shared with
    *b* are appended to *b*; if the paths already end with a common
    component, *b* is returned unchanged."""
    # ndiff compares the two component lists; entries starting with ' ' are
    # common to both, '-'/'+' are unique to one side.
    diff = list(difflib.ndiff(fullsplit(a, path_mod_a), fullsplit(b, path_mod_b)))
    if diff[-1][0]==' ':
        # Last component is shared: b already points at the right place.
        return b
    else:
        # Collect the trailing non-common components (suffix unique to a/b).
        relative_path = []
        for s in reversed(diff):
            if s[0]!=' ':
                # s[2:] strips the two-character ndiff prefix ('- ', '+ ').
                relative_path.insert(0, s[2:])
            else:
                break
        return path_mod_b.join(b, *relative_path)
def cygwinize(path):
    """Convert an ordinary Windows path (e.g. ``c:\\path\\to\\dir``) into the
    cygwin form (e.g. ``/cygdrive/c/path/to/dir``)."""
    forward_slashed = path.replace('\\', '/')
    # Rewrite any drive-letter prefix ("c:") as "/cygdrive/c".
    return re.sub(r'(\w):', r'/cygdrive/\1', forward_slashed)
class ProcessListener(object):
    """Callback interface consumed by AsyncProcess; subclasses override the
    hooks they care about."""
    def on_data(self, proc, data):
        # Called (from a reader thread) with each raw bytes chunk of output.
        pass
    def on_finished(self, proc):
        # Called once when the process's stdout reaches EOF.
        pass
# Encapsulates subprocess.Popen, forwarding stdout to a supplied
# ProcessListener (on a separate thread)
class AsyncProcess(object):
    """Wraps subprocess.Popen and forwards its stdout/stderr chunks to a
    ProcessListener from dedicated reader threads."""
    def __init__(self, cmd, shell_cmd, env, listener,
            # "path" is an option in build systems
            path="",
            # "shell" is an option in build systems
            shell=False):
        # Exactly one of cmd (argv list) or shell_cmd (single string) is used.
        if not shell_cmd and not cmd:
            raise ValueError("shell_cmd or cmd is required")
        if shell_cmd and not isinstance(shell_cmd, str):
            raise ValueError("shell_cmd must be a string")
        self.listener = listener
        self.killed = False
        # Recorded so callers can report elapsed build time.
        self.start_time = time.time()
        # Hide the console window on Windows
        startupinfo = None
        if os.name == "nt":
            startupinfo = subprocess.STARTUPINFO()
            startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
        # Set temporary PATH to locate executable in cmd
        if path:
            old_path = os.environ["PATH"]
            # The user decides in the build system whether he wants to append $PATH
            # or tuck it at the front: "$PATH;C:\\new\\path", "C:\\new\\path;$PATH"
            os.environ["PATH"] = os.path.expandvars(path)
        # Child environment: current env plus caller overrides, with $VARS expanded.
        proc_env = os.environ.copy()
        proc_env.update(env)
        for k, v in proc_env.items():
            proc_env[k] = os.path.expandvars(v)
        if shell_cmd and sys.platform == "win32":
            # Use shell=True on Windows, so shell_cmd is passed through with the correct escaping
            self.proc = subprocess.Popen(shell_cmd, stdout=subprocess.PIPE,
                stderr=subprocess.PIPE, startupinfo=startupinfo, env=proc_env, shell=True)
        elif shell_cmd and sys.platform == "darwin":
            # Use a login shell on OSX, otherwise the users expected env vars won't be setup
            self.proc = subprocess.Popen(["/bin/bash", "-l", "-c", shell_cmd], stdout=subprocess.PIPE,
                stderr=subprocess.PIPE, startupinfo=startupinfo, env=proc_env, shell=False)
        elif shell_cmd and sys.platform == "linux":
            # Explicitly use /bin/bash on Linux, to keep Linux and OSX as
            # similar as possible. A login shell is explicitly not used for
            # linux, as it's not required
            self.proc = subprocess.Popen(["/bin/bash", "-c", shell_cmd], stdout=subprocess.PIPE,
                stderr=subprocess.PIPE, startupinfo=startupinfo, env=proc_env, shell=False)
        else:
            # Old style build system, just do what it asks
            self.proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                stderr=subprocess.PIPE, startupinfo=startupinfo, env=proc_env, shell=shell)
        # Restore the caller-visible PATH once the child has inherited it.
        if path:
            os.environ["PATH"] = old_path
        # One reader thread per pipe so neither stream can block the other.
        if self.proc.stdout:
            threading.Thread(target=self.read_stdout).start()
        if self.proc.stderr:
            threading.Thread(target=self.read_stderr).start()
    def kill(self):
        # Terminate the child (taskkill on Windows kills the shell's children
        # too) and detach the listener so no further callbacks fire.
        if not self.killed:
            self.killed = True
            if sys.platform == "win32":
                # terminate would not kill process opened by the shell cmd.exe, it will only kill
                # cmd.exe leaving the child running
                startupinfo = subprocess.STARTUPINFO()
                startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
                subprocess.Popen("taskkill /PID " + str(self.proc.pid), startupinfo=startupinfo)
            else:
                self.proc.terminate()
            self.listener = None
    def poll(self):
        # True while the child is still running.
        return self.proc.poll() == None
    def exit_code(self):
        # Child's return code, or None if it has not exited yet.
        return self.proc.poll()
    def read_stdout(self):
        # Reader-thread loop: forward stdout chunks; EOF signals completion.
        while True:
            data = os.read(self.proc.stdout.fileno(), 2**15)
            if len(data) > 0:
                if self.listener:
                    self.listener.on_data(self, data)
            else:
                self.proc.stdout.close()
                if self.listener:
                    self.listener.on_finished(self)
                break
    def read_stderr(self):
        # Reader-thread loop for stderr; EOF here does NOT signal completion.
        while True:
            data = os.read(self.proc.stderr.fileno(), 2**15)
            if len(data) > 0:
                if self.listener:
                    self.listener.on_data(self, data)
            else:
                self.proc.stderr.close()
                break
class base_exec(sublime_plugin.WindowCommand, ProcessListener):
    """Original unchanged ExecCommand, just renamed it so it does not clutter
    the namespace. Runs a build via AsyncProcess and streams its output into
    the "reexec" output panel."""
    def run(self, cmd = None, shell_cmd = None, file_regex = "", line_regex = "", working_dir = "",
            encoding = "utf-8", env = {}, quiet = False, kill = False,
            word_wrap = True, syntax = "Packages/Text/Plain text.tmLanguage",
            # Catches "path" and "shell"
            **kwargs):
        # kill=True cancels any in-flight build instead of starting a new one.
        if kill:
            if self.proc:
                self.proc.kill()
                self.proc = None
                self.append_string(None, "[Cancelled]")
            return
        if not hasattr(self, 'output_view'):
            # Try not to call get_output_panel until the regexes are assigned
            self.output_view = self.window.create_output_panel("reexec")
        # Default the to the current files directory if no working directory was given
        if (working_dir == "" and self.window.active_view()
                        and self.window.active_view().file_name()):
            working_dir = os.path.dirname(self.window.active_view().file_name())
        self.output_view.settings().set("result_file_regex", file_regex)
        self.output_view.settings().set("result_line_regex", line_regex)
        self.output_view.settings().set("result_base_dir", working_dir)
        self.output_view.settings().set("word_wrap", word_wrap)
        self.output_view.settings().set("line_numbers", False)
        self.output_view.settings().set("gutter", False)
        self.output_view.settings().set("scroll_past_end", False)
        self.output_view.assign_syntax(syntax)
        # Call create_output_panel a second time after assigning the above
        # settings, so that it'll be picked up as a result buffer
        self.window.create_output_panel("reexec")
        self.encoding = encoding
        self.quiet = quiet
        self.proc = None
        if not self.quiet:
            if shell_cmd:
                print("Running " + shell_cmd)
            else:
                print("Running " + " ".join(cmd))
            sublime.status_message("Building")
        show_panel_on_build = sublime.load_settings("Preferences.sublime-settings").get("show_panel_on_build", True)
        if show_panel_on_build:
            self.window.run_command("show_panel", {"panel": "output.reexec"})
        # Build-system env merged with the view's per-project 'build_env'.
        merged_env = env.copy()
        if self.window.active_view():
            user_env = self.window.active_view().settings().get('build_env')
            if user_env:
                merged_env.update(user_env)
        # Change to the working dir, rather than spawning the process with it,
        # so that emitted working dir relative path names make sense
        if working_dir != "":
            os.chdir(working_dir)
        self.debug_text = ""
        if shell_cmd:
            self.debug_text += "[shell_cmd: " + shell_cmd + "]\n"
        else:
            self.debug_text += "[cmd: " + str(cmd) + "]\n"
        self.debug_text += "[dir: " + str(os.getcwd()) + "]\n"
        if "PATH" in merged_env:
            self.debug_text += "[path: " + str(merged_env["PATH"]) + "]"
        else:
            self.debug_text += "[path: " + str(os.environ["PATH"]) + "]"
        try:
            # Forward kwargs to AsyncProcess
            self.proc = AsyncProcess(cmd, shell_cmd, merged_env, self, **kwargs)
        except Exception as e:
            self.append_string(None, str(e) + "\n")
            self.append_string(None, self.debug_text + "\n")
            if not self.quiet:
                self.append_string(None, "[Finished]")
    def is_enabled(self, kill = False):
        # The "cancel build" variant is only enabled while a build is running.
        if kill:
            return hasattr(self, 'proc') and self.proc and self.proc.poll()
        else:
            return True
    def append_data(self, proc, data):
        """Decode *data* and append it to the output panel (UI thread only)."""
        if proc != self.proc:
            # a second call to exec has been made before the first one
            # finished, ignore it instead of intermingling the output.
            if proc:
                proc.kill()
            return
        try:
            # Renamed from `str`, which shadowed the builtin.
            text = data.decode(self.encoding)
        except (UnicodeDecodeError, LookupError):
            # Was a bare `except:`; narrowed to the decode failures it guards.
            text = "[Decode error - output not " + self.encoding + "]\n"
            proc = None
        # Normalize newlines, Sublime Text always uses a single \n separator
        # in memory.
        text = text.replace('\r\n', '\n').replace('\r', '\n')
        self.output_view.run_command('append', {'characters': text, 'force': True, 'scroll_to_end': True})
    def append_string(self, proc, str):
        # NOTE(review): parameter name `str` shadows the builtin but is kept
        # for signature compatibility with existing callers.
        self.append_data(proc, str.encode(self.encoding))
    def finish(self, proc):
        """Report elapsed time / exit code and the result summary in the status bar."""
        if not self.quiet:
            elapsed = time.time() - proc.start_time
            exit_code = proc.exit_code()
            # `is None` instead of `== None` (exit code None means "killed/unknown").
            if exit_code == 0 or exit_code is None:
                self.append_string(proc,
                    ("[Finished in %.1fs]" % (elapsed)))
            else:
                self.append_string(proc, ("[Finished in %.1fs with exit code %d]\n"
                    % (elapsed, exit_code)))
                self.append_string(proc, self.debug_text)
        if proc != self.proc:
            return
        errs = self.output_view.find_all_results()
        if len(errs) == 0:
            sublime.status_message("Build finished")
        else:
            sublime.status_message(("Build finished with %d errors") % len(errs))
    def on_data(self, proc, data):
        # Marshal reader-thread callbacks onto Sublime's main thread.
        sublime.set_timeout(functools.partial(self.append_data, proc, data), 0)
    def on_finished(self, proc):
        sublime.set_timeout(functools.partial(self.finish, proc), 0)
class ReexecCommand(base_exec):
def run(self, cmd = None, shell_cmd = None, file_regex = "", line_regex = "", working_dir = "",
encoding = "utf-8", env = {}, quiet = False, kill = False,
word_wrap = True, syntax = "Packages/Text/Plain text.tmLanguage",
# These are new parameters:
remote_server = None, remote_cmd = None, excludes = [],
local_rsync_root = None, remote_rsync_root = None,
# Catches "path" and "shell"
**kwargs):
self.cmd_list = []
self.project_path = None
self.file_regex = file_regex
# if remote server is not specified, no remote commands will be executed
if remote_server:
plugin_settings = sublime.load_settings("Reexec.sublime-settings")
servers = plugin_settings.get("servers", [])
found_by_name = [s for s in servers if s['name']==remote_server]
if len(found_by_name)==0:
sublime.message_dialog('reexec: unknown remote server "{0}"'.format(remote_server))
return
server_settings = found_by_name[0]
remote_rsync_root = remote_rsync_root or server_settings['root_directory']
| |
OnTaskCharacteristics(self,event):
try:
dialog = TaskCharacteristicsDialog(self)
dialog.ShowModal()
dialog.Destroy()
except ARMException,errorText:
dlg = wx.MessageDialog(self,str(errorText),'Edit Task Characteristics',wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return
  def OnConceptReferences(self,event):
    """Open the modal Concept References editor; show ARM errors in a dialog."""
    try:
      dialog = ConceptReferencesDialog(self)
      dialog.ShowModal()
      dialog.Destroy()
    except ARMException,errorText:
      dlg = wx.MessageDialog(self,str(errorText),'Edit Concept References',wx.OK | wx.ICON_ERROR)
      dlg.ShowModal()
      dlg.Destroy()
    return
  def OnImportSecurityPatterns(self,event):
    """Choose a pattern-catalogue XML file and import it, reporting the result."""
    try:
      defaultDir = './sql'
      # NOTE(review): wildcard is assigned but never passed to the FileDialog,
      # here and in the sibling import handlers.
      wildcard = "Pattern Catalogue (*.xml) | *.xml |"
      spdlg = wx.FileDialog(None,'Import Security Patterns',defaultDir,style=wx.OPEN)
      if (spdlg.ShowModal() == wx.ID_OK):
        msgStr = importSecurityPatterns(spdlg.GetPath())
        dlg = wx.MessageDialog(self,msgStr,'Import Security Patterns',wx.OK | wx.ICON_INFORMATION )
        dlg.ShowModal()
        dlg.Destroy()
        spdlg.Destroy()
    except ARMException,errorText:
      dlg = wx.MessageDialog(self,str(errorText),'Import Security Patterns',wx.OK | wx.ICON_ERROR)
      dlg.ShowModal()
      dlg.Destroy()
  def OnImportComponentView(self,event):
    """Choose a component-view XML file and import it, reporting the result."""
    try:
      defaultDir = './sql'
      wildcard = "Component View (*.xml) | *.xml |"
      cvdlg = wx.FileDialog(None,'Import Component View',defaultDir,style=wx.OPEN)
      if (cvdlg.ShowModal() == wx.ID_OK):
        msgStr = importComponentViewFile(cvdlg.GetPath())
        dlg = wx.MessageDialog(self,msgStr,'Import Component View',wx.OK | wx.ICON_INFORMATION )
        dlg.ShowModal()
        dlg.Destroy()
        cvdlg.Destroy()
    except ARMException,errorText:
      dlg = wx.MessageDialog(self,str(errorText),'Import Component View',wx.OK | wx.ICON_ERROR)
      dlg.ShowModal()
      dlg.Destroy()
  def OnImportAttackPattern(self,event):
    """Choose an attack-pattern XML file and import it, reporting the result."""
    try:
      defaultDir = './sql'
      wildcard = "Attack Pattern (*.xml) | *.xml |"
      cvdlg = wx.FileDialog(None,'Import Attack Pattern',defaultDir,style=wx.OPEN)
      if (cvdlg.ShowModal() == wx.ID_OK):
        msgStr = importAttackPattern(cvdlg.GetPath())
        dlg = wx.MessageDialog(self,msgStr,'Import Attack Pattern',wx.OK | wx.ICON_INFORMATION )
        dlg.ShowModal()
        dlg.Destroy()
        cvdlg.Destroy()
    except ARMException,errorText:
      dlg = wx.MessageDialog(self,str(errorText),'Import Attack Pattern',wx.OK | wx.ICON_ERROR)
      dlg.ShowModal()
      dlg.Destroy()
  def OnImportSynopses(self,event):
    """Choose a synopses XML file and import it, reporting the result."""
    try:
      defaultDir = './sql'
      wildcard = "Synopses (*.xml) | *.xml |"
      cvdlg = wx.FileDialog(None,'Import Synopses',defaultDir,style=wx.OPEN)
      if (cvdlg.ShowModal() == wx.ID_OK):
        msgStr = importSynopsesFile(cvdlg.GetPath())
        dlg = wx.MessageDialog(self,msgStr,'Import Synopses',wx.OK | wx.ICON_INFORMATION )
        dlg.ShowModal()
        dlg.Destroy()
        cvdlg.Destroy()
    except ARMException,errorText:
      dlg = wx.MessageDialog(self,str(errorText),'Import Synopses',wx.OK | wx.ICON_ERROR)
      dlg.ShowModal()
      dlg.Destroy()
  def OnImportAssets(self,event):
    """Choose a template-assets XML file and import it, reporting the result."""
    try:
      defaultDir = './sql'
      wildcard = "Template Assets (*.xml) | *.xml |"
      cvdlg = wx.FileDialog(None,'Import Assets',defaultDir,style=wx.OPEN)
      if (cvdlg.ShowModal() == wx.ID_OK):
        msgStr = importAssetsFile(cvdlg.GetPath())
        dlg = wx.MessageDialog(self,msgStr,'Import Assets',wx.OK | wx.ICON_INFORMATION )
        dlg.ShowModal()
        dlg.Destroy()
        cvdlg.Destroy()
    except ARMException,errorText:
      dlg = wx.MessageDialog(self,str(errorText),'Import Assets',wx.OK | wx.ICON_ERROR)
      dlg.ShowModal()
      dlg.Destroy()
def OnImportTVTypes(self,event):
try:
defaultDir = './sql'
wildcard = "Types files (*.xml) | *.xml |"
tdlg = wx.FileDialog(None,'Import Types',defaultDir,style=wx.OPEN)
if (tdlg.ShowModal() == wx.ID_OK):
msgStr = importTVTypeFile(tdlg.GetPath())
dlg = wx.MessageDialog(self,msgStr,'Import Security Patterns',wx.OK | wx.ICON_INFORMATION )
dlg.ShowModal()
dlg.Destroy()
tdlg.Destroy()
except ARMException,errorText:
dlg = wx.MessageDialog(self,str(errorText),'Import Types',wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
def OnExportTVTypes(self,event):
try:
defaultBackupDir = './sql'
dlg = wx.FileDialog(self,message='Export threat and vulnerability types',defaultDir=defaultBackupDir,style=wx.SAVE | wx.OVERWRITE_PROMPT)
if (dlg.ShowModal() == wx.ID_OK):
exportFile = dlg.GetPath() + ".xml"
xmlBuf,ttCount,vtCount = self.dbProxy.tvTypesToXml()
f = open(exportFile,'w')
f.write(xmlBuf)
f.close()
confDlg = wx.MessageDialog(self,'Exported ' + str(ttCount) + ' threat types, and ' + str(vtCount) + ' vulnerability types','Export threat and vulnerability types',wx.OK | wx.ICON_INFORMATION)
confDlg.ShowModal()
confDlg.Destroy()
dlg.Destroy()
except ARMException,errorText:
dlg = wx.MessageDialog(self,str(errorText),'Export threat and vulnerability types',wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy
  def OnImportDirectories(self,event):
    """Choose a directory XML file and import it, reporting the result."""
    try:
      defaultDir = './sql'
      # NOTE(review): wildcard says "Types files" and is never used - likely
      # copy-pasted from OnImportTVTypes.
      wildcard = "Types files (*.xml) | *.xml |"
      tdlg = wx.FileDialog(None,'Import Directories',defaultDir,style=wx.OPEN)
      if (tdlg.ShowModal() == wx.ID_OK):
        msgStr = importDirectoryFile(tdlg.GetPath())
        dlg = wx.MessageDialog(self,msgStr,'Import Directories',wx.OK | wx.ICON_INFORMATION )
        dlg.ShowModal()
        dlg.Destroy()
        tdlg.Destroy()
    except ARMException,errorText:
      dlg = wx.MessageDialog(self,str(errorText),'Import Directories',wx.OK | wx.ICON_ERROR)
      dlg.ShowModal()
      dlg.Destroy()
  def OnAbout(self,event):
    """Show the About box; the version is parsed from the SVN $Id$ keyword."""
    import re
    svnid = '$Id: RMFrame.py 580 2012-03-19 17:45:52Z shaf $'
    svnidrep = r'^\$Id: (?P<filename>.+) (?P<revision>\d+) (?P<date>\d{4}-\d{2}-\d{1,2}) (?P<time>\d{2}:\d{2}:\d{2})Z (?P<user>\w+) \$$'
    mo = re.match(svnidrep,svnid)
    # mo is assumed to match because svnid is a literal in the same method.
    repVersion = 'Revision ' + mo.group('revision')
    info = wx.AboutDialogInfo()
    info.SetIcon(wx.Icon(self.directoryPrefix + 'iris.png',wx.BITMAP_TYPE_PNG))
    info.SetName('CAIRIS')
    info.SetVersion(repVersion)
    info.SetDescription('CAIRIS is a tool for specifying usable and secure systems')
    info.SetWebSite('http://cairis.org')
    info.SetLicense('CAIRIS is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;\nwithout even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\nPlease contact <NAME> for specific licensing queries.')
    info.AddDeveloper('<NAME>')
    wx.AboutBox(info)
  def OnImportRequirements(self,event):
    """Choose a goals/requirements XML file and import it, reporting the result."""
    try:
      defaultBackupDir = './sql'
      tdlg = wx.FileDialog(self,message='Import goals and requirements',defaultDir=defaultBackupDir,style=wx.OPEN)
      if (tdlg.ShowModal() == wx.ID_OK):
        msgStr = importRequirementsFile(tdlg.GetPath())
        dlg = wx.MessageDialog(self,msgStr,'Import goals and requirements',wx.OK | wx.ICON_INFORMATION )
        dlg.ShowModal()
        dlg.Destroy()
        tdlg.Destroy()
    except ARMException,errorText:
      dlg = wx.MessageDialog(self,str(errorText),'Import goals and requirements',wx.OK | wx.ICON_ERROR)
      dlg.ShowModal()
      dlg.Destroy()
def OnExportRequirements(self,event):
try:
defaultBackupDir = './sql'
dlg = wx.FileDialog(self,message='Export goals and requirements',defaultDir=defaultBackupDir,style=wx.SAVE | wx.OVERWRITE_PROMPT)
if (dlg.ShowModal() == wx.ID_OK):
exportFile = dlg.GetPath() + ".xml"
xmlBuf,dpCount,goalCount,obsCount,reqCount,cmCount = self.dbProxy.goalsToXml()
f = open(exportFile,'w')
f.write(xmlBuf)
f.close()
confDlg = wx.MessageDialog(self,'Exported ' + str(dpCount) + ' domain properties, ' + str(goalCount) + ' goals, ' + str(obsCount) + ' obstacles, ' + str(reqCount) + ' requirements, and ' + str(cmCount) + ' countermeasures','Export goals and requirements',wx.OK | wx.ICON_INFORMATION)
confDlg.ShowModal()
confDlg.Destroy()
dlg.Destroy()
except ARMException,errorText:
dlg = wx.MessageDialog(self,str(errorText),'Export goals and requirements',wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy
def OnExportRiskAnalysis(self,event):
try:
defaultBackupDir = './sql'
dlg = wx.FileDialog(self,message='Export risk data',defaultDir=defaultBackupDir,style=wx.SAVE | wx.OVERWRITE_PROMPT|wx.SAVE)
if (dlg.ShowModal() == wx.ID_OK):
exportFile = dlg.GetPath() + ".xml"
xmlBuf,roleCount,assetCount,vulCount,attackerCount,threatCount,riskCount,responseCount,relCount = self.dbProxy.riskAnalysisToXml()
f = open(exportFile,'w')
f.write(xmlBuf)
f.close()
confDlg = wx.MessageDialog(self,'Exported ' + str(roleCount) + ' roles, ' + str(assetCount) + ' assets, ' + str(vulCount) + ' vulnerabilities, ' + str(attackerCount) + ' attackers, ' + str(threatCount) + ' threats, ' + str(riskCount) + ' risks, ' + str(responseCount) + ' responses, and ' + str(relCount) + ' asset associations','Export risk data',wx.OK | wx.ICON_INFORMATION)
confDlg.ShowModal()
confDlg.Destroy()
dlg.Destroy()
except ARMException,errorText:
dlg = wx.MessageDialog(self,str(errorText),'Export risk data',wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy
  def OnImportRiskAnalysis(self,event):
    """Choose a risk-data XML file and import it, reporting the result."""
    try:
      defaultBackupDir = './sql'
      tdlg = wx.FileDialog(self,message='Import risk data',defaultDir=defaultBackupDir,style=wx.OPEN)
      if (tdlg.ShowModal() == wx.ID_OK):
        msgStr = importRiskAnalysisFile(tdlg.GetPath())
        dlg = wx.MessageDialog(self,msgStr,'Import risk data',wx.OK | wx.ICON_INFORMATION )
        dlg.ShowModal()
        dlg.Destroy()
        tdlg.Destroy()
    except ARMException,errorText:
      dlg = wx.MessageDialog(self,str(errorText),'Import risk data',wx.OK | wx.ICON_ERROR)
      dlg.ShowModal()
      dlg.Destroy()
def OnExportUsability(self,event):
try:
defaultBackupDir = './sql'
dlg = wx.FileDialog(self,message='Export usability data',defaultDir=defaultBackupDir,style=wx.SAVE | wx.OVERWRITE_PROMPT)
if (dlg.ShowModal() == wx.ID_OK):
exportFile = dlg.GetPath() + ".xml"
xmlBuf,personaCount,edCount,drCount,pcCount,taskCount,ucCount = self.dbProxy.usabilityToXml()
f = open(exportFile,'w')
f.write(xmlBuf)
f.close()
confDlg = wx.MessageDialog(self,'Exported ' + str(personaCount) + ' personas, ' + str(edCount) + ' external documents, ' + str(drCount) + ' document and concept references, ' + str(pcCount) + ' persona references, ' + str(taskCount) + ' tasks, and ' + str(ucCount) + ' use cases','Export usability data',wx.OK | wx.ICON_INFORMATION)
confDlg.ShowModal()
confDlg.Destroy()
dlg.Destroy()
except ARMException,errorText:
dlg = wx.MessageDialog(self,str(errorText),'Export usability data',wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy
  def OnImportUsability(self,event):
    """Choose a usability-data XML file and import it, reporting the result."""
    try:
      defaultBackupDir = './sql'
      tdlg = wx.FileDialog(self,message='Import usability data',defaultDir=defaultBackupDir,style=wx.OPEN)
      if (tdlg.ShowModal() == wx.ID_OK):
        msgStr = importUsabilityFile(tdlg.GetPath())
        dlg = wx.MessageDialog(self,msgStr,'Import usability data',wx.OK | wx.ICON_INFORMATION )
        dlg.ShowModal()
        dlg.Destroy()
        tdlg.Destroy()
    except ARMException,errorText:
      dlg = wx.MessageDialog(self,str(errorText),'Import usability data',wx.OK | wx.ICON_ERROR)
      dlg.ShowModal()
      dlg.Destroy()
def OnExportUsability(self,event):
try:
defaultBackupDir = './sql'
dlg = wx.FileDialog(self,message='Export usability data',defaultDir=defaultBackupDir,style=wx.SAVE | wx.OVERWRITE_PROMPT)
if (dlg.ShowModal() == wx.ID_OK):
exportFile = dlg.GetPath() + ".xml"
xmlBuf,pCount,edCount,drCount,pcCount,taskCount,ucCount = self.dbProxy.usabilityToXml()
f = open(exportFile,'w')
f.write(xmlBuf)
f.close()
confDlg = wx.MessageDialog(self,'Exported ' + str(pCount) + ' personas, ' + str(drCount) + ' document references, ' + str(pcCount) + ' persona characteristics, ' + str(taskCount) + ' tasks, and ' + str(ucCount) + ' use cases','Export usability data',wx.OK | wx.ICON_INFORMATION)
confDlg.ShowModal()
confDlg.Destroy()
dlg.Destroy()
except ARMException,errorText:
dlg = wx.MessageDialog(self,str(errorText),'Export usability data',wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy
def OnExportAssociations(self,event):
try:
defaultBackupDir = './sql'
dlg = wx.FileDialog(self,message='Export association data',defaultDir=defaultBackupDir,style=wx.SAVE | wx.OVERWRITE_PROMPT)
if (dlg.ShowModal() == wx.ID_OK):
exportFile = dlg.GetPath() + ".xml"
xmlBuf,maCount,gaCount,rrCount,depCount = self.dbProxy.associationsToXml()
f = open(exportFile,'w')
f.write(xmlBuf)
f.close()
confDlg = wx.MessageDialog(self,'Exported ' + str(maCount) + ' manual associations, ' + str(gaCount) + ' goal associations, ' + str(rrCount) + ' non-goal responsibility relationships, and ' + str(depCount) + ' dependency associations','Export association data',wx.OK | wx.ICON_INFORMATION)
confDlg.ShowModal()
confDlg.Destroy()
dlg.Destroy()
except ARMException,errorText:
dlg = wx.MessageDialog(self,str(errorText),'Export association data',wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy
  def OnImportAssociations(self,event):
    """Choose an association-data XML file and import it, reporting the result."""
    try:
      defaultBackupDir = './sql'
      tdlg = wx.FileDialog(self,message='Import association data',defaultDir=defaultBackupDir,style=wx.OPEN)
      if (tdlg.ShowModal() == wx.ID_OK):
        msgStr = importAssociationsFile(tdlg.GetPath())
        dlg = wx.MessageDialog(self,msgStr,'Import association data',wx.OK | wx.ICON_INFORMATION )
        dlg.ShowModal()
        dlg.Destroy()
        tdlg.Destroy()
    except ARMException,errorText:
      dlg = wx.MessageDialog(self,str(errorText),'Import association data',wx.OK | wx.ICON_ERROR)
      dlg.ShowModal()
      dlg.Destroy()
def OnExportProject(self,event):
try:
defaultBackupDir = './sql'
dlg = wx.FileDialog(self,message='Export project data',defaultDir=defaultBackupDir,style=wx.SAVE | wx.OVERWRITE_PROMPT)
if (dlg.ShowModal() == wx.ID_OK):
exportFile = dlg.GetPath() + ".xml"
xmlBuf= self.dbProxy.projectToXml()
f = open(exportFile,'w')
f.write(xmlBuf)
f.close()
confDlg = wx.MessageDialog(self,'Exported project data','Export project data',wx.OK | wx.ICON_INFORMATION)
confDlg.ShowModal()
confDlg.Destroy()
dlg.Destroy()
except ARMException,errorText:
dlg = wx.MessageDialog(self,str(errorText),'Export project data',wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy
  def OnImportModel(self,event):
    """Choose a model XML file and import it, reporting the result."""
    try:
      defaultBackupDir = './sql'
      tdlg = wx.FileDialog(self,message='Import model',defaultDir=defaultBackupDir,style=wx.OPEN)
      if (tdlg.ShowModal() == wx.ID_OK):
        msgStr = importModelFile(tdlg.GetPath())
        dlg = wx.MessageDialog(self,msgStr,'Import model',wx.OK | wx.ICON_INFORMATION )
        dlg.ShowModal()
        dlg.Destroy()
        tdlg.Destroy()
    except ARMException,errorText:
      dlg = wx.MessageDialog(self,str(errorText),'Import model',wx.OK | wx.ICON_ERROR)
      dlg.ShowModal()
      dlg.Destroy()
  def OnImportProject(self,event):
    """Choose a project-data XML file and import it, reporting the result."""
    try:
      defaultBackupDir = './sql'
      tdlg = wx.FileDialog(self,message='Import project data',defaultDir=defaultBackupDir,style=wx.OPEN)
      if (tdlg.ShowModal() == wx.ID_OK):
        msgStr = importProjectFile(tdlg.GetPath())
        dlg = wx.MessageDialog(self,msgStr,'Import project data',wx.OK | wx.ICON_INFORMATION )
        dlg.ShowModal()
        dlg.Destroy()
        tdlg.Destroy()
    except ARMException,errorText:
      dlg = wx.MessageDialog(self,str(errorText),'Import project data',wx.OK | wx.ICON_ERROR)
      dlg.ShowModal()
      dlg.Destroy()
def OnExportModel(self,event):
try:
defaultBackupDir = './sql'
| |
# pylint: disable=invalid-name
"""Create json data which will be send via post for the V4 api"""
if not params:
params = [{
"clientid": self.client_id,
"nickname": self.nickname
}, [{
"clientid": self.client_id,
"nickname": self.nickname,
"value": "yes",
"function": "WOL"
}]]
return {
"method": method,
"params": params,
"id": 1,
"version": "1.0"
}
    def _send_http(self, url, method, **kwargs):
        # pylint: disable=too-many-arguments
        """Send request command via HTTP json to Sony Bravia."""
        # Control flags are consumed before kwargs is forwarded to requests.
        log_errors = kwargs.pop("log_errors", True)
        raise_errors = kwargs.pop("raise_errors", False)
        # A "method" kwarg (plain verb string) overrides the HttpMethod enum.
        method = kwargs.pop("method", method.value)
        params = {
            "cookies": self.cookies,
            "timeout": TIMEOUT,
            "headers": self.headers,
        }
        params.update(kwargs)
        _LOGGER.debug(
            "Calling http url %s method %s", url, method)
        try:
            # getattr(requests, "get"/"post"/...) dispatches on the verb name.
            response = getattr(requests, method)(url, **params)
            response.raise_for_status()
        except requests.exceptions.RequestException as ex:
            if log_errors:
                _LOGGER.error("HTTPError: %s", str(ex))
            if raise_errors:
                raise
            # On a swallowed error this implicitly returns None; callers
            # test the result's truthiness.
        else:
            return response
    def _post_soap_request(self, url, params, action):
        """POST a SOAP envelope wrapping *params*; return the decoded body,
        or False when no response was received."""
        headers = {
            'SOAPACTION': '"{0}"'.format(action),
            "Content-Type": "text/xml"
        }
        data = """<?xml version='1.0' encoding='utf-8'?>
            <SOAP-ENV:Envelope xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/"
                SOAP-ENV:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">
                <SOAP-ENV:Body>
                    {0}
                </SOAP-ENV:Body>
            </SOAP-ENV:Envelope>""".format(params)
        response = self._send_http(
            url, method=HttpMethod.POST, headers=headers, data=data)
        if response:
            return response.content.decode("utf-8")
        return False
    def _send_req_ircc(self, params):
        """Send an IRCC command via HTTP to Sony Bravia."""
        # *params* is the raw IRCC code string inserted into the SOAP body.
        data = """<u:X_SendIRCC xmlns:u="urn:schemas-sony-com:service:IRCC:1">
            <IRCCCode>{0}</IRCCCode>
        </u:X_SendIRCC>""".format(params)
        action = "urn:schemas-sony-com:service:IRCC:1#X_SendIRCC"
        content = self._post_soap_request(
            url=self.control_url, params=data, action=action)
        return content
def _send_command(self, name):
if not self.commands:
self.init_device()
if self.commands:
if name in self.commands:
self._send_req_ircc(self.commands[name].value)
else:
raise ValueError('Unknown command: %s' % name)
else:
raise ValueError('Failed to read command list from device.')
    def _get_action(self, name):
        """Get the action object for the action with the given name"""
        # Lazily populate the action list on first use.
        if name not in self.actions and not self.actions:
            self.init_device()
        if name not in self.actions and not self.actions:
            raise ValueError('Failed to read action list from device.')
        # NOTE(review): if the list is non-empty but *name* is absent this
        # raises KeyError rather than ValueError - confirm callers expect that.
        return self.actions[name]
    def _register_without_auth(self, registration_action):
        """Hit the registration URL for devices that need no authentication."""
        try:
            self._send_http(
                registration_action.url,
                method=HttpMethod.GET,
                raise_errors=True)
            # set the pin to something to make sure init_device is called
            self.pin = 9999
        except requests.exceptions.RequestException:
            return AuthenticationResult.ERROR
        else:
            return AuthenticationResult.SUCCESS
    @staticmethod
    def _handle_register_error(ex):
        """Map a registration RequestException onto an AuthenticationResult;
        HTTP 401 means the device wants a pin."""
        if isinstance(ex, requests.exceptions.HTTPError) \
                and ex.response.status_code == 401:
            return AuthenticationResult.PIN_NEEDED
        return AuthenticationResult.ERROR
    def _register_v3(self, registration_action):
        """Register with a V3 device via a plain GET; errors are mapped by
        _handle_register_error."""
        try:
            self._send_http(registration_action.url,
                            method=HttpMethod.GET, raise_errors=True)
        except requests.exceptions.RequestException as ex:
            return self._handle_register_error(ex)
        else:
            return AuthenticationResult.SUCCESS
    def _register_v4(self, registration_action):
        """Register against the V4 JSON api using basic auth with the pin."""
        authorization = self._create_api_json("actRegister")
        try:
            headers = {
                "Content-Type": "application/json"
            }
            # An unset pin authenticates with an empty password (pre-pin phase).
            if self.pin is None:
                auth_pin = ''
            else:
                auth_pin = str(self.pin)
            response = self._send_http(registration_action.url,
                                       method=HttpMethod.POST,
                                       headers=headers,
                                       auth=('', auth_pin),
                                       data=json.dumps(authorization),
                                       raise_errors=True)
        except requests.exceptions.RequestException as ex:
            return self._handle_register_error(ex)
        else:
            resp = response.json()
            if not resp or resp.get('error'):
                return AuthenticationResult.ERROR
            # Keep the auth cookie for subsequent requests.
            self.cookies = response.cookies
            return AuthenticationResult.SUCCESS
def _add_headers(self):
"""Add headers which all devices need"""
self.headers['X-CERS-DEVICE-ID'] = self.client_id
self.headers['X-CERS-DEVICE-INFO'] = self.client_id
    def _recreate_auth_cookie(self):
        """Recreate auth cookie for all urls

        Default cookie is for URL/sony.
        For some commands we need it for the root path
        """
        # pylint: disable=abstract-class-instantiated
        # A fresh jar with only the "auth" value, unscoped to a path.
        cookies = requests.cookies.RequestsCookieJar()
        cookies.set("auth", self.cookies.get("auth"))
        return cookies
def register(self):
"""Register at the api.
The name which will be displayed in the UI of the device.
Make sure this name does not exist yet.
For this the device must be put in registration mode.
"""
registration_result = AuthenticationResult.ERROR
registration_action = registration_action = self._get_action(
"register")
if registration_action.mode < 3:
registration_result = self._register_without_auth(
registration_action)
elif registration_action.mode == 3:
registration_result = self._register_v3(registration_action)
elif registration_action.mode == 4:
registration_result = self._register_v4(registration_action)
else:
raise ValueError(
"Registration mode {0} is not supported"
.format(registration_action.mode))
if registration_result is AuthenticationResult.SUCCESS:
self.init_device()
return registration_result
def send_authentication(self, pin):
"""Authenticate against the device."""
registration_action = self._get_action("register")
# they do not need a pin
if registration_action.mode < 2:
return True
if not pin:
return False
self.pin = pin
self._recreate_authentication()
result = self.register()
return AuthenticationResult.SUCCESS == result
    def wakeonlan(self, broadcast='255.255.255.255'):
        """Start the device either via wakeonlan."""
        # No-op unless a MAC address is known for the device.
        if self.mac:
            wakeonlan.send_magic_packet(self.mac, ip_address=broadcast)
    def get_playing_status(self):
        """Get the status of playback from the device"""
        data = """<m:GetTransportInfo xmlns:m="urn:schemas-upnp-org:service:AVTransport:1">
            <InstanceID>0</InstanceID>
        </m:GetTransportInfo>"""
        action = "urn:schemas-upnp-org:service:AVTransport:1#GetTransportInfo"
        content = self._post_soap_request(
            url=self.av_transport_url, params=data, action=action)
        # An unreachable device is reported as "OFF" rather than an error.
        if not content:
            return "OFF"
        return find_in_xml(content, [".//CurrentTransportState"]).text
    def get_power_status(self):
        """Check if the device is online."""
        if self.api_version < 4:
            # Pre-V4 devices: reachability of the action-list URL means "on".
            url = self.actionlist_url
            try:
                self._send_http(url, HttpMethod.GET,
                                log_errors=False, raise_errors=True)
            except requests.exceptions.RequestException as ex:
                _LOGGER.debug(ex)
                return False
            return True
        # V4 devices expose an explicit getPowerStatus JSON endpoint.
        try:
            resp = self._send_http(urljoin(self.base_url, "system"),
                                   HttpMethod.POST,
                                   json=self._create_api_json(
                                       "getPowerStatus"))
            if not resp:
                return False
            json_data = resp.json()
            if not json_data.get('error'):
                power_data = json_data.get('result')[0]
                return power_data.get('status') != "off"
        except requests.RequestException:
            # Best-effort: any transport failure is reported as "off".
            pass
        return False
    def start_app(self, app_name):
        """Start an app by name"""
        # sometimes device does not start app if already running one
        self.home()

        if self.api_version < 4:
            url = "{0}/apps/{1}".format(self.app_url, self.apps[app_name].id)
            data = "LOCATION: {0}/run".format(url)
            self._send_http(url, HttpMethod.POST, data=data)
        else:
            # V4 devices use the DIAL endpoint and need the root-path cookie.
            url = 'http://{}/DIAL/apps/{}'.format(
                self.host, self.apps[app_name].id)
            self._send_http(url, HttpMethod.POST,
                            cookies=self._recreate_auth_cookie())
    def power(self, power_on, broadcast='255.255.255.255'):
        """Powers the device on or shuts it off."""
        if power_on:
            self.wakeonlan(broadcast)
            # Try using the power on command in case the WOL doesn't work
            if not self.get_power_status():
                self._send_command('Power')
        else:
            self._send_command('Power')
def get_apps(self):
"""Get the apps from the stored dict."""
return list(self.apps.keys())
    # --- One-shot IRCC remote-control commands; each delegates the named
    # --- key press to _send_command.
    def volume_up(self):
        # pylint: disable=invalid-name
        """Send the command 'VolumeUp' to the connected device."""
        self._send_command('VolumeUp')
    def volume_down(self):
        # pylint: disable=invalid-name
        """Send the command 'VolumeDown' to the connected device."""
        self._send_command('VolumeDown')
    def mute(self):
        # pylint: disable=invalid-name
        """Send the command 'Mute' to the connected device."""
        self._send_command('Mute')
    def up(self):
        # pylint: disable=invalid-name
        """Send the command 'up' to the connected device."""
        self._send_command('Up')
    def confirm(self):
        """Send the command 'confirm' to the connected device."""
        self._send_command('Confirm')
    def down(self):
        """Send the command 'down' to the connected device."""
        self._send_command('Down')
    def right(self):
        """Send the command 'right' to the connected device."""
        self._send_command('Right')
    def left(self):
        """Send the command 'left' to the connected device."""
        self._send_command('Left')
    def home(self):
        """Send the command 'home' to the connected device."""
        self._send_command('Home')
    def options(self):
        """Send the command 'options' to the connected device."""
        self._send_command('Options')
    def returns(self):
        """Send the command 'returns' to the connected device."""
        self._send_command('Return')
    def num1(self):
        """Send the command 'num1' to the connected device."""
        self._send_command('Num1')
    def num2(self):
        """Send the command 'num2' to the connected device."""
        self._send_command('Num2')
    def num3(self):
        """Send the command 'num3' to the connected device."""
        self._send_command('Num3')
    def num4(self):
        """Send the command 'num4' to the connected device."""
        self._send_command('Num4')
    def num5(self):
        """Send the command 'num5' to the connected device."""
        self._send_command('Num5')
    def num6(self):
        """Send the command 'num6' to the connected device."""
        self._send_command('Num6')
    def num7(self):
        """Send the command 'num7' to the connected device."""
        self._send_command('Num7')
    def num8(self):
        """Send the command 'num8' to the connected device."""
        self._send_command('Num8')
    def num9(self):
        """Send the command 'num9' to the connected device."""
        self._send_command('Num9')
    def num0(self):
        """Send the command 'num0' to the connected device."""
        self._send_command('Num0')
    def display(self):
        """Send the command 'display' to the connected device."""
        self._send_command('Display')
    def audio(self):
        """Send the command 'audio' to the connected device."""
        self._send_command('Audio')
    def sub_title(self):
        """Send the command 'subTitle' to the connected device."""
        self._send_command('SubTitle')
    def favorites(self):
        """Send the command 'favorites' to the connected device."""
        self._send_command('Favorites')
    def yellow(self):
        """Send the command 'yellow' to the connected device."""
        self._send_command('Yellow')
    def blue(self):
        """Send the command 'blue' to the connected device."""
        self._send_command('Blue')
    def red(self):
        """Send the command 'red' to the connected device."""
        self._send_command('Red')
    def green(self):
        """Send the command 'green' to the connected device."""
        self._send_command('Green')
    def play(self):
        """Send the command 'play' to the connected device."""
        self._send_command('Play')
    def stop(self):
        """Send the command 'stop' to the connected device."""
        self._send_command('Stop')
    def pause(self):
        """Send the command 'pause' to the connected device."""
        self._send_command('Pause')
    def rewind(self):
        """Send the command 'rewind' to the connected device."""
        self._send_command('Rewind')
    def forward(self):
        """Send the command 'forward' to the connected device."""
        self._send_command('Forward')
    def prev(self):
        """Send the command 'prev' to the connected device."""
        self._send_command('Prev')
    def next(self):
        """Send the command 'next' to the connected device."""
        self._send_command('Next')
    def replay(self):
        """Send the command 'replay' to the connected device."""
        self._send_command('Replay')
    def advance(self):
        """Send the command 'advance' to the connected device."""
        self._send_command('Advance')
    def angle(self):
        """Send the command 'angle' to the connected device."""
        self._send_command('Angle')
    def top_menu(self):
        """Send the command 'top_menu' to the connected device."""
        self._send_command('TopMenu')
    def pop_up_menu(self):
        """Send the command 'pop_up_menu' to the connected device."""
        self._send_command('PopUpMenu')
    def eject(self):
        """Send the command 'eject' to the connected device."""
        self._send_command('Eject')
    def karaoke(self):
        """Send the command 'karaoke' to the connected device."""
        self._send_command('Karaoke')
    def netflix(self):
        """Send the command 'netflix' to the connected device."""
        self._send_command('Netflix')
    def mode_3d(self):
        """Send the command 'mode_3d' to the connected device."""
        self._send_command('Mode3D')
    def zoom_in(self):
        """Send the command 'zoom_in' to the connected device."""
        self._send_command('ZoomIn')
    def zoom_out(self):
        """Send the command 'zoom_out' to the connected device."""
        self._send_command('ZoomOut')
| |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division,print_function,absolute_import,unicode_literals
import time
import datetime
import os
# Hour-wrap limits: hours may run past midnight up to 24 (lower) or 48 (upper).
LTsv_HourMAXLower,LTsv_HourMAXUpper=(24,48)
LTsv_overhour=LTsv_HourMAXLower
LTsv_diffminute=0
# Zodiac animal names: Japanese readings and Chinese characters (rat..boar).
LTsv_zodiacjp=("鼠","牛","虎","兎","龍","蛇","馬","羊","猿","鶏","犬","猪")
LTsv_zodiacch=("子","丑","寅","卯","辰","巳","午","未","申","酉","戌","亥")
# Days-per-month tables, 1-based (index 0 is a pad); the second is for leap years.
LTsv_maxmonth= (31,31,28,31,30,31,30,31,31,30,31,30,31,31)
LTsv_maxmonthleep=(31,31,29,31,30,31,30,31,31,30,31,30,31,31)
# Traditional Japanese month names; *_jp variants are space-padded for alignment,
# *iz variants use 神有月 in place of 神無月 for October.
LTsv_monthjp= ( "睦月", "如月", "弥生", "卯月", "皐月","水無月", "文月", "葉月", "長月", "神無月", "霜月", "師走")
LTsv_month_jp= (" 睦月", " 如月"," 弥生", " 卯月"," 皐月","水無月"," 文月"," 葉月"," 長月", "神無月", " 霜月"," 師走")
LTsv_monthjpiz= ( "睦月", "如月", "弥生", "卯月", "皐月","水無月", "文月", "葉月", "長月", "神有月", "霜月", "師走")
LTsv_month_jpiz=(" 睦月", " 如月"," 弥生", " 卯月"," 皐月","水無月"," 文月"," 葉月"," 長月", "神有月", " 霜月"," 師走")
# English month names: long, short, single-char code, and long form with the
# code letter capitalised (matches the enc letters).
LTsv_monthenl= ("January","February","March","April", "May", "June", "July", "August","September","October","November","December")
LTsv_monthens= ("Jan", "Feb", "Mar", "Apr" , "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec")
LTsv_monthenc= ("J", "F", "C", "A", "M", "N", "L", "U", "S", "O", "N" ,"D")
LTsv_monthenh= ("January","February","marCh","April", "May", "juNe", "juLy", "aUgust","September","October","November","December")
# Weekday names, Monday-first (matching datetime.weekday()).
LTsv_weekdayjp =("月", "火", "水", "木", "金", "土", "日")
LTsv_weekdayens=("Mon", "Tue", "Wed" ,"Thu", "Fri", "Sat", "Sun")
LTsv_weekdayenl=("Monday","Tuesday","Wednesday","Thursday","Friday","Saturday","Sunday")
LTsv_weekdayenc=("M", "T", "W", "R", "F", "S", "U")
LTsv_weekdayenh=("Monday","Tuesday","Wednesday","thuRsday","Friday","Saturday","sUnday")
# AM/PM markers; the third entry marks "all-night" (past-midnight) hours.
LTsv_ampmjp= ("午前","午後","徹夜")
LTsv_ampmenl=("am", "pm", "an")
LTsv_ampmenu=("AM", "PM", "AN")
def LTsv_yearleap(LTsv_toyear):
	"""Return True when *LTsv_toyear* is a Gregorian leap year."""
	if LTsv_toyear % 400 == 0:
		return True
	return LTsv_toyear % 4 == 0 and LTsv_toyear % 100 != 0
def LTsv_yearweeks(LTsv_toyear):
	"""Return the number of ISO-8601 weeks (52 or 53) in *LTsv_toyear*."""
	iso_year, iso_week, _ = datetime.date(LTsv_toyear, 12, 31).isocalendar()
	if iso_year == LTsv_toyear:
		return iso_week
	# Dec 31 already belongs to week 1 of the next ISO year; Dec 24 always
	# falls inside the last ISO week of *LTsv_toyear*.
	return datetime.date(LTsv_toyear, 12, 24).isocalendar()[1]
def LTsv_monthleap(LTsv_toyear,LTsv_tomonth):
	"""Return the day count of month *LTsv_tomonth* (1-based index into the
	padded month tables), using the leap-year table when appropriate."""
	month_table = LTsv_maxmonthleep if LTsv_yearleap(LTsv_toyear) else LTsv_maxmonth
	return month_table[LTsv_tomonth]
def LTsv_beat864(LTsv_tohour,LTsv_tominute,LTsv_tosecond):
 """Convert a clock time into 1000-per-day "beat" units.

 Returns (seconds_since_midnight, whole_beats 0-999, fractional_part 0-999).
 """
 seconds=(LTsv_tohour*60+LTsv_tominute)*60+LTsv_tosecond
 whole=seconds*1000//86400
 frac=seconds*1000000//86400%1000
 return seconds,whole,frac
# ===== module-wide time snapshot, taken once at import and refreshed by the
# ===== LTsv_putdaytime* functions below =====
LTsv_earlier_now=datetime.datetime.now()
LTsv_meridian_now=LTsv_earlier_now
# "meridian" clock: the plain calendar view of LTsv_meridian_now
LTsv_meridian_Year=LTsv_meridian_now.year
LTsv_meridian_Yearlower=LTsv_meridian_Year%100
LTsv_meridian_YearZodiac=(LTsv_meridian_Year+8)%12
LTsv_meridian_YearDays=365 if not LTsv_yearleap(LTsv_meridian_Year) else 366
LTsv_meridian_YearIso,LTsv_meridian_WeekNumberYearIso,LTsv_meridian_WeekDayIso=LTsv_meridian_now.isocalendar()
LTsv_meridian_YearWeeksIso=LTsv_yearweeks(LTsv_meridian_Year)
LTsv_meridian_Month=LTsv_meridian_now.month
LTsv_meridian_MonthDays=LTsv_monthleap(LTsv_meridian_Year,LTsv_meridian_Month)
LTsv_meridian_WeekDay=LTsv_meridian_now.weekday()
# NOTE(review): weekday() is 0-6, so WeekDay//7+1 is always 1 — week-of-month
# probably meant to divide the day of month instead; confirm intent.
LTsv_meridian_WeekNumberMonth=LTsv_meridian_WeekDay//7+1
LTsv_meridian_DayMonth=LTsv_meridian_now.day
LTsv_meridian_DayYear=LTsv_meridian_now.toordinal()-datetime.date(LTsv_meridian_Year,1,1).toordinal()+1
LTsv_meridian_Hour=LTsv_meridian_now.hour
LTsv_meridian_HourAP=LTsv_meridian_Hour%12
# AP / APO index the LTsv_ampm* tables (0=am, 1=pm; APO may be set to 2,
# the "overnight" slot, by LTsv_setdaytimeshift)
LTsv_meridian_AP=LTsv_meridian_Hour//12
LTsv_meridian_APO=LTsv_meridian_Hour//12
LTsv_meridian_miNute=LTsv_meridian_now.minute
LTsv_meridian_Second=LTsv_meridian_now.second
LTsv_meridian_micRoSecond=LTsv_meridian_now.microsecond
LTsv_meridian_miLliSecond=LTsv_meridian_micRoSecond//1000
LTsv_meridian_Beat,LTsv_meridian_BeatInteger,LTsv_meridian_BeatPoint=LTsv_beat864(LTsv_meridian_Hour,LTsv_meridian_miNute,LTsv_meridian_Second)
# "allnight" clock: starts as a copy of the meridian clock; LTsv_setdaytimeshift
# keeps pre-dawn hours attached to the previous calendar day
LTsv_allnight_now=LTsv_meridian_now
LTsv_allnight_Year=LTsv_meridian_Year
LTsv_allnight_Yearlower=LTsv_meridian_Yearlower
LTsv_allnight_YearZodiac=LTsv_meridian_YearZodiac
LTsv_allnight_YearDays=LTsv_meridian_YearDays
LTsv_allnight_YearIso,LTsv_allnight_WeekNumberYearIso,LTsv_allnight_WeekDayIso=LTsv_meridian_YearIso,LTsv_meridian_WeekNumberYearIso,LTsv_meridian_WeekDayIso
LTsv_allnight_YearWeeksIso=LTsv_meridian_YearWeeksIso
LTsv_allnight_Month=LTsv_meridian_Month
LTsv_allnight_MonthDays=LTsv_meridian_MonthDays
LTsv_allnight_WeekDay=LTsv_meridian_WeekDay
LTsv_allnight_WeekNumberMonth=LTsv_allnight_WeekDay//7+1
LTsv_allnight_DayMonth=LTsv_meridian_DayMonth
LTsv_allnight_DayYear=LTsv_meridian_DayYear
LTsv_allnight_Hour=LTsv_meridian_Hour
LTsv_allnight_miNute=LTsv_meridian_miNute
LTsv_allnight_Second=LTsv_meridian_Second
LTsv_allnight_micRoSecond=LTsv_meridian_now.microsecond
LTsv_allnight_miLliSecond=LTsv_meridian_miLliSecond
# stopwatch anchors: start / lap / goal coincide until the caller moves them
LTsv_start_now=LTsv_meridian_now
LTsv_lap_now=LTsv_start_now+datetime.timedelta(microseconds=0)
LTsv_goal_now=LTsv_start_now+datetime.timedelta(microseconds=0)
# elapsed time (lap - start), decomposed into calendar-style fields (all zero here)
LTsv_passed_TotalSeconds=(LTsv_lap_now-LTsv_start_now).total_seconds()
LTsv_passed_micRoSecond=int(LTsv_passed_TotalSeconds*1000000)%1000000
LTsv_passed_miLliSecond=int(LTsv_passed_TotalSeconds*1000)%1000
LTsv_passed_Second=int(LTsv_passed_TotalSeconds)%60
LTsv_passed_miNute=int(LTsv_passed_TotalSeconds/60)%60
LTsv_passed_Hour=int(LTsv_passed_TotalSeconds/3600)%24
LTsv_passed_DayHour=int(LTsv_passed_TotalSeconds/3600)
LTsv_passed_Day=int(LTsv_passed_TotalSeconds/86400)
LTsv_passed_Beat,LTsv_passed_BeatInteger,LTsv_passed_BeatPoint=LTsv_beat864(LTsv_passed_Hour,LTsv_passed_miNute,LTsv_passed_Second)
# remaining time (goal - lap), same decomposition
LTsv_timeleft_TotalSeconds=(LTsv_goal_now-LTsv_lap_now).total_seconds()
LTsv_timeleft_micRoSecond=int(LTsv_timeleft_TotalSeconds*1000000)%1000000
LTsv_timeleft_miLliSecond=int(LTsv_timeleft_TotalSeconds*1000)%1000
LTsv_timeleft_Second=int(LTsv_timeleft_TotalSeconds)%60
LTsv_timeleft_miNute=int(LTsv_timeleft_TotalSeconds/60)%60
LTsv_timeleft_Hour=int(LTsv_timeleft_TotalSeconds/3600)%24
LTsv_timeleft_DayHour=int(LTsv_timeleft_TotalSeconds/3600)
LTsv_timeleft_Day=int(LTsv_timeleft_TotalSeconds/86400)
LTsv_timeleft_Beat,LTsv_timeleft_BeatInteger,LTsv_timeleft_BeatPoint=LTsv_beat864(LTsv_timeleft_Hour,LTsv_timeleft_miNute,LTsv_timeleft_Second)
# total allotted time (goal - start), same decomposition
LTsv_limit_TotalSeconds=(LTsv_goal_now-LTsv_start_now).total_seconds()
LTsv_limit_micRoSecond=int(LTsv_limit_TotalSeconds*1000000)%1000000
LTsv_limit_miLliSecond=int(LTsv_limit_TotalSeconds*1000)%1000
LTsv_limit_Second=int(LTsv_limit_TotalSeconds)%60
LTsv_limit_miNute=int(LTsv_limit_TotalSeconds/60)%60
LTsv_limit_Hour=int(LTsv_limit_TotalSeconds/3600)%24
LTsv_limit_DayHour=int(LTsv_limit_TotalSeconds/3600)
LTsv_limit_Day=int(LTsv_limit_TotalSeconds/86400)
LTsv_limit_Beat,LTsv_limit_BeatInteger,LTsv_limit_BeatPoint=LTsv_beat864(LTsv_limit_Hour,LTsv_limit_miNute,LTsv_limit_Second)
# frames-per-second bookkeeping (refreshed by LTsv_checkFPS); fPsK caps at 999,
# fPsC at 99, and a zero interval reports the cap
LTsv_FPS_now=datetime.datetime.now()
LTsv_FPS_earlier=LTsv_FPS_now
LTsv_FPS_TotalSeconds=(LTsv_FPS_now-LTsv_FPS_earlier).total_seconds()
# NOTE(review): lower bound is 1 here but 0 inside LTsv_checkFPS — confirm which is intended.
LTsv_FPS_fPsK=min(max(int(1/LTsv_FPS_TotalSeconds),1),999) if LTsv_FPS_TotalSeconds > 0 else 999
LTsv_FPS_fPsC=min(max(int(1/LTsv_FPS_TotalSeconds),1),99) if LTsv_FPS_TotalSeconds > 0 else 99
#FPS
def LTsv_checkFPS():
 """Measure instantaneous frames-per-second from the time since the last call.

 Updates the module-level FPS globals and returns the 999-capped value.
 """
 global LTsv_FPS_now,LTsv_FPS_earlier,LTsv_FPS_TotalSeconds,LTsv_FPS_fPsK,LTsv_FPS_fPsC
 LTsv_FPS_now=datetime.datetime.now()
 LTsv_FPS_TotalSeconds=(LTsv_FPS_now-LTsv_FPS_earlier).total_seconds()
 # fPsK caps at 999, fPsC at 99; a zero elapsed interval reports the cap.
 # NOTE(review): floor is 0 here but 1 in the module-level initialisation —
 # confirm which lower bound is intended.
 LTsv_FPS_fPsK=min(max(int(1/LTsv_FPS_TotalSeconds),0),999) if LTsv_FPS_TotalSeconds > 0 else 999
 LTsv_FPS_fPsC=min(max(int(1/LTsv_FPS_TotalSeconds),0),99) if LTsv_FPS_TotalSeconds > 0 else 99
 LTsv_FPS_earlier=LTsv_FPS_now
 return LTsv_FPS_fPsK
#daytime
def LTsv_setdaytimeshift():
 """Recompute all derived LTsv_meridian_* / LTsv_allnight_* globals from LTsv_meridian_now.

 The "meridian" clock is the ordinary calendar view (rolls over at midnight);
 the "allnight" clock keeps early-morning hours before LTsv_overhour attached
 to the previous day, with an extended hour count.  Date-derived fields are
 only refreshed when the day actually changed since the last call.
 """
 global LTsv_meridian_now,LTsv_overhour,LTsv_diffminute,LTsv_HourMAXLower,LTsv_HourMAXUpper
 global LTsv_meridian_miNute,LTsv_meridian_Second,LTsv_meridian_micRoSecond,LTsv_meridian_miLliSecond
 global LTsv_meridian_Beat,LTsv_meridian_BeatInteger,LTsv_meridian_BeatPoint
 global LTsv_meridian_Hour,LTsv_meridian_HourAP,LTsv_meridian_AP,LTsv_meridian_APO
 # time-of-day fields: always refreshed
 LTsv_meridian_miNute=LTsv_meridian_now.minute
 LTsv_meridian_Second=LTsv_meridian_now.second
 LTsv_meridian_micRoSecond=LTsv_meridian_now.microsecond
 LTsv_meridian_miLliSecond=LTsv_meridian_micRoSecond//1000
 LTsv_meridian_Hour=LTsv_meridian_now.hour
 LTsv_meridian_HourAP=LTsv_meridian_Hour%12
 LTsv_meridian_AP=LTsv_meridian_Hour//12
 LTsv_meridian_APO=LTsv_meridian_Hour//12
 LTsv_meridian_Beat,LTsv_meridian_BeatInteger,LTsv_meridian_BeatPoint=LTsv_beat864(LTsv_meridian_Hour,LTsv_meridian_miNute,LTsv_meridian_Second)
 global LTsv_meridian_DayMonth,LTsv_meridian_DayYear
 if LTsv_meridian_DayMonth != LTsv_meridian_now.day:
  # day changed: flag the "overnight" am/pm slot and refresh calendar fields
  LTsv_meridian_APO=2
  # BUGFIX: LTsv_meridian_YearWeeksIso was missing from the global list, so the
  # assignment below only created a dead function-local and the module-level
  # value was never updated after a day change.
  global LTsv_meridian_Year,LTsv_meridian_Yearlower,LTsv_meridian_YearZodiac,LTsv_meridian_YearDays,LTsv_meridian_YearIso,LTsv_meridian_YearWeeksIso
  global LTsv_meridian_Month,LTsv_meridian_MonthDays,LTsv_meridian_WeekNumberYearIso
  global LTsv_meridian_WeekDay,LTsv_meridian_WeekNumberMonth,LTsv_meridian_WeekDayIso
  LTsv_meridian_Year=LTsv_meridian_now.year
  LTsv_meridian_Yearlower=LTsv_meridian_Year%100
  LTsv_meridian_YearZodiac=(LTsv_meridian_Year+8)%12
  LTsv_meridian_YearDays=365 if not LTsv_yearleap(LTsv_meridian_Year) else 366
  LTsv_meridian_YearIso,LTsv_meridian_WeekNumberYearIso,LTsv_meridian_WeekDayIso=LTsv_meridian_now.isocalendar()
  LTsv_meridian_YearWeeksIso=LTsv_yearweeks(LTsv_meridian_Year)
  LTsv_meridian_Month=LTsv_meridian_now.month
  LTsv_meridian_MonthDays=LTsv_monthleap(LTsv_meridian_Year,LTsv_meridian_Month)
  LTsv_meridian_WeekDay=LTsv_meridian_now.weekday()
  LTsv_meridian_WeekNumberMonth=LTsv_meridian_WeekDay//7+1
  LTsv_meridian_DayMonth=LTsv_meridian_now.day
  LTsv_meridian_DayYear=LTsv_meridian_now.toordinal()-datetime.date(LTsv_meridian_Year,1,1).toordinal()+1
 global LTsv_allnight_now
 global LTsv_allnight_miNute,LTsv_allnight_Second,LTsv_allnight_micRoSecond,LTsv_allnight_miLliSecond
 LTsv_allnight_miNute=LTsv_meridian_miNute
 LTsv_allnight_Second=LTsv_meridian_Second
 LTsv_allnight_miLliSecond=LTsv_meridian_miLliSecond
 LTsv_allnight_micRoSecond=LTsv_meridian_micRoSecond
 global LTsv_allnight_Hour
 if LTsv_HourMAXLower+LTsv_meridian_Hour < LTsv_overhour:
  # still "last night": report the previous date with an extended hour count
  LTsv_allnight_now=LTsv_meridian_now-datetime.timedelta(days=1)
  LTsv_allnight_Hour=LTsv_HourMAXLower+LTsv_meridian_Hour
 else:
  LTsv_allnight_now=LTsv_meridian_now
  LTsv_allnight_Hour=LTsv_meridian_Hour
 global LTsv_allnight_DayMonth,LTsv_allnight_DayYear
 if LTsv_allnight_DayMonth != LTsv_allnight_now.day:
  # BUGFIX: LTsv_allnight_YearWeeksIso added to the global list (same dead-local
  # bug as on the meridian side above).
  global LTsv_allnight_Year,LTsv_allnight_Yearlower,LTsv_allnight_YearZodiac,LTsv_allnight_YearDays,LTsv_allnight_YearIso,LTsv_allnight_YearWeeksIso
  global LTsv_allnight_Month,LTsv_allnight_MonthDays,LTsv_allnight_WeekNumberYearIso
  global LTsv_allnight_WeekDay,LTsv_allnight_WeekNumberMonth,LTsv_allnight_WeekDayIso
  LTsv_allnight_Year=LTsv_allnight_now.year
  LTsv_allnight_Yearlower=LTsv_allnight_Year%100
  LTsv_allnight_YearZodiac=(LTsv_allnight_Year+8)%12
  LTsv_allnight_YearDays=365 if not LTsv_yearleap(LTsv_allnight_Year) else 366
  LTsv_allnight_YearIso,LTsv_allnight_WeekNumberYearIso,LTsv_allnight_WeekDayIso=LTsv_allnight_now.isocalendar()
  LTsv_allnight_YearWeeksIso=LTsv_yearweeks(LTsv_allnight_Year)
  LTsv_allnight_Month=LTsv_allnight_now.month
  LTsv_allnight_MonthDays=LTsv_monthleap(LTsv_allnight_Year,LTsv_allnight_Month)
  LTsv_allnight_WeekDay=LTsv_allnight_now.weekday()
  LTsv_allnight_WeekNumberMonth=LTsv_allnight_WeekDay//7+1
  LTsv_allnight_DayMonth=LTsv_allnight_now.day
  LTsv_allnight_DayYear=LTsv_allnight_now.toordinal()-datetime.date(LTsv_allnight_Year,1,1).toordinal()+1
def LTsv_setdaytimeoption(overhour=None,diffminute=None):
 """Update the module-wide over-hour / minute-offset options; None keeps the current value."""
 global LTsv_meridian_now,LTsv_overhour,LTsv_diffminute,LTsv_HourMAXLower,LTsv_HourMAXUpper
 if overhour is not None:
  # clamp to the configured hour window
  LTsv_overhour=min(max(overhour,LTsv_HourMAXLower),LTsv_HourMAXUpper)
 if diffminute is not None:
  # clamp to +/- one day of minutes
  LTsv_diffminute=min(max(diffminute,-24*60),+24*60)
def LTsv_putdaytimespecify(LTsv_toyear,LTsv_tomonth,LTsv_today,LTsv_tohour,LTsv_tominute,LTsv_tosecond,LTsv_tomicrosecond,overhour=None,diffminute=None):
 """Set the module clock to an explicit date/time; every field is clamped to its legal range."""
 LTsv_setdaytimeoption(overhour,diffminute)
 global LTsv_meridian_now,LTsv_overhour,LTsv_diffminute,LTsv_HourMAXLower,LTsv_HourMAXUpper
 def clamp(value,lo,hi):
  return min(max(value,lo),hi)
 year=clamp(LTsv_toyear,datetime.MINYEAR,datetime.MAXYEAR)
 month=clamp(LTsv_tomonth,1,12)
 day=clamp(LTsv_today,1,LTsv_monthleap(year,month))
 hour=clamp(LTsv_tohour,0,23)
 minute=clamp(LTsv_tominute,0,59)
 second=clamp(LTsv_tosecond,0,59)
 microsecond=clamp(LTsv_tomicrosecond,0,999999)
 LTsv_meridian_now=datetime.datetime(year,month,day,hour,minute,second,microsecond)+datetime.timedelta(minutes=LTsv_diffminute)
 LTsv_setdaytimeshift()
 return LTsv_meridian_now
def LTsv_putdaytimever(LTsv_verstr,overhour=None,diffminute=None):
 """Set the module clock from a version-style stamp "YYYYMMDD...HHMMSS"."""
 LTsv_setdaytimeoption(overhour,diffminute)
 global LTsv_meridian_now,LTsv_overhour,LTsv_diffminute,LTsv_HourMAXLower,LTsv_HourMAXUpper
 date_part,time_part=0,0
 try:
  date_part=int(LTsv_verstr[0:8])
  time_part=int(LTsv_verstr[-6:])
 except ValueError:
  pass  # unparsable stamp: fall back to zero fields (clamped below)
 def clamp(value,lo,hi):
  return min(max(value,lo),hi)
 year=clamp(date_part//10000,datetime.MINYEAR,datetime.MAXYEAR)
 month=clamp(date_part//100%100,1,12)
 day=clamp(date_part%100,1,LTsv_monthleap(year,month))
 hour=clamp(time_part//10000,0,23)
 minute=clamp(time_part//100%100,0,59)
 second=clamp(time_part%100,0,59)
 LTsv_meridian_now=datetime.datetime(year,month,day,hour,minute,second,0)+datetime.timedelta(minutes=LTsv_diffminute)
 LTsv_setdaytimeshift()
 return LTsv_meridian_now
def LTsv_putdaytimenow(overhour=None,diffminute=None):
 """Refresh the module clock from the wall clock, applying the minute offset."""
 LTsv_setdaytimeoption(overhour,diffminute)
 global LTsv_earlier_now,LTsv_meridian_now,LTsv_overhour,LTsv_diffminute,LTsv_HourMAXLower,LTsv_HourMAXUpper
 LTsv_earlier_now=datetime.datetime.now()
 offset=datetime.timedelta(minutes=LTsv_diffminute)
 LTsv_meridian_now=LTsv_earlier_now+offset
 LTsv_setdaytimeshift()
 return LTsv_meridian_now
def LTsv_putdaytimeearlier(overhour=None,diffminute=None):
 """Re-derive the module clock from the previously captured wall-clock sample."""
 LTsv_setdaytimeoption(overhour,diffminute)
 global LTsv_earlier_now,LTsv_meridian_now,LTsv_overhour,LTsv_diffminute,LTsv_HourMAXLower,LTsv_HourMAXUpper
 offset=datetime.timedelta(minutes=LTsv_diffminute)
 LTsv_meridian_now=LTsv_earlier_now+offset
 LTsv_setdaytimeshift()
 return LTsv_meridian_now
def LTsv_putdaytimemodify(LTsv_path,overhour=None,diffminute=None):
 """Set the module clock from a file's modification time (plus the minute offset)."""
 LTsv_setdaytimeoption(overhour,diffminute)
 global LTsv_meridian_now,LTsv_overhour,LTsv_diffminute,LTsv_HourMAXLower,LTsv_HourMAXUpper
 mtime=os.stat(LTsv_path).st_mtime
 LTsv_meridian_now=datetime.datetime.fromtimestamp(mtime)+datetime.timedelta(minutes=LTsv_diffminute)
 LTsv_setdaytimeshift()
 return LTsv_meridian_now
def LTsv_getdaytimestr(timeformat=None,overhour=None,diffminute=None):
global LTsv_overhour,LTsv_diffminute
if not overhour is None or not diffminute is None:
LTsv_overhour=LTsv_overhour if overhour is None else overhour
LTsv_diffminute=LTsv_diffminute if diffminute is None else diffminute
LTsv_putdaytimenow(LTsv_overhour,LTsv_diffminute)
LTsv_tf="@000y@0m@0dm@wdec@0h@0n@0s" if timeformat is None else timeformat
LTsv_tf=LTsv_tf if not "@@" in LTsv_tf else LTsv_tf.replace("@@","\t")
global LTsv_meridian_miNute,LTsv_meridian_Second,LTsv_meridian_micRoSecond,LTsv_meridian_miLliSecond
global LTsv_meridian_Hour,LTsv_meridian_HourAP,LTsv_meridian_AP,LTsv_meridian_APO
global LTsv_meridian_DayMonth,LTsv_meridian_DayYear
global LTsv_meridian_Year,LTsv_meridian_Yearlower,LTsv_meridian_YearZodiac,LTsv_meridian_YearDays,LTsv_meridian_YearIso
global LTsv_meridian_Month,LTsv_meridian_MonthDays,LTsv_meridian_WeekNumberYearIso
global LTsv_meridian_WeekDay,LTsv_meridian_WeekNumberMonth,LTsv_meridian_WeekDayIso
global LTsv_allnight_miNute,LTsv_allnight_Second,LTsv_allnight_micRoSecond,LTsv_allnight_miLliSecond
global LTsv_meridian_Beat,LTsv_meridian_BeatInteger,LTsv_meridian_BeatPoint
global LTsv_allnight_Hour
global LTsv_allnight_DayMonth,LTsv_allnight_DayYear
global LTsv_allnight_Year,LTsv_allnight_Yearlower,LTsv_allnight_YearZodiac,LTsv_allnight_YearDays,LTsv_allnight_YearIso
global LTsv_allnight_Month,LTsv_allnight_MonthDays,LTsv_allnight_WeekNumberYearIso
global LTsv_allnight_WeekDay,LTsv_allnight_WeekNumberMonth,LTsv_allnight_WeekDayIso
global LTsv_FPS_now,LTsv_FPS_earlier,LTsv_FPS_TotalSeconds,LTsv_FPS_fPsK,LTsv_FPS_fPsC
LTsv_tf=LTsv_tf if not "@yzj" in LTsv_tf else LTsv_tf.replace("@yzj" ,"{0}".format(LTsv_zodiacjp[LTsv_meridian_YearZodiac]))
LTsv_tf=LTsv_tf if not "@yzc" in LTsv_tf else LTsv_tf.replace("@yzc" ,"{0}".format(LTsv_zodiacch[LTsv_meridian_YearZodiac]))
LTsv_tf=LTsv_tf if not "@0yz" in LTsv_tf else LTsv_tf.replace("@0yz" ,"{0:0>2}".format(LTsv_meridian_YearZodiac))
LTsv_tf=LTsv_tf if not "@_yz" in LTsv_tf else LTsv_tf.replace("@_yz" ,"{0: >2}".format(LTsv_meridian_YearZodiac))
LTsv_tf=LTsv_tf if not "@yz" in LTsv_tf else LTsv_tf.replace("@yz" ,"{0:2}".format(LTsv_meridian_YearZodiac))
LTsv_tf=LTsv_tf if not "@0yd" in LTsv_tf else LTsv_tf.replace("@0yd" ,"{0:0>3}".format(LTsv_meridian_YearDays))
LTsv_tf=LTsv_tf if not "@_yd" in LTsv_tf else LTsv_tf.replace("@_yd" ,"{0:0>3}".format(LTsv_meridian_YearDays))
LTsv_tf=LTsv_tf if not "@yd" in LTsv_tf else LTsv_tf.replace("@yd" ,"{0}".format(LTsv_meridian_YearDays))
LTsv_tf=LTsv_tf if not "@0ywi" in LTsv_tf else LTsv_tf.replace("@0ywi" ,"{0:0>2}".format(LTsv_meridian_YearWeeksIso))
LTsv_tf=LTsv_tf if not "@_ywi" in LTsv_tf else LTsv_tf.replace("@_ywi" ,"{0: >2}".format(LTsv_meridian_YearWeeksIso))
LTsv_tf=LTsv_tf if not "@ywi" in LTsv_tf else LTsv_tf.replace("@ywi" ,"{0}".format(LTsv_meridian_YearWeeksIso))
LTsv_tf=LTsv_tf if not "@000yi" in LTsv_tf else LTsv_tf.replace("@000y" ,"{0:0>4}".format(LTsv_meridian_YearIso))
LTsv_tf=LTsv_tf if not "@___yi" in LTsv_tf else LTsv_tf.replace("@___yi","{0: >4}".format(LTsv_meridian_YearIso))
LTsv_tf=LTsv_tf if not "@4yi" in LTsv_tf else LTsv_tf.replace("@4yi" ,"{0:4}".format(LTsv_meridian_YearIso))
LTsv_tf=LTsv_tf if not "@0yi" in LTsv_tf else LTsv_tf.replace("@0yi" ,"{0:0>2}".format(LTsv_meridian_YearIso))
LTsv_tf=LTsv_tf if not "@_yi" in LTsv_tf else LTsv_tf.replace("@_yi" ,"{0: >2}".format(LTsv_meridian_YearIso))
LTsv_tf=LTsv_tf if not "@2yi" in LTsv_tf else LTsv_tf.replace("@2yi" ,"{0:2}".format(LTsv_meridian_YearIso))
LTsv_tf=LTsv_tf if not "@yi" in LTsv_tf else LTsv_tf.replace("@yi" ,"{0}".format(LTsv_meridian_YearIso))
LTsv_tf=LTsv_tf if not "@000y" in LTsv_tf else LTsv_tf.replace("@000y" ,"{0:0>4}".format(LTsv_meridian_Year))
LTsv_tf=LTsv_tf if not "@___y" in LTsv_tf else LTsv_tf.replace("@___y" ,"{0: >4}".format(LTsv_meridian_Year))
LTsv_tf=LTsv_tf if not "@4y" in LTsv_tf else LTsv_tf.replace("@4y" ,"{0:4}".format(LTsv_meridian_Year))
LTsv_tf=LTsv_tf if not "@0y" in LTsv_tf else LTsv_tf.replace("@0y" ,"{0:0>2}".format(LTsv_meridian_Yearlower))
LTsv_tf=LTsv_tf if not "@_y" in LTsv_tf else LTsv_tf.replace("@_y" ,"{0: >2}".format(LTsv_meridian_Yearlower))
LTsv_tf=LTsv_tf if not "@2y" in LTsv_tf else LTsv_tf.replace("@2y" ,"{0:2}".format(LTsv_meridian_Yearlower))
LTsv_tf=LTsv_tf if not "@y" in LTsv_tf else LTsv_tf.replace("@y" ,"{0}".format(LTsv_meridian_Year))
LTsv_tf=LTsv_tf if not "@Yzj" in LTsv_tf else LTsv_tf.replace("@Yzj" ,"{0}".format(LTsv_zodiacjp[LTsv_allnight_YearZodiac]))
LTsv_tf=LTsv_tf if not "@Yzc" in LTsv_tf else LTsv_tf.replace("@Yzc" ,"{0}".format(LTsv_zodiacch[LTsv_allnight_YearZodiac]))
LTsv_tf=LTsv_tf if not "@0Yz" in LTsv_tf else LTsv_tf.replace("@0Yz" ,"{0:0>2}".format(LTsv_allnight_YearZodiac))
LTsv_tf=LTsv_tf if not "@_Yz" in LTsv_tf else LTsv_tf.replace("@_Yz" ,"{0: >2}".format(LTsv_allnight_YearZodiac))
LTsv_tf=LTsv_tf if not "@Yz" in LTsv_tf else LTsv_tf.replace("@Yz" ,"{0:2}".format(LTsv_allnight_YearZodiac))
LTsv_tf=LTsv_tf if not "@0Yd" in LTsv_tf else LTsv_tf.replace("@0Yd" ,"{0:0>3}".format(LTsv_allnight_YearDays))
LTsv_tf=LTsv_tf if not "@_Yd" in LTsv_tf else LTsv_tf.replace("@_Yd" ,"{0:0>3}".format(LTsv_allnight_YearDays))
LTsv_tf=LTsv_tf if not "@Yd" in LTsv_tf else LTsv_tf.replace("@Yd" ,"{0}".format(LTsv_allnight_YearDays))
LTsv_tf=LTsv_tf if not "@0Ywi" in LTsv_tf else LTsv_tf.replace("@0Ywi" ,"{0:0>2}".format(LTsv_allnight_YearWeeksIso))
LTsv_tf=LTsv_tf if not "@_Ywi" in LTsv_tf else LTsv_tf.replace("@_Ywi" ,"{0: >2}".format(LTsv_allnight_YearWeeksIso))
LTsv_tf=LTsv_tf if not "@Ywi" in LTsv_tf else LTsv_tf.replace("@Ywi" ,"{0}".format(LTsv_allnight_YearWeeksIso))
LTsv_tf=LTsv_tf if not "@000Yi" in LTsv_tf else LTsv_tf.replace("@000Y" ,"{0:0>4}".format(LTsv_allnight_YearIso))
LTsv_tf=LTsv_tf if not "@___Yi" in LTsv_tf else LTsv_tf.replace("@___Yi","{0: >4}".format(LTsv_allnight_YearIso))
LTsv_tf=LTsv_tf if not "@4Yi" in LTsv_tf else LTsv_tf.replace("@4Yi" ,"{0:4}".format(LTsv_allnight_YearIso))
LTsv_tf=LTsv_tf if not "@0Yi" in LTsv_tf else LTsv_tf.replace("@0Yi" ,"{0:0>2}".format(LTsv_allnight_YearIso))
LTsv_tf=LTsv_tf if not "@_Yi" in LTsv_tf else LTsv_tf.replace("@_Yi" ,"{0: >2}".format(LTsv_allnight_YearIso))
LTsv_tf=LTsv_tf if not "@2Yi" in LTsv_tf else LTsv_tf.replace("@2Yi" ,"{0:2}".format(LTsv_allnight_YearIso))
LTsv_tf=LTsv_tf if not "@Yi" in LTsv_tf else LTsv_tf.replace("@Yi" ,"{0}".format(LTsv_allnight_YearIso))
LTsv_tf=LTsv_tf if not "@000Y" in LTsv_tf else LTsv_tf.replace("@000Y" ,"{0:0>4}".format(LTsv_allnight_Year))
LTsv_tf=LTsv_tf if not "@___Y" in LTsv_tf else LTsv_tf.replace("@___Y" ,"{0: >4}".format(LTsv_allnight_Year))
LTsv_tf=LTsv_tf if not "@4Y" in LTsv_tf else LTsv_tf.replace("@4Y" ,"{0:4}".format(LTsv_allnight_Year))
LTsv_tf=LTsv_tf if not "@0Y" in LTsv_tf else LTsv_tf.replace("@0Y" ,"{0:0>2}".format(LTsv_allnight_Yearlower))
LTsv_tf=LTsv_tf if not "@_Y" in LTsv_tf else LTsv_tf.replace("@_Y" ,"{0: >2}".format(LTsv_allnight_Yearlower))
LTsv_tf=LTsv_tf if not "@2Y" in LTsv_tf else LTsv_tf.replace("@2Y" ,"{0:2}".format(LTsv_allnight_Yearlower))
LTsv_tf=LTsv_tf if not "@Y" in LTsv_tf else LTsv_tf.replace("@Y" ,"{0}".format(LTsv_allnight_Year))
LTsv_tf=LTsv_tf if not "@0md" in LTsv_tf else LTsv_tf.replace("@0md" ,"{0:0>2}".format(LTsv_meridian_MonthDays))
LTsv_tf=LTsv_tf if not "@_md" in LTsv_tf else LTsv_tf.replace("@_md" ,"{0:0>2}".format(LTsv_meridian_MonthDays))
LTsv_tf=LTsv_tf if not "@md" in LTsv_tf else LTsv_tf.replace("@md" ,"{0}".format(LTsv_meridian_MonthDays))
LTsv_tf=LTsv_tf if not "@mec" in LTsv_tf else LTsv_tf.replace("@mec" ,"{0}".format(LTsv_monthenc[LTsv_meridian_Month]))
LTsv_tf=LTsv_tf if not "@mes" in LTsv_tf else LTsv_tf.replace("@mes" ,"{0}".format(LTsv_monthens[LTsv_meridian_Month]))
LTsv_tf=LTsv_tf if not "@mel" in LTsv_tf else LTsv_tf.replace("@mel" ,"{0}".format(LTsv_monthenl[LTsv_meridian_Month]))
LTsv_tf=LTsv_tf if not "@_mel" in LTsv_tf else LTsv_tf.replace("@_mel" ,"{0: >9}".format(LTsv_monthenl[LTsv_meridian_Month]))
LTsv_tf=LTsv_tf if not "@meh" in LTsv_tf else LTsv_tf.replace("@meh" ,"{0}".format(LTsv_monthenh[LTsv_meridian_Month]))
LTsv_tf=LTsv_tf if not "@_meh" in LTsv_tf else LTsv_tf.replace("@_meh" ,"{0: >9}".format(LTsv_monthenh[LTsv_meridian_Month]))
LTsv_tf=LTsv_tf if not "@mjiz" in LTsv_tf else LTsv_tf.replace("@mjiz" ,"{0}".format(LTsv_monthjpiz[LTsv_meridian_Month]))
LTsv_tf=LTsv_tf if not "@_mjiz" in LTsv_tf else LTsv_tf.replace("@mjiz" ,"{0}".format(LTsv_monthjpiz[LTsv_meridian_Month]))
LTsv_tf=LTsv_tf if not "@mj" in LTsv_tf else LTsv_tf.replace("@mj" ,"{0}".format(LTsv_monthjp[LTsv_meridian_Month]))
LTsv_tf=LTsv_tf if not "@_mj" in LTsv_tf else LTsv_tf.replace("@mj" ,"{0}".format(LTsv_monthjp[LTsv_meridian_Month]))
LTsv_tf=LTsv_tf if not "@0m" in LTsv_tf else LTsv_tf.replace("@0m" ,"{0:0>2}".format(LTsv_meridian_Month))
LTsv_tf=LTsv_tf if not "@_m" in LTsv_tf else LTsv_tf.replace("@_m" ,"{0: >2}".format(LTsv_meridian_Month))
LTsv_tf=LTsv_tf if not "@mz" in LTsv_tf else LTsv_tf.replace("@mz" ,"0123456789"[LTsv_allnight_Month]) if LTsv_meridian_Month <= 9 else LTsv_tf.replace("@Mz","{0}".format(LTsv_allnight_Month))
LTsv_tf=LTsv_tf if not "@m" in LTsv_tf else LTsv_tf.replace("@m" ,"{0}".format(LTsv_meridian_Month))
LTsv_tf=LTsv_tf if not "@0Md" in LTsv_tf else LTsv_tf.replace("@0Md" ,"{0:0>2}".format(LTsv_allnight_MonthDays))
LTsv_tf=LTsv_tf if not "@_Md" in LTsv_tf else LTsv_tf.replace("@_Md" ,"{0:0>2}".format(LTsv_allnight_MonthDays))
LTsv_tf=LTsv_tf if not "@Md" in LTsv_tf else LTsv_tf.replace("@Md" ,"{0}".format(LTsv_allnight_MonthDays))
LTsv_tf=LTsv_tf if not "@Mec" in LTsv_tf else LTsv_tf.replace("@Mec" ,"{0}".format(LTsv_monthenc[LTsv_allnight_Month]))
LTsv_tf=LTsv_tf if not "@Mes" in LTsv_tf else LTsv_tf.replace("@Mes" ,"{0}".format(LTsv_monthens[LTsv_allnight_Month]))
LTsv_tf=LTsv_tf if not "@Mel" in LTsv_tf else LTsv_tf.replace("@Mel" ,"{0}".format(LTsv_monthenl[LTsv_allnight_Month]))
LTsv_tf=LTsv_tf if not "@_Mel" in LTsv_tf else LTsv_tf.replace("@_Mel" ,"{0: >9}".format(LTsv_monthenl[LTsv_allnight_Month]))
LTsv_tf=LTsv_tf if not "@Meh" in LTsv_tf else LTsv_tf.replace("@Meh" ,"{0}".format(LTsv_monthenh[LTsv_allnight_Month]))
LTsv_tf=LTsv_tf if not "@_Meh" in LTsv_tf else LTsv_tf.replace("@_Meh" ,"{0: >9}".format(LTsv_monthenh[LTsv_allnight_Month]))
LTsv_tf=LTsv_tf if not "@Mjiz" in LTsv_tf else LTsv_tf.replace("@Mjiz" ,"{0}".format(LTsv_monthjpiz[LTsv_allnight_Month]))
LTsv_tf=LTsv_tf if not "@_Mjiz" in LTsv_tf else LTsv_tf.replace("@Mjiz" ,"{0}".format(LTsv_monthjpiz[LTsv_allnight_Month]))
LTsv_tf=LTsv_tf if not "@Mj" in LTsv_tf else LTsv_tf.replace("@Mj" ,"{0}".format(LTsv_monthjp[LTsv_allnight_Month]))
LTsv_tf=LTsv_tf if not "@_Mj" in LTsv_tf else LTsv_tf.replace("@Mj" ,"{0}".format(LTsv_monthjp[LTsv_allnight_Month]))
LTsv_tf=LTsv_tf if not "@0M" in LTsv_tf else LTsv_tf.replace("@0M" ,"{0:0>2}".format(LTsv_allnight_Month))
LTsv_tf=LTsv_tf if not "@_M" in LTsv_tf else LTsv_tf.replace("@_M" ,"{0: >2}".format(LTsv_allnight_Month))
LTsv_tf=LTsv_tf if not "@Mz" in LTsv_tf else LTsv_tf.replace("@Mz" ,"0123456789"[LTsv_allnight_Month]) if LTsv_meridian_Month <= 9 else LTsv_tf.replace("@Mz","{0}".format(LTsv_allnight_Month))
LTsv_tf=LTsv_tf if not "@M" in LTsv_tf else LTsv_tf.replace("@M" ,"{0}".format(LTsv_allnight_Month))
LTsv_tf=LTsv_tf if not "@0wnyi" in LTsv_tf else LTsv_tf.replace("@0wnyi","{0:0>2}".format(LTsv_meridian_WeekNumberYearIso))
LTsv_tf=LTsv_tf if not "@_wnyi" in LTsv_tf else LTsv_tf.replace("@_wnyi","{0: >2}".format(LTsv_meridian_WeekNumberYearIso))
LTsv_tf=LTsv_tf if not "@wnyiz" in LTsv_tf else LTsv_tf.replace("@wnyiz","0123456789"[LTsv_meridian_WeekNumberYearIso]) if LTsv_meridian_WeekNumberYearIso <= 9 else LTsv_tf.replace("@wnyiz","{0}".format(LTsv_meridian_WeekNumberYearIso))
LTsv_tf=LTsv_tf if not "@wnyi" in LTsv_tf else LTsv_tf.replace("@wnyi" ,"{0}".format(LTsv_meridian_WeekNumberYearIso))
LTsv_tf=LTsv_tf if not "@0Wnyi" in LTsv_tf else LTsv_tf.replace("@0Wnyi","{0:0>2}".format(LTsv_meridian_WeekNumberYearIso))
LTsv_tf=LTsv_tf if not "@_Wnyi" in LTsv_tf else LTsv_tf.replace("@_Wnyi","{0: >2}".format(LTsv_meridian_WeekNumberYearIso))
LTsv_tf=LTsv_tf if not "@Wnyiz" in LTsv_tf else LTsv_tf.replace("@Wnyiz","0123456789"[LTsv_meridian_WeekNumberYearIso]) if LTsv_meridian_WeekNumberYearIso <= 9 else LTsv_tf.replace("@Wnyiz","{0}".format(LTsv_meridian_WeekNumberYearIso))
| |
b * acosh(c * x)) ** (n + S(-1))
* (c * x + S(-1)) ** (p + S(-1) / 2)
* (c * x + S(1)) ** (p + S(-1) / 2),
x,
),
x,
)
+ Simp(
(f * x) ** (m + S(1))
* (a + b * acosh(c * x)) ** n
* (d + e * x ** S(2)) ** p
/ (f * (m + S(1))),
x,
)
)
def replacement6170(a, b, c, d, e, f, m, n, x):
    """Auto-generated Rubi (rule-based integration) rewrite; do not hand-edit.

    Reduction for (f*x)**m * (a + b*asinh(c*x))**n * sqrt(d + e*x**2):
    rewrites the integral via an (m+2)-power piece and an (n-1) asinh-power
    piece plus an integrated Simp term.  Applicability conditions are checked
    by the Rubi pattern matcher, not here.
    """
    return (
        -Dist(
            c ** S(2)
            * sqrt(d + e * x ** S(2))
            / (f ** S(2) * (m + S(1)) * sqrt(c ** S(2) * x ** S(2) + S(1))),
            Int(
                (f * x) ** (m + S(2))
                * (a + b * asinh(c * x)) ** n
                / sqrt(c ** S(2) * x ** S(2) + S(1)),
                x,
            ),
            x,
        )
        - Dist(
            b
            * c
            * n
            * sqrt(d + e * x ** S(2))
            / (f * (m + S(1)) * sqrt(c ** S(2) * x ** S(2) + S(1))),
            Int((f * x) ** (m + S(1)) * (a + b * asinh(c * x)) ** (n + S(-1)), x),
            x,
        )
        + Simp(
            (f * x) ** (m + S(1))
            * (a + b * asinh(c * x)) ** n
            * sqrt(d + e * x ** S(2))
            / (f * (m + S(1))),
            x,
        )
    )
def replacement6171(a, b, c, d1, d2, e1, e2, f, m, n, x):
    """Auto-generated Rubi rewrite; do not hand-edit.

    acosh analogue of replacement6170 with the radical split as
    sqrt(d1 + e1*x) * sqrt(d2 + e2*x): lowers to an (m+2)-power piece and an
    (n-1) acosh-power piece plus an integrated Simp term.
    """
    return (
        -Dist(
            c ** S(2)
            * sqrt(d1 + e1 * x)
            * sqrt(d2 + e2 * x)
            / (f ** S(2) * (m + S(1)) * sqrt(c * x + S(-1)) * sqrt(c * x + S(1))),
            Int(
                (f * x) ** (m + S(2))
                * (a + b * acosh(c * x)) ** n
                / (sqrt(c * x + S(-1)) * sqrt(c * x + S(1))),
                x,
            ),
            x,
        )
        - Dist(
            b
            * c
            * n
            * sqrt(d1 + e1 * x)
            * sqrt(d2 + e2 * x)
            / (f * (m + S(1)) * sqrt(c * x + S(-1)) * sqrt(c * x + S(1))),
            Int((f * x) ** (m + S(1)) * (a + b * acosh(c * x)) ** (n + S(-1)), x),
            x,
        )
        + Simp(
            (f * x) ** (m + S(1))
            * (a + b * acosh(c * x)) ** n
            * sqrt(d1 + e1 * x)
            * sqrt(d2 + e2 * x)
            / (f * (m + S(1))),
            x,
        )
    )
def replacement6172(a, b, c, d, e, f, m, n, p, x):
    """Auto-generated Rubi rewrite; do not hand-edit.

    General-power reduction for (f*x)**m * (a + b*asinh(c*x))**n *
    (d + e*x**2)**p: lowers p by one in the first piece and n by one in the
    second (using IntPart/FracPart to normalise the fractional power),
    plus an integrated Simp term.
    """
    return (
        -Dist(
            S(2) * e * p / (f ** S(2) * (m + S(1))),
            Int(
                (f * x) ** (m + S(2))
                * (a + b * asinh(c * x)) ** n
                * (d + e * x ** S(2)) ** (p + S(-1)),
                x,
            ),
            x,
        )
        - Dist(
            b
            * c
            * d ** IntPart(p)
            * n
            * (d + e * x ** S(2)) ** FracPart(p)
            * (c ** S(2) * x ** S(2) + S(1)) ** (-FracPart(p))
            / (f * (m + S(1))),
            Int(
                (f * x) ** (m + S(1))
                * (a + b * asinh(c * x)) ** (n + S(-1))
                * (c ** S(2) * x ** S(2) + S(1)) ** (p + S(-1) / 2),
                x,
            ),
            x,
        )
        + Simp(
            (f * x) ** (m + S(1))
            * (a + b * asinh(c * x)) ** n
            * (d + e * x ** S(2)) ** p
            / (f * (m + S(1))),
            x,
        )
    )
def replacement6173(a, b, c, d1, d2, e1, e2, f, m, n, p, x):
    """Auto-generated Rubi rewrite; do not hand-edit.

    acosh analogue of replacement6172 with the power split as
    (d1 + e1*x)**p * (d2 + e2*x)**p: lowers p in the first piece and n in the
    second, plus an integrated Simp term.
    """
    return (
        -Dist(
            S(2) * e1 * e2 * p / (f ** S(2) * (m + S(1))),
            Int(
                (f * x) ** (m + S(2))
                * (a + b * acosh(c * x)) ** n
                * (d1 + e1 * x) ** (p + S(-1))
                * (d2 + e2 * x) ** (p + S(-1)),
                x,
            ),
            x,
        )
        - Dist(
            b
            * c
            * n
            * (-d1 * d2) ** (p + S(-1) / 2)
            * sqrt(d1 + e1 * x)
            * sqrt(d2 + e2 * x)
            / (f * (m + S(1)) * sqrt(c * x + S(-1)) * sqrt(c * x + S(1))),
            Int(
                (f * x) ** (m + S(1))
                * (a + b * acosh(c * x)) ** (n + S(-1))
                * (c ** S(2) * x ** S(2) + S(-1)) ** (p + S(-1) / 2),
                x,
            ),
            x,
        )
        + Simp(
            (f * x) ** (m + S(1))
            * (a + b * acosh(c * x)) ** n
            * (d1 + e1 * x) ** p
            * (d2 + e2 * x) ** p
            / (f * (m + S(1))),
            x,
        )
    )
def replacement6174(a, b, c, d, e, f, m, n, p, x):
    """Auto-generated Rubi rewrite; do not hand-edit.

    Reduction for (f*x)**m * (a + b*acosh(c*x))**n * (d + e*x**2)**p using a
    (m + 2p + 1) denominator recurrence: lowers p in the first piece and n in
    the second, plus an integrated Simp term.
    """
    return (
        Dist(
            S(2) * d * p / (m + S(2) * p + S(1)),
            Int(
                (f * x) ** m
                * (a + b * acosh(c * x)) ** n
                * (d + e * x ** S(2)) ** (p + S(-1)),
                x,
            ),
            x,
        )
        - Dist(
            b * c * n * (-d) ** p / (f * (m + S(2) * p + S(1))),
            Int(
                (f * x) ** (m + S(1))
                * (a + b * acosh(c * x)) ** (n + S(-1))
                * (c * x + S(-1)) ** (p + S(-1) / 2)
                * (c * x + S(1)) ** (p + S(-1) / 2),
                x,
            ),
            x,
        )
        + Simp(
            (f * x) ** (m + S(1))
            * (a + b * acosh(c * x)) ** n
            * (d + e * x ** S(2)) ** p
            / (f * (m + S(2) * p + S(1))),
            x,
        )
    )
def replacement6175(a, b, c, d, e, f, m, n, x):
    """Auto-generated Rubi rewrite; do not hand-edit.

    Reduction for (f*x)**m * (a + b*asinh(c*x))**n * sqrt(d + e*x**2) with an
    (m + 2) denominator: keeps the x power in the first piece, lowers n in the
    second, plus an integrated Simp term.
    """
    return (
        Dist(
            sqrt(d + e * x ** S(2)) / ((m + S(2)) * sqrt(c ** S(2) * x ** S(2) + S(1))),
            Int(
                (f * x) ** m
                * (a + b * asinh(c * x)) ** n
                / sqrt(c ** S(2) * x ** S(2) + S(1)),
                x,
            ),
            x,
        )
        - Dist(
            b
            * c
            * n
            * sqrt(d + e * x ** S(2))
            / (f * (m + S(2)) * sqrt(c ** S(2) * x ** S(2) + S(1))),
            Int((f * x) ** (m + S(1)) * (a + b * asinh(c * x)) ** (n + S(-1)), x),
            x,
        )
        + Simp(
            (f * x) ** (m + S(1))
            * (a + b * asinh(c * x)) ** n
            * sqrt(d + e * x ** S(2))
            / (f * (m + S(2))),
            x,
        )
    )
def replacement6176(a, b, c, d1, d2, e1, e2, f, m, n, x):
return (
-Dist(
sqrt(d1 + e1 * x)
* sqrt(d2 + e2 * x)
/ ((m + S(2)) * sqrt(c * x + S(-1)) * sqrt(c * x + S(1))),
Int(
(f * x) ** m
* (a + b * acosh(c * x)) ** n
/ (sqrt(c * x + S(-1)) * sqrt(c * x + S(1))),
x,
),
x,
)
- Dist(
b
* c
* n
* sqrt(d1 + e1 * x)
* sqrt(d2 + e2 * x)
/ (f * (m + S(2)) * sqrt(c * x + S(-1)) * sqrt(c * x + S(1))),
Int((f * x) ** (m + S(1)) * (a + b * acosh(c * x)) ** (n + S(-1)), x),
x,
)
+ Simp(
(f * x) ** (m + S(1))
* (a + b * acosh(c * x)) ** n
* sqrt(d1 + e1 * x)
* sqrt(d2 + | |
# repo: zxjzxj9/aipot — file: model.py
#! /usr/bin/env python
import tensorflow as tf
import yaml
from option import opts
import itertools
import numpy as np
import math
class AttnNet(object):
"""
This code is inspired by google's paper Attention Is All You Need
Build a transformer to predict the system energy
    Developed after DensityNet (which did not seem to work)
"""
    def __init__(self, max_atom, n_atom_embed, n_kmesh, n_trans, n_heads, n_ff):
        """Store network hyper-parameters and build the TF graph.

        Args:
            max_atom: maximum number of atoms per structure (padding length).
            n_atom_embed: width of the per-species atom embedding.
            n_kmesh: reciprocal-space mesh has n_kmesh**3 points per axis set.
            n_trans: width of each attention head's K/Q/V projection.
            n_heads: number of attention heads per transformer block.
            n_ff: hidden width of the per-block feed-forward layer.
        """
        self.atom_kinds = 120 # index zero is unknown atoms
        self.max_atom = max_atom
        self.n_atom_embed = n_atom_embed
        # Reciprocal points
        self.n_kmesh = n_kmesh
        self.n_trans = n_trans
        self.n_heads = n_heads
        # number of per-species auxiliary parameters (see get_param_embed)
        self.n_zetas = 256
        # supercell factor? not used in the methods visible here — TODO confirm
        self.nsc = 2
        #self.n_pos_embed = 2*n_kmesh**3
        #self.n_dims = 2*self.n_kmesh**3+self.n_atom_embed
        # per-atom feature width fed to the transformer blocks
        self.n_dims = self.n_zetas + self.n_atom_embed
        self.n_ff = n_ff
        self._build_graph()
    def get_atom_embed(self, atom_ids):
        """Trainable per-species embedding lookup.

        atom_ids: integer tensor of species indices (0 = unknown/padding).
        Returns a float32 tensor shaped atom_ids.shape + [n_atom_embed].
        """
        with tf.variable_scope("atomic_embedding"):
            embed=tf.get_variable("embed", shape=(self.atom_kinds, self.n_atom_embed),
                    dtype=tf.float32, initializer=tf.initializers.random_normal(stddev=1.0e-3))
            embeds = tf.nn.embedding_lookup(embed, atom_ids)
            return embeds
    def get_param_embed(self, atom_ids, name, mean=5.0):
        """Per-species embedding of n_zetas auxiliary parameters.

        Variables live under scope "<name>_embedding" and are initialised as
        Normal(mean, 1e-3*|mean|).  Returns atom_ids.shape + [n_zetas].
        """
        with tf.variable_scope("{}_embedding".format(name)):
            embed=tf.get_variable("embed", shape=(self.atom_kinds, self.n_zetas),
                    dtype=tf.float32, initializer=tf.initializers.random_normal(mean=mean, stddev=1.0e-3*abs(mean)))
            embeds = tf.nn.embedding_lookup(embed, atom_ids)
            return embeds
def get_k_mesh(self):
with tf.variable_scope("reciprocal_mesh"):
kmesh = []
for i, j, k in itertools.product(*itertools.repeat(range(self.n_kmesh), 3)):
kmesh.append(np.array([i/self.n_kmesh, j/self.n_kmesh, k/self.n_kmesh]))
kmesh = np.stack(kmesh, axis=0) # n_kmesh**3 x 3
return tf.constant(kmesh, dtype=tf.float32)
    def get_pos_embed(self, latts, carts):
        """Fourier positional features of Cartesian atom coordinates.

        latts: (nbatch, 3, 3) lattice matrices; carts: (nbatch, max_atom, 3)
        Cartesian positions.  Projects positions onto the reciprocal mesh and
        returns concat(sin, cos) of the phases:
        (nbatch, max_atom, 2*n_kmesh**3).
        """
        with tf.variable_scope("position_embedding"):
            inv_latts = tf.matrix_inverse(latts)
            kmesh = self.get_k_mesh()
            rvec = tf.einsum("nl,kml->knm", kmesh, inv_latts) # nbatch x n_kmesh**3 x 3
            phase = 2*math.pi*tf.einsum("knl,kml->knm", carts, rvec) # nbatch x max_atom x n_kmesh**3
            s_phase = tf.sin(phase)
            c_phase = tf.cos(phase)
            return tf.concat((s_phase, c_phase), axis=-1) # nbatch x max_atom x 2*n_kmesh**3
    def get_trans_param(self, name=""):
        """Trainable (n_dims, n_trans) projection matrix, used as a K/Q/V weight in attn()."""
        with tf.variable_scope("parameters_"+name):
            trans=tf.get_variable("trans", shape=(self.n_dims, self.n_trans),
                    dtype=tf.float32, initializer=tf.initializers.random_normal(stddev=1.0e-3))
            return trans
    def attn(self, embed, mask, name=""):
        """Single scaled dot-product attention head.

        embed: nbatch x max_atom x n_dims input features.
        mask:  boolean nbatch x max_atom; positions where it is True get their
               attention scores forced to -9999 before the softmax, i.e. they
               are effectively excluded (presumably padding atoms — confirm
               with the caller of trans()).
        Returns nbatch x max_atom x n_trans attended values.
        """
        with tf.variable_scope("attn_"+name):
            K = self.get_trans_param("K")
            Q = self.get_trans_param("Q")
            V = self.get_trans_param("V")
            kdata = tf.einsum("nml,lk->nmk", embed, K)
            qdata = tf.einsum("nml,lk->nmk", embed, Q)
            vdata = tf.einsum("nml,lk->nmk", embed, V) # nbatch x max_atom x n_trans
            # scaled dot-product scores
            kq = tf.einsum("nml,nkl->nmk", qdata, kdata)*(1/math.sqrt(self.n_trans)) # nbatch x max_atom x max_atom
            #mask = tf.expand_dims(mask, 1) # nbatch x 1 x max_atom
            # broadcast the per-atom mask to one row per query atom
            mask = tf.keras.backend.repeat(mask, self.max_atom)
            score = tf.where(mask, -9999*tf.ones_like(kq), kq)
            #score = kq
            #score = tf.scatter_update(tf.Variable(kq, validate_shape=False), mask, -9999)# assign a large number
            w = tf.nn.softmax(score, axis=-1) # calculate attention weight, nbatch x max_atom x max_atom
            vout = tf.einsum("nml,nlk->nmk", w, vdata) # nbatch x max_atom x n_trans
            return vout
def trans(self, features, masks2, name=""):
    """Transformer block: multi-head attention, residual layer-norm, FFN.

    NOTE(review): the residual add `features + heads` requires
    n_heads * n_trans == n_dims -- confirm the hyperparameters satisfy this.
    """
    with tf.variable_scope("transformer_{}".format(name)):
        head_outputs = [self.attn(features, masks2, "{}".format(h))
                        for h in range(self.n_heads)]
        # nbatch x max_atom x (n_heads * n_trans)
        heads = tf.concat(head_outputs, axis=-1)
        normed = tf.contrib.layers.layer_norm(features + heads)
        hidden = tf.keras.layers.Dense(self.n_ff, activation=tf.nn.relu)(normed)
        return tf.keras.layers.Dense(self.n_dims, activation=None)(hidden)
def stru_factor(self, atom_ids, latts, coords):
    """Per-atom structure-factor features for the whole crystal.

    Each atom gets a learned scalar coefficient; pairwise phase products
    over the reciprocal mesh yield normalized real/imaginary structure
    factors, concatenated onto the species embedding.
    """
    with tf.variable_scope("structure_factor"):
        atom_embeds = self.get_atom_embed(atom_ids)
        # Learned per-atom coefficient, zeroed for padding atoms.
        coeff = tf.keras.layers.Dense(1, activation=tf.nn.tanh)(atom_embeds)  # nbatch x maxatom x 1
        real_atom = tf.expand_dims(tf.not_equal(atom_ids, 0), axis=-1)
        coeff = tf.where(real_atom, coeff, tf.zeros_like(coeff))
        pair_coeff = tf.einsum("bij,bkj->bik", coeff, coeff)
        coeff_sq = tf.expand_dims(tf.complex(pair_coeff, 0.0), -1)
        inv_latts = tf.matrix_inverse(latts)
        kmesh = self.get_k_mesh()
        # Reciprocal vectors: nbatch x n_kmesh**3 x 3
        rvec = 2 * math.pi * tf.einsum("nl,kml->knm", kmesh, inv_latts)
        phase = tf.einsum("bik,bjk->bij", coords, rvec)  # nbatch x natom x n_kmesh**3
        wfunc = tf.exp(tf.complex(0.0, -phase))  # nbatch x natom x n_kmesh**3
        # Pairwise phase products weighted by coefficient products.
        pairwise = tf.expand_dims(wfunc, 1) * tf.math.conj(tf.expand_dims(wfunc, 2))
        sq = tf.reduce_sum(pairwise * coeff_sq, axis=2)
        norm = tf.reduce_sum(tf.square(coeff), axis=[1], keepdims=True)
        cf = tf.math.real(sq) / norm
        sf = tf.math.imag(sq) / norm
        return tf.concat([atom_embeds, cf, sf], axis=-1)
def get_sc(self):
    """Integer supercell translation vectors on the nsc**3 grid.

    Returns a float32 constant of shape (nsc**3, 3); the innermost index
    (k, the c-axis) varies fastest.
    """
    half = self.nsc // 2
    shifts = [np.array([i, j, k], dtype=np.float32)
              for i, j, k in itertools.product(*itertools.repeat(range(-half, half + 1), 3))]
    grid = np.stack(shifts, axis=0)
    with tf.variable_scope("supercell"):
        return tf.constant(grid, dtype=tf.float32)
def dist_weight(self, atom_ids, latts, coords):
    """Distance-weighted species features under periodic boundary conditions.

    For every atom, sums learned per-species coefficients decayed
    exponentially by the minimum-image distance to every other atom, and
    concatenates the result onto the species embedding.

    Returns a tensor of shape batch x maxatom x (n_atom_embed + n_zetas).
    """
    with tf.variable_scope("distance_weight"):
        atom_embeds = self.get_atom_embed(atom_ids)
        # Per-species decay rates and pair coefficients (learned).
        zeta = self.get_param_embed(atom_ids, "zeta", 0.1) # batch x maxatom x nzeta
        zeta = tf.expand_dims(zeta, axis=1) # batch x 1 x maxatom x nzeta
        coeff = self.get_param_embed(atom_ids, "coeff", 0.001) # batch x maxatom x nzeta
        # Zero out contributions from padding atoms (id 0).
        mask = tf.cast(tf.not_equal(atom_ids, 0), tf.float32) # batch x maxatom
        mask = tf.expand_dims(mask, -1)
        coeff = mask*coeff
        coeff = tf.expand_dims(coeff, axis=1) # batch x 1 x maxatom x nzeta
        # Work in fractional coordinates so supercell shifts are integers.
        inv_latts = tf.matrix_inverse(latts)
        frac = tf.einsum("bij,bjk->bik", coords, inv_latts) # batch x maxatom x 3
        frac = tf.expand_dims(frac, axis=2) # batch x maxatom x 1 x 3
        fract = tf.transpose(frac, perm=[0, 2, 1, 3]) # batch x 1 x maxatom x 3
        sc = tf.reshape(self.get_sc(), shape=[1, -1, 1, 1, 3]) # 1 x nsc**3 x 1 x 1 x 3
        # Pairwise fractional separations over every periodic image.
        dfrac = tf.expand_dims(fract - frac , axis = 1) # batch x 1 x maxatom x maxatom x 3
        dfrac = dfrac + sc # batch x nsc**3 x maxatom x maxatom x 3
        dreal = tf.einsum("blnmi,bij->blnmj", dfrac, latts) # batch x nsc**3 x maxatom x maxatom x 3
        # Squared Cartesian distances; exact zeros are the self-image, so
        # replace them with a large value before taking the minimum image.
        dreal = tf.reduce_sum(tf.square(dreal), axis=-1, keepdims=True) # batch x nsc**3 x maxatom x maxatom x 1
        dreal = tf.where(tf.equal(dreal, 0), 9999*tf.ones_like(dreal), dreal) # mask out self image
        dreal = tf.sqrt(tf.reduce_min(dreal, axis=1)) # batch x maxatom x maxatom x 1
        #dreal = tf.reduce_min(dreal, axis=1) # batch x maxatom x maxatom x 1
        # Sum the exponentially decayed pair terms over neighbor atoms.
        weight = tf.reduce_sum(coeff*tf.exp(-tf.abs(zeta)*dreal), axis=-2) # batch x maxatom x nzeta
        feat = tf.concat([atom_embeds, weight], axis=-1)
        return feat
def energy_func(self, atom_ids, coords, latts):
    """Total potential energy of each structure in the batch.

    Builds distance-weighted features, applies one transformer block and a
    stack of dense layers, then sums the per-atom energies over real
    (non-padding) atoms. Returns a tensor of shape (nbatch,).
    """
    with tf.variable_scope("attention"):
        embeds = self.dist_weight(atom_ids, latts, coords)
        real_mask = tf.cast(tf.not_equal(atom_ids, 0), tf.float32)
        pad_mask = tf.equal(atom_ids, 0)
        hidden = self.trans(embeds, pad_mask, "1")
        # Six ReLU layers of width n_ff, then a scalar head per atom.
        for _ in range(6):
            hidden = tf.keras.layers.Dense(self.n_ff, activation=tf.nn.relu)(hidden)
        per_atom = tf.squeeze(tf.keras.layers.Dense(1, activation=None)(hidden))
        atomic_energy = real_mask * per_atom
        return tf.reduce_sum(atomic_energy, axis=-1)
def _build_graph(self):
    """Create three independent TF graphs so training, validation, and
    inference each construct their own ops without name collisions."""
    self.train_graph = tf.Graph()
    self.valid_graph = tf.Graph()
    self.infer_graph = tf.Graph()
def train(self, atom_ids, coords, latts, force, energy, stress):
    """Build the training loss on self.train_graph.

    Forces come from -dE/dx and stresses from the lattice gradient scaled
    by cell volume. Returns (loss, energy_rmse, force_rmse, stress_rmse),
    where the loss is a sum of Huber terms on per-atom energy, forces, and
    stress. Also populates self.check_ops with tensors for debugging
    structures that blow up the loss, and self.avg_na with the mean atom
    count.
    """
    with self.train_graph.as_default():
        pred_energy = self.energy_func(atom_ids, coords, latts)
        atom_mask = tf.cast(tf.not_equal(atom_ids, 0), tf.float32)
        # Forces: negative coordinate gradient, zeroed on padding atoms.
        pred_force = -tf.gradients(pred_energy, coords)[0] * tf.expand_dims(atom_mask, axis=-1)
        vols = tf.reshape(tf.abs(tf.linalg.det(latts)), (-1, 1, 1))
        pred_stress = -tf.einsum("bij,bkj->bik",
                                 tf.gradients(pred_energy, latts)[0], latts) / vols
        na = tf.reduce_sum(atom_mask, axis=1)
        self.avg_na = tf.reduce_mean(na)
        # Energy is compared per atom so large cells don't dominate.
        loss = (tf.losses.huber_loss(energy / na, pred_energy / na, delta=0.05)
                + tf.losses.huber_loss(force, pred_force, delta=0.5)
                + tf.losses.huber_loss(stress, pred_stress, delta=0.05))
        # Tensors exposed for inspecting broken structures during training.
        self.check_ops = {
            "energy_p": pred_energy,
            "energy": energy,
            "force_p": pred_force,
            "force": force,
            "atom_ids": atom_ids,
            "latts": latts,
            "coords": coords,
        }
        energy_rmse = tf.sqrt(tf.reduce_mean(((energy - pred_energy) / na) ** 2))
        force_rmse = tf.sqrt(tf.reduce_mean(tf.reduce_sum((force - pred_force) ** 2, axis=-1)))
        stress_rmse = tf.sqrt(tf.reduce_mean((stress - pred_stress) ** 2))
        return loss, energy_rmse, force_rmse, stress_rmse
def validate(self, atom_ids, coords, latts, force, energy, stress):
    """Build the validation loss on self.valid_graph.

    Mirrors train(): same Huber loss and RMSE metrics, but no debug
    check_ops. Returns (loss, energy_rmse, force_rmse, stress_rmse) and
    sets self.avg_na to the mean atom count.
    """
    with self.valid_graph.as_default():
        pred_energy = self.energy_func(atom_ids, coords, latts)
        atom_mask = tf.cast(tf.not_equal(atom_ids, 0), tf.float32)
        pred_force = -tf.gradients(pred_energy, coords)[0] * tf.expand_dims(atom_mask, axis=-1)
        vols = tf.reshape(tf.abs(tf.linalg.det(latts)), (-1, 1, 1))
        pred_stress = -tf.einsum("bij,bkj->bik",
                                 tf.gradients(pred_energy, latts)[0], latts) / vols
        na = tf.reduce_sum(atom_mask, axis=1)
        self.avg_na = tf.reduce_mean(na)
        loss = (tf.losses.huber_loss(energy / na, pred_energy / na, delta=0.05)
                + tf.losses.huber_loss(force, pred_force, delta=0.5)
                + tf.losses.huber_loss(stress, pred_stress, delta=0.05))
        energy_rmse = tf.sqrt(tf.reduce_mean(((energy - pred_energy) / na) ** 2))
        force_rmse = tf.sqrt(tf.reduce_mean(tf.reduce_sum((force - pred_force) ** 2, axis=-1)))
        stress_rmse = tf.sqrt(tf.reduce_mean((stress - pred_stress) ** 2))
        return loss, energy_rmse, force_rmse, stress_rmse
def infer(self, atom_ids, coords, latts):
with self.infer_graph.as_default():
energy_p = self.energy_func(atom_ids, coords, latts)
masks1 = tf.cast(tf.not_equal(atom_ids, 0), tf.float32)
masks1 = tf.expand_dims(masks1, axis=-1)
force_p = -tf.gradients(energy_p, coords)[0]*masks1
vols = tf.reshape(tf.abs(tf.linalg.det(latts)), (-1, 1, 1))
stress_p = -tf.einsum("bij,bkj->bik", tf.gradients(energy_p, | |
<reponame>AgriculturalModelExchangeInitiative/PyCropML<filename>src/pycropml/transpiler/antlr_py/tests/examples/DssatComponent/SoilTemp/STEMP.py<gh_stars>1-10
def STEMP(SOILPROP, SRAD, SW, TAVG, TMAX, XLAT, TAV, TAMP,NL, SRFTEMP, ST) :
# NOTE(review): this is an auto-generated, incomplete translation of the
# DSSAT Fortran STEMP soil-temperature routine. The body below is still
# mostly Fortran (IF/THEN, DO loops, CALL, % member access) and is NOT
# executable Python as-is -- it appears to be a transpiler test fixture.
#USE ModuleDefs #Definitions of constructed variable types,
# which contain control information, soil
# parameters, hourly weather data.
TMA = [None]*5
BD =[None]*5
DLAYR =[None]*5
DLI =[None]*5
# NOTE(review): trailing commas on the DS, DSMID, and DUL lines make them
# 1-element tuples containing a list, not lists -- likely a translation bug.
DS=[None]*5,
DSI=[None]*5
DSMID =[None]*5,
DUL =[None]*5,
LL =[None]*5
ST =[None]*5
SW =[None]*5
SWI =[None]*5
# Unpack soil profile parameters from the SOILPROP composite.
BD = SOILPROP.BD
DLAYR = SOILPROP.DLAYR
DS = SOILPROP.DS
DUL = SOILPROP.DUL
LL = SOILPROP.LL
NLAYR = SOILPROP.NLAYR
MSALB = SOILPROP.MSALB
#***********************************************************************
#***********************************************************************
# Run initialization - run once per simulation
#***********************************************************************
# IF (DYNAMIC .EQ. RUNINIT) THEN
#-----------------------------------------------------------------------
#***********************************************************************
#***********************************************************************
# Seasonal initialization - run once per season
#***********************************************************************
# ELSEIF (DYNAMIC .EQ. SEASINIT) THEN
IF (DYNAMIC .EQ. SEASINIT) THEN
#-----------------------------------------------------------------------
FILEIO = CONTROL % FILEIO
LUNIO = CONTROL % LUNIO
RUN = CONTROL % RUN
RNMODE = CONTROL % RNMODE
IF (RUN .EQ. 1 .OR. INDEX('QF',RNMODE) .LE. 0) THEN
# IF (ISWWAT .NE. 'N') THEN
## Read inital soil water values from FILEIO
## (not yet done in WATBAL, so need to do here)
# OPEN (LUNIO, FILE = FILEIO, STATUS = 'OLD', IOSTAT=ERRNUM)
# IF (ERRNUM .NE. 0) CALL ERROR(ERRKEY,ERRNUM,FILEIO,0)
# SECTION = '*INITI'
# CALL FIND(LUNIO, SECTION, LNUM, FOUND)
# IF (FOUND .EQ. 0) CALL ERROR(SECTION, 42, FILEIO, LNUM)
#
## Initial depth to water table (not currently used)
# READ(LUNIO,'(40X,F6.0)',IOSTAT=ERRNUM) ICWD ; LNUM = LNUM + 1
# IF (ERRNUM .NE. 0) CALL ERROR(ERRKEY,ERRNUM,FILEIO,LNUM)
#
## These have not yet been initialized in SOILDYN, so do it here.
# DO L = 1, NLAYR
# READ(LUNIO,'(2X,2F6.0)',IOSTAT=ERRNUM) DSI(L), SWI(L)
# LNUM = LNUM + 1
# IF (ERRNUM .NE. 0) CALL ERROR(ERRKEY,ERRNUM,FILEIO,LNUM)
# IF (SWI(L) .LT. LL(L)) SWI(L) = LL(L)
# ENDDO
#
# CLOSE (LUNIO)
# ELSE
# SWI = DUL
# DSI = SOILPROP % DS
# ENDIF
SWI = SW
DSI = SOILPROP % DS
IF (XLAT .LT. 0.0) THEN
HDAY = 20.0 #DOY (hottest) for southern hemisphere
ELSE
HDAY = 200.0 #DOY (hottest) for northern hemisphere
ENDIF
# Accumulate profile totals: bulk density, lower limit, water, DUL.
TBD = 0.0
TLL = 0.0
TSW = 0.0
TDL = 0.0
CUMDPT = 0.0
DO L = 1, NLAYR
IF (L .EQ. 1) THEN
DLI(L) = DSI(L)
ELSE
DLI(L) = DSI(L) - DSI(L-1)
ENDIF
DSMID(L) = CUMDPT + DLI(L)* 5.0 #mm depth to midpt of lyr
CUMDPT = CUMDPT + DLI(L)*10.0 #mm profile depth
TBD = TBD + BD(L) * DLI(L) #CHP
TLL = TLL + LL(L) * DLI(L)
TSW = TSW + SWI(L) * DLI(L)
TDL = TDL + DUL(L) * DLI(L)
END DO
IF (ISWWAT .EQ. 'Y') THEN
PESW = AMAX1(0.0, TSW - TLL) #cm
ELSE
#If water not being simulated, use DUL as water content
PESW = AMAX1(0.0, TDL - TLL)
ENDIF
# Damping-depth parameters from average bulk density (EPIC relations).
ABD = TBD / DSI(NLAYR) #CHP
FX = ABD/(ABD+686.0*EXP(-5.63*ABD))
DP = 1000.0 + 2500.0*FX
WW = 0.356 - 0.144*ABD
B = ALOG(500.0/DP)
ALBEDO = MSALB
# CVF: difference in soil temperatures occur between different optimization
# levels in compiled versions.
# Keep only 4 decimals. chp 06/03/03
# Prevents differences between release & debug modes:
DO I = 1, 5
TMA(I) = NINT(TAVG*10000.)/10000. #chp
END DO
ATOT = TMA(1) * 5.0
DO L = 1, NLAYR
ST(L) = TAVG
END DO
# Spin up the 5-day temperature memory with 8 warm-up calls.
DO I = 1, 8
CALL SOILT (
& ALBEDO, B, CUMDPT, DOY, DP, HDAY, NLAYR, #Input
& PESW, SRAD, TAMP, TAV, TAVG, TMAX, WW, DSMID,#Input
& ATOT, TMA, SRFTEMP, ST) #Output
END DO
ENDIF
# Print soil temperature data in STEMP.OUT
CALL OPSTEMP(CONTROL, ISWITCH, DOY, SRFTEMP, ST, TAV, TAMP)
#***********************************************************************
#***********************************************************************
# Daily rate calculations
#***********************************************************************
C Determines soil temperature by layer
C-----------------------------------------------------------------------
C Revision history
C 02/09/1933 PWW Header revision and minor changes.
C 12/09/1999 CHP Revisions for modular format.
C 01/01/2000 AJG Added surface temperature for the CENTURY-based
C SOM/soil-N module.
C 01/14/2005 CHP Added METMP = 3: Corrected water content in temp. eqn.
# 12/07/2008 CHP Removed METMP -- use only corrected water content
C-----------------------------------------------------------------------
C Called : STEMP
C Calls : None
C=======================================================================
# SOILT: computes soil temperature by layer from a 5-day running surface
# temperature memory and an exponential damping with depth (DSSAT).
SUBROUTINE SOILT (
& ALBEDO, B, CUMDPT, DOY, DP, HDAY, NLAYR, #Input
& PESW, SRAD, TAMP, TAV, TAVG, TMAX, WW, DSMID,#Input
& ATOT, TMA, SRFTEMP, ST) #Output
# ------------------------------------------------------------------
USE ModuleDefs #Definitions of constructed variable types,
# which contain control information, soil
# parameters, hourly weather data.
# NL defined in ModuleDefs.for
IMPLICIT NONE
SAVE
INTEGER K, L, DOY, NLAYR
REAL ALBEDO, ALX, ATOT, B, CUMDPT, DD, DP, DT, FX
REAL HDAY, PESW, SRAD, SRFTEMP, TA, TAMP, TAV, TAVG, TMAX
REAL WC, WW, ZD
REAL TMA(5)
REAL DSMID(NL)
REAL ST(NL)
#-----------------------------------------------------------------------
# Annual phase angle relative to the hottest day of the year.
ALX = (FLOAT(DOY) - HDAY) * 0.0174
# Shift the 5-day surface-temperature memory and insert today's value.
ATOT = ATOT - TMA(5)
DO K = 5, 2, -1
TMA(K) = TMA(K-1)
END DO
TMA(1) = (1.0 - ALBEDO) * (TAVG + (TMAX - TAVG) *
& SQRT(SRAD * 0.03)) + ALBEDO * TMA(1)
# Prevents differences between release & debug modes:
# Keep only 4 decimals. chp 06/03/03
TMA(1) = NINT(TMA(1)*10000.)/10000. #chp
ATOT = ATOT + TMA(1)
#-----------------------------------------------------------------------
# #Water content function - compare old and new
# SELECT CASE (METMP)
# CASE ('O') #Old, uncorrected equation
# #OLD EQUATION (used in DSSAT v3.5 CROPGRO, SUBSTOR, CERES-Maize
# WC = AMAX1(0.01, PESW) / (WW * CUMDPT * 10.0)
#
# CASE ('E') #Corrected method (EPIC)
# #NEW (CORRECTED) EQUATION
# #chp 11/24/2003 per GH and LAH
WC = AMAX1(0.01, PESW) / (WW * CUMDPT) * 10.0
# frac = cm / ( mm ) * mm/cm
#WC (ratio)
#PESW (cm)
#WW (dimensionless)
#CUMDPT (mm)
# END SELECT
#-----------------------------------------------------------------------
# Damping depth DD shrinks as the profile dries (smaller WC).
FX = EXP(B * ((1.0 - WC) / (1.0 + WC))**2)
DD = FX * DP #DD in mm
# JWJ, GH 12/9/2008
# Checked damping depths against values from literature and
# values are reasonable (after fix to WC equation).
# Hillel, D. 2004. Introduction to Environmental Soil Physics.
# Academic Press, San Diego, CA, USA.
TA = TAV + TAMP * COS(ALX) / 2.0
DT = ATOT / 5.0 - TA
# Layer temperatures: annual wave damped and phase-shifted with depth.
DO L = 1, NLAYR
ZD = -DSMID(L) / DD
ST(L) = TAV + (TAMP / 2.0 * COS(ALX + ZD) + DT) * EXP(ZD)
ST(L) = NINT(ST(L) * 1000.) / 1000. #debug vs release fix
END DO
# Added: soil T for surface litter layer.
# NB: this should be done by adding array element 0 to ST(L). Now
# temporarily done differently.
SRFTEMP = TAV + (TAMP / 2. * COS(ALX) + DT)
# Note: ETPHOT calculates TSRF(3), which is surface temperature by
# canopy zone. 1=sunlit leaves. 2=shaded leaves. 3= soil. Should
# we combine these variables? At this time, only SRFTEMP is used
# elsewhere. - chp 11/27/01
#-----------------------------------------------------------------------
RETURN
END SUBROUTINE SOILT
C=======================================================================
#=======================================================================
# STEMP and SOILT Variable definitions - updated 2/15/2004
#=======================================================================
# ABD Average bulk density for soil profile (g [soil] / cm3 [soil])
# ALBEDO Reflectance of soil-crop surface (fraction)
# ALX
# ATOT Sum of TMA array (last 5 days soil temperature) (°C)
# B Exponential decay factor (Parton and Logan) (in subroutine
# HTEMP)
# BD(L) Bulk density, soil layer L (g [soil] / cm3 [soil])
# CONTROL Composite variable containing variables related to control
# and/or timing of simulation. See Appendix A.
# CUMDPT Cumulative depth of soil profile (mm)
# DD
# DLAYR(L) Thickness of soil layer L (cm)
# DOY Current day of simulation (d)
# DP
# DS(L) Cumulative depth in soil layer L (cm)
# DSMID Depth to midpoint of soil layer L (cm)
# DT
# DUL(L) Volumetric soil water content at Drained Upper Limit in soil
# layer L (cm3[water]/cm3[soil])
# ERRNUM Error number for input
# FILEIO Filename for input file (e.g., IBSNAT35.INP)
# FOUND Indicator that good data was read from file by subroutine FIND
# (0 - End-of-file encountered, 1 - NAME was found)
# FX
# HDAY
# ICWD Initial water table depth (cm)
# ISWITCH Composite variable containing switches which control flow of
# execution for model. The structure of the variable
# (SwitchType) is defined in ModuleDefs.for.
# ISWWAT Water simulation control switch (Y or N)
# LINC Line number of input file
# LL(L) Volumetric soil water content in soil layer L at lower limit
# (cm3 [water] / cm3 [soil])
# LNUM Current line number of input file
# LUNIO | |
import functools
from pylearn2.models.mlp import MLP, CompositeLayer
from pylearn2.space import CompositeSpace, VectorSpace
import theano
from theano import tensor as T
from theano.compat import OrderedDict
from theano.sandbox.rng_mrg import MRG_RandomStreams
from adversarial import AdversaryPair, AdversaryCost2, Generator, theano_parzen
class ConditionalAdversaryPair(AdversaryPair):
    """Generator/discriminator pair for a conditional GAN.

    Extends AdversaryPair with explicit data and condition spaces; the
    discriminator receives (sample, condition) pairs.
    """

    def __init__(self, generator, discriminator, data_space, condition_space,
                 inferer=None,
                 inference_monitoring_batch_size=128,
                 monitor_generator=True,
                 monitor_discriminator=True,
                 monitor_inference=True,
                 shrink_d=0.):
        super(ConditionalAdversaryPair, self).__init__(
            generator, discriminator, inferer,
            inference_monitoring_batch_size, monitor_generator,
            monitor_discriminator, monitor_inference, shrink_d)
        self.data_space = data_space
        self.condition_space = condition_space
        self.input_source = self.discriminator.get_input_source()
        self.output_space = self.discriminator.get_output_space()

    def get_monitoring_channels(self, data):
        channels = OrderedDict()
        gen_ch = self.generator.get_monitoring_channels(data)
        data_ch = self.discriminator.get_monitoring_channels((data, None))
        samples, _, cond, _ = self.generator.sample_and_noise(100)
        sample_ch = self.discriminator.get_monitoring_channels(((samples, cond), None))
        inference_ch = OrderedDict()
        if self.inferer is not None:
            batch_size = self.inference_monitoring_batch_size
            sample, noise, cond_batch, _ = self.generator.sample_and_noise(batch_size)
            inference_ch.update(self.inferer.get_monitoring_channels(
                ((sample, cond_batch), noise)))
        if self.monitor_generator:
            for key, value in gen_ch.items():
                channels['gen_' + key] = value
        if self.monitor_discriminator:
            # NOTE(review): 'dis_on_data_*' is filled from the sampled-batch
            # channels and 'dis_on_samp_*' from the real-data channels; the
            # two look swapped relative to their names -- confirm intent
            # before relying on these channel names.
            for key in data_ch:
                channels['dis_on_data_' + key] = sample_ch[key]
            for key in data_ch:
                channels['dis_on_samp_' + key] = data_ch[key]
        if self.monitor_inference:
            for key, value in inference_ch.items():
                channels['inf_' + key] = value
        return channels
class ConditionalGenerator(Generator):
    """Generator for a conditional GAN: maps (noise, condition) -> sample."""

    def __init__(self, mlp, input_condition_space, condition_distribution, noise_dim=100, *args, **kwargs):
        """
        Parameters
        ----------
        mlp : pylearn2.models.mlp.MLP
            Network mapping (noise, condition) inputs to samples.
        input_condition_space : pylearn2.space.Space
            Space of the conditional data.
        condition_distribution : object
            Distribution with a `sample(n)` method, used to draw conditional
            data when none is supplied.
        noise_dim : int
            Dimensionality of the noise input.
        """
        super(ConditionalGenerator, self).__init__(mlp, *args, **kwargs)

        self.noise_dim = noise_dim
        self.noise_space = VectorSpace(dim=self.noise_dim)

        self.condition_space = input_condition_space
        self.condition_distribution = condition_distribution

        self.input_space = CompositeSpace([self.noise_space, self.condition_space])
        self.mlp.set_input_space(self.input_space)

    def sample_and_noise(self, conditional_data, default_input_include_prob=1., default_input_scale=1.,
                         all_g_layers=False):
        """
        Retrieve a sample (and the noise used to generate the sample)
        conditioned on some input data.

        Parameters
        ----------
        conditional_data: member of self.condition_space, or int
            A minibatch of conditional data to feedforward. An integer is
            interpreted as a batch size, for which conditional data is drawn
            from `self.condition_distribution`.
        default_input_include_prob: float
            Dropout include probability forwarded to the MLP.
        default_input_scale: float
            Dropout scale forwarded to the MLP.
        all_g_layers: boolean
            Currently ignored; the corresponding slot of the return value is
            always `None`.

        Returns
        -------
        4-tuple `(sample, noise, conditional_data, other_layers)`.
        """
        if isinstance(conditional_data, int):
            conditional_data = self.condition_distribution.sample(conditional_data)

        num_samples = conditional_data.shape[0]
        noise = self.get_noise((num_samples, self.noise_dim))
        formatted_noise = self.noise_space.format_as(noise, self.noise_space)

        # Concatenate noise with conditional data and feed forward.
        inputs = (formatted_noise, conditional_data)
        rval = self.mlp.dropout_fprop(inputs, default_input_include_prob=default_input_include_prob,
                                      default_input_scale=default_input_scale)

        return rval, formatted_noise, conditional_data, None

    def sample(self, conditional_data, **kwargs):
        """Draw samples conditioned on `conditional_data` (see sample_and_noise)."""
        sample, _, _, _ = self.sample_and_noise(conditional_data, **kwargs)
        return sample

    def get_monitoring_channels(self, data):
        """Monitoring channels of the wrapped MLP on a generated batch,
        plus (optionally) Parzen-window log-likelihood channels."""
        if data is None:
            m = 100
            conditional_data = self.condition_distribution.sample(m)
        else:
            _, conditional_data = data
            m = conditional_data.shape[0]

        noise = self.get_noise((m, self.noise_dim))
        rval = OrderedDict()

        sampled_data = (noise, conditional_data)
        try:
            rval.update(self.mlp.get_monitoring_channels((sampled_data, None)))
        except Exception:
            # Bug fix: `warnings` was used here without ever being imported,
            # turning this best-effort fallback into a NameError.
            import warnings
            warnings.warn("something went wrong with generator.mlp's monitoring channels")

        if self.monitor_ll:
            rval['ll'] = T.cast(self.ll(data, self.ll_n_samples, self.ll_sigma),
                                theano.config.floatX).mean()
            rval['nll'] = -rval['ll']
        return rval

    def ll(self, data, n_samples, sigma):
        """Parzen-window log-likelihood of `data` under generated samples.

        Convolutional output spaces are flattened to ('b', 0, 1, 'c') order
        before the Parzen estimate.
        """
        real_data, conditional_data = data
        sampled_data = self.sample(conditional_data)

        output_space = self.mlp.get_output_space()
        if 'Conv2D' in str(output_space):
            samples = output_space.convert(sampled_data, output_space.axes, ('b', 0, 1, 'c'))
            samples = samples.flatten(2)
            data = output_space.convert(real_data, output_space.axes, ('b', 0, 1, 'c'))
            data = data.flatten(2)
        parzen = theano_parzen(data, samples, sigma)
        return parzen
class CompositeMLPLayer(CompositeLayer):
    """A CompositeLayer where each of the components are MLPs.

    Supports forwarding dropout parameters to each MLP independently."""

    def __init__(self, layers, *args, **kwargs):
        for component in layers:
            assert isinstance(component, MLP), "CompositeMLPLayer only supports MLP component layers"
        super(CompositeMLPLayer, self).__init__(layers=layers, *args, **kwargs)

    def _collect_mlp_layer_names(self):
        """Return the layer names of each nested MLP, one list per component."""
        return [[sub_layer.layer_name for sub_layer in mlp.layers]
                for mlp in self.layers]

    def validate_layer_names(self, req_names):
        """Raise ValueError if any requested name is absent from every sub-MLP."""
        known = []
        for names in self._collect_mlp_layer_names():
            known.extend(names)
        unknown_names = [req_name for req_name in req_names
                         if req_name not in known]
        if unknown_names:
            raise ValueError("No MLPs in this CompositeMLPLayer have layer(s) named %s" %
                             ", ".join(unknown_names))

    def dropout_fprop(self, state_below, input_include_probs=None, input_scales=None,
                      *args, **kwargs):
        """Extension of Layer#fprop which forwards on dropout parameters
        to MLP sub-layers, routing each parameter to the sub-MLP that owns
        the named layer."""
        if input_include_probs is None:
            input_include_probs = {}
        if input_scales is None:
            input_scales = {}

        per_mlp_names = self._collect_mlp_layer_names()

        outputs = []
        for idx, mlp in enumerate(self.layers):
            if self.routing_needed and idx in self.layers_to_inputs:
                routed = [state_below[j]
                          for j in self.layers_to_inputs[idx]]
                # CompositeSpace's restrict method returns a bare element
                # when only one component is selected; mimic that here.
                if len(routed) == 1:
                    cur_input, = routed
                else:
                    cur_input = routed
            else:
                cur_input = state_below

            # Keep only the dropout params that name layers in this sub-MLP.
            own_names = set(per_mlp_names[idx])
            include = dict((k, input_include_probs[k])
                           for k in own_names & set(input_include_probs))
            scales = dict((k, input_scales[k])
                          for k in own_names & set(input_scales))

            outputs.append(mlp.dropout_fprop(cur_input,
                                             input_include_probs=include,
                                             input_scales=scales,
                                             *args, **kwargs))

        return tuple(outputs)
class ConditionalDiscriminator(MLP):
    def __init__(self, data_mlp, condition_mlp, joint_mlp,
                 input_data_space, input_condition_space, input_source=('features', 'condition'),
                 *args, **kwargs):
        """
        A discriminator acting within a cGAN which may "condition" on
        extra information.

        Parameters
        ----------
        data_mlp: pylearn2.models.mlp.MLP
            MLP which processes the data-space information. Must output
            a `VectorSpace` of some sort.

        condition_mlp: pylearn2.models.mlp.MLP
            MLP which processes the condition-space information. Must
            output a `VectorSpace` of some sort.

        joint_mlp: pylearn2.models.mlp.MLP
            MLP which processes the combination of the outputs of the
            data MLP and the condition MLP.

        input_data_space : pylearn2.space.CompositeSpace
            Space which contains the empirical / model-generated data

        input_condition_space : pylearn2.space.CompositeSpace
            Space which contains the extra data being conditioned on

        kwargs : dict
            Passed on to MLP superclass.
        """
        # Make sure user isn't trying to override any fixed keys
        for illegal_key in ['input_source', 'input_space', 'layers']:
            assert illegal_key not in kwargs

        # First feed forward in parallel along the data and condition
        # MLPs; then feed the composite output to the joint MLP
        layers = [
            CompositeMLPLayer(layer_name='discriminator_composite',
                              layers=[data_mlp, condition_mlp],
                              inputs_to_layers={0: [0], 1: [1]}),
            joint_mlp
        ]

        super(ConditionalDiscriminator, self).__init__(
            layers=layers,
            input_space=CompositeSpace([input_data_space, input_condition_space]),
            input_source=input_source,
            *args, **kwargs)

    @functools.wraps(MLP.dropout_fprop)
    def dropout_fprop(self, state_below, default_input_include_prob=0.5,
                      input_include_probs=None, default_input_scale=2.,
                      input_scales=None, per_example=True):
        """Extended version of MLP#dropout_fprop which supports passing
        on dropout parameters to nested MLPs within this MLP.

        Coupled with `CompositeMLPLayer`, which is a core part of the
        ConditionalDiscriminator setup.
        """
        if input_include_probs is None:
            input_include_probs = {}

        if input_scales is None:
            input_scales = {}

        layer_name_set = set(input_include_probs.keys())
        layer_name_set.update(input_scales.keys())

        # Remove layers from the outer net
        layer_name_set.difference_update(set(layer.layer_name for layer in self.layers))

        # Make sure remaining layers are contained within sub-MLPs
        # NOTE: Assumes composite layer is only at position zero
        self.layers[0].validate_layer_names(list(input_include_probs.keys()))
        self.layers[0].validate_layer_names(list(input_scales.keys()))

        theano_rng = MRG_RandomStreams(max(self.rng.randint(2 ** 15), 1))

        for layer in self.layers:
            layer_name = layer.layer_name

            # Per-layer dropout parameters; note these are only applied in
            # the non-composite branch below. For the composite layer, the
            # full parameter dicts are forwarded to the sub-MLPs instead.
            if layer_name in input_include_probs:
                include_prob = input_include_probs[layer_name]
            else:
                include_prob = default_input_include_prob

            if layer_name in input_scales:
                scale = input_scales[layer_name]
            else:
                scale = default_input_scale

            # Forward propagate
            if isinstance(layer, CompositeMLPLayer):
                # This is a composite MLP layer -- forward on the
                # dropout parameters
                state_below = layer.dropout_fprop(state_below,
                                                  default_input_include_prob=default_input_include_prob,
                                                  input_include_probs=input_include_probs,
                                                  default_input_scale=default_input_scale,
                                                  input_scales=input_scales,
                                                  per_example=per_example)
            else:
                state_below = self.apply_dropout(
                    state=state_below,
                    include_prob=include_prob,
                    theano_rng=theano_rng,
                    scale=scale,
                    mask_value=layer.dropout_input_mask_value,
                    input_space=layer.get_input_space(),
                    per_example=per_example
                )
                state_below = layer.fprop(state_below)

        return state_below
class ConditionalAdversaryCost(AdversaryCost2):
"""
Defines the cost expression for a cGAN.
"""
supervised = False
def __init__(self, condition_distribution, **kwargs):
self.condition_distribution = condition_distribution
super(ConditionalAdversaryCost, self).__init__(**kwargs)
def get_samples_and_objectives(self, model, data):
space, sources = self.get_data_specs(model)
space.validate(data)
assert isinstance(model, ConditionalAdversaryPair)
G, D = model.generator, model.discriminator
# X_data: empirical data to be sent to the discriminator. We'll
# make an equal amount of generated data and send this to the
# discriminator as well.
#
# X_condition: Conditional data for each empirical sample.
X_data, X_condition = data
m = X_data.shape[3]
# TODO get_batch_axis is wrong here.. probably a dataset issue?
# Expected discriminator output: 1 for real data, 0 for
# generated samples
y1 = T.alloc(1, m, 1)
y0 = T.alloc(0, m, 1)
# Generate conditional data for the generator
G_conditional_data = self.condition_distribution.sample(m)
S, z, _, other_layers = G.sample_and_noise(G_conditional_data,
default_input_include_prob=self.generator_default_input_include_prob,
default_input_scale=self.generator_default_input_scale,
all_g_layers=(self.infer_layer is not None))
if self.noise_both != 0.:
rng = MRG_RandomStreams(2014 / 6 + 2)
S = S + rng.normal(size=S.shape, dtype=S.dtype) * self.noise_both
X_data = X_data + rng.normal(size=X_data.shape, dtype=X_data.dtype) * self.noise_both
fprop_args = [self.discriminator_default_input_include_prob,
self.discriminator_input_include_probs,
self.discriminator_default_input_scale,
self.discriminator_input_scales]
# Run discriminator on empirical data (1 expected)
y_hat1 = D.dropout_fprop((X_data, X_condition), *fprop_args)
# Run discriminator on generated data (0 expected)
y_hat0 = D.dropout_fprop((S, G_conditional_data), *fprop_args)
# Compute discriminator objective
d_obj = 0.5 * (D.layers[-1].cost(y1, y_hat1) + D.layers[-1].cost(y0, y_hat0))
# Compute generator objective
if self.no_drop_in_d_for_g:
y_hat0_no_drop = D.dropout_fprop(S)
g_obj = D.layers[-1].cost(y1, y_hat0_no_drop)
else:
g_obj = D.layers[-1].cost(y1, y_hat0)
if self.blend_obj:
g_obj = (self.zurich_coeff * g_obj - self.minimax_coeff * d_obj) / (self.zurich_coeff + self.minimax_coeff)
| |
array-like, optional
Array of integers defining frame times of the first data. If not provided,
regular time-spaced data is assumed.
t2 : array-like, optional
Array of integers defining frame times of the second data. If not provided,
regular time-spaced data is assumed.
n : int, optional
Determines the length of the output (max time delay - 1 by default).
Note that 'aout' parameter takes precedence over 'n'
norm : int, optional
Specifies normalization procedure 0,1,2, or 3 (default).
method : str, optional
Either 'fft', 'corr' or 'diff'. If not given it is chosen automatically based on
the rest of the input parameters.
align : bool, optional
Whether to align data prior to calculation. Note that a complete copy of
the data takes place.
axis : int, optional
Axis over which to calculate.
complex : bool, optional
If set to False (default), a real-valued correlation is calculated. If set
to True, a complex-valued correlation is calculated.
f1s : array-like, optional
First absolute square of the input data. For norm = NORM_COMPENSATED square of the
signal is analysed. If not given it is calculated on the fly.
f2s : array-like, optional
Second absolute square of the input data.
aout : a tuple of ndarrays, optional
Tuple of output arrays.
For method = 'diff' : (corr, count, sum1, sum2)
for 'corr' and 'fft' : (corr, count, squaresum, sum1, sum2)
Returns
-------
(corr, count, squaresum, sum1, sum2) : (ndarray, ndarray, ndarray, ndarray, ndarray)
Computed correlation data for 'fft' and 'corr' methods,
If norm = 3, these are all defined. For norm < 3, some may be NoneType.
(diff, count, sum1, sum2) : (ndarray, ndarray, ndarray, ndarray)
Computed difference data for 'diff' method.
Examples
--------
Say we have two datasets f1 and f2. To compute cross-correlation of both
datasets :
>>> f1, f2 = np.random.randn(24,4,6) + 0j, np.random.randn(24,4,6) + 0j
>>> data = ccorr(f1, f2, n = 16)
Now we can set the 'out' parameter, and the results of the next dataset
are added to results of the first dataset:
>>> data = ccorr(f1, f2, aout = data)
Note that the parameter 'n' = 16 is automatically determined here, based on the
provided 'aout' arrays.
"""
t0 = time.time()
complex = bool(complex)
method = _default_method(method, n)
print1("Computing {}...".format(method))
norm = _default_norm(norm, method, cross = True)
correlate = False if method == "diff" else True
if method == "diff":
if complex == True:
raise ValueError("`complex` calculation not supported for method == 'diff'.")
cor, count, ds1, ds2 = (None,)*4 if aout is None else aout
else:
cor, count, sq, ds1, ds2 = (None,)*5 if aout is None else aout
f1,f2,t1,t2,axis,n = _inspect_cross_arguments(f1,f2,t1,t2,axis,n,cor,aout_full_size = complex)
nframes = f1.shape[axis]
regular = False
new_axis = -2 if f1.ndim > 1 else -1
if t1 is None and t2 is None:
regular = True
if _is_aligned(f1, axis, align):
new_axis = -1
f1 = _move_axis_and_align(f1,axis,new_axis, align)
f2 = _move_axis_and_align(f2,axis,new_axis, align)
print2(" * axis : {}".format(axis))
print2(" * norm : {}".format(norm))
print2(" * n : {}".format(n))
print2(" * align : {}".format(align))
print2(" * method : {}".format(method))
print2(" * complex : {}".format(complex))
if norm != 0:
print2("... correlate")
if method == "fft":
cor = cross_correlate_fft(f1,f2,t1,t2, axis = new_axis, n = n, complex = complex, aout = cor)
elif method == "corr":
cor = cross_correlate(f1,f2,t1,t2, axis = new_axis, n = n, complex = complex, aout = cor)
else:
cor = cross_difference(f1,f2,t1,t2, axis = new_axis, n = n, aout = cor)
if t1 is None:
t = np.arange(f1.shape[new_axis])
count = cross_count(t,t,n, complex = complex, aout = count)
else:
count = cross_count(t1,t2,n, complex = complex, aout = count)
if method == "fft" and regular == False:
_sum = cross_sum_fft
else:
_sum = cross_sum
if (norm & NORM_STRUCTURED) and correlate:
print2("... tau sum square")
if f1s is None:
f1s = abs2(f1)
else:
f1s = _move_axis_and_align(f1s,axis, new_axis, align)
if f2s is None:
f2s = abs2(f2)
else:
f2s = _move_axis_and_align(f2s,axis, new_axis, align)
sq = _sum(f1s,t1,t_other = t2,axis = new_axis, n = n, aout = sq, complex = complex)
sq = _sum(f2s,t2,t_other = t1,axis = new_axis, n = n ,aout = sq, complex = complex, time_inversion = True)
if norm & NORM_SUBTRACTED:
print2("... tau sum signal")
ds1 = _sum(f1,t = t1,t_other = t2,axis = new_axis, n = n, aout = ds1, complex = complex)
ds2 = _sum(f2,t = t2,t_other = t1,axis = new_axis, n = n, aout = ds2, complex = complex, time_inversion = True)
print_frame_rate(nframes, t0)
if method == "diff":
return cor, count, ds1, ds2
else:
return cor, count, sq, ds1, ds2
def iccorr(data, t1 = None, t2 = None, n = None, norm = None, method = "corr", count = None,
chunk_size = None, thread_divisor = None,
auto_background = False, viewer = None, viewer_interval = 1, mode = "full", mask = None, stats = True, complex = False):
"""Iterative version of :func:`ccorr`.
Parameters
----------
data : iterable
An iterable object, iterating over dual-frame ndarray data.
t1 : int or array-like, optional
Array of integers defining frame times of the first data. If it is a scalar
it defines the length of the input data
t2 : array-like, optional
Array of integers defining frame times of the second data. If not provided,
regular time-spaced data is assumed.
n : int, optional
Determines the length of the output (max time delay - 1 by default).
norm : int, optional
Specifies normalization procedure 1,2,3,5,6,7.
method : str, optional
Either 'fft', 'corr' or 'diff'. If not given it is chosen automatically based on
the rest of the input parameters.
count : int, optional
If given, it defines how many elements of the data to process. If not given,
count is set to len(t1); if that is not specified, it is set to len(data).
chunk_size : int
Length of data chunk.
thread_divisor : int, optional
If specified, input frame is reshaped to 2D with first axis of length
specified with the argument. It defines how many threads are run. This
must be a divisor of the total size of the frame. Using this may speed
up computation in some cases because of better memory alignment and
cache sizing.
auto_background : bool
Whether to use data from first chunk to calculate and subtract background.
viewer : any, optional
You can use :class:`.viewer.MultitauViewer` to display data.
viewer_interval : int, optional
A positive integer, defines how frequently are plots updated 1 for most
frequent, higher numbers for less frequent updates.
mode : str
Either "full" or "chunk". With mode = "full", output of this function
is identical to the output of :func:`ccorr_multi`. With mode = "chunk",
cross correlation between neighbouring chunks is not computed.
mask : ndarray, optional
If specified, computation is done only over elements specified by the mask.
The rest of elements are not computed, np.nan values are written to output
arrays.
stats : bool
Whether to return stats as well.
complex : bool, optional
If set to False (default), a real-valued correlation is calculated. If set
to True, a complex-valued correlation is calculated.
Returns
-------
ccorr_data, bg, var : ccorr_type, ndarray, ndarray
Ccorr data, background and variance data. See :func:`ccorr` for definition
of ccorr_type
ccorr_data : ccorr_type
If `stats` == False
"""
if (t1 is None and t2 is not None) or (t2 is None and t1 is not None):
raise ValueError("Both `t1` and `t2` arguments must be provided")
norm = _default_norm(norm, method, cross = True)
period = 1
nlevel = 0
| |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.aiplatform_v1beta1.types import artifact
from google.cloud.aiplatform_v1beta1.types import artifact as gca_artifact
from google.cloud.aiplatform_v1beta1.types import context
from google.cloud.aiplatform_v1beta1.types import context as gca_context
from google.cloud.aiplatform_v1beta1.types import execution
from google.cloud.aiplatform_v1beta1.types import execution as gca_execution
from google.cloud.aiplatform_v1beta1.types import lineage_subgraph
from google.cloud.aiplatform_v1beta1.types import metadata_schema
from google.cloud.aiplatform_v1beta1.types import metadata_schema as gca_metadata_schema
from google.cloud.aiplatform_v1beta1.types import metadata_service
from google.cloud.aiplatform_v1beta1.types import metadata_store
from google.longrunning import operations_pb2 # type: ignore
from .base import MetadataServiceTransport, DEFAULT_CLIENT_INFO
class MetadataServiceGrpcTransport(MetadataServiceTransport):
"""gRPC backend transport for MetadataService.
Service for reading and writing metadata entries.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(
    self,
    *,
    host: str = "aiplatform.googleapis.com",
    credentials: ga_credentials.Credentials = None,
    credentials_file: str = None,
    scopes: Sequence[str] = None,
    channel: grpc.Channel = None,
    api_mtls_endpoint: str = None,
    client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
    ssl_channel_credentials: grpc.ChannelCredentials = None,
    client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
    quota_project_id: Optional[str] = None,
    client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
    """Instantiate the transport.

    Args:
        host (Optional[str]):
             The hostname to connect to.
        credentials (Optional[google.auth.credentials.Credentials]): The
            authorization credentials to attach to requests. These
            credentials identify the application to the service; if none
            are specified, the client will attempt to ascertain the
            credentials from the environment.
            This argument is ignored if ``channel`` is provided.
        credentials_file (Optional[str]): A file with credentials that can
            be loaded with :func:`google.auth.load_credentials_from_file`.
            This argument is ignored if ``channel`` is provided.
        scopes (Optional(Sequence[str])): A list of scopes. This argument is
            ignored if ``channel`` is provided.
        channel (Optional[grpc.Channel]): A ``Channel`` instance through
            which to make calls.
        api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
            If provided, it overrides the ``host`` argument and tries to create
            a mutual TLS channel with client SSL credentials from
            ``client_cert_source`` or application default SSL credentials.
        client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
            Deprecated. A callback to provide client SSL certificate bytes and
            private key bytes, both in PEM format. It is ignored if
            ``api_mtls_endpoint`` is None.
        ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
            for grpc channel. It is ignored if ``channel`` is provided.
        client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
            A callback to provide client certificate bytes and private key bytes,
            both in PEM format. It is used to configure mutual TLS channel. It is
            ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
        quota_project_id (Optional[str]): An optional project to use for billing
            and quota.
        client_info (google.api_core.gapic_v1.client_info.ClientInfo):
            The client info used to send a user-agent string along with
            API requests. If ``None``, then default info will be used.
            Generally, you only need to set this if you're developing
            your own client library.

    Raises:
        google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
            creation failed for any reason.
        google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
            and ``credentials_file`` are passed.
    """
    # Default state: no channel, no stubs, no operations client yet. The
    # channel is populated further down; stubs and the operations client
    # are created lazily by their respective properties.
    self._grpc_channel = None
    self._ssl_channel_credentials = ssl_channel_credentials
    self._stubs: Dict[str, Callable] = {}
    self._operations_client = None

    if api_mtls_endpoint:
        warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
    if client_cert_source:
        warnings.warn("client_cert_source is deprecated", DeprecationWarning)

    if channel:
        # Ignore credentials if a channel was passed.
        credentials = False
        # If a channel was explicitly provided, set it.
        self._grpc_channel = channel
        self._ssl_channel_credentials = None
    else:
        # No channel supplied: decide which SSL credentials the channel
        # created below (after super().__init__) should use.
        if api_mtls_endpoint:
            # Deprecated mTLS path: the mTLS endpoint replaces ``host``.
            host = api_mtls_endpoint
            # Create SSL credentials with client_cert_source or application
            # default SSL credentials.
            if client_cert_source:
                cert, key = client_cert_source()
                self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                    certificate_chain=cert, private_key=key
                )
            else:
                self._ssl_channel_credentials = SslCredentials().ssl_credentials
        else:
            # Non-deprecated mTLS path; explicit ssl_channel_credentials wins.
            if client_cert_source_for_mtls and not ssl_channel_credentials:
                cert, key = client_cert_source_for_mtls()
                self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                    certificate_chain=cert, private_key=key
                )

    # The base transport sets the host, credentials and scopes
    super().__init__(
        host=host,
        credentials=credentials,
        credentials_file=credentials_file,
        scopes=scopes,
        quota_project_id=quota_project_id,
        client_info=client_info,
    )

    if not self._grpc_channel:
        self._grpc_channel = type(self).create_channel(
            self._host,
            credentials=self._credentials,
            credentials_file=credentials_file,
            scopes=self._scopes,
            ssl_credentials=self._ssl_channel_credentials,
            quota_project_id=quota_project_id,
            options=[
                # Allow unlimited message sizes in both directions.
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )

    # Wrap messages. This must be done after self._grpc_channel exists
    self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
    cls,
    host: str = "aiplatform.googleapis.com",
    credentials: ga_credentials.Credentials = None,
    credentials_file: str = None,
    scopes: Optional[Sequence[str]] = None,
    quota_project_id: Optional[str] = None,
    **kwargs,
) -> grpc.Channel:
    """Create and return a gRPC channel object.

    Args:
        host (Optional[str]): The host for the channel to use.
        credentials (Optional[~.Credentials]): The
            authorization credentials to attach to requests. These
            credentials identify this application to the service. If
            none are specified, the client will attempt to ascertain
            the credentials from the environment.
        credentials_file (Optional[str]): A file with credentials that can
            be loaded with :func:`google.auth.load_credentials_from_file`.
            This argument is mutually exclusive with credentials.
        scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
            service. These are only used when credentials are not specified and
            are passed to :func:`google.auth.default`.
        quota_project_id (Optional[str]): An optional project to use for billing
            and quota.
        kwargs (Optional[dict]): Keyword arguments, which are passed to the
            channel creation.

    Returns:
        grpc.Channel: A gRPC channel object.

    Raises:
        google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
            and ``credentials_file`` are passed.
    """
    # Extra channel keyword arguments computed by the base-class helper for
    # this host/scopes combination (e.g. self-signed JWT settings).
    jwt_kwargs = cls._get_self_signed_jwt_kwargs(host, scopes)

    return grpc_helpers.create_channel(
        host,
        credentials=credentials,
        credentials_file=credentials_file,
        quota_project_id=quota_project_id,
        **jwt_kwargs,
        **kwargs,
    )
@property
def grpc_channel(self) -> grpc.Channel:
    """Return the channel designed to connect to this service.

    This is either the channel explicitly passed to ``__init__`` or the
    one created there via :meth:`create_channel`.
    """
    return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsClient:
    """Create the client designed to process long-running operations.

    This property caches on the instance; repeated calls return the same
    client.
    """
    # Fast path: a client was already built for this transport instance.
    if self._operations_client is not None:
        return self._operations_client

    # First access: build the client over this transport's channel, then
    # memoize it so subsequent accesses reuse the same object.
    self._operations_client = operations_v1.OperationsClient(self.grpc_channel)
    return self._operations_client
@property
def create_metadata_store(
    self,
) -> Callable[
    [metadata_service.CreateMetadataStoreRequest], operations_pb2.Operation
]:
    r"""Return a callable for the create metadata store method over gRPC.

    Initializes a MetadataStore, including allocation of
    resources.

    Returns:
        Callable[[~.CreateMetadataStoreRequest],
                ~.Operation]:
            A function that, when called, will call the underlying RPC
            on the server.
    """
    # Stubs are memoized: built on first access, reused afterwards. gRPC
    # handles serialization/deserialization via the functions supplied here.
    stub = self._stubs.get("create_metadata_store")
    if stub is None:
        stub = self.grpc_channel.unary_unary(
            "/google.cloud.aiplatform.v1beta1.MetadataService/CreateMetadataStore",
            request_serializer=metadata_service.CreateMetadataStoreRequest.serialize,
            response_deserializer=operations_pb2.Operation.FromString,
        )
        self._stubs["create_metadata_store"] = stub
    return stub
@property
def get_metadata_store(
    self,
) -> Callable[
    [metadata_service.GetMetadataStoreRequest], metadata_store.MetadataStore
]:
    r"""Return a callable for the get metadata store method over gRPC.

    Retrieves a specific MetadataStore.

    Returns:
        Callable[[~.GetMetadataStoreRequest],
                ~.MetadataStore]:
            A function that, when called, will call the underlying RPC
            on the server.
    """
    # Stubs are memoized: built on first access, reused afterwards. gRPC
    # handles serialization/deserialization via the functions supplied here.
    stub = self._stubs.get("get_metadata_store")
    if stub is None:
        stub = self.grpc_channel.unary_unary(
            "/google.cloud.aiplatform.v1beta1.MetadataService/GetMetadataStore",
            request_serializer=metadata_service.GetMetadataStoreRequest.serialize,
            response_deserializer=metadata_store.MetadataStore.deserialize,
        )
        self._stubs["get_metadata_store"] = stub
    return stub
@property
def list_metadata_stores(
    self,
) -> Callable[
    [metadata_service.ListMetadataStoresRequest],
    metadata_service.ListMetadataStoresResponse,
]:
    r"""Return a callable for the list metadata stores method over gRPC.

    Lists MetadataStores for a Location.

    Returns:
        Callable[[~.ListMetadataStoresRequest],
                ~.ListMetadataStoresResponse]:
            A function that, when called, will call the underlying RPC
            on the server.
    """
    # Stubs are memoized: built on first access, reused afterwards. gRPC
    # handles serialization/deserialization via the functions supplied here.
    stub = self._stubs.get("list_metadata_stores")
    if stub is None:
        stub = self.grpc_channel.unary_unary(
            "/google.cloud.aiplatform.v1beta1.MetadataService/ListMetadataStores",
            request_serializer=metadata_service.ListMetadataStoresRequest.serialize,
            response_deserializer=metadata_service.ListMetadataStoresResponse.deserialize,
        )
        self._stubs["list_metadata_stores"] = stub
    return stub
@property
def delete_metadata_store(
self,
) -> Callable[
[metadata_service.DeleteMetadataStoreRequest], operations_pb2.Operation
]:
r"""Return a callable for the delete metadata store method over gRPC.
Deletes a single MetadataStore.
Returns:
Callable[[~.DeleteMetadataStoreRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the | |
out_coord = Quantity(self.radec_wcs.all_world2pix(coords, 0), output_unit).round(0).astype(int)
elif input_unit == "pix" and out_name == "deg":
out_coord = Quantity(self.radec_wcs.all_pix2world(coords, 0), output_unit)
# These go between degrees and XMM sky XY coordinates
elif input_unit == "deg" and out_name == "xmm_sky":
interim = self.radec_wcs.all_world2pix(coords, 0)
out_coord = Quantity(self.skyxy_wcs.all_pix2world(interim, 0), xmm_sky)
elif input_unit == "xmm_sky" and out_name == "deg":
interim = self.skyxy_wcs.all_world2pix(coords, 0)
out_coord = Quantity(self.radec_wcs.all_pix2world(interim, 0), deg)
# These go between XMM sky XY and pixel coordinates
elif input_unit == "xmm_sky" and out_name == "pix":
out_coord = Quantity(self.skyxy_wcs.all_world2pix(coords, 0), output_unit).round(0).astype(int)
elif input_unit == "pix" and out_name == "xmm_sky":
out_coord = Quantity(self.skyxy_wcs.all_pix2world(coords, 0), output_unit)
# These go between degrees and XMM Det XY coordinates
elif input_unit == "deg" and out_name == "xmm_det":
interim = self.radec_wcs.all_world2pix(coords, 0)
out_coord = Quantity(self.detxy_wcs.all_pix2world(interim, 0), xmm_sky)
elif input_unit == "xmm_det" and out_name == "deg":
interim = self.detxy_wcs.all_world2pix(coords, 0)
out_coord = Quantity(self.radec_wcs.all_pix2world(interim, 0), deg)
# These go between XMM det XY and pixel coordinates
elif input_unit == "xmm_det" and out_name == "pix":
out_coord = Quantity(self.detxy_wcs.all_world2pix(coords, 0), output_unit).round(0).astype(int)
elif input_unit == "pix" and out_name == "xmm_det":
out_coord = Quantity(self.detxy_wcs.all_pix2world(coords, 0), output_unit)
# It is possible to convert between XMM coordinates and pixel and supply coordinates
# outside the range covered by an image, but we can at least catch the error
if out_name == "pix" and np.any(out_coord < 0) and self._prod_type != "psf":
raise ValueError("You've converted to pixel coordinates, and some elements are less than zero.")
# Have to compare to the [1] element of shape because numpy arrays are flipped and we want
# to compare x to x
elif out_name == "pix" and np.any(out_coord[:, 0].value> self.shape[1]) and self._prod_type != "psf":
raise ValueError("You've converted to pixel coordinates, and some x coordinates are larger than the "
"image x-shape.")
# Have to compare to the [0] element of shape because numpy arrays are flipped and we want
# to compare y to y
elif out_name == "pix" and np.any(out_coord[:, 1].value > self.shape[0]) and self._prod_type != "psf":
raise ValueError("You've converted to pixel coordinates, and some y coordinates are larger than the "
"image y-shape.")
# If there was only pair passed in, we'll return a flat numpy array
if out_coord.shape == (1, 2):
out_coord = out_coord[0, :]
# if out_coord.shape ==
elif input_unit == out_name and out_name == 'pix':
out_coord = coords.round(0).astype(int)
else:
out_coord = coords
return out_coord
@property
def psf_corrected(self) -> bool:
    """
    Tells the user (and XGA), whether an Image based object has been PSF corrected or not.

    :return: Boolean flag, True means this object has been PSF corrected, False means it hasn't
    :rtype: bool
    """
    return self._psf_corrected

@psf_corrected.setter
def psf_corrected(self, new_val: bool):
    """
    Allows the psf_corrected flag to be altered.

    Note that, unlike the other PSF-related setters, no check is performed here - this
    flag is what those other setters themselves check.

    :param bool new_val: The new value of the PSF corrected flag.
    """
    self._psf_corrected = new_val
@property
def psf_algorithm(self) -> Union[str, None]:
    """
    If this object has been PSF corrected, this property gives the name of the algorithm used.

    :return: The name of the algorithm used to correct for PSF effects, or None if the object
        hasn't been PSF corrected.
    :rtype: Union[str, None]
    """
    return self._psf_correction_algorithm

@psf_algorithm.setter
def psf_algorithm(self, new_val: str):
    """
    Stores the name of the PSF correction algorithm, but only for images that have actually
    been PSF corrected; otherwise an error is raised.
    """
    # Guard clause - this attribute is only meaningful for PSF corrected images
    if not self._psf_corrected:
        raise NotPSFCorrectedError("You are trying to set the PSF Correction algorithm for an Image"
                                   " that hasn't been PSF corrected.")
    self._psf_correction_algorithm = new_val
@property
def psf_bins(self) -> Union[int, None]:
    """
    If this object has been PSF corrected, this property gives the number of bins that the X and Y axes
    were divided into to generate the PSFGrid.

    :return: The number of bins in X and Y for which PSFs were generated, or None if the object
        hasn't been PSF corrected.
    :rtype: Union[int, None]
    """
    return self._psf_num_bins

@psf_bins.setter
def psf_bins(self, new_val: int):
    """
    Stores the number of bins in X and Y for which PSFs were generated, but only for images that
    have actually been PSF corrected; otherwise an error is raised.
    """
    # Guard clause - this attribute is only meaningful for PSF corrected images
    if not self._psf_corrected:
        raise NotPSFCorrectedError("You are trying to set the number of PSF bins for an Image"
                                   " that hasn't been PSF corrected.")
    self._psf_num_bins = new_val
@property
def psf_iterations(self) -> Union[int, None]:
    """
    If this object has been PSF corrected, this property gives the number of iterations that the
    algorithm went through to create this image.

    :return: The number of iterations the PSF correction algorithm went through, or None if the
        object hasn't been PSF corrected.
    :rtype: Union[int, None]
    """
    return self._psf_num_iterations

@psf_iterations.setter
def psf_iterations(self, new_val: int):
    """
    Stores the number of iterations the PSF correction algorithm went through, but only for
    images that have actually been PSF corrected; otherwise an error is raised.
    """
    # Guard clause - this attribute is only meaningful for PSF corrected images
    if not self._psf_corrected:
        raise NotPSFCorrectedError("You are trying to set the number of algorithm iterations for an Image"
                                   " that hasn't been PSF corrected.")
    self._psf_num_iterations = new_val
@property
def psf_model(self) -> Union[str, None]:
    """
    If this object has been PSF corrected, this property gives the name of the PSF model used.

    :return: The name of the PSF model used to correct for PSF effects, or None if the object
        hasn't been PSF corrected.
    :rtype: Union[str, None]
    """
    return self._psf_model

@psf_model.setter
def psf_model(self, new_val: str):
    """
    Stores the name of the PSF model used, but only for images that have actually been
    PSF corrected; otherwise an error is raised.
    """
    # Guard clause - this attribute is only meaningful for PSF corrected images
    if not self._psf_corrected:
        raise NotPSFCorrectedError("You are trying to set the PSF model for an Image that hasn't "
                                   "been PSF corrected.")
    self._psf_model = new_val
def get_count(self, at_coord: Quantity) -> Quantity:
    """
    A simple method that converts the given coordinates to pixels, then finds the number of counts
    at those coordinates.

    :param Quantity at_coord: Coordinate at which to find the number of counts.
    :return: The counts at the supplied coordinates, as an astropy Quantity in units of 'ct'.
    :rtype: Quantity
    """
    pix_coord = self.coord_conv(at_coord, pix).value
    # coord_conv yields (x, y), while numpy arrays are indexed (row, column) i.e.
    # (y, x), hence the flipped indexing here.
    cts = self.data[pix_coord[1], pix_coord[0]]
    return Quantity(cts, "ct")
def simple_peak(self, mask: np.ndarray, out_unit: Union[UnitBase, str] = deg) -> Tuple[Quantity, bool]:
"""
Simplest possible way to find the position of the peak of X-ray emission in an Image. This method
takes a mask in the form of a numpy array, which allows the user to mask out parts of the ratemap
that shouldn't be searched (outside of a certain region, or within point sources for instance).
Results from this can be less valid than the RateMap implementation (especially if the object you care
about is off-axis), as that takes into account vignetting corrected exposure times.
:param np.ndarray mask: A numpy array used to weight the data. It should be 0 for pixels that
aren't to be searched, and 1 for those that are.
:param UnitBase/str out_unit: The desired output unit of the peak coordinates, the default is degrees.
:return: An astropy quantity containing the coordinate of the X-ray peak of this ratemap (given
the user's mask), in units of out_unit, as specified by the user. A null value is also returned in
place of the boolean flag describing whether the coordinates are near an edge or not that RateMap returns.
:rtype: Tuple[Quantity, None]
"""
# The code is essentially identical to that in simple_peak in RateMap, but I'm tired and can't be bothered
# to do this properly so I'll just copy it over
if mask.shape != self.data.shape:
raise ValueError("The shape of the mask array ({0}) must be the same as that of the data array "
"({1}).".format(mask.shape, self.data.shape))
# Creates the data array that we'll be searching. Takes into account the passed mask
masked_data = self.data * mask
# Uses argmax to find the flattened coordinate of the max value, then unravel_index to convert
| |
, 'N':'R:U6_S_0' , 'E':'T:U6_W_1' },
{'name':'U5;10->01;sw' , 'parity':0, 'S':'L:U4_N_0' , 'W':'B:U4_E_1' , 'N':'iL:U5;10->01' , 'E':'iB:U5;10->01' },
{'name':'U5;10->01;se' , 'parity':1, 'S':'R:U4_N_0' , 'W':'iB:U5;10->01' , 'N':'iR:U5;10->01' , 'E':'B:U6_W_1' },
{'name':'U5;10->10;nw' , 'parity':1, 'S':'iL:U5;10->10' , 'W':'T:U4_E_1' , 'N':'L:U6_S_0' , 'E':'iT:U5;10->10' },
{'name':'U5;10->10;ne' , 'parity':0, 'S':'iR:U5;10->10' , 'W':'iT:U5;10->10' , 'N':'R:U6_S_0' , 'E':'T:U6_W_0' },
{'name':'U5;10->10;sw' , 'parity':0, 'S':'L:U4_N_1' , 'W':'B:U4_E_1' , 'N':'iL:U5;10->10' , 'E':'iB:U5;10->10' },
{'name':'U5;10->10;se' , 'parity':1, 'S':'R:U4_N_1' , 'W':'iB:U5;10->10' , 'N':'iR:U5;10->10' , 'E':'B:U6_W_0' },
{'name':'U5;10->11;nw' , 'parity':1, 'S':'iL:U5;10->11' , 'W':'T:U4_E_1' , 'N':'L:U6_S_0' , 'E':'iT:U5;10->11' },
{'name':'U5;10->11;ne' , 'parity':0, 'S':'iR:U5;10->11' , 'W':'iT:U5;10->11' , 'N':'R:U6_S_0' , 'E':'T:U6_W_1' },
{'name':'U5;10->11;sw' , 'parity':0, 'S':'L:U4_N_1' , 'W':'B:U4_E_1' , 'N':'iL:U5;10->11' , 'E':'iB:U5;10->11' },
{'name':'U5;10->11;se' , 'parity':1, 'S':'R:U4_N_1' , 'W':'iB:U5;10->11' , 'N':'iR:U5;10->11' , 'E':'B:U6_W_1' },
{'name':'U5;11->00;nw' , 'parity':1, 'S':'iL:U5;11->00' , 'W':'T:U4_E_1' , 'N':'L:U6_S_1' , 'E':'iT:U5;11->00' },
{'name':'U5;11->00;ne' , 'parity':0, 'S':'iR:U5;11->00' , 'W':'iT:U5;11->00' , 'N':'R:U6_S_1' , 'E':'T:U6_W_0' },
{'name':'U5;11->00;sw' , 'parity':0, 'S':'L:U4_N_0' , 'W':'B:U4_E_1' , 'N':'iL:U5;11->00' , 'E':'iB:U5;11->00' },
{'name':'U5;11->00;se' , 'parity':1, 'S':'R:U4_N_0' , 'W':'iB:U5;11->00' , 'N':'iR:U5;11->00' , 'E':'B:U6_W_0' },
{'name':'U5;11->01;nw' , 'parity':1, 'S':'iL:U5;11->01' , 'W':'T:U4_E_1' , 'N':'L:U6_S_1' , 'E':'iT:U5;11->01' },
{'name':'U5;11->01;ne' , 'parity':0, 'S':'iR:U5;11->01' , 'W':'iT:U5;11->01' , 'N':'R:U6_S_1' , 'E':'T:U6_W_1' },
{'name':'U5;11->01;sw' , 'parity':0, 'S':'L:U4_N_0' , 'W':'B:U4_E_1' , 'N':'iL:U5;11->01' , 'E':'iB:U5;11->01' },
{'name':'U5;11->01;se' , 'parity':1, 'S':'R:U4_N_0' , 'W':'iB:U5;11->01' , 'N':'iR:U5;11->01' , 'E':'B:U6_W_1' },
{'name':'U5;11->10;nw' , 'parity':1, 'S':'iL:U5;11->10' , 'W':'T:U4_E_1' , 'N':'L:U6_S_1' , 'E':'iT:U5;11->10' },
{'name':'U5;11->10;ne' , 'parity':0, 'S':'iR:U5;11->10' , 'W':'iT:U5;11->10' , 'N':'R:U6_S_1' , 'E':'T:U6_W_0' },
{'name':'U5;11->10;sw' , 'parity':0, 'S':'L:U4_N_1' , 'W':'B:U4_E_1' , 'N':'iL:U5;11->10' , 'E':'iB:U5;11->10' },
{'name':'U5;11->10;se' , 'parity':1, 'S':'R:U4_N_1' , 'W':'iB:U5;11->10' , 'N':'iR:U5;11->10' , 'E':'B:U6_W_0' },
{'name':'U5;11->11;nw' , 'parity':1, 'S':'iL:U5;11->11' , 'W':'T:U4_E_1' , 'N':'L:U6_S_1' , 'E':'iT:U5;11->11' },
{'name':'U5;11->11;ne' , 'parity':0, 'S':'iR:U5;11->11' , 'W':'iT:U5;11->11' , 'N':'R:U6_S_1' , 'E':'T:U6_W_1' },
{'name':'U5;11->11;sw' , 'parity':0, 'S':'L:U4_N_1' , 'W':'B:U4_E_1' , 'N':'iL:U5;11->11' , 'E':'iB:U5;11->11' },
{'name':'U5;11->11;se' , 'parity':1, 'S':'R:U4_N_1' , 'W':'iB:U5;11->11' , 'N':'iR:U5;11->11' , 'E':'B:U6_W_1' },
{'name':'U3;00->00;nw' , 'parity':1, 'S':'iL:U3;00->00' , 'W':'T:U2_E_0' , 'N':'L:U4_S_0' , 'E':'iT:U3;00->00' },
{'name':'U3;00->00;ne' , 'parity':0, 'S':'iR:U3;00->00' , 'W':'iT:U3;00->00' , 'N':'R:U4_S_0' , 'E':'T:U4_W_0' },
{'name':'U3;00->00;sw' , 'parity':0, 'S':'L:U2_N_0' , 'W':'B:U2_E_0' , 'N':'iL:U3;00->00' , 'E':'iB:U3;00->00' },
{'name':'U3;00->00;se' , 'parity':1, 'S':'R:U2_N_0' , 'W':'iB:U3;00->00' , 'N':'iR:U3;00->00' , 'E':'B:U4_W_0' },
{'name':'U3;00->01;nw' , 'parity':1, 'S':'iL:U3;00->01' , 'W':'T:U2_E_0' , 'N':'L:U4_S_0' , 'E':'iT:U3;00->01' },
{'name':'U3;00->01;ne' , 'parity':0, 'S':'iR:U3;00->01' , 'W':'iT:U3;00->01' , 'N':'R:U4_S_0' , 'E':'T:U4_W_1' },
{'name':'U3;00->01;sw' , 'parity':0, 'S':'L:U2_N_0' , 'W':'B:U2_E_0' , 'N':'iL:U3;00->01' , 'E':'iB:U3;00->01' },
{'name':'U3;00->01;se' , 'parity':1, 'S':'R:U2_N_0' , 'W':'iB:U3;00->01' , 'N':'iR:U3;00->01' , 'E':'B:U4_W_1' },
{'name':'U3;00->10;nw' , 'parity':1, 'S':'iL:U3;00->10' , 'W':'T:U2_E_0' , 'N':'L:U4_S_0' , 'E':'iT:U3;00->10' },
{'name':'U3;00->10;ne' , 'parity':0, 'S':'iR:U3;00->10' , 'W':'iT:U3;00->10' , 'N':'R:U4_S_0' , 'E':'T:U4_W_0' },
{'name':'U3;00->10;sw' , 'parity':0, 'S':'L:U2_N_1' , 'W':'B:U2_E_0' , 'N':'iL:U3;00->10' , 'E':'iB:U3;00->10' },
{'name':'U3;00->10;se' , 'parity':1, 'S':'R:U2_N_1' , 'W':'iB:U3;00->10' , 'N':'iR:U3;00->10' , 'E':'B:U4_W_0' },
{'name':'U3;00->11;nw' , 'parity':1, 'S':'iL:U3;00->11' , 'W':'T:U2_E_0' , 'N':'L:U4_S_0' , 'E':'iT:U3;00->11' },
{'name':'U3;00->11;ne' , 'parity':0, 'S':'iR:U3;00->11' , 'W':'iT:U3;00->11' , 'N':'R:U4_S_0' , 'E':'T:U4_W_1' },
{'name':'U3;00->11;sw' , 'parity':0, 'S':'L:U2_N_1' , 'W':'B:U2_E_0' , 'N':'iL:U3;00->11' , 'E':'iB:U3;00->11' },
{'name':'U3;00->11;se' , 'parity':1, 'S':'R:U2_N_1' , 'W':'iB:U3;00->11' , 'N':'iR:U3;00->11' , 'E':'B:U4_W_1' },
{'name':'U3;01->00;nw' , 'parity':1, 'S':'iL:U3;01->00' , 'W':'T:U2_E_0' , 'N':'L:U4_S_1' , 'E':'iT:U3;01->00' },
{'name':'U3;01->00;ne' , 'parity':0, 'S':'iR:U3;01->00' , 'W':'iT:U3;01->00' , 'N':'R:U4_S_1' , 'E':'T:U4_W_0' },
{'name':'U3;01->00;sw' , 'parity':0, 'S':'L:U2_N_0' , 'W':'B:U2_E_0' , 'N':'iL:U3;01->00' , 'E':'iB:U3;01->00' },
{'name':'U3;01->00;se' , 'parity':1, 'S':'R:U2_N_0' , 'W':'iB:U3;01->00' , 'N':'iR:U3;01->00' , 'E':'B:U4_W_0' },
{'name':'U3;01->01;nw' , 'parity':1, 'S':'iL:U3;01->01' , 'W':'T:U2_E_0' , 'N':'L:U4_S_1' , 'E':'iT:U3;01->01' },
{'name':'U3;01->01;ne' , 'parity':0, 'S':'iR:U3;01->01' , 'W':'iT:U3;01->01' , 'N':'R:U4_S_1' , 'E':'T:U4_W_1' },
{'name':'U3;01->01;sw' , 'parity':0, 'S':'L:U2_N_0' , 'W':'B:U2_E_0' , 'N':'iL:U3;01->01' , 'E':'iB:U3;01->01' },
{'name':'U3;01->01;se' , 'parity':1, 'S':'R:U2_N_0' , 'W':'iB:U3;01->01' , 'N':'iR:U3;01->01' , 'E':'B:U4_W_1' },
{'name':'U3;01->10;nw' , 'parity':1, 'S':'iL:U3;01->10' , 'W':'T:U2_E_0' , 'N':'L:U4_S_1' , 'E':'iT:U3;01->10' },
{'name':'U3;01->10;ne' , 'parity':0, 'S':'iR:U3;01->10' , 'W':'iT:U3;01->10' , 'N':'R:U4_S_1' , 'E':'T:U4_W_0' },
{'name':'U3;01->10;sw' , 'parity':0, 'S':'L:U2_N_1' , 'W':'B:U2_E_0' , 'N':'iL:U3;01->10' , 'E':'iB:U3;01->10' },
{'name':'U3;01->10;se' , 'parity':1, 'S':'R:U2_N_1' , 'W':'iB:U3;01->10' , 'N':'iR:U3;01->10' , 'E':'B:U4_W_0' },
{'name':'U3;01->11;nw' , 'parity':1, 'S':'iL:U3;01->11' , 'W':'T:U2_E_0' , 'N':'L:U4_S_1' , 'E':'iT:U3;01->11' },
{'name':'U3;01->11;ne' , 'parity':0, 'S':'iR:U3;01->11' , 'W':'iT:U3;01->11' , 'N':'R:U4_S_1' , 'E':'T:U4_W_1' },
{'name':'U3;01->11;sw' , 'parity':0, 'S':'L:U2_N_1' , 'W':'B:U2_E_0' , 'N':'iL:U3;01->11' , 'E':'iB:U3;01->11' },
{'name':'U3;01->11;se' , 'parity':1, 'S':'R:U2_N_1' , 'W':'iB:U3;01->11' , 'N':'iR:U3;01->11' , 'E':'B:U4_W_1' },
{'name':'U3;10->00;nw' , 'parity':1, 'S':'iL:U3;10->00' , 'W':'T:U2_E_1' , 'N':'L:U4_S_0' , 'E':'iT:U3;10->00' },
{'name':'U3;10->00;ne' , 'parity':0, 'S':'iR:U3;10->00' , 'W':'iT:U3;10->00' , 'N':'R:U4_S_0' , 'E':'T:U4_W_0' },
{'name':'U3;10->00;sw' , 'parity':0, 'S':'L:U2_N_0' , 'W':'B:U2_E_1' , 'N':'iL:U3;10->00' , 'E':'iB:U3;10->00' },
{'name':'U3;10->00;se' , 'parity':1, 'S':'R:U2_N_0' , 'W':'iB:U3;10->00' , 'N':'iR:U3;10->00' , 'E':'B:U4_W_0' },
{'name':'U3;10->01;nw' , 'parity':1, 'S':'iL:U3;10->01' , 'W':'T:U2_E_1' , 'N':'L:U4_S_0' , 'E':'iT:U3;10->01' },
{'name':'U3;10->01;ne' , 'parity':0, 'S':'iR:U3;10->01' , 'W':'iT:U3;10->01' , 'N':'R:U4_S_0' , 'E':'T:U4_W_1' },
{'name':'U3;10->01;sw' , 'parity':0, 'S':'L:U2_N_0' , 'W':'B:U2_E_1' , 'N':'iL:U3;10->01' , 'E':'iB:U3;10->01' },
{'name':'U3;10->01;se' , 'parity':1, 'S':'R:U2_N_0' , 'W':'iB:U3;10->01' , 'N':'iR:U3;10->01' , 'E':'B:U4_W_1' },
{'name':'U3;10->10;nw' , 'parity':1, 'S':'iL:U3;10->10' , 'W':'T:U2_E_1' , 'N':'L:U4_S_0' , 'E':'iT:U3;10->10' },
{'name':'U3;10->10;ne' , 'parity':0, 'S':'iR:U3;10->10' , 'W':'iT:U3;10->10' , 'N':'R:U4_S_0' , 'E':'T:U4_W_0' },
{'name':'U3;10->10;sw' , 'parity':0, 'S':'L:U2_N_1' , 'W':'B:U2_E_1' , 'N':'iL:U3;10->10' , 'E':'iB:U3;10->10' },
{'name':'U3;10->10;se' , 'parity':1, 'S':'R:U2_N_1' , 'W':'iB:U3;10->10' , 'N':'iR:U3;10->10' , 'E':'B:U4_W_0' },
{'name':'U3;10->11;nw' , 'parity':1, 'S':'iL:U3;10->11' , 'W':'T:U2_E_1' , 'N':'L:U4_S_0' , 'E':'iT:U3;10->11' },
{'name':'U3;10->11;ne' , 'parity':0, 'S':'iR:U3;10->11' , 'W':'iT:U3;10->11' , 'N':'R:U4_S_0' , 'E':'T:U4_W_1' },
{'name':'U3;10->11;sw' , 'parity':0, 'S':'L:U2_N_1' , 'W':'B:U2_E_1' , 'N':'iL:U3;10->11' , 'E':'iB:U3;10->11' },
{'name':'U3;10->11;se' , 'parity':1, 'S':'R:U2_N_1' , 'W':'iB:U3;10->11' , 'N':'iR:U3;10->11' , 'E':'B:U4_W_1' },
{'name':'U3;11->00;nw' , 'parity':1, 'S':'iL:U3;11->00' , 'W':'T:U2_E_1' , 'N':'L:U4_S_1' , 'E':'iT:U3;11->00' },
{'name':'U3;11->00;ne' , 'parity':0, 'S':'iR:U3;11->00' , 'W':'iT:U3;11->00' , 'N':'R:U4_S_1' , 'E':'T:U4_W_0' },
{'name':'U3;11->00;sw' , 'parity':0, 'S':'L:U2_N_0' , 'W':'B:U2_E_1' , 'N':'iL:U3;11->00' , 'E':'iB:U3;11->00' },
{'name':'U3;11->00;se' , 'parity':1, 'S':'R:U2_N_0' , 'W':'iB:U3;11->00' , 'N':'iR:U3;11->00' , 'E':'B:U4_W_0' },
{'name':'U3;11->01;nw' , 'parity':1, 'S':'iL:U3;11->01' , 'W':'T:U2_E_1' , 'N':'L:U4_S_1' , 'E':'iT:U3;11->01' },
{'name':'U3;11->01;ne' , 'parity':0, 'S':'iR:U3;11->01' , 'W':'iT:U3;11->01' , 'N':'R:U4_S_1' , 'E':'T:U4_W_1' },
{'name':'U3;11->01;sw' , 'parity':0, 'S':'L:U2_N_0' , 'W':'B:U2_E_1' , 'N':'iL:U3;11->01' , 'E':'iB:U3;11->01' },
{'name':'U3;11->01;se' , 'parity':1, 'S':'R:U2_N_0' , 'W':'iB:U3;11->01' , 'N':'iR:U3;11->01' , 'E':'B:U4_W_1' },
{'name':'U3;11->10;nw' , 'parity':1, 'S':'iL:U3;11->10' , 'W':'T:U2_E_1' , 'N':'L:U4_S_1' , 'E':'iT:U3;11->10' },
{'name':'U3;11->10;ne' , 'parity':0, 'S':'iR:U3;11->10' , 'W':'iT:U3;11->10' , 'N':'R:U4_S_1' , 'E':'T:U4_W_0' },
{'name':'U3;11->10;sw' , 'parity':0, 'S':'L:U2_N_1' , 'W':'B:U2_E_1' , 'N':'iL:U3;11->10' , 'E':'iB:U3;11->10' },
{'name':'U3;11->10;se' , 'parity':1, 'S':'R:U2_N_1' , 'W':'iB:U3;11->10' , 'N':'iR:U3;11->10' , 'E':'B:U4_W_0' },
{'name':'U3;11->11;nw' , 'parity':1, 'S':'iL:U3;11->11' , 'W':'T:U2_E_1' , 'N':'L:U4_S_1' , 'E':'iT:U3;11->11' },
{'name':'U3;11->11;ne' , 'parity':0, 'S':'iR:U3;11->11' , 'W':'iT:U3;11->11' , 'N':'R:U4_S_1' , 'E':'T:U4_W_1' },
{'name':'U3;11->11;sw' , 'parity':0, 'S':'L:U2_N_1' , 'W':'B:U2_E_1' , 'N':'iL:U3;11->11' , 'E':'iB:U3;11->11' },
{'name':'U3;11->11;se' , 'parity':1, 'S':'R:U2_N_1' , 'W':'iB:U3;11->11' , 'N':'iR:U3;11->11' , 'E':'B:U4_W_1' },
{'name':'U8;0_->0_;nw' , 'parity':1, 'S':'iL:U8;0_->0_' , 'W':'T:U8_W_0' , 'N':'L:U8_N' , 'E':'iT:U8;0_->0_' },
{'name':'U8;0_->0_;ne' , 'parity':0, 'S':'iR:U8;0_->0_' , 'W':'iT:U8;0_->0_' , 'N':'R:U8_N' , 'E':'T:U8_E' },
{'name':'U8;0_->0_;sw' , 'parity':0, 'S':'L:U8_S_0' , 'W':'B:U8_W_0' , 'N':'iL:U8;0_->0_' , 'E':'iB:U8;0_->0_' },
{'name':'U8;0_->0_;se' , 'parity':1, 'S':'R:U8_S_0' , 'W':'iB:U8;0_->0_' , 'N':'iR:U8;0_->0_' , 'E':'B:U8_E' },
{'name':'U8;0_->1_;nw' , 'parity':1, 'S':'iL:U8;0_->1_' , 'W':'T:U8_W_0' , 'N':'L:U8_N' , 'E':'iT:U8;0_->1_' },
{'name':'U8;0_->1_;ne' , 'parity':0, 'S':'iR:U8;0_->1_' , 'W':'iT:U8;0_->1_' , 'N':'R:U8_N' , 'E':'T:U8_E' },
{'name':'U8;0_->1_;sw' , 'parity':0, 'S':'L:U8_S_1' , 'W':'B:U8_W_0' , 'N':'iL:U8;0_->1_' , 'E':'iB:U8;0_->1_' },
{'name':'U8;0_->1_;se' , 'parity':1, 'S':'R:U8_S_1' , 'W':'iB:U8;0_->1_' , 'N':'iR:U8;0_->1_' , 'E':'B:U8_E' },
{'name':'U8;1_->0_;nw' , 'parity':1, 'S':'iL:U8;1_->0_' , 'W':'T:U8_W_1' , 'N':'L:U8_N' , 'E':'iT:U8;1_->0_' },
{'name':'U8;1_->0_;ne' , 'parity':0, 'S':'iR:U8;1_->0_' , 'W':'iT:U8;1_->0_' , 'N':'R:U8_N' , 'E':'T:U8_E' },
{'name':'U8;1_->0_;sw' , 'parity':0, 'S':'L:U8_S_0' , 'W':'B:U8_W_1' , 'N':'iL:U8;1_->0_' , 'E':'iB:U8;1_->0_' },
{'name':'U8;1_->0_;se' , 'parity':1, 'S':'R:U8_S_0' , 'W':'iB:U8;1_->0_' , 'N':'iR:U8;1_->0_' , 'E':'B:U8_E' },
{'name':'U8;1_->1_;nw' , 'parity':1, 'S':'iL:U8;1_->1_' , 'W':'T:U8_W_1' , 'N':'L:U8_N' , 'E':'iT:U8;1_->1_' },
{'name':'U8;1_->1_;ne' , 'parity':0, 'S':'iR:U8;1_->1_' , 'W':'iT:U8;1_->1_' , 'N':'R:U8_N' , 'E':'T:U8_E' },
{'name':'U8;1_->1_;sw' , 'parity':0, 'S':'L:U8_S_1' , 'W':'B:U8_W_1' , 'N':'iL:U8;1_->1_' , 'E':'iB:U8;1_->1_' },
{'name':'U8;1_->1_;se' , 'parity':1, 'S':'R:U8_S_1' , 'W':'iB:U8;1_->1_' , 'N':'iR:U8;1_->1_' , 'E':'B:U8_E' },
{'name':'U2;_0->_0;nw' , 'parity':1, 'S':'iL:U2;_0->_0' , 'W':'T:U2_W' , 'N':'L:U2_N_0' , 'E':'iT:U2;_0->_0' },
{'name':'U2;_0->_0;ne' , 'parity':0, 'S':'iR:U2;_0->_0' , 'W':'iT:U2;_0->_0' , 'N':'R:U2_N_0' , 'E':'T:U2_E_0' },
{'name':'U2;_0->_0;sw' , 'parity':0, 'S':'L:U2_S' , 'W':'B:U2_W' , 'N':'iL:U2;_0->_0' , 'E':'iB:U2;_0->_0' },
{'name':'U2;_0->_0;se' , 'parity':1, 'S':'R:U2_S' , 'W':'iB:U2;_0->_0' , 'N':'iR:U2;_0->_0' , 'E':'B:U2_E_0' },
{'name':'U2;_0->_1;nw' , 'parity':1, 'S':'iL:U2;_0->_1' , 'W':'T:U2_W' , 'N':'L:U2_N_0' , 'E':'iT:U2;_0->_1' },
{'name':'U2;_0->_1;ne' , 'parity':0, 'S':'iR:U2;_0->_1' , 'W':'iT:U2;_0->_1' , 'N':'R:U2_N_0' , 'E':'T:U2_E_1' },
{'name':'U2;_0->_1;sw' , 'parity':0, 'S':'L:U2_S' , 'W':'B:U2_W' , 'N':'iL:U2;_0->_1' , 'E':'iB:U2;_0->_1' },
{'name':'U2;_0->_1;se' , 'parity':1, 'S':'R:U2_S' , 'W':'iB:U2;_0->_1' , 'N':'iR:U2;_0->_1' , 'E':'B:U2_E_1' },
{'name':'U2;_1->_0;nw' , 'parity':1, 'S':'iL:U2;_1->_0' , 'W':'T:U2_W' , 'N':'L:U2_N_1' , 'E':'iT:U2;_1->_0' },
{'name':'U2;_1->_0;ne' , 'parity':0, 'S':'iR:U2;_1->_0' , 'W':'iT:U2;_1->_0' , 'N':'R:U2_N_1' , 'E':'T:U2_E_0' | |
# -*- coding: utf-8 -*-
# Copyright 2019 Cohesity Inc.
import logging
from cohesity_management_sdk.api_helper import APIHelper
from cohesity_management_sdk.configuration import Configuration
from cohesity_management_sdk.controllers.base_controller import BaseController
from cohesity_management_sdk.http.auth.auth_manager import AuthManager
from cohesity_management_sdk.models.active_alerts_stats import ActiveAlertsStats
from cohesity_management_sdk.models.get_consumer_stats_result import GetConsumerStatsResult
from cohesity_management_sdk.models.file_distribution_stats import FileDistributionStats
from cohesity_management_sdk.models.protection_runs_stats import ProtectionRunsStats
from cohesity_management_sdk.models.last_protection_run_stats import LastProtectionRunStats
from cohesity_management_sdk.models.protected_objects_summary import ProtectedObjectsSummary
from cohesity_management_sdk.models.restore_stats import RestoreStats
from cohesity_management_sdk.models.storage_stats import StorageStats
from cohesity_management_sdk.models.get_tenant_stats_result import GetTenantStatsResult
from cohesity_management_sdk.models.vault_stats import VaultStats
from cohesity_management_sdk.models.vault_provider_stats_info import VaultProviderStatsInfo
from cohesity_management_sdk.models.vault_run_stats_summary import VaultRunStatsSummary
from cohesity_management_sdk.models.get_view_box_stats_result import GetViewBoxStatsResult
from cohesity_management_sdk.models.view_stats_snapshot import ViewStatsSnapshot
from cohesity_management_sdk.models.view_protocol_stats import ViewProtocolStats
from cohesity_management_sdk.exceptions.error_exception import ErrorException
from cohesity_management_sdk.exceptions.request_error_error_exception import RequestErrorErrorException
class StatsController(BaseController):
"""A Controller to access Endpoints in the cohesity_management_sdk API."""
    def __init__(self, client=None, call_back=None):
        # Delegate client/callback wiring to BaseController, then attach a
        # module-scoped logger that every endpoint method uses for tracing.
        super(StatsController, self).__init__(client, call_back)
        self.logger = logging.getLogger(__name__)
def get_active_alerts_stats(self,
start_time_usecs,
end_time_usecs):
"""Does a GET request to /public/stats/alerts.
Compute the statistics on the active Alerts generated on the cluster
based on the provided time interval.
Args:
start_time_usecs (long|int): Specifies the start time Unix time
epoch in microseconds from which the active alerts stats are
computed.
end_time_usecs (long|int): Specifies the end time Unix time epoch
in microseconds to which the active alerts stats are
computed.
Returns:
ActiveAlertsStats: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
try:
self.logger.info('get_active_alerts_stats called.')
# Validate required parameters
self.logger.info('Validating required parameters for get_active_alerts_stats.')
self.validate_parameters(start_time_usecs=start_time_usecs,
end_time_usecs=end_time_usecs)
# Prepare query URL
self.logger.info('Preparing query URL for get_active_alerts_stats.')
_url_path = '/public/stats/alerts'
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_parameters = {
'startTimeUsecs': start_time_usecs,
'endTimeUsecs': end_time_usecs
}
_query_builder = APIHelper.append_url_with_query_parameters(_query_builder,
_query_parameters, Configuration.array_serialization)
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
self.logger.info('Preparing headers for get_active_alerts_stats.')
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
self.logger.info('Preparing and executing request for get_active_alerts_stats.')
_request = self.http_client.get(_query_url, headers=_headers)
AuthManager.apply(_request)
_context = self.execute_request(_request, name = 'get_active_alerts_stats')
# Endpoint and global error handling using HTTP status codes.
self.logger.info('Validating response for get_active_alerts_stats.')
if _context.response.status_code == 0:
raise ErrorException('Error', _context)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, ActiveAlertsStats.from_dictionary)
except Exception as e:
self.logger.error(e, exc_info = True)
raise
def get_consumer_stats(self,
consumer_type=None,
max_count=None,
cookie=None,
consumer_id_list=None,
view_boxes_id_list=None,
organizations_id_list=None):
"""Does a GET request to /public/stats/consumers.
Gets the statistics of consumers.
Args:
consumer_type (ConsumerTypeGetConsumerStatsEnum, optional):
Specifies the consumer type. Type of the consumer can be one
of the following three, 'kViews', indicates the stats info of
Views used per organization (tenant) per view box (storage
domain). 'kProtectionRuns', indicates the stats info of
Protection Runs used per organization (tenant) per view box
(storage domain). 'kReplicationRuns', indicates the stats info
of Replication In used per organization (tenant) per view box
(storage domain).
max_count (long|int, optional): Specifies a limit on the number of
stats groups returned.
cookie (string, optional): Specifies the opaque string returned in
the previous response. If this is set, next set of active
opens just after the previous response are returned. If this
is not set, first set of active opens are returned.
consumer_id_list (list of long|int, optional): Specifies a list of
consumer ids.
view_boxes_id_list (list of long|int, optional): Specifies a list
of view boxes (storage domain) id.
organizations_id_list (list of string, optional): Specifies a list
of organizations (tenant) id.
Returns:
GetConsumerStatsResult: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
try:
self.logger.info('get_consumer_stats called.')
# Prepare query URL
self.logger.info('Preparing query URL for get_consumer_stats.')
_url_path = '/public/stats/consumers'
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_parameters = {
'consumerType': consumer_type,
'maxCount': max_count,
'cookie': cookie,
'consumerIdList': consumer_id_list,
'viewBoxesIdList': view_boxes_id_list,
'organizationsIdList': organizations_id_list
}
_query_builder = APIHelper.append_url_with_query_parameters(_query_builder,
_query_parameters, Configuration.array_serialization)
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
self.logger.info('Preparing headers for get_consumer_stats.')
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
self.logger.info('Preparing and executing request for get_consumer_stats.')
_request = self.http_client.get(_query_url, headers=_headers)
AuthManager.apply(_request)
_context = self.execute_request(_request, name = 'get_consumer_stats')
# Endpoint and global error handling using HTTP status codes.
self.logger.info('Validating response for get_consumer_stats.')
if _context.response.status_code == 0:
raise RequestErrorErrorException('Error', _context)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, GetConsumerStatsResult.from_dictionary)
except Exception as e:
self.logger.error(e, exc_info = True)
raise
def get_file_distribution_stats(self,
entity_type):
"""Does a GET request to /public/stats/files.
Compute the file distribution statistics on a given entity like
cluster or storage domain.
Args:
entity_type (EntityTypeGetFileDistributionStatsEnum): Specifies
the entity type on which file distribution stats are
computed.
Returns:
list of FileDistributionStats: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
try:
self.logger.info('get_file_distribution_stats called.')
# Validate required parameters
self.logger.info('Validating required parameters for get_file_distribution_stats.')
self.validate_parameters(entity_type=entity_type)
# Prepare query URL
self.logger.info('Preparing query URL for get_file_distribution_stats.')
_url_path = '/public/stats/files'
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_parameters = {
'entityType': entity_type
}
_query_builder = APIHelper.append_url_with_query_parameters(_query_builder,
_query_parameters, Configuration.array_serialization)
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
self.logger.info('Preparing headers for get_file_distribution_stats.')
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
self.logger.info('Preparing and executing request for get_file_distribution_stats.')
_request = self.http_client.get(_query_url, headers=_headers)
AuthManager.apply(_request)
_context = self.execute_request(_request, name = 'get_file_distribution_stats')
# Endpoint and global error handling using HTTP status codes.
self.logger.info('Validating response for get_file_distribution_stats.')
if _context.response.status_code == 0:
raise ErrorException('Error', _context)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, FileDistributionStats.from_dictionary)
except Exception as e:
self.logger.error(e, exc_info = True)
raise
def get_protection_runs_stats(self,
status,
start_time_usecs,
end_time_usecs):
"""Does a GET request to /public/stats/protectionRuns.
Compute the statistics of the Protection Runs based on the input
filters. This endpoint provides a snapshot of count of Protection Runs
of a specified status on a specified time interval.
Args:
status (StatusGetProtectionRunsStatsEnum): Specifies the
Protection Run status for which stats has to be computed.
start_time_usecs (long|int): Specifies the start time in Unix
timestamp epoch in microseconds where the end time of the
Protection Run is greater than the specified value.
end_time_usecs (long|int): Specifies the end time in Unix
timestamp epoch in microseconds where the end time of the
Protection Run is lesser than the specified value.
Returns:
ProtectionRunsStats: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
try:
self.logger.info('get_protection_runs_stats called.')
# Validate required parameters
self.logger.info('Validating required parameters for get_protection_runs_stats.')
self.validate_parameters(status=status,
start_time_usecs=start_time_usecs,
end_time_usecs=end_time_usecs)
# Prepare query URL
self.logger.info('Preparing query URL for get_protection_runs_stats.')
_url_path = '/public/stats/protectionRuns'
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_parameters = {
'status': status,
'startTimeUsecs': start_time_usecs,
'endTimeUsecs': end_time_usecs
}
_query_builder = APIHelper.append_url_with_query_parameters(_query_builder,
_query_parameters, Configuration.array_serialization)
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
self.logger.info('Preparing headers for get_protection_runs_stats.')
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
self.logger.info('Preparing and executing request for get_protection_runs_stats.')
_request = self.http_client.get(_query_url, headers=_headers)
AuthManager.apply(_request)
_context = self.execute_request(_request, name = 'get_protection_runs_stats')
# Endpoint and global error handling using HTTP status codes.
self.logger.info('Validating response for get_protection_runs_stats.')
if _context.response.status_code == 0:
raise ErrorException('Error', _context)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, ProtectionRunsStats.from_dictionary)
except Exception as e:
self.logger.error(e, exc_info = True)
raise
def get_last_protection_run_stats(self):
"""Does a GET request to /public/stats/protectionRuns/lastRun.
Compute stats on last Protection Run for every job.
Returns:
LastProtectionRunStats: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
try:
self.logger.info('get_last_protection_run_stats called.')
# Prepare query URL
self.logger.info('Preparing query URL for get_last_protection_run_stats.')
_url_path = '/public/stats/protectionRuns/lastRun'
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
self.logger.info('Preparing headers for get_last_protection_run_stats.')
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
self.logger.info('Preparing and executing request for get_last_protection_run_stats.')
_request = self.http_client.get(_query_url, headers=_headers)
AuthManager.apply(_request)
_context = self.execute_request(_request, name = 'get_last_protection_run_stats')
# Endpoint and global error handling using HTTP status codes.
self.logger.info('Validating response for get_last_protection_run_stats.')
if _context.response.status_code == 0:
raise ErrorException('Error', _context)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, | |
True})
self.assertEqual({'another_value': 2}, response.body)
self.assertEqual(1, stub_test_action_1.call_count)
self.assertEqual({}, stub_test_action_1.call_body)
self.assertEqual(1, stub_test_action_2.call_count)
self.assertEqual({'input_attribute': True}, stub_test_action_2.call_body)
stub_test_action_1.assert_called_once_with({})
stub_test_action_2.assert_called_once_with({'input_attribute': True})
    if not six.PY2:
        # In Python 2, PyTest's parametrize cannot be used with patchings like mock.patch and stub_action
        @pytest.mark.parametrize(('input_arg',), (('foo',), ('bar',)))
        @stub_action('test_service', 'test_action_2')
        @stub_action('test_service', 'test_action_1')
        def test_two_stubs_same_service_as_decorator_with_pytest_parametrize_before(
            self,
            stub_test_action_1,
            stub_test_action_2,
            input_arg,
        ):
            # Exercises two stubbed actions on one service while pytest.mark.parametrize is the
            # OUTERMOST decorator; the stub arguments arrive before the parametrized argument.
            stub_test_action_1.return_value = {'value': 1}
            stub_test_action_2.return_value = {'another_value': 2}
            response = self.client.call_action('test_service', 'test_action_1')
            self.assertEqual({'value': 1}, response.body)
            response = self.client.call_action('test_service', 'test_action_2', {'input_attribute': True})
            self.assertEqual({'another_value': 2}, response.body)
            # Each stub should have been hit exactly once, with the body that was sent to it.
            self.assertEqual(1, stub_test_action_1.call_count)
            self.assertEqual({}, stub_test_action_1.call_body)
            self.assertEqual(1, stub_test_action_2.call_count)
            self.assertEqual({'input_attribute': True}, stub_test_action_2.call_body)
            stub_test_action_1.assert_called_once_with({})
            stub_test_action_2.assert_called_once_with({'input_attribute': True})
            # Confirms the parametrized value was actually injected alongside the stubs.
            assert input_arg in ('foo', 'bar')
    @stub_action('test_service', 'test_action_2')
    @stub_action('test_service', 'test_action_1')
    @pytest.mark.parametrize(('input_arg',), (('foo',), ('bar',)))
    def test_two_stubs_same_service_as_decorator_with_pytest_parametrize_after(
        self,
        stub_test_action_1,
        stub_test_action_2,
        input_arg,
    ):
        # Same scenario as the "_before" variant, but the pytest.mark.parametrize mark is the
        # INNERMOST decorator (closest to the function); the stubs still inject correctly.
        stub_test_action_1.return_value = {'value': 1}
        stub_test_action_2.return_value = {'another_value': 2}
        response = self.client.call_action('test_service', 'test_action_1')
        self.assertEqual({'value': 1}, response.body)
        response = self.client.call_action('test_service', 'test_action_2', {'input_attribute': True})
        self.assertEqual({'another_value': 2}, response.body)
        # Each stub should have been hit exactly once, with the body that was sent to it.
        self.assertEqual(1, stub_test_action_1.call_count)
        self.assertEqual({}, stub_test_action_1.call_body)
        self.assertEqual(1, stub_test_action_2.call_count)
        self.assertEqual({'input_attribute': True}, stub_test_action_2.call_body)
        stub_test_action_1.assert_called_once_with({})
        stub_test_action_2.assert_called_once_with({'input_attribute': True})
        # Confirms the parametrized value was actually injected alongside the stubs.
        assert input_arg in ('foo', 'bar')
    @parameterized.expand((('foo',), ('bar',)))
    @stub_action('test_service', 'test_action_2')
    @stub_action('test_service', 'test_action_1')
    def test_two_stubs_same_service_as_decorator_with_3rd_party_parametrize(
        self,
        input_arg,
        stub_test_action_1,
        stub_test_action_2,
    ):
        # Same scenario, parametrized with the third-party `parameterized` library instead of
        # pytest; note the parametrized argument arrives BEFORE the stub arguments here.
        stub_test_action_1.return_value = {'value': 1}
        stub_test_action_2.return_value = {'another_value': 2}
        response = self.client.call_action('test_service', 'test_action_1')
        self.assertEqual({'value': 1}, response.body)
        response = self.client.call_action('test_service', 'test_action_2', {'input_attribute': True})
        self.assertEqual({'another_value': 2}, response.body)
        # Each stub should have been hit exactly once, with the body that was sent to it.
        self.assertEqual(1, stub_test_action_1.call_count)
        self.assertEqual({}, stub_test_action_1.call_body)
        self.assertEqual(1, stub_test_action_2.call_count)
        self.assertEqual({'input_attribute': True}, stub_test_action_2.call_body)
        stub_test_action_1.assert_called_once_with({})
        stub_test_action_2.assert_called_once_with({'input_attribute': True})
        # Confirms the parametrized value was actually injected alongside the stubs.
        assert input_arg in ('foo', 'bar')
@stub_action('test_service', 'test_action_2')
@stub_action('test_service', 'test_action_1')
def _two_stubs_external_method_get_response(self, another_value, stub_test_action_1, stub_test_action_2):
stub_test_action_1.return_value = {'value': -10}
stub_test_action_2.return_value = {'another_value': another_value}
try:
return (
self.client.call_action('test_service', 'test_action_1'),
self.client.call_action('test_service', 'test_action_2', {'input_attribute': False})
)
finally:
self.assertEqual(1, stub_test_action_1.call_count)
self.assertEqual({}, stub_test_action_1.call_body)
self.assertEqual(1, stub_test_action_2.call_count)
self.assertEqual({'input_attribute': False}, stub_test_action_2.call_body)
stub_test_action_1.assert_called_once_with({})
stub_test_action_2.assert_called_once_with({'input_attribute': False})
@pytest.mark.parametrize(('value2', ), ((-15, ), (-20, )))
def test_two_stubs_same_service_as_decorated_external_method(self, value2):
response1, response2 = self._two_stubs_external_method_get_response(value2)
self.assertEqual({'value': -10}, response1.body)
self.assertEqual({'another_value': value2}, response2.body)
@stub_action('test_service', 'test_action_2')
@stub_action('test_service', 'test_action_1')
def test_two_stubs_same_service_as_decorator_multiple_calls_to_one(self, stub_test_action_1, stub_test_action_2):
stub_test_action_1.return_value = {'value': 1}
stub_test_action_2.side_effect = ({'another_value': 2}, {'third_value': 3})
response = self.client.call_action('test_service', 'test_action_1')
self.assertEqual({'value': 1}, response.body)
response = self.client.call_action('test_service', 'test_action_2', {'input_attribute': True})
self.assertEqual({'another_value': 2}, response.body)
response = self.client.call_action('test_service', 'test_action_2', {'another_attribute': False})
self.assertEqual({'third_value': 3}, response.body)
self.assertEqual(1, stub_test_action_1.call_count)
self.assertEqual({}, stub_test_action_1.call_body)
self.assertEqual(2, stub_test_action_2.call_count)
self.assertEqual({'another_attribute': False}, stub_test_action_2.call_body)
self.assertEqual(({'input_attribute': True}, {'another_attribute': False}), stub_test_action_2.call_bodies)
stub_test_action_1.assert_called_once_with({})
stub_test_action_2.assert_has_calls([
mock.call({'input_attribute': True}),
mock.call({'another_attribute': False}),
])
def test_stub_action_with_side_effect_mixed_exceptions_and_bodies_as_context_manager(self):
with stub_action('foo', 'bar', side_effect=[MessageReceiveTimeout('No message received'), {'good': 'yes'}]):
with pytest.raises(MessageReceiveTimeout):
self.client.call_action('foo', 'bar')
response = self.client.call_action('foo', 'bar')
assert response.body == {'good': 'yes'}
@stub_action('foo', 'bar')
def test_stub_action_with_side_effect_mixed_exceptions_and_bodies_as_decorator(self, stub_foo_bar):
stub_foo_bar.side_effect = [MessageReceiveTimeout('No message received'), {'good': 'yes'}]
with pytest.raises(MessageReceiveTimeout):
self.client.call_action('foo', 'bar')
response = self.client.call_action('foo', 'bar')
assert response.body == {'good': 'yes'}
@stub_action('test_service', 'test_action_1')
def test_two_stubs_same_service_split(self, stub_test_action_1):
stub_test_action_1.return_value = {'value': 1}
with stub_action('test_service', 'test_action_2') as stub_test_action_2:
stub_test_action_2.return_value = {'another_value': 2}
response = self.client.call_action('test_service', 'test_action_1')
self.assertEqual({'value': 1}, response.body)
response = self.client.call_action('test_service', 'test_action_2', {'input_attribute': True})
self.assertEqual({'another_value': 2}, response.body)
self.assertEqual(1, stub_test_action_1.call_count)
self.assertEqual({}, stub_test_action_1.call_body)
self.assertEqual(1, stub_test_action_2.call_count)
self.assertEqual({'input_attribute': True}, stub_test_action_2.call_body)
@stub_action('test_another_service', 'test_action_2')
@stub_action('test_service', 'test_action_1')
def test_two_stubs_different_services_as_decorator(self, stub_test_action_1, stub_test_action_2):
stub_test_action_1.return_value = {'value': 1}
stub_test_action_2.return_value = {'another_value': 2}
response = self.client.call_action('test_service', 'test_action_1')
self.assertEqual({'value': 1}, response.body)
response = self.client.call_action('test_another_service', 'test_action_2', {'input_attribute': True})
self.assertEqual({'another_value': 2}, response.body)
self.assertEqual(1, stub_test_action_1.call_count)
self.assertEqual({}, stub_test_action_1.call_body)
self.assertEqual(1, stub_test_action_2.call_count)
self.assertEqual({'input_attribute': True}, stub_test_action_2.call_body)
@stub_action('test_service', 'test_action_1')
def test_two_stubs_different_services_split(self, stub_test_action_1):
stub_test_action_1.return_value = {'value': 1}
with stub_action('test_another_service', 'test_action_2') as stub_test_action_2:
stub_test_action_2.return_value = {'another_value': 2}
response = self.client.call_action('test_service', 'test_action_1')
self.assertEqual({'value': 1}, response.body)
response = self.client.call_action('test_another_service', 'test_action_2', {'input_attribute': True})
self.assertEqual({'another_value': 2}, response.body)
self.assertEqual(1, stub_test_action_1.call_count)
self.assertEqual({}, stub_test_action_1.call_body)
self.assertEqual(1, stub_test_action_2.call_count)
self.assertEqual({'input_attribute': True}, stub_test_action_2.call_body)
@stub_action('test_service', 'test_action_1', body={'value': 1})
def test_one_stub_as_decorator_with_real_call_handled(self, stub_test_action_1):
response = self.client.call_action('test_service', 'test_action_1')
self.assertEqual(response.body, {'value': 1})
response = self.secondary_stub_client.call_action('cat', 'meow')
self.assertEqual({'type': 'squeak'}, response.body)
response = self.secondary_stub_client.call_action('dog', 'bark')
self.assertEqual({'sound': 'woof'}, response.body)
self.assertEqual(1, stub_test_action_1.call_count)
self.assertEqual({}, stub_test_action_1.call_body)
def test_one_stub_as_context_manager_with_real_call_handled(self):
with stub_action('test_service', 'test_action_1', body={'value': 1}) as stub_test_action_1:
response = self.client.call_action('test_service', 'test_action_1')
self.assertEqual(response.body, {'value': 1})
response = self.secondary_stub_client.call_action('cat', 'meow')
self.assertEqual({'type': 'squeak'}, response.body)
response = self.secondary_stub_client.call_action('dog', 'bark')
self.assertEqual({'sound': 'woof'}, response.body)
self.assertEqual(1, stub_test_action_1.call_count)
self.assertEqual({}, stub_test_action_1.call_body)
@stub_action('test_service', 'test_action_2')
@mock.patch(__name__ + '._test_function', return_value=3)
def test_as_decorator_with_patch_before(self, mock_randint, stub_test_action_2):
stub_test_action_2.return_value = {'value': 99}
response = self.client.call_actions(
'test_service',
[ActionRequest(action='test_action_1'), ActionRequest(action='test_action_2')],
)
self.assertEqual(2, len(response.actions))
self.assertEqual({'value': 3}, response.actions[0].body)
self.assertEqual({'value': 99}, response.actions[1].body)
self.assertEqual(1, stub_test_action_2.call_count)
self.assertEqual({}, stub_test_action_2.call_body)
mock_randint.assert_called_once_with(0, 99)
@mock.patch(__name__ + '._test_function', return_value=7)
@stub_action('test_service', 'test_action_2')
def test_as_decorator_with_patch_after(self, stub_test_action_2, mock_randint):
stub_test_action_2.side_effect = ({'value': 122}, {'also': 157})
response = self.client.call_actions(
'test_service',
[{'action': 'test_action_1'}, {'action': 'test_action_2'}, {'action': 'test_action_2'}],
)
self.assertEqual(3, len(response.actions))
self.assertEqual({'value': 7}, response.actions[0].body)
self.assertEqual({'value': 122}, response.actions[1].body)
self.assertEqual({'also': 157}, response.actions[2].body)
self.assertEqual(2, stub_test_action_2.call_count)
self.assertEqual(({}, {}), stub_test_action_2.call_bodies)
stub_test_action_2.assert_has_calls([mock.call({}), mock.call({})])
mock_randint.assert_called_once_with(0, 99)
def test_using_start_stop(self):
action_stubber = stub_action('test_service', 'test_action_1')
stubbed_action = action_stubber.start()
stubbed_action.return_value = {'what about': 'this'}
response = self.client.call_action('test_service', 'test_action_1', {'burton': 'guster', 'sean': 'spencer'})
self.assertEqual({'what about': 'this'}, response.body)
self.assertEqual(1, stubbed_action.call_count)
self.assertEqual({'burton': 'guster', 'sean': 'spencer'}, stubbed_action.call_body)
stubbed_action.assert_called_once_with({'burton': 'guster', 'sean': 'spencer'})
action_stubber.stop()
@stub_action('test_service', 'test_action_2', errors=[
{'code': 'BAD_FOO', 'field': 'foo', 'message': 'Nope'},
])
def test_mock_action_with_error_raises_exception(self, stub_test_action_2):
with self.assertRaises(Client.CallActionError) as e:
self.client.call_action('test_service', 'test_action_2', {'a': 'body'})
self.assertEqual('BAD_FOO', e.exception.actions[0].errors[0].code)
self.assertEqual('foo', e.exception.actions[0].errors[0].field)
self.assertEqual('Nope', e.exception.actions[0].errors[0].message)
self.assertEqual(1, stub_test_action_2.call_count)
self.assertEqual({'a': 'body'}, stub_test_action_2.call_body)
stub_test_action_2.assert_called_once_with({'a': 'body'})
@stub_test_action()
def test_stub_action_with_side_effect_callback(self, _stub_test_action):
response = self.client.call_action('test_service', 'test_action', body={'id': 1, 'type': 'user'})
self.assertEqual(response.body, {'id': 1, 'type': 'user'})
response = self.client.call_action('test_service', 'test_action', body={'id': 2, 'type': 'admin'})
self.assertEqual(response.body, {'id': 2, 'type': 'admin', 'extra': 'data'})
@stub_test_action(add_extra=False)
def test_stub_action_with_side_effect_callback_and_param(self, _stub_test_action):
response = self.client.call_action('test_service', 'test_action', body={'id': 1, 'type': 'user'})
self.assertEqual(response.body, {'id': 1, 'type': 'user'})
response = self.client.call_action('test_service', 'test_action', body={'id': 2, 'type': 'admin'})
self.assertEqual(response.body, {'id': 2, 'type': 'admin'})
def test_stub_action_with_side_effect_callback_in_context_manager(self):
with stub_test_action():
response = self.client.call_action('test_service', 'test_action', body={'id': 1, 'type': 'user'})
self.assertEqual(response.body, {'id': 1, 'type': 'user'})
with stub_test_action():
response = self.client.call_action('test_service', 'test_action', body={'id': 2, 'type': 'admin'})
self.assertEqual(response.body, {'id': 2, 'type': 'admin', 'extra': 'data'})
def test_stub_action_with_side_effect_callback_in_context_manager_and_param(self):
with stub_test_action(add_extra=False):
response = self.client.call_action('test_service', 'test_action', body={'id': 1, 'type': 'user'})
self.assertEqual(response.body, {'id': 1, 'type': 'user'})
with stub_test_action(add_extra=False):
response = self.client.call_action('test_service', 'test_action', body={'id': 2, 'type': 'admin'})
self.assertEqual(response.body, {'id': 2, 'type': 'admin'})
@stub_action(
'test_service',
'test_action_2',
side_effect=ActionError(errors=[Error(code='BAR_BAD', field='bar', message='Uh-uh')]),
)
def test_stub_action_with_error_side_effect_raises_exception(self, stub_test_action_2):
with self.assertRaises(Client.CallActionError) as e:
self.client.call_action('test_service', 'test_action_2', {'a': 'body'})
self.assertEqual('BAR_BAD', e.exception.actions[0].errors[0].code)
self.assertEqual('bar', e.exception.actions[0].errors[0].field)
self.assertEqual('Uh-uh', e.exception.actions[0].errors[0].message)
self.assertEqual(1, stub_test_action_2.call_count)
self.assertEqual({'a': 'body'}, stub_test_action_2.call_body)
stub_test_action_2.assert_called_once_with({'a': 'body'})
@stub_action(
'test_service',
'test_action_2',
side_effect=JobError(errors=[Error(code='BAR_BAD_JOB', message='Uh-uh job')]),
)
def test_stub_action_with_job_error_side_effect_raises_job_error_exception(self, stub_test_action_2):
with self.assertRaises(Client.JobError) as e:
self.client.call_action('test_service', 'test_action_2', {'a': 'body'})
self.assertEqual('BAR_BAD_JOB', e.exception.errors[0].code)
self.assertIsNone(e.exception.errors[0].field)
self.assertEqual('Uh-uh job', e.exception.errors[0].message)
self.assertEqual(1, stub_test_action_2.call_count)
self.assertEqual({'a': 'body'}, stub_test_action_2.call_body)
stub_test_action_2.assert_called_once_with({'a': 'body'})
@stub_action('test_service', 'test_action_2')
def test_mock_action_with_error_side_effect_raises_exception(self, stub_test_action_2):
stub_test_action_2.side_effect = ActionError(errors=[Error(code='BAR_BAD', field='bar', message='Uh-uh')])
with self.assertRaises(Client.CallActionError) as e:
self.client.call_action('test_service', 'test_action_2', {'a': 'body'})
self.assertEqual('BAR_BAD', e.exception.actions[0].errors[0].code)
self.assertEqual('bar', e.exception.actions[0].errors[0].field)
self.assertEqual('Uh-uh', e.exception.actions[0].errors[0].message)
self.assertEqual(1, stub_test_action_2.call_count)
self.assertEqual({'a': 'body'}, stub_test_action_2.call_body)
stub_test_action_2.assert_called_once_with({'a': 'body'})
@stub_action('test_service', 'test_action_2')
def test_mock_action_with_job_error_side_effect_raises_job_error_exception(self, stub_test_action_2):
stub_test_action_2.side_effect = JobError(errors=[Error(code='BAR_BAD_JOB', message='Uh-uh job')])
with self.assertRaises(Client.JobError) as e:
self.client.call_action('test_service', 'test_action_2', {'a': 'body'})
self.assertEqual('BAR_BAD_JOB', e.exception.errors[0].code)
self.assertIsNone(e.exception.errors[0].field)
self.assertEqual('Uh-uh job', e.exception.errors[0].message)
self.assertEqual(1, stub_test_action_2.call_count)
self.assertEqual({'a': 'body'}, stub_test_action_2.call_body)
stub_test_action_2.assert_called_once_with({'a': 'body'})
@stub_action('test_service', 'test_action_2')
def test_mock_action_with_job_error_response_raises_job_error_exception(self, stub_test_action_2):
stub_test_action_2.return_value = JobResponse(errors=[Error(code='BAR_BAD_JOB', message='Uh-uh job')])
with self.assertRaises(Client.JobError) as e:
self.client.call_action('test_service', 'test_action_2', {'a': 'body'})
self.assertEqual('BAR_BAD_JOB', e.exception.errors[0].code)
self.assertIsNone(e.exception.errors[0].field)
self.assertEqual('Uh-uh job', e.exception.errors[0].message)
self.assertEqual(1, stub_test_action_2.call_count)
self.assertEqual({'a': 'body'}, stub_test_action_2.call_body)
stub_test_action_2.assert_called_once_with({'a': 'body'})
@stub_action('test_service', 'test_action_2', errors=[
{'code': 'INVALID_BAR', 'message': 'A bad message'},
])
def test_multiple_actions_stop_on_error(self, stub_test_action_2):
response = self.client.call_actions(
'test_service',
[
ActionRequest(action='test_action_1'),
ActionRequest(action='test_action_2'),
ActionRequest(action='test_action_1'),
],
raise_action_errors=False,
)
# Called 3 actions, but expected to stop after the error in the second action
self.assertEqual(2, len(response.actions))
self.assertEqual('INVALID_BAR', response.actions[1].errors[0].code)
self.assertEqual('A bad message', response.actions[1].errors[0].message)
self.assertTrue(stub_test_action_2.called)
@stub_action('test_service', 'test_action_2', errors=[
{'code': 'MISSING_BAZ', 'field': 'entity_id', 'message': 'Your entity ID was missing'},
])
def test_multiple_actions_continue_on_error(self, mock_test_action_2):
response = self.client.call_actions(
'test_service',
[{'action': 'test_action_1'}, {'action': 'test_action_2'}, {'action': 'test_action_1'}],
raise_action_errors=False,
continue_on_error=True,
)
# Called 3 actions, and expected all three of them to be called, even with the interrupting error
self.assertEqual(3, len(response.actions))
self.assertEqual('MISSING_BAZ', response.actions[1].errors[0].code)
self.assertEqual('entity_id', response.actions[1].errors[0].field)
self.assertEqual('Your entity ID was missing', response.actions[1].errors[0].message)
self.assertTrue(mock_test_action_2.called)
@stub_action('test_service', 'test_action_2', body={'three': 'four'})
@stub_action('test_service', 'test_action_1', body={'one': 'two'})
def test_two_stubs_with_parallel_calls_all_stubbed(self, stub_test_action_1, stub_test_action_2):
job_responses = self.client.call_jobs_parallel(
[
{'service_name': 'test_service', 'actions': [{'action': 'test_action_1', 'body': {'a': 'b'}}]},
{'service_name': 'test_service', 'actions': [{'action': 'test_action_2', 'body': {'c': 'd'}}]},
],
)
self.assertIsNotNone(job_responses)
self.assertEqual(2, len(job_responses))
self.assertEqual(1, len(job_responses[0].actions))
self.assertEqual({'one': 'two'}, job_responses[0].actions[0].body)
self.assertEqual(1, len(job_responses[1].actions))
self.assertEqual({'three': 'four'}, job_responses[1].actions[0].body)
stub_test_action_1.assert_called_once_with({'a': 'b'})
stub_test_action_2.assert_called_once_with({'c': 'd'})
@stub_action('test_service', 'test_action_2')
@mock.patch(__name__ + '._test_function')
def test_one_stub_with_parallel_calls(self, mock_randint, stub_test_action_2):
mock_randint.side_effect = (42, 17, 31)
stub_test_action_2.return_value = {'concert': 'tickets'}
job_responses = self.client.call_jobs_parallel(
[
{'service_name': 'test_service', 'actions': [{'action': 'test_action_1'}]},
{'service_name': 'test_service', 'actions': [
{'action': 'test_action_2', 'body': {'slide': 'rule'}},
{'action': 'test_action_1'},
]},
{'service_name': 'test_service', 'actions': [{'action': 'test_action_1'}]},
],
)
self.assertIsNotNone(job_responses)
self.assertEqual(3, len(job_responses))
self.assertEqual(1, len(job_responses[0].actions))
self.assertEqual({'value': 42}, job_responses[0].actions[0].body)
self.assertEqual(2, len(job_responses[1].actions))
self.assertEqual({'concert': 'tickets'}, job_responses[1].actions[0].body)
self.assertEqual({'value': 17}, job_responses[1].actions[1].body)
self.assertEqual(1, len(job_responses[2].actions))
self.assertEqual({'value': 31}, job_responses[2].actions[0].body)
stub_test_action_2.assert_called_once_with({'slide': 'rule'})
@stub_action('test_service', 'test_action_2')
@stub_action('test_service', 'test_action_1')
def test_two_stubs_with_parallel_calls(self, stub_test_action_1, stub_test_action_2):
stub_test_action_1.return_value = {'value': 1}
stub_test_action_2.return_value = {'another_value': 2}
job_responses = Client(dict(self.client.config, **_secondary_stub_client_settings)).call_jobs_parallel(
[
{'service_name': 'test_service', 'actions': [
{'action': 'test_action_1', 'body': {'input_attribute': True}},
{'action': 'test_action_2', 'body': {'another_variable': 'Cool'}},
]},
{'service_name': 'cat', 'actions': [{'action': 'meow'}]},
{'service_name': 'dog', 'actions': [{'action': 'bark'}]},
{'service_name': 'test_service', 'actions': [{'action': 'does_not_exist'}]},
],
| |
None,
'balance_classes': None,
'build_tree_one_node': None,
'classification': 1,
'cols': None,
'destination_key': None,
'ignored_cols': None,
'ignored_cols_by_name': None,
'importance': 1, # enable variable importance by default
'max_after_balance_size': None,
'max_depth': None,
'min_rows': None, # how many rows in leaves for stopping condition
'mtries': None,
'nbins': None,
'ntrees': trees,
'n_folds': None,
'response': None,
'sample_rate': None,
'score_each_iteration': None,
'seed': None,
'source': data_key,
'validation': None,
}
if 'model_key' in kwargs:
kwargs['destination_key'] = kwargs['model_key'] # hmm..should we switch test to new param?
browseAlso = kwargs.pop('browseAlso', False)
check_params_update_kwargs(params_dict, kwargs, 'random_forest', print_params)
# on v2, there is no default response. So if it's none, we should use the last column, for compatibility
inspect = h2o_cmd.runInspect(key=data_key)
# response only takes names. can't use col index..have to look it up
# or add last col
# mnist can be col 0 for response!
if ('response' not in params_dict) or (params_dict['response'] is None):
params_dict['response'] = str(inspect['cols'][-1]['name'])
elif isinstance(params_dict['response'], int):
params_dict['response'] = str(inspect['cols'][params_dict['response']]['name'])
if print_params:
print "\n%s parameters:" % algo, params_dict
sys.stdout.flush()
# always follow thru to rfview?
rf = self.do_json_request(algo + '.json', timeout=timeoutSecs, params=params_dict)
print "\n%s result:" % algo, dump_json(rf)
# noPoll and rfView=False are similar?
if (noPoll or not rfView):
# just return for now
print "no rfView:", rfView, "noPoll", noPoll
return rf
# since we don't know the model key from the rf response, we just let rf redirect us to completion
# if we want to do noPoll, we have to name the model, so we know what to ask for when we do the completion view
# HACK: wait more for first poll?
time.sleep(5)
rfView = self.poll_url(rf, timeoutSecs=timeoutSecs, retryDelaySecs=retryDelaySecs,
initialDelaySecs=initialDelaySecs, pollTimeoutSecs=pollTimeoutSecs,
noise=noise, benchmarkLogging=benchmarkLogging, noPrint=noPrint)
return rfView
    def random_forest_view(self, data_key=None, model_key=None, timeoutSecs=300,
        retryDelaySecs=0.2, initialDelaySecs=None, pollTimeoutSecs=180,
        noise=None, benchmarkLogging=None, print_params=False, noPoll=False,
        noPrint=False, **kwargs):
        """Fetch the v2 DRF model view (2/DRFModelView) for a previously built model.

        Returns the raw JSON response when noPoll is True; otherwise polls the
        returned job URL to completion and returns the final view.
        """
        print "random_forest_view not supported in H2O fvec yet. hacking done response"
        # NOTE(review): 'r' is built but never used; the early return below is commented out.
        r = {'response': {'status': 'done'}, 'trees': {'number_built': 0}}
        # return r
        algo = '2/DRFModelView'
        # No such thing as 2/DRFScore2
        # NOTE(review): 'algoScore' is assigned but never used in this method.
        algoScore = '2/DRFScore2'
        # is response_variable needed here? it shouldn't be
        # do_json_request will ignore any that remain = None
        params_dict = {
            '_modelKey': model_key,
        }
        browseAlso = kwargs.pop('browseAlso', False)
        # only update params_dict..don't add
        # throw away anything else as it should come from the model (propagating what RF used)
        for k in kwargs:
            if k in params_dict:
                params_dict[k] = kwargs[k]
        if print_params:
            print "\n%s parameters:" % algo, params_dict
            sys.stdout.flush()
        whichUsed = algo
        # for drf2, you can't pass a new dataset here, compared to what you trained with.
        # should complain or something if tried with a data_key
        if data_key:
            print "Can't pass a new data_key to random_forest_view for v2's DRFModelView. Not using"
        a = self.do_json_request(whichUsed + ".json", timeout=timeoutSecs, params=params_dict)
        verboseprint("\n%s result:" % whichUsed, dump_json(a))
        if noPoll:
            return a
        # add a fake redirect_request and redirect_request_args
        # to the RF response, to make it look like everyone else
        rfView = self.poll_url(a, timeoutSecs=timeoutSecs, retryDelaySecs=retryDelaySecs,
            initialDelaySecs=initialDelaySecs, pollTimeoutSecs=pollTimeoutSecs,
            noPrint=noPrint, noise=noise, benchmarkLogging=benchmarkLogging)
        drf_model = rfView['drf_model']
        # NOTE(review): 'numberBuilt' is extracted but only used by the dead raise below.
        numberBuilt = drf_model['N']
        # want to double check all this because it's new
        # and we had problems with races/doneness before
        errorInResponse = False
        # numberBuilt<0 or ntree<0 or numberBuilt>ntree or \
        # ntree!=rfView['ntree']
        # NOTE(review): the raise references names (progress, progressTotal, ntree, status) never
        # defined in this scope — it would NameError if errorInResponse were ever True. Dead code
        # as written since errorInResponse is hard-coded False.
        if errorInResponse:
            raise Exception("\nBad values in %s.json\n" % whichUsed +
                "progress: %s, progressTotal: %s, ntree: %s, numberBuilt: %s, status: %s" % \
                (progress, progressTotal, ntree, numberBuilt, status))
        # bitwise | works because both operands are booleans; 'or' would be clearer
        if (browseAlso | h2o_args.browse_json):
            h2b.browseJsonHistoryAsUrlLastMatch(whichUsed)
        return rfView
def set_column_names(self, timeoutSecs=300, print_params=False, **kwargs):
params_dict = {
'copy_from': None,
'source': None,
'cols': None,
'comma_separated_list': None,
}
check_params_update_kwargs(params_dict, kwargs, 'set_column_names', print_params)
a = self.do_json_request('2/SetColumnNames2.json', timeout=timeoutSecs, params=params_dict)
verboseprint("\nset_column_names result:", dump_json(a))
return a
def quantiles(self, timeoutSecs=300, print_params=True, **kwargs):
params_dict = {
'source_key': None,
'column': None,
'quantile': None,
'max_qbins': None,
'interpolation_type': None,
'multiple_pass': None,
}
check_params_update_kwargs(params_dict, kwargs, 'quantiles', print_params)
a = self.do_json_request('2/QuantilesPage.json', timeout=timeoutSecs, params=params_dict)
verboseprint("\nquantiles result:", dump_json(a))
return a
def anomaly(self, timeoutSecs=300, retryDelaySecs=1, initialDelaySecs=5, pollTimeoutSecs=30,
noPoll=False, print_params=True, benchmarkLogging=None, **kwargs):
params_dict = {
'destination_key': None,
'source': None,
'dl_autoencoder_model': None,
'thresh': -1,
}
check_params_update_kwargs(params_dict, kwargs, 'anomaly', print_params)
a = self.do_json_request('2/Anomaly.json', timeout=timeoutSecs, params=params_dict)
if noPoll:
return a
a = self.poll_url(a, timeoutSecs=timeoutSecs, retryDelaySecs=retryDelaySecs, benchmarkLogging=benchmarkLogging,
initialDelaySecs=initialDelaySecs, pollTimeoutSecs=pollTimeoutSecs)
verboseprint("\nanomaly result:", dump_json(a))
return a
def deep_features(self, timeoutSecs=300, retryDelaySecs=1, initialDelaySecs=5, pollTimeoutSecs=30,
noPoll=False, print_params=True, benchmarkLogging=None, **kwargs):
params_dict = {
'destination_key': None,
'source': None,
'dl_model': None,
'layer': -1,
}
check_params_update_kwargs(params_dict, kwargs, 'deep_features', print_params)
a = self.do_json_request('2/DeepFeatures.json', timeout=timeoutSecs, params=params_dict)
if noPoll:
return a
a = self.poll_url(a, timeoutSecs=timeoutSecs, retryDelaySecs=retryDelaySecs, benchmarkLogging=benchmarkLogging,
initialDelaySecs=initialDelaySecs, pollTimeoutSecs=pollTimeoutSecs)
verboseprint("\ndeep_features result:", dump_json(a))
return a
def naive_bayes(self, timeoutSecs=300, retryDelaySecs=1, initialDelaySecs=5, pollTimeoutSecs=30,
noPoll=False, print_params=True, benchmarkLogging=None, **kwargs):
params_dict = {
'destination_key': None,
'source': None,
'response': None,
'cols': None,
'ignored_cols': None,
'ignored_cols_by_name': None,
'laplace': None,
'drop_na_cols': None,
'min_std_dev': None,
}
check_params_update_kwargs(params_dict, kwargs, 'naive_bayes', print_params)
a = self.do_json_request('2/NaiveBayes.json', timeout=timeoutSecs, params=params_dict)
if noPoll:
return a
a = self.poll_url(a, timeoutSecs=timeoutSecs, retryDelaySecs=retryDelaySecs, benchmarkLogging=benchmarkLogging,
initialDelaySecs=initialDelaySecs, pollTimeoutSecs=pollTimeoutSecs)
verboseprint("\nnaive_bayes result:", dump_json(a))
return a
    # NOTE(review): duplicate method — this definition shadows the earlier anomaly() in this class
    # (which used thresh=-1 and had no timing fields). One of the two should be removed.
    def anomaly(self, timeoutSecs=300, retryDelaySecs=1, initialDelaySecs=5, pollTimeoutSecs=30,
        noPoll=False, print_params=True, benchmarkLogging=None, **kwargs):
        """Invoke 2/Anomaly, poll to completion, and annotate the result with client-side timing."""
        params_dict = {
            'destination_key': None,
            'source': None,
            'dl_autoencoder_model': None,
            'thresh': None,
        }
        check_params_update_kwargs(params_dict, kwargs, 'anomaly', print_params)
        start = time.time()
        a = self.do_json_request('2/Anomaly.json', timeout=timeoutSecs, params=params_dict)
        # When noPoll, the raw response is returned without the timing annotations below.
        if noPoll:
            return a
        a = self.poll_url(a, timeoutSecs=timeoutSecs, retryDelaySecs=retryDelaySecs, benchmarkLogging=benchmarkLogging,
            initialDelaySecs=initialDelaySecs, pollTimeoutSecs=pollTimeoutSecs)
        verboseprint("\nanomaly :result:", dump_json(a))
        # Record client-side wall time and its percentage of the timeout budget.
        a['python_elapsed'] = time.time() - start
        a['python_%timeout'] = a['python_elapsed'] * 100 / timeoutSecs
        return a
def gbm_view(self, model_key, timeoutSecs=300, print_params=False, **kwargs):
params_dict = {
'_modelKey': model_key,
}
# only lets these params thru
check_params_update_kwargs(params_dict, kwargs, 'gbm_view', print_params)
a = self.do_json_request('2/GBMModelView.json', timeout=timeoutSecs, params=params_dict)
verboseprint("\ngbm_view result:", dump_json(a))
return a
    def gbm_grid_view(self, timeoutSecs=300, print_params=False, **kwargs):
        """Fetch 2/GridSearchProgress for a GBM grid-search job and print the full result."""
        params_dict = {
            'job_key': None,
            'destination_key': None,
        }
        # only lets these params thru
        check_params_update_kwargs(params_dict, kwargs, 'gbm_grid_view', print_params)
        a = self.do_json_request('2/GridSearchProgress.json', timeout=timeoutSecs, params=params_dict)
        # Unconditionally printed (not verboseprint), unlike most view helpers here.
        print "\ngbm_grid_view result:", dump_json(a)
        return a
def speedrf_view(self, modelKey, timeoutSecs=300, print_params=False, **kwargs):
params_dict = { '_modelKey': modelKey, }
check_params_update_kwargs(params_dict, kwargs, 'speedrf_view', print_params)
a = self.do_json_request('2/SpeeDRFModelView.json', timeout=timeoutSecs, params=params_dict)
verboseprint("\nspeedrf_view_result:", dump_json(a))
return a
    def speedrf_grid_view(self, timeoutSecs=300, print_params=False, **kwargs):
        """Fetch 2/GridSearchProgress for a SpeeDRF grid-search job and print the full result."""
        params_dict = {
            'job_key': None,
            'destination_key': None,
        }
        # only lets these params thru
        check_params_update_kwargs(params_dict, kwargs, 'speedrf_grid_view', print_params)
        a = self.do_json_request('2/GridSearchProgress.json', timeout=timeoutSecs, params=params_dict)
        # Unconditionally printed (not verboseprint), unlike most view helpers here.
        print "\nspeedrf_grid_view result:", dump_json(a)
        return a
def pca_view(self, modelKey, timeoutSecs=300, print_params=False, **kwargs):
#this function is only for pca on fvec! may replace in future.
params_dict = {
'_modelKey': modelKey,
}
check_params_update_kwargs(params_dict, kwargs, 'pca_view', print_params)
a = self.do_json_request('2/PCAModelView.json', timeout=timeoutSecs, params=params_dict)
verboseprint("\npca_view_result:", dump_json(a))
return a
def glm_grid_view(self, timeoutSecs=300, print_params=False, **kwargs):
#this function is only for glm2, may remove it in future.
params_dict = {
'grid_key': None,
}
check_params_update_kwargs(params_dict, kwargs, 'glm_grid_view', print_params)
a = self.do_json_request('2/GLMGridView.json', timeout=timeoutSecs, params=params_dict)
verboseprint("\nglm_grid_view result:", dump_json(a))
return a
def glm_view(self, modelKey=None, timeoutSecs=300, print_params=False, **kwargs):
#this function is only for glm2, may remove it in future.
params_dict = {
'_modelKey': modelKey,
}
check_params_update_kwargs(params_dict, kwargs, 'glm_view', print_params)
a = self.do_json_request('2/GLMModelView.json', timeout=timeoutSecs, params=params_dict)
verboseprint("\nglm_view result:", dump_json(a))
return a
def save_model(self, timeoutSecs=300, print_params=False, **kwargs):
#this function is only for glm2, may remove it in future.
params_dict = {
'model': None,
'path': None,
'force': None,
}
check_params_update_kwargs(params_dict, kwargs, 'save_model', print_params)
a = self.do_json_request('2/SaveModel.json', timeout=timeoutSecs, params=params_dict)
verboseprint("\nsave_model result:", dump_json(a))
return a
def load_model(self, timeoutSecs=300, print_params=False, **kwargs):
params_dict = {
'path': None,
}
check_params_update_kwargs(params_dict, kwargs, 'load_model', print_params)
a = self.do_json_request('2/LoadModel.json', timeout=timeoutSecs, params=params_dict)
verboseprint("\nload_model result:", dump_json(a))
return a
    def generate_predictions(self, data_key, model_key, destination_key=None, timeoutSecs=300, print_params=True,
        **kwargs):
        """Invoke 2/Predict to score data_key with model_key; output lands in destination_key."""
        algo = '2/Predict'
        # NOTE(review): algoView is assigned but never used in this method.
        algoView = '2/Inspect2'
        params_dict = {
            'data': data_key,
            'model': model_key,
            # 'prediction_key': destination_key,
            'prediction': destination_key,
        }
        browseAlso = kwargs.pop('browseAlso', False)
        # only lets these params thru
        check_params_update_kwargs(params_dict, kwargs, 'generate_predictions', print_params)
        if print_params:
            print "\n%s parameters:" % algo, params_dict
            sys.stdout.flush()
        a = self.do_json_request(
            algo + '.json',
            timeout=timeoutSecs,
            params=params_dict)
        verboseprint("\n%s result:" % algo, dump_json(a))
        # bitwise | works because both operands are booleans; 'or' would be clearer
        if (browseAlso | h2o_args.browse_json):
            h2b.browseJsonHistoryAsUrlLastMatch(algo)
        return a
def predict_confusion_matrix(self, timeoutSecs=300, print_params=True, **kwargs):
params_dict = {
'actual': None,
'vactual': 'predict',
'predict': None,
'vpredict': 'predict',
}
# everyone should move to using this, and a full list in params_dict
# only lets these params thru
check_params_update_kwargs(params_dict, kwargs, 'predict_confusion_matrix', print_params)
a = self.do_json_request('2/ConfusionMatrix.json', timeout=timeoutSecs, params=params_dict)
verboseprint("\nprediction_confusion_matrix result:", dump_json(a))
return a
def hit_ratio(self, timeoutSecs=300, print_params=True, **kwargs):
    """Compute hit-ratio statistics via the 2/HitRatio endpoint.

    Extra kwargs are validated against the whitelist below and merged in.
    Returns the parsed JSON response.
    """
    params_dict = {
        'actual': None,
        'vactual': 'predict',
        'predict': None,
        # Fixed: 'max_k' previously defaulted to the undefined global
        # `seed`, raising NameError whenever the caller didn't override it.
        'max_k': None,
        # NOTE(review): 'make_k' with the string 'None' looks like a typo
        # (vs. 'max_k' / a real None) — confirm against the HitRatio API.
        'make_k': 'None',
    }
    # Fixed: label the param check with this function's name (was 'auc').
    check_params_update_kwargs(params_dict, kwargs, 'hit_ratio', print_params)
    a = self.do_json_request('2/HitRatio.json', timeout=timeoutSecs, params=params_dict)
    verboseprint("\nhit_ratio result:", dump_json(a))
    return a
def generate_auc(self, timeoutSecs=300, print_params=True, **kwargs):
params_dict = {
'thresholds': None,
'actual': None,
'vactual': 'predict',
'predict': None,
'vpredict': 'predict',
}
| |
import warnings
from django.contrib.auth.models import Permission
from django.conf.urls import url
from django.core.urlresolvers import reverse
from django.core.exceptions import ImproperlyConfigured
from django.db.models import Model
from django.forms.widgets import flatatt
from django.utils.translation import ugettext_lazy as _
from django.utils.safestring import mark_safe
from wagtail.wagtailcore.models import Page
from wagtail.wagtailimages.models import Filter
from wagtail.wagtailcore import hooks
from .menus import ModelAdminMenuItem, GroupMenuItem, SubMenu
from .helpers import (
PermissionHelper, PagePermissionHelper, ButtonHelper, PageButtonHelper,
get_url_pattern, get_object_specific_url_pattern, get_url_name)
from .views import (
IndexView, InspectView, CreateView, ChooseParentView, EditView,
ConfirmDeleteView, CopyRedirectView, UnpublishRedirectView)
class WagtailRegisterable(object):
    """
    Base class providing a convenient way for ModelAdmin or ModelAdminGroup
    instances to be registered with Wagtail's admin area.
    """
    # When True, the menu item is added to Wagtail's settings menu instead
    # of the main admin menu.
    add_to_settings_menu = False

    def register_with_wagtail(self):
        """Wire this instance's permissions, admin urls and menu item into
        Wagtail via the corresponding hooks."""
        @hooks.register('register_permissions')
        def register_permissions():
            return self.get_permissions_for_registration()

        @hooks.register('register_admin_urls')
        def register_admin_urls():
            return self.get_admin_urls_for_registration()

        if self.add_to_settings_menu:
            menu_hook = 'register_settings_menu_item'
        else:
            menu_hook = 'register_admin_menu_item'

        @hooks.register(menu_hook)
        def register_admin_menu_item():
            return self.get_menu_item()
class ThumbmnailMixin(object):
    """
    Mixin class to help display thumbnail images in ModelAdmin listing results.
    `thumb_image_field_name` must be overridden to name a ForeignKey field on
    your model, linking to `wagtailimages.Image`.

    (The 'Thumbmnail' spelling is part of the public name and kept as-is.)
    """
    thumb_image_field_name = 'image'  # ForeignKey field pointing at wagtailimages.Image
    thumb_image_filter_spec = 'fill-100x100'  # Wagtail filter spec used to render the thumb
    thumb_image_width = 50  # rendered <img> width in pixels
    thumb_classname = 'admin-thumb'  # CSS class applied to the <img>
    thumb_col_header_text = _('image')  # listing column header text
    thumb_default = None  # optional fallback image URL when the object has none

    def admin_thumb(self, obj):
        """Return a safe `<img>` tag for obj's thumbnail, falling back to
        ``thumb_default`` when set, or '' when there is nothing to show."""
        try:
            image = getattr(obj, self.thumb_image_field_name, None)
        except AttributeError:
            # NOTE(review): getattr() with a default never raises for a
            # simply-missing attribute, so this branch only fires when the
            # attribute access itself raises AttributeError (e.g. inside a
            # property) — confirm this guard behaves as intended.
            raise ImproperlyConfigured(
                u"The `thumb_image_field_name` attribute on your `%s` class "
                "must name a field on your model." % self.__class__.__name__
            )
        img_attrs = {
            'src': self.thumb_default,
            'width': self.thumb_image_width,
            'class': self.thumb_classname,
        }
        if image:
            # `_` here is get_or_create()'s 'created' flag; it shadows the
            # ugettext_lazy alias only within this method.
            fltr, _ = Filter.objects.get_or_create(
                spec=self.thumb_image_filter_spec)
            img_attrs.update({'src': image.get_rendition(fltr).url})
            return mark_safe('<img{}>'.format(flatatt(img_attrs)))
        elif self.thumb_default:
            return mark_safe('<img{}>'.format(flatatt(img_attrs)))
        return ''
    # Column header used when this method appears in list_display.
    admin_thumb.short_description = thumb_col_header_text
class ModelAdmin(WagtailRegisterable):
    """
    The core wagtailmodeladmin class. It provides an alternative means to
    list and manage instances of a given 'model' within Wagtail's admin area.
    It is essentially comprised of attributes and methods that allow a degree
    of control over how the data is represented, and other methods to make the
    additional functionality available via various Wagtail hooks.
    """
    # The Django model being managed. Must be set by subclasses.
    model = None
    # Menu item presentation.
    menu_label = None
    menu_icon = None
    menu_order = None
    # Index (listing) view configuration.
    list_display = ('__str__',)
    list_display_add_buttons = None
    # Inspect view configuration.
    inspect_view_fields = None
    # NOTE(review): mutable class-level defaults below ([]) are shared
    # across subclasses that don't override them — never mutate in place.
    inspect_view_fields_exclude = []
    inspect_view_enabled = False
    empty_value_display = '-'
    list_filter = ()
    list_select_related = False
    list_per_page = 100
    search_fields = None
    ordering = None
    parent = None
    # View classes; override to customise behaviour per view.
    index_view_class = IndexView
    create_view_class = CreateView
    inspect_view_class = InspectView
    edit_view_class = EditView
    confirm_delete_view_class = ConfirmDeleteView
    choose_parent_view_class = ChooseParentView
    copy_view_class = CopyRedirectView
    unpublish_view_class = UnpublishRedirectView
    # Template overrides ('' means use the default lookup).
    index_template_name = ''
    create_template_name = ''
    edit_template_name = ''
    inspect_template_name = ''
    confirm_delete_template_name = ''
    choose_parent_template_name = ''
    # Helper class overrides.
    permission_helper_class = None
    button_helper_class = None
    # Extra static assets per view type.
    index_view_extra_css = []
    index_view_extra_js = []
    inspect_view_extra_css = []
    inspect_view_extra_js = []
    form_view_extra_css = []
    form_view_extra_js = []
def __init__(self, parent=None):
    """
    Don't allow initialisation unless self.model is set to a valid model.
    """
    if not self.model or not issubclass(self.model, Model):
        raise ImproperlyConfigured(
            u"The model attribute on your '%s' class must be set, and "
            "must be a valid Django model." % self.__class__.__name__)
    self.opts = self.model._meta
    self.is_pagemodel = issubclass(self.model, Page)
    self.parent = parent
    # Build the permission helper up front; views consult it repeatedly.
    helper_cls = self.get_permission_helper_class()
    self.permission_helper = helper_cls(self.model)
def get_permission_helper_class(self):
    """Return the permission helper class: an explicit override if set,
    otherwise the page- or snippet-appropriate default."""
    if self.permission_helper_class:
        return self.permission_helper_class
    return PagePermissionHelper if self.is_pagemodel else PermissionHelper
def get_button_helper_class(self):
    """Return the button helper class: an explicit override if set,
    otherwise the page- or snippet-appropriate default."""
    if self.button_helper_class:
        return self.button_helper_class
    return PageButtonHelper if self.is_pagemodel else ButtonHelper
def get_menu_label(self):
    """
    Returns the label text to be used for the menu item.
    """
    if self.menu_label:
        return self.menu_label
    return self.opts.verbose_name_plural.title()
def get_menu_icon(self):
    """
    Returns the icon to be used for the menu item. The value is prepended
    with 'icon-' to create the full icon class name. For design
    consistency, the same icon is also applied to the main heading for
    views called by this class.
    """
    if self.menu_icon:
        return self.menu_icon
    return 'doc-full-inverse' if self.is_pagemodel else 'snippet'
def get_menu_order(self):
    """
    Returns the 'order' to be applied to the menu item. 000 being first
    place. Where ModelAdminGroup is used, the menu_order value should be
    applied to that, and any ModelAdmin classes added to 'items'
    attribute will be ordered automatically, based on their order in that
    sequence.
    """
    if self.menu_order:
        return self.menu_order
    return 999
def show_menu_item(self, request):
    """
    Returns a boolean indicating whether the menu item should be visible
    for the user in the supplied request, based on their permissions.
    """
    # Delegates entirely to the permission helper built in __init__.
    return self.permission_helper.has_list_permission(request.user)
def get_list_display(self, request):
    """
    Return a sequence containing the fields/method output to be displayed
    in the list view. `request` is accepted so subclasses can vary the
    columns per request.
    """
    return self.list_display
def get_list_display_add_buttons(self, request):
    """
    Return the name of the field/method from list_display where action
    buttons should be added. Defaults to the first listing column.
    """
    if self.list_display_add_buttons:
        return self.list_display_add_buttons
    return self.list_display[0]
def get_empty_value_display(self):
    """
    Return the empty_value_display set on ModelAdmin, marked safe for
    direct rendering in templates.
    """
    return mark_safe(self.empty_value_display)
def get_list_filter(self, request):
    """
    Returns a sequence containing the fields to be displayed as filters in
    the right sidebar in the list view. `request` is accepted so
    subclasses can vary filters per request.
    """
    return self.list_filter
def get_ordering(self, request):
    """
    Returns a sequence defining the default ordering for results in the
    list view (empty tuple when no ordering is configured).
    """
    if self.ordering:
        return self.ordering
    return ()
def get_queryset(self, request):
    """
    Returns a QuerySet of all model instances that can be edited by the
    admin site, ordered per get_ordering().
    """
    queryset = self.model._default_manager.get_queryset()
    ordering = self.get_ordering(request)
    return queryset.order_by(*ordering) if ordering else queryset
def get_search_fields(self, request):
    """
    Returns a sequence defining which fields on a model should be searched
    when a search is initiated from the list view (empty when unset).
    """
    if self.search_fields:
        return self.search_fields
    return ()
def get_index_url(self):
    """Return the URL of this modeladmin's index (listing) view."""
    return reverse(get_url_name(self.opts))
def get_choose_parent_url(self):
    """Return the URL of the 'choose parent page' view (page models only)."""
    return reverse(get_url_name(self.opts, 'choose_parent'))
def get_create_url(self):
    """Return the URL of this modeladmin's create view."""
    return reverse(get_url_name(self.opts, 'create'))
def get_inspect_view_fields(self):
    """Return the field names shown by the inspect view: the explicit
    `inspect_view_fields` when set, otherwise every concrete,
    non-excluded model field (skipping auto-created relations)."""
    if self.inspect_view_fields:
        return self.inspect_view_fields
    return [
        f.name for f in self.model._meta.get_fields()
        if f.name not in self.inspect_view_fields_exclude
        and f.concrete
        and (not f.is_relation or (not f.auto_created and f.related_model))
    ]
def get_extra_class_names_for_field_col(self, obj, field_name):
    """
    Return a list of additional CSS class names to be added to the table
    cell's `class` attribute when rendering the output of `field_name` for
    `obj` in `index_view`.

    Must always return a list or tuple. This base implementation is an
    intentional no-op hook for subclasses.
    """
    return []
def get_extra_attrs_for_field_col(self, obj, field_name):
    """
    Return a dictionary of additional HTML attributes to be added to a
    table cell when rendering the output of `field_name` for `obj` in
    `index_view`.

    Must always return a dictionary. This base implementation is an
    intentional no-op hook for subclasses.
    """
    return {}
def get_index_view_extra_css(self):
    """Return CSS paths for the index view: the core stylesheet followed
    by any subclass-supplied extras."""
    return ['wagtailmodeladmin/css/index.css'] + list(self.index_view_extra_css)
def get_index_view_extra_js(self):
    """Return additional JS paths to include on the index view."""
    return self.index_view_extra_js
def get_form_view_extra_css(self):
    """Return additional CSS paths for the create/edit form views."""
    return self.form_view_extra_css
def get_form_view_extra_js(self):
    """Return additional JS paths for the create/edit form views."""
    return self.form_view_extra_js
def get_inspect_view_extra_css(self):
    """Return additional CSS paths for the inspect view."""
    return self.inspect_view_extra_css
def get_inspect_view_extra_js(self):
    """Return additional JS paths for the inspect view."""
    return self.inspect_view_extra_js
def index_view(self, request):
    """
    Instantiates a class-based view to provide listing functionality for
    the assigned model. The view class used can be overridden by changing
    the 'index_view_class' attribute.
    """
    return self.index_view_class.as_view(model_admin=self)(request)
def create_view(self, request):
    """
    Instantiates a class-based view to provide 'creation' functionality for
    the assigned model, or redirect to Wagtail's create view if the
    assigned model extends 'Page'. The view class used can be overridden by
    changing the 'create_view_class' attribute.
    """
    return self.create_view_class.as_view(model_admin=self)(request)
def inspect_view(self, request, object_id):
    """
    Instantiates a class-based view to display details of a single object.
    The view class used can be overridden by changing the
    'inspect_view_class' attribute.
    """
    return self.inspect_view_class.as_view(
        model_admin=self, object_id=object_id)(request)
def choose_parent_view(self, request):
    """
    Instantiates a class-based view to provide a view that allows a parent
    page to be chosen for a new object, where the assigned model extends
    Wagtail's Page model, and there is more than one potential parent for
    new instances. The view class used can be overridden by changing the
    'choose_parent_view_class' attribute.
    """
    return self.choose_parent_view_class.as_view(model_admin=self)(request)
def edit_view(self, request, object_id):
    """
    Instantiates a class-based view to provide 'edit' functionality for the
    assigned model, or redirect to Wagtail's edit view if the assigned
    model extends 'Page'. The view class used can be overridden by changing
    the 'edit_view_class' attribute.
    """
    return self.edit_view_class.as_view(
        model_admin=self, object_id=object_id)(request)
def confirm_delete_view(self, request, object_id):
    """
    Instantiates a class-based view to provide 'delete confirmation'
    functionality for the assigned model, or redirect to Wagtail's delete
    confirmation view if the assigned model extends 'Page'. The view class
    used can be overridden by changing the 'confirm_delete_view_class'
    attribute.
    """
    return self.confirm_delete_view_class.as_view(
        model_admin=self, object_id=object_id)(request)
def unpublish_view(self, request, object_id):
"""
Instantiates a class-based view that redirects to Wagtail's 'unpublish'
view for models that extend 'Page' (if the user has sufficient
permissions). We do this via our own view so that we can reliably
control redirection of the user back to the | |
#!/usr/bin/env python
#
# PrettyTable 0.5
# Copyright (c) 2009, <NAME> <<EMAIL>>
# All rights reserved.
# With contributions from:
# * <NAME>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import
from __future__ import print_function
import cgi
import copy
import six
import six.moves.cPickle
from six.moves import range
from six.moves import zip
import sys
# Horizontal-rule drawing modes accepted by printt()/get_string().
FRAME = 0  # rules only as part of the outer frame / under the header
ALL = 1  # a rule after every data row
NONE = 2  # no horizontal rules at all
class PrettyTable:
    """A simple ASCII/HTML table renderer with per-column alignment,
    configurable padding and border characters, and cached output."""

    def __init__(self, fields=None, caching=True, padding_width=1, left_padding=None, right_padding=None):
        """Return a new PrettyTable instance

        Arguments:
        fields - list or tuple of field names
        caching - boolean value to turn string caching on/off
        padding width - number of spaces between column lines and content"""
        # Data
        self.fields = []
        if fields:
            # set_field_names also initialises widths/aligns and the caches.
            self.set_field_names(fields)
        else:
            self.widths = []
            self.aligns = []
        # Validates padding_width (raises on negative values) ...
        self.set_padding_width(padding_width)
        self.rows = []
        self.cache = {}
        self.html_cache = {}
        # Options
        self.hrules = FRAME
        self.caching = caching
        # ... then stored again here; redundant but harmless since the
        # value was already validated above.
        self.padding_width = padding_width
        self.left_padding = left_padding
        self.right_padding = right_padding
        self.vertical_char = "|"
        self.horizontal_char = "-"
        self.junction_char = "+"
def __getslice__(self, i, j):
    """Return a new PrettyTable whose data rows are a slice of this one's.

    Note: __getslice__ is only invoked by Python 2; Python 3 slicing goes
    through __getitem__ instead.

    Arguments:
    i - beginning slice index
    j - ending slice index"""
    clone = copy.deepcopy(self)
    clone.rows = self.rows[i:j]
    return clone
def __str__(self):
    """Render the table with all default get_string() options."""
    return self.get_string()
##############################
# ATTRIBUTE SETTERS #
##############################
def set_field_names(self, fields):
    """Set the names of the fields

    Arguments:
    fields - list or tuple of field names"""
    # Start from the header widths, then (when renaming fields after rows
    # were already added) widen each column to fit its existing data.
    self.widths = [len(field) for field in fields]
    if self.fields:
        for row in self.rows:
            for i, value in enumerate(row):
                self.widths[i] = max(self.widths[i], len(six.text_type(value)))
    self.fields = fields
    self.aligns = ["c"] * len(fields)
    # Any cached rendering is now stale.
    self.cache = {}
    self.html_cache = {}
def set_field_align(self, fieldname, align):
    """Set the alignment of a field by its fieldname

    Arguments:
    fieldname - name of the field whose alignment is to be changed
    align - desired alignment - "l" for left, "c" for centre and "r" for right"""
    if fieldname not in self.fields:
        raise Exception("No field %s exists!" % fieldname)
    if align not in ("l", "c", "r"):
        raise Exception("Alignment %s is invalid, use l, c or r!" % align)
    index = self.fields.index(fieldname)
    self.aligns[index] = align
    # Any cached rendering is now stale.
    self.cache = {}
    self.html_cache = {}
def set_padding_width(self, padding_width):
    """Set the number of empty spaces between a column's edge and its content

    Arguments:
    padding_width - number of spaces, must be a non-negative integer"""
    # Validate explicitly: the previous `assert`-based check was silently
    # stripped when Python runs with -O, skipping validation entirely.
    # A non-numeric value still propagates int()'s ValueError/TypeError,
    # matching the old behaviour.
    if int(padding_width) < 0:
        raise Exception("Invalid value for padding_width: %s!" % six.text_type(padding_width))
    self.padding_width = padding_width
    self.cache = {}
    self.html_cache = {}
def set_left_padding(self, left_padding):
    """Set the number of empty spaces between a column's left edge and its content

    Arguments:
    left_padding - number of spaces (non-negative integer), or None to
    fall back to the shared padding_width"""
    # Validate explicitly: the previous `assert`-based check was silently
    # stripped under -O. Also use `is None` rather than `== None`.
    if left_padding is not None and int(left_padding) < 0:
        raise Exception("Invalid value for left_padding: %s!" % six.text_type(left_padding))
    self.left_padding = left_padding
    self.cache = {}
    self.html_cache = {}
def set_right_padding(self, right_padding):
    """Set the number of empty spaces between a column's right edge and its content

    Arguments:
    right_padding - number of spaces (non-negative integer), or None to
    fall back to the shared padding_width"""
    # Validate explicitly: the previous `assert`-based check was silently
    # stripped under -O. Also use `is None` rather than `== None`.
    if right_padding is not None and int(right_padding) < 0:
        raise Exception("Invalid value for right_padding: %s!" % six.text_type(right_padding))
    self.right_padding = right_padding
    self.cache = {}
    self.html_cache = {}
def set_border_chars(self, vertical="|", horizontal="-", junction="+"):
    """Set the characters to use when drawing the table border

    Arguments:
    vertical - character used to draw a vertical line segment. Default is |
    horizontal - character used to draw a horizontal line segment. Default is -
    junction - character used to draw a line junction. Default is +"""
    for char in (vertical, horizontal, junction):
        if len(char) > 1:
            raise Exception("All border characters must be strings of length ONE!")
    self.vertical_char = vertical
    self.horizontal_char = horizontal
    self.junction_char = junction
    # Only the ASCII cache is cleared; border characters do not affect
    # the HTML rendering, so html_cache stays valid.
    self.cache = {}
##############################
# DATA INPUT METHODS #
##############################
def add_row(self, row):
    """Add a row to the table

    Arguments:
    row - row of data, should be a list with as many elements as the table
    has fields"""
    if len(row) != len(self.fields):
        raise Exception("Row has incorrect number of values, (actual) %d!=%d (expected)" %(len(row),len(self.fields)))
    self.rows.append(row)
    # Widen any column whose new value is longer than its current width.
    for i in range(0, len(row)):
        if len(six.text_type(row[i])) > self.widths[i]:
            self.widths[i] = len(six.text_type(row[i]))
    # Invalidate BOTH caches. Previously only html_cache was cleared,
    # so with caching enabled get_string() could keep returning stale
    # ASCII output that was rendered before this row was added; every
    # other mutator in the class clears both.
    self.cache = {}
    self.html_cache = {}
def add_column(self, fieldname, column, align="c"):
    """Add a column to the table.

    Arguments:
    fieldname - name of the field to contain the new column of data
    column - column of data, should be a list with as many elements as the
    table has rows
    align - desired alignment for this column - "l" for left, "c" for centre and "r" for right"""
    # A column may only be added when it has exactly one value per existing
    # row (or when the table has no rows yet).
    if len(self.rows) in (0, len(column)):
        if align not in ["l","c","r"]:
            raise Exception("Alignment %s is invalid, use l, c or r!" % align)
        self.fields.append(fieldname)
        self.widths.append(len(fieldname))
        self.aligns.append(align)
        for i in range(0, len(column)):
            # Create rows lazily when the table started out empty.
            if len(self.rows) < i+1:
                self.rows.append([])
            self.rows[i].append(column[i])
            # Track the widest value seen in this (last) column.
            if len(six.text_type(column[i])) > self.widths[-1]:
                self.widths[-1] = len(six.text_type(column[i]))
    else:
        raise Exception("Column length %d does not match number of rows %d!" % (len(column), len(self.rows)))
    # NOTE(review): unlike the other mutators, this does not clear
    # self.cache / self.html_cache — confirm whether that is intentional.
##############################
# MISC PRIVATE METHODS #
##############################
def _get_sorted_rows(self, start, end, sortby, reversesort):
    """Return a deep copy of rows[start:end] ordered by the `sortby`
    column, using the "Decorate, Sort, Undecorate" (DSU) pattern so that
    ties fall back to comparing the remaining row contents."""
    rows = copy.deepcopy(self.rows[start:end])
    col = self.fields.index(sortby)
    # Decorate each row with its sort key, sort, then strip the key off.
    decorated = sorted(([row[col]] + row for row in rows), reverse=reversesort)
    return [entry[1:] for entry in decorated]
def _get_paddings(self):
    """Return the effective (left, right) padding widths, falling back to
    the shared padding_width for any side that was not set explicitly."""
    lpad = self.padding_width if self.left_padding is None else self.left_padding
    rpad = self.padding_width if self.right_padding is None else self.right_padding
    return lpad, rpad
##############################
# ASCII PRINT/STRING METHODS #
##############################
def printt(self, start=0, end=None, fields=None, header=True, border=True, hrules=FRAME, sortby=None, reversesort=False):
    """Print table in current state to stdout.

    Thin wrapper around get_string() — all arguments are forwarded as-is.

    Arguments:
    start - index of first data row to include in output
    end - index of last data row to include in output PLUS ONE (list slice style)
    fields - names of fields (columns) to include
    header - True/False to include or omit the header row
    sortby - name of field to sort rows by
    reversesort - True or False to sort in descending or ascending order
    border - should be True or False to print or not print borders
    hrules - controls printing of horizontal rules after each row. Allowed values: FRAME, ALL, NONE"""
    print(self.get_string(start, end, fields, header, border, hrules, sortby, reversesort))
def get_string(self, start=0, end=None, fields=None, header=True, border=True, hrules=FRAME, sortby=None, reversesort=False):
"""Return string representation of table in current state.
Arguments:
start - index of first data row to include in output
end - index of last data row to include in output PLUS ONE (list slice style)
fields - names of fields (columns) to include
sortby - name of field to sort rows by
reversesort - True or False to sort in descending or ascending order
border - should be True or False to print or not print borders
hrules - controls | |
self._contributor(
display_name="<NAME>", viaf="73520345",
)
# _contributor() set sort_name to a random value; remove it.
with_viaf.sort_name = None
expect = ContributorData(
display_name="<NAME>", sort_name="<NAME>",
viaf="73520345"
)
_match(
expect, m(display_name="<NAME>")
)
# Again, this works even if some of the incoming arguments
# turn out not to be supported by the database data.
_match(
expect, m(display_name="<NAME>", sort_name="<NAME>",
viaf="abcd")
)
# If there's a duplicate that provides conflicting information,
# the corresponding field is left blank -- we don't know which
# value is correct.
with_incorrect_viaf, ignore = self._contributor(
display_name="<NAME>", viaf="abcd",
)
with_incorrect_viaf.sort_name=None
expect = ContributorData(
display_name="<NAME>", sort_name="<NAME>",
)
_match(expect, m(display_name="<NAME>"))
# If there's conflicting information in the database for a
# field, but the input included a value for that field, then
# the input value is used.
expect.viaf = "73520345"
_match(expect, m(display_name="<NAME>", viaf="73520345"))
def test_apply(self):
    """ContributorData.apply copies every field onto an existing
    Contributor, reports `changed` correctly, and is idempotent."""
    contributor_old, made_new = self._contributor(sort_name="<NAME>", viaf="viaf12345")
    kwargs = dict()
    kwargs[Contributor.BIRTH_DATE] = '2001-01-01'
    contributor_data = ContributorData(
        sort_name = "<NAME>",
        lc = "1234567",
        viaf = "ABC123",
        aliases = ["Primo"],
        display_name = "<NAME>",
        family_name = "TestAuttie",
        wikipedia_name = "TestWikiAuth",
        biography = "He was born on Main Street.",
        extra = kwargs,
    )
    contributor_new, changed = contributor_data.apply(contributor_old)
    # Every field should now reflect the incoming ContributorData.
    eq_(changed, True)
    eq_(contributor_new.sort_name, u"<NAME>")
    eq_(contributor_new.lc, u"1234567")
    eq_(contributor_new.viaf, u"ABC123")
    eq_(contributor_new.aliases, [u"Primo"])
    eq_(contributor_new.display_name, u"Test Author For The Win")
    eq_(contributor_new.family_name, u"TestAuttie")
    eq_(contributor_new.wikipedia_name, u"TestWikiAuth")
    eq_(contributor_new.biography, u"He was born on Main Street.")
    eq_(contributor_new.extra[Contributor.BIRTH_DATE], u"2001-01-01")
    #eq_(contributor_new.contributions, u"Audio")
    # Applying identical data a second time reports no change.
    contributor_new, changed = contributor_data.apply(contributor_new)
    eq_(changed, False)
def test_display_name_to_sort_name_from_existing_contributor(self):
    """The lookup resolves a display name to the matching contributor's
    sort name, and returns None when no contributor matches."""
    # If there's an existing contributor with a matching display name,
    # we'll use their sort name.
    existing_contributor, ignore = self._contributor(sort_name="Sort, Name", display_name="<NAME>")
    eq_("Sort, Name", ContributorData.display_name_to_sort_name_from_existing_contributor(self._db, "<NAME>"))
    # Otherwise, we don't know.
    eq_(None, ContributorData.display_name_to_sort_name_from_existing_contributor(self._db, "<NAME>"))
def test_find_sort_name(self):
    """find_sort_name tries, in order: an already-set sort name, an
    existing contributor with the same display name, the metadata
    client, and finally a guess derived from the display name."""
    metadata_client = DummyMetadataClient()
    metadata_client.lookups["Metadata Client Author"] = "<NAME>."
    existing_contributor, ignore = self._contributor(sort_name="Author, E.", display_name="Existing Author")
    contributor_data = ContributorData()
    # If there's already a sort name, keep it.
    contributor_data.sort_name = "Sort Name"
    eq_(True, contributor_data.find_sort_name(self._db, [], metadata_client))
    eq_("Sort Name", contributor_data.sort_name)
    # ... even when other sources would have an answer.
    contributor_data.sort_name = "Sort Name"
    contributor_data.display_name = "Existing Author"
    eq_(True, contributor_data.find_sort_name(self._db, [], metadata_client))
    eq_("Sort Name", contributor_data.sort_name)
    contributor_data.sort_name = "Sort Name"
    contributor_data.display_name = "Metadata Client Author"
    eq_(True, contributor_data.find_sort_name(self._db, [], metadata_client))
    eq_("Sort Name", contributor_data.sort_name)
    # If there's no sort name but there's already an author with the same display name,
    # use that author's sort name.
    contributor_data.sort_name = None
    contributor_data.display_name = "Existing Author"
    eq_(True, contributor_data.find_sort_name(self._db, [], metadata_client))
    eq_("Author, E.", contributor_data.sort_name)
    # If there's no sort name and no existing author, check the metadata wrangler
    # for a sort name.
    contributor_data.sort_name = None
    contributor_data.display_name = "Metadata Client Author"
    eq_(True, contributor_data.find_sort_name(self._db, [], metadata_client))
    eq_("Author, <NAME>.", contributor_data.sort_name)
    # If there's no sort name, no existing author, and nothing from the metadata
    # wrangler, guess the sort name based on the display name.
    contributor_data.sort_name = None
    contributor_data.display_name = "New Author"
    eq_(True, contributor_data.find_sort_name(self._db, [], metadata_client))
    eq_("Author, New", contributor_data.sort_name)
def test_find_sort_name_survives_metadata_client_exception(self):
    """A metadata-wrangler failure is swallowed and the default
    display-name -> sort-name guess is used instead."""
    class Mock(ContributorData):
        # Simulate an integration error from the metadata wrangler side.
        def display_name_to_sort_name_through_canonicalizer(
            self, _db, identifiers, metadata_client
        ):
            self.called_with = (_db, identifiers, metadata_client)
            raise RemoteIntegrationException(
                "http://url/", "Metadata wrangler failure!"
            )
    # Here's a ContributorData that's going to run into an error.
    contributor_data = Mock()
    contributor_data.display_name = "<NAME>"
    identifiers = []
    metadata_client = object()
    contributor_data.find_sort_name(self._db, identifiers, metadata_client)
    # display_name_to_sort_name_through_canonicalizer was called
    # with the arguments we expect.
    eq_((self._db, identifiers, metadata_client),
        contributor_data.called_with)
    # Although that method raised an exception, we were able to
    # keep going and use the default display name -> sort name
    # algorithm to guess at the author name.
    eq_("<NAME>.", contributor_data.sort_name)
class TestLinkData(DatabaseTest):
    """Tests for LinkData's mirror-type mapping and media-type guessing."""

    @parameterized.expand([
        ('image', Hyperlink.IMAGE, ExternalIntegrationLink.COVERS),
        ('thumbnail', Hyperlink.THUMBNAIL_IMAGE, ExternalIntegrationLink.COVERS),
        ('open_access_book', Hyperlink.OPEN_ACCESS_DOWNLOAD, ExternalIntegrationLink.OPEN_ACCESS_BOOKS),
        ('protected_access_book', Hyperlink.GENERIC_OPDS_ACQUISITION, ExternalIntegrationLink.PROTECTED_ACCESS_BOOKS)
    ])
    def test_mirror_type_returns_correct_mirror_type_for(self, name, rel, expected_mirror_type):
        """Each link rel maps to the expected mirror type."""
        # Arrange
        link_data = LinkData(rel, href='dummy')
        # Act
        result = link_data.mirror_type()
        # Assert
        eq_(result, expected_mirror_type)

    def test_guess_media_type(self):
        """guessed_media_type is derived from the file extension, but an
        explicit media_type always wins; descriptions have no guess."""
        rel = Hyperlink.IMAGE
        # Sometimes we have no idea what media type is at the other
        # end of a link.
        unknown = LinkData(rel, href="http://foo/bar.unknown")
        eq_(None, unknown.guessed_media_type)
        # Sometimes we can guess based on the file extension.
        jpeg = LinkData(rel, href="http://foo/bar.jpeg")
        eq_(Representation.JPEG_MEDIA_TYPE, jpeg.guessed_media_type)
        # An explicitly known media type takes precedence over
        # something we guess from the file extension.
        png = LinkData(rel, href="http://foo/bar.jpeg",
                       media_type=Representation.PNG_MEDIA_TYPE)
        eq_(Representation.PNG_MEDIA_TYPE, png.guessed_media_type)
        description = LinkData(Hyperlink.DESCRIPTION, content="Some content")
        eq_(None, description.guessed_media_type)
class TestMetadata(DatabaseTest):
    """Tests for Metadata construction, conversion and application."""

    def test_defaults(self):
        # Verify that a Metadata object doesn't make any assumptions
        # about an item's medium.
        m = Metadata(data_source=DataSource.OCLC)
        eq_(None, m.medium)
def test_from_edition(self):
    """Metadata.from_edition copies fields, contributors, identifier and
    links from an Edition, including a series_position of 0."""
    # Makes sure Metadata.from_edition copies all the fields over.
    edition, pool = self._edition(with_license_pool=True)
    edition.series = "<NAME> and the Mollusk of Infamy"
    edition.series_position = "14"
    edition.primary_identifier.add_link(Hyperlink.IMAGE, "image", edition.data_source)
    metadata = Metadata.from_edition(edition)
    # make sure the metadata and the originating edition match
    for field in Metadata.BASIC_EDITION_FIELDS:
        eq_(getattr(edition, field), getattr(metadata, field))
    e_contribution = edition.contributions[0]
    m_contributor_data = metadata.contributors[0]
    eq_(e_contribution.contributor.sort_name, m_contributor_data.sort_name)
    eq_(e_contribution.role, m_contributor_data.roles[0])
    eq_(edition.data_source, metadata.data_source(self._db))
    eq_(edition.primary_identifier.identifier, metadata.primary_identifier.identifier)
    e_link = edition.primary_identifier.links[0]
    m_link = metadata.links[0]
    eq_(e_link.rel, m_link.rel)
    eq_(e_link.resource.url, m_link.href)
    # The series position can also be 0.
    edition.series_position = 0
    metadata = Metadata.from_edition(edition)
    eq_(edition.series_position, metadata.series_position)
def test_update(self):
    """Metadata.update prefers the new object's fields, but keeps the old
    value when the new field is undefined (None)."""
    # Tests that Metadata.update correctly prefers new fields to old, unless
    # new fields aren't defined.
    edition_old, pool = self._edition(with_license_pool=True)
    edition_old.publisher = "test_old_publisher"
    edition_old.subtitle = "old_subtitile"
    edition_old.series = "old_series"
    edition_old.series_position = 5
    metadata_old = Metadata.from_edition(edition_old)
    edition_new, pool = self._edition(with_license_pool=True)
    # set more fields on metadatas
    edition_new.publisher = None
    edition_new.subtitle = "new_updated_subtitile"
    edition_new.series = "new_series"
    edition_new.series_position = 0
    metadata_new = Metadata.from_edition(edition_new)
    metadata_old.update(metadata_new)
    # publisher was None on the new metadata, so the old value survives;
    # everything else (including series_position=0) is overwritten.
    eq_(metadata_old.publisher, "test_old_publisher")
    eq_(metadata_old.subtitle, metadata_new.subtitle)
    eq_(metadata_old.series, edition_new.series)
    eq_(metadata_old.series_position, edition_new.series_position)
def test_apply(self):
    """Metadata.apply writes every field onto an Edition, reports
    `changed` accurately (including for series_position=0), and does
    not create a Work as a side effect."""
    edition_old, pool = self._edition(with_license_pool=True)
    metadata = Metadata(
        data_source=DataSource.OVERDRIVE,
        title=u"The Harry Otter and the Seaweed of Ages",
        sort_title=u"Harry Otter and the Seaweed of Ages, The",
        subtitle=u"Kelp At It",
        series=u"The Harry Otter Sagas",
        series_position=u"4",
        language=u"eng",
        medium=u"Audio",
        publisher=u"Scholastic Inc",
        imprint=u"Follywood",
        published=datetime.date(1987, 5, 4),
        issued=datetime.date(1989, 4, 5)
    )
    edition_new, changed = metadata.apply(edition_old, pool.collection)
    eq_(changed, True)
    eq_(edition_new.title, u"The Harry Otter and the Seaweed of Ages")
    eq_(edition_new.sort_title, u"Harry Otter and the Seaweed of Ages, The")
    eq_(edition_new.subtitle, u"Kelp At It")
    eq_(edition_new.series, u"The Harry Otter Sagas")
    eq_(edition_new.series_position, u"4")
    eq_(edition_new.language, u"eng")
    eq_(edition_new.medium, u"Audio")
    eq_(edition_new.publisher, u"Scholastic Inc")
    eq_(edition_new.imprint, u"Follywood")
    eq_(edition_new.published, datetime.date(1987, 5, 4))
    eq_(edition_new.issued, datetime.date(1989, 4, 5))
    # Applying identical metadata a second time reports no change.
    edition_new, changed = metadata.apply(edition_new, pool.collection)
    eq_(changed, False)
    # The series position can also be 0.
    metadata.series_position = 0
    edition_new, changed = metadata.apply(edition_new, pool.collection)
    eq_(changed, True)
    eq_(edition_new.series_position, 0)
    # Metadata.apply() does not create a Work if no Work exists.
    eq_(0, self._db.query(Work).count())
def test_apply_wipes_presentation_calculation_records(self):
    """Applying Metadata registers presentation-recalculation coverage
    records on the Work: a new title or cover only requires choosing a
    new presentation edition, while new subjects or descriptions require
    a full recalculation.
    """
    # We have a work.
    work = self._work(title="The Wrong Title", with_license_pool=True)

    # We learn some more information about the work's identifier.
    metadata = Metadata(
        data_source=DataSource.OVERDRIVE,
        primary_identifier=work.presentation_edition.primary_identifier,
        title=u"The Harry Otter and the Seaweed of Ages",
    )
    edition, ignore = metadata.edition(self._db)
    metadata.apply(edition, None)

    # The work still has the wrong title.
    eq_("The Wrong Title", work.title)

    # However, the work is now slated to have its presentation
    # edition recalculated -- that will fix it.
    def assert_registered(full):
        """Verify that the WorkCoverageRecord for a full (full=True) or
        partial (full=False) presentation recalculation operation
        is in the 'registered' state, and that the
        WorkCoverageRecord for the other presentation
        recalculation operation is in the 'success' state.

        The verified WorkCoverageRecord will be reset to the 'success'
        state so that this can be called over and over without any
        extra setup.
        """
        WCR = WorkCoverageRecord
        for x in work.coverage_records:
            if x.operation == WCR.CLASSIFY_OPERATION:
                if full:
                    eq_(WCR.REGISTERED, x.status)
                    x.status = WCR.SUCCESS
                else:
                    eq_(WCR.SUCCESS, x.status)
            elif x.operation == WCR.CHOOSE_EDITION_OPERATION:
                if full:
                    eq_(WCR.SUCCESS, x.status)
                else:
                    eq_(WCR.REGISTERED, x.status)
                    x.status = WCR.SUCCESS
    assert_registered(full=False)

    # We then learn about a subject under which the work
    # is classified.
    metadata.title = None
    metadata.subjects = [SubjectData(Subject.TAG, "subject")]
    metadata.apply(edition, None)

    # The work is now slated to have its presentation completely
    # recalculated. (assert_registered returns None, so there is no
    # point in binding its result to a variable.)
    assert_registered(full=True)

    # We then find a new description for the work.
    metadata.subjects = None
    metadata.links = [
        LinkData(rel=Hyperlink.DESCRIPTION, content="a description")
    ]
    metadata.apply(edition, None)

    # We need to do a full recalculation again.
    assert_registered(full=True)

    # We then find a new cover image for the work.
    metadata.subjects = None
    metadata.links = [
        LinkData(rel=Hyperlink.IMAGE, href="http://image/")
    ]
    metadata.apply(edition, None)

    # We need to choose a new presentation edition.
    assert_registered(full=False)
def test_apply_identifier_equivalency(self):
# Set up an Edition.
edition, pool = self._edition(with_license_pool=True)
# Create two | |
all the column in :class:`UltimateListCtrl`."""
count = len(self._mainWin._columns)
for n in range(count):
self.DeleteColumn(0)
return True
def ClearAll(self):
    """Deletes everything (all items and all columns) in :class:`UltimateListCtrl`."""
    self._mainWin.DeleteEverything()
def DeleteColumn(self, col):
    """
    Deletes the specified column.

    :param `col`: the index of the column to delete.

    :return: always ``True``; the deletion itself is delegated to the main window.
    """
    self._mainWin.DeleteColumn(col)
    return True
def EditLabel(self, item):
    """
    Starts editing an item label (delegated to the main window).

    :param `item`: the index of the item to edit.
    """
    self._mainWin.EditLabel(item)
def EnsureVisible(self, item):
    """
    Ensures this item is visible, scrolling the control if necessary.

    :param `item`: the index of the item to scroll into view.

    :return: always ``True``.
    """
    self._mainWin.EnsureVisible(item)
    return True
def FindItem(self, start, str, partial=False):
    """
    Find an item whose label matches this string.

    :param `start`: the starting point of the search, or the beginning
     if `start` is -1;
    :param `str`: the string to look for matches (note: this parameter
     shadows the built-in ``str``; it is kept for API compatibility);
    :param `partial`: if ``True`` then this method will look for items which
     begin with `str`.

    :note: The string comparison is case insensitive.
    """
    return self._mainWin.FindItem(start, str, partial)
def FindItemData(self, start, data):
    """
    Find an item whose data matches this data.

    :param `start`: the starting point of the search, or the beginning
     if `start` is -1;
    :param `data`: the data to look for matches.
    """
    return self._mainWin.FindItemData(start, data)
def FindItemAtPos(self, start, pt):
    """
    Find an item nearest this position.

    :param `start`: accepted for API symmetry with the other ``Find*``
     methods but not forwarded to the main window -- the search always
     covers all items. NOTE(review): confirm this is intentional;
    :param `pt`: an instance of :class:`Point`.
    """
    return self._mainWin.FindItemAtPos(pt)
def HitTest(self, pointOrTuple):
    """
    HitTest method for a :class:`UltimateListCtrl`.

    :param `pointOrTuple`: an instance of :class:`Point` or an ``(x, y)``
     tuple giving the mouse position.

    :see: :meth:`UltimateListMainWindow.HitTestLine() <UltimateListMainWindow.HitTestLine>` for a list of return flags.
    """
    # Normalize the argument to a plain (x, y) pair before delegating.
    position = pointOrTuple
    if isinstance(position, wx.Point):
        position = (position.x, position.y)
    x, y = position
    return self._mainWin.HitTest(x, y)
def InsertItem(self, info):
    """
    Inserts an item into :class:`UltimateListCtrl`.

    :param `info`: an instance of :class:`UltimateListItem`.

    :return: the item id (``info._itemId``) of the inserted item.
    """
    self._mainWin.InsertItem(info)
    return info._itemId
def InsertStringItem(self, index, label, it_kind=0):
    """
    Inserts a string item at the given location.

    :param `index`: the index at which we wish to insert the item;
    :param `label`: the item text;
    :param `it_kind`: the item kind.

    :see: :meth:`~UltimateListCtrl.SetStringItem` for a list of valid item kinds.
    """
    item = UltimateListItem()
    item._itemId = index
    item._text = label
    mask = ULC_MASK_TEXT
    if it_kind:
        mask |= ULC_MASK_KIND
        item._kind = it_kind
    item._mask = mask
    return self.InsertItem(item)
def InsertImageItem(self, index, imageIds, it_kind=0):
    """
    Inserts an image item at the given location.

    :param `index`: the index at which we wish to insert the item;
    :param `imageIds`: a Python list containing the image indexes for the
     images associated to this item;
    :param `it_kind`: the item kind.

    :see: :meth:`~UltimateListCtrl.SetStringItem` for a list of valid item kinds.
    """
    item = UltimateListItem()
    item._itemId = index
    item._image = to_list(imageIds)
    mask = ULC_MASK_IMAGE
    if it_kind:
        mask |= ULC_MASK_KIND
        item._kind = it_kind
    item._mask = mask
    return self.InsertItem(item)
def InsertImageStringItem(self, index, label, imageIds, it_kind=0):
    """
    Inserts an image+string item at the given location.

    :param `index`: the index at which we wish to insert the item;
    :param `label`: the item text;
    :param `imageIds`: a Python list containing the image indexes for the
     images associated to this item;
    :param `it_kind`: the item kind.

    :see: :meth:`~UltimateListCtrl.SetStringItem` for a list of valid item kinds.
    """
    item = UltimateListItem()
    item._itemId = index
    item._text = label
    item._image = to_list(imageIds)
    mask = ULC_MASK_TEXT | ULC_MASK_IMAGE
    if it_kind:
        mask |= ULC_MASK_KIND
        item._kind = it_kind
    item._mask = mask
    return self.InsertItem(item)
def InsertColumnInfo(self, col, item):
    """
    Inserts a column into :class:`UltimateListCtrl`.

    :param `col`: the column index at which we wish to insert a column;
    :param `item`: an instance of :class:`UltimateListItem`.

    :returns: the index at which the column has been inserted.
    """
    # Columns only make sense in report/tile mode, or when the header
    # is forced on in all views.
    allowed = (self._mainWin.InReportView() or
               self.HasAGWFlag(ULC_HEADER_IN_ALL_VIEWS) or
               self._mainWin.InTileView())
    if not allowed:
        raise Exception("Can't add column in non report/tile modes or without the ULC_HEADER_IN_ALL_VIEWS style set")

    idx = self._mainWin.InsertColumn(col, item)
    if self._headerWin:
        self._headerWin.Refresh()
    return idx
def InsertColumn(self, col, heading, format=ULC_FORMAT_LEFT, width=-1):
    """
    Inserts a column into :class:`UltimateListCtrl`.

    :param `col`: the column index at which we wish to insert a column;
    :param `heading`: the header text;
    :param `format`: the column alignment flag: ``ULC_FORMAT_LEFT`` (0x0),
     ``ULC_FORMAT_RIGHT`` (0x1) or ``ULC_FORMAT_CENTRE``/``ULC_FORMAT_CENTER`` (0x2);
    :param `width`: a width in pixels, or ``wx.LIST_AUTOSIZE`` (-1, resize
     the column to the length of its longest item), or
     ``wx.LIST_AUTOSIZE_USEHEADER`` (-2, resize to the length of the header
     on Win32 and to 80 pixels on other platforms), or ``LIST_AUTOSIZE_FILL``
     (-3, resize the column to fill the remaining width of the window).

    :returns: the index at which the column has been inserted.
    """
    column = UltimateListItem()
    column._text = heading
    column._format = format
    column._mask = ULC_MASK_TEXT | ULC_MASK_FORMAT | ULC_MASK_FONT
    # LIST_AUTOSIZE_FILL (-3) carries no explicit width, so the width
    # mask is only set for values >= -2.
    if width >= -2:
        column._mask |= ULC_MASK_WIDTH
        column._width = width
    return self.InsertColumnInfo(col, column)
def IsColumnShown(self, column):
    """
    Returns ``True`` if the input column is shown, ``False`` if it is hidden.

    :param `column`: an integer specifying the column index.
    """
    # Column visibility is only meaningful when the header window exists.
    if not self._headerWin:
        raise Exception("Showing/hiding columns works only with the header shown")
    return self._mainWin.IsColumnShown(column)
def SetColumnShown(self, column, shown=True):
    """
    Sets the specified column as shown or hidden.

    :param `column`: an integer specifying the column index;
    :param `shown`: ``True`` to show the column, ``False`` to hide it.
    """
    item = self.GetColumn(column)
    # Mark the "shown" field as valid before pushing the change back.
    item._mask |= ULC_MASK_SHOWN
    item.SetShown(shown)
    self._mainWin.SetColumn(column, item)
    self.Update()
def ScrollList(self, dx, dy):
    """
    Scrolls the :class:`UltimateListCtrl`.

    :param `dx`: if in icon, small icon or report view mode, specifies the number
     of pixels to scroll. If in list view mode, `dx` specifies the number of
     columns to scroll.
    :param `dy`: always specifies the number of pixels to scroll vertically.

    :return: whatever the main window's ``ScrollList`` returns.
    """
    return self._mainWin.ScrollList(dx, dy)
# Sort items.
# The comparison function must return a negative number if the first item
# should precede the second, a positive number if the second item should
# precede the first, or zero if the two items are equivalent.
def SortItems(self, func=None):
    """
    Call this function to sort the items in the :class:`UltimateListCtrl`. Sorting is done
    using the specified function `func`. This function must have the
    following prototype::

        def OnCompareItems(self, line1, line2):

            DoSomething(line1, line2)
            # function code

    It is called each time when the two items must be compared and should return 0
    if the items are equal, negative value if the first item is less than the second
    one and positive value if the first one is greater than the second one.

    :param `func`: the method to use to sort the items. The default is to use the
     :meth:`UltimateListMainWindow.OnCompareItems() <UltimateListMainWindow.OnCompareItems>` method.

    :return: always ``True``.
    """
    self._mainWin.SortItems(func)
    # Redraw after the current event completes so the new order is shown.
    wx.CallAfter(self.Refresh)
    return True
# ----------------------------------------------------------------------------
# event handlers
# ----------------------------------------------------------------------------
def OnSize(self, event):
    """
    Handles the ``wx.EVT_SIZE`` event for :class:`UltimateListCtrl`.

    :param `event`: a :class:`SizeEvent` event to be processed.
    """
    # Proper column sizes are not available until the control is shown,
    # so defer the resize until after the first display.
    if not self.IsShownOnScreen() and self._mainWin:
        wx.CallAfter(self._mainWin.ResizeColumns)
    if not self._mainWin:
        return
    # We need to override OnSize so that our scrolled
    # window a) does call Layout() to use sizers for
    # positioning the controls but b) does not query
    # the sizer for their size and use that for setting
    # the scrollable area as set that ourselves by
    # calling SetScrollbar() further down.
    self.DoLayout()
def OnSetFocus(self, event):
    """
    Handles the ``wx.EVT_SET_FOCUS`` event for :class:`UltimateListCtrl`.

    :param `event`: a :class:`FocusEvent` event to be processed.
    """
    if self._mainWin:
        # Forward the focus to the main window, which owns the drawing.
        self._mainWin.SetFocusIgnoringChildren()
        self._mainWin.Update()
    event.Skip()
def OnInternalIdle(self):
"""
This method is normally only used internally, but sometimes an application
may need it to implement functionality that should not be disabled by an
application defining an `OnIdle` handler in a derived class.
This method may be used to do delayed painting, for example, and most
implementations call :meth:`Window.UpdateWindowUI` in order to send update events
to the window in idle time.
"""
wx.PyControl.OnInternalIdle(self)
# do it only if | |
from ..mapping import MappedArray, AccessType
from ..indexing import is_fullslice, split_operation, slicer_sub2ind, invert_slice
from .. import volutils
from ..readers import reader_classes
from .metadata import ome_zooms, parse_unit
from nitorch.spatial import affine_default
from nitorch.core import pyutils, dtypes
from tifffile import TiffFile
from contextlib import contextmanager
import torch
import numpy as np
from warnings import warn
class TiffArray(MappedArray):
"""
MappedArray that uses `tifffile` under the hood.
"""
def __init__(self, file_like, permission='r', keep_file_open=True, **hints):
    """
    Parameters
    ----------
    file_like : str or file object
        Path or file object to map.
    permission : str, default='r'
        NOTE(review): accepted but never read in this constructor and
        not forwarded anywhere -- confirm whether it should be.
    keep_file_open : bool, default=True
        Whether to keep the file handle open
    hints : keyword of the form `is_<format>=<True|False>`
        Tells the Tiff reader that a file is or isn't of a specific
        subformat. If not provided, it is guessed by the Tiff reader.
    """
    self._tiff = TiffFile(file_like, **hints)
    # Close right away if requested; `tiffobj()` reopens on demand.
    if not keep_file_open:
        self._tiff.close()
    self._series = 0
    self._level = 0
    self._cache = dict()
    super().__init__()
_series: int = 0  # index of series to map
_level: int = 0   # index of pyramid level to map
# NOTE(review): this class-level `{}` is shared across instances; __init__
# replaces it with a fresh dict, but any instance created without running
# __init__ would mutate the shared default.
_cache: dict = {}  # a cache of precomputed _shape, _spatial, etc
@property
def _shape(self):
    """Full shape of the mapped series+level (computed once, then cached)."""
    try:
        return self._cache['_shape']
    except KeyError:
        with self.tiffobj() as tiff:
            shape = tiff.series[self.series].levels[self.level].shape
        self._cache['_shape'] = shape
        return shape
@property
def _axes(self):
    """Axes names of the mapped series+level (computed once, then cached)."""
    try:
        return self._cache['_axes']
    except KeyError:
        with self.tiffobj() as tiff:
            axes = tiff.series[self.series].levels[self.level].axes
        self._cache['_axes'] = axes
        return axes
@property
def _spatial(self):
    """Boolean mask marking which axes are spatial (named X, Y or Z)."""
    return [axis in 'XYZ' for axis in self._axes]
@property
def _affine(self):
    """Affine orientation matrix of a series+level.

    Precedence for voxel sizes: OME metadata, then the GeoTiff
    ``ModelPixelScaleTag``, then unit zooms. Precedence for the matrix
    itself: ``ModelTransformation``, then ``ModelTiepointTag``, then a
    default affine built from the zooms. The result is cached.
    """
    # TODO: I don't know yet how we should use GeoTiff to encode
    # affine matrices. In the matrix/zooms, their voxels are ordered
    # as [x, y, z] even though their dimensions in the returned array
    # are ordered as [Z, Y, X]. If we want to keep the same convention
    # as nitorch, I need to permute the matrix/zooms.
    if '_affine' not in self._cache:
        # Only the metadata reads need the file handle.
        with self.tiffobj() as tiff:
            omexml = tiff.ome_metadata
            geotags = tiff.geotiff_metadata or {}
        zooms, units, axes = ome_zooms(omexml, self.series)
        if zooms:
            # convert to mm + drop non-spatial zooms
            units = [parse_unit(u) for u in units]
            zooms = [z * (f / 1e-3) for z, (f, type) in zip(zooms, units)
                     if type == 'm']
            if 'ModelPixelScaleTag' in geotags:
                # Conflicting metadata sources: prefer OME but warn.
                warn("Both OME and GeoTiff pixel scales are present: "
                     "{} vs {}. Using OME."
                     .format(zooms, geotags['ModelPixelScaleTag']))
        elif 'ModelPixelScaleTag' in geotags:
            zooms = geotags['ModelPixelScaleTag']
            axes = 'XYZ'
        else:
            # No pixel-size metadata at all: fall back to unit zooms.
            zooms = 1.
            axes = [ax for ax in self._axes if ax in 'XYZ']
        if 'ModelTransformation' in geotags:
            # A full 4x4 matrix is stored directly in the GeoTiff tags.
            aff = geotags['ModelTransformation']
            aff = torch.as_tensor(aff, dtype=torch.double).reshape(4, 4)
            self._cache['_affine'] = aff
        elif ('ModelTiepointTag' in geotags):
            # copied from tifffile: build one affine per tie point.
            sx, sy, sz = pyutils.make_list(zooms, n=3)
            tiepoints = torch.as_tensor(geotags['ModelTiepointTag'])
            affines = []
            for tiepoint in tiepoints:
                i, j, k, x, y, z = tiepoint
                affines.append(torch.as_tensor(
                    [[sx, 0.0, 0.0, x - i * sx],
                     [0.0, -sy, 0.0, y + j * sy],
                     [0.0, 0.0, sz, z - k * sz],
                     [0.0, 0.0, 0.0, 1.0]], dtype=torch.double))
            affines = torch.stack(affines, dim=0)
            if len(tiepoints) == 1:
                affines = affines[0]
            self._cache['_affine'] = affines
        else:
            # No matrix metadata: derive a default affine from the zooms.
            zooms = pyutils.make_list(zooms, n=len(axes))
            ax2zoom = {ax: zoom for ax, zoom in zip(axes, zooms)}
            axes = [ax for ax in self._axes if ax in 'XYZ']
            shape = [shp for shp, msk in zip(self._shape, self._spatial)
                     if msk]
            zooms = [ax2zoom.get(ax, 1.) for ax in axes]
            layout = [('R' if ax == 'Z' else 'P' if ax == 'Y' else 'S')
                      for ax in axes]
            aff = affine_default(shape, zooms, layout=''.join(layout))
            self._cache['_affine'] = aff
    return self._cache['_affine']
@property
def dtype(self):
    """Data type of the mapped series+level (computed once, then cached)."""
    try:
        return self._cache['dtype']
    except KeyError:
        with self.tiffobj() as tiff:
            dt = tiff.series[self.series].levels[self.level].dtype
        self._cache['dtype'] = dt
        return dt
@property
def series(self):
    """Series index (Tiff files can hold multiple series)"""
    return self._series

@series.setter
def series(self, val):
    # Changing the series would invalidate the current view, so it is
    # only allowed while the array is unsliced.
    if val != self.series and not all(is_fullslice(self.slicer)):
        raise RuntimeError("Cannot change series in a view")
    self._series = val
    # Cached shape/axes/affine/dtype belong to the old series; drop them.
    self._cache = {}
@property
def level(self):
    """Level index (Tiff files can hold multiple spatial resolutions)"""
    return self._level

@level.setter
def level(self, val):
    # Changing the level would invalidate the current view, so it is
    # only allowed while the array is unsliced.
    if val != self.level and not all(is_fullslice(self.slicer)):
        raise RuntimeError("Cannot change resolution level in a view")
    self._level = val
    # Cached shape/axes/affine/dtype belong to the old level; drop them.
    self._cache = {}
@property
def readable(self):
    # That's not exact: pseudo partial access in-plane
    return AccessType.TruePartial

@property
def writable(self):
    # Writing Tiff data is not supported by this mapper.
    return AccessType.No
@contextmanager
def tiffobj(self):
    """Returns an *open* Tiff reader.

    Should be used in a `with` statement:
    ```python
    >>> with self.tiffobj() as tiff:
    >>>    # do stuff with `tiff`
    ```

    The reader's original open/closed state is restored on exit.
    """
    closed = self._tiff.filehandle.closed
    if closed:
        self._tiff.filehandle.open()
    try:
        yield self._tiff
    finally:
        # Restore the original state even if the caller's block raised;
        # without try/finally an exception would leak the handle open.
        if closed:
            self._tiff.close()
def __del__(self):
    # make sure we close all file objects. Guard with getattr: if
    # __init__ raised before `_tiff` was assigned (e.g. TiffFile failed),
    # __del__ still runs and would otherwise raise AttributeError.
    tiff = getattr(self, '_tiff', None)
    if tiff is not None:
        tiff.close()
@property
def filename(self):
    """Name of the mapped file, as reported by the Tiff reader."""
    with self.tiffobj() as f:
        return f.filename
def data(self, dtype=None, device=None, casting='unsafe', rand=True,
         cutoff=None, dim=None, numpy=False):
    """Load the (sliced, permuted) array into memory.

    Parameters
    ----------
    dtype : dtype-like, optional
        Output data type; defaults to the on-disk type.
    device : torch device, optional
        Device of the output tensor (ignored when `numpy=True`).
    casting : str, default='unsafe'
        Casting rule passed to `volutils.cast`.
    rand : bool, default=True
        Add uniform noise in the quantization interval when casting
        integer data (dithering).
    cutoff, dim
        Passed to `volutils.cutoff` to clip intensity outliers.
    numpy : bool, default=False
        Return a `np.ndarray` instead of a `torch.Tensor`.
    """
    # --- sanity check before reading ---
    dtype = self.dtype if dtype is None else dtype
    dtype = dtypes.dtype(dtype)
    if not numpy and dtype.torch is None:
        raise TypeError('Data type {} does not exist in PyTorch.'
                        .format(dtype))

    # --- check that view is not empty ---
    if pyutils.prod(self.shape) == 0:
        if numpy:
            return np.zeros(self.shape, dtype=dtype.numpy)
        else:
            return torch.zeros(self.shape, dtype=dtype.torch, device=device)

    # --- read native data ---
    slicer, perm, newdim = split_operation(self.permutation, self.slicer, 'r')
    with self.tiffobj() as f:
        dat = self._read_data_raw(slicer, tiffobj=f)
    dat = dat.transpose(perm)[newdim]
    indtype = dtypes.dtype(self.dtype)

    # --- cutoff ---
    dat = volutils.cutoff(dat, cutoff, dim)

    # --- cast ---
    # Dithering only applies when the input type is an integer type.
    rand = rand and not indtype.is_floating_point
    if rand and not dtype.is_floating_point:
        # int -> int cast with dithering: go through float64 first.
        tmpdtype = dtypes.float64
    else:
        tmpdtype = dtype
    dat, scale = volutils.cast(dat, tmpdtype.numpy, casting, with_scale=True)

    # --- random sample ---
    # uniform noise in the uncertainty interval
    if rand and not (scale == 1 and not dtype.is_floating_point):
        dat = volutils.addnoise(dat, scale)

    # --- final cast ---
    dat = volutils.cast(dat, dtype.numpy, 'unsafe')

    # convert to torch if needed
    if not numpy:
        dat = torch.as_tensor(dat, device=device)
    return dat
# --------------
# LOW LEVEL
# --------------
def _read_data_raw(self, slicer=None, tiffobj=None):
    """Read native data.

    Dispatch to `_read_data_raw_full` or `_read_data_raw_partial`.

    Parameters
    ----------
    slicer : tuple[index_like], optional
        A tuple of indices that describe the chunk of data to read.
        If None, read everything.
    tiffobj : file object, default=`self.fileobj('image', 'r')`
        A file object (with `seek`, `read`) from which to read

    Returns
    -------
    dat : np.ndarray
    """
    if tiffobj is None:
        # Open a handle and recurse with it.
        with self.tiffobj() as handle:
            return self._read_data_raw(slicer, handle)
    # A missing or all-full slicer means the whole array is wanted.
    wants_everything = slicer is None or all(is_fullslice(slicer, self._shape))
    if wants_everything:
        return self._read_data_raw_full(tiffobj)
    return self._read_data_raw_partial(slicer, tiffobj)
def _read_data_raw_partial(self, slicer, tiffobj=None):
    """Read a chunk of data from disk.

    Parameters
    ----------
    slicer : tuple[slice or int]
        Indices into the native array.
    tiffobj : TiffFile
        An open Tiff reader; opened temporarily if None.

    Returns
    -------
    dat : np.ndarray
    """
    if tiffobj is None:
        with self.tiffobj() as tiffobj:
            return self._read_data_raw_partial(slicer, tiffobj)

    # 1) split dimensions into feature / stack (pages) / in-page groups
    shape_feat, shape_stack, shape_page = self._shape_split(tiffobj)
    dim_feat = len(shape_feat)
    dim_stack = len(shape_stack)
    dim_page = len(shape_page)

    # 2) split slicer along the same groups
    slicer_feat = slicer[:dim_feat]
    slicer_stack = slicer[dim_feat:dim_feat+dim_stack]
    slicer_page = slicer[dim_feat+dim_stack:]
    dim_feat_out = sum(isinstance(idx, slice) for idx in slicer_feat)
    dim_stack_out = sum(isinstance(idx, slice) for idx in slicer_stack)
    dim_page_out = sum(isinstance(idx, slice) for idx in slicer_page)

    # 3) ensure positive strides for the stack read; the matching
    #    reversals are re-applied at step 8
    slicer_inv = [slice(None, None, -1) if idx.step and idx.step < 0
                  else slice(None) for idx in slicer_stack
                  if isinstance(idx, slice)]
    slicer_stack = [invert_slice(idx, shp) if isinstance(idx, slice) and
                    idx.step and idx.step < 0
                    else idx for idx, shp in zip(slicer_stack, shape_stack)]

    # 4) convert stack slice to list of linear indices
    # (or to one slice if possible)
    index_stack = slicer_sub2ind(slicer_stack, shape_stack)

    # 5) read only pages in the substack
    dat = tiffobj.asarray(key=index_stack,
                          series=self.series,
                          level=self.level)
    dat = dat.reshape([*shape_feat, -1, *shape_page])

    # 6) apply slicers along the feature and page dimensions
    dat = dat[(*slicer_feat, slice(None), *slicer_page)]

    # 7) reshape to the shape of this view
    dat = dat.reshape(self.shape)

    # 8) final slicers for negative strides along stack dimensions
    slicer = [slice(None)] * dim_feat_out + slicer_inv + [slice(None)] * dim_page_out
    dat = dat[tuple(slicer)]
    return dat
def _read_data_raw_full(self, tiffobj=None):
    """Read the full data from disk.

    Parameters
    ----------
    tiffobj : TiffFile
        An open Tiff reader; opened temporarily if None.

    Returns
    -------
    dat : np.ndarray
    """
    if tiffobj is not None:
        return tiffobj.asarray(series=self.series, level=self.level)
    # No handle supplied: open one and recurse.
    with self.tiffobj() as handle:
        return self._read_data_raw_full(handle)
def _shape_split(self, | |
**Response Structure**
- *(dict) --*
- **ComplianceSummaryItems** *(list) --*
A list of compliant and non-compliant summary counts based on compliance types. For example, this call returns State Manager associations, patches, or custom compliance types according to the filter criteria that you specified.
- *(dict) --*
A summary of compliance information by compliance type.
- **ComplianceType** *(string) --*
The type of compliance item. For example, the compliance type can be Association, Patch, or Custom:string.
- **CompliantSummary** *(dict) --*
A list of COMPLIANT items for the specified compliance type.
- **CompliantCount** *(integer) --*
The total number of resources that are compliant.
- **SeveritySummary** *(dict) --*
A summary of the compliance severity by compliance type.
- **CriticalCount** *(integer) --*
The total number of resources or compliance items that have a severity level of critical. Critical severity is determined by the organization that published the compliance items.
- **HighCount** *(integer) --*
The total number of resources or compliance items that have a severity level of high. High severity is determined by the organization that published the compliance items.
- **MediumCount** *(integer) --*
The total number of resources or compliance items that have a severity level of medium. Medium severity is determined by the organization that published the compliance items.
- **LowCount** *(integer) --*
The total number of resources or compliance items that have a severity level of low. Low severity is determined by the organization that published the compliance items.
- **InformationalCount** *(integer) --*
The total number of resources or compliance items that have a severity level of informational. Informational severity is determined by the organization that published the compliance items.
- **UnspecifiedCount** *(integer) --*
The total number of resources or compliance items that have a severity level of unspecified. Unspecified severity is determined by the organization that published the compliance items.
- **NonCompliantSummary** *(dict) --*
A list of NON_COMPLIANT items for the specified compliance type.
- **NonCompliantCount** *(integer) --*
The total number of compliance items that are not compliant.
- **SeveritySummary** *(dict) --*
A summary of the non-compliance severity by compliance type
- **CriticalCount** *(integer) --*
The total number of resources or compliance items that have a severity level of critical. Critical severity is determined by the organization that published the compliance items.
- **HighCount** *(integer) --*
The total number of resources or compliance items that have a severity level of high. High severity is determined by the organization that published the compliance items.
- **MediumCount** *(integer) --*
The total number of resources or compliance items that have a severity level of medium. Medium severity is determined by the organization that published the compliance items.
- **LowCount** *(integer) --*
The total number of resources or compliance items that have a severity level of low. Low severity is determined by the organization that published the compliance items.
- **InformationalCount** *(integer) --*
The total number of resources or compliance items that have a severity level of informational. Informational severity is determined by the organization that published the compliance items.
- **UnspecifiedCount** *(integer) --*
The total number of resources or compliance items that have a severity level of unspecified. Unspecified severity is determined by the organization that published the compliance items.
:type Filters: list
:param Filters:
One or more compliance or inventory filters. Use a filter to return a more specific list of results.
- *(dict) --*
One or more filters. Use a filter to return a more specific list of results.
- **Key** *(string) --*
The name of the filter.
- **Values** *(list) --*
The value for which to search.
- *(string) --*
- **Type** *(string) --*
The type of comparison that should be performed for the value: Equal, NotEqual, BeginWith, LessThan, or GreaterThan.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
class ListDocumentVersions(Paginator):
    def paginate(self, Name: str, PaginationConfig: Dict = None) -> Dict:
        """
        Creates an iterator that will paginate through responses from
        :py:meth:`SSM.Client.list_document_versions`.

        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/ListDocumentVersions>`_

        **Request Syntax**
        ::

          response_iterator = paginator.paginate(
              Name='string',
              PaginationConfig={
                  'MaxItems': 123,
                  'PageSize': 123,
                  'StartingToken': 'string'
              }
          )

        **Response Syntax**
        ::

          {
              'DocumentVersions': [
                  {
                      'Name': 'string',
                      'DocumentVersion': 'string',
                      'VersionName': 'string',
                      'CreatedDate': datetime(2015, 1, 1),
                      'IsDefaultVersion': True|False,
                      'DocumentFormat': 'YAML'|'JSON',
                      'Status': 'Creating'|'Active'|'Updating'|'Deleting'|'Failed',
                      'StatusInformation': 'string'
                  },
              ],
          }

        Each entry of ``DocumentVersions`` describes one version of the
        named Systems Manager document: its name, version number, the
        optional unique ``VersionName`` label (e.g. "Release 12, Update 6"),
        creation date, whether it is the default version, its format
        (JSON or YAML), its status (such as ``Creating``, ``Active``,
        ``Failed`` or ``Deleting``) and a ``StatusInformation`` message
        explaining that status (for example, why a ``Failed`` status
        occurred).

        :type Name: string
        :param Name: **[REQUIRED]**
          The name of the document about which you want version information.

        :type PaginationConfig: dict
        :param PaginationConfig:
          A dictionary that provides parameters to control pagination:
          ``MaxItems`` -- the total number of items to return (if more are
          available, a ``NextToken`` is provided in the output to resume
          pagination); ``PageSize`` -- the size of each page;
          ``StartingToken`` -- where to start paginating, i.e. the
          ``NextToken`` from a previous response.

        :rtype: dict
        :returns:
        """
        pass
class ListDocuments(Paginator):
def paginate(self, DocumentFilterList: List = None, Filters: List = None, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`SSM.Client.list_documents`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/ListDocuments>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
DocumentFilterList=[
{
'key': 'Name'|'Owner'|'PlatformTypes'|'DocumentType',
'value': 'string'
},
],
Filters=[
{
'Key': 'string',
'Values': [
'string',
]
},
],
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'DocumentIdentifiers': [
{
'Name': 'string',
'Owner': 'string',
'VersionName': 'string',
'PlatformTypes': [
'Windows'|'Linux',
],
'DocumentVersion': 'string',
'DocumentType': 'Command'|'Policy'|'Automation'|'Session'|'Package',
'SchemaVersion': 'string',
'DocumentFormat': 'YAML'|'JSON',
'TargetType': 'string',
'Tags': [
{
'Key': 'string',
'Value': 'string'
},
]
},
],
}
**Response Structure**
- *(dict) --*
- **DocumentIdentifiers** *(list) --*
The names of the Systems Manager documents.
- *(dict) --*
Describes the name of a Systems Manager document.
- **Name** *(string) --*
The name of the Systems Manager document.
- **Owner** *(string) --*
The AWS user account that created the document.
- **VersionName** *(string) --*
An optional field specifying the version of the artifact associated with the document. For example, "Release 12, Update 6". This value is unique across all versions of a document, and | |
<filename>bb-master/sandbox/lib/python3.5/site-packages/buildbot/scripts/runner.py
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
#
# N.B.: don't import anything that might pull in a reactor yet. Some of our
# subcommands want to load modules that need the gtk reactor.
#
# Also don't forget to mirror your changes on command-line options in manual
# pages and texinfo documentation.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from future.builtins import range
import sys
import textwrap
import sqlalchemy as sa
from twisted.python import reflect
from twisted.python import usage
from buildbot.scripts import base
# Note that the terms 'options' and 'config' are used interchangeably here - in
# fact, they are interchanged several times. Caveat legator.
def validateMasterOption(master):
    """
    Validate master (-m, --master) command line option.

    Checks that option is a string of the 'hostname:port' form, otherwise
    raises an UsageError exception.

    @type  master: string
    @param master: master option

    @raise usage.UsageError: on invalid master option
    """
    try:
        # exactly one ':' separator, and the port part must parse as an int
        _, port = master.split(":")
        int(port)
    except (TypeError, ValueError):
        raise usage.UsageError("master must have the form 'hostname:port'")
class UpgradeMasterOptions(base.BasedirMixin, base.SubcommandOptions):
    """Option parser for the 'buildbot upgrade-master' subcommand."""
    # Dotted path to the implementation function; resolved lazily so that
    # importing this module does not pull in a reactor (see file header).
    subcommandFunction = "buildbot.scripts.upgrade_master.upgradeMaster"
    # [long name, short name, help] triples, per twisted.python.usage
    optFlags = [
        ["quiet", "q", "Do not emit the commands being run"],
        ["develop", "d", "link to buildbot dir rather than copy, with no "
         "JS optimization (UNIX only)"],
        ["replace", "r", "Replace any modified files without confirmation."],
    ]
    # no parameters beyond the flags above
    optParameters = [
    ]

    def getSynopsis(self):
        # one-line usage string shown in --help output
        return "Usage: buildbot upgrade-master [options] [<basedir>]"

    # extended help text appended to --help output (runtime string)
    longdesc = textwrap.dedent("""
    This command takes an existing buildmaster working directory and
    adds/modifies the files there to work with the current version of
    buildbot. When this command is finished, the buildmaster directory should
    look much like a brand-new one created by the 'create-master' command.
    Use this after you've upgraded your buildbot installation and before you
    restart the buildmaster to use the new version.
    If you have modified the files in your working directory, this command
    will leave them untouched, but will put the new recommended contents in a
    .new file (for example, if index.html has been modified, this command
    will create index.html.new). You can then look at the new version and
    decide how to merge its contents into your modified file.
    When upgrading the database, this command uses the database specified in
    the master configuration file. If you wish to use a database other than
    the default (sqlite), be sure to set that parameter before upgrading.
    """)
class CreateMasterOptions(base.BasedirMixin, base.SubcommandOptions):
    """Option parser for the 'buildbot create-master' subcommand."""
    # Dotted path to the implementation; imported lazily by the dispatcher.
    subcommandFunction = "buildbot.scripts.create_master.createMaster"
    optFlags = [
        ["quiet", "q", "Do not emit the commands being run"],
        ["force", "f",
         "Re-use an existing directory (will not overwrite master.cfg file)"],
        ["relocatable", "r",
         "Create a relocatable buildbot.tac"],
        ["develop", "d", "link to buildbot dir rather than copy, with no "
         "JS optimization (UNIX only)"],
        ["no-logrotate", "n",
         "Do not permit buildmaster rotate logs by itself"]
    ]
    # [long name, short name, default, help(, coercion)] per twisted usage
    optParameters = [
        ["config", "c", "master.cfg", "name of the buildmaster config file"],
        ["log-size", "s", 10000000,
         "size at which to rotate twisted log files", int],
        ["log-count", "l", 10,
         "limit the number of kept old twisted log files"],
        ["db", None, "sqlite:///state.sqlite",
         "which DB to use for scheduler/status state. See below for syntax."],
    ]

    def getSynopsis(self):
        # one-line usage string shown in --help output
        return "Usage: buildbot create-master [options] [<basedir>]"

    # extended help text appended to --help output (runtime string)
    longdesc = textwrap.dedent("""
    This command creates a buildmaster working directory and buildbot.tac file.
    The master will live in <basedir> (defaults to the current directory)
    and create various files there.
    If --relocatable is given, then the resulting buildbot.tac file will be
    written such that its containing directory is assumed to be the basedir.
    This is generally a good idea.
    At runtime, the master will read a configuration file (named
    'master.cfg' by default) in its basedir. This file should contain python
    code which eventually defines a dictionary named 'BuildmasterConfig'.
    The elements of this dictionary are used to configure the Buildmaster.
    See doc/config.xhtml for details about what can be controlled through
    this interface.
    The --db string is evaluated to build the DB object, which specifies
    which database the buildmaster should use to hold scheduler state and
    status information. The default (which creates an SQLite database in
    BASEDIR/state.sqlite) is equivalent to:
    --db='sqlite:///state.sqlite'
    To use a remote MySQL database instead, use something like:
    --db='mysql://bbuser:bbpasswd@dbhost/bbdb'
    The --db string is stored verbatim in the buildbot.tac file, and
    evaluated at 'buildbot start' time to pass a DBConnector instance into
    the newly-created BuildMaster object.
    """)

    def postOptions(self):
        """Validate parsed options after command-line processing."""
        base.BasedirMixin.postOptions(self)

        # validate 'log-count' parameter
        # command-line values arrive as strings, so the literal text 'None'
        # must be recognized explicitly
        if self['log-count'] == 'None':
            self['log-count'] = None
        else:
            try:
                self['log-count'] = int(self['log-count'])
            except ValueError:
                raise usage.UsageError(
                    "log-count parameter needs to be an int or None")

        # validate 'db' parameter
        try:
            # check if sqlalchemy will be able to parse specified URL
            sa.engine.url.make_url(self['db'])
        except sa.exc.ArgumentError:
            raise usage.UsageError("could not parse database URL '%s'"
                                   % self['db'])
class StopOptions(base.BasedirMixin, base.SubcommandOptions):
    """Option parser for the 'buildbot stop' subcommand."""
    # Dotted path to the implementation; imported lazily by the dispatcher.
    subcommandFunction = "buildbot.scripts.stop.stop"
    optFlags = [
        ["quiet", "q", "Do not emit the commands being run"],
        ["clean", "c", "Clean shutdown master"],
        ["no-wait", None, "Don't wait for complete master shutdown"],
    ]

    def getSynopsis(self):
        # one-line usage string shown in --help output
        return "Usage: buildbot stop [<basedir>]"
class RestartOptions(base.BasedirMixin, base.SubcommandOptions):
    """Option parser for the 'buildbot restart' subcommand."""
    # Dotted path to the implementation; imported lazily by the dispatcher.
    subcommandFunction = "buildbot.scripts.restart.restart"
    optFlags = [
        ['quiet', 'q', "Don't display startup log messages"],
        ['nodaemon', None, "Don't daemonize (stay in foreground)"],
        ["clean", "c", "Clean shutdown master"],
    ]

    def getSynopsis(self):
        # one-line usage string shown in --help output
        return "Usage: buildbot restart [<basedir>]"
class StartOptions(base.BasedirMixin, base.SubcommandOptions):
    """Option parser for the 'buildbot start' subcommand."""
    # Dotted path to the implementation; imported lazily by the dispatcher.
    subcommandFunction = "buildbot.scripts.start.start"
    optFlags = [
        ['quiet', 'q', "Don't display startup log messages"],
        ['nodaemon', None, "Don't daemonize (stay in foreground)"],
    ]

    def getSynopsis(self):
        # one-line usage string shown in --help output
        return "Usage: buildbot start [<basedir>]"
class ReconfigOptions(base.BasedirMixin, base.SubcommandOptions):
    """Option parser for the 'buildbot reconfig' subcommand."""
    # Dotted path to the implementation; imported lazily by the dispatcher.
    subcommandFunction = "buildbot.scripts.reconfig.reconfig"
    optFlags = [
        ['quiet', 'q', "Don't display log messages about reconfiguration"],
    ]

    def getSynopsis(self):
        # one-line usage string shown in --help output
        return "Usage: buildbot reconfig [<basedir>]"
class SendChangeOptions(base.SubcommandOptions):
subcommandFunction = "buildbot.scripts.sendchange.sendchange"
def __init__(self):
base.SubcommandOptions.__init__(self)
self['properties'] = {}
optParameters = [
("master", "m", None,
"Location of the buildmaster's PBChangeSource (host:port)"),
# deprecated in 0.8.3; remove in 0.8.5 (bug #1711)
("auth", "a", 'change:changepw',
"Authentication token - <PASSWORD>:password, or prompt for password"),
("who", "W", None, "Author of the commit"),
("repository", "R", '', "Repository specifier"),
("vc", "s", None, "The VC system in use, one of: cvs, svn, darcs, hg, "
"bzr, git, mtn, p4"),
("project", "P", '', "Project specifier"),
("branch", "b", None, "Branch specifier"),
("category", "C", None, "Category of repository"),
("codebase", None, None,
"Codebase this change is in (requires 0.8.7 master or later)"),
("revision", "r", None, "Revision specifier"),
("revision_file", None, None, "Filename containing revision spec"),
("property", "p", None,
"A property for the change, in the format: name:value"),
("comments", "c", None, "log message"),
("logfile", "F", None,
"Read the log messages from this file (- for stdin)"),
("when", "w", None, "timestamp to use as the change time"),
("revlink", "l", '', "Revision link (revlink)"),
("encoding", "e", 'utf8', "Encoding of other parameters"),
]
buildbotOptions = [
['master', 'master'],
['who', 'who'],
['branch', 'branch'],
['category', 'category'],
['vc', 'vc'],
]
requiredOptions = ['who', 'master']
def getSynopsis(self):
return "Usage: buildbot sendchange [options] filenames.."
def parseArgs(self, *args):
self['files'] = args
def opt_property(self, property):
name, value = property.split(':', 1)
self['properties'][name] = value
def postOptions(self):
base.SubcommandOptions.postOptions(self)
if self.get("revision_file"):
with open(self["revision_file"], "r") as f:
self['revision'] = f.read()
if self.get('when'):
try:
self['when'] = float(self['when'])
except (TypeError, ValueError):
raise usage.UsageError('invalid "when" value %s'
% (self['when'],))
else:
self['when'] = None
if not self.get('comments') and self.get('logfile'):
if self['logfile'] == "-":
self['comments'] = sys.stdin.read()
else:
with open(self['logfile'], "rt") as f:
self['comments'] = f.read()
if self.get('comments') is None:
self['comments'] = ""
# fix up the auth with a password if none was given
auth = self.get('auth')
if ':' not in auth:
import getpass
pw = getpass.getpass("Enter password for '%s': " % auth)
auth = "%s:%s" % (auth, pw)
self['auth'] = tuple(auth.split(':', 1))
vcs = ['cvs', 'svn', 'darcs', 'hg', 'bzr', 'git', | |
self.hemi = "both"
if self.hemi == "both":
# check if arrays are in string format
for hemi in ["L", "R"]:
self.data['normal'][hemi] = string2float(self.data['normal'][hemi])
self.data['position'][hemi] = string2float(self.data['position'][hemi])
else:
self.data['normal'][self.hemi] = string2float(self.data['normal'][self.hemi])
self.data['position'][self.hemi] = string2float(self.data['position'][self.hemi])
self.subject = subject
def get(self, keyword, hemi='both'):
"""return values from dataframe given keyword. Can be any column name or 'prf' for pRF-parameters"""
keywords = np.array(self.data.columns)
if keyword == "prf":
if hemi == "both":
return {"lh": [self.data[ii]['L'] for ii in ['x', 'y', 'size', 'beta', 'baseline', 'r2']],
"rh": [self.data[ii]['R'] for ii in ['x', 'y', 'size', 'beta', 'baseline', 'r2']]}
elif hemi.lower() == "right" or hemi.lower() == "r" or hemi.lower() == "rh":
return [self.data[ii]['R'] for ii in ['x', 'y', 'size', 'beta', 'baseline', 'r2']]
elif hemi.lower() == "left" or hemi.lower() == "l" or hemi.lower() == "lh":
return [self.data[ii]['L'] for ii in ['x', 'y', 'size', 'beta', 'baseline', 'r2']]
else:
if keyword not in keywords:
raise ValueError(f"{keyword} does not exist in {keywords}")
if hemi == "both":
return {"lh": self.data[keyword]['L'],
"rh": self.data[keyword]['R']}
elif hemi.lower() == "right" or hemi.lower() == "r" or hemi.lower() == "rh":
return self.data[keyword]['R']
elif hemi.lower() == "left" or hemi.lower() == "l" or hemi.lower() == "lh":
return self.data[keyword]['L']
def make_binary_cm(color):
    """make_binary_cm

    Create a custom binary colormap from a single color: R/G/B stay fixed
    while alpha ramps from fully transparent to fully opaque. Especially
    useful to overlay in `plt.imshow`. RGB values above 1 are treated as
    0-255 codes and scaled down to matplotlib's 0-1 range (see
    `https://htmlcolorcodes.com` to look up RGB-values). The approach comes
    from https://kbkb-wx-python.blogspot.com/2015/12/python-transparent-colormap.html

    Parameters
    ----------
    <color>: tuple, str
        either hex-code with (!!) '#' or a tuple consisting of:

        * <R>     int | red-channel (0-255)
        * <G>     int | green-channel (0-255)
        * <B>     int | blue-channel (0-255)

    Returns
    ----------
    matplotlib.colors.LinearSegmentedColormap object
        colormap to be used with `plt.imshow`

    Example
    ----------
    >>> cm = make_binary_cm((232,255,0))
    >>> cm = make_binary_cm("#D01B47")
    """
    if isinstance(color, tuple):
        (R, G, B) = color
    elif isinstance(color, str):
        # resolve hex-code to an (R,G,B) tuple first
        (R, G, B) = ImageColor.getcolor(color, "RGB")

    # scale 0-255 channels down to the 0-1 range matplotlib expects
    R = R / 255 if R > 1 else R
    G = G / 255 if G > 1 else G
    B = B / 255 if B > 1 else B

    # alpha ramps over 100 steps from transparent to opaque
    ramp = [(R, G, B, alpha) for alpha in np.linspace(0, 1, 100)]
    return mcolors.LinearSegmentedColormap.from_list('mycmap', ramp, N=5)
def percent_change(ts, ax):
    """Convert a timeseries to percent signal change via the nilearn method."""
    # divide by the mean along `ax` (kept as a broadcastable axis), then
    # express the deviation from 1 as a percentage
    baseline = np.expand_dims(np.mean(ts, ax), ax)
    return 100 * (ts / baseline - 1)
def select_from_df(df, expression="run = 1", index=True, indices=None):
    """Select a subset of a dataframe.

    Two modes: with expression == "ribbon", columns are selected positionally
    via `indices`; otherwise `expression` holds one or two
    "column operator value" strings (two expressions are joined by a logical
    operator string placed between them, e.g. ["run = 1", "&", "task = x"]).

    Parameters
    ----------
    df: pandas.DataFrame
        input dataframe
    expression: str, tuple, list, optional
        "ribbon", a single expression string, or an alternating
        [expr, op, expr] sequence (at most two expressions are evaluated)
    index: bool, optional
        if not None, re-apply the dataframe's original index afterwards
    indices: tuple, list, numpy.ndarray, optional
        column range (2-tuple) or explicit column positions; only used in
        "ribbon" mode

    Returns
    ----------
    pandas.DataFrame
        the selected subset
    """
    if expression == "ribbon":
        # positional column selection; a tuple means a half-open range
        if isinstance(indices, tuple):
            return df.iloc[:,indices[0]:indices[1]]
        elif isinstance(indices, list):
            return df.iloc[:,indices]
        elif isinstance(indices, np.ndarray):
            return df.iloc[:,list(indices)]
        else:
            raise TypeError(f"Unknown type '{type(indices)}' for indices; must be a tuple of 2 values representing a range, or a list/array of indices to select")
    else:
        # fetch existing indices so they can be restored after filtering
        idc = list(df.index.names)
        if idc[0] != None:
            reindex = True
        else:
            reindex = False

        # sometimes throws an error if you're trying to reindex a non-indexed dataframe
        try:
            df = df.reset_index()
        except:
            pass

        sub_df = df.copy()
        if isinstance(expression, str):
            expression = [expression]

        if isinstance(expression, tuple) or isinstance(expression, list):
            # expressions sit at even positions, joining operators at odd ones
            expressions = expression[::2]
            operators = expression[1::2]

            if len(expressions) == 1:
                # str2operator presumably maps e.g. '=' to operator.eq —
                # defined elsewhere in this module; TODO confirm
                col1,operator1,val1 = expressions[0].split()
                ops1 = str2operator(operator1)
                # single-character values are assumed to be integers
                # (NOTE(review): a one-letter string value would raise here)
                if len(val1) == 1:
                    val1 = int(val1)
                sub_df = sub_df.loc[ops1(sub_df[col1], val1)]

            if len(expressions) == 2:
                col1,operator1,val1 = expressions[0].split()
                col2,operator2,val2 = expressions[1].split()
                main_ops = str2operator(operators[0])
                ops1 = str2operator(operator1)
                ops2 = str2operator(operator2)
                # check if we should interpret values individually as integers
                if len(val1) == 1:
                    val1 = int(val1)
                if len(val2) == 1:
                    val2 = int(val2)
                sub_df = sub_df.loc[main_ops(ops1(sub_df[col1], val1), ops2(sub_df[col2], val2))]

        # first check if we should do indexing
        if index != None:
            # then check if we actually have something to index
            if reindex:
                if idc[0] != None:
                    sub_df = sub_df.set_index(idc)

        return sub_df
def split_bids_components(fname):
    """Split a BIDS-style filename into its key-value components.

    Parameters
    ----------
    fname: str
        filename such as 'sub-01_ses-1_task-rest_run-2_bold'

    Returns
    ----------
    dict
        mapping of recognized BIDS keys ('ses', 'task', 'acq', 'rec', 'sub',
        'desc', 'run') to their values; the 'run' value is cast to int.
        Returns None (after printing a message) when no known key is found.
    """
    ids = ['ses', 'task', 'acq', 'rec', 'sub', 'desc', 'run']

    comps = {}
    for el in fname.split('_'):
        # Match on the exact key before the dash. The previous substring
        # test (`if i in el`) mis-fired on entities containing another key
        # as a substring, e.g. 'sub' inside 'desc-subcortex'.
        key = el.split('-')[0]
        if '-' in el and key in ids:
            comp = el.split('-')[-1]
            if key == "run":
                comp = int(comp)
            comps[key] = comp

    if len(comps) != 0:
        return comps
    else:
        print(f"Could not find any element of {ids} in {fname}")
def filter_for_nans(array):
    """Return `array` with NaNs replaced by zeros; unchanged if it has none."""
    return np.nan_to_num(array) if np.isnan(array).any() else array
def find_max_val(array):
    """Return the (axis-0) indices where `array` reaches its maximum value."""
    return (array == np.amax(array)).nonzero()[0]
def read_fs_reg(dat_file):
    """read_fs_reg

    Read a `.dat`-formatted registration file from FreeSurfer.

    Parameters
    ----------
    dat_file: str
        path pointing to the registration file

    Returns
    ----------
    numpy.ndarray
        (4,4) numpy array containing the transformation
    """
    with open(dat_file) as fh:
        # skip the 4 header lines and the trailing line; the rest is the matrix
        matrix_lines = fh.readlines()[4:-1]

    rows = []
    for line in matrix_lines:
        rows.append([float(token) for token in line.split() if token])
    return np.array(rows)
def random_timeseries(intercept, volatility, nr):
    """random_timeseries

    Create a random timecourse of length `nr`: it starts at `intercept` and
    evolves by Gaussian steps scaled by `intercept * volatility`.

    Parameters
    ----------
    intercept: float
        starting point of timecourse
    volatility: float
        this factor is multiplied with the Gaussian distribution before
        multiplied with the intercept
    nr: int
        length of timecourse

    Returns
    ----------
    numpy.ndarray
        array of length `nr`

    Example
    ----------
    >>> from linescanning import utils
    >>> ts = utils.random_timeseries(1.2, 0.5, 100)

    Notes
    ----------
    Source: https://stackoverflow.com/questions/67977231/how-to-generate-random-time-series-data-with-noise-in-python-3
    """
    values = [intercept]
    for _ in range(nr):
        step = intercept * random.gauss(0, 1) * volatility
        values.append(values[-1] + step)
    # drop the last sample so the result has exactly `nr` entries
    return np.array(values[:-1])
def squeeze_generic(a, axes_to_keep):
    """squeeze_generic

    Numpy squeeze implementation keeping <axes_to_keep> dimensions.

    Parameters
    ----------
    a: numpy.ndarray
        array to be squeezed
    axes_to_keep: tuple, range
        tuple of axes to keep from original input

    Returns
    ----------
    numpy.ndarray
        `axes_to_keep` from `a`

    Example
    ----------
    >>> a = np.random.rand(3,5,1)
    >>> squeeze_generic(a, axes_to_keep=range(2)).shape
    (3, 5)

    Notes
    ----------
    From: https://stackoverflow.com/questions/57472104/is-it-possible-to-squeeze-all-but-n-dimensions-using-numpy
    """
    # keep an axis when it is protected or when it is not a singleton
    protected = set(axes_to_keep)
    new_shape = [dim for axis, dim in enumerate(a.shape)
                 if axis in protected or dim != 1]
    return a.reshape(new_shape)
def find_intersection(xx, curve1, curve2):
    """find_intersection

    Find the intersection coordinates given two functions using `Shapely`.

    Parameters
    ----------
    xx: numpy.ndarray
        array describing the x-axis values
    curve1: numpy.ndarray
        array describing the first curve
    curve2: numpy.ndarray
        array describing the second curve

    Returns
    ----------
    tuple
        x,y coordinates where *curve1* and *curve2* intersect

    Raises
    ----------
    ValueError
        if no intersection coordinates could be found

    Example
    ----------
    See [refer to linescanning.prf.SizeResponse.find_stim_sizes]
    """
    # build polylines from the sampled curves and let Shapely locate crossings
    first_line = geometry.LineString(np.column_stack((xx, curve1)))
    second_line = geometry.LineString(np.column_stack((xx, curve2)))
    intersection = first_line.intersection(second_line)
    try:
        # NOTE(review): wrapping `intersection` in a LineString and taking
        # .xy[0] yields the sequence of x-coordinates; the two-value unpack
        # therefore assumes exactly two intersection points and returns their
        # x-values — confirm this matches the callers' expectation. Any
        # failure (empty or single-Point result) is mapped to ValueError.
        x_coord, y_coord = geometry.LineString(intersection).xy[0]
    except:
        raise ValueError("Could not find intersection between curves..")

    return (x_coord, y_coord)
class CollectSubject:
"""CollectSubject
Simple class to fetch pRF-related settings given a subject. Collects the design matrix, settings, and target vertex information. The `ses`-flag decides from which session the pRF-parameters to be used. You can either specify an *analysis_yaml* file containing information about a pRF-analysis, or specify *settings='recent'* to fetch the most recent analysis file in the pRF-directory of the subject. The latter is generally fine if you want information about the stimulus.
Parameters
----------
subject: str
subject ID as used throughout the pipeline
derivatives: str, optional
Derivatives directory, by default None.
cx_dir: str, optional
path to subject-specific pycortex directory
prf_dir: str, optional
subject-specific pRF directory, by default None. `derivatives` will be ignore if this flag is used
ses: int, optional
Source session of pRF-parameters to use, by default 1
analysis_yaml: str, optional
String pointing to an existing file, by default None.
hemi: str, optional
Hemisphere to extract target vertex from, by default "lh"
settings: str, optional
Fetch most recent settings file rather than `analysis_yaml`, by default None.
model: str, optional
This flag can be set to read in a specific 'best_vertex' file as the location parameters sometimes differ between a Gaussian and DN-fit.
Example
----------
>>> from linescanning import utils
>>> subject_info = utils.CollectSubject(subject, derivatives=<path_to_derivatives>, settings='recent', hemi="lh")
"""
def __init__(self, subject, derivatives=None, cx_dir=None, prf_dir=None, ses=1, analysis_yaml=None, hemi="lh", settings=None, model="gauss", correct_screen=True, verbose=True):
self.subject = subject
self.derivatives = derivatives
self.cx_dir = cx_dir
self.prf_dir = prf_dir
self.prf_ses = ses
self.hemi = hemi
self.model = model
self.analysis_yaml = analysis_yaml
self.correct_screen = correct_screen
self.verbose = verbose
if self.hemi == "lh" or self.hemi.lower() == "l" or self.hemi.lower() == "left":
self.hemi_tag = "L"
elif self.hemi == "rh" or self.hemi.lower() == "r" or self.hemi.lower() == "right":
self.hemi_tag = "R"
else:
self.hemi_tag = "both"
# set | |
self.pan))
c.append(('elevation', self.elevation))
return c
class Part(MusicXMLElementList):
    '''
    This assumes a part-wise part
    '''
    def __init__(self):
        MusicXMLElementList.__init__(self)
        self._tag = 'part'
        # attributes
        self._attr['id'] = None
        # component objects
        self.componentList = [] # a list of measure objects

    def _getComponents(self):
        # a part's components are simply its measures, in order
        return self.componentList

    def setDefaults(self):
        pass
        # might need to do this a different way
        # randomly generated in m21 object when needed
        #self.set('id', defaults.partId)

    def getStavesCount(self):
        '''
        Look ahead into the measure Attributes and return the highest number
        of staves used in this part.
        '''
        maxStaves = 1
        for c in self.componentList:
            if c._tag == 'measure':
                if c.attributesObj is not None:
                    if c.attributesObj.staves is not None:
                        count = int(c.attributesObj.staves)
                        if count > maxStaves:
                            maxStaves = count
        return maxStaves
class Measure(MusicXMLElementList):
    '''A single measure; holds notes/components, an optional Attributes
    object, and divisions state possibly inherited from earlier measures.
    '''
    def __init__(self):
        MusicXMLElementList.__init__(self)
        self._tag = 'measure'
        # not all measures store an attributes object
        # yet, a measure can refer to a divisions setting
        # established in previous measures
        self.external['attributes'] = None
        self.external['divisions'] = None
        # attributes
        self._attr['number'] = None
        self._attr['implicit'] = None
        self._attr['width'] = None
        # elements
        self.attributesObj = None # an object
        self.componentList = [] # a list notes and other things
        # in some cases we have multiple attribute objects in one measure
        # need to store and merge
        # store multiple attributes objects found in this Measure
        self._attributesObjList = []
        self._crossReference['attributesObj'] = ['attributes']
        # store unique voice index numbers
        self._voiceIndices = []

    def _getComponents(self):
        c = []
        c.append(self.attributesObj)
        #c += self.componentList
        for part in self.componentList:
            if isinstance(part, Print):
                # place print elements first, ahead of attributes
                c.insert(0, part)
            else:
                c.append(part)
        return c

    def setDefaults(self):
        self.set('number', 1)
        attributes = Attributes()
        attributes.setDefaults()
        self.set('attributes', attributes)
        # mirror the default divisions so external lookups stay consistent
        self.external['divisions'] = attributes.get('divisions')

    def update(self):
        '''This method looks at all note, forward, and backup objects and updates divisions and attributes references
        '''
        updateAttributes = False
        if len(self._attributesObjList) > 1:
            updateAttributes = True
            attrConsolidate = Attributes()
            # consolidate is necessary for some MusicXML files that define
            # each attribute component in its own attribute container
            for attrObj in self._attributesObjList:
                #environLocal.printDebug(['found multiple Attributes', attrObj])
                attrConsolidate = attrConsolidate.merge(attrObj)
            #environLocal.printDebug(['Measure.update(); found multiple Attributes objects for a single measure', attrConsolidate])
            self.attributesObj = attrConsolidate
            self.external['attributes'] = self.attributesObj
            # must make sure that this is not None, as we may get an incomplete
            # attributes object here
            if self.attributesObj.divisions is not None:
                self.external['divisions'] = self.attributesObj.divisions
            # keep existing divisions
        #counter = 0
        #noteThis = None
        #noteNext = None
        for pos in range(len(self.componentList)):
            #environLocal.printDebug(['Measure.update()', counter])
            obj = self.componentList[pos]
            if obj.tag in ['note']:
                # track each distinct voice number seen among the notes
                if obj.voice is not None:
                    if obj.voice not in self._voiceIndices:
                        self._voiceIndices.append(obj.voice)
                # may need to assign new, merged attributes obj to components
                if updateAttributes:
                    obj.external['attributes'] = self.attributesObj
                    if self.attributesObj.divisions is not None:
                        obj.external['divisions'] = self.attributesObj.divisions
        self._voiceIndices.sort()

    def getVoiceCount(self):
        '''Return the number of voices defined in this Measure; this must be called after update().
        '''
        return len(self._voiceIndices)

    def getVoiceIndices(self):
        '''Return a list of unique sorted voice ids.
        '''
        return self._voiceIndices
class Attributes(MusicXMLElement):
    '''The <attributes> element: divisions, keys, times, clefs, staff
    details, transposition, and measure style for a measure.
    '''
    # store measure data; assuming that there is one per measure
    def __init__(self):
        MusicXMLElement.__init__(self)
        self._tag = 'attributes'
        # simple elements
        self.divisions = None
        self.staves = None
        # complex elements
        # there can be one key for each staff in a Part, and there can be
        # more than one staff per part
        self.keyList = []
        # more than one pair of beat and beat-type is used for composite signatures
        self.timeList = []
        self.clefList = []
        self.staffDetailsList = []
        self.transposeObj = None # needs to be an object
        self.measureStyleObj = None # for slash notation, mult rests
        # not yet implemented
        #self.staffDetails = None # shows different stave styles
        self.directive = None
        self._crossReference['timeList'] = ['time']
        self._crossReference['clefList'] = ['clef']

    def _getComponents(self):
        # output order matters for valid MusicXML
        c = []
        c.append(('divisions', self.divisions))
        c = c + self.keyList
        c = c + self.timeList
        c.append(('staves', self.staves))
        # part symbol
        # instruments
        c = c + self.clefList
        # staff details
        c = c + self.staffDetailsList
        c.append(self.transposeObj)
        # directive
        c.append(self.measureStyleObj)
        return c

    def setDefaultDivisions(self):
        '''Utility to just set the divisions parameters
        '''
        self.set('divisions', defaults.divisionsPerQuarter)

    def setDefaults(self):
        # populate divisions plus one default time, clef, and key each
        self.set('divisions', defaults.divisionsPerQuarter)
        mxTime = Time()
        mxTime.setDefaults()
        self.timeList.append(mxTime)
        mxClef = Clef()
        mxClef.setDefaults()
        self.clefList.append(mxClef)
        mxKey = Key()
        mxKey.setDefaults()
        self.keyList.append(mxKey)
class StaffDetails(MusicXMLElement):
    '''The <staff-details> element.
    '''
    # contains information about staff size, staff number, printing, etc.
    def __init__(self):
        MusicXMLElement.__init__(self)
        self._tag = 'staff-details'
        # attributes
        self._attr['number'] = None
        self._attr['print-object'] = 'yes'
        # elements
        self.staffSize = None
        self.staffLines = 5
        self._crossReference['number'] = ['number']
        self._crossReference['print-object'] = ['print-object', 'printObject']

    def _getComponents(self):
        c = []
        c.append(('staff-size', self.staffSize))
        c.append(('staff-lines', self.staffLines))
        return c
class Key(MusicXMLElement):
    '''The <key> element: supports traditional (fifths/mode) and
    non-traditional (key-step/key-alter pair) key signatures.
    '''
    # permits traditional and non-traditional keys
    # non traditional keys use key-step and key-alter pairs
    # traditional uses fifths, mode
    def __init__(self):
        MusicXMLElement.__init__(self)
        self._tag = 'key'
        # attribute
        # optional attributes that refers to staff numbers
        self._attr['number'] = None
        # cancel is given as a fifths value of the canceled key
        self.cancel = None # if a previous key signature should be canceled
        self.fifths = None
        self.mode = None
        # non-traditional keys are defined as three tags
        # key-step, key-alter, and key-octave; it is not clear if these
        # need to be in order; best to store objects for each
        self.nonTraditionalKeyList = [] # a list of objects

    def _getComponents(self):
        c = []
        c.append(('cancel', self.cancel))
        c.append(('fifths', self.fifths))
        c.append(('mode', self.mode))
        c = c + self.nonTraditionalKeyList
        return c

    def setDefaults(self):
        self.set('fifths', defaults.keyFifths)
        self.set('mode', defaults.keyMode)
class KeyStep(MusicXMLElement):
    '''The <key-step> element, used in non-traditional key signatures.
    '''
    def __init__(self):
        MusicXMLElement.__init__(self)
        self._tag = 'key-step'
        self.charData = None # a number
class KeyAlter(MusicXMLElement):
    '''The <key-alter> element, used in non-traditional key signatures.
    '''
    def __init__(self):
        MusicXMLElement.__init__(self)
        self._tag = 'key-alter'
        self.charData = None # a number
class KeyOctave(MusicXMLElement):
    '''The <key-octave> element, used in non-traditional key signatures.
    '''
    def __init__(self):
        MusicXMLElement.__init__(self)
        self._tag = 'key-octave'
        self.charData = None # a number
        self._attr['number'] = None
class Transpose(MusicXMLElement):
    '''The <transpose> element: diatonic/chromatic transposition settings.
    '''
    def __init__(self):
        MusicXMLElement.__init__(self)
        self._tag = 'transpose'
        # simple elements
        self.diatonic = None
        self.chromatic= None
        self.octaveChange = None
        self.double = False # boolean

    def _getComponents(self):
        c = []
        c.append(('diatonic', self.diatonic))
        c.append(('chromatic', self.chromatic))
        c.append(('octave-change', self.octaveChange))
        c.append(('double', self.double))
        return c
class Time(MusicXMLElement):
    '''The <time> element: a time signature built from beats/beat-type pairs.
    '''
    # there may be more than one time obj per attribute/measure
    def __init__(self):
        MusicXMLElement.__init__(self)
        self._tag = 'time'
        # attributes
        self._attr['symbol'] = None
        self._attr['number'] = None # number here refers to staff number
        # simple elements
        self.componentList = [] # a list of beats and beatType
        #self.beats = None
        #self.beatType = None
        self.senzaMisura = None # an empty element, but boolean here

    def _getComponents(self):
        c = []
        c += self.componentList # beats and beatType
        #c.append(('beats', self.beats))
        #c.append(('beat-type', self.beatType))
        c.append(('senza-misura', self.senzaMisura))
        return c

    def setDefaults(self):
        #self.set('beats', defaults.meterNumerator)
        #self.set('beat-type', defaults.meterDenominatorBeatType)
        beats = Beats(defaults.meterNumerator)
        beatType = BeatType(defaults.meterDenominatorBeatType)
        self.componentList.append(beats)
        self.componentList.append(beatType)
class Beats(MusicXMLElement):
    '''The <beats> element; holds the numerator of a Time signature
    (see Time.setDefaults, which fills it from defaults.meterNumerator).
    '''
    def __init__(self, charData=None):
        MusicXMLElement.__init__(self)
        self._tag = 'beats'
        self.charData = charData
class BeatType(MusicXMLElement):
    '''The <beat-type> element; holds the denominator of a Time signature
    (see Time.setDefaults, which fills it from defaults.meterDenominatorBeatType).
    '''
    def __init__(self, charData=None):
        MusicXMLElement.__init__(self)
        self._tag = 'beat-type'
        self.charData = charData
class Clef(MusicXMLElement):
    '''The <clef> element: sign, line, and octave-change.
    '''
    def __init__(self):
        MusicXMLElement.__init__(self)
        self._tag = 'clef'
        # attributes:
        self._attr['number'] = None # clef number refers to staff number
        self._attr['additional'] = None
        # elements
        self.sign = None
        self.line = None
        self.clefOctaveChange = None # integer for transposing clefs
        self._crossReference['clefOctaveChange'] = ['octaveChange']

    def _getComponents(self):
        c = []
        c.append(('sign', self.sign))
        c.append(('line', self.line))
        c.append(('clef-octave-change', self.clefOctaveChange))
        return c

    def setDefaults(self):
        self.set('sign', defaults.clefSign)
        self.set('line', defaults.clefLine)
class Direction(MusicXMLElementList):
'''One or more Direction objects are found in measures, after an attributes
object. Within the Direction object may be a number of objects,
including DirectionType, Sound.
'''
def __init__(self):
MusicXMLElementList.__init__(self)
self._tag = 'direction'
# attributes
# note that placement does not seem to make a difference for some types
self._attr['placement'] = None
# elements
self.componentList = []
self.staff = None # number, for parts w/ > 1 staff
# position of this direction can be configured with a number in
# divisions. this is given within <direction> and after <direction-type>
self.offset = None # number, in divisions.
def _getComponents(self):
# need to look for sound tags stored on componentList and place
# them at the very end of all components, after offset
c = []
c.append(('staff', self.staff))
soundTag = None
for i, sub in enumerate(self.componentList):
if isinstance(sub, Sound):
soundTag = sub
else: # store others in order
c.append(sub)
#c = c + self.componentList
# | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'mayanqiong'
import math
from datetime import datetime
from typing import Callable
from tqsdk.datetime import _is_in_trading_time
from tqsdk.diff import _simple_merge_diff
from tqsdk.sim.utils import _get_price_range, _get_option_margin, _get_premium, _get_close_profit, _get_commission, \
_get_future_margin
class SimTrade(object):
"""
天勤模拟交易账户,处理 orderbook 和撮合交易
计算账户资金、持仓信息
本模块为 TqSim 交易部分的子模块,纯同步计算,不涉及连接行情的动态信息,所以对于接口的调用有一些需要注意的要求
提供的接口:
+ init_snapshot: 返回初始的账户截面信息
+ insert_order: 处理下单请求,调用 TqSimAccount.insert_order 之前应该调用过 update_quote,保证收到过合约的行情;期权还应该确保收到了标的的行情
+ cancel_order:处理撤单请求
+ update_quote:处理行情更新
返回值 diffs
diffs 是 list 类型,每个元素都是符合 diff 协议中 trade 交易部分的数据包,且返回的都是完整字段的对象,比如:order成交时,返回的是order完整对象而不是有变化的字段
+ settle:处理结算请求
返回值 diffs, trade_log
diffs 同上,trade_log 是结算前的账户日志信息
"""
    def __init__(self, account_key: str, init_balance: float = 10000000.0, get_trade_timestamp: Callable = None,
                 is_in_trading_time: Callable = None) -> None:
        self._account_key = account_key
        self._quotes = {}  # records every quote received so far
        # account snapshot, maintained incrementally as orders/trades happen
        self._account = {
            "currency": "CNY",
            "pre_balance": init_balance,
            "static_balance": init_balance,
            "balance": init_balance,
            "available": init_balance,
            "float_profit": 0.0,
            "position_profit": 0.0,  # options carry no position profit
            "close_profit": 0.0,
            "frozen_margin": 0.0,
            "margin": 0.0,
            "frozen_commission": 0.0,
            "commission": 0.0,
            "frozen_premium": 0.0,
            "premium": 0.0,
            "deposit": 0.0,
            "withdraw": 0.0,
            "risk_ratio": 0.0,
            "market_value": 0.0,
            "ctp_balance": float("nan"),
            "ctp_available": float("nan")
        }
        self._positions = {}  # {symbol: position, ...}
        self._orders = {}  # {symbol: {order_id: order}, ...}
        # kept as a list (consistent with the pre-refactor code) so the
        # production order of trades is preserved
        self._trades = []
        self._diffs = []
        self._orders_events = []  # order updates recorded in sequence, handed back to the caller
        self._max_datetime = ""  # latest quote datetime seen across all quotes
        # When computing trade timestamps and checking trading hours, this
        # module by default treats the latest quote datetime as "now" and does
        # not simulate transmission latency; callers needing more precise time
        # can supply their own functions.
        self._get_trade_timestamp = get_trade_timestamp if get_trade_timestamp else self._default_get_trade_timestamp
        self._is_in_trading_time = is_in_trading_time if is_in_trading_time else self._default_is_in_trading_time
def insert_order(self, symbol, pack):
quote, underlying_quote = self._get_quotes_by_symbol(symbol)
order = self._pre_insert_order(pack)
orders = self._orders.setdefault(symbol, {})
orders[order["order_id"]] = order # order 存入全局
self._orders_events.append(order.copy())
self._insert_order(order, symbol, quote, underlying_quote)
if order["status"] == "ALIVE":
self._match_order(order, symbol, quote, underlying_quote)
if order["status"] == "FINISHED":
self._orders_events.append(order)
del self._orders[symbol][order["order_id"]] # 删除 order
return self._return_results()
def cancel_order(self, symbol, pack):
order = self._orders.get(symbol, {}).get(pack["order_id"], {})
if order.get("status") == "ALIVE":
order["last_msg"] = "已撤单"
order["status"] = "FINISHED"
self._on_order_failed(symbol, order)
self._orders_events.append(order)
del self._orders[symbol][order["order_id"]] # 删除 order
return self._return_results()
    def update_quotes(self, symbol, pack):
        """Apply a quotes diff packet, re-match resting orders for *symbol*, and
        refresh the position marks and account.

        Returns (diffs, orders_events); both empty when the new quote is invalid
        (last_price is nan, which happens in tick backtests outside trading time).
        """
        for q in pack.get("quotes", {}).values():
            # track the newest datetime across all quotes; it serves as "now"
            self._max_datetime = max(q.get("datetime", ""), self._max_datetime)
        _simple_merge_diff(self._quotes, pack.get("quotes", {}), reduce_diff=False)
        quote, underlying_quote = self._get_quotes_by_symbol(symbol)
        # In some non-trading periods of a tick backtest the quote's last_price
        # can be nan; skip such invalid quotes entirely.
        if math.isnan(quote["last_price"]):
            return [], []
        # try to match resting orders against the new quote
        orders = self._orders.get(symbol, {})
        for order_id in list(orders.keys()):  # _match_order may delete entries under us
            self._match_order(orders[order_id], symbol, quote, underlying_quote)
            if orders[order_id]["status"] == "FINISHED":
                self._orders_events.append(orders[order_id])
                del self._orders[symbol][order_id]
        # adjust position margin and profit to the new marks
        position = self._ensure_position(symbol)
        underlying_last_price = underlying_quote["last_price"] if underlying_quote else float('nan')
        future_margin = _get_future_margin(quote)
        if position["volume_long"] > 0 or position["volume_short"] > 0:
            # only recompute when a mark actually changed (or became nan)
            if position["last_price"] != quote["last_price"] \
                    or (math.isnan(future_margin) or future_margin != position["future_margin"]) \
                    or (underlying_quote and (math.isnan(underlying_last_price) or underlying_last_price != position["underlying_last_price"])):
                self._adjust_position_account(symbol, quote, underlying_quote,
                                              pre_last_price=position["last_price"],
                                              last_price=quote["last_price"],
                                              pre_underlying_last_price=position["underlying_last_price"],
                                              underlying_last_price=underlying_last_price)
                position["future_margin"] = future_margin
                position["last_price"] = quote["last_price"]
                position["underlying_last_price"] = underlying_last_price
        else:
            # no open volume: just refresh the helper fields
            position["future_margin"] = future_margin
            position["last_price"] = quote["last_price"]
            position["underlying_last_price"] = underlying_last_price
        # Always send the position: downstream uses the future_margin field to
        # confirm whether a margin change succeeded.
        self._send_position(position)
        self._send_account()
        return self._return_results()
    def settle(self):
        """Run end-of-day settlement.

        Returns:
            (diffs, orders_events, trade_log) where trade_log is the snapshot of
            trades / account / positions taken before settlement.
        """
        trade_log = {
            "trades": self._trades,
            "account": self._account.copy(),
            "positions": {k: v.copy() for k, v in self._positions.items()}
        }
        # prepare the account for the next trading day
        self._trades = []
        for symbol in self._orders:
            for order in self._orders[symbol].values():
                # day orders (GFD) are cancelled automatically at end of day
                order["frozen_margin"] = 0.0
                order["frozen_premium"] = 0.0
                order["last_msg"] = "交易日结束,自动撤销当日有效的委托单(GFD)"
                order["status"] = "FINISHED"
                self._orders_events.append(order)
                self._send_order(order)
            self._orders[symbol] = {}
        # account raw fields
        self._account["pre_balance"] = self._account["balance"] - self._account["market_value"]
        self._account["close_profit"] = 0.0
        self._account["commission"] = 0.0
        self._account["premium"] = 0.0
        self._account["frozen_margin"] = 0.0
        self._account["frozen_premium"] = 0.0
        # account derived fields
        self._account["static_balance"] = self._account["pre_balance"]
        self._account["position_profit"] = 0.0
        self._account["risk_ratio"] = self._account["margin"] / self._account["balance"]
        self._account["available"] = self._account["static_balance"] - self._account["margin"]
        # By definition balance = static_balance + market_value, so no recomputation needed.
        self._send_account()
        # Position settlement is done here synchronously, not in quote_handler, because:
        # 1. If it were sent asynchronously and sim had not yet received pending_peek,
        #    the settled account could not be pushed out, so api.get_position() would
        #    disagree with sim's internal positions and set_target_pos would produce
        #    bad orders. At settlement time a quote packet has always been received
        #    (the last sync step sends it out), so pending_peek is guaranteed False;
        #    peek_message only arrives after the api has processed it.
        # 2. Sending synchronously lets this go out together with the packet that
        #    switches the trading day.
        # Order events are handled on the next peek_message reply.
        for position in self._positions.values():
            # position raw fields: all of today's volume rolls over into "his"(tory)
            position["volume_long_frozen_today"] = 0
            position["volume_long_frozen_his"] = 0
            position["volume_short_frozen_today"] = 0
            position["volume_short_frozen_his"] = 0
            position["volume_long_today"] = 0
            position["volume_long_his"] = position["volume_long"]
            position["volume_short_today"] = 0
            position["volume_short_his"] = position["volume_short"]
            # position derived fields
            position["pos_long_his"] = position["volume_long_his"]
            position["pos_long_today"] = 0
            position["pos_short_his"] = position["volume_short_his"]
            position["pos_short_today"] = 0
            position["volume_long_frozen"] = 0
            position["volume_short_frozen"] = 0
            # position price resets to the settlement mark (last_price)
            position["position_price_long"] = position["last_price"]
            position["position_price_short"] = position["last_price"]
            quote, _ = self._get_quotes_by_symbol(f"{position['exchange_id']}.{position['instrument_id']}")
            position["position_cost_long"] = position["last_price"] * position["volume_long"] * quote["volume_multiple"]  # position raw field
            position["position_cost_short"] = position["last_price"] * position["volume_short"] * quote["volume_multiple"]  # position raw field
            position["position_profit_long"] = 0
            position["position_profit_short"] = 0
            position["position_profit"] = 0
            self._send_position(position)
        diffs, orders_events = self._return_results()
        return diffs, orders_events, trade_log
def init_snapshot(self):
"""返回初始账户截面信息"""
return {
"trade": {
self._account_key: {
"accounts": {"CNY": self._account.copy()},
"positions": {},
"orders": {},
"trades": {}
}
}
}
def _return_results(self):
"""
返回两项内容:diffs: list, orders_events: list
diffs 是截面的变更
orders_events 是委托单变化
"""
diffs = self._diffs
self._diffs = []
orders_events = self._orders_events
self._orders_events = []
return diffs, orders_events
    def _ensure_position(self, symbol):
        """Return the position object for *symbol*, creating an empty one on first use."""
        position = self._positions.setdefault(symbol, {
            "exchange_id": symbol.split(".", maxsplit=1)[0],
            "instrument_id": symbol.split(".", maxsplit=1)[1],
            "pos_long_his": 0,
            "pos_long_today": 0,
            "pos_short_his": 0,
            "pos_short_today": 0,
            "volume_long_today": 0,
            "volume_long_his": 0,
            "volume_long": 0,
            "volume_long_frozen_today": 0,
            "volume_long_frozen_his": 0,
            "volume_long_frozen": 0,
            "volume_short_today": 0,
            "volume_short_his": 0,
            "volume_short": 0,
            "volume_short_frozen_today": 0,
            "volume_short_frozen_his": 0,
            "volume_short_frozen": 0,
            "open_price_long": float("nan"),
            "open_price_short": float("nan"),
            "open_cost_long": 0.0,
            "open_cost_short": 0.0,
            "position_price_long": float("nan"),
            "position_price_short": float("nan"),
            "position_cost_long": 0.0,
            "position_cost_short": 0.0,
            "float_profit_long": 0.0,
            "float_profit_short": 0.0,
            "float_profit": 0.0,
            "position_profit_long": 0.0,
            "position_profit_short": 0.0,
            "position_profit": 0.0,
            "margin_long": 0.0,
            "margin_short": 0.0,
            "margin": 0.0,
            "last_price": float('nan'),
            "underlying_last_price": float('nan'),
            "market_value_long": 0.0,  # market value of the long (rights) side, always >= 0
            "market_value_short": 0.0,  # market value of the short (obligations) side, always <= 0
            "market_value": 0.0,
        })
        if math.isnan(position["last_price"]):
            # First touch of this position: fill in the helper fields
            # last_price / underlying_last_price from the current quotes.
            quote, underlying_quote = self._get_quotes_by_symbol(symbol)
            position["future_margin"] = _get_future_margin(quote)
            position["last_price"] = quote["last_price"]
            position["underlying_last_price"] = underlying_quote["last_price"] if underlying_quote else float("nan")
        return position
def _get_quotes_by_symbol(self, symbol):
"""返回指定合约及标的合约,在本模块执行过程中,应该保证一定有合约行情"""
quote = self._quotes.get(symbol)
assert quote and quote.get("datetime"), "未收到指定合约行情"
underlying_quote = None
if quote["ins_class"].endswith("OPTION"):
underlying_quote = self._quotes.get(quote["underlying_symbol"])
assert underlying_quote and underlying_quote.get("datetime"), "未收到指定合约的标的行情"
return quote, underlying_quote
def _pre_insert_order(self, pack):
"""order 对象预处理"""
order = pack.copy()
order["exchange_order_id"] = order["order_id"]
order["volume_orign"] = order["volume"]
order["volume_left"] = order["volume"]
order["frozen_margin"] = 0.0
order["frozen_premium"] = 0.0
order["last_msg"] = "报单成功"
order["status"] = "ALIVE"
order["insert_date_time"] = self._get_trade_timestamp()
del order["aid"]
del order["volume"]
self._send_order(order)
return order
def _insert_order(self, order, symbol, quote, underlying_quote=None):
"""判断 order 是否可以记录在 orderbook"""
if ("commission" not in quote or "margin" not in quote) and not quote["ins_class"].endswith("OPTION"):
order["last_msg"] = "不支持的合约类型,TqSim 目前不支持组合,股票,etf期权模拟交易"
order["status"] = "FINISHED"
if order["status"] == "ALIVE" and not self._is_in_trading_time(quote):
order["last_msg"] = "下单失败, 不在可交易时间段内"
order["status"] = "FINISHED"
position = self._ensure_position(symbol)
if order["status"] == "ALIVE" and order["offset"].startswith('CLOSE'):
if order["exchange_id"] in ["SHFE", "INE"]:
if order["offset"] == "CLOSETODAY":
if order["direction"] == "BUY" and position["volume_short_today"] - position["volume_long_frozen_today"] < order["volume_orign"]:
order["last_msg"] = "平今仓手数不足"
elif order["direction"] == "SELL" and position["volume_long_today"] - position["volume_long_frozen_today"] < order["volume_orign"]:
order["last_msg"] = "平今仓手数不足"
if order["offset"] == "CLOSE":
if order["direction"] == "BUY" and position["volume_short_his"] - position["volume_short_frozen_his"] < order["volume_orign"]:
order["last_msg"] = "平昨仓手数不足"
elif order["direction"] == "SELL" and position["volume_long_his"] - position["volume_long_frozen_his"] < order["volume_orign"]:
order["last_msg"] = "平昨仓手数不足"
else:
if order["direction"] == "BUY" and position["volume_short"] - position["volume_short_frozen"] < order["volume_orign"]:
order["last_msg"] = "平仓手数不足"
elif order["direction"] == "SELL" and position["volume_long"] - position["volume_long_frozen"] < order["volume_orign"]:
order["last_msg"] = "平仓手数不足"
if order["last_msg"].endswith("手数不足"):
order["status"] = "FINISHED"
if order["status"] == "ALIVE" and order["offset"] == "OPEN":
# 计算冻结保证金,冻结权利金
if quote["ins_class"].endswith("OPTION"):
if order["direction"] == "SELL": # 期权的SELL义务仓,开仓需要冻结保证金
order["frozen_margin"] = order["volume_orign"] * _get_option_margin(quote, quote["last_price"], underlying_quote["last_price"])
else: # 期权的BUY权利仓(市价单使用 last_price 计算需要冻结的权利金)
price = quote["last_price"] if order["price_type"] == "ANY" else order["limit_price"]
order["frozen_premium"] = order["volume_orign"] * quote["volume_multiple"] * price
else:
order["frozen_margin"] = order["volume_orign"] * _get_future_margin(quote)
if order["frozen_margin"] + order["frozen_premium"] > self._account["available"]:
order["frozen_margin"] = 0.0
order["frozen_premium"] = 0.0
order["last_msg"] = '开仓资金不足'
order["status"] = "FINISHED"
if order["status"] == "FINISHED":
self._send_order(order)
if order["status"] == "ALIVE" and order["offset"] == "OPEN":
# 修改 account 计算字段
self._adjust_account_by_order(frozen_margin=order["frozen_margin"], frozen_premium=order["frozen_premium"])
self._send_account()
if order["status"] == 'ALIVE' and order["offset"].startswith('CLOSE'):
# 修改 position 原始字段
if order["exchange_id"] in ["SHFE", "INE"]:
if order["direction"] == "BUY":
position[f"volume_short_frozen_{'today' if order['offset'] == 'CLOSETODAY' else 'his'}"] += order["volume_orign"]
else:
position[f"volume_long_frozen_{'today' if order['offset'] == 'CLOSETODAY' else 'his'}"] += order["volume_orign"]
elif order["direction"] == "BUY":
volume_short_his_available = position["volume_short_his"] - position["volume_short_frozen_his"]
if volume_short_his_available < order["volume_orign"]:
position["volume_short_frozen_his"] += volume_short_his_available
position["volume_short_frozen_today"] += order["volume_orign"] - volume_short_his_available
else:
position["volume_short_frozen_his"] += order["volume_orign"]
else:
volume_long_his_available = position["volume_long_his"] - position["volume_long_frozen_his"]
if volume_long_his_available < order["volume_orign"]:
position["volume_long_frozen_his"] += volume_long_his_available
position["volume_long_frozen_today"] += order["volume_orign"] - volume_long_his_available
else:
position["volume_long_frozen_his"] += order["volume_orign"]
# 修改 position 计算字段
self._adjust_position_volume_frozen(position)
self._send_position(position)
def _match_order(self, order, symbol, quote, underlying_quote=None):
assert order["status"] == "ALIVE"
ask_price, bid_price = _get_price_range(quote)
# order 预期成交价格
if order["price_type"] in ["ANY", "BEST", "FIVELEVEL"]:
price = ask_price if order["direction"] == "BUY" else bid_price
else:
price = order["limit_price"]
if order["price_type"] == "ANY" and math.isnan(price):
order["last_msg"] = "市价指令剩余撤销"
order["status"] = "FINISHED"
if order["time_condition"] == "IOC": # IOC | |
"""HTTP client functional tests."""
import binascii
import gc
import io
import os.path
import json
import http.cookies
import asyncio
import socket
import unittest
from unittest import mock
from multidict import MultiDict
import aiohttp
from aiohttp import client, helpers
from aiohttp import test_utils
from aiohttp.multipart import MultipartWriter
def find_unused_port():
    """Bind an ephemeral TCP socket on localhost and return the port the OS picked.

    The socket is closed before returning, so the port is free (though not
    reserved) for the caller to use.
    """
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.bind(('127.0.0.1', 0))
        return sock.getsockname()[1]
class TestHttpClientFunctional(unittest.TestCase):
    def setUp(self):
        """Create a private event loop so tests never rely on the global one."""
        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(None)

    def tearDown(self):
        """Flush pending callbacks, then close the loop."""
        # just in case if we have transport close callbacks
        test_utils.run_briefly(self.loop)

        self.loop.close()
        gc.collect()
    def test_HTTP_200_OK_METHOD(self):
        """Each basic HTTP method returns 200 and echoes the method name."""
        with test_utils.run_server(self.loop, router=Functional) as httpd:
            for meth in ('get', 'post', 'put', 'delete', 'head'):
                r = self.loop.run_until_complete(
                    client.request(meth, httpd.url('method', meth),
                                   loop=self.loop))
                content1 = self.loop.run_until_complete(r.read())
                content2 = self.loop.run_until_complete(r.read())
                content = content1.decode()

                self.assertEqual(r.status, 200)
                if meth == 'head':
                    self.assertEqual(b'', content1)
                else:
                    self.assertIn('"method": "%s"' % meth.upper(), content)
                # a second read() must return the same (cached) body
                self.assertEqual(content1, content2)
                r.close()

    def test_HTTP_200_OK_METHOD_connector(self):
        """Same as above, but routed through an explicit TCPConnector."""
        with test_utils.run_server(self.loop, router=Functional) as httpd:
            conn = aiohttp.TCPConnector(
                conn_timeout=0.2, resolve=True, loop=self.loop)
            conn.clear_resolved_hosts()

            for meth in ('get', 'post', 'put', 'delete', 'head'):
                r = self.loop.run_until_complete(
                    client.request(
                        meth, httpd.url('method', meth),
                        connector=conn, loop=self.loop))

                content1 = self.loop.run_until_complete(r.read())
                content2 = self.loop.run_until_complete(r.read())
                content = content1.decode()

                self.assertEqual(r.status, 200)
                if meth == 'head':
                    self.assertEqual(b'', content1)
                else:
                    self.assertIn('"method": "%s"' % meth.upper(), content)
                self.assertEqual(content1, content2)
                r.close()

    def test_use_global_loop(self):
        """client.request falls back to the global event loop when none is passed."""
        with test_utils.run_server(self.loop, router=Functional) as httpd:
            try:
                asyncio.set_event_loop(self.loop)
                r = self.loop.run_until_complete(
                    client.request('get', httpd.url('method', 'get')))
            finally:
                asyncio.set_event_loop(None)
            content1 = self.loop.run_until_complete(r.read())
            content2 = self.loop.run_until_complete(r.read())
            content = content1.decode()

            self.assertEqual(r.status, 200)
            self.assertIn('"method": "GET"', content)
            self.assertEqual(content1, content2)
            r.close()
    def test_HTTP_302_REDIRECT_GET(self):
        """GET follows a chain of 302 redirects down to the final 200."""
        with test_utils.run_server(self.loop, router=Functional) as httpd:

            @asyncio.coroutine
            def go():
                r = yield from client.request('get',
                                              httpd.url('redirect', 2),
                                              loop=self.loop)

                self.assertEqual(r.status, 200)
                self.assertEqual(2, httpd['redirects'])
                r.close()

            self.loop.run_until_complete(go())

    def test_HTTP_302_REDIRECT_NON_HTTP(self):
        """Redirecting to a non-HTTP scheme raises ValueError."""
        with test_utils.run_server(self.loop, router=Functional) as httpd:

            @asyncio.coroutine
            def go():
                with self.assertRaises(ValueError):
                    yield from client.request('get',
                                              httpd.url('redirect_err'),
                                              loop=self.loop)

            self.loop.run_until_complete(go())

    def test_HTTP_302_REDIRECT_POST(self):
        """After a 302, the redirected request is re-issued as GET."""
        with test_utils.run_server(self.loop, router=Functional) as httpd:
            r = self.loop.run_until_complete(
                client.request('post', httpd.url('redirect', 2),
                               data={'some': 'data'}, loop=self.loop))
            content = self.loop.run_until_complete(r.content.read())
            content = content.decode()

            self.assertEqual(r.status, 200)
            self.assertIn('"method": "GET"', content)
            self.assertEqual(2, httpd['redirects'])
            r.close()

    def test_HTTP_302_REDIRECT_POST_with_content_length_header(self):
        """A redirected POST with an explicit Content-Length still becomes GET."""
        data = json.dumps({'some': 'data'})
        with test_utils.run_server(self.loop, router=Functional) as httpd:
            r = self.loop.run_until_complete(
                client.request('post', httpd.url('redirect', 2),
                               data=data,
                               headers={'Content-Length': str(len(data))},
                               loop=self.loop))
            content = self.loop.run_until_complete(r.content.read())
            content = content.decode()

            self.assertEqual(r.status, 200)
            self.assertIn('"method": "GET"', content)
            self.assertEqual(2, httpd['redirects'])
            r.close()

    def test_HTTP_307_REDIRECT_POST(self):
        """A 307 redirect preserves the POST method."""
        with test_utils.run_server(self.loop, router=Functional) as httpd:
            r = self.loop.run_until_complete(
                client.request('post', httpd.url('redirect_307', 2),
                               data={'some': 'data'}, loop=self.loop))
            content = self.loop.run_until_complete(r.content.read())
            content = content.decode()

            self.assertEqual(r.status, 200)
            self.assertIn('"method": "POST"', content)
            self.assertEqual(2, httpd['redirects'])
            r.close()

    def test_HTTP_302_max_redirects(self):
        """Following stops at max_redirects and returns the last 302 response."""
        with test_utils.run_server(self.loop, router=Functional) as httpd:
            r = self.loop.run_until_complete(
                client.request('get', httpd.url('redirect', 5),
                               max_redirects=2, loop=self.loop))

            self.assertEqual(r.status, 302)
            self.assertEqual(2, httpd['redirects'])
            r.close()
    def test_HTTP_200_GET_WITH_PARAMS(self):
        """A params dict is encoded into the request's query string."""
        with test_utils.run_server(self.loop, router=Functional) as httpd:
            r = self.loop.run_until_complete(
                client.request('get', httpd.url('method', 'get'),
                               params={'q': 'test'}, loop=self.loop))
            content = self.loop.run_until_complete(r.content.read())
            content = content.decode()

            self.assertIn('"query": "q=test"', content)
            self.assertEqual(r.status, 200)
            r.close()

    def test_HTTP_200_GET_MultiDict_PARAMS(self):
        """A MultiDict of params produces repeated query keys."""
        with test_utils.run_server(self.loop, router=Functional) as httpd:
            r = self.loop.run_until_complete(
                client.request('get', httpd.url('method', 'get'),
                               params=MultiDict(
                                   [('q', 'test1'), ('q', 'test2')]),
                               loop=self.loop))
            content = self.loop.run_until_complete(r.content.read())
            content = content.decode()

            self.assertIn('"query": "q=test1&q=test2"', content)
            self.assertEqual(r.status, 200)
            r.close()

    def test_HTTP_200_GET_WITH_MIXED_PARAMS(self):
        """params are appended after a query string already present in the URL."""
        with test_utils.run_server(self.loop, router=Functional) as httpd:

            @asyncio.coroutine
            def go():
                r = yield from client.request(
                    'get', httpd.url('method', 'get') + '?test=true',
                    params={'q': 'test'}, loop=self.loop)
                content = yield from r.content.read()
                content = content.decode()

                self.assertIn('"query": "test=true&q=test"', content)
                self.assertEqual(r.status, 200)
                r.close()
                # let loop to make one iteration to call connection_lost
                # and close socket
                yield from asyncio.sleep(0, loop=self.loop)

            self.loop.run_until_complete(go())
    def test_POST_DATA(self):
        """A plain dict body is sent as form-encoded data."""
        with test_utils.run_server(self.loop, router=Functional) as httpd:
            url = httpd.url('method', 'post')
            r = self.loop.run_until_complete(
                client.request('post', url, data={'some': 'data'},
                               loop=self.loop))
            self.assertEqual(r.status, 200)

            content = self.loop.run_until_complete(r.json())
            self.assertEqual({'some': ['data']}, content['form'])
            self.assertEqual(r.status, 200)
            r.close()

    def test_POST_DATA_with_explicit_formdata(self):
        """An explicit aiohttp.FormData object posts its fields as a form."""
        with test_utils.run_server(self.loop, router=Functional) as httpd:
            url = httpd.url('method', 'post')
            form = aiohttp.FormData()
            form.add_field('name', 'text')
            r = self.loop.run_until_complete(
                client.request('post', url,
                               data=form,
                               loop=self.loop))
            self.assertEqual(r.status, 200)

            content = self.loop.run_until_complete(r.json())
            self.assertEqual({'name': ['text']}, content['form'])
            self.assertEqual(r.status, 200)
            r.close()

    def test_POST_DATA_with_charset(self):
        """A field with an explicit charset is sent as multipart and round-trips."""
        with test_utils.run_server(self.loop, router=Functional) as httpd:
            url = httpd.url('method', 'post')
            form = aiohttp.FormData()
            form.add_field('name', 'текст',
                           content_type='text/plain; charset=koi8-r')
            r = self.loop.run_until_complete(
                client.request(
                    'post', url, data=form,
                    loop=self.loop))
            content = self.loop.run_until_complete(r.json())

            self.assertEqual(1, len(content['multipart-data']))
            field = content['multipart-data'][0]
            self.assertEqual('name', field['name'])
            self.assertEqual('текст', field['data'])
            self.assertEqual(r.status, 200)

    def test_POST_DATA_with_content_transfer_encoding(self):
        """content_transfer_encoding='base64' encodes the field payload."""
        with test_utils.run_server(self.loop, router=Functional) as httpd:
            url = httpd.url('method', 'post')
            form = aiohttp.FormData()
            form.add_field('name', b'123',
                           content_transfer_encoding='base64')
            r = self.loop.run_until_complete(
                client.request(
                    'post', url, data=form,
                    loop=self.loop))
            content = self.loop.run_until_complete(r.json())

            self.assertEqual(1, len(content['multipart-data']))
            field = content['multipart-data'][0]
            self.assertEqual('name', field['name'])
            self.assertEqual(b'123', binascii.a2b_base64(field['data']))
            # self.assertEqual('base64', field['content-transfer-encoding'])
            self.assertEqual(r.status, 200)

    def test_POST_MultiDict(self):
        """A MultiDict body posts repeated form keys."""
        with test_utils.run_server(self.loop, router=Functional) as httpd:
            url = httpd.url('method', 'post')
            r = self.loop.run_until_complete(
                client.request('post', url, data=MultiDict(
                    [('q', 'test1'), ('q', 'test2')]),
                    loop=self.loop))
            self.assertEqual(r.status, 200)

            content = self.loop.run_until_complete(r.json())
            self.assertEqual({'q': ['test1', 'test2']}, content['form'])
            self.assertEqual(r.status, 200)
            r.close()

    def test_POST_DATA_DEFLATE(self):
        """compress=True deflates the request body; the server reports it."""
        with test_utils.run_server(self.loop, router=Functional) as httpd:
            url = httpd.url('method', 'post')
            r = self.loop.run_until_complete(
                client.request('post', url,
                               data={'some': 'data'}, compress=True,
                               loop=self.loop))
            self.assertEqual(r.status, 200)

            content = self.loop.run_until_complete(r.json())
            self.assertEqual('deflate', content['compression'])
            self.assertEqual({'some': ['data']}, content['form'])
            self.assertEqual(r.status, 200)
            r.close()
    def test_POST_FILES(self):
        """An open file in the data dict is uploaded as chunked multipart."""
        with test_utils.run_server(self.loop, router=Functional) as httpd:
            url = httpd.url('method', 'post')

            here = os.path.dirname(__file__)
            fname = os.path.join(here, 'sample.key')

            with open(fname) as f:
                r = self.loop.run_until_complete(
                    client.request(
                        'post', url, data={'some': f, 'test': b'data'},
                        chunked=1024,
                        headers={'Transfer-Encoding': 'chunked'},
                        loop=self.loop))
                content = self.loop.run_until_complete(r.json())
                files = list(
                    sorted(content['multipart-data'],
                           key=lambda d: d['name']))

                f.seek(0)
                filename = os.path.split(f.name)[-1]

                self.assertEqual(2, len(content['multipart-data']))
                self.assertEqual('some', files[0]['name'])
                self.assertEqual(filename, files[0]['filename'])
                self.assertEqual(f.read(), files[0]['data'])
                self.assertEqual(r.status, 200)
                r.close()

    def test_POST_FILES_DEFLATE(self):
        """A file upload combined with deflate compression round-trips."""
        with test_utils.run_server(self.loop, router=Functional) as httpd:
            url = httpd.url('method', 'post')

            here = os.path.dirname(__file__)
            fname = os.path.join(here, 'sample.key')

            with open(fname) as f:
                r = self.loop.run_until_complete(
                    client.request('post', url, data={'some': f},
                                   chunked=1024, compress='deflate',
                                   loop=self.loop))

                content = self.loop.run_until_complete(r.json())

                f.seek(0)
                filename = os.path.split(f.name)[-1]

                self.assertEqual('deflate', content['compression'])
                self.assertEqual(1, len(content['multipart-data']))
                self.assertEqual(
                    'some', content['multipart-data'][0]['name'])
                self.assertEqual(
                    filename, content['multipart-data'][0]['filename'])
                self.assertEqual(
                    f.read(), content['multipart-data'][0]['data'])
                self.assertEqual(r.status, 200)
                r.close()

    def test_POST_FILES_STR(self):
        """A (name, file-contents-string) pair is sent as a plain form field."""
        with test_utils.run_server(self.loop, router=Functional) as httpd:
            url = httpd.url('method', 'post')

            here = os.path.dirname(__file__)
            fname = os.path.join(here, 'sample.key')

            with open(fname) as f:
                r = self.loop.run_until_complete(
                    client.request('post', url, data=[('some', f.read())],
                                   loop=self.loop))

                content = self.loop.run_until_complete(r.json())

                f.seek(0)
                self.assertEqual(1, len(content['form']))
                self.assertIn('some', content['form'])
                self.assertEqual(f.read(), content['form']['some'][0])
                self.assertEqual(r.status, 200)
                r.close()

    def test_POST_FILES_STR_SIMPLE(self):
        """A bare string body is sent as the raw request content."""
        with test_utils.run_server(self.loop, router=Functional) as httpd:
            url = httpd.url('method', 'post')

            here = os.path.dirname(__file__)
            fname = os.path.join(here, 'sample.key')

            with open(fname) as f:
                r = self.loop.run_until_complete(
                    client.request('post', url, data=f.read(), loop=self.loop))

                content = self.loop.run_until_complete(r.json())

                f.seek(0)
                self.assertEqual(f.read(), content['content'])
                self.assertEqual(r.status, 200)
                r.close()

    def test_POST_FILES_LIST(self):
        """A (name, file-object) pair uploads the file as multipart."""
        with test_utils.run_server(self.loop, router=Functional) as httpd:
            url = httpd.url('method', 'post')

            here = os.path.dirname(__file__)
            fname = os.path.join(here, 'sample.key')

            with open(fname) as f:
                r = self.loop.run_until_complete(
                    client.request('post', url, data=[('some', f)],
                                   loop=self.loop))

                content = self.loop.run_until_complete(r.json())

                f.seek(0)
                filename = os.path.split(f.name)[-1]

                self.assertEqual(1, len(content['multipart-data']))
                self.assertEqual(
                    'some', content['multipart-data'][0]['name'])
                self.assertEqual(
                    filename, content['multipart-data'][0]['filename'])
                self.assertEqual(
                    f.read(), content['multipart-data'][0]['data'])
                self.assertEqual(r.status, 200)
                r.close()
    def test_POST_FILES_LIST_CT(self):
        """A file field with an explicit content_type keeps that type."""
        with test_utils.run_server(self.loop, router=Functional) as httpd:
            url = httpd.url('method', 'post')

            here = os.path.dirname(__file__)
            fname = os.path.join(here, 'sample.key')

            with open(fname) as f:
                form = aiohttp.FormData()
                form.add_field('some', f, content_type='text/plain')
                r = self.loop.run_until_complete(
                    client.request('post', url, loop=self.loop,
                                   data=form))

                content = self.loop.run_until_complete(r.json())

                f.seek(0)
                filename = os.path.split(f.name)[-1]

                self.assertEqual(1, len(content['multipart-data']))
                self.assertEqual(
                    'some', content['multipart-data'][0]['name'])
                self.assertEqual(
                    filename, content['multipart-data'][0]['filename'])
                self.assertEqual(
                    f.read(), content['multipart-data'][0]['data'])
                self.assertEqual(
                    'text/plain', content['multipart-data'][0]['content-type'])
                self.assertEqual(r.status, 200)
                r.close()

    def test_POST_FILES_SINGLE(self):
        """A bare text-mode file object as the body is rejected with ValueError."""
        with test_utils.run_server(self.loop, router=Functional) as httpd:
            url = httpd.url('method', 'post')

            here = os.path.dirname(__file__)
            fname = os.path.join(here, 'sample.key')

            with open(fname) as f:
                with self.assertRaises(ValueError):
                    self.loop.run_until_complete(
                        client.request('post', url, data=f, loop=self.loop))

    def test_POST_FILES_SINGLE_BINARY(self):
        """A bare binary file object is streamed as the raw request body."""
        with test_utils.run_server(self.loop, router=Functional) as httpd:
            url = httpd.url('method', 'post')

            here = os.path.dirname(__file__)
            fname = os.path.join(here, 'sample.key')

            with open(fname, 'rb') as f:
                r = self.loop.run_until_complete(
                    client.request('post', url, data=f, loop=self.loop))

                content = self.loop.run_until_complete(r.json())

                f.seek(0)
                self.assertEqual(0, len(content['multipart-data']))
                self.assertEqual(content['content'], f.read().decode())
                # if system cannot determine 'application/pgp-keys' MIME type
                # then use 'application/octet-stream' default
                self.assertIn(content['headers']['Content-Type'],
                              ('application/pgp-keys',
                               'application/octet-stream'))
                self.assertEqual(r.status, 200)
                r.close()

    def test_POST_FILES_IO(self):
        """An anonymous BytesIO is uploaded with placeholder name/filename."""
        with test_utils.run_server(self.loop, router=Functional) as httpd:
            url = httpd.url('method', 'post')

            data = io.BytesIO(b'data')

            r = self.loop.run_until_complete(
                client.request('post', url, data=[data], loop=self.loop))

            content = self.loop.run_until_complete(r.json())

            self.assertEqual(1, len(content['multipart-data']))
            self.assertEqual(
                {'content-type': 'application/octet-stream',
                 'data': 'data',
                 'filename': 'unknown',
                 'filename*': "utf-8''unknown",
                 'name': 'unknown'}, content['multipart-data'][0])
            self.assertEqual(r.status, 200)
            r.close()
    def test_POST_MULTIPART(self):
        """A hand-built MultipartWriter posts text, JSON and form parts."""
        with test_utils.run_server(self.loop, router=Functional) as httpd:
            url = httpd.url('method', 'post')

            with MultipartWriter('form-data') as writer:
                writer.append('foo')
                writer.append_json({'bar': 'баз'})
                writer.append_form([('тест', '4'), ('сетс', '2')])

            r = self.loop.run_until_complete(
                client.request('post', url, data=writer, loop=self.loop))

            content = self.loop.run_until_complete(r.json())

            self.assertEqual(3, len(content['multipart-data']))
            self.assertEqual({'content-type': 'text/plain', 'data': 'foo'},
                             content['multipart-data'][0])
            self.assertEqual({'content-type': 'application/json',
                              'data': '{"bar": "\\u0431\\u0430\\u0437"}'},
                             content['multipart-data'][1])
            self.assertEqual(
                {'content-type': 'application/x-www-form-urlencoded',
                 'data': '%D1%82%D0%B5%D1%81%D1%82=4&'
                         '%D1%81%D0%B5%D1%82%D1%81=2'},
                content['multipart-data'][2])
            self.assertEqual(r.status, 200)
            r.close()

    def test_POST_FILES_IO_WITH_PARAMS(self):
        """Mixed tuples, a MultiDict and a BytesIO combine into one multipart body."""
        with test_utils.run_server(self.loop, router=Functional) as httpd:
            url = httpd.url('method', 'post')

            data = io.BytesIO(b'data')

            r = self.loop.run_until_complete(
                client.request('post', url,
                               data=(('test', 'true'),
                                     MultiDict(
                                         [('q', 't1'), ('q', 't2')]),
                                     data),
                               loop=self.loop))

            content = self.loop.run_until_complete(r.json())

            self.assertEqual(4, len(content['multipart-data']))
            self.assertEqual(
                {'content-type': 'text/plain',
                 'data': 'true',
                 'name': 'test'}, content['multipart-data'][0])
            self.assertEqual(
                {'content-type': 'application/octet-stream',
                 'data': 'data',
                 'filename': 'unknown',
                 'filename*': "utf-8''unknown",
                 'name': 'unknown'}, content['multipart-data'][1])
            self.assertEqual(
                {'content-type': 'text/plain',
                 'data': 't1',
                 'name': 'q'}, content['multipart-data'][2])
            self.assertEqual(
                {'content-type': 'text/plain',
                 'data': 't2',
                 'name': 'q'}, content['multipart-data'][3])
            self.assertEqual(r.status, 200)
            r.close()
def test_POST_FILES_WITH_DATA(self):
with test_utils.run_server(self.loop, router=Functional) as httpd:
url = httpd.url('method', 'post')
here = os.path.dirname(__file__)
fname = os.path.join(here, 'sample.key')
with open(fname) as f:
r = self.loop.run_until_complete(
client.request('post', url, loop=self.loop,
data={'test': 'true', | |
<reponame>HashSplat/np_rw_buffer
import numpy as np
from .utils import make_thread_safe
from .buffer import RingBuffer, RingBufferThreadSafe, UnderflowError
__all__ = ['UnderflowError', 'AudioFramingBuffer']
class AudioFramingBuffer(RingBufferThreadSafe):
"""The Audio Framing Buffer differs from the RingBuffer by the read and write methods.
You can have a negative length and the write will always write in the correct
position (where it was left off). This helps keep read at the proper position. The
read pointer always moves with the read data. It doesn't care how much data is in the buffer.
After it reads it back-fills zeros, so if it wraps around in the reading it will read those
zeros again. This completely decouples the read from the write unless the read wraps around a
second time. Then the write might not be caught up and issues may arise. We are not worried
about that yet.
Users should not have to set the shape or the maxsize values. Users should only set the sample
rate, seconds, and buffer delay.
Example:
..code-block :: python
>>> buffer = AudioFramingBuffer(2000, 1)
>>> buffer.write(np.array([(i,) for i in range(10)]))
>>> # Buffer: [(read ptr)0, 1, 2, 3, 4, 5, 6, 7, 8, 9, (write ptr) 0, 0, 0, 0, 0]
>>> buffer.read(15)
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, (write ptr) 0, 0, 0, 0, 0] (read ptr at end)
>>> buffer.write(np.array([(i,) for i in range(10)])) # This will write in the position after 19
>>> # Buffer: [0, 0, 0, 0, 0, 0, 0, 0, 0, (was 9) 0, 0, 1, 2, 3, 4, (read ptr) 5, 6, 7, 8, 9] (write ptr at end)
>>> buffer.read(10)
[5, 6, 7, 8, 9, (write ptr) 0, 0, 0, 0, 0] (read ptr at end)
"""
def __init__(self, sample_rate=44100/2, channels=1, seconds=2, buffer_delay=0, dtype=np.float32):
if isinstance(sample_rate, (tuple, list)):
channels = sample_rate[1]
length = sample_rate[0]
sample_rate = length/seconds
self._sample_rate = sample_rate
self._buffer_delay = buffer_delay # seconds
self._seconds = seconds
self.read_frame = 0
self.write_frame = 0
self.can_read = not self._buffer_delay > 0
self._sample_counter = 0
length = np.ceil(self._sample_rate * self._seconds)
super().__init__(shape=(length, channels), dtype=dtype)
# end constructor
channels = RingBufferThreadSafe.columns
@make_thread_safe
def get_sample_rate(self):
"""Return the rate of the data in Hz."""
return self._sample_rate
@make_thread_safe
def set_sample_rate(self, rate):
"""Set the buffer's sample rate. This helps synchronize the buffer size with the total
seconds.
Note:
This method will try to reset the buffer size from set_data.
"""
self._sample_rate = rate
self.maxsize = np.ceil(self._sample_rate * self._seconds)
# self.clear()
sample_rate = property(get_sample_rate, set_sample_rate)
    @property
    @make_thread_safe
    def seconds(self):
        """Return the total number of seconds of audio that the buffer can hold."""
        return self._seconds
@seconds.setter
@make_thread_safe
def seconds(self, seconds):
"""Set the total number of seconds that the buffer can hold."""
self._seconds = seconds
self.maxsize = np.ceil(self.get_sample_rate() * self._seconds)
if self.seconds < self.buffer_delay:
self.buffer_delay = self.seconds
# end seconds
    @property
    @make_thread_safe
    def buffer_delay(self):
        """Return the number of seconds (of data in the buffer) before you can read data from the buffer."""
        return self._buffer_delay
@buffer_delay.setter
@make_thread_safe
def buffer_delay(self, seconds):
"""Set the number of seconds (of data in the buffer) before you can read data from the buffer."""
if self._buffer_delay > self.seconds:
raise ValueError("The buffer delay cannot be greater than the total number of seconds the buffer can hold!")
self._buffer_delay = seconds
# end buffer_delay
@make_thread_safe
def clear(self):
"""Clear the data in the buffer.
This resets can_read and will wait on the buffer_delay again.
"""
super().clear()
self._data[:] = 0
self.can_read = False
# end clear
def _write(self, data, length, error, move_start=False):
"""Actually write the data to the numpy array.
# Note:
# Writing in this buffer does not move the start like the RingBuffer. The write pointer can overrun the
# read pointer.
Args:
data (np.array/np.ndarray): Numpy array of data to write. This should already be in the correct format.
length (int): Length of data to write. (This argument needs to be here for error purposes).
error (bool): Error on overflow else overrun the start pointer or move the start pointer to prevent
overflow (Makes it circular).
move_start (bool)[True]: If error is false should overrun occur or should the start pointer move.
Raises:
OverflowError: If error is True and more data is being written then there is space available.
"""
super()._write(data, length, error, move_start)
if self._length >= (self.get_sample_rate() * self.buffer_delay):
self.can_read = True
@make_thread_safe
def read(self, amount=None, error=False):
"""Read the data and move the start/read pointer, so that data is not read again.
This method reads empty if the amount specified is greater than the amount in the buffer.
Args:
amount (int)[None]: Amount of data to read
error (bool)[False]: Raise an error if there is not enough data.
Raises:
UnderflowError: If error was given as True and the amount is > the length.
Returns:
data (np.array/np.ndarray): Array of data that is the length amount filled with zeros if needed.
"""
# ===== Check if audio buffered enough =====
if amount is None:
amount = self._length
if not self.can_read:
return np.zeros(shape=(amount, self.channels), dtype=self.dtype)
# self._sample_counter += 1
# if ((self._sample_rate % 1) != 0 and mylen > 1 and
# self._sample_counter >= 15):
# self._sample_counter = 0
# try:
# self.write(self._data[self._end])
# except (OverflowError, TypeError, ValueError): pass
idxs = self.get_indexes(self._start, amount, self.maxsize)
self.move_start(amount, error, limit_amount=False)
# Get and Reset the data
data = self._data[idxs].copy()
self._data[idxs] = 0
return data
# end read
    def move_start(self, amount, error=True, limit_amount=True):
        """This is an internal method and should not need to be called by the user.
        Move the start (read) pointer the given amount (+/-).
        Raises:
            UnderflowError: If ``error`` is True and the amount is > the length.
        Args:
            amount (int): Amount to move the start pointer by.
            error (bool)[True]: Raise an UnderflowError else sync the end pointer and length.
            limit_amount (bool)[True]: If True force the amount to be less than or equal to the amount in the buffer.
        """
        if amount > self._length:
            if error:
                raise UnderflowError("Not enough data in the buffer " + repr(self))
            if limit_amount:
                # You cannot read more than what you have
                amount = self._length
        # end error
        stop = self._start + amount
        try:
            # Wrap the pointer around the circular storage.
            self._start = stop % self.maxsize
        except ZeroDivisionError:
            # maxsize == 0 (no storage yet); leave the pointer unwrapped.
            self._start = stop
        #self.sync_length(False or amount < 0) # Length grows if amount was negative.
        self._length -= amount
        # Prevent infinite negative growth (reads are decoupled from writes,
        # so repeated over-reads would otherwise shrink _length forever).
        if self._length <= - (self.maxsize * 2) and self.maxsize > 0:
            self._length = ((self._end - self._start) % self.maxsize) - self.maxsize
    # end move_start
def move_end(self, amount, error=True, move_start=True):
"""This is an internal method and should not need to be called by the user.
Move the end pointer the given amount (+/-).
Raises:
OverflowError: If the amount is > the available buffer space.
Args:
amount (int): Amount to move the end pointer by.
error (bool)[True]: Raise an OverflowError else sync the start pointer and length.
move_start (bool)[True]: If True and amount > available move the start pointer with the end pointer.
"""
# Check for overflow
avaliable = self.maxsize - self._length
if amount > 0 and amount > avaliable:
if error:
raise OverflowError("Not enough space in the buffer " + repr(self) +
" " + repr(len(self)) + " < " + repr(amount))
if move_start:
# Move the start to make it a circular
make_available = amount - avaliable
self.move_start(make_available, False) # Needs to move for sync_length
if amount > self.maxsize:
self.move_start(-(amount - self.maxsize) - 1, False) # Needs to move for sync_length
stop = self._end + amount
try:
self._end = stop % self.maxsize
except ZeroDivisionError:
self._end = stop
# self.sync_length(True and amount >= 0) # Length shrinks if amount was negative.
self._length += amount
# limit the length from growing infinitely
if self._length >= (self.maxsize * 2) and self.maxsize > 0:
self._length = self._length % self.maxsize
# end move_end
    @property
    def shape(self):
        """Return the shape of the underlying data array."""
        return self._data.shape
    @shape.setter
    @make_thread_safe
    def shape(self, new_shape):
        """Set the shape of the underlying storage.

        Delegates to the base class setter, then recomputes how many seconds
        the resized buffer represents at the current sample rate.
        """
        RingBufferThreadSafe.shape.fset(self, new_shape)
        self._seconds = self.maxsize/self.get_sample_rate()
        # Shrink the delay if the resized buffer can no longer hold it.
        if self._seconds < self.buffer_delay:
            self.buffer_delay = self._seconds
def __len__(self):
"""Return | |
to the beginning of the line.
CTRL-E Go to the end of the line.
CTRL-K Delete through to the end of the line.
CTRL-V Paste into command line (operating system dependent.)
If no text is on the command line, then control keys correspond to molecular editing:
Command Entry Field on the External GUI (gray window).
CTRL-A Select all atoms into the named selection "(sele)".
CTRL-C Copy current selection.
CTRL-I Invert currently active selection.
CTRL-V Paste copied or cut atoms into a new object.
CTRL-X Cut the selected atoms.
CTRL-Y Redo.
CTRL-Z Undo.
EDITING
type "help edit_keys" for keyboard shortcuts used in editing.
'''
_self.help('keyboard')
def transparency(_self=cmd):
    '''
TRANSPARENCY
As of version 0.68, transparent surfaces are supported in both
realtime (OpenGL) rendering mode as well as with ray-traced images.
Transparency is currently managed by setting either the global
transparency variable or one attached to an individual molecule object.
It isn't yet possible to control transparency on a per-atom basis.
EXAMPLES
set transparency=0.5 # makes all surfaces 50% transparent
set transparency=0.5, mol3 # makes only mol3's surface transparent
    '''
    # Use the supplied cmd instance like every sibling help topic (the
    # previous code hard-coded the module-level ``cmd``, ignoring _self).
    _self.help('transparency')
def mouse(_self=cmd):
    '''
MOUSE CONTROLS
The configuration can be changed using the "Mouse" menu. The
current configuration is described on screen with a small matrix on
the lower right hand corner, using the following abbreviations:
Buttons (Horizontal Axis)
L = left mouse click
M = middle mouse click
R = right mouse click
Modifiers (Veritical axis on the matrix)
None = no keys held down while clicking
Shft = hold SHIFT down while clicking
Ctrl = hold CTRL down while clicking
CtSh = hold both SHIFT and CTRL down while clicking
Visualization Functions
Rota = Rotates camera about X, Y, and Z axes
RotZ = Rotates camera about the Z axis
Move = Translates along the X and Y axes
MovZ = Translates along Z axis
Clip = Y motion moves the near clipping plane while
PkAt = Pick an atom
PkBd = Pick a bond
Orig = Move origin to selected atom
+lb = Add an atom into the (lb) selection
lb = Define the (lb) selection with the indicated atom.
rb = Define the (rb) selection with the indicated atom.
Editing Functions
RotF = Rotate fragment
MovF = Move fragment
TorF = Torsion fragment
    '''
    # The function previously took no parameters yet referenced ``_self``,
    # raising a NameError; accept ``_self=cmd`` like every sibling topic.
    _self.help('mouse')
def examples(_self=cmd):
    '''
EXAMPLE ATOM SELECTIONS
select bk = ( name CA or name C or name N )
* can be abbreviated as *
sel bk = (n;CA,C,N)
select hev = ( not hydro )
* can be abbreviated as *
sel hev = (!h;)
select site = ( byres ( resi 45:52 expand 5 ))
* can be abbreviated as *
sel site = (b;(i;45:52 x;5))
select combi = ( hev and not site )
* can be abbreviated as *
sel combi = (hev&!site)
    '''
    # Display the docstring above via PyMOL's generic help mechanism.
    _self.help('examples')
def launching(_self=cmd):
    '''
PyMOL COMMAND LINE OPTIONS
-c Command line mode, no GUI. For batch operations.
-i Disable the internal OpenGL GUI (object list, menus, etc.)
-x Disable the external GUI module.
-t Use Tcl/Tk based external GUI module (pmg_tk).
-q Quiet launch. Suppress splash screen & other chatter.
-p Listen for commands on standard input.
-e Start in full-screen mode.
-2 Start in two-button mouse mode.
-o Disable security protections for session files.
-R Launch Greg Landrum's XMLRPC listener.
-B Enable blue-line stereo signal (for Mac stereo)
-G Start in Game mode.
-S Force and launch in stereo, if possible.
-M Force mono even when hardware stereo is present.
-X <int> -Y <int> -W <int> -H <int> -V <int> Adjust window geometry.
-f <# line> Controls display of commands and feedback in OpenGL (0=off).
-r <file.py> Run a Python program (in __main__) on startup.
-l <file.py> Spawn a python program in new thread.
-d <string> Run pymol command string upon startup.
-u <script> Load and append to this PyMOL script or program file.
-s <script> Save commands to this PyMOL script or program file.
-g <file.png> Write a PNG file (after evaluating previous arguments)
<file> can have one of the following extensions, and all
files provided will be loaded or run after PyMOL starts.
.pml PyMOL command script to be run on startup
.py, .pym, .pyc Python program to be run on startup
.pdb Protein Data Bank format file to be loaded on startup
.mmod Macromodel format to be loaded on startup
.mol MDL MOL file to be loaded on startup
.sdf MDL SD file to be parsed and loaded on startup
.xplor X-PLOR Map file (ASCII) to be loaded on startup
.ccp4 CCP4 map file (BINARY) to be loaded on startup
.cc1, .cc2 ChemDraw 3D cartesian coordinate file
.pkl Pickled ChemPy Model (class "chempy.model.Indexed")
.r3d Raster3D file
.cex CEX file (Metaphorics)
.top AMBER topology file
.crd AMBER coordinate file
.rst AMBER restart file
.trj AMBER trajectory
.pse PyMOL session file
.phi Delphi/Grasp Electrostatic Potential Map
    '''
    # Display the launch-option reference via the generic help mechanism.
    _self.help('launching')
def movies(_self=cmd):
    '''
MOVIES
To create a movie, simply load multiple coordinate files
into the same object. This can be accomplished at the command line,
using script files, or by writing PyMOL API-based programs.
The commands:
load frame001.pdb,mov
load frame002.pdb,mov
will create a two frame movie. So will the following program:
from pymol import cmd
for a in ( "frame001.pdb","frame002.pdb" ):
cmd.load(a,"mov")
which can be executed at the command line using the "run" command.
Python built-in glob module can be useful for loading movies.
from pymol import cmd
import glob
for a in ( glob.glob("frame*.pdb") ):
cmd.load(a,"mov")
NOTE
Because PyMOL stores all movie frames in memory, there is
a practical limit to the number of atoms in all coordinate files.
160 MB free RAM enables 500,000 atoms with line representations.
Complex representations require significantly more memory.
    '''
    # Display the movie-loading help text via the generic help mechanism.
    _self.help('movies')
### -------------------------------------------------------------------
def selections(_self=cmd):
    '''
DESCRIPTION
Selections are enclosed in parentheses and contain predicates,
logical operations, object names, selection names and nested
parenthesis: ( [... [(...) ... ]] )
name <atom names> n. <atom names>
resn <residue names> r. <residue names>
resi <residue identifiers> i. <residue identifiers>
chain <chain ID> c. <chain identifiers>
segi <segment identifiers> s. <segment identifiers>
elem <element symbol> e. <element symbols>
flag <number> f. <number>
alt <code>
numeric_type <numeric type> nt. <numeric type>
text_type <text type> tt. <text type>
b <operator> <value>
q <operator> <value>
formal_charge <op> <value> fc. <operator> <value>
partial_charge <op> <value> pc. <operator> <value>
id <original-index>
hydrogen h.
all *
visible v.
hetatm
<selection> and <selection> <selection> & <selection>
<selection> or <selection> <selection> | <selection>
not <selection> ! <selection>
byres <selection> br. <selection>
byobj <selection> bo. <selection>
around <distance> a. <distance>
expand <distance> e. <distance>
gap <distance>
in <selection>
like <selection> l. <selection>
<selection> within <distance> of <selection>
<selection> w. <distance> of <selection>
    '''
    # Display the selection-language reference via the generic help mechanism.
    _self.help('selections')
def povray(_self=cmd):
    '''
DESCRIPTION
PovRay: Persistence of Vision Support Information
The built-in ray-tracer (technically a ray-caster) is as fast or
faster than PovRay for many figures (provided that hash_max is
tuned appropriately for your content). However, PovRay blows
PyMOL away when it comes to rendering images without using lots of
RAM, and with PovRay you get the ability use perspective,
textures, reflections, infinite objects, and a superior lighting
model.
Assuming that PovRay is built and in your path...
ray renderer=1 # will use PovRay instead of the built-in engine
set ray_default_renderer=1 # changes the default renderer to PovRay
ray # will now use PovRay by default
cmd.get_povray() # will give you a tuple of PovRay input strings
# which you can manipulate from Python
    '''
    # Display the PovRay integration notes via the generic help mechanism.
    _self.help('povray')
def stereochemistry(_self=cmd):
"""
PYMOL STEREOCHEMISTRY
PyMOL can label chiral centers; however, due to the recursive and
dependent nature of the determination, PyMOL will refuse to
label structures with alternate coordinates.
To determine stereochemistry for a structure that has alternate
coordinates, you either need to clear the alternate coordinates
field in the | |
"""_import_vdb_ensemble.py: Module to import a vdb ensemble into resqml format."""
version = '15th November 2021'
# Nexus is a registered trademark of the Halliburton Company
import logging
log = logging.getLogger(__name__)
import resqpy.model as rq
import resqpy.olio.vdb as vdb
import resqpy.olio.xml_et as rqet
import resqpy.property as rp
import resqpy.time_series as rts
from resqpy.rq_import import import_nexus
def import_vdb_ensemble(
epc_file,
ensemble_run_dir,
existing_epc = False,
keyword_list = None,
property_kind_list = None,
vdb_static_properties = True, # if True, static vdb properties are imported
vdb_recurrent_properties = True,
decoarsen = True,
timestep_selection = 'all',
create_property_set_per_realization = True,
create_property_set_per_timestep = True,
create_complete_property_set = False,
# remaining arguments only used if existing_epc is False
extent_ijk = None, # 3 element numpy vector
corp_xy_units = 'm',
corp_z_units = 'm',
corp_z_inc_down = True,
ijk_handedness = 'right',
geometry_defined_everywhere = True,
treat_as_nan = None,
resqml_xy_units = 'm',
resqml_z_units = 'm',
resqml_z_inc_down = True,
shift_to_local = True,
local_origin_place = 'centre', # 'centre' or 'minimum'
max_z_void = 0.1, # import will fail if vertical void greater than this is encountered
split_pillars = True,
split_tolerance = 0.01, # applies to each of x, y, z differences
progress_fn = None):
"""Adds properties from all vdb's within an ensemble directory tree to a single RESQML dataset.
Referencing a shared grid.
args:
epc_file (string): filename of epc file to be extended with ensemble properties
ensemble_run_dir (string): path of main ensemble run directory; vdb's within this directory tree are source of import
existing_epc (boolean, default False): if True, the epc_file must already exist and contain the compatible grid
keyword_list (list of strings, optional): if present, only properties for keywords within the list are included
property_kind_list (list of strings, optional): if present, only properties which are mapped to these resqml property
kinds are included in the import
vdb_static_properties (boolean, default True): if False, no static properties are included, regardless of keyword and/or
property kind matches
vdb_recurrent_properties (boolean, default True): if False, no recurrent properties are included, regardless of keyword
and/or property kind matches
decoarsen (boolean, default True): if True and ICOARSE property exists for a grid in a case, the associated property
data is decoarsened; if False, the property data is as stored in the vdb
timestep_selection (string, default 'all'): may be 'first', 'last', 'first and last', or 'all', controlling which
reporting timesteps are included when loading recurrent data
create_property_set_per_realization (boolean, default True): if True, a property set object is created for each realization
create_property_set_per_timestep (boolean, default True): if True, a property set object is created for each timestep
included in the recurrent data import
create_complete_property_set (boolean, default False): if True, a property set object is created containing all the
properties imported; only really useful to differentiate from other properties related to the grid
extent_ijk (triple int, optional): this and remaining arguments are only used if existing_epc is False; the extent
is only needed in case automatic determination of the extent fails
corp_xy_units (string, default 'm'): the units of x & y values in the vdb corp data; should be 'm' (metres) or 'ft' (feet)
corp_z_units (string, default 'm'): the units of z values in the vdb corp data; should be 'm' (metres) or 'ft' (feet)
corp_z_inc_down (boolean, default True): set to True if corp z values are depth; False if elevation
ijk_handedness (string, default 'right'): set to the handedness of the IJK axes in the Nexus model; 'right' or 'left'
geometry_defined_everywhere (boolean, default True): set to False if inactive cells do not have valid geometry;
deprecated - use treat_as_nan argument instead
treat_as_nan (string, optional): if not None, one of 'dots', 'ij_dots', 'inactive'; controls which inactive cells
have their geometry set to undefined
resqml_xy_units (string, default 'm'): the units of x & y values to use in the generated resqml grid;
should be 'm' (metres) or 'ft' (feet)
resqml_z_units (string, default 'm'): the units of z values to use in the generated resqml grid;
should be 'm' (metres) or 'ft' (feet)
resqml_z_inc_down (boolean, default True): set to True if resqml z values are to be depth; False for elevations
shift_to_local (boolean, default True): if True, the resqml coordinate reference system will use a local origin
local_origin_place (string, default 'centre'): where to place the local origin; 'centre' or 'minimum'; only
relevant if shift_to_local is True
max_z_void (float, default 0.1): the tolerance of voids between layers, in z direction; voids greater than this
will cause the grid import to fail
split_pillars (boolean, default True): if False, a grid is generated without split pillars
split_tolerance (float, default 0.01): the tolerance applied to each of x, y, & z values, beyond which a corner
point (and hence pillar) will be split
progress_fn (function(float), optional): if present, this function is called at intervals during processing; it
must accept one floating point argument which will range from 0.0 to 1.0
returns:
resqpy.Model object containing properties for all the realisations; hdf5 and epc files having been updated
note:
if existing_epc is True, the epc file must already exist and contain one grid (or one grid named ROOT) which must
have the correct extent for all realisations within the ensemble; if existing_epc is False, the resqml dataset is
created afresh with a grid extracted from the first realisation in the ensemble; either way, the single grid is used
as the representative grid in the ensemble resqml dataset being generated;
all vdb directories within the directory tree headed by ensemble_run_dir are included in the import; by
default all properties will be imported; the keyword_list, property_kind_list, vdb_static_properties,
vdb_recurrent_properties and timestep_selection arguments can be used to filter the required properties;
if both keyword_list and property_kind_list are provided, a property must match an item in both lists in order
to be included; if recurrent properties are being included then all vdb's should contain the same number of reporting
steps in their recurrent data and these should relate to the same set of timestamps; timestamp data is extracted from a
summary file for the first realisation; no check is made to ensure that reporting timesteps in different realisations
are actually for the same date.
"""
assert epc_file.endswith('.epc')
assert vdb_static_properties or vdb_recurrent_properties, 'no properties selected for ensemble import'
if progress_fn is not None:
progress_fn(0.0)
# fetch a sorted list of the vdb paths found in the run directory tree
ensemble_list = vdb.ensemble_vdb_list(ensemble_run_dir)
if len(ensemble_list) == 0:
log.error("no vdb's found in run directory tree: " + str(ensemble_run_dir))
return None
if not existing_epc:
model = import_nexus(
epc_file[:-4], # output path and file name without .epc or .h5 extension
extent_ijk = extent_ijk, # 3 element numpy vector, in case extent is not automatically determined
vdb_file = ensemble_list[0], # vdb input file
corp_xy_units = corp_xy_units,
corp_z_units = corp_z_units,
corp_z_inc_down = corp_z_inc_down,
ijk_handedness = ijk_handedness,
geometry_defined_everywhere = geometry_defined_everywhere,
treat_as_nan = treat_as_nan,
resqml_xy_units = resqml_xy_units,
resqml_z_units = resqml_z_units,
resqml_z_inc_down = resqml_z_inc_down,
shift_to_local = shift_to_local,
local_origin_place = local_origin_place, # 'centre' or 'minimum'
max_z_void = max_z_void, # import will fail if vertical void greater than this is encountered
split_pillars = split_pillars,
split_tolerance = split_tolerance, # applies to each of x, y, z differences
vdb_static_properties = False,
vdb_recurrent_properties = False,
create_property_set = False)
model = rq.Model(
epc_file = epc_file) # shouldn't be necessary if just created but it feels safer to re-open the model
assert model is not None, 'failed to instantiate model'
grid = model.grid()
assert grid is not None, 'grid not found'
ext_uuid = model.h5_uuid()
assert ext_uuid is not None, 'failed to determine uuid for hdf5 file reference'
hdf5_file = model.h5_file_name(uuid = ext_uuid)
# create reporting timestep time series for recurrent data, if required, based on the first realisation
recur_time_series = None
recur_ts_uuid = None
timestep_list = None
if vdb_recurrent_properties:
summary_file = ensemble_list[0][:-4] + '.sum' # TODO: check timestep summary file extension, .tssum?
full_time_series = rts.time_series_from_nexus_summary(summary_file)
if full_time_series is None:
log.error('failed to extract info from timestep summary file; disabling recurrent property import')
vdb_recurrent_properties = False
if vdb_recurrent_properties:
vdbase = vdb.VDB(ensemble_list[0])
timestep_list = vdbase.list_of_timesteps()
| |
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_rpvstp_rpvstp_port_if_role(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_brief_info = ET.Element("get_stp_brief_info")
config = get_stp_brief_info
output = ET.SubElement(get_stp_brief_info, "output")
spanning_tree_info = ET.SubElement(output, "spanning-tree-info")
spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode")
rpvstp = ET.SubElement(spanning_tree_mode, "rpvstp")
rpvstp = ET.SubElement(rpvstp, "rpvstp")
vlan_id_key = ET.SubElement(rpvstp, "vlan-id")
vlan_id_key.text = kwargs.pop('vlan_id')
port = ET.SubElement(rpvstp, "port")
if_role = ET.SubElement(port, "if-role")
if_role.text = kwargs.pop('if_role')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_rpvstp_rpvstp_port_if_state(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_brief_info = ET.Element("get_stp_brief_info")
config = get_stp_brief_info
output = ET.SubElement(get_stp_brief_info, "output")
spanning_tree_info = ET.SubElement(output, "spanning-tree-info")
spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode")
rpvstp = ET.SubElement(spanning_tree_mode, "rpvstp")
rpvstp = ET.SubElement(rpvstp, "rpvstp")
vlan_id_key = ET.SubElement(rpvstp, "vlan-id")
vlan_id_key.text = kwargs.pop('vlan_id')
port = ET.SubElement(rpvstp, "port")
if_state = ET.SubElement(port, "if-state")
if_state.text = kwargs.pop('if_state')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_rpvstp_rpvstp_port_external_path_cost(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_brief_info = ET.Element("get_stp_brief_info")
config = get_stp_brief_info
output = ET.SubElement(get_stp_brief_info, "output")
spanning_tree_info = ET.SubElement(output, "spanning-tree-info")
spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode")
rpvstp = ET.SubElement(spanning_tree_mode, "rpvstp")
rpvstp = ET.SubElement(rpvstp, "rpvstp")
vlan_id_key = ET.SubElement(rpvstp, "vlan-id")
vlan_id_key.text = kwargs.pop('vlan_id')
port = ET.SubElement(rpvstp, "port")
external_path_cost = ET.SubElement(port, "external-path-cost")
external_path_cost.text = kwargs.pop('external_path_cost')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_rpvstp_rpvstp_port_internal_path_cost(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_brief_info = ET.Element("get_stp_brief_info")
config = get_stp_brief_info
output = ET.SubElement(get_stp_brief_info, "output")
spanning_tree_info = ET.SubElement(output, "spanning-tree-info")
spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode")
rpvstp = ET.SubElement(spanning_tree_mode, "rpvstp")
rpvstp = ET.SubElement(rpvstp, "rpvstp")
vlan_id_key = ET.SubElement(rpvstp, "vlan-id")
vlan_id_key.text = kwargs.pop('vlan_id')
port = ET.SubElement(rpvstp, "port")
internal_path_cost = ET.SubElement(port, "internal-path-cost")
internal_path_cost.text = kwargs.pop('internal_path_cost')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_rpvstp_rpvstp_port_configured_path_cost(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_brief_info = ET.Element("get_stp_brief_info")
config = get_stp_brief_info
output = ET.SubElement(get_stp_brief_info, "output")
spanning_tree_info = ET.SubElement(output, "spanning-tree-info")
spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode")
rpvstp = ET.SubElement(spanning_tree_mode, "rpvstp")
rpvstp = ET.SubElement(rpvstp, "rpvstp")
vlan_id_key = ET.SubElement(rpvstp, "vlan-id")
vlan_id_key.text = kwargs.pop('vlan_id')
port = ET.SubElement(rpvstp, "port")
configured_path_cost = ET.SubElement(port, "configured-path-cost")
configured_path_cost.text = kwargs.pop('configured_path_cost')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_rpvstp_rpvstp_port_designated_port_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_brief_info = ET.Element("get_stp_brief_info")
config = get_stp_brief_info
output = ET.SubElement(get_stp_brief_info, "output")
spanning_tree_info = ET.SubElement(output, "spanning-tree-info")
spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode")
rpvstp = ET.SubElement(spanning_tree_mode, "rpvstp")
rpvstp = ET.SubElement(rpvstp, "rpvstp")
vlan_id_key = ET.SubElement(rpvstp, "vlan-id")
vlan_id_key.text = kwargs.pop('vlan_id')
port = ET.SubElement(rpvstp, "port")
designated_port_id = ET.SubElement(port, "designated-port-id")
designated_port_id.text = kwargs.pop('designated_port_id')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_rpvstp_rpvstp_port_port_priority(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_brief_info = ET.Element("get_stp_brief_info")
config = get_stp_brief_info
output = ET.SubElement(get_stp_brief_info, "output")
spanning_tree_info = ET.SubElement(output, "spanning-tree-info")
spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode")
rpvstp = ET.SubElement(spanning_tree_mode, "rpvstp")
rpvstp = ET.SubElement(rpvstp, "rpvstp")
vlan_id_key = ET.SubElement(rpvstp, "vlan-id")
vlan_id_key.text = kwargs.pop('vlan_id')
port = ET.SubElement(rpvstp, "port")
port_priority = ET.SubElement(port, "port-priority")
port_priority.text = kwargs.pop('port_priority')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_rpvstp_rpvstp_port_designated_bridge_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_brief_info = ET.Element("get_stp_brief_info")
config = get_stp_brief_info
output = ET.SubElement(get_stp_brief_info, "output")
spanning_tree_info = ET.SubElement(output, "spanning-tree-info")
spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode")
rpvstp = ET.SubElement(spanning_tree_mode, "rpvstp")
rpvstp = ET.SubElement(rpvstp, "rpvstp")
vlan_id_key = ET.SubElement(rpvstp, "vlan-id")
vlan_id_key.text = kwargs.pop('vlan_id')
port = ET.SubElement(rpvstp, "port")
designated_bridge_id = ET.SubElement(port, "designated-bridge-id")
designated_bridge_id.text = kwargs.pop('designated_bridge_id')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_rpvstp_rpvstp_port_port_hello_time(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_brief_info = ET.Element("get_stp_brief_info")
config = get_stp_brief_info
output = ET.SubElement(get_stp_brief_info, "output")
spanning_tree_info = ET.SubElement(output, "spanning-tree-info")
spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode")
rpvstp = ET.SubElement(spanning_tree_mode, "rpvstp")
rpvstp = ET.SubElement(rpvstp, "rpvstp")
vlan_id_key = ET.SubElement(rpvstp, "vlan-id")
vlan_id_key.text = kwargs.pop('vlan_id')
port = ET.SubElement(rpvstp, "port")
port_hello_time = ET.SubElement(port, "port-hello-time")
port_hello_time.text = kwargs.pop('port_hello_time')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_rpvstp_rpvstp_port_forward_transitions_count(self, **kwargs):
    """Request the forward-transitions-count of an RPVST port (keyed by vlan-id).

    Builds the get-stp-brief-info XML skeleton, fills in the vlan-id key and
    the requested leaf, then dispatches via the supplied (or default) callback.
    """
    root = ET.Element("get_stp_brief_info")
    config = root
    # Walk down the fixed container hierarchy to the inner rpvstp node.
    node = root
    for tag in ("output", "spanning-tree-info", "spanning-tree-mode",
                "rpvstp", "rpvstp"):
        node = ET.SubElement(node, tag)
    ET.SubElement(node, "vlan-id").text = kwargs.pop('vlan_id')
    port = ET.SubElement(node, "port")
    ET.SubElement(port, "forward-transitions-count").text = kwargs.pop('forward_transitions_count')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_rpvstp_rpvstp_port_received_stp_type(self, **kwargs):
    """Request the received-stp-type of an RPVST port (keyed by vlan-id).

    Builds the get-stp-brief-info XML skeleton, fills in the vlan-id key and
    the requested leaf, then dispatches via the supplied (or default) callback.
    """
    root = ET.Element("get_stp_brief_info")
    config = root
    # Walk down the fixed container hierarchy to the inner rpvstp node.
    node = root
    for tag in ("output", "spanning-tree-info", "spanning-tree-mode",
                "rpvstp", "rpvstp"):
        node = ET.SubElement(node, tag)
    ET.SubElement(node, "vlan-id").text = kwargs.pop('vlan_id')
    port = ET.SubElement(node, "port")
    ET.SubElement(port, "received-stp-type").text = kwargs.pop('received_stp_type')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_rpvstp_rpvstp_port_transmitted_stp_type(self, **kwargs):
    """Request the transmitted-stp-type of an RPVST port (keyed by vlan-id).

    Builds the get-stp-brief-info XML skeleton, fills in the vlan-id key and
    the requested leaf, then dispatches via the supplied (or default) callback.
    """
    root = ET.Element("get_stp_brief_info")
    config = root
    # Walk down the fixed container hierarchy to the inner rpvstp node.
    node = root
    for tag in ("output", "spanning-tree-info", "spanning-tree-mode",
                "rpvstp", "rpvstp"):
        node = ET.SubElement(node, tag)
    ET.SubElement(node, "vlan-id").text = kwargs.pop('vlan_id')
    port = ET.SubElement(node, "port")
    ET.SubElement(port, "transmitted-stp-type").text = kwargs.pop('transmitted_stp_type')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_rpvstp_rpvstp_port_edge_port(self, **kwargs):
    """Request the edge-port flag of an RPVST port (keyed by vlan-id).

    Builds the get-stp-brief-info XML skeleton, fills in the vlan-id key and
    the requested leaf, then dispatches via the supplied (or default) callback.
    """
    root = ET.Element("get_stp_brief_info")
    config = root
    # Walk down the fixed container hierarchy to the inner rpvstp node.
    node = root
    for tag in ("output", "spanning-tree-info", "spanning-tree-mode",
                "rpvstp", "rpvstp"):
        node = ET.SubElement(node, tag)
    ET.SubElement(node, "vlan-id").text = kwargs.pop('vlan_id')
    port = ET.SubElement(node, "port")
    ET.SubElement(port, "edge-port").text = kwargs.pop('edge_port')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_rpvstp_rpvstp_port_auto_edge(self, **kwargs):
    """Request the auto-edge flag of an RPVST port (keyed by vlan-id).

    Builds the get-stp-brief-info XML skeleton, fills in the vlan-id key and
    the requested leaf, then dispatches via the supplied (or default) callback.
    """
    root = ET.Element("get_stp_brief_info")
    config = root
    # Walk down the fixed container hierarchy to the inner rpvstp node.
    node = root
    for tag in ("output", "spanning-tree-info", "spanning-tree-mode",
                "rpvstp", "rpvstp"):
        node = ET.SubElement(node, tag)
    ET.SubElement(node, "vlan-id").text = kwargs.pop('vlan_id')
    port = ET.SubElement(node, "port")
    ET.SubElement(port, "auto-edge").text = kwargs.pop('auto_edge')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_rpvstp_rpvstp_port_admin_edge(self, **kwargs):
    """Request the admin-edge flag of an RPVST port (keyed by vlan-id).

    Builds the get-stp-brief-info XML skeleton, fills in the vlan-id key and
    the requested leaf, then dispatches via the supplied (or default) callback.
    """
    root = ET.Element("get_stp_brief_info")
    config = root
    # Walk down the fixed container hierarchy to the inner rpvstp node.
    node = root
    for tag in ("output", "spanning-tree-info", "spanning-tree-mode",
                "rpvstp", "rpvstp"):
        node = ET.SubElement(node, tag)
    ET.SubElement(node, "vlan-id").text = kwargs.pop('vlan_id')
    port = ET.SubElement(node, "port")
    ET.SubElement(port, "admin-edge").text = kwargs.pop('admin_edge')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_rpvstp_rpvstp_port_edge_delay(self, **kwargs):
    """Request the edge-delay of an RPVST port (keyed by vlan-id).

    Builds the get-stp-brief-info XML skeleton, fills in the vlan-id key and
    the requested leaf, then dispatches via the supplied (or default) callback.
    """
    root = ET.Element("get_stp_brief_info")
    config = root
    # Walk down the fixed container hierarchy to the inner rpvstp node.
    node = root
    for tag in ("output", "spanning-tree-info", "spanning-tree-mode",
                "rpvstp", "rpvstp"):
        node = ET.SubElement(node, tag)
    ET.SubElement(node, "vlan-id").text = kwargs.pop('vlan_id')
    port = ET.SubElement(node, "port")
    ET.SubElement(port, "edge-delay").text = kwargs.pop('edge_delay')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_rpvstp_rpvstp_port_configured_root_guard(self, **kwargs):
    """Request the configured-root-guard of an RPVST port (keyed by vlan-id).

    Builds the get-stp-brief-info XML skeleton, fills in the vlan-id key and
    the requested leaf, then dispatches via the supplied (or default) callback.
    """
    root = ET.Element("get_stp_brief_info")
    config = root
    # Walk down the fixed container hierarchy to the inner rpvstp node.
    node = root
    for tag in ("output", "spanning-tree-info", "spanning-tree-mode",
                "rpvstp", "rpvstp"):
        node = ET.SubElement(node, tag)
    ET.SubElement(node, "vlan-id").text = kwargs.pop('vlan_id')
    port = ET.SubElement(node, "port")
    ET.SubElement(port, "configured-root-guard").text = kwargs.pop('configured_root_guard')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_rpvstp_rpvstp_port_oper_root_guard(self, **kwargs):
    """Request the oper-root-guard of an RPVST port (keyed by vlan-id).

    Builds the get-stp-brief-info XML skeleton, fills in the vlan-id key and
    the requested leaf, then dispatches via the supplied (or default) callback.
    """
    root = ET.Element("get_stp_brief_info")
    config = root
    # Walk down the fixed container hierarchy to the inner rpvstp node.
    node = root
    for tag in ("output", "spanning-tree-info", "spanning-tree-mode",
                "rpvstp", "rpvstp"):
        node = ET.SubElement(node, tag)
    ET.SubElement(node, "vlan-id").text = kwargs.pop('vlan_id')
    port = ET.SubElement(node, "port")
    ET.SubElement(port, "oper-root-guard").text = kwargs.pop('oper_root_guard')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_rpvstp_rpvstp_port_boundary_port(self, **kwargs):
    """Request the boundary-port flag of an RPVST port (keyed by vlan-id).

    Builds the get-stp-brief-info XML skeleton, fills in the vlan-id key and
    the requested leaf, then dispatches via the supplied (or default) callback.
    """
    root = ET.Element("get_stp_brief_info")
    config = root
    # Walk down the fixed container hierarchy to the inner rpvstp node.
    node = root
    for tag in ("output", "spanning-tree-info", "spanning-tree-mode",
                "rpvstp", "rpvstp"):
        node = ET.SubElement(node, tag)
    ET.SubElement(node, "vlan-id").text = kwargs.pop('vlan_id')
    port = ET.SubElement(node, "port")
    ET.SubElement(port, "boundary-port").text = kwargs.pop('boundary_port')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_rpvstp_rpvstp_port_oper_bpdu_guard(self, **kwargs):
    """Request the oper-bpdu-guard of an RPVST port (keyed by vlan-id).

    Builds the get-stp-brief-info XML skeleton, fills in the vlan-id key and
    the requested leaf, then dispatches via the supplied (or default) callback.
    """
    root = ET.Element("get_stp_brief_info")
    config = root
    # Walk down the fixed container hierarchy to the inner rpvstp node.
    node = root
    for tag in ("output", "spanning-tree-info", "spanning-tree-mode",
                "rpvstp", "rpvstp"):
        node = ET.SubElement(node, tag)
    ET.SubElement(node, "vlan-id").text = kwargs.pop('vlan_id')
    port = ET.SubElement(node, "port")
    ET.SubElement(port, "oper-bpdu-guard").text = kwargs.pop('oper_bpdu_guard')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_rpvstp_rpvstp_port_oper_bpdu_filter(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_brief_info = ET.Element("get_stp_brief_info")
config = get_stp_brief_info
output = ET.SubElement(get_stp_brief_info, "output")
spanning_tree_info = ET.SubElement(output, "spanning-tree-info")
spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode")
rpvstp = ET.SubElement(spanning_tree_mode, "rpvstp")
rpvstp = ET.SubElement(rpvstp, "rpvstp")
vlan_id_key = ET.SubElement(rpvstp, "vlan-id")
vlan_id_key.text = kwargs.pop('vlan_id')
port = ET.SubElement(rpvstp, "port")
oper_bpdu_filter = ET.SubElement(port, "oper-bpdu-filter")
oper_bpdu_filter.text = kwargs.pop('oper_bpdu_filter')
callback = kwargs.pop('callback', self._callback)
return | |
"""Internal implementation of binana software
(http://nbcr.ucsd.edu/data/sw/hosted/binana/)
"""
import numpy as np
from oddt.scoring.descriptors import (atoms_by_type,
close_contacts_descriptor,
oddt_vina_descriptor)
from oddt.interactions import (close_contacts, hbonds, hydrophobic_contacts,
pi_cation, pi_stacking, salt_bridges)
class binana_descriptor(object):
def __init__(self, protein=None):
    """ Descriptor build from binana script (as used in NNScore 2.0)
    Parameters
    ----------
    protein: oddt.toolkit.Molecule object (default=None)
        Protein object to be used while generating descriptors.
    """
    self.protein = protein
    # Human-readable names for every descriptor column; extended in the
    # same order the descriptor values are concatenated in build().
    self.titles = []
    # Vina scoring-function terms reused as the first descriptor components.
    self.vina = oddt_vina_descriptor(protein, vina_scores=['vina_gauss1',
                                                           'vina_gauss2',
                                                           'vina_repulsion',
                                                           'vina_hydrophobic',
                                                           'vina_hydrogen'])
    self.titles += self.vina.titles
    # Close contacts descriptor generators
    # (receptor, ligand) AutoDock4 atom-type pairs counted within 4A.
    cc_4_types = (('A', 'A'), ('A', 'C'), ('A', 'CL'), ('A', 'F'),
                  ('A', 'FE'), ('A', 'HD'), ('A', 'MG'), ('A', 'MN'),
                  ('A', 'N'), ('A', 'NA'), ('A', 'OA'), ('A', 'SA'),
                  ('A', 'ZN'), ('BR', 'C'), ('BR', 'HD'), ('BR', 'OA'),
                  ('C', 'C'), ('C', 'CL'), ('C', 'F'), ('C', 'HD'),
                  ('C', 'MG'), ('C', 'MN'), ('C', 'N'), ('C', 'NA'),
                  ('C', 'OA'), ('C', 'SA'), ('C', 'ZN'), ('CL', 'FE'),
                  ('CL', 'HD'), ('CL', 'MG'), ('CL', 'N'), ('CL', 'OA'),
                  ('CL', 'ZN'), ('F', 'HD'), ('F', 'N'), ('F', 'OA'),
                  ('F', 'SA'), ('FE', 'HD'), ('FE', 'N'), ('FE', 'OA'),
                  ('HD', 'HD'), ('HD', 'I'), ('HD', 'MG'), ('HD', 'MN'),
                  ('HD', 'N'), ('HD', 'NA'), ('HD', 'OA'), ('HD', 'P'),
                  ('HD', 'S'), ('HD', 'SA'), ('HD', 'ZN'), ('MG', 'NA'),
                  ('MG', 'OA'), ('MN', 'N'), ('MN', 'OA'), ('N', 'N'),
                  ('N', 'NA'), ('N', 'OA'), ('N', 'SA'), ('N', 'ZN'),
                  ('NA', 'OA'), ('NA', 'SA'), ('NA', 'ZN'), ('OA', 'OA'),
                  ('OA', 'SA'), ('OA', 'ZN'), ('S', 'ZN'), ('SA', 'ZN'),
                  ('A', 'BR'), ('A', 'I'), ('A', 'P'), ('A', 'S'),
                  ('BR', 'N'), ('BR', 'SA'), ('C', 'FE'), ('C', 'I'),
                  ('C', 'P'), ('C', 'S'), ('CL', 'MN'), ('CL', 'NA'),
                  ('CL', 'P'), ('CL', 'S'), ('CL', 'SA'), ('CU', 'HD'),
                  ('CU', 'N'), ('FE', 'NA'), ('FE', 'SA'), ('I', 'N'),
                  ('I', 'OA'), ('MG', 'N'), ('MG', 'P'), ('MG', 'S'),
                  ('MG', 'SA'), ('MN', 'NA'), ('MN', 'P'), ('MN', 'S'),
                  ('MN', 'SA'), ('N', 'P'), ('N', 'S'), ('NA', 'P'),
                  ('NA', 'S'), ('OA', 'P'), ('OA', 'S'), ('P', 'S'),
                  ('P', 'SA'), ('P', 'ZN'), ('S', 'SA'), ('SA', 'SA'),
                  ('A', 'CU'), ('C', 'CD'))
    cc_4_rec_types, cc_4_lig_types = zip(*cc_4_types)
    self.titles += ['cc_%s.%s_4' % (t1, t2) for t1, t2 in cc_4_types]
    self.cc_4 = close_contacts_descriptor(protein,
                                          cutoff=4,
                                          protein_types=cc_4_rec_types,
                                          ligand_types=cc_4_lig_types,
                                          mode='atom_types_ad4',
                                          aligned_pairs=True)
    # Atom-type pairs used for the electrostatic terms computed in build().
    self.ele_types = (('A', 'A'), ('A', 'C'), ('A', 'CL'), ('A', 'F'),
                      ('A', 'FE'), ('A', 'HD'), ('A', 'MG'), ('A', 'MN'),
                      ('A', 'N'), ('A', 'NA'), ('A', 'OA'), ('A', 'SA'),
                      ('A', 'ZN'), ('BR', 'C'), ('BR', 'HD'), ('BR', 'OA'),
                      ('C', 'C'), ('C', 'CL'), ('C', 'F'), ('C', 'HD'),
                      ('C', 'MG'), ('C', 'MN'), ('C', 'N'), ('C', 'NA'),
                      ('C', 'OA'), ('C', 'SA'), ('C', 'ZN'), ('CL', 'FE'),
                      ('CL', 'HD'), ('CL', 'MG'), ('CL', 'N'), ('CL', 'OA'),
                      ('CL', 'ZN'), ('F', 'HD'), ('F', 'N'), ('F', 'OA'),
                      ('F', 'SA'), ('F', 'ZN'), ('FE', 'HD'), ('FE', 'N'),
                      ('FE', 'OA'), ('HD', 'HD'), ('HD', 'I'), ('HD', 'MG'),
                      ('HD', 'MN'), ('HD', 'N'), ('HD', 'NA'), ('HD', 'OA'),
                      ('HD', 'P'), ('HD', 'S'), ('HD', 'SA'), ('HD', 'ZN'),
                      ('MG', 'NA'), ('MG', 'OA'), ('MN', 'N'), ('MN', 'OA'),
                      ('N', 'N'), ('N', 'NA'), ('N', 'OA'), ('N', 'SA'),
                      ('N', 'ZN'), ('NA', 'OA'), ('NA', 'SA'), ('NA', 'ZN'),
                      ('OA', 'OA'), ('OA', 'SA'), ('OA', 'ZN'), ('S', 'ZN'),
                      ('SA', 'ZN'), ('A', 'BR'), ('A', 'I'), ('A', 'P'),
                      ('A', 'S'), ('BR', 'N'), ('BR', 'SA'), ('C', 'FE'),
                      ('C', 'I'), ('C', 'P'), ('C', 'S'), ('CL', 'MN'),
                      ('CL', 'NA'), ('CL', 'P'), ('CL', 'S'), ('CL', 'SA'),
                      ('CU', 'HD'), ('CU', 'N'), ('FE', 'NA'), ('FE', 'SA'),
                      ('I', 'N'), ('I', 'OA'), ('MG', 'N'), ('MG', 'P'),
                      ('MG', 'S'), ('MG', 'SA'), ('MN', 'NA'), ('MN', 'P'),
                      ('MN', 'S'), ('MN', 'SA'), ('N', 'P'), ('N', 'S'),
                      ('NA', 'P'), ('NA', 'S'), ('OA', 'P'), ('OA', 'S'),
                      ('P', 'S'), ('P', 'SA'), ('P', 'ZN'), ('S', 'SA'),
                      ('SA', 'SA'))
    self.titles += ['ele_%s.%s_4' % (t1, t2) for t1, t2 in self.ele_types]
    # Per-ligand atom-type counts.
    self.ligand_atom_types = ['A', 'BR', 'C', 'CL', 'F', 'HD', 'I', 'N', 'NA', 'OA', 'P', 'S', 'SA']
    self.titles += ['lig_%s' % t1 for t1 in self.ligand_atom_types]
    # (receptor, ligand) atom-type pairs counted within 2.5A.
    cc_25_types = [('A', 'A'), ('A', 'C'), ('A', 'CL'), ('A', 'F'),
                   ('A', 'FE'), ('A', 'HD'), ('A', 'MG'), ('A', 'MN'),
                   ('A', 'N'), ('A', 'NA'), ('A', 'OA'), ('A', 'SA'),
                   ('A', 'ZN'), ('BR', 'C'), ('BR', 'HD'), ('BR', 'OA'),
                   ('C', 'C'), ('C', 'CL'), ('C', 'F'), ('C', 'HD'),
                   ('C', 'MG'), ('C', 'MN'), ('C', 'N'), ('C', 'NA'),
                   ('C', 'OA'), ('C', 'SA'), ('C', 'ZN'), ('CD', 'OA'),
                   ('CL', 'FE'), ('CL', 'HD'), ('CL', 'MG'), ('CL', 'N'),
                   ('CL', 'OA'), ('CL', 'ZN'), ('F', 'HD'), ('F', 'N'),
                   ('F', 'OA'), ('F', 'SA'), ('F', 'ZN'), ('FE', 'HD'),
                   ('FE', 'N'), ('FE', 'OA'), ('HD', 'HD'), ('HD', 'I'),
                   ('HD', 'MG'), ('HD', 'MN'), ('HD', 'N'), ('HD', 'NA'),
                   ('HD', 'OA'), ('HD', 'P'), ('HD', 'S'), ('HD', 'SA'),
                   ('HD', 'ZN'), ('MG', 'NA'), ('MG', 'OA'), ('MN', 'N'),
                   ('MN', 'OA'), ('N', 'N'), ('N', 'NA'), ('N', 'OA'),
                   ('N', 'SA'), ('N', 'ZN'), ('NA', 'OA'), ('NA', 'SA'),
                   ('NA', 'ZN'), ('OA', 'OA'), ('OA', 'SA'), ('OA', 'ZN'),
                   ('S', 'ZN'), ('SA', 'ZN')]
    cc_25_rec_types, cc_25_lig_types = zip(*cc_25_types)
    self.cc_25 = close_contacts_descriptor(protein,
                                           cutoff=2.5,
                                           protein_types=cc_25_rec_types,
                                           ligand_types=cc_25_lig_types,
                                           mode='atom_types_ad4',
                                           aligned_pairs=True)
    self.titles += ['cc_%s.%s_2.5' % (t1, t2) for t1, t2 in cc_25_types]
    # H-Bonds (<4A)
    self.titles += ['hb_4_mol_backbone_alpha',
                    'hb_4_mol_backbone_beta',
                    'hb_4_mol_backbone_other',
                    'hb_4_mol_sidechain_alpha',
                    'hb_4_mol_sidechain_beta',
                    'hb_4_mol_sidechain_other',
                    'hb_4_rec_backbone_alpha',
                    'hb_4_rec_backbone_beta',
                    'hb_4_rec_backbone_other',
                    'hb_4_rec_sidechain_alpha',
                    'hb_4_rec_sidechain_beta',
                    'hb_4_rec_sidechain_other']
    # Hydrophobic Contact <4A
    self.titles += ['hyd_4_backbone_alpha',
                    'hyd_4_backbone_beta',
                    'hyd_4_backbone_other',
                    'hyd_4_sidechain_alpha',
                    'hyd_4_sidechain_beta',
                    'hyd_4_sidechain_other',
                    'hyd_4_all']
    # Pi-stacking (<7.5A)
    self.titles += ['pi_stack_7.5_alpha',
                    'pi_stack_7.5_beta',
                    'pi_stack_7.5_other']
    # T-shaped Pi-Pi interaction
    self.titles += ['pi_t_7.5_alpha',
                    'pi_t_7.5_beta',
                    'pi_t_7.5_other']
    # Pi-cation (<6A)
    self.titles += ['pi_cat_mol_6_alpha',
                    'pi_cat_mol_6_beta',
                    'pi_cat_mol_6_other',
                    'pi_cat_rec_6_alpha',
                    'pi_cat_rec_6_beta',
                    'pi_cat_rec_6_other']
    # Active site flexibility (<4A)
    self.titles += ['as_flex_backbone_alpha',
                    'as_flex_backbone_beta',
                    'as_flex_backbone_other',
                    'as_flex_sidechain_alpha',
                    'as_flex_sidechain_beta',
                    'as_flex_sidechain_other',
                    'as_flex_all']
    # Salt bridges (<5.5)
    self.titles += ['salt_bridge_5.5_alpha',
                    'salt_bridge_5.5_beta',
                    'salt_bridge_5.5_other',
                    'salt_bridge_5.5_all']
    # Rotatable bonds
    self.titles += ['num_rotors']
    # Sanity check: one title per descriptor column (len(self) is the
    # descriptor length, defined elsewhere on this class).
    assert len(self.titles) == len(self)
def set_protein(self, protein):
    """Install *protein* as the new default protein everywhere.

    Parameters
    ----------
    protein: oddt.toolkit.Molecule object
        Protein object to be used while generating descriptors.
        Propagated to the Vina descriptor and both close-contacts
        descriptors so subsequent build() calls use it.
    """
    self.protein = protein
    self.vina.set_protein(protein)
    for contacts in (self.cc_4, self.cc_25):
        contacts.protein = protein
def build(self, ligands, protein=None):
""" Descriptor building method
Parameters
----------
ligands: array-like
An array of generator of oddt.toolkit.Molecule objects for which the descriptor is computed
protein: oddt.toolkit.Molecule object (default=None)
Protein object to be used while generating descriptors.
If none, then the default protein (from constructor) is used.
Otherwise, protein becomes new global and default protein.
Returns
-------
descs: numpy array, shape=[n_samples, 351]
An array of binana descriptors, aligned with input ligands
"""
if protein:
self.set_protein(protein)
else:
protein = self.protein
protein_dict = protein.atom_dict
desc = None
for mol in ligands:
mol_dict = mol.atom_dict
vec = np.array([], dtype=float)
vec = tuple()
# Vina
# TODO: Asynchronous output from vina, push command to score and retrieve at the end?
# TODO: Check if ligand has vina scores
vec += tuple(self.vina.build(mol).flatten())
# Close Contacts (<4A)
vec += tuple(self.cc_4.build(mol).flatten())
# Electrostatics (<4A)
ele_rec_types, ele_lig_types = zip(*self.ele_types)
ele_mol_atoms = atoms_by_type(mol_dict, ele_lig_types, 'atom_types_ad4')
ele_rec_atoms = atoms_by_type(protein_dict, ele_rec_types, 'atom_types_ad4')
ele = tuple()
for r_t, m_t in self.ele_types:
mol_ele_dict, rec_ele_dict = close_contacts(ele_mol_atoms[m_t], ele_rec_atoms[r_t], 4)
if len(mol_ele_dict) and len(rec_ele_dict):
ele += (mol_ele_dict['charge'] *
rec_ele_dict['charge'] /
np.sqrt((mol_ele_dict['coords'] -
rec_ele_dict['coords'])**2).sum(axis=-1) *
138.94238460104697e4).sum(), # convert to J/mol
else:
ele += 0,
vec += tuple(np.nan_to_num(ele))
# Ligand Atom Types
atoms = atoms_by_type(mol_dict, self.ligand_atom_types, 'atom_types_ad4')
vec += tuple([len(atoms[t]) for t in self.ligand_atom_types])
# Close Contacts (<2.5A)
vec += tuple(self.cc_25.build(mol).flatten())
# H-Bonds (<4A)
hbond_mol, hbond_rec, strict = hbonds(mol, protein, 4)
# Retain only strict hbonds
hbond_mol = hbond_mol[strict]
hbond_rec = hbond_rec[strict]
backbone = hbond_rec['isbackbone']
alpha = hbond_rec['isalpha']
beta = hbond_rec['isbeta']
other = ~alpha & ~beta
donor_mol = hbond_mol['isdonor']
donor_rec = hbond_rec['isdonor']
hbond_vec = ((donor_mol & backbone & alpha).sum(),
(donor_mol & backbone & beta).sum(),
(donor_mol & backbone & other).sum(),
(donor_mol & ~backbone & alpha).sum(),
(donor_mol & ~backbone & beta).sum(),
(donor_mol & ~backbone & other).sum(),
(donor_rec & backbone & alpha).sum(),
(donor_rec & backbone & beta).sum(),
(donor_rec & backbone & other).sum(),
(donor_rec & ~backbone & alpha).sum(),
(donor_rec & ~backbone & beta).sum(),
(donor_rec & ~backbone & other).sum())
vec += tuple(hbond_vec)
# Hydrophobic contacts (<4A)
hydrophobic = hydrophobic_contacts(mol, protein, 4)[1]
backbone = hydrophobic['isbackbone']
alpha = hydrophobic['isalpha']
beta = hydrophobic['isbeta']
other = ~alpha & ~beta
hyd_vec = ((backbone & alpha).sum(),
(backbone & beta).sum(),
(backbone & other).sum(),
(~backbone & alpha).sum(),
(~backbone & beta).sum(),
(~backbone & other).sum(),
len(hydrophobic))
vec += tuple(hyd_vec)
# Pi-stacking (<7.5A)
pi_mol, pi_rec, pi_paralel, pi_tshaped = pi_stacking(mol, protein, 7.5)
alpha = pi_rec['isalpha'] & pi_paralel
beta = pi_rec['isbeta'] & pi_paralel
other = ~alpha & ~beta & pi_paralel
pi_vec = (alpha.sum(), beta.sum(), other.sum())
vec += tuple(pi_vec)
# T-shaped Pi-Pi interaction
alpha | |
# skyportal/services/test_server/test_api_server.py
import glob
import json
import datetime
import re
import os
import vcr
import tornado.ioloop
import tornado.httpserver
import tornado.web
from suds import Client
import requests
from baselayer.app.env import load_env
from baselayer.log import make_log
def get_cache_file_static():
    """Return the path of the hand-maintained VCR cassette.

    Used for requests that cannot be re-recorded automatically (e.g.
    limited access to the real server). To refresh it: delete the
    recording, re-run all tests with real API keys in place, scrub any
    secrets (API keys) from the file, and commit the result.
    """
    return "data/tests/test_server_recordings_static.yaml"
def get_cache_file():
    """Return the path of the rotating VCR cassette.

    Reuses today's (or a recent) recording if one exists; otherwise, or
    when the existing recording is older than ``refresh_cache_days``
    (module-level setting), the stale file is deleted and a fresh,
    date-stamped path is returned.
    """
    today = datetime.date.today()
    fresh_path = f"cache/test_server_recordings_{today.isoformat()}.yaml"
    existing = glob.glob("cache/test_server_recordings_*.yaml")
    # No cache files yet: hand back a path stamped for today.
    if not existing:
        return fresh_path
    current_file = existing[0]
    # The recording date is embedded in the file name.
    stamped = datetime.date.fromisoformat(re.findall(r"\d+-\d+-\d+", current_file)[0])
    if (today - stamped).days > refresh_cache_days:
        # Too old: drop it and start a new recording.
        os.remove(current_file)
        return fresh_path
    return current_file
def lt_request_matcher(r1, r2):
    """
    Helper function to help determine if two requests to the LT API are equivalent.

    Raises AssertionError (vcrpy matcher convention) when the requests differ.
    Note: the quote escapes in the regex literals were previously mangled
    (r"mode="[a-zA-Z]+"" parsed as a subscript + concatenation), which broke
    matching; they are restored here.
    """
    # Check that the request modes are matching (should be either "request" or "abort")
    r1_request_mode = re.findall(
        r"mode=\"[a-zA-Z]+\"", r1.body.decode("utf-8")
    )[0]
    r2_request_mode = (
        re.findall(r"mode=\"[a-zA-Z]+\"", r2.body.decode("utf-8"))[0]
        if r2.body is not None
        else None
    )
    # For "request" calls, check that the "Device" parameters match up
    r1_device_matches = (
        re.findall(r"<Device name=\".+?\"", r1.body.decode("utf-8"))
        if r1.body is not None
        else []
    )
    r1_device = r1_device_matches[0] if (len(r1_device_matches) != 0) else None
    r2_device_matches = (
        re.findall(r"<Device name=\".+?\"", r2.body.decode("utf-8"))
        if r2.body is not None
        else []
    )
    r2_device = r2_device_matches[0] if (len(r2_device_matches) != 0) else None
    # A request matches an LT request if the URL matches, the POST/GET matches,
    # the mode ("request" or "abort") matches, and the instrument ("Device") matches.
    assert (
        r1.uri == r2.uri
        and r1.method == r2.method
        and r1_request_mode == r2_request_mode
        and r1_device == r2_device
    )
def lco_request_matcher(r1, r2):
    """
    Helper function to help determine if two requests to the LCO API are equivalent.

    Raises AssertionError (vcrpy matcher convention) when the requests differ.
    Two LCO requests match when they target the same endpoint kind
    (cancel/update/submit) with the same HTTP method.
    """
    def classify(uri):
        # Normalize away an explicit HTTPS port before classifying.
        uri = uri.replace(":443", "")
        for kind, pattern in (
            ("delete", r"/api/requestgroups/[0-9]+/cancel/$"),
            ("update", r"/api/requestgroups/[0-9]+/$"),
            ("submit", r"/api/requestgroups/$"),
        ):
            if re.search(pattern, uri) is not None:
                return kind
        return None
    assert classify(r1.uri) == classify(r2.uri) and r1.method == r2.method
def ztf_request_matcher(r1, r2):
    """
    Helper function to help determine if two requests to the ZTF API are equivalent.

    Raises AssertionError (vcrpy matcher convention) when the requests differ.
    Two requests match when both hit the ZTF trigger endpoint with the
    same HTTP method.
    """
    def hits_ztf(uri):
        # Normalize away an explicit HTTPS port before matching.
        return re.search(r"/api/triggers/ztf", uri.replace(":443", "")) is not None
    assert hits_ztf(r1.uri) and hits_ztf(r2.uri) and r1.method == r2.method
class TestRouteHandler(tornado.web.RequestHandler):
"""
This handler intercepts calls coming from SkyPortal API handlers which make
requests to external web services (like the LT telescope) and wraps them in a
vcr context so that requests are cached and played back. The handler will forward
the request to the approriate "real" host, cache the results, and pass them back
to the SkyPortal test API server.
"""
def delete(self):
    """Forward a DELETE call to the real host, record it with VCR, and
    replay the cached response back to the caller.

    Fix: the Tornado-headers-to-dict conversion block was duplicated
    verbatim; the redundant copy is removed (no behavior change).
    """
    is_soap_action = "Soapaction" in self.request.headers
    # Hand-maintained cassette for endpoints we cannot re-record freely.
    if "/api/requestgroups/" in self.request.uri:
        cache = get_cache_file_static()
    elif self.request.uri == "/api/triggers/ztf":
        cache = get_cache_file_static()
    else:
        cache = get_cache_file()
    # Pick the request matcher registered for the target service.
    match_on = ['uri', 'method', 'body']
    if self.request.uri == "/node_agent2/node_agent":
        match_on = ["lt"]
    elif "/api/requestgroups/" in self.request.uri:
        match_on = ["lco"]
    elif self.request.uri == "/api/triggers/ztf":
        match_on = ["ztf"]
    with my_vcr.use_cassette(
        cache,
        record_mode="new_episodes",
        match_on=match_on,
    ) as cass:
        # Resolve the configured real host for this route.
        real_host = None
        for route in cfg["test_server.redirects"].keys():
            if re.match(route, self.request.uri):
                real_host = cfg["test_server.redirects"][route]
        if real_host is not None:
            url = real_host + self.request.uri
            if is_soap_action:
                log(f"Forwarding SOAP method call {url}")
            else:
                log(f"Forwarding DELETE call {url}")
            # Convert Tornado HTTPHeaders object to a regular dict
            headers = {}
            for k, v in self.request.headers.get_all():
                headers[k] = v
            if "/api/requestgroups/" in self.request.uri:
                # LCO only needs the auth header; forward the JSON body as-is.
                header = {'Authorization': headers['Authorization']}
                json_body = (
                    json.loads(self.request.body.decode())
                    if len(self.request.body) > 0
                    else None
                )
                requests.delete(
                    url,
                    json=json_body,
                    headers=header,
                )
            else:
                log(f"Forwarding DELETE call: {url}")
                s = requests.Session()
                req = requests.Request(
                    'DELETE', url, data=self.request.body, headers=headers
                )
                prepped = req.prepare()
                s.send(prepped)
            # Get recorded document and pass it back
            response = cass.responses_of(
                vcr.request.Request("DELETE", url, self.request.body, headers)
            )[0]
            self.set_status(
                response["status"]["code"], response["status"]["message"]
            )
            for k, v in response["headers"].items():
                # The response from this test server will not be chunked even if
                # the real response was
                if not (k == "Transfer-Encoding" and "chunked" in v):
                    self.set_header(k, v[0])
            self.write(response["body"]["string"])
        else:
            self.set_status(500)
            self.write("Could not find test route redirect")
def put(self):
    """Forward a PUT call to the real host, record it with VCR, and
    replay the cached response back to the caller.

    Fix: the Tornado-headers-to-dict conversion block was duplicated
    verbatim; the redundant copy is removed (no behavior change).
    """
    is_soap_action = "Soapaction" in self.request.headers
    # Hand-maintained cassette for endpoints we cannot re-record freely.
    if "/api/requestgroups/" in self.request.uri:
        cache = get_cache_file_static()
    elif self.request.uri == "/api/triggers/ztf":
        cache = get_cache_file_static()
    else:
        cache = get_cache_file()
    # Pick the request matcher registered for the target service.
    match_on = ['uri', 'method', 'body']
    if self.request.uri == "/node_agent2/node_agent":
        match_on = ["lt"]
    elif "/api/requestgroups/" in self.request.uri:
        match_on = ["lco"]
    elif self.request.uri == "/api/triggers/ztf":
        match_on = ["ztf"]
    with my_vcr.use_cassette(
        cache,
        record_mode="new_episodes",
        match_on=match_on,
    ) as cass:
        # Resolve the configured real host for this route.
        real_host = None
        for route in cfg["test_server.redirects"].keys():
            if re.match(route, self.request.uri):
                real_host = cfg["test_server.redirects"][route]
        if real_host is not None:
            url = real_host + self.request.uri
            if is_soap_action:
                log(f"Forwarding SOAP method call {url}")
            else:
                log(f"Forwarding PUT call {url}")
            # Convert Tornado HTTPHeaders object to a regular dict
            headers = {}
            for k, v in self.request.headers.get_all():
                headers[k] = v
            if "/api/requestgroups/" in self.request.uri:
                # LCO only needs the auth header; forward the JSON body as-is.
                header = {'Authorization': headers['Authorization']}
                json_body = (
                    json.loads(self.request.body.decode())
                    if len(self.request.body) > 0
                    else None
                )
                requests.put(
                    url,
                    json=json_body,
                    headers=header,
                )
            else:
                log(f"Forwarding PUT call: {url}")
                s = requests.Session()
                req = requests.Request(
                    'PUT', url, data=self.request.body, headers=headers
                )
                prepped = req.prepare()
                s.send(prepped)
            # Get recorded document and pass it back
            response = cass.responses_of(
                vcr.request.Request("PUT", url, self.request.body, headers)
            )[0]
            self.set_status(
                response["status"]["code"], response["status"]["message"]
            )
            for k, v in response["headers"].items():
                # The response from this test server will not be chunked even if
                # the real response was
                if not (k == "Transfer-Encoding" and "chunked" in v):
                    self.set_header(k, v[0])
            self.write(response["body"]["string"])
        else:
            self.set_status(500)
            self.write("Could not find test route redirect")
def get(self):
    """Forward a GET call to the real host, record it with VCR, and replay
    the cached response. WSDL fetches are made through a suds Client, and
    the service location in the returned WSDL is rewritten to point back at
    this test server so the follow-up SOAP POST is also intercepted.
    """
    is_wsdl = self.get_query_argument('wsdl', None)
    # Static cassette for endpoints that cannot be re-recorded freely.
    if self.request.uri in ["/api/requestgroups/", "/api/triggers/ztf"]:
        cache = get_cache_file_static()
    else:
        cache = get_cache_file()
    with my_vcr.use_cassette(cache, record_mode="new_episodes") as cass:
        # Match redirects against the path only (ignore the query string).
        base_route = self.request.uri.split("?")[0]
        real_host = None
        for route in cfg["test_server.redirects"].keys():
            if re.match(route, base_route):
                real_host = cfg["test_server.redirects"][route]
        if real_host is not None:
            url = real_host + self.request.uri
            # Convert Tornado HTTPHeaders object to a regular dict
            headers = {}
            for k, v in self.request.headers.get_all():
                # Multiple values for a header should be in a comma-separated list
                if k in headers:
                    headers[k] += f",{v}"
                else:
                    headers[k] = str(v)
            if is_wsdl is not None:
                log(f"Forwarding WSDL call {url}")
                # suds fetches and parses the WSDL; the response is captured
                # by the surrounding cassette.
                Client(url=url, headers=headers, cache=None)
            else:
                log(f"Forwarding GET call: {url}")
                requests.get(url, headers=headers)
            # Get recorded document and pass it back
            response = cass.responses_of(
                vcr.request.Request("GET", url, "", headers)
            )[0]
            self.set_status(
                response["status"]["code"], response["status"]["message"]
            )
            for k, v in response["headers"].items():
                # Content Length may change (for the SOAP call) as we overwrite the host
                # in the response WSDL. Similarly, the response from this test server
                # will not be chunked even if the real response was.
                if k != "Content-Length" and not (
                    k == "Transfer-Encoding" and "chunked" in v
                ):
                    self.set_header(k, v[0])
            if is_wsdl is not None:
                # Override service location in the service definition
                # so we can intercept the followup POST call
                response_body = (
                    response["body"]["string"]
                    .decode("utf-8")
                    .replace(
                        real_host, f"http://localhost:{cfg['test_server.port']}"
                    )
                )
            else:
                response_body = response["body"]["string"]
            self.write(response_body)
        else:
            self.set_status(500)
            self.write("Could not find test route redirect")
def post(self):
is_soap_action = "Soapaction" in self.request.headers
if "/api/requestgroups/" in self.request.uri:
cache = get_cache_file_static()
else:
cache = get_cache_file()
match_on | |
and word inputs
inputs_concat = tf.concat([char_inputs, word_inputs], 2)
if is_training and self.config['dropout'] < 1:
inputs_concat = tf.nn.dropout(inputs_concat, self.config['dropout'], name="dropout_inputs")
return inputs_concat
def get_char_embedding(self, pos):
    """Embed the character inputs at position *pos*.

    Creates (or reuses, per variable scope str(pos)) a
    [char_vocab_size, char_size] embedding table, stores it on
    self.char_embedding, and returns the looked-up embeddings for
    self.input_chars[pos].
    """
    with tf.variable_scope(str(pos)):
        self.char_embedding = tf.get_variable(
            "char_embedding",
            [self.char_vocab_size, self.char_size],
            dtype=tf.float32)
        return tf.nn.embedding_lookup(self.char_embedding, self.input_chars[pos])
class lm_cache(lm):
    def __init__(self, config, word_weights, is_training, reuse, use_cache=False):
        '''
        Builds the cache extension on top of the base lm graph.
        Inputs:
            config: configuration dictionary
            word_weights: None or dictionary of word-specific weights
            is_training: True if the model is built for training
            reuse: variable-reuse flag, forwarded to the parent lm
            use_cache: build the cache graph (only honored when not training)
        NOTE(review): relies on attributes created by lm.__init__
        (self.targets, self.num_steps, self.outputs, self.softmax, ...) --
        confirm against the parent class, which is not visible here.
        '''
        super(lm_cache, self).__init__(config, is_training, reuse)
        self.use_cache = use_cache
        # only use cache if we are not training
        if not is_training and use_cache:
            with tf.name_scope("cache_model"):
                # create (+ initialize) all extra variables needed for the cache model
                self.init_cache_variables(word_weights)
                # split targets in a list of tensors per time_step
                self._targets_split = targets_split = tf.split(self.targets, self.num_steps, 1)
                # dummy operation (for first step in loop)
                update_op = tf.no_op(name="eval_op")
                # calculate cache prob and update cache for every time step
                list_cache_probs = []
                for time_step in range(self.num_steps):
                    # update_op should be applied before executing the following steps (i.e. cache should be updated)
                    with tf.control_dependencies([update_op]):
                        # calculate cache prob
                        if 'reg_cache' in self.config:
                            # regular cache prob (based on frequency of words only)
                            cache_probs = self.calc_reg_cache_prob()
                        else:
                            # neural cache prob (based on frequency of words + similarity between hidden states)
                            cache_probs = self.calc_neural_cache_prob(self.outputs[time_step])
                        list_cache_probs.append(cache_probs)
                        print('targets_split[time_step]: {0}'.format(targets_split[time_step]))
                        # update cache; the returned assign ops are grouped into the
                        # next step's control dependency so the cache advances one
                        # word per time step
                        update_cache_words, update_cache_states = self.update_cache(
                            targets_split[time_step], self.outputs[time_step])
                        update_op = tf.group(update_cache_words, update_cache_states)
                # assign new cache probs to variable
                with tf.control_dependencies([update_op]):
                    # concatenate the list of cache prob tensors for all time steps
                    all_cache_probs = tf.concat(list_cache_probs, 0, name="cache_probs_all_steps")
                    self.cache_probs_op = self.all_cache_probs.assign(all_cache_probs).op
                # calculate interpolation of normal and cache probabilities
                with tf.control_dependencies([self.cache_probs_op]):
                    self.interpolate_probs(self.softmax, self.all_cache_probs)
                ##### the code below is only used for rescoring (!!! batch size = 1 and num steps = 1) #####
                if 'num_hypotheses' in self.config:
                    # get log probability of target word
                    prob_target_interp = tf.gather(tf.reshape(self.result_interpolation,
                        [self.vocab_size]), tf.gather(self.targets, 0))
                    self.prob_target_interp_op = self.prob_target_interp.assign(prob_target_interp).op
                    with tf.control_dependencies([self.prob_target_interp_op]):
                        # new logprob for whole sentence: add logprob of current word to logprob of sentence
                        new_prob = tf.add(prob_target_interp, self.prob_sentence_interp)
                        self.prob_sentence_interp_op = self.prob_sentence_interp.assign(new_prob).op
                    # if end of sentence reached, add cache words, cache states and
                    # prob of sentence to memory of previous hypotheses
                    self.update_prev_hyp_ops = self.update_cache_end_sentence()
                    # keep track of cache of best hypothesis from previous segment
                    self.keep_best_prev = self.get_best_prev_segment()
def init_cache_variables(self, word_weights):
'''
This function initialized all extra variables needed for the cache model.
Input:
word_weights: None or dictionary of word-specific weights
'''
self.cache_size = self.config['cache_size']
self.interp = tf.constant(self.config["interp"], name="interp_param")
self.num_hyp = tf.Variable(0, name='num_hyp', trainable=False, dtype=tf.int32)
self.increment_num_hyp_op = tf.assign(self.num_hyp, self.num_hyp+1)
self.initialize_num_hyp_op = tf.assign(self.num_hyp, 0)
if not 'reg_cache' in self.config:
self.flatness = tf.constant(self.config["flatness"], name="flatness")
if word_weights != None:
weights = collections.OrderedDict(sorted(word_weights.items())).values()
self.word_weights = tf.constant([weights])
if 'scale_ww' in self.config:
self.word_weights = self.config['scale_ww'] * self.word_weights
if 'select_cache' in self.config:
# if the weights are scaled, also scale the threshold!
if 'scale_ww' in self.config:
select_cache = self.config['scale_ww'] * self.config['select_cache']
else:
select_cache = self.config['select_cache']
# check which word weights are greater than or equal to the threshold
self.greater = greater = tf.greater_equal(self.word_weights, select_cache)
# transpose and convert True to 1 and False to 0
self.thresholded_weights = tf.transpose(tf.cast(greater, tf.float32),
name="thresholded_weights")
with tf.name_scope("cache"):
# cache states
self.cache_states = tf.Variable(tf.zeros([self.size, self.cache_size, self.batch_size]),
name="cache_states", dtype=tf.float32)
# cache words (initialize to vocab_size+1: this index means that the cache slot is empty)
self.cache_words = tf.Variable(tf.fill([self.cache_size, self.batch_size], self.vocab_size+1),
name="cache_words")
with tf.name_scope("probs"):
self.all_cache_probs = tf.Variable(tf.zeros([self.batch_size*self.num_steps,
self.vocab_size]), name="cache_probabilities", dtype=tf.float32)
self.result_interpolation = tf.Variable(tf.zeros([self.batch_size*self.num_steps,
self.vocab_size]), name="interpolated_probs", dtype=tf.float32)
self.prob_target_interp = tf.Variable(tf.zeros([1]),
name="curr_prob_target", dtype=tf.float32)
self.prob_sentence_interp = tf.Variable(tf.zeros([1]),
name="curr_prob_sentence", dtype=tf.float32)
if 'num_hypotheses' in self.config:
# variables that keep track of probs, cache words and cache states from previous hypotheses
with tf.name_scope("memory_previous_hypotheses"):
self.cache_words_prev_hyp = tf.Variable(tf.fill([self.config['num_hypotheses'], self.cache_size,
self.batch_size], self.vocab_size+1), name="memory_words")
self.cache_states_prev_hyp = tf.Variable(tf.zeros([self.config['num_hypotheses'], self.size,
self.cache_size, self.batch_size]), name="memory_states", dtype=tf.float32)
self.sentence_probs_prev_hyp = tf.Variable(tf.zeros([self.config['num_hypotheses']]),
name="memory_probs")
with tf.name_scope("cache_best_prev"):
# variables that will keep track with what the cache needs to be initialized
self.cache_words_best_prev = tf.Variable(tf.fill([self.cache_size, self.batch_size],
self.vocab_size+1), name="cache_words_best_prev")
self.cache_states_best_prev = tf.Variable(tf.zeros([self.size, self.cache_size,
self.batch_size]), name="cache_states_best_prev", dtype=tf.float32)
# initialize cache with cache of best previous hypothesis
init_words_best_prev = self.cache_words.assign(self.cache_words_best_prev).op
init_states_best_prev = self.cache_states.assign(self.cache_states_best_prev).op
self.init_cache_best_prev = tf.group(init_words_best_prev, init_states_best_prev, name="init_cache_best_prev")
self.cache_words.read_value()
self.cache_states.read_value()
if 'num_hypotheses' in self.config:
to_initialize = [self.cache_states, self.cache_words, self.cache_words_prev_hyp, self.cache_states_prev_hyp,
self.sentence_probs_prev_hyp, self.cache_words_best_prev, self.cache_states_best_prev]
else:
to_initialize = [self.cache_states, self.cache_words]
# initializes cache to empty cache - used at the beginning to avoid
# using data stored in cache from previous session
with tf.name_scope("init_cache_empty"):
self.init_cache_op = tf.variables_initializer(to_initialize, name="init_cache")
def calc_reg_cache_prob(self):
'''
Calculates the cache probability for a regular cache model.
'''
with tf.name_scope("calc_neural_cache_prob"):
# remove last word from cache
real_cache_words = tf.slice(self.cache_words, [0,0], [self.cache_size-1, self.batch_size],
name="current_cache_words")
# make one-hot vectors of words in the cache
self.cache_words_one_hot = cache_words_one_hot = tf.one_hot(real_cache_words, self.vocab_size, axis=0,
name="cache_words_one_hot")
# exponential decay
if 'exp_decay' in self.config:
print('apply exponential decay')
if not 'decay_rate' in self.config:
raise IOError("Specify a decay_rate in the config file.")
weights_np = np.exp(-self.config['decay_rate']*np.arange(self.cache_size-1, 0, -1)) #NEW
weights_tf = tf.constant(weights_np, dtype=tf.float32, name="exp_decay_weights")
weighted_words = tf.multiply(tf.reshape(cache_words_one_hot, [self.vocab_size, self.cache_size-1]),
weights_tf, name="weighted_words") #NEW
cache_words_one_hot = tf.reshape(weighted_words, [self.vocab_size, self.cache_size-1, 1])
# sum over one hot vectors to get frequencies
self.sum_one_hot = sum_one_hot = tf.reduce_sum(cache_words_one_hot, axis=1)
# normalize
self.normalized_probs = normalized_probs = sum_one_hot / tf.reduce_sum(sum_one_hot)
return tf.transpose(normalized_probs)
def calc_neural_cache_prob(self, states_one_step):
'''
Calculates the neural cache probability.
Input:
states_one_step = Tensor of size [batch_size x size], states for one time step for all batches
Returns:
probs_all_batches: cache probabilities for all words in the vocabulary for all batches
size = [batch_size x vocab_size]
'''
with tf.name_scope("calc_neural_cache_prob"):
# remove last word + state from cache
real_cache_words = tf.slice(self.cache_words, [0,0], [self.cache_size-1, self.batch_size],
name="current_cache_words")
real_cache_states = tf.slice(self.cache_states, [0,0,0], [self.size, self.cache_size-1, self.batch_size],
name="current_cache_states")
# split cache_states in a list of batch_size Tensors of size [size x cache_size x 1]
# each element in the list is thus the cache for a certain batch
split_cache_states = tf.split(real_cache_states, self.batch_size, 2, name="split_cache_states_per_batch")
# make one-hot vectors of words in the cache
cache_words_one_hot = cache_words_one_hot = tf.one_hot(real_cache_words, self.vocab_size, axis=0,
name="cache_words_one_hot") # NEW
# add a row of 1's, to prevent 'not a number (nan)' at the end:
# if all probs are 0, the normalization will try to divide by 0
cache_words_one_hot = tf.concat([cache_words_one_hot, tf.ones([1, self.cache_size-1, self.batch_size])], 0)
# split the dot products and one hot vectors per batch
split_one_hots = tf.split(cache_words_one_hot, self.batch_size, 2, name="split_cache_words_one_hot")
split_curr_states = tf.split(states_one_step, self.batch_size, 0, name="split_curr_states")
list_cache_probs = []
print('self.batch_size: {0}'.format(self.batch_size))
# calculate cache probabilities per batch
for batch_num in range(self.batch_size):
# compute cache probability:
# exp(flatness parameter * dot product of current hidden states in batch
# with hidden states in cache for that batch)
# result = Tensor of size [num_steps x cache_size]
self.dot_product = dot_product = tf.exp(self.flatness * tf.matmul(split_curr_states[batch_num],
tf.reshape(split_cache_states[batch_num], [self.size, self.cache_size-1])),
name="unnorm_cache_prob_dot_product") # NEW
self.one_hot_batch = one_hot_batch = tf.reshape(split_one_hots[batch_num], [self.vocab_size+1, self.cache_size-1],
name="one_hot_cache_words_reshaped") # NEW
print('self.one_hot_batch: {0}'.format(one_hot_batch))
# multiply one hot vectors of words with cache probs to get cache probability for all words in the cache
# if a word does not occur in the cache, the cache probability is 0
# result = Tensor of size [vocab_size x num_steps]
self.cache_probs_single_batch = cache_probs = tf.matmul(one_hot_batch, tf.transpose(dot_product),
name="unnorm_cache_probs_vocab_dummy")
# remove cache probability for dummy word (index vocab_size+1) (size = [vocab_size])
self.cache_probs_sliced = cache_probs = tf.slice(tf.reshape(cache_probs, [self.vocab_size+1]), [0],
[self.vocab_size], name="cache_probs_vocab_no_dummy")
if self.config['weights_comb'] == 'info_log_linear':
cache_probs = tf.add(cache_probs, 0.000001)
# normalize cache probs
self.cache_probs_batch_norm = cache_probs_batch = cache_probs / tf.reduce_sum(cache_probs)
cache_probs_no_nan = tf.where(tf.is_nan(cache_probs_batch), tf.zeros_like(cache_probs_batch), cache_probs_batch)
cache_probs_batch = cache_probs_no_nan
list_cache_probs.append(cache_probs_batch)
probs_all_batches = tf.stack(list_cache_probs, name="stack_cache_probs_all_batches")
return probs_all_batches
def update_cache(self, targets_one_step, states_one_step):
'''
Adds targets to self.cache_words and states to self.cache_states + reduces the size until cache_size.
Inputs:
targets_one_step: targets for all batches for one time step ([batch_size x 1])
states_one_step: hidden states for all batches for one time step ([batch_size x size])
Returns:
update_cache_words: operation that effectively assigns the new words to the cache
update_cache_states: operation that effectively assigns the new states to the cache
'''
with tf.name_scope("update_cache"):
with tf.name_scope("update_words"):
# 1) add the target words to the cache
# reshape such that vector can be concatenated with matrix
self._reshaped_words = reshaped_words = tf.reshape(targets_one_step, [1, self.batch_size],
name="reshaped_words_one_step")
# concatenate cache matrix with targets of current time step
self._concat_words = tf.concat([self.cache_words, reshaped_words], 0, name="concat_cache_targets")
# take slice starting at second element in cache (index 1) (first element drops out of cache)
# until cache_size is reached (contains the new targets)
sliced_words = tf.slice(self._concat_words, [1,0], [self.cache_size, self.batch_size],
name="sliced_cache_targets")
to_assign = sliced_words
# only add words to the cache with an information weight > threshold
if 'select_cache' in self.config:
# make one-hot vectors of target words
# size = [vocab_size x 1 x batch_size]
self.target_words_one_hot = target_words_one_hot = tf.one_hot(targets_one_step,
self.vocab_size, axis=0, name="target_words_one_hot")
print('self.target_words_one_hot: {0}'.format(self.target_words_one_hot))
print('tf.reshape(target_words_one_hot, [self.vocab_size, self.batch_size]: {0}'.format(tf.reshape(target_words_one_hot, [self.vocab_size, self.batch_size])))
# multiply one-hot vectors with thresholded weights: only words with a weight
# higher than the threshold/that have a 1 in mult should be added to the cache
self.mult = mult = tf.multiply(tf.reshape(target_words_one_hot, [self.vocab_size, self.batch_size]),
self.thresholded_weights)
print('mult: {0}'.format(mult))
# sum over weighted one-hots (if target word has zero weight, the 1 will become a 0)
# + cast to integer
self.sum_mult = sum_mult = tf.cast(tf.reduce_sum(mult, axis=0), tf.int32)
# if the sum is larger than 0, condition is True --> word should be added
condition = tf.greater(sum_mult, 0)
self.condition = condition = tf.reshape(condition, [])
# if the sum is larger than 0, assign new cache, otherwise use old cache
self.to_assign = to_assign = tf.cond(condition, lambda: sliced_words, lambda: self.cache_words)
# assign new tensor to self.cache_words
update_cache_words = self.cache_words.assign(to_assign).op
with tf.name_scope("update_states"):
# 2) add the hidden states to the cache
# reshape states to [size x 1 x batch_size] such that we can concatenate with cache_states
reshaped_states = tf.reshape(tf.transpose(states_one_step), [self.size, 1, self.batch_size],
name="reshaped_states_one_step")
# concatenate cache_states with the new states
concat_states = tf.concat([self.cache_states, reshaped_states], 1, name="concat_cache_states")
# slice to reduce size of cache again
sliced_states = tf.slice(concat_states, [0,1,0], [self.size, self.cache_size, self.batch_size],
name="sliced_cache_states")
to_assign = sliced_states
# only add states to the cache with an | |
8*m.b1110 == 0)
# NOTE(review): machine-generated Pyomo model fragment -- edits limited to
# comments; constraint bodies must stay exactly as emitted by the generator.
# --- linking constraints: each continuous x is tied to a scaled binary b
# --- via x + coeff*b == 0
m.c1318 = Constraint(expr= m.x85 + 6*m.b1111 == 0)
m.c1319 = Constraint(expr= m.x86 + 2*m.b1112 == 0)
m.c1320 = Constraint(expr= m.x87 + m.b1113 == 0)
m.c1321 = Constraint(expr= m.x88 + 3*m.b1114 == 0)
m.c1322 = Constraint(expr= m.x89 + 8*m.b1115 == 0)
m.c1323 = Constraint(expr= m.x90 + 3*m.b1116 == 0)
m.c1324 = Constraint(expr= m.x91 + 4*m.b1117 == 0)
m.c1325 = Constraint(expr= m.x92 + 9*m.b1118 == 0)
m.c1326 = Constraint(expr= m.x93 + 5*m.b1119 == 0)
m.c1327 = Constraint(expr= m.x94 + m.b1120 == 0)
m.c1328 = Constraint(expr= m.x95 + 3*m.b1121 == 0)
m.c1329 = Constraint(expr= m.x96 + 9*m.b1122 == 0)
m.c1330 = Constraint(expr= m.x97 + 5*m.b1123 == 0)
m.c1331 = Constraint(expr= m.x98 + 5*m.b1124 == 0)
m.c1332 = Constraint(expr= m.x99 + 3*m.b1125 == 0)
m.c1333 = Constraint(expr= m.x100 + 3*m.b1126 == 0)
m.c1334 = Constraint(expr= m.x101 + 5*m.b1127 == 0)
m.c1335 = Constraint(expr= m.x102 + 3*m.b1128 == 0)
m.c1336 = Constraint(expr= m.x103 + 2*m.b1129 == 0)
m.c1337 = Constraint(expr= m.x104 + 6*m.b1130 == 0)
m.c1338 = Constraint(expr= m.x105 + 4*m.b1131 == 0)
m.c1339 = Constraint(expr= m.x106 + 6*m.b1132 == 0)
m.c1340 = Constraint(expr= m.x107 + 2*m.b1133 == 0)
m.c1341 = Constraint(expr= m.x108 + 6*m.b1134 == 0)
m.c1342 = Constraint(expr= m.x109 + 6*m.b1135 == 0)
m.c1343 = Constraint(expr= m.x110 + 6*m.b1136 == 0)
m.c1344 = Constraint(expr= m.x111 + 4*m.b1137 == 0)
m.c1345 = Constraint(expr= m.x112 + 3*m.b1138 == 0)
m.c1346 = Constraint(expr= m.x113 + 3*m.b1139 == 0)
m.c1347 = Constraint(expr= m.x114 + 2*m.b1140 == 0)
m.c1348 = Constraint(expr= m.x115 + m.b1141 == 0)
m.c1349 = Constraint(expr= m.x116 + 5*m.b1142 == 0)
m.c1350 = Constraint(expr= m.x117 + 8*m.b1143 == 0)
m.c1351 = Constraint(expr= m.x118 + 6*m.b1144 == 0)
m.c1352 = Constraint(expr= m.x119 + 9*m.b1145 == 0)
m.c1353 = Constraint(expr= m.x120 + 5*m.b1146 == 0)
m.c1354 = Constraint(expr= m.x121 + 2*m.b1147 == 0)
# --- ordering constraints over consecutive binary triples
# --- (b_i <= b_{i+1} <= b_{i+2} within each group of three)
m.c1355 = Constraint(expr= m.b908 - m.b909 <= 0)
m.c1356 = Constraint(expr= m.b908 - m.b910 <= 0)
m.c1357 = Constraint(expr= m.b909 - m.b910 <= 0)
m.c1358 = Constraint(expr= m.b911 - m.b912 <= 0)
m.c1359 = Constraint(expr= m.b911 - m.b913 <= 0)
m.c1360 = Constraint(expr= m.b912 - m.b913 <= 0)
m.c1361 = Constraint(expr= m.b914 - m.b915 <= 0)
m.c1362 = Constraint(expr= m.b914 - m.b916 <= 0)
m.c1363 = Constraint(expr= m.b915 - m.b916 <= 0)
m.c1364 = Constraint(expr= m.b917 - m.b918 <= 0)
m.c1365 = Constraint(expr= m.b917 - m.b919 <= 0)
m.c1366 = Constraint(expr= m.b918 - m.b919 <= 0)
m.c1367 = Constraint(expr= m.b920 - m.b921 <= 0)
m.c1368 = Constraint(expr= m.b920 - m.b922 <= 0)
m.c1369 = Constraint(expr= m.b921 - m.b922 <= 0)
m.c1370 = Constraint(expr= m.b923 - m.b924 <= 0)
m.c1371 = Constraint(expr= m.b923 - m.b925 <= 0)
m.c1372 = Constraint(expr= m.b924 - m.b925 <= 0)
m.c1373 = Constraint(expr= m.b926 - m.b927 <= 0)
m.c1374 = Constraint(expr= m.b926 - m.b928 <= 0)
m.c1375 = Constraint(expr= m.b927 - m.b928 <= 0)
m.c1376 = Constraint(expr= m.b929 - m.b930 <= 0)
m.c1377 = Constraint(expr= m.b929 - m.b931 <= 0)
m.c1378 = Constraint(expr= m.b930 - m.b931 <= 0)
m.c1379 = Constraint(expr= m.b932 - m.b933 <= 0)
m.c1380 = Constraint(expr= m.b932 - m.b934 <= 0)
m.c1381 = Constraint(expr= m.b933 - m.b934 <= 0)
m.c1382 = Constraint(expr= m.b935 - m.b936 <= 0)
m.c1383 = Constraint(expr= m.b935 - m.b937 <= 0)
m.c1384 = Constraint(expr= m.b936 - m.b937 <= 0)
m.c1385 = Constraint(expr= m.b938 - m.b939 <= 0)
m.c1386 = Constraint(expr= m.b938 - m.b940 <= 0)
m.c1387 = Constraint(expr= m.b939 - m.b940 <= 0)
m.c1388 = Constraint(expr= m.b941 - m.b942 <= 0)
m.c1389 = Constraint(expr= m.b941 - m.b943 <= 0)
m.c1390 = Constraint(expr= m.b942 - m.b943 <= 0)
m.c1391 = Constraint(expr= m.b944 - m.b945 <= 0)
m.c1392 = Constraint(expr= m.b944 - m.b946 <= 0)
m.c1393 = Constraint(expr= m.b945 - m.b946 <= 0)
m.c1394 = Constraint(expr= m.b947 - m.b948 <= 0)
m.c1395 = Constraint(expr= m.b947 - m.b949 <= 0)
m.c1396 = Constraint(expr= m.b948 - m.b949 <= 0)
m.c1397 = Constraint(expr= m.b950 - m.b951 <= 0)
m.c1398 = Constraint(expr= m.b950 - m.b952 <= 0)
m.c1399 = Constraint(expr= m.b951 - m.b952 <= 0)
m.c1400 = Constraint(expr= m.b953 - m.b954 <= 0)
m.c1401 = Constraint(expr= m.b953 - m.b955 <= 0)
m.c1402 = Constraint(expr= m.b954 - m.b955 <= 0)
m.c1403 = Constraint(expr= m.b956 - m.b957 <= 0)
m.c1404 = Constraint(expr= m.b956 - m.b958 <= 0)
m.c1405 = Constraint(expr= m.b957 - m.b958 <= 0)
m.c1406 = Constraint(expr= m.b959 - m.b960 <= 0)
m.c1407 = Constraint(expr= m.b959 - m.b961 <= 0)
m.c1408 = Constraint(expr= m.b960 - m.b961 <= 0)
m.c1409 = Constraint(expr= m.b962 - m.b963 <= 0)
m.c1410 = Constraint(expr= m.b962 - m.b964 <= 0)
m.c1411 = Constraint(expr= m.b963 - m.b964 <= 0)
m.c1412 = Constraint(expr= m.b965 - m.b966 <= 0)
m.c1413 = Constraint(expr= m.b965 - m.b967 <= 0)
m.c1414 = Constraint(expr= m.b966 - m.b967 <= 0)
m.c1415 = Constraint(expr= m.b968 - m.b969 <= 0)
m.c1416 = Constraint(expr= m.b968 - m.b970 <= 0)
m.c1417 = Constraint(expr= m.b969 - m.b970 <= 0)
m.c1418 = Constraint(expr= m.b971 - m.b972 <= 0)
m.c1419 = Constraint(expr= m.b971 - m.b973 <= 0)
m.c1420 = Constraint(expr= m.b972 - m.b973 <= 0)
m.c1421 = Constraint(expr= m.b974 - m.b975 <= 0)
m.c1422 = Constraint(expr= m.b974 - m.b976 <= 0)
m.c1423 = Constraint(expr= m.b975 - m.b976 <= 0)
m.c1424 = Constraint(expr= m.b977 - m.b978 <= 0)
m.c1425 = Constraint(expr= m.b977 - m.b979 <= 0)
m.c1426 = Constraint(expr= m.b978 - m.b979 <= 0)
m.c1427 = Constraint(expr= m.b980 - m.b981 <= 0)
m.c1428 = Constraint(expr= m.b980 - m.b982 <= 0)
m.c1429 = Constraint(expr= m.b981 - m.b982 <= 0)
m.c1430 = Constraint(expr= m.b983 - m.b984 <= 0)
m.c1431 = Constraint(expr= m.b983 - m.b985 <= 0)
m.c1432 = Constraint(expr= m.b984 - m.b985 <= 0)
m.c1433 = Constraint(expr= m.b986 - m.b987 <= 0)
m.c1434 = Constraint(expr= m.b986 - m.b988 <= 0)
m.c1435 = Constraint(expr= m.b987 - m.b988 <= 0)
m.c1436 = Constraint(expr= m.b989 - m.b990 <= 0)
m.c1437 = Constraint(expr= m.b989 - m.b991 <= 0)
m.c1438 = Constraint(expr= m.b990 - m.b991 <= 0)
m.c1439 = Constraint(expr= m.b992 - m.b993 <= 0)
m.c1440 = Constraint(expr= m.b992 - m.b994 <= 0)
m.c1441 = Constraint(expr= m.b993 - m.b994 <= 0)
m.c1442 = Constraint(expr= m.b995 - m.b996 <= 0)
m.c1443 = Constraint(expr= m.b995 - m.b997 <= 0)
m.c1444 = Constraint(expr= m.b996 - m.b997 <= 0)
m.c1445 = Constraint(expr= m.b998 - m.b999 <= 0)
m.c1446 = Constraint(expr= m.b998 - m.b1000 <= 0)
m.c1447 = Constraint(expr= m.b999 - m.b1000 <= 0)
m.c1448 = Constraint(expr= m.b1001 - m.b1002 <= 0)
m.c1449 = Constraint(expr= m.b1001 - m.b1003 <= 0)
m.c1450 = Constraint(expr= m.b1002 - m.b1003 <= 0)
m.c1451 = Constraint(expr= m.b1004 - m.b1005 <= 0)
m.c1452 = Constraint(expr= m.b1004 - m.b1006 <= 0)
m.c1453 = Constraint(expr= m.b1005 - m.b1006 <= 0)
m.c1454 = Constraint(expr= m.b1007 - m.b1008 <= 0)
m.c1455 = Constraint(expr= m.b1007 - m.b1009 <= 0)
m.c1456 = Constraint(expr= m.b1008 - m.b1009 <= 0)
m.c1457 = Constraint(expr= m.b1010 - m.b1011 <= 0)
m.c1458 = Constraint(expr= m.b1010 - m.b1012 <= 0)
m.c1459 = Constraint(expr= m.b1011 - m.b1012 <= 0)
m.c1460 = Constraint(expr= m.b1013 - m.b1014 <= 0)
m.c1461 = Constraint(expr= m.b1013 - m.b1015 <= 0)
m.c1462 = Constraint(expr= m.b1014 - m.b1015 <= 0)
m.c1463 = Constraint(expr= m.b1016 - m.b1017 <= 0)
m.c1464 = Constraint(expr= m.b1016 - m.b1018 <= 0)
m.c1465 = Constraint(expr= m.b1017 - m.b1018 <= 0)
m.c1466 = Constraint(expr= m.b1019 - m.b1020 <= 0)
m.c1467 = Constraint(expr= m.b1019 - m.b1021 <= 0)
m.c1468 = Constraint(expr= m.b1020 - m.b1021 <= 0)
m.c1469 = Constraint(expr= m.b1022 - m.b1023 <= 0)
m.c1470 = Constraint(expr= m.b1022 - m.b1024 <= 0)
m.c1471 = Constraint(expr= m.b1023 - m.b1024 <= 0)
m.c1472 = Constraint(expr= m.b1025 - m.b1026 <= 0)
m.c1473 = Constraint(expr= m.b1025 - m.b1027 <= 0)
m.c1474 = Constraint(expr= m.b1026 - m.b1027 <= 0)
# --- pairwise exclusion constraints over binary triples
# --- (at most one of each pair of binaries may be 1)
# NOTE(review): each pair below appears twice (e.g. c1475 and c1477 are
# identical) -- presumably a generator artifact; left untouched because
# downstream code may reference these constraint names.
m.c1475 = Constraint(expr= m.b1028 + m.b1029 <= 1)
m.c1476 = Constraint(expr= m.b1028 + m.b1030 <= 1)
m.c1477 = Constraint(expr= m.b1028 + m.b1029 <= 1)
m.c1478 = Constraint(expr= m.b1029 + m.b1030 <= 1)
m.c1479 = Constraint(expr= m.b1028 + m.b1030 <= 1)
m.c1480 = Constraint(expr= m.b1029 + m.b1030 <= 1)
m.c1481 = Constraint(expr= m.b1031 + m.b1032 <= 1)
m.c1482 = Constraint(expr= m.b1031 + m.b1033 <= 1)
m.c1483 = Constraint(expr= m.b1031 + m.b1032 <= 1)
m.c1484 = Constraint(expr= m.b1032 + m.b1033 <= 1)
m.c1485 = Constraint(expr= m.b1031 + m.b1033 <= 1)
m.c1486 = Constraint(expr= m.b1032 + m.b1033 <= 1)
m.c1487 = Constraint(expr= m.b1034 + m.b1035 <= 1)
m.c1488 = Constraint(expr= m.b1034 + m.b1036 <= 1)
m.c1489 = Constraint(expr= m.b1034 + m.b1035 <= 1)
m.c1490 = Constraint(expr= m.b1035 + m.b1036 <= 1)
m.c1491 = Constraint(expr= m.b1034 + m.b1036 <= 1)
m.c1492 = Constraint(expr= m.b1035 + m.b1036 <= 1)
m.c1493 = Constraint(expr= m.b1037 + m.b1038 <= 1)
m.c1494 = Constraint(expr= m.b1037 + m.b1039 <= 1)
m.c1495 = Constraint(expr= m.b1037 + m.b1038 <= 1)
m.c1496 = Constraint(expr= m.b1038 + m.b1039 <= 1)
m.c1497 = Constraint(expr= m.b1037 + m.b1039 <= 1)
m.c1498 = Constraint(expr= m.b1038 + m.b1039 <= 1)
m.c1499 = Constraint(expr= m.b1040 + m.b1041 <= 1)
m.c1500 = Constraint(expr= m.b1040 | |
#
# For licensing see accompanying LICENSE.txt file.
# Copyright (C) 2020 Apple Inc. All Rights Reserved.
#
from pylab import *
import argparse
import h5py
import inspect
import os
import pandas as pd
import scipy.linalg
import sklearn.preprocessing
import sklearn.metrics
import path_utils
path_utils.add_path_to_sys_path("../lib", mode="relative_to_current_source_dir", frame=inspect.currentframe())
import embree_utils
import octomap_utils
import random_walk_utils
parser = argparse.ArgumentParser()
parser.add_argument("--scene_dir", required=True)
parser.add_argument("--use_python_reference_implementation", action="store_true")
args = parser.parse_args()
assert os.path.exists(args.scene_dir)
path_utils.add_path_to_sys_path(os.path.join(args.scene_dir, "..", ".."), mode="relative_to_cwd", frame=inspect.currentframe())
import _dataset_config
print("[HYPERSIM: SCENE_GENERATE_CAMERA_TRAJECTORIES_RANDOM_WALK] Begin...")
# Directory layout: every input/output location is derived from the per-scene
# directory passed on the command line (asset exports, derived detail data,
# octomap, temp files).
scene_name = os.path.basename(args.scene_dir)
asset_export_dir = os.path.join(args.scene_dir, "_asset_export")
detail_dir = os.path.join(args.scene_dir, "_detail")
mesh_dir = os.path.join(detail_dir, "mesh")
octomap_dir = os.path.join(detail_dir, "octomap")
tmp_dir = os.path.join(args.scene_dir, "_tmp")
# Input CSV / HDF5 artifacts produced by earlier pipeline stages.
metadata_cameras_asset_export_csv_file = os.path.join(asset_export_dir, "metadata_cameras_asset_export.csv")
metadata_cameras_csv_file = os.path.join(detail_dir, "metadata_cameras.csv")
metadata_scene_csv_file = os.path.join(detail_dir, "metadata_scene.csv")
mesh_vertices_hdf5_file = os.path.join(mesh_dir, "mesh_vertices.hdf5")
mesh_faces_vi_hdf5_file = os.path.join(mesh_dir, "mesh_faces_vi.hdf5")
mesh_faces_oi_hdf5_file = os.path.join(mesh_dir, "mesh_faces_oi.hdf5")
octomap_bt_file = os.path.join(octomap_dir, "octomap.bt")
octomap_free_space_min_hdf5_file = os.path.join(octomap_dir, "octomap_free_space_min.hdf5")
octomap_free_space_max_hdf5_file = os.path.join(octomap_dir, "octomap_free_space_max.hdf5")
# Look up this scene's config entry; assumes exactly one match (raises
# IndexError if the scene name is missing from _dataset_config.scenes).
scene = [ s for s in _dataset_config.scenes if s["name"] == scene_name ][0]
# Load mesh geometry (vertices, face vertex indices, face object ids) and the
# axis-aligned free-space bounds of the octomap.
with h5py.File(mesh_vertices_hdf5_file, "r") as f: mesh_vertices = f["dataset"][:]
with h5py.File(mesh_faces_vi_hdf5_file, "r") as f: mesh_faces_vi = f["dataset"][:]
with h5py.File(mesh_faces_oi_hdf5_file, "r") as f: mesh_faces_oi = f["dataset"][:]
with h5py.File(octomap_free_space_min_hdf5_file, "r") as f: octomap_free_space_min = f["dataset"][:]
with h5py.File(octomap_free_space_max_hdf5_file, "r") as f: octomap_free_space_max = f["dataset"][:]
# Assign a shuffled (deterministic, seed 0) color index to each object id;
# used later to render the per-object preview images.
obj_ids_unique = unique(mesh_faces_oi)
color_vals_unique = arange(obj_ids_unique.shape[0])
np.random.seed(0)
np.random.shuffle(color_vals_unique)
# Scene scale: conversion factors between asset units and meters.
df_scene = pd.read_csv(metadata_scene_csv_file, index_col="parameter_name")
meters_per_asset_unit = df_scene.loc["meters_per_asset_unit"][0]
asset_units_per_meter = 1.0 / meters_per_asset_unit
# parameters
# when sampling views, generate images using these image and camera parameters
n_width_pixels = 256
n_height_pixels = 192
n_fov_x = pi/3  # horizontal field of view (radians)
n_samples_random_walk = 100 # each final generated camera trajectory consists of this many views
n_samples_octomap_query = 1000 # generate this many preliminary candidates from a local neighborhood, test if they're in free space
n_samples_camera_pose_candidates = 20 # generate this many candidates from free space, compute view scores
n_voxel_size = scene["voxel_extent_meters"]*asset_units_per_meter  # octomap voxel size in asset units
# local neighborhood bounding boxes for random walk
n_query_half_extent_relative_to_start = array([np.inf,np.inf,0.25])*asset_units_per_meter
n_query_half_extent_relative_to_current = array([1.5,1.5,0.25])*asset_units_per_meter
# The pure-Python reference implementation builds the camera ray grid below;
# the default code path delegates to random_walk_utils instead.
if args.use_python_reference_implementation:
height_pixels = n_height_pixels
width_pixels = n_width_pixels
fov_x = n_fov_x
# when randomly sampling an up vector, perturb it according to these parameters
n_camera_up_hint_noise_std_dev = 0.1
n_camera_up_hint_nominal = array([0,0,1])
# vertical FOV derived from the horizontal FOV and aspect ratio (square pixels)
fov_y = 2.0 * arctan(height_pixels * tan(fov_x/2) / width_pixels)
# pixel-center sample grid over normalized image coordinates [-1, 1]
uv_min = -1.0
uv_max = 1.0
half_du = 0.5 * (uv_max - uv_min) / width_pixels
half_dv = 0.5 * (uv_max - uv_min) / height_pixels
u, v = meshgrid(linspace(uv_min+half_du, uv_max-half_du, width_pixels),
linspace(uv_min+half_dv, uv_max-half_dv, height_pixels)[::-1])
# per-pixel ray directions in camera space (camera looks down -z)
ray_offset_x = u*tan(fov_x/2.0)
ray_offset_y = v*tan(fov_y/2.0)
ray_offset_z = -ones_like(ray_offset_x)
rays_cam = dstack((ray_offset_x,ray_offset_y,ray_offset_z))
V_cam = matrix(rays_cam.reshape(-1,3)).T  # 3 x (H*W) matrix of camera-space rays
# margin parameter for line-of-sight queries: point A must be closer than the scene geometry to point B,
# along the ray from B to A, by a margin of eps percent, in order for A to be considered visible from B
eps = 0.01
# when attempting to find the initial look-at position, sample the occupancy map slightly closer to the
# initial look-from position than the point of mesh intersection, but at least delta units away from the
# initial look-from position to avoid a degenerate (look-from, look-at) pair
delta = 0.0001
# constant term added to the view saliency score; as lamb goes to infinity, the distribution of view saliency
# scores will approach a uniform distribution
lamb = 0.0
np.random.seed(0)  # reseed so the trajectory sampling below is reproducible
# get cameras from the original asset file
df_cameras_asset_export = pd.read_csv(metadata_cameras_asset_export_csv_file)
df_cameras = pd.DataFrame(columns=["camera_name"])
i = 0  # output camera index, used to name output cameras cam_00, cam_01, ...
for c in df_cameras_asset_export.to_records():
in_camera_name = c["camera_name"]
in_camera_dir = os.path.join(asset_export_dir, in_camera_name)
in_camera_keyframe_positions_hdf5_file = os.path.join(in_camera_dir, "camera_keyframe_positions.hdf5")
in_camera_keyframe_orientations_hdf5_file = os.path.join(in_camera_dir, "camera_keyframe_orientations.hdf5")
assert len(in_camera_name.lstrip("cam_")) != 2 or not in_camera_name.lstrip("cam_").isdigit()
out_camera_name = "cam_%02d" % i
out_camera_dir = os.path.join(args.scene_dir, "_detail", out_camera_name)
out_camera_preview_dir = os.path.join(args.scene_dir, "_detail", out_camera_name, "preview")
out_camera_keyframe_frame_indices_hdf5_file = os.path.join(out_camera_dir, "camera_keyframe_frame_indices.hdf5")
out_camera_keyframe_positions_hdf5_file = os.path.join(out_camera_dir, "camera_keyframe_positions.hdf5")
out_camera_keyframe_look_at_positions_hdf5_file = os.path.join(out_camera_dir, "camera_keyframe_look_at_positions.hdf5")
out_camera_keyframe_orientations_hdf5_file = os.path.join(out_camera_dir, "camera_keyframe_orientations.hdf5")
out_metadata_camera_csv_file = os.path.join(out_camera_dir, "metadata_camera.csv")
if not os.path.exists(out_camera_dir): os.makedirs(out_camera_dir)
if not os.path.exists(out_camera_preview_dir): os.makedirs(out_camera_preview_dir)
with h5py.File(in_camera_keyframe_positions_hdf5_file, "r") as f: in_camera_keyframe_positions = f["dataset"][:]
with h5py.File(in_camera_keyframe_orientations_hdf5_file, "r") as f: in_camera_keyframe_orientations = f["dataset"][:]
print("[HYPERSIM: SCENE_GENERATE_CAMERA_TRAJECTORIES_RANDOM_WALK] Input camera " + in_camera_name + ", output camera " + out_camera_name + "...")
if not args.use_python_reference_implementation:
camera_look_from_positions, camera_look_at_positions, camera_orientations, intersection_distances, prim_ids = \
random_walk_utils.generate_camera_trajectory_random_walk(
mesh_vertices,
mesh_faces_vi,
octomap_bt_file,
octomap_free_space_min,
octomap_free_space_max,
in_camera_keyframe_positions[0],
in_camera_keyframe_orientations[0],
n_width_pixels,
n_height_pixels,
n_fov_x,
n_samples_random_walk,
n_samples_octomap_query,
n_samples_camera_pose_candidates,
n_voxel_size,
n_query_half_extent_relative_to_current,
n_query_half_extent_relative_to_start,
tmp_dir=tmp_dir)
if camera_look_from_positions is None or camera_look_at_positions is None or camera_orientations is None or intersection_distances is None or prim_ids is None:
print("[HYPERSIM: SCENE_GENERATE_CAMERA_TRAJECTORIES_RANDOM_WALK] WARNING: random_walk_utils.generate_camera_trajectory_random_walk DID NOT EXECUTE SUCCESSFULLY. SKIPPING.")
if not os.listdir(out_camera_preview_dir): os.rmdir(out_camera_preview_dir)
if not os.listdir(out_camera_dir): os.rmdir(out_camera_dir)
i = i+1
continue
#
# save preview images
#
for j in range(n_samples_random_walk):
prim_ids_curr = prim_ids[j]
obj_ids_curr = mesh_faces_oi[prim_ids_curr]
color_vals = ones_like(obj_ids_curr)*np.nan
for obj_id,color_val in zip(obj_ids_unique,color_vals_unique):
color_vals[obj_id == obj_ids_curr] = color_val
color_vals[prim_ids_curr == -1] = np.nan
out_camera_preview_jpg_file = os.path.join(out_camera_preview_dir, "frame.%04d.jpg" % j)
imsave(out_camera_preview_jpg_file, color_vals, vmin=np.min(color_vals_unique), vmax=np.max(color_vals_unique))
#
# final output values
#
num_keyframes = n_samples_random_walk
out_camera_keyframe_frame_indices = arange(num_keyframes)
out_camera_keyframe_positions = camera_look_from_positions
out_camera_keyframe_look_at_positions = camera_look_at_positions
out_camera_keyframe_orientations = camera_orientations
out_camera_frame_time_seconds = 1.0
if args.use_python_reference_implementation:
# # note that skipping a camera trajectory like this will mean that metadata_cameras.csv will be
# # incorrectly exported, so this if statement should be uncommented for debugging purposes only
# if out_camera_name == "cam_00":
# i = i+1
# continue
#
# start_camera_look_from_position
#
start_camera_look_from_position = in_camera_keyframe_positions[0]
#
# start_camera_look_at_position
#
start_camera_R_world_from_cam = in_camera_keyframe_orientations[0]
start_camera_look_at_dir = -start_camera_R_world_from_cam[:,2]
# compute intersection distance for center ray
intersection_distances, intersection_normals, prim_ids = embree_utils.generate_ray_intersections(mesh_vertices, mesh_faces_vi, matrix(start_camera_look_from_position).A, matrix(start_camera_look_at_dir).A, tmp_dir=tmp_dir)
intersection_distance = max(intersection_distances[0] - 1.75*n_voxel_size, delta)
query_position = start_camera_look_from_position + intersection_distance*start_camera_look_at_dir
octomap_sample = octomap_utils.generate_octomap_samples(octomap_bt_file, array([query_position]), tmp_dir)[0]
if isfinite(intersection_distance) and octomap_sample == 0:
start_camera_look_at_position = start_camera_look_from_position + intersection_distance*start_camera_look_at_dir
computed_intersection_distance_image = False
else:
# try to find an unoccupied cell for the initial look-at position by shooting camera rays,
# testing for intersections with the scene mesh, and testing the occupancy map cells slightly
# before the intersections (when proceeding from the optical center of the camera to the
# mesh intersection point); if no unoccupied cells can be found, try perturbing the camera
# forwards along the camera's look-at vector slightly and trying again
camera_z_axis = start_camera_R_world_from_cam[:,2]
camera_perturb_attempts = 8
camera_perturb_length = 0.25*n_voxel_size
all_intersection_distances_at_infinity = False
encountered_unoccupied_cell = False
for p in range(camera_perturb_attempts):
V_world = start_camera_R_world_from_cam*V_cam
ray_directions_world = V_world.T.A
ray_positions_world = ones_like(ray_directions_world) * (start_camera_look_from_position + p*camera_perturb_length*camera_z_axis)
intersection_distances, intersection_normals, prim_ids = embree_utils.generate_ray_intersections(mesh_vertices, mesh_faces_vi, ray_positions_world, ray_directions_world, tmp_dir=tmp_dir)
if not any(isfinite(intersection_distances)):
all_intersection_distances_at_infinity = True
break
# Clip rays against the octomap bounding box using the per-axis slab method,
# see https://tavianator.com/fast-branchless-raybounding-box-intersections/
# For each axis, intersect the ray with the two bounding planes and tighten
# [t_min, t_max]; axes where the ray direction is ~0 are skipped via `mask`.
ray_directions_world = sklearn.preprocessing.normalize(ray_directions_world)
t_min = ones_like(intersection_distances)*-np.inf
t_max = ones_like(intersection_distances)*np.inf
# x slab
t_x0 = (octomap_free_space_min[0] - ray_positions_world[:,0]) / ray_directions_world[:,0]
t_x1 = (octomap_free_space_max[0] - ray_positions_world[:,0]) / ray_directions_world[:,0]
mask = logical_not(isclose(ray_directions_world[:,0], 0))
t_min[mask] = np.maximum(t_min[mask], np.minimum(t_x0[mask], t_x1[mask]))
t_max[mask] = np.minimum(t_max[mask], np.maximum(t_x0[mask], t_x1[mask]))
# y slab
t_y0 = (octomap_free_space_min[1] - ray_positions_world[:,1]) / ray_directions_world[:,1]
t_y1 = (octomap_free_space_max[1] - ray_positions_world[:,1]) / ray_directions_world[:,1]
mask = logical_not(isclose(ray_directions_world[:,1], 0))
t_min[mask] = np.maximum(t_min[mask], np.minimum(t_y0[mask], t_y1[mask]))
t_max[mask] = np.minimum(t_max[mask], np.maximum(t_y0[mask], t_y1[mask]))
# z slab
# BUG FIX: the original computed t_z0/t_z1 from ray_positions_world[mask][:,2],
# reusing the stale y-axis mask. That misaligns (and can shape-mismatch) the z
# slab distances against the full direction array; use the full position array,
# exactly as the x and y slabs do.
t_z0 = (octomap_free_space_min[2] - ray_positions_world[:,2]) / ray_directions_world[:,2]
t_z1 = (octomap_free_space_max[2] - ray_positions_world[:,2]) / ray_directions_world[:,2]
mask = logical_not(isclose(ray_directions_world[:,2], 0))
t_min[mask] = np.maximum(t_min[mask], np.minimum(t_z0[mask], t_z1[mask]))
t_max[mask] = np.minimum(t_max[mask], np.maximum(t_z0[mask], t_z1[mask]))
assert all(isfinite(t_min))
assert all(isfinite(t_max))
assert all(t_max > t_min) # assert all rays intersect bounding box
assert all(t_min < 0.5*1.75*n_voxel_size) # assert all rays start from inside bounding box (with a bit of slack because bounding box min and max might be off by a half voxel)
# import mayavi_utils
# import mayavi.mlab
# tmp = ray_positions_world + t_max[:,newaxis]*ray_directions_world
# mayavi_utils.points3d_color_by_scalar(tmp, scalars=zeros_like(t_max), scale_factor=1.0, opacity=1.0)
# mayavi.mlab.triangular_mesh(mesh_vertices[:,0], mesh_vertices[:,1], mesh_vertices[:,2], mesh_faces_vi, representation="surface", color=(0.75,0.75,0.75), opacity=0.25)
# mayavi.mlab.show()
t_max[logical_not(isfinite(intersection_distances))] = np.inf
intersection_distances = np.maximum(np.minimum(intersection_distances, t_max) - 1.75*n_voxel_size, delta)
query_positions = (start_camera_look_from_position + p*camera_perturb_length*camera_z_axis) + intersection_distances[:,newaxis]*ray_directions_world
octomap_samples = octomap_utils.generate_octomap_samples(octomap_bt_file, query_positions, tmp_dir)
# import mayavi_utils
# import mayavi.mlab
# tmp = query_positions
# mayavi_utils.points3d_color_by_scalar(tmp, scalars=octomap_samples, scale_factor=1.0, opacity=1.0)
# mayavi.mlab.triangular_mesh(mesh_vertices[:,0], mesh_vertices[:,1], mesh_vertices[:,2], mesh_faces_vi, representation="surface", color=(0.75,0.75,0.75), opacity=0.25)
# mayavi.mlab.show()
if any(octomap_samples == 0):
encountered_unoccupied_cell = True
break
if all_intersection_distances_at_infinity:
print("[HYPERSIM: SCENE_GENERATE_CAMERA_TRAJECTORIES_RANDOM_WALK]")
print("[HYPERSIM: SCENE_GENERATE_CAMERA_TRAJECTORIES_RANDOM_WALK]")
print("[HYPERSIM: SCENE_GENERATE_CAMERA_TRAJECTORIES_RANDOM_WALK]")
print("[HYPERSIM: SCENE_GENERATE_CAMERA_TRAJECTORIES_RANDOM_WALK] WARNING: CAMERA DOESN'T OBSERVE ANY PART OF THE SCENE. ALL INTERSECTION DISTANCES ARE AT INFINITY. GIVING UP.")
print("[HYPERSIM: SCENE_GENERATE_CAMERA_TRAJECTORIES_RANDOM_WALK]")
print("[HYPERSIM: SCENE_GENERATE_CAMERA_TRAJECTORIES_RANDOM_WALK]")
print("[HYPERSIM: SCENE_GENERATE_CAMERA_TRAJECTORIES_RANDOM_WALK]")
continue
if not encountered_unoccupied_cell:
print("[HYPERSIM: SCENE_GENERATE_CAMERA_TRAJECTORIES_RANDOM_WALK]")
print("[HYPERSIM: SCENE_GENERATE_CAMERA_TRAJECTORIES_RANDOM_WALK]")
print("[HYPERSIM: SCENE_GENERATE_CAMERA_TRAJECTORIES_RANDOM_WALK]")
print("[HYPERSIM: SCENE_GENERATE_CAMERA_TRAJECTORIES_RANDOM_WALK] WARNING: CAMERA DOESN'T OBSERVE ANY PART OF THE SCENE. ALL OBSERVED OCTOMAP SAMPLES ARE UNKNOWN OR OCCUPIED. GIVING UP.")
print("[HYPERSIM: SCENE_GENERATE_CAMERA_TRAJECTORIES_RANDOM_WALK]")
print("[HYPERSIM: SCENE_GENERATE_CAMERA_TRAJECTORIES_RANDOM_WALK]")
print("[HYPERSIM: SCENE_GENERATE_CAMERA_TRAJECTORIES_RANDOM_WALK]")
continue
computed_intersection_distance_image = True
half_dx = 0.5
half_dy = 0.5
pixel_center = array([(width_pixels+1)/2.0, (height_pixels+1)/2.0])
pixel_x, pixel_y = meshgrid(linspace(half_dx, width_pixels+half_dx, width_pixels),
linspace(half_dy, height_pixels+half_dy, height_pixels))
pixels = dstack((pixel_x,pixel_y)).reshape(-1,2)
center_to_pixels = pixels - pixel_center
center_to_pixels_distances = linalg.norm(center_to_pixels, axis=1)
center_to_pixels_distances[logical_not(isfinite(intersection_distances))] = np.inf
center_to_pixels_distances[octomap_samples != 0] = np.inf
assert any(isfinite(center_to_pixels_distances))
selected_index = argsort(center_to_pixels_distances)[0]
ray_direction_world = ray_directions_world[selected_index]
intersection_distance = intersection_distances[selected_index]
start_camera_look_at_position = start_camera_look_from_position + intersection_distance*ray_direction_world
#
# save preview image
#
| |
# BY: SebasttianVelez
import pygame
import random
from Objects.Player import *
from Objects.Enemy import *
from Objects.Bullet import *
from Objects.Life import *
from Objects.Boss import *
from Objects.Nave import *
# Global variables (screen size, loaded assets, animation/state counters)
ancho = 700  # screen width in pixels
alto = 500  # screen height in pixels
close = False  # main-loop exit flag
fondo1 = pygame.image.load('Mapa1.png')  # scrolling background map
reloj = pygame.time.Clock()
luke1 = pygame.image.load('Luke.png')  # player sprite sheet (shared by both players)
life1 = pygame.image.load('life1.png')  # health-bar sprite sheet, player 1
life2 = pygame.image.load('life2.png')  # health-bar sprite sheet, player 2
nave1 = pygame.image.load('nave.png')  # enemy ship sprite
enemy1 = pygame.image.load('Enemy1.png')  # enemy sprite sheet
vader1 = pygame.image.load('vader1.png')  # boss movement sprite sheet
vader2 = pygame.image.load('vader2.png')  # boss attack sprite sheet (facing right)
vader3 = pygame.image.load('vader3.png')  # boss attack sprite sheet (facing left)
#_____________________________________
# Animation frame counters: i* for player 1 and j* for player 2
# (right/left/down/up walk + attack frames), k* for enemies, d* for the
# "simulated walk" scroll animations, f* for the boss.
i0 = 0
i1 = 0
i2 = 0
i3 = 0
i4 = 0
i5 = 0
j0 = 0
j1 = 0
j2 = 0
j3 = 0
j4 = 0
j5 = 0
k1 = 4
k2 = 0
d1 = 0
d2 = 0
d3 = 0
f0 = 0
f1 = 3
f2 = 0
f3 = 3
f4 = 0
f5 = 0
# NOTE(review): "np" here is the background-scroll threshold that triggers the
# boss fight, NOT numpy -- this name would shadow numpy if it were imported.
np = -1700
pg = True  # wave 1 not yet spawned
pg2 = True  # wave 2 not yet spawned
pg3 = True  # wave 3 not yet spawned
ACTboss = False  # boss fight active
#_____________________________________
espera1 = True  # scrolling allowed (no enemy wave currently pending)
#_____________________________________
pospx = 0  # background scroll offset, x
pospy = 0  # background scroll offset, y
disp = False  # player 1 attack animation active
disp2 = False  # player 2 attack animation active
dir1 = True  # player 1 facing right
dir2 = True  # player 2 facing right
V = False
nene = 3  # enemies per wave (increased on later waves)
activate = False  # boss has been activated at least once
l1c = 800  # enemy spawn range, lower x bound
l2c = 1500  # enemy spawn range, upper x bound
ln = 1500  # enemy ship spawn x position
t = 20
def CortarImagen(image, x, y, eX, eY):
    """Cut one frame out of a sprite sheet.

    The sheet `image` is treated as a grid of eX columns by eY rows;
    the subsurface at column `x`, row `y` is returned (shares pixels
    with the original surface, as pygame subsurfaces do).
    """
    rect = image.get_rect()
    frame_w = int(rect[2] / eX)   # rect[2] is the sheet width
    frame_h = int(rect[3] / eY)   # rect[3] is the sheet height
    return image.subsurface(x * frame_w, y * frame_h, frame_w, frame_h)
print
#Inicializacion de Pygame
if __name__ == '__main__':
#Definicion de Variables
pygame.init()
#Crear variables locales
Pantalla = pygame.display.set_mode([ancho, alto])
pygame.mixer.init()
sonido = pygame.mixer.Sound("Music/Across.wav")
sonido2 = pygame.mixer.Sound("Music/Imperial.wav")
pygame.font.init()
path = "./fonts/FluoGums.ttf"
size = 17
fuente = pygame.font.Font(path,size)
fuente2 = pygame.font.Font(path, 40)
#Grupos
todos = pygame.sprite.Group()
Players = pygame.sprite.Group()
Enemys = pygame.sprite.Group()
Bullets = pygame.sprite.Group()
Lifes = pygame.sprite.Group()
Bosses = pygame.sprite.Group()
Naves = pygame.sprite.Group()
Luke2 = Player(21,150)
Players.add(Luke2)
todos.add(Luke2)
Luke = Player(21, 300)
Players.add(Luke)
todos.add(Luke)
#1850X 225Y
time = 50
Life1 = Life(20, 450)
Lifes.add(Life1)
todos.add(Life1)
Life2 = Life(400, 450)
Lifes.add(Life2)
todos.add(Life2)
Vader = Boss(2300, 225)
Bosses.add(Vader)
todos.add(Vader)
#Iniciar el juego
while not close:
if(not ACTboss):
sonido.play()
else:
sonido.stop()
sonido2.play()
#Gestion de Eventos
for event in pygame.event.get():
if event.type == pygame.QUIT:
close = True
if event.type == pygame.KEYDOWN:
#___________________________TECLAS J2____________________________
if event.key == pygame.K_RIGHT:
Luke.vel_x = 10
Luke.vel_y = 0
dir1 = True
i1 = 0
elif event.key == pygame.K_LEFT:
Luke.vel_x = -10
Luke.vel_y = 0
dir1 = False
i2 = 0
elif event.key == pygame.K_DOWN:
Luke.vel_y = 10
Luke.vel_x = 0
i3 = 0
elif event.key == pygame.K_UP:
Luke.vel_y = -10
Luke.vel_x = 0
i4 = 0
if event.key == pygame.K_l:
disp = True
Luke.disp = True
#_________________________Teclas J2_____________________________
if event.key == pygame.K_d:
Luke2.vel_x = 10
Luke2.vel_y = 0
dir2 = True
j1 = 0
elif event.key == pygame.K_a:
Luke2.vel_x = -10
Luke2.vel_y = 0
dir2 = False
j2 = 0
elif event.key == pygame.K_s:
Luke2.vel_y = 10
Luke2.vel_x = 0
j3 = 0
elif event.key == pygame.K_w:
Luke2.vel_y = -10
Luke2.vel_x = 0
j4 = 0
if event.key == pygame.K_g:
disp2 = True
Luke2.disp = True
#_____________________________________________________________________________
if event.type == pygame.KEYUP:
if (event.key == pygame.K_RIGHT)or(event.key == pygame.K_LEFT)or(event.key == pygame.K_UP)or(event.key == pygame.K_DOWN):
Luke.vel_y = 0
Luke.vel_x = 0
i0 = 0
if (event.key == pygame.K_s)or(event.key == pygame.K_w)or(event.key == pygame.K_a)or(event.key == pygame.K_d):
Luke2.vel_y = 0
Luke2.vel_x = 0
i0 = 0
#-------------------JUGADOR 1 DEZPLAZADO A LA DERECHA------------------
if Luke.vel_x==10:
t = 20
Luke.cut=CortarImagen(luke1,i1,0,6,6)
if i1 >= 3:
i1 = 0
else:
i1 += 1
#-------------------JUGADOR 1 DEZPLAZADO A LA IZQUIERDA------------------
if Luke.vel_x==-10:
t = 20
Luke.cut=CortarImagen(luke1,i2,1,6,6)
if i2 <= 0:
i2 = 3
else:
i2 -=1
#-------------------JUGADOR 1 DEZPLAZADO A LA ABAJO------------------
if Luke.vel_y==10:
t = 20
Luke.cut=CortarImagen(luke1,i3,2,6,6)
if i3 >=3:
i3 = 0
else:
i3 += 1
#-------------------JUGADOR 1 DEZPLAZADO A LA ARRIBA------------------
if Luke.vel_y==-10:
t = 20
Luke.cut=CortarImagen(luke1,i4,3,6,6)
if i4 >=3:
i4 = 0
else:
i4 += 1
#-------------------JUGADOR 1 NO SE DEZPLAZA ------------------
if (Luke.vel_x==0) and (Luke.vel_y==0) and (disp == False) and (not Luke.siml):
if dir1:
Luke.cut=CortarImagen(luke1,4,0,6,6)
else:
Luke.cut=CortarImagen(luke1,4,1,6,6)
#-------------------JUGADOR 1 ACTIVA LASER ------------------
if (disp):
t = 20
if dir1:
Luke.cut=CortarImagen(luke1, i5, 4, 6, 6)
else:
Luke.cut=CortarImagen(luke1, i5, 5, 6, 6)
if(i5>=2):
i5 = 0
disp= False
else:
i5 +=1
ls_cl7 = pygame.sprite.spritecollide(Luke, Enemys, False)
for e in ls_cl7:
#falta mejorar el limite superior de la espada
if(Luke.rect.top>= e.rect.top - 20)and(Luke.rect.bottom >= e.rect.bottom + 20)and(e.rect.bottom > Luke.rect.top+100):
#print "enem ", e.rect.bottom
#print "luke ",Luke2.rect.top
#print e.health
if(e.health<= 0):
todos.remove(e)
Enemys.remove(e)
else:
e.sangre += 1
e.health -= 50
#for e in ls_cl5:
# Luke2.health -= 20
# print Luke2.health
#________________________________________________________________________
#-------------------JUGADOR 2 DEZPLAZADO A LA DERECHA------------------
if Luke2.vel_x==10:
t = 20
Luke2.cut=CortarImagen(luke1,j1,0,6,6)
if j1 >= 3:
j1 = 0
else:
j1 += 1
#-------------------JUGADOR 2 DEZPLAZADO A LA IZQUIERDA------------------
if Luke2.vel_x==-10:
t = 20
Luke2.cut=CortarImagen(luke1,j2,1,6,6)
if j2 <= 0:
j2 = 3
else:
j2 -=1
#-------------------JUGADOR 2 DEZPLAZADO A LA ABAJO------------------
if Luke2.vel_y==10:
t = 20
Luke2.cut=CortarImagen(luke1,j3,2,6,6)
if j3 >=3:
j3 = 0
else:
j3 += 1
#-------------------JUGADOR 2 DEZPLAZADO A LA ARRIBA------------------
if Luke2.vel_y==-10:
t = 20
Luke2.cut=CortarImagen(luke1,j4,3,6,6)
if j4 >=3:
j4 = 0
else:
j4 += 1
#-------------------JUGADOR 2 NO SE DEZPLAZA ------------------
if (Luke2.vel_x==0) and (Luke2.vel_y==0) and (disp2 == False) and (not Luke2.siml):
if dir2:
Luke2.cut=CortarImagen(luke1,4,0,6,6)
else:
Luke2.cut=CortarImagen(luke1,4,1,6,6)
#-------------------JUGADOR 2 ACTIVA LASER ------------------
if (disp2):
t = 20
if dir2:
Luke2.cut=CortarImagen(luke1, j5, 4, 6, 6)
else:
Luke2.cut=CortarImagen(luke1, j5, 5, 6, 6)
if(j5>=2):
j5 = 0
disp2= False
else:
j5 +=1
ls_cl6 = pygame.sprite.spritecollide(Luke2, Enemys, False)
for e in ls_cl6:
#falta mejorar el limite superior de la espada
if(Luke2.rect.top>= e.rect.top - 20)and(Luke2.rect.bottom >= e.rect.bottom + 20)and(e.rect.bottom > Luke2.rect.top+100):
#print "enem ", e.rect.bottom
#print "luke ",Luke2.rect.top
#print e.health
if(e.health<= 0):
todos.remove(e)
Enemys.remove(e)
else:
e.sangre += 1
e.health -= 50
#-------------------GENERAR ENEMIGOS CONTINUAMENTE--------------------
#l1c = 400
#l2c = 1500
#ln = 1300
if(pospx<=-100)and(pg == True):
Luke.simr = False
Luke2.simr = False
espera1 = False
NumNaves = 1
for i in range(NumNaves):
jet = Nave(ln, -40)
Naves.add(jet)
todos.add(jet)
Clones = nene
for i in range(Clones):
Clon = Enemy(300, 100)
Clon.rect.x = random.randrange(l1c,l2c)
Clon.rect.y = random.randrange(79, 359)
Enemys.add(Clon)
todos.add(Clon)
pg = False
if(len(Enemys)<=0):
espera1 = True
if(pospx<=-600)and(pg2 == True):
pg = True
pg2 = False
l1c += 500
l2c += 500
ln += 500
nene *= 2
if(pospx<=-1100)and(pg3 == True):
pg = True
pg3 = False
l1c += 500
l2c += 500
ln += 500
nene += 4
#______________________Comportamiento Enemigo_________________________#
#-------------------Movimiento ENemigos ------------------
for clon in Enemys:
if (clon.vel_x==-10):
clon.cut = CortarImagen(enemy1, k1, 1, 7, 4)
if k1 <= 0:
k1 = 3
else:
k1 -=1
#-------------------DIsparar ENemigos ------------------
# -------------------- Enemy shooting animation + bullet spawn --------------------
# Stopped enemies (vel_x == 0, not yet flagged "parar") play their shooting
# frames and emit a bullet each tick.
for clon in Enemys:
    if (clon.vel_x == 0) and (not clon.parar):
        clon.cut = CortarImagen(enemy1, k2, 1, 7, 4)
        if k2 >= 6:
            # BUG FIX: the original assigned to "K2" (capital K), creating an
            # unused global instead of resetting the frame counter, so the
            # animation froze on frame 6. Reset the real k2 so frames 5/6 loop.
            k2 = 5
        else:
            k2 += 1
        # assumes the bullet spawn belongs to the stopped-enemy branch, matching
        # the timed spawn later in the loop -- TODO confirm (source indentation
        # was lost)
        b = Bullet(clon.rect.x+20, clon.rect.y+55)
        Bullets.add(b)
        todos.add(b)
for clon in Enemys:
distancia = clon.rect.x - Luke.rect.x
distancia2 = clon.rect.x - Luke2.rect.x
if(clon.r == 1):
clon.dparada = distancia
else:
clon.dparada = distancia2
if (clon.dparada<=100): # distancia en donde paran los enemigos
clon.vel_x = 0
clon.parar = True
for clon in Enemys:
if (clon.parar and clon.vel_x ==0):
clon.cut = CortarImagen(enemy1, 5, 1, 7, 4)
if(time > 0):
time -= 1
else:
b = Bullet(clon.rect.x+20, clon.rect.y+ 55)
Bullets.add(b)
todos.add(b)
time = 50
for clon in Enemys:
if (clon.sangre>0):
clon.cut = CortarImagen(enemy1, 5, 2, 7, 4)
clon.sangre = 0
#____________________________________________________________________________________________NAVES
for n in Naves:
if(n.disparar == True):
b = Bullet(n.rect.x+40, n.rect.y+90)
b.l = 5
b.a = 40
b.vel_x = 0
b.vel_y = 10
Bullets.add(b)
todos.add(b)
#____________________________________________________________________________________________VADER
# _____________________________________________________________________________________PELEA FINAL
for L in Players:
if (pospx <= np):
ACTboss = True
#print "VADER: ", Vader.health
activate = True
#print "activar boss"
#---boss prioriza matar al jugador 1
#---jugador en la derecha
if(Vader.rect.x > L.rect.x):
Vader.dird = True
Vader.diri = False
if(Vader.rect.x - L.rect.x<= 80):
Vader.vel_x = 0
Vader.disparar = True
L.cerca = True
else:
Vader.disparar = False
L.cerca = False
Vader.vel_x = -6
if(L.disp)and(L.cerca):
Vader.health -=5
#---jugador en la izquierda
else:
Vader.dird = False
Vader.diri = True
if(L.rect.x - Vader.rect.x <= 50):
Vader.vel_x = 0
Vader.disparar = True
L.cerca = True
else:
Vader.disparar = False
L.cerca = False
Vader.vel_x = 6
if(L.disp)and(L.cerca):
Vader.health -=20
#jugador arriba
if(Vader.rect.y > L.rect.y):
if(Vader.rect.y- L.rect.y <= 30):
Vader.vel_y = 0
Vader.disparar = True
L.cerca = True
else:
Vader.disparar = False
L.cerca = False
Vader.vel_y = -6
if(L.disp)and(L.cerca):
Vader.health -=20
else:
if(L.rect.y - Vader.rect.y<= 30):
Vader.vel_y = 0
Vader.disparar = True
L.cerca = True
else:
Vader.disparar = False
L.cerca = False
Vader.vel_y = 6
if(L.disp)and(L.cerca):
Vader.health -=20
else:
Vader.vel_x = 0
Vader.disparar = False
Vader.cut = CortarImagen(vader1, 4, 2, 9, 4)
if(activate):
np = Vader.rect.x - 1000
#___________________________________________________________________SPRITES VADER
if(Vader.vel_x >0):
Vader.cut = CortarImagen(vader1, f0, 1, 9,4)
if(f0 >= 2):
f0 = 0
else:
f0 += 1
if(Vader.vel_x<0):
Vader.cut = CortarImagen(vader1, f1, 0, 9, 4)
if(f1 <=0):
f1 = 3
else:
f1 -= 1
if(Vader.vel_y>0):
Vader.cut = CortarImagen(vader1, f2, 2, 9, 4)
if(f2 >= 3):
f2 = 0
else:
f2 += 1
if(Vader.vel_y<0):
Vader.cut = CortarImagen(vader1, f3, 3, 9, 4)
if(f3 <= 0):
f3 = 3
else:
f3 -= 1
if(Vader.vel_x == 0)and(Vader.vel_y == 0)and(Vader.disparar == True):
if(Vader.dird):
Vader.cut = CortarImagen(vader2, f4, 0, 4, 1)
if(f4 >= 3):
f4 = 0
else:
f4 += 1
else:
Vader.cut = CortarImagen(vader3, f5, 0, 4, 1)
if(f5 >= 3):
f5 = 0
else:
f5 += 1
if(Luke.health>0):
Luke.health -= 2
else:
if(Luke2.health>0):
Luke2.health-= 2
#____________________________________________________________________BARRAS DE SALUD
if (Luke.health>0):
if (Luke.health>500):
Life1.cut = CortarImagen(life1, 0, 0, 1, 6)
elif (Luke.health>400 and Luke.health<500):
Life1.cut = CortarImagen(life1, 0, 1, 1, 6)
elif (Luke.health>300 and Luke.health<400):
Life1.cut = CortarImagen(life1, 0, 2, 1, 6)
elif (Luke.health>200 and Luke.health<300):
Life1.cut = CortarImagen(life1, 0, 3, 1, 6)
elif (Luke.health>100 and Luke.health<200):
Life1.cut = CortarImagen(life1, 0, 4, 1, 6)
elif (Luke.health>=1 and Luke.health<100):
Life1.cut = CortarImagen(life1, 0, 5, 1, 6)
if (Luke2.health>0):
if (Luke2.health>500):
Life2.cut = CortarImagen(life2, 0, 0, 1, 6)
elif (Luke2.health>400 and Luke2.health<500):
Life2.cut = CortarImagen(life2, 0, 1, 1, 6)
elif (Luke2.health>300 and Luke2.health<400):
Life2.cut = CortarImagen(life2, 0, 2, 1, 6)
elif (Luke2.health>200 and Luke2.health<300):
Life2.cut = CortarImagen(life2, 0, 3, 1, 6)
elif (Luke2.health>100 and Luke2.health<200):
Life2.cut = CortarImagen(life2, 0, 4, 1, 6)
elif (Luke2.health>=1 and Luke2.health<100):
Life2.cut = CortarImagen(life2, 0, 5, 1, 6)
#------------------SIMULACIONES CON EL FONDO-----------------
#---------------JUgadores desplazandose unos a otros--------------------
if(Luke2.simr):
Luke2.cut=CortarImagen(luke1,j1,0,6,6)
if j1 >= 3:
j1 = 0
else:
j1 += 1
if(Luke.simr):
Luke.cut=CortarImagen(luke1,d1,0,6,6)
if d1 >= 3:
d1 = 0
else:
d1 += 1
if(Luke2.siml):
Luke2.cut=CortarImagen(luke1,d3,1,6,6)
if d3 <= 0:
d3 = 3
else:
d3 -=1
if(Luke.siml):
Luke.cut=CortarImagen(luke1,d2,1,6,6)
if d2 <= 0:
d2 = 3
else:
d2 -=1
#__________________________VIda de los jugadores_____________________"
#_______________________________________________________#
#________________________COLICIONES_______________________________
# ---------------- Collisions: enemies touching each player ----------------
# Any enemy overlapping a player gets its "disp" (shoot) flag set.
ls_cl = pygame.sprite.spritecollide(Luke, Enemys, False)
for e in ls_cl:
    e.disp = True
ls_cl2 = pygame.sprite.spritecollide(Luke2, Enemys, False)
# BUG FIX: the original re-iterated ls_cl here, so ls_cl2 (collisions with
# player 2) was computed but never applied.
for e in ls_cl2:
    e.disp = True
ls_cl3 = pygame.sprite.spritecollide(Luke, Bullets, True)
for b in ls_cl3:
if(Luke.siml):
Luke.cut=CortarImagen(luke1,4,2,6,6)
else:
Luke.cut=CortarImagen(luke1,4,3,6,6)
Luke.health -= 20 ##cambiar<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
todos.remove(b)
Bullets.remove(b)
ls_cl4 = pygame.sprite.spritecollide(Luke2, Bullets, True)
for b in ls_cl4:
if(Luke2.siml):
Luke2.cut=CortarImagen(luke1,4,2,6,6)
else:
Luke2.cut=CortarImagen(luke1,4,3,6,6)
Luke2.health -= 50 ##cambiar<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
todos.remove(b)
Bullets.remove(b)
#____________________________________________________________________
#Refresco de Pantalla
#________________________________________________________________________SI ALGUIEN MUERE
if(Luke.health<=0):
#Vader.vel_x = 0
todos.remove(Luke)
Players.remove(Luke)
Lifes.remove(Life1)
todos.remove (Life1)
if(Luke2.health<=0):
todos.remove(Luke2)
Players.remove(Luke2)
Lifes.remove(Life2)
todos.remove (Life2)
#Vader.vel_x = 10
if(Luke.health<=0)and(Luke2.health<=0):
Vader.vel_x = 0
Vader.disparar = False
Vader.cut = CortarImagen(vader1, 4, 2, 9, 4)
if(Vader.health<=0):
Vader.rect.x = 1000
Vader.disparar = False
Vader.health = 0
todos.remove(Vader)
Bosses.remove(Vader)
#______________________________________________________________SI la nave sobrepasa el limite
for n in Naves:
if (n.rect.x <= -100):
Naves.remove(n)
todos.remove(n)
#____________________________________________________________________
#
#Limites con el fondo --------
#print pospx
if(pospx > 0):
pospx -= 1
Luke.vel_x = 0
Luke2.vel_x = 0
elif(pospx<-1810):
pospx +=1
Luke.vel_x = 0
Luke2.vel_x = 0
else:
##limit jugador 1
if(espera1):
if(Luke.limitx):
pospx -= Luke.vel_x
Vader.rect.x -= Luke.vel_x
Luke2.simr = True
for e in Enemys:
e.rect.x -= Luke.vel_x
else:
Luke2.simr = False
if(Luke.limitxx):
pospx -= Luke.vel_x
Vader.rect.x -= Luke.vel_x
Luke2.siml = True
for e in Enemys:
e.rect.x -= Luke.vel_x
else:
Luke2.siml = False
#limit jugador | |
self.generate_command(c, mode=mode, level=level + 1)
if level == 0 and isinstance(item, (list, tuple)) \
and mode == 'Command':
# commandline elements should be strings
item = repr(item)
new_command.append(item)
if isinstance(command, tuple):
new_command = tuple(new_command)
elif isinstance(command, set):
new_command = set(new_command)
if mode is None:
new_command = repr(new_command).replace("'", "\"")
if six.PY2:
new_command = new_command.encode('utf-8')
elif isinstance(command, SpecialPath):
# If the entry is a SpecialPath, it is converted into the
# corresponding path representation. If the parent call cas
# done on a tuple (mode=="Tuple"), we only recover the directory path
# (get_engine_path), else we get the path to the main file
# (get_engine_main_path)
if mode == "Tuple":
new_command = (
command.pattern
% self.path_mapping[command].get_engine_path())
new_command = six.ensure_str(new_command, 'utf-8')
else:
new_command = (
command.pattern
% self.path_mapping[command].get_engine_main_path())
new_command = six.ensure_str(new_command, 'utf-8')
else:
# If the entry is anything else, we return its string
# representation
if mode not in ('Command', 'PathOnly'):
new_command = six.text_type(command)
else:
new_command = command
return new_command
@staticmethod
def escape_quotes(line):
    """Return *line* with every double quote backslash-escaped."""
    # splitting on '"' and re-joining with '\\"' replaces each quote
    return '\\"'.join(line.split('"'))
def plain_command(self):
    '''
    Compute the actual job command (sequence of strings) from the command
    holding FileTransfer and SharedResourcePath objects.

    When a container command (docker / singularity prefix) is set, the job
    command is either appended after it, or substituted into its
    '{#command}' placeholder(s) as a single quoted string.

    returns: sequence of string
    '''
    if self.container_command is not None:
        # indices of container_command elements containing the placeholder
        replaced = [i for i in range(len(self.container_command))
                    if '{#command}' in self.container_command[i]]
        if len(replaced) == 0:
            # no placeholder: the container command is simply a prefix
            command = self.container_command + self.command
        else:
            user_command = self.generate_command(self.command,
                                                 mode="Command")
            # flatten any list elements produced by generate_command
            for i, item in enumerate(user_command):
                if isinstance(item, list):
                    user_command[i] = ''.join(item)
            user_command = [self.escape_quotes(item)
                            for item in user_command]
            # build one double-quoted string to drop into the placeholder
            user_command = '"' + '" "'.join(user_command) + '"'
            command = list(self.container_command)
            for i in replaced:
                command[i] \
                    = self.container_command[i].replace('{#command}',
                                                        user_command)
            return command  # no need to replace again
    else:
        command = self.command
    repl_command = self.commandline_repl(
        self.generate_command(command, mode="Command"))
    # re-go through generate_command since repl_command leaves SpecialPath
    # instances
    res_command = [self.generate_command(x, mode='Command')
                   for x in repl_command]
    return res_command
def plain_stdin(self):
    """Resolved standard-input path of the job, as a plain value."""
    resolved = self.generate_command(self.stdin)
    return resolved
def plain_stdout(self):
    """Resolved standard-output file path of the job, as a plain value."""
    resolved = self.generate_command(self.stdout_file)
    return resolved
def plain_stderr(self):
    """Resolved standard-error file path of the job, as a plain value."""
    resolved = self.generate_command(self.stderr_file)
    return resolved
def plain_input_params_file(self):
    """Resolved input-parameters file path of the job, as a plain value."""
    resolved = self.generate_command(self.input_params_file)
    return resolved
def plain_output_params_file(self):
    """Resolved output-parameters file path of the job, as a plain value."""
    resolved = self.generate_command(self.output_params_file)
    return resolved
def plain_working_directory(self):
    """Resolved working-directory path of the job, as a plain value."""
    resolved = self.generate_command(self.working_directory)
    return resolved
def write_input_params_file(self):
    """
    Write the job parameters (and the configuration dict, if any) as JSON
    into self.input_params_file. No-op unless use_input_params_file is set
    and an input params file path is defined.
    """
    if not (self.use_input_params_file and self.input_params_file):
        return
    # resolve each parameter value to its plain path representation
    params = {param: self.generate_command(value, mode='PathOnly')
              for param, value in six.iteritems(self.param_dict)}
    param_dict = {'parameters': params}
    # include config
    if self.configuration:
        param_dict['configuration_dict'] = self.configuration
    with open(self.input_params_file, 'w') as f:
        json.dump(utils.to_json(param_dict), f)
def is_running(self):
    """True while the job is neither unsubmitted nor finished."""
    return self.status not in (constants.NOT_SUBMITTED,
                               constants.FAILED,
                               constants.DONE)
def is_done(self):
    """True once the job has reached a terminal status (DONE or FAILED)."""
    return self.status in (constants.DONE, constants.FAILED)
def failed(self):
    """
    True when the job is done but did not end normally.

    A finished job counts as failed when its exit value is set and
    non-zero, its exit status differs from FINISHED_REGULARLY, or it was
    terminated by a signal.
    """
    # guard clause: an unfinished job is never "failed"
    if not self.is_done():
        return False
    # PEP 8: identity comparison with None (was `!= None`)
    return ((self.exit_value is not None and self.exit_value != 0)
            or self.exit_status != constants.FINISHED_REGULARLY
            or self.terminating_signal is not None)
def ended_with_success(self):
    """
    True when the job finished regularly with exit value 0 and no
    terminating signal.
    """
    # PEP 8: identity comparison with None (was `== None`)
    return (self.is_done()
            and self.exit_value == 0
            and self.exit_status == constants.FINISHED_REGULARLY
            and self.terminating_signal is None)
def engine_execution(self):
    """
    Call the optional 'engine_execution' hook defined on the job class.

    Returns the hook's result, or None when the job class defines no such
    hook.
    """
    hook = getattr(self.job_class, 'engine_execution', None)
    if not hook:
        return None
    return hook(self.job_class, self)
class EngineWorkflow(Workflow):

    '''
    Server side representation of a :obj:`Workflow`, i.e. a list of jobs
    with groups and dependencies.
    '''
    # workflow id
    wf_id = None
    # user id
    _user_id = None
    # path translation: for each namespace, a dictionary holding the
    # translation (association uuid => engine path)
    # dictionary: namespace => uuid => path
    _path_translation = None
    # workflow status as defined in constants.WORKFLOW_STATUS
    status = None
    # expiration date
    expiration_date = None
    # name of the queue to be used to submit jobs, str
    queue = None
    # mapping between Job and actual EngineJob which are valid on the system
    # dictionary: Job -> EngineJob
    job_mapping = None
    # mapping between FileTransfer and actual EngineTransfer which are valid on
    # the system.
    # dictionary: FileTransfer -> EngineTransfer
    transfer_mapping = None
    # Once registered on the database server each
    # EngineJob has a job_id.
    # dictionary: job_id -> EngineJob
    registered_jobs = None
    # Once registered on the database server each
    # EngineTransfer has a transfer_id.
    # dictionary: tr_id -> EngineTransfer
    registered_tr = None
    # Once registered on the database server each
    # TemporaryPath has an id.
    # dictionary: tmp_id -> EngineTemporaryPath
    registered_tmp = None
    # docker / singularity prefix command, to be prepended to all jobs
    # commandlines
    container_command = None
    # for each job: list of all the jobs which have to end before a job can start
    # dictionary: job_id -> list of job id
    _dependency_dict = None
    # A workflow object. For serialisation purposes with serpent
    _client_workflow = None
    # class-level logger placeholder (presumably set elsewhere -- not
    # assigned in the code visible here)
    logger = None
def to_dict(self):
    """
    Serialize the workflow, extending the client Workflow dict with the
    engine-only container_command entry.
    """
    d = super(EngineWorkflow, self).to_dict()
    # note: path_translation, queue and expiration_date are not added to
    # this dict here
    d["container_command"] = self.container_command
    return d
@classmethod
def from_dict(cls, d):
    """
    Rebuild an EngineWorkflow from its dict representation *d*.

    The client Workflow is rebuilt first; expiration_date, queue and
    container_command are taken from *d*; path translation starts empty.
    """
    wf = Workflow.from_dict(d)
    exp_date = d.get('expiration_date')
    if exp_date is not None:
        exp_date = datetime.datetime(*exp_date)
    return cls(wf,
               {},  # path_translation is not serialized
               d.get('queue'),
               exp_date,
               wf.name,
               container_command=d.get('container_command'))
class WorkflowCache(object):
    """
    Mutable scratch state used while iterating over a workflow: several
    empty state buckets, a dependencies dict and a failed-jobs flag.
    """

    def __init__(self):
        # every state bucket starts out as an empty set
        for bucket in ('waiting_jobs', 'to_run', 'done',
                       'running', 'to_abort'):
            setattr(self, bucket, set())
        self.dependencies = {}
        self.has_new_failed_jobs = False
def __init__(self,
             client_workflow,
             path_translation,
             queue,
             expiration_date,
             name,
             container_command=None):
    """
    Build the engine-side workflow from a client Workflow.

    :param client_workflow: client-side Workflow to wrap
    :param path_translation: dict namespace => uuid => engine path
    :param queue: name of the queue used to submit the jobs
    :param expiration_date: date after which the workflow may expire
    :param name: workflow name
    :param container_command: optional docker / singularity prefix
        command prepended to every job commandline
    """
    logging.debug("Within Engine workflow constructor")
    super(EngineWorkflow, self).__init__(
        client_workflow.jobs,
        client_workflow.dependencies,
        client_workflow.root_group,
        env=client_workflow.env,
        env_builder_code=client_workflow.env_builder_code,
        param_links=client_workflow.param_links)
    # NOTE(review): the super() call above does not pass
    # client_workflow.groups, which does not match the Workflow
    # constructor prototype -- to confirm.
    self.wf_id = -1
    logging.debug("After call to parent constructor, if we change the "
                  "prototype of the constructor suppressing the "
                  "last parametre nothing seem to happen, see comment above")
    self.status = constants.WORKFLOW_NOT_STARTED
    self._path_translation = path_translation
    self.queue = queue
    self.expiration_date = expiration_date
    self.name = name
    self.user_storage = client_workflow.user_storage
    if hasattr(client_workflow, 'uuid'):
        self.uuid = client_workflow.uuid
    self.job_mapping = {}
    self.transfer_mapping = {}
    self.container_command = container_command
    self._map()
    self.registered_tr = {}
    self.registered_tmp = {}
    self.registered_jobs = {}
    # job_id -> list of the jobs which must end before it can start
    self._dependency_dict = {}
    for dep in self.dependencies:
        # setdefault replaces the manual if/else dict-append pattern
        self._dependency_dict.setdefault(dep[1], []).append(dep[0])
    self.cache = None
    # begin without cache because it also has an overhead
    self.use_cache = False
def get_environ(self):
    """
    Get the environment variables dict for the workflow. This environment
    is applied to all engine jobs (and can be specialized on a per-job
    basis if jobs also have an env variable).

    Env variables come from executing the env_builder_code source (which
    must print a JSON dict), updated with the workflow's env variable.
    """
    env = {}
    if self.env_builder_code:
        fd, script_path = tempfile.mkstemp(prefix='swf_', suffix='.py')
        try:
            os.close(fd)
            # write the builder code to a temporary script
            with io.open(script_path, 'w', encoding='utf-8') as f:
                f.write(six.ensure_text(self.env_builder_code))
                f.write(u'\n')
            try:
                # run it with the current interpreter; stdout is JSON
                out = subprocess.check_output(
                    [sys.executable, script_path]).decode('utf-8')
                env = json.loads(out)
            except Exception as e:
                logging.error(
                    'workflow env_builder_code could not be executed:\n' + repr(e) + '\ncode:\n' + self.env_builder_code)
        finally:
            os.unlink(script_path)
    # the explicit env dict takes precedence over builder results
    if self.env:
        env.update(self.env)
    return env
def _map(self):
    '''
    Fill the job_mapping attribute from the workflow's jobs, dependencies,
    groups and root group, creating an EngineJob for every client Job.
    + type checking

    The per-job EngineJob creation was duplicated four times; it is now
    factored into the _map_job helper.
    '''
    # get workflow environment variables, shared by all engine jobs
    env = self.get_environ()
    # jobs
    for job in self.jobs:
        if not isinstance(job, Job):
            raise WorkflowError("%s: Wrong type in the jobs attribute. "
                                " An object of type Job is required." % (repr(job)))
        self._map_job(job, env)
    # dependencies
    for dependency in self.dependencies:
        if not isinstance(dependency[0], Job) or \
                not isinstance(dependency[1], Job):
            raise WorkflowError("%s, %s: Wrong type in the workflow dependencies."
                                " An object of type Job is required." %
                                (repr(dependency[0]), repr(dependency[1])))
        # jobs referenced only by a dependency are appended to self.jobs
        for job in (dependency[0], dependency[1]):
            if job not in self.job_mapping:
                self.jobs.append(job)
                self._map_job(job, env)
    # groups
    for group in self.groups:
        for elem in group.elements:
            if isinstance(elem, Job):
                if elem not in self.job_mapping:
                    self.jobs.append(elem)
                    self._map_job(elem, env)
            elif not isinstance(elem, Group):
                raise WorkflowError("%s: Wrong type in the workflow "
                                    "groups. Objects of type Job or "
                                    "Group are required. Got type: %s"
                                    % (repr(elem), type(elem).__name__))
    # root group
    for elem in self.root_group:
        if isinstance(elem, Job):
            if elem not in self.job_mapping:
                self.jobs.append(elem)
                self._map_job(elem, env)
        elif not isinstance(elem, Group):
            raise WorkflowError(
                "%s: Wrong type in the workflow root_group."
                " Objects of type Job or Group are required." %
                (repr(elem)))

def _map_job(self, job, env):
    '''
    Create and register the EngineJob for *job* if it is not mapped yet,
    merging its transfer mapping into the workflow's.
    '''
    if job not in self.job_mapping:
        ejob = EngineJob(client_job=job,
                         queue=self.queue,
                         path_translation=self._path_translation,
                         transfer_mapping=self.transfer_mapping,
                         container_command=self.container_command,
                         wf_env=env)
        self.transfer_mapping.update(ejob.transfer_mapping)
        self.job_mapping[job] = ejob
| |
# rot_z=rotbox,
# filenames_sub=confinedfield_namesub,
# vals_name=mlfield_ensemble_name)
# del ccx, ccy, ccz
# Update old whole fields to new confined fields
grad_k, k = mlfield_ensemble[:, :3], mlfield_ensemble[:, 3]
epsilon = mlfield_ensemble[:, 4]
grad_u = mlfield_ensemble[:, 5:14]
u = mlfield_ensemble[:, 14:17]
grad_p = mlfield_ensemble[:, 17:20]
uuprime2 = mlfield_ensemble[:, 20:]
"""
Rotate Fields by Given Rotation Angle
"""
# TODO: maybe rotate field here already
# if rotz != 0.:
# grad_k = rotateData(grad_k, anglez=rotz)
# # Switch to matrix form for grad(U) and rotate
# grad_u = grad_u.reshape((-1, 3, 3))
# grad_u = rotateData(grad_u, anglez=rotz).reshape((-1, 9))
# u = rotateData(u, anglez=rotz)
# grad_p = rotateData(grad_p, anglez=rotz)
# # Extend symmetric tensor to full matrix form
# uuprime2 = expandSymmetricTensor(uuprime2).reshape((-1, 3, 3))
# uuprime2 = rotateData(uuprime2, anglez=rotz).reshape((-1, 9))
# uuprime2 = contractSymmetricTensor(uuprime2)
if save_fields:
k, epsilon = k.reshape((-1, 1)), epsilon.reshape((-1, 1))
# Reassemble ML field ensemble after possible field rotation
mlfield_ensemble = np.hstack((grad_k, k, epsilon, grad_u, u, grad_p, uuprime2))
case.savePickleData(time, mlfield_ensemble, filenames=mlfield_ensemble_namefull)
case.savePickleData(time, cc, filenames='CC_' + confinedfield_namesub)
case.savePickleData(time, mask, filenames='IndexMask_' + confinedfield_namesub)
del mlfield_ensemble
# Else if directly read pickle data
else:
if proc_field and proc_invariant:
# Load rotated and/or confined field data useful for Machine Learning
mlfield_ensemble = case.readPickleData(time, mlfield_ensemble_namefull)
grad_k, k = mlfield_ensemble[:, :3], mlfield_ensemble[:, 3]
epsilon = mlfield_ensemble[:, 4]
grad_u = mlfield_ensemble[:, 5:14]
u = mlfield_ensemble[:, 14:17]
grad_p = mlfield_ensemble[:, 17:20]
uuprime2 = mlfield_ensemble[:, 20:]
# Load confined cell centers too
cc = case.readPickleData(time, 'CC_' + confinedfield_namesub)
del mlfield_ensemble
"""
Calculate Field Invariants
"""
if proc_field and proc_invariant:
# Step 1: non-dimensional strain rate and rotation rate tensor Sij and Rij
# epsilon is SGS epsilon as it's not necessary to use total epsilon
# Sij shape (n_samples, 6); Rij shape (n_samples, 9)
t0 = t.time()
sij, rij = getStrainAndRotationRateTensor(grad_u, tke=k, eps=epsilon, cap=cap_sijrij)
t1 = t.time()
print('\nFinished Sij and Rij calculation in {:.4f} s'.format(t1 - t0))
# Step 2: 10 invariant bases scaled Tij, shape (n_samples, 6, 10)
t0 = t.time()
tb = getInvariantBases(sij, rij, quadratic_only=False, is_scale=scale_tb)
t1 = t.time()
print('\nFinished Tij calculation in {:.4f} s'.format(t1 - t0))
# Step 3: anisotropy tensor bij, shape (n_samples, 6)
bij = case.getAnisotropyTensorField(uuprime2, use_oldshape=False)
del uuprime2
# Save tensor invariants related fields
if save_fields:
case.savePickleData(time, sij, filenames=('Sij_' + confinedfield_namesub))
case.savePickleData(time, rij, filenames=('Rij_' + confinedfield_namesub))
case.savePickleData(time, tb, filenames=('Tij_' + confinedfield_namesub))
case.savePickleData(time, bij, filenames=('bij_' + confinedfield_namesub))
# Else if read invariants data from pickle
else:
if proc_field and proc_field_feature:
invariants = case.readPickleData(time, filenames=('Sij_' + confinedfield_namesub,
'Rij_' + confinedfield_namesub,
'Tij_' + confinedfield_namesub,
'bij_' + confinedfield_namesub))
sij = invariants['Sij_' + confinedfield_namesub]
rij = invariants['Rij_' + confinedfield_namesub]
tb = invariants['Tij_' + confinedfield_namesub]
bij = invariants['bij_' + confinedfield_namesub]
del invariants
"""
Calculate Feature Sets
"""
if proc_field and proc_field_feature:
if fs == 'grad(TKE)':
fs_data, labels = getInvariantFeatureSet(sij, rij, grad_k, k=k, eps=epsilon)
elif fs == 'grad(p)':
fs_data, labels = getInvariantFeatureSet(sij, rij, grad_p=grad_p, u=u, grad_u=grad_u)
elif 'grad(TKE)_grad(p)' in fs:
fs_data, labels = getInvariantFeatureSet(sij, rij, grad_k=grad_k, grad_p=grad_p, k=k, eps=epsilon, u=u,
grad_u=grad_u)
# 4 additional invariant features
if '+' in fs:
nu *= np.ones_like(k)
# Radial distance to (closest) turbine center.
# Don't supply z to get horizontal radial distance
r = getRadialTurbineDistance(cc[:, 0], cc[:, 1], z=None, turblocs=turblocs)
fs_data2, labels2 = getSupplementaryInvariantFeatures(k, cc[:, 2], epsilon, nu, sij, r=r)
fs_data = np.hstack((fs_data, fs_data2))
del nu, r, fs_data2
del sij, rij, grad_k, k, epsilon, grad_u, u, grad_p
if save_fields:
case.savePickleData(time, fs_data, filenames='FS_' + fs + '_' + confinedfield_namesub)
# Else, directly read feature set data
else:
if proc_field and proc_field_traintest_split: fs_data = case.readPickleData(time, filenames='FS_' + fs + '_' + confinedfield_namesub)
"""
Train, Test Data Preparation
"""
if proc_field and proc_field_traintest_split:
# X is either RANS or LES invariant features shape (n_samples, n_features)
x = fs_data
del fs_data
# y is LES bij shape (n_samples, 6)
y = bij
del bij
# Prepare GS samples of specified size
if 'OneTurb' in casename:
list_data_gs, _ = splitTrainTestDataList([cc, x, y, tb], test_fraction=0., seed=seed, sample_size=samples_gs)
# Prepare training samples of specified size for actual training
list_data_train, _ = splitTrainTestDataList([cc, x, y, tb], test_fraction=0., seed=seed, sample_size=samples_train)
list_data_test = [cc, x, y, tb, mask]
del cc, x, y, tb
if save_fields:
if 'OneTurb' in casename:
# Extra tuple treatment to list_data_* that's already a tuple since savePickleData thinks tuple means multiple files
case.savePickleData(time, (list_data_train,), 'list_data_train_' + confinedfield_namesub)
case.savePickleData(time, (list_data_gs,), 'list_data_GS_' + confinedfield_namesub)
case.savePickleData(time, (list_data_test,), 'list_data_test_' + confinedfield_namesub)
# Else if directly read GS, train and test data from pickle data
else:
if proc_field:
list_data_gs = case.readPickleData(time, 'list_data_GS_' + confinedfield_namesub)
list_data_train = case.readPickleData(time, 'list_data_train_' + confinedfield_namesub)
list_data_test = case.readPickleData(time, 'list_data_test_' + confinedfield_namesub)
"""
Process Slices for Prediction Visualizations
"""
if proc_slice:
# Initialize case
slice = SliceProperties(time=time, casedir=casedir, casename=casename, rot_z=rotbox, result_folder=resultfolder)
# Update time to the actual detected time if time was 'latestTime'
time = slice.time
# Read slices
slice.readSlices(properties=fields, slicenames=slicenames, slicenames_sub=slicename_sub)
slice_data = slice.slices_val
# Dict storing all slice values with slice type as key, e.g. alongWind, hubHeight, etc.
list_slicevals, list_sliceproperties, list_slicecoor = {}, {}, {}
slicenames_iter = iter(slicenames)
# Go through each slice type, e.g. alongWind, hubHeight, etc.
for itype in range(len(slicenames)):
# Get it's type, e.g. alongWind, hubHeight, etc.
slice_type = next(slicenames_iter)
list_slicevals[slice_type], list_sliceproperties[slice_type] = [], []
store_slicescoor = True
# Go through every slices incl. every type and every flow property
for i in range(len(slice.slicenames)):
slicename = slice.slicenames[i]
# Skip kSGSmean since it will be discovered when kResolved or epsilonSGSmean is discovered
if 'kSGSmean' in slicename: continue
# If matching slice type, proceed
if slice_type in slicename:
val = slice.slices_val[slicename]
# If kResolved and kSGSmean in properties, get total kMean
# Same with grad_kResolved
if 'kResolved' in slicename:
if grad_kw in slicename:
for i2 in range(len(slice.slicenames)):
slicename2 = slice.slicenames[i2]
if slice_type in slicename2 and 'kSGSmean' in slicename2 and grad_kw in slicename2:
print(' Calculating total grad(<k>) for {}...'.format(slicenames[itype]))
val += slice.slices_val[slicename2]
break
else:
for i2 in range(len(slice.slicenames)):
slicename2 = slice.slicenames[i2]
if slice_type in slicename2 and 'kSGSmean' in slicename2 and grad_kw not in slicename2:
print(' Calculating total <k> for {}...'.format(slicenames[itype]))
val += slice.slices_val[slicename2]
val = val.reshape((-1, 1))
break
list_slicevals[slice_type].append(val)
list_sliceproperties[slice_type].append(slicename)
list_slicecoor[slice_type] = slice.slices_coor[slicename]
store_slicescoor = False
# Assign list to individual variables
for i, name in enumerate(list_sliceproperties[slice_type]):
if 'k' in name:
if grad_kw in name:
grad_k = list_slicevals[slice_type][i]
else:
k = list_slicevals[slice_type][i]
elif 'U' in name:
if grad_kw in name:
grad_u = list_slicevals[slice_type][i]
else:
u = list_slicevals[slice_type][i]
elif 'p_rgh' in name:
if grad_kw in name:
grad_p = list_slicevals[slice_type][i]
else:
p = list_slicevals[slice_type][i]
# epsilon SGS
elif 'epsilon' in name:
epsilon = list_slicevals[slice_type][i]
elif 'uu' in name:
uuprime2 = list_slicevals[slice_type][i]
elif 'G' in name:
g_tke = list_slicevals[slice_type][i]
elif 'divDevR' in name:
div_devr = list_slicevals[slice_type][i]
elif 'dDevRab_db' in name:
ddevr_dj = list_slicevals[slice_type][i]
else:
warn("\n{} slice not assigned to a variable!".format(name), stacklevel=2)
# TODO: maybe rotate fields here already
# # Rotate fields if requested
# if rotz != 0.:
# grad_k = rotateData(grad_k, anglez=rotz)
# # Switch to matrix form for grad(U) and rotate
# grad_u = grad_u.reshape((-1, 3, 3))
# grad_u = rotateData(grad_u, anglez=rotz).reshape((-1, 9))
# u = rotateData(u, anglez=rotz)
# grad_p = rotateData(grad_p, anglez=rotz)
# # Extend symmetric tensor to full matrix form
# uuprime2 = expandSymmetricTensor(uuprime2).reshape((-1, 3, 3))
# uuprime2 = rotateData(uuprime2, anglez=rotz).reshape((-1, 9))
# uuprime2 = contractSymmetricTensor(uuprime2)
# Process invariants
# Non-dimensional Sij (n_samples, 6) and Rij (n_samples, 9)
# Using epsilon SGS as epsilon total is not necessary
# according to the definition in Eq 5.65, Eq 5.66 in Sagaut (2006)
sij, rij = getStrainAndRotationRateTensor(grad_u, tke=k, eps=epsilon, cap=cap_sijrij)
# Step 2: 10 invariant bases scaled Tij, shape (n_samples, 6)
# possibly with scaling of 1/[1, 10, 10, 10, 100, 100, 1000, 1000, 1000, 1000]
tb = getInvariantBases(sij, rij, quadratic_only=False, is_scale=scale_tb)
# Step 3: anisotropy tensor bij, shape (n_samples, 6)
bij = case.getAnisotropyTensorField(uuprime2, use_oldshape=False)
# Save tensor invariants related fields
if save_fields:
case.savePickleData(time, sij, filenames=('Sij_' + slice_type))
case.savePickleData(time, rij, filenames=('Rij_' + slice_type))
case.savePickleData(time, tb, filenames=('Tij_' + slice_type))
case.savePickleData(time, bij, filenames=('bij_' + slice_type))
# Visualization related slice data
case.savePickleData(time, k, 'TKE_' + slice_type)
case.savePickleData(time, grad_u, 'grad(U)_' + slice_type)
# case.savePickleData(time, g_tke, | |
"""
post_bands:
post_bands extract data from static-o_DS3_EBANDS.agr and it will build
the kpoints length: xcoord_k from the high symmetry line and the corresponding
basis for reciprocal space.
b1 = 1 / a1, b2 = 1 / a2 and b3 = 1 / a3.
"""
import os
import numpy as np
import matplotlib.pyplot as plt
class PostBands:
    """Post-process ABINIT band-structure output (xxx_EBANDS.agr files)."""

    def __init__(self):
        # no up-front state: attributes are set by get_xcoord_k and
        # get_ebands_agr below
        pass
def get_xcoord_k(self, kpath, cell):
    """
    Build the x-axis coordinates of the band-structure plot from the
    high-symmetry k-path and the real-space cell.

    :param kpath: list of [kx, ky, kz, label, link] items, where link is
        the number of k-points connecting this point to the next, or "|"
        for a discontinuity in the path.
    :param cell: 3x3 real-space lattice vectors (rows a1, a2, a3).

    Sets:
        self.xcoord_k: cumulative distance along the k-path, one entry
            per k-point.
        self.locs: x positions of the high-symmetry points.
        self.labels_for_matplotlib / self.labels_for_gnuplot: tick
            labels ("GAMMA" rendered as the Greek letter).

    Note: the previously computed 1/|a_i| values (b1, b2, b3) were unused
    -- that orthogonal-cell approximation was abandoned in favour of the
    full reciprocal vectors -- so they have been removed.
    """
    self.kpath = kpath
    self.xcoord_k = []
    # reciprocal lattice vectors b_i = 2*pi * (a_j x a_k) / V; segment
    # lengths use the full vectors since b1, b2, b3 are generally not
    # perpendicular to each other
    V = np.dot(cell[0], np.cross(cell[1], cell[2]))
    b1_vec = np.cross(cell[1], cell[2]) * 2 * np.pi / V
    b2_vec = np.cross(cell[2], cell[0]) * 2 * np.pi / V
    b3_vec = np.cross(cell[0], cell[1]) * 2 * np.pi / V
    print("cell a:")
    print("%f %f %f\n" % (cell[0][0], cell[0][1], cell[0][2]))
    print("%f %f %f\n" % (cell[1][0], cell[1][1], cell[1][2]))
    print("%f %f %f\n" % (cell[2][0], cell[2][1], cell[2][2]))
    print("cell b:\n")
    print("%f %f %f\n" % (b1_vec[0], b1_vec[1], b1_vec[2]))
    print("%f %f %f\n" % (b2_vec[0], b2_vec[1], b2_vec[2]))
    print("%f %f %f\n" % (b3_vec[0], b3_vec[1], b3_vec[2]))
    self.xcoord_k.append(0.0000000)
    for i in range(len(self.kpath) - 1):
        # the step within each segment is the distance between its two
        # high-symmetry k-points (in reciprocal coordinates) divided by
        # the connecting number kpath[i][4]
        if self.kpath[i][4] != "|":
            vec1 = self.kpath[i][0] * np.array(b1_vec) + self.kpath[i][1] * np.array(b2_vec) + self.kpath[i][2] * np.array(b3_vec)
            vec2 = self.kpath[i+1][0] * np.array(b1_vec) + self.kpath[i+1][1] * np.array(b2_vec) + self.kpath[i+1][2] * np.array(b3_vec)
            distance_in_b = np.linalg.norm(np.array(vec2) - np.array(vec1))
            step = distance_in_b / (self.kpath[i][4])
            for j in range(self.kpath[i][4]):
                self.xcoord_k.append(self.xcoord_k[-1] + step)
        else:
            # "|": discontinuity -> duplicate the previous x coordinate
            self.xcoord_k.append(self.xcoord_k[-1])
    # tick positions and labels for the plot
    self.locs = []
    self.labels_for_matplotlib = []
    self.labels_for_gnuplot = []
    self.locs.append(0.0000000)
    nk = 0
    print("%d\n" % nk)
    for i in range(len(self.kpath) - 1):
        if self.kpath[i][4] != "|":
            nk = nk + self.kpath[i][4]
            self.locs.append(self.xcoord_k[nk])
            print("%d\n" % nk)
        else:
            nk = nk + 1
    self.labels_for_matplotlib.append(r"$%s$" % self.kpath[0][3].upper() if self.kpath[0][3].upper() != "GAMMA" else r"$\Gamma$")
    self.labels_for_gnuplot.append("%s" % self.kpath[0][3].upper() if self.kpath[0][3].upper() != "GAMMA" else "{/symbol G}")
    for i in range(1, len(self.kpath)):
        if self.kpath[i-1][4] != "|":
            self.labels_for_matplotlib.append(r"$%s$" % self.kpath[i][3].upper() if self.kpath[i][3].upper() != "GAMMA" else r"$\Gamma$")
            self.labels_for_gnuplot.append("%s" % self.kpath[i][3].upper() if self.kpath[i][3].upper() != "GAMMA" else "{/symbol G}")
        else:
            # "|": merge with the previous label, separated by a bar
            self.labels_for_matplotlib[-1] = r"$%s | %s$" % (self.labels_for_matplotlib[-1].split("$")[1], self.kpath[i][3].upper())
            self.labels_for_gnuplot[-1] = "%s | %s" % (self.labels_for_gnuplot[-1], self.kpath[i][3].upper())
def get_ebands_agr(self, filepath="static-o_DS3_EBANDS.agr"):
with open(filepath, 'r') as fin:
self.lines = fin.readlines()
# get the band energy
# in xxx_EBANDS.agr, energy are in unit of eV, and fermi energy are already shfited to 0
# first check the magnetic_status
for line in self.lines:
if len(line.split()) == 0:
continue
if line.split()[0] == "#" and line.split()[1] == "mband:":
self.mband = int(line.split()[2].split(",")[0])
self.nkpt = int(line.split()[4].split(",")[0])
self.nsppol = int(line.split()[6].split(",")[0])
self.nspinor = int(line.split()[8])
# get the eigenval (in agr, efermi is shfited to 0 already)
self.energies_agr = []
for i in range(len(self.lines)):
if len(self.lines[i].split()) == 0:
continue
if self.lines[i].split()[0] == "@type" and self.lines[i].split()[1].split("\n")[0] == "xy":
band = []
for j in range(self.nkpt):
band.append(float(self.lines[i+j+1].split()[1]))
self.energies_agr.append(band)
def _plot_band_matplotlib(self, bandrange=[0, 1.0]):
    """
    Plot the band structure with matplotlib and save it to PNG file(s).

    :param bandrange:
        a list of two values (between 0 and 1) defining the fraction of
        bands to plot:
        bandrange[0]: lower boundary (fraction of mband)
        bandrange[1]: upper boundary (fraction of mband)
        default [0, 1.0] plots every available band.
        Be aware that the range selects band numbers, not energies.

    For nsppol == 1 one figure is saved; for nsppol == 2 three figures
    are saved (spin up, spin down, and both together).
    """
    # NOTE(review): bandrange=[0, 1.0] is a mutable default argument; it
    # is never mutated here, but a None default would be safer.
    if self.nsppol == 1:
        band_min = int(bandrange[0] * self.mband)
        band_max = int(bandrange[1] * self.mband)
        for i in range(band_min, band_max, 1):
            plt.plot(self.xcoord_k, self.energies_agr[i], color='blue', linewidth=1)
        plt.xticks(self.locs, self.labels_for_matplotlib)
        plt.title("Band Structure")
        plt.xlabel("K")
        plt.ylabel("Energy(eV)")
        # NOTE(review): the `b` keyword of plt.grid was deprecated and
        # removed in matplotlib >= 3.6 (renamed `visible`) -- confirm the
        # target matplotlib version.
        plt.grid(b=True, which='major')
        plt.savefig("band-structure-spin-unpolarized.png")
        # NOTE(review): no plt.close() here, unlike the nsppol == 2
        # branch -- the figure state persists into later plots.
    if self.nsppol == 2:
        # half of self.energies_agr are spin up, and half are spin down
        band_min = int(bandrange[0] * self.mband)
        band_max = int(bandrange[1] * self.mband)
        # spin up (first mband data sets)
        for i in range(band_min, band_max, 1):
            plt.plot(self.xcoord_k, self.energies_agr[i])
        plt.title("Band Structure(Spin Up)")
        plt.xlabel("K")
        plt.ylabel("Energy(eV)")
        plt.xticks(self.locs, self.labels_for_matplotlib)
        plt.grid(b=True, which='major')
        plt.savefig("band-structure-spin-polarized-1.png")
        plt.close()
        # spin down (data sets offset by mband)
        for i in range(int(band_min+self.mband), int(band_max+self.mband), 1):
            plt.plot(self.xcoord_k, self.energies_agr[i])
        plt.title("Band Structure(Spin Down)")
        plt.xlabel("K")
        plt.ylabel("Energy(eV)")
        plt.xticks(self.locs, self.labels_for_matplotlib)
        plt.grid(b=True, which='major')
        plt.savefig("band-structure-spin-polarized-2.png")
        plt.close()
        # all in one picture: spin up in blue, spin down in red
        for i in range(band_min, band_max, 1):
            plt.plot(self.xcoord_k, self.energies_agr[i], color="blue", linewidth=1)
        for i in range(int(band_min+self.mband), int(band_max+self.mband), 1):
            plt.plot(self.xcoord_k, self.energies_agr[i], color="red", linewidth=1)
        plt.title("Band Structure(Spin Up&Down)")
        plt.xlabel("K")
        plt.ylabel("Energy(eV)")
        plt.xticks(self.locs, self.labels_for_matplotlib)
        plt.grid(b=True, which='major')
        plt.savefig("band-structure-spin-polarized-all.png")
        plt.close()
def _plot_band_gnuplot(self, bandrange=[0, 1.0]):
"""
:param bandrange:
a list of two values(between 0 and 1) defining the percentage
of bands to plot.
            bandrange[0]: left boundary (fraction) of the bands to plot
            bandrange[1]: right boundary (fraction) of the bands to plot
            default is bandrange[0] = 0, bandrange[1] = 1, in which case
            all the available bands will be plotted.
            Be aware that the range is not an energy range but a band-number range
        :param imagebase: image file base name (not the full path; note this method takes no such parameter)
"""
if self.nsppol == 1:
band_min = int(bandrange[0] * self.mband)
band_max = int(bandrange[1] * self.mband)
with open("all-bands-spin-unpolarized.data", 'w') as fout:
fout.write("# band structure extracted from xxx_EBANDS.agr\n")
fout.write("# efermi shfited to 0 already\n")
for i in range(self.mband):
for j in range(len(self.xcoord_k)):
fout.write("%f %f\n" % (self.xcoord_k[j], self.energies_agr[i][j]))
fout.write("\n")
with open("specified-bands-spin-unpolarized.data", 'w') as fout:
fout.write("# band structure extracted from ***_EBANDS.agr\n")
fout.write("# efermi shifted to 0 already\n")
for i in range(band_min, band_max, 1):
for j in range(len(self.xcoord_k)):
fout.write("%f %f\n" % (self.xcoord_k[j], self.energies_agr[i][j]))
fout.write("\n")
with open("all-bands-spin-unpolarized.gnuplot", 'w') as fout:
fout.write("set terminal gif\n")
fout.write("set output 'all-bands-spin-unpolarized.gif'\n")
fout.write("unset key\n")
fout.write("set parametric\n")
fout.write("set title 'Band Structure'\n")
fout.write("set xlabel 'K'\n")
fout.write("set ylabel 'Energy(eV)'\n")
fout.write("set xtics(")
for i in range(len(self.labels_for_gnuplot)-1):
fout.write("'%s' %f, " % (self.labels_for_gnuplot[i], self.locs[i]))
fout.write("'%s' %f)\n" % (self.labels_for_gnuplot[-1], self.locs[-1]))
fout.write("set grid xtics ytics\n")
fout.write("set autoscale\n")
fout.write("plot 'all-bands-spin-unpolarized.data' using 1:2 w l\n")
os.system("gnuplot all-bands-spin-unpolarized.gnuplot")
with open("specified-bands-spin-unpolarized.gnuplot", 'w') as fout:
fout.write("set terminal gif\n")
fout.write("set output 'specified-bands-spin-unpolarized.gif'\n")
fout.write("unset key\n")
fout.write("set parametric\n")
fout.write("set title 'Band Structure'\n")
fout.write("set xlabel 'K'\n")
fout.write("set ylabel 'Energy(eV)'\n")
fout.write("set xtics(")
for i in range(len(self.labels_for_gnuplot)-1):
fout.write("'%s' %f, " % (self.labels_for_gnuplot[i], self.locs[i]))
fout.write("'%s' %f)\n" % (self.labels_for_gnuplot[-1], self.locs[-1]))
fout.write("set grid xtics ytics\n")
fout.write("set autoscale\n")
fout.write("plot 'specified-bands-spin-unpolarized.data' using 1:2 w l\n")
os.system("gnuplot specified-bands-spin-unpolarized.gnuplot")
if self.nsppol == 2:
band_min = int(bandrange[0] * self.mband)
band_max = int(bandrange[1] * self.mband)
with open("all-bands-spin-polarized-spin-1.data", 'w') as fout:
fout.write("# band structure extracted from xxx_EBANDS.agr\n")
fout.write("# efermi shfited to 0 already\n")
for i in range(self.mband):
for j in range(len(self.xcoord_k)):
fout.write("%f %f\n" % (self.xcoord_k[j], self.energies_agr[i][j]))
fout.write("\n")
with open("all-bands-spin-polarized-spin-2.data", 'w') as fout:
fout.write("# band structure extracted from xxx_EBANDS.agr\n")
fout.write("# efermi shifted to 0 already\n")
for i in range(self.mband):
for j in range(len(self.xcoord_k)):
fout.write("%f %f\n" % (self.xcoord_k[j], self.energies_agr[self.mband+i][j]))
fout.write("\n")
with open("specified-bands-spin-polarized-spin-1.data", 'w') as fout:
fout.write("# band structure extracted from xxx_EBANDS.agr\n")
fout.write("# efermi shifted to 0 already\n")
for i in range(band_min, band_max, 1):
for j in range(len(self.xcoord_k)):
fout.write("%f %f\n" % (self.xcoord_k[j], self.energies_agr[i][j]))
fout.write("\n")
with open("specified-bands-spin-polarized-spin-2.data", 'w') as fout:
fout.write("# band structure extracted from xxx_EBANDS.agr\n")
fout.write("# efermi shifted to 0 already\n")
for i in range(band_min, band_max, 1):
for j in range(len(self.xcoord_k)):
fout.write("%f %f\n" % (self.xcoord_k[j], self.energies_agr[self.mband+i][j]))
fout.write("\n")
with open("all-bands-spin-polarized-spin-1.gnuplot", 'w') as fout:
fout.write("set terminal gif\n")
fout.write("set output 'all-bands-spin-polarized-spin-1.gif'\n")
fout.write("unset key\n")
fout.write("set parametric\n")
fout.write("set title 'Band Structure'\n")
fout.write("set xlabel 'K'\n")
fout.write("set ylabel 'Energy(eV)'\n")
fout.write("set xtics(")
for i in range(len(self.labels_for_gnuplot)-1):
fout.write("'%s' %f, " % (self.labels_for_gnuplot[i], self.locs[i]))
fout.write("'%s' %f)\n" % (self.labels_for_gnuplot[-1], self.locs[-1]))
fout.write("set grid xtics ytics\n")
fout.write("set autoscale\n")
fout.write("plot 'all-bands-spin-polarized-spin-1.data' using 1:2 w l\n")
os.system("gnuplot all-bands-spin-polarized-spin-1.gnuplot")
with open("all-bands-spin-polarized-spin-2.gnuplot", 'w') as fout:
fout.write("set terminal gif\n")
fout.write("set output 'all-bands-spin-polarized-spin-2.gif'\n")
fout.write("unset key\n")
fout.write("set parametric\n")
fout.write("set title 'Band Structure'\n")
fout.write("set xlabel 'K'\n")
fout.write("set ylabel 'Energy(eV)'\n")
fout.write("set xtics(")
for i in range(len(self.labels_for_gnuplot)-1):
fout.write("'%s' %f, " % (self.labels_for_gnuplot[i], self.locs[i]))
fout.write("'%s' %f)\n" | |
Campus"),
("Remington College-Colorado Springs Campus","Remington College-Colorado Springs Campus"),
("Remington College-Columbia Campus","Remington College-Columbia Campus"),
("Remington College-Dallas Campus","Remington College-Dallas Campus"),
("Remington College-Fort Worth Campus","Remington College-Fort Worth Campus"),
("Remington College-Heathrow Campus","Remington College-Heathrow Campus"),
("Remington College-Honolulu Campus","Remington College-Honolulu Campus"),
("Remington College-Houston Campus","Remington College-Houston Campus"),
("Remington College-Houston Southeast Campus","Remington College-Houston Southeast Campus"),
("Remington College-Lafayette Campus","Remington College-Lafayette Campus"),
("Remington College-Little Rock Campus","Remington College-Little Rock Campus"),
("Remington College-Memphis Campus","Remington College-Memphis Campus"),
("Remington College-Mobile Campus","Remington College-Mobile Campus"),
("Remington College-Nashville Campus","Remington College-Nashville Campus"),
("Remington College-North Houston Campus","Remington College-North Houston Campus"),
("Remington College-Shreveport Campus","Remington College-Shreveport Campus"),
("Remington College-Tampa Campus","Remington College-Tampa Campus"),
("Renaissance Academie","Renaissance Academie"),
("Renaissance College-Massage Program","Renaissance College-Massage Program"),
("Rend Lake College","Rend Lake College"),
("Rensselaer BOCES-Practical Nursing Program","Rensselaer BOCES-Practical Nursing Program"),
("Rensselaer Hartford Graduate Center Inc","Rensselaer Hartford Graduate Center Inc"),
("Rensselaer Polytechnic Institute","Rensselaer Polytechnic Institute"),
("Renton Technical College","Renton Technical College"),
("Research College of Nursing","Research College of Nursing"),
("Resurrection University","Resurrection University"),
("Rhode Island College","Rhode Island College"),
("Rhode Island School of Design","Rhode Island School of Design"),
("Rhodes College","Rhodes College"),
("Rice University","Rice University"),
("Rich Mountain Community College","Rich Mountain Community College"),
("Richard Bland College of the College of William and Mary","Richard Bland College of the College of William and Mary"),
("Richland College","Richland College"),
("Richland Community College","Richland Community College"),
("Richmond Community College","Richmond Community College"),
("Richmond School of Health and Technology","Richmond School of Health and Technology"),
("Richmont Graduate University","Richmont Graduate University"),
("Rider University","Rider University"),
("Ridge Career Center","Ridge Career Center"),
("Ridgewater College","Ridgewater College"),
("Ridley-Lowell Business & Technical Institute-Binghamton","Ridley-Lowell Business & Technical Institute-Binghamton"),
("Ridley-Lowell Business & Technical Institute-Danbury","Ridley-Lowell Business & Technical Institute-Danbury"),
("Ridley-Lowell Business & Technical Institute-New London","Ridley-Lowell Business & Technical Institute-New London"),
("Ridley-Lowell Business & Technical Institute-Poughkeepsie","Ridley-Lowell Business & Technical Institute-Poughkeepsie"),
("Ringling College of Art and Design","Ringling College of Art and Design"),
("Rio Grande Bible Institute","Rio Grande Bible Institute"),
("Rio Hondo College","Rio Hondo College"),
("Rio Salado College","Rio Salado College"),
("Ripon College","Ripon College"),
("River Parishes Community College","River Parishes Community College"),
("River Valley Community College","River Valley Community College"),
("Riverland Community College","Riverland Community College"),
("Riverside City College","Riverside City College"),
("Riverside County Office of Education","Riverside County Office of Education"),
("Riverside School of Health Careers","Riverside School of Health Careers"),
("Rivertown School of Beauty Barber Skin Care and Nails","Rivertown School of Beauty Barber Skin Care and Nails"),
("Rivier University","Rivier University"),
("Rizzieri Aveda School for Beauty and Wellness","Rizzieri Aveda School for Beauty and Wellness"),
("Rizzieri Institute","Rizzieri Institute"),
("Roane State Community College","Roane State Community College"),
("Roane-Jackson Technical Center","Roane-Jackson Technical Center"),
("Roanoke College","Roanoke College"),
("Roanoke-Chowan Community College","Roanoke-Chowan Community College"),
("<NAME> Academy-Fall River","<NAME> Academy-Fall River"),
("<NAME> Academy-New Bedford","<NAME> Academy-New Bedford"),
("<NAME> Academy-Taunton","<NAME> Academy-Taunton"),
("<NAME> Academy-Worcester","<NAME> Academy-Worcester"),
("<NAME> Beauty Schools-North Plainfield","<NAME> Beauty Schools-North Plainfield"),
("<NAME> Beauty Schools-Perth Amboy","<NAME> Beauty Schools-Perth Amboy"),
("<NAME>iance Beauty Schools-West New York","<NAME> Beauty Schools-West New York"),
("<NAME> Educational Center","<NAME> Educational Center"),
("<NAME>ris University Illinois","<NAME>ris University Illinois"),
("Robert Morris University","Robert Morris University"),
("Robert Paul Academy of Cosmetology Arts & Sciences","Robert Paul Academy of Cosmetology Arts & Sciences"),
("Roberto-Venn School of Luthiery","Roberto-Venn School of Luthiery"),
("Roberts Wesleyan College","Roberts Wesleyan College"),
("Robeson Community College","Robeson Community College"),
("Rochester College","Rochester College"),
("Rochester Community and Technical College","Rochester Community and Technical College"),
("Rochester General Hospital School of Medical Technology","Rochester General Hospital School of Medical Technology"),
("Rochester Institute of Technology","Rochester Institute of Technology"),
("Rochester School of Hair Design","Rochester School of Hair Design"),
("Rock Valley College","Rock Valley College"),
("Rockefeller University","Rockefeller University"),
("Rockford University","Rockford University"),
("Rockhurst University","Rockhurst University"),
("Rockingham Community College","Rockingham Community College"),
("Rockland Community College","Rockland Community College"),
("Rockland County BOCES-Practical Nursing Program","Rockland County BOCES-Practical Nursing Program"),
("Rocky Mountain College of Art and Design","Rocky Mountain College of Art and Design"),
("Rocky Mountain College","Rocky Mountain College"),
("Rocky Mountain University of Health Professions","Rocky Mountain University of Health Professions"),
("Rocky Vista University","Rocky Vista University"),
("Roger Williams University School of Law","Roger Williams University School of Law"),
("Roger Williams University","Roger Williams University"),
("Rogers Academy of Hair Design","Rogers Academy of Hair Design"),
("Rogers State University","Rogers State University"),
("Rogies School of Beauty Culture","Rogies School of Beauty Culture"),
("Rogue Community College","Rogue Community College"),
("Rolf Institute of Structural Integration","Rolf Institute of Structural Integration"),
("Rolla Technical Institute","Rolla Technical Institute"),
("Rollins College","Rollins College"),
("Roman Academy of Beauty Culture","Roman Academy of Beauty Culture"),
("<NAME>'s Barber Styling","Ronny J's Barber Styling"),
("Roosevelt University","Roosevelt University"),
("Rosalind Franklin University of Medicine and Science","Rosalind Franklin University of Medicine and Science"),
("Rose State College","Rose State College"),
("Rose-Hulman Institute of Technology","Rose-Hulman Institute of Technology"),
("Roseburg Beauty College","Roseburg Beauty College"),
("Rosedale Bible College","Rosedale Bible College"),
("Rosedale Technical Institute","Rosedale Technical Institute"),
("Rosel School of Cosmetology","Rosel School of Cosmetology"),
("Roseman University of Health Sciences","Roseman University of Health Sciences"),
("Rosemead Beauty School","Rosemead Beauty School"),
("Rosemont College","Rosemont College"),
("Ross College-Sylvania","Ross College-Sylvania"),
("Ross Medical Education Center-Ann Arbor","Ross Medical Education Center-Ann Arbor"),
("Ross Medical Education Center-Bowling Green","Ross Medical Education Center-Bowling Green"),
("Ross Medical Education Center-Brighton","Ross Medical Education Center-Brighton"),
("Ross Medical Education Center-Canton","Ross Medical Education Center-Canton"),
("Ross Medical Education Center-Charleston","Ross Medical Education Center-Charleston"),
("Ross Medical Education Center-Cincinnati","Ross Medical Education Center-Cincinnati"),
("Ross Medical Education Center-Davison","Ross Medical Education Center-Davison"),
("Ross Medical Education Center-Dayton","Ross Medical Education Center-Dayton"),
("Ross Medical Education Center-Erlanger","Ross Medical Education Center-Erlanger"),
("Ross Medical Education Center-Flint","Ross Medical Education Center-Flint"),
("Ross Medical Education Center-Fort Wayne","Ross Medical Education Center-Fort Wayne"),
("Ross Medical Education Center-Granger","Ross Medical Education Center-Granger"),
("Ross Medical Education Center-Kentwood","Ross Medical Education Center-Kentwood"),
("Ross Medical Education Center-Kokomo","Ross Medical Education Center-Kokomo"),
("Ross Medical Education Center-Lansing","Ross Medical Education Center-Lansing"),
("Ross Medical Education Center-Madison Heights","Ross Medical Education Center-Madison Heights"),
("Ross Medical Education Center-Morgantown","Ross Medical Education Center-Morgantown"),
("Ross Medical Education Center-New Baltimore","Ross Medical Education Center-New Baltimore"),
("Ross Medical Education Center-Niles","Ross Medical Education Center-Niles"),
("Ross Medical Education Center-Ontario","Ross Medical Education Center-Ontario"),
("Ross Medical Education Center-Port Huron","Ross Medical Education Center-Port Huron"),
("Ross Medical Education Center-Portage","Ross Medical Education Center-Portage"),
("Ross Medical Education Center-Roosevelt Park","Ross Medical Education Center-Roosevelt Park"),
("Ross Medical Education Center-Saginaw","Ross Medical Education Center-Saginaw"),
("Ross Medical Education Center-Taylor","Ross Medical Education Center-Taylor"),
("Rosslyn Training Academy of Cosmetology","Rosslyn Training Academy of Cosmetology"),
("Rowan University","Rowan University"),
("Rowan-Cabarrus Community College","Rowan-Cabarrus Community College"),
("Roxborough Memorial Hospital School of Nursing","Roxborough Memorial Hospital School of Nursing"),
("Roxbury Community College","Roxbury Community College"),
("Royale College of Beauty","Royale College of Beauty"),
("Rudae's School of Beauty Culture-Ft Wayne","Rudae's School of Beauty Culture-Ft Wayne"),
("Rudae's School of Beauty Culture-Kokomo","Rudae's School of Beauty Culture-Kokomo"),
("Rudy & Kelly Academy of Hair and Nails","Rudy & Kelly Academy of Hair and Nails"),
("Rush University","Rush University"),
("Rust College","Rust College"),
("Rutgers University-Camden","Rutgers University-Camden"),
("Rutgers University-New Brunswick","Rutgers University-New Brunswick"),
("Rutgers University-Newark","Rutgers University-Newark"),
("Ryder Memorial School for Practical Nursing","Ryder Memorial School for Practical Nursing"),
("SABER College","SABER College"),
("SAE Institute of Technology-Atlanta","SAE Institute of Technology-Atlanta"),
("SAE Institute of Technology-Los Angeles","SAE Institute of Technology-Los Angeles"),
("SAE Institute of Technology-Miami","SAE Institute of Technology-Miami"),
("SAE Institute of Technology-Nashville","SAE Institute of Technology-Nashville"),
("SAE Institute of Technology-New York","SAE Institute of Technology-New York"),
("SBI Campus-An Affiliate of Sanford-Brown","SBI Campus-An Affiliate of Sanford-Brown"),
("SICE Paul Mitchell Partner School","SICE Paul Mitchell Partner School"),
("SIT Graduate Institute","SIT Graduate Institute"),
("SOLEX College","SOLEX College"),
("SOLEX Medical Academy","SOLEX Medical Academy"),
("SOWELA Technical Community College","SOWELA Technical Community College"),
("SUM Bible College and Theological Seminary","SUM Bible College and Theological Seminary"),
("SUNY Broome Community College","SUNY Broome Community College"),
("SUNY College at Brockport","SUNY College at Brockport"),
("SUNY College at Cortland","SUNY College at Cortland"),
("SUNY College at Geneseo","SUNY College at Geneseo"),
("SUNY College at Old Westbury","SUNY College at Old Westbury"),
("SUNY College at Oswego","SUNY College at Oswego"),
("SUNY College at Plattsburgh","SUNY College at Plattsburgh"),
("SUNY College at Potsdam","SUNY College at Potsdam"),
("SUNY College of Agriculture and Technology at Cobleskill","SUNY College of Agriculture and Technology at Cobleskill"),
("SUNY College of Environmental Science and Forestry","SUNY College of Environmental Science and Forestry"),
("SUNY College of Optometry","SUNY College of Optometry"),
("SUNY College of Technology at Alfred","SUNY College of Technology at Alfred"),
("SUNY College of Technology at Canton","SUNY College of Technology at Canton"),
("SUNY College of Technology at Delhi","SUNY College of Technology at Delhi"),
("SUNY Downstate Medical Center","SUNY Downstate Medical Center"),
("SUNY Empire State College","SUNY Empire State College"),
("SUNY Institute of Technology at Utica-Rome","SUNY Institute of Technology at Utica-Rome"),
("SUNY Maritime College","SUNY Maritime College"),
("SUNY Oneonta","SUNY Oneonta"),
("SUNY Westchester Community College","SUNY Westchester Community College"),
("SUNY at Albany","SUNY at Albany"),
("SUNY at Binghamton","SUNY at Binghamton"),
("SUNY at Fredonia","SUNY at Fredonia"),
("SUNY at Purchase College","SUNY at Purchase College"),
("SUNY-System Office","SUNY-System Office"),
("SW School of Business and Technical Careers","SW School of Business and Technical Careers"),
("SW School of Business and Technical Careers-Cosmetology","SW School of Business and Technical Careers-Cosmetology"),
("Sacramento City College","Sacramento City College"),
("Sacred Heart Major Seminary","Sacred Heart Major Seminary"),
("Sacred Heart School of Theology","Sacred Heart School of Theology"),
("Sacred Heart University","Sacred Heart University"),
("Saddleback College","Saddleback College"),
("Sage College","Sage College"),
("Sage School of Massage","Sage School of Massage"),
("Saginaw Chippewa Tribal College","Saginaw Chippewa Tribal College"),
("Saginaw Valley State University","Saginaw Valley State University"),
("Saint Ambrose University","Saint Ambrose University"),
("Saint Anselm College","Saint Anselm College"),
("Saint Anthony College of Nursing","Saint Anthony College of Nursing"),
("Saint Augustine College","Saint Augustine College"),
("Saint Augustine's University","Saint Augustine's University"),
("Saint Catharine College","Saint Catharine College"),
("Saint Charles Borromeo Seminary-Overbrook","Saint Charles Borromeo Seminary-Overbrook"),
("Saint Cloud State University","Saint Cloud State University"),
("Saint Edward's University","Saint Edward's University"),
("Saint Elizabeth College of Nursing","Saint Elizabeth College of Nursing"),
("Saint Elizabeth Medical Center School of Radiography","Saint Elizabeth Medical Center School of Radiography"),
("Saint Elizabeth School of Nursing","Saint Elizabeth School of Nursing"),
("Saint Francis Medical Center College of Nursing","Saint Francis Medical Center College of Nursing"),
("Saint Francis Medical Center School of Nursing","Saint Francis Medical Center School of Nursing"),
("Saint Francis University","Saint | |
= models.CharField(max_length=255)
education = models.CharField(max_length=255)
# profession
def vote_smart_candidate_bio_object_filter(one_candidate_bio):
    """
    Reduce a complete Vote Smart candidate-bio object to just the
    attribute subset we store locally.
    :param one_candidate_bio: bio object returned by the Vote Smart API
    :return: dict keyed by the Vote Smart attribute names we keep
    """
    # Attributes deliberately not carried over: specialMsg, parties,
    # title, shortTitle, name, type, status, firstElect, lastElect,
    # nextElect, termStart, termEnd, district, districtId, stateId.
    kept_attributes = (
        'candidateId',
        'crpId',  # Open Secrets ID
        'firstName',
        'nickName',
        'middleName',
        'lastName',
        'suffix',
        'birthDate',
        'birthPlace',
        'pronunciation',
        'gender',
        'family',
        'photo',
        'homeCity',
        'homeState',
        'religion',
    )
    return {attribute: getattr(one_candidate_bio, attribute)
            for attribute in kept_attributes}
class VoteSmartOfficialManager(models.Model):
    """
    Lookup helper for locally cached VoteSmartOfficial records.

    NOTE(review): declared as a models.Model rather than a
    models.Manager -- confirm this matches the module's convention and
    is intentional.
    """

    def __unicode__(self):
        return "VoteSmartOfficialManager"

    def retrieve_official_from_vote_smart_id(self, vote_smart_candidate_id):
        # Convenience alias: look up by Vote Smart candidateId only.
        return self.retrieve_vote_smart_official(vote_smart_candidate_id)

    def retrieve_vote_smart_official_from_we_vote_id(self, we_vote_id):
        vote_smart_candidate_id = 0
        vote_smart_official_manager = VoteSmartOfficialManager()
        # NOTE(review): we_vote_id is passed as the second positional
        # argument, which retrieve_vote_smart_official binds to its
        # first_name parameter -- looks like a stale call signature;
        # confirm against the intended API.
        return vote_smart_official_manager.retrieve_vote_smart_official(vote_smart_candidate_id, we_vote_id)

    def fetch_vote_smart_candidate_id_from_we_vote_id(self, we_vote_id):
        # Returns the Vote Smart candidateId for we_vote_id, or 0 when
        # the lookup did not succeed.
        vote_smart_candidate_id = 0
        vote_smart_official_manager = VoteSmartOfficialManager()
        # NOTE(review): as above, we_vote_id binds to the first_name
        # parameter of retrieve_vote_smart_official -- confirm.
        results = vote_smart_official_manager.retrieve_vote_smart_official(vote_smart_candidate_id, we_vote_id)
        if results['success']:
            return results['vote_smart_candidate_id']
        return 0

    #
    # def retrieve_vote_smart_official_from_we_vote_local_id(self, local_official_id):
    #     vote_smart_candidate_id = 0
    #     we_vote_id = ''
    #     vote_smart_official_manager = VoteSmartOfficialManager()
    #     return vote_smart_official_manager.retrieve_vote_smart_official(
    #         vote_smart_candidate_id, we_vote_id, official_maplight_id)
    #
    # def retrieve_vote_smart_official_from_full_name(self, official_name, state_code=None):
    #     vote_smart_candidate_id = 0
    #     we_vote_id = ''
    #     official_maplight_id = ''
    #     vote_smart_official_manager = VoteSmartOfficialManager()
    #
    #     results = vote_smart_official_manager.retrieve_vote_smart_official(
    #         vote_smart_candidate_id, first_name, last_name, state_code)
    #     return results

    def retrieve_vote_smart_official_from_name_components(self, first_name=None, last_name=None, state_code=None):
        # Name-based lookup; delegates to retrieve_vote_smart_official.
        vote_smart_candidate_id = 0
        vote_smart_official_manager = VoteSmartOfficialManager()
        results = vote_smart_official_manager.retrieve_vote_smart_official(
            vote_smart_candidate_id, first_name, last_name, state_code)
        return results

    # NOTE: searching by all other variables seems to return a list of objects
    def retrieve_vote_smart_official(
            self, vote_smart_candidate_id=None, first_name=None, last_name=None, state_code=None):
        """
        We want to return one and only one official.

        Lookup order:
          1. by Vote Smart candidateId (the primary key) when provided;
          2. otherwise by name: case-insensitive firstName prefix match
             and/or case-insensitive exact lastName match, optionally
             narrowed by officeStateId; the first match wins.

        :param vote_smart_candidate_id: Vote Smart candidateId (primary key)
        :param first_name: case-insensitive prefix of firstName
        :param last_name: case-insensitive exact lastName
        :param state_code: case-insensitive officeStateId (applied in the
            name-search branch only)
        :return: dict with 'success', 'status', 'error_result',
            'DoesNotExist', 'MultipleObjectsReturned',
            'vote_smart_official_found', 'vote_smart_candidate_id' and
            'vote_smart_official'
        """
        error_result = False
        exception_does_not_exist = False
        exception_multiple_object_returned = False
        # Empty placeholder returned when nothing is found.
        vote_smart_official = VoteSmartOfficial()
        try:
            if positive_value_exists(vote_smart_candidate_id):
                vote_smart_official = VoteSmartOfficial.objects.get(candidateId=vote_smart_candidate_id)
                vote_smart_candidate_id = convert_to_int(vote_smart_official.candidateId)
                status = "RETRIEVE_VOTE_SMART_OFFICIAL_FOUND_BY_ID"
            elif positive_value_exists(first_name) or positive_value_exists(last_name):
                official_queryset = VoteSmartOfficial.objects.all()
                if positive_value_exists(first_name):
                    official_queryset = official_queryset.filter(firstName__istartswith=first_name)
                if positive_value_exists(last_name):
                    official_queryset = official_queryset.filter(lastName__iexact=last_name)
                if positive_value_exists(state_code):
                    official_queryset = official_queryset.filter(officeStateId__iexact=state_code)
                # Only the first match is used; additional matches are ignored.
                vote_smart_official_list = list(official_queryset[:1])
                if vote_smart_official_list:
                    vote_smart_official = vote_smart_official_list[0]
                else:
                    vote_smart_official = VoteSmartOfficial()
                # NOTE(review): on the empty placeholder candidateId is
                # presumably empty, which makes 'success' False below --
                # confirm convert_to_int handles that value.
                vote_smart_candidate_id = convert_to_int(vote_smart_official.candidateId)
                status = "RETRIEVE_VOTE_SMART_OFFICIAL_FOUND_BY_NAME"
            else:
                status = "RETRIEVE_VOTE_SMART_OFFICIAL_SEARCH_INDEX_MISSING"
        except VoteSmartOfficial.MultipleObjectsReturned as e:
            exception_multiple_object_returned = True
            status = "RETRIEVE_VOTE_SMART_OFFICIAL_MULTIPLE_OBJECTS_RETURNED"
        except VoteSmartOfficial.DoesNotExist:
            exception_does_not_exist = True
            status = "RETRIEVE_VOTE_SMART_OFFICIAL_NOT_FOUND"

        results = {
            'success': True if positive_value_exists(vote_smart_candidate_id) else False,
            'status': status,
            'error_result': error_result,
            'DoesNotExist': exception_does_not_exist,
            'MultipleObjectsReturned': exception_multiple_object_returned,
            'vote_smart_official_found': True if positive_value_exists(vote_smart_candidate_id) else False,
            'vote_smart_candidate_id': vote_smart_candidate_id,
            'vote_smart_official': vote_smart_official,
        }
        return results
class VoteSmartOfficial(models.Model):
    """
    Locally cached record of an official as returned by the Vote Smart
    Officials API.
    http://api.votesmart.org/docs/Officials.html
    """
    candidateId = models.CharField(max_length=15, primary_key=True)  # Vote Smart id; doubles as our primary key
    firstName = models.CharField(max_length=255)
    nickName = models.CharField(max_length=255)
    middleName = models.CharField(max_length=255)
    lastName = models.CharField(max_length=255)
    suffix = models.CharField(max_length=255)
    title = models.CharField(max_length=255)
    electionParties = models.CharField(max_length=255)
    officeParties = models.CharField(max_length=255)
    officeStatus = models.CharField(max_length=255)
    officeDistrictId = models.CharField(max_length=255)
    officeDistrictName = models.CharField(max_length=255)
    officeTypeId = models.CharField(max_length=255)
    officeId = models.CharField(max_length=255)
    officeName = models.CharField(max_length=255)
    officeStateId = models.CharField(max_length=255)  # state code; used by name-based official lookups
def vote_smart_official_object_filter(one_official):
    """
    Reduce a complete Vote Smart official object to just the attribute
    subset we store locally.
    :param one_official: official object returned by the Vote Smart API
    :return: dict keyed by the Vote Smart attribute names we keep
    """
    kept_attributes = (
        'candidateId',
        'firstName',
        'nickName',
        'middleName',
        'lastName',
        'suffix',
        'title',
        'electionParties',
        'officeParties',
        'officeStatus',
        'officeDistrictId',
        'officeDistrictName',
        'officeTypeId',
        'officeId',
        'officeName',
        'officeStateId',
    )
    return {attribute: getattr(one_official, attribute)
            for attribute in kept_attributes}
class VoteSmartRatingManager(models.Model):
    """
    Lookup helper for VoteSmartRating records.

    A large commented-out copy of the VoteSmartCandidateManager
    retrieval methods previously lived here as dead template code; it
    has been removed (recoverable from version control) to keep this
    class readable.

    NOTE(review): declared as a models.Model rather than a
    models.Manager, matching the sibling manager classes in this
    module -- confirm this is intentional.
    """

    def __unicode__(self):
        return "VoteSmartRatingManager"
class VoteSmartCategory(models.Model):
    """One Vote Smart rating category.
    See http://api.votesmart.org/docs/Rating.html
    """
    # Vote Smart's own category id, reused as our primary key
    categoryId = models.CharField(max_length=15, primary_key=True)
    # Display name of the category
    name = models.CharField(max_length=255)
def vote_smart_category_filter(category):
    """
    Reduce a complete Vote Smart category record to just the fields we store locally.
    :param category: object carrying Vote Smart category attributes
    :return: dict holding only the locally-used fields
    """
    return {
        'categoryId': category.categoryId,
        'name': category.name,
    }
class VoteSmartRating(models.Model):
    """
    http://api.votesmart.org/docs/Rating.html
    A Vote Smart rating is like a voter guide, because it contains a package of candidateId/rating pairs like this:
    {'candidateRating': [{'candidateId': '53279', 'rating': '40'},
                         {'candidateId': '53266', 'rating': '90'},
    """
    # Vote Smart's rating id, reused as our primary key
    ratingId = models.CharField(max_length=15, primary_key=True)
    # Id of the special interest group that issued the rating
    sigId = models.CharField(verbose_name="special interest group id", max_length=15)
    # Period the rating covers, as supplied by Vote Smart
    timeSpan = models.CharField(max_length=255)
    ratingName = models.CharField(max_length=255)
    ratingText = models.TextField()
# This is the filter used for the Vote Smart call: Rating.getCandidateRating
# http://api.votesmart.org/docs/Rating.html
def vote_smart_candidate_rating_filter(rating):
    """
    Reduce a complete Vote Smart candidate-rating record to just the fields we store locally.
    :param rating: object carrying Vote Smart rating attributes
    :return: dict holding only the locally-used fields
    """
    # Vote Smart exposes the time span under the lower-cased attribute
    # "timespan"; we store it locally as "timeSpan".
    return {
        'ratingId': rating.ratingId,
        'rating': rating.rating,
        'timeSpan': rating.timespan,
        'ratingName': rating.ratingName,
        'ratingText': rating.ratingText,
        'sigId': rating.sigId,
    }
# This is the filter used for the Vote Smart call: Rating.getSigRatings
# http://api.votesmart.org/docs/Rating.html
def vote_smart_rating_list_filter(rating):
    """
    Reduce a complete Vote Smart rating record to just the fields we store locally.
    :param rating: object carrying Vote Smart rating attributes
    :return: dict holding only the locally-used fields
    """
    # Vote Smart exposes the time span under the lower-cased attribute
    # "timespan"; we store it locally as "timeSpan".
    return {
        'ratingId': rating.ratingId,
        'timeSpan': rating.timespan,
        'ratingName': rating.ratingName,
        'ratingText': rating.ratingText,
    }
class VoteSmartRatingOneCandidate(models.Model):
    """
    http://api.votesmart.org/docs/Rating.html
    A Vote Smart rating is like a voter guide, because it contains a package of candidateId/rating pairs like this:
    {'candidateRating': [{'candidateId': '53279', 'rating': '40'},
                         {'candidateId': '53266', 'rating': '90'},
    """
    # One (candidateId, rating) pair within a rating package; no primary key
    # override, so Django supplies an auto id.
    ratingId = models.CharField(max_length=15)
    sigId = models.CharField(verbose_name="special interest group id", max_length=15)
    candidateId = models.CharField(max_length=15)
    timeSpan = models.CharField(max_length=255)
    rating = models.CharField(max_length=255)
    ratingName = models.CharField(max_length=255)
    ratingText = models.TextField()
def vote_smart_rating_one_candidate_filter(rating_one_candidate):
    """
    Reduce one candidateId/rating pair from Vote Smart to just the fields we use locally.
    :param rating_one_candidate: object carrying one candidateId/rating pair
    :return: dict holding only the locally-used fields
    """
    return {
        'candidateId': rating_one_candidate.candidateId,
        'rating': rating_one_candidate.rating,
    }
class VoteSmartRatingCategoryLink(models.Model):
    """Joins one rating/candidate pair to a rating category.
    See http://api.votesmart.org/docs/Rating.html
    """
    ratingId = models.CharField(max_length=15)
    sigId = models.CharField(verbose_name="group id for this rating", max_length=15)
    candidateId = models.CharField(verbose_name="vote smart candidate id for this rating", max_length=15)
    timeSpan = models.CharField(max_length=255)
    categoryId = models.CharField(verbose_name="category id for this rating", max_length=15)
    categoryName = models.CharField(verbose_name="category name", max_length=255)
class VoteSmartSpecialInterestGroup(models.Model):
    """One special interest group that issues candidate ratings.
    See http://api.votesmart.org/docs/Rating.html
    """
    # Vote Smart's group id, reused as our primary key
    sigId = models.CharField(verbose_name="special interest group id", max_length=15, primary_key=True)
    parentId = models.CharField(max_length=15)
    stateId = models.CharField(max_length=2)
    name = models.CharField(verbose_name="name of special interest group", max_length=255)
    description = models.TextField()
    # Contact details as supplied by the Vote Smart API
    address = models.CharField(max_length=255)
    city = models.CharField(max_length=255)
    state = models.CharField(max_length=255)
    zip = models.CharField(max_length=255)
    phone1 = models.CharField(max_length=255)
| |
<gh_stars>0
# usage:
#
# import SedmlToRr as s2p
# ret = s2p.sedml_to_python("full_path/sedml_file.sedml")
# exec ret
#
# import SedmlToRr as s2p
# ret = s2p.sedml_to_python("full_path/sedml_archive.sedx")
# exec ret
# a "full_path/sedml_archive" folder will be created and the unarchived files placed within
#
# import SedmlToRr as s2p
# execfile("full_path/sedml_archive.py")
#
# the .sedml extension indicates a sedml file, the .sedx extension indicates a sedml archive
import sys, re, zipfile, shutil
import os.path
import libsedml
from collections import namedtuple
# Record pairing a SED-ML data generator with the task, sedml variable and
# sbml variable ids it references.
MatchingSetsOfVariableIDs = namedtuple("MatchingSetsOfVariableIDs", "datagenID, taskReference, sedmlID, sbmlID")
# Record pairing a repeated-task data generator with the size of its range.
MatchingSetsOfRepeatedTasksDataGenerators = namedtuple("MatchingSetsOfRepeatedTasksDataGenerators", "datagenID, rangeSize")
# NOTE(review): these two globals are declared `global` in helpers below but
# never visibly rebound there -- confirm whether they are still used.
modelname = str()
outdir = str()
mapping = [ ('repeated','r'), ('Repeated','r'), ('task','t'), ('Task', 't'),
            ('data','d'), ('Data','d'), ('generator','g'), ('Generator', 'g')]
# Map of replaced words
# Entry point
def sedml_to_python(fullPathName): # full path name to SedML model
    """Translate a SED-ML file (.sedml) or SED-ML archive (.sedx) into a
    Python/roadrunner script and return the generated script as a string.

    For a .sedx archive the files are first unzipped into a folder named after
    the archive, and the manifest is read to locate the actual SED-ML file.
    Generation works by redirecting sys.stdout into a StringIO buffer and
    `print`-ing the script lines.
    """
    from os.path import basename
    global modelname
    # NOTE(review): the global is `modelname` but the local assigned below is
    # `modelName` (different case), so the global is never updated here -- confirm.
    modelName = os.path.splitext(basename(fullPathName))[0]
    extension = os.path.splitext(basename(fullPathName))[1]
    # Directory containing the SED-ML file (everything before the base name).
    path = fullPathName.rsplit(basename(fullPathName),1)[0]
    class Tee(object):
        # File-like object duplicating every write to all wrapped streams.
        def __init__(self, *files):
            self.files = files
        def write(self, obj):
            for f in self.files:
                f.write(obj)
    if extension == ".sedx":
        # Unzip the archive next to it and resolve the real SED-ML file
        # through the archive manifest.
        import unzipy as uz
        zip = zipfile.ZipFile(fullPathName, 'r')
        path = path + modelName
        uz.unZip(path, zip)
        zip.close()
        fullPathName = uz.readManifest(path + "/manifest.xml")
        k = fullPathName.rfind("/")
        fullPathName = fullPathName[k+1:]
        fullPathName = path + "/" + fullPathName;
    sedmlDoc = libsedml.readSedML(fullPathName)
    if sedmlDoc.getErrorLog().getNumFailsWithSeverity(libsedml.LIBSEDML_SEV_ERROR) > 0:
        print sedmlDoc.getErrorLog().toString()
        sys.exit(2)
    import StringIO
    f = StringIO.StringIO()
    original = sys.stdout
    #sys.stdout = Tee(sys.stdout, f) # output to console and file
    sys.stdout = Tee(f) # output to file only
    # Everything printed from here on lands in the StringIO buffer.
    print "# Translated SED-ML"
    print "# Beginning of generated script"
    print "import roadrunner"
    print "import numpy as np"
    print "import matplotlib.pyplot as plt"
    print ""
    # One roadrunner instance plus its tasks per SED-ML model.
    for i in range(0, sedmlDoc.getNumModels()):
        currentModel = sedmlDoc.getModel(i)
        print "# Execute the tasks of model: " + currentModel.getId()
        rrName = currentModel.getId()
        #rrName = "rr" + str(i)
        print rrName + " = roadrunner.RoadRunner()"
        generateTasks(rrName, sedmlDoc, currentModel, path)
        print ""
    print "# List of Data Generators"
    dataGeneratorsList = []
    for i in range(0, sedmlDoc.getNumModels()):
        currentModel = sedmlDoc.getModel(i)
        generateData(sedmlDoc, currentModel, dataGeneratorsList)
    print "# List of Outputs"
    generateOutputs(sedmlDoc, dataGeneratorsList)
    print "# End of generated script"
    contents = f.getvalue()
    sys.stdout = original # restore print to stdout only
    f.close()
    return contents
def generateTasks(rrName, sedmlDoc, currentModel, path):
    """Emit the python statements that execute the tasks of `currentModel`.

    Prints (into the redirected stdout) the roadrunner load call, any
    changeAttribute pre-simulation changes, then delegates to
    generateSimulation once per normal task and once per range point of each
    repeated task.
    """
    listOfChanges = []      # change statements, replayed per range point when the model is reset
    loadModel(rrName, sedmlDoc, currentModel, path)
    #print(rrName + ".simulateOptions.structuredResult = False")
    bFoundAtLeastOneTask = False
    # Emit the model's changeAttribute changes; anything else is unsupported.
    for i in range(0, currentModel.getNumChanges()):
        aChange = currentModel.getChange(i)
        if aChange.getElementName() == "changeAttribute":
            newValue = aChange.getNewValue()
            variableName = aChange.getTarget()
            # Only parameter and species targets are supported.
            if (("model" in variableName) and ("parameter" in variableName)):
                pass
            elif (("model" in variableName) and ("species" in variableName)):
                pass
            else:
                print "# Unsupported changeAttribute target " + variableName
                return # nothing to do repeatedly since our change is bad
            # Extract the bare id from the XPath-like target string.
            variableName = variableName.rsplit("id=\'",1)[1]
            variableName = variableName.rsplit("\'",1)[0]
            aStr = rrName + ".model[\"init([" + variableName + "])\"] = " + newValue # set amount
            #aStr = rrName + ".model[\"[" + variableName + "]\"] = " + newValue # set amount
            listOfChanges.append(aStr)
            print aStr
        else:
            aStr = "# Unsupported change " + aChange.getElementName() + " for model " + currentModel.getId()
            print aStr
            return
    # The 'selections' are a list of all the 'variable' elements from the dataGenerators
    # we first deal with normal tasks, if any
    for e in range(0,sedmlDoc.getNumTasks()):
        task1 = sedmlDoc.getTask(e)
        if task1.getElementName() == "repeatedTask":
            pass
        else:
            if task1.getModelReference() != currentModel.getId():
                continue
            variablesDictionary = []
            variablesList = []
            populateVariableLists(sedmlDoc, task1, variablesList, variablesDictionary)
            if len(variablesList) == 0: # nothing to do if no data generators refer to this task
                continue
            # -1 marks a non-repeated task for the id flattening logic.
            generateSimulation(rrName, sedmlDoc, currentModel, task1, variablesList, variablesDictionary, -1)
            bFoundAtLeastOneTask = True
    # now deal with repeated tasks, if any
    for e in range(0,sedmlDoc.getNumTasks()):
        task1 = sedmlDoc.getTask(e)
        if task1.getElementName() == "repeatedTask":
            for i in range(0, task1.getNumSubTasks()):
                task2 = task1.getSubTask(i) # the subtask which points to the real task we need to call repeatedly for each value in range
                task2 = task2.getTask() # the Id of the real task
                task2 = sedmlDoc.getTask(task2) # get real task by Id
                if task2.getModelReference() != currentModel.getId():
                    continue
                aRange = task1.getRange(0) # we assume one single master range - we don't know how to deel flatten
                if aRange.getElementName() != "uniformRange":
                    print "# Only uniformRange ranges are supported at this time"
                    continue
                # if resetModel is true we need to reapply all the changes from above
                print ""
                bResetModel = task1.getResetModel()
                if bResetModel == True:
                    print rrName + ".simulateOptions.resetModel = True"
                else:
                    print rrName + ".simulateOptions.resetModel = False"
                # need to use the RepeatedTask because the data generators refer to it
                variablesDictionary = [] # matching pairs of sedml variable ID and sbml variable ID
                variablesList = [] # the IDs of the sbml variables, non duplicate entries
                populateVariableLists(sedmlDoc, task1, variablesList, variablesDictionary)
                # iterate over all changes
                # NOTE(review): only the first task change is inspected here.
                aChange = task1.getTaskChange(0)
                if aChange.getElementName() != "setValue":
                    print "# Only setValue changes are supported at this time"
                    continue
                variableName = aChange.getTarget()
                # Extract the bare id from the XPath-like target string.
                vn = variableName
                vn = vn.rsplit("id=\'",1)[1]
                vn = vn.rsplit("\'",1)[0]
                # for each point in the range we compute the new values of the variables affected
                # and generate a task
                for j in range(0, aRange.getNumberOfPoints()):
                    print ""
                    if bResetModel == True: # if we reset the model we need to repeat again all the Changes from above
                        for aStr in listOfChanges:
                            print aStr
                    # Linear interpolation across the uniform range.
                    start = aRange.getStart()
                    end = aRange.getEnd()
                    newValue = start + j * (end - start) / (aRange.getNumberOfPoints()-1)
                    if (("model" in variableName) and ("parameter" in variableName)):
                        pass
                    elif (("model" in variableName) and ("species" in variableName)):
                        pass
                    else:
                        print "# Unsupported setValue target " + variableName
                        return # nothing to do repeatedly since our change is bad
                    print rrName + ".model[\"init([" + vn + "])\"] = " + str(newValue) # set amount
                    # need to use both the real Task (task2) because it has the reference to model and simulation
                    # and the repeated task (task1) because its Id is used for generating the flattened Id's
                    generateSimulation(rrName, sedmlDoc, currentModel, task2, variablesList, variablesDictionary, j, task1)
                    bFoundAtLeastOneTask = True
    if bFoundAtLeastOneTask == False:
        print "# There are no simulations to run for this model: " + currentModel.getId();
def loadModel(rrName, sedmlDoc, currentModel, path):
global modelname
global outdir
string = currentModel.getSource()
if isId(string): # it's the Id of a model
originalModel = sedmlDoc.getModel(string)
if originalModel != None:
string = originalModel.getSource() # !!! for now, we reuse the original model to which the current model is referring to
else:
pass
if string.startswith("."): # relative location, we trust it but need it trimmed
if string.startswith("../"):
string = string[3:]
elif string.startswith("./"):
string = string[2:]
print rrName + ".load('" + path.replace("\\","/") + string + "')" # SBML model name recovered from "source" attr
#from os.path import expanduser
#path = expanduser("~")
#print(rrName + ".load('" + path + "\\" + string + "')") # SBML model name recovered from "source" attr
elif "\\" or "/" or "urn:miriam" not in string:
print rrName + ".load('" + path.replace("\\","/") + string + "')"
elif string.startswith("urn:miriam"):
print "Downloading model from BioModels Database..."
astr = string.rsplit(':', 1)
astr = astr[1]
string = path + astr + ".xml"
if os.path.exists(string) == False:
import httplib
conn = httplib.HTTPConnection("www.ebi.ac.uk")
conn.request("GET", "/biomodels-main/download?mid=" + astr)
r1 = conn.getresponse()
#print(r1.status, r1.reason)
data1 = r1.read()
conn.close()
f1 = open(string, 'w')
f1.write(data1);
f1.close()
else:
pass
print rrName + ".load('" + string +"'))"
else: # assume absolute path pointing to hard disk location
string = string.replace("\\", "/")
print rrName + ".load('" + string + "')"
def populateVariableLists(sedmlDoc, task1, variablesList, variablesDictionary):
for i in range(0, sedmlDoc.getNumDataGenerators()):
current = sedmlDoc.getDataGenerator(i)
vl = current.getListOfVariables()
for j in range(0, vl.size()):
currentVar = vl[j]
if currentVar.getTaskReference() != task1.getId():
continue
if currentVar.isSetSymbol(): # symbol field of variable is set
cvs = currentVar.getSymbol()
astr = cvs.rsplit("symbol:")
astr = astr[1]
if variablesList.count(astr) < 1:
variablesList.append(astr)
m = MatchingSetsOfVariableIDs(current.getId(), currentVar.getTaskReference(), currentVar.getId(), astr)
#print m
variablesDictionary.append(m)
elif currentVar.isSetTarget():
cvt = currentVar.getTarget() # target field of variable is set
astr = cvt.rsplit("@id='")
astr = astr[1]
astr = astr[:-2]
if variablesList.count(astr) < 1:
variablesList.append(astr)
m = MatchingSetsOfVariableIDs(current.getId(), currentVar.getTaskReference(), currentVar.getId(), astr)
variablesDictionary.append(m)
else:
print "# Unrecognized data generator variable"
sys.exit(5)
return
def generateSimulation(rrName, sedmlDoc, currentModel, task1, variablesList, variablesDictionary, repeatedTaskIndex, repeatedTask = None):
__uniform = False
__steady = False
for j in range(0, sedmlDoc.getNumSimulations()):
currentSimulation | |
# The original code repeated the same sampling/timing loop once per
# (combination rule, frame size, number of bbas) -- factored into helpers.
# Output (stdout and the log file `f`) is byte-identical to the original.

def _report(line):
    # Echo one result line to stdout and to the open log file `f`.
    print(line)
    f.write(line + "\n")


def _section(title):
    # Visual separator followed by the section title.
    _report("- " * 40)
    _report(title)


def _sample_bbas(elements, number, count=5):
    # Draw `count` independent random mass functions, each spreading mass
    # uniformly over `number` focal elements sampled from `elements`.
    # Sampling order matches the original m1..m5 assignments exactly.
    return [MassFunction(*[(x, 1.0 / number) for x in random.sample(elements, number)])
            for _ in range(count)]


def _bench_combination(method_name, elements, frame_size):
    # Time `method_name` combining 2..5 bbas, for every focal-set count in
    # `numberOfElements` that fits within `elements`.
    for number in numberOfElements:
        if number <= len(elements):
            bbas = _sample_bbas(elements, number)
            combine = getattr(bbas[0], method_name)
            for nb_bbas in range(2, 6):
                label = "size %d, focals %d, %d bbas" % (frame_size, number, nb_bbas)
                elapsed = time_function(nb_iterations, combine, *bbas[1:nb_bbas],
                                        timeout=timeout, verbose=False)
                _report(format_time(label, elapsed, nb_iterations, timeout))


# Tail of the "Dempster (unsafe)" section: medium and large frames.
_bench_combination("combination_dempster_unsafe", mediumElements, 10)
_bench_combination("combination_dempster_unsafe", bigElements, 10000)

_section("Disjunctive:")
_bench_combination("combination_disjunctive", smallElements, 3)
_bench_combination("combination_disjunctive", mediumElements, 10)
_bench_combination("combination_disjunctive", bigElements, 10000)

_section("Disjunctive (unsafe):")
_bench_combination("combination_disjunctive_unsafe", smallElements, 3)
_bench_combination("combination_disjunctive_unsafe", mediumElements, 10)
_bench_combination("combination_disjunctive_unsafe", bigElements, 10000)

_section("Yager:")
for number in numberOfElements:
if | |
import jax.numpy as np
from jax import vmap
from jax.ops import index_add, index_update, index
from jax.scipy.linalg import cho_factor, cho_solve
from jax.scipy.linalg import solve as jsc_solve
from .utils import mvn_logpdf, solve, transpose, inv, inv_vmap
from jax.lax import scan, associative_scan
import math
INV2PI = (2 * math.pi) ** -1
def get_diag_and_offdiag_components(num_latents, zeros, i, noise_cov):
    """Build one additive component of a block-diagonal matrix.

    Places a 1 at flat position `i` of a (num_latents, num_latents) mask and
    Kronecker-multiplies diag(noise_cov) with that mask; summing these
    components over all i reconstructs the full block-diagonal matrix.

    :param num_latents: block dimension D
    :param zeros: zero vector of length D**2
    :param i: flat index of the mask entry to set
    :param noise_cov: length-N vector holding entry i of each of the N blocks
    :return: [N*D, N*D] additive component of the block-diagonal matrix
    """
    # `.at[i].add(1.)` replaces `jax.ops.index_add(zeros, index[i], 1.)`:
    # the jax.ops indexed-update API was deprecated and removed from JAX.
    temp_vec = zeros.at[i].add(1.)
    temp_mat = temp_vec.reshape(num_latents, num_latents)
    return np.kron(np.diag(noise_cov), temp_mat)  # block-diag
def blocktensor_to_blockdiagmatrix(blocktensor):
    """
    Convert [N, D, D] tensor to [ND, ND] block-diagonal matrix
    """
    num_blocks = blocktensor.shape[0]
    block_dim = blocktensor.shape[1]
    # One additive [ND, ND] component per entry of a D x D block; summing the
    # D**2 components assembles the full block-diagonal matrix.
    components = vmap(get_diag_and_offdiag_components, in_axes=(None, None, 0, 1))(
        block_dim,
        np.zeros(block_dim ** 2),
        np.arange(block_dim ** 2),
        blocktensor.reshape(num_blocks, -1),
    )
    return np.sum(components, axis=0)
def get_blocks(blockdiagmatrix, D, i):
    """Extract the i-th D x D diagonal block of a block-diagonal matrix."""
    start = D * i
    return blockdiagmatrix[start:start + D, start:start + D]
@vmap
def get_3d_off_diag(offdiag_elems):
    """Assemble a symmetric 3x3 matrix from the two first-off-diagonal
    elements (batched over the leading axis by vmap)."""
    basis_01 = np.array([[0., 1., 0.], [1., 0., 0.], [0., 0., 0.]])
    basis_12 = np.array([[0., 0., 0.], [0., 0., 1.], [0., 1., 0.]])
    return offdiag_elems[0] * basis_01 + offdiag_elems[1] * basis_12
def blockdiagmatrix_to_blocktensor(blockdiagmatrix, N, D):
    """
    Convert [ND, ND] block-diagonal matrix to [N, D, D] tensor
    TODO: extend to D>3 case
    """
    # Per-block main diagonals, as N diagonal D x D matrices.
    main_diags = vmap(np.diag)(np.diag(blockdiagmatrix).reshape(N, D))
    if D == 1:
        return main_diags
    # Prepending a zero column shifts the matrix so its first off-diagonal
    # lands on the main diagonal and can be read with np.diag.
    shifted_once = np.concatenate([np.zeros([N * D, 1]), blockdiagmatrix], axis=1)
    first_offdiag = np.diag(shifted_once).reshape(N, D)[:, 1:]
    if D == 2:
        return main_diags + first_offdiag[..., None] * np.fliplr(np.eye(D))
    if D == 3:
        # Shift once more to reach the second off-diagonal (block corners).
        shifted_twice = np.concatenate([np.zeros([N * D, 1]), shifted_once], axis=1)
        corner_elements = np.diag(shifted_twice).reshape(N, D)[:, 2:]
        corner_mask = np.array([[0., 0., 1.], [0., 0., 0.], [1., 0., 0.]])
        return (main_diags
                + get_3d_off_diag(first_offdiag)
                + corner_elements[..., None] * corner_mask)
    raise NotImplementedError('Multi-latent case with D>3 not implemented')
def gaussian_conditional(kernel, y, noise_cov, X, X_star=None):
    """
    Compute the GP posterior / predictive distribution using standard Gaussian identities
    :param kernel: an instantiation of the kernel class
    :param y: observations [N, 1]
    :param noise_cov: observation noise covariance blocks, [N, D, D]
    :param X: training inputs [N, D]
    :param X_star: test inputs [N*, D]; None means condition at the training inputs
    :return:
        mean: posterior mean [N, 1]
        covariance: posterior covariance [N, N]
    """
    Kff = kernel(X, X)
    if X_star is None:  # inference / learning
        Kfs = Kff
        Kss = Kff
    else:  # prediction
        Kfs = kernel(X, X_star)
        Kss = kernel(X_star, X_star)
    # Multi-latent version: the noise blocks become one block-diagonal matrix.
    Ky = Kff + blocktensor_to_blockdiagmatrix(noise_cov)
    # ---- standard Gaussian conditional formula ----
    gain = solve(Ky, Kfs).T  # Kfs^T Ky^{-1}
    mean = gain @ y.reshape(-1, 1)
    covariance = Kss - gain @ Kfs
    return mean, covariance
def sparse_gaussian_conditional(kernel, nat1lik, nat2lik, X, Z):
    """
    Compute q(u), the approximate posterior over the inducing points.
    :param kernel: an instantiation of the kernel class
    :param nat1lik: likelihood first natural parameter [N, 1]
    :param nat2lik: likelihood noise precision; passed through
        blocktensor_to_blockdiagmatrix, so it appears to be [N, D, D] blocks
        rather than [N, 1] -- TODO confirm against callers
    :param X: training inputs [N, D]
    :param Z: inducing inputs [N*, D]
    :return:
        mean: posterior mean [N, 1]
        covariance: posterior covariance [N, N]
    """
    Kuf = kernel(Z, X)
    Kuu = kernel(Z, Z)
    # nat2prior = inv(Kuu)
    Wuf = solve(Kuu, Kuf)  # conditional mapping, Kuu^-1 Kuf
    # Project the likelihood natural parameters from data space to the
    # inducing-point space.
    nat1lik_fullrank = Wuf @ nat1lik.reshape(-1, 1)
    # nat2lik_fullrank = Wuf @ np.diag(np.squeeze(nat2lik)) @ transpose(Wuf)
    nat2lik_block_diag = blocktensor_to_blockdiagmatrix(nat2lik)  # multi-latent version
    nat2lik_fullrank = Wuf @ nat2lik_block_diag @ transpose(Wuf)
    # nat1post = nat1lik_fullrank # prior nat1 is zero
    # nat2post = nat2prior + nat2lik_fullrank
    # covariance = inv(nat2post)
    # mean = covariance @ nat1post
    # Convert the natural parameters into an effective likelihood mean and
    # covariance, then apply the standard Gaussian conditional below.
    # NOTE(review): this inverts nat2lik_fullrank directly -- assumes it is
    # full rank; confirm for the cases this is called with.
    likcov = inv(nat2lik_fullrank)
    likmean = likcov @ nat1lik_fullrank
    Ky = Kuu + likcov
    # ---- compute approximate posterior using standard Gaussian conditional formula ----
    Kuu_iKy = solve(Ky, Kuu).T
    mean = Kuu_iKy @ likmean
    covariance = Kuu - Kuu_iKy @ Kuu
    return mean, covariance
def sparse_conditional_post_to_data(kernel, post_mean, post_cov, X, Z):
    """
    Marginalise the inducing-point posterior out to the data locations,
    i.e. compute int p(f|u) q(u) du.
    :param kernel: an instantiation of the kernel class
    :param post_mean: posterior mean [M, 1]
    :param post_cov: posterior covariance [M, M]
    :param X: training inputs [N, D]
    :param Z: inducing inputs [N*, D]
    :return:
        mean: posterior mean [N, 1]
        covariance: posterior covariance [N, N]
    """
    X = X.reshape(X.shape[0], -1)  # make sure the inputs are 2D
    Kff = kernel(X, X)
    Kuf = kernel(Z, X)
    Kuu = kernel(Z, Z)
    # Conditional mapping Kuu^-1 Kuf and the Nystrom approximation Qff.
    mapping = solve(Kuu, Kuf)
    Qff = transpose(Kuf) @ mapping  # Kfu Kuu^-1 Kuf
    residual_cov = Kff - Qff
    # Push the inducing-point moments through the conditional mapping.
    mean_f = transpose(mapping) @ post_mean.reshape(-1, 1)
    cov_f = residual_cov + transpose(mapping) @ post_cov @ mapping
    return mean_f, cov_f
def process_noise_covariance(A, Pinf):
    """
    Stationary process noise of the discretised state space model:
    Q = P_inf - A P_inf A^T.
    :param A: discrete-time state transition matrix
    :param Pinf: stationary state covariance
    :return: process noise covariance Q
    """
    return Pinf - A @ Pinf @ transpose(A)
def _sequential_kf(As, Qs, H, ys, noise_covs, m0, P0, masks, return_predict=False):
    """
    Run a sequential Kalman filter by scanning over the time steps.

    As, Qs, ys, noise_covs and masks all carry a leading time axis [N, ...]
    and are consumed one slice per step by the scan.
    :param As: discrete-time state transition matrices
    :param Qs: process noise covariances
    :param H: measurement model matrix (shared across all steps)
    :param ys: observations
    :param noise_covs: observation noise covariances
    :param m0: prior state mean
    :param P0: prior state covariance
    :param masks: missing-data masks, forwarded to mvn_logpdf
    :param return_predict: if True, emit the one-step-ahead predictions
                           (m_, P_) instead of the filtered moments
    :return: (marginal log likelihood, means, covariances)
    """
    def body(carry, inputs):
        # One Kalman predict + update step.
        y, A, Q, obs_cov, mask = inputs
        m, P, ell = carry
        # predict step: p(x_n | y_{1:n-1})
        m_ = A @ m
        P_ = A @ P @ A.T + Q
        # innovation moments
        obs_mean = H @ m_
        HP = H @ P_
        S = HP @ H.T + obs_cov
        # accumulate the marginal log likelihood, log p(y_n | y_{1:n-1})
        ell_n = mvn_logpdf(y, obs_mean, S, mask)
        ell = ell + ell_n
        # update step: Kalman gain via a solve rather than an explicit inverse
        K = solve(S, HP).T
        m = m_ + K @ (y - obs_mean)
        P = P_ - K @ HP
        # return_predict is a plain Python bool, so this branch is fixed once
        # when the scan is traced (not evaluated per step)
        if return_predict:
            return (m, P, ell), (m_, P_)
        else:
            return (m, P, ell), (m, P)
    (_, _, loglik), (fms, fPs) = scan(f=body,
                                      init=(m0, P0, 0.),
                                      xs=(ys, As, Qs, noise_covs, masks))
    return loglik, fms, fPs
def parallel_filtering_element_(A, Q, H, noise_cov, y):
    """
    Build the five-tuple element (A', b, C, J, eta) for one time step of the
    parallel (associative-scan) Kalman filter.
    NOTE(review): appears to follow the temporal-parallelisation formulation
    of Sarkka & Garcia-Fernandez — confirm against the cited derivation.
    :param A: state transition matrix
    :param Q: process noise covariance
    :param H: measurement model matrix
    :param noise_cov: observation noise covariance R
    :param y: observation
    :return: (A', b, C, J, eta) filtering element
    """
    HQ, HA = H @ Q, H @ A  # pre-compute intermediates
    S = HQ @ H.T + noise_cov  # H Q H.T + R
    SinvH = solve(S, H)  # S^{-1} H
    K = Q @ SinvH.T  # Q H.T S^{-1}
    AA = A - K @ HA  # A - K H A
    b = K @ y  # K y
    C = Q - K @ HQ  # Q - K H Q
    SinvHA = (SinvH @ A).T  # A.T H.T S^{-1}
    eta = SinvHA @ y  # A.T H.T S^{-1} y
    J = SinvHA @ HA  # A.T H.T S^{-1} H A
    return AA, b, C, J, eta
# Batched over the time axis; H is shared across steps (in_axes=None).
parallel_filtering_element = vmap(parallel_filtering_element_, in_axes=(0, 0, None, 0, 0))
@vmap
def parallel_filtering_operator(elem1, elem2):
    """
    Associative binary operator that combines two filtering elements into the
    element covering both spans; used inside associative_scan.
    :param elem1: earlier element (A1, b1, C1, J1, eta1)
    :param elem2: later element (A2, b2, C2, J2, eta2)
    :return: combined element (A, b, C, J, eta)
    """
    A1, b1, C1, J1, eta1 = elem1
    A2, b2, C2, J2, eta2 = elem2
    C1inv = inv(C1)
    temp = solve(C1inv + J2, C1inv)  # we should avoid inverting non-PSD matrices here
    A2temp = A2 @ temp
    AA = A2temp @ A1
    b = A2temp @ (b1 + C1 @ eta2) + b2
    C = A2temp @ C1 @ A2.T + C2
    A1temp = A1.T @ temp.T  # re-use previous solve
    eta = A1temp @ (eta2 - J2 @ b1) + eta1
    J = A1temp @ J2 @ A1 + J1
    return AA, b, C, J, eta
def make_associative_filtering_elements(As, Qs, H, ys, noise_covs, m0, P0):
    """
    Construct the per-time-step elements consumed by the parallel filter's
    associative scan, adjusting the first element for the prior (m0, P0).
    NOTE(review): index_update / index_add are the deprecated jax.ops API —
    confirm the pinned jax version still provides them.
    :return: (A, b, C, J, eta) stacked along the time axis
    """
    Qs = index_update(Qs, index[0], P0)  # first element requires different initialisation
    AA, b, C, J, eta = parallel_filtering_element(As, Qs, H, noise_covs, ys)
    # modify initial b to account for m0 (not needed if m0=zeros)
    S = H @ Qs[0] @ H.T + noise_covs[0]
    K0 = solve(S, H @ Qs[0]).T
    b = index_add(b, index[0], m0 - K0 @ H @ m0)
    return AA, b, C, J, eta
@vmap
def vmap_mvn_logpdf(*args, **kwargs):
    """mvn_logpdf vectorised over the leading (time) axis of its arguments."""
    return mvn_logpdf(*args, **kwargs)
def _parallel_kf(As, Qs, H, ys, noise_covs, m0, P0, masks, return_predict=False):
    """
    Run the Kalman filter via a parallel-in-time associative scan instead of a
    sequential loop; interface matches _sequential_kf.
    :return: (marginal log likelihood, means, covariances); the predicted
             moments replace the filtered ones when return_predict is True
    """
    # perform parallel filtering
    initial_elements = make_associative_filtering_elements(As, Qs, H, ys, noise_covs, m0, P0)
    final_elements = associative_scan(parallel_filtering_operator, initial_elements)
    fms, fPs = final_elements[1], final_elements[2]  # b, C terms are the filtered mean / cov
    # now compute the log likelihood from the one-step-ahead predictions
    mpredict = As @ np.concatenate([m0[None], fms[:-1]])
    Ppredict = As @ np.concatenate([P0[None], fPs[:-1]]) @ transpose(As) + Qs
    loglik = np.sum(vmap_mvn_logpdf(ys, H @ mpredict, H @ Ppredict @ H.T + noise_covs, masks))
    if return_predict:
        return loglik, mpredict, Ppredict
    else:
        return loglik, fms, fPs
def kalman_filter(dt, kernel, y, noise_cov, mask=None, parallel=False, return_predict=False):
"""
Run the Kalman filter to get p(fₙ|y₁,...,yₙ).
Assumes a heteroscedastic Gaussian observation model, i.e. var is vector valued
:param dt: step sizes [N, 1]
:param kernel: an instantiation of the kernel class, used to determine the state space model
:param y: observations [N, D, 1]
:param noise_cov: observation noise covariances [N, D, D]
:param mask: boolean mask for the observations (to indicate missing data locations) [N, D, 1]
:param parallel: flag to switch between parallel and sequential implementation of Kalman filter
:param return_predict: | |
<filename>rcnn/symbol/symbol_vgg.py<gh_stars>1-10
import mxnet as mx
import proposal
import proposal_target
from rcnn.config import config
import numpy as np
def get_vgg_conv(data):
    """
    shared convolutional layers (VGG-16 trunk, pool4 omitted)
    :param data: Symbol
    :return: Symbol
    """
    def _conv_relu(bottom, n_filter, conv_name, relu_name):
        # one 3x3 conv (pad 1) followed by ReLU
        conv = mx.symbol.Convolution(
            data=bottom, kernel=(3, 3), pad=(1, 1), num_filter=n_filter, workspace=2048, name=conv_name)
        return mx.symbol.Activation(data=conv, act_type="relu", name=relu_name)

    def _max_pool(bottom, pool_name):
        return mx.symbol.Pooling(
            data=bottom, pool_type="max", kernel=(2, 2), stride=(2, 2), name=pool_name)

    # group 1
    # NOTE(review): first conv is named "conv1_1_new" — presumably so pretrained
    # VGG weights are not loaded for it; confirm against the init code.
    net = _conv_relu(data, 64, "conv1_1_new", "relu1_1")
    net = _conv_relu(net, 64, "conv1_2", "relu1_2")
    net = _max_pool(net, "pool1")
    # group 2
    net = _conv_relu(net, 128, "conv2_1", "relu2_1")
    net = _conv_relu(net, 128, "conv2_2", "relu2_2")
    net = _max_pool(net, "pool2")
    # group 3
    net = _conv_relu(net, 256, "conv3_1", "relu3_1")
    net = _conv_relu(net, 256, "conv3_2", "relu3_2")
    net = _conv_relu(net, 256, "conv3_3", "relu3_3")
    net = _max_pool(net, "pool3")
    # group 4
    net = _conv_relu(net, 512, "conv4_1", "relu4_1")
    net = _conv_relu(net, 512, "conv4_2", "relu4_2")
    net = _conv_relu(net, 512, "conv4_3", "relu4_3")
    # pool4 intentionally removed to keep a larger feature map
    # group 5
    net = _conv_relu(net, 512, "conv5_1", "relu5_1")
    net = _conv_relu(net, 512, "conv5_2", "relu5_2")
    net = _conv_relu(net, 512, "conv5_3", "relu5_3")
    return net
def _get_rpn(is_train, ft_map, im_info, num_anchors, rpn_label=None, rpn_bbox_target=None, rpn_bbox_weight=None):
    """
    Build the region proposal network on top of the shared feature map.
    :param is_train: when True, also builds the RPN classification and bbox losses
    :param ft_map: shared convolutional feature map Symbol
    :param im_info: image info Symbol consumed by the Proposal op
    :param num_anchors: number of anchors per feature-map position
    :param rpn_label: anchor classification labels (train only)
    :param rpn_bbox_target: anchor regression targets (train only)
    :param rpn_bbox_weight: anchor regression weights (train only)
    :return: (rois, rpn_cls_prob, rpn_bbox_loss) when training, else rois
    """
    # RPN layers
    rpn_conv = mx.symbol.Convolution(
        data=ft_map, kernel=(3, 3), pad=(1, 1), num_filter=256, name="rpn_conv_3x3")
    rpn_relu = mx.symbol.Activation(data=rpn_conv, act_type="relu", name="rpn_relu")
    rpn_cls_score = mx.symbol.Convolution(
        data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=2 * num_anchors, name="rpn_cls_score")
    rpn_bbox_pred = mx.symbol.Convolution(
        data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=4 * num_anchors, name="rpn_bbox_pred")
    # prepare rpn data: fold anchors into the spatial dims for per-anchor softmax
    rpn_cls_score_reshape = mx.symbol.Reshape(data=rpn_cls_score, shape=(0, 2, -1, 0), name="rpn_cls_score_reshape")
    if is_train:
        # classification (ignore_label=-1 skips anchors excluded from sampling)
        rpn_cls_prob = mx.symbol.SoftmaxOutput(data=rpn_cls_score_reshape, label=rpn_label, multi_output=True,
                                               normalization='valid', use_ignore=True, ignore_label=-1, name="rpn_cls_prob")
        # bounding box regression
        rpn_bbox_loss_ = rpn_bbox_weight * mx.symbol.smooth_l1(name='rpn_bbox_loss_', scalar=3.0, data=(rpn_bbox_pred - rpn_bbox_target))
        rpn_bbox_loss_norm = rpn_bbox_loss_ / config.TRAIN.RPN_BATCH_SIZE / config.TRAIN.SAMPLES_PER_BATCH
        rpn_bbox_loss = mx.sym.MakeLoss(name='rpn_bbox_loss', data=rpn_bbox_loss_norm, grad_scale=config.TRAIN.RPN_REG_LOSS_WEIGHT)
    # ROI proposal
    rpn_cls_act = mx.symbol.SoftmaxActivation(data=rpn_cls_score_reshape, mode="channel", name="rpn_cls_act")
    rpn_cls_act_reshape = mx.symbol.Reshape(data=rpn_cls_act, shape=(0, 2 * num_anchors, -1, 0), name='rpn_cls_act_reshape')
    cfg1 = config.TRAIN if is_train else config.TEST
    # choose the C++ Proposal op or the python custom op per config
    if cfg1.CXX_PROPOSAL:
        rois = mx.contrib.symbol.Proposal(
            cls_prob=rpn_cls_act_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois',
            feature_stride=config.RPN_FEAT_STRIDE, scales=tuple(config.ANCHOR_SCALES), ratios=tuple(config.ANCHOR_RATIOS),
            rpn_pre_nms_top_n=cfg1.RPN_PRE_NMS_TOP_N, rpn_post_nms_top_n=cfg1.RPN_POST_NMS_TOP_N,
            threshold=cfg1.RPN_NMS_THRESH, rpn_min_size=cfg1.RPN_MIN_SIZE)
    else:
        rois = mx.symbol.Custom(
            cls_prob=rpn_cls_act_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois',
            op_type='proposal', feat_stride=config.RPN_FEAT_STRIDE,
            scales=tuple(config.ANCHOR_SCALES), ratios=tuple(config.ANCHOR_RATIOS),
            rpn_pre_nms_top_n=cfg1.RPN_PRE_NMS_TOP_N, rpn_post_nms_top_n=cfg1.RPN_POST_NMS_TOP_N,
            threshold=cfg1.RPN_NMS_THRESH, rpn_min_size=cfg1.RPN_MIN_SIZE)
    if is_train:
        return rois, rpn_cls_prob, rpn_bbox_loss
    else:
        return rois
def _get_3DCE_dual_att_head(is_train, ft_map, rois, num_classes):
    """
    3DCE detection head with optional contextual (cross-slice, "vertical") and
    spatial attention applied to the feature map before PSROIPooling.
    An attention type is enabled when its softmax temperature from config is > 0.
    :param is_train: selects config.TRAIN or config.TEST settings
    :param ft_map: shared convolutional feature map Symbol
    :param rois: region-of-interest Symbol
    :param num_classes: number of output classes
    :return: (cls_score, bbox_pred) Symbols
    """
    num_rfcn_chn = 10
    S = 7
    cfg1 = config.TRAIN if is_train else config.TEST
    #10 * 490 * w * h
    softmax_temp_spatial = config.ATT_TEMP_SPATIAL
    softmax_temp_vertical = config.ATT_TEMP_VERTICAL
    assert (softmax_temp_spatial > 0 or softmax_temp_vertical > 0) # Enable at least one attention type
    if softmax_temp_spatial >0 and softmax_temp_vertical>0:
        print('Creating dual attention model.\
 C_Att temperature: {}, S_Att temperature: {}.'.format(softmax_temp_vertical, softmax_temp_spatial))
    elif softmax_temp_spatial <0 and softmax_temp_vertical>0:
        print('Creating contextual attention model.\
 C_Att temperature: {}.'.format(softmax_temp_vertical))
    elif softmax_temp_spatial >0 and softmax_temp_vertical<0:
        print('Creating spatial attention model.\
 S_Att temperature: {}.'.format(softmax_temp_spatial))
    # original feature
    # conv_new_cat = mx.sym.reshape(conv_new_1, shape=(cfg1.SAMPLES_PER_BATCH, -1, 0,0), name='conv_new_cat')
    # 2 * 2450 * w * h
    att_fea =ft_map
    # ---- contextual ("vertical") attention: softmax across the slice axis ----
    if softmax_temp_vertical>0:
        conv_new_att_v = mx.sym.Convolution(data=att_fea, kernel=(1, 1), num_filter=512,
                                            name="conv_new_att_v", lr_mult=3.0)
        # 10 * 490 * w * h
        conv_new_att_v_extend = mx.sym.expand_dims(data=conv_new_att_v, axis=0, name='conv_new_att_v_extend')
        # 1 * 10 * 490 *w * h
        conv_new_att_v_reshape = mx.sym.reshape(data=conv_new_att_v_extend,
                                                shape=(cfg1.SAMPLES_PER_BATCH, config.NUM_IMAGES_3DCE, 0, 0, 0),
                                                name='conv_new_att_v_reshape')
        # 2*5 * 490 *w * h
        # temperature scaling before the softmax over the slice axis
        conv_new_att_v_reshape = conv_new_att_v_reshape / softmax_temp_vertical
        conv_new_att_v_softmax = mx.sym.softmax(data=conv_new_att_v_reshape, axis=1,
                                                name='conv_new_att_v_softmax')
        # 2*5 * 490 *w * h
        conv_new_att_v_softmax_max = mx.sym.max(data=conv_new_att_v_softmax, axis=1, keepdims=True,
                                                name='conv_new_att_v_softmax_max')
        # 2*1 * 490 *w * h
        conv_new_att_v_softmax_max_b = mx.sym.broadcast_like(lhs=conv_new_att_v_softmax_max,rhs=conv_new_att_v_softmax,
                                                             name='conv_new_att_v_softmax_max_b')
        # 2*5 * 490 *w * h
        # normalise so the largest weight per position is 1
        conv_new_att_v_norm = conv_new_att_v_softmax/conv_new_att_v_softmax_max_b
        # 2*5 * 490 *w * h
        att_v = mx.sym.reshape_like(lhs=conv_new_att_v_norm,rhs=conv_new_att_v,name='att_v')
        # 10 * 490*w*h
        att_fea = att_fea * att_v
        # 10 * 490 *w * h
    # ---- spatial attention: softmax over the flattened spatial positions ----
    if softmax_temp_spatial>0:
        conv_new_att_s = mx.sym.Convolution(data=att_fea, kernel=(1, 1), num_filter=512,
                                            name="conv_new_att_s", lr_mult=3.0)
        # 10 * 490 *w * h
        conv_new_att_s_flatten = mx.sym.reshape(data=conv_new_att_s, shape=(0, 0, -1), name='conv_new_att_s_flatten')
        # 10 * 490 * (w * h)
        conv_new_att_s_flatten = conv_new_att_s_flatten/softmax_temp_spatial
        # 10 * 490 * (w * h)
        conv_new_att_s_flatten_softmax = mx.sym.softmax(data=conv_new_att_s_flatten,axis=-1,name='conv_new_att_s_flatten_softmax')
        # 10 * 490 * (w * h)
        conv_new_att_s_flatten_softmax_max =mx.sym.max(data=conv_new_att_s_flatten_softmax,axis=-1,keepdims=True, name='conv_new_att_s_flatten_softmax_max')
        # 10 * 490 * 1
        conv_new_att_s_flatten_softmax_max_b = mx.sym.broadcast_like(lhs=conv_new_att_s_flatten_softmax_max,
                                                                     rhs=conv_new_att_s_flatten_softmax, name='conv_new_att_s_flatten_softmax_max_b')
        # 10 * 490 * (w * h)
        conv_new_att_s_norm = conv_new_att_s_flatten_softmax/conv_new_att_s_flatten_softmax_max_b
        #conv_new_att_norm =mx.sym.Custom(data=conv_new_att_flatten_softmax, axis=-1,name='global_att_norm', op_type='l1_norm')
        # 10 * 490 * (w * h)
        att_s = mx.sym.reshape_like(lhs=conv_new_att_s_norm,rhs=conv_new_att_s, name='att_s')
        # 10 * 490 *w * h
        att_fea = att_fea * att_s
        # 10 * 490 *w * h
    # ---- standard 3DCE head on top of the attended feature map ----
    conv_new_1 = mx.sym.Convolution(data=att_fea, kernel=(1, 1), num_filter=S * S * num_rfcn_chn, name="conv_new_1",
                                    lr_mult=3.0)
    conv_new_cat = mx.sym.reshape(conv_new_1, shape=(cfg1.SAMPLES_PER_BATCH, -1, 0, 0), name='conv_new_cat')
    # 2 * 2450 *w * h
    psroipool5 = mx.contrib.sym.PSROIPooling(name='psroipool5', data=conv_new_cat, rois=rois,
                                             group_size=S, pooled_size=S,
                                             output_dim=num_rfcn_chn*config.NUM_IMAGES_3DCE, spatial_scale=1.0 / config.RCNN_FEAT_STRIDE)
    fc6 = mx.symbol.FullyConnected(name='fc6', data=psroipool5, num_hidden=2048, lr_mult=2.0)
    relu6 = mx.sym.Activation(data=fc6, act_type='relu', name='relu6')
    cls_score = mx.symbol.FullyConnected(name='cls_score', data=relu6, num_hidden=num_classes)
    bbox_pred = mx.symbol.FullyConnected(name='bbox_pred', data=relu6, num_hidden=num_classes * 4)
    cls_score = mx.sym.Reshape(name='cls_score_reshape', data=cls_score, shape=(-1, num_classes))
    bbox_pred = mx.sym.Reshape(name='bbox_pred_reshape', data=bbox_pred, shape=(-1, 4 * num_classes))
    return cls_score, bbox_pred
def _get_3DCE_head(is_train, ft_map, rois, num_classes):
    """
    3DCE detection head: 1x1 conv to position-sensitive channels, fold the
    per-slice maps of each sample into one batch entry, PSROIPooling, then a
    fully-connected classifier / regressor.
    :param is_train: selects config.TRAIN or config.TEST settings
    :param ft_map: shared convolutional feature map Symbol
    :param rois: region-of-interest Symbol
    :param num_classes: number of output classes
    :return: (cls_score, bbox_pred) Symbols
    """
    num_rfcn_chn = 10
    S = 7
    num_hidden = 2048
    cfg1 = config.TRAIN if is_train else config.TEST
    conv_new_1 = mx.sym.Convolution(data=ft_map, kernel=(1, 1), num_filter=S * S * num_rfcn_chn, name="conv_new_1", lr_mult=3.0)
    # fold the slices belonging to one sample back into a single batch entry
    conv_new_cat = mx.sym.reshape(conv_new_1, shape=(cfg1.SAMPLES_PER_BATCH, -1, 0, 0), name='conv_new_cat')
    # rfcn_cls/rfcn_bbox
    psroipool5 = mx.contrib.sym.PSROIPooling(name='psroipool5', data=conv_new_cat, rois=rois,
                                             group_size=S, pooled_size=S,
                                             output_dim=num_rfcn_chn*config.NUM_IMAGES_3DCE, spatial_scale=1.0 / config.RCNN_FEAT_STRIDE)
    # fix: num_hidden was previously declared but shadowed by a literal 2048 here
    fc6 = mx.symbol.FullyConnected(name='fc6', data=psroipool5, num_hidden=num_hidden, lr_mult=2.0)
    relu6 = mx.sym.Activation(data=fc6, act_type='relu', name='relu6')
    cls_score = mx.symbol.FullyConnected(name='cls_score', data=relu6, num_hidden=num_classes)
    bbox_pred = mx.symbol.FullyConnected(name='bbox_pred', data=relu6, num_hidden=num_classes * 4)
    cls_score = mx.sym.Reshape(name='cls_score_reshape', data=cls_score, shape=(-1, num_classes))
    bbox_pred = mx.sym.Reshape(name='bbox_pred_reshape', data=bbox_pred, shape=(-1, 4 * num_classes))
    return cls_score, bbox_pred
def _get_RFCN_head(is_train, ft_map, rois, num_classes):
    """
    R-FCN detection head: position-sensitive score maps, PSROIPooling, and a
    global average vote per ROI.
    :param is_train: unused; kept for interface parity with the other heads
    :param ft_map: shared convolutional feature map Symbol
    :param rois: region-of-interest Symbol
    :param num_classes: number of output classes
    :return: (cls_score, bbox_pred) Symbols
    """
    reduce_chn = 512
    grid = 7  # position-sensitive grid size (group_size == pooled_size)
    reduced = mx.sym.Convolution(data=ft_map, kernel=(1, 1), num_filter=reduce_chn, name="conv_new_1", lr_mult=3.0)
    reduced_relu = mx.sym.Activation(data=reduced, act_type='relu', name='conv_new_1_relu')
    # one score map per (grid cell, class) and per (grid cell, class, coord)
    score_maps = mx.sym.Convolution(data=reduced_relu, kernel=(1, 1), num_filter=grid * grid * num_classes,
                                    name="rfcn_cls", lr_mult=3.0)
    bbox_maps = mx.sym.Convolution(data=reduced_relu, kernel=(1, 1), num_filter=grid * grid * 4 * num_classes,
                                   name="rfcn_bbox", lr_mult=3.0)
    pooled_cls = mx.contrib.sym.PSROIPooling(name='psroipool5_cls', data=score_maps, rois=rois,
                                             group_size=grid, pooled_size=grid,
                                             output_dim=num_classes, spatial_scale=1.0 / config.RCNN_FEAT_STRIDE)
    pooled_reg = mx.contrib.sym.PSROIPooling(name='psroipool5_reg', data=bbox_maps, rois=rois,
                                             group_size=grid, pooled_size=grid,
                                             output_dim=num_classes * 4,
                                             spatial_scale=1.0 / config.RCNN_FEAT_STRIDE)
    # average vote over the grid cells
    cls_score = mx.symbol.Pooling(data=pooled_cls, global_pool=True, kernel=(grid, grid), pool_type="avg",
                                  name="cls_score")
    bbox_pred = mx.symbol.Pooling(data=pooled_reg, global_pool=True, kernel=(grid, grid), pool_type="avg",
                                  name="bbox_pred")
    cls_score = mx.sym.Reshape(name='cls_score_reshape', data=cls_score, shape=(-1, num_classes))
    bbox_pred = mx.sym.Reshape(name='bbox_pred_reshape', data=bbox_pred, shape=(-1, 4 * num_classes))
    return cls_score, bbox_pred
def _get_Faster_head(is_train, ft_map, rois, num_classes):
    """
    Fast R-CNN head: ROI pooling followed by two dropout-regularised FC layers
    and the final classification / bbox-regression outputs.
    :param is_train: unused; kept for interface parity with the other heads
    :param ft_map: shared convolutional feature map Symbol
    :param rois: region-of-interest Symbol
    :param num_classes: number of output classes
    :return: (cls_score, bbox_pred) Symbols
    """
    hidden = mx.symbol.ROIPooling(
        name='roi_pool5', data=ft_map, rois=rois, pooled_size=(7, 7), spatial_scale=1.0 / config.RCNN_FEAT_STRIDE)
    hidden = mx.symbol.Flatten(data=hidden, name="flatten")
    # two FC -> ReLU -> Dropout stages (fc6_small / fc7_small)
    for stage in (6, 7):
        hidden = mx.symbol.FullyConnected(data=hidden, num_hidden=2048, name="fc%d_small" % stage)
        hidden = mx.symbol.Activation(data=hidden, act_type="relu", name="relu%d" % stage)
        hidden = mx.symbol.Dropout(data=hidden, p=0.5, name="drop%d" % stage)
    cls_score = mx.symbol.FullyConnected(name='cls_score', data=hidden, num_hidden=num_classes)
    bbox_pred = mx.symbol.FullyConnected(name='bbox_pred', data=hidden, num_hidden=num_classes * 4)
    return cls_score, bbox_pred
def get_vgg(is_train, num_classes=config.NUM_CLASSES, num_anchors=config.NUM_ANCHORS):
"""
end-to-end train with VGG 16 conv layers with RPN
:param num_classes: used to determine output size
:param num_anchors: used to determine output size
:return: Symbol
"""
# data
data = mx.symbol.Variable(name="data")
im_info = mx.symbol.Variable(name="im_info")
if is_train:
gt_boxes = mx.symbol.Variable(name="gt_boxes")
rpn_label = mx.symbol.Variable(name='label')
rpn_bbox_target = mx.symbol.Variable(name='bbox_target')
rpn_bbox_weight = mx.symbol.Variable(name='bbox_weight')
# shared convolutional layers
relu5_3 = get_vgg_conv(data)
# RPNs
if is_train:
rois, rpn_cls_prob, rpn_bbox_loss = _get_rpn(
is_train, relu5_3, im_info, num_anchors, rpn_label, rpn_bbox_target, rpn_bbox_weight)
# ROI proposal target
group = mx.symbol.Custom(rois=rois, gt_boxes=gt_boxes, op_type='proposal_target',
num_classes=num_classes, batch_images=config.TRAIN.SAMPLES_PER_BATCH,
batch_rois=config.TRAIN.BATCH_ROIS, fg_fraction=config.TRAIN.FG_FRACTION)
rois, label, bbox_target, bbox_weight = group
else:
rois = _get_rpn(is_train, relu5_3, im_info, num_anchors)
# RCNN head
cls_score, bbox_pred = eval('_get_'+config.FRAMEWORK+'_head')(is_train, relu5_3, rois, num_classes)
# loss and output
if is_train:
cls_prob = mx.symbol.SoftmaxOutput(name='cls_prob', data=cls_score, label=label, normalization='batch')
bbox_loss_ = bbox_weight * | |
<filename>src/azure-cli/azure/cli/command_modules/ams/tests/latest/test_ams_live_event_scenarios.py<gh_stars>1-10
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import os
from azure.cli.core.util import CLIError
from azure.cli.testsdk import ScenarioTest, ResourceGroupPreparer, StorageAccountPreparer
from azure.cli.command_modules.ams._test_utils import _get_test_data_file
class AmsLiveEventTests(ScenarioTest):
@ResourceGroupPreparer()
@StorageAccountPreparer(parameter_name='storage_account_for_create')
def test_live_event_create(self, storage_account_for_create):
amsname = self.create_random_name(prefix='ams', length=12)
live_event_name = self.create_random_name(prefix='le', length=12)
customHostnamePrefix = self.create_random_name(prefix='custom', length=12)
self.kwargs.update({
'amsname': amsname,
'storageAccount': storage_account_for_create,
'location': 'centralus',
'streamingProtocol': 'RTMP',
'liveEventName': live_event_name,
'encodingType': 'Standard',
'tags': 'key=value',
'previewLocator': self.create_guid(),
'keyFrameInterval': 'PT2S',
'liveTranscriptionLanguage': 'ca-ES',
'customHostnamePrefix': customHostnamePrefix,
'stretchMode': 'AutoSize',
'description': 'asd',
'accessToken': '<KEY>',
'clientAccessPolicy': '@' + _get_test_data_file('clientAccessPolicy.xml'),
'crossDomainPolicy': '@' + _get_test_data_file('crossDomainPolicy.xml')
})
self.cmd('az ams account create -n {amsname} -g {rg} --storage-account {storageAccount} -l {location}', checks=[
self.check('name', '{amsname}'),
self.check('location', 'Central US')
])
live_event = self.cmd('az ams live-event create -a {amsname} -n {liveEventName} -g {rg} --auto-start --transcription-lang {liveTranscriptionLanguage} --streaming-protocol {streamingProtocol} --encoding-type {encodingType} --key-frame-interval {keyFrameInterval} --tags {tags} --stream-options Default LowLatency --preview-locator {previewLocator} --ips 1.2.3.4 5.6.7.8 192.168.0.0/28 --preview-ips 192.168.0.0/28 0.0.0.0 --access-token {accessToken} --description {description} --client-access-policy "{clientAccessPolicy}" --cross-domain-policy "{crossDomainPolicy}" --use-static-hostname --hostname-prefix {customHostnamePrefix} --stretch-mode {stretchMode}', checks=[
self.check('name', '{liveEventName}'),
self.check('location', 'Central US'),
self.check('input.streamingProtocol', '{streamingProtocol}'),
self.check('encoding.encodingType', '{encodingType}'),
self.check('encoding.keyFrameInterval', '0:00:02'),
self.check('encoding.stretchMode', '{stretchMode}'),
self.check('transcriptions[0].language', '{liveTranscriptionLanguage}'),
self.check('length(preview.accessControl.ip.allow)', 2),
self.check('length(input.accessControl.ip.allow)', 3),
self.check('preview.previewLocator', '{previewLocator}'),
self.check('length(streamOptions)', 2),
self.check('description', '{description}'),
self.check('input.accessToken', '{accessToken}'),
self.check('useStaticHostname', True),
self.check('input.accessControl.ip.allow[2].address', '192.168.0.0'),
self.check('input.accessControl.ip.allow[2].subnetPrefixLength', '28'),
self.check('preview.accessControl.ip.allow[0].address', '192.168.0.0'),
self.check('preview.accessControl.ip.allow[0].subnetPrefixLength', '28'),
]).get_output_in_json()
self.assertIsNotNone(live_event['crossSiteAccessPolicies']['crossDomainPolicy'])
self.assertIsNotNone(live_event['crossSiteAccessPolicies']['clientAccessPolicy'])
self.assertNotEquals('Stopping', live_event['resourceState'])
self.assertNotEquals('Stopped', live_event['resourceState'])
self.cmd('az ams live-event stop -a {amsname} -n {liveEventName} -g {rg}')
self.cmd('az ams live-event delete -a {amsname} -n {liveEventName} -g {rg}')
@ResourceGroupPreparer()
@StorageAccountPreparer(parameter_name='storage_account_for_create')
def test_live_event_start(self, storage_account_for_create):
amsname = self.create_random_name(prefix='ams', length=12)
live_event_name = self.create_random_name(prefix='le', length=12)
self.kwargs.update({
'amsname': amsname,
'storageAccount': storage_account_for_create,
'location': 'southindia',
'streamingProtocol': 'FragmentedMP4',
'liveEventName': live_event_name,
'encodingType': 'Standard'
})
self.cmd('az ams account create -n {amsname} -g {rg} --storage-account {storageAccount} -l {location}', checks=[
self.check('name', '{amsname}'),
self.check('location', 'South India')
])
self.cmd('az ams live-event create -a {amsname} -n {liveEventName} -g {rg} --streaming-protocol {streamingProtocol} --encoding-type {encodingType} --tags key=value --ips AllowAll', checks=[
self.check('name', '{liveEventName}'),
self.check('location', 'South India'),
self.check('input.streamingProtocol', 'FragmentedMP4'),
self.check('encoding.encodingType', '{encodingType}')
])
live_event = self.cmd('az ams live-event start -a {amsname} --name {liveEventName} -g {rg}', checks=[
self.check('name', '{liveEventName}'),
self.check('location', 'South India'),
self.check('input.streamingProtocol', 'FragmentedMP4')
]).get_output_in_json()
self.assertNotEquals('Stopping', live_event['resourceState'])
self.assertNotEquals('Stopped', live_event['resourceState'])
@ResourceGroupPreparer()
@StorageAccountPreparer(parameter_name='storage_account_for_create')
def test_live_event_standby(self, storage_account_for_create):
amsname = self.create_random_name(prefix='ams', length=12)
live_event_name = self.create_random_name(prefix='le', length=12)
self.kwargs.update({
'amsname': amsname,
'storageAccount': storage_account_for_create,
'location': 'southindia',
'streamingProtocol': 'FragmentedMP4',
'liveEventName': live_event_name,
'accessToken': '<KEY>',
'encodingType': 'Standard'
})
self.cmd('az ams account create -n {amsname} -g {rg} --storage-account {storageAccount} -l {location}', checks=[
self.check('name', '{amsname}'),
self.check('location', 'South India')
])
self.cmd('az ams live-event create -a {amsname} -n {liveEventName} -g {rg} --streaming-protocol {streamingProtocol} --encoding-type {encodingType} --access-token {accessToken} --tags key=value --ips AllowAll', checks=[
self.check('name', '{liveEventName}'),
self.check('location', 'South India'),
self.check('input.streamingProtocol', 'FragmentedMP4'),
self.check('encoding.encodingType', '{encodingType}')
])
live_event = self.cmd('az ams live-event standby -a {amsname} --name {liveEventName} -g {rg}', checks=[
self.check('name', '{liveEventName}'),
self.check('location', 'South India'),
self.check('input.streamingProtocol', 'FragmentedMP4'),
self.check('input.accessToken', '{accessToken}'),
]).get_output_in_json()
self.assertNotEquals('Stopping', live_event['resourceState'])
self.assertNotEquals('Stopped', live_event['resourceState'])
self.assertEquals('StandBy', live_event['resourceState'])
@ResourceGroupPreparer()
@StorageAccountPreparer(parameter_name='storage_account_for_create')
def test_live_event_stop(self, storage_account_for_create):
amsname = self.create_random_name(prefix='ams', length=12)
live_event_name = self.create_random_name(prefix='le', length=12)
self.kwargs.update({
'amsname': amsname,
'storageAccount': storage_account_for_create,
'location': 'brazilsouth',
'streamingProtocol': 'FragmentedMP4',
'liveEventName': live_event_name,
'encodingType': 'Standard'
})
self.cmd('az ams account create -n {amsname} -g {rg} --storage-account {storageAccount} -l {location}', checks=[
self.check('name', '{amsname}'),
self.check('location', 'Brazil South')
])
self.cmd('az ams live-event create -a {amsname} -n {liveEventName} -g {rg} --streaming-protocol {streamingProtocol} --encoding-type {encodingType} --tags key=value --ips AllowAll --auto-start')
live_event = self.cmd('az ams live-event stop -a {amsname} -n {liveEventName} -g {rg}', checks=[
self.check('name', '{liveEventName}')
]).get_output_in_json()
self.assertNotEquals('Starting', live_event['resourceState'])
self.assertNotEquals('Running', live_event['resourceState'])
@ResourceGroupPreparer()
@StorageAccountPreparer(parameter_name='storage_account_for_create')
def test_live_event_stop_and_remove_outputs(self, storage_account_for_create):
amsname = self.create_random_name(prefix='ams', length=12)
live_event_name = self.create_random_name(prefix='le', length=12)
self.kwargs.update({
'amsname': amsname,
'storageAccount': storage_account_for_create,
'location': 'japaneast',
'streamingProtocol': 'FragmentedMP4',
'liveEventName': live_event_name,
'encodingType': 'Standard'
})
self.cmd('az ams account create -n {amsname} -g {rg} --storage-account {storageAccount} -l {location}', checks=[
self.check('name', '{amsname}'),
self.check('location', 'Japan East')
])
self.cmd('az ams live-event create -a {amsname} -n {liveEventName} -g {rg} --streaming-protocol {streamingProtocol} --encoding-type {encodingType} --tags key=value --ips AllowAll --auto-start')
assetName = self.create_random_name(prefix='asset', length=12)
live_output_name1 = self.create_random_name(prefix='lo1', length=12)
live_output_name2 = self.create_random_name(prefix='lo2', length=12)
manifest_name1 = self.create_random_name(prefix='man1', length=12)
manifest_name2 = self.create_random_name(prefix='man2', length=12)
self.kwargs.update({
'assetName': assetName,
'liveOutputName1': live_output_name1,
'liveOutputName2': live_output_name2,
'archiveWindowLength': 'PT5M',
'manifestName1': manifest_name1,
'manifestName2': manifest_name2
})
self.cmd('az ams asset create -a {amsname} -n {assetName} -g {rg}')
self.cmd('az ams live-output create -a {amsname} -n {liveOutputName1} -g {rg} --asset-name {assetName} --live-event-name {liveEventName} --archive-window-length {archiveWindowLength} --manifest-name {manifestName1}')
self.cmd('az ams live-output create -a {amsname} -n {liveOutputName2} -g {rg} --asset-name {assetName} --live-event-name {liveEventName} --archive-window-length {archiveWindowLength} --manifest-name {manifestName2}')
self.cmd('az ams live-output list -a {amsname} -g {rg} --live-event-name {liveEventName}', checks=[
self.check('length(@)', 2)
])
live_event = self.cmd('az ams live-event stop -a {amsname} -n {liveEventName} -g {rg} --remove-outputs-on-stop', checks=[
self.check('name', '{liveEventName}')
]).get_output_in_json()
self.assertNotEquals('Starting', live_event['resourceState'])
self.assertNotEquals('Running', live_event['resourceState'])
self.cmd('az ams live-output list -a {amsname} -g {rg} --live-event-name {liveEventName}', checks=[
self.check('length(@)', 0)
])
@ResourceGroupPreparer()
@StorageAccountPreparer(parameter_name='storage_account_for_create')
def test_live_event_list(self, storage_account_for_create):
amsname = self.create_random_name(prefix='ams', length=12)
live_event_name = self.create_random_name(prefix='le', length=12)
live_event_name2 = self.create_random_name(prefix='le', length=12)
self.kwargs.update({
'amsname': amsname,
'storageAccount': storage_account_for_create,
'location': 'westeurope',
'streamingProtocol': 'FragmentedMP4',
'liveEventName': live_event_name,
'liveEventName2': live_event_name2,
'encodingType': 'Standard'
})
self.cmd('az ams account create -n {amsname} -g {rg} --storage-account {storageAccount} -l {location}', checks=[
self.check('name', '{amsname}'),
self.check('location', 'West Europe')
])
self.cmd('az ams live-event create -a {amsname} -n {liveEventName} -g {rg} --streaming-protocol {streamingProtocol} --encoding-type {encodingType} --ips AllowAll --tags key=value')
self.cmd('az ams live-event list -a {amsname} -g {rg}', checks=[
self.check('length(@)', 1)
])
self.cmd('az ams live-event create -a {amsname} -n {liveEventName2} -g {rg} --streaming-protocol {streamingProtocol} --encoding-type {encodingType} --ips AllowAll --tags key=value')
self.cmd('az ams live-event list -a {amsname} -g {rg}', checks=[
self.check('length(@)', 2)
])
@ResourceGroupPreparer()
@StorageAccountPreparer(parameter_name='storage_account_for_create')
def test_live_event_delete(self, storage_account_for_create):
amsname = self.create_random_name(prefix='ams', length=12)
live_event_name = self.create_random_name(prefix='le', length=12)
live_event_name2 = self.create_random_name(prefix='le', length=12)
self.kwargs.update({
'amsname': amsname,
'storageAccount': storage_account_for_create,
'location': 'northeurope',
'streamingProtocol': 'FragmentedMP4',
'liveEventName': live_event_name,
'liveEventName2': live_event_name2,
'encodingType': 'Standard'
})
self.cmd('az ams account create -n {amsname} -g {rg} --storage-account {storageAccount} -l {location}', checks=[
self.check('name', '{amsname}'),
self.check('location', 'North Europe')
])
self.cmd('az ams live-event create -a {amsname} -n {liveEventName} -g {rg} --streaming-protocol {streamingProtocol} --encoding-type {encodingType} --ips AllowAll --tags key=value')
self.cmd('az ams live-event list -a {amsname} -g {rg}', checks=[
self.check('length(@)', 1)
])
self.cmd('az ams live-event create -a {amsname} -n {liveEventName2} -g {rg} --streaming-protocol {streamingProtocol} --encoding-type {encodingType} --ips AllowAll --tags key=value')
self.cmd('az ams live-event list -a {amsname} -g {rg}', checks=[
self.check('length(@)', 2)
])
self.cmd('az ams live-event delete -a {amsname} -g {rg} -n {liveEventName2}')
self.cmd('az ams live-event list -a {amsname} -g {rg}', checks=[
self.check('length(@)', 1)
])
@ResourceGroupPreparer()
@StorageAccountPreparer(parameter_name='storage_account_for_create')
def test_live_event_reset(self, storage_account_for_create):
amsname = self.create_random_name(prefix='ams', length=12)
live_event_name = self.create_random_name(prefix='le', length=12)
self.kwargs.update({
'amsname': amsname,
'storageAccount': storage_account_for_create,
'location': 'eastus',
'streamingProtocol': 'FragmentedMP4',
'liveEventName': live_event_name,
'encodingType': 'Standard'
})
self.cmd('az ams account create -n {amsname} -g {rg} --storage-account {storageAccount} -l {location}', checks=[
self.check('name', '{amsname}'),
self.check('location', 'East US')
])
self.cmd('az ams live-event create -a {amsname} -n {liveEventName} -g {rg} --streaming-protocol {streamingProtocol} --encoding-type {encodingType} --ips AllowAll --auto-start', checks=[
self.check('name', '{liveEventName}'),
self.check('location', 'East US'),
self.check('input.streamingProtocol', 'FragmentedMP4'),
self.check('encoding.encodingType', '{encodingType}')
])
live_event = self.cmd('az ams live-event reset -a {amsname} -n {liveEventName} -g {rg}', checks=[
self.check('name', '{liveEventName}'),
self.check('location', 'East US')
]).get_output_in_json()
self.assertNotEquals('Stopping', live_event['resourceState'])
self.assertNotEquals('Stopped', live_event['resourceState'])
@ResourceGroupPreparer()
@StorageAccountPreparer(parameter_name='storage_account_for_create')
def test_live_event_update(self, storage_account_for_create):
amsname = self.create_random_name(prefix='ams', length=12)
live_event_name = self.create_random_name(prefix='le', length=12)
self.kwargs.update({
'amsname': amsname,
'storageAccount': storage_account_for_create,
'location': 'eastasia',
'streamingProtocol': 'FragmentedMP4',
'liveEventName': live_event_name
})
self.cmd('az ams account create -n {amsname} -g {rg} --storage-account {storageAccount} -l {location}')
self.cmd('az ams live-event create -a {amsname} -n {liveEventName} -g {rg} --ips AllowAll --streaming-protocol {streamingProtocol}')
self.kwargs.update({
'tags': 'key=value',
'keyFrameIntervalDuration': 'PT2S',
'description': 'asd',
'clientAccessPolicy': '@' + _get_test_data_file('clientAccessPolicy.xml'),
'crossDomainPolicy': '@' + _get_test_data_file('crossDomainPolicy.xml')
})
live_event_updated = self.cmd('az ams live-event update -a {amsname} -n {liveEventName} -g {rg} --ips 1.2.3.4 5.6.7.8 9.10.11.12 --preview-ips 1.1.1.1 0.0.0.0 --key-frame-interval-duration {keyFrameIntervalDuration} --description {description} --client-access-policy "{clientAccessPolicy}" --cross-domain-policy "{crossDomainPolicy}" --tags {tags}', checks=[
self.check('description', '{description}'),
self.check('input.keyFrameIntervalDuration', '{keyFrameIntervalDuration}'),
self.check('length(preview.accessControl.ip.allow)', 2),
self.check('length(input.accessControl.ip.allow)', 3),
self.check('tags.key', 'value')
]).get_output_in_json()
self.assertIsNotNone(live_event_updated['crossSiteAccessPolicies']['crossDomainPolicy'])
self.assertIsNotNone(live_event_updated['crossSiteAccessPolicies']['clientAccessPolicy'])
@ResourceGroupPreparer()
@StorageAccountPreparer(parameter_name='storage_account_for_create')
def test_live_event_show(self, storage_account_for_create):
amsname = self.create_random_name(prefix='ams', length=12)
live_event_name = self.create_random_name(prefix='le', length=12)
self.kwargs.update({
'amsname': amsname,
'storageAccount': storage_account_for_create,
'location': 'westus',
'streamingProtocol': 'RTMP',
'liveEventName': live_event_name,
'encodingType': 'Standard',
'tags': 'key=value',
'previewLocator': self.create_guid(),
'description': 'asd',
'accessToken': '<KEY>',
'clientAccessPolicy': '@' + _get_test_data_file('clientAccessPolicy.xml'),
'crossDomainPolicy': '@' + _get_test_data_file('crossDomainPolicy.xml')
})
self.cmd('az ams account create -n {amsname} -g {rg} --storage-account {storageAccount} -l {location}', checks=[
self.check('name', '{amsname}'),
self.check('location', 'West US')
])
self.cmd('az ams live-event create -a {amsname} -n {liveEventName} -g {rg} --streaming-protocol {streamingProtocol} --encoding-type {encodingType} --tags {tags} --stream-options Default LowLatency --preview-locator {previewLocator} --ips 1.2.3.4 5.6.7.8 9.10.11.12 --preview-ips 1.1.1.1 0.0.0.0 --access-token {accessToken} --description {description} --client-access-policy "{clientAccessPolicy}" --cross-domain-policy "{crossDomainPolicy}"')
self.cmd('az ams live-event show -a {amsname} -n {liveEventName} -g {rg}', checks=[
self.check('name', '{liveEventName}'),
self.check('location', 'West US'),
self.check('input.streamingProtocol', '{streamingProtocol}'),
| |
from collections import OrderedDict
from .model import Param, RustType, non_keyword_name, prefixed, pascal_to_snake
class RustClassBinding:
    """Generates the Rust-side binding code for one C++ class model.

    Emits three flavors of output via ``lines()``: the FFI declarations,
    the ``*Methods`` trait, or the owned wrapper type with its ctors,
    ``Drop`` impl and non-virtual override impls.
    """

    def __init__(self, model):
        # ``model`` is the parsed C++ class description; its ``manager``
        # provides ancestry queries used throughout.
        self.__model = model
        self.overloads = OverloadTree(model)
        # NOTE(review): prints the overload tree to stdout on every
        # construction — looks like leftover debug output; confirm intended.
        self.overloads.print_tree()
        self.__methods = [RustMethodBinding(self, m) for m in model.methods]

    def is_a(self, base):
        """Return True if the bound class is, or inherits from, ``base``."""
        return self.__model.manager.is_a(self.__model, base)

    def lines(self, for_ffi=False, for_methods=False):
        """Yield generated Rust source lines.

        for_ffi:     yield `extern` FFI declarations (plus a `_delete` fn
                     for classes not managed via wxObject).
        for_methods: yield the `*Methods` trait definition.
        default:     yield the `wx_class!` wrapper, inherent impl with
                     ctors, Drop impl and non-virtual override impls.
        """
        yield ''
        yield '// %s' % (
            self.__model.name,
        )
        if for_ffi:
            if not self.is_a('wxObject'):
                yield 'pub fn %s_delete(self_: *mut c_void);' % (
                    self.__model.name,
                )
            for method in self.__methods:
                for line in method.lines(for_ffi=True):
                    yield line
        elif for_methods:
            for line in self._trait_with_methods():
                yield line
        else:
            unprefixed = self.__model.unprefixed()
            yield 'wx_class! { %s = ' % (unprefixed,)
            yield ' %sIsOwned<true>(%s) impl' % (
                unprefixed,
                self.__model.name,
            )
            yield ',\n'.join(self._ancestor_methods())
            yield '}'
            for line in self._impl_with_ctors():
                yield line
            for line in self._impl_drop_if_needed():
                yield line
            for line in self._impl_non_virtual_overrides():
                yield line

    def _ancestor_methods(self):
        """Yield one `<Ancestor>Methods` line per ancestor.

        Ancestors for which this class re-implements a method non-virtually
        are commented out here; they get a dedicated impl instead (see
        _impl_non_virtual_overrides).
        """
        for ancestor in self.__model.manager.ancestors_of(self.__model):
            comment_or_not = ''
            if any(m.is_non_virtual_override(ancestor) for m in self.__methods):
                comment_or_not = '// '
            yield ' %s%sMethods' % (
                comment_or_not,
                ancestor.name[2:],
            )

    def _impl_with_ctors(self):
        """Yield the inherent impl: enums, constructors and a `none()` helper."""
        unprefixed = self.__model.unprefixed()
        yield 'impl<const OWNED: bool> %sIsOwned<OWNED> {' % (unprefixed,)
        for enum in self.__model.enums:
            for line in enum.generate():
                yield ' %s' % (line,)
        yield ''
        for ctor in self._ctors():
            for line in ctor.lines():
                yield ' %s' % (line,)
        yield " pub fn none() -> Option<&'static Self> {"
        yield ' None'
        yield ' }'
        yield '}'

    def _impl_drop_if_needed(self):
        """Yield a Drop impl that frees owned instances via FFI.

        Skipped for wxEvtHandler/wxSizer hierarchies (their lifetime is
        managed elsewhere); wxObject-derived classes delete through the
        shared wxObject_delete entry point.
        """
        if (self.is_a('wxEvtHandler') or
                self.is_a('wxSizer')):
            return
        deleter_class = self.__model.name
        if self.is_a('wxObject'):
            deleter_class = 'wxObject'
        yield 'impl<const OWNED: bool> Drop for %sIsOwned<OWNED> {' % (self.__model.unprefixed(),)
        yield ' fn drop(&mut self) {'
        yield ' if OWNED {'
        yield ' unsafe { ffi::%s_delete(self.0) }' % (deleter_class,)
        yield ' }'
        yield ' }'
        yield '}'

    def _impl_non_virtual_overrides(self):
        """Yield one trait impl per ancestor whose methods are re-implemented
        non-virtually by this class."""
        for ancestor in self.__model.manager.ancestors_of(self.__model):
            methods = [m for m in self.__methods if m.is_non_virtual_override(ancestor)]
            if not methods:
                continue
            yield 'impl<const OWNED: bool> %sMethods for %sIsOwned<OWNED> {' % (
                ancestor.unprefixed(),
                self.__model.unprefixed(),
            )
            for method in methods:
                for line in method.lines():
                    yield ' %s' % (line)
            yield '}'

    def _ctors(self):
        # Lazily filter constructor bindings out of all method bindings.
        return (m for m in self.__methods if m.is_ctor)

    def _trait_with_methods(self):
        """Yield the `<Class>Methods` trait containing all non-ctor,
        non-override methods; the supertrait is the base class's trait
        (or `__WxRust` for root classes)."""
        indent = ' ' * 4 * 1
        base = self.__model.base
        if not base:
            base = '__WxRust'
        yield 'pub trait %sMethods: %sMethods {' % (
            self.__model.unprefixed(),
            # base[2:] strips the 'wx' prefix from the C++ class name.
            base[2:],
        )
        ancestors = self.__model.manager.ancestors_of(self.__model)
        for method in self.__methods:
            if method.is_ctor:
                continue
            if any(method.is_non_virtual_override(c) for c in ancestors):
                continue
            for line in method.lines():
                yield '%s%s' % (indent, line)
        yield '}'
class Overload:
    """A single node of the overload-disambiguation tree.

    Each node is keyed by a path segment (method name or parameter type),
    holds its child nodes in insertion order, and optionally the method
    whose path terminates here.
    """

    def __init__(self, name):
        self.name = name
        # Children keyed by path segment, preserving insertion order.
        self.items = OrderedDict()
        # The method that ends at this node, if any.
        self.method = None
class OverloadTree:
    """Groups a class's methods by name/parameter-type paths to detect
    overloads and compute the minimal argument list that disambiguates each.

    Tree shape: root -> 'Class.method' -> param type -> param type -> ...;
    a node's ``method`` is set where a method's full path ends.
    """

    def __init__(self, cls):
        # ``cls`` is the class model; suppressed methods are not inserted.
        self.__cls = cls
        self.__root = Overload("")
        for m in cls.methods:
            self._add(m)

    def _path(self, method):
        """Return the tree path for ``method``: a 'Class.name' string
        followed by each parameter's type object."""
        path = []
        path.append('%s.%s' % (
            self.__cls.name,
            method.name(without_index=True),
        ))
        for p in method.params:
            # NOTE(review): these are type objects (not strings); they must
            # be hashable since they key the child OrderedDicts.
            path.append(p.type)
        return path

    def _add(self, method):
        """Insert ``method`` into the tree, creating nodes along its path."""
        if method.suppressed_reason():
            return
        path = self._path(method)
        node = self.__root
        for item in path:
            items = node.items
            if item not in items:
                items[item] = Overload(item)
            node = items[item]
        node.method = method

    def has_overload(self, method):
        """Return True if more than one method shares this method's name."""
        if method.suppressed_reason():
            return False
        by_name = self._path(method)[0]
        node = self.__root.items[by_name]
        return self._count_in_subtree(node) > 1

    def args_to_disambiguate(self, method):
        """Return the shortest prefix of parameter types (as overload-name
        strings) that uniquely identifies ``method`` among its overloads.

        Walks the method's path, keeping each segment at which the number
        of candidate methods shrinks, and stops once a single candidate
        remains. The leading 'Class.name' segment is dropped from the result.
        """
        if method.suppressed_reason():
            return []
        result = []
        path = self._path(method)
        prev_count = None
        current = self.__root
        for item in path:
            current = current.items[item]
            count = self._count_in_subtree(current)
            if prev_count is None or count < prev_count:
                result.append(item)
            if count < 2:
                break
            prev_count = count
        # result[0] is the 'Class.name' segment; the rest are type objects.
        return [arg.in_overload_name() for arg in result[1:]]

    def _count_in_subtree(self, node):
        """Return how many methods terminate in ``node``'s subtree."""
        count = 0
        if node.method is not None:
            count += 1
        for k, v in node.items.items():
            count += self._count_in_subtree(v)
        return count

    def print_tree(self):
        """Debug helper: print the whole tree to stdout."""
        self.print_node(self.__root, 0)

    def print_node(self, node, level):
        """Recursively print ``node``'s children (skipping non-overloaded
        top-level entries) with per-subtree method counts."""
        indent = ' ' * level
        for k, v in node.items.items():
            count = self._count_in_subtree(v)
            if level == 0 and count == 1:
                continue
            args = ''
            method = v.method
            if method is not None:
                args = self.args_to_disambiguate(method)
                args = '(%s)' % (', '.join(args),)
                method = method.name()
            print("%s- %s: %s: %s %s" % (indent, count, k, method, args))
            self.print_node(v, level + 1)
class RustMethodBinding:
    """Generates the Rust code (FFI declaration and safe wrapper) for one
    method of a bound C++ class."""

    def __init__(self, cls, model):
        # ``cls`` is the owning RustClassBinding; ``model`` the method model.
        self.__cls = cls
        self.__model = model
        self.is_ctor = model.is_ctor
        self.__self_param = Param(RustType(model.cls.name, model.const), 'self')
        # must be name neither self or this
        self.__ffi_self = Param(RustType(model.cls.name, model.const), 'self_')
        self.__generic_params = GenericParams(self.__model.params)

    def is_blocked(self):
        """Return True if the model marks this method as not to be bound."""
        return self.__model.is_blocked()

    def _returns_or_not(self, for_ffi=False):
        """Return the ' -> Type' suffix for the signature, or '' for void.

        Maps C++ return types to: raw pointers on the FFI side, and on the
        Rust side `&Self` for self-returning methods, owned/borrowed wrapper
        types, `WeakRef`/`Option` for non-owned pointers, or `String`.
        """
        if self.__model.returns.is_void():
            return ''
        returns = self.__model.returns.in_rust(for_ffi=True)
        wrapped = self.__model.wrapped_return_type(allows_ptr=True)
        if self.__model.maybe_returns_self():
            if for_ffi:
                returns = '*mut c_void'
            else:
                returns = '&Self'
        elif wrapped:
            if for_ffi:
                returns = '*mut c_void'
            else:
                # Strip the 'wx' prefix to get the Rust wrapper name.
                returns = wrapped[2:]
                if self.__model.returns.is_ref_to_binding():
                    returns = '%sIsOwned<false>' % (returns,)
                elif (self.is_ctor or
                        self.__model.returns.is_ptr_to_binding()):
                    if self.is_ctor:
                        returns = '%sIsOwned<OWNED>' % (returns,)
                    elif not self.__model.returns_owned():
                        if self.__model.returns_trackable():
                            returns = 'WeakRef<%s>' % (returns,)
                        else:
                            returns = 'Option<%sIsOwned<false>>' % (returns,)
        if self.__model.returns.is_str():
            returns = 'String'
        return ' -> %s' % (returns,)

    def lines(self, for_ffi=False):
        """Yield either the FFI declaration (`for_ffi=True`) or the full
        safe wrapper method; suppressed methods are emitted as comments."""
        pub_or_not = 'pub '
        gen_params = ''
        name = self.__model.name(for_ffi=True)
        if not for_ffi:
            if not self.is_ctor:
                # NOTE(review): the ternary is redundant here — inside this
                # branch `not self.is_ctor` is always true, so this always
                # assigns ''. Trait methods are emitted without `pub`.
                pub_or_not = '' if not self.is_ctor else 'pub '
            name = self._rust_method_name()
            if self.__generic_params.names:
                gen_params = '<%s>' % (
                    # presumably each entry is a (name, bound) pair — confirm
                    # against RustType.make_generic.
                    ', '.join('%s: %s' % p for p in self.__generic_params.names),
                )
        signature = '%sfn %s%s(%s)%s' % (
            pub_or_not,
            name,
            gen_params,
            self._rust_params(for_ffi=for_ffi),
            self._returns_or_not(for_ffi=for_ffi),
        )
        suppressed = self.__model.suppressed_reason()
        if suppressed:
            if for_ffi:
                body = '%s;' % (signature,)
            else:
                body = 'fn %s()' % (self.__model.name(),)
            yield '// %s: %s' % (
                suppressed,
                body,
            )
            return
        if for_ffi:
            yield '%s;' % (signature,)
        else:
            yield '%s {' % (signature,)
            body_lines = list(self._binding_body())
            for line in self._wrap_unsafe(body_lines):
                yield ' %s' % (line,)
            yield '}'

    def _binding_body(self):
        """Yield the wrapper body: per-parameter marshalling, then the FFI
        call with the return value wrapped as needed."""
        params = self.__model.params
        for param in params:
            marshalling = param.marshal()
            if marshalling:
                for line in marshalling:
                    yield '%s' % (line,)
        name = prefixed(self.__model.name(for_ffi=True), with_ffi=True)
        self_to_insert = None
        if self.__model.is_instance_method:
            is_mut_self = not self.__model.const
            self_param = self.__self_param.rust_ffi_ref(
                is_mut_self=is_mut_self,
            )
            self_to_insert = self_param
        call = '%s(%s)' % (
            name,
            self._call_params(params, self_to_insert),
        )
        yield self._wrap_return_type(call)

    def _call_params(self, params, self_to_insert):
        """Join parameter names for the FFI call, prepending the self
        expression for instance methods."""
        params = [p.name for p in params]
        if self_to_insert:
            params.insert(0, self_to_insert)
        return ', '.join(params)

    def _rust_method_name(self):
        """Return the snake_case Rust name, disambiguating overloads with
        type suffixes ('_with_' for ctors) and avoiding Rust keywords.

        Ctors of wxWindow subclasses special-case: the zero-arg overload
        becomes `new_2step`, the full overload plain `new`.
        """
        method_name = pascal_to_snake(self.__model.name(
            without_index=True,
        ))
        overloads = self.__cls.overloads
        if self.__model.is_ctor:
            method_name = 'new'
        if overloads.has_overload(self.__model):
            splitter = '_'
            arg_types = overloads.args_to_disambiguate(self.__model)
            if self.__model.is_ctor:
                if self.__cls.is_a('wxWindow'):
                    return 'new_2step' if len(arg_types) == 0 else 'new'
                splitter = '_with_'
            if len(arg_types) > 0:
                method_name += splitter + '_'.join(arg_types)
        method_name = non_keyword_name(method_name)
        return method_name

    def _rust_params(self, for_ffi=False):
        """Render the comma-separated parameter list, inserting the
        appropriate self parameter for instance methods."""
        params = self.__model.params.copy()
        if self.__model.is_instance_method:
            if for_ffi:
                params.insert(0, self.__ffi_self)
            else:
                params.insert(0, self.__self_param)
        return ', '.join(self._rust_param(p, for_ffi) for p in params)

    def _rust_param(self, param, for_ffi):
        """Render one 'name: Type' pair; on the safe side, self becomes
        '&self' and generic-bound params become (optional) references."""
        typename = param.type.in_rust(for_ffi=for_ffi)
        if not for_ffi:
            if param.is_self():
                return '&self'
            elif param.type.generic_name:
                typename = '&%s' % (param.type.generic_name,)
                if param.type.generic_option:
                    typename = 'Option<%s>' % (typename,)
        return '%s: %s' % (
            param.name,
            typename,
        )

    def _wrap_unsafe(self, lines):
        """Wrap the body lines in an `unsafe` block (inline for one line).

        NOTE(review): assumes at least one line — an empty list would raise
        IndexError on lines[0].
        """
        if len(lines) < 2:
            yield 'unsafe { %s }' % (lines[0],)
        else:
            yield 'unsafe {'
            for line in lines:
                yield ' %s' % (line,)
            yield '}'

    def _wrap_return_type(self, call):
        """Wrap the raw FFI call expression to produce the declared safe
        return type (String conversion, wrapper construction, WeakRef or
        Option for non-owned pointers, '&self' for fluent methods)."""
        if self.__model.returns.is_str():
            return 'wx_base::from_wx_string(%s)' % (call,)
        if self.__model.maybe_returns_self():
            return '%s; &self' % (call,)
        wrapped = self.__model.wrapped_return_type(allows_ptr=False)
        if wrapped:
            return '%sIsOwned(%s)' % (wrapped[2:], call)
        wrapped = self.__model.wrapped_return_type(allows_ptr=True)
        if wrapped:
            if self.__model.returns_owned():
                return '%s::from_ptr(%s)' % (wrapped[2:], call)
            elif self.__model.returns.is_ref_to_binding():
                return '%sIsOwned::from_ptr(%s)' % (wrapped[2:], call)
            elif self.__model.returns_trackable():
                return 'WeakRef::<%s>::from(%s)' % (wrapped[2:], call)
            else:
                return '%s::option_from(%s)' % (wrapped[2:], call)
        return call

    def _uses_ptr_type(self):
        # True if any parameter is a raw pointer type.
        return any(p.type.is_ptr() for p in self.__model.params)

    def is_non_virtual_override(self, cls):
        """Return True if this method non-virtually overrides one in ``cls``."""
        return self.__model.is_non_virtual_override(cls)
class GenericParams:
    """Derives generic type parameters for a method's binding parameters.

    Parameters that are pointers or const references to bound classes get a
    fresh single-letter generic name (W, W2, W3, ... on collision); the
    resulting generic descriptors are collected in ``names``.
    """

    def __init__(self, params):
        self.__used_names = {}
        self.names = self._make_params_generic(params)

    def _make_params_generic(self, params):
        """Build and store the list of generic descriptors for ``params``."""
        generics = []
        for param in params:
            ptr_to_binding = param.type.is_ptr_to_binding()
            # Keep the short-circuit: only probe const-ref when not a pointer.
            if ptr_to_binding or param.type.is_const_ref_to_binding():
                fresh_name = self._new_name_for(param.type)
                # Pointer parameters become optional on the Rust side.
                generics.append(param.type.make_generic(
                    fresh_name,
                    is_option=ptr_to_binding,
                ))
        self.names = generics
        return generics

    def _new_name_for(self, param_type):
        """Return a unique generic name based on the type's first letter."""
        base = param_type.in_overload_name()[0].upper()
        count = self.__used_names.get(base, 0) + 1
        self.__used_names[base] = count
        # First use keeps the bare letter; later uses get a numeric suffix.
        return base if count == 1 else '%s%s' % (base, count)
class CxxClassBinding:
def __init__(self, model, config):
self.__model = model
self.conditions = config.get('conditions')
self.__methods = [CxxMethodBinding(self, m) for m in model.methods]
def lines(self, is_cc=False):
yield '// CLASS: %s' % (self.__model.name,)
for line in self._dtor_lines(is_cc):
yield line
self.in_condition = None
for method in self.__methods:
for line in method.lines(is_cc):
yield line
if | |
<filename>cfnet/data.py
import os
import random
import h5py
import numpy as np
import math
from PIL import Image
from scipy import ndimage
from scipy.stats import truncnorm
def stretch_images(images, min_values, max_values):
    """Linearly stretch image intensities into [0, 1] per channel.

    Values below ``min_values`` clip to 0 and values above ``max_values``
    clip to 1.

    Bug fix: the ``min_values``/``max_values`` arguments were previously
    overwritten unconditionally, so callers' precomputed bounds (e.g.
    HDF5DataSet._min_values/_max_values) were silently ignored. They are
    now honored when provided; robust 0.1/99.9 percentiles are computed
    only as a fallback when either bound is None.

    Args:
        images: array of shape (H, W, C) (or a batch with a trailing
            channel axis) of float intensities.
        min_values: per-channel lower bounds, or None to estimate.
        max_values: per-channel upper bounds, or None to estimate.

    Returns:
        A new array of the same shape with values in [0, 1]; the input is
        not modified.
    """
    if min_values is None or max_values is None:
        # Robust per-channel bounds over the spatial axes.
        perc = np.percentile(images, [0.1, 99.9], axis=[0, 1])
        min_values = perc[0, :]
        max_values = perc[1, :]
    images_stretched = images - min_values
    images_stretched[images_stretched < 0] = 0
    images_stretched = images_stretched / ((max_values - min_values).astype('float32'))
    images_stretched[images_stretched > 1] = 1
    return images_stretched
def add_jitter(image, offset, image_size):
    """Randomly crop an ``image_size`` window out of a padded image.

    ``offset`` is the padding on each side; the crop origin is drawn
    uniformly from [0, 2*offset] independently per axis, which jitters the
    crop position for data augmentation.
    """
    shift_x = random.randint(0, 2*offset)
    shift_y = random.randint(0, 2*offset)
    return image[shift_x:shift_x + image_size,
                 shift_y:shift_y + image_size]
def crop_center(image, offset, image_size):
    """Deterministically crop the central ``image_size`` window.

    ``offset`` is the total padding (both sides combined), so half of it is
    trimmed from each border.
    """
    margin = int(offset/2)
    return image[margin:margin + image_size,
                 margin:margin + image_size]
class HDF5DataSet(object):
    """Mini-batch provider over an (HDF5-backed) image dataset.

    Wraps a flattened image array plus one-hot labels and serves shuffled
    batches with optional class balancing (oversampling minority classes),
    random jitter/center crop, rotation, flips and per-channel intensity
    stretching.

    NOTE(review): this code is Python-2 era — it relies on integer `/`
    division (list repetition by `_n_repeats`, slice indices from
    `_image_size_offest`), shuffles a `range(...)` object, and uses the
    long-removed `np.random.random_integers`; it will not run unmodified
    on Python 3 / modern NumPy.
    """

    def __init__(self, images, labels, label_list,
                 image_shape_full, jitter_offset=0, crop=False,
                 min_values=None, max_values=None,
                 calculate_min_max_values=False,
                 n_samples_for_min_max_estimate=-1,
                 balance_classes=True,
                 rotate_images=False,
                 flip_images=False,
                 stretch_images=True,
                 one_hot=True):
        """Build the dataset wrapper.

        Args:
            images: flattened images, one row per example (array or HDF5
                dataset).
            labels: one-hot label matrix of shape (n_examples, n_classes).
            label_list: class names, one per label column.
            image_shape_full: full (padded) image shape, e.g. [H, W, C].
            jitter_offset: padding per side available for random jitter.
            crop: if True, use a deterministic center crop instead of jitter.
            min_values/max_values: per-channel stretch bounds (may be None).
            calculate_min_max_values: estimate bounds from the data.
            n_samples_for_min_max_estimate: sample count for the estimate
                (-1 means use all examples).
            balance_classes: oversample so every class contributes equally.
            rotate_images/flip_images: enable those augmentations.
            stretch_images: enable intensity stretching (NOTE(review): this
                parameter shadows the module-level ``stretch_images``
                function inside __init__'s scope).
            one_hot: accepted for API symmetry; labels are assumed one-hot
                regardless (see TODO below).
        """
        # TODO: assumes labels are one-hot encoded! should change
        self._n_examples = len(images)
        # TODO: check that dtype is float
        self._image_dtype = images[0].dtype
        self._image_shape_full = image_shape_full
        self._jitter_offset = jitter_offset
        self._crop = crop
        self._rotate_images = rotate_images
        self._flip_images = flip_images
        self._stretch_images = stretch_images
        # self._images = images
        # Rows are Fortran-order flattened images; keep the source object
        # reshaped lazily per batch elsewhere for the *_hdf5 variants.
        self._images = np.reshape(images, [self._n_examples] + image_shape_full, order='F')
        self._labels = labels[:]
        self._epochs_completed = 0
        self._index_in_epoch = 0
        self._index_in_no_repeats = 0
        self._label_list = label_list
        self._n_classes = len(label_list)
        self._balance_classes = balance_classes
        if len(self._image_shape_full) < 3:
            # Grayscale: treat missing channel axis as depth 1.
            self._image_depth = 1
        else:
            self._image_depth = self._image_shape_full[2]
        # Output size after removing the jitter padding on both sides.
        self._image_size = image_shape_full[0] - 2*jitter_offset
        self._image_shape = [self._image_size, self._image_size, self._image_depth]
        # TODO: assumes both image_size and image_size full are even!
        self._image_size_offest = (image_shape_full[0] - self._image_size) / 2
        # Per-class example indices, derived from the one-hot columns.
        perm = np.arange(self._n_examples)
        self._ind = [list(perm[labels[:, i] == 1]) for i in range(self._n_classes)]
        self._n_samples_per_label = []
        for i in range(self._n_classes):
            self._n_samples_per_label.append(len(self._ind[i]))
        #self._ind = [[i for i in range(self._n_examples) if label_names[i] == l] for l in self._label_list]
        #self._n_samples_per_label = [len(ind_i) for ind_i in self._ind]
        self._ind_max_label = np.argmax(np.array(self._n_samples_per_label))
        self._max_n_samples = self._n_samples_per_label[self._ind_max_label]
        self._n_examples_in_epoch = self._n_examples
        if balance_classes:
            # Every class is oversampled up to the majority class's size.
            self._n_examples_in_epoch = self._max_n_samples*self._n_classes
            # How many times must all samples of under-represented classes
            # be resampled
            self._n_repeats = [self._max_n_samples / n for n in self._n_samples_per_label]
            # How many additional samples of under-represented classes must be
            # generated
            self._n_remainder = [self._max_n_samples % n for n in self._n_samples_per_label]
        self._one_hot = True
        # self._images_batch = np.zeros((self._n_examples_in_epoch,
        #                                self._image_size, self._image_size,
        #                                self._image_depth), dtype='uint8')
        self.shuffle_data()
        self._min_values = min_values
        self._max_values = max_values
        if calculate_min_max_values:
            self.calculate_percentiles(n_samples_for_min_max_estimate)

    @property
    def images(self):
        # Raw (reshaped) image storage.
        return self._images

    @property
    def labels(self):
        # One-hot label matrix.
        return self._labels

    @property
    def n_classes(self):
        return self._n_classes

    @property
    def n_examples(self):
        return self._n_examples

    @property
    def epochs_completed(self):
        # Number of full passes served via next_batch*().
        return self._epochs_completed

    def calculate_percentiles(self, n_samples_for_min_max_estimate):
        """Estimate per-channel 0.1/99.9 percentile stretch bounds.

        Uses all examples when ``n_samples_for_min_max_estimate`` is -1,
        otherwise a random subset of that size (indices sorted for HDF5
        access).
        """
        if n_samples_for_min_max_estimate == -1:
            images_temp = np.reshape(self._images[:],
                                     [self._n_examples] + self._image_shape_full, order='F')
        else:
            ind = list(np.random.choice(self._n_examples, n_samples_for_min_max_estimate,
                                        replace=False))
            ind.sort()
            images_temp = np.reshape(self._images[ind], [n_samples_for_min_max_estimate] +
                                     self._image_shape_full, order='F')
        # Percentiles over example + spatial axes leave one value per channel.
        perc = np.percentile(images_temp, [0.1, 99.9], axis=[0, 1, 2])
        self._min_values = perc[0, :]
        self._max_values = perc[1, :]

    def shuffle_data(self):
        '''Shuffle the ordering of the samples.'''
        if self._balance_classes:
            # Repeat each class's indices and top up with random extras so
            # all classes contribute _max_n_samples examples.
            self._perm = []
            for i in range(self._n_classes):
                self._perm += self._ind[i]*self._n_repeats[i]
                self._perm += random.sample(self._ind[i], self._n_remainder[i])
        else:
            self._perm = range(self._n_examples)
        # Pre-draw per-example augmentation decisions for the whole epoch.
        if self._flip_images:
            self._flip_ver = np.random.random_integers(0, 1, self._n_examples_in_epoch).astype('bool')
            self._flip_hor = np.random.random_integers(0, 1, self._n_examples_in_epoch).astype('bool')
        else:
            self._flip_ver = np.zeros((self._n_examples_in_epoch,), dtype='bool')
            self._flip_hor = np.zeros((self._n_examples_in_epoch,), dtype='bool')
        self._rot = np.random.random_integers(0, 3, self._n_examples_in_epoch)
        random.shuffle(self._perm)

    def reset_epoch(self):
        '''Reset the epoch counter.'''
        # Setting the cursor past the end forces a reshuffle on next batch.
        self._index_in_epoch = np.inf

    def reset_no_repeats(self):
        '''Reset the no_repeats epoch counter.'''
        self._index_in_no_repeats = np.inf

    def next_batch(self, batch_size):
        '''Return the next batch.'''
        start = self._index_in_epoch
        self._index_in_epoch += batch_size
        # Finished epoch
        if self._index_in_epoch > self._n_examples_in_epoch:
            self.shuffle_data()
            self._epochs_completed += 1
            self._index_in_epoch = batch_size
            start = 0
        end = self._index_in_epoch
        batch_ind = self._perm[start:end]
        images = self._images[batch_ind]
        labels = self._labels[batch_ind]
        return images, labels

    def next_batch_hdf5(self, batch_size):
        '''Return the next batch.

        HDF5 variant: fancy indexing on an HDF5 dataset requires unique,
        sorted indices, so repeated (oversampled) indices are read
        separately and concatenated; augmentations are then applied.
        '''
        start = self._index_in_epoch
        self._index_in_epoch += batch_size
        # Finished epoch
        if self._index_in_epoch > self._n_examples_in_epoch:
            self.shuffle_data()
            self._epochs_completed += 1
            self._index_in_epoch = batch_size
            start = 0
        end = self._index_in_epoch
        batch_ind = self._perm[start:end]
        # indices cannot have repeats, so do them separately
        bi_unique = set()
        bi_repeats = []
        for bi in batch_ind:
            if bi in bi_unique:
                bi_repeats.append(bi)
            else:
                bi_unique.add(bi)
        if bi_repeats:
            batch_ind = list(set(batch_ind))
        n_repeats = len(bi_repeats)
        # indices must be in increasing order for hdf5 access
        batch_ind.sort()
        images_temp = np.reshape(self._images[batch_ind], [batch_size - n_repeats] + self._image_shape_full, order='F')
        for bi in bi_repeats:
            images_temp = np.concatenate([images_temp,
                                          np.reshape(self._images[bi], [1] + self._image_shape_full, order='F')],
                                         axis=0)
        labels = self._labels[batch_ind]
        for bi in bi_repeats:
            labels = np.concatenate([labels, np.expand_dims(self._labels[bi], axis=0)], axis=0)
        # images_temp = stretch_images(images_temp, self._min_values, self._max_values)
        images = np.zeros((batch_size, self._image_size, self._image_size,
                           self._image_depth), dtype='float32')
        if self._rotate_images:
            for i in range(batch_size):
                # images[i] = np.rot90(images[i], self._rot[i])
                # NOTE(review): rotates with output aliased to the input
                # array — confirm ndimage.rotate supports in-place output.
                ndimage.rotate(images_temp[i], random.randint(0, 360), reshape=False,
                               output=images_temp[i], mode='reflect')
        if self._stretch_images:
            for i in range(batch_size):
                images_temp[i] = stretch_images(images_temp[i], self._min_values, self._max_values)
        if self._jitter_offset != 0:
            if self._crop:
                for i in range(batch_size):
                    images[i] = crop_center(images_temp[i], self._jitter_offset,
                                            self._image_size)
            else:
                for i in range(batch_size):
                    images[i] = add_jitter(images_temp[i], self._jitter_offset,
                                           self._image_size)
        else:
            # No jitter padding configured: take the centered window.
            image_size_end = self._image_size + self._image_size_offest
            for i in range(batch_size):
                images[i] = images_temp[i][self._image_size_offest:
                                           image_size_end,
                                           self._image_size_offest:
                                           image_size_end, :]
        del images_temp
        # Apply the pre-drawn flips for this epoch position.
        for i in range(batch_size):
            if self._flip_hor[i]:
                images[i] = np.fliplr(images[i])
            if self._flip_ver[i]:
                images[i] = np.flipud(images[i])
        return images, labels

    def next_batch_no_repeats(self, batch_size):
        '''Return the next batch without repeats.'''
        start = self._index_in_no_repeats
        self._index_in_no_repeats += batch_size
        # Processed all unique samples
        if self._index_in_no_repeats > self._n_examples:
            self._index_in_no_repeats = batch_size
            start = 0
        end = self._index_in_no_repeats
        labels = self._labels[start:end]
        images = self._images[start:end]
        return images, labels

    def next_batch_no_repeats_hdf5(self, batch_size):
        '''Return the next batch without repeats.

        Sequential (unshuffled, unbalanced) pass; applies only stretching
        and the centered crop — no jitter, rotation or flips.
        '''
        start = self._index_in_no_repeats
        self._index_in_no_repeats += batch_size
        # Processed all unique samples
        if self._index_in_no_repeats > self._n_examples:
            self._index_in_no_repeats = batch_size
            start = 0
        end = self._index_in_no_repeats
        images_temp = np.reshape(self._images[start:end], [batch_size] + self._image_shape_full, order='F')
        labels = self._labels[start:end]
        # images_temp = stretch_images(images_temp, self._min_values, self._max_values)
        images = np.zeros((batch_size, self._image_size, self._image_size,
                           self._image_depth), dtype='float32')
        if self._stretch_images:
            for i in range(batch_size):
                images_temp[i] = stretch_images(images_temp[i], self._min_values, self._max_values)
        image_size_end = self._image_size + self._image_size_offest
        for i in range(batch_size):
            images[i] = images_temp[i][self._image_size_offest:
                                       image_size_end,
                                       self._image_size_offest:
                                       image_size_end, :]
        return images, labels

    def save_example_images(self, output_dir, channel_order=[0, 1]):
        """Save one example image per class as an RGB PNG named after the
        class label, mapping dataset channels via ``channel_order``.

        NOTE(review): mutable default argument ``channel_order=[0, 1]`` —
        harmless here only because it is never mutated.
        """
        # TODO: this code is no longer correct! (10/30)
        for ind in self._ind:
            # Save as RGB
            img = np.zeros((self._image_size, self._image_size, 3), dtype='uint8')
            #img_temp = np.reshape(self._images[ind[0]] / self._intensity_norm_constant, self._image_shape, order='F')
            img_temp = np.reshape(self._images[ind[0]] / 100, self._image_shape_full, order='F')
            img_temp[img_temp > 1.0] = 1.0
            img_temp = (255*img_temp).astype('uint8')
            label = self._label_list[np.argmax(self._labels[ind[0]])]
            for i, c in zip(range(len(channel_order)), channel_order):
                img[:, :, i] = img_temp[:, :, c]
            Image.fromarray(img).save(os.path.join(output_dir, label + '.png'))

    def get_examples_of_classes(self):
        """Return one randomly chosen, center-cropped (and optionally
        stretched) example image per class, with the class indices."""
        images = np.zeros((self._n_classes, self._image_size, self._image_size,
                           self._image_depth), dtype='float32')
        labels = range(self._n_classes)
        image_size_end = self._image_size + self._image_size_offest
        for c in range(self._n_classes):
            idx = random.choice(self._ind[c])
            images[c] = np.reshape(self._images[idx], [1] + self._image_shape_full, order='F')[0, self._image_size_offest:
                                                                                               image_size_end,
                                                                                               self._image_size_offest:
                                                                                               image_size_end, :]
            if self._stretch_images:
                images[c] = stretch_images(images[c], self._min_values, self._max_values)
        return images, labels
def read_image_patch_data_set_hdf5(data_set_filename,
                                   images_key,
                                   labels_key,
                                   label_list_key,
                                   image_shape,
                                   jitter_offset=0,
                                   crop=False,
                                   min_values=None,
                                   max_values=None,
                                   calculate_min_max_values=False,
                                   rotate_images=False,
                                   flip_images=False,
                                   stretch_images=True,
                                   one_hot=True,
                                   balance_classes=True):
    """Open an HDF5 image-patch file and wrap it in an HDF5DataSet.

    Args:
        data_set_filename: path to the HDF5 file.
        images_key: HDF5 key of the flattened image data.
        labels_key: HDF5 key of the one-hot label matrix.
        label_list_key: root-attribute key holding the class name list.
        image_shape: full (padded) image shape, e.g. [H, W, C].
        Remaining keyword arguments are forwarded to HDF5DataSet unchanged.

    Returns:
        An HDF5DataSet backed by the (still open) file's datasets.
    """
    # Open read-only: this loader never writes, and h5py's legacy default
    # mode ('a') would create/modify the file — newer h5py requires an
    # explicit mode anyway.
    data_set = h5py.File(data_set_filename, 'r')
    return HDF5DataSet(data_set[images_key], data_set[labels_key],
                       list(data_set.attrs[label_list_key]),
                       image_shape,
                       jitter_offset=jitter_offset,
                       crop=crop,
                       min_values=min_values,
                       max_values=max_values,
                       calculate_min_max_values=calculate_min_max_values,
                       rotate_images=rotate_images,
                       flip_images=flip_images,
                       stretch_images=stretch_images,
                       one_hot=one_hot,
                       balance_classes=balance_classes)
def subsample_hdf5_data_set(data_set_filename,
                            images_key,
                            labels_key,
                            n_samples,
                            output_filename=None):
    """Write a class-balanced random subsample of an HDF5 dataset.

    Draws up to ``n_samples`` examples per class (all of them when a class
    is smaller) from ``data_set_filename`` and writes the selected images,
    one-hot labels and all root attributes to ``output_filename``.

    Args:
        data_set_filename: path to the source HDF5 file.
        images_key: HDF5 key of the flattened image data (2-D, one row per
            example).
        labels_key: HDF5 key of the one-hot label matrix.
        n_samples: maximum number of examples to keep per class.
        output_filename: destination path; defaults to
            ``<source>_subsampled_<n_samples>.hdf5``.
    """
    if not output_filename:
        output_filename = os.path.splitext(data_set_filename)[0] + \
            '_subsampled_' + str(n_samples) + '.hdf5'
    # Open the source read-only: it is never modified, and relying on
    # h5py's legacy default mode could create/append (newer h5py requires
    # an explicit mode).
    data_set = h5py.File(data_set_filename, 'r')
    labels = data_set[labels_key]
    n_examples = len(labels)
    n_classes = data_set[labels_key].shape[1]
    n_samples_per_label = [n_samples] * n_classes
    # Per-class example indices from the one-hot columns.
    perm = np.arange(n_examples)
    ind = [perm[labels[:, i] == 1] for i in range(n_classes)]
    for i in range(n_classes):
        n = len(ind[i])
        # Classes smaller than n_samples contribute all of their examples.
        if n < n_samples:
            n_samples_per_label[i] = n
    n_total_samples = sum(n_samples_per_label)
    data_dim = data_set[images_key].shape[1]
    data = np.zeros((n_total_samples, data_dim))
    labels = np.zeros((n_total_samples, n_classes), dtype='uint8')
    idx = 0
    for i in range(n_classes):
        # random.sample needs a plain sequence; indices must be sorted
        # (increasing) for HDF5 fancy indexing.
        ind_sub = random.sample(list(ind[i]), n_samples_per_label[i])
        ind_sub.sort()
        idx_new = idx + n_samples_per_label[i]
        data[idx:idx_new] = data_set[images_key][ind_sub]
        labels[idx:idx_new] = data_set[labels_key][ind_sub]
        idx = idx_new
    f_out = h5py.File(output_filename, 'w')
    f_out.create_dataset(images_key, data=data)
    f_out.create_dataset(labels_key, data=labels)
    # Preserve all root attributes (e.g. the class-name list).
    for a in data_set.attrs.keys():
        f_out.attrs.create(a, data_set.attrs[a])
    f_out.close()
    data_set.close()
class GaussianGenerator(object):
# NOTE: All angles are considered as radians.
# TODO: Check that x and y are not swapped.
def __init__(self, x_hidden, y_hidden, sigma_hidden, sigma, n_points,
n_points_var):
self.x_hidden = x_hidden
self.y_hidden = y_hidden
self.sigma_hidden = sigma_hidden
self.sigma = sigma
self.n_points = n_points
self.n_points_var = n_points_var
@classmethod
def random(cls, x_max=1, y_max=1, sigma_hidden=1.0,
sigma_max=1.0, n_points=20, n_points_var=3):
x_hidden = random.randint(-x_max, x_max)
y_hidden = random.randint(-y_max, y_max)
theta = 2*np.pi*random.random()
sigma_hidden = np.array([(sigma_hidden, 0), (0, sigma_hidden)], dtype='float')
sigma_x = math.sqrt(sigma_max)*random.random()
sigma_y = math.sqrt(sigma_max)*random.random()
sigma = np.array([(sigma_x, 0), (0, sigma_y)], dtype='float')
rot_matrix = np.array([(math.cos(theta), -math.sin(theta)),
(math.sin(theta), math.cos(theta))])
sigma = np.dot(rot_matrix, np.dot(sigma, np.dot(sigma, np.linalg.inv(rot_matrix))))
return cls(x_hidden, y_hidden, sigma_hidden, sigma, n_points,
n_points_var)
def generate_points(self, theta, int_locations=True):
offset = np.random.multivariate_normal([self.x_hidden, self.y_hidden],
self.sigma_hidden,
size=1)
x_mean = offset[0, 0]
y_mean = offset[0, 1]
r_mean = math.sqrt(x_mean**2 + y_mean**2)
theta_mean = math.atan2(y_mean, x_mean) + theta
x_mean = r_mean*math.cos(theta_mean)
| |
import os
import unittest
import numpy as np
import pandas as pd
from swamp.utils import create_tempfile
from swamp.wrappers.gesamt import Gesamt
class MockGesamt(Gesamt):
    """A class to mock :py:obj:`~swamp.wrappers.gesamt.Gesamt` for testing purposes.

    The external ``gesamt`` binary is never executed: :py:func:`run` substitutes
    canned program output captured from real runs, so the wrapper's parsing
    logic can be exercised offline.
    """

    def run(self):
        """Override :py:func:`~swamp.wrappers.gesamt.Gesamt.run` for testing purposes.

        Depending on ``self.mode`` this loads canned gesamt output:

        * ``search-archive`` -- a hit table written to a temporary file whose
          path is stored in :py:attr:`hits_out`.
        * ``alignment`` -- a pairwise log (two input structures) or a multiple
          alignment log (more than two) stored in :py:attr:`logcontents`.

        Afterwards ``self.get_scores()`` parses whichever output was loaded.
        """
        if self.mode == "search-archive":
            # Canned hit list as produced by a gesamt archive scan.
            file_contents = """# Hit PDB Chain Q-score r.m.s.d Seq. Nalign nRes File
# No. code Id Id. name
1 A 1.0000 0.0000 1.0000 47 47 6qd5_5A_17A.pdb
2 A 0.4736 0.0000 1.0000 32 46 6qd5_5A_15A.pdb
3 A 0.4736 0.0000 1.0000 32 46 6qd5_5A_9A.pdb
"""
            self.hits_out = create_tempfile(content=file_contents)
        elif self.mode == "alignment":
            if len(self.pdbin) == 2:
                # Canned log of a pairwise (FIXED vs MOVING) superposition.
                self.logcontents = """
GESAMT: General Efficient Structural Alignment of Macromolecular Targets
------------------------------------------------------------------------
Version 1.15 of 25-Jan-2017, built with MMDB v.2.0.17
###############################################################
###############################################################
###############################################################
### CCP4 7.0.073: Gesamt version 7.0.073 : ##
###############################################################
User: filo Run date: 22/ 2/2020 Run time: 13:12:47
Please reference: Collaborative Computational Project, Number 4. 2011.
"Overview of the CCP4 suite and current developments". Acta Cryst. D67, 235-242.
as well as any specific reference in the program write-up.
$TEXT:Reference: $$Please cite$$
<NAME> (2012). Enhanced fold recognition using efficient
short fragment clustering. J. Mol. Biochem. 1(2) 76-85.
$$
<!--SUMMARY_BEGIN-->
===============================================================================
... reading FIXED structure : file '/mnt/sdb1/SWAMP_benchmark/3zux/3zux.pdb', selection '*'
308 atoms selected
... reading MOVING structure: file '/mnt/sdb1/SWAMP_benchmark/5mlz/5mlz.pdb', selection '*'
355 atoms selected
<!--SUMMARY_END-->
===============================================================================
CPU stage 1 (clustering): 0.06758 secs
CPU stage 2 (refinement): 0.02398 secs
===== Structures
Ref. | Nres | File (selection)
========+========+=============================================
FIXED | 308 | /mnt/sdb1/SWAMP_benchmark/3zux/3zux.pdb (*)
MOVING | 355 | /mnt/sdb1/SWAMP_benchmark/5mlz/5mlz.pdb (*)
have been aligned and superposed.
===============================================================================
SUPERPOSITION
~~~~~~~~~~~~~
Q-score : 0.029
RMSD : 3.357
Aligned residues : 84
Sequence Id: : 0.048
Transformation matrix for FIXED structure is identity.
Transformation matrix for MOVING structure:
Rx Ry Rz T
0.95978 0.09955 0.26253 -15.08154
-0.23123 -0.25016 0.94019 84.32709
0.15927 -0.96307 -0.21708 83.61516
Direction cosines of the rotation axis: -0.98383 0.05338 -0.17098
Rotation angle : 104.69832
in fractional coordinates of FIXED structure:
Rx Ry Rz T
0.95978 0.09955 0.58542 -0.20567
-0.23123 -0.25016 2.09655 1.14997
0.07142 -0.43189 -0.21708 0.51135
in fractional coordinates of MOVING structure:
Rx Ry Rz T
0.95978 0.16017 0.27547 -0.16597
-0.14371 -0.25016 0.61314 0.57675
0.15178 -1.47678 -0.21708 0.87693
-------------------------------------------------------------------------------
CENTROIDS
~~~~~~~~~ Orthogonal Fractional
X Y Z XF YF ZF
FIXED 18.19964 69.74116 81.11004 0.24819 0.95106 0.49603
MOVING 18.75503 20.49314 -5.04154 0.20639 0.14016 -0.05287
Distance between centroids : 99.23592
Direction cosines of vector between centroids: 0.00560 -0.49627 -0.86815
Angle between rotation axis and vector between centroids: 83.31328
-------------------------------------------------------------------------------
CCP4 format rotation-translation operator
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Polar angles (omega,phi,kappa) : 99.84487 176.89438 104.69832
Euler angles (alpha,beta,gamma) : 74.39866 102.53743 -99.39011
Orthogonal translation (Angstrom): -15.08154 84.32709 83.61516
===============================================================================
RESIDUE ALIGNMENT
~~~~~~~~~~~~~~~~~
$$
.-------------.------------.-------------.
| FIXED | Dist.(A) | MOVING |
|-------------+------------+-------------|
| | | - A:PHE -2 |
| | | + A:GLN -1 |
| | | . A:SER 0 |
|H- A:ALA 307 | | |
|H+ A:LYS 308 | | |
| - A:ALA 309 | | |
`-------------'------------'-------------'
Notations:
S/H residue belongs to a strand/helix
+/-/. hydrophylic/hydrophobic/neutral residue
** identical residues matched: similarity 5
++ similarity 4
== similarity 3
-- similarity 2
:: similarity 1
.. dissimilar residues: similarity 0
Gesamt: Normal termination
"""
            else:
                # Canned log of a multiple (three-structure) alignment.
                self.logcontents = """
GESAMT: General Efficient Structural Alignment of Macromolecular Targets
------------------------------------------------------------------------
Version 1.15 of 25-Jan-2017, built with MMDB v.2.0.17
###############################################################
###############################################################
###############################################################
### CCP4 7.0.073: Gesamt version 7.0.073 : ##
###############################################################
User: filo Run date: 22/ 2/2020 Run time: 13:14:47
Please reference: Collaborative Computational Project, Number 4. 2011.
"Overview of the CCP4 suite and current developments". Acta Cryst. D67, 235-242.
as well as any specific reference in the program write-up.
$TEXT:Reference: $$Please cite$$
<NAME> (2012). Enhanced fold recognition using efficient
short fragment clustering. J. Mol. Biochem. 1(2) 76-85.
$$
<!--SUMMARY_BEGIN-->
===========================================================
... reading file '/mnt/sdb1/SWAMP_benchmark/3zux/3zux.pdb', selection '*':
308 atoms selected
... reading file '/mnt/sdb1/SWAMP_benchmark/5mlz/5mlz.pdb', selection '*':
355 atoms selected
... reading file '/mnt/sdb1/SWAMP_benchmark/4njn/4njn.pdb', selection '*':
182 atoms selected
Parameter Q-score: 3.000 angstroms
Weighted superposition is not used
Number of threads used: 1
CPU stage 1 (cross-alignments): 0.20074 secs
CPU stage 2 (refinement): 0.00997 secs
===== Structures
Ref. | Nres | File (selection)
========+========+=============================================
S001 | 308 | /mnt/sdb1/SWAMP_benchmark/3zux/3zux.pdb (*)
S002 | 355 | /mnt/sdb1/SWAMP_benchmark/5mlz/5mlz.pdb (*)
S003 | 182 | /mnt/sdb1/SWAMP_benchmark/4njn/4njn.pdb (*)
have been aligned and superposed.
===== Superposition matrices (orthogonal):
____________________________________________________________________
(o) For structure S001 [/mnt/sdb1/SWAMP_benchmark/3zux/3zux.pdb(*)]:
Rx Ry Rz T
1.000 -0.000 -0.000 -0.000
0.000 1.000 0.000 0.000
0.000 0.000 1.000 0.000
____________________________________________________________________
(o) For structure S002 [/mnt/sdb1/SWAMP_benchmark/5mlz/5mlz.pdb(*)]:
Rx Ry Rz T
0.817 -0.080 -0.571 -24.547
0.575 0.189 0.796 64.490
0.044 -0.979 0.200 86.418
____________________________________________________________________
(o) For structure S003 [/mnt/sdb1/SWAMP_benchmark/4njn/4njn.pdb(*)]:
Rx Ry Rz T
0.810 -0.569 -0.144 31.572
0.510 0.803 -0.309 76.523
0.292 0.177 0.940 34.279
===== Superposition matrices (fractional):
===== Scores achieved:
quality Q: 0.0099 (normalised to [0...1])
r.m.s.d: 2.1311 (A)
Nalign: 44 (residues)
______________________________________________________
(o) pairwise Q-scores (consensus Q-score on diagonal):
S001 S002 S003
.------------------------
S001| 0.045 0.002 0.006
S002| 0.002 0.036 0.005
S003| 0.006 0.005 0.101
_______________________________________________________
(o) pairwise r.m.s.d. (consensus r.m.s.d. on diagonal):
S001 S002 S003
.------------------------
S001| 2.212 4.225 3.234
S002| 4.225 2.366 3.545
S003| 3.234 3.545 1.770
_____________________
(o) pairwise seq. Id:
S001 S002 S003
.------------------------
S001| 1.000 0.091 0.091
S002| 0.091 1.000 0.000
S003| 0.091 0.000 1.000
===== Residue alignment:
Disp. | | S001 | | S002 | | S003
-------+-+------------+-+------------+-+------------
| | | | A:PHE -2 | |
| | | | A:GLN -1 | |
| | | | A:SER 0 | |
| | | | A:MET 1 | |
| | A:ALA 309 | | | |
-------'-'------------'-'------------'-'------------
Gesamt: Normal termination
"""
        # Parse whichever canned output was loaded above (runs for every mode,
        # so search-archive results are parsed into the summary table as well).
        self.get_scores()
class GesamtWrapperTestCase(unittest.TestCase):
def test_1(self):
    """Pairwise alignment: command line and parsed superposition scores."""
    wrapper = MockGesamt(workdir='/empty/path/workdir', mode='alignment', pdbout='/empty/path/pdbout',
                         pdbin=('/empty/path/pdb_a', '/empty/path/pdb_b'))
    self.assertIsNone(wrapper.keywords)
    gesamt_exe = os.path.join(os.environ['CCP4'], 'bin', 'gesamt')
    expected_cmd = [gesamt_exe, '/empty/path/pdb_a', '/empty/path/pdb_b', '-o', '/empty/path/pdbout']
    self.assertListEqual(wrapper.cmd, expected_cmd)
    wrapper.run()
    scores = (wrapper.qscore, wrapper.rmsd, wrapper.seq_id, wrapper.n_align)
    self.assertTupleEqual(scores, (0.029, 3.357, 0.048, 84))
def test_2(self):
    """Multiple alignment (three structures): command line and parsed scores."""
    wrapper = MockGesamt(workdir='/empty/path/workdir', mode='alignment', pdbout='/empty/path/pdbout',
                         pdbin=('/empty/path/pdb_a', '/empty/path/pdb_b', '/empty/path/pdb_c'))
    expected_cmd = [os.path.join(os.environ['CCP4'], 'bin', 'gesamt'),
                    '/empty/path/pdb_a', '/empty/path/pdb_b', '/empty/path/pdb_c',
                    '-o', '/empty/path/pdbout']
    self.assertListEqual(wrapper.cmd, expected_cmd)
    wrapper.run()
    scores = (wrapper.qscore, wrapper.rmsd, wrapper.seq_id, wrapper.n_align)
    # seq_id is np.nan for multiple alignments (identity-based tuple compare).
    self.assertTupleEqual(scores, (0.0099, 2.1311, np.nan, 44))
def test_3(self):
    """A single input structure cannot be aligned: error flag set, no command."""
    wrapper = MockGesamt(workdir='/empty/path/workdir', mode='alignment', pdbout='/empty/path/pdbout',
                         pdbin=('/empty/path/pdb_a',))
    self.assertIsNone(wrapper.cmd)
    self.assertTrue(wrapper.error)
def test_4(self):
    """Archive search: command line, tracked temp files and parsed hit table."""
    wrapper = MockGesamt(workdir='/empty/path/workdir', mode='search-archive', pdbin='/empty/path/pdbin',
                         min2=0.7, gesamt_archive='/empty/path/gesamt-archive',
                         hits_out='/empty/path/out.hits', nthreads=15, min1=0.4)
    expected_cmd = [os.path.join(os.environ['CCP4'], 'bin', 'gesamt'), '/empty/path/pdbin',
                    '-archive', '/empty/path/gesamt-archive', '-o', '/empty/path/out.hits',
                    '-nthreads=15', '-min1=0.4', '-min2=0.7']
    self.assertListEqual(wrapper.cmd, expected_cmd)
    self.assertListEqual(wrapper._filthy_files, ['/empty/path/out.hits'])
    wrapper.run()
    rows = [['1.0000', '0.0000', '1.0000', '47', '47', '6qd5_5A_17A.pdb'],
            ['0.4736', '0.0000', '1.0000', '32', '46', '6qd5_5A_15A.pdb'],
            ['0.4736', '0.0000', '1.0000', '32', '46', '6qd5_5A_9A.pdb']]
    expected_df = pd.DataFrame(rows)
    expected_df.columns = ["qscore", "rmsd", "seq_id", "n_align", "n_res", "fname"]
    # Re-raise pandas assertion failures as unittest failures.
    try:
        pd.testing.assert_frame_equal(expected_df, wrapper.summary_results)
    except AssertionError as e:
        raise self.failureException(e)
    self.addCleanup(os.remove, wrapper.hits_out)
def test_5(self):
    """Archive search without an input PDB sets the error flag."""
    wrapper = MockGesamt(workdir='/empty/path/workdir', mode='search-archive', pdbin=None, min2=0.7,
                         nthreads=15, gesamt_archive='/empty/path/gesamt-archive',
                         hits_out='/empty/path/out.hits', min1=0.4)
    self.assertTrue(wrapper.error)
def test_6(self):
    """An unrecognised mode sets the error flag."""
    wrapper = MockGesamt(workdir='/empty/path/workdir', mode='unknown', pdbin=None, min2=0.7,
                         nthreads=15, gesamt_archive='/empty/path/gesamt-archive',
                         hits_out='/empty/path/out.hits', min1=0.4)
    self.assertTrue(wrapper.error)
def test_7(self):
    """Archive creation: command line built from the PDB directory."""
    wrapper = MockGesamt(workdir='/empty/path/workdir', mode='make-archive',
                         pdb_archive='/empty/path/pdb-archive',
                         gesamt_archive='/empty/path/gesamt-archive')
    expected_cmd = [os.path.join(os.environ['CCP4'], 'bin', 'gesamt'), '--make-archive',
                    '/empty/path/gesamt-archive', '-pdb', '/empty/path/pdb-archive']
    self.assertListEqual(wrapper.cmd, expected_cmd)
def test_8(self):
    """Archive creation without a PDB directory: error flag set, no command."""
    wrapper = MockGesamt(workdir='/empty/path/workdir', mode='make-archive', pdb_archive=None,
                         gesamt_archive='/empty/path/gesamt-archive')
    self.assertIsNone(wrapper.cmd)
    self.assertTrue(wrapper.error)
def test_9(self):
    """A bogus mode sets the error flag."""
    wrapper = MockGesamt(workdir='/empty/path/workdir', mode='dummy-mode', pdb_archive=None,
                         gesamt_archive='/empty/path/gesamt-archive')
    self.assertTrue(wrapper.error)
def test_10(self):
    """Parsing empty log contents sets the error flag."""
    wrapper = MockGesamt(workdir='/empty/path/workdir', mode='alignment', pdbout='/empty/path/pdbout',
                         pdbin=('/empty/path/pdb_a', '/empty/path/pdb_b'))
    wrapper.logcontents = b''
    wrapper.get_scores()
    self.assertTrue(wrapper.error)
def test_11(self):
gesamt = MockGesamt(workdir='/empty/path/workdir', mode='alignment', pdbout='/empty/path/pdbout',
pdbin=('/empty/path/pdb_a', '/empty/path/pdb_b'))
gesamt.logcontents = """ GESAMT: General Efficient Structural Alignment of Macromolecular Targets
------------------------------------------------------------------------
Version 1.16 of 14-Jan-2020, built with MMDB v.2.0.20
###############################################################
###############################################################
###############################################################
### CCP4 7.1.000: Gesamt version 7.1.000 : ##
###############################################################
User: filo Run date: 23/ 2/2020 Run time: 21:28:28
Please reference: Collaborative Computational Project, Number 4. 2011.
"Overview of the CCP4 suite and current developments". Acta Cryst. D67, 235-242.
as well as any specific reference in the program write-up.
$TEXT:Reference: $$Please cite$$
<NAME> (2012). Enhanced fold recognition using efficient
short fragment clustering. J. Mol. Biochem. 1(2) 76-85.
$$
<!--SUMMARY_BEGIN-->
===============================================================================
... reading FIXED structure : file '/home/filo/opt/CCP4/ccp4-7.0/lib/py2/swamp/library/db/pdb/5fvn_27A_29A.pdb', selection '*'
0 atoms selected with warning (rc=15)
***** ERROR #15 READ:
File can not be opened.
crystal data not found
... reading MOVING structure: file '/home/filo/opt/CCP4/ccp4-7.0/lib/py2/swamp/library/db/pdb/5fvn_23A_25A.pdb', selection '*'
0 atoms selected with warning (rc=15)
***** ERROR #15 READ:
File can not be opened.
crystal data not found
<!--SUMMARY_END-->
===============================================================================
CPU stage | |
# <gh_stars>10-100
import tensorflow as tf
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
from plotting import newfig, savefig
import matplotlib.gridspec as gridspec
import seaborn as sns
import time
from utilities import neural_net, fwd_gradients, heaviside, \
tf_session, mean_squared_error, relative_error
class HiddenPathways:
    """Physics-informed neural network (TF1 graph mode) that fits an ODE
    system to partially observed time-series data, learning both the network
    weights and a subset of log-parameterised ODE constants.

    NOTE(review): the equations in :py:meth:`SysODE` appear to follow a
    glucose-insulin dynamics model with a meal-intake forcing term -- confirm
    against the original reference.
    """

    # Initialize the class
    def __init__(self, t_data, S_data, t_eqns, layers, meal_tq):
        """Build the computation graph: placeholders, networks, losses, optimizers.

        :param t_data: observation times, 2-D array of shape (N_data, 1) -- TODO confirm
        :param S_data: species trajectories, shape (N_data, D); only column 2 is
            used as training data, the rest anchor the auxiliary loss
        :param t_eqns: collocation times where the ODE residual is enforced
        :param layers: layer sizes for the neural network
        :param meal_tq: pair (meal times, meal quantities), each 2-D arrays
        """
        self.D = S_data.shape[1]
        # Time range used to rescale inputs to [-1, 1].
        self.t_min = t_data.min(0)
        self.t_max = t_data.max(0)
        # self.S_scale = tf.Variable(np.array(self.D*[1.0]), dtype=tf.float32, trainable=False)
        # Per-species scale used to normalise the losses.
        self.S_scale = S_data.std(0)
        # data on all the species (only some are used as input)
        self.t_data, self.S_data = t_data, S_data
        self.t_eqns = t_eqns
        # layers
        self.layers = layers
        # Meal times rescaled to the same [-1, 1] domain as the network input.
        self.mt = 2.0*(meal_tq[0] - self.t_min)/(self.t_max - self.t_min) - 1.0
        self.mq = meal_tq[1]
        # --- Fixed (non-trainable) ODE parameters, held at literature values.
        # self.k = tf.Variable(1.0/120.0, dtype=tf.float32, trainable=False)
        self.Rm = tf.Variable(209.0/100.0, dtype=tf.float32, trainable=False)
        self.Vg = tf.Variable(10.0, dtype=tf.float32, trainable=False)
        self.C1 = tf.Variable(300.0/100.0, dtype=tf.float32, trainable=False)
        self.a1 = tf.Variable(6.6, dtype=tf.float32, trainable=False)
        # self.Ub = tf.Variable(72.0/100.0, dtype=tf.float32, trainable=False)
        # self.C2 = tf.Variable(144.0/100.0, dtype=tf.float32, trainable=False)
        # self.U0 = tf.Variable(4.0/100.0, dtype=tf.float32, trainable=False)
        # self.Um = tf.Variable(90.0/100.0, dtype=tf.float32, trainable=False)
        # self.C3 = tf.Variable(100.0/100.0, dtype=tf.float32, trainable=False)
        # self.C4 = tf.Variable(80.0/100.0, dtype=tf.float32, trainable=False)
        self.Vi = tf.Variable(11.0, dtype=tf.float32, trainable=False)
        self.E = tf.Variable(0.2, dtype=tf.float32, trainable=False)
        self.ti = tf.Variable(100.0, dtype=tf.float32, trainable=False)
        # self.beta = tf.Variable(1.772, dtype=tf.float32, trainable=False)
        # self.Rg = tf.Variable(180.0/100.0, dtype=tf.float32, trainable=False)
        # self.alpha = tf.Variable(7.5, dtype=tf.float32, trainable=False)
        self.Vp = tf.Variable(3.0, dtype=tf.float32, trainable=False)
        # self.C5 = tf.Variable(26.0/100.0, dtype=tf.float32, trainable=False)
        self.tp = tf.Variable(6.0, dtype=tf.float32, trainable=False)
        # self.td = tf.Variable(12.0, dtype=tf.float32, trainable=False)
        # --- Trainable ODE parameters, learned in log space so the
        # exponentiation below keeps them positive.
        self.logk = tf.Variable(-6.0, dtype=tf.float32, trainable=True)
        # self.logRm = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        # self.logVg = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        # self.logC1 = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        # self.loga1 = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        self.logUb = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        self.logC2 = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        self.logU0 = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        self.logUm = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        self.logC3 = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        self.logC4 = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        # self.logVi = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        # self.logE = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        # self.logti = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        self.logbeta = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        self.logRg = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        self.logalpha = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        # self.logVp = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        self.logC5 = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        # self.logtp = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        self.logtd = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        # Variables optimised by the dedicated parameter optimizer.
        self.var_list_eqns = [self.logk, self.logUb,
                              self.logC2, self.logU0, self.logUm, self.logC3, self.logC4,
                              self.logbeta, self.logRg, self.logalpha, self.logC5,
                              self.logtd]
        # Map the log-space variables back to positive parameter values.
        self.k = tf.exp(self.logk)
        # self.Rm = tf.exp(self.logRm)
        # self.Vg = tf.exp(self.logVg)
        # self.C1 = tf.exp(self.logC1)
        # self.a1 = tf.exp(self.loga1)
        self.Ub = tf.exp(self.logUb)
        self.C2 = tf.exp(self.logC2)
        self.U0 = tf.exp(self.logU0)
        self.Um = tf.exp(self.logUm)
        self.C3 = tf.exp(self.logC3)
        self.C4 = tf.exp(self.logC4)
        # self.Vi = tf.exp(self.logVi)
        # self.E = tf.exp(self.logE)
        # self.ti = tf.exp(self.logti)
        self.beta = tf.exp(self.logbeta)
        self.Rg = tf.exp(self.logRg)
        self.alpha = tf.exp(self.logalpha)
        # self.Vp = tf.exp(self.logVp)
        self.C5 = tf.exp(self.logC5)
        # self.tp = tf.exp(self.logtp)
        self.td = tf.exp(self.logtd)
        # placeholders for data
        self.t_data_tf = tf.placeholder(tf.float32, shape=[None, 1])
        self.S_data_tf = tf.placeholder(tf.float32, shape=[None, self.D])
        self.t_eqns_tf = tf.placeholder(tf.float32, shape=[None, 1])
        self.mt_tf = tf.placeholder(tf.float32, shape=[None, self.mt.shape[1]])
        self.mq_tf = tf.placeholder(tf.float32, shape=[None, self.mq.shape[1]])
        self.learning_rate = tf.placeholder(tf.float32, shape=[])
        # physics uninformed neural networks
        self.net_sysbio = neural_net(layers=self.layers)
        # Prediction anchored at the initial condition: the (H+1) factor makes
        # the correction vanish at t = t_min.
        self.H_data = 2.0*(self.t_data_tf - self.t_min)/(self.t_max - self.t_min) - 1.0
        self.S_data_pred = self.S_data[0,:] + self.S_scale*(self.H_data+1.0)*self.net_sysbio(self.H_data)
        # physics informed neural networks
        self.H_eqns = 2.0*(self.t_eqns_tf - self.t_min)/(self.t_max - self.t_min) - 1.0
        self.S_eqns_pred = self.S_data[0,:] + self.S_scale*(self.H_eqns+1.0)*self.net_sysbio(self.H_eqns)
        self.E_eqns_pred, self.IG = self.SysODE(self.S_eqns_pred, self.t_eqns_tf,
                                                self.H_eqns, self.mt_tf, self.mq_tf)
        # Adaptive S_scale
        # self.S_scale = 0.9*self.S_scale + 0.1*tf.math.reduce_std(self.S_eqns_pred, 0)
        # scale_list = tf.unstack(self.S_scale)
        # scale_list[2] = self.S_data.std(0)[2]
        # self.S_scale = tf.stack(scale_list)
        # loss
        # Data loss only uses species column 2; the ODE residual and an
        # end-point anchor on all species make up the rest.
        self.loss_data = mean_squared_error(self.S_data_tf[:,2:3]/self.S_scale[2:3], self.S_data_pred[:,2:3]/self.S_scale[2:3])
        self.loss_eqns = mean_squared_error(0.0, self.E_eqns_pred/self.S_scale)
        self.loss_auxl = mean_squared_error(self.S_data_tf[-1,:]/self.S_scale, self.S_data_pred[-1,:]/self.S_scale)
        self.loss = 0.99*self.loss_data + 0.01*self.loss_eqns + 0.01*self.loss_auxl
        # optimizers
        # Two optimizers: one for network weights (placeholder-fed rate), one
        # with a fixed rate for the ODE parameters.
        self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
        self.optimizer_para = tf.train.AdamOptimizer(learning_rate=0.001)
        self.train_op = self.optimizer.minimize(self.loss,
                                                var_list=[self.net_sysbio.weights,
                                                          self.net_sysbio.biases,
                                                          self.net_sysbio.gammas])
        self.trainpara_op = self.optimizer_para.minimize(self.loss,
                                                         var_list=self.var_list_eqns)
        self.sess = tf_session()

    def SysODE(self, S, t, H, mt, mq):
        """Return the ODE residual E = dS/dt - F(S, t) and the intake term IG.

        :param S: predicted species, shape (batch, D)
        :param t: raw times (for autodiff of S w.r.t. t)
        :param H: times rescaled to [-1, 1]
        :param mt: rescaled meal times; mq: meal quantities
        """
        # Exponentially decaying intake after each meal, summed over meals.
        intake = self.k * mq * heaviside(H-mt) * tf.exp(self.k*(mt-H)*(self.t_max-self.t_min)/2.0)
        IG = tf.reduce_sum(intake, axis=1, keepdims=True)
        kappa = 1.0/self.Vi + 1.0/(self.E*self.ti)
        f1 = self.Rm * tf.sigmoid(S[:,2:3]/(self.Vg*self.C1) - self.a1)
        f2 = self.Ub * (1.0 - tf.exp(-S[:,2:3]/(self.Vg*self.C2)))
        # Guard the log against non-positive predictions of species 1.
        safe_log = tf.where(S[:,1:2] <= 0.0, tf.ones_like(S[:,1:2]), S[:,1:2])
        f3 = (self.U0 + self.Um*tf.sigmoid(self.beta*tf.log(kappa*safe_log/self.C4))) / (self.Vg*self.C3)
        f4 = self.Rg * tf.sigmoid(-self.alpha*(S[:,5:6]/(self.Vp*self.C5)-1.0))
        F0 = f1 - self.E*(S[:,0:1]/self.Vp-S[:,1:2]/self.Vi) - S[:,0:1]/self.tp
        F1 = self.E*(S[:,0:1]/self.Vp-S[:,1:2]/self.Vi) - S[:,1:2]/self.ti
        F2 = f4 + IG - f2 - f3*S[:,2:3]
        # Species 3-5 form a linear delay chain driven by species 0.
        F3 = (S[:,0:1] - S[:,3:4]) / self.td
        F4 = (S[:,3:4] - S[:,4:5]) / self.td
        F5 = (S[:,4:5] - S[:,5:6]) / self.td
        F = tf.concat([F0, F1, F2, F3, F4, F5], 1)
        S_t = fwd_gradients(S, t)
        E = S_t - F
        return E, IG

    def train(self, num_epochs, batch_size, learning_rate):
        """Run minibatch Adam training for ``num_epochs`` epochs.

        Each data batch always includes the first and last observations
        (they anchor the initial condition and the auxiliary loss).
        """
        N_data = self.t_data.shape[0]
        N_eqns = self.t_eqns.shape[0]
        for epoch in range(num_epochs):
            start_time = time.time()
            for it in range(N_eqns//batch_size):
                # Always keep indices 0 and N_data-1 in the batch.
                idx_data = np.concatenate([np.array([0]),
                                           np.random.choice(np.arange(1, N_data-1), min(batch_size, N_data)-2),
                                           np.array([N_data-1])])
                idx_eqns = np.random.choice(N_eqns, batch_size)
                t_data_batch, S_data_batch = self.t_data[idx_data,:], self.S_data[idx_data,:]
                t_eqns_batch = self.t_eqns[idx_eqns,:]
                mt_batch, mq_batch = self.mt[idx_eqns,:], self.mq[idx_eqns,:]
                tf_dict = {self.t_data_tf: t_data_batch,
                           self.S_data_tf: S_data_batch,
                           self.t_eqns_tf: t_eqns_batch,
                           self.mt_tf: mt_batch, self.mq_tf: mq_batch,
                           self.learning_rate: learning_rate}
                self.sess.run([self.train_op, self.trainpara_op], tf_dict)
                # Print
                if it % 10 == 0:
                    elapsed = time.time() - start_time
                    [loss_data_value,
                     loss_eqns_value,
                     loss_auxl_value,
                     learning_rate_value] = self.sess.run([self.loss_data,
                                                           self.loss_eqns,
                                                           self.loss_auxl,
                                                           self.learning_rate], tf_dict)
                    print('Epoch: %d, It: %d, Loss Data: %.3e, Loss Eqns: %.3e, Loss Aux: %.3e, Time: %.3f, Learning Rate: %.1e'
                          %(epoch, it, loss_data_value, loss_eqns_value, loss_auxl_value, elapsed, learning_rate_value))
                    start_time = time.time()

    def predict(self, t_star, meal_tq):
        """Predict all species at times ``t_star``; the intake term is appended
        as an extra column.

        NOTE(review): this mutates the caller's ``meal_tq[0]`` in place when
        rescaling the meal times -- consider copying instead.
        """
        meal_tq[0] = 2.0*(meal_tq[0] - self.t_min)/(self.t_max - self.t_min) - 1.0
        tf_dict = {self.t_eqns_tf: t_star,
                   self.mt_tf: meal_tq[0], self.mq_tf: meal_tq[1]}
        S_star, IG = self.sess.run([self.S_eqns_pred, self.IG], tf_dict)
        S_star = np.append(S_star[:,:], IG[:], axis=1)
        return S_star
if __name__ == "__main__":
layers = [1] + 6*[6*30] + [6]
meal_t = [300., 650., 1100., 2000.]
meal_q = [60e3, 40e3, 50e3, 100e3]
def intake(tn, k):
    """Total intake rate at time ``tn``: each meal contributes an
    exponentially decaying pulse (rate k) once its start time has passed."""
    contributions = [k * qj * np.heaviside(tn - tj, 0.5) * np.exp(k * (tj - tn))
                     for tj, qj in zip(meal_t, meal_q)]
    return np.array(contributions).sum()
# function that returns dx/dt
def f(x, t): # x is 6 x 1
k = 1./120.
Rm = 209.
Vg = 10.
C1 = 300.
a1 = 6.6
Ub = 72.
C2 = 144.
U0 = 4.
Um = 90.
C3 = 100.
C4 = 80.
Vi = 11.
E = 0.2
ti = 100.
beta = 1.772
Rg = 180.
alpha = 7.5
Vp = 3.
C5 = 26.
tp = 6.
td = 12.
kappa = 1.0/Vi + 1.0/E/ti
f1 = Rm / (1.0 + np.exp(-x[2]/Vg/C1 + a1))
f2 = Ub * (1.0 - np.exp(-x[2]/Vg/C2))
f3 = (U0 + Um/(1.0+np.exp(-beta*np.log(kappa*x[1]/C4)))) / Vg / C3
f4 = Rg / (1.0 + np.exp(alpha*(x[5]/Vp/C5-1.0)))
x0 = f1 - E*(x[0]/Vp-x[1]/Vi) - x[0]/tp
x1 = E*(x[0]/Vp-x[1]/Vi) - x[1]/ti
x2 = f4 + intake(t, k) - f2 - f3*x[2]
x3 = (x[0] - x[3]) / td
x4 = (x[3] - x[4]) / td
x5 = (x[4] - x[5]) / td
X = np.array([x0, x1, x2, x3, x4, x5])
return X
# function that returns dx/dt
def f_pred(x, t): # x is 6 x 1
k = 0.007751
Rm = 73.858517
Vg = 10.000000
C1 = 319.160032
a1 = 6.253946
Ub = 86.824888
C2 = 152.637362
U0 = 19.412358
Um = 141.051173
C3 = 235.955381
C4 = 251.580667
Vi = 2.689281
E = 0.147199
ti = 36.766254
beta = 2.475349
Rg = 212.777472
alpha = 7.182466
Vp = 0.707807
C5 = 101.811242
tp = 139.384628
td = 7.417875
kappa = 1.0/Vi + 1.0/E/ti
f1 = Rm / (1.0 + np.exp(-x[2]/Vg/C1 + a1))
f2 = Ub * (1.0 - np.exp(-x[2]/Vg/C2))
f3 = (U0 + Um/(1.0+np.exp(-beta*np.log(kappa*x[1]/C4)))) / Vg / C3
f4 = Rg / (1.0 + np.exp(alpha*(x[5]/Vp/C5-1.0)))
x0 = f1 - E*(x[0]/Vp-x[1]/Vi) - x[0]/tp
x1 = E*(x[0]/Vp-x[1]/Vi) - x[1]/ti
x2 = f4 + intake(t, k) - f2 - f3*x[2]
x3 = (x[0] - x[3]) / td
x4 = (x[3] - x[4]) / td
x5 = (x[4] - x[5]) / td
X = np.array([x0, x1, x2, x3, x4, x5])
return X
def plotting(t_star, S_star, S_pred, perm, Vg2, forecast=False):
sns.set()
fig, ax = newfig(2.0, 0.7)
gs0 = gridspec.GridSpec(1, 1)
gs0.update(top=0.9, bottom=0.15, left=0.1, right=0.95, hspace=0.3, wspace=0.3)
ax = plt.subplot(gs0[0:1, 0:1])
ax.plot(t_star,S_star[:,2],'C1',linewidth=2,label='input data')
ax.scatter(t_star[perm],S_star[perm,2],marker='o',s=40,label='sampled input')
ax.set_xlabel('$t\ (min)$', fontsize=18)
ax.set_ylabel('$G\ (mg/dl) $', fontsize=18)
ax.legend(fontsize='large')
####################################
fig, ax = newfig(1.8, 0.75)
gs1 = gridspec.GridSpec(1, 2)
gs1.update(top=0.85, bottom=0.15, left=0.1, right=0.95, hspace=0.3, wspace=0.3)
ax = plt.subplot(gs1[0:1, 0:1])
ax.plot(t_star,S_star[:,0]*Vg2,'C1',linewidth=2,label='exact')
ax.plot(t_star,S_pred[:,0]*Vg2,'g-.',linewidth=3,label='learned')
ax.set_xlabel('$t\ (min)$', fontsize=18)
ax.set_ylabel('$I_p\ (\mu U/ml)$', fontsize=18)
ax.legend(fontsize='large')
| |
#coding=utf-8
"""
Visualisations for the input-output function of neurons over time.
"""
import logging
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import cm, colors, pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from src.config import settings
from src.io_time_heatmap import fig_time, save_location
from src.iocurves.analysis import get_data, get_inst_firing_rate, get_params, get_var
logger = logging.getLogger("io curves vis")
def plot_compare_dcl(title, results, ifr_windowsize=0.01, combine=False):
    """
    Plot voltage, chloride, and number spikes over time.

    Spike-time columns are converted in place to instantaneous firing rates,
    so the DataFrames in ``results`` are mutated by this call.

    :param title: Figure title
    :type title: str
    :param results:
    :type results: list of pd.DataFrame
    :param ifr_windowsize: Instantaneous window size (in seconds)
    :type ifr_windowsize: float
    :param combine: Display static and dynamic cl- traces on a single axis (True) or on their own axes (False)
    :type combine: bool
    :return: Figure and axes create in method
    :rtype: (plt.Figure, plt.Axes)
    """
    # 3 rows (one per recorded variable); 1 column when combined, else 2.
    f, axes = plt.subplots(3, 1 + (1 - int(combine)), sharex='all', sharey='row')
    f.suptitle(title)
    prev_legend = []
    # i == 0 is assumed to be the static-chloride result, i == 1 the dynamic one.
    for i, result in enumerate(results):
        multiIndex = result.columns
        recorded_vars = multiIndex.levels[1]
        for i_var, recorded_var in enumerate(recorded_vars):
            legend = False
            ax = axes[i_var] if combine else axes[i_var, i]
            # Dashed line distinguishes the static trace on a shared axis.
            style = '--' if combine and i == 0 else '-'
            trial_results, mean = get_var(result, recorded_var)
            if recorded_var == 'spikes':
                # get number of spikes in total
                legend = [mean.iloc[-1]]
                if combine:
                    name = 'static' if i == 0 else 'dynamic'
                    prev_legend.append("{} ({})".format(legend[0], name))
                # convert spike times to instantaneous firing rate
                trial_length = trial_results.shape[1] + 1
                for j in range(1, trial_length):
                    trial_results.loc[:, j] = get_inst_firing_rate(trial_results[j], time_bin=ifr_windowsize)
                mean.loc[:] = np.mean(trial_results, axis=1)
            # Faint individual trials, solid mean trace.
            trial_results.plot(ax=ax, color='k', alpha=0.1, style=style, legend=legend)
            mean.plot(ax=ax, color='k', alpha=0.8, style=style, legend=legend)
            if legend and combine:
                ax.legend(prev_legend, loc='lower right')
            elif legend:
                ax.legend(legend, loc='lower right')
            if i == 0:
                # first column (Y axis labels)
                if recorded_var == 'v':
                    ylabel = 'Membrane Potential (mV)'
                elif recorded_var == 'cli':
                    ylabel = '$[Cl^{-}]_i$ (mM)'
                elif recorded_var == 'ifr' or recorded_var == 'spikes':
                    ylabel = "Instantaneous Firing Rate (Hz) \n "\
                             "[window size of {} ms]".format(ifr_windowsize*1000)
                else:
                    ylabel = None
                ax.set_ylabel(ylabel)
    if combine:
        axes[0].set_title("Static Chloride vs Dynamic Chloride")
    else:
        axes[0, 0].set_title("Static Chloride")
        axes[0, 1].set_title("Dynamic Chloride")
    for axis in axes.flatten():
        sns.despine(ax=axis)
    plt.xlabel('Time (ms)')
    # plt.xlim([0, (len(t_rec_) - 1) * 0.025])
    return f, axes
def plot_io_curve(io_runs, ifr_windowsize=0.1, combine=False, heatmap=False, fill=False, time_points=None,
show_cli=False, save_args=None):
"""
Plot input-output of a neuron.
Can be:
* heatmap and combine - plot static and dynamic heatmaps AND the difference between them
* heatmap - plot static and dynamic heatmaps
* combine - plot static and dynamic input-output curves on one axes (like `plot_compare_dcl`)
* neither - plot static and dynamic input-output curves on separate axes (like `plot_compare_dcl`)
* io_runs is a dictionary - plot a specific run. Only if not heatmap and not combine.
:param io_runs: Static and dynamic chloride dataframes with firing rate info (from `sim`).
:type io_runs: list of pd.DataFrame or dict of pd.DataFrame
:param ifr_windowsize: Window to compute instantanenous firing rate (in seconds) .
:type ifr_windowsize: float
:param combine: Plot static and dynamic on one axes or create a difference heatmap.
:type combine: bool
:param heatmap: Plot as heatmap(s).
:type heatmap: bool
:param fill: (Forward) Fill the dataframe.
:type fill: bool
:param time_points: Time points to plot (or None for a 3D scatter plot)
:type time_points: list of float
:param show_cli: Include internal chloride ion concentration heatmap (when heatmap is True, ignored otherwise).
:type show_cli: bool
:param save_args: File name (`fig_name`) and file types (`formats`) for saving.
:type save_args: dict
:return: Pair of figure, ax used
:rtype: (plt.Figure, plt.Axes)
"""
subplot_kw = None
gridspec_kw = None
if time_points is None:
subplot_columns = 1
heatmap = True
subplot_kw = {"projection": '3d'}
else:
subplot_columns = len(time_points) + 1
gridspec_kw = {'width_ratios': [5]*len(time_points) + [1]}
extra = ''
y_label_cl = None
heatmap_kwargs = {
"square": True,
"vmin": 0,
"vmax": 120,
}
label = 'IFR (Hz)'
if heatmap and combine:
f, axes = plt.subplots(3 + show_cli, subplot_columns, figsize=(settings.PAGE_W_FULL,
settings.PAGE_H_half + settings.PAGE_H_4th),
sharex='col', sharey=False,
subplot_kw=subplot_kw, gridspec_kw=gridspec_kw)
dfs = []
index = None
exc, inh = None, None
ylabel = 'Relative\ninhibitory\nconductance'
for i_cl, cl_state in enumerate(io_runs):
params = get_params(list(cl_state.keys())[0])
file_name = params[0]
line = '-' if 'KCC2' in file_name else '--'
axes_subplot = axes[i_cl] if time_points is None else axes[i_cl, :]
FRdf, exc, inh = get_data(cl_state, ifr_windowsize, time_points)
_plot_data(FRdf, exc, inh, label, axes_subplot, f, heatmap, fill, time_points, line,
first_plot=(i_cl == 0), cbar_ax=axes_subplot[-1], heatmap_kwargs=heatmap_kwargs)
dfs.append(FRdf)
y_label_cl = settings.DYNAMIC_CHLORIDE_STR_ABBR if 'KCC2' in file_name else\
settings.STATIC_CHLORIDE_STR_ABBR
axes_subplot = axes[i_cl] if time_points is None else axes[i_cl, 0]
axes_subplot.set_ylabel(y_label_cl + "\n" + ylabel, rotation=0, ha='right', va='center_baseline')
if show_cli and 'KCC2' in file_name:
if show_cli:
axes_subplot = axes[-1] if time_points is None else axes[-1, :]
# heatmap_kwargs["vmax"] = vmax
cli_df, exc, inh = get_data(cl_state, ifr_windowsize, time_points, var='cli')
cmap = sns.cubehelix_palette(16, start=2.7, rot=-.2, light=0.98, dark=0.40, as_cmap=True)
from matplotlib.colors import LogNorm
cl_min = cli_df.values.min()
cl_max = cli_df.values.max()
_plot_data(cli_df, exc, inh, f"{settings.CLI} (mM)", axes_subplot, f, heatmap, fill,
time_points, line, first_plot=False, cmap=cmap, cbar_ax=axes_subplot[-1],
cbar_kws={'ticks': np.logspace(np.log10(cl_min), np.log10(cl_max), 6, base=10)},
heatmap_kwargs={
'square': heatmap_kwargs['square'],
"vmin": cl_min,
"vmax": cl_max,
"norm": LogNorm(vmin=cl_min, vmax=cl_max)
})
logger.info("plotting difference")
# do extra 'Difference' plot
dif_df = dfs[1] - dfs[0]
# get maximum value of dataframe
vmax = dif_df.values.max()
axes_subplot = axes[2] if time_points is None else axes[2, :]
heatmap_kwargs["vmax"] = vmax
_plot_data(dif_df, exc, inh, f"{settings.DELTA}{label}", axes_subplot, f, heatmap, fill, time_points,
first_plot=False,
cmap='viridis', cbar_ax=axes_subplot[-1],
heatmap_kwargs=heatmap_kwargs)
y_label_cl = "Difference"
axes_subplot = axes[2] if time_points is None else axes[2, 0]
axes_subplot.set_ylabel(y_label_cl + "\n" + ylabel, rotation=0, ha='right', va='center_baseline')
if time_points is None:
ax = axes[-1]
else:
ax = axes[-1, 0]
f.subplots_adjust(wspace=0.1, hspace=0.2, left=0.3, right=0.93)
elif type(io_runs) == dict:
# just plot the one
f, axes = plt.subplots(1, subplot_columns, sharey='row', subplot_kw=subplot_kw)
params = get_params(list(io_runs.keys())[0])
file_name = params[0]
line = '-' if 'KCC2' in file_name else '--'
FRdf, exc, inh = get_data(io_runs, ifr_windowsize, time_points)
_plot_data(FRdf, exc, inh, label, axes, f, heatmap, fill, time_points, heatmap_kwargs=heatmap_kwargs)
ax = axes[0]
extra = ' Dynamic' if 'KCC2' in file_name else ' Static'
elif combine:
# plot both cl_states on one set of axes
f, axes = plt.subplots(1, subplot_columns, sharey='row', subplot_kw=subplot_kw)
for i_cl, cl_state in enumerate(io_runs):
params = get_params(cl_state.keys()[0])
file_name = params[0]
line = '-' if 'KCC2' in file_name else '--'
FRdf, exc, inh = get_data(cl_state, ifr_windowsize, time_points)
_plot_data(FRdf, exc, inh, label, axes, f, heatmap, fill, time_points, line,
first_plot=(i_cl == 0), cbar_ax=axes[-1],
heatmap_kwargs=heatmap_kwargs)
ax = axes[0]
else:
# plot cl_states on multiple axes
f, axes = plt.subplots(2, subplot_columns, sharex='all', sharey='all', subplot_kw=subplot_kw)
for i_cl, cl_state in enumerate(io_runs):
params = get_params(cl_state.keys()[0])
file_name = params[0]
line = '-' if 'KCC2' in file_name else '--'
FRdf, exc, inh = get_data(cl_state, ifr_windowsize, time_points)
_plot_data(FRdf, exc, inh, label, axes[i_cl, :], f, heatmap, fill, time_points, line,
first_plot=(i_cl == 0), cbar_ax=axes[-1],
heatmap_kwargs=heatmap_kwargs)
if heatmap:
y_label_cl = 'Dynamic ' if 'KCC2' in file_name else 'Static '
axes[i_cl, 0].set_ylabel(y_label_cl + 'Inhibition')
ax = axes[1, 0]
title = list(io_runs.keys())[0] if type(io_runs) == dict else list(io_runs[0].keys())[0]
f.suptitle(title + "\n" + "Input-Output curve" + extra)
# fit to a sigmoid curve
# xdata = np.log10(FRdf.index[1:].values)
# ydata = FRdf.iloc[1:,0].values
# popt, pcov = curve_fit(sigmoid, xdata, ydata)
# x = np.linspace(0, np.log10(FRdf.index[-1]), 100)
# y = sigmoid(x, *popt)
# ax[i].semilogx(10**xdata,ydata,'o',10**x,y,'-')
# ax[i].set_xlabel(str(time_point) + 'ms')
if heatmap and y_label_cl is None:
ax.set_ylabel("Relative inhibitory conductance")
elif not heatmap:
ax.set_ylabel("IFR (Hz)")
ax.legend(title='Inhibition')
ax.set_xlabel("Relative excitatory conductance")
if save_args is not None and type(save_args) is dict:
for fig_format in save_args["formats"]:
f.savefig("{save_location}{fname}_{fig_time}_{window_size}{combine}{heatmap}{fill}.{format}"
.format(save_location=save_location,
fname=save_args["fig_name"],
fig_time=fig_time,
window_size=ifr_windowsize*1000,
combine="_combine" if combine else "",
heatmap="_heatmap" if heatmap else "",
fill="_fill" if fill else "",
format=fig_format))
return f, axes
def _plot_data(FRdf, exc, inh, label, ax, fig, heatmap=False, fill=False, time_points=None, line='-',
first_plot=True, cmap=None, cbar_ax=None, cbar_kws=None, heatmap_kwargs=None):
"""
Plot data from the dataframe as heatmap, input-output curves, or 3D scatter plot (not advised).
:param FRdf: Firing rate data.
:type FRdf: pd.DataFrame
:param exc: List of excitation values in FRdf.
:type exc: list
:param inh: List of inhibition values in FRdf.
:type inh: list
:param label: Display name for FRdf values (e.g. Firing Rate or IFR).
:type label: str
:param ax: Axes to plot on. If t_points is not None, must be the same-size | |
<filename>source/gmm.py
from my_widgets import LabelSlider
from process import Image, FitFunctions, FitBroadening
from process_monitor import Monitor
from PyQt5 import QtCore, QtWidgets, QtGui, QtChart
from sys import getsizeof
from sklearn.mixture import BayesianGaussianMixture
from sklearn.mixture._gaussian_mixture import _estimate_gaussian_parameters
from sklearn.mixture._gaussian_mixture import _compute_precision_cholesky
import configparser
import generate_report
import glob
import manual_fit
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib.colors as mcolors
import numpy as np
import numbers
import os
import time
import sys
import profile_chart
import bar_chart
import pandas
from scipy.stats import rv_discrete
class Window(QtCore.QObject):
#Public Signals
STATUS_REQUESTED = QtCore.pyqtSignal()
PROGRESS_ADVANCE = QtCore.pyqtSignal(int,int,int)
PROGRESS_END = QtCore.pyqtSignal()
CONNECT_TO_CANVAS = QtCore.pyqtSignal()
DRAW_LINE_REQUESTED = QtCore.pyqtSignal(QtCore.QPointF,QtCore.QPointF,bool)
DRAW_RECT_REQUESTED = QtCore.pyqtSignal(QtCore.QPointF,QtCore.QPointF,float,bool)
COLOR = ['magenta','cyan','darkCyan','darkMagenta','darkRed','darkBlue','darkGray','green','darkGreen','darkYellow','yellow','black']
FONTS_CHANGED = QtCore.pyqtSignal(str,int)
STOP_WORKER = QtCore.pyqtSignal()
FEED_BACK_TO_FIT_WORKER = QtCore.pyqtSignal(list,tuple)
def __init__(self):
super(Window,self).__init__()
self.analysisRegion = [0,0,0,0,0]
self.config = configparser.ConfigParser()
self.config.read('./configuration.ini')
self.image_worker = Image()
self.fit_worker = FitFunctions()
self.stopped = False
primary_colors = ['salmon','bright green','bright pink','robin egg blue','bright lavender','deep sky blue','irish green','golden','greenish teal','light blue','butter yellow',\
'turquoise green','iris','off blue','plum','mauve','burgundy','coral','clay','emerald green','cadet blue','avocado','rose pink','aqua green','scarlet']
self.fit_colors = [mcolors.XKCD_COLORS['xkcd:'+name] for name in primary_colors]
for color in mcolors.XKCD_COLORS.keys():
if not color in primary_colors:
self.fit_colors.append(mcolors.XKCD_COLORS[color])
self.default_means = [[0,0],[2.3,0],[1.15,2],[-1.15,2],[-2.3,0],[-1.15,-2],[1.15,-2],[4.6,0],[2.3,4],[-2.3,4],[-4.6,0],[-2.3,-4],[2.3,-4],[3.45,2],[0,4],[-3.45,2],[-3.45,-2],[0,-4],[3.45,-2]]
def refresh(self,config):
self.config = config
try:
self.distributionChart.refresh(config)
self.costChart.refresh(config)
except:
pass
    def set_status(self,status):
        """Store the latest status reported via STATUS_REQUESTED.

        :param status: Opaque status object from the host application;
            only stored here, consumers read ``self.status`` directly.
        """
        self.status = status
    def main(self,path="c:/users/yux20/documents/05042018 MoS2/interpolated_2D_stack_large.csv"):
        """Build the Gaussian-mixture-modeling dialog and show it maximized.

        Creates every widget group (input/output choosers, appearance,
        sampling and fit-parameter editors, mean/covariance prior tables,
        charts, log area and buttons), wires all signals, then calls
        ``showMaximized``.

        :param path: Input data file; only its directory (source and
            destination default) and extension are used here.
            NOTE(review): the default is a machine-specific path -- confirm
            it should remain hard-coded.
        """
        # --- default fit parameters; kept as strings because they back
        # QLineEdit widgets (empty string => matching checkbox unchecked) ---
        self.startIndex = "0"
        self.endIndex = "3"
        self.range = "5"
        self.nsamp = '10'
        self.ndraw = '2'
        self.nzslices = '10'
        self.nfeature = '2'
        self.ncomp = '19'
        self.tol = '0.001'
        self.reg_covar = '1e-6'
        self.max_itr = '1500'
        self.n_init = '1'
        self.wc_prior = '1000'
        self.mean_precision_prior = '0.8'
        self.dof = ''
        self.rs = '2'
        self.vb = '0'
        self.vb_interval = '10'
        self.defaultFileName = "GMM Fit"
        # Seed series for the ELBO-change chart.
        self.cost_series_X = [1]
        self.cost_series_Y = [1]
        # Worker thread for the fit; started elsewhere (see start()).
        self.thread = QtCore.QThread(parent=self)
        self.file_has_been_created = False
        self.scatter_exist = False
        self.path = os.path.dirname(path)
        self.extension = os.path.splitext(path)[1]
        self.currentSource = self.path
        self.currentDestination = self.currentSource
        # --- top-level dialog: controls (left, scrollable) | charts (right) ---
        self.Dialog = QtWidgets.QWidget()
        self.Grid = QtWidgets.QGridLayout(self.Dialog)
        self.LeftFrame = QtWidgets.QFrame()
        self.RightFrame = QtWidgets.QFrame()
        self.LeftGrid = QtWidgets.QGridLayout(self.LeftFrame)
        self.RightGrid = QtWidgets.QGridLayout(self.RightFrame)
        self.hSplitter = QtWidgets.QSplitter(QtCore.Qt.Horizontal)
        self.hSplitter.addWidget(self.RightFrame)
        self.hSplitter.addWidget(self.LeftFrame)
        self.hSplitter.setStretchFactor(0,1)
        self.hSplitter.setStretchFactor(1,1)
        self.hSplitter.setCollapsible(0,False)
        self.hSplitter.setCollapsible(1,False)
        self.leftScroll = QtWidgets.QScrollArea(self.hSplitter)
        # --- "Input" group: source directory chooser + Load button ---
        self.chooseSource = QtWidgets.QGroupBox("Input")
        self.chooseSource.setStyleSheet('QGroupBox::title {color:blue;}')
        self.sourceGrid = QtWidgets.QGridLayout(self.chooseSource)
        self.sourceGrid.setAlignment(QtCore.Qt.AlignTop)
        self.chooseSourceLabel = QtWidgets.QLabel("The input data directory is:\n"+self.currentSource)
        self.chooseSourceLabel.setWordWrap(True)
        self.chooseSourceButton = QtWidgets.QPushButton("Browse...")
        self.chooseSourceButton.setSizePolicy(QtWidgets.QSizePolicy.Fixed,QtWidgets.QSizePolicy.Fixed)
        self.chooseSourceButton.clicked.connect(self.choose_source)
        self.loadButton = QtWidgets.QPushButton("Load")
        self.loadButton.setSizePolicy(QtWidgets.QSizePolicy.Fixed,QtWidgets.QSizePolicy.Fixed)
        self.loadButton.clicked.connect(self.load_data)
        # Disabled until a source is chosen.
        self.loadButton.setEnabled(False)
        self.sourceGrid.addWidget(self.chooseSourceLabel,0,0,2,1)
        self.sourceGrid.addWidget(self.chooseSourceButton,0,1,1,1)
        self.sourceGrid.addWidget(self.loadButton,1,1,1,1)
        # --- "Information" group: free-form status text about loaded data ---
        self.information = QtWidgets.QGroupBox("Information")
        self.information.setStyleSheet('QGroupBox::title {color:blue;}')
        self.informationGrid = QtWidgets.QGridLayout(self.information)
        self.informationGrid.setAlignment(QtCore.Qt.AlignTop)
        self.informationLabel = QtWidgets.QLabel("")
        self.informationLabel.setWordWrap(True)
        self.informationGrid.addWidget(self.informationLabel,0,0)
        # --- "Output" group: destination, file name/format, save toggle ---
        self.chooseDestination = QtWidgets.QGroupBox("Output")
        self.chooseDestination.setStyleSheet('QGroupBox::title {color:blue;}')
        self.destinationGrid = QtWidgets.QGridLayout(self.chooseDestination)
        self.chooseDestinationLabel = QtWidgets.QLabel("The output directory is:\n"+self.currentSource)
        self.destinationNameLabel = QtWidgets.QLabel("The file name is:")
        self.destinationNameEdit = QtWidgets.QLineEdit(self.defaultFileName)
        self.fileTypeLabel = QtWidgets.QLabel("The file format is:")
        self.fileType = QtWidgets.QComboBox()
        self.fileType.addItem(".txt",".txt")
        self.fileType.addItem(".xlsx",".xlsx")
        self.chooseDestinationButton = QtWidgets.QPushButton("Browse...")
        self.chooseDestinationButton.setSizePolicy(QtWidgets.QSizePolicy.Fixed,QtWidgets.QSizePolicy.Fixed)
        self.chooseDestinationButton.clicked.connect(self.choose_destination)
        self.saveResultLabel = QtWidgets.QLabel("Save Results?")
        self.saveResult = QtWidgets.QCheckBox()
        self.saveResult.setChecked(False)
        self.destinationGrid.addWidget(self.chooseDestinationLabel,0,0)
        self.destinationGrid.addWidget(self.chooseDestinationButton,0,1)
        self.destinationGrid.addWidget(self.destinationNameLabel,1,0)
        self.destinationGrid.addWidget(self.destinationNameEdit,1,1)
        self.destinationGrid.addWidget(self.fileTypeLabel,2,0)
        self.destinationGrid.addWidget(self.fileType,2,1)
        self.destinationGrid.addWidget(self.saveResultLabel,3,0)
        self.destinationGrid.addWidget(self.saveResult,3,1)
        self.destinationGrid.setAlignment(self.chooseDestinationButton,QtCore.Qt.AlignRight)
        # --- "Appearance" group: font family/size for all charts ---
        self.appearance = QtWidgets.QGroupBox("Appearance")
        self.appearance.setMaximumHeight(100)
        self.appearance.setStyleSheet('QGroupBox::title {color:blue;}')
        self.appearanceGrid = QtWidgets.QGridLayout(self.appearance)
        self.fontListLabel = QtWidgets.QLabel("Change Font")
        self.fontList = QtWidgets.QFontComboBox()
        self.fontList.setCurrentFont(QtGui.QFont("Arial"))
        self.fontList.currentFontChanged.connect(self.refresh_font_name)
        self.fontSizeLabel = QtWidgets.QLabel("Adjust Font Size ({})".format(12))
        self.fontSizeLabel.setFixedWidth(160)
        self.fontSizeSlider = QtWidgets.QSlider(QtCore.Qt.Horizontal)
        self.fontSizeSlider.setMinimum(1)
        self.fontSizeSlider.setMaximum(100)
        self.fontSizeSlider.setValue(12)
        self.fontSizeSlider.valueChanged.connect(self.refresh_font_size)
        self.appearanceGrid.addWidget(self.fontListLabel,0,0)
        self.appearanceGrid.addWidget(self.fontList,0,1)
        self.appearanceGrid.addWidget(self.fontSizeLabel,1,0)
        self.appearanceGrid.addWidget(self.fontSizeSlider,1,1)
        # --- "Sample" group: sampling parameters and draw/plot actions ---
        self.sampleOptions = QtWidgets.QGroupBox("Sample")
        self.sampleOptions.setStyleSheet('QGroupBox::title {color:blue;}')
        self.sampleOptionsGrid = QtWidgets.QGridLayout(self.sampleOptions)
        self.numberOfSamplesLabel = QtWidgets.QLabel("Number of Samples")
        self.numberOfSamplesEdit = QtWidgets.QLineEdit(self.nsamp)
        self.numberOfDrawsLabel = QtWidgets.QLabel("Number of Draws")
        self.numberOfDrawsEdit = QtWidgets.QLineEdit(self.ndraw)
        self.numberOfZsLabel = QtWidgets.QLabel("Number of Z Slices")
        self.numberOfZsEdit = QtWidgets.QLineEdit(self.nzslices)
        self.drawSampleButton = QtWidgets.QPushButton("Draw Z=0")
        self.drawSampleButton.setSizePolicy(QtWidgets.QSizePolicy.Fixed,QtWidgets.QSizePolicy.Fixed)
        self.drawSampleButton.clicked.connect(self.draw_sample)
        self.drawSampleButton.setEnabled(False)
        self.plotSampleButton = QtWidgets.QPushButton("Plot Z=0")
        self.plotSampleButton.setSizePolicy(QtWidgets.QSizePolicy.Fixed,QtWidgets.QSizePolicy.Fixed)
        self.plotSampleButton.clicked.connect(self.plot_sample)
        self.plotSampleButton.setEnabled(False)
        self.sampleOptionsGrid.addWidget(self.numberOfSamplesLabel,0,0,1,2)
        self.sampleOptionsGrid.addWidget(self.numberOfSamplesEdit,0,2,1,4)
        self.sampleOptionsGrid.addWidget(self.numberOfDrawsLabel,1,0,1,2)
        self.sampleOptionsGrid.addWidget(self.numberOfDrawsEdit,1,2,1,4)
        self.sampleOptionsGrid.addWidget(self.numberOfZsLabel,2,0,1,2)
        self.sampleOptionsGrid.addWidget(self.numberOfZsEdit,2,2,1,4)
        self.sampleOptionsGrid.addWidget(self.drawSampleButton,3,0,1,3)
        self.sampleOptionsGrid.addWidget(self.plotSampleButton,3,3,1,3)
        # --- "Parameters" group: BayesianGaussianMixture hyperparameters.
        # Grid rows use sparse indices (10, 20, ...) so rows can be inserted
        # later without renumbering. ---
        self.fitOptions = QtWidgets.QGroupBox("Parameters")
        self.fitOptions.setStyleSheet('QGroupBox::title {color:blue;}')
        self.fitOptionsGrid = QtWidgets.QGridLayout(self.fitOptions)
        self.numberOfFeaturesLabel = QtWidgets.QLabel("Number of Features")
        self.numberOfFeaturesEdit = QtWidgets.QLineEdit(self.nfeature)
        self.fitOptionsGrid.addWidget(self.numberOfFeaturesLabel,10,0,1,2)
        self.fitOptionsGrid.addWidget(self.numberOfFeaturesEdit,10,2,1,4)
        # Changing features or components rebuilds the prior tables live.
        self.numberOfFeaturesEdit.textChanged.connect(self.covar_prior_table_change_features)
        self.numberOfFeaturesEdit.textChanged.connect(self.mean_prior_table_change_features)
        self.numberOfComponentsLabel = QtWidgets.QLabel("Number of Components")
        self.numberOfComponentsEdit = QtWidgets.QLineEdit(self.ncomp)
        self.fitOptionsGrid.addWidget(self.numberOfComponentsLabel,20,0,1,2)
        self.fitOptionsGrid.addWidget(self.numberOfComponentsEdit,20,2,1,4)
        self.numberOfComponentsEdit.textChanged.connect(self.mean_prior_table_initialize)
        self.numberOfComponentsEdit.textChanged.connect(self.covar_prior_table_initialize)
        self.tolLabel = QtWidgets.QLabel("Convergence Threshold")
        self.tolEdit = QtWidgets.QLineEdit(self.tol)
        self.fitOptionsGrid.addWidget(self.tolLabel,30,0,1,2)
        self.fitOptionsGrid.addWidget(self.tolEdit,30,2,1,4)
        self.regCovarLabel = QtWidgets.QLabel("Covariance Reg.")
        self.regCovarEdit = QtWidgets.QLineEdit(self.reg_covar)
        self.fitOptionsGrid.addWidget(self.regCovarLabel,40,0,1,2)
        self.fitOptionsGrid.addWidget(self.regCovarEdit,40,2,1,4)
        self.maxItrLabel = QtWidgets.QLabel("EM Iterations")
        self.maxItrEdit = QtWidgets.QLineEdit(self.max_itr)
        self.fitOptionsGrid.addWidget(self.maxItrLabel,50,0,1,2)
        self.fitOptionsGrid.addWidget(self.maxItrEdit,50,2,1,4)
        self.nInitLabel = QtWidgets.QLabel("Number of Initializations")
        self.nInitEdit = QtWidgets.QLineEdit(self.n_init)
        self.fitOptionsGrid.addWidget(self.nInitLabel,60,0,1,2)
        self.fitOptionsGrid.addWidget(self.nInitEdit,60,2,1,4)
        self.covarianceType = QtWidgets.QLabel("Covariance Type")
        self.covarianceType.setFixedWidth(160)
        self.covarianceTypeCombo = QtWidgets.QComboBox()
        for types in ('full','tied','diag','spherical'):
            self.covarianceTypeCombo.addItem(types,types)
        self.fitOptionsGrid.addWidget(self.covarianceType,70,0,1,2)
        self.fitOptionsGrid.addWidget(self.covarianceTypeCombo,70,2,1,4)
        self.initMethodType = QtWidgets.QLabel("Initialization Method")
        self.initMethodType.setFixedWidth(160)
        self.initMethodTypeCombo = QtWidgets.QComboBox()
        for types in ('random','kmeans'):
            self.initMethodTypeCombo.addItem(types,types)
        self.fitOptionsGrid.addWidget(self.initMethodType,75,0,1,2)
        self.fitOptionsGrid.addWidget(self.initMethodTypeCombo,75,2,1,4)
        self.wcPriorType = QtWidgets.QLabel("Weight Prior Type")
        self.wcPriorType.setFixedWidth(160)
        self.wcPriorTypeCombo = QtWidgets.QComboBox()
        for types in ('dirichlet_process','dirichlet_distribution'):
            self.wcPriorTypeCombo.addItem(types,types)
        self.fitOptionsGrid.addWidget(self.wcPriorType,80,0,1,2)
        self.fitOptionsGrid.addWidget(self.wcPriorTypeCombo,80,2,1,4)
        # Optional priors: each has a checkbox that enables its edit field;
        # an empty default string starts the pair unchecked/disabled.
        self.wcPriorLabel = QtWidgets.QLabel("Weight Prior")
        self.wcPriorCheck = QtWidgets.QCheckBox()
        self.wcPriorCheck.stateChanged.connect(self.wc_prior_check_changed)
        self.wcPriorEdit = QtWidgets.QLineEdit(self.wc_prior)
        if not self.wc_prior:
            self.wcPriorCheck.setChecked(False)
            self.wcPriorEdit.setEnabled(False)
        else:
            self.wcPriorCheck.setChecked(True)
        self.fitOptionsGrid.addWidget(self.wcPriorLabel,90,0,1,2)
        self.fitOptionsGrid.addWidget(self.wcPriorCheck,90,2,1,1)
        self.fitOptionsGrid.addWidget(self.wcPriorEdit,90,3,1,3)
        self.meanPrecPriorLabel = QtWidgets.QLabel("Mean Precision Prior")
        self.meanPrecPriorCheck = QtWidgets.QCheckBox()
        self.meanPrecPriorCheck.stateChanged.connect(self.mean_precision_prior_check_changed)
        self.meanPrecPriorEdit = QtWidgets.QLineEdit(self.mean_precision_prior)
        if not self.mean_precision_prior:
            self.meanPrecPriorCheck.setChecked(False)
            self.meanPrecPriorEdit.setEnabled(False)
        else:
            self.meanPrecPriorCheck.setChecked(True)
        self.fitOptionsGrid.addWidget(self.meanPrecPriorLabel,100,0,1,2)
        self.fitOptionsGrid.addWidget(self.meanPrecPriorCheck,100,2,1,1)
        self.fitOptionsGrid.addWidget(self.meanPrecPriorEdit,100,3,1,3)
        self.dofLabel = QtWidgets.QLabel("Deg. of Freedom Prior")
        self.dofCheck = QtWidgets.QCheckBox()
        self.dofCheck.stateChanged.connect(self.dof_check_changed)
        self.dofEdit = QtWidgets.QLineEdit(self.dof)
        if not self.dof:
            self.dofCheck.setChecked(False)
            self.dofEdit.setEnabled(False)
        else:
            self.dofCheck.setChecked(True)
        self.fitOptionsGrid.addWidget(self.dofLabel,120,0,1,2)
        self.fitOptionsGrid.addWidget(self.dofCheck,120,2,1,1)
        self.fitOptionsGrid.addWidget(self.dofEdit,120,3,1,3)
        self.rsLabel = QtWidgets.QLabel("Random State")
        self.rsCheck = QtWidgets.QCheckBox()
        self.rsCheck.stateChanged.connect(self.rs_check_changed)
        self.rsEdit = QtWidgets.QLineEdit(self.rs)
        if not self.rs:
            self.rsCheck.setChecked(False)
            self.rsEdit.setEnabled(False)
        else:
            self.rsCheck.setChecked(True)
        self.fitOptionsGrid.addWidget(self.rsLabel,130,0,1,2)
        self.fitOptionsGrid.addWidget(self.rsCheck,130,2,1,1)
        self.fitOptionsGrid.addWidget(self.rsEdit,130,3,1,3)
        self.vbLabel = QtWidgets.QLabel("Verbose")
        self.vbEdit = QtWidgets.QLineEdit(self.vb)
        self.fitOptionsGrid.addWidget(self.vbLabel,140,0,1,2)
        self.fitOptionsGrid.addWidget(self.vbEdit,140,2,1,4)
        self.vbIntvLabel = QtWidgets.QLabel("Verbose Interval")
        self.vbIntvEdit = QtWidgets.QLineEdit(self.vb_interval)
        self.fitOptionsGrid.addWidget(self.vbIntvLabel,150,0,1,2)
        self.fitOptionsGrid.addWidget(self.vbIntvEdit,150,2,1,4)
        self.warmStartLabel = QtWidgets.QLabel("Warm Start?")
        self.warmStartCheck = QtWidgets.QCheckBox()
        self.warmStartCheck.setChecked(False)
        self.fitOptionsGrid.addWidget(self.warmStartLabel,160,0,1,2)
        self.fitOptionsGrid.addWidget(self.warmStartCheck,160,2,1,4)
        # --- "Mean Prior" group: per-component prior/posterior mean table ---
        self.meanPriorTable = QtWidgets.QGroupBox("Mean Prior")
        self.meanPriorTable.setStyleSheet('QGroupBox::title {color:blue;}')
        self.meanPriorTableGrid = QtWidgets.QGridLayout(self.meanPriorTable)
        self.meanPriorLabel = QtWidgets.QLabel("Use Mean Prior?")
        self.meanPriorCheck = QtWidgets.QCheckBox()
        self.meanPriorCheck.setChecked(False)
        self.meanPriorCheck.stateChanged.connect(self.mean_prior_table_check_changed)
        self.resetMeanPriorButton = QtWidgets.QPushButton("Reset")
        self.resetMeanPriorButton.clicked.connect(self.set_default_mean_priors)
        self.mean_prior_table = QtWidgets.QTableWidget()
        self.mean_prior_table.setMinimumHeight(200)
        # Called directly with an int here; the slot also accepts the str
        # emitted by textChanged.
        self.mean_prior_table_initialize(int(self.ncomp))
        self.meanPriorTableGrid.addWidget(self.meanPriorLabel,0,0,1,2)
        self.meanPriorTableGrid.addWidget(self.meanPriorCheck,0,2,1,2)
        self.meanPriorTableGrid.addWidget(self.resetMeanPriorButton,0,4,1,2)
        self.meanPriorTableGrid.addWidget(self.mean_prior_table,1,0,1,6)
        # --- "Covariance Prior" group: one tab per component plus a prior tab ---
        self.covarPriorTable = QtWidgets.QGroupBox("Covariance Prior")
        self.covarPriorTable.setStyleSheet('QGroupBox::title {color:blue;}')
        self.covarPriorTableGrid = QtWidgets.QGridLayout(self.covarPriorTable)
        self.covarPriorLabel = QtWidgets.QLabel("Use Covariance Prior?")
        self.covarPriorCheck = QtWidgets.QCheckBox()
        self.covarPriorCheck.setChecked(False)
        self.covarPriorCheck.stateChanged.connect(self.covar_prior_check_changed)
        self.covarTab = QtWidgets.QTabWidget()
        self.covarTab.setContentsMargins(0,0,0,0)
        self.covarTab.setTabsClosable(False)
        self.covar_prior_table_initialize(int(self.ncomp))
        self.covarTab.widget(0).setEnabled(self.covarPriorCheck.isChecked())
        self.covarPriorTableGrid.addWidget(self.covarPriorLabel,0,0,1,2)
        self.covarPriorTableGrid.addWidget(self.covarPriorCheck,0,2,1,4)
        self.covarPriorTableGrid.addWidget(self.covarTab,1,0,1,6)
        # --- "Log" group: scrollable text log + hidden progress bar ---
        self.statusBar = QtWidgets.QGroupBox("Log")
        self.statusBar.setStyleSheet('QGroupBox::title {color:blue;}')
        self.statusGrid = QtWidgets.QGridLayout(self.statusBar)
        self.statusBar.setFixedHeight(150)
        self.statusBar.setSizePolicy(QtWidgets.QSizePolicy.Expanding,QtWidgets.QSizePolicy.Fixed)
        self.progressBar = QtWidgets.QProgressBar()
        self.progressBar.setFixedHeight(12)
        self.progressBar.setFixedWidth(800)
        self.progressBar.setVisible(False)
        self.progressBar.setOrientation(QtCore.Qt.Horizontal)
        # Keep layout space reserved while the bar is hidden so widgets
        # below it don't jump when a fit starts.
        self.progressBarSizePolicy = self.progressBar.sizePolicy()
        self.progressBarSizePolicy.setRetainSizeWhenHidden(True)
        self.progressBar.setSizePolicy(self.progressBarSizePolicy)
        self.PROGRESS_ADVANCE.connect(self.progress)
        self.PROGRESS_END.connect(self.progress_reset)
        self.logBox = QtWidgets.QTextEdit(QtCore.QTime.currentTime().toString("hh:mm:ss")+\
                                    "\u00A0\u00A0\u00A0\u00A0Initialized!")
        self.logBox.ensureCursorVisible()
        self.logBox.setAlignment(QtCore.Qt.AlignTop)
        self.logBox.setFrameShape(QtWidgets.QFrame.NoFrame)
        self.logBoxScroll = QtWidgets.QScrollArea()
        self.logBoxScroll.setWidget(self.logBox)
        self.logBoxScroll.setWidgetResizable(True)
        self.logBoxScroll.setFrameShape(QtWidgets.QFrame.NoFrame)
        self.statusGrid.addWidget(self.logBoxScroll,0,0)
        self.statusGrid.setAlignment(self.progressBar,QtCore.Qt.AlignRight)
        # --- Start / Stop / Reset / Quit buttons.
        # findChildren returns the buttons in the order they were added above,
        # so index 0..3 map to Start, Stop, Reset, Quit. ---
        self.ButtonBox = QtWidgets.QDialogButtonBox()
        self.ButtonBox.addButton("Start",QtWidgets.QDialogButtonBox.ActionRole)
        self.ButtonBox.addButton("Stop",QtWidgets.QDialogButtonBox.ActionRole)
        self.ButtonBox.addButton("Reset",QtWidgets.QDialogButtonBox.ResetRole)
        self.ButtonBox.addButton("Quit",QtWidgets.QDialogButtonBox.DestructiveRole)
        self.ButtonBox.setCenterButtons(True)
        self.ButtonBox.findChildren(QtWidgets.QPushButton)[0].clicked.\
            connect(self.start)
        self.ButtonBox.findChildren(QtWidgets.QPushButton)[1].clicked. \
            connect(self.stop)
        self.ButtonBox.findChildren(QtWidgets.QPushButton)[2].clicked.\
            connect(self.reset)
        self.ButtonBox.findChildren(QtWidgets.QPushButton)[3].clicked.\
            connect(self.reject)
        # All disabled until data is loaded.
        self.ButtonBox.findChildren(QtWidgets.QPushButton)[0].setEnabled(False)
        self.ButtonBox.findChildren(QtWidgets.QPushButton)[1].setEnabled(False)
        self.ButtonBox.findChildren(QtWidgets.QPushButton)[2].setEnabled(False)
        self.ButtonBox.findChildren(QtWidgets.QPushButton)[3].setEnabled(False)
        # --- charts: component distribution, ELBO change, component weights ---
        self.distributionChartTitle = QtWidgets.QLabel('Distribution')
        self.distributionChart = profile_chart.ProfileChart(self.config)
        self.distributionChart.set_fonts(self.fontList.currentFont().family(),self.fontSizeSlider.value())
        self.distributionChart.setFixedSize(1300,1300)
        self.FONTS_CHANGED.connect(self.distributionChart.adjust_fonts)
        self.costChartTitle = QtWidgets.QLabel('ELBO Change')
        self.costChart = profile_chart.ProfileChart(self.config)
        self.costChart.set_fonts(self.fontList.currentFont().family(),self.fontSizeSlider.value())
        self.FONTS_CHANGED.connect(self.costChart.adjust_fonts)
        self.weightChart = bar_chart.BarChart(self.config)
        self.weightChart.set_fonts(self.fontList.currentFont().family(),self.fontSizeSlider.value())
        self.FONTS_CHANGED.connect(self.weightChart.adjust_fonts)
        # --- assemble layouts and show ---
        self.LeftGrid.addWidget(self.chooseSource,0,0)
        self.LeftGrid.addWidget(self.information,1,0)
        self.LeftGrid.addWidget(self.chooseDestination,2,0)
        self.LeftGrid.addWidget(self.appearance,3,0)
        self.LeftGrid.addWidget(self.sampleOptions,4,0)
        self.LeftGrid.addWidget(self.fitOptions,5,0)
        self.LeftGrid.addWidget(self.meanPriorTable,6,0)
        self.LeftGrid.addWidget(self.covarPriorTable,7,0)
        self.LeftGrid.addWidget(self.ButtonBox,8,0)
        self.RightGrid.addWidget(self.distributionChartTitle,0,0)
        self.RightGrid.addWidget(self.costChartTitle,0,1)
        self.RightGrid.addWidget(self.distributionChart,1,0)
        self.RightGrid.addWidget(self.costChart,1,1)
        self.RightGrid.addWidget(self.weightChart,2,0,1,2)
        self.RightGrid.addWidget(self.statusBar,3,0,1,2)
        self.RightGrid.addWidget(self.progressBar,4,0,1,2)
        self.Grid.addWidget(self.hSplitter,0,0)
        self.leftScroll.setWidget(self.LeftFrame)
        self.leftScroll.setMinimumWidth(800)
        self.leftScroll.setWidgetResizable(True)
        self.leftScroll.setFrameShape(QtWidgets.QFrame.NoFrame)
        self.Dialog.setWindowTitle("Gaussian Mixture Modeling")
        self.Dialog.setWindowModality(QtCore.Qt.WindowModal)
        self.Dialog.showMaximized()
        # Populate the mean-prior table (also rebuilds it via textChanged).
        self.set_default_mean_priors()
def wc_prior_check_changed(self,state):
if state == 0:
self.wcPriorEdit.setEnabled(False)
elif state == 2:
self.wcPriorEdit.setEnabled(True)
def mean_precision_prior_check_changed(self,state):
if state == 0:
self.meanPrecPriorEdit.setEnabled(False)
elif state == 2:
self.meanPrecPriorEdit.setEnabled(True)
def dof_check_changed(self,state):
if state == 0:
self.dofEdit.setEnabled(False)
elif state == 2:
self.dofEdit.setEnabled(True)
def rs_check_changed(self,state):
if state == 0:
self.rsEdit.setEnabled(False)
elif state == 2:
self.rsEdit.setEnabled(True)
def mean_prior_table_check_changed(self,state):
if state == 0:
for c in range(self.mean_prior_table.columnCount()):
if self.mean_prior_table.item(0,2*c):
self.mean_prior_table.item(0,2*c).setBackground(QtCore.Qt.lightGray)
elif state == 2:
for c in range(self.mean_prior_table.columnCount()):
if self.mean_prior_table.item(0,2*c):
self.mean_prior_table.item(0,2*c).setBackground(QtCore.Qt.transparent)
def covar_prior_check_changed(self,state):
if state == 0:
self.covarTab.widget(0).setEnabled(False)
elif state == 2:
self.covarTab.widget(0).setEnabled(True)
def mean_prior_table_change_features(self,text):
ncomp = int(self.numberOfComponentsEdit.text())
nfeatures = int(text)
if nfeatures > 3:
self.mean_prior_table.clear()
self.raise_error('Dimension > 3 not supported')
else:
self.mean_prior_table.clear()
self.mean_prior_table.setColumnCount(2*nfeatures)
coords = ['Prior X', 'Posterior X', 'Prior Y', 'Posterior Y', 'Prior Z', 'Posterior Z']
for n in range(2*nfeatures):
header_item = QtWidgets.QTableWidgetItem(coords[n])
self.mean_prior_table.setHorizontalHeaderItem(n,header_item)
self.mean_prior_table.setRowCount(ncomp)
self.mean_prior_table.horizontalHeader().setSectionResizeMode(QtWidgets.QHeaderView.Stretch)
self.mean_prior_table.horizontalHeader().setBackgroundRole(QtGui.QPalette.Highlight)
for i in range(ncomp):
icon_pm = QtGui.QPixmap(50,50)
icon_pm.fill(QtGui.QColor(self.fit_colors[i]))
icon = QtGui.QIcon(icon_pm)
item = QtWidgets.QTableWidgetItem(icon,'{}'.format(i+1))
item.setTextAlignment(QtCore.Qt.AlignCenter)
self.mean_prior_table.setVerticalHeaderItem(i,item)
def mean_prior_table_initialize(self,text):
ncomp = int(text)
nfeatures = int(self.numberOfFeaturesEdit.text())
if nfeatures > 3:
self.mean_prior_table.clear()
self.raise_error('Dimension > 3 not supported')
else:
self.mean_prior_table.clear()
self.mean_prior_table.setColumnCount(2*nfeatures)
coords = ['Prior X', 'Posterior X', 'Prior Y', 'Posterior Y', 'Prior Z', 'Posterior Z']
for n in range(2*nfeatures):
header_item = QtWidgets.QTableWidgetItem(coords[n])
self.mean_prior_table.setHorizontalHeaderItem(n,header_item)
self.mean_prior_table.setRowCount(ncomp)
self.mean_prior_table.horizontalHeader().setSectionResizeMode(QtWidgets.QHeaderView.Stretch)
self.mean_prior_table.horizontalHeader().setBackgroundRole(QtGui.QPalette.Highlight)
for i in range(ncomp):
icon_pm = QtGui.QPixmap(50,50)
icon_pm.fill(QtGui.QColor(self.fit_colors[i]))
icon = QtGui.QIcon(icon_pm)
item = QtWidgets.QTableWidgetItem(icon,'{}'.format(i+1))
item.setTextAlignment(QtCore.Qt.AlignCenter)
self.mean_prior_table.setVerticalHeaderItem(i,item)
def get_mean_posteriors(self):
means = []
for i in range(self.mean_prior_table.rowCount()):
row = []
for j in range(int(self.numberOfFeaturesEdit.text())):
if not self.mean_prior_table.item(i,j*2+1):
row.append(0)
else:
row.append(float(self.mean_prior_table.item(i,j*2+1).text()))
means.append(row)
return means
def get_mean_priors(self):
means = []
for i in range(self.mean_prior_table.rowCount()):
row = []
for j in range(int(self.numberOfFeaturesEdit.text())):
if not self.mean_prior_table.item(i,j*2):
row.append(0)
else:
row.append(float(self.mean_prior_table.item(i,j*2).text()))
means.append(row)
return means
def set_default_mean_priors(self):
self.numberOfComponentsEdit.setText(str(len(self.default_means)))
for i in range(len(self.default_means)):
for j in range(int(self.numberOfFeaturesEdit.text())):
if not self.mean_prior_table.item(i,j*2):
item = QtWidgets.QTableWidgetItem('{:.2f}'.format(self.default_means[i][j]))
item.setTextAlignment(QtCore.Qt.AlignCenter)
self.mean_prior_table.setItem(i,2*j,item)
else:
self.mean_prior_table.item(i,2*j).setText('{:.2f}'.format(self.default_means[i][j]))
def update_mean_posteriors(self,means):
for i in range(int(self.numberOfComponentsEdit.text())):
for j in range(int(self.numberOfFeaturesEdit.text())):
if not self.mean_prior_table.item(i,2*j+1):
item = QtWidgets.QTableWidgetItem('{:.2f}'.format(means[i,j]))
item.setTextAlignment(QtCore.Qt.AlignCenter)
self.mean_prior_table.setItem(i,2*j+1,item)
else:
self.mean_prior_table.item(i,2*j+1).setText('{:.2f}'.format(means[i,j]))
def update_mean_priors(self):
for i in range(int(self.numberOfComponentsEdit.text())):
for j in range(int(self.numberOfFeaturesEdit.text())):
value = float(self.mean_prior_table.item(i,j*2+1).text())
if not self.mean_prior_table.item(i,2*j):
item = QtWidgets.QTableWidgetItem('{:.2f}'.format(value))
item.setTextAlignment(QtCore.Qt.AlignCenter)
self.mean_prior_table.setItem(i,2*j,item)
else:
self.mean_prior_table.item(i,2*j).setText('{:.2f}'.format(value))
def covar_prior_table_initialize(self,text):
ncomp = int(text)
nfeatures = int(self.numberOfFeaturesEdit.text())
self.covarTab.clear()
if nfeatures > 3:
self.mean_prior_table.clear()
self.raise_error('Dimension > 3 not supported')
else:
for i in range(int(ncomp)+1):
covar_prior_table = QtWidgets.QTableWidget()
covar_prior_table.setColumnCount(nfeatures)
for j in range(nfeatures):
header_item = QtWidgets.QTableWidgetItem('C{}'.format(j))
covar_prior_table.setHorizontalHeaderItem(j,header_item)
covar_prior_table.horizontalHeader().setSectionResizeMode(QtWidgets.QHeaderView.Stretch)
covar_prior_table.horizontalHeader().setBackgroundRole(QtGui.QPalette.Highlight)
covar_prior_table.setMinimumHeight(200)
covar_prior_table.setRowCount(nfeatures)
for j in range(nfeatures):
item = QtWidgets.QTableWidgetItem('R{}'.format(j))
item.setTextAlignment(QtCore.Qt.AlignCenter)
covar_prior_table.setVerticalHeaderItem(j,item)
if i == 0:
self.covarTab.addTab(covar_prior_table,"Prior")
else:
icon_pm = QtGui.QPixmap(50,50)
icon_pm.fill(QtGui.QColor(self.fit_colors[i-1]))
icon = QtGui.QIcon(icon_pm)
self.covarTab.addTab(covar_prior_table,icon,"{}".format(i))
def covar_prior_table_change_features(self,text):
nfeatures = int(text)
self.covarTab.clear()
if nfeatures > 3:
self.mean_prior_table.clear()
self.raise_error('Dimension > 3 not supported')
else:
| |
j: float, k: float) -> float: ...',
'def one(i: float) -> float: ...',
'def one_None(i: float) -> None: ...',
]
assert ti.pretty_format(__file__) == '\n'.join(expected)
def test_typeshed_complex():
    """Tests functions that take and return complex."""
    # NOTE: local function names appear verbatim in the inferencer's stub
    # output below, so they must not be renamed.
    def one(i): return i
    def one_None(i): return None
    def many(i, j, k): return i * j * j
    with type_inferencer.TypeInferencer() as ti:
        one((4. + 0j))
        one_None((4. + 0j))
        many((1. + 0j), (2. + 0j), (3. + 0j))
    # Output is sorted by function name.
    expected = [
        'def many(i: complex, j: complex, k: complex) -> complex: ...',
        'def one(i: complex) -> complex: ...',
        'def one_None(i: complex) -> None: ...',
    ]
    assert ti.pretty_format(__file__) == '\n'.join(expected)
def test_typeshed_mixed_union():
    """Tests a function called with int, float and complex arguments so its
    inferred signature becomes a union of those types."""
    def one(i): return i
    with type_inferencer.TypeInferencer() as ti:
        one(4)
        one(4.)
        one((4. + 0j))
    # The argument union is rendered comma-separated; the return as Union[...].
    expected = [
        'def one(i: complex, float, int) -> Union[complex, float, int]: ...',
    ]
    assert ti.pretty_format(__file__) == '\n'.join(expected)
# ---- END:Test numbers ---
# ==== END: Test some functions that take basic builtins ====
# def test_typeshed_base64():
# """From the typeshed: https://github.com/python/typeshed/blob/master/stdlib/2and3/base64.pyi
#
# def b64decode(s: _decodable, altchars: bytes = ...,
# validate: bool = ...) -> bytes: ...
# def b64encode(s: _encodable, altchars: bytes = ...) -> bytes: ...
#
# def decode(input: IO[bytes], output: IO[bytes]) -> None: ...
# def decodebytes(s: bytes) -> bytes: ...
# def decodestring(s: bytes) -> bytes: ...
# def encode(input: IO[bytes], output: IO[bytes]) -> None: ...
# def encodebytes(s: bytes) -> bytes: ...
# def encodestring(s: bytes) -> bytes: ...
# """
# with type_inferencer.TypeInferencer() as ti:
# base64.b64encode(b'')
# base64.b64decode(b'')
# # stream_in = io.StringIO()
# # stream_out = io.StringIO()
# # base64.encode(stream_in, stream_out)
# # base64.decode(stream_in, stream_out)
# # base64.encodebytes(b'')
# # base64.decodebytes(b'')
# # base64.decode(stream_in, stream_out)
# # encoded = base64.encodestring(b'')
# # decoded = base64.encodestring(encoded)
# print()
# print('test_typeshed_base64()')
# _pretty_print(ti)
# # def _input_type_check(s: 'bytes') -> NoneType: ...
# # def decode(input: '_io.StringIO', output: '_io.StringIO') -> NoneType: ...
# # def decodebytes(s: 'bytes') -> bytes: ...
# # def encode(input: '_io.StringIO', output: '_io.StringIO') -> NoneType: ...
# # def encodebytes(s: 'bytes') -> bytes: ...
# # def encodestring(s: 'bytes') -> bytes: ...
#
# # pprint.pprint(ti.function_map)
def test_typeshed_io_StringIO():
    """Based on base64 from the typeshed:
    https://github.com/python/typeshed/blob/master/stdlib/2and3/base64.pyi
    Should see stub file of:
    def decode(input: IO[bytes], output: IO[bytes]) -> None: ...
    def encode(input: IO[bytes], output: IO[bytes]) -> None: ...
    """
    # The bodies are irrelevant; only the argument types (io.StringIO) and
    # the implicit None return are being inferred.
    def decode(input, output):
        pass
    def encode(input, output):
        pass
    with type_inferencer.TypeInferencer() as ti:
        stream_in = io.StringIO()
        stream_out = io.StringIO()
        encode(stream_in, stream_out)
        decode(stream_in, stream_out)
    expected = [
        'def decode(input: IO[bytes], output: IO[bytes]) -> None: ...',
        'def encode(input: IO[bytes], output: IO[bytes]) -> None: ...',
    ]
    assert ti.pretty_format(__file__) == '\n'.join(expected)
def test_typeshed_encode_bytes():
    """Based on base64 from the typeshed:
    https://github.com/python/typeshed/blob/master/stdlib/2and3/base64.pyi
    Should see stub file of:
    def decodebytes(s: 'bytes') -> bytes: ...
    def encodebytes(s: 'bytes') -> bytes: ...
    """
    # Identity bodies: both argument and return should be inferred as bytes.
    def decodebytes(s):
        return s
    def encodebytes(s):
        return s
    with type_inferencer.TypeInferencer() as ti:
        encodebytes(b'')
        decodebytes(b'')
    expected = [
        'def decodebytes(s: bytes) -> bytes: ...',
        'def encodebytes(s: bytes) -> bytes: ...',
    ]
    assert ti.pretty_format(__file__) == '\n'.join(expected)
def test_simple_class():
    """A simple class with a constructor and a single method."""
    class Simple:
        def __init__(self, first_name, last_name):
            self.first_name = first_name
            self.last_name = last_name
        def name(self):
            return '{:s}, {:s}'.format(self.last_name, self.first_name)
    with type_inferencer.TypeInferencer() as ti:
        s = Simple('First', 'Last')
        s.name()
    # Methods are rendered nested under their class, indented one level.
    expected = [
        'class Simple:',
        '    def __init__(self, first_name: str, last_name: str) -> None: ...',
        '    def name(self) -> str: ...',
    ]
    # pprint.pprint(ti.function_map)
    # print(ti.pretty_format(__file__))
    assert ti.pretty_format(__file__) == '\n'.join(expected)
def test_class_in_class():
    """A class in a class and a single method."""
    class Outer:
        class Inner:
            def __init__(self, first_name, last_name):
                self.first_name = first_name
                self.last_name = last_name
            def name(self):
                return '{:s}, {:s}'.format(self.last_name, self.first_name)
    with type_inferencer.TypeInferencer() as ti:
        s = Outer.Inner('First', 'Last')
        s.name()
    # Nested classes are rendered with cumulative indentation.
    expected = [
        'class Outer:',
        '    class Inner:',
        '        def __init__(self, first_name: str, last_name: str) -> None: ...',
        '        def name(self) -> str: ...',
    ]
    # pprint.pprint(ti.function_map)
    # print()
    # print(ti.pretty_format(__file__))
    assert ti.pretty_format(__file__) == '\n'.join(expected)
def test_class_in_class_both_have_methods():
    """A nested class where both the outer and the inner class have methods
    that get called, so both must appear in the rendered stub."""
    class Outer:
        def z_function(self, s):
            return s
        class Inner:
            def __init__(self, first_name, last_name):
                self.first_name = first_name
                self.last_name = last_name
            def name(self):
                return '{:s}, {:s}'.format(self.last_name, self.first_name)
    with type_inferencer.TypeInferencer() as ti:
        o = Outer()
        o.z_function(b'')
        s = Outer.Inner('First', 'Last')
        s.name()
    expected = [
        'class Outer:',
        '    def z_function(self, s: bytes) -> bytes: ...',
        '    class Inner:',
        '        def __init__(self, first_name: str, last_name: str) -> None: ...',
        '        def name(self) -> str: ...',
    ]
    # pprint.pprint(ti.function_map)
    # print()
    # print(ti.pretty_format(__file__))
    # print('\n'.join(expected))
    assert ti.pretty_format(__file__) == '\n'.join(expected)
def test_three_nested_classes_only_inner_has_methods():
    """Three nested classes and a single method."""
    class A:
        class B:
            class C:
                def __init__(self, first_name, last_name):
                    self.first_name = first_name
                    self.last_name = last_name
                def name(self):
                    return '{:s}, {:s}'.format(self.last_name, self.first_name)
    with type_inferencer.TypeInferencer() as ti:
        s = A.B.C('First', 'Last')
        s.name()
    # Only called methods are rendered, but every enclosing class appears.
    expected = [
        'class A:',
        '    class B:',
        '        class C:',
        '            def __init__(self, first_name: str, last_name: str) -> None: ...',
        '            def name(self) -> str: ...',
    ]
    # pprint.pprint(ti.function_map)
    # print()
    # print(ti.pretty_format(__file__))
    assert ti.pretty_format(__file__) == '\n'.join(expected)
def test_three_nested_classes_only_inner_has_methods_rev_names():
    """Three nested classes and a single method, names reversed."""
    # Reversed names guard against the renderer accidentally sorting classes
    # alphabetically instead of by nesting.
    class C:
        class B:
            class A:
                def __init__(self, first_name, last_name):
                    self.first_name = first_name
                    self.last_name = last_name
                def name(self):
                    return '{:s}, {:s}'.format(self.last_name, self.first_name)
    with type_inferencer.TypeInferencer() as ti:
        s = C.B.A('First', 'Last')
        s.name()
    expected = [
        'class C:',
        '    class B:',
        '        class A:',
        '            def __init__(self, first_name: str, last_name: str) -> None: ...',
        '            def name(self) -> str: ...',
    ]
    # pprint.pprint(ti.function_map)
    # print()
    # print(ti.pretty_format(__file__))
    assert ti.pretty_format(__file__) == '\n'.join(expected)
def test_three_nested_classes_middle_and_inner_has_methods():
    """Three nested classes where both the middle class (B) and the innermost
    class (C) have methods that get called."""
    class A:
        class B:
            class C:
                def __init__(self, first_name, last_name):
                    self.first_name = first_name
                    self.last_name = last_name
                def name(self):
                    return '{:s}, {:s}'.format(self.last_name, self.first_name)
            def some_function(self, byt): return byt
    with type_inferencer.TypeInferencer() as ti:
        c = A.B.C('First', 'Last')
        c.name()
        b = A.B()
        b.some_function(b'')
    # B's own method is rendered before its nested class C.
    expected = [
        'class A:',
        '    class B:',
        '        def some_function(self, byt: bytes) -> bytes: ...',
        '        class C:',
        '            def __init__(self, first_name: str, last_name: str) -> None: ...',
        '            def name(self) -> str: ...',
    ]
    # pprint.pprint(ti.function_map)
    # print()
    # print(ti.pretty_format(__file__))
    assert ti.pretty_format(__file__) == '\n'.join(expected)
def test_four_nested_classes_A_and_inner_has_methods():
    """Four nested classes where the outermost (A) and the innermost (D)
    have methods that get called."""
    class A:
        def some_function(self, byt): return byt
        class B:
            class C:
                class D:
                    def __init__(self, first_name, last_name):
                        self.first_name = first_name
                        self.last_name = last_name
                    def name(self):
                        return '{:s}, {:s}'.format(self.last_name, self.first_name)
    with type_inferencer.TypeInferencer() as ti:
        d = A.B.C.D('First', 'Last')
        d.name()
        a = A()
        a.some_function(b'')
    expected = [
        'class A:',
        '    def some_function(self, byt: bytes) -> bytes: ...',
        '    class B:',
        '        class C:',
        '            class D:',
        '                def __init__(self, first_name: str, last_name: str) -> None: ...',
        '                def name(self) -> str: ...',
    ]
    # pprint.pprint(ti.function_map)
    # print()
    # print(ti.pretty_format(__file__))
    assert ti.pretty_format(__file__) == '\n'.join(expected)
def test_four_nested_classes_B_and_inner_has_methods():
    """Four nested classes where the second level (B) and the innermost (D)
    have methods that get called."""
    class A:
        class B:
            def some_function(self, byt): return byt
            class C:
                class D:
                    def __init__(self, first_name, last_name):
                        self.first_name = first_name
                        self.last_name = last_name
                    def name(self):
                        return '{:s}, {:s}'.format(self.last_name, self.first_name)
    with type_inferencer.TypeInferencer() as ti:
        d = A.B.C.D('First', 'Last')
        d.name()
        b = A.B()
        b.some_function(b'')
    expected = [
        'class A:',
        '    class B:',
        '        def some_function(self, byt: bytes) -> bytes: ...',
        '        class C:',
        '            class D:',
        '                def __init__(self, first_name: str, last_name: str) -> None: ...',
        '                def name(self) -> str: ...',
    ]
    # print()
    # pprint.pprint(ti.function_map[__file__])
    # print(ti.pretty_format(__file__))
    assert ti.pretty_format(__file__) == '\n'.join(expected)
def test_four_nested_classes_C_and_inner_has_methods():
    """Four nested classes where the third level (C) and the innermost (D)
    have methods that get called."""
    class A:
        class B:
            class C:
                def some_function(self, byt): return byt
                class D:
                    def __init__(self, first_name, last_name):
                        self.first_name = first_name
                        self.last_name = last_name
                    def name(self):
                        return '{:s}, {:s}'.format(self.last_name, self.first_name)
    with type_inferencer.TypeInferencer() as ti:
        d = A.B.C.D('First', 'Last')
        d.name()
        c = A.B.C()
        c.some_function(b'')
    expected = [
        'class A:',
        '    class B:',
        '        class C:',
        '            def some_function(self, byt: bytes) -> bytes: ...',
        '            class D:',
        '                def __init__(self, first_name: str, last_name: str) -> None: ...',
        '                def name(self) -> str: ...',
    ]
    # print()
    # pprint.pprint(ti.function_map[__file__])
    # print(ti.pretty_format(__file__))
    assert ti.pretty_format(__file__) == '\n'.join(expected)
def test_four_nested_classes_all_have_methods():
class A:
def some_function(self, byt): return byt
class B:
def some_function(self, byt): return byt
class C:
def some_function(self, byt): return byt
class D:
def __init__(self, first_name, last_name):
self.first_name = first_name
self.last_name = last_name
def name(self):
return '{:s}, {:s}'.format(self.last_name, self.first_name)
with type_inferencer.TypeInferencer() as ti:
d = A.B.C.D('First', 'Last')
d.name()
c = A.B.C()
c.some_function(b'')
b = A.B()
b.some_function(b'')
a = A()
a.some_function(b'')
expected = [
'class A:',
' def some_function(self, byt: bytes) -> bytes: ...',
' class B:',
' def some_function(self, byt: bytes) -> bytes: ...',
' class C:',
' def some_function(self, byt: bytes) -> bytes: ...',
' class D:',
' | |
# Source file: barajar/barajar.py (dataset extraction marker converted to a comment)
import random
import warnings
from _io import TextIOWrapper
# Module-wide configuration for line parsing and indentation handling.
virg = ','  # separator used when rendering linha reprs
pyignore = '#'  # Python comment marker; alternatives: '"""', "'''"
bloco = '#@B', '#@A'  # markers that open ('#@B') and close ('#@A') a block
recuo = {
    None: [4, 1],  # the index of unit sizes, in decreasing order
    '\t': 4,
    ' ': 1,
    1: ' ',
    4: '\t',
    0: pyignore  # comment prefixes are flagged by the key 0
}
def recua(r=0, valores=recuo):
    '''Given an integer starting tab position ``r`` and the dictionary
    ``valores`` mapping integers to indentation characters, return a string
    equivalent to the given number, guided by the size index stored under
    the ``None`` key.'''
    # r += contarecuo(p,r,valores)
    t = ''
    try:
        # Greedily consume r using the largest units first (tabs, then spaces).
        for l in valores[None]:
            t += valores[l]*(r//l)
            r %= l
    except TypeError:
        warnings.warn('Os valores para recuo devem ser iteráveis e indicar os números das strings')
    except KeyError:
        warnings.warn('Os valores para recuo devem ter índice numérico ordenado coerente.')
    return t
def contarecuo(p, r=0, valores=recuo):
    '''Return the indentation of line ``p`` added to the starting tab count
    ``r`` according to the dictionary of character values -- unless ``p``
    already carries a precomputed indent (a ``linha`` object), in which case
    that is returned directly.'''
    try:
        if type(p) != str:
            # linha instances cache their indent in .recuo.
            return p.recuo
        for c in p:
            r += valores[c]
    except AttributeError:
        # Not a string and no .recuo: assume a sequence and recurse on its head.
        if p != None:
            return contarecuo(p[0], r, valores)
    except KeyError:
        # First non-indentation character ends the count.
        pass
    return r
def blocos(prog, novo=False, id=None, abre=bloco[0], fecha=bloco[1]):
    '''Split the list of lines ``prog`` (the list being copied when ``novo``
    is true), assigning ``id`` when it is not None, opening and closing blocks
    according to the presence of the given substrings.
    The id assigned is the tuple of the given value preceded by the list
    index, when that value is not None.'''
    if novo or type(prog) != list:
        prog = linhas(prog)
        novo = True
    b = c = 0
    while c < len(prog):
        ln = prog[c]
        try:
            # Markers are matched case-insensitively by upper-casing the line.
            ln = ln.upper()
            if id != None:
                prog[c].__id__((c, id))
        except AttributeError:
            pass
        if abre in ln:
            # Opening marker: start a nested sublist at this position.
            prog[c] = [prog[c]]
            b = c
            c += 1
            novo = False
        elif novo:
            c += 1
        else:
            # Inside an open block: fold lines into the sublist; the closing
            # marker line itself is also folded in.
            if fecha in ln:
                novo = True
            prog[b].append(prog.pop(c))
    # c += novo
    return prog
def texto(prog, r=0, cab='', sep='\n', ind=recuo):
    '''Return the text of the list of lines ``prog`` with the overall integer
    indent ``r`` using the characters of the dictionary ``ind``, the header
    string ``cab`` and ``sep`` as line separator.'''
    for ln in prog:
        if type(r) != list:
            # Wrap r in a single-element list so nested recursive calls share
            # (and mutate) the same running indent.
            r = [contarecuo(ln, r, ind)]
        if type(ln) == list:
            cab += texto(ln, r, sep=sep, ind=ind)
        else:
            cab += recua(r[0], ind) + str(ln.linha()) + sep
            r[0] += ln.indentar()
    return cab
def linhas(prog, prox=None, id=None, ind=recuo, carrega=True):
    '''Initialise the list (and the linked-list references) ``prog``,
    linking the line ``prox`` at the end; if ``id`` != None assign it, and
    count the integer indent using the indentation dictionary ``ind``.
    The id assigned is the tuple of the given value preceded by the list
    index, when that value is not None.
    Finally, ``carrega`` (True by default) decides whether the complete
    linked list is assembled (or not) on the first line.'''
    if type(prog) == TextIOWrapper:
        p = prog.read()
        # prog.close()
        prog = p
    if type(prog) == str:
        prog = prog.splitlines()
    else:
        prog = list(prog)
    # Build back-to-front so each linha can link forward to the next one.
    c = len(prog)
    while c > 0:
        c += -1
        prox = linha(prog[c], prox, ind)
        prog[c] = prox
        if id != None:
            prog[c].__id__((c, id))
    if len(prog) > 0:
        prog[0].list(carrega)
    return prog
class linha:
def linha(self, ln=None, ind=recuo):
    '''Getter and setter for the ``.ln`` attribute with the indentation
    stripped, also computing ``.recuo`` from the character dictionary (or
    final integer value) ``ind``; the indent is copied from the next line
    when this one has no significant content (fully commented or completely
    empty). The original value is kept in the ``.cru`` attribute.'''
    if ln == None:
        try:
            return self.ln
        except AttributeError:
            # No value stored yet: fall through and initialise from None.
            pass
    elif self.__class__ == ln.__class__:
        # Copying from another linha: inherit id/ind and unwrap its raw text.
        if self.id == None:
            self.id = ln.id
        if ind == recuo:
            ind = ln.ind
        ln = ln.cru
    fonte = str(ln)
    self.recuo = c = 0
    self.cru = self.ln = ln
    self.ind = ind
    # Delegate common string methods straight to the raw text.
    self.lower = fonte.lower
    self.upper = fonte.upper
    self.title = fonte.title
    self.split = fonte.split
    self.splitlines = fonte.splitlines
    try:
        # Accumulate the indent value of each leading whitespace character.
        while c < len(ln):
            self.recuo += ind[ln[c]]
            c += 1
    except KeyError:
        # First non-indent character: c now marks where the content starts.
        pass
    except TypeError:
        # ind is not subscriptable (or ln has no len); an int ind is taken
        # as the indent itself.
        if type(ind) == int:
            self.recuo = ind
        return
    try:
        self.ln = ln[c:]
        ln = 0
        # Lines starting with a comment prefix (ind[0]) count as insignificant.
        for c in ind[0]:
            if self.find(c) == 0:
                ln = self.__len__()
                break
    except KeyError:
        pass
    except:
        warnings.warn('Ocorreu algum problema inesperado nas linhas anteriores!',RuntimeWarning)
    if self.__len__() == ln:
        # Empty or fully-commented line: borrow the next line's indent.
        self.recuo = contarecuo(self.seguinte())  # self.prox.recuo
def anterior(self, previous=None, update=True):
    '''Setter and getter of the link to the previous line; by default it also
    tries to update that line's forward link back to this one.
    Warning: the forward link is NOT updated here when that is explicitly
    disabled, which can cause inconsistencies if this method is used alone.'''
    try:
        if previous == None:
            return self.prev
        if update:
            previous.seguinte(self)
    except AttributeError:
        # No .prev yet, or previous has no .seguinte: fall through to assign.
        pass
    self.prev = previous
def seguinte(self, next=None):
    '''Setter and getter of the link to the next line.
    Warning: the complete linked list is not refreshed here; a reload must
    be requested explicitly (see ``list``).'''
    try:
        if next == None:
            return self.prox
        # Keep the back-link consistent without recursing into seguinte again.
        next.anterior(self, False)
    except AttributeError:
        pass
    self.prox = next
def list(self, refresh=False):
    '''Getter and updater (when ``bool(refresh) == True``) of the finite
    linked list of all following lines.
    The list ends at a None link or when ``self`` is reached a second time.
    The return of the ``.seguinte`` method is treated as the link, not the
    ``.prox`` attribute.
    If ``refresh`` is a list, ``.lista`` is rebound to that reference.'''
    i = 0
    # NOTE: bare ``list`` here resolves to the builtin, not this method.
    if list == type(refresh):
        self.lista = refresh
    if refresh:
        # self.lista.clear()
        prox = self
        while prox != None:
            # Insert only where the cached list disagrees with the chain.
            if i >= len(self.lista) or self.lista[i] != prox:
                self.lista.insert(i, prox)
            i += 1
            try:
                prox = prox.seguinte()
                if self == prox:
                    # Circular list: stop at the second visit of self.
                    break
            except AttributeError:
                break
        # Drop any stale tail entries left from a previous, longer chain.
        while i < len(self.lista):
            self.lista.pop(i)
    return self.lista
def add(self, l, refresh=None):
    '''Append the line at the end of the list (for a cyclic list the true
    ``self.anterior()`` counts as the last element) and refresh the full
    linked list, if requested.'''
    self.append(l)
    self.list(refresh)
def append(self, l):
    '''Append the line ``l`` at the end of the list.
    If the list turns out to be circular (``self`` is met again), insert
    immediately before it.'''
    self.insert(True, l)
def insert(self, i, *l):
    '''Insert the lines ``l`` before the given index ``i``.
    The objects in ``l`` must have the ``.seguinte`` method; if one does not,
    the continuity of the list is cut at its position.
    The precision of the cut is not guaranteed when an object differs from None.
    If ``i`` is the bool True, insert after the last line (in a circular list
    that is the true ``self.anterior()``).
    There is no implicit refresh of the full linked list.'''
    seguinte = self
    anterior = self.anterior()
    # Negative index: walk backwards from self.
    while i < 0 and anterior != None:
        i += 1
        seguinte = anterior
        anterior = anterior.anterior()
    # Positive index (or True): walk forwards from self.
    while i > 0 and seguinte != None:
        if type(i) != bool:
            i -= 1
        elif self == seguinte:
            # i is True and we are back at self: stop before the cycle repeats.
            break
        anterior = seguinte
        seguinte = seguinte.seguinte()
    for ln in l:
        try:
            if seguinte != None:  # this redundancy guarantees the list is cut if ln.seguinte does not exist / cannot run
                seguinte.anterior(ln)
            if anterior != None:
                anterior.seguinte(ln)
            ln.seguinte(seguinte)
        except AttributeError:
            pass
        except:
            warnings.warn('Ocorreu algum problema inesperado nas atribuições da sequência;')
        anterior = ln
    '''
    refresh = self.list(refresh)
    if i >= len(refresh):
        i = len(refresh)
    if i >= 0:
        if i:
            anterior = refresh[i-1]
            anterior.seguinte(l)
        self.list(refresh).insert(i,l)'''
def indentar(self, r=0):
    '''Return the difference between the next line's indent and this line's
    indent; if either of the two indents is missing, return the argument ``r``.'''
    try:
        return self.prox.recuo - self.recuo
    except AttributeError:
        return r
def index(self, *item):
    '''No error handling;
    more information, if any, in self.ln.index.__doc__
    '''
    return self.ln.index(*item)
def find(self, *item):
    '''Return the index of the item according to ``.ln.index``.
    If any problem is found, return -1;
    if ``.ln`` has no ``.index``, return -2.'''
    try:
        return self.index(*item)
        # return self.ln.find(*item)
    except AttributeError:
        return -2
    except Exception:
        # Was a bare ``except:``, which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed so those now propagate. ValueError
        # (item not found) and similar lookup failures still yield -1.
        return -1
def startswith(self, prefixo, partida=0, limite=None, passo=1):
    '''Return True if the prefix occurs at index equal to ``partida`` (0 by
    default) in ``.ln`` (when it is a string).
    When ``.ln.__class__ != str``, check whether some element between
    ``partida`` and ``limite`` (``__len__`` by default), stepping by ``passo``
    (1 by default), has the prefix at index 0 or at ``partida``. The addition
    of the index with the step is not protected against exceptions.
    If nothing is found, return ``self.find(prefixo) == partida``.'''
    try:
        # if len(prefixo)>limite-partida:
        #     return
        # except TypeError:
        return self.ln.startswith(prefixo, partida, limite)
    except AttributeError:
        if limite == None:
            limite = self.__len__()
        p = partida
        while p < limite:
            try:
                # NOTE(review): relies on self[p]; no __getitem__ is visible
                # in this class as shown -- confirm where it comes from.
                i = self[p].index(prefixo)  # startswith(prefixo)
                if i == 0 or i == partida:
                    return True
            except AttributeError:
                pass
            except ValueError:
                pass
            p += passo
        return self.find(prefixo)==partida;
def __contains__(self, item):
    '''item in self.ln
    Exceptionally returns None (on any error).
    '''
    try:
        return self.ln.__contains__(item)
    except Exception:
        return
def __hash__(self):
    '''Hash of the raw line, when ``.cru`` has ``.__hash__``;
    otherwise return the hash of the indentation delta, when the following
    line has an indent and, failing that, use the hash of the (integer)
    indent itself.'''
    try:
        return self.cru.__hash__()
    except TypeError:
        # Unhashable raw value: fall back on indentar (which itself falls
        # back to self.recuo when there is no next line).
        return self.indentar(self.recuo).__hash__()
def __str__(self):
    '''str(self.cru) -- the raw, unstripped line.
    '''
    return self.cru.__str__()
def __repr__(self, link=False):
    '''Return the textual representation of the refined line (``.ln``); when
    ``link != 0``, also include the next ``link`` lines (``link < 0`` means
    all following lines, with no circular-list protection).
    Re-initialising from this representation loses the original indent
    dictionary ``.ind``, and also loses the id when the linked list is shown.'''
    s = virg
    if link and self.prox != None:
        # Recurse with the counter decremented (negative counters never stop).
        s += '\n' + ' '*self.recuo + self.prox.__repr__(link - (link > 0)) + virg
    else:
        if self.id != None:
            s += ' id = ' + str(self.id) + virg
    s += 'identation='
    return self.__class__.__name__ + '(' + self.ln.__repr__() + s + '%d)'%self.recuo
def __reversed__(self, refresh=None):
    ''' self.list(refresh).__reversed__()'''
    return self.list(refresh).__reversed__()
def __iter__(self, refresh=None):
    ''' self.list(refresh).__iter__()'''
    return self.list(refresh).__iter__()
def __init__ (self, valor = None, prox = |
# (dataset-viewer residue, not part of the source: "Subsets and Splits /
# No community queries yet / The top public SQL queries from the community
# will appear here once available." -- commented out so the file stays valid.)